Upstream version 7.36.149.0
[platform/framework/web/crosswalk.git] / src / content / common / gpu / media / dxva_video_decode_accelerator.cc
1 // Copyright (c) 2012 The Chromium Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4
5 #include "content/common/gpu/media/dxva_video_decode_accelerator.h"
6
7 #if !defined(OS_WIN)
8 #error This file should only be built on Windows.
9 #endif   // !defined(OS_WIN)
10
11 #include <ks.h>
12 #include <codecapi.h>
13 #include <mfapi.h>
14 #include <mferror.h>
15 #include <wmcodecdsp.h>
16
17 #include "base/bind.h"
18 #include "base/callback.h"
19 #include "base/command_line.h"
20 #include "base/debug/trace_event.h"
21 #include "base/logging.h"
22 #include "base/memory/scoped_handle.h"
23 #include "base/memory/scoped_ptr.h"
24 #include "base/memory/shared_memory.h"
25 #include "base/message_loop/message_loop.h"
26 #include "base/win/windows_version.h"
27 #include "media/video/video_decode_accelerator.h"
28 #include "ui/gl/gl_bindings.h"
29 #include "ui/gl/gl_surface_egl.h"
30 #include "ui/gl/gl_switches.h"
31
32 namespace content {
33
// We only request 5 picture buffers from the client which are used to hold the
// decoded samples. These buffers are then reused when the client tells us that
// it is done with the buffer.
static const int kNumPictureBuffers = 5;

// Logs |log| via DLOG(ERROR) and returns |ret| from the calling function when
// |result| evaluates to false.
#define RETURN_ON_FAILURE(result, log, ret)  \
  do {                                       \
    if (!(result)) {                         \
      DLOG(ERROR) << log;                    \
      return ret;                            \
    }                                        \
  } while (0)

// HRESULT flavor of RETURN_ON_FAILURE: triggers on FAILED(result) and appends
// the failing HRESULT value in hex to the log message.
#define RETURN_ON_HR_FAILURE(result, log, ret)                    \
  RETURN_ON_FAILURE(SUCCEEDED(result),                            \
                    log << ", HRESULT: 0x" << std::hex << result, \
                    ret);

// Like RETURN_ON_FAILURE, but also notifies the client of the failure by
// calling StopOnError(error_code) before returning.
#define RETURN_AND_NOTIFY_ON_FAILURE(result, log, error_code, ret)  \
  do {                                                              \
    if (!(result)) {                                                \
      DVLOG(1) << log;                                              \
      StopOnError(error_code);                                      \
      return ret;                                                   \
    }                                                               \
  } while (0)

// HRESULT flavor of RETURN_AND_NOTIFY_ON_FAILURE.
#define RETURN_AND_NOTIFY_ON_HR_FAILURE(result, log, error_code, ret)  \
  RETURN_AND_NOTIFY_ON_FAILURE(SUCCEEDED(result),                      \
                               log << ", HRESULT: 0x" << std::hex << result, \
                               error_code, ret);

// Maximum number of iterations we allow before aborting the attempt to flush
// the batched queries to the driver and allow torn/corrupt frames to be
// rendered.
enum { kMaxIterationsForD3DFlush = 10 };
70
71 static IMFSample* CreateEmptySample() {
72   base::win::ScopedComPtr<IMFSample> sample;
73   HRESULT hr = MFCreateSample(sample.Receive());
74   RETURN_ON_HR_FAILURE(hr, "MFCreateSample failed", NULL);
75   return sample.Detach();
76 }
77
78 // Creates a Media Foundation sample with one buffer of length |buffer_length|
79 // on a |align|-byte boundary. Alignment must be a perfect power of 2 or 0.
80 static IMFSample* CreateEmptySampleWithBuffer(int buffer_length, int align) {
81   CHECK_GT(buffer_length, 0);
82
83   base::win::ScopedComPtr<IMFSample> sample;
84   sample.Attach(CreateEmptySample());
85
86   base::win::ScopedComPtr<IMFMediaBuffer> buffer;
87   HRESULT hr = E_FAIL;
88   if (align == 0) {
89     // Note that MFCreateMemoryBuffer is same as MFCreateAlignedMemoryBuffer
90     // with the align argument being 0.
91     hr = MFCreateMemoryBuffer(buffer_length, buffer.Receive());
92   } else {
93     hr = MFCreateAlignedMemoryBuffer(buffer_length,
94                                      align - 1,
95                                      buffer.Receive());
96   }
97   RETURN_ON_HR_FAILURE(hr, "Failed to create memory buffer for sample", NULL);
98
99   hr = sample->AddBuffer(buffer);
100   RETURN_ON_HR_FAILURE(hr, "Failed to add buffer to sample", NULL);
101
102   return sample.Detach();
103 }
104
105 // Creates a Media Foundation sample with one buffer containing a copy of the
106 // given Annex B stream data.
107 // If duration and sample time are not known, provide 0.
108 // |min_size| specifies the minimum size of the buffer (might be required by
109 // the decoder for input). If no alignment is required, provide 0.
110 static IMFSample* CreateInputSample(const uint8* stream, int size,
111                                     int min_size, int alignment) {
112   CHECK(stream);
113   CHECK_GT(size, 0);
114   base::win::ScopedComPtr<IMFSample> sample;
115   sample.Attach(CreateEmptySampleWithBuffer(std::max(min_size, size),
116                                             alignment));
117   RETURN_ON_FAILURE(sample, "Failed to create empty sample", NULL);
118
119   base::win::ScopedComPtr<IMFMediaBuffer> buffer;
120   HRESULT hr = sample->GetBufferByIndex(0, buffer.Receive());
121   RETURN_ON_HR_FAILURE(hr, "Failed to get buffer from sample", NULL);
122
123   DWORD max_length = 0;
124   DWORD current_length = 0;
125   uint8* destination = NULL;
126   hr = buffer->Lock(&destination, &max_length, &current_length);
127   RETURN_ON_HR_FAILURE(hr, "Failed to lock buffer", NULL);
128
129   CHECK_EQ(current_length, 0u);
130   CHECK_GE(static_cast<int>(max_length), size);
131   memcpy(destination, stream, size);
132
133   hr = buffer->Unlock();
134   RETURN_ON_HR_FAILURE(hr, "Failed to unlock buffer", NULL);
135
136   hr = buffer->SetCurrentLength(size);
137   RETURN_ON_HR_FAILURE(hr, "Failed to set buffer length", NULL);
138
139   return sample.Detach();
140 }
141
142 static IMFSample* CreateSampleFromInputBuffer(
143     const media::BitstreamBuffer& bitstream_buffer,
144     DWORD stream_size,
145     DWORD alignment) {
146   base::SharedMemory shm(bitstream_buffer.handle(), true);
147   RETURN_ON_FAILURE(shm.Map(bitstream_buffer.size()),
148                     "Failed in base::SharedMemory::Map", NULL);
149
150   return CreateInputSample(reinterpret_cast<const uint8*>(shm.memory()),
151                            bitstream_buffer.size(),
152                            stream_size,
153                            alignment);
154 }
155
// Maintains information about a DXVA picture buffer, i.e. whether it is
// available for rendering, the texture information, etc.
struct DXVAVideoDecodeAccelerator::DXVAPictureBuffer {
 public:
  // Creates a picture buffer backed by an EGL pbuffer surface plus a D3D
  // texture sharing its storage for the client-provided |buffer|.
  // Returns NULL on failure.
  static linked_ptr<DXVAPictureBuffer> Create(
      const DXVAVideoDecodeAccelerator& decoder,
      const media::PictureBuffer& buffer,
      EGLConfig egl_config);
  ~DXVAPictureBuffer();

  // Releases the texture binding and marks the buffer available again.
  void ReusePictureBuffer();
  // Copies the output sample data to the picture buffer provided by the
  // client.
  // The dest_surface parameter contains the decoded bits.
  bool CopyOutputSampleDataToPictureBuffer(
      const DXVAVideoDecodeAccelerator& decoder,
      IDirect3DSurface9* dest_surface);

  // True when the buffer is free to receive a newly decoded frame.
  bool available() const {
    return available_;
  }

  void set_available(bool available) {
    available_ = available;
  }

  // Id of the client-provided picture buffer.
  int id() const {
    return picture_buffer_.id();
  }

  gfx::Size size() const {
    return picture_buffer_.size();
  }

 private:
  explicit DXVAPictureBuffer(const media::PictureBuffer& buffer);

  bool available_;
  media::PictureBuffer picture_buffer_;
  // EGL pbuffer surface into which decoded frames are copied.
  EGLSurface decoding_surface_;
  // D3D texture whose storage is shared with |decoding_surface_|.
  base::win::ScopedComPtr<IDirect3DTexture9> decoding_texture_;
  // Set to true if RGB is supported by the texture.
  // Defaults to true.
  bool use_rgb_;

  DISALLOW_COPY_AND_ASSIGN(DXVAPictureBuffer);
};
203
// static
linked_ptr<DXVAVideoDecodeAccelerator::DXVAPictureBuffer>
DXVAVideoDecodeAccelerator::DXVAPictureBuffer::Create(
    const DXVAVideoDecodeAccelerator& decoder,
    const media::PictureBuffer& buffer,
    EGLConfig egl_config) {
  linked_ptr<DXVAPictureBuffer> picture_buffer(new DXVAPictureBuffer(buffer));

  EGLDisplay egl_display = gfx::GLSurfaceEGL::GetHardwareDisplay();

  // Find out whether the config binds to an RGB texture; this determines both
  // the pbuffer texture format and the D3D texture format below. Defaults to
  // RGB when the query fails (return value is deliberately ignored).
  EGLint use_rgb = 1;
  eglGetConfigAttrib(egl_display, egl_config, EGL_BIND_TO_TEXTURE_RGB,
                     &use_rgb);

  EGLint attrib_list[] = {
    EGL_WIDTH, buffer.size().width(),
    EGL_HEIGHT, buffer.size().height(),
    EGL_TEXTURE_FORMAT, use_rgb ? EGL_TEXTURE_RGB : EGL_TEXTURE_RGBA,
    EGL_TEXTURE_TARGET, EGL_TEXTURE_2D,
    EGL_NONE
  };

  // Pbuffer surface that will later be bound to the client's GL texture.
  picture_buffer->decoding_surface_ = eglCreatePbufferSurface(
      egl_display,
      egl_config,
      attrib_list);
  RETURN_ON_FAILURE(picture_buffer->decoding_surface_,
                    "Failed to create surface",
                    linked_ptr<DXVAPictureBuffer>(NULL));

  // Obtain the D3D share handle backing the ANGLE surface so a texture on
  // the decoder's own device can share the same storage.
  HANDLE share_handle = NULL;
  EGLBoolean ret = eglQuerySurfacePointerANGLE(
      egl_display,
      picture_buffer->decoding_surface_,
      EGL_D3D_TEXTURE_2D_SHARE_HANDLE_ANGLE,
      &share_handle);

  RETURN_ON_FAILURE(share_handle && ret == EGL_TRUE,
                    "Failed to query ANGLE surface pointer",
                    linked_ptr<DXVAPictureBuffer>(NULL));

  // Create a render-target texture on the decoder's device backed by
  // |share_handle|, i.e. aliasing the ANGLE surface's storage.
  HRESULT hr = decoder.device_->CreateTexture(
      buffer.size().width(),
      buffer.size().height(),
      1,
      D3DUSAGE_RENDERTARGET,
      use_rgb ? D3DFMT_X8R8G8B8 : D3DFMT_A8R8G8B8,
      D3DPOOL_DEFAULT,
      picture_buffer->decoding_texture_.Receive(),
      &share_handle);

  RETURN_ON_HR_FAILURE(hr, "Failed to create texture",
                       linked_ptr<DXVAPictureBuffer>(NULL));
  picture_buffer->use_rgb_ = !!use_rgb;
  return picture_buffer;
}
260
// Newly created buffers start out available; the surface and texture members
// are filled in afterwards by Create().
DXVAVideoDecodeAccelerator::DXVAPictureBuffer::DXVAPictureBuffer(
    const media::PictureBuffer& buffer)
    : available_(true),
      picture_buffer_(buffer),
      decoding_surface_(NULL),
      use_rgb_(true) {
}
268
269 DXVAVideoDecodeAccelerator::DXVAPictureBuffer::~DXVAPictureBuffer() {
270   if (decoding_surface_) {
271     EGLDisplay egl_display = gfx::GLSurfaceEGL::GetHardwareDisplay();
272
273     eglReleaseTexImage(
274         egl_display,
275         decoding_surface_,
276         EGL_BACK_BUFFER);
277
278     eglDestroySurface(
279         egl_display,
280         decoding_surface_);
281     decoding_surface_ = NULL;
282   }
283 }
284
285 void DXVAVideoDecodeAccelerator::DXVAPictureBuffer::ReusePictureBuffer() {
286   DCHECK(decoding_surface_);
287   EGLDisplay egl_display = gfx::GLSurfaceEGL::GetHardwareDisplay();
288   eglReleaseTexImage(
289     egl_display,
290     decoding_surface_,
291     EGL_BACK_BUFFER);
292   set_available(true);
293 }
294
295 bool DXVAVideoDecodeAccelerator::DXVAPictureBuffer::
296     CopyOutputSampleDataToPictureBuffer(
297         const DXVAVideoDecodeAccelerator& decoder,
298         IDirect3DSurface9* dest_surface) {
299   DCHECK(dest_surface);
300
301   D3DSURFACE_DESC surface_desc;
302   HRESULT hr = dest_surface->GetDesc(&surface_desc);
303   RETURN_ON_HR_FAILURE(hr, "Failed to get surface description", false);
304
305   D3DSURFACE_DESC texture_desc;
306   decoding_texture_->GetLevelDesc(0, &texture_desc);
307
308   if (texture_desc.Width != surface_desc.Width ||
309       texture_desc.Height != surface_desc.Height) {
310     NOTREACHED() << "Decode surface of different dimension than texture";
311     return false;
312   }
313
314   hr = decoder.d3d9_->CheckDeviceFormatConversion(
315       D3DADAPTER_DEFAULT, D3DDEVTYPE_HAL, surface_desc.Format,
316       use_rgb_ ? D3DFMT_X8R8G8B8 : D3DFMT_A8R8G8B8);
317   RETURN_ON_HR_FAILURE(hr, "Device does not support format converision", false);
318
319   // This function currently executes in the context of IPC handlers in the
320   // GPU process which ensures that there is always an OpenGL context.
321   GLint current_texture = 0;
322   glGetIntegerv(GL_TEXTURE_BINDING_2D, &current_texture);
323
324   glBindTexture(GL_TEXTURE_2D, picture_buffer_.texture_id());
325
326   glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
327
328   base::win::ScopedComPtr<IDirect3DSurface9> d3d_surface;
329   hr = decoding_texture_->GetSurfaceLevel(0, d3d_surface.Receive());
330   RETURN_ON_HR_FAILURE(hr, "Failed to get surface from texture", false);
331
332   hr = decoder.device_->StretchRect(
333       dest_surface, NULL, d3d_surface, NULL, D3DTEXF_NONE);
334   RETURN_ON_HR_FAILURE(hr, "Colorspace conversion via StretchRect failed",
335                         false);
336
337   // Ideally, this should be done immediately before the draw call that uses
338   // the texture. Flush it once here though.
339   hr = decoder.query_->Issue(D3DISSUE_END);
340   RETURN_ON_HR_FAILURE(hr, "Failed to issue END", false);
341
342   // The DXVA decoder has its own device which it uses for decoding. ANGLE
343   // has its own device which we don't have access to.
344   // The above code attempts to copy the decoded picture into a surface
345   // which is owned by ANGLE. As there are multiple devices involved in
346   // this, the StretchRect call above is not synchronous.
347   // We attempt to flush the batched operations to ensure that the picture is
348   // copied to the surface owned by ANGLE.
349   // We need to do this in a loop and call flush multiple times.
350   // We have seen the GetData call for flushing the command buffer fail to
351   // return success occassionally on multi core machines, leading to an
352   // infinite loop.
353   // Workaround is to have an upper limit of 10 on the number of iterations to
354   // wait for the Flush to finish.
355   int iterations = 0;
356   while ((decoder.query_->GetData(NULL, 0, D3DGETDATA_FLUSH) == S_FALSE) &&
357          ++iterations < kMaxIterationsForD3DFlush) {
358     Sleep(1);  // Poor-man's Yield().
359   }
360   EGLDisplay egl_display = gfx::GLSurfaceEGL::GetHardwareDisplay();
361   eglBindTexImage(
362       egl_display,
363       decoding_surface_,
364       EGL_BACK_BUFFER);
365   glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
366   glBindTexture(GL_TEXTURE_2D, current_texture);
367   return true;
368 }
369
// Associates a decoded output |sample| with the id of the input bitstream
// buffer that produced it. Attach() adopts |sample|'s reference without
// AddRef'ing, so this constructor takes ownership of the caller's reference.
DXVAVideoDecodeAccelerator::PendingSampleInfo::PendingSampleInfo(
    int32 buffer_id, IMFSample* sample)
    : input_buffer_id(buffer_id) {
  output_sample.Attach(sample);
}

DXVAVideoDecodeAccelerator::PendingSampleInfo::~PendingSampleInfo() {}
377
// static
bool DXVAVideoDecodeAccelerator::CreateD3DDevManager() {
  TRACE_EVENT0("gpu", "DXVAVideoDecodeAccelerator_CreateD3DDevManager");

  HRESULT hr = Direct3DCreate9Ex(D3D_SDK_VERSION, d3d9_.Receive());
  RETURN_ON_HR_FAILURE(hr, "Direct3DCreate9Ex failed", false);

  // The device exists to host DXVA decoding; a minimal 1x1 windowed back
  // buffer attached to the shell window is sufficient.
  D3DPRESENT_PARAMETERS present_params = {0};
  present_params.BackBufferWidth = 1;
  present_params.BackBufferHeight = 1;
  present_params.BackBufferFormat = D3DFMT_UNKNOWN;
  present_params.BackBufferCount = 1;
  present_params.SwapEffect = D3DSWAPEFFECT_DISCARD;
  present_params.hDeviceWindow = ::GetShellWindow();
  present_params.Windowed = TRUE;
  present_params.Flags = D3DPRESENTFLAG_VIDEO;
  present_params.FullScreen_RefreshRateInHz = 0;
  present_params.PresentationInterval = 0;

  hr = d3d9_->CreateDeviceEx(D3DADAPTER_DEFAULT,
                             D3DDEVTYPE_HAL,
                             ::GetShellWindow(),
                             D3DCREATE_FPU_PRESERVE |
                             D3DCREATE_SOFTWARE_VERTEXPROCESSING |
                             D3DCREATE_DISABLE_PSGP_THREADING |
                             D3DCREATE_MULTITHREADED,
                             &present_params,
                             NULL,
                             device_.Receive());
  RETURN_ON_HR_FAILURE(hr, "Failed to create D3D device", false);

  // Wrap the device in a device manager; InitDecoder() hands this manager to
  // the MFT via MFT_MESSAGE_SET_D3D_MANAGER.
  hr = DXVA2CreateDirect3DDeviceManager9(&dev_manager_reset_token_,
                                         device_manager_.Receive());
  RETURN_ON_HR_FAILURE(hr, "DXVA2CreateDirect3DDeviceManager9 failed", false);

  hr = device_manager_->ResetDevice(device_, dev_manager_reset_token_);
  RETURN_ON_HR_FAILURE(hr, "Failed to reset device", false);

  // Event query used later to flush pending GPU work when copying decoded
  // frames (see CopyOutputSampleDataToPictureBuffer).
  hr = device_->CreateQuery(D3DQUERYTYPE_EVENT, query_.Receive());
  RETURN_ON_HR_FAILURE(hr, "Failed to create D3D device query", false);
  // Ensure query_ API works (to avoid an infinite loop later in
  // CopyOutputSampleDataToPictureBuffer).
  hr = query_->Issue(D3DISSUE_END);
  RETURN_ON_HR_FAILURE(hr, "Failed to issue END test query", false);
  return true;
}
424
// The decoder starts out uninitialized; Initialize() must succeed before
// Decode() may be called.
DXVAVideoDecodeAccelerator::DXVAVideoDecodeAccelerator(
    const base::Callback<bool(void)>& make_context_current)
    : client_(NULL),
      dev_manager_reset_token_(0),
      egl_config_(NULL),
      state_(kUninitialized),
      pictures_requested_(false),
      inputs_before_decode_(0),
      make_context_current_(make_context_current),
      weak_this_factory_(this) {
  // The MFT stream info structs are filled in later by
  // GetStreamsInfoAndBufferReqs(); start from a known-zero state.
  memset(&input_stream_info_, 0, sizeof(input_stream_info_));
  memset(&output_stream_info_, 0, sizeof(output_stream_info_));
}
438
DXVAVideoDecodeAccelerator::~DXVAVideoDecodeAccelerator() {
  // Clear the client pointer so no further notifications are attempted.
  client_ = NULL;
}
442
// Initializes the decoder for |profile|, wiring up D3D, the MFT decoder and
// its media types, and moves the MFT into the streaming state. Returns false
// (after notifying |client| of the error where applicable) on any failure.
bool DXVAVideoDecodeAccelerator::Initialize(media::VideoCodecProfile profile,
                                            Client* client) {
  DCHECK(CalledOnValidThread());

  client_ = client;

  // Not all versions of Windows 7 and later include Media Foundation DLLs.
  // Instead of crashing while delay loading the DLL when calling MFStartup()
  // below, probe whether we can successfully load the DLL now.
  //
  // See http://crbug.com/339678 for details.
  HMODULE mfplat_dll = ::LoadLibrary(L"MFPlat.dll");
  RETURN_ON_FAILURE(mfplat_dll, "MFPlat.dll is required for decoding", false);

  // TODO(ananta)
  // H264PROFILE_HIGH video decoding is janky at times. Needs more
  // investigation.
  if (profile != media::H264PROFILE_BASELINE &&
      profile != media::H264PROFILE_MAIN &&
      profile != media::H264PROFILE_HIGH) {
    RETURN_AND_NOTIFY_ON_FAILURE(false,
        "Unsupported h264 profile", PLATFORM_FAILURE, false);
  }

  // Decoded frames reach the client through D3D share handles (see
  // DXVAPictureBuffer::Create), so this ANGLE extension is mandatory.
  RETURN_AND_NOTIFY_ON_FAILURE(
      gfx::g_driver_egl.ext.b_EGL_ANGLE_surface_d3d_texture_2d_share_handle,
      "EGL_ANGLE_surface_d3d_texture_2d_share_handle unavailable",
      PLATFORM_FAILURE,
      false);

  RETURN_AND_NOTIFY_ON_FAILURE((state_ == kUninitialized),
      "Initialize: invalid state: " << state_, ILLEGAL_STATE, false);

  HRESULT hr = MFStartup(MF_VERSION, MFSTARTUP_FULL);
  RETURN_AND_NOTIFY_ON_HR_FAILURE(hr, "MFStartup failed.", PLATFORM_FAILURE,
      false);

  RETURN_AND_NOTIFY_ON_FAILURE(CreateD3DDevManager(),
                               "Failed to initialize D3D device and manager",
                               PLATFORM_FAILURE,
                               false);

  RETURN_AND_NOTIFY_ON_FAILURE(InitDecoder(profile),
      "Failed to initialize decoder", PLATFORM_FAILURE, false);

  RETURN_AND_NOTIFY_ON_FAILURE(GetStreamsInfoAndBufferReqs(),
      "Failed to get input/output stream info.", PLATFORM_FAILURE, false);

  // Move the MFT into the streaming state so it will accept input samples.
  RETURN_AND_NOTIFY_ON_FAILURE(
      SendMFTMessage(MFT_MESSAGE_NOTIFY_BEGIN_STREAMING, 0),
      "Send MFT_MESSAGE_NOTIFY_BEGIN_STREAMING notification failed",
      PLATFORM_FAILURE, false);

  RETURN_AND_NOTIFY_ON_FAILURE(
      SendMFTMessage(MFT_MESSAGE_NOTIFY_START_OF_STREAM, 0),
      "Send MFT_MESSAGE_NOTIFY_START_OF_STREAM notification failed",
      PLATFORM_FAILURE, false);

  state_ = kNormal;
  return true;
}
504
505 void DXVAVideoDecodeAccelerator::Decode(
506     const media::BitstreamBuffer& bitstream_buffer) {
507   DCHECK(CalledOnValidThread());
508
509   RETURN_AND_NOTIFY_ON_FAILURE((state_ == kNormal || state_ == kStopped ||
510                                 state_ == kFlushing),
511       "Invalid state: " << state_, ILLEGAL_STATE,);
512
513   base::win::ScopedComPtr<IMFSample> sample;
514   sample.Attach(CreateSampleFromInputBuffer(bitstream_buffer,
515                                             input_stream_info_.cbSize,
516                                             input_stream_info_.cbAlignment));
517   RETURN_AND_NOTIFY_ON_FAILURE(sample, "Failed to create input sample",
518                                PLATFORM_FAILURE,);
519
520   RETURN_AND_NOTIFY_ON_HR_FAILURE(sample->SetSampleTime(bitstream_buffer.id()),
521       "Failed to associate input buffer id with sample", PLATFORM_FAILURE,);
522
523   DecodeInternal(sample);
524 }
525
// Receives the client-allocated picture buffers, wraps each in a
// DXVAPictureBuffer (pbuffer surface + shared texture) and then drains any
// decoded samples that were waiting for a free buffer.
void DXVAVideoDecodeAccelerator::AssignPictureBuffers(
    const std::vector<media::PictureBuffer>& buffers) {
  DCHECK(CalledOnValidThread());

  RETURN_AND_NOTIFY_ON_FAILURE((state_ != kUninitialized),
      "Invalid state: " << state_, ILLEGAL_STATE,);
  // The client must supply exactly the number of buffers requested.
  RETURN_AND_NOTIFY_ON_FAILURE((kNumPictureBuffers == buffers.size()),
      "Failed to provide requested picture buffers. (Got " << buffers.size() <<
      ", requested " << kNumPictureBuffers << ")", INVALID_ARGUMENT,);

  // Copy the picture buffers provided by the client to the available list,
  // and mark these buffers as available for use.
  for (size_t buffer_index = 0; buffer_index < buffers.size();
       ++buffer_index) {
    linked_ptr<DXVAPictureBuffer> picture_buffer =
        DXVAPictureBuffer::Create(*this, buffers[buffer_index], egl_config_);
    RETURN_AND_NOTIFY_ON_FAILURE(picture_buffer.get(),
        "Failed to allocate picture buffer", PLATFORM_FAILURE,);

    // Buffer ids are expected to be unique.
    bool inserted = output_picture_buffers_.insert(std::make_pair(
        buffers[buffer_index].id(), picture_buffer)).second;
    DCHECK(inserted);
  }
  ProcessPendingSamples();
  // A pending flush can complete once no output samples remain outstanding.
  if (state_ == kFlushing && pending_output_samples_.empty())
    FlushInternal();
}
553
554 void DXVAVideoDecodeAccelerator::ReusePictureBuffer(
555     int32 picture_buffer_id) {
556   DCHECK(CalledOnValidThread());
557
558   RETURN_AND_NOTIFY_ON_FAILURE((state_ != kUninitialized),
559       "Invalid state: " << state_, ILLEGAL_STATE,);
560
561   if (output_picture_buffers_.empty())
562     return;
563
564   OutputBuffers::iterator it = output_picture_buffers_.find(picture_buffer_id);
565   RETURN_AND_NOTIFY_ON_FAILURE(it != output_picture_buffers_.end(),
566       "Invalid picture id: " << picture_buffer_id, INVALID_ARGUMENT,);
567
568   it->second->ReusePictureBuffer();
569   ProcessPendingSamples();
570
571   if (state_ == kFlushing && pending_output_samples_.empty())
572     FlushInternal();
573 }
574
575 void DXVAVideoDecodeAccelerator::Flush() {
576   DCHECK(CalledOnValidThread());
577
578   DVLOG(1) << "DXVAVideoDecodeAccelerator::Flush";
579
580   RETURN_AND_NOTIFY_ON_FAILURE((state_ == kNormal || state_ == kStopped),
581       "Unexpected decoder state: " << state_, ILLEGAL_STATE,);
582
583   state_ = kFlushing;
584
585   RETURN_AND_NOTIFY_ON_FAILURE(SendMFTMessage(MFT_MESSAGE_COMMAND_DRAIN, 0),
586       "Failed to send drain message", PLATFORM_FAILURE,);
587
588   if (!pending_output_samples_.empty())
589     return;
590
591   FlushInternal();
592 }
593
// Drops all queued output and notifies the client that in-flight input will
// not be decoded, then flushes the MFT and asynchronously reports completion.
void DXVAVideoDecodeAccelerator::Reset() {
  DCHECK(CalledOnValidThread());

  DVLOG(1) << "DXVAVideoDecodeAccelerator::Reset";

  RETURN_AND_NOTIFY_ON_FAILURE((state_ == kNormal || state_ == kStopped),
      "Reset: invalid state: " << state_, ILLEGAL_STATE,);

  state_ = kResetting;

  // Discard decoded output that has not yet been handed to the client.
  pending_output_samples_.clear();

  // Tell the client that any queued input buffers will not be decoded.
  NotifyInputBuffersDropped();

  RETURN_AND_NOTIFY_ON_FAILURE(SendMFTMessage(MFT_MESSAGE_COMMAND_FLUSH, 0),
      "Reset: Failed to send message.", PLATFORM_FAILURE,);

  // NotifyResetDone is posted rather than called directly so it runs after
  // tasks already queued on this message loop.
  base::MessageLoop::current()->PostTask(
      FROM_HERE,
      base::Bind(&DXVAVideoDecodeAccelerator::NotifyResetDone,
                 weak_this_factory_.GetWeakPtr()));

  state_ = DXVAVideoDecodeAccelerator::kNormal;
}
618
void DXVAVideoDecodeAccelerator::Destroy() {
  DCHECK(CalledOnValidThread());
  // Tear down decoder state, then self-delete; the client must not use this
  // object after calling Destroy().
  Invalidate();
  delete this;
}
624
625 bool DXVAVideoDecodeAccelerator::InitDecoder(media::VideoCodecProfile profile) {
626   if (profile < media::H264PROFILE_MIN || profile > media::H264PROFILE_MAX)
627     return false;
628
629   // We mimic the steps CoCreateInstance uses to instantiate the object. This
630   // was previously done because it failed inside the sandbox, and now is done
631   // as a more minimal approach to avoid other side-effects CCI might have (as
632   // we are still in a reduced sandbox).
633   HMODULE decoder_dll = ::LoadLibrary(L"msmpeg2vdec.dll");
634   RETURN_ON_FAILURE(decoder_dll,
635                     "msmpeg2vdec.dll required for decoding is not loaded",
636                     false);
637
638   typedef HRESULT(WINAPI * GetClassObject)(
639       const CLSID & clsid, const IID & iid, void * *object);
640
641   GetClassObject get_class_object = reinterpret_cast<GetClassObject>(
642       GetProcAddress(decoder_dll, "DllGetClassObject"));
643   RETURN_ON_FAILURE(
644       get_class_object, "Failed to get DllGetClassObject pointer", false);
645
646   base::win::ScopedComPtr<IClassFactory> factory;
647   HRESULT hr = get_class_object(__uuidof(CMSH264DecoderMFT),
648                                 __uuidof(IClassFactory),
649                                 reinterpret_cast<void**>(factory.Receive()));
650   RETURN_ON_HR_FAILURE(hr, "DllGetClassObject for decoder failed", false);
651
652   hr = factory->CreateInstance(NULL,
653                                __uuidof(IMFTransform),
654                                reinterpret_cast<void**>(decoder_.Receive()));
655   RETURN_ON_HR_FAILURE(hr, "Failed to create decoder instance", false);
656
657   RETURN_ON_FAILURE(CheckDecoderDxvaSupport(),
658                     "Failed to check decoder DXVA support", false);
659
660   hr = decoder_->ProcessMessage(
661             MFT_MESSAGE_SET_D3D_MANAGER,
662             reinterpret_cast<ULONG_PTR>(device_manager_.get()));
663   RETURN_ON_HR_FAILURE(hr, "Failed to pass D3D manager to decoder", false);
664
665   EGLDisplay egl_display = gfx::GLSurfaceEGL::GetHardwareDisplay();
666
667   EGLint config_attribs[] = {
668     EGL_BUFFER_SIZE, 32,
669     EGL_RED_SIZE, 8,
670     EGL_GREEN_SIZE, 8,
671     EGL_BLUE_SIZE, 8,
672     EGL_SURFACE_TYPE, EGL_PBUFFER_BIT,
673     EGL_ALPHA_SIZE, 0,
674     EGL_NONE
675   };
676
677   EGLint num_configs;
678
679   if (!eglChooseConfig(
680       egl_display,
681       config_attribs,
682       &egl_config_,
683       1,
684       &num_configs))
685     return false;
686
687   return SetDecoderMediaTypes();
688 }
689
// Verifies the decoder MFT is D3D-aware and enables DXVA hardware decoding
// on it. Returns false if the attributes cannot be read or set.
bool DXVAVideoDecodeAccelerator::CheckDecoderDxvaSupport() {
  base::win::ScopedComPtr<IMFAttributes> attributes;
  HRESULT hr = decoder_->GetAttributes(attributes.Receive());
  RETURN_ON_HR_FAILURE(hr, "Failed to get decoder attributes", false);

  // NOTE(review): only the success of the attribute read is checked; the
  // fetched |dxva| value itself is never examined. This presumably relies on
  // GetUINT32 failing when MF_SA_D3D_AWARE is absent -- confirm whether a
  // present-but-zero value should also be rejected.
  UINT32 dxva = 0;
  hr = attributes->GetUINT32(MF_SA_D3D_AWARE, &dxva);
  RETURN_ON_HR_FAILURE(hr, "Failed to check if decoder supports DXVA", false);

  hr = attributes->SetUINT32(CODECAPI_AVDecVideoAcceleration_H264, TRUE);
  RETURN_ON_HR_FAILURE(hr, "Failed to enable DXVA H/W decoding", false);
  return true;
}
703
704 bool DXVAVideoDecodeAccelerator::SetDecoderMediaTypes() {
705   RETURN_ON_FAILURE(SetDecoderInputMediaType(),
706                     "Failed to set decoder input media type", false);
707   return SetDecoderOutputMediaType(MFVideoFormat_NV12);
708 }
709
710 bool DXVAVideoDecodeAccelerator::SetDecoderInputMediaType() {
711   base::win::ScopedComPtr<IMFMediaType> media_type;
712   HRESULT hr = MFCreateMediaType(media_type.Receive());
713   RETURN_ON_HR_FAILURE(hr, "MFCreateMediaType failed", false);
714
715   hr = media_type->SetGUID(MF_MT_MAJOR_TYPE, MFMediaType_Video);
716   RETURN_ON_HR_FAILURE(hr, "Failed to set major input type", false);
717
718   hr = media_type->SetGUID(MF_MT_SUBTYPE, MFVideoFormat_H264);
719   RETURN_ON_HR_FAILURE(hr, "Failed to set subtype", false);
720
721   // Not sure about this. msdn recommends setting this value on the input
722   // media type.
723   hr = media_type->SetUINT32(MF_MT_INTERLACE_MODE,
724                              MFVideoInterlace_MixedInterlaceOrProgressive);
725   RETURN_ON_HR_FAILURE(hr, "Failed to set interlace mode", false);
726
727   hr = decoder_->SetInputType(0, media_type, 0);  // No flags
728   RETURN_ON_HR_FAILURE(hr, "Failed to set decoder input type", false);
729   return true;
730 }
731
732 bool DXVAVideoDecodeAccelerator::SetDecoderOutputMediaType(
733     const GUID& subtype) {
734   base::win::ScopedComPtr<IMFMediaType> out_media_type;
735
736   for (uint32 i = 0;
737        SUCCEEDED(decoder_->GetOutputAvailableType(0, i,
738                                                   out_media_type.Receive()));
739        ++i) {
740     GUID out_subtype = {0};
741     HRESULT hr = out_media_type->GetGUID(MF_MT_SUBTYPE, &out_subtype);
742     RETURN_ON_HR_FAILURE(hr, "Failed to get output major type", false);
743
744     if (out_subtype == subtype) {
745       hr = decoder_->SetOutputType(0, out_media_type, 0);  // No flags
746       RETURN_ON_HR_FAILURE(hr, "Failed to set decoder output type", false);
747       return true;
748     }
749     out_media_type.Release();
750   }
751   return false;
752 }
753
754 bool DXVAVideoDecodeAccelerator::SendMFTMessage(MFT_MESSAGE_TYPE msg,
755                                                 int32 param) {
756   HRESULT hr = decoder_->ProcessMessage(msg, param);
757   return SUCCEEDED(hr);
758 }
759
// Gets the minimum buffer sizes for input and output samples. The MFT will not
// allocate buffer for input nor output, so we have to do it ourselves and make
// sure they're the correct size. We only provide decoding if DXVA is enabled.
bool DXVAVideoDecodeAccelerator::GetStreamsInfoAndBufferReqs() {
  HRESULT hr = decoder_->GetInputStreamInfo(0, &input_stream_info_);
  RETURN_ON_HR_FAILURE(hr, "Failed to get input stream info", false);

  hr = decoder_->GetOutputStreamInfo(0, &output_stream_info_);
  RETURN_ON_HR_FAILURE(hr, "Failed to get decoder output stream info", false);

  DVLOG(1) << "Input stream info: ";
  DVLOG(1) << "Max latency: " << input_stream_info_.hnsMaxLatency;
  // There should be three flags, one for requiring a whole frame be in a
  // single sample, one for requiring there be one buffer only in a single
  // sample, and one that specifies a fixed sample size. (as in cbSize)
  // Any other flag combination means this is not the MFT configuration this
  // code was written against, so fail hard.
  CHECK_EQ(input_stream_info_.dwFlags, 0x7u);

  DVLOG(1) << "Min buffer size: " << input_stream_info_.cbSize;
  DVLOG(1) << "Max lookahead: " << input_stream_info_.cbMaxLookahead;
  DVLOG(1) << "Alignment: " << input_stream_info_.cbAlignment;

  DVLOG(1) << "Output stream info: ";
  // The flags here should be the same and mean the same thing, except when
  // DXVA is enabled, there is an extra 0x100 flag meaning decoder will
  // allocate its own sample.
  DVLOG(1) << "Flags: "
          << std::hex << std::showbase << output_stream_info_.dwFlags;
  CHECK_EQ(output_stream_info_.dwFlags, 0x107u);
  DVLOG(1) << "Min buffer size: " << output_stream_info_.cbSize;
  DVLOG(1) << "Alignment: " << output_stream_info_.cbAlignment;
  return true;
}
792
// Pulls one decoded sample from the MFT decoder via ProcessOutput and hands
// it to ProcessOutputSample. Sets |state_| to kStopped when the decoder has
// no output ready (MF_E_TRANSFORM_NEED_MORE_INPUT), and re-negotiates the
// output media type on MF_E_TRANSFORM_STREAM_CHANGE.
void DXVAVideoDecodeAccelerator::DoDecode() {
  // This function is also called from FlushInternal in a loop which could
  // result in the state transitioning to kStopped due to no decoded output.
  RETURN_AND_NOTIFY_ON_FAILURE((state_ == kNormal || state_ == kFlushing ||
                                state_ == kStopped),
      "DoDecode: not in normal/flushing/stopped state", ILLEGAL_STATE,);

  // pSample is left NULL here: with DXVA the decoder allocates its own output
  // samples (the 0x100 output stream flag checked in
  // GetStreamsInfoAndBufferReqs).
  MFT_OUTPUT_DATA_BUFFER output_data_buffer = {0};
  DWORD status = 0;

  HRESULT hr = decoder_->ProcessOutput(0,  // No flags
                                       1,  // # of out streams to pull from
                                       &output_data_buffer,
                                       &status);
  // ProcessOutput may hand back an event collection; we don't consume events,
  // so release the reference to avoid leaking it.
  IMFCollection* events = output_data_buffer.pEvents;
  if (events != NULL) {
    VLOG(1) << "Got events from ProcessOuput, but discarding";
    events->Release();
  }
  if (FAILED(hr)) {
    // A stream change needs further ProcessInput calls to get back decoder
    // output which is why we need to set the state to stopped.
    if (hr == MF_E_TRANSFORM_STREAM_CHANGE) {
      if (!SetDecoderOutputMediaType(MFVideoFormat_NV12)) {
        // Decoder didn't let us set NV12 output format. Not sure as to why
        // this can happen. Give up in disgust.
        NOTREACHED() << "Failed to set decoder output media type to NV12";
        state_ = kStopped;
      } else {
        DVLOG(1) << "Received output format change from the decoder."
                    " Recursively invoking DoDecode";
        DoDecode();
      }
      return;
    } else if (hr == MF_E_TRANSFORM_NEED_MORE_INPUT) {
      // No more output from the decoder. Stop playback.
      state_ = kStopped;
      return;
    } else {
      NOTREACHED() << "Unhandled error in DoDecode()";
      return;
    }
  }
  TRACE_EVENT_END_ETW("DXVAVideoDecodeAccelerator.Decoding", this, "");

  TRACE_COUNTER1("DXVA Decoding", "TotalPacketsBeforeDecode",
                 inputs_before_decode_);

  inputs_before_decode_ = 0;

  RETURN_AND_NOTIFY_ON_FAILURE(ProcessOutputSample(output_data_buffer.pSample),
      "Failed to process output sample.", PLATFORM_FAILURE,);
}
846
// Queues the decoded |sample| for delivery to the client and, on the first
// ever output, asks the client for picture buffers sized to the decoded
// surface. The sample's presentation time carries the id of the input
// bitstream buffer it was decoded from (the same convention DecodeInternal
// uses when reading sample times from input samples).
bool DXVAVideoDecodeAccelerator::ProcessOutputSample(IMFSample* sample) {
  RETURN_ON_FAILURE(sample, "Decode succeeded with NULL output sample", false);

  base::win::ScopedComPtr<IMFMediaBuffer> output_buffer;
  HRESULT hr = sample->GetBufferByIndex(0, output_buffer.Receive());
  RETURN_ON_HR_FAILURE(hr, "Failed to get buffer from output sample", false);

  // The media buffer wraps a D3D9 surface; retrieve it via MR_BUFFER_SERVICE.
  base::win::ScopedComPtr<IDirect3DSurface9> surface;
  hr = MFGetService(output_buffer, MR_BUFFER_SERVICE,
                    IID_PPV_ARGS(surface.Receive()));
  RETURN_ON_HR_FAILURE(hr, "Failed to get D3D surface from output sample",
                       false);

  LONGLONG input_buffer_id = 0;
  RETURN_ON_HR_FAILURE(sample->GetSampleTime(&input_buffer_id),
                       "Failed to get input buffer id associated with sample",
                       false);

  pending_output_samples_.push_back(
      PendingSampleInfo(input_buffer_id, sample));

  // If we have available picture buffers to copy the output data then use the
  // first one and then flag it as not being available for use.
  if (output_picture_buffers_.size()) {
    ProcessPendingSamples();
    return true;
  }
  if (pictures_requested_) {
    DVLOG(1) << "Waiting for picture slots from the client.";
    return true;
  }

  // We only read the surface description, which contains its width/height when
  // we need the picture buffers from the client. Once we have those, then they
  // are reused.
  D3DSURFACE_DESC surface_desc;
  hr = surface->GetDesc(&surface_desc);
  RETURN_ON_HR_FAILURE(hr, "Failed to get surface description", false);

  // Go ahead and request picture buffers.
  base::MessageLoop::current()->PostTask(
      FROM_HERE,
      base::Bind(&DXVAVideoDecodeAccelerator::RequestPictureBuffers,
                 weak_this_factory_.GetWeakPtr(),
                 surface_desc.Width,
                 surface_desc.Height));

  pictures_requested_ = true;
  return true;
}
897
// Copies queued output samples into available client picture buffers and
// notifies the client that each resulting picture is ready. Bails out early
// (delegating to HandleResolutionChanged) when the decoded surface size no
// longer matches the current picture buffers.
void DXVAVideoDecodeAccelerator::ProcessPendingSamples() {
  // The copy below touches GL state, so the GL context must be current.
  RETURN_AND_NOTIFY_ON_FAILURE(make_context_current_.Run(),
      "Failed to make context current", PLATFORM_FAILURE,);

  OutputBuffers::iterator index;

  // Walk the picture buffers looking for available ones; stop as soon as the
  // pending sample queue is drained.
  for (index = output_picture_buffers_.begin();
       index != output_picture_buffers_.end() &&
       !pending_output_samples_.empty();
       ++index) {
    if (index->second->available()) {
      PendingSampleInfo sample_info = pending_output_samples_.front();

      base::win::ScopedComPtr<IMFMediaBuffer> output_buffer;
      HRESULT hr = sample_info.output_sample->GetBufferByIndex(
          0, output_buffer.Receive());
      RETURN_AND_NOTIFY_ON_HR_FAILURE(
          hr, "Failed to get buffer from output sample", PLATFORM_FAILURE,);

      base::win::ScopedComPtr<IDirect3DSurface9> surface;
      hr = MFGetService(output_buffer, MR_BUFFER_SERVICE,
                        IID_PPV_ARGS(surface.Receive()));
      RETURN_AND_NOTIFY_ON_HR_FAILURE(
          hr, "Failed to get D3D surface from output sample",
          PLATFORM_FAILURE,);

      D3DSURFACE_DESC surface_desc;
      hr = surface->GetDesc(&surface_desc);
      RETURN_AND_NOTIFY_ON_HR_FAILURE(
          hr, "Failed to get surface description", PLATFORM_FAILURE,);

      // A size mismatch means the stream resolution changed; the current
      // picture buffers are stale. Note the sample stays at the front of
      // |pending_output_samples_| and will be retried with the new buffers.
      if (surface_desc.Width !=
              static_cast<uint32>(index->second->size().width()) ||
          surface_desc.Height !=
              static_cast<uint32>(index->second->size().height())) {
        HandleResolutionChanged(surface_desc.Width, surface_desc.Height);
        return;
      }

      RETURN_AND_NOTIFY_ON_FAILURE(
          index->second->CopyOutputSampleDataToPictureBuffer(*this, surface),
          "Failed to copy output sample",
          PLATFORM_FAILURE, );

      media::Picture output_picture(index->second->id(),
                                    sample_info.input_buffer_id);
      base::MessageLoop::current()->PostTask(
          FROM_HERE,
          base::Bind(&DXVAVideoDecodeAccelerator::NotifyPictureReady,
                     weak_this_factory_.GetWeakPtr(),
                     output_picture));

      // The buffer is in use until the client returns it.
      index->second->set_available(false);
      pending_output_samples_.pop_front();
    }
  }

  // With the output queue drained, any deferred input can now be fed to the
  // decoder.
  if (!pending_input_buffers_.empty() && pending_output_samples_.empty()) {
    base::MessageLoop::current()->PostTask(
        FROM_HERE,
        base::Bind(&DXVAVideoDecodeAccelerator::DecodePendingInputBuffers,
                   weak_this_factory_.GetWeakPtr()));
  }
}
962
963 void DXVAVideoDecodeAccelerator::StopOnError(
964   media::VideoDecodeAccelerator::Error error) {
965   DCHECK(CalledOnValidThread());
966
967   if (client_)
968     client_->NotifyError(error);
969   client_ = NULL;
970
971   if (state_ != kUninitialized) {
972     Invalidate();
973   }
974 }
975
// Tears down the decoder and drops all queued input/output state. Idempotent:
// once |state_| is kUninitialized subsequent calls are no-ops.
void DXVAVideoDecodeAccelerator::Invalidate() {
  if (state_ == kUninitialized)
    return;
  // Invalidate weak pointers first so that tasks already posted to the
  // message loop (picture ready, flush/reset done, etc.) become no-ops.
  weak_this_factory_.InvalidateWeakPtrs();
  output_picture_buffers_.clear();
  pending_output_samples_.clear();
  pending_input_buffers_.clear();
  // Release the decoder before shutting down Media Foundation.
  decoder_.Release();
  MFShutdown();
  state_ = kUninitialized;
}
987
988 void DXVAVideoDecodeAccelerator::NotifyInputBufferRead(int input_buffer_id) {
989   if (client_)
990     client_->NotifyEndOfBitstreamBuffer(input_buffer_id);
991 }
992
993 void DXVAVideoDecodeAccelerator::NotifyFlushDone() {
994   if (client_)
995     client_->NotifyFlushDone();
996 }
997
998 void DXVAVideoDecodeAccelerator::NotifyResetDone() {
999   if (client_)
1000     client_->NotifyResetDone();
1001 }
1002
1003 void DXVAVideoDecodeAccelerator::RequestPictureBuffers(int width, int height) {
1004   // This task could execute after the decoder has been torn down.
1005   if (state_ != kUninitialized && client_) {
1006     client_->ProvidePictureBuffers(
1007         kNumPictureBuffers,
1008         gfx::Size(width, height),
1009         GL_TEXTURE_2D);
1010   }
1011 }
1012
1013 void DXVAVideoDecodeAccelerator::NotifyPictureReady(
1014     const media::Picture& picture) {
1015   // This task could execute after the decoder has been torn down.
1016   if (state_ != kUninitialized && client_)
1017     client_->PictureReady(picture);
1018 }
1019
1020 void DXVAVideoDecodeAccelerator::NotifyInputBuffersDropped() {
1021   if (!client_ || !pending_output_samples_.empty())
1022     return;
1023
1024   for (PendingInputs::iterator it = pending_input_buffers_.begin();
1025        it != pending_input_buffers_.end(); ++it) {
1026     LONGLONG input_buffer_id = 0;
1027     RETURN_ON_HR_FAILURE((*it)->GetSampleTime(&input_buffer_id),
1028                          "Failed to get buffer id associated with sample",);
1029     client_->NotifyEndOfBitstreamBuffer(input_buffer_id);
1030   }
1031   pending_input_buffers_.clear();
1032 }
1033
1034 void DXVAVideoDecodeAccelerator::DecodePendingInputBuffers() {
1035   RETURN_AND_NOTIFY_ON_FAILURE((state_ != kUninitialized),
1036       "Invalid state: " << state_, ILLEGAL_STATE,);
1037
1038   if (pending_input_buffers_.empty() || !pending_output_samples_.empty())
1039     return;
1040
1041   PendingInputs pending_input_buffers_copy;
1042   std::swap(pending_input_buffers_, pending_input_buffers_copy);
1043
1044   for (PendingInputs::iterator it = pending_input_buffers_copy.begin();
1045        it != pending_input_buffers_copy.end(); ++it) {
1046     DecodeInternal(*it);
1047   }
1048 }
1049
// Drains the decoder of buffered output and then notifies the client that the
// flush completed.
void DXVAVideoDecodeAccelerator::FlushInternal() {
  // The DoDecode function sets the state to kStopped when the decoder returns
  // MF_E_TRANSFORM_NEED_MORE_INPUT.
  // The MFT decoder can buffer upto 30 frames worth of input before returning
  // an output frame. This loop here attempts to retrieve as many output frames
  // as possible from the buffered set.
  while (state_ != kStopped) {
    DoDecode();
    // If DoDecode produced an output sample we cannot drain further right
    // now; return without signalling flush-done.
    if (!pending_output_samples_.empty())
      return;
  }

  base::MessageLoop::current()->PostTask(
      FROM_HERE,
      base::Bind(&DXVAVideoDecodeAccelerator::NotifyFlushDone,
                 weak_this_factory_.GetWeakPtr()));

  // Fully drained: resume normal decoding.
  state_ = kNormal;
}
1069
// Feeds one input |sample| to the MFT decoder and then drives output via
// DoDecode. If the decoder cannot accept input yet, the sample is queued in
// |pending_input_buffers_| and retried later by DecodePendingInputBuffers.
// The sample's presentation time carries the client's input buffer id.
void DXVAVideoDecodeAccelerator::DecodeInternal(
    const base::win::ScopedComPtr<IMFSample>& sample) {
  DCHECK(CalledOnValidThread());

  if (state_ == kUninitialized)
    return;

  // Queue behind any already-pending work so input order is preserved and we
  // never feed the decoder while an output sample awaits delivery.
  if (!pending_output_samples_.empty() || !pending_input_buffers_.empty()) {
    pending_input_buffers_.push_back(sample);
    return;
  }

  // Start the ETW trace span on the first input of a decode burst;
  // DoDecode ends it and resets |inputs_before_decode_|.
  if (!inputs_before_decode_) {
    TRACE_EVENT_BEGIN_ETW("DXVAVideoDecodeAccelerator.Decoding", this, "");
  }
  inputs_before_decode_++;

  HRESULT hr = decoder_->ProcessInput(0, sample, 0);
  // As per msdn if the decoder returns MF_E_NOTACCEPTING then it means that it
  // has enough data to produce one or more output samples. In this case the
  // recommended options are to
  // 1. Generate new output by calling IMFTransform::ProcessOutput until it
  //    returns MF_E_TRANSFORM_NEED_MORE_INPUT.
  // 2. Flush the input data
  // We implement the first option, i.e to retrieve the output sample and then
  // process the input again. Failure in either of these steps is treated as a
  // decoder failure.
  if (hr == MF_E_NOTACCEPTING) {
    DoDecode();
    RETURN_AND_NOTIFY_ON_FAILURE((state_ == kStopped || state_ == kNormal),
        "Failed to process output. Unexpected decoder state: " << state_,
        PLATFORM_FAILURE,);
    hr = decoder_->ProcessInput(0, sample, 0);
    // If we continue to get the MF_E_NOTACCEPTING error we do the following:-
    // 1. Add the input sample to the pending queue.
    // 2. If we don't have any output samples we post the
    //    DecodePendingInputBuffers task to process the pending input samples.
    //    If we have an output sample then the above task is posted when the
    //    output samples are sent to the client.
    // This is because we only support 1 pending output sample at any
    // given time due to the limitation with the Microsoft media foundation
    // decoder where it recycles the output Decoder surfaces.
    if (hr == MF_E_NOTACCEPTING) {
      pending_input_buffers_.push_back(sample);
      if (pending_output_samples_.empty()) {
        base::MessageLoop::current()->PostTask(
            FROM_HERE,
            base::Bind(&DXVAVideoDecodeAccelerator::DecodePendingInputBuffers,
                       weak_this_factory_.GetWeakPtr()));
      }
      return;
    }
  }
  RETURN_AND_NOTIFY_ON_HR_FAILURE(hr, "Failed to process input sample",
      PLATFORM_FAILURE,);

  DoDecode();

  RETURN_AND_NOTIFY_ON_FAILURE((state_ == kStopped || state_ == kNormal),
      "Failed to process output. Unexpected decoder state: " << state_,
      ILLEGAL_STATE,);

  LONGLONG input_buffer_id = 0;
  RETURN_ON_HR_FAILURE(sample->GetSampleTime(&input_buffer_id),
                       "Failed to get input buffer id associated with sample",);
  // The Microsoft Media foundation decoder internally buffers up to 30 frames
  // before returning a decoded frame. We need to inform the client that this
  // input buffer is processed as it may stop sending us further input.
  // Note: This may break clients which expect every input buffer to be
  // associated with a decoded output buffer.
  // TODO(ananta)
  // Do some more investigation into whether it is possible to get the MFT
  // decoder to emit an output packet for every input packet.
  // http://code.google.com/p/chromium/issues/detail?id=108121
  // http://code.google.com/p/chromium/issues/detail?id=150925
  base::MessageLoop::current()->PostTask(
      FROM_HERE,
      base::Bind(&DXVAVideoDecodeAccelerator::NotifyInputBufferRead,
                 weak_this_factory_.GetWeakPtr(),
                 input_buffer_id));
}
1151
// Handles a mid-stream resolution change: dismisses the current picture
// buffers and requests new |width| x |height| buffers from the client.
void DXVAVideoDecodeAccelerator::HandleResolutionChanged(int width,
                                                         int height) {
  // |output_picture_buffers_| is copied by value into the Bind closure here,
  // so clearing the member below does not affect the dismissal task.
  base::MessageLoop::current()->PostTask(
      FROM_HERE,
      base::Bind(&DXVAVideoDecodeAccelerator::DismissStaleBuffers,
                 weak_this_factory_.GetWeakPtr(),
                 output_picture_buffers_));

  base::MessageLoop::current()->PostTask(
      FROM_HERE,
      base::Bind(&DXVAVideoDecodeAccelerator::RequestPictureBuffers,
                 weak_this_factory_.GetWeakPtr(),
                 width,
                 height));

  output_picture_buffers_.clear();
}
1169
1170 void DXVAVideoDecodeAccelerator::DismissStaleBuffers(
1171     const OutputBuffers& picture_buffers) {
1172   OutputBuffers::const_iterator index;
1173
1174   for (index = picture_buffers.begin();
1175        index != picture_buffers.end();
1176        ++index) {
1177     DVLOG(1) << "Dismissing picture id: " << index->second->id();
1178     client_->DismissPictureBuffer(index->second->id());
1179   }
1180 }
1181
1182 }  // namespace content