#include "base/at_exit.h"
#include "base/bind.h"
#include "base/command_line.h"
-#include "base/file_util.h"
+#include "base/files/file_util.h"
#include "base/files/memory_mapped_file.h"
#include "base/memory/scoped_vector.h"
#include "base/numerics/safe_conversions.h"
-#include "base/process/process.h"
+#include "base/process/process_handle.h"
#include "base/strings/string_number_conversions.h"
#include "base/strings/string_split.h"
#include "base/time/time.h"
-#include "content/common/gpu/media/v4l2_video_encode_accelerator.h"
#include "content/common/gpu/media/video_accelerator_unittest_helpers.h"
#include "media/base/bind_to_current_loop.h"
#include "media/base/bitstream_buffer.h"
#include "media/video/video_encode_accelerator.h"
#include "testing/gtest/include/gtest/gtest.h"
+#if defined(USE_X11)
+#include "ui/gfx/x/x11_types.h"
+#endif
+
+#if defined(OS_CHROMEOS) && defined(ARCH_CPU_ARMEL)
+#include "content/common/gpu/media/v4l2_video_encode_accelerator.h"
+#elif defined(OS_CHROMEOS) && defined(ARCH_CPU_X86_FAMILY) && defined(USE_X11)
+#include "content/common/gpu/media/vaapi_video_encode_accelerator.h"
+#else
+#error The VideoEncodeAcceleratorUnittest is not supported on this platform.
+#endif
+
using media::VideoEncodeAccelerator;
namespace content {
const double kBitrateTolerance = 0.1;
// Minimum required FPS throughput for the basic performance test.
const uint32 kMinPerfFPS = 30;
+// Minimum (arbitrary) number of frames over which bitrate requirements are
+// enforced. Streams shorter than this may be too short to realistically
+// require an encoder to be able to converge to the requested bitrate.
+// The input stream will be looped as many times as needed in bitrate tests
+// to reach at least this number of frames before calculating final bitrate.
+const unsigned int kMinFramesForBitrateTests = 300;
// The syntax of multiple test streams is:
// test-stream1;test-stream2;test-stream3
// - |requested_subsequent_framerate| framerate to switch to in the middle
// of the stream.
// Bitrate is only forced for tests that test bitrate.
-const char* g_default_in_filename = "sync_192p20_frames.yuv";
+const char* g_default_in_filename = "bear_320x192_40frames.yuv";
const char* g_default_in_parameters = ":320:192:1:out.h264:200000";
-base::FilePath::StringType* g_test_stream_data;
+// Environment to store test stream data for all test cases.
+class VideoEncodeAcceleratorTestEnvironment;
+VideoEncodeAcceleratorTestEnvironment* g_env;
struct TestStream {
TestStream()
- : requested_bitrate(0),
+ : num_frames(0),
+ aligned_buffer_size(0),
+ requested_bitrate(0),
requested_framerate(0),
requested_subsequent_bitrate(0),
requested_subsequent_framerate(0) {}
~TestStream() {}
- gfx::Size size;
- base::MemoryMappedFile input_file;
- media::VideoCodecProfile requested_profile;
+ // Visible (usable) frame size, parsed from the test parameter string.
+ gfx::Size visible_size;
+ // Coded frame size requested by the VEA; set when the aligned input
+ // stream file is created.
+ gfx::Size coded_size;
+ // Number of frames in the input stream, derived from the file length.
+ unsigned int num_frames;
+
+ // Original unaligned input file name provided as an argument to the test.
+ // The file must be an I420 (YUV planar) raw stream.
+ std::string in_filename;
+
+ // A temporary file used to prepare aligned input buffers of |in_filename|.
+ // The file makes sure starting address of YUV planes are 64 byte-aligned.
+ base::FilePath aligned_in_file;
+
+ // The memory mapping of |aligned_in_file|.
+ base::MemoryMappedFile mapped_aligned_in_file;
+
+ // Byte size of a frame of |aligned_in_file|.
+ size_t aligned_buffer_size;
+
+ // Byte size for each aligned plane of a frame.
+ std::vector<size_t> aligned_plane_size;
+
+ // File to save the encoded stream to, if requested via test parameters.
std::string out_filename;
+ media::VideoCodecProfile requested_profile;
+ // Encode parameters parsed from the test stream string; 0 means
+ // "not specified, use test defaults".
unsigned int requested_bitrate;
unsigned int requested_framerate;
unsigned int requested_subsequent_bitrate;
unsigned int requested_subsequent_framerate;
};
+// Rounds |value| up to the next multiple of 64 bytes (no-op if already
+// aligned). Used to keep YUV plane start offsets CPU cache line-aligned;
+// see CreateAlignedInputStreamFile().
+inline static size_t Align64Bytes(size_t value) {
+ return (value + 63) & ~63;
+}
+
+// Write |data| of |size| bytes at |offset| bytes into |file|.
+// Retries on partial writes until all |size| bytes have been written.
+// Returns false if any underlying write fails (Write() returns <= 0).
+static bool WriteFile(base::File* file,
+ const off_t offset,
+ const uint8* data,
+ size_t size) {
+ size_t written_bytes = 0;
+ while (written_bytes < size) {
+ int bytes = file->Write(offset + written_bytes,
+ reinterpret_cast<const char*>(data + written_bytes),
+ size - written_bytes);
+ // A non-positive result means the write failed; bail out rather than spin.
+ if (bytes <= 0)
+ return false;
+ written_bytes += bytes;
+ }
+ return true;
+}
+
+// ARM performs CPU cache management with CPU cache line granularity. We thus
+// need to ensure our buffers are CPU cache line-aligned (64 byte-aligned).
+// Otherwise newer kernels will refuse to accept them, and on older kernels
+// we'll be treating ourselves to random corruption.
+// Since we are just mapping and passing chunks of the input file directly to
+// the VEA as input frames to avoid copying large chunks of raw data on each
+// frame and thus affecting performance measurements, we have to prepare a
+// temporary file with all planes aligned to 64-byte boundaries beforehand.
+static void CreateAlignedInputStreamFile(const gfx::Size& coded_size,
+ TestStream* test_stream) {
+ // Test case may have many encoders and memory should be prepared once.
+ if (test_stream->coded_size == coded_size &&
+ test_stream->mapped_aligned_in_file.IsValid())
+ return;
+
+ // All encoders in multiple encoder test reuse the same test_stream, make
+ // sure they requested the same coded_size.
+ ASSERT_TRUE(!test_stream->mapped_aligned_in_file.IsValid() ||
+ coded_size == test_stream->coded_size);
+ test_stream->coded_size = coded_size;
+
+ size_t num_planes = media::VideoFrame::NumPlanes(kInputFormat);
+ std::vector<size_t> padding_sizes(num_planes);
+ std::vector<size_t> coded_bpl(num_planes);
+ std::vector<size_t> visible_bpl(num_planes);
+ std::vector<size_t> visible_plane_rows(num_planes);
+
+ // Calculate padding in bytes to be added after each plane required to keep
+ // starting addresses of all planes at a 64 byte boundary. This padding will
+ // be added after each plane when copying to the temporary file.
+ // At the same time we also need to take into account coded_size requested by
+ // the VEA; each row of visible_bpl bytes in the original file needs to be
+ // copied into a row of coded_bpl bytes in the aligned file.
+ for (size_t i = 0; i < num_planes; i++) {
+ size_t size =
+ media::VideoFrame::PlaneAllocationSize(kInputFormat, i, coded_size);
+ test_stream->aligned_plane_size.push_back(Align64Bytes(size));
+ test_stream->aligned_buffer_size += test_stream->aligned_plane_size.back();
+
+ coded_bpl[i] =
+ media::VideoFrame::RowBytes(i, kInputFormat, coded_size.width());
+ visible_bpl[i] = media::VideoFrame::RowBytes(
+ i, kInputFormat, test_stream->visible_size.width());
+ visible_plane_rows[i] = media::VideoFrame::Rows(
+ i, kInputFormat, test_stream->visible_size.height());
+ size_t padding_rows =
+ media::VideoFrame::Rows(i, kInputFormat, coded_size.height()) -
+ visible_plane_rows[i];
+ padding_sizes[i] = padding_rows * coded_bpl[i] + Align64Bytes(size) - size;
+ }
+
+ // Map the original (unaligned) input stream read-only; its rows are copied
+ // into the aligned temporary file below.
+ base::MemoryMappedFile src_file;
+ CHECK(src_file.Initialize(base::FilePath(test_stream->in_filename)));
+ CHECK(base::CreateTemporaryFile(&test_stream->aligned_in_file));
+
+ size_t visible_buffer_size = media::VideoFrame::AllocationSize(
+ kInputFormat, test_stream->visible_size);
+ CHECK_EQ(src_file.length() % visible_buffer_size, 0U)
+ << "Stream byte size is not a product of calculated frame byte size";
+
+ test_stream->num_frames = src_file.length() / visible_buffer_size;
+ uint32 flags = base::File::FLAG_CREATE_ALWAYS | base::File::FLAG_WRITE |
+ base::File::FLAG_READ;
+
+ // Create a temporary file with coded_size length.
+ base::File dest_file(test_stream->aligned_in_file, flags);
+ CHECK_GT(test_stream->aligned_buffer_size, 0UL);
+ dest_file.SetLength(test_stream->aligned_buffer_size *
+ test_stream->num_frames);
+
+ // Copy each visible row of each plane into the aligned file, advancing the
+ // destination by coded_bpl per row and padding each plane out to the next
+ // 64-byte boundary.
+ const uint8* src = src_file.data();
+ off_t dest_offset = 0;
+ for (size_t frame = 0; frame < test_stream->num_frames; frame++) {
+ for (size_t i = 0; i < num_planes; i++) {
+ // Assert that each plane of frame starts at 64 byte boundary.
+ ASSERT_EQ(dest_offset & 63, 0)
+ << "Planes of frame should be mapped at a 64 byte boundary";
+ for (size_t j = 0; j < visible_plane_rows[i]; j++) {
+ CHECK(WriteFile(&dest_file, dest_offset, src, visible_bpl[i]));
+ src += visible_bpl[i];
+ dest_offset += coded_bpl[i];
+ }
+ dest_offset += padding_sizes[i];
+ }
+ }
+ CHECK(test_stream->mapped_aligned_in_file.Initialize(dest_file.Pass()));
+ // Assert that memory mapped of file starts at 64 byte boundary. So each
+ // plane of frames also start at 64 byte boundary.
+ // NOTE(review): casting the data pointer to off_t for this check assumes
+ // off_t is at least pointer-sized; uintptr_t would express it more directly.
+ ASSERT_EQ(
+ reinterpret_cast<off_t>(test_stream->mapped_aligned_in_file.data()) & 63,
+ 0)
+ << "File should be mapped at a 64 byte boundary";
+
+ CHECK_EQ(test_stream->mapped_aligned_in_file.length() %
+ test_stream->aligned_buffer_size,
+ 0U)
+ << "Stream byte size is not a product of calculated frame byte size";
+ CHECK_GT(test_stream->num_frames, 0UL);
+ CHECK_LE(test_stream->num_frames, kMaxFrameNum);
+}
+
// Parse |data| into its constituent parts, set the various output fields
// accordingly, read in video stream, and store them to |test_streams|.
static void ParseAndReadTestStreamData(const base::FilePath::StringType& data,
CHECK_LE(fields.size(), 9U) << data;
TestStream* test_stream = new TestStream();
- base::FilePath::StringType filename = fields[0];
+ test_stream->in_filename = fields[0];
int width, height;
CHECK(base::StringToInt(fields[1], &width));
CHECK(base::StringToInt(fields[2], &height));
- test_stream->size = gfx::Size(width, height);
- CHECK(!test_stream->size.IsEmpty());
+ test_stream->visible_size = gfx::Size(width, height);
+ CHECK(!test_stream->visible_size.IsEmpty());
int profile;
CHECK(base::StringToInt(fields[3], &profile));
CHECK_GT(profile, media::VIDEO_CODEC_PROFILE_UNKNOWN);
CHECK(base::StringToUint(fields[8],
&test_stream->requested_subsequent_framerate));
}
-
- CHECK(test_stream->input_file.Initialize(base::FilePath(filename)));
test_streams->push_back(test_stream);
}
}
-// Set default parameters of |test_streams| and update the parameters according
-// to |mid_stream_bitrate_switch| and |mid_stream_framerate_switch|.
-static void UpdateTestStreamData(bool mid_stream_bitrate_switch,
- bool mid_stream_framerate_switch,
- ScopedVector<TestStream>* test_streams) {
- for (size_t i = 0; i < test_streams->size(); i++) {
- TestStream* test_stream = (*test_streams)[i];
- // Use defaults for bitrate/framerate if they are not provided.
- if (test_stream->requested_bitrate == 0)
- test_stream->requested_bitrate = kDefaultBitrate;
-
- if (test_stream->requested_framerate == 0)
- test_stream->requested_framerate = kDefaultFramerate;
-
- // If bitrate/framerate switch is requested, use the subsequent values if
- // provided, or, if not, calculate them from their initial values using
- // the default ratios.
- // Otherwise, if a switch is not requested, keep the initial values.
- if (mid_stream_bitrate_switch) {
- if (test_stream->requested_subsequent_bitrate == 0) {
- test_stream->requested_subsequent_bitrate =
- test_stream->requested_bitrate * kDefaultSubsequentBitrateRatio;
- }
- } else {
- test_stream->requested_subsequent_bitrate =
- test_stream->requested_bitrate;
- }
- if (test_stream->requested_subsequent_bitrate == 0)
- test_stream->requested_subsequent_bitrate = 1;
-
- if (mid_stream_framerate_switch) {
- if (test_stream->requested_subsequent_framerate == 0) {
- test_stream->requested_subsequent_framerate =
- test_stream->requested_framerate * kDefaultSubsequentFramerateRatio;
- }
- } else {
- test_stream->requested_subsequent_framerate =
- test_stream->requested_framerate;
- }
- if (test_stream->requested_subsequent_framerate == 0)
- test_stream->requested_subsequent_framerate = 1;
- }
-}
-
+// States a VEAClient passes through, in the order listed (CS_FINISHING is
+// removed along with the explicit flush); CS_ERROR is entered when an error
+// is encountered.
enum ClientState {
CS_CREATED,
CS_ENCODER_SET,
CS_INITIALIZED,
CS_ENCODING,
- CS_FINISHING,
CS_FINISHED,
CS_ERROR,
};
seen_pps_(false),
seen_idr_(false) {}
- void ProcessStreamBuffer(const uint8* stream, size_t size) OVERRIDE;
+ virtual void ProcessStreamBuffer(const uint8* stream, size_t size) override;
private:
// Set to true when encoder provides us with the corresponding NALU type.
bool seen_sps_;
bool seen_pps_;
bool seen_idr_;
+
+ media::H264Parser h264_parser_;
};
void H264Validator::ProcessStreamBuffer(const uint8* stream, size_t size) {
- media::H264Parser h264_parser;
- h264_parser.SetStream(stream, size);
+ h264_parser_.SetStream(stream, size);
while (1) {
media::H264NALU nalu;
media::H264Parser::Result result;
- result = h264_parser.AdvanceToNextNALU(&nalu);
+ result = h264_parser_.AdvanceToNextNALU(&nalu);
if (result == media::H264Parser::kEOStream)
break;
- ASSERT_EQ(result, media::H264Parser::kOk);
+ ASSERT_EQ(media::H264Parser::kOk, result);
bool keyframe = false;
case media::H264NALU::kIDRSlice:
ASSERT_TRUE(seen_sps_);
ASSERT_TRUE(seen_pps_);
- seen_idr_ = keyframe = true;
+ seen_idr_ = true;
+ keyframe = true;
// fallthrough
- case media::H264NALU::kNonIDRSlice:
+ case media::H264NALU::kNonIDRSlice: {
ASSERT_TRUE(seen_idr_);
if (!frame_cb_.Run(keyframe))
return;
break;
+ }
- case media::H264NALU::kSPS:
+ case media::H264NALU::kSPS: {
+ int sps_id;
+ ASSERT_EQ(media::H264Parser::kOk, h264_parser_.ParseSPS(&sps_id));
seen_sps_ = true;
break;
+ }
- case media::H264NALU::kPPS:
+ case media::H264NALU::kPPS: {
ASSERT_TRUE(seen_sps_);
+ int pps_id;
+ ASSERT_EQ(media::H264Parser::kOk, h264_parser_.ParsePPS(&pps_id));
seen_pps_ = true;
break;
+ }
default:
break;
: StreamValidator(frame_cb),
seen_keyframe_(false) {}
- void ProcessStreamBuffer(const uint8* stream, size_t size) OVERRIDE;
+ virtual void ProcessStreamBuffer(const uint8* stream, size_t size) override;
private:
// Have we already got a keyframe in the stream?
class VEAClient : public VideoEncodeAccelerator::Client {
public:
- VEAClient(const TestStream& test_stream,
+ VEAClient(TestStream* test_stream,
ClientStateNotification<ClientState>* note,
bool save_to_file,
unsigned int keyframe_period,
bool force_bitrate,
- bool test_perf);
+ bool test_perf,
+ bool mid_stream_bitrate_switch,
+ bool mid_stream_framerate_switch);
virtual ~VEAClient();
void CreateEncoder();
void DestroyEncoder();
double frames_per_second();
// VideoDecodeAccelerator::Client implementation.
- void RequireBitstreamBuffers(unsigned int input_count,
- const gfx::Size& input_coded_size,
- size_t output_buffer_size) OVERRIDE;
- void BitstreamBufferReady(int32 bitstream_buffer_id,
- size_t payload_size,
- bool key_frame) OVERRIDE;
- void NotifyError(VideoEncodeAccelerator::Error error) OVERRIDE;
+ virtual void RequireBitstreamBuffers(unsigned int input_count,
+ const gfx::Size& input_coded_size,
+ size_t output_buffer_size) override;
+ virtual void BitstreamBufferReady(int32 bitstream_buffer_id,
+ size_t payload_size,
+ bool key_frame) override;
+ virtual void NotifyError(VideoEncodeAccelerator::Error error) override;
private:
bool has_encoder() { return encoder_.get(); }
// Provide the encoder with a new output buffer.
void FeedEncoderWithOutput(base::SharedMemory* shm);
- // Feed the encoder with num_required_input_buffers_ of black frames to force
- // it to encode and return all inputs that came before this, effectively
- // flushing it.
- void FlushEncoder();
-
// Called on finding a complete frame (with |keyframe| set to true for
// keyframes) in the stream, to perform codec-independent, per-frame checks
// and accounting. Returns false once we have collected all frames we needed.
// the input stream, ready to be sent to encoder.
scoped_refptr<media::VideoFrame> PrepareInputFrame(off_t position);
+ // Update the parameters according to |mid_stream_bitrate_switch| and
+ // |mid_stream_framerate_switch|.
+ void UpdateTestStreamData(bool mid_stream_bitrate_switch,
+ bool mid_stream_framerate_switch);
+
ClientState state_;
scoped_ptr<VideoEncodeAccelerator> encoder_;
- const TestStream& test_stream_;
+ TestStream* test_stream_;
// Used to notify another thread about the state. VEAClient does not own this.
ClientStateNotification<ClientState>* note_;
// Current offset into input stream.
off_t pos_in_input_stream_;
- // Calculated from input_coded_size_, in bytes.
- size_t input_buffer_size_;
gfx::Size input_coded_size_;
// Requested by encoder.
unsigned int num_required_input_buffers_;
size_t output_buffer_size_;
- // Precalculated number of frames in the stream.
- unsigned int num_frames_in_stream_;
+ // Number of frames to encode. This may differ from the number of frames in
+ // stream if we need more frames for bitrate tests.
+ unsigned int num_frames_to_encode_;
// Number of encoded frames we've got from the encoder thus far.
unsigned int num_encoded_frames_;
scoped_ptr<StreamValidator> validator_;
- // The time when the encoder has initialized.
- base::TimeTicks encoder_initialized_time_;
+ // The time when the encoding started.
+ base::TimeTicks encode_start_time_;
// The time when the last encoded frame is ready.
base::TimeTicks last_frame_ready_time_;
// All methods of this class should be run on the same thread.
base::ThreadChecker thread_checker_;
+
+ // Requested bitrate in bits per second.
+ unsigned int requested_bitrate_;
+
+ // Requested initial framerate.
+ unsigned int requested_framerate_;
+
+ // Bitrate to switch to in the middle of the stream.
+ unsigned int requested_subsequent_bitrate_;
+
+ // Framerate to switch to in the middle of the stream.
+ unsigned int requested_subsequent_framerate_;
};
-VEAClient::VEAClient(const TestStream& test_stream,
+VEAClient::VEAClient(TestStream* test_stream,
ClientStateNotification<ClientState>* note,
bool save_to_file,
unsigned int keyframe_period,
bool force_bitrate,
- bool test_perf)
+ bool test_perf,
+ bool mid_stream_bitrate_switch,
+ bool mid_stream_framerate_switch)
: state_(CS_CREATED),
test_stream_(test_stream),
note_(note),
next_input_id_(1),
next_output_buffer_id_(0),
pos_in_input_stream_(0),
- input_buffer_size_(0),
num_required_input_buffers_(0),
output_buffer_size_(0),
- num_frames_in_stream_(0),
+ num_frames_to_encode_(0),
num_encoded_frames_(0),
num_frames_since_last_check_(0),
seen_keyframe_in_this_buffer_(false),
current_requested_bitrate_(0),
current_framerate_(0),
encoded_stream_size_since_last_check_(0),
- test_perf_(test_perf) {
+ test_perf_(test_perf),
+ requested_bitrate_(0),
+ requested_framerate_(0),
+ requested_subsequent_bitrate_(0),
+ requested_subsequent_framerate_(0) {
if (keyframe_period_)
CHECK_LT(kMaxKeyframeDelay, keyframe_period_);
validator_ = StreamValidator::Create(
- test_stream_.requested_profile,
+ test_stream_->requested_profile,
base::Bind(&VEAClient::HandleEncodedFrame, base::Unretained(this)));
CHECK(validator_.get());
if (save_to_file_) {
- CHECK(!test_stream_.out_filename.empty());
- base::FilePath out_filename(test_stream_.out_filename);
+ CHECK(!test_stream_->out_filename.empty());
+ base::FilePath out_filename(test_stream_->out_filename);
// This creates or truncates out_filename.
// Without it, AppendToFile() will not work.
EXPECT_EQ(0, base::WriteFile(out_filename, NULL, 0));
}
+ // Initialize the parameters of the test streams.
+ UpdateTestStreamData(mid_stream_bitrate_switch, mid_stream_framerate_switch);
+
thread_checker_.DetachFromThread();
}
DCHECK(thread_checker_.CalledOnValidThread());
CHECK(!has_encoder());
+#if defined(OS_CHROMEOS) && defined(ARCH_CPU_ARMEL)
scoped_ptr<V4L2Device> device = V4L2Device::Create(V4L2Device::kEncoder);
encoder_.reset(new V4L2VideoEncodeAccelerator(device.Pass()));
+#elif defined(OS_CHROMEOS) && defined(ARCH_CPU_X86_FAMILY) && defined(USE_X11)
+ encoder_.reset(new VaapiVideoEncodeAccelerator(gfx::GetXDisplay()));
+#endif
+
SetState(CS_ENCODER_SET);
- DVLOG(1) << "Profile: " << test_stream_.requested_profile
- << ", initial bitrate: " << test_stream_.requested_bitrate;
+ DVLOG(1) << "Profile: " << test_stream_->requested_profile
+ << ", initial bitrate: " << requested_bitrate_;
if (!encoder_->Initialize(kInputFormat,
- test_stream_.size,
- test_stream_.requested_profile,
- test_stream_.requested_bitrate,
+ test_stream_->visible_size,
+ test_stream_->requested_profile,
+ requested_bitrate_,
this)) {
- DLOG(ERROR) << "VideoEncodeAccelerator::Initialize() failed";
+ LOG(ERROR) << "VideoEncodeAccelerator::Initialize() failed";
SetState(CS_ERROR);
return;
}
- SetStreamParameters(test_stream_.requested_bitrate,
- test_stream_.requested_framerate);
+ SetStreamParameters(requested_bitrate_, requested_framerate_);
SetState(CS_INITIALIZED);
- encoder_initialized_time_ = base::TimeTicks::Now();
}
void VEAClient::DestroyEncoder() {
DCHECK(thread_checker_.CalledOnValidThread());
if (!has_encoder())
return;
- encoder_.release()->Destroy();
+ // Resetting the scoped_ptr deletes the encoder, replacing the old
+ // release()->Destroy() call.
+ encoder_.reset();
+}
+
+// Computes this client's requested_* encode parameters from |test_stream_|,
+// applying test defaults for unspecified (zero) values, and the default
+// subsequent ratios when a mid-stream bitrate/framerate switch is requested.
+void VEAClient::UpdateTestStreamData(bool mid_stream_bitrate_switch,
+ bool mid_stream_framerate_switch) {
+ // Use defaults for bitrate/framerate if they are not provided.
+ if (test_stream_->requested_bitrate == 0)
+ requested_bitrate_ = kDefaultBitrate;
+ else
+ requested_bitrate_ = test_stream_->requested_bitrate;
+
+ if (test_stream_->requested_framerate == 0)
+ requested_framerate_ = kDefaultFramerate;
+ else
+ requested_framerate_ = test_stream_->requested_framerate;
+
+ // If bitrate/framerate switch is requested, use the subsequent values if
+ // provided, or, if not, calculate them from their initial values using
+ // the default ratios.
+ // Otherwise, if a switch is not requested, keep the initial values.
+ if (mid_stream_bitrate_switch) {
+ if (test_stream_->requested_subsequent_bitrate == 0)
+ requested_subsequent_bitrate_ =
+ requested_bitrate_ * kDefaultSubsequentBitrateRatio;
+ else
+ requested_subsequent_bitrate_ =
+ test_stream_->requested_subsequent_bitrate;
+ } else {
+ requested_subsequent_bitrate_ = requested_bitrate_;
+ }
+ // 0 is not a valid bitrate; clamp to a minimum of 1.
+ if (requested_subsequent_bitrate_ == 0)
+ requested_subsequent_bitrate_ = 1;
+
+ if (mid_stream_framerate_switch) {
+ if (test_stream_->requested_subsequent_framerate == 0)
+ requested_subsequent_framerate_ =
+ requested_framerate_ * kDefaultSubsequentFramerateRatio;
+ else
+ requested_subsequent_framerate_ =
+ test_stream_->requested_subsequent_framerate;
+ } else {
+ requested_subsequent_framerate_ = requested_framerate_;
+ }
+ // 0 is not a valid framerate; clamp to a minimum of 1.
+ if (requested_subsequent_framerate_ == 0)
+ requested_subsequent_framerate_ = 1;
}
+// Measured encode throughput: encoded frames divided by the wall time from
+// encode start (now measured from the first Encode() call instead of encoder
+// initialization) to the last encoded-frame callback.
double VEAClient::frames_per_second() {
- base::TimeDelta duration = last_frame_ready_time_ - encoder_initialized_time_;
+ base::TimeDelta duration = last_frame_ready_time_ - encode_start_time_;
return num_encoded_frames_ / duration.InSecondsF();
}
ASSERT_EQ(state_, CS_INITIALIZED);
SetState(CS_ENCODING);
- // TODO(posciak): For now we only support input streams that meet encoder
- // size requirements exactly (i.e. coded size == visible size).
- input_coded_size_ = input_coded_size;
- ASSERT_EQ(input_coded_size_, test_stream_.size);
-
- input_buffer_size_ = media::VideoFrame::AllocationSize(kInputFormat,
- input_coded_size_);
- CHECK_GT(input_buffer_size_, 0UL);
-
- // ARM performs CPU cache management with CPU cache line granularity. We thus
- // need to ensure our buffers are CPU cache line-aligned (64 byte-aligned).
- // Otherwise newer kernels will refuse to accept them, and on older kernels
- // we'll be treating ourselves to random corruption.
- // Since we are just mmapping and passing chunks of the input file, to ensure
- // alignment, if the starting virtual addresses of the frames in it were not
- // 64 byte-aligned, we'd have to use a separate set of input buffers and copy
- // the frames into them before sending to the encoder. It would have been an
- // overkill here though, because, for now at least, we only test resolutions
- // that result in proper alignment, and it would have also interfered with
- // performance testing. So just assert that the frame size is a multiple of
- // 64 bytes. This ensures all frames start at 64-byte boundary, because
- // MemoryMappedFile should be mmapp()ed at virtual page start as well.
- ASSERT_EQ(input_buffer_size_ & 63, 0)
- << "Frame size has to be a multiple of 64 bytes";
- ASSERT_EQ(reinterpret_cast<off_t>(test_stream_.input_file.data()) & 63, 0)
- << "Mapped file should be mapped at a 64 byte boundary";
+ CreateAlignedInputStreamFile(input_coded_size, test_stream_);
+ // We may need to loop over the stream more than once if more frames than
+ // provided is required for bitrate tests.
+ if (force_bitrate_ && test_stream_->num_frames < kMinFramesForBitrateTests) {
+ DVLOG(1) << "Stream too short for bitrate test ("
+ << test_stream_->num_frames << " frames), will loop it to reach "
+ << kMinFramesForBitrateTests << " frames";
+ num_frames_to_encode_ = kMinFramesForBitrateTests;
+ } else {
+ num_frames_to_encode_ = test_stream_->num_frames;
+ }
+
+ input_coded_size_ = input_coded_size;
num_required_input_buffers_ = input_count;
ASSERT_GT(num_required_input_buffers_, 0UL);
- num_frames_in_stream_ = test_stream_.input_file.length() / input_buffer_size_;
- CHECK_GT(num_frames_in_stream_, 0UL);
- CHECK_LE(num_frames_in_stream_, kMaxFrameNum);
- CHECK_EQ(num_frames_in_stream_ * input_buffer_size_,
- test_stream_.input_file.length());
-
output_buffer_size_ = output_size;
ASSERT_GT(output_buffer_size_, 0UL);
FeedEncoderWithOutput(shm);
}
+ encode_start_time_ = base::TimeTicks::Now();
FeedEncoderWithInputs();
}
if (save_to_file_) {
int size = base::checked_cast<int>(payload_size);
- EXPECT_EQ(base::AppendToFile(
- base::FilePath::FromUTF8Unsafe(test_stream_.out_filename),
- static_cast<char*>(shm->memory()),
- size),
- size);
+ EXPECT_TRUE(base::AppendToFile(
+ base::FilePath::FromUTF8Unsafe(test_stream_->out_filename),
+ static_cast<char*>(shm->memory()),
+ size));
}
FeedEncoderWithOutput(shm);
}
scoped_refptr<media::VideoFrame> VEAClient::PrepareInputFrame(off_t position) {
- CHECK_LE(position + input_buffer_size_, test_stream_.input_file.length());
+ CHECK_LE(position + test_stream_->aligned_buffer_size,
+ test_stream_->mapped_aligned_in_file.length());
- uint8* frame_data =
- const_cast<uint8*>(test_stream_.input_file.data() + position);
+ uint8* frame_data_y = const_cast<uint8*>(
+ test_stream_->mapped_aligned_in_file.data() + position);
+ uint8* frame_data_u = frame_data_y + test_stream_->aligned_plane_size[0];
+ uint8* frame_data_v = frame_data_u + test_stream_->aligned_plane_size[1];
- CHECK_GT(current_framerate_, 0);
+ CHECK_GT(current_framerate_, 0U);
scoped_refptr<media::VideoFrame> frame =
media::VideoFrame::WrapExternalYuvData(
kInputFormat,
input_coded_size_,
- gfx::Rect(test_stream_.size),
- test_stream_.size,
+ gfx::Rect(test_stream_->visible_size),
+ test_stream_->visible_size,
input_coded_size_.width(),
input_coded_size_.width() / 2,
input_coded_size_.width() / 2,
- frame_data,
- frame_data + input_coded_size_.GetArea(),
- frame_data + (input_coded_size_.GetArea() * 5 / 4),
+ frame_data_y,
+ frame_data_u,
+ frame_data_v,
base::TimeDelta().FromMilliseconds(
next_input_id_ * base::Time::kMillisecondsPerSecond /
current_framerate_),
while (inputs_at_client_.size() <
num_required_input_buffers_ + kNumExtraInputFrames) {
- size_t bytes_left = test_stream_.input_file.length() - pos_in_input_stream_;
- if (bytes_left < input_buffer_size_) {
+ size_t bytes_left =
+ test_stream_->mapped_aligned_in_file.length() - pos_in_input_stream_;
+ if (bytes_left < test_stream_->aligned_buffer_size) {
DCHECK_EQ(bytes_left, 0UL);
- FlushEncoder();
- return;
+ // Rewind if at the end of stream and we are still encoding.
+ // This is to flush the encoder with additional frames from the beginning
+ // of the stream, or if the stream is shorter than the number of frames
+ // we require for bitrate tests.
+ pos_in_input_stream_ = 0;
+ continue;
}
bool force_keyframe = false;
scoped_refptr<media::VideoFrame> video_frame =
PrepareInputFrame(pos_in_input_stream_);
- pos_in_input_stream_ += input_buffer_size_;
+ pos_in_input_stream_ += test_stream_->aligned_buffer_size;
encoder_->Encode(video_frame, force_keyframe);
}
if (!has_encoder())
return;
- if (state_ != CS_ENCODING && state_ != CS_FINISHING)
+ if (state_ != CS_ENCODING)
return;
base::SharedMemoryHandle dup_handle;
- CHECK(shm->ShareToProcess(base::Process::Current().handle(), &dup_handle));
+ CHECK(shm->ShareToProcess(base::GetCurrentProcessHandle(), &dup_handle));
media::BitstreamBuffer bitstream_buffer(
next_output_buffer_id_++, dup_handle, output_buffer_size_);
encoder_->UseOutputBitstreamBuffer(bitstream_buffer);
}
-void VEAClient::FlushEncoder() {
- ASSERT_EQ(state_, CS_ENCODING);
- SetState(CS_FINISHING);
-
- // Feed the encoder with an additional set of num_required_input_buffers_
- // to flush it, using the first frame in the input stream. The resulting
- // encoded frames will be ignored.
- for (unsigned int i = 0; i < num_required_input_buffers_; ++i) {
- scoped_refptr<media::VideoFrame> frame = PrepareInputFrame(0);
- encoder_->Encode(frame, false);
- }
-}
-
bool VEAClient::HandleEncodedFrame(bool keyframe) {
// This would be a bug in the test, which should not ignore false
// return value from this method.
- CHECK_LE(num_encoded_frames_, num_frames_in_stream_);
+ CHECK_LE(num_encoded_frames_, num_frames_to_encode_);
++num_encoded_frames_;
++num_frames_since_last_check_;
// it, comes back encoded.
EXPECT_LE(num_encoded_frames_, keyframe_requested_at_ + kMaxKeyframeDelay);
- if (num_encoded_frames_ == num_frames_in_stream_ / 2) {
+ if (num_encoded_frames_ == num_frames_to_encode_ / 2) {
VerifyStreamProperties();
- if (test_stream_.requested_subsequent_bitrate !=
- current_requested_bitrate_ ||
- test_stream_.requested_subsequent_framerate != current_framerate_) {
- SetStreamParameters(test_stream_.requested_subsequent_bitrate,
- test_stream_.requested_subsequent_framerate);
+ if (requested_subsequent_bitrate_ != current_requested_bitrate_ ||
+ requested_subsequent_framerate_ != current_framerate_) {
+ SetStreamParameters(requested_subsequent_bitrate_,
+ requested_subsequent_framerate_);
}
- } else if (num_encoded_frames_ == num_frames_in_stream_) {
+ } else if (num_encoded_frames_ == num_frames_to_encode_) {
VerifyPerf();
VerifyStreamProperties();
SetState(CS_FINISHED);
}
}
+// Setup test stream data and delete temporary aligned files at the beginning
+// and end of unittest. We only need to setup once for all test cases.
+class VideoEncodeAcceleratorTestEnvironment : public ::testing::Environment {
+ public:
+ VideoEncodeAcceleratorTestEnvironment(
+ scoped_ptr<base::FilePath::StringType> data) {
+ test_stream_data_ = data.Pass();
+ }
+
+ // Parses |test_stream_data_| and reads the input streams into
+ // |test_streams_| once before any test runs.
+ virtual void SetUp() {
+ ParseAndReadTestStreamData(*test_stream_data_, &test_streams_);
+ }
+
+ // Deletes the temporary aligned input files created by
+ // CreateAlignedInputStreamFile() after all tests have run.
+ virtual void TearDown() {
+ for (size_t i = 0; i < test_streams_.size(); i++) {
+ base::DeleteFile(test_streams_[i]->aligned_in_file, false);
+ }
+ }
+
+ // Parsed test streams, shared by all test cases via |g_env|.
+ ScopedVector<TestStream> test_streams_;
+
+ private:
+ // Raw test stream parameter string, owned by the environment.
+ scoped_ptr<base::FilePath::StringType> test_stream_data_;
+};
+
// Test parameters:
// - Number of concurrent encoders.
// - If true, save output to file (provided an output filename was supplied).
Tuple7<int, bool, int, bool, bool, bool, bool> > {};
TEST_P(VideoEncodeAcceleratorTest, TestSimpleEncode) {
-  const int num_concurrent_encoders = GetParam().a;
+  const size_t num_concurrent_encoders = GetParam().a;
   const bool save_to_file = GetParam().b;
   const unsigned int keyframe_period = GetParam().c;
   const bool force_bitrate = GetParam().d;
   const bool mid_stream_bitrate_switch = GetParam().f;
   const bool mid_stream_framerate_switch = GetParam().g;
   ScopedVector<ClientStateNotification<ClientState> > notes;
-  // The clients can only be deleted after the encoder threads are stopped.
   ScopedVector<VEAClient> clients;
-  ScopedVector<base::Thread> encoder_threads;
+  // One shared encoder thread replaces the previous per-encoder threads; all
+  // clients post their work to it.
+  base::Thread encoder_thread("EncoderThread");
+  ASSERT_TRUE(encoder_thread.Start());
-  // Create all the encoders.
-  for (int i = 0; i < num_concurrent_encoders; i++) {
-    int test_stream_index = i % test_streams.size();
+  // Create all encoders.
+  for (size_t i = 0; i < num_concurrent_encoders; i++) {
+    // Round-robin the concurrent encoders over the parsed test streams.
+    size_t test_stream_index = i % g_env->test_streams_.size();
     // Disregard save_to_file if we didn't get an output filename.
     bool encoder_save_to_file =
         (save_to_file &&
-         !test_streams[test_stream_index]->out_filename.empty());
+         !g_env->test_streams_[test_stream_index]->out_filename.empty());
     notes.push_back(new ClientStateNotification<ClientState>());
-    clients.push_back(new VEAClient(*test_streams[test_stream_index],
+    clients.push_back(new VEAClient(g_env->test_streams_[test_stream_index],
                                     notes.back(),
                                     encoder_save_to_file,
                                     keyframe_period,
                                     force_bitrate,
-                                    test_perf));
-
-    // Initialize the encoder thread.
-    char thread_name[32];
-    sprintf(thread_name, "EncoderThread%d", i);
-    base::Thread* encoder_thread = new base::Thread(thread_name);
-    encoder_thread->Start();
-    encoder_thread->message_loop()->PostTask(
+                                    test_perf,
+                                    mid_stream_bitrate_switch,
+                                    mid_stream_framerate_switch));
+
+    encoder_thread.message_loop()->PostTask(
         FROM_HERE,
         base::Bind(&VEAClient::CreateEncoder,
                    base::Unretained(clients.back())));
-    encoder_threads.push_back(encoder_thread);
   }
-  // Wait all the encoders to finish.
-  for (int i = 0; i < num_concurrent_encoders; i++) {
-    ASSERT_EQ(notes[i]->Wait(), CS_ENCODER_SET);
-    ASSERT_EQ(notes[i]->Wait(), CS_INITIALIZED);
-    ASSERT_EQ(notes[i]->Wait(), CS_ENCODING);
-    ASSERT_EQ(notes[i]->Wait(), CS_FINISHING);
-    ASSERT_EQ(notes[i]->Wait(), CS_FINISHED);
-    encoder_threads[i]->message_loop()->PostTask(
+  // All encoders must pass through states in this order.
+  enum ClientState state_transitions[] = {CS_ENCODER_SET, CS_INITIALIZED,
+                                          CS_ENCODING, CS_FINISHED};
+  // NOTE(review): CS_FINISHING (awaited by the removed code above) is absent
+  // from this sequence -- confirm VEAClient no longer posts it to |notes|,
+  // otherwise the CS_FINISHED check below will see CS_FINISHING and fail.
+
+  // Wait for all encoders to go through all states and finish.
+  // Do this by waiting for all encoders to advance to state n before checking
+  // state n+1, to verify that they are able to operate concurrently.
+  // It also simulates the real-world usage better, as the main thread, on which
+  // encoders are created/destroyed, is a single GPU Process ChildThread.
+  // Moreover, we can't have proper multithreading on X11, so this could cause
+  // hard to debug issues there, if there were multiple "ChildThreads".
+  for (size_t state_no = 0; state_no < arraysize(state_transitions); ++state_no)
+    for (size_t i = 0; i < num_concurrent_encoders; i++)
+      ASSERT_EQ(notes[i]->Wait(), state_transitions[state_no]);
+
+  // Tear down the encoders on the thread they were created on.
+  for (size_t i = 0; i < num_concurrent_encoders; ++i) {
+    encoder_thread.message_loop()->PostTask(
         FROM_HERE,
         base::Bind(&VEAClient::DestroyEncoder, base::Unretained(clients[i])));
-    encoder_threads[i]->Stop();
   }
+
+  // This ensures all tasks have finished.
+  encoder_thread.Stop();
}
INSTANTIATE_TEST_CASE_P(
::testing::Values(MakeTuple(1, false, 0, true, false, false, true)));
INSTANTIATE_TEST_CASE_P(
- MidStreamParamSwitchBitrateAndFPS,
- VideoEncodeAcceleratorTest,
- ::testing::Values(MakeTuple(1, false, 0, true, false, true, true)));
-
-INSTANTIATE_TEST_CASE_P(
MultipleEncoders,
VideoEncodeAcceleratorTest,
::testing::Values(MakeTuple(3, false, 0, false, false, false, false),
- MakeTuple(3, false, 0, true, false, true, true)));
+ MakeTuple(3, false, 0, true, false, false, true),
+ MakeTuple(3, false, 0, true, false, true, false)));
// TODO(posciak): more tests:
// - async FeedEncoderWithOutput
int main(int argc, char** argv) {
  testing::InitGoogleTest(&argc, argv);  // Removes gtest-specific args.
-  CommandLine::Init(argc, argv);
+  base::CommandLine::Init(argc, argv);
  base::ShadowingAtExitManager at_exit_manager;
+
+  // Assemble the default test-stream description; ownership is transferred to
+  // the test environment below, which parses it in SetUp().
  scoped_ptr<base::FilePath::StringType> test_stream_data(
      new base::FilePath::StringType(
          media::GetTestDataFilePath(content::g_default_in_filename).value() +
          content::g_default_in_parameters));
-  content::g_test_stream_data = test_stream_data.get();
  // Needed to enable DVLOG through --vmodule.
  logging::LoggingSettings settings;
  settings.logging_dest = logging::LOG_TO_SYSTEM_DEBUG_LOG;
  CHECK(logging::InitLogging(settings));
-  CommandLine* cmd_line = CommandLine::ForCurrentProcess();
+  const base::CommandLine* cmd_line = base::CommandLine::ForCurrentProcess();
  DCHECK(cmd_line);
-  CommandLine::SwitchMap switches = cmd_line->GetSwitches();
-  for (CommandLine::SwitchMap::const_iterator it = switches.begin();
+  base::CommandLine::SwitchMap switches = cmd_line->GetSwitches();
+  for (base::CommandLine::SwitchMap::const_iterator it = switches.begin();
       it != switches.end();
       ++it) {
    if (it->first == "test_stream_data") {
      LOG(FATAL) << "Unexpected switch: " << it->first << ":" << it->second;
    }
+  // Register the global environment; gtest takes ownership and will delete it.
+  // AddGlobalTestEnvironment() returns the very ::testing::Environment* it was
+  // given, so the checked static_cast back to the concrete type is correct
+  // (reinterpret_cast would bypass all compile-time type checking for no
+  // benefit).
+  content::g_env =
+      static_cast<content::VideoEncodeAcceleratorTestEnvironment*>(
+          testing::AddGlobalTestEnvironment(
+              new content::VideoEncodeAcceleratorTestEnvironment(
+                  test_stream_data.Pass())));
+
  return RUN_ALL_TESTS();
}