1 // Copyright (C) 2018-2019 Intel Corporation
2 // SPDX-License-Identifier: Apache-2.0
6 #include <gtest/gtest.h>
10 #include "mock_allocator.hpp"
12 #include <cpp/ie_cnn_net_reader.h>
13 #include <gmock/gmock-spec-builders.h>
18 #define UNUSED __attribute__((unused))
21 using namespace ::testing;
23 using namespace InferenceEngine;
// Test fixture shared by all Blob tests. Provides a factory for a gmock-based
// allocator so that TBlob's alloc/lock/unlock/free calls can be verified.
// NOTE(review): interior lines (method bodies/closing braces) are elided in
// this excerpt — confirm against the full file.
25 class BlobTests: public ::testing::Test {
27     virtual void TearDown() {
30     virtual void SetUp() {
// Creates a fresh MockAllocator; tests downcast it to IAllocator for TBlob.
33     shared_ptr<MockAllocator> createMockAllocator() {
34         return shared_ptr<MockAllocator>(new MockAllocator());
// RAII timing helper fragment: captures a start timestamp at construction and,
// on scope exit, passes the elapsed time to the stored callback.
43     chrono::high_resolution_clock::time_point t0;   // start time, set in ctor
44     function<void(int)> cb;                          // invoked with elapsed duration
46     ScopedTimer(function<void(int)> callback)
47             : t0(chrono::high_resolution_clock::now())
53         auto t1 = chrono::high_resolution_clock::now();
// NOTE(review): the variable is named 'milli' but duration_cast uses
// chrono::microseconds, so the value is in microseconds — likely misnamed; confirm.
54         auto milli = chrono::duration_cast<chrono::microseconds>(t1-t0).count();
// Verifies that TBlob routes memory management through the supplied allocator:
// one alloc of 1*2*3 floats is expected, and exactly one free on destruction.
60 TEST_F(BlobTests, canCreateBlobUsingDefaultAllocator)
62     SizeVector v = {1,2,3};
63     auto allocator = createMockAllocator();
// Mock returns a dummy non-null handle; the blob never dereferences it here.
65     EXPECT_CALL(*allocator.get(), alloc(1 * 2 * 3 * sizeof(float))).WillRepeatedly(Return((void*)1));
66     EXPECT_CALL(*allocator.get(), free(_)).Times(1);
69     TBlob<float> blob(Precision::FP32, CHW, v, dynamic_pointer_cast<IAllocator>(allocator));
// A repeated allocation on the same blob must release the previous buffer:
// alloc and free are each expected exactly twice (one pair per allocation).
// NOTE(review): the allocate() calls themselves appear elided in this excerpt.
74 TEST_F(BlobTests, secondAllocateWontMemLeak) {
75     SizeVector v = {1,2,3};
76     auto allocator = createMockAllocator();
78     EXPECT_CALL(*allocator.get(), alloc(1 * 2 * 3 * sizeof(float))).Times(2).WillRepeatedly(Return((void*)1));
79     EXPECT_CALL(*allocator.get(), free(_)).Times(2).WillRepeatedly(Return(true));
82         TBlob<float> blob(Precision::FP32, CHW, v, dynamic_pointer_cast<IAllocator>(allocator));
// If lock() fails (mock returns the default nullptr), the blob must not call
// unlock(): only one lock is expected and no unlock expectation is set.
89 TEST_F(BlobTests, doesNotUnlockIfLockFailed)
91     SizeVector v = {1,2,3};
92     auto allocator = createMockAllocator();
94     EXPECT_CALL(*allocator.get(), alloc(1 * 2 * 3 * sizeof(float))).WillRepeatedly(Return((void*)1));
95     EXPECT_CALL(*allocator.get(), lock((void*)1,LOCK_FOR_WRITE)).Times(1);
96     EXPECT_CALL(*allocator.get(), free(_)).Times(1);
98     TBlob<float> blob(Precision::FP32, CHW, v, dynamic_pointer_cast<IAllocator>(allocator));
// data() triggers the (failing) lock; result intentionally unused.
101     float UNUSED *ptr = blob.data();
// data() must return the pointer produced by the allocator's lock(): the mock
// hands back a local array and the test reads an element through the blob.
105 TEST_F(BlobTests, canAccessDataUsingAllocator)
107     SizeVector v = {1,2,3};
108     auto allocator = createMockAllocator();
110     float data[] = {5.f,6.f,7.f};
112     EXPECT_CALL(*allocator.get(), alloc(1 * 2 * 3 * sizeof(float))).WillRepeatedly(Return((void*)1));
113     EXPECT_CALL(*allocator.get(), lock((void*)1, LOCK_FOR_WRITE)).WillRepeatedly(Return(data));
// A successful lock must be balanced by exactly one unlock.
114     EXPECT_CALL(*allocator.get(), unlock((void*)1)).Times(1);
115     EXPECT_CALL(*allocator.get(), free(_)).Times(1);
117     TBlob<float> blob(Precision::FP32, CHW, v, dynamic_pointer_cast<IAllocator>(allocator));
120         float *ptr = blob.data();
121         ASSERT_EQ(ptr[2] , 7);
// readOnly() must acquire the buffer with LOCK_FOR_READ (not LOCK_FOR_WRITE)
// and still balance the lock with a single unlock.
127 TEST_F(BlobTests, canLockReadOnlyDataForRead)
129     SizeVector v = {1, 2, 3};
130     auto allocator = createMockAllocator();
132     float data[] = {5,6,7};
134     EXPECT_CALL(*allocator.get(), alloc(1 * 2 * 3 * sizeof(float))).WillRepeatedly(Return((void*)1));
135     EXPECT_CALL(*allocator.get(), lock(_,LOCK_FOR_READ)).WillRepeatedly(Return(data));
136     EXPECT_CALL(*allocator.get(), free(_)).Times(1);
137     EXPECT_CALL(*allocator.get(), unlock((void*)1)).Times(1);
139     TBlob<float> blob(Precision::FP32, CHW, v, dynamic_pointer_cast<IAllocator>(allocator));
142     const float *ptr = blob.readOnly();
143     ASSERT_EQ(ptr[2] , 7);
// buffer() (the type-erased LockedMemory accessor on the Blob base) must
// expose the same memory that the allocator's lock() returned.
146 TEST_F(BlobTests, canAccessDataUsingBufferBaseMethod)
148     SizeVector v = {1, 2, 3};
149     auto allocator = createMockAllocator();
151     float data[] = {5,6,7};
153     EXPECT_CALL(*allocator.get(), alloc(1 * 2 * 3 * sizeof(float))).WillRepeatedly(Return((void*)1));
154     EXPECT_CALL(*allocator.get(), lock(_,LOCK_FOR_WRITE)).WillRepeatedly(Return(data));
155     EXPECT_CALL(*allocator.get(), unlock((void*)1)).Times(1);
156     EXPECT_CALL(*allocator.get(), free(_)).Times(1);
158     TBlob<float> blob(Precision::FP32, CHW, v, dynamic_pointer_cast<IAllocator>(allocator));
160     auto buffer = blob.buffer();
// Double cast unwraps the LockedMemory proxy to a raw typed pointer.
161     float *ptr = (float * )(void*)buffer;
162     ASSERT_EQ(ptr[2] , 7);
// Move-constructing a TBlob of the same element type must transfer ownership:
// the moved-to blob serves the original memory, and alloc/free happen once each.
165 TEST_F(BlobTests, canMoveFromTBlobWithSameType)
167     SizeVector v = {1, 2, 3};
168     auto allocator = createMockAllocator();
170     uint8_t data[] = {5,6};
172     EXPECT_CALL(*allocator.get(), alloc(1 * 2 * 3 * sizeof(uint8_t))).WillRepeatedly(Return((void*)1));
173     EXPECT_CALL(*allocator.get(), lock(_,LOCK_FOR_WRITE)).WillRepeatedly(Return(data));
174     EXPECT_CALL(*allocator.get(), unlock((void*)1)).Times(1);
175     EXPECT_CALL(*allocator.get(), free(_)).Times(1);
177     TBlob<uint8_t > blob(Precision::U8, CHW, v, dynamic_pointer_cast<IAllocator>(allocator));
180     TBlob<uint8_t > newBlob(std::move(blob));
182     auto buffer = newBlob.buffer();
183     uint8_t *ptr = (uint8_t * )(void*)buffer;
184     ASSERT_EQ(ptr[0] , data[0]);
// After a move, the destination blob must report the source's dims and size
// even though no memory was ever allocated.
187 TEST_F(BlobTests, saveDimsAndSizeAfterMove)
189     SizeVector v = {1, 2, 3};
190     auto allocator = createMockAllocator();
192     TBlob<uint8_t > blob(Precision::U8, CHW, v, dynamic_pointer_cast<IAllocator>(allocator));
194     TBlob<uint8_t > newBlob(std::move(blob));
196     ASSERT_EQ(newBlob.size(), 1 * 2 * 3);
197     ASSERT_EQ(newBlob.dims()[0], 1);
198     ASSERT_EQ(newBlob.dims()[1], 2);
199     ASSERT_EQ(newBlob.dims()[2], 3);
// set() must not throw on a blob whose memory has been released.
// NOTE(review): the allocate/deallocate steps appear elided in this excerpt.
203 TEST_F(BlobTests, canSetAfterFree)
205     SizeVector v = {1, 3};
206     TBlob<uint8_t> blob(Precision::U8, HW, v);
213     ASSERT_NO_THROW(blob.set({1,2,3}));
// set() must also work on a blob that was never allocated at all.
216 TEST_F(BlobTests, canSetAfterFreeNonAllocated)
218     SizeVector v = {1, 3};
219     TBlob<uint8_t> blob(Precision::U8, HW, v);
221     ASSERT_NO_THROW(blob.set({1,2,3}));
// Copy construction must produce an independent blob with identical dims,
// size, and element values.
// NOTE(review): the line populating the source blob appears elided here.
225 TEST_F(BlobTests, canCopyBlob)
227     SizeVector v = {1, 3};
228     TBlob<uint8_t> blob(Precision::U8, HW,v);
234     TBlob<uint8_t> blob2(blob);
236     ASSERT_EQ(blob2.dims().size(), blob.dims().size());
237     ASSERT_EQ(blob2.dims()[0], blob.dims()[0]);
238     ASSERT_EQ(blob2.dims()[1], blob.dims()[1]);
239     ASSERT_EQ(blob2.size(), blob.size());
240     ASSERT_EQ(blob2.data()[0], blob.data()[0]);
241     ASSERT_EQ(blob2.data()[1], blob.data()[1]);
242     ASSERT_EQ(blob2.data()[2], blob.data()[2]);
// On an unallocated blob, the accessors must compare equal to nullptr (in both
// operand orders) without the comparison itself dereferencing anything.
245 TEST_F(BlobTests, canCompareToNullPtrWithoutDereferencing) {
246     SizeVector v = {1, 2, 3};
247     auto allocator = createMockAllocator();
249     TBlob<uint8_t> blob(Precision::U8, CHW, v, dynamic_pointer_cast<IAllocator>(allocator));
251     ASSERT_TRUE(blob.readOnly() == nullptr);
252     ASSERT_TRUE(blob.data() == nullptr);
253     ASSERT_TRUE(blob.buffer() == nullptr);
255     ASSERT_TRUE(nullptr == blob.readOnly());
256     ASSERT_TRUE(nullptr == blob.data());
257     ASSERT_TRUE(nullptr == blob.buffer());
// Construction alone defines the logical size but performs no allocation:
// size() is non-zero while buffer() is still null.
260 TEST_F(BlobTests, canCreateBlob) {
261     InferenceEngine::SizeVector size = { 1, 1, 1 };
262     InferenceEngine::TBlob<float> blob(Precision::FP32, CHW, size);
263     ASSERT_NE(blob.size(), 0);
264     ASSERT_EQ(blob.buffer(), nullptr);
// After allocation, data() must return a usable non-null buffer.
// NOTE(review): the allocate() call appears elided in this excerpt.
267 TEST_F(BlobTests, canAllocateBlob) {
268     InferenceEngine::SizeVector size = { 1, 1, 1 };
269     InferenceEngine::TBlob<float> blob(Precision::FP32, CHW, size);
271     float* buffer = static_cast<float*>(blob.data());
272     ASSERT_NE(buffer, nullptr);
// After deallocation, data() must yield a null pointer again.
// NOTE(review): the allocate()/deallocate() calls appear elided in this excerpt.
275 TEST_F(BlobTests, canDeallocateBlob) {
276     InferenceEngine::SizeVector size = { 1, 1, 1 };
277     InferenceEngine::TBlob<float> blob(Precision::FP32, CHW, size);
280     ASSERT_EQ(nullptr, blob.data().as<float*>());
// A blob constructed without dimensions reports an empty dims vector.
283 TEST_F(BlobTests, canCreateBlobWithoutDims) {
284     InferenceEngine::TBlob<float> blob(Precision::FP32, NCHW);
285     ASSERT_EQ(blob.dims().size(), 0);
// Setting data on a dimensionless blob defines its byte size from the vector.
// NOTE(review): the blob.set(data) call appears elided in this excerpt.
288 TEST_F(BlobTests, canSetToBlobWithoutDims) {
289     InferenceEngine::TBlob<float> blob(Precision::FP32, C);
290     std::vector<float> data = { 1.0f, 2.0f, 3.0f };
292     ASSERT_EQ(blob.byteSize(), data.size() * sizeof(float));
// readOnly() must be callable on a const blob and return its buffer.
295 TEST_F(BlobTests, canReadDataFromConstBlob) {
296     InferenceEngine::TBlob<float> blob(Precision::FP32, CHW, { 1, 1, 1 });
298     InferenceEngine::TBlob<float> const blob2 = blob;
299     const float* buf = blob2.readOnly();
300     ASSERT_NE(buf, nullptr);
// Exercises the make_shared_blob factory overloads: no dims -> size 0,
// dims {1,1,1} -> size 1, explicit dim {0} -> size 0.
303 TEST_F(BlobTests, canMakeSharedBlob) {
304     InferenceEngine::SizeVector size = { 1, 1, 1 };
305     InferenceEngine::TBlob<float>::Ptr blob1 = InferenceEngine::make_shared_blob<float>(Precision::FP32, NCHW);
306     InferenceEngine::TBlob<float>::Ptr blob2 = InferenceEngine::make_shared_blob<float>(Precision::FP32, CHW, size);
307     InferenceEngine::TBlob<float>::Ptr blob3
308         = InferenceEngine::make_shared_blob<float, InferenceEngine::SizeVector >(Precision::FP32, C, { 0 });
309     ASSERT_EQ(blob1->size(), 0);
310     ASSERT_EQ(blob2->size(), 1);
311     ASSERT_EQ(blob3->size(), 0);
// A TBlob<float> built over an FP16 TensorDesc is a type/precision mismatch
// and must throw InferenceEngineException.
314 TEST_F(BlobTests, cannotCreateBlobWithIncorrectPrecision) {
315     InferenceEngine::TensorDesc desc(InferenceEngine::Precision::FP16, {1, 3, 227, 227}, Layout::NCHW);
316     ASSERT_THROW(InferenceEngine::make_shared_blob<float>(desc), InferenceEngine::details::InferenceEngineException);
// Iterating a blob with range-for must visit the values passed to set(), in order.
// NOTE(review): the loop body (presumably dump.push_back(e)) is elided here.
319 TEST_F(BlobTests, canUseBlobInMoveSemantics) {
321     TBlob<float> b(Precision::FP32, C);
322     b.set({1.0f, 2.0f, 3.0f});
324     std::vector<float> dump;
326     for (const auto & e: b) {
330     ASSERT_EQ(dump.size(), 3);
332     ASSERT_EQ(dump[0], 1.0f);
333     ASSERT_EQ(dump[1], 2.0f);
334     ASSERT_EQ(dump[2], 3.0f);
// Disabled: iterating directly over a temporary shared blob (rvalue) — checks
// that locked memory stays valid for the duration of the range-for.
// NOTE(review): the loop body (presumably dump.push_back(e)) is elided here.
338 TEST_F(BlobTests, DISABLED_canUseLockedMemoryAsRvalueReference) {
340     std::vector<float> dump;
341     for (auto e: *make_shared_blob(Precision::FP32, C, std::vector<float>({1.0f, 2.0f, 3.0f}))) {
345     ASSERT_EQ(dump.size(), 3);
347     ASSERT_EQ(dump[0], 1.0f);
348     ASSERT_EQ(dump[1], 2.0f);
349     ASSERT_EQ(dump[2], 3.0f);
// A blob wrapping caller-owned memory must iterate over that exact array:
// begin() aliases `input` and the iterator walks its first two elements.
// NOTE(review): the iterator declaration/increments appear elided here.
352 TEST_F(BlobTests, canCreateBlobOnExistedMemory) {
354     float input[] = {0.1f, 0.2f, 0.3f};
// dims {1,2} -> blob covers only the first 2 of the 3 preallocated floats.
356         auto b = make_shared_blob<float>(Precision::FP32, HW, {1, 2}, input);
358         ASSERT_NEAR(*i, 0.1, 0.00001);
360         ASSERT_NEAR(*i, 0.2, 0.00001);
362         ASSERT_EQ(i, b->end());
364         ASSERT_EQ(&*b->begin(), input);
// Passing a null preallocated pointer must throw rather than create a blob.
368 TEST_F(BlobTests, preAllocatorWillnotWorkIfPtrNotAlocated) {
369     ASSERT_ANY_THROW(TBlob<float>(Precision::FP32, C, {1}, nullptr));
// Resizing a preallocated blob beyond its original size must not expose a
// buffer (the external memory cannot grow).
// NOTE(review): the Resize() calls between assertions appear elided here.
372 TEST_F(BlobTests, cannotIncreaseSizeOfPreallocated) {
374     float input[] = {0.1f, 0.2f, 0.3f};
375     auto b = make_shared_blob(Precision::FP32, HW, {1, 2}, input);
// Since the pre-allocator never releases memory, the caller must be careful:
// the blob still refers to the original external array.
378     ASSERT_EQ(nullptr, b->buffer().as<float*>());
381     ASSERT_NE(nullptr, b->buffer().as<float*>());
384     ASSERT_NE(nullptr, b->buffer().as<float*>());
// A preallocated blob created with an explicit capacity (100) can be resized
// within that capacity; an over-capacity resize exposes no buffer.
// NOTE(review): the Resize() calls between assertions appear elided here.
388 TEST_F(BlobTests, canAcceptpreallocatedSize) {
390     float input[] = {0.1f, 0.2f, 0.3f};
391     auto b = make_shared_blob(Precision::FP32, HW, {1, 2}, input, 100);
// Since the pre-allocator never releases memory, the caller must be careful:
// the blob still refers to the original external array.
394     ASSERT_EQ(nullptr, b->buffer().as<float*>());
397     ASSERT_NE(nullptr, b->buffer().as<float*>());
// When free() reports failure, Resize() to a larger shape must still request a
// new allocation: one alloc is expected for each of the two sizes.
400 TEST_F(BlobTests, ifBlobCannotReleaseItWillReuseOldMemory) {
402     SizeVector v = {1,2,3};
403     auto allocator = createMockAllocator();
405     EXPECT_CALL(*allocator.get(), alloc(1 * 2 * 3 * sizeof(float))).WillOnce(Return((void*)1));
406     EXPECT_CALL(*allocator.get(), alloc(1 * 2 * 4 * sizeof(float))).WillOnce(Return((void*)1));
// free() always fails — the blob cannot release the old buffer.
407     EXPECT_CALL(*allocator.get(), free(_)).WillRepeatedly(Return(false));
410     TBlob<float> blob(Precision::FP32, CHW, v, dynamic_pointer_cast<IAllocator>(allocator));
412     blob.Resize({1,2,4});
// Resize() on a never-allocated blob must not touch the allocator: no alloc
// expectations are set, so any alloc call would fail the strict mock.
416 TEST_F(BlobTests, ifBlobCannotReleaseItWillReuseOldMemoryOnlyIfAllocated) {
418     SizeVector v = {1,2,3};
419     auto allocator = createMockAllocator();
421     EXPECT_CALL(*allocator.get(), free(_)).WillRepeatedly(Return(false));
424     TBlob<float> blob(Precision::FP32, CHW, v, dynamic_pointer_cast<IAllocator>(allocator));
425     blob.Resize({1,2,4});
// Range-for over a blob must yield mutable references: every element written
// in the loop (presumably set to 5 — the write itself is elided in this
// excerpt) is observed via data() afterwards.
429 TEST_F(BlobTests, canModifyDataInRangedFor) {
431     SizeVector v = {1,2,3};
432     TBlob<int> blob(Precision::I32, CHW, v);
435     for (auto & data : blob) {
439     for(int i=0;i<v.size();i++) {
440         ASSERT_EQ(5, blob.data()[i]) << "Mismatch at" << i;
// ROI over an NCHW blob: checks the BlockingDesc of the cropped view —
// dims shrink to the ROI, order stays NCHW, offset = y*W + x = 1*5 + 2 = 7,
// and strides remain those of the parent (5x6) picture.
444 TEST_F(BlobTests, makeRoiBlobNchw) {
445     // we create main blob with NCHW layout. We will crop ROI from this blob.
446     SizeVector dims = {1, 3, 6, 5};  // RGB picture of size (WxH) = 5x6
447     Blob::Ptr blob = make_shared_blob<uint8_t>(TensorDesc(Precision::U8, dims, NCHW));
450     // create ROI blob based on the already created blob
451     ROI roi = {0, 2, 1, 2, 4};  // cropped picture with: id = 0, (x,y) = (2,1), sizeX (W) = 2, sizeY (H) = 4
452     Blob::Ptr roiBlob = make_shared_blob(blob, roi);
454     // check that BlockingDesc is constructed properly for the ROI blob
455     SizeVector refDims = {1, 3, 4, 2};
456     SizeVector refOrder = {0, 1, 2, 3};
457     size_t refOffset = 7;
458     SizeVector refStrides = {90, 30, 5, 1};
459     ASSERT_EQ(roiBlob->getTensorDesc().getBlockingDesc().getBlockDims(), refDims);
460     ASSERT_EQ(roiBlob->getTensorDesc().getBlockingDesc().getOrder(), refOrder);
461     ASSERT_EQ(roiBlob->getTensorDesc().getBlockingDesc().getOffsetPadding(), refOffset);
462     ASSERT_EQ(roiBlob->getTensorDesc().getBlockingDesc().getStrides(), refStrides);
// ROI over an NHWC blob: interleaved layout, so the expected offset is
// (y*W + x)*C = (2*8 + 3)*3 = 57 and strides are those of the 8x4 parent.
465 TEST_F(BlobTests, makeRoiBlobNhwc) {
466     // we create main blob with NHWC layout. We will crop ROI from this blob.
467     SizeVector dims = {1, 3, 4, 8};  // RGB picture of size (WxH) = 8x4
468     Blob::Ptr blob = make_shared_blob<uint8_t>(TensorDesc(Precision::U8, dims, NHWC));
471     // create ROI blob based on the already created blob
472     ROI roi = {0, 3, 2, 5, 2};  // cropped picture with: id = 0, (x,y) = (3,2), sizeX (W) = 5, sizeY (H) = 2
473     Blob::Ptr roiBlob = make_shared_blob(blob, roi);
475     // check that BlockingDesc is constructed properly for the ROI blob
476     SizeVector refDims = {1, 2, 5, 3};
477     SizeVector refOrder = {0, 2, 3, 1};
478     size_t refOffset = 57;
479     SizeVector refStrides = {96, 24, 3, 1};
480     ASSERT_EQ(roiBlob->getTensorDesc().getBlockingDesc().getBlockDims(), refDims);
481     ASSERT_EQ(roiBlob->getTensorDesc().getBlockingDesc().getOrder(), refOrder);
482     ASSERT_EQ(roiBlob->getTensorDesc().getBlockingDesc().getOffsetPadding(), refOffset);
483     ASSERT_EQ(roiBlob->getTensorDesc().getBlockingDesc().getStrides(), refStrides);
// An ROI extending past the parent blob's bounds (x=1 + W=4 > 4) must be
// rejected with InferenceEngineException.
486 TEST_F(BlobTests, makeRoiBlobWrongSize) {
487     // we create main blob with NCHW layout. We will crop ROI from this blob.
488     SizeVector dims = {1, 3, 4, 4};  // RGB picture of size (WxH) = 4x4
489     Blob::Ptr blob = make_shared_blob<uint8_t>(TensorDesc(Precision::U8, dims, NCHW));
492     // try to create ROI blob with wrong size
493     ROI roi = {0, 1, 1, 4, 4};  // cropped picture with: id = 0, (x,y) = (1,1), sizeX (W) = 4, sizeY (H) = 4
494     ASSERT_THROW(make_shared_blob(blob, roi), InferenceEngine::details::InferenceEngineException);