// Copyright (C) 2018-2019 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//

#include <memory>

#include "blob_factory.hpp"

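// Convenience wrappers: forward to the precision-explicit make_blob_with_precision
// overloads, passing along the precision carried by the TensorDesc.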
InferenceEngine::Blob::Ptr make_blob_with_precision(const InferenceEngine::TensorDesc& desc) {
    return make_blob_with_precision(desc.getPrecision(), desc);
}

InferenceEngine::Blob::Ptr make_blob_with_precision(const InferenceEngine::TensorDesc& desc, void *ptr) {
    return make_blob_with_precision(desc.getPrecision(), desc, ptr);
}

InferenceEngine::Blob::Ptr make_blob_with_precision(const InferenceEngine::TensorDesc& desc,
                                                    const std::shared_ptr<InferenceEngine::IAllocator>& alloc) {
    return make_blob_with_precision(desc.getPrecision(), desc, alloc);
}

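// Returns the dense layout matching the number of dimensions (C, NC, CHW, NCHW),
// or ANY for ranks that have no plain counterpart.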
InferenceEngine::Layout plain_layout(InferenceEngine::SizeVector dims) {
    size_t n = dims.size();
    return n == 1 ? InferenceEngine::C    :
           n == 2 ? InferenceEngine::NC   :
           n == 3 ? InferenceEngine::CHW  :
           n == 4 ? InferenceEngine::NCHW :
                    InferenceEngine::ANY;
}

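// Builds a blob whose layout is inferred from the dimension count via plain_layout().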
InferenceEngine::Blob::Ptr make_plain_blob(InferenceEngine::Precision prec, const InferenceEngine::SizeVector dims) {
    return make_blob_with_precision({prec, dims, plain_layout(dims)});
}

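// Creates a typed TBlob that mirrors the precision, layout and dimensions of a Data
// object. The blob's memory is not allocated here; that is left to the caller.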
InferenceEngine::Blob::Ptr CreateBlobFromData(const InferenceEngine::DataPtr &data) {
    // TODO: decide how the target layout should really be chosen here.
    // For now the Data object's layout is used as-is, with ANY mapped to NCHW.
    InferenceEngine::Layout targetLayout = data->getLayout();
    if (data->getLayout() == InferenceEngine::Layout::ANY) {
        targetLayout = InferenceEngine::Layout::NCHW;
    }

    switch (data->getPrecision()) {
        case InferenceEngine::Precision::FP32:
            return std::make_shared<InferenceEngine::TBlob<float>>(data->getPrecision(), targetLayout, data->getDims());
        case InferenceEngine::Precision::Q78:
        case InferenceEngine::Precision::I16:
        case InferenceEngine::Precision::FP16:
            return std::make_shared<InferenceEngine::TBlob<short>>(data->getPrecision(), targetLayout, data->getDims());
        case InferenceEngine::Precision::U8:
            return std::make_shared<InferenceEngine::TBlob<uint8_t>>(data->getPrecision(), targetLayout, data->getDims());
        case InferenceEngine::Precision::I8:
            return std::make_shared<InferenceEngine::TBlob<int8_t>>(data->getPrecision(), targetLayout, data->getDims());
        case InferenceEngine::Precision::I32:
            return std::make_shared<InferenceEngine::TBlob<int32_t>>(data->getPrecision(), targetLayout, data->getDims());
        default:
            THROW_IE_EXCEPTION << "Unsupported precision: " << data->getPrecision().name();
    }
}
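
// ---------------------------------------------------------------------------
// Illustrative usage sketch (not part of the upstream file): shows how the
// factory helpers above are typically combined. The precision, dimensions and
// layout below are arbitrary example values, not taken from the repository.
//
//   InferenceEngine::TensorDesc desc(InferenceEngine::Precision::FP32,
//                                    {1, 3, 224, 224},
//                                    InferenceEngine::Layout::NCHW);
//   InferenceEngine::Blob::Ptr blob = make_blob_with_precision(desc);
//   blob->allocate();  // memory is typically allocated by the caller
// ---------------------------------------------------------------------------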