/*
 * Copyright (c) 2020 Samsung Electronics Co., Ltd All Rights Reserved
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
21 #include "common/logger.h"
// Maps the hardware-type names exposed to the Web API onto the native
// ml_nnfw_hw_e values. Used e.g. by CheckNNFWAvailability() below.
const PlatformEnum<ml_nnfw_hw_e> HWTypeEnum{
    {"ANY", ML_NNFW_HW_ANY},
    {"AUTO", ML_NNFW_HW_AUTO},
    {"CPU", ML_NNFW_HW_CPU},
    {"CPU_NEON", ML_NNFW_HW_CPU_NEON},
    {"CPU_SIMD", ML_NNFW_HW_CPU_SIMD},
    {"GPU", ML_NNFW_HW_GPU},
    {"NPU", ML_NNFW_HW_NPU},
    {"NPU_EDGE_TPU", ML_NNFW_HW_NPU_EDGE_TPU},
    {"NPU_MOVIDIUS", ML_NNFW_HW_NPU_MOVIDIUS},
    {"NPU_SLSI", ML_NNFW_HW_NPU_SLSI},
    {"NPU_SR", ML_NNFW_HW_NPU_SR},
    {"NPU_VIVANTE", ML_NNFW_HW_NPU_VIVANTE}};
// Maps the NN framework names exposed to the Web API onto the native
// ml_nnfw_type_e values. Note that a few JS-side names differ from the
// native enum suffix (e.g. "ARM_NN" -> ML_NNFW_TYPE_ARMNN,
// "OPEN_VINO" -> ML_NNFW_TYPE_OPENVINO).
const PlatformEnum<ml_nnfw_type_e> NNFWTypeEnum{
    {"ANY", ML_NNFW_TYPE_ANY},
    {"ARM_NN", ML_NNFW_TYPE_ARMNN},
    {"CUSTOM_FILTER", ML_NNFW_TYPE_CUSTOM_FILTER},
    {"EDGE_TPU", ML_NNFW_TYPE_EDGE_TPU},
    {"MVNC", ML_NNFW_TYPE_MVNC},
    {"NNFW", ML_NNFW_TYPE_NNFW},
    {"NNTR_INF", ML_NNFW_TYPE_NNTR_INF},
    {"OPEN_VINO", ML_NNFW_TYPE_OPENVINO},
    {"PYTORCH", ML_NNFW_TYPE_PYTORCH},
    {"SNAP", ML_NNFW_TYPE_SNAP},
    {"SNPE", ML_NNFW_TYPE_SNPE},
    {"TRIX_ENGINE", ML_NNFW_TYPE_TRIX_ENGINE},
    {"TENSORFLOW", ML_NNFW_TYPE_TENSORFLOW},
    {"TENSORFLOW_LITE", ML_NNFW_TYPE_TENSORFLOW_LITE},
    {"VD_AIFW", ML_NNFW_TYPE_VD_AIFW},
    {"VIVANTE", ML_NNFW_TYPE_VIVANTE}};
// Maps tensor element-type names exposed to the Web API onto the native
// ml_tensor_type_e values.
const PlatformEnum<ml_tensor_type_e> TensorTypeEnum{
    {"INT8", ML_TENSOR_TYPE_INT8}, {"UINT8", ML_TENSOR_TYPE_UINT8},
    {"INT16", ML_TENSOR_TYPE_INT16}, {"UINT16", ML_TENSOR_TYPE_UINT16},
    {"FLOAT32", ML_TENSOR_TYPE_FLOAT32}, {"INT32", ML_TENSOR_TYPE_INT32},
    {"UINT32", ML_TENSOR_TYPE_UINT32}, {"FLOAT64", ML_TENSOR_TYPE_FLOAT64},
    {"INT64", ML_TENSOR_TYPE_INT64}, {"UINT64", ML_TENSOR_TYPE_UINT64},
    {"UNKNOWN", ML_TENSOR_TYPE_UNKNOWN}};
// Maps training-optimizer names exposed to the Web API onto the native
// ml_train_optimizer_type_e values.
const PlatformEnum<ml_train_optimizer_type_e> OptimizerTypeEnum{
    {"OPTIMIZER_ADAM", ML_TRAIN_OPTIMIZER_TYPE_ADAM},
    {"OPTIMIZER_SGD", ML_TRAIN_OPTIMIZER_TYPE_SGD},
    {"OPTIMIZER_UNKNOWN", ML_TRAIN_OPTIMIZER_TYPE_UNKNOWN}};
// Maps training-layer names exposed to the Web API onto the native
// ml_train_layer_type_e values (note "LAYER_IN" -> ..._TYPE_INPUT).
const PlatformEnum<ml_train_layer_type_e> LayerTypeEnum{
    {"LAYER_IN", ML_TRAIN_LAYER_TYPE_INPUT},
    {"LAYER_FC", ML_TRAIN_LAYER_TYPE_FC},
    {"LAYER_BN", ML_TRAIN_LAYER_TYPE_BN},
    {"LAYER_CONV2D", ML_TRAIN_LAYER_TYPE_CONV2D},
    {"LAYER_POOLING2D", ML_TRAIN_LAYER_TYPE_POOLING2D},
    {"LAYER_FLATTEN", ML_TRAIN_LAYER_TYPE_FLATTEN},
    {"LAYER_ACTIVATION", ML_TRAIN_LAYER_TYPE_ACTIVATION},
    {"LAYER_ADDITION", ML_TRAIN_LAYER_TYPE_ADDITION},
    {"LAYER_CONCAT", ML_TRAIN_LAYER_TYPE_CONCAT},
    {"LAYER_MULTIOUT", ML_TRAIN_LAYER_TYPE_MULTIOUT},
    {"LAYER_EMBEDDING", ML_TRAIN_LAYER_TYPE_EMBEDDING},
    {"LAYER_RNN", ML_TRAIN_LAYER_TYPE_RNN},
    {"LAYER_LOSS_MSE", ML_TRAIN_LAYER_TYPE_LOSS_MSE},
    {"LAYER_LOSS_CROSS_ENTROPY_SIGMOID",
     ML_TRAIN_LAYER_TYPE_LOSS_CROSS_ENTROPY_SIGMOID},
    {"LAYER_LOSS_CROSS_ENTROPY_SOFTMAX",
     ML_TRAIN_LAYER_TYPE_LOSS_CROSS_ENTROPY_SOFTMAX},
    {"LAYER_BACKBONE_NNSTREAMER", ML_TRAIN_LAYER_TYPE_BACKBONE_NNSTREAMER},
    {"LAYER_UNKNOWN", ML_TRAIN_LAYER_TYPE_UNKNOWN}};
// Maps model-summary verbosity names exposed to the Web API onto the
// native ml_train_summary_type_e values.
const PlatformEnum<ml_train_summary_type_e> SummaryTypeEnum{
    {"SUMMARY_MODEL", ML_TRAIN_SUMMARY_MODEL},
    {"SUMMARY_LAYER", ML_TRAIN_SUMMARY_LAYER},
    {"SUMMARY_TENSOR", ML_TRAIN_SUMMARY_TENSOR}};
102 PlatformResult ToPlatformResult(int ml_error_code,
103 const std::string& error_message_beginning) {
104 ScopeLogger("ml_error_code: [%d] (%s)", ml_error_code,
105 get_error_message(ml_error_code));
107 switch (ml_error_code) {
109 return PlatformResult{};
110 case ML_ERROR_INVALID_PARAMETER:
111 return PlatformResult{ErrorCode::INVALID_VALUES_ERR,
112 error_message_beginning + ": invalid parameter"};
113 case ML_ERROR_PERMISSION_DENIED:
114 return PlatformResult{ErrorCode::SECURITY_ERR,
115 error_message_beginning + ": permission denied"};
116 case ML_ERROR_TRY_AGAIN:
117 return PlatformResult{ErrorCode::INVALID_STATE_ERR,
118 error_message_beginning + ": invalid state"};
119 case ML_ERROR_TIMED_OUT:
120 return PlatformResult{ErrorCode::TIMEOUT_ERR,
121 error_message_beginning + ": timeout"};
122 case ML_ERROR_NOT_SUPPORTED:
123 return PlatformResult{ErrorCode::NOT_SUPPORTED_ERR,
124 error_message_beginning + ": not supported"};
125 case ML_ERROR_STREAMS_PIPE:
126 case ML_ERROR_UNKNOWN:
127 case ML_ERROR_OUT_OF_MEMORY:
129 return PlatformResult{
130 ErrorCode::ABORT_ERR,
131 error_message_beginning + ": an unknown error occurred"};
135 bool CheckNNFWAvailability(const std::string& nnfw, const std::string& hw,
136 optional<std::string> customRequirement) {
138 ml_nnfw_type_e nnfw_e = ML_NNFW_TYPE_ANY;
139 ml_nnfw_hw_e hw_e = ML_NNFW_HW_ANY;
141 PlatformResult result = types::NNFWTypeEnum.getValue(nnfw, &nnfw_e);
143 LoggerE("NNFWTypeEnum.getValue() failed, error: %s",
144 result.message().c_str());
147 result = types::HWTypeEnum.getValue(hw, &hw_e);
149 LoggerE("HWTypeEnum.getValue() failed, error: %s",
150 result.message().c_str());
153 const char* customRequirementPtr =
154 customRequirement ? customRequirement->c_str() : nullptr;
155 bool available = false;
156 int ret = ml_check_nnfw_availability_full(nnfw_e, hw_e, customRequirementPtr,
159 if (ML_ERROR_NONE != ret) {
160 LoggerE("ml_check_nnfw_availability_full failed: %d (%s)", ret,
161 get_error_message(ret));
165 LoggerD("ml_check_nnfw_availability_full: %s", available ? "true" : "false");
169 PlatformResult GetDimensionsFromJsonArray(
170 const picojson::array& dim, unsigned int dimensions[ML_TENSOR_RANK_LIMIT]) {
172 bool foundValidValue = false;
173 unsigned int validDimensions[ML_TENSOR_RANK_LIMIT];
174 for (int i = 0; i < ML_TENSOR_RANK_LIMIT; i++) {
175 validDimensions[i] = 1;
177 int dimSize = ML_TENSOR_RANK_LIMIT;
178 if (dim.size() <= ML_TENSOR_RANK_LIMIT) {
179 dimSize = dim.size();
181 LoggerD("Provided dimensions array is bigger than supported");
184 for (int i = dimSize - 1; i >= 0; i--) {
186 if (!d.is<double>()) {
187 LoggerE("dimensions array contains an invalid value: %s",
188 d.serialize().c_str());
189 return PlatformResult(ErrorCode::INVALID_VALUES_ERR,
190 "dimensions array contains an invalid value");
193 int v = static_cast<int>(d.get<double>());
195 // dimensions with zeros at the end are valid
196 // 0 after valid value is not accepted
197 if (foundValidValue || (v < 0)) {
198 LoggerE("dimensions array contains non-positive value: %d", v);
199 return PlatformResult(ErrorCode::INVALID_VALUES_ERR,
200 "dimensions array contains non-positive value");
205 foundValidValue = true;
206 validDimensions[i] = static_cast<unsigned int>(v);
209 if (!foundValidValue) {
210 LoggerE("No valid values found in dimensions array");
211 return PlatformResult(ErrorCode::INVALID_VALUES_ERR,
212 "dimensions array contains invalid values");
215 for (int i = 0; i < ML_TENSOR_RANK_LIMIT; i++) {
216 dimensions[i] = validDimensions[i];
218 return PlatformResult(ErrorCode::NO_ERROR);
221 PlatformResult GetLocationFromJsonArray(
222 const picojson::array& array, unsigned int location[ML_TENSOR_RANK_LIMIT]) {
223 if (array.size() > ML_TENSOR_RANK_LIMIT) {
224 LoggerD("Provided size array is bigger than supported");
227 for (const auto& a : array) {
229 if (a.is<double>()) {
230 num = a.get<double>();
233 LoggerE("location array contains negative value: %s",
234 a.serialize().c_str());
235 return PlatformResult(ErrorCode::INVALID_VALUES_ERR,
236 "location array contains negative value");
238 location[i] = static_cast<unsigned int>(num);
240 if (i == ML_TENSOR_RANK_LIMIT) {
244 return PlatformResult(ErrorCode::NO_ERROR);
247 PlatformResult GetSizeFromJsonArray(
248 const picojson::array& array, unsigned int location[ML_TENSOR_RANK_LIMIT],
249 unsigned int dimensions[ML_TENSOR_RANK_LIMIT],
250 unsigned int size[ML_TENSOR_RANK_LIMIT]) {
251 if (array.size() > ML_TENSOR_RANK_LIMIT) {
252 LoggerD("Provided size array is bigger than supported");
255 for (const auto& a : array) {
257 if (a.is<double>()) {
258 num = a.get<double>();
261 LoggerE("size array contains zero value: %s", a.serialize().c_str());
262 return PlatformResult(ErrorCode::INVALID_VALUES_ERR,
263 "size array contains zero value");
264 } else if (num > 0) {
265 size[i] = static_cast<unsigned int>(num);
267 // in case of negative value, size becomes size from location to end of
269 size[i] = dimensions[i] - location[i];
272 if (i == ML_TENSOR_RANK_LIMIT) {
276 for (; i < ML_TENSOR_RANK_LIMIT; i++) {
277 size[i] = dimensions[i] - location[i];
279 return PlatformResult(ErrorCode::NO_ERROR);
284 } // namespace extension