/*
 * Copyright (c) 2021 Samsung Electronics Co., Ltd All Rights Reserved
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#include "ml_tensors_data_manager.h"

#include <cstring>

#include "ml_tensors_info_manager.h"

using common::ErrorCode;
using common::PlatformResult;
26 TensorsData::TensorsData(ml_tensors_data_h handle, int id, TensorsInfo* tensors_info,
27 bool owns_native_handle, bool immutable)
30 tensors_info_(tensors_info),
31 owns_native_handle_(owns_native_handle),
32 immutable_(immutable) {
36 TensorsData::~TensorsData() {
37 ScopeLogger("id_: %d, owns_native_handle_: %s", id_, owns_native_handle_ ? "true" : "false");
38 if (owns_native_handle_) {
39 if (!this->NativeDestroy()) {
40 LoggerE("TensorsData NativeDestroy failed");
43 // TensorsDataManager releases tensors_info_
46 ml_tensors_data_h TensorsData::Handle() {
50 int TensorsData::Id() {
54 int TensorsData::TensorsInfoId() {
55 return this->tensors_info_->Id();
58 int TensorsData::Count() {
59 return tensors_info_->Count();
62 bool TensorsData::DisposableFromJS() {
63 return owns_native_handle_;
66 ml_tensor_type_e TensorsData::GetTensorType(int index) {
67 ScopeLogger("id_: %d, index: %d", id_, index);
68 ml_tensor_type_e tensor_type_enum = ML_TENSOR_TYPE_UNKNOWN;
69 PlatformResult result = tensors_info_->NativeGetTensorType(index, &tensor_type_enum);
71 LoggerE("Failed to get tensor type");
73 return tensor_type_enum;
76 PlatformResult TensorsData::GetTensorRawData(
77 int index, unsigned int location[util::kWebApiMLTensorRankLimit],
78 unsigned int size[util::kWebApiMLTensorRankLimit],
79 TensorRawData* tensor_raw_data) {
80 ScopeLogger("id_: %d, index: %d", id_, index);
82 if (nullptr == tensor_raw_data) {
83 LoggerE("Invalid tensor_raw_data");
84 return PlatformResult(ErrorCode::ABORT_ERR);
89 PlatformResult result = NativeGetTensorData(index, &void_data, &data_size);
93 uint8_t* data = static_cast<uint8_t*>(void_data);
95 // Dimensions of whole tensor
96 unsigned int dim[util::kWebApiMLTensorRankLimit];
97 // Dimensions of expected tensors relative to location coordiantes
98 unsigned int size_rel[util::kWebApiMLTensorRankLimit];
100 result = tensors_info_->NativeGetTensorDimensions(index, dim);
105 // Check if update is partial due to location change
106 bool partial = false;
107 for (int i = 0; i < util::kWebApiMLTensorRankLimit; i++) {
108 if (location[i] >= dim[i]) {
109 // Input data starts outside of current data
110 LoggerE("Requested data location is invalid on [%d]: %u", i, location[i]);
111 return PlatformResult{ErrorCode::INVALID_VALUES_ERR, "Requested data location is invalid"};
112 } else if (location[i] != 0) {
117 uint8_t bytes_per_element = tensors_info_->GetBytesPerElement(index);
118 size_t data_to_be_returned_size = bytes_per_element;
120 // Check if data will fit in TensorData and calculate dimensions
121 // of returned part, also check if update is partial due to size change
122 for (int i = 0; i < util::kWebApiMLTensorRankLimit; i++) {
123 size_rel[i] = location[i] + size[i];
124 if (size_rel[i] < dim[i]) {
127 size_rel[i] = dim[i];
129 data_to_be_returned_size *= (size_rel[i] - location[i]);
133 LoggerD("Partial get of tensor data");
134 // Allocate data, it will be freed on TensorRawData destruction
135 auto new_data = std::make_unique<uint8_t[]>(data_to_be_returned_size);
136 size_t position_in_new_data = 0;
138 size_t delta2 = dim[1] * dim[0] * bytes_per_element;
139 size_t delta1 = dim[0] * bytes_per_element;
141 size_t position = location[3] * dim[2] * delta2;
142 for (unsigned int i = location[3]; i < size_rel[3]; i++) {
143 position += (location[2]) * delta2;
144 for (unsigned int j = location[2]; j < size_rel[2]; j++) {
145 position += (location[1]) * delta1;
146 for (unsigned int k = location[1]; k < size_rel[1]; k++) {
147 position += location[0] * bytes_per_element;
148 size_t length = (size_rel[0] - location[0]) * bytes_per_element;
149 mempcpy(&new_data[position_in_new_data], &data[position], length);
150 position_in_new_data += length;
151 position += (dim[0] - location[0]) * bytes_per_element;
153 position += (dim[1] - size_rel[1]) * delta1;
155 position += (dim[2] - size_rel[2]) * delta2;
157 if (position_in_new_data != data_to_be_returned_size) {
158 LoggerE("Error while copying data, expected: %zu, got: %zu", data_to_be_returned_size,
159 position_in_new_data);
160 return PlatformResult{ErrorCode::ABORT_ERR, "Internal error while fetching the data"};
162 tensor_raw_data->data = new_data.release();
163 tensor_raw_data->size_in_bytes = position_in_new_data;
164 tensor_raw_data->SetOwnership(true);
166 tensor_raw_data->data = data;
167 tensor_raw_data->size_in_bytes = data_size;
170 for (int i = 0; i < util::kWebApiMLTensorRankLimit; i++) {
171 tensor_raw_data->shape[i] = size_rel[i] - location[i];
174 result = types::TensorTypeEnum.getName(this->GetTensorType(index), &tensor_raw_data->type_str);
179 return PlatformResult(ErrorCode::NO_ERROR);
182 PlatformResult TensorsData::SetTensorRawData(
183 int index, unsigned int location[util::kWebApiMLTensorRankLimit],
184 unsigned int size[util::kWebApiMLTensorRankLimit],
185 TensorRawData& tensor_raw_data) {
186 ScopeLogger("id_: %d, index: %d, tensor_raw_data.size_in_bytes: %zu, immutable_: %s", id_, index,
187 tensor_raw_data.size_in_bytes, immutable_ ? "true" : "false");
190 return PlatformResult(ErrorCode::NO_ERROR);
193 // Dimensions of whole tensor
194 unsigned int dim[util::kWebApiMLTensorRankLimit];
195 // Dimensions of updated tensors relative to location coordiantes
196 unsigned int size_rel[util::kWebApiMLTensorRankLimit];
198 PlatformResult result = tensors_info_->NativeGetTensorDimensions(index, dim);
202 uint8_t bytes_per_element = tensors_info_->GetBytesPerElement(index);
204 // Check if update is partial due to location change
205 bool partial = false;
206 for (int i = 0; i < util::kWebApiMLTensorRankLimit; i++) {
207 if (location[i] >= dim[i]) {
208 // Input data starts outside of current data
209 LoggerE("Input data location is invalid on [%d]: %u", i, location[i]);
210 return PlatformResult{ErrorCode::INVALID_VALUES_ERR, "Input data location is invalid"};
211 } else if (location[i] != 0) {
216 // Check if data will fit in TensorData and calculate dimensions
217 // of modified part, also check if update is partial due to size change
218 size_t data_to_be_updated_size = bytes_per_element;
219 for (int i = 0; i < util::kWebApiMLTensorRankLimit; i++) {
220 size_rel[i] = location[i] + size[i];
221 if (size_rel[i] > dim[i]) {
222 LoggerE("Input data will not fit in TensorData [%d]: %u > %u", i, size_rel[i], dim[i]);
223 return PlatformResult{ErrorCode::INVALID_VALUES_ERR, "Input data will not fit in TensorData"};
225 data_to_be_updated_size *= size[i];
226 if (size_rel[i] < dim[i]) {
231 // Check if provided TensorRawData is big enough
232 if (data_to_be_updated_size > tensor_raw_data.size_in_bytes) {
233 LoggerE("Input data is too small, expected: %zu, got: %zu", data_to_be_updated_size,
234 tensor_raw_data.size_in_bytes);
235 return PlatformResult{ErrorCode::INVALID_VALUES_ERR, "Input data is too small"};
237 // Check if provided TensorRawData is not too big
238 if (data_to_be_updated_size < tensor_raw_data.size_in_bytes) {
239 LoggerE("Input data is too big, expected: %zu, got: %zu", data_to_be_updated_size,
240 tensor_raw_data.size_in_bytes);
241 return PlatformResult{ErrorCode::INVALID_VALUES_ERR, "Input data is too big"};
246 LoggerD("Partial update of tensor data");
248 void* void_data = nullptr;
250 result = NativeGetTensorData(index, &void_data, &data_size);
254 uint8_t* data = static_cast<uint8_t*>(void_data);
255 // Allocate space for new data
256 auto new_data = std::make_unique<uint8_t[]>(data_size);
258 size_t position_in_new_data = 0;
259 // Modified data is in range from location to size_rel
260 // boolean values helps to optimize checks for updates
261 // if value's position in respective axis [a] is outside of range <location[a]; size_rel[a])
262 // then there is no need to update value on that position
263 for (unsigned int i = 0; i < dim[3]; i++) {
264 bool update_3 = true;
265 if ((i < location[3]) || (i >= size_rel[3])) {
268 for (unsigned int j = 0; j < dim[2]; j++) {
269 bool update_2 = update_3;
270 if (update_2 && ((j < location[2]) || (j >= size_rel[2]))) {
273 for (unsigned int k = 0; k < dim[1]; k++) {
274 bool update_1 = update_2;
275 if (update_1 && ((k < location[1]) || (k >= size_rel[1]))) {
278 for (unsigned int l = 0; l < dim[0]; l++) {
279 bool update_0 = update_1;
280 if (update_0 && ((l < location[0]) || (l >= size_rel[0]))) {
284 mempcpy(&new_data[position], &tensor_raw_data.data[position_in_new_data],
286 position_in_new_data += bytes_per_element;
288 mempcpy(&new_data[position], &data[position], bytes_per_element);
290 position += bytes_per_element;
295 LoggerD("Updated %zu bytes out of %zu bytes", position_in_new_data, position);
296 ret = ml_tensors_data_set_tensor_data(handle_, index, new_data.get(), data_size);
297 // new_data is released by unique_ptr
299 // All data is changed
300 ret = ml_tensors_data_set_tensor_data(handle_, index, tensor_raw_data.data,
301 tensor_raw_data.size_in_bytes);
304 if (ML_ERROR_NONE != ret) {
305 LoggerE("ml_tensors_data_set_tensor_data failed: %d (%s)", ret, get_error_message(ret));
306 return util::ToPlatformResult(ret, "Internal TensorsData error");
309 return PlatformResult(ErrorCode::NO_ERROR);
312 TensorsInfo* TensorsData::GetTensorsInfo() {
313 return tensors_info_;
316 PlatformResult TensorsData::NativeDestroy() {
317 ScopeLogger("id_: %d", id_);
318 int ret = ml_tensors_data_destroy(handle_);
319 if (ML_ERROR_NONE != ret) {
320 LoggerE("ml_tensors_data_destroy failed: %d (%s)", ret, get_error_message(ret));
321 return util::ToPlatformResult(ret, "Failed to destroy handle");
323 return PlatformResult(ErrorCode::NO_ERROR);
326 PlatformResult TensorsData::NativeGetTensorData(int index, void** raw_data, size_t* size) {
327 int ret = ml_tensors_data_get_tensor_data(handle_, index, raw_data, size);
328 if (ML_ERROR_NONE != ret) {
329 LoggerE("ml_tensors_data_get_tensor_data failed: %d (%s)", ret, get_error_message(ret));
330 return util::ToPlatformResult(ret, "Internal TensorsData error");
332 return PlatformResult(ErrorCode::NO_ERROR);
335 TensorsDataManager::TensorsDataManager() : nextId_(0) {
339 TensorsDataManager::~TensorsDataManager() {
342 std::lock_guard<std::mutex> lock{map_and_next_id_mutex_};
346 TensorsData* TensorsDataManager::CreateTensorsData(TensorsInfo* tensors_info) {
348 if (nullptr == tensors_info) {
349 LoggerE("Could not find tensor");
353 ml_tensors_data_h tensors_data_handle;
354 int ret = ml_tensors_data_create(tensors_info->Handle(), &tensors_data_handle);
355 if (ML_ERROR_NONE != ret) {
356 LoggerE("ml_tensors_data_create failed: %d (%s)", ret, get_error_message(ret));
360 std::lock_guard<std::mutex> lock{map_and_next_id_mutex_};
362 auto t = std::make_unique<TensorsData>(tensors_data_handle, id, tensors_info);
363 map_[id] = std::move(t);
365 return map_[id].get();
368 TensorsData* TensorsDataManager::CreateTensorsData(TensorsInfo* tensors_info,
369 const ml_tensors_data_h tensors_data_handle,
370 bool owns_native_handle, bool immutable) {
371 ScopeLogger("owns_native_handle: %s, immutable: %s", owns_native_handle ? "true" : "false",
372 immutable ? "true" : "false");
374 if (nullptr == tensors_info) {
375 LoggerE("tensors_info is a nullptr");
379 if (nullptr == tensors_data_handle) {
380 LoggerE("tensors_data_handle is nullptr");
384 std::lock_guard<std::mutex> lock{map_and_next_id_mutex_};
386 auto t = std::make_unique<TensorsData>(tensors_data_handle, id, tensors_info, owns_native_handle,
388 map_[id] = std::move(t);
390 return map_[id].get();
393 TensorsData* TensorsDataManager::GetTensorsData(int id) {
394 ScopeLogger("id: %d", id);
396 std::lock_guard<std::mutex> lock{map_and_next_id_mutex_};
397 if (map_.end() != map_.find(id)) {
398 return map_[id].get();
404 PlatformResult TensorsDataManager::DisposeTensorsData(int id) {
405 ScopeLogger("id: %d", id);
407 TensorsData* t = GetTensorsData(id);
409 return DisposeTensorsData(t);
412 PlatformResult TensorsDataManager::DisposeTensorsData(TensorsData* t) {
416 LoggerE("Could not find tensor");
417 return PlatformResult(ErrorCode::ABORT_ERR);
420 std::lock_guard<std::mutex> lock{map_and_next_id_mutex_};
423 return PlatformResult(ErrorCode::NO_ERROR);