#include "common/platform_result.h"
#include "common/tools.h"
+static_assert(ML_TENSOR_RANK_LIMIT == 4,
+ "This implementation assumes ML_TENSOR_RANK_LIMIT is equal to 4. Please fix the code if it changes.");
+
namespace extension {
namespace ml {
ReportSuccess(val, out);
}
-PlatformResult GetDimensionsFromJsonArray(picojson::array& dim,
- unsigned int dimensions[ML_TENSOR_RANK_LIMIT]) {
- ScopeLogger();
- bool foundValidValue = false;
- unsigned int validDimensions[ML_TENSOR_RANK_LIMIT];
- for (int i = 0; i < ML_TENSOR_RANK_LIMIT; i++) {
- validDimensions[i] = 1;
- }
- int dimSize = ML_TENSOR_RANK_LIMIT;
- if (dim.size() <= ML_TENSOR_RANK_LIMIT) {
- dimSize = dim.size();
- } else {
- LoggerD("Provided dimensions array is bigger than supported");
- }
-
- for (int i = dimSize - 1; i >= 0; i--) {
- auto& d = dim[i];
- if (!d.is<double>()) {
- LoggerE("dimensions array contains an invalid value: %s", d.serialize().c_str());
- return PlatformResult(ErrorCode::INVALID_VALUES_ERR,
- "dimensions array contains an invalid value");
- }
-
- int v = static_cast<int>(d.get<double>());
- if (v <= 0) {
- // dimensions with zeros at the end are valid
- // 0 after valid value is not accepted
- if (foundValidValue || (v < 0)) {
- LoggerE("dimensions array contains non-positive value: %d", v);
- return PlatformResult(ErrorCode::INVALID_VALUES_ERR,
- "dimensions array contains non-positive value");
- }
- continue;
- }
-
- foundValidValue = true;
- validDimensions[i] = static_cast<unsigned int>(v);
- }
-
- if (!foundValidValue) {
- LoggerE("No valid values found in dimensions array");
- return PlatformResult(ErrorCode::INVALID_VALUES_ERR,
- "dimensions array contains invalid values");
- }
-
- for (int i = 0; i < ML_TENSOR_RANK_LIMIT; i++) {
- dimensions[i] = validDimensions[i];
- }
- return PlatformResult(ErrorCode::NO_ERROR);
-}
-
void MlInstance::MLTensorsInfoAddTensorInfo(const picojson::value& args, picojson::object& out) {
ScopeLogger("args: %s", args.serialize().c_str());
CHECK_ARGS(args, kTensorsInfoId, double, out);
unsigned int dimensions[ML_TENSOR_RANK_LIMIT] = {};
auto dim = args.get(kDimensions).get<picojson::array>();
- result = GetDimensionsFromJsonArray(dim, dimensions);
+ result = util::GetDimensionsFromJsonArray(dim, dimensions);
if (!result) {
LogAndReportError(result, &out);
return;
unsigned int dimensions[ML_TENSOR_RANK_LIMIT] = {};
auto dim = args.get(kDimensions).get<picojson::array>();
- PlatformResult result = GetDimensionsFromJsonArray(dim, dimensions);
+ PlatformResult result = util::GetDimensionsFromJsonArray(dim, dimensions);
if (!result) {
LogAndReportError(result, &out);
return;
return;
}
// TODO: validate location and size - will be done in future commit
- int location[ML_TENSOR_RANK_LIMIT];
- int size[ML_TENSOR_RANK_LIMIT];
+ unsigned int location[ML_TENSOR_RANK_LIMIT];
+ unsigned int size[ML_TENSOR_RANK_LIMIT];
TensorRawData raw_data;
PlatformResult result = tensors_data->GetTensorRawData(index, location, size, &raw_data);
if (!result) {
return;
}
- std::vector<std::uint8_t> out_data{raw_data.data, raw_data.data + raw_data.size};
+ std::vector<std::uint8_t> out_data{raw_data.data, raw_data.data + raw_data.size_in_bytes};
out[kBuffer] = picojson::value(picojson::string_type, true);
common::encode_binary_in_string(out_data, out[kBuffer].get<std::string>());
}
void MlInstance::MLTensorsDataSetTensorRawData(const picojson::value& args, picojson::object& out) {
- ScopeLogger("args: %s", args.serialize().c_str());
+ ScopeLogger();
CHECK_ARGS(args, kTensorsDataId, double, out);
CHECK_ARGS(args, kIndex, double, out);
CHECK_ARGS(args, kBuffer, std::string, out);
CHECK_ARGS(args, kLocation, picojson::array, out);
CHECK_ARGS(args, kSize, picojson::array, out);
+ LoggerD("%s, %s", kTensorsDataId.c_str(), args.get(kTensorsDataId).serialize().c_str());
+ LoggerD("%s, %s", kIndex.c_str(), args.get(kIndex).serialize().c_str());
+ LoggerD("%s, %s", kLocation.c_str(), args.get(kLocation).serialize().c_str());
+ LoggerD("%s, %s", kSize.c_str(), args.get(kSize).serialize().c_str());
int tensors_data_id = static_cast<int>(args.get(kTensorsDataId).get<double>());
int index = static_cast<int>(args.get(kIndex).get<double>());
return;
}
- int location[ML_TENSOR_RANK_LIMIT] = {0, 0, 0, 0};
- int size[ML_TENSOR_RANK_LIMIT] = {-1, -1, -1, -1};
- // TODO: validate location and size - will be done in future commit
+ unsigned int location[ML_TENSOR_RANK_LIMIT] = {};
+ PlatformResult result =
+ util::GetLocationFromJsonArray(args.get(kLocation).get<picojson::array>(), location);
+ if (!result) {
+ LogAndReportError(result, &out);
+ return;
+ }
+
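+ // The full tensor dimensions are needed to validate the location and to fill in
+ // default sizes for axes that the size array does not specify.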
+ unsigned int dimensions[ML_TENSOR_RANK_LIMIT] = {};
+ result = tensors_data->GetTensorsInfo()->NativeGetTensorDimensions(index, dimensions);
+ if (!result) {
+ LogAndReportError(result, &out);
+ return;
+ }
+ unsigned int size[ML_TENSOR_RANK_LIMIT] = {};
+ result = util::GetSizeFromJsonArray(args.get(kSize).get<picojson::array>(), location, dimensions,
+ size);
+ if (!result) {
+ LogAndReportError(result, &out);
+ return;
+ }
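+ // Decode the string-encoded buffer received from the JS layer into raw bytes.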
const std::string& str_buffer = args.get(kBuffer).get<std::string>();
std::vector<std::uint8_t> buffer;
common::decode_binary_from_string(str_buffer, buffer);
- TensorRawData rawData{.data = buffer.data(), .size = buffer.size()};
- PlatformResult result = tensors_data->SetTensorRawData(index, location, size, rawData);
+ TensorRawData rawData{.data = buffer.data(), .size_in_bytes = buffer.size()};
+ result = tensors_data->SetTensorRawData(index, location, size, rawData);
if (!result) {
LogAndReportError(result, &out);
return;
return tensor_type_enum;
}
-PlatformResult TensorsData::GetTensorRawData(int index, int location[ML_TENSOR_RANK_LIMIT],
- int size[ML_TENSOR_RANK_LIMIT],
+PlatformResult TensorsData::GetTensorRawData(int index, unsigned int location[ML_TENSOR_RANK_LIMIT],
+ unsigned int size[ML_TENSOR_RANK_LIMIT],
TensorRawData* tensor_raw_data) {
ScopeLogger("id_: %d, index: %d", id_, index);
if (nullptr == tensor_raw_data) {
}
void* data;
size_t data_size;
- int ret = ml_tensors_data_get_tensor_data(handle_, index, &data, &data_size);
- if (ML_ERROR_NONE != ret) {
- LoggerE("ml_tensors_data_get_tensor_data failed: %d (%s)", ret, get_error_message(ret));
- return util::ToPlatformResult(ret, "Internal TensorsData error");
+ PlatformResult result = NativeGetTensorData(index, &data, &data_size);
+ if (!result) {
+ return result;
}
// TODO: add support for location and size - will be done in future commit
// Dimensions of whole tensor
unsigned int dim[ML_TENSOR_RANK_LIMIT];
- PlatformResult result = tensors_info_->NativeGetTensorDimensions(index, dim);
+ result = tensors_info_->NativeGetTensorDimensions(index, dim);
if (!result) {
return result;
}
}
tensor_raw_data->data = static_cast<uint8_t*>(data);
- tensor_raw_data->size = data_size;
+ tensor_raw_data->size_in_bytes = data_size;
return PlatformResult(ErrorCode::NO_ERROR);
}
-PlatformResult TensorsData::SetTensorRawData(int index, int location[ML_TENSOR_RANK_LIMIT],
- int size[ML_TENSOR_RANK_LIMIT],
+PlatformResult TensorsData::SetTensorRawData(int index, unsigned int location[ML_TENSOR_RANK_LIMIT],
+ unsigned int size[ML_TENSOR_RANK_LIMIT],
TensorRawData& tensor_raw_data) {
- ScopeLogger("id_: %d, index: %d", id_, index);
+ ScopeLogger("id_: %d, index: %d, tensor_raw_data.size_in_bytes: %zu", id_, index,
+ tensor_raw_data.size_in_bytes);
+ // Dimensions of whole tensor
+ unsigned int dim[ML_TENSOR_RANK_LIMIT];
+ // End coordinates (exclusive) of the updated region: location[i] + size[i] on each axis
+ unsigned int size_rel[ML_TENSOR_RANK_LIMIT];
+
+ PlatformResult result = tensors_info_->NativeGetTensorDimensions(index, dim);
+ if (!result) {
+ return result;
+ }
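+ // The element size is needed to convert element counts and positions into byte offsets.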
+ uint8_t bytes_per_element = tensors_info_->GetBytesPerElement(index);
+
+ // Check if update is partial due to location change
+ bool partial = false;
+ for (int i = 0; i < ML_TENSOR_RANK_LIMIT; i++) {
+ if (location[i] >= dim[i]) {
+ // Input data starts outside of current data
+ LoggerE("Input data location is invalid on [%d]: %u", i, location[i]);
+ return PlatformResult{ErrorCode::INVALID_VALUES_ERR, "Input data location is invalid"};
+ } else if (location[i] != 0) {
+ partial = true;
+ }
+ }
+
+ // Check that the data will fit in the TensorData, compute the end coordinates
+ // of the modified part, and check whether the update is partial due to a size change
+ size_t data_to_be_updated_size = bytes_per_element;
+ for (int i = 0; i < ML_TENSOR_RANK_LIMIT; i++) {
+ size_rel[i] = location[i] + size[i];
+ if (size_rel[i] > dim[i]) {
+ LoggerE("Input data will not fit in TensorData [%d]: %u > %u", i, size_rel[i], dim[i]);
+ return PlatformResult{ErrorCode::INVALID_VALUES_ERR, "Input data will not fit in TensorData"};
+ } else {
+ data_to_be_updated_size *= size[i];
+ if (size_rel[i] < dim[i]) {
+ partial = true;
+ }
+ }
+ }
+ // Check if provided TensorRawData is big enough
+ if (data_to_be_updated_size > tensor_raw_data.size_in_bytes) {
+ LoggerE("Input data is too small, expected: %zu, got: %zu", data_to_be_updated_size,
+ tensor_raw_data.size_in_bytes);
+ return PlatformResult{ErrorCode::INVALID_VALUES_ERR, "Input data is too small"};
+ }
+ // Check if provided TensorRawData is not too big
+ if (data_to_be_updated_size < tensor_raw_data.size_in_bytes) {
+ LoggerE("Input data is too big, expected: %zu, got: %zu", data_to_be_updated_size,
+ tensor_raw_data.size_in_bytes);
+ return PlatformResult{ErrorCode::INVALID_VALUES_ERR, "Input data is too big"};
+ }
+
+ int ret = 0;
+ if (partial) {
+ LoggerD("Partial update of tensor data");
+ // Get current data
+ void* void_data = nullptr;
+ size_t data_size;
+ result = NativeGetTensorData(index, &void_data, &data_size);
+ if (!result) {
+ return result;
+ }
+ uint8_t* data = static_cast<uint8_t*>(void_data);
+ // Allocate space for new data
+ auto new_data = std::make_unique<uint8_t[]>(data_size);
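+ // Rebuild the full tensor buffer: unchanged bytes are copied from the current
+ // data and only the requested region is overwritten with the new values.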
+ size_t position = 0;
+ size_t position_in_new_data = 0;
+ // The modified data spans the range from location to size_rel on each axis.
+ // The boolean flags below help to skip redundant checks:
+ // if a value's position on axis [a] lies outside the range [location[a], size_rel[a]),
+ // the value at that position does not need to be updated.
+ for (unsigned int i = 0; i < dim[3]; i++) {
+ bool update_3 = true;
+ if ((i < location[3]) || (i >= size_rel[3])) {
+ update_3 = false;
+ }
+ for (unsigned int j = 0; j < dim[2]; j++) {
+ bool update_2 = update_3;
+ if (update_2 && ((j < location[2]) || (j >= size_rel[2]))) {
+ update_2 = false;
+ }
+ for (unsigned int k = 0; k < dim[1]; k++) {
+ bool update_1 = update_2;
+ if (update_1 && ((k < location[1]) || (k >= size_rel[1]))) {
+ update_1 = false;
+ }
+ for (unsigned int l = 0; l < dim[0]; l++) {
+ bool update_0 = update_1;
+ if (update_0 && ((l < location[0]) || (l >= size_rel[0]))) {
+ update_0 = false;
+ }
+ if (update_0) {
+ memcpy(&new_data[position], &tensor_raw_data.data[position_in_new_data],
+ bytes_per_element);
+ position_in_new_data += bytes_per_element;
+ } else {
+ memcpy(&new_data[position], &data[position], bytes_per_element);
+ }
+ position += bytes_per_element;
+ }
+ }
+ }
+ }
+ LoggerD("Updated %zu bytes out of %zu bytes", position_in_new_data, position);
+ ret = ml_tensors_data_set_tensor_data(handle_, index, new_data.get(), data_size);
+ // new_data is released by unique_ptr
+ } else {
+ // All data is changed
+ ret = ml_tensors_data_set_tensor_data(handle_, index, tensor_raw_data.data,
+ tensor_raw_data.size_in_bytes);
+ }
- // TODO: add support for location and size - will be done in future commit
- int ret =
- ml_tensors_data_set_tensor_data(handle_, index, tensor_raw_data.data, tensor_raw_data.size);
if (ML_ERROR_NONE != ret) {
LoggerE("ml_tensors_data_set_tensor_data failed: %d (%s)", ret, get_error_message(ret));
return util::ToPlatformResult(ret, "Internal TensorsData error");
return PlatformResult(ErrorCode::NO_ERROR);
}
+TensorsInfo* TensorsData::GetTensorsInfo() {
+ return tensors_info_;
+}
+
PlatformResult TensorsData::NativeDestroy() {
ScopeLogger("id_: %d", id_);
int ret = ml_tensors_data_destroy(handle_);
return PlatformResult(ErrorCode::NO_ERROR);
}
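+// Helper that wraps ml_tensors_data_get_tensor_data() and maps its return code to a PlatformResult.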
+PlatformResult TensorsData::NativeGetTensorData(int index, void** raw_data, size_t* size) {
+ int ret = ml_tensors_data_get_tensor_data(handle_, index, raw_data, size);
+ if (ML_ERROR_NONE != ret) {
+ LoggerE("ml_tensors_data_get_tensor_data failed: %d (%s)", ret, get_error_message(ret));
+ return util::ToPlatformResult(ret, "Internal TensorsData error");
+ }
+ return PlatformResult(ErrorCode::NO_ERROR);
+}
+
TensorsDataManager::TensorsDataManager() : nextId_(0) {
ScopeLogger();
}
struct TensorRawData {
// TensorRawData does not take ownership of data, remember to handle it outside
uint8_t* data;
- size_t size;
+ size_t size_in_bytes;
std::string type_str;
unsigned int shape[ML_TENSOR_RANK_LIMIT];
};
int TensorsInfoId();
int Count();
ml_tensor_type_e GetTensorType(int index);
- PlatformResult GetTensorRawData(int index, int location[ML_TENSOR_RANK_LIMIT],
- int size[ML_TENSOR_RANK_LIMIT], TensorRawData* tensor_raw_data);
- PlatformResult SetTensorRawData(int index, int location[ML_TENSOR_RANK_LIMIT],
- int size[ML_TENSOR_RANK_LIMIT], TensorRawData& tensor_raw_data);
+ PlatformResult GetTensorRawData(int index, unsigned int location[ML_TENSOR_RANK_LIMIT],
+ unsigned int size[ML_TENSOR_RANK_LIMIT],
+ TensorRawData* tensor_raw_data);
+ PlatformResult SetTensorRawData(int index, unsigned int location[ML_TENSOR_RANK_LIMIT],
+ unsigned int size[ML_TENSOR_RANK_LIMIT],
+ TensorRawData& tensor_raw_data);
+ TensorsInfo* GetTensorsInfo();
PlatformResult NativeDestroy();
TensorsData(TensorsData const&) = delete;
TensorsData& operator=(TensorsData const&) = delete;
+ PlatformResult NativeGetTensorData(int index, void** raw_data, size_t* size);
+
ml_tensors_data_h handle_;
int id_;
TensorsInfo* tensors_info_;
return PlatformResult(ErrorCode::NO_ERROR);
}
+uint8_t TensorsInfo::GetBytesPerElement(int index) {
+ ScopeLogger("id_: %d, index: %d", id_, index);
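+ // Map the tensor element type to its size in bytes; fall back to 1 byte when the type cannot be determined.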
+ ml_tensor_type_e type = ML_TENSOR_TYPE_UNKNOWN;
+ int ret = ml_tensors_info_get_tensor_type(handle_, index, &type);
+ if (ML_ERROR_NONE == ret) {
+ switch (type) {
+ case ML_TENSOR_TYPE_INT8:
+ case ML_TENSOR_TYPE_UINT8:
+ case ML_TENSOR_TYPE_UNKNOWN:
+ return 1;
+ case ML_TENSOR_TYPE_INT16:
+ case ML_TENSOR_TYPE_UINT16:
+ return 2;
+ case ML_TENSOR_TYPE_INT32:
+ case ML_TENSOR_TYPE_UINT32:
+ case ML_TENSOR_TYPE_FLOAT32:
+ return 4;
+ case ML_TENSOR_TYPE_INT64:
+ case ML_TENSOR_TYPE_UINT64:
+ case ML_TENSOR_TYPE_FLOAT64:
+ return 8;
+ default:
+ return 1;
+ }
+ }
+ LoggerE("ml_tensors_info_get_tensor_type failed: [%d] (%s)", ret, get_error_message(ret));
+ return 1;
+}
+
TensorsInfoManager::TensorsInfoManager(TensorsDataManager* tensors_data_manager)
: nextId_(0), tensors_data_manager_(tensors_data_manager) {
ScopeLogger();
PlatformResult NativeSetTensorType(int index, const ml_tensor_type_e type);
PlatformResult NativeGetTensorSize(int index, size_t* size);
+ uint8_t GetBytesPerElement(int index);
+
private:
ml_tensors_info_h handle_;
int id_;
return available;
}
+PlatformResult GetDimensionsFromJsonArray(const picojson::array& dim,
+ unsigned int dimensions[ML_TENSOR_RANK_LIMIT]) {
+ ScopeLogger();
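+ // Missing entries and valid trailing zeros default to a dimension of 1.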
+ bool foundValidValue = false;
+ unsigned int validDimensions[ML_TENSOR_RANK_LIMIT];
+ for (int i = 0; i < ML_TENSOR_RANK_LIMIT; i++) {
+ validDimensions[i] = 1;
+ }
+ int dimSize = ML_TENSOR_RANK_LIMIT;
+ if (dim.size() <= ML_TENSOR_RANK_LIMIT) {
+ dimSize = dim.size();
+ } else {
+ LoggerD("Provided dimensions array is bigger than supported");
+ }
+
+ for (int i = dimSize - 1; i >= 0; i--) {
+ auto& d = dim[i];
+ if (!d.is<double>()) {
+ LoggerE("dimensions array contains an invalid value: %s", d.serialize().c_str());
+ return PlatformResult(ErrorCode::INVALID_VALUES_ERR,
+ "dimensions array contains an invalid value");
+ }
+
+ int v = static_cast<int>(d.get<double>());
+ if (v <= 0) {
+ // trailing zeros in the dimensions array are valid,
+ // but a zero that precedes a valid (positive) value is not accepted
+ if (foundValidValue || (v < 0)) {
+ LoggerE("dimensions array contains non-positive value: %d", v);
+ return PlatformResult(ErrorCode::INVALID_VALUES_ERR,
+ "dimensions array contains non-positive value");
+ }
+ continue;
+ }
+
+ foundValidValue = true;
+ validDimensions[i] = static_cast<unsigned int>(v);
+ }
+
+ if (!foundValidValue) {
+ LoggerE("No valid values found in dimensions array");
+ return PlatformResult(ErrorCode::INVALID_VALUES_ERR,
+ "dimensions array contains invalid values");
+ }
+
+ for (int i = 0; i < ML_TENSOR_RANK_LIMIT; i++) {
+ dimensions[i] = validDimensions[i];
+ }
+ return PlatformResult(ErrorCode::NO_ERROR);
+}
+
+PlatformResult GetLocationFromJsonArray(const picojson::array& array,
+ unsigned int location[ML_TENSOR_RANK_LIMIT]) {
+ if (array.size() > ML_TENSOR_RANK_LIMIT) {
+ LoggerD("Provided size array is bigger than supported");
+ }
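+ // Entries beyond ML_TENSOR_RANK_LIMIT are ignored; axes not present in the array
+ // keep the values the caller initialized them to (zero by default).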
+ int i = 0;
+ for (const auto& a : array) {
+ double num = -1;
+ if (a.is<double>()) {
+ num = a.get<double>();
+ }
+ if (num < 0) {
+ LoggerE("location array contains negative value: %s", a.serialize().c_str());
+ return PlatformResult(ErrorCode::INVALID_VALUES_ERR,
+ "location array contains negative value");
+ }
+ location[i] = static_cast<unsigned int>(num);
+ i++;
+ if (i == ML_TENSOR_RANK_LIMIT) {
+ break;
+ }
+ }
+ return PlatformResult(ErrorCode::NO_ERROR);
+}
+
+PlatformResult GetSizeFromJsonArray(const picojson::array& array,
+ unsigned int location[ML_TENSOR_RANK_LIMIT],
+ unsigned int dimensions[ML_TENSOR_RANK_LIMIT],
+ unsigned int size[ML_TENSOR_RANK_LIMIT]) {
+ if (array.size() > ML_TENSOR_RANK_LIMIT) {
+ LoggerD("Provided size array is bigger than supported");
+ }
+ int i = 0;
+ for (const auto& a : array) {
+ double num = 0;
+ if (a.is<double>()) {
+ num = a.get<double>();
+ }
+ if (num == 0) {
+ LoggerE("size array contains zero value: %s", a.serialize().c_str());
+ return PlatformResult(ErrorCode::INVALID_VALUES_ERR, "size array contains zero value");
+ } else if (num > 0) {
+ size[i] = static_cast<unsigned int>(num);
+ } else {
+ // a negative value means the size spans from location to the end of the axis
+ size[i] = dimensions[i] - location[i];
+ }
+ i++;
+ if (i == ML_TENSOR_RANK_LIMIT) {
+ break;
+ }
+ }
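+ // Axes not covered by the provided array default to the full remaining extent,
+ // i.e. from location to the end of the axis.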
+ for (; i < ML_TENSOR_RANK_LIMIT; i++) {
+ size[i] = dimensions[i] - location[i];
+ }
+ return PlatformResult(ErrorCode::NO_ERROR);
+}
+
} // util
} // ml
} // extension
bool CheckNNFWAvailability(const std::string& nnfw, const std::string& hw);
+PlatformResult GetDimensionsFromJsonArray(const picojson::array& dim,
+ unsigned int dimensions[ML_TENSOR_RANK_LIMIT]);
+PlatformResult GetLocationFromJsonArray(const picojson::array& array,
+ unsigned int location[ML_TENSOR_RANK_LIMIT]);
+PlatformResult GetSizeFromJsonArray(const picojson::array& array,
+ unsigned int location[ML_TENSOR_RANK_LIMIT],
+ unsigned int dimensions[ML_TENSOR_RANK_LIMIT],
+ unsigned int size[ML_TENSOR_RANK_LIMIT]);
+
} // util
} // ml
} // extension