From: 박세희/동작제어Lab(SR)/Principal Engineer/삼성전자
Date: Wed, 21 Nov 2018 08:15:16 +0000 (+0900)
Subject: [tflchef] Introduce TFliteImport (#2355)
X-Git-Tag: nncc_backup~1288
X-Git-Url: http://review.tizen.org/git/?a=commitdiff_plain;h=15760ea815e9e773dc1f90fa59d9b24b12db1500;p=platform%2Fcore%2Fml%2Fnnfw.git

[tflchef] Introduce TFliteImport (#2355)

* [tflchef] Introduce TFliteImport

This will introduce the TFliteImport class that provides the information
needed from tflite::Model to build a recipe

Signed-off-by: SaeHie Park

* add brief
---

diff --git a/contrib/tflchef/tflite/src/TFliteImport.cpp b/contrib/tflchef/tflite/src/TFliteImport.cpp
new file mode 100644
index 0000000..2e5ef50
--- /dev/null
+++ b/contrib/tflchef/tflite/src/TFliteImport.cpp
@@ -0,0 +1,145 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "TFliteImport.h"
+
+#include "Convert.h"
+
+#include <sstream>
+
+namespace tflchef
+{
+
+const char *kEmptyTensorName = "(noname)";
+
+const char *tensor_type(const tflite::Tensor *tensor)
+{
+  return tflite::EnumNameTensorType(tensor->type());
+}
+
+const char *tensor_name(const tflite::Tensor *tensor)
+{
+  auto name = tensor->name();
+  if (name)
+    return name->c_str();
+  return kEmptyTensorName;
+}
+
+bool is_valid(const tflite::OperatorCode *opcode)
+{
+  tflite::BuiltinOperator code = opcode->builtin_code();
+  return (tflite::BuiltinOperator_MIN <= code && code <= tflite::BuiltinOperator_MAX);
+}
+
+bool is_custom(const tflite::OperatorCode *opcode)
+{
+  tflite::BuiltinOperator code = opcode->builtin_code();
+  return (code == tflite::BuiltinOperator_CUSTOM);
+}
+
+TFliteImport::TFliteImport(const tflite::Model *model)
+{
+  _subgraphs = model->subgraphs();
+  _buffers = model->buffers();
+
+  auto opcodes = model->operator_codes();
+  for (const ::tflite::OperatorCode *opcode : *opcodes)
+  {
+    _op_codes.push_back(opcode);
+  }
+}
+
+bool TFliteImport::select_sub_graph(uint32_t sgindex)
+{
+  _tensors = nullptr;
+  _operators = nullptr;
+  _inputs.clear();
+  _outputs.clear();
+
+  if (_subgraphs->Length() <= sgindex)
+  {
+    assert(false);
+    return false;
+  }
+
+  const tflite::SubGraph *subgraph = (*_subgraphs)[sgindex];
+
+  _tensors = subgraph->tensors();
+  _operators = subgraph->operators();
+
+  _inputs = FlatBufferIntArrayToVector(subgraph->inputs());
+  _outputs = FlatBufferIntArrayToVector(subgraph->outputs());
+
+  return true;
+}
+
+tflite::BuiltinOperator TFliteImport::builtin_code(const tflite::Operator *op) const
+{
+  uint32_t index = op->opcode_index();
+  assert(index < _op_codes.size());
+  const tflite::OperatorCode *opcode = _op_codes.at(index);
+
+  return opcode->builtin_code();
+}
+
+std::string TFliteImport::opcode_name(const tflite::Operator *op) const
+{
+  uint32_t index = op->opcode_index();
+  assert(index < _op_codes.size());
+  const tflite::OperatorCode *opcode = _op_codes.at(index);
+
+  if (!is_valid(opcode))
+  {
+    std::ostringstream oss;
+    oss << "(invalid: " << index << ")";
+    return oss.str();
+  }
+
+  if (is_custom(opcode))
+  {
+    if (!opcode->custom_code())
+      return "(invalid custom)";
+
+    return opcode->custom_code()->c_str();
+  }
+
+  tflite::BuiltinOperator code = opcode->builtin_code();
+  return EnumNameBuiltinOperator(code);
+}
+
+size_t TFliteImport::buffer_info(const tflite::Tensor *tensor, const uint8_t **buff_data)
+{
+  *buff_data = nullptr;
+
+  if (tensor->buffer() == 0)
+    return 0;
+
+  if (auto *buffer = (*_buffers)[tensor->buffer()])
+  {
+    if (auto *array = buffer->data())
+    {
+      if (size_t size = array->size())
+      {
+        *buff_data = reinterpret_cast<const uint8_t *>(array->data());
+        return size;
+      }
+    }
+  }
+
+  return 0;
+}
+
+} // namespace tflchef
diff --git a/contrib/tflchef/tflite/src/TFliteImport.h b/contrib/tflchef/tflite/src/TFliteImport.h
new file mode 100644
index 0000000..a6ce62f
--- /dev/null
+++ b/contrib/tflchef/tflite/src/TFliteImport.h
@@ -0,0 +1,100 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __TFLITE_IMPORT_H__
+#define __TFLITE_IMPORT_H__
+
+#include
+
+#include
+
+#include <map>
+#include <vector>
+
+namespace tflchef
+{
+
+using TFliteSubGraphs_t = flatbuffers::Vector<flatbuffers::Offset<tflite::SubGraph>>;
+using TFliteTensors_t = flatbuffers::Vector<flatbuffers::Offset<tflite::Tensor>>;
+using TFliteBuffers_t = flatbuffers::Vector<flatbuffers::Offset<tflite::Buffer>>;
+using TFliteOperators_t = flatbuffers::Vector<flatbuffers::Offset<tflite::Operator>>;
+
+const char *tensor_type(const tflite::Tensor *tensor);
+const char *tensor_name(const tflite::Tensor *tensor);
+bool is_valid(const tflite::OperatorCode *opcode);
+bool is_custom(const tflite::OperatorCode *opcode);
+
+/**
+ * @brief Loads a TF lite file and provides helpers to access attributes
+ */
+class TFliteImport
+{
+public:
+  TFliteImport(const tflite::Model *model);
+
+  TFliteImport() = delete;
+
+public:
+  bool select_sub_graph(uint32_t subgraph);
+
+public:
+  const TFliteBuffers_t *buffers() { return _buffers; }
+  const TFliteTensors_t *tensors() { return _tensors; }
+  const TFliteOperators_t *operators() { return _operators; }
+  const std::vector<int32_t> &inputs() const { return _inputs; }
+  const std::vector<int32_t> &outputs() const { return _outputs; }
+
+  uint32_t num_subgraph() const { return _subgraphs->Length(); }
+
+  tflite::BuiltinOperator builtin_code(const tflite::Operator *op) const;
+  std::string opcode_name(const tflite::Operator *op) const;
+  size_t buffer_info(const tflite::Tensor *tensor, const uint8_t **buff_data);
+
+  /**
+   * @brief This will record the tensor by index if it needs a filler option,
+   *        such as kernel or bias.
+   */
+  void set_tensor_filler(uint32_t tensor_index) { _tensor_filler[tensor_index] = true; }
+
+  /**
+   * @brief This will return true if the tensor by index needs a filler option.
+   */
+  bool get_tensor_filler(uint32_t tensor_index)
+  {
+    auto it = _tensor_filler.find(tensor_index);
+    if (it != _tensor_filler.end())
+    {
+      return it->second;
+    }
+    return false;
+  }
+
+private:
+  const TFliteSubGraphs_t *_subgraphs;
+  const TFliteBuffers_t *_buffers;
+  const TFliteTensors_t *_tensors;
+  const TFliteOperators_t *_operators;
+
+  std::vector<const tflite::OperatorCode *> _op_codes;
+  std::vector<int32_t> _inputs;
+  std::vector<int32_t> _outputs;
+
+  std::map<uint32_t, bool> _tensor_filler;
+};
+
+} // namespace tflchef
+
+#endif // __TFLITE_IMPORT_H__
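
As a usage note (not part of the patch): the sketch below shows one way a recipe generator could drive TFliteImport once a .tflite flatbuffer has been loaded. The main() harness, the way the file is read into memory, and the <tflite_generated.h> name used for the generated TFLite schema header are assumptions for illustration only; the TFliteImport, tensor_name, tensor_type and buffer_info calls are the ones introduced above.

// Illustrative sketch: enumerate operators and tensors of a .tflite model
// using the TFliteImport helper from this commit.
// Assumption: <tflite_generated.h> stands in for the generated TFLite schema
// header; the exact name/path depends on the build setup.
#include "TFliteImport.h"

#include <tflite_generated.h> // assumed name of the generated schema header

#include <cstddef>
#include <cstdint>
#include <fstream>
#include <iostream>
#include <iterator>
#include <vector>

int main(int argc, char **argv)
{
  if (argc != 2)
  {
    std::cerr << "Usage: " << argv[0] << " model.tflite" << std::endl;
    return 1;
  }

  // Read the raw flatbuffer bytes (one simple way; real tools may mmap instead)
  std::ifstream file(argv[1], std::ios::binary);
  std::vector<char> data((std::istreambuf_iterator<char>(file)), std::istreambuf_iterator<char>());

  const tflite::Model *model = tflite::GetModel(data.data());

  tflchef::TFliteImport import(model);
  import.select_sub_graph(0); // a recipe is built from one subgraph at a time

  // Walk the operators of the selected subgraph and print their opcode names
  for (const tflite::Operator *op : *import.operators())
  {
    std::cout << "operation: " << import.opcode_name(op) << std::endl;
  }

  // Walk the tensors; buffer_info() reports which ones carry constant data
  for (const tflite::Tensor *tensor : *import.tensors())
  {
    const uint8_t *buff_data = nullptr;
    size_t size = import.buffer_info(tensor, &buff_data);
    std::cout << "tensor: " << tflchef::tensor_name(tensor) << " type "
              << tflchef::tensor_type(tensor) << (size ? " (constant)" : "") << std::endl;
  }

  return 0;
}

A recipe builder would additionally call set_tensor_filler() while visiting operator inputs such as kernels and biases, and later query get_tensor_filler() to decide which tensors need a filler option in the generated recipe.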