/*
 * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#include "ANeuralNetworksModel.h"
#include "NNAPIConvert.h"
#include "OperationFactory.h"

#include "ir/Operations.Include.h"
#include "util/logging.h"

#include <memory>
#include <stdexcept>
//
// ANeuralNetworksModel
//
29 ANeuralNetworksModel::ANeuralNetworksModel() noexcept
30 : _finished_building{false}, _optional_operands{}, _operand_usages{}, _allowFloat32toFloat16{
33 _graph = std::make_shared<onert::ir::Graph>();
36 bool ANeuralNetworksModel::addOperand(const ANeuralNetworksOperandType *type) noexcept
40 const auto shape = NNAPIConvert::getShape(type);
41 const auto typeInfo = NNAPIConvert::getTypeInfo(type);
42 _graph->addOperand(shape, typeInfo);
43 _operand_usages.emplace_back(OperandUsage::NOT_DEFINED);
45 catch (const std::exception &e)
47 VERBOSE(EXCEPTION) << e.what() << std::endl;
55 bool ANeuralNetworksModel::setOperandValue(uint32_t index, const void *buffer, size_t length,
56 bool optional, bool copy) noexcept
58 const onert::ir::OperandIndex ind{index};
62 _operand_usages[index] = OperandUsage::CONSTANT;
64 // Remain operands.at(ind).data()->base() as nullptr for optional operand
65 // This will be filled when model finished
68 setOptionalOperand(ind);
71 using onert::ir::CachedData;
72 using onert::ir::ExternalData;
75 _graph->operands().at(ind).data(
76 std::make_unique<CachedData>(reinterpret_cast<const uint8_t *>(buffer), length));
80 _graph->operands().at(ind).data(
81 std::make_unique<ExternalData>(reinterpret_cast<const uint8_t *>(buffer), length));
84 catch (const std::exception &e)
86 VERBOSE(EXCEPTION) << e.what() << std::endl;
94 bool ANeuralNetworksModel::addOperation(ANeuralNetworksOperationType type, uint32_t inputCount,
95 const uint32_t *inputs, uint32_t outputCount,
96 const uint32_t *outputs) noexcept
100 for (uint32_t i = 0; i < outputCount; i++)
102 _operand_usages[outputs[i]] = OperandUsage::OPERATION_OUTPUT;
105 auto &factory = OperationFactory::get();
106 OperationFactory::Param param{inputCount, inputs, outputCount, outputs};
108 auto node = factory.create(type, param, _graph->operands());
109 _graph->addOperation(std::unique_ptr<onert::ir::Operation>{node});
111 // TODO Move these codes to delegate.cpp
112 if (type == ANEURALNETWORKS_FULLY_CONNECTED)
114 const auto &input_operand =
115 _graph->operands().at(node->getInputs().at(onert::ir::operation::FullyConnected::INPUT));
116 auto &weights_operand =
117 _graph->operands().at(node->getInputs().at(onert::ir::operation::FullyConnected::WEIGHT));
118 if (input_operand.typeInfo().type() == onert::ir::DataType::FLOAT32 &&
119 weights_operand.typeInfo().type() == onert::ir::DataType::QUANT_UINT8_ASYMM)
121 weights_operand.type(onert::ir::DataType::QUANT_INT8_SYMM);
125 catch (const std::exception &e)
127 VERBOSE(EXCEPTION) << e.what() << std::endl;
135 bool ANeuralNetworksModel::addOperationEx(ANeuralNetworksOperationTypeEx type, uint32_t inputCount,
136 const uint32_t *inputs, uint32_t outputCount,
137 const uint32_t *outputs) noexcept
141 for (uint32_t i = 0; i < outputCount; i++)
143 _operand_usages[outputs[i]] = OperandUsage::OPERATION_OUTPUT;
146 auto &factory = OperationFactory::get();
147 OperationFactory::Param param{inputCount, inputs, outputCount, outputs};
149 auto node = factory.create(type, param, _graph->operands());
150 _graph->addOperation(std::unique_ptr<onert::ir::Operation>{node});
152 catch (const std::exception &e)
159 bool ANeuralNetworksModel::addModelInput(uint32_t index) noexcept
163 _operand_usages[index] = OperandUsage::MODEL_INPUT;
165 const onert::ir::OperandIndex ind{index};
166 _graph->addInput(ind);
168 catch (const std::exception &e)
170 VERBOSE(EXCEPTION) << e.what() << std::endl;
177 bool ANeuralNetworksModel::addModelOutput(uint32_t index) noexcept
181 const onert::ir::OperandIndex ind{index};
183 // Duplicated output is not allowed
184 if (_graph->getOutputs().contains(ind))
189 _graph->addOutput(ind);
191 catch (const std::exception &e)
193 VERBOSE(EXCEPTION) << e.what() << std::endl;
201 void ANeuralNetworksModel::allowFloat32toFloat16(bool allow) noexcept
203 _allowFloat32toFloat16 = allow;
206 bool ANeuralNetworksModel::finish() noexcept
210 fillOptionalOperand();
213 _operand_usages.clear();
214 _finished_building = true;
216 catch (const std::exception &e)
218 VERBOSE(EXCEPTION) << e.what() << '\n';
226 bool ANeuralNetworksModel::isFinished() noexcept { return _finished_building; }
228 bool ANeuralNetworksModel::isExistOperand(uint32_t index) noexcept
230 return _graph->operands().exist(onert::ir::OperandIndex{index});
233 size_t ANeuralNetworksModel::operandSize(uint32_t index) noexcept
237 return _graph->operands().at(onert::ir::OperandIndex{index}).operandSize();
239 catch (const std::exception &e)
241 VERBOSE(EXCEPTION) << e.what() << '\n';
247 bool ANeuralNetworksModel::isUsageSet(uint32_t index) noexcept
249 return (_operand_usages[index] != OperandUsage::NOT_DEFINED);
252 bool ANeuralNetworksModel::isOperationOutput(uint32_t index) noexcept
254 return (_operand_usages[index] == OperandUsage::OPERATION_OUTPUT);
257 void ANeuralNetworksModel::setOptionalOperand(const onert::ir::OperandIndex idx)
259 _optional_operands.insert(idx);
262 void ANeuralNetworksModel::fillOptionalOperand(void)
264 _graph->operations().iterate([&](const onert::ir::OperationIndex &, onert::ir::Operation &node) {
265 for (auto input : node.getInputs())
267 // TODO fill default value for optional operands
268 if (_optional_operands.find(input) != _optional_operands.end())
270 throw std::runtime_error{"Optional operand is not supported yet"};
276 std::shared_ptr<onert::ir::Model> ANeuralNetworksModel::getModel() const
278 auto model = std::make_shared<onert::ir::Model>();
280 model->push(onert::ir::SubgraphIndex{0}, _graph);
281 // TODO Find all child subgraphs and copy them to all_subgs
282 // Must find the same subgraph by using to compare pointer of subgraphs and set subgraph's index
283 // to operands of control flow operations
284 // Must clean all child subgraphs's pointer to prevent memory leak in case of that graph has
285 // subgraph itself recursively