/*
 * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#include "ANeuralNetworksModel.h"

#include <memory>

#include "NNAPIConvert.h"
#include "OperationFactory.h"

#include "ir/Operations.Include.h"
#include "util/logging.h"
//
// ANeuralNetworksModel
//
29 ANeuralNetworksModel::ANeuralNetworksModel() noexcept
30 : _optional_operands{}, _operand_usages{}, _allowFloat32toFloat16{false}
32 _graph = std::make_shared<onert::ir::Graph>();
35 bool ANeuralNetworksModel::addOperand(const ANeuralNetworksOperandType *type) noexcept
39 const auto shape = NNAPIConvert::getShape(type);
40 const auto typeInfo = NNAPIConvert::getTypeInfo(type);
41 _graph->addOperand(shape, typeInfo);
42 _operand_usages.emplace_back(OperandUsage::NOT_DEFINED);
44 catch (const std::exception &e)
46 VERBOSE(EXCEPTION) << e.what() << std::endl;
54 bool ANeuralNetworksModel::setOperandValue(uint32_t index, const void *buffer, size_t length,
55 bool optional, bool copy) noexcept
57 const onert::ir::OperandIndex ind{index};
61 _operand_usages[index] = OperandUsage::CONSTANT;
63 // Remain operands.at(ind).data()->base() as nullptr for optional operand
64 // This will be filled when model finished
67 setOptionalOperand(ind);
70 using onert::ir::CachedData;
71 using onert::ir::ExternalData;
74 _graph->operands().at(ind).data(
75 std::make_unique<CachedData>(reinterpret_cast<const uint8_t *>(buffer), length));
79 _graph->operands().at(ind).data(
80 std::make_unique<ExternalData>(reinterpret_cast<const uint8_t *>(buffer), length));
83 catch (const std::exception &e)
85 VERBOSE(EXCEPTION) << e.what() << std::endl;
93 bool ANeuralNetworksModel::addOperation(ANeuralNetworksOperationType type, uint32_t inputCount,
94 const uint32_t *inputs, uint32_t outputCount,
95 const uint32_t *outputs) noexcept
99 for (uint32_t i = 0; i < outputCount; i++)
101 _operand_usages[outputs[i]] = OperandUsage::OPERATION_OUTPUT;
104 auto &factory = OperationFactory::get();
105 OperationFactory::Param param{inputCount, inputs, outputCount, outputs};
107 auto node = factory.create(type, param, _graph->operands());
108 _graph->addOperation(std::unique_ptr<onert::ir::Operation>{node});
110 // TODO Move these codes to delegate.cpp
111 if (type == ANEURALNETWORKS_FULLY_CONNECTED)
113 const auto &input_operand =
114 _graph->operands().at(node->getInputs().at(onert::ir::operation::FullyConnected::INPUT));
115 auto &weights_operand =
116 _graph->operands().at(node->getInputs().at(onert::ir::operation::FullyConnected::WEIGHT));
117 if (input_operand.typeInfo().type() == onert::ir::DataType::FLOAT32 &&
118 weights_operand.typeInfo().type() == onert::ir::DataType::QUANT_UINT8_ASYMM)
120 weights_operand.type(onert::ir::DataType::QUANT_INT8_SYMM);
124 catch (const std::exception &e)
126 VERBOSE(EXCEPTION) << e.what() << std::endl;
134 bool ANeuralNetworksModel::addOperationEx(ANeuralNetworksOperationTypeEx type, uint32_t inputCount,
135 const uint32_t *inputs, uint32_t outputCount,
136 const uint32_t *outputs) noexcept
140 for (uint32_t i = 0; i < outputCount; i++)
142 _operand_usages[outputs[i]] = OperandUsage::OPERATION_OUTPUT;
145 auto &factory = OperationFactory::get();
146 OperationFactory::Param param{inputCount, inputs, outputCount, outputs};
148 auto node = factory.create(type, param, _graph->operands());
149 _graph->addOperation(std::unique_ptr<onert::ir::Operation>{node});
151 catch (const std::exception &e)
158 bool ANeuralNetworksModel::addModelInput(uint32_t index) noexcept
162 _operand_usages[index] = OperandUsage::MODEL_INPUT;
164 const onert::ir::OperandIndex ind{index};
165 _graph->addInput(ind);
167 catch (const std::exception &e)
169 VERBOSE(EXCEPTION) << e.what() << std::endl;
176 bool ANeuralNetworksModel::addModelOutput(uint32_t index) noexcept
180 const onert::ir::OperandIndex ind{index};
182 // Duplicated output is not allowed
183 if (_graph->getOutputs().contains(ind))
188 _graph->addOutput(ind);
190 catch (const std::exception &e)
192 VERBOSE(EXCEPTION) << e.what() << std::endl;
200 void ANeuralNetworksModel::allowFloat32toFloat16(bool allow) noexcept
202 _allowFloat32toFloat16 = allow;
205 bool ANeuralNetworksModel::finish() noexcept
209 fillOptionalOperand();
211 _graph->finishBuilding();
213 _operand_usages.clear();
215 catch (const std::exception &e)
217 VERBOSE(EXCEPTION) << e.what() << '\n';
225 bool ANeuralNetworksModel::isFinished() noexcept { return !_graph->isBuildingPhase(); }
227 bool ANeuralNetworksModel::isExistOperand(uint32_t index) noexcept
229 return _graph->operands().exist(onert::ir::OperandIndex{index});
232 size_t ANeuralNetworksModel::operandSize(uint32_t index) noexcept
236 return _graph->operands().at(onert::ir::OperandIndex{index}).operandSize();
238 catch (const std::exception &e)
240 VERBOSE(EXCEPTION) << e.what() << '\n';
246 bool ANeuralNetworksModel::isUsageSet(uint32_t index) noexcept
248 return (_operand_usages[index] != OperandUsage::NOT_DEFINED);
251 bool ANeuralNetworksModel::isOperationOutput(uint32_t index) noexcept
253 return (_operand_usages[index] == OperandUsage::OPERATION_OUTPUT);
256 void ANeuralNetworksModel::setOptionalOperand(const onert::ir::OperandIndex idx)
258 _optional_operands.insert(idx);
261 void ANeuralNetworksModel::fillOptionalOperand(void)
263 _graph->operations().iterate([&](const onert::ir::OperationIndex &, onert::ir::Operation &node) {
264 for (auto input : node.getInputs())
266 // TODO fill default value for optional operands
267 if (_optional_operands.find(input) != _optional_operands.end())
269 throw std::runtime_error{"Optional operand is not supported yet"};
275 std::shared_ptr<onert::ir::Subgraphs> ANeuralNetworksModel::getSubGraphs() const
277 auto all_subgs = std::make_shared<onert::ir::Subgraphs>();
279 all_subgs->push(onert::ir::SubgraphIndex{0}, _graph);
280 // TODO Find all child subgraphs and copy them to all_subgs
281 // Must find the same subgraph by using to compare pointer of subgraphs and set subgraph's index
282 // to operands of control flow operations
283 // Must clean all child subgraphs's pointer to prevent memory leak in case of that graph has
284 // subgraph itself recursively