/*
 * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#include "luci/Importer.h"

#include <luci/IR/CircleNode.h>
#include <luci/Plan/CircleNodeExecutionPlan.h>

#include <mio/circle/schema_generated.h>
#include <flatbuffers/flatbuffers.h>

#include <gtest/gtest.h>

#include <algorithm>
#include <cassert>
#include <cstdint>
#include <memory>
#include <string>
#include <vector>
26 TEST(TensorFlowLiteImport, Dummy)
28 luci::Importer import;
33 // helpers for flatbuffers
37 struct BasicCircleModel
39 std::unique_ptr<circle::ModelT> model;
43 model = std::make_unique<circle::ModelT>();
44 model->buffers.push_back(std::make_unique<circle::BufferT>());
45 model->description = "nnpackage";
49 uint32_t add_subgraph()
51 model->subgraphs.push_back(std::make_unique<circle::SubGraphT>());
52 model->subgraphs.back()->name = "";
53 model->subgraphs.back()->data_format = circle::DataFormat_CHANNELS_LAST;
54 return model->subgraphs.size() - 1;
57 void add_subgraph_inputs(uint32_t subgraph_id, const std::vector<uint32_t> &inputs)
59 model->subgraphs[subgraph_id]->inputs.assign(inputs.begin(), inputs.end());
62 void add_subgraph_outputs(uint32_t subgraph_id, const std::vector<uint32_t> &outputs)
64 model->subgraphs[subgraph_id]->outputs.assign(outputs.begin(), outputs.end());
67 uint32_t add_builtin_opcode(circle::BuiltinOperator opcode)
69 uint32_t id = model->operator_codes.size();
70 model->operator_codes.push_back(std::make_unique<circle::OperatorCodeT>());
71 model->operator_codes[id]->builtin_code = opcode;
72 model->operator_codes[id]->version = 1;
78 model->buffers.push_back(std::make_unique<circle::BufferT>());
79 return model->buffers.size() - 1;
82 uint32_t add_float_tensor(uint32_t graph_id, const std::vector<int32_t> &shape,
85 auto &graph = model->subgraphs[graph_id];
86 uint32_t idx = graph->tensors.size();
87 graph->tensors.push_back(std::make_unique<circle::TensorT>());
88 graph->tensors[idx]->shape = shape;
89 graph->tensors[idx]->type = circle::TensorType_FLOAT32;
90 graph->tensors[idx]->buffer = buffer_id;
91 graph->tensors[idx]->name = std::to_string(idx);
92 graph->tensors[idx]->quantization.reset(nullptr);
93 graph->tensors[idx]->is_variable = false;
94 graph->tensors[idx]->sparsity.reset(nullptr);
95 (void)graph->tensors[idx]->shape_signature;
99 uint32_t add_builtin_operator(uint32_t graph_id, uint32_t opcode_id,
100 const std::vector<uint32_t> &inputs,
101 const std::vector<uint32_t> &outputs)
103 auto &graph = model->subgraphs[graph_id];
104 auto idx = graph->operators.size();
105 graph->operators.push_back(std::make_unique<circle::OperatorT>());
106 graph->operators[idx]->opcode_index = opcode_id;
107 graph->operators[idx]->inputs.assign(inputs.begin(), inputs.end());
108 graph->operators[idx]->outputs.assign(outputs.begin(), outputs.end());
109 graph->operators[idx]->builtin_options.Reset();
110 (void)graph->operators[idx]->custom_options;
111 graph->operators[idx]->custom_options_format = circle::CustomOptionsFormat_FLEXBUFFERS;
112 (void)graph->operators[idx]->mutating_variable_inputs;
113 (void)graph->operators[idx]->intermediates;
117 uint32_t add_plan_metadata(uint32_t buffer_id)
119 static_assert(sizeof(uint32_t) == 4, "metadata is stored in blocks of 32 bit unsiged ints");
120 uint32_t idx = model->metadata.size();
121 model->metadata.push_back(std::make_unique<circle::MetadataT>());
122 model->metadata[idx]->name = "ONE_execution_plan_table";
123 model->metadata[idx]->buffer = buffer_id;
124 model->buffers[buffer_id]->data.resize(4);
125 auto &entries_count = *reinterpret_cast<uint32_t *>(model->buffers[buffer_id]->data.data());
130 void add_plan_entry(uint32_t plan_buffer_id, uint32_t execution_order,
131 const std::vector<uint32_t> &offsets)
133 auto &buffer = model->buffers[plan_buffer_id]->data;
134 auto old_size = buffer.size();
135 assert(old_size % 4 == 0);
136 assert(old_size > 0);
138 // Allocate space for new entry:
139 // 4 bytes for entry id
140 // 4 bytes for entry size
141 // 4 bytes for execution order
142 // offsets.size() * 4 bytes for offsets
143 buffer.resize(old_size + 12 + offsets.size() * 4);
144 uint32_t *number_of_entries_ptr = reinterpret_cast<uint32_t *>(buffer.data());
145 *number_of_entries_ptr += 1;
147 uint32_t *entry_data_ptr = reinterpret_cast<uint32_t *>(buffer.data() + old_size);
149 entry_data_ptr[0] = *number_of_entries_ptr - 1; // entry id
150 entry_data_ptr[1] = 1 + offsets.size(); // entry size
151 entry_data_ptr[2] = execution_order; // execution order
152 std::copy(offsets.begin(), offsets.end(), entry_data_ptr + 3);
156 struct SimpleRELUModel : public BasicCircleModel
160 auto relu_opcode_id = add_builtin_opcode(circle::BuiltinOperator_RELU);
162 uint32_t subgraph_id = add_subgraph();
164 auto input_buffer_id = add_buffer();
165 auto output_buffer_id = add_buffer();
167 auto input_tensor_idx = add_float_tensor(subgraph_id, {1, 2, 3, 4}, input_buffer_id);
168 auto output_tensor_idx = add_float_tensor(subgraph_id, {1, 2, 3, 4}, output_buffer_id);
170 add_subgraph_inputs(subgraph_id, {input_tensor_idx});
171 add_subgraph_outputs(subgraph_id, {output_tensor_idx});
173 add_builtin_operator(subgraph_id, relu_opcode_id, {0}, {1});
180 * This test checks that one op RELU model with execution plan is successfully imported
182 TEST(TensorFlowLiteImport, simple_plan)
184 SimpleRELUModel model;
185 auto metadata_buffer_id = model.add_buffer();
186 model.add_plan_metadata(metadata_buffer_id);
188 model.add_plan_entry(metadata_buffer_id, 1, {100});
189 model.add_plan_entry(metadata_buffer_id, 2, {300});
190 model.add_plan_entry(metadata_buffer_id, 3, {200});
192 flatbuffers::FlatBufferBuilder fbb;
193 auto model_offset = circle::Model::Pack(fbb, model.model.get(), nullptr);
194 circle::FinishModelBuffer(fbb, model_offset);
196 auto model_ptr = circle::GetModel(fbb.GetBufferPointer());
197 luci::Importer import;
199 auto luci_module = import.importModule(model_ptr);
201 auto main_graph = luci_module->graph();
202 for (int i = 0; i < main_graph->nodes()->size(); ++i)
204 auto node = loco::must_cast<luci::CircleNode *>(main_graph->nodes()->at(i));
205 switch (node->opcode())
207 case luci::CircleOpcode::CIRCLEINPUT:
209 ASSERT_TRUE(luci::has_execution_plan(node));
210 auto plan = luci::get_execution_plan(node);
211 ASSERT_EQ(plan.order_in_plan(), 1);
212 ASSERT_EQ(plan.offsets().size(), 1);
213 ASSERT_EQ(plan.offsets()[0], 100);
216 case luci::CircleOpcode::CIRCLEOUTPUT:
218 ASSERT_TRUE(luci::has_execution_plan(node));
219 auto plan = luci::get_execution_plan(node);
220 ASSERT_EQ(plan.order_in_plan(), 3);
221 ASSERT_EQ(plan.offsets().size(), 1);
222 ASSERT_EQ(plan.offsets()[0], 200);
225 case luci::CircleOpcode::RELU:
227 ASSERT_TRUE(luci::has_execution_plan(node));
228 auto plan = luci::get_execution_plan(node);
229 ASSERT_EQ(plan.order_in_plan(), 2);
230 ASSERT_EQ(plan.offsets().size(), 1);
231 ASSERT_EQ(plan.offsets()[0], 300);
241 * This test checks that model with incomplete execution plan is successfully imported
243 TEST(TensorFlowLiteImport, DISABLED_incomplete_plan_NEG)
245 SimpleRELUModel model;
246 auto metadata_buffer_id = model.add_buffer();
247 model.add_plan_metadata(metadata_buffer_id);
249 model.add_plan_entry(metadata_buffer_id, 1, {100});
251 flatbuffers::FlatBufferBuilder fbb;
252 auto model_offset = circle::Model::Pack(fbb, model.model.get(), nullptr);
253 circle::FinishModelBuffer(fbb, model_offset);
255 auto model_ptr = circle::GetModel(fbb.GetBufferPointer());
256 luci::Importer import;
258 auto luci_module = import.importModule(model_ptr);
260 auto main_graph = luci_module->graph();
261 for (int i = 0; i < main_graph->nodes()->size(); ++i)
263 auto node = loco::must_cast<luci::CircleNode *>(main_graph->nodes()->at(i));
264 switch (node->opcode())
266 case luci::CircleOpcode::CIRCLEINPUT:
268 ASSERT_TRUE(luci::has_execution_plan(node));
269 auto plan = luci::get_execution_plan(node);
270 ASSERT_EQ(plan.order_in_plan(), 1);
271 ASSERT_EQ(plan.offsets().size(), 1);
272 ASSERT_EQ(plan.offsets()[0], 100);
275 case luci::CircleOpcode::CIRCLEOUTPUT:
276 case luci::CircleOpcode::RELU:
278 ASSERT_FALSE(luci::has_execution_plan(node));
288 * This test checks that corrupted execution plan induce exception
290 TEST(TensorFlowLiteImport, corrupted_plan_NEG)
292 SimpleRELUModel model;
293 auto metadata_buffer_id = model.add_buffer();
294 model.add_plan_metadata(metadata_buffer_id);
296 model.add_plan_entry(metadata_buffer_id, 1, {100});
297 model.add_plan_entry(metadata_buffer_id, 2, {300});
298 model.add_plan_entry(metadata_buffer_id, 3, {200});
301 *reinterpret_cast<uint32_t *>(model.model->buffers[metadata_buffer_id]->data.data()) = 4;
303 flatbuffers::FlatBufferBuilder fbb;
304 auto model_offset = circle::Model::Pack(fbb, model.model.get(), nullptr);
305 circle::FinishModelBuffer(fbb, model_offset);
307 auto model_ptr = circle::GetModel(fbb.GetBufferPointer());
308 luci::Importer import;
310 ASSERT_ANY_THROW(import.importModule(model_ptr));