From e3bf3f1f7c86ba7ef7d554036399fc353885365c Mon Sep 17 00:00:00 2001 From: =?utf8?q?=EC=98=A4=ED=98=95=EC=84=9D/On-Device=20Lab=28SR=29/Staff?= =?utf8?q?=20Engineer/=EC=82=BC=EC=84=B1=EC=A0=84=EC=9E=90?= Date: Thu, 23 May 2019 11:00:49 +0900 Subject: [PATCH] Add interpreter unittest using buffer (#5221) * Add interpreter unittest using buffer Add interpreter unittest using two add operation with buffer output and constant operand Move add kernel code order for correct exception Signed-off-by: Hyeongseok Oh * Update example model comment --- .../neurun/core/src/exec/interp/operations/Add.cc | 22 ++++-- runtimes/neurun/test/interp/ExecManager.cc | 85 ++++++++++++++++++++++ 2 files changed, 101 insertions(+), 6 deletions(-) diff --git a/runtimes/neurun/core/src/exec/interp/operations/Add.cc b/runtimes/neurun/core/src/exec/interp/operations/Add.cc index 44b2f6c..82674d0 100644 --- a/runtimes/neurun/core/src/exec/interp/operations/Add.cc +++ b/runtimes/neurun/core/src/exec/interp/operations/Add.cc @@ -37,19 +37,15 @@ void invokeAdd(ExecEnv *env, const model::Operation &node) // Check lhs shape is same with rhs (with broadcast) const auto lhs_tensor = env->tensorAt(lhs_index); const auto rhs_tensor = env->tensorAt(rhs_index); - auto out_tensor = env->tensorAt(out_index); - // Check shape and type lhs is same with rhs and output + // Check shape and type lhs is same with rhs // TODO Util function to compare TensorInfo // TODO Handle broadcasting assert(lhs_tensor->data_type() == rhs_tensor->data_type()); - assert(lhs_tensor->data_type() == out_tensor->data_type()); assert(lhs_tensor->num_dimensions() == rhs_tensor->num_dimensions()); - assert(lhs_tensor->num_dimensions() == out_tensor->num_dimensions()); for (uint32_t i = 0; i < lhs_tensor->num_dimensions(); i++) { assert(lhs_tensor->dimension(i) == rhs_tensor->dimension(i)); - assert(lhs_tensor->dimension(i) == out_tensor->dimension(i)); } // Check activation data type @@ -60,12 +56,26 @@ void invokeAdd(ExecEnv *env, 
const model::Operation &node) throw std::runtime_error("NYI"); } - // Already allocated (ex. model output) + // Output's shape and type should be same with input (don't consider broadcast) + auto output_info = lhs_tensor->tensorInfo(); + // We can handle already allocated (ex. model output) if (!env->contains(out_index)) { throw std::runtime_error("NYI: buffer allocator"); } + auto out_tensor = env->tensorAt(out_index); + + // Check shape and type lhs is same with output + // TODO Util function to compare TensorInfo + // TODO Handle broadcasting + assert(lhs_tensor->data_type() == out_tensor->data_type()); + assert(lhs_tensor->num_dimensions() == out_tensor->num_dimensions()); + for (uint32_t i = 0; i < lhs_tensor->num_dimensions(); i++) + { + assert(lhs_tensor->dimension(i) == out_tensor->dimension(i)); + } + const auto lhs_buffer = lhs_tensor->bufferRO(); const auto rhs_buffer = rhs_tensor->bufferRO(); auto out_buffer = out_tensor->buffer(); diff --git a/runtimes/neurun/test/interp/ExecManager.cc b/runtimes/neurun/test/interp/ExecManager.cc index feb87f5..6aabe7f 100644 --- a/runtimes/neurun/test/interp/ExecManager.cc +++ b/runtimes/neurun/test/interp/ExecManager.cc @@ -76,6 +76,64 @@ protected: _executor = nnfw::cpp14::make_unique(_graph->shareModel()); } + void CreateTwoStepModel() + { + // Model: two elementwise add operations + // model input: lhs, rhs1 + // model output: second add result (result2) + // constant: rhs2 + // result1 <= (lhs + rhs1) + // result2 <= (result1 + rhs2) + // lhs, rhs1, rhs2, result1, result2 shape: {1, 2, 2, 1} + // activation: none (constant) + std::unique_ptr<model::Model> model = nnfw::cpp14::make_unique<model::Model>(); + + // 1st add operands (result1 <= lhs + rhs1) + + Shape shape{1, 2, 2, 1}; + TypeInfo type{DataType::INT32}; + Shape shape_scalar(0); + TypeInfo type_scalar{DataType::INT32}; + + static int32_t rhs2_data[4] = {3, 1, -1, 5}; + + auto operand_lhs = model->operands.append(shape, type); + auto operand_rhs1 = model->operands.append(shape, type); 
+ auto operand_result1 = model->operands.append(shape, type); + auto operand_rhs2 = model->operands.append(shape, type); + auto operand_result2 = model->operands.append(shape, type); + model->operands.at(operand_rhs2) + .data(nnfw::cpp14::make_unique<model::CachedData>(reinterpret_cast<const uint8_t *>(&rhs2_data), + 16)); + + // Add operations (1st: result1 <= lhs + rhs1, 2nd: result2 <= result1 + rhs2) + + operation::AddNode::Param param1; + param1.activation = neurun::model::Activation::NONE; + auto input_set1 = OperandIndexSequence{operand_lhs, operand_rhs1}; + auto output_set1 = OperandIndexSequence{operand_result1}; + model->operations.append( + nnfw::cpp14::make_unique<operation::AddNode>(input_set1, output_set1, param1)); + + operation::AddNode::Param param2; + param2.activation = neurun::model::Activation::NONE; + auto input_set2 = OperandIndexSequence{operand_result1, operand_rhs2}; + auto output_set2 = OperandIndexSequence{operand_result2}; + model->operations.append( + nnfw::cpp14::make_unique<operation::AddNode>(input_set2, output_set2, param2)); + + // Identify model inputs and outputs + + model->inputs.append(operand_lhs); + model->inputs.append(operand_rhs1); + model->outputs.append(operand_result2); + + _graph = nnfw::cpp14::make_unique<::neurun::graph::Graph>(std::move(model)); + _graph->finishBuilding(); + + _executor = nnfw::cpp14::make_unique(_graph->shareModel()); + } + void CreateUnspecifiedDimensionsModel() { // Model: one elementwise add operation @@ -239,4 +297,31 @@ TEST_F(InterpExecManagerTest, execute) EXPECT_EQ(output_buffer[3], -6); } +TEST_F(InterpExecManagerTest, executeTwoStep) +{ + CreateTwoStepModel(); + + auto input1 = IOIndex{0}; + auto input2 = IOIndex{1}; + auto input1_idx = _graph->getInputs().at(input1); + auto input2_idx = _graph->getInputs().at(input2); + + const int32_t input1_buffer[4] = {1, 0, -1, -2}; + const int32_t input2_buffer[4] = {1, -3, 2, -4}; + + auto output = IOIndex{0}; + auto output_idx = _graph->getOutputs().at(output); + + int32_t output_buffer[4] = {}; + + EXPECT_NO_THROW(_executor->setInput(input1, 
reinterpret_cast<const void *>(input1_buffer), 16)); + EXPECT_NO_THROW(_executor->setInput(input2, reinterpret_cast<const void *>(input2_buffer), 16)); + EXPECT_NO_THROW(_executor->setOutput(output, reinterpret_cast<void *>(output_buffer), 16)); + EXPECT_THROW(_executor->execute(), std::runtime_error); + // EXPECT_EQ(output_buffer[0], 5); + // EXPECT_EQ(output_buffer[1], -2); + // EXPECT_EQ(output_buffer[2], 0); + // EXPECT_EQ(output_buffer[3], -1); +} + } // namespace -- 2.7.4