From 8f9ec89a5256b23ad384f83c9510863f96ff6d96 Mon Sep 17 00:00:00 2001
From: =?utf8?q?=EA=B9=80=EC=88=98=EC=A7=84/On-Device=20Lab=28SR=29/Enginee?=
 =?utf8?q?r/=EC=82=BC=EC=84=B1=EC=A0=84=EC=9E=90?= <sjsujinkim@samsung.com>
Date: Tue, 29 Jan 2019 18:31:32 +0900
Subject: [PATCH] [neurun] Support Quant8 for Concat (#4352)

This commit supports `quant8` data type for `Concat` on `acl_cl` backend
and remove skip related test list.

Signed-off-by: sjsujinkim <sjsujinkim@samsung.com>
---
 .../neurun/src/backend/acl_cl/kernel/ConcatLayer.cc | 19 +++++++++----------
 .../neurun/src/backend/acl_cl/kernel/ConcatLayer.h  |  8 +++-----
 tests/nnapi/nnapi_gtest.skip.armv7l-linux.neurun    |  1 -
 3 files changed, 12 insertions(+), 16 deletions(-)

diff --git a/runtimes/neurun/src/backend/acl_cl/kernel/ConcatLayer.cc b/runtimes/neurun/src/backend/acl_cl/kernel/ConcatLayer.cc
index 142e9c5..e6da204 100644
--- a/runtimes/neurun/src/backend/acl_cl/kernel/ConcatLayer.cc
+++ b/runtimes/neurun/src/backend/acl_cl/kernel/ConcatLayer.cc
@@ -53,12 +53,12 @@ namespace kernel
 {
 
 ConcatLayer::ConcatLayer()
-    : _input_allocs(), _output_alloc(nullptr), _axis(0), _input_type(OperandType::SCALAR_FLOAT32)
+    : _input_allocs(), _output_alloc(nullptr), _axis(0), _input_type(arm_compute::DataType::F32)
 {
   // DO NOTHING
 }
 
-bool ConcatLayer::concatenationFloat32()
+template <typename T> bool ConcatLayer::concatenate()
 {
   // Input and output size check
   {
@@ -84,12 +84,12 @@ bool ConcatLayer::concatenationFloat32()
   auto &queue = ::arm_compute::CLScheduler::get().queue();
 
   _output_alloc->map(queue);
-  util::feature::nchw::View<float> output_view{_output_alloc};
+  util::feature::nchw::View<T> output_view{_output_alloc};
 
   for (auto input : _input_allocs)
   {
     input->map(queue);
-    const util::feature::nchw::View<float> input_reader{input};
+    const util::feature::nchw::View<T> input_reader{input};
 
     for (uint32_t n = 0; n < input_reader.shape().N; n++)
     {
@@ -143,19 +143,18 @@ void ConcatLayer::configure(
   // NHWC -> WHCN
   _axis = ToARMComputeAxis(output_alloc->num_dimensions(), axis).value();
 
-  // TODO Support Quant8
-  _input_type = OperandType::TENSOR_FLOAT32;
+  _input_type = input_allocs[0]->data_type();
 }
 
 void ConcatLayer::run()
 {
-  if (_input_type == OperandType::TENSOR_FLOAT32)
+  if (_input_type == arm_compute::DataType::F32)
   {
-    concatenationFloat32();
+    concatenate<float>();
   }
-  else if (_input_type == OperandType::TENSOR_QUANT8_ASYMM)
+  else if (_input_type == arm_compute::DataType::QASYMM8)
   {
-    throw std::runtime_error("NYI - concatenationQuant8()");
+    concatenate<uint8_t>();
   }
 }
 
diff --git a/runtimes/neurun/src/backend/acl_cl/kernel/ConcatLayer.h b/runtimes/neurun/src/backend/acl_cl/kernel/ConcatLayer.h
index 057e337..cc38c03 100644
--- a/runtimes/neurun/src/backend/acl_cl/kernel/ConcatLayer.h
+++ b/runtimes/neurun/src/backend/acl_cl/kernel/ConcatLayer.h
@@ -20,12 +20,10 @@
 
 #include <NeuralNetworks.h>
 #include <arm_compute/runtime/IFunction.h>
+#include <arm_compute/core/Types.h>
 
-#include "model/operand/DataType.h"
 #include "backend/acl_cl/operand/ICLTensor.h"
 
-using OperandType = neurun::model::operand::DataType;
-
 namespace neurun
 {
 namespace backend
@@ -53,13 +51,13 @@ public:
   void run();
 
 private:
-  bool concatenationFloat32();
+  template <typename T> bool concatenate();
 
 private:
   std::vector<::neurun::backend::acl_cl::operand::ICLTensor *> _input_allocs;
   ::neurun::backend::acl_cl::operand::ICLTensor *_output_alloc;
   int32_t _axis;
-  OperandType _input_type;
+  arm_compute::DataType _input_type;
 };
 
 } // namespace kernel
diff --git a/tests/nnapi/nnapi_gtest.skip.armv7l-linux.neurun b/tests/nnapi/nnapi_gtest.skip.armv7l-linux.neurun
index 8b912fc..8ab0a94 100644
--- a/tests/nnapi/nnapi_gtest.skip.armv7l-linux.neurun
+++ b/tests/nnapi/nnapi_gtest.skip.armv7l-linux.neurun
@@ -71,7 +71,6 @@ GeneratedTests.tensorflowmax_ex*
 GeneratedTests.reduce_sum_ex*
 GeneratedTests.topk_v2*
 # Unhandled exception
-GeneratedTests.concat*
 GeneratedTests.fully_connected*
 GeneratedTests.reshape*
 # Unexpected result
-- 
2.7.4