From 7f1a8940dfe0fd689a9812875256ce63c73a04e2 Mon Sep 17 00:00:00 2001
From: =?utf8?q?=EA=B9=80=EC=88=98=EC=A7=84/On-Device=20Lab=28SR=29/Enginee?=
 =?utf8?q?r/=EC=82=BC=EC=84=B1=EC=A0=84=EC=9E=90?=
Date: Fri, 18 Jan 2019 12:22:06 +0900
Subject: [PATCH] [neurun] Support initializing 2D constants (#4257)

* [neurun] Support initializing 2D constants

Until now, `ConstantInitializer` in `neurun` has supported only
`FullyConnected` weights. This commit adds support for initializing 2D
constants for all other operations.

* Change some comments

Signed-off-by: sjsujinkim
---
 .../neurun/src/compiler/ConstantInitializer.cc | 119 +++++++++++----------
 1 file changed, 62 insertions(+), 57 deletions(-)

diff --git a/runtimes/neurun/src/compiler/ConstantInitializer.cc b/runtimes/neurun/src/compiler/ConstantInitializer.cc
index 64c811d..c9917ec 100644
--- a/runtimes/neurun/src/compiler/ConstantInitializer.cc
+++ b/runtimes/neurun/src/compiler/ConstantInitializer.cc
@@ -80,73 +80,78 @@ void ConstantInitializer::operator()()
       }
       case 2:
       {
-        // NOTE This is a WORKAROUND which supports FullyConnected weight only
-        //      For FullyConnected, we must know the IFM shape to deduce 2D weight shape from
-        //      4D IFM.
-        //      This is because of NHWC/NCHW layout, the order of mapping will be different.
-        // TODO Support general case - explicitly insert Reshape op for IFM as 2D
-
-        // Find corresponding FullyConnected IFM
         auto operation_index = _graph.operands().at(index).getUses().list().front();
         auto operation = &_graph.operations().at(operation_index);
         auto fc_operation =
             dynamic_cast<const neurun::model::operation::FullyConnectedNode *>(operation);
-        if (fc_operation == nullptr)
-          break;
-
-        auto ifm_index = fc_operation->getInputs().at(
-            neurun::model::operation::FullyConnectedNode::Input::INPUT);
-        const auto &ifm = _graph.operands().at(ifm_index);
-        const auto ifm_shape = ifm.shape().asFeature();
-        const auto num_output = shape.dim(0);
-
-        const ::nnfw::misc::feature::Shape ker_shape{num_output, ifm_shape.C, ifm_shape.H,
-                                                     ifm_shape.W};
-        const util::feature::nhwc::Reader<float> from{ker_shape, base, size};
-
-        if (layout == neurun::graph::operand::Layout::NHWC)
+
+        if (fc_operation != nullptr)
         {
-          ::nnfw::misc::feature::iterate(ker_shape)
-              << [&](uint32_t nth, uint32_t ch, uint32_t row, uint32_t col) {
-                   const auto value = from.at(nth, ch, row, col);
-
-                   uint32_t offset = 0;
-
-                   // NNAPI uses NHWC ordering
-                   offset += nth * ifm_shape.H * ifm_shape.W * ifm_shape.C;
-                   offset += row * ifm_shape.W * ifm_shape.C;
-                   offset += col * ifm_shape.C;
-                   offset += ch;
-
-                   float *into = reinterpret_cast<float *>(tensor.buffer()) + offset;
-
-                   *into = value;
-                 };
-        }
-        else
-        {
-          assert(layout == neurun::graph::operand::Layout::NCHW);
-
-          ::nnfw::misc::feature::iterate(ker_shape)
-              << [&](uint32_t nth, uint32_t ch, uint32_t row, uint32_t col) {
-                   const auto value = from.at(nth, ch, row, col);
-
-                   uint32_t offset = 0;
-
-                   // 'NCHW' ordering
-                   offset += nth * ifm_shape.C * ifm_shape.H * ifm_shape.W;
-                   offset += ch * ifm_shape.H * ifm_shape.W;
-                   offset += row * ifm_shape.W;
-                   offset += col;
-
-                   float *into = reinterpret_cast<float *>(tensor.buffer()) + offset;
-
-                   *into = value;
-                 };
+          // NOTE We must know the IFM shape to deduce 2D weight shape from 4D IFM.
+          //      Because of the NHWC/NCHW layout, the order of mapping will be different.
+          auto ifm_index = fc_operation->getInputs().at(
+              neurun::model::operation::FullyConnectedNode::Input::INPUT);
+          const auto &ifm = _graph.operands().at(ifm_index);
+          const auto ifm_shape = ifm.shape().asFeature();
+          const auto num_output = shape.dim(0);
+
+          const ::nnfw::misc::feature::Shape ker_shape{num_output, ifm_shape.C, ifm_shape.H,
+                                                       ifm_shape.W};
+          const util::feature::nhwc::Reader<float> from{ker_shape, base, size};
+
+          if (layout == neurun::graph::operand::Layout::NHWC)
+          {
+            ::nnfw::misc::feature::iterate(ker_shape)
+                << [&](uint32_t nth, uint32_t ch, uint32_t row, uint32_t col) {
+                     const auto value = from.at(nth, ch, row, col);
+
+                     uint32_t offset = 0;
+
+                     // NNAPI uses NHWC ordering
+                     offset += nth * ifm_shape.H * ifm_shape.W * ifm_shape.C;
+                     offset += row * ifm_shape.W * ifm_shape.C;
+                     offset += col * ifm_shape.C;
+                     offset += ch;
+
+                     float *into = reinterpret_cast<float *>(tensor.buffer()) + offset;
+
+                     *into = value;
+                   };
+          }
+          else
+          {
+            assert(layout == neurun::graph::operand::Layout::NCHW);
+
+            ::nnfw::misc::feature::iterate(ker_shape)
+                << [&](uint32_t nth, uint32_t ch, uint32_t row, uint32_t col) {
+                     const auto value = from.at(nth, ch, row, col);
+
+                     uint32_t offset = 0;
+
+                     // 'NCHW' ordering
+                     offset += nth * ifm_shape.C * ifm_shape.H * ifm_shape.W;
+                     offset += ch * ifm_shape.H * ifm_shape.W;
+                     offset += row * ifm_shape.W;
+                     offset += col;
+
+                     float *into = reinterpret_cast<float *>(tensor.buffer()) + offset;
+
+                     *into = value;
+                   };
+          }
         }
+        else // operation != fc operation
+        {
+          auto matrix_shape = shape.asMatrix();
+
+          for (auto h = 0; h < matrix_shape.H; ++h)
+          {
+            neurun::util::feature::Coordinate4D coord{0, h, 0, 0};
+            memcpy(tensor.buffer() + tensor.calcOffset(coord), base + h * matrix_shape.W,
+                   matrix_shape.W * sizeof(float));
+          }
+        }
 
         break;
       }
       case 4:
-- 
2.7.4
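
Editor's note (not part of the patch): the new non-FullyConnected branch copies a 2D constant
into the backend tensor one row at a time, computing each row's destination through
tensor.calcOffset(Coordinate4D{0, h, 0, 0}) rather than doing one bulk memcpy, so that any
backend-specific row padding is respected. Below is a minimal, self-contained sketch of that
row-wise copy. It does not call the neurun API: MatrixShape, initMatrixConstant, and
dst_row_stride are hypothetical stand-ins for shape.asMatrix(), the memcpy loop in the patch,
and whatever per-row offset calcOffset() resolves to on a given backend.

// Illustrative sketch only; hypothetical names throughout, not the neurun API.
#include <cstring>
#include <iostream>
#include <vector>

struct MatrixShape
{
  int H; // rows
  int W; // columns
};

// Copy an H x W row-major constant into a destination whose row stride (in elements)
// may be larger than W, e.g. when the backend pads rows.
void initMatrixConstant(const float *base, const MatrixShape &shape, float *dst,
                        int dst_row_stride)
{
  for (int h = 0; h < shape.H; ++h)
  {
    std::memcpy(dst + h * dst_row_stride, base + h * shape.W, shape.W * sizeof(float));
  }
}

int main()
{
  const MatrixShape shape{2, 3};
  const std::vector<float> constant{1, 2, 3, 4, 5, 6}; // 2x3 constant, row-major
  std::vector<float> tensor(2 * 4, 0.0f);              // destination buffer with row stride 4

  initMatrixConstant(constant.data(), shape, tensor.data(), /*dst_row_stride=*/4);

  for (float v : tensor)
    std::cout << v << ' ';
  std::cout << '\n'; // prints: 1 2 3 0 4 5 6 0
}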