Fix setting TensorInfo to wrong shape in FullyConnected. (#2496)
author장지섭/동작제어Lab(SR)/Engineer/삼성전자 <jiseob.jang@samsung.com>
Tue, 28 Aug 2018 05:55:34 +0000 (14:55 +0900)
committer이춘석/동작제어Lab(SR)/Staff Engineer/삼성전자 <chunseok.lee@samsung.com>
Tue, 28 Aug 2018 05:55:34 +0000 (14:55 +0900)
This commit fixes the TensorInfo being set to the wrong shape in FullyConnected.

Signed-off-by: jiseob.jang <jiseob.jang@samsung.com>
runtimes/pure_arm_compute/src/compilation.cc
runtimes/tests/neural_networks_test/runtime_run_android_nn_test.skip.armv7l-linux
runtimes/tests/neural_networks_test/runtime_run_android_nn_test.skip.armv7l-tizen

index 3b77ea7..181727d 100644 (file)
@@ -1975,165 +1975,23 @@ void Planner::visit(const ::internal::tflite::op::FullyConnected::Node &node)
 
   assert(_ctx.at(input_index).shape().rank() >= 2);
   assert(_ctx.at(output_index).shape().rank() == 2);
-
-  auto no_of_input_elements = 1;
-  for (size_t i = 0; i < _ctx.at(input_index).shape().rank(); i++)
-  {
-    no_of_input_elements *= _ctx.at(input_index).shape().dim(i);
-  }
-
-  const auto output_size = _ctx.at(output_index).shape().dim(1);
   assert(_ctx.at(weight_index).shape().rank() == 2);
-  const auto num_output = _ctx.at(weight_index).shape().dim(0);
-  const auto input_size = _ctx.at(weight_index).shape().dim(1);
-
-  int32_t C, N, H, W;
-  if (_ctx.at(input_index).shape().rank() == 2)
-  {
-    nnfw::util::matrix::Shape ifm_shape_matrix;
-    ifm_shape_matrix = _ctx.at(input_index).shape().asMatrix();
-    H = ifm_shape_matrix.H;
-    W = ifm_shape_matrix.W;
-    N = num_output;
-    C = 1;
-    _builder.addShapeConstr(input_index, asTensorInfo(ifm_shape_matrix, _ctx.at(input_index).type(),
-                                                      _ctx.at(input_index).scale(),
-                                                      _ctx.at(input_index).zeroPoint()));
-  }
-  else
-  {
-    nnfw::util::feature::Shape ifm_shape_feature;
-    ifm_shape_feature = _ctx.at(input_index).shape().asFeature();
-    H = ifm_shape_feature.H;
-    W = ifm_shape_feature.W;
-    N = num_output;
-    C = ifm_shape_feature.C;
-    assert(C * H * W == input_size);
-    _builder.addShapeConstr(
-        input_index, asTensorInfo(ifm_shape_feature, _ctx.at(input_index).type(),
-                                  _ctx.at(input_index).scale(), _ctx.at(input_index).zeroPoint()));
-  }
-
-  const auto batches = no_of_input_elements / input_size;
-  const auto bias_size = _ctx.at(bias_index).shape().asVector();
+  assert(_ctx.at(bias_index).shape().rank() == 1);
 
   // TODO Should move to the place where the operand is handled, if it is possible.
   // Set Shape Constraints
   _builder.addShapeConstr(
-      output_index, asTensorInfo(batches, num_output, _ctx.at(output_index).type(),
+      output_index, asTensorInfo(_ctx.at(output_index).shape(), _ctx.at(output_index).type(),
                                  _ctx.at(output_index).scale(), _ctx.at(output_index).zeroPoint()));
   _builder.addShapeConstr(
-      weight_index, asTensorInfo(num_output /*H*/, input_size /*W*/, _ctx.at(weight_index).type(),
+      input_index, asTensorInfo(_ctx.at(input_index).shape(), _ctx.at(input_index).type(),
+                                _ctx.at(input_index).scale(), _ctx.at(input_index).zeroPoint()));
+  _builder.addShapeConstr(
+      weight_index, asTensorInfo(_ctx.at(weight_index).shape(), _ctx.at(weight_index).type(),
                                  _ctx.at(weight_index).scale(), _ctx.at(weight_index).zeroPoint()));
-  _builder.addShapeConstr(bias_index, asTensorInfo(bias_size, _ctx.at(bias_index).type(),
-                                                   _ctx.at(bias_index).scale(),
-                                                   _ctx.at(bias_index).zeroPoint()));
-
-  // Set initializer for weight
-  // Workaround for https://github.sec.samsung.net/STAR/nnfw/issues/2319
-  if (_ctx.at(weight_index).hasData())
-  {
-    auto weight_base = _ctx.at(weight_index).data().base();
-    auto weight_size = _ctx.at(weight_index).data().size();
-    auto weight_type = _ctx.at(weight_index).type();
-
-    // TODO Should change to using initKernelTensor() if the method of calculating offset is not
-    // different from other kernel.
-    switch (weight_type)
-    {
-      case ANEURALNETWORKS_TENSOR_FLOAT32:
-      {
-        auto initializer = [num_output, N, C, H, W, weight_base,
-                            weight_size](::arm_compute::ITensor &tensor) {
-          const ::nnfw::util::kernel::Shape ker_shape{N, C, H, W};
-          const ::internal::nnapi::kernel::Reader<float> from{
-              ker_shape, reinterpret_cast<const float *>(weight_base), weight_size};
-          ::nnfw::util::kernel::iterate(ker_shape)
-              << [&](uint32_t nth, uint32_t ch, uint32_t row, uint32_t col) {
-                   const auto value = from.at(nth, ch, row, col);
-                   uint32_t offset = 0;
-
-                   // ARM Compute Library uses 'NCHW' ordering
-                   offset += nth * C * H * W;
-                   offset += ch * H * W;
-                   offset += row * W;
-                   offset += col;
-
-                   const ::arm_compute::Coordinates coordinate{offset};
-
-                   auto into = reinterpret_cast<float *>(tensor.ptr_to_element(coordinate));
-
-                   *into = value;
-                 };
-        };
-
-        _builder.addInitializer(weight_index, initializer);
-        break;
-      }
-      case ANEURALNETWORKS_TENSOR_QUANT8_ASYMM:
-      {
-        auto initializer = [num_output, N, C, H, W, weight_base,
-                            weight_size](::arm_compute::ITensor &tensor) {
-          const ::nnfw::util::kernel::Shape ker_shape{N, C, H, W};
-
-          const ::internal::nnapi::kernel::Reader<uint8_t> from{ker_shape, weight_base,
-                                                                weight_size};
-          ::nnfw::util::kernel::iterate(ker_shape)
-              << [&](uint32_t nth, uint32_t ch, uint32_t row, uint32_t col) {
-                   const auto value = from.at(nth, ch, row, col);
-                   uint32_t offset = 0;
-
-                   // ARM Compute Library uses 'NCHW' ordering
-                   offset += nth * C * H * W;
-                   offset += ch * H * W;
-                   offset += row * W;
-                   offset += col;
-
-                   const ::arm_compute::Coordinates coordinate{offset};
-
-                   auto into = reinterpret_cast<uint8_t *>(tensor.ptr_to_element(coordinate));
-
-                   *into = value;
-                 };
-        };
-
-        _builder.addInitializer(weight_index, initializer);
-        break;
-      }
-      default:
-      {
-        throw std::runtime_error("Not supported");
-      }
-    }
-  }
-
-  // Set initializer for bias
-  // Workaround for https://github.sec.samsung.net/STAR/nnfw/issues/2319
-  if (_ctx.at(bias_index).hasData())
-  {
-    auto bias_base = _ctx.at(bias_index).data().base();
-    auto bias_type = _ctx.at(bias_index).type();
-
-    switch (bias_type)
-    {
-      case ANEURALNETWORKS_TENSOR_FLOAT32:
-      {
-        auto initializer = std::bind(initVectorTensor<float>, _1, bias_base, bias_size);
-        _builder.addInitializer(bias_index, initializer);
-        break;
-      }
-      case ANEURALNETWORKS_TENSOR_INT32:
-      {
-        auto initializer = std::bind(initVectorTensor<int32_t>, _1, bias_base, bias_size);
-        _builder.addInitializer(bias_index, initializer);
-        break;
-      }
-      default:
-      {
-        throw std::runtime_error("Not supported");
-      }
-    }
-  }
+  _builder.addShapeConstr(
+      bias_index, asTensorInfo(_ctx.at(bias_index).shape(), _ctx.at(bias_index).type(),
+                               _ctx.at(bias_index).scale(), _ctx.at(bias_index).zeroPoint()));
 
   // Construct operation parameters
   struct Param
index 8b87825..a72c85e 100644 (file)
@@ -13,6 +13,7 @@ GeneratedTests.depth_to_space_quant8_2
 GeneratedTests.embedding_lookup
 GeneratedTests.fully_connected_float_4d_simple
 GeneratedTests.fully_connected_quant8_2
+GeneratedTests.fully_connected_float_1_nnfw
 GeneratedTests.hashtable_lookup_float
 GeneratedTests.hashtable_lookup_quant8
 GeneratedTests.l2_normalization
@@ -88,4 +89,3 @@ ValidationTestExecution.EventWait
 GeneratedTests.concat_float_1
 GeneratedTests.concat_float_2
 GeneratedTests.concat_quant8_2
-GeneratedTests.fully_connected_float_2
index 8b87825..a72c85e 100644 (file)
@@ -13,6 +13,7 @@ GeneratedTests.depth_to_space_quant8_2
 GeneratedTests.embedding_lookup
 GeneratedTests.fully_connected_float_4d_simple
 GeneratedTests.fully_connected_quant8_2
+GeneratedTests.fully_connected_float_1_nnfw
 GeneratedTests.hashtable_lookup_float
 GeneratedTests.hashtable_lookup_quant8
 GeneratedTests.l2_normalization
@@ -88,4 +89,3 @@ ValidationTestExecution.EventWait
 GeneratedTests.concat_float_1
 GeneratedTests.concat_float_2
 GeneratedTests.concat_quant8_2
-GeneratedTests.fully_connected_float_2