Add kernels implementation placeholder (#448)
author SaeHie Park/Motion Control Lab(SR)/Principal Engineer/Samsung Electronics <saehie.park@samsung.com>
Fri, 6 Apr 2018 06:13:32 +0000 (15:13 +0900)
committer Sangmin Seo/Motion Control Lab(SR)/Senior Engineer/Samsung Electronics <sangmin7.seo@samsung.com>
Fri, 6 Apr 2018 06:13:32 +0000 (15:13 +0900)
* Add kernels implementation placeholder

This adds an empty kernel placeholder source file, the initial CMake files, and an empty test source file.

Signed-off-by: SaeHie Park <saehie.park@samsung.com>
src/CMakeLists.txt
src/kernel/CMakeLists.txt [new file with mode: 0644]
src/kernel/acl/CMakeLists.txt [new file with mode: 0644]
src/kernel/acl/src/Conv2D_acl.cpp [new file with mode: 0644]
src/kernel/acl/src/Conv2D_acl.test.cpp [new file with mode: 0644]

index 561ec86..a570d75 100644 (file)
@@ -1,5 +1,6 @@
 if(BUILD_NN_RUNTIME)
   add_subdirectory(runtime)
+  add_subdirectory(kernel)
 endif(BUILD_NN_RUNTIME)
 add_subdirectory(util)
 add_subdirectory(support)
diff --git a/src/kernel/CMakeLists.txt b/src/kernel/CMakeLists.txt
new file mode 100644 (file)
index 0000000..debaa08
--- /dev/null
@@ -0,0 +1,3 @@
+if(${TARGET_ARCH_BASE} STREQUAL "arm")
+  add_subdirectory(acl)
+endif()
diff --git a/src/kernel/acl/CMakeLists.txt b/src/kernel/acl/CMakeLists.txt
new file mode 100644 (file)
index 0000000..6263b82
--- /dev/null
@@ -0,0 +1,59 @@
+set(LIB_KERNELACL kernelacl)
+set(LIB_KERNELACL_TEST kernelacl_test)
+
+# TODO remove this when default goes to c++14
+if(CMAKE_VERSION VERSION_LESS 3.1.0)
+  set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -std=c++14")
+else(CMAKE_VERSION VERSION_LESS 3.1.0)
+  set(CMAKE_CXX_STANDARD 14)
+endif(CMAKE_VERSION VERSION_LESS 3.1.0)
+
+# runtime information
+set(PATH_RUNTIME ${CMAKE_SOURCE_DIR}/src/runtime)
+set(PATH_RUNTIME_NN ${PATH_RUNTIME}/ref/nn)
+set(RUNTIME_INCLUDES ${PATH_RUNTIME_NN}/common/include
+                     ${PATH_RUNTIME_NN}/runtime/include
+                     ${PATH_RUNTIME_NN}/depend/hal/include
+                     ${PATH_RUNTIME_NN}/depend/libhidl/base/include
+                     ${PATH_RUNTIME_NN}/depend/libcutils/include
+                     ${PATH_RUNTIME_NN}/depend/libutils/include
+                     ${PATH_RUNTIME_NN}/depend/android-base/include
+                     )
+
+# TODO move to top
+# gtest
+set(GTEST_LIBS libgtest.a libgtest_main.a pthread)
+
+# common
+link_directories(${CMAKE_INSTALL_PREFIX}/lib)
+
+# kernel library
+set(KERNELACL_SRCS "src/Conv2D_acl.cpp"
+                   )
+
+add_library(${LIB_KERNELACL} SHARED ${KERNELACL_SRCS})
+target_include_directories(${LIB_KERNELACL} PUBLIC
+                           ${NNFW_INCLUDE_DIR}
+                           ${RUNTIME_INCLUDES}
+                           ${NNFW_ACL_INCLUDES}
+                           )
+target_link_libraries(${LIB_KERNELACL} nnfw_util ${NNFW_ACL_LIBS})
+install(TARGETS ${LIB_KERNELACL} DESTINATION lib)
+
+# kernel test executable
+set(KERNELACL_TEST_SRCS "src/Conv2D_acl.test.cpp"
+                        )
+
+add_executable(${LIB_KERNELACL_TEST} ${KERNELACL_TEST_SRCS})
+target_include_directories(${LIB_KERNELACL_TEST} PUBLIC
+                           ${NNFW_INCLUDE_DIR}
+                           ${RUNTIME_INCLUDES}
+                           ${NNFW_ACL_INCLUDES}
+                           )
+add_dependencies(${LIB_KERNELACL_TEST} googletest)
+target_link_libraries(${LIB_KERNELACL_TEST}
+                      ${LIB_KERNELACL}
+                      nnfw_util ${NNFW_ACL_LIBS}
+                      ${GTEST_LIBS}
+                      )
+install(TARGETS ${LIB_KERNELACL_TEST} DESTINATION unittest)
diff --git a/src/kernel/acl/src/Conv2D_acl.cpp b/src/kernel/acl/src/Conv2D_acl.cpp
new file mode 100644 (file)
index 0000000..e5bde29
--- /dev/null
@@ -0,0 +1,23 @@
+#include "OperationsUtils.h"
+
+namespace nnfw {
+namespace kernel {
+namespace acl {
+
+bool convFloat32(const float* inputData, const android::nn::Shape& inputShape,
+                 const float* filterData, const android::nn::Shape& filterShape,
+                 const float* biasData, const android::nn::Shape& biasShape,
+                 int32_t padding_left, int32_t padding_right,
+                 int32_t padding_top, int32_t padding_bottom,
+                 int32_t stride_width, int32_t stride_height,
+                 int32_t activation,
+                 float* outputData, const android::nn::Shape& outputShape)
+{
+  // TODO implement this
+
+  return true;
+}
+
+} // namespace acl
+} // namespace kernel
+} // namespace nnfw
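
The placeholder above is expected to later call into the ARM Compute Library (the NNFW_ACL_LIBS this target links against). Below is a minimal sketch of that direction, assuming the NEON backend; the helper name convFloat32_acl_sketch, the shape mapping, and the simplified tensor handling are illustrative assumptions, not part of this commit.

  #include <arm_compute/core/TensorInfo.h>
  #include <arm_compute/core/TensorShape.h>
  #include <arm_compute/core/Types.h>
  #include <arm_compute/runtime/Tensor.h>
  #include <arm_compute/runtime/NEON/functions/NEConvolutionLayer.h>

  #include "OperationsUtils.h"

  // Illustrative only: run a float32 convolution through arm_compute's NEON backend.
  // Copying the raw float buffers into/out of the arm_compute tensors, NHWC->NCHW
  // layout conversion, and the fused activation are all omitted in this sketch.
  bool convFloat32_acl_sketch(const float* inputData, const android::nn::Shape& inputShape,
                              const float* filterData, const android::nn::Shape& filterShape,
                              const float* biasData, const android::nn::Shape& biasShape,
                              int32_t padding_left, int32_t padding_right,
                              int32_t padding_top, int32_t padding_bottom,
                              int32_t stride_width, int32_t stride_height,
                              int32_t activation,
                              float* outputData, const android::nn::Shape& outputShape)
  {
    using namespace arm_compute;

    // android::nn::Shape dimensions are {N, H, W, C}; arm_compute's TensorShape is
    // ordered W, H, C, N (assumed mapping; data reordering not shown).
    auto toAclShape = [](const android::nn::Shape& s) {
      return TensorShape(s.dimensions[2], s.dimensions[1], s.dimensions[3], s.dimensions[0]);
    };

    Tensor input, filter, bias, output;
    input.allocator()->init(TensorInfo(toAclShape(inputShape), 1, DataType::F32));
    filter.allocator()->init(TensorInfo(toAclShape(filterShape), 1, DataType::F32));
    bias.allocator()->init(TensorInfo(toAclShape(biasShape), 1, DataType::F32));
    output.allocator()->init(TensorInfo(toAclShape(outputShape), 1, DataType::F32));

    PadStrideInfo conv_info(stride_width, stride_height,
                            padding_left, padding_right,
                            padding_top, padding_bottom,
                            DimensionRoundingType::FLOOR);

    NEConvolutionLayer conv;
    conv.configure(&input, &filter, &bias, &output, conv_info);

    input.allocator()->allocate();
    filter.allocator()->allocate();
    bias.allocator()->allocate();
    output.allocator()->allocate();

    // ... fill input/filter/bias buffers from inputData/filterData/biasData ...
    conv.run();
    // ... read the output buffer back into outputData ...

    return true;
  }
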
diff --git a/src/kernel/acl/src/Conv2D_acl.test.cpp b/src/kernel/acl/src/Conv2D_acl.test.cpp
new file mode 100644 (file)
index 0000000..2dd1d1d
--- /dev/null
@@ -0,0 +1,65 @@
+#include "gtest/gtest.h"
+#include "OperationsUtils.h"
+
+namespace nnfw {
+namespace kernel {
+namespace acl {
+
+bool convFloat32(const float* inputData, const android::nn::Shape& inputShape,
+                 const float* filterData, const android::nn::Shape& filterShape,
+                 const float* biasData, const android::nn::Shape& biasShape,
+                 int32_t padding_left, int32_t padding_right,
+                 int32_t padding_top, int32_t padding_bottom,
+                 int32_t stride_width, int32_t stride_height,
+                 int32_t activation,
+                 float* outputData, const android::nn::Shape& outputShape);
+
+} // namespace acl
+} // namespace kernel
+} // namespace nnfw
+
+static void initData(float* data, int num, float value)
+{
+  for (int i = 0; i < num; i++) {
+    *(data + i) = value;
+  }
+}
+
+using namespace nnfw::kernel::acl;
+
+TEST(KernelACL_TC, convFloat32_test)
+{
+  float inputData[9];
+  const android::nn::Shape inputShape = { OperandType::FLOAT32, {1,3,3,1}, 1.0, 0 };
+  float filterData[9];
+  const android::nn::Shape filterShape = { OperandType::FLOAT32, {1,3,3,1}, 1.0, 0 };
+  float biasData[1] = { 1.0 };
+  const android::nn::Shape biasShape = { OperandType::FLOAT32, {1,1,1,1}, 1.0, 0 };
+  int32_t padding_left = 0;
+  int32_t padding_right = 0;
+  int32_t padding_top = 0;
+  int32_t padding_bottom = 0;
+  int32_t stride_width = 1;
+  int32_t stride_height = 1;
+  int32_t activation = static_cast<int32_t>(FusedActivationFunc::RELU);
+  float outputData[9]; // stack array so the sizeof-based element count below is correct
+  const android::nn::Shape outputShape = { OperandType::FLOAT32, {1,1,1,1}, 1.0, 0 };
+  bool bret;
+
+  initData(inputData, sizeof(inputData) / sizeof(inputData[0]), 1.0);
+  initData(filterData, sizeof(filterData) / sizeof(filterData[0]), 1.0);
+  initData(outputData, sizeof(outputData) / sizeof(outputData[0]), 0.0);
+
+  bret = convFloat32(inputData, inputShape,
+                     filterData, filterShape,
+                     biasData, biasShape,
+                     padding_left, padding_right,
+                     padding_top, padding_bottom,
+                     stride_width, stride_height,
+                     activation,
+                     outputData, outputShape);
+
+  // TODO compare outputData with some expected data
+
+  EXPECT_EQ(bret, true);
+}
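
A note on the remaining TODO: with a 3x3 all-ones input, a 3x3 all-ones filter, bias 1.0, stride 1 and no padding, the single valid output element is 9 * 1.0 + 1.0 = 10.0 (unchanged by RELU). Once convFloat32 is implemented, the comparison could look like this sketch, where expectData is a hypothetical local:

  // Hypothetical check for the TODO above; only meaningful once convFloat32
  // actually computes the convolution for the {1,1,1,1} output shape.
  float expectData[1] = { 10.0f };  // 3x3 ones convolved with 3x3 ones, plus bias 1.0
  for (size_t i = 0; i < sizeof(expectData) / sizeof(expectData[0]); ++i) {
    EXPECT_FLOAT_EQ(expectData[i], outputData[i]);
  }
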