The system tests framework for the ACL soft backend has been created. Test cases for the operations used in the MobileNet and the Inception-v3 networks have been added.
Signed-off-by: Timur Ablyazimov <t.ablyazimov@samsung.com>
add_subdirectory(import)
add_subdirectory(soft_backend)
add_subdirectory(interpreter)
+add_subdirectory(acl_soft_backend)
--- /dev/null
+#include "gtest/gtest.h"
+#include <sstream>
+#include <thread>
+#include <cmath>
+#include <cstring>  // memcpy() in areFloatsNear()
+#include <cassert>  // assert() in areFloatsNear()
+#include <vector>   // std::vector used by the HDF5 readers below
+#include <memory>
+#include <H5Cpp.h>
+#include <cstdlib>  // getenv(), system()
+#include "BuildInfo.h"
+
+using namespace std;
+
+// Network address (user@host) of the target Odroid device taken from the
+// ODROID_NET_ADDR environment variable; empty when unset (tests fail fast).
+static string netAddr(getenv("ODROID_NET_ADDR") ? getenv("ODROID_NET_ADDR") : "");
+
+// Reads the first tensor stored in the HDF5 file 'file_name' into a raw byte
+// buffer interpreted as floats. On success the tensor dimensions are appended
+// to 'shape'. Returns nullptr when the file cannot be read, the tensor rank
+// is < 2, or the extents query fails.
+static unique_ptr<char[]> readTensorDataFromHdf5File(const string& file_name, vector<int>& shape) {
+  try {
+    H5::H5File h5File(file_name, H5F_ACC_RDONLY);
+    auto tensor_name = h5File.getObjnameByIdx(0);
+    auto dataset = h5File.openDataSet(tensor_name);
+    auto dataspace = dataset.getSpace();
+    auto rank = dataspace.getSimpleExtentNdims();
+
+    if (rank < 2)
+      return nullptr;
+
+    // A vector instead of 'hsize_t dims[rank]': runtime-sized arrays are a
+    // non-standard compiler extension in C++.
+    vector<hsize_t> dims(rank);
+
+    if (dataspace.getSimpleExtentDims(dims.data()) != rank)
+      return nullptr;
+
+    int size = 1;
+
+    for (int i = 0; i < rank; ++i) {
+      size *= dims[i];
+      shape.push_back(dims[i]);
+    }
+
+    auto result = unique_ptr<char[]>(new char[size * sizeof(float)]);
+    dataset.read(&result[0], H5::PredType::NATIVE_FLOAT);
+    return result;
+  } catch (H5::Exception&) {
+    // Catch the H5 base exception: getObjnameByIdx()/openDataSet()/read()
+    // throw types other than FileIException, which previously escaped and
+    // aborted the whole test binary.
+    return nullptr;
+  }
+}
+
+// Returns true when 'a' and 'b' are equal within the absolute tolerance 'eps',
+// or within 'ulp' units-in-the-last-place of each other when their bit
+// patterns are compared as 32-bit integers.
+// TODO: this function was copied from CPPOperations.cpp, move it to a shared place.
+bool areFloatsNear(float a, float b, int32_t ulp, float eps) {
+ assert(ulp < (1 << 23) && "this algorithm is not applicable for such large diffs");
+ assert(eps >= 0 && "epsilon should be positive number");
+ // Fast path: absolute difference already within epsilon.
+ if (fabs(a - b) <= eps)
+ return true;
+ // since this point need to find difference between numbers
+ // in terms of ULP
+ int32_t ai;
+ int32_t bi;
+ memcpy(&ai, &a, sizeof(float));
+ memcpy(&bi, &b, sizeof(float));
+ // compare mantissa of numbers
+ // NOTE(review): the integer-difference trick assumes both values have the
+ // same sign bit; confirm behavior for pairs straddling zero.
+ if (ai > bi)
+ return ai - bi <= ulp;
+ return bi - ai <= ulp;
+}
+
+// Compares two HDF5 tensor files element-wise via gtest fatal assertions:
+// both must be readable, have identical shapes, and every element pair must
+// agree within 32 ULPs / 1e-6 absolute tolerance.
+static void compareHdf5Files(const string& file_name1, const string& file_name2) {
+  vector<int> shape1;
+  auto tensor1 = readTensorDataFromHdf5File(file_name1, shape1);
+  // Check for a failed read BEFORE taking &tensor1[0]: indexing a null
+  // unique_ptr<char[]> is undefined behavior.
+  ASSERT_NE(tensor1, nullptr);
+  float* tensorData1 = reinterpret_cast<float*>(&tensor1[0]);
+  vector<int> shape2;
+  auto tensor2 = readTensorDataFromHdf5File(file_name2, shape2);
+  ASSERT_NE(tensor2, nullptr);
+  float* tensorData2 = reinterpret_cast<float*>(&tensor2[0]);
+  ASSERT_EQ(shape1.size(), shape2.size());
+  size_t size = 1;
+
+  // Verify the shapes match dimension by dimension and accumulate the total
+  // element count (size_t index avoids the signed/unsigned comparison).
+  for (size_t i = 0; i < shape1.size(); ++i) {
+    ASSERT_EQ(shape1[i], shape2[i]);
+    size *= shape1[i];
+  }
+
+  for (size_t i = 0; i < size; ++i) {
+    ASSERT_TRUE(areFloatsNear(tensorData1[i], tensorData2[i], 32, 1e-6));
+  }
+}
+
+// Builds a temporary directory name (used on the remote device) that is
+// unique per test-runner thread, so concurrent test runs do not clash.
+static string genTmpDirName() {
+ string result("/tmp/nnc_test_");
+ stringstream ss;
+ // The thread id is the uniqueness token appended to the fixed prefix.
+ ss << this_thread::get_id();
+ result += ss.str();
+
+ return result;
+}
+
+// Executes 'remote_cmd' on the Odroid device via ssh.
+// Returns true when the remote command exits with status 0.
+static bool runOnOdroid(const string& remote_cmd) {
+ string cmd = "ssh " + netAddr + " \"" + remote_cmd + "\"";
+ return system(cmd.c_str()) == 0;
+}
+
+// Copies the local file 'src' to path 'dst' on the Odroid device via scp.
+// Returns true when scp exits with status 0.
+static bool copyToOdroid(const string& src, const string& dst) {
+ string cmd("scp -q " + src + " " + netAddr + ":" + dst);
+ return system(cmd.c_str()) == 0;
+}
+
+// Copies the remote file 'src' from the Odroid device to the local path 'dst'
+// via scp. Returns true when scp exits with status 0.
+static bool copyFromOdroid(const string& src, const string& dst) {
+ string cmd("scp -q " + netAddr + ":" + src + " " + dst);
+ return system(cmd.c_str()) == 0;
+}
+
+// Runs one ACL soft-backend system test named 'name': deploys the cross-built
+// artifact binary, its parameter file and the input HDF5 to the Odroid device,
+// executes the artifact there, fetches the produced output back and compares
+// it with the NNKIT-generated reference file.
+static void runAclSystemTest(const string& name) {
+ // Ensure the Odroid device net address was set.
+ ASSERT_TRUE(!netAddr.empty());
+
+ // The name of the temporary directory which is generated on the remote device.
+ string dir_name = genTmpDirName();
+
+ // Ensure there is no such directory on the remote device.
+ ASSERT_TRUE(runOnOdroid("rm -rf " + dir_name));
+
+ // Create the temporary directory on the remote device.
+ ASSERT_TRUE(runOnOdroid("mkdir " + dir_name));
+
+ // Copy the executable artifact file to the remote device.
+ ASSERT_TRUE(copyToOdroid(binDir + "/" + name + "/nnc_test", dir_name));
+
+ // Copy the artifact parameter file to the remote device.
+ ASSERT_TRUE(copyToOdroid(binDir + "/" + name + "/AclArtifact.par", dir_name));
+
+ // Copy the model input HDF5 file to the remote device.
+ ASSERT_TRUE(copyToOdroid(binDir + "/" + name + "/in_" + name + "_caffe.hdf5",
+ dir_name + "/in.hdf5"));
+
+ // Switch to the artifact directory on the remote device and run the artifact.
+ ASSERT_TRUE(runOnOdroid("cd " + dir_name + "; ./nnc_test"));
+
+ // Copy the resulting file from the remote device to the host.
+ ASSERT_TRUE(copyFromOdroid(dir_name + "/out.hdf5", binDir + "/" + name));
+
+ // Remove the temporary test case directory from the remote device.
+ ASSERT_TRUE(runOnOdroid("rm -rf " + dir_name));
+
+ // Compare the resulting HDF5 file with the reference one.
+ compareHdf5Files(binDir + "/" + name + "/ref.hdf5", binDir + "/" + name + "/out.hdf5");
+}
+
+// One test case per elementary operation occurring in the MobileNet and
+// Inception-v3 models; each case deploys and runs the corresponding generated
+// artifact on the remote device and checks its output against the reference.
+TEST(acl_cpp_operations_test, convolution) {
+ runAclSystemTest("convolution");
+}
+
+TEST(acl_cpp_operations_test, depthwise_convolution) {
+ runAclSystemTest("depthwise_convolution");
+}
+
+TEST(acl_cpp_operations_test, convolution_with_bias) {
+ runAclSystemTest("convolution_with_bias");
+}
+
+TEST(acl_cpp_operations_test, scale) {
+ runAclSystemTest("scale");
+}
+
+TEST(acl_cpp_operations_test, relu) {
+ runAclSystemTest("relu");
+}
+
+TEST(acl_cpp_operations_test, pooling_max) {
+ runAclSystemTest("pooling_max");
+}
+
+TEST(acl_cpp_operations_test, pooling_avg) {
+ runAclSystemTest("pooling_avg");
+}
+
+TEST(acl_cpp_operations_test, concatenate) {
+ runAclSystemTest("concatenate");
+}
+
+TEST(acl_cpp_operations_test, reshape) {
+ runAclSystemTest("reshape");
+}
+
+TEST(acl_cpp_operations_test, fully_connected) {
+ runAclSystemTest("fully_connected");
+}
--- /dev/null
+#ifndef _NNC_BUILD_INFO_H_IN_H_
+#define _NNC_BUILD_INFO_H_IN_H_
+
+// The header must be self-contained: it uses std::string below.
+#include <string>
+
+// Absolute path of the test binary directory; the placeholder is substituted
+// by CMake's configure_file() at build time.
+static std::string binDir = "${CMAKE_CURRENT_BINARY_DIR}";
+
+#endif //_NNC_BUILD_INFO_H_IN_H_
--- /dev/null
+# Prints a uniform warning explaining why the ACL backend system tests are
+# skipped for this build configuration.
+function(acl_warn MESSAGE)
+ message(WARNING "The ACL backend system tests will not be built: ${MESSAGE}")
+endfunction(acl_warn)
+
+if(NOT TARGET caffegen)
+ acl_warn("the CAFFEGEN was not built.")
+ return()
+endif(NOT TARGET caffegen)
+
+if(NOT TARGET nni)
+ acl_warn("the NNKIT was not built.")
+ return()
+endif(NOT TARGET nni)
+
+# Path to the folder where the Odroid root folder is either mounted or copied.
+if(NOT DEFINED ENV{ODROID_MIRROR_DIR})
+ acl_warn("the ODROID_MIRROR_DIR environment variable was not defined.\n\
+ As a cross-build is done, this variable should point to a directory\n\
+ which is either mounted to (with e.g. SSHFS) or contains a copy of the target\n\
+ (e.g. Odroid XU4) device file system.")
+ return()
+endif()
+
+# Path to the ACL library root on the Odroid device.
+if(NOT DEFINED ENV{ODROID_ACL_DIR})
+ acl_warn("the ODROID_ACL_DIR environment variable was not defined.\n\
+ As a cross-build is done, this variable should contain the path to the root directory\n\
+ of the Arm Compute Library on the target (e.g. Odroid XU4) device.")
+ return()
+endif()
+
+# Path to the HDF5 library on the Odroid.
+# It must most likely be: /usr/lib/arm-linux-gnueabihf/hdf5/serial
+if(NOT DEFINED ENV{ODROID_H5_DIR})
+ acl_warn("the ODROID_H5_DIR environment variable was not defined.\n\
+ As a cross-build is done, this variable should contain the path to the root directory\n\
+ of the HDF5 library on the target (e.g. Odroid XU4) device.\n\
+ Often it would be the: /usr/lib/arm-linux-gnueabihf/hdf5/serial directory.")
+ return()
+endif()
+
+find_package(HDF5 COMPONENTS CXX REQUIRED)
+
+# Provide the test suite with the information about where to locate the executables to run etc.
+configure_file(BuildInfo.h.in BuildInfo.h)
+
+add_nncc_test(nnc_acl_soft_backend_system_test AclCppOperations.cpp)
+target_include_directories(nnc_acl_soft_backend_system_test PRIVATE ${CMAKE_CURRENT_BINARY_DIR}
+ ${HDF5_INCLUDE_DIRS})
+target_link_libraries(nnc_acl_soft_backend_system_test ${HDF5_CXX_LIBRARIES})
+
+file(GLOB MODELS RELATIVE ${CMAKE_CURRENT_SOURCE_DIR} "models/*.prototxt")
+
+# Loop over all the existing system test models.
+foreach(MODEL IN ITEMS ${MODELS})
+ # Set the model-related variables used inside the loop.
+ set(MODEL_FILE ${CMAKE_CURRENT_SOURCE_DIR}/${MODEL})
+ get_filename_component(MODEL_NAME ${MODEL} NAME_WE)
+ set(MODEL_DIR ${CMAKE_CURRENT_BINARY_DIR}/${MODEL_NAME})
+ set(COPIED_MODEL_FILE ${MODEL_DIR}/${MODEL_NAME}.prototxt)
+ set(INITIALIZED_MODEL_FILE ${MODEL_DIR}/${MODEL_NAME}.prototxt.weights)
+ set(ENCODED_MODEL_FILE ${MODEL_DIR}/${MODEL_NAME}.caffemodel)
+ set(INPUT_FILE ${MODEL_DIR}/in_${MODEL_NAME}_caffe.hdf5)
+ set(REFERENCE_OUTPUT ${MODEL_DIR}/ref.hdf5)
+ set(MODEL_DIR_TARGET nnc_acl_soft_backend_${MODEL_NAME}_dir)
+ set(COPIED_MODEL_TARGET nnc_acl_soft_backend_${MODEL_NAME}_prototxt)
+ set(INITIALIZED_MODEL_TARGET nnc_acl_soft_backend_${MODEL_NAME}_weights)
+ set(ENCODED_MODEL_TARGET nnc_acl_soft_backend_${MODEL_NAME}_caffemodel)
+ set(INPUT_TARGET nnc_acl_soft_backend_${MODEL_NAME}_input)
+ set(REFERENCE_OUTPUT_TARGET nnc_acl_soft_backend_${MODEL_NAME}_reference_output)
+ set(MAIN_FILE_TARGET nnc_acl_soft_backend_${MODEL_NAME}_main)
+ set(ARTIFACT_TARGET_CMAKE nnc_acl_soft_backend_${MODEL_NAME}_cmake)
+ set(ARTIFACT_TARGET_TOOLCHAIN nnc_acl_soft_backend_${MODEL_NAME}_toolchain)
+ set(ARTIFACT_TARGET nnc_acl_soft_backend_${MODEL_NAME}_artifact)
+ set(ARTIFACT_TARGET_MAKE nnc_acl_soft_backend_${MODEL_NAME}_artifact_make)
+ set(ARTIFACT_TARGET_BINARY nnc_acl_soft_backend_${MODEL_NAME}_artifact_binary)
+
+ # Create a directory for handling the model in the binary directory.
+ add_custom_target(${MODEL_DIR_TARGET} ALL ${CMAKE_COMMAND} -E make_directory ${MODEL_DIR})
+
+ # Copy the model prototxt to the model build directory.
+ add_custom_target(${COPIED_MODEL_TARGET} ALL ${CMAKE_COMMAND} -E copy ${MODEL_FILE} ${COPIED_MODEL_FILE})
+
+ # Copy the artifact main and project files into the model subfolder inside the binary directory.
+ add_custom_target(${MAIN_FILE_TARGET} ALL ${CMAKE_COMMAND}
+ -E copy ${CMAKE_CURRENT_SOURCE_DIR}/artifact_cmake/main.cpp ${MODEL_DIR}/main.cpp)
+
+ # Copy the artifact CMakeLists.txt to the artifact build directory.
+ add_custom_target(${ARTIFACT_TARGET_CMAKE} ALL ${CMAKE_COMMAND}
+ -E copy ${CMAKE_CURRENT_SOURCE_DIR}/artifact_cmake/CMakeLists.txt ${MODEL_DIR}/CMakeLists.txt)
+
+ # Copy the artifact toolchain file to the artifact build directory.
+ add_custom_target(${ARTIFACT_TARGET_TOOLCHAIN} ALL ${CMAKE_COMMAND}
+ -E copy ${CMAKE_CURRENT_SOURCE_DIR}/artifact_cmake/odroid.cmake ${MODEL_DIR}/odroid.cmake)
+
+ # Initialize the model with weights.
+ add_custom_target(${INITIALIZED_MODEL_TARGET} ALL cat ${COPIED_MODEL_FILE} | GLOG_minloglevel=2
+ $<TARGET_FILE:caffegen> init > ${INITIALIZED_MODEL_FILE}
+ DEPENDS ${COPIED_MODEL_TARGET})
+
+ # Encode the model.
+ add_custom_target(${ENCODED_MODEL_TARGET} ALL cat ${INITIALIZED_MODEL_FILE} | GLOG_minloglevel=2
+ $<TARGET_FILE:caffegen> encode > ${ENCODED_MODEL_FILE}
+ DEPENDS ${INITIALIZED_MODEL_TARGET})
+
+ add_custom_target(${INPUT_TARGET} ALL $<TARGET_FILE:tensor_gen> data ${MODEL_NAME} 1 3 4 8
+ WORKING_DIRECTORY ${MODEL_DIR})
+
+ message("INPUT_FILE = ${INPUT_FILE}")
+
+ # Generate the 'reference' output with NNKIT.
+ add_custom_target(${REFERENCE_OUTPUT_TARGET} ALL $<TARGET_FILE:nni>
+ --backend $<TARGET_FILE:nnkit_caffe_backend> --backend-arg ${INITIALIZED_MODEL_FILE}
+ --pre $<TARGET_FILE:nnkit_HDF5_import_action> --pre-arg ${INPUT_FILE}
+ --post $<TARGET_FILE:nnkit_HDF5_export_action> --post-arg ${REFERENCE_OUTPUT}
+ DEPENDS $<TARGET_FILE:nni> ${INPUT_TARGET} ${INITIALIZED_MODEL_TARGET})
+
+ # Generate an artifact from the model.
+ add_custom_target(${ARTIFACT_TARGET} ALL $<TARGET_FILE:nnc>
+ --caffe -m ${ENCODED_MODEL_FILE} -o AclArtifact -d ${MODEL_DIR} --target=arm-gpu-c++
+ DEPENDS $<TARGET_FILE:nnc> ${ENCODED_MODEL_FILE})
+
+ # Generate a Makefile for the artifact cross-building.
+ add_custom_target(${ARTIFACT_TARGET_MAKE} ALL cmake .
+ -DCMAKE_TOOLCHAIN_FILE=odroid.cmake
+ DEPENDS ${ARTIFACT_TARGET_CMAKE} ${ARTIFACT_TARGET_TOOLCHAIN} ${ARTIFACT_TARGET}
+ WORKING_DIRECTORY ${MODEL_DIR})
+
+ # Cross-build the artifact with the generated Makefile.
+ add_custom_target(${ARTIFACT_TARGET_BINARY} ALL make
+ DEPENDS ${ARTIFACT_TARGET_MAKE}
+ WORKING_DIRECTORY ${MODEL_DIR})
+endforeach(MODEL)
--- /dev/null
+cmake_minimum_required(VERSION 3.5)
+project(nnc_test)
+
+set(CMAKE_CXX_STANDARD 11)
+
+set(ODROID_MIRROR_DIR $ENV{ODROID_MIRROR_DIR})
+set(ODROID_ACL_DIR $ENV{ODROID_ACL_DIR})
+set(ODROID_ACL_INC_DIR ${ODROID_ACL_DIR}/include)
+set(ODROID_ACL_BUILD_DIR ${ODROID_ACL_DIR}/build)
+
+find_library(OPEN_CL OpenCL /usr/lib/arm-linux-gnueabihf)
+find_library(ARM_COMPUTE arm_compute PATHS ${ODROID_ACL_BUILD_DIR})
+find_library(ARM_COMPUTE_CORE arm_compute_core PATHS ${ODROID_ACL_BUILD_DIR})
+find_package(HDF5 COMPONENTS CXX REQUIRED)
+
+add_executable(nnc_test main.cpp AclArtifact.cpp)
+
+target_include_directories(nnc_test PRIVATE . ${ODROID_MIRROR_DIR}${ODROID_ACL_INC_DIR}
+ ${ODROID_MIRROR_DIR}${ODROID_ACL_DIR} ${ODROID_MIRROR_DIR}${HDF5_INCLUDE_DIRS})
+target_link_libraries(nnc_test ${ARM_COMPUTE} ${ARM_COMPUTE_CORE} ${OPEN_CL} ${HDF5_CXX_LIBRARIES})
+target_compile_definitions(nnc_test PRIVATE ARM_COMPUTE_CL)
--- /dev/null
+#include "AclArtifact.h"
+#include <iostream>
+#include <memory>
+#include <H5Cpp.h>
+
+using namespace std;
+using namespace arm_compute;
+
+// Copies the contents of a CLTensor into a newly allocated host-side byte
+// buffer, iterating the tensor element by element in window order.
+static unique_ptr<char[]> getTensorData(CLTensor& tensor) {
+ auto buf = unique_ptr<char[]>(new char[tensor.info()->total_size()]);
+ // Map the OpenCL buffer into host address space for the duration of the copy.
+ tensor.map();
+ Window window;
+ window.use_tensor_dimensions(tensor.info()->tensor_shape());
+ Iterator i(&tensor, window);
+ char* ptr = &buf[0];
+
+ // NOTE(review): copies sizeof(float) bytes per element — assumes the tensor
+ // data type is F32; confirm for artifacts using other element types.
+ execute_window_loop(window, [&i, &ptr](const Coordinates&) {
+ memcpy(ptr, i.ptr(), sizeof(float));
+ ptr += sizeof(float);
+ }, i);
+
+ tensor.unmap();
+ return buf;
+}
+
+// Fills 'tensor' with float data read from the given HDF5 dataset, writing
+// element by element in window order through a mapped OpenCL buffer.
+static void readTensor(CLTensor& tensor, H5::DataSet& dataset) {
+ auto buf = unique_ptr<char[]>(new char[tensor.info()->total_size()]);
+ // NOTE(review): the dataset is assumed to contain at least total_size()
+ // bytes of F32 data matching the tensor's element count — not validated.
+ dataset.read(&buf[0], H5::PredType::NATIVE_FLOAT);
+ tensor.map();
+ Window window;
+ window.use_tensor_dimensions(tensor.info()->tensor_shape());
+ Iterator i(&tensor, window);
+ char* ptr = &buf[0];
+
+ execute_window_loop(window, [&i, &ptr](const Coordinates&) {
+ memcpy(i.ptr(), ptr, sizeof(float));
+ ptr += sizeof(float);
+ }, i);
+
+ tensor.unmap();
+}
+
+// Fills 'tensor' with the first dataset found in the HDF5 file 'file_name'.
+// Returns false when the file cannot be read or the dataset rank is < 2.
+// NOTE(review): the tensor is expected to be pre-allocated with the matching
+// shape; the file's dimensions are not validated against it — confirm.
+static bool readTensorFromHDF5File(CLTensor& tensor, const string& file_name) {
+  // Read from the .hdf5 file
+  try {
+    H5::H5File h5File(file_name, H5F_ACC_RDONLY);
+    auto tensor_name = h5File.getObjnameByIdx(0);
+    auto dataset = h5File.openDataSet(tensor_name);
+    auto dataspace = dataset.getSpace();
+    auto rank = dataspace.getSimpleExtentNdims();
+
+    if (rank < 2)
+      return false;
+
+    hsize_t dims[rank];
+
+    if (dataspace.getSimpleExtentDims(dims) != rank)
+      return false;
+
+    // The previously computed local TensorShape was never applied to the
+    // tensor (dead code) and has been removed.
+    readTensor(tensor, dataset);
+  } catch (H5::Exception&) {
+    // Catch the H5 base class: openDataSet()/read() can throw exception
+    // types other than FileIException.
+    return false;
+  }
+
+  return true;
+}
+
+// Writes the contents of 'tensor' as a float dataset named 'tensor_name' into
+// a newly created (truncated) HDF5 file 'file_name'. A leading batch
+// dimension of 1 is prepended and the ACL dimension order is reversed to
+// match the HDF5 row-major layout.
+static void writeTensorToHDF5File(CLTensor& tensor, const string& tensor_name,
+                                  const string& file_name) {
+  // The former 'transposed_shape' alias was a plain reference to the original
+  // shape (no transposition happened) and has been removed as misleading.
+  const TensorShape& shape = tensor.info()->tensor_shape();
+  int rank = shape.num_dimensions();
+  hsize_t dims[rank + 1];
+  dims[0] = 1;
+
+  // ACL stores the fastest-varying dimension first; HDF5 expects the reverse.
+  for (int i = 0; i < rank; ++i)
+    dims[rank - i] = shape[i];
+
+  // Write to the .hdf5 file
+  H5::H5File h5File(file_name, H5F_ACC_TRUNC);
+  H5::DataSpace dataspace(rank + 1, dims);
+  auto dataset = h5File.createDataSet(tensor_name, H5::PredType::IEEE_F32BE, dataspace);
+  dataset.write(&getTensorData(tensor)[0], H5::PredType::NATIVE_FLOAT);
+}
+
+// Artifact entry point: initializes the ACL OpenCL scheduler, feeds the input
+// tensor from in.hdf5 through the generated artifact and stores the result in
+// out.hdf5. Returns a non-zero exit code on initialization or input failure.
+int main(int argc, char* argv[]) {
+  CLScheduler::get().default_init();
+
+  if (!CLScheduler::get().is_initialised()) {
+    cout << "Failed to initialise the ACL scheduler" << endl;
+    return 1;
+  }
+
+  AclArtifact artifact;
+  CLTensor& artifact_in = artifact.getInput();
+
+  // Abort instead of silently running inference on uninitialized input data
+  // (the read result was previously ignored).
+  if (!readTensorFromHDF5File(artifact_in, "in.hdf5")) {
+    cout << "Failed to read the input tensor from in.hdf5" << endl;
+    return 1;
+  }
+
+  artifact.Inference();
+
+  CLTensor& artifact_out = artifact.getOutput();
+  writeTensorToHDF5File(artifact_out, "out", "out.hdf5");
+
+  return 0;
+}
--- /dev/null
+set(CMAKE_SYSTEM_NAME Linux)
+set(CMAKE_SYSTEM_PROCESSOR arm)
+
+set(CMAKE_C_COMPILER /usr/bin/arm-linux-gnueabihf-gcc)
+set(CMAKE_CXX_COMPILER /usr/bin/arm-linux-gnueabihf-g++)
+
+set(ODROID_MIRROR_DIR $ENV{ODROID_MIRROR_DIR})
+set(ODROID_H5_DIR $ENV{ODROID_H5_DIR})
+
+set(CMAKE_FIND_ROOT_PATH ${ODROID_MIRROR_DIR}${ODROID_H5_DIR})
+set(CMAKE_SYSROOT ${ODROID_MIRROR_DIR})
+set(CMAKE_PREFIX_PATH ${ODROID_MIRROR_DIR})
+
+set(CMAKE_FIND_ROOT_PATH_MODE_PROGRAM NEVER)
+set(CMAKE_FIND_ROOT_PATH_MODE_LIBRARY ONLY)
+set(CMAKE_FIND_ROOT_PATH_MODE_INCLUDE ONLY)
+set(CMAKE_FIND_ROOT_PATH_MODE_PACKAGE ONLY)
--- /dev/null
+name: "CONCANET"
+layer {
+ name: "input"
+ type: "Input"
+ top: "data"
+ input_param {
+ shape {
+ dim: 1
+ dim: 3
+ dim: 4
+ dim: 8
+ }
+ }
+}
+layer {
+ name: "inception_a1_output"
+ type: "Concat"
+ bottom: "data"
+ bottom: "data"
+ bottom: "data"
+ top: "inception_a1_output"
+ phase: TEST
+}
--- /dev/null
+name: "CONVONET"
+layer {
+ name: "input"
+ type: "Input"
+ top: "data"
+ input_param {
+ shape {
+ dim: 1
+ dim: 3
+ dim: 4
+ dim: 8
+ }
+ }
+}
+layer {
+ name: "conv1"
+ type: "Convolution"
+ bottom: "data"
+ top: "conv1"
+ param {
+ lr_mult: 1
+ decay_mult: 1
+ }
+ convolution_param {
+ num_output: 3
+ bias_term: false
+ pad: 1
+ kernel_size: 3
+ stride: 2
+ weight_filler {
+ type: "msra"
+ }
+ }
+}
--- /dev/null
+name: "CONVONET"
+layer {
+ name: "input"
+ type: "Input"
+ top: "data"
+ input_param {
+ shape {
+ dim: 1
+ dim: 3
+ dim: 4
+ dim: 8
+ }
+ }
+}
+layer {
+ name: "conv1"
+ type: "Convolution"
+ bottom: "data"
+ top: "conv1"
+ param {
+ lr_mult: 1
+ decay_mult: 1
+ }
+ convolution_param {
+ num_output: 3
+ bias_term: true
+ pad: 1
+ kernel_size: 3
+ stride: 2
+ weight_filler {
+ type: "msra"
+ }
+ bias_filler {
+ type: "constant"
+ value: 3.14
+ }
+ }
+}
--- /dev/null
+name: "DEPTHCONVNET"
+layer {
+ name: "input"
+ type: "Input"
+ top: "data"
+ input_param {
+ shape {
+ dim: 1
+ dim: 3
+ dim: 4
+ dim: 8
+ }
+ }
+}
+layer {
+ name: "conv2_1/dw"
+ type: "Convolution"
+ bottom: "data"
+ top: "conv2_1/dw"
+ param {
+ lr_mult: 1
+ decay_mult: 1
+ }
+ convolution_param {
+ num_output: 3
+ bias_term: false
+ pad: 1
+ kernel_size: 3
+ group: 3
+ stride: 1
+ weight_filler {
+ type: "msra"
+ }
+ }
+}
--- /dev/null
+name: "FULLYCONNET"
+layer {
+ name: "input"
+ type: "Input"
+ top: "data"
+ input_param {
+ shape {
+ dim: 1
+ dim: 3
+ dim: 4
+ dim: 8
+ }
+ }
+}
+layer {
+ name: "fc8"
+ type: "InnerProduct"
+ param { lr_mult: 1 decay_mult: 1 }
+ inner_product_param {
+ num_output: 10
+ weight_filler {
+ type: "gaussian"
+ std: 0.01
+ }
+ bias_term: false
+ }
+ bottom: "data"
+ top: "fc8"
+}
--- /dev/null
+name: "POOLINGAVGNET"
+layer {
+ name: "input"
+ type: "Input"
+ top: "data"
+ input_param {
+ shape {
+ dim: 1
+ dim: 3
+ dim: 4
+ dim: 8
+ }
+ }
+}
+layer {
+ name: "inception_a1_pool"
+ type: "Pooling"
+ bottom: "data"
+ top: "inception_a1_pool"
+ pooling_param {
+ pool: AVE
+ kernel_size: 3
+ stride: 1
+ pad: 1
+ }
+}
--- /dev/null
+name: "POOLINGMAXNET"
+layer {
+ name: "input"
+ type: "Input"
+ top: "data"
+ input_param {
+ shape {
+ dim: 1
+ dim: 3
+ dim: 4
+ dim: 8
+ }
+ }
+}
+layer {
+ name: "inception_a1_pool"
+ type: "Pooling"
+ bottom: "data"
+ top: "inception_a1_pool"
+ pooling_param {
+ pool: MAX
+ kernel_size: 3
+ stride: 1
+ pad: 1
+ }
+}
--- /dev/null
+name: "RELUNET"
+layer {
+ name: "input"
+ type: "Input"
+ top: "data"
+ input_param {
+ shape {
+ dim: 1
+ dim: 3
+ dim: 4
+ dim: 8
+ }
+ }
+}
+layer {
+ name: "relu1"
+ type: "ReLU"
+ bottom: "data"
+ top: "relu1"
+}
--- /dev/null
+name: "RESHAPENET"
+layer {
+ name: "input"
+ type: "Input"
+ top: "data"
+ input_param {
+ shape {
+ dim: 1
+ dim: 3
+ dim: 4
+ dim: 8
+ }
+ }
+}
+layer {
+ name: "reshape"
+ type: "Reshape"
+ bottom: "data"
+ top: "output"
+ reshape_param {
+ shape {
+ dim: 1
+ dim: 96
+ }
+ }
+}
--- /dev/null
+name: "SCALENET"
+layer {
+ name: "input"
+ type: "Input"
+ top: "data"
+ input_param {
+ shape {
+ dim: 1
+ dim: 3
+ dim: 4
+ dim: 8
+ }
+ }
+}
+layer {
+ name: "scale1"
+ type: "Scale"
+ bottom: "data"
+ top: "scale1"
+ param {
+ lr_mult: 1
+ decay_mult: 0
+ }
+ scale_param {
+ filler {
+ value: 2.71
+ }
+ bias_term: false
+ }
+}