Remove legacy runtime dependency (#2025)
author이춘석/동작제어Lab(SR)/Staff Engineer/삼성전자 <chunseok.lee@samsung.com>
Thu, 19 Jul 2018 11:04:13 +0000 (20:04 +0900)
committerGitHub Enterprise <noreply-CODE@samsung.com>
Thu, 19 Jul 2018 11:04:13 +0000 (20:04 +0900)
This commit removes all legacy runtime dependencies from the repo

Signed-off-by: Chunseok Lee <chunseok.lee@samsung.com>
12 files changed:
cmake/CfgOptionFlags.cmake
cmake/option/option_linux.cmake
libs/CMakeLists.txt
runtimes/CMakeLists.txt
runtimes/tests/CMakeLists.txt
runtimes/tests/bring_up_test/CMakeLists.txt [deleted file]
runtimes/tests/bring_up_test/add_main.cpp [deleted file]
runtimes/tests/bring_up_test/cplusplus_main.cpp [deleted file]
runtimes/tests/bring_up_test/simple_model.cpp [deleted file]
runtimes/tests/bring_up_test/simple_model.h [deleted file]
runtimes/tests/bring_up_test/simple_model_main.cpp [deleted file]
runtimes/tests/neural_networks_test/CMakeLists.txt

index c16a648..cef9440 100644 (file)
@@ -5,7 +5,6 @@ option(BUILD_ACL "Build ARM Compute Library" OFF)
 option(BUILD_PURE_ARM_COMPUTE "Build pure_arm_compute runtime" ON)
 option(BUILD_ACL_STATIC_LIB "Build ARM Comput Static Library" OFF)
 option(BUILD_BENCHMARK_ACL "Build ARM Compute Library Benchmarks" OFF)
-option(BUILD_NN_RUNTIME "Build NN Runtime" OFF)
 option(BUILD_NEURUN "Build neurun" OFF) #if implementation is done, it would replace nn runtime.
 option(BUILD_LABS "Build lab projects" ON)
 option(BUILD_ANDROID_NN_RUNTIME_TEST "Build Android NN Runtime Test" ON)
index ac89467..f668fad 100644 (file)
@@ -27,5 +27,3 @@ set(LIB_PTHREAD pthread)
 set(NNFW_INCLUDE_DIR ${CMAKE_SOURCE_DIR}/include)
 set(NNFW_EXTERNALS_DIR ${CMAKE_SOURCE_DIR}/externals)
 set(TFLITE_DEPEND_DIR ${NNFW_EXTERNALS_DIR}/tensorflow/tensorflow/contrib/lite/downloads)
-
-set(NNFW_NN_RUNTIME_ROOT ${CMAKE_SOURCE_DIR}/runtimes/nn)
index 8865a92..50335a7 100644 (file)
@@ -1,5 +1,2 @@
 add_subdirectory(util)
-if(BUILD_NN_RUNTIME)
-  add_subdirectory(kernel)
-endif(BUILD_NN_RUNTIME)
 add_subdirectory(support)
index 10235f0..3e1bdeb 100644 (file)
@@ -1,11 +1,5 @@
 set(BUILD_RUNTIME_TESTS OFF)
 
-if(BUILD_NN_RUNTIME)
-  set(LIB_RUNTIME runtime)
-  add_subdirectory(nn)
-  set(BUILD_RUNTIME_TESTS ON)
-endif(BUILD_NN_RUNTIME)
-
 if(BUILD_NEURUN)
   set(LIB_NEURUN neurun)
   add_subdirectory(neurun)
index 9b3d446..795c544 100644 (file)
@@ -2,9 +2,6 @@ set(NNRUNTIME_TEST_INC_COMMON ${NNFW_INCLUDE_DIR}
                               ${CMAKE_CURRENT_SOURCE_DIR}/include
 )
 
-if (BUILD_NN_RUNTIME)
-  add_subdirectory(bring_up_test)
-endif()
 if (BUILD_ANDROID_NN_RUNTIME_TEST)
   add_subdirectory(neural_networks_test)
 endif()
diff --git a/runtimes/tests/bring_up_test/CMakeLists.txt b/runtimes/tests/bring_up_test/CMakeLists.txt
deleted file mode 100644 (file)
index d9c1085..0000000
+++ /dev/null
@@ -1,22 +0,0 @@
-# Executable `runtime_run` (Dummy runner executable using NN API)
-SET (TEST_SRCS add_main.cpp)
-add_executable(runtime_run_add ${TEST_SRCS})
-target_include_directories(runtime_run_add PRIVATE ${NNRUNTIME_TEST_INC_COMMON})
-target_link_libraries(runtime_run_add ${LIB_RUNTIME})
-target_link_libraries(runtime_run_add ${LIB_PTHREAD})
-
-# Executable `runtime_run_cplusplus` (Dummy runner executable using C++ wrapper)
-SET (TEST_SRCS cplusplus_main.cpp)
-add_executable(runtime_run_cplusplus ${TEST_SRCS})
-target_include_directories(runtime_run_cplusplus PRIVATE ${NNRUNTIME_TEST_INC_COMMON})
-target_link_libraries(runtime_run_cplusplus ${LIB_RUNTIME})
-target_link_libraries(runtime_run_cplusplus ${LIB_PTHREAD})
-
-# Executable `runtime_sample_run` (Dummy runner executable for simple testing bring-up stage)
-SET (SAMPLE_SRCS simple_model_main.cpp
-                 simple_model.cpp)
-add_executable(runtime_run_simple_model ${SAMPLE_SRCS})
-target_include_directories(runtime_run_simple_model PRIVATE ${NNRUNTIME_TEST_INC_COMMON})
-target_include_directories(runtime_run_simple_model PRIVATE ${NNFW_NN_RUNTIME_ROOT}/depend/libcutils/include)
-target_link_libraries(runtime_run_simple_model ${LIB_RUNTIME})
-target_link_libraries(runtime_run_simple_model ${LIB_PTHREAD})
diff --git a/runtimes/tests/bring_up_test/add_main.cpp b/runtimes/tests/bring_up_test/add_main.cpp
deleted file mode 100644 (file)
index 32a1884..0000000
+++ /dev/null
@@ -1,117 +0,0 @@
-#include "NeuralNetworks.h"
-
-#include <iostream>
-
-template <typename T>
-void printValue(T value[][4])
-{
-  for(int y = 0; y < 3; y++)
-  {
-    std::cout << "[";
-    for(int x = 0; x < 4; x++)
-    {
-      std::cout << value[y][x] << " ";
-    }
-    std::cout << "] ";
-  }
-  std::cout << std::endl;
-}
-
-int main()
-{
-  // This is a simple test for bring-up stage
-  // TODO Remove this file when we have unit tests ready.
-
-  // Create model
-  ANeuralNetworksModel* model = NULL;
-  ANeuralNetworksModel_create(&model);
-
-  // Example : 3x4 ADD 3x4
-  // Define operand type
-  ANeuralNetworksOperandType tensor3x4Type;
-  tensor3x4Type.type = ANEURALNETWORKS_TENSOR_FLOAT32;
-  tensor3x4Type.scale = 0.f;    // These fields are useful for quantized tensors.
-  tensor3x4Type.zeroPoint = 0;  // These fields are useful for quantized tensors.
-  tensor3x4Type.dimensionCount = 2;
-  uint32_t dims[2] = {3, 4};
-  tensor3x4Type.dimensions = dims;
-
-  // Define Acivation
-  ANeuralNetworksOperandType activationType;
-  activationType.type = ANEURALNETWORKS_INT32;
-  activationType.scale = 0.f;
-  activationType.zeroPoint = 0;
-  activationType.dimensionCount = 0;
-  activationType.dimensions = NULL;
-
-  // Add operand
-  ANeuralNetworksModel_addOperand(model, &tensor3x4Type);  // operand 0
-  ANeuralNetworksModel_addOperand(model, &tensor3x4Type);  // operand 1
-  ANeuralNetworksModel_addOperand(model, &activationType); // operand 2, activation
-  
-  ANeuralNetworksModel_addOperand(model, &tensor3x4Type);  // operand 3, output
-
-  // Set operand value
-  float opValue[3][4] = {{2,3,4,5},{2,3,4,5},{2,3,4,5}};
-
-  ANeuralNetworksModel_setOperandValue(model, 1, &opValue, sizeof(opValue));
-  
-  // Set activation
-  int32_t nonValue = ANEURALNETWORKS_FUSED_NONE;
-  ANeuralNetworksModel_setOperandValue(model, 2, &nonValue, sizeof(nonValue));
-  
-  // Add operation
-  uint32_t addInputIndexes[3] = {1, 0, 2};
-  uint32_t addOutputIndexes[1] = {3};
-  ANeuralNetworksModel_addOperation(model, ANEURALNETWORKS_ADD, 3, addInputIndexes, 1, addOutputIndexes);
-
-  // Add input/output
-  uint32_t modelInputIndexes[1] = {0};
-  uint32_t modelOutputIndexes[1] = {3};
-  ANeuralNetworksModel_identifyInputsAndOutputs(model, 1, modelInputIndexes, 1, modelOutputIndexes);
-
-  // Finish model
-  ANeuralNetworksModel_finish(model);
-
-  // Compile model
-  ANeuralNetworksCompilation* compilation;
-  ANeuralNetworksCompilation_create(model, &compilation);
-
-  ANeuralNetworksCompilation_setPreference(compilation, ANEURALNETWORKS_PREFER_LOW_POWER);
-
-  ANeuralNetworksCompilation_finish(compilation);
-
-  // Execute model
-  ANeuralNetworksExecution* run = NULL;
-  ANeuralNetworksExecution_create(compilation, &run);
-
-  float myInput[3][4] = {{1,2,3,4},{1,2,3,4},{1,2,3,4}};
-  ANeuralNetworksExecution_setInput(run, 0, NULL, myInput, sizeof(myInput));
-
-  float myOutput[3][4];
-  ANeuralNetworksExecution_setOutput(run, 0, NULL, myOutput, sizeof(myOutput));
-
-  // Set event and run
-  ANeuralNetworksEvent* run_end = NULL;
-  ANeuralNetworksExecution_startCompute(run, &run_end);
-
-  ANeuralNetworksEvent_wait(run_end);
-  ANeuralNetworksEvent_free(run_end);
-  
-  // CleanUp
-  ANeuralNetworksExecution_free(run);
-  ANeuralNetworksCompilation_free(compilation);
-  ANeuralNetworksModel_free(model); 
-
-  // Print
-  std::cout << "my input :" << std::endl;
-  printValue(myInput);
-  
-  std::cout << "op value :" << std::endl;
-  printValue(opValue);
-
-  std::cout << "my output :" << std::endl;
-  printValue(myOutput);
-  
-  return 0;
-}
diff --git a/runtimes/tests/bring_up_test/cplusplus_main.cpp b/runtimes/tests/bring_up_test/cplusplus_main.cpp
deleted file mode 100644 (file)
index 8467ea1..0000000
+++ /dev/null
@@ -1,16 +0,0 @@
-#include "NeuralNetworksWrapper.h"
-
-using namespace nnfw::rt::wrapper;
-
-int main()
-{
-  // This is a simple test for bring-up stage
-  // TODO Remove this file when we have unit tests ready.
-
-  // Create model
-  Model model;
-
-  model.finish();
-
-  return 0;
-}
diff --git a/runtimes/tests/bring_up_test/simple_model.cpp b/runtimes/tests/bring_up_test/simple_model.cpp
deleted file mode 100644 (file)
index 86d34db..0000000
+++ /dev/null
@@ -1,469 +0,0 @@
-/**
- * Copyright 2017 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-#include "simple_model.h"
-
-#include <cutils/ashmem.h>
-#include <sys/mman.h>
-#include <string>
-#include <unistd.h>
-#include <iostream>
-
-/**
- * SimpleModel Constructor.
- *
- * Initialize the member variables, including the shared memory objects.
- */
-SimpleModel::SimpleModel(size_t size, int protect, int fd, size_t offset) :
-        model_(nullptr),
-        compilation_(nullptr),
-        dimLength_(TENSOR_SIZE),
-        modelDataFd_(fd),
-        offset_(offset) {
-    tensorSize_ = dimLength_;
-    inputTensor1_.resize(tensorSize_);
-
-    // Create ANeuralNetworksMemory from a file containing the trained data.
-    int32_t status = ANeuralNetworksMemory_createFromFd(size + offset, protect, fd, 0,
-                                                        &memoryModel_);
-    if (status != ANEURALNETWORKS_NO_ERROR) {
-        std::cerr << "ANeuralNetworksMemory_createFromFd failed for trained weights" << std::endl;
-        return;
-    }
-
-    // Create ASharedMemory to hold the data for the second input tensor and output output tensor.
-    inputTensor2Fd_ = ashmem_create_region("input2", tensorSize_ * sizeof(float));
-    outputTensorFd_ = ashmem_create_region("output", tensorSize_ * sizeof(float));
-
-    // Create ANeuralNetworksMemory objects from the corresponding ASharedMemory objects.
-    status = ANeuralNetworksMemory_createFromFd(tensorSize_ * sizeof(float),
-                                                PROT_READ,
-                                                inputTensor2Fd_, 0,
-                                                &memoryInput2_);
-    if (status != ANEURALNETWORKS_NO_ERROR) {
-        std::cerr << "ANeuralNetworksMemory_createFromFd failed for Input2" << std::endl;
-        return;
-    }
-    status = ANeuralNetworksMemory_createFromFd(tensorSize_ * sizeof(float),
-                                                PROT_READ | PROT_WRITE,
-                                                outputTensorFd_, 0,
-                                                &memoryOutput_);
-    if (status != ANEURALNETWORKS_NO_ERROR) {
-        std::cerr << "ANeuralNetworksMemory_createFromFd failed for Output" << std::endl;
-        return;
-    }
-}
-
-/**
- * Create a graph that consists of three operations: two additions and a
- * multiplication.
- * The sums created by the additions are the inputs to the multiplication. In
- * essence, we are creating a graph that computes:
- *        (tensor0 + tensor1) * (tensor2 + tensor3).
- *
- * tensor0 ---+
- *            +--- ADD ---> intermediateOutput0 ---+
- * tensor1 ---+                                    |
- *                                                 +--- MUL---> output
- * tensor2 ---+                                    |
- *            +--- ADD ---> intermediateOutput1 ---+
- * tensor3 ---+
- *
- * Two of the four tensors, tensor0 and tensor2 being added are constants, defined in the
- * model. They represent the weights that would have been learned during a training process.
- *
- * The other two tensors, tensor1 and tensor3 will be inputs to the model. Their values will be
- * provided when we execute the model. These values can change from execution to execution.
- *
- * Besides the two input tensors, an optional fused activation function can
- * also be defined for ADD and MUL. In this example, we'll simply set it to NONE.
- *
- * The graph then has 10 operands:
- *  - 2 tensors that are inputs to the model. These are fed to the two
- *      ADD operations.
- *  - 2 constant tensors that are the other two inputs to the ADD operations.
- *  - 1 fuse activation operand reused for the ADD operations and the MUL operation.
- *  - 2 intermediate tensors, representing outputs of the ADD operations and inputs to the
- *      MUL operation.
- *  - 1 model output.
- *
- * @return true for success, false otherwise
- */
-bool SimpleModel::CreateCompiledModel() {
-    int32_t status;
-
-    // Create the ANeuralNetworksModel handle.
-    status = ANeuralNetworksModel_create(&model_);
-    if (status != ANEURALNETWORKS_NO_ERROR) {
-        std::cerr << "ANeuralNetworksModel_create failed" << std::endl;
-        return false;
-    }
-
-    uint32_t dimensions[] = {dimLength_};
-    ANeuralNetworksOperandType float32TensorType{
-            .type = ANEURALNETWORKS_TENSOR_FLOAT32,
-            .dimensionCount = sizeof(dimensions) / sizeof(dimensions[0]),
-            .dimensions = dimensions,
-            .scale = 0.0f,
-            .zeroPoint = 0,
-    };
-    ANeuralNetworksOperandType scalarInt32Type{
-            .type = ANEURALNETWORKS_INT32,
-            .dimensionCount = 0,
-            .dimensions = nullptr,
-            .scale = 0.0f,
-            .zeroPoint = 0,
-    };
-
-    /**
-     * Add operands and operations to construct the model.
-     *
-     * Operands are implicitly identified by the order in which they are added to the model,
-     * starting from 0.
-     *
-     * These indexes are not returned by the model_addOperand call. The application must
-     * manage these values. Here, we use opIdx to do the bookkeeping.
-     */
-    uint32_t opIdx = 0;
-
-    // We first add the operand for the NONE activation function, and set its
-    // value to ANEURALNETWORKS_FUSED_NONE.
-    // This constant scalar operand will be used for all 3 operations.
-    status = ANeuralNetworksModel_addOperand(model_, &scalarInt32Type);
-    uint32_t fusedActivationFuncNone = opIdx++;
-    if (status != ANEURALNETWORKS_NO_ERROR) {
-        std::cerr << "ANeuralNetworksModel_addOperand failed for operand "<< fusedActivationFuncNone << std::endl;
-        return false;
-    }
-
-    FuseCode fusedActivationCodeValue = ANEURALNETWORKS_FUSED_NONE;
-    status = ANeuralNetworksModel_setOperandValue(
-            model_, fusedActivationFuncNone, &fusedActivationCodeValue,
-            sizeof(fusedActivationCodeValue));
-    if (status != ANEURALNETWORKS_NO_ERROR) {
-        std::cerr << "ANeuralNetworksModel_setOperandValue failed for operand " << fusedActivationFuncNone << std::endl;
-        return false;
-    }
-
-    // Add operands for the tensors.
-    status = ANeuralNetworksModel_addOperand(model_, &float32TensorType);
-    uint32_t tensor0 = opIdx++;
-    if (status != ANEURALNETWORKS_NO_ERROR) {
-        std::cerr << "ANeuralNetworksModel_addOperand failed for operand " << tensor0 << std::endl;
-        return false;
-    }
-    // tensor0 is a constant tensor that was established during training.
-    // We read these values from the corresponding ANeuralNetworksMemory object.
-    status = ANeuralNetworksModel_setOperandValueFromMemory(model_,
-                                                            tensor0,
-                                                            memoryModel_,
-                                                            offset_,
-                                                            tensorSize_ * sizeof(float));
-    if (status != ANEURALNETWORKS_NO_ERROR) {
-        std::cerr << "ANeuralNetworksModel_setOperandValueFromMemory failed for operand " << tensor0 << std::endl;
-        return false;
-    }
-
-    // tensor1 is one of the user provided input tensors to the trained model.
-    // Its value is determined pre-execution.
-    status = ANeuralNetworksModel_addOperand(model_, &float32TensorType);
-    uint32_t tensor1 = opIdx++;
-    if (status != ANEURALNETWORKS_NO_ERROR) {
-        std::cerr << "ANeuralNetworksModel_addOperand failed for operand " << tensor1 << std::endl;
-        return false;
-    }
-
-    // tensor2 is a constant tensor that was established during training.
-    // We read these values from the corresponding ANeuralNetworksMemory object.
-    status = ANeuralNetworksModel_addOperand(model_, &float32TensorType);
-    uint32_t tensor2 = opIdx++;
-    if (status != ANEURALNETWORKS_NO_ERROR) {
-        std::cerr << "ANeuralNetworksModel_addOperand failed for operand " << tensor2 << std::endl;
-        return false;
-    }
-    status = ANeuralNetworksModel_setOperandValueFromMemory(
-            model_, tensor2, memoryModel_, offset_ + tensorSize_ * sizeof(float),
-            tensorSize_ * sizeof(float));
-    if (status != ANEURALNETWORKS_NO_ERROR) {
-        std::cerr << "ANeuralNetworksModel_setOperandValueFromMemory failed for operand " << tensor2 << std::endl;
-        return false;
-    }
-
-    // tensor3 is one of the user provided input tensors to the trained model.
-    // Its value is determined pre-execution.
-    status = ANeuralNetworksModel_addOperand(model_, &float32TensorType);
-    uint32_t tensor3 = opIdx++;
-    if (status != ANEURALNETWORKS_NO_ERROR) {
-        std::cerr << "ANeuralNetworksModel_addOperand failed for operand " << tensor3 << std::endl;
-        return false;
-    }
-
-    // intermediateOutput0 is the output of the first ADD operation.
-    // Its value is computed during execution.
-    status = ANeuralNetworksModel_addOperand(model_, &float32TensorType);
-    uint32_t intermediateOutput0 = opIdx++;
-    if (status != ANEURALNETWORKS_NO_ERROR) {
-        std::cerr << "ANeuralNetworksModel_addOperand failed for operand " << intermediateOutput0 << std::endl;
-        return false;
-    }
-
-    // intermediateOutput1 is the output of the second ADD operation.
-    // Its value is computed during execution.
-    status = ANeuralNetworksModel_addOperand(model_, &float32TensorType);
-    uint32_t intermediateOutput1 = opIdx++;
-    if (status != ANEURALNETWORKS_NO_ERROR) {
-        std::cerr << "ANeuralNetworksModel_addOperand failed for operand " << intermediateOutput1 << std::endl;
-        return false;
-    }
-
-    // multiplierOutput is the output of the MUL operation.
-    // Its value will be computed during execution.
-    status = ANeuralNetworksModel_addOperand(model_, &float32TensorType);
-    uint32_t multiplierOutput = opIdx++;
-    if (status != ANEURALNETWORKS_NO_ERROR) {
-        std::cerr << "ANeuralNetworksModel_addOperand failed for operand " << multiplierOutput << std::endl;
-        return false;
-    }
-
-    // Add the first ADD operation.
-    std::vector<uint32_t> add1InputOperands = {
-            tensor0,
-            tensor1,
-            fusedActivationFuncNone,
-    };
-    status = ANeuralNetworksModel_addOperation(model_, ANEURALNETWORKS_ADD,
-                                               add1InputOperands.size(), add1InputOperands.data(),
-                                               1, &intermediateOutput0);
-    if (status != ANEURALNETWORKS_NO_ERROR) {
-        std::cerr << "ANeuralNetworksModel_addOperation failed for ADD_1" << std::endl;
-        return false;
-    }
-
-    // Add the second ADD operation.
-    // Note the fusedActivationFuncNone is used again.
-    std::vector<uint32_t> add2InputOperands = {
-            tensor2,
-            tensor3,
-            fusedActivationFuncNone,
-    };
-    status = ANeuralNetworksModel_addOperation(model_, ANEURALNETWORKS_ADD,
-                                               add2InputOperands.size(),add2InputOperands.data(),
-                                               1, &intermediateOutput1);
-    if (status != ANEURALNETWORKS_NO_ERROR) {
-        std::cerr << "ANeuralNetworksModel_addOperation failed for ADD_2" << std::endl;
-        return false;
-    }
-
-    // Add the MUL operation.
-    // Note that intermediateOutput0 and intermediateOutput1 are specified
-    // as inputs to the operation.
-    std::vector<uint32_t> mulInputOperands = {
-            intermediateOutput0,
-            intermediateOutput1,
-            fusedActivationFuncNone};
-    status = ANeuralNetworksModel_addOperation(model_, ANEURALNETWORKS_MUL,
-                                               mulInputOperands.size(),mulInputOperands.data(),
-                                               1, &multiplierOutput);
-    if (status != ANEURALNETWORKS_NO_ERROR) {
-        std::cerr << "ANeuralNetworksModel_addOperation failed for MUL" << std::endl;
-        return false;
-    }
-
-    // Identify the input and output tensors to the model.
-    // Inputs: {tensor1, tensor3}
-    // Outputs: {multiplierOutput}
-    std::vector<uint32_t> modelInputOperands = {
-            tensor1, tensor3,
-    };
-    status = ANeuralNetworksModel_identifyInputsAndOutputs(model_,
-                                                           modelInputOperands.size(),
-                                                           modelInputOperands.data(),
-                                                           1,
-                                                           &multiplierOutput);
-    if (status != ANEURALNETWORKS_NO_ERROR) {
-        std::cerr << "ANeuralNetworksModel_identifyInputsAndOutputs failed" << std::endl;
-        return false;
-    }
-
-    // Finish constructing the model.
-    // The values of constant and intermediate operands cannot be altered after
-    // the finish function is called.
-    status = ANeuralNetworksModel_finish(model_);
-    if (status != ANEURALNETWORKS_NO_ERROR) {
-        std::cerr << "ANeuralNetworksModel_finish failed" << std::endl;
-        return false;
-    }
-
-    // Create the ANeuralNetworksCompilation object for the constructed model.
-    status = ANeuralNetworksCompilation_create(model_, &compilation_);
-    if (status != ANEURALNETWORKS_NO_ERROR) {
-        std::cerr << "ANeuralNetworksCompilation_create failed" << std::endl;
-        return false;
-    }
-
-    // Set the preference for the compilation, so that the runtime and drivers
-    // can make better decisions.
-    // Here we prefer to get the answer quickly, so we choose
-    // ANEURALNETWORKS_PREFER_FAST_SINGLE_ANSWER.
-    status = ANeuralNetworksCompilation_setPreference(compilation_,
-                                                      ANEURALNETWORKS_PREFER_FAST_SINGLE_ANSWER);
-    if (status != ANEURALNETWORKS_NO_ERROR) {
-        std::cerr << "ANeuralNetworksCompilation_setPreference failed" << std::endl;
-        return false;
-    }
-
-    // Finish the compilation.
-    status = ANeuralNetworksCompilation_finish(compilation_);
-    if (status != ANEURALNETWORKS_NO_ERROR) {
-        std::cerr << "ANeuralNetworksCompilation_finish failed" << std::endl;
-        return false;
-    }
-
-    return true;
-}
-
-/**
- * Compute with the given input data.
- * @param modelInputs:
- *    inputValue1:   The values to fill tensor1
- *    inputValue2:   The values to fill tensor3
- * @return  computed result, or 0.0f if there is error.
- */
-bool SimpleModel::Compute(float inputValue1, float inputValue2,
-                          float *result) {
-    if (!result) {
-        return false;
-    }
-
-    // Create an ANeuralNetworksExecution object from the compiled model.
-    // Note:
-    //   1. All the input and output data are tied to the ANeuralNetworksExecution object.
-    //   2. Multiple concurrent execution instances could be created from the same compiled model.
-    // This sample only uses one execution of the compiled model.
-    ANeuralNetworksExecution *execution;
-    int32_t status = ANeuralNetworksExecution_create(compilation_, &execution);
-    if (status != ANEURALNETWORKS_NO_ERROR) {
-        std::cerr << "ANeuralNetworksExecution_create failed" << std::endl;
-        return false;
-    }
-
-    // Set all the elements of the first input tensor (tensor1) to the same value as inputValue1.
-    // It's not a realistic example but it shows how to pass a small tensor
-    // to an execution.
-    std::fill(inputTensor1_.data(), inputTensor1_.data() + tensorSize_,
-              inputValue1);
-
-    // Tell the execution to associate inputTensor1 to the first of the two model inputs.
-    // Note that the index "0" here means the first operand of the modelInput list
-    // {tensor1, tensor3}, which means tensor1.
-    status = ANeuralNetworksExecution_setInput(execution, 0, nullptr,
-                                               inputTensor1_.data(),
-                                               tensorSize_ * sizeof(float));
-    if (status != ANEURALNETWORKS_NO_ERROR) {
-        std::cerr << "ANeuralNetworksExecution_setInput failed for input1" << std::endl;
-        return false;
-    }
-
-    // Set the values of the the second input operand (tensor3) to be inputValue2.
-    // In reality, the values in the shared memory region will be manipulated by
-    // other modules or processes.
-    float *inputTensor2Ptr = reinterpret_cast<float *>(mmap(nullptr, tensorSize_ * sizeof(float),
-                                                            PROT_READ | PROT_WRITE, MAP_SHARED,
-                                                            inputTensor2Fd_, 0));
-    for (int i = 0; i < tensorSize_; i++) {
-        *inputTensor2Ptr = inputValue2;
-        inputTensor2Ptr++;
-    }
-    munmap(inputTensor2Ptr, tensorSize_ * sizeof(float));
-
-    // ANeuralNetworksExecution_setInputFromMemory associates the operand with a shared memory
-    // region to minimize the number of copies of raw data.
-    // Note that the index "1" here means the second operand of the modelInput list
-    // {tensor1, tensor3}, which means tensor3.
-    status = ANeuralNetworksExecution_setInputFromMemory(execution, 1, nullptr,
-                                                         memoryInput2_, 0,
-                                                         tensorSize_ * sizeof(float));
-    if (status != ANEURALNETWORKS_NO_ERROR) {
-        std::cerr << "ANeuralNetworksExecution_setInputFromMemory failed for input2" << std::endl;
-        return false;
-    }
-
-    // Set the output tensor that will be filled by executing the model.
-    // We use shared memory here to minimize the copies needed for getting the output data.
-    status = ANeuralNetworksExecution_setOutputFromMemory(execution, 0, nullptr,
-                                                          memoryOutput_, 0,
-                                                          tensorSize_ * sizeof(float));
-    if (status != ANEURALNETWORKS_NO_ERROR) {
-        std::cerr << "ANeuralNetworksExecution_setOutputFromMemory failed for output" << std::endl;
-        return false;
-    }
-
-    // Start the execution of the model.
-    // Note that the execution here is asynchronous, and an ANeuralNetworksEvent object will be
-    // created to monitor the status of the execution.
-    ANeuralNetworksEvent *event = nullptr;
-    status = ANeuralNetworksExecution_startCompute(execution, &event);
-    if (status != ANEURALNETWORKS_NO_ERROR) {
-        std::cerr << "ANeuralNetworksExecution_startCompute failed" << std::endl;
-        return false;
-    }
-
-    // Wait until the completion of the execution. This could be done on a different
-    // thread. By waiting immediately, we effectively make this a synchronous call.
-    status = ANeuralNetworksEvent_wait(event);
-    if (status != ANEURALNETWORKS_NO_ERROR) {
-        std::cerr << "ANeuralNetworksEvent_wait failed" << std::endl;
-        return false;
-    }
-
-    ANeuralNetworksEvent_free(event);
-    ANeuralNetworksExecution_free(execution);
-
-    // Validate the results.
-    const float goldenRef = (inputValue1 + 0.5f) * (inputValue2 + 0.5f);
-    float *outputTensorPtr = reinterpret_cast<float *>(mmap(nullptr,
-                                                            tensorSize_ * sizeof(float),
-                                                            PROT_READ, MAP_SHARED,
-                                                            outputTensorFd_, 0));
-    for (int32_t idx = 0; idx < tensorSize_; idx++) {
-        float delta = outputTensorPtr[idx] - goldenRef;
-        delta = (delta < 0.0f) ? (-delta) : delta;
-        if (delta > FLOAT_EPISILON) {
-            std::cerr << "Output computation Error: output" << idx  << "("  << outputTensorPtr[idx]
-                      << "), delta(" << delta
-                      << ") @ idx(" << idx
-                      << ")" << std::endl;
-        }
-    }
-    *result = outputTensorPtr[0];
-    munmap(outputTensorPtr, tensorSize_ * sizeof(float));
-    return result;
-}
-
-/**
- * SimpleModel Destructor.
- *
- * Release NN API objects and close the file descriptors.
- */
-SimpleModel::~SimpleModel() {
-    ANeuralNetworksCompilation_free(compilation_);
-    ANeuralNetworksModel_free(model_);
-    ANeuralNetworksMemory_free(memoryModel_);
-    ANeuralNetworksMemory_free(memoryInput2_);
-    ANeuralNetworksMemory_free(memoryOutput_);
-    close(inputTensor2Fd_);
-    close(outputTensorFd_);
-    close(modelDataFd_);
-}
diff --git a/runtimes/tests/bring_up_test/simple_model.h b/runtimes/tests/bring_up_test/simple_model.h
deleted file mode 100644 (file)
index bc61f59..0000000
+++ /dev/null
@@ -1,63 +0,0 @@
-/**
- * Copyright 2017 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef NNAPI_SIMPLE_MODEL_H
-#define NNAPI_SIMPLE_MODEL_H
-
-#include "NeuralNetworks.h"
-#include <vector>
-
-#define FLOAT_EPISILON (1e-6)
-#define TENSOR_SIZE 200
-
-/**
- * SimpleModel
- * Build up the hardcoded graph of
- *   ADD_1 ---+
- *            +--- MUL--->output result
- *   ADD_2 ---+
- *
- *   Operands are all 2-D TENSOR_FLOAT32 of:
- *       dimLength x dimLength
- *   with NO fused_activation operation
- *
- */
-class SimpleModel {
-public:
-    explicit SimpleModel(size_t size, int protect, int fd, size_t offset);
-    ~SimpleModel();
-
-    bool CreateCompiledModel();
-    bool Compute(float inputValue1, float inputValue2, float *result);
-
-private:
-    ANeuralNetworksModel *model_;
-    ANeuralNetworksCompilation *compilation_;
-    ANeuralNetworksMemory *memoryModel_;
-    ANeuralNetworksMemory *memoryInput2_;
-    ANeuralNetworksMemory *memoryOutput_;
-
-    uint32_t dimLength_;
-    uint32_t tensorSize_;
-    size_t offset_;
-
-    std::vector<float> inputTensor1_;
-    int modelDataFd_;
-    int inputTensor2Fd_;
-    int outputTensorFd_;
-};
-
-#endif  // NNAPI_SIMPLE_MODEL_H
diff --git a/runtimes/tests/bring_up_test/simple_model_main.cpp b/runtimes/tests/bring_up_test/simple_model_main.cpp
deleted file mode 100644 (file)
index edb6f0f..0000000
+++ /dev/null
@@ -1,35 +0,0 @@
-#include <string>
-#include <fcntl.h>
-#include <sys/mman.h>
-#include <unistd.h>
-#include <iostream>
-
-#include "simple_model.h"
-
-int main()
-{
-    // This is a simple test for bring-up stage
-    // TODO Remove this file when we have unit tests ready.
-
-    const char *fname = "model_data.bin";
-    float input1 = 1.0f;
-    float input2 = 2.0f;
-    int fd = open(fname, O_RDONLY);
-    off_t offset = 0;
-    off_t length = lseek(fd, 0, SEEK_END);
-
-    SimpleModel* nn_model = new SimpleModel(length, PROT_READ, fd, offset);
-    if (!nn_model->CreateCompiledModel()) {
-        std::cerr << "Failed to prepare the model." << std::endl;
-        return 1;
-    }
-
-    float result = 0.0f;
-    nn_model->Compute(input1, input2, &result);
-
-    std::cout << "result : " << result << std::endl;
-
-    delete(nn_model);
-
-    return 0;
-}
index 6494503..caa1f18 100644 (file)
@@ -24,9 +24,7 @@ target_include_directories(${RUNTIME_ANDROID_NN_TEST} PRIVATE
                               ${NNRUNTIME_TEST_INC_COMMON}
                               ${CMAKE_CURRENT_SOURCE_DIR}
                               ${CMAKE_CURRENT_SOURCE_DIR}/include)
-if (BUILD_NN_RUNTIME)
-  target_link_libraries(${RUNTIME_ANDROID_NN_TEST} ${LIB_RUNTIME})
-elseif (BUILD_NEURUN)
+if (BUILD_NEURUN)
   target_link_libraries(${RUNTIME_ANDROID_NN_TEST} ${LIB_NEURUN})
 elseif (BUILD_PURE_ARM_COMPUTE)
   target_link_libraries(${RUNTIME_ANDROID_NN_TEST} ${LIB_PUREACL_RUNTIME})