Remove remaining ExecutionPlan code from ExecutionBuilder (#723)
author Sujin Kim/Motion Control Lab(SR)/Engineer/Samsung Electronics <sjsujin.kim@samsung.com>
Tue, 17 Apr 2018 01:25:49 +0000 (10:25 +0900)
committer Sangmin Seo/Motion Control Lab(SR)/Senior Engineer/Samsung Electronics <sangmin7.seo@samsung.com>
Tue, 17 Apr 2018 01:25:49 +0000 (10:25 +0900)
This commit removes the remaining ExecutionPlan-related code from ExecutionBuilder. The deleted blocks were already disabled under #if 0 // REF-ANN guards and are not used by the reference runtime.

Signed-off-by: sjsujinkim <sjsujin.kim@samsung.com>
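
For reference, a minimal sketch of the resulting CPU-only path, assembled from the diff below (all identifiers come from the diff itself; the explanatory comments are not part of the patch):

    // ExecutionBuilder::startCompute() after this change: the ExecutionPlan
    // branch is gone, so execution always goes through a single StepExecutor
    // that runs the whole model on the CPU.
    int ExecutionBuilder::startCompute(sp<ExecutionCallback>* synchronizationCallback) {
        VLOG(EXECUTION) << "ExecutionBuilder::startCompute (without plan) on CPU";
        StepExecutor executor(this, mModel);
        // Copy the builder's inputs, outputs and memory pools onto the executor 1:1.
        executor.mapInputsAndOutputsTrivially();
        // StepExecutor::startCompute() simply forwards to startComputeOnCpu().
        return executor.startCompute(synchronizationCallback);
    }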
src/runtime/ref/nn/runtime/ExecutionBuilder.cpp
src/runtime/ref/nn/runtime/ExecutionBuilder.h

src/runtime/ref/nn/runtime/ExecutionBuilder.cpp
index 9e0e04a..7edd5b0 100644
@@ -89,9 +89,6 @@ int ModelArgumentInfo::updateDimensionInfo(const Operand& operand,
 
 ExecutionBuilder::ExecutionBuilder(const CompilationBuilder* compilation) :
         mModel(compilation->mModel),
-#if 0 // REF-ANN
-        mPlan(&compilation->mPlan),
-#endif
         mInputs(mModel->inputCount()),
         mOutputs(mModel->outputCount()) {
     VLOG(EXECUTION) << "ExecutionBuilder::ExecutionBuilder";
@@ -181,48 +178,11 @@ int ExecutionBuilder::setOutputFromMemory(uint32_t index, const ANeuralNetworksO
 int ExecutionBuilder::startCompute(sp<ExecutionCallback>* synchronizationCallback) {
     // Run on the CPU.
     VLOG(EXECUTION) << "ExecutionBuilder::startCompute (without plan) on CPU";
-#if 0 // REF-ANN
-    StepExecutor executor(this, mModel,
-                          nullptr /* no IDevice, so CPU */,
-                          nullptr /* no IPreparedModel */);
-#endif // REF-ANN
     StepExecutor executor(this, mModel);
     executor.mapInputsAndOutputsTrivially();
     return executor.startCompute(synchronizationCallback);
 }
 
-// TODO-NNRT: Consider removing StepExecutor completely if it's not necessary
-#if 0 // REF-ANN
-// Figures out how to place each of the input or outputs in a buffer. This just does the layout,
-// it does not copy data.  Aligns each input a bit.
-int StepExecutor::allocatePointerArgumentsToPool(std::vector<ModelArgumentInfo>* args,
-                                                 Memory* memory) {
-    uint32_t nextPoolIndex = mMemories.size();
-    int64_t total = 0;
-    for (auto& info : *args) {
-        if (info.state == ModelArgumentInfo::POINTER) {
-            DataLocation& loc = info.locationAndLength;
-            // TODO Good enough alignment?
-            total += alignBytesNeeded(static_cast<uint32_t>(total), loc.length);
-            loc.poolIndex = nextPoolIndex;
-            loc.offset = static_cast<uint32_t>(total);
-            total += loc.length;
-        }
-    };
-    if (total > 0xFFFFFFFF) {
-        LOG(ERROR) << "ANeuralNetworksExecution_startCompute Size of all inputs or outputs exceeds "
-                      "2^32.";
-        return ANEURALNETWORKS_BAD_DATA;
-    }
-    hidl_memory hidlMemory;
-    if (total > 0) {
-        memory->create(total);  // TODO check error
-        mMemories.add(memory);
-    }
-    return ANEURALNETWORKS_NO_ERROR;
-}
-#endif // REF-ANN
-
 static void setRequestArgumentArray(const std::vector<ModelArgumentInfo>& argumentInfos,
                                      hidl_vec<RequestArgument>* ioInfos) {
     size_t count = argumentInfos.size();
@@ -245,29 +205,7 @@ void StepExecutor::mapInputsAndOutputsTrivially() {
     mOutputs = mExecutionBuilder->mOutputs;
     mMemories = mExecutionBuilder->mMemories;
 }
-#if 0 // REF-ANN
-void StepExecutor::mapInputOrOutput(const ModelArgumentInfo& builderInputOrOutput,
-                                    ModelArgumentInfo* executorInputOrOutput) {
-    *executorInputOrOutput = builderInputOrOutput;
-    switch (executorInputOrOutput->state) {
-        default:
-            nnAssert(!"unexpected ModelArgumentInfo::state");
-        case ModelArgumentInfo::POINTER:
-        case ModelArgumentInfo::UNSPECIFIED:
-            break;
-        case ModelArgumentInfo::MEMORY: {
-            const uint32_t builderPoolIndex =
-                    builderInputOrOutput.locationAndLength.poolIndex;
-            const Memory* memory = mExecutionBuilder->mMemories[builderPoolIndex];
-            const uint32_t executorPoolIndex = mMemories.add(memory);
-            executorInputOrOutput->locationAndLength.poolIndex =
-                    executorPoolIndex;
-            break;
-        }
-    }
-}
 
-#endif // REF-ANN
 int StepExecutor::startCompute(sp<ExecutionCallback>* synchronizationCallback) {
     // Run on CPU only
     return startComputeOnCpu(synchronizationCallback);
src/runtime/ref/nn/runtime/ExecutionBuilder.h
index 9e2f05a..aee7425 100644
@@ -33,10 +33,6 @@ using ::android::hardware::neuralnetworks::V1_0::implementation::ExecutionCallba
 namespace android {
 namespace nn {
 
-#if 0 // REF-ANN
-class CompilationBuilder;
-class ExecutionPlan;
-#endif
 class Memory;
 class ModelBuilder;
 class StepExecutor;
@@ -86,9 +82,6 @@ public:
 
 private:
     const ModelBuilder* mModel;
-#if 0 // REF-ANN : ExecutionPlan is not yet considered.
-    const ExecutionPlan* mPlan;
-#endif // REF-ANN
     // The information we'll send to the driver about the inputs and outputs.
     // Note that we build this in two steps:
     // 1. As the arguments are specified, set the corresponding mInputs or mOutputs element.
@@ -121,47 +114,19 @@ public:
     // in the case where we have a single-"step" execution (i.e., the executor
     // is executing the entire model from the ExecutionBuilder).
     void mapInputsAndOutputsTrivially();
-#if 0 // REF-ANN
-    // Map inputs and outputs from ExecutionBuilder to StepExecutor,
-    // one at a time.  Note that these are input/output indexes, not
-    // operand indexes.
-    void mapInput(uint32_t builderIndex, uint32_t executorIndex) {
-        mapInputOrOutput(mExecutionBuilder->mInputs[builderIndex],
-                         &mInputs[executorIndex]);
-    }
-    void mapOutput(uint32_t builderIndex, uint32_t executorIndex) {
-        mapInputOrOutput(mExecutionBuilder->mOutputs[builderIndex],
-                         &mOutputs[executorIndex]);
-    }
-
-#endif // REF-ANN
     // Executes using the (driver, preparedModel) specified at construction time.
     int startCompute(sp<ExecutionCallback>* synchronizationCallback);
 
     // Executes using the CPU, regardless of the (driver,
     // preparedModel) specified at construction time.
     int startComputeOnCpu(sp<ExecutionCallback>* synchronizationCallback);
-#if 0 // REF-ANN
-    bool isCpu() const { return mDriver == nullptr; }
 
 private:
-    int allocatePointerArgumentsToPool(std::vector<ModelArgumentInfo>* args, Memory* memory);
-
-    void mapInputOrOutput(const ModelArgumentInfo& builderInputOrOutput,
-                          ModelArgumentInfo* executorInputOrOutput);
-
-    // describes the full (possibly multiple-"step") execution
-#endif // REF-ANN
-private:
     const ExecutionBuilder* mExecutionBuilder;
 
     // model to be executed on the executor, in both original and
     // compiled forms; and device on which to execute it
     const ModelBuilder* mModel;
-#if 0 // REF-ANN
-    sp<IDevice> mDriver;                // nullptr if CPU execution
-    sp<IPreparedModel> mPreparedModel;  // nullptr if CPU execution or if bypassing ExecutionPlan
-#endif
 
     // The information we'll send to the driver about the inputs and outputs.
     // Note that we build this in two steps: