Implement ANeuralNetworksModel_create
author sjsujinkim <sjsujin.kim@samsung.com>
Wed, 21 Mar 2018 07:42:44 +0000 (16:42 +0900)
committer 최형규/동작제어Lab(SR)/Senior Engineer/삼성전자 <hk0110.choi@samsung.com>
Wed, 21 Mar 2018 11:55:23 +0000 (20:55 +0900)
This commit implements ANeuralNetworksModel_create and adds skeletons of ModelBuilder.

Signed-off-by: sjsujinkim <sjsujin.kim@samsung.com>
src/runtime/ref/nn/runtime/ModelBuilder.cpp [new file with mode: 0644]
src/runtime/ref/nn/runtime/ModelBuilder.h [new file with mode: 0644]
src/runtime/ref/nn/runtime/NeuralNetworks.cpp

diff --git a/src/runtime/ref/nn/runtime/ModelBuilder.cpp b/src/runtime/ref/nn/runtime/ModelBuilder.cpp
new file mode 100644 (file)
index 0000000..3794b26
--- /dev/null
@@ -0,0 +1,402 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+
+#include "ModelBuilder.h"
+
+#if 0
+#define LOG_TAG "ModelBuilder"
+
+#include "CompilationBuilder.h"
+#include "Utils.h"
+
+#include <map>
+#include <utility>
+#endif
+namespace android {
+namespace nn {
+
+// The maximum number of operands and operations that a model may have.
+#if 0
+const uint32_t MAX_NUMBER_OF_OPERANDS = 0xFFFFFFFE;
+const uint32_t MAX_NUMBER_OF_OPERATIONS = 0xFFFFFFFE;
+#endif
+
+// Registers one operand described by |type| with the model and returns a
+// result code. Stub for now: the real implementation (validation, growth
+// of mOperands, lifetime/location initialization) is kept under #if 0
+// until the required utility/HAL headers are available in this tree.
+int ModelBuilder::addOperand(const ANeuralNetworksOperandType& type) {
+    // Dummy Implementation
+    return 0;
+
+#if 0
+    if (mCompletedModel) {
+        LOG(ERROR) << "ANeuralNetworksModel_addOperand can't modify after model finished";
+        return ANEURALNETWORKS_BAD_DATA;
+    }
+    int n = validateOperandType(type, "ANeuralNetworksModel_addOperand", true);
+    if (n != ANEURALNETWORKS_NO_ERROR) {
+        return n;
+    }
+    size_t idx = mOperands.size();
+    if (idx >= MAX_NUMBER_OF_OPERANDS) {
+        LOG(ERROR) << "ANeuralNetworksModel_addOperand exceed max operands";
+        return ANEURALNETWORKS_BAD_DATA;
+    }
+    mOperands.resize(idx + 1);
+    auto& operand = mOperands[idx];
+    operand.type = static_cast<OperandType>(type.type);
+    setFromIntList(&operand.dimensions, type.dimensionCount, type.dimensions);
+    operand.numberOfConsumers = 0;
+    operand.scale = type.scale;
+    operand.zeroPoint = type.zeroPoint;
+    // New operands start life as temporaries; setOperandValue() or
+    // identifyInputsAndOutputs() may change the lifetime later.
+    operand.lifetime = OperandLifeTime::TEMPORARY_VARIABLE;
+    operand.location = {.poolIndex = 0, .offset = 0, .length = 0};
+    return ANEURALNETWORKS_NO_ERROR;
+#endif
+}
+
+// Sets the constant value of operand |index|. A null |buffer| with zero
+// |length| marks the operand as having no value. Small values are copied
+// into an internal pool immediately; large values are only referenced
+// here and copied to shared memory when the model is finished.
+// Stub for now; the real logic is disabled below.
+int ModelBuilder::setOperandValue(uint32_t index, const void* buffer, size_t length) {
+    // Dummy Implementation
+    return 0;
+#if 0
+    VLOG(MODEL) << __func__ << " for operand " << index << " size " << length;
+    if (index >= operandCount()) {
+        LOG(ERROR) << "ANeuralNetworksModel_setOperandValue setting operand " << index << " of "
+                   << operandCount();
+        return ANEURALNETWORKS_BAD_DATA;
+    }
+    Operand& operand = mOperands[index];
+    if (buffer == nullptr) {
+        if (length) {
+            LOG(ERROR) << "ANeuralNetworksModel_setOperandValue buffer is nullptr but length is "
+                          "not 0";
+            return ANEURALNETWORKS_BAD_DATA;
+        }
+        operand.lifetime = OperandLifeTime::NO_VALUE;
+        // The location is unused and is set to zeros.
+        operand.location = {.poolIndex = 0,
+                            .offset = 0,
+                            .length = 0};
+    } else {
+        if (length > 0xFFFFFFFF) {
+            LOG(ERROR) << "ANeuralNetworksModel_setOperandValue value length of " << length
+                       << " exceeds max size";
+            return ANEURALNETWORKS_BAD_DATA;
+        }
+        uint32_t valueLength = static_cast<uint32_t>(length);
+        uint32_t neededLength = sizeOfData(operand.type, operand.dimensions);
+        if (neededLength != valueLength) {
+            LOG(ERROR) << "ANeuralNetworksModel_setOperandValue setting " << valueLength
+                       << " bytes when needing " << neededLength;
+            return ANEURALNETWORKS_BAD_DATA;
+        }
+        if (valueLength <= ANEURALNETWORKS_MAX_SIZE_OF_IMMEDIATELY_COPIED_VALUES) {
+            // Pad the small-value pool so the new value is aligned, then copy.
+            uint32_t existingSize = static_cast<uint32_t>(mSmallOperandValues.size());
+            uint32_t extraBytes = alignBytesNeeded(existingSize, valueLength);
+            mSmallOperandValues.resize(existingSize + extraBytes + valueLength);
+            operand.lifetime = OperandLifeTime::CONSTANT_COPY;
+            operand.location = {
+                .poolIndex = 0, .offset = existingSize + extraBytes, .length = neededLength};
+            memcpy(&mSmallOperandValues[operand.location.offset], buffer, valueLength);
+            VLOG(MODEL) << "Copied small value to offset " << operand.location.offset;
+        } else {
+            VLOG(MODEL) << "Saving large value";
+            // Only the caller's pointer is stored here, so the buffer must
+            // stay alive until finish() copies it to shared memory.
+            operand.lifetime = OperandLifeTime::CONSTANT_REFERENCE;
+            // The values for poolIndex and offset will be set when the model is finished.
+            operand.location = {.poolIndex = 0, .offset = 0, .length = valueLength};
+            // We keep track of the buffers. We'll allocate the shared memory only
+            // once we know the total size, to avoid needless copies.
+            mLargeOperandValues.push_back(LargeValue{.operandIndex = index, .buffer = buffer});
+        }
+    }
+    return ANEURALNETWORKS_NO_ERROR;
+#endif
+}
+
+// Allocates one shared memory pool big enough for all large constant
+// values recorded by setOperandValue() and copies them into it, fixing up
+// each operand's poolIndex/offset. Stub for now; real logic disabled below.
+int ModelBuilder::copyLargeValuesToSharedMemory() {
+    // Dummy Implementation
+    return 0;
+#if 0
+    VLOG(MODEL) << __func__ << " has " << mLargeOperandValues.size() << " values.";
+    if (!mLargeOperandValues.empty()) {
+        // Calculate the size of the shared memory needed for all the large values.
+        // Also sets the offset for each value within the memory.
+        size_t poolSize = 0;
+        for (LargeValue& l: mLargeOperandValues) {
+            Operand& operand = mOperands[l.operandIndex];
+            nnAssert(operand.lifetime == OperandLifeTime::CONSTANT_REFERENCE);
+            // Pad so each value starts at a properly aligned offset.
+            poolSize += alignBytesNeeded(poolSize, operand.location.length);
+            operand.location.offset = poolSize;
+            poolSize += operand.location.length;
+        }
+
+        // Allocate the shared memory.
+        int n = mLargeValueMemory.create(poolSize);
+        if (n != ANEURALNETWORKS_NO_ERROR) {
+            return n;
+        }
+        uint8_t* memoryPointer = nullptr;
+        n = mLargeValueMemory.getPointer(&memoryPointer);
+        if (n != ANEURALNETWORKS_NO_ERROR) {
+            return n;
+        }
+        uint32_t poolIndex = mMemories.add(&mLargeValueMemory);
+        VLOG(MODEL) << "Allocated large value pool of size " << poolSize << " at index "
+                    << poolIndex;
+
+        // Copy the values to this memory.
+        for (LargeValue& l: mLargeOperandValues) {
+            Operand& operand = mOperands[l.operandIndex];
+            operand.location.poolIndex = poolIndex;
+            memcpy(memoryPointer + operand.location.offset, l.buffer, operand.location.length);
+        }
+    }
+    return ANEURALNETWORKS_NO_ERROR;
+#endif
+}
+#if 0 // Memory.h is needed
+// Associates operand |index| with |length| bytes at |offset| inside an
+// already-registered Memory pool; the data is referenced, not copied.
+int ModelBuilder::setOperandValueFromMemory(uint32_t index, const Memory* memory, uint32_t offset,
+                                            size_t length) {
+    // Dummy Implementation
+    return 0;
+#if 0
+    VLOG(MODEL) << __func__ << " for operand " << index << " offset " << offset << " size " << length;
+    if (index >= operandCount()) {
+        LOG(ERROR) << "ANeuralNetworksModel_setOperandValueFromMemory setting operand " << index
+                   << " of " << operandCount();
+        return ANEURALNETWORKS_BAD_DATA;
+    }
+    Operand& operand = mOperands[index];
+    uint32_t neededLength = sizeOfData(operand.type, operand.dimensions);
+    if (neededLength != length) {
+        LOG(ERROR) << "ANeuralNetworksModel_setOperandValueFromMemory setting " << length
+                   << " bytes when needing " << neededLength;
+        return ANEURALNETWORKS_BAD_DATA;
+    }
+    // TODO validate does not exceed length of memory
+    operand.lifetime = OperandLifeTime::CONSTANT_REFERENCE;
+    operand.location = {
+                .poolIndex = mMemories.add(memory), .offset = offset, .length = neededLength};
+    return ANEURALNETWORKS_NO_ERROR;
+#endif
+}
+#endif
+
+// Adds an operation of the given |type| that consumes the operands listed
+// in |inputs| and produces those in |outputs|. Stub for now: validation
+// and bookkeeping below are disabled until the utility headers land.
+int ModelBuilder::addOperation(ANeuralNetworksOperationType type, uint32_t inputCount,
+                               const uint32_t* inputs, uint32_t outputCount,
+                               const uint32_t* outputs) {
+    // Dummy Implementation
+    return 0;
+#if 0
+    if (mCompletedModel) {
+        LOG(ERROR) << "ANeuralNetworksModel_addOperation can't modify after model finished";
+        return ANEURALNETWORKS_BAD_DATA;
+    }
+    if (!validCode(kNumberOfOperationTypes, kNumberOfOperationTypesOEM, type)) {
+        LOG(ERROR) << "ANeuralNetworksModel_addOperation invalid operations type " << type;
+        return ANEURALNETWORKS_BAD_DATA;
+    }
+    int n = validateOperandList(inputCount, inputs, operandCount(),
+                                "ANeuralNetworksModel_addOperation inputs");
+    if (n != ANEURALNETWORKS_NO_ERROR) {
+        return n;
+    }
+    n = validateOperandList(outputCount, outputs, operandCount(),
+                            "ANeuralNetworksModel_addOperation outputs");
+    if (n != ANEURALNETWORKS_NO_ERROR) {
+        return n;
+    }
+
+    uint32_t operationIndex = operationCount();
+    if (operationIndex >= MAX_NUMBER_OF_OPERATIONS) {
+        LOG(ERROR) << "ANeuralNetworksModel_addOperation exceed max operations";
+        return ANEURALNETWORKS_BAD_DATA;
+    }
+    mOperations.resize(operationIndex + 1);
+    auto& entry = mOperations[operationIndex];
+    entry.type = static_cast<OperationType>(type);
+
+    setFromIntList(&entry.inputs, inputCount, inputs);
+    setFromIntList(&entry.outputs, outputCount, outputs);
+    // Consumer counts feed the readiness bookkeeping in sortIntoRunOrder().
+    for (uint32_t i : entry.inputs) {
+        mOperands[i].numberOfConsumers++;
+        // TODO mOperands[i].consumers.push_back(operationIndex);
+    }
+    return ANEURALNETWORKS_NO_ERROR;
+#endif
+}
+
+// Declares which operand indexes are the model's inputs and outputs and
+// updates their lifetimes accordingly. Stub for now: the validation logic
+// below stays disabled until the utility/HAL headers are available.
+// Fix: corrected the "numbe of operands" typo in the error message of the
+// disabled implementation so the log is right when it is enabled.
+int ModelBuilder::identifyInputsAndOutputs(uint32_t inputCount, const uint32_t* inputs,
+                                      uint32_t outputCount, const uint32_t* outputs) {
+    // Dummy Implementation
+    return 0;
+#if 0
+    if (mCompletedModel) {
+        LOG(ERROR) << "ANeuralNetworksModel_identifyInputsAndOutputs can't modify after model finished";
+        return ANEURALNETWORKS_BAD_DATA;
+    }
+    int n = validateOperandList(inputCount, inputs, operandCount(),
+                                "ANeuralNetworksModel_identifyInputsAndOutputs inputs");
+    if (n != ANEURALNETWORKS_NO_ERROR) {
+        return n;
+    }
+    n = validateOperandList(outputCount, outputs, operandCount(),
+                            "ANeuralNetworksModel_identifyInputsAndOutputs outputs");
+    if (n != ANEURALNETWORKS_NO_ERROR) {
+        return n;
+    }
+
+    // Makes a copy of the index list, validates the arguments, and changes
+    // the lifetime info of the corresponding operand.
+    auto setArguments = [&](std::vector<uint32_t>* indexVector, uint32_t indexCount,
+                            const uint32_t* indexList, OperandLifeTime lifetime) -> bool {
+        indexVector->resize(indexCount);
+        for (uint32_t i = 0; i < indexCount; i++) {
+            const uint32_t operandIndex = indexList[i];
+            if (operandIndex >= mOperands.size()) {
+                LOG(ERROR) << "ANeuralNetworksModel_identifyInputsAndOutputs Can't set input or output "
+                              "to be "
+                           << operandIndex << " as this exceeds the number of operands "
+                           << mOperands.size();
+                return false;
+            }
+            (*indexVector)[i] = operandIndex;
+            Operand& operand = mOperands[operandIndex];
+            // Only temporaries may become inputs/outputs: constants and
+            // operands already marked as I/O are rejected.
+            if (operand.lifetime != OperandLifeTime::TEMPORARY_VARIABLE) {
+                LOG(ERROR) << "ANeuralNetworksModel_identifyInputsAndOutputs Can't set operand "
+                           << operandIndex
+                           << " to be an input or output.  Check that it's not a constant or "
+                              "already an input or output";
+                return false;
+            }
+            operand.lifetime = lifetime;
+        }
+        return true;
+    };
+
+    if (!setArguments(&mInputIndexes, inputCount, inputs, OperandLifeTime::MODEL_INPUT) ||
+        !setArguments(&mOutputIndexes, outputCount, outputs, OperandLifeTime::MODEL_OUTPUT)) {
+        return ANEURALNETWORKS_BAD_DATA;
+    }
+
+    return ANEURALNETWORKS_NO_ERROR;
+#endif
+}
+#if 0 // CompilationBuilder is needed.
+// Creates a CompilationBuilder for this model; the model must have been
+// finished first. The caller takes ownership of *compilation.
+int ModelBuilder::createCompilation(CompilationBuilder** compilation) {
+    // Dummy Implementation
+    return 0;
+#if 0
+    if (!mCompletedModel) {
+        LOG(ERROR) << "ANeuralNetworksCompilation_create passed an unfinished model";
+        *compilation = nullptr;
+        return ANEURALNETWORKS_BAD_STATE;
+    }
+    *compilation = new CompilationBuilder(this);
+    return (*compilation ? ANEURALNETWORKS_NO_ERROR : ANEURALNETWORKS_OUT_OF_MEMORY);
+#endif
+}
+#endif
+// Marks the model as complete. After a successful finish() the model can
+// no longer be modified. Stub for now; the real logic (copying large
+// values to shared memory and ordering operations) is disabled below.
+int ModelBuilder::finish() {
+    // Dummy Implementation
+    return 0;
+#if 0
+    if (mCompletedModel) {
+        LOG(ERROR) << "ANeuralNetworksModel_finish called more than once";
+        return ANEURALNETWORKS_BAD_STATE;
+    }
+
+    int n = copyLargeValuesToSharedMemory();
+    if (n != ANEURALNETWORKS_NO_ERROR) {
+        return n;
+    }
+
+    // We sort the operations so that they will be in the appropriate
+    // order for a single-threaded, op at a time execution.
+    // TODO: we don't need this if we always run the partitioner.
+    sortIntoRunOrder();
+    mCompletedModel = true;
+    return ANEURALNETWORKS_NO_ERROR;
+#endif
+}
+
+// Topologically sorts mOperations (Kahn-style ready-list algorithm) so
+// each operation appears after the operations that produce its inputs,
+// enabling single-threaded node-at-a-time execution.
+// Stub for now; the real logic is disabled below.
+void ModelBuilder::sortIntoRunOrder() {
+    // Dummy Implementation
+#if 0
+    // Tracks the operations that can be executed.
+    std::vector<uint32_t> opsReadyToRun;
+    std::vector<Operation> runOrder;
+
+    // Tracks how many inputs are needed for each operation to be ready to run.
+    std::multimap<uint32_t, uint32_t> operandToOperations;
+    std::vector<uint32_t> unknownInputCount(operationCount());
+    for (uint32_t operationIndex = 0; operationIndex < operationCount(); operationIndex++) {
+        uint32_t& count = unknownInputCount[operationIndex];
+        count = 0;
+        for (uint32_t operandIndex : mOperations[operationIndex].inputs) {
+            auto lifetime = mOperands[operandIndex].lifetime;
+            // Only operands produced by another operation gate readiness;
+            // constants and model inputs are available from the start.
+            if (lifetime == OperandLifeTime::TEMPORARY_VARIABLE ||
+                lifetime == OperandLifeTime::MODEL_OUTPUT) {
+                count++;
+                operandToOperations.insert(
+                            std::pair<uint32_t, uint32_t>(operandIndex, operationIndex));
+            }
+        }
+        if (count == 0) {
+            opsReadyToRun.push_back(operationIndex);
+        }
+    }
+
+    while (opsReadyToRun.size() > 0) {
+        // Execute the next op
+        int opIndex = opsReadyToRun.back();
+        opsReadyToRun.pop_back();
+        const Operation& operation = mOperations[opIndex];
+
+        runOrder.push_back(mOperations[opIndex]);
+
+        // Mark all its outputs as known.
+        for (uint32_t operandIndex : operation.outputs) {
+            auto range = operandToOperations.equal_range(operandIndex);
+            for (auto i = range.first; i != range.second; i++) {
+                uint32_t& count = unknownInputCount[i->second];
+                if (--count == 0) {
+                    opsReadyToRun.push_back(i->second);
+                }
+            }
+        }
+    }
+    mOperations = runOrder;
+#endif
+}
+#if 0 // NeuralNetworksWrapper.h is needed.
+// Fills in |model| (the HIDL representation) from this builder's state:
+// operands, operations, I/O indexes, small-value pool, and memory pools.
+void ModelBuilder::setHidlModel(Model* model) const {
+    // Dummy Implementation
+#if 0
+    model->operands = mOperands;
+    model->operations = mOperations;
+    model->inputIndexes = mInputIndexes;
+    model->outputIndexes = mOutputIndexes;
+    model->operandValues = mSmallOperandValues;
+
+    uint32_t count = mMemories.size();
+    model->pools.resize(count);
+    for (uint32_t i = 0; i < count; i++) {
+        model->pools[i] = mMemories[i]->getHidlMemory();
+    }
+#endif
+}
+#endif
+}  // namespace nn
+}  // namespace android
diff --git a/src/runtime/ref/nn/runtime/ModelBuilder.h b/src/runtime/ref/nn/runtime/ModelBuilder.h
new file mode 100644 (file)
index 0000000..9a6bcbe
--- /dev/null
@@ -0,0 +1,157 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+// Class used to build a model through a succession of calls
+// to the NN API.
+
+#ifndef ANDROID_ML_NN_RUNTIME_MODEL_BUILDER_H
+#define ANDROID_ML_NN_RUNTIME_MODEL_BUILDER_H
+
+#include "NeuralNetworks.h"
+
+#include <vector>
+#include <memory>
+
+#if 0
+#include "HalInterfaces.h"
+#include "Memory.h"
+#include "NeuralNetworks.h"
+#include "Utils.h"
+#endif
+namespace android {
+namespace nn {
+#if 0
+class CompilationBuilder;
+class Device;
+class ExecutionPlan;
+class ExecutionStep;
+class Memory;
+#endif
+// Builds an NNAPI model incrementally: operands and operations are added
+// one call at a time, inputs/outputs are identified, and finish() freezes
+// the model. Large portions are temporarily compiled out (#if 0) until
+// the HAL type / Memory / CompilationBuilder headers are available.
+class ModelBuilder {
+public:
+    // NOTE(review): virtual destructor but no other virtual members —
+    // confirm whether ModelBuilder is really meant to be subclassed.
+    virtual ~ModelBuilder() {}
+    // Adds an operand to the model.
+    int addOperand(const ANeuralNetworksOperandType& type);
+    // Sets an operand's constant value (copied or referenced).
+    int setOperandValue(uint32_t index, const void* buffer, size_t length);
+#if 0 // Memory.h is needed.
+    int setOperandValueFromMemory(uint32_t index, const Memory* memory, uint32_t offset,
+                                  size_t length);
+#endif
+    // Adds an operation consuming/producing previously added operands.
+    int addOperation(ANeuralNetworksOperationType type, uint32_t inputCount, const uint32_t* inputs,
+                     uint32_t outputCount, const uint32_t* outputs);
+    // Declares which operands are the model's inputs and outputs.
+    int identifyInputsAndOutputs(uint32_t inputCount, const uint32_t* inputs, uint32_t outputCount,
+                                 const uint32_t* outputs);
+
+    // Freezes the model; no further modification is allowed afterwards.
+    int finish();
+    bool isFinished() const { return mCompletedModel; }
+#if 0 // CompilationBuilder is needed.
+    int createCompilation(CompilationBuilder** compilation);
+#endif
+#if 0 // NeuralNetworksWrapper.h is needed.
+    void setHidlModel(Model* model) const;
+#endif
+#if 0 //"android/hardware/neuralnetworks/1.0/types.h" is needed.
+    uint32_t operandCount() const {
+        // We don't allow more than uint32_t worth of operands
+        return static_cast<uint32_t>(mOperands.size());
+    }
+    uint32_t operationCount() const {
+        // We don't allow more than uint32_t worth of operations
+        return static_cast<uint32_t>(mOperations.size());
+    }
+#endif
+    uint32_t inputCount() const { return static_cast<uint32_t>(mInputIndexes.size()); }
+    uint32_t outputCount() const { return static_cast<uint32_t>(mOutputIndexes.size()); }
+    uint32_t getInputOperandIndex(uint32_t i) const { return mInputIndexes[i]; }
+#if 0 // "android/hardware/neuralnetworks/1.0/types.h" is needed.
+    const Operand& getInputOperand(uint32_t i) const {
+        return mOperands[getInputOperandIndex(i)];
+    }
+    uint32_t getOutputOperandIndex(uint32_t i) const { return mOutputIndexes[i]; }
+    const Operand& getOutputOperand(uint32_t i) const {
+        return mOperands[getOutputOperandIndex(i)];
+    }
+    const Operand& getOperand(uint32_t index) const { return mOperands[index]; }
+    const Operation& getOperation(uint32_t index) const { return mOperations[index]; }
+#endif
+#if 0 // Memory.h is needed.
+    const MemoryTracker& getMemories() const { return mMemories; }
+#endif
+#if 0 // "android/hardware/neuralnetworks/1.0/types.h" is needed.
+    const std::vector<Operation>& getOperations() const { return mOperations; }
+#endif
+    // Returns a pointer into the small-value pool at the given byte offset.
+    const uint8_t* getPointerToOperandValue(uint32_t offset) const {
+        return mSmallOperandValues.data() + offset;
+    }
+#if 0 // ExecutionPlan is needed.
+    int partitionTheWork(const std::vector<std::shared_ptr<Device>>& devices,
+                         uint32_t preference, ExecutionPlan* plan) const;
+#endif
+ private:
+    // TODO: move partitionTheWork, findBestDeviceForEachOperation,
+    // sortIntoRunOrder to CompilationBuilder?
+#if 0 // Manager.h is needed.
+    int findBestDeviceForEachOperation(uint32_t preference,
+                                       const std::vector<std::shared_ptr<Device>>& devices,
+                                       const size_t operationCount,
+                                       const size_t deviceCount,
+                                       std::vector<int>* bestDeviceForOperation) const;
+    PerformanceInfo getPerformanceInfo(const std::shared_ptr<Device> device,
+                                       uint32_t operationIndex) const;
+#endif
+    // Sorts the operations to be in the correct order for single threaded
+    // node-at-a-time execution.
+    void sortIntoRunOrder();
+
+    // Copies the large values to a shared memory, if we have any.
+    int copyLargeValuesToSharedMemory();
+#if 0 //"android/hardware/neuralnetworks/1.0/types.h" is needed.
+    // The operations of the graph.
+    std::vector<Operation> mOperations;
+    // The description of the operands of the graph.
+    std::vector<Operand> mOperands;
+#endif
+    // Operand indexes identifying the inputs and outputs of the model.
+    std::vector<uint32_t> mInputIndexes;
+    std::vector<uint32_t> mOutputIndexes;
+#if 0 // Memory.h is needed.
+    MemoryTracker mMemories;
+#endif
+    // The value of the small operands that are defined at model
+    // creation time.
+    std::vector<uint8_t> mSmallOperandValues;
+
+    struct LargeValue {
+        uint32_t operandIndex;
+        const void* buffer;
+    };
+    // Operand index and buffer pointer for all the large operand values of this model.
+    std::vector<LargeValue> mLargeOperandValues;
+    // The shared memory region that will contain the large values.
+#if 0 // Memory.h is needed.
+    Memory mLargeValueMemory;
+#endif
+    // Once the model has been finished, we should not allow further
+    // modifications to the model.
+    // NOTE(review): 'mutable' looks unnecessary — no const member function
+    // here writes this flag; confirm before removing.
+    mutable bool mCompletedModel = false;
+};
+
+}  // namespace nn
+}  // namespace android
+
+#endif  // ANDROID_ML_NN_RUNTIME_MODEL_BUILDER_H
index 8cd594d..2b2ead4 100644 (file)
 
 #include "NeuralNetworks.h"
 
+#include "ModelBuilder.h"
+
+#include <iostream>
+
 // TODO Include these files once availible
 #if 0
 #include "Callbacks.h"
@@ -217,8 +221,8 @@ static_assert(static_cast<int32_t>(FusedActivationFunc::RELU6) == ANEURALNETWORK
               "FusedActivationFunc::RELU6 != ANEURALNETWORKS_FUSED_RELU6");
 
 using android::sp;
-using namespace android::nn;
 #endif
+using namespace android::nn;
 
 int ANeuralNetworksMemory_createFromFd(size_t size, int prot, int fd, size_t offset,
                                        ANeuralNetworksMemory** memory) {
@@ -252,7 +256,17 @@ void ANeuralNetworksMemory_free(ANeuralNetworksMemory* memory) {
 
 int ANeuralNetworksModel_create(ANeuralNetworksModel** model) {
     // Dummy Implementation
-    return 0;
+    if (!model) {
+        std::cout << "ANeuralNetworksModel_create passed a nullptr" << std::endl;
+        return ANEURALNETWORKS_UNEXPECTED_NULL;
+    }
+    ModelBuilder* m = new ModelBuilder();
+    if (m == nullptr) {
+        *model = nullptr;
+        return ANEURALNETWORKS_OUT_OF_MEMORY;
+    }
+    *model = reinterpret_cast<ANeuralNetworksModel*>(m);
+    return ANEURALNETWORKS_NO_ERROR;
 
     // Original code for reference
 #if 0