Import declarations of Model, Operand and Operation
author jiseob.jang <jiseob.jang@samsung.com>
Thu, 22 Mar 2018 13:35:59 +0000 (22:35 +0900)
committer 최형규/Motion Control Lab (SR)/Senior Engineer/Samsung Electronics <hk0110.choi@samsung.com>
Fri, 23 Mar 2018 04:10:21 +0000 (13:10 +0900)
This commit imports the declarations of Model, Operand and Operation
from Android NN without their implementations.

ModelBuilder and CpuExecutor build successfully against the Model, Operand
and Operation structs.
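
For reference, a minimal hand-written sketch of how the imported types fit
together when describing a graph directly (the two-input ADD graph, its
operand indices, dimensions and the scalar activation operand below are
illustrative assumptions, not code from this commit; a real model would
normally be assembled by ModelBuilder):

    #include <cstring>
    #include <android/hardware/neuralnetworks/1.0/types.h>

    using namespace android::hardware::neuralnetworks::V1_0;

    // Describe a trivial graph: out = ADD(in0, in1) on 1x4 float32 tensors.
    Model makeAddModel() {
        Model model;

        Operand in;                                  // template shared by both inputs
        in.type = OperandType::TENSOR_FLOAT32;
        in.dimensions = {1, 4};
        in.numberOfConsumers = 1;                    // consumed once, by the ADD operation
        in.scale = 0.0f;
        in.zeroPoint = 0;
        in.lifetime = OperandLifeTime::MODEL_INPUT;

        Operand act;                                 // scalar holding the fused activation code
        act.type = OperandType::INT32;
        act.numberOfConsumers = 1;
        act.lifetime = OperandLifeTime::CONSTANT_COPY;
        act.location = DataLocation{0, 0, static_cast<uint32_t>(sizeof(int32_t))};

        Operand out = in;
        out.numberOfConsumers = 0;
        out.lifetime = OperandLifeTime::MODEL_OUTPUT;

        model.operands = {in, in, act, out};         // indices 0,1 = inputs, 2 = act, 3 = output

        Operation add;
        add.type = OperationType::ADD;
        add.inputs = {0, 1, 2};
        add.outputs = {3};
        model.operations = {add};

        model.inputIndexes = {0, 1};
        model.outputIndexes = {3};

        // CONSTANT_COPY operands live in operandValues at the offset given by their location.
        int32_t none = static_cast<int32_t>(FusedActivationFunc::NONE);
        model.operandValues.resize(sizeof(none));
        std::memcpy(model.operandValues.data(), &none, sizeof(none));
        return model;
    }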

Signed-off-by: jiseob.jang <jiseob.jang@samsung.com>
src/runtime/ref/CMakeLists.txt
src/runtime/ref/nn/common/include/CpuExecutor.h
src/runtime/ref/nn/common/include/HalInterfaces.h [new file with mode: 0644]
src/runtime/ref/nn/depend/CMakeLists.txt
src/runtime/ref/nn/depend/hal/CMakeLists.txt [new file with mode: 0644]
src/runtime/ref/nn/depend/hal/include/android/hardware/neuralnetworks/1.0/types.h [new file with mode: 0644]
src/runtime/ref/nn/depend/libhidl/base/include/hidl/HidlInternal.h
src/runtime/ref/nn/runtime/CMakeLists.txt
src/runtime/ref/nn/runtime/ModelBuilder.h

index 01a2464..eb959c9 100644 (file)
@@ -1,2 +1,10 @@
 set(CMAKE_C_FLAGS "-std=c99")
+
+if(CMAKE_VERSION VERSION_LESS 3.1.0)
+  set(CMAKE_CXX_FLAGS "-std=c++14")
+else(CMAKE_VERSION VERSION_LESS 3.1.0)
+  set(CMAKE_CXX_STANDARD 14)
+endif(CMAKE_VERSION VERSION_LESS 3.1.0)
+
+#set(CMAKE_CXX_FLAGS "-std=c++14")
 add_subdirectory(nn)
index ad94b17..2e7cbbe 100644 (file)
@@ -17,8 +17,8 @@
 #ifndef ANDROID_ML_NN_COMMON_CPU_EXECUTOR_H
 #define ANDROID_ML_NN_COMMON_CPU_EXECUTOR_H
 
-#if 0 // REF-ANN
 #include "HalInterfaces.h"
+#if 0 // REF-ANN
 #include "OperationsUtils.h"
 #endif
 #include "Utils.h"
@@ -84,7 +84,6 @@ bool setRunTimePoolInfosFromHidlMemories(std::vector<RunTimePoolInfo>* poolInfos
 
 // This class is used to execute a model on the CPU.
 class CpuExecutor {
-#if 0 // REF-ANN
 public:
     // Executes the model. The results will be stored at the locations
     // specified in the constructor.
@@ -95,6 +94,7 @@ public:
             const std::vector<RunTimePoolInfo>& requestPoolInfos);
 
 private:
+#if 0 // REF-ANN
     bool initializeRunTimeInfo(const std::vector<RunTimePoolInfo>& modelPoolInfos,
                                const std::vector<RunTimePoolInfo>& requestPoolInfos);
     // Runs one operation of the graph.
diff --git a/src/runtime/ref/nn/common/include/HalInterfaces.h b/src/runtime/ref/nn/common/include/HalInterfaces.h
new file mode 100644 (file)
index 0000000..6043642
--- /dev/null
@@ -0,0 +1,78 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_ML_NN_COMMON_HAL_INTERFACES_H
+#define ANDROID_ML_NN_COMMON_HAL_INTERFACES_H
+
+#if 0 // REF-ANN
+#include <android/hardware/neuralnetworks/1.0/IDevice.h>
+#include <android/hardware/neuralnetworks/1.0/IExecutionCallback.h>
+#include <android/hardware/neuralnetworks/1.0/IPreparedModel.h>
+#include <android/hardware/neuralnetworks/1.0/IPreparedModelCallback.h>
+#endif
+#include <android/hardware/neuralnetworks/1.0/types.h>
+
+#if 0 // REF-ANN
+#include <android/hidl/allocator/1.0/IAllocator.h>
+#include <android/hidl/memory/1.0/IMemory.h>
+#include <hidlmemory/mapping.h>
+#endif
+
+using ::android::hardware::hidl_memory;
+using ::android::hardware::hidl_vec;
+using ::android::hardware::neuralnetworks::V1_0::Model;
+using ::android::hardware::neuralnetworks::V1_0::Operand;
+using ::android::hardware::neuralnetworks::V1_0::OperandLifeTime;
+using ::android::hardware::neuralnetworks::V1_0::OperandType;
+using ::android::hardware::neuralnetworks::V1_0::Operation;
+using ::android::hardware::neuralnetworks::V1_0::OperationType;
+using ::android::hardware::neuralnetworks::V1_0::PerformanceInfo;
+using ::android::hardware::neuralnetworks::V1_0::Request;
+#if 0 // REF-ANN
+using ::android::hardware::Return;
+using ::android::hardware::Void;
+using ::android::hardware::hidl_memory;
+using ::android::hardware::hidl_string;
+using ::android::hardware::hidl_vec;
+using ::android::hardware::neuralnetworks::V1_0::Capabilities;
+using ::android::hardware::neuralnetworks::V1_0::DataLocation;
+using ::android::hardware::neuralnetworks::V1_0::DeviceStatus;
+using ::android::hardware::neuralnetworks::V1_0::ErrorStatus;
+using ::android::hardware::neuralnetworks::V1_0::FusedActivationFunc;
+using ::android::hardware::neuralnetworks::V1_0::IDevice;
+using ::android::hardware::neuralnetworks::V1_0::IExecutionCallback;
+using ::android::hardware::neuralnetworks::V1_0::IPreparedModel;
+using ::android::hardware::neuralnetworks::V1_0::IPreparedModelCallback;
+using ::android::hardware::neuralnetworks::V1_0::Model;
+using ::android::hardware::neuralnetworks::V1_0::Operand;
+using ::android::hardware::neuralnetworks::V1_0::OperandLifeTime;
+using ::android::hardware::neuralnetworks::V1_0::OperandType;
+using ::android::hardware::neuralnetworks::V1_0::Operation;
+using ::android::hardware::neuralnetworks::V1_0::OperationType;
+using ::android::hardware::neuralnetworks::V1_0::PerformanceInfo;
+using ::android::hardware::neuralnetworks::V1_0::Request;
+using ::android::hardware::neuralnetworks::V1_0::RequestArgument;
+using ::android::hidl::allocator::V1_0::IAllocator;
+using ::android::hidl::memory::V1_0::IMemory;
+#endif
+
+namespace android {
+namespace nn {
+
+} // namespace nn
+} // namespace android
+
+#endif // ANDROID_ML_NN_COMMON_HAL_INTERFACES_H
index af91a5c..7254e88 100644 (file)
@@ -1,6 +1,6 @@
 #ADD_SUBDIRECTORY(android-base)
 #ADD_SUBDIRECTORY(external)
-#ADD_SUBDIRECTORY(hal)
+ADD_SUBDIRECTORY(hal)
 #ADD_SUBDIRECTORY(libc)
 ADD_SUBDIRECTORY(libcutils)
 ADD_SUBDIRECTORY(libhidl)
diff --git a/src/runtime/ref/nn/depend/hal/CMakeLists.txt b/src/runtime/ref/nn/depend/hal/CMakeLists.txt
new file mode 100644 (file)
index 0000000..f0fb019
--- /dev/null
@@ -0,0 +1,36 @@
+
+SET(HAL_INCS
+   ${CMAKE_CURRENT_SOURCE_DIR}/include
+)
+
+SET(INC_DIRS
+  ${INC_DIRS}
+  ${HAL_INCS}
+  PARENT_SCOPE
+)
+
+SET(HAL_NN_SRCS
+#  ${CMAKE_CURRENT_SOURCE_DIR}/hardware/neuralnetworks/1.0/DeviceAll.cpp
+#  ${CMAKE_CURRENT_SOURCE_DIR}/hardware/neuralnetworks/1.0/PreparedModelCallbackAll.cpp
+#  ${CMAKE_CURRENT_SOURCE_DIR}/hardware/neuralnetworks/1.0/ExecutionCallbackAll.cpp
+#  ${CMAKE_CURRENT_SOURCE_DIR}/hardware/neuralnetworks/1.0/types.cpp
+#  ${CMAKE_CURRENT_SOURCE_DIR}/hardware/neuralnetworks/1.0/PreparedModelAll.cpp
+)
+
+SET(HAL_HIDL_SRCS
+#  ${CMAKE_CURRENT_SOURCE_DIR}/hidl/allocator/1.0/AllocatorAll.cpp
+#  ${CMAKE_CURRENT_SOURCE_DIR}/hidl/base/1.0/BaseAll.cpp
+#  ${CMAKE_CURRENT_SOURCE_DIR}/hidl/base/1.0/types.cpp
+#  ${CMAKE_CURRENT_SOURCE_DIR}/hidl/manager/1.0/ServiceManagerAll.cpp
+#  ${CMAKE_CURRENT_SOURCE_DIR}/hidl/manager/1.0/ServiceNotificationAll.cpp
+#  ${CMAKE_CURRENT_SOURCE_DIR}/hidl/manager/1.1/ServiceManagerAll.cpp
+#  ${CMAKE_CURRENT_SOURCE_DIR}/hidl/memory/1.0/MapperAll.cpp
+#  ${CMAKE_CURRENT_SOURCE_DIR}/hidl/memory/1.0/MemoryAll.cpp
+)
+
+SET(SRCS
+  ${SRCS}
+  ${HAL_NN_SRCS}
+  ${HAL_HIDL_SRCS}
+  PARENT_SCOPE
+)
diff --git a/src/runtime/ref/nn/depend/hal/include/android/hardware/neuralnetworks/1.0/types.h b/src/runtime/ref/nn/depend/hal/include/android/hardware/neuralnetworks/1.0/types.h
new file mode 100644 (file)
index 0000000..146d18d
--- /dev/null
@@ -0,0 +1,493 @@
+#ifndef HIDL_GENERATED_ANDROID_HARDWARE_NEURALNETWORKS_V1_0_TYPES_H
+#define HIDL_GENERATED_ANDROID_HARDWARE_NEURALNETWORKS_V1_0_TYPES_H
+
+#include <hidl/HidlSupport.h>
+#if 0 // REF-ANN
+#include <hidl/MQDescriptor.h>
+#include <utils/NativeHandle.h>
+#include <utils/misc.h>
+#endif
+
+namespace android {
+namespace hardware {
+namespace neuralnetworks {
+namespace V1_0 {
+
+enum class OperandType : int32_t {
+    FLOAT32 = 0,
+    INT32 = 1,
+    UINT32 = 2,
+    TENSOR_FLOAT32 = 3,
+    TENSOR_INT32 = 4,
+    TENSOR_QUANT8_ASYMM = 5,
+    OEM = 10000,
+    TENSOR_OEM_BYTE = 10001,
+};
+
+enum class OperationType : int32_t {
+    ADD = 0,
+    AVERAGE_POOL_2D = 1,
+    CONCATENATION = 2,
+    CONV_2D = 3,
+    DEPTHWISE_CONV_2D = 4,
+    DEPTH_TO_SPACE = 5,
+    DEQUANTIZE = 6,
+    EMBEDDING_LOOKUP = 7,
+    FLOOR = 8,
+    FULLY_CONNECTED = 9,
+    HASHTABLE_LOOKUP = 10,
+    L2_NORMALIZATION = 11,
+    L2_POOL_2D = 12,
+    LOCAL_RESPONSE_NORMALIZATION = 13,
+    LOGISTIC = 14,
+    LSH_PROJECTION = 15,
+    LSTM = 16,
+    MAX_POOL_2D = 17,
+    MUL = 18,
+    RELU = 19,
+    RELU1 = 20,
+    RELU6 = 21,
+    RESHAPE = 22,
+    RESIZE_BILINEAR = 23,
+    RNN = 24,
+    SOFTMAX = 25,
+    SPACE_TO_DEPTH = 26,
+    SVDF = 27,
+    TANH = 28,
+    OEM_OPERATION = 10000,
+};
+
+enum class FusedActivationFunc : int32_t {
+    NONE = 0,
+    RELU = 1,
+    RELU1 = 2,
+    RELU6 = 3,
+};
+
+enum class OperandLifeTime : int32_t {
+    TEMPORARY_VARIABLE = 0,
+    MODEL_INPUT = 1,
+    MODEL_OUTPUT = 2,
+    CONSTANT_COPY = 3,
+    CONSTANT_REFERENCE = 4,
+    NO_VALUE = 5,
+};
+
+enum class DeviceStatus : int32_t {
+    AVAILABLE = 0,
+    BUSY = 1,
+    OFFLINE = 2,
+    UNKNOWN = 3,
+};
+
+struct PerformanceInfo final {
+    float execTime __attribute__ ((aligned(4)));
+    float powerUsage __attribute__ ((aligned(4)));
+};
+
+static_assert(offsetof(PerformanceInfo, execTime) == 0, "wrong offset");
+static_assert(offsetof(PerformanceInfo, powerUsage) == 4, "wrong offset");
+static_assert(sizeof(PerformanceInfo) == 8, "wrong size");
+static_assert(__alignof(PerformanceInfo) == 4, "wrong alignment");
+
+struct Capabilities final {
+    PerformanceInfo float32Performance __attribute__ ((aligned(4)));
+    PerformanceInfo quantized8Performance __attribute__ ((aligned(4)));
+};
+
+static_assert(offsetof(Capabilities, float32Performance) == 0, "wrong offset");
+static_assert(offsetof(Capabilities, quantized8Performance) == 8, "wrong offset");
+static_assert(sizeof(Capabilities) == 16, "wrong size");
+static_assert(__alignof(Capabilities) == 4, "wrong alignment");
+
+struct DataLocation final {
+    uint32_t poolIndex __attribute__ ((aligned(4)));
+    uint32_t offset __attribute__ ((aligned(4)));
+    uint32_t length __attribute__ ((aligned(4)));
+};
+
+static_assert(offsetof(DataLocation, poolIndex) == 0, "wrong offset");
+static_assert(offsetof(DataLocation, offset) == 4, "wrong offset");
+static_assert(offsetof(DataLocation, length) == 8, "wrong offset");
+static_assert(sizeof(DataLocation) == 12, "wrong size");
+static_assert(__alignof(DataLocation) == 4, "wrong alignment");
+
+struct Operand final {
+    OperandType type __attribute__ ((aligned(4)));
+    ::android::hardware::hidl_vec<uint32_t> dimensions __attribute__ ((aligned(8)));
+    uint32_t numberOfConsumers __attribute__ ((aligned(4)));
+    float scale __attribute__ ((aligned(4)));
+    int32_t zeroPoint __attribute__ ((aligned(4)));
+    OperandLifeTime lifetime __attribute__ ((aligned(4)));
+    DataLocation location __attribute__ ((aligned(4)));
+};
+
+static_assert(offsetof(Operand, type) == 0, "wrong offset");
+static_assert(offsetof(Operand, dimensions) == 8, "wrong offset");
+static_assert(offsetof(Operand, numberOfConsumers) == 24, "wrong offset");
+static_assert(offsetof(Operand, scale) == 28, "wrong offset");
+static_assert(offsetof(Operand, zeroPoint) == 32, "wrong offset");
+static_assert(offsetof(Operand, lifetime) == 36, "wrong offset");
+static_assert(offsetof(Operand, location) == 40, "wrong offset");
+static_assert(sizeof(Operand) == 56, "wrong size");
+static_assert(__alignof(Operand) == 8, "wrong alignment");
+
+struct Operation final {
+    OperationType type __attribute__ ((aligned(4)));
+    ::android::hardware::hidl_vec<uint32_t> inputs __attribute__ ((aligned(8)));
+    ::android::hardware::hidl_vec<uint32_t> outputs __attribute__ ((aligned(8)));
+};
+
+static_assert(offsetof(Operation, type) == 0, "wrong offset");
+static_assert(offsetof(Operation, inputs) == 8, "wrong offset");
+static_assert(offsetof(Operation, outputs) == 24, "wrong offset");
+static_assert(sizeof(Operation) == 40, "wrong size");
+static_assert(__alignof(Operation) == 8, "wrong alignment");
+
+struct Model final {
+    ::android::hardware::hidl_vec<Operand> operands __attribute__ ((aligned(8)));
+    ::android::hardware::hidl_vec<Operation> operations __attribute__ ((aligned(8)));
+    ::android::hardware::hidl_vec<uint32_t> inputIndexes __attribute__ ((aligned(8)));
+    ::android::hardware::hidl_vec<uint32_t> outputIndexes __attribute__ ((aligned(8)));
+    ::android::hardware::hidl_vec<uint8_t> operandValues __attribute__ ((aligned(8)));
+    ::android::hardware::hidl_vec<::android::hardware::hidl_memory> pools __attribute__ ((aligned(8)));
+};
+
+static_assert(offsetof(Model, operands) == 0, "wrong offset");
+static_assert(offsetof(Model, operations) == 16, "wrong offset");
+static_assert(offsetof(Model, inputIndexes) == 32, "wrong offset");
+static_assert(offsetof(Model, outputIndexes) == 48, "wrong offset");
+static_assert(offsetof(Model, operandValues) == 64, "wrong offset");
+static_assert(offsetof(Model, pools) == 80, "wrong offset");
+static_assert(sizeof(Model) == 96, "wrong size");
+static_assert(__alignof(Model) == 8, "wrong alignment");
+
+struct RequestArgument final {
+    bool hasNoValue __attribute__ ((aligned(1)));
+    DataLocation location __attribute__ ((aligned(4)));
+    ::android::hardware::hidl_vec<uint32_t> dimensions __attribute__ ((aligned(8)));
+};
+
+static_assert(offsetof(RequestArgument, hasNoValue) == 0, "wrong offset");
+static_assert(offsetof(RequestArgument, location) == 4, "wrong offset");
+static_assert(offsetof(RequestArgument, dimensions) == 16, "wrong offset");
+static_assert(sizeof(RequestArgument) == 32, "wrong size");
+static_assert(__alignof(RequestArgument) == 8, "wrong alignment");
+
+struct Request final {
+    ::android::hardware::hidl_vec<RequestArgument> inputs __attribute__ ((aligned(8)));
+    ::android::hardware::hidl_vec<RequestArgument> outputs __attribute__ ((aligned(8)));
+    ::android::hardware::hidl_vec<::android::hardware::hidl_memory> pools __attribute__ ((aligned(8)));
+};
+
+static_assert(offsetof(Request, inputs) == 0, "wrong offset");
+static_assert(offsetof(Request, outputs) == 16, "wrong offset");
+static_assert(offsetof(Request, pools) == 32, "wrong offset");
+static_assert(sizeof(Request) == 48, "wrong size");
+static_assert(__alignof(Request) == 8, "wrong alignment");
+
+enum class ErrorStatus : int32_t {
+    NONE = 0,
+    DEVICE_UNAVAILABLE = 1,
+    GENERAL_FAILURE = 2,
+    OUTPUT_INSUFFICIENT_SIZE = 3,
+    INVALID_ARGUMENT = 4,
+};
+
+constexpr int32_t operator|(const OperandType lhs, const OperandType rhs) {
+    return static_cast<int32_t>(static_cast<int32_t>(lhs) | static_cast<int32_t>(rhs));
+}
+
+constexpr int32_t operator|(const int32_t lhs, const OperandType rhs) {
+    return static_cast<int32_t>(lhs | static_cast<int32_t>(rhs));
+}
+
+constexpr int32_t operator|(const OperandType lhs, const int32_t rhs) {
+    return static_cast<int32_t>(static_cast<int32_t>(lhs) | rhs);
+}
+
+constexpr int32_t operator&(const OperandType lhs, const OperandType rhs) {
+    return static_cast<int32_t>(static_cast<int32_t>(lhs) & static_cast<int32_t>(rhs));
+}
+
+constexpr int32_t operator&(const int32_t lhs, const OperandType rhs) {
+    return static_cast<int32_t>(lhs & static_cast<int32_t>(rhs));
+}
+
+constexpr int32_t operator&(const OperandType lhs, const int32_t rhs) {
+    return static_cast<int32_t>(static_cast<int32_t>(lhs) & rhs);
+}
+
+constexpr int32_t &operator|=(int32_t& v, const OperandType e) {
+    v |= static_cast<int32_t>(e);
+    return v;
+}
+
+constexpr int32_t &operator&=(int32_t& v, const OperandType e) {
+    v &= static_cast<int32_t>(e);
+    return v;
+}
+
+template<typename>
+std::string toString(int32_t o);
+template<>
+std::string toString<OperandType>(int32_t o);
+
+std::string toString(OperandType o);
+
+constexpr int32_t operator|(const OperationType lhs, const OperationType rhs) {
+    return static_cast<int32_t>(static_cast<int32_t>(lhs) | static_cast<int32_t>(rhs));
+}
+
+constexpr int32_t operator|(const int32_t lhs, const OperationType rhs) {
+    return static_cast<int32_t>(lhs | static_cast<int32_t>(rhs));
+}
+
+constexpr int32_t operator|(const OperationType lhs, const int32_t rhs) {
+    return static_cast<int32_t>(static_cast<int32_t>(lhs) | rhs);
+}
+
+constexpr int32_t operator&(const OperationType lhs, const OperationType rhs) {
+    return static_cast<int32_t>(static_cast<int32_t>(lhs) & static_cast<int32_t>(rhs));
+}
+
+constexpr int32_t operator&(const int32_t lhs, const OperationType rhs) {
+    return static_cast<int32_t>(lhs & static_cast<int32_t>(rhs));
+}
+
+constexpr int32_t operator&(const OperationType lhs, const int32_t rhs) {
+    return static_cast<int32_t>(static_cast<int32_t>(lhs) & rhs);
+}
+
+constexpr int32_t &operator|=(int32_t& v, const OperationType e) {
+    v |= static_cast<int32_t>(e);
+    return v;
+}
+
+constexpr int32_t &operator&=(int32_t& v, const OperationType e) {
+    v &= static_cast<int32_t>(e);
+    return v;
+}
+
+template<typename>
+std::string toString(int32_t o);
+template<>
+std::string toString<OperationType>(int32_t o);
+
+std::string toString(OperationType o);
+
+constexpr int32_t operator|(const FusedActivationFunc lhs, const FusedActivationFunc rhs) {
+    return static_cast<int32_t>(static_cast<int32_t>(lhs) | static_cast<int32_t>(rhs));
+}
+
+constexpr int32_t operator|(const int32_t lhs, const FusedActivationFunc rhs) {
+    return static_cast<int32_t>(lhs | static_cast<int32_t>(rhs));
+}
+
+constexpr int32_t operator|(const FusedActivationFunc lhs, const int32_t rhs) {
+    return static_cast<int32_t>(static_cast<int32_t>(lhs) | rhs);
+}
+
+constexpr int32_t operator&(const FusedActivationFunc lhs, const FusedActivationFunc rhs) {
+    return static_cast<int32_t>(static_cast<int32_t>(lhs) & static_cast<int32_t>(rhs));
+}
+
+constexpr int32_t operator&(const int32_t lhs, const FusedActivationFunc rhs) {
+    return static_cast<int32_t>(lhs & static_cast<int32_t>(rhs));
+}
+
+constexpr int32_t operator&(const FusedActivationFunc lhs, const int32_t rhs) {
+    return static_cast<int32_t>(static_cast<int32_t>(lhs) & rhs);
+}
+
+constexpr int32_t &operator|=(int32_t& v, const FusedActivationFunc e) {
+    v |= static_cast<int32_t>(e);
+    return v;
+}
+
+constexpr int32_t &operator&=(int32_t& v, const FusedActivationFunc e) {
+    v &= static_cast<int32_t>(e);
+    return v;
+}
+
+template<typename>
+std::string toString(int32_t o);
+template<>
+std::string toString<FusedActivationFunc>(int32_t o);
+
+std::string toString(FusedActivationFunc o);
+
+constexpr int32_t operator|(const OperandLifeTime lhs, const OperandLifeTime rhs) {
+    return static_cast<int32_t>(static_cast<int32_t>(lhs) | static_cast<int32_t>(rhs));
+}
+
+constexpr int32_t operator|(const int32_t lhs, const OperandLifeTime rhs) {
+    return static_cast<int32_t>(lhs | static_cast<int32_t>(rhs));
+}
+
+constexpr int32_t operator|(const OperandLifeTime lhs, const int32_t rhs) {
+    return static_cast<int32_t>(static_cast<int32_t>(lhs) | rhs);
+}
+
+constexpr int32_t operator&(const OperandLifeTime lhs, const OperandLifeTime rhs) {
+    return static_cast<int32_t>(static_cast<int32_t>(lhs) & static_cast<int32_t>(rhs));
+}
+
+constexpr int32_t operator&(const int32_t lhs, const OperandLifeTime rhs) {
+    return static_cast<int32_t>(lhs & static_cast<int32_t>(rhs));
+}
+
+constexpr int32_t operator&(const OperandLifeTime lhs, const int32_t rhs) {
+    return static_cast<int32_t>(static_cast<int32_t>(lhs) & rhs);
+}
+
+constexpr int32_t &operator|=(int32_t& v, const OperandLifeTime e) {
+    v |= static_cast<int32_t>(e);
+    return v;
+}
+
+constexpr int32_t &operator&=(int32_t& v, const OperandLifeTime e) {
+    v &= static_cast<int32_t>(e);
+    return v;
+}
+
+template<typename>
+std::string toString(int32_t o);
+template<>
+std::string toString<OperandLifeTime>(int32_t o);
+
+std::string toString(OperandLifeTime o);
+
+constexpr int32_t operator|(const DeviceStatus lhs, const DeviceStatus rhs) {
+    return static_cast<int32_t>(static_cast<int32_t>(lhs) | static_cast<int32_t>(rhs));
+}
+
+constexpr int32_t operator|(const int32_t lhs, const DeviceStatus rhs) {
+    return static_cast<int32_t>(lhs | static_cast<int32_t>(rhs));
+}
+
+constexpr int32_t operator|(const DeviceStatus lhs, const int32_t rhs) {
+    return static_cast<int32_t>(static_cast<int32_t>(lhs) | rhs);
+}
+
+constexpr int32_t operator&(const DeviceStatus lhs, const DeviceStatus rhs) {
+    return static_cast<int32_t>(static_cast<int32_t>(lhs) & static_cast<int32_t>(rhs));
+}
+
+constexpr int32_t operator&(const int32_t lhs, const DeviceStatus rhs) {
+    return static_cast<int32_t>(lhs & static_cast<int32_t>(rhs));
+}
+
+constexpr int32_t operator&(const DeviceStatus lhs, const int32_t rhs) {
+    return static_cast<int32_t>(static_cast<int32_t>(lhs) & rhs);
+}
+
+constexpr int32_t &operator|=(int32_t& v, const DeviceStatus e) {
+    v |= static_cast<int32_t>(e);
+    return v;
+}
+
+constexpr int32_t &operator&=(int32_t& v, const DeviceStatus e) {
+    v &= static_cast<int32_t>(e);
+    return v;
+}
+
+template<typename>
+std::string toString(int32_t o);
+template<>
+std::string toString<DeviceStatus>(int32_t o);
+
+std::string toString(DeviceStatus o);
+
+std::string toString(const PerformanceInfo&);
+
+bool operator==(const PerformanceInfo&, const PerformanceInfo&);
+
+bool operator!=(const PerformanceInfo&, const PerformanceInfo&);
+
+std::string toString(const Capabilities&);
+
+bool operator==(const Capabilities&, const Capabilities&);
+
+bool operator!=(const Capabilities&, const Capabilities&);
+
+std::string toString(const DataLocation&);
+
+bool operator==(const DataLocation&, const DataLocation&);
+
+bool operator!=(const DataLocation&, const DataLocation&);
+
+std::string toString(const Operand&);
+
+bool operator==(const Operand&, const Operand&);
+
+bool operator!=(const Operand&, const Operand&);
+
+std::string toString(const Operation&);
+
+bool operator==(const Operation&, const Operation&);
+
+bool operator!=(const Operation&, const Operation&);
+
+std::string toString(const Model&);
+
+// operator== and operator!= are not generated for Model
+
+std::string toString(const RequestArgument&);
+
+bool operator==(const RequestArgument&, const RequestArgument&);
+
+bool operator!=(const RequestArgument&, const RequestArgument&);
+
+std::string toString(const Request&);
+
+// operator== and operator!= are not generated for Request
+
+constexpr int32_t operator|(const ErrorStatus lhs, const ErrorStatus rhs) {
+    return static_cast<int32_t>(static_cast<int32_t>(lhs) | static_cast<int32_t>(rhs));
+}
+
+constexpr int32_t operator|(const int32_t lhs, const ErrorStatus rhs) {
+    return static_cast<int32_t>(lhs | static_cast<int32_t>(rhs));
+}
+
+constexpr int32_t operator|(const ErrorStatus lhs, const int32_t rhs) {
+    return static_cast<int32_t>(static_cast<int32_t>(lhs) | rhs);
+}
+
+constexpr int32_t operator&(const ErrorStatus lhs, const ErrorStatus rhs) {
+    return static_cast<int32_t>(static_cast<int32_t>(lhs) & static_cast<int32_t>(rhs));
+}
+
+constexpr int32_t operator&(const int32_t lhs, const ErrorStatus rhs) {
+    return static_cast<int32_t>(lhs & static_cast<int32_t>(rhs));
+}
+
+constexpr int32_t operator&(const ErrorStatus lhs, const int32_t rhs) {
+    return static_cast<int32_t>(static_cast<int32_t>(lhs) & rhs);
+}
+
+constexpr int32_t &operator|=(int32_t& v, const ErrorStatus e) {
+    v |= static_cast<int32_t>(e);
+    return v;
+}
+
+constexpr int32_t &operator&=(int32_t& v, const ErrorStatus e) {
+    v &= static_cast<int32_t>(e);
+    return v;
+}
+
+template<typename>
+std::string toString(int32_t o);
+template<>
+std::string toString<ErrorStatus>(int32_t o);
+
+std::string toString(ErrorStatus o);
+
+
+}  // namespace V1_0
+}  // namespace neuralnetworks
+}  // namespace hardware
+}  // namespace android
+
+#endif  // HIDL_GENERATED_ANDROID_HARDWARE_NEURALNETWORKS_V1_0_TYPES_H
index 5ec795d..27b6084 100644 (file)
@@ -24,8 +24,6 @@
 #include <vector>
 #include <utility>
 
-#include "Log.h"
-
 namespace android {
 namespace hardware {
 namespace details {
index 79e8a39..c558dce 100644 (file)
@@ -6,7 +6,7 @@ SET (DEPEND_SRCS ${SRCS})
 SET (DEPEND_INCS ${INC_DIRS})
 
 add_library(runtime SHARED ${RUNTIME_SRCS} ${DEPEND_SRCS})
-include_directories(runtime PRIVATE . include ../include ${DEPEND_INCS})
+include_directories(runtime PRIVATE . include ../include ../common/include ${DEPEND_INCS})
 
 # Executable `runtime_run` (Dummy runner executable for simple testing bring-up stage)
 # TODO remove the executable later
index 5ecee4b..a3f782b 100644 (file)
@@ -25,8 +25,8 @@
 #include <vector>
 #include <memory>
 
-#if 0
 #include "HalInterfaces.h"
+#if 0
 #include "Memory.h"
 #include "NeuralNetworks.h"
 #include "Utils.h"
@@ -63,7 +63,6 @@ public:
 #if 0 // NeuralNetworksWrapper.h is needed.
     void setHidlModel(Model* model) const;
 #endif
-#if 0 //"android/hardware/neuralnetworks/1.0/types.h" is needed.
     uint32_t operandCount() const {
         // We don't allow more than uint32_t worth of operands
         return static_cast<uint32_t>(mOperands.size());
@@ -72,11 +71,10 @@ public:
         // We don't allow more than uint32_t worth of operations
         return static_cast<uint32_t>(mOperations.size());
     }
-#endif
+
     uint32_t inputCount() const { return static_cast<uint32_t>(mInputIndexes.size()); }
     uint32_t outputCount() const { return static_cast<uint32_t>(mOutputIndexes.size()); }
     uint32_t getInputOperandIndex(uint32_t i) const { return mInputIndexes[i]; }
-#if 0 // "android/hardware/neuralnetworks/1.0/types.h" is needed.
     const Operand& getInputOperand(uint32_t i) const {
         return mOperands[getInputOperandIndex(i)];
     }
@@ -86,13 +84,11 @@ public:
     }
     const Operand& getOperand(uint32_t index) const { return mOperands[index]; }
     const Operation& getOperation(uint32_t index) const { return mOperations[index]; }
-#endif
+
 #if 0 // Memory.h is needed.
     const MemoryTracker& getMemories() const { return mMemories; }
 #endif
-#if 0 // "android/hardware/neuralnetworks/1.0/types.h" is needed.
     const std::vector<Operation>& getOperations() const { return mOperations; }
-#endif
     const uint8_t* getPointerToOperandValue(uint32_t offset) const {
         return mSmallOperandValues.data() + offset;
     }
@@ -118,12 +114,11 @@ public:
 
     // Copies the large values to a shared memory, if we have any.
     int copyLargeValuesToSharedMemory();
-#if 0 //"android/hardware/neuralnetworks/1.0/types.h" is needed.
     // The operations of the graph.
     std::vector<Operation> mOperations;
     // The description of the operands of the graph.
     std::vector<Operand> mOperands;
-#endif
+
     // Specifies where to find the list of indexes identifying
     // the inputs and outputs of the model.  The offset is into
     // the mOperandIndexes table.