From: 이상규/동작제어Lab(SR)/Principal Engineer/삼성전자
Date: Wed, 4 Apr 2018 13:46:39 +0000 (+0900)
Subject: Bring android NN runtime tests (#438)
X-Git-Tag: 0.1~432
X-Git-Url: http://review.tizen.org/git/?a=commitdiff_plain;h=60f0fce32edea61e56f981222d0c2df1568f61e6;p=platform%2Fcore%2Fml%2Fnnfw.git

Bring android NN runtime tests (#438)

This PR introduces TestMain.cpp, TestTrivialModel.cpp, and TestValidation.cpp from the Android NN tests.

Signed-off-by: Sanggyu Lee
---

diff --git a/src/runtime/ref/nn/runtime/CMakeLists.txt b/src/runtime/ref/nn/runtime/CMakeLists.txt
index 20fa222..089458e 100644
--- a/src/runtime/ref/nn/runtime/CMakeLists.txt
+++ b/src/runtime/ref/nn/runtime/CMakeLists.txt
@@ -46,3 +46,5 @@ SET (SAMPLE_SRCS sample/simple_model_main.cpp
 add_executable(runtime_run_simple_model ${SAMPLE_SRCS})
 target_link_libraries(runtime_run_simple_model ${LIB_RUNTIME})
 target_link_libraries(runtime_run_simple_model pthread)
+
+add_subdirectory(test)
diff --git a/src/runtime/ref/nn/runtime/test/CMakeLists.txt b/src/runtime/ref/nn/runtime/test/CMakeLists.txt
new file mode 100644
index 0000000..4850069
--- /dev/null
+++ b/src/runtime/ref/nn/runtime/test/CMakeLists.txt
@@ -0,0 +1,8 @@
+# Executable `runtime_run_android_nn_test` (runner for the Android NN API tests)
+set(RUNTIME_ANDROID_NN_TEST runtime_run_android_nn_test)
+add_executable(${RUNTIME_ANDROID_NN_TEST} TestMain.cpp
+                                          TestValidation.cpp
+                                          TestTrivialModel.cpp)
+target_link_libraries(${RUNTIME_ANDROID_NN_TEST} ${LIB_RUNTIME})
+target_link_libraries(${RUNTIME_ANDROID_NN_TEST} gtest)
+target_link_libraries(${RUNTIME_ANDROID_NN_TEST} pthread)
diff --git a/src/runtime/ref/nn/runtime/test/TestMain.cpp b/src/runtime/ref/nn/runtime/test/TestMain.cpp
new file mode 100644
index 0000000..04530ea
--- /dev/null
+++ b/src/runtime/ref/nn/runtime/test/TestMain.cpp
@@ -0,0 +1,30 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "NeuralNetworksWrapper.h"
+
+#include <gtest/gtest.h>
+
+using namespace android::nn::wrapper;
+
+int main(int argc, char** argv) {
+    ::testing::InitGoogleTest(&argc, argv);
+
+    // Test with the installed drivers.
+    int n1 = RUN_ALL_TESTS();
+
+    return n1;
+}
diff --git a/src/runtime/ref/nn/runtime/test/TestTrivialModel.cpp b/src/runtime/ref/nn/runtime/test/TestTrivialModel.cpp
new file mode 100644
index 0000000..8d29a26
--- /dev/null
+++ b/src/runtime/ref/nn/runtime/test/TestTrivialModel.cpp
@@ -0,0 +1,220 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "NeuralNetworksWrapper.h"
+
+#include <gtest/gtest.h>
+
+using namespace android::nn::wrapper;
+
+namespace {
+
+typedef float Matrix3x4[3][4];
+typedef float Matrix4[4];
+
+class TrivialTest : public ::testing::Test {
+protected:
+    virtual void SetUp() {}
+
+    const Matrix3x4 matrix1 = {{1.f, 2.f, 3.f, 4.f}, {5.f, 6.f, 7.f, 8.f}, {9.f, 10.f, 11.f, 12.f}};
+    const Matrix3x4 matrix2 = {{100.f, 200.f, 300.f, 400.f},
+                               {500.f, 600.f, 700.f, 800.f},
+                               {900.f, 1000.f, 1100.f, 1200.f}};
+    const Matrix4 matrix2b = {100.f, 200.f, 300.f, 400.f};
+    const Matrix3x4 matrix3 = {{20.f, 30.f, 40.f, 50.f},
+                               {21.f, 22.f, 23.f, 24.f},
+                               {31.f, 32.f, 33.f, 34.f}};
+    const Matrix3x4 expected2 = {{101.f, 202.f, 303.f, 404.f},
+                                 {505.f, 606.f, 707.f, 808.f},
+                                 {909.f, 1010.f, 1111.f, 1212.f}};
+    const Matrix3x4 expected2b = {{101.f, 202.f, 303.f, 404.f},
+                                  {105.f, 206.f, 307.f, 408.f},
+                                  {109.f, 210.f, 311.f, 412.f}};
+    const Matrix3x4 expected2c = {{100.f, 400.f, 900.f, 1600.f},
+                                  {500.f, 1200.f, 2100.f, 3200.f},
+                                  {900.f, 2000.f, 3300.f, 4800.f}};
+
+    const Matrix3x4 expected3 = {{121.f, 232.f, 343.f, 454.f},
+                                 {526.f, 628.f, 730.f, 832.f},
+                                 {940.f, 1042.f, 1144.f, 1246.f}};
+    const Matrix3x4 expected3b = {{22.f, 34.f, 46.f, 58.f},
+                                  {31.f, 34.f, 37.f, 40.f},
+                                  {49.f, 52.f, 55.f, 58.f}};
+};
+
+// Create a model that can add two tensors using a one node graph.
+void CreateAddTwoTensorModel(Model* model) {
+    OperandType matrixType(Type::TENSOR_FLOAT32, {3, 4});
+    OperandType scalarType(Type::INT32, {});
+    int32_t activation(ANEURALNETWORKS_FUSED_NONE);
+    auto a = model->addOperand(&matrixType);
+    auto b = model->addOperand(&matrixType);
+    auto c = model->addOperand(&matrixType);
+    auto d = model->addOperand(&scalarType);
+    model->setOperandValue(d, &activation, sizeof(activation));
+    model->addOperation(ANEURALNETWORKS_ADD, {a, b, d}, {c});
+    model->identifyInputsAndOutputs({a, b}, {c});
+    ASSERT_TRUE(model->isValid());
+    model->finish();
+}
+
+// Create a model that can add three tensors using a two node graph,
+// with one tensor set as part of the model.
+void CreateAddThreeTensorModel(Model* model, const Matrix3x4 bias) {
+    OperandType matrixType(Type::TENSOR_FLOAT32, {3, 4});
+    OperandType scalarType(Type::INT32, {});
+    int32_t activation(ANEURALNETWORKS_FUSED_NONE);
+    auto a = model->addOperand(&matrixType);
+    auto b = model->addOperand(&matrixType);
+    auto c = model->addOperand(&matrixType);
+    auto d = model->addOperand(&matrixType);
+    auto e = model->addOperand(&matrixType);
+    auto f = model->addOperand(&scalarType);
+    model->setOperandValue(e, bias, sizeof(Matrix3x4));
+    model->setOperandValue(f, &activation, sizeof(activation));
+    model->addOperation(ANEURALNETWORKS_ADD, {a, c, f}, {b});
+    model->addOperation(ANEURALNETWORKS_ADD, {b, e, f}, {d});
+    model->identifyInputsAndOutputs({c, a}, {d});
+    ASSERT_TRUE(model->isValid());
+    model->finish();
+}
+
+// Check that the values are the same. This works only when dealing with
+// integer values; otherwise we should accept values that are similar, if not exact.
+int CompareMatrices(const Matrix3x4& expected, const Matrix3x4& actual) {
+    int errors = 0;
+    for (int i = 0; i < 3; i++) {
+        for (int j = 0; j < 4; j++) {
+            if (expected[i][j] != actual[i][j]) {
+                printf("expected[%d][%d] != actual[%d][%d], %f != %f\n", i, j, i, j,
+                       static_cast<double>(expected[i][j]), static_cast<double>(actual[i][j]));
+                errors++;
+            }
+        }
+    }
+    return errors;
+}
+
+TEST_F(TrivialTest, AddTwo) {
+    Model modelAdd2;
+    CreateAddTwoTensorModel(&modelAdd2);
+
+    // Test the one node model.
+    Matrix3x4 actual;
+    memset(&actual, 0, sizeof(actual));
+    Compilation compilation(&modelAdd2);
+    compilation.finish();
+    Execution execution(&compilation);
+    ASSERT_EQ(execution.setInput(0, matrix1, sizeof(Matrix3x4)), Result::NO_ERROR);
+    ASSERT_EQ(execution.setInput(1, matrix2, sizeof(Matrix3x4)), Result::NO_ERROR);
+    ASSERT_EQ(execution.setOutput(0, actual, sizeof(Matrix3x4)), Result::NO_ERROR);
+    ASSERT_EQ(execution.compute(), Result::NO_ERROR);
+    ASSERT_EQ(CompareMatrices(expected2, actual), 0);
+}
+
+TEST_F(TrivialTest, AddThree) {
+    Model modelAdd3;
+    CreateAddThreeTensorModel(&modelAdd3, matrix3);
+
+    // Test the two node model that adds three tensors.
+    Matrix3x4 actual;
+    memset(&actual, 0, sizeof(actual));
+    Compilation compilation2(&modelAdd3);
+    compilation2.finish();
+    Execution execution2(&compilation2);
+    ASSERT_EQ(execution2.setInput(0, matrix1, sizeof(Matrix3x4)), Result::NO_ERROR);
+    ASSERT_EQ(execution2.setInput(1, matrix2, sizeof(Matrix3x4)), Result::NO_ERROR);
+    ASSERT_EQ(execution2.setOutput(0, actual, sizeof(Matrix3x4)), Result::NO_ERROR);
+    ASSERT_EQ(execution2.compute(), Result::NO_ERROR);
+    ASSERT_EQ(CompareMatrices(expected3, actual), 0);
+
+    // Test it a second time to make sure the model is reusable.
+    memset(&actual, 0, sizeof(actual));
+    Compilation compilation3(&modelAdd3);
+    compilation3.finish();
+    Execution execution3(&compilation3);
+    ASSERT_EQ(execution3.setInput(0, matrix1, sizeof(Matrix3x4)), Result::NO_ERROR);
+    ASSERT_EQ(execution3.setInput(1, matrix1, sizeof(Matrix3x4)), Result::NO_ERROR);
+    ASSERT_EQ(execution3.setOutput(0, actual, sizeof(Matrix3x4)), Result::NO_ERROR);
+    ASSERT_EQ(execution3.compute(), Result::NO_ERROR);
+    ASSERT_EQ(CompareMatrices(expected3b, actual), 0);
+}
+
+TEST_F(TrivialTest, BroadcastAddTwo) {
+    Model modelBroadcastAdd2;
+    // activation: NONE.
+    int32_t activation_init[] = {ANEURALNETWORKS_FUSED_NONE};
+    OperandType scalarType(Type::INT32, {1});
+    auto activation = modelBroadcastAdd2.addOperand(&scalarType);
+    modelBroadcastAdd2.setOperandValue(activation, activation_init, sizeof(int32_t) * 1);
+
+    OperandType matrixType(Type::TENSOR_FLOAT32, {1, 1, 3, 4});
+    OperandType matrixType2(Type::TENSOR_FLOAT32, {4});
+
+    auto a = modelBroadcastAdd2.addOperand(&matrixType);
+    auto b = modelBroadcastAdd2.addOperand(&matrixType2);
+    auto c = modelBroadcastAdd2.addOperand(&matrixType);
+    modelBroadcastAdd2.addOperation(ANEURALNETWORKS_ADD, {a, b, activation}, {c});
+    modelBroadcastAdd2.identifyInputsAndOutputs({a, b}, {c});
+    ASSERT_TRUE(modelBroadcastAdd2.isValid());
+    modelBroadcastAdd2.finish();
+
+    // Test the one node model.
+    Matrix3x4 actual;
+    memset(&actual, 0, sizeof(actual));
+    Compilation compilation(&modelBroadcastAdd2);
+    compilation.finish();
+    Execution execution(&compilation);
+    ASSERT_EQ(execution.setInput(0, matrix1, sizeof(Matrix3x4)), Result::NO_ERROR);
+    ASSERT_EQ(execution.setInput(1, matrix2b, sizeof(Matrix4)), Result::NO_ERROR);
+    ASSERT_EQ(execution.setOutput(0, actual, sizeof(Matrix3x4)), Result::NO_ERROR);
+    ASSERT_EQ(execution.compute(), Result::NO_ERROR);
+    ASSERT_EQ(CompareMatrices(expected2b, actual), 0);
+}
+
+TEST_F(TrivialTest, BroadcastMulTwo) {
+    Model modelBroadcastMul2;
+    // activation: NONE.
+    int32_t activation_init[] = {ANEURALNETWORKS_FUSED_NONE};
+    OperandType scalarType(Type::INT32, {1});
+    auto activation = modelBroadcastMul2.addOperand(&scalarType);
+    modelBroadcastMul2.setOperandValue(activation, activation_init, sizeof(int32_t) * 1);
+
+    OperandType matrixType(Type::TENSOR_FLOAT32, {1, 1, 3, 4});
+    OperandType matrixType2(Type::TENSOR_FLOAT32, {4});
+
+    auto a = modelBroadcastMul2.addOperand(&matrixType);
+    auto b = modelBroadcastMul2.addOperand(&matrixType2);
+    auto c = modelBroadcastMul2.addOperand(&matrixType);
+    modelBroadcastMul2.addOperation(ANEURALNETWORKS_MUL, {a, b, activation}, {c});
+    modelBroadcastMul2.identifyInputsAndOutputs({a, b}, {c});
+    ASSERT_TRUE(modelBroadcastMul2.isValid());
+    modelBroadcastMul2.finish();
+
+    // Test the one node model.
+    Matrix3x4 actual;
+    memset(&actual, 0, sizeof(actual));
+    Compilation compilation(&modelBroadcastMul2);
+    compilation.finish();
+    Execution execution(&compilation);
+    ASSERT_EQ(execution.setInput(0, matrix1, sizeof(Matrix3x4)), Result::NO_ERROR);
+    ASSERT_EQ(execution.setInput(1, matrix2b, sizeof(Matrix4)), Result::NO_ERROR);
+    ASSERT_EQ(execution.setOutput(0, actual, sizeof(Matrix3x4)), Result::NO_ERROR);
+    ASSERT_EQ(execution.compute(), Result::NO_ERROR);
+    ASSERT_EQ(CompareMatrices(expected2c, actual), 0);
+}
+
+} // end namespace
diff --git a/src/runtime/ref/nn/runtime/test/TestValidation.cpp b/src/runtime/ref/nn/runtime/test/TestValidation.cpp
new file mode 100644
index 0000000..d7056c9
--- /dev/null
+++ b/src/runtime/ref/nn/runtime/test/TestValidation.cpp
@@ -0,0 +1,255 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "NeuralNetworks.h"
+
+//#include
+#include <gtest/gtest.h>
+#include <string>
+
+
+// This file tests all the validations done by the Neural Networks API.
+ +namespace { +class ValidationTest : public ::testing::Test { +protected: + virtual void SetUp() {} +}; + +class ValidationTestModel : public ValidationTest { +protected: + virtual void SetUp() { + ValidationTest::SetUp(); + ASSERT_EQ(ANeuralNetworksModel_create(&mModel), ANEURALNETWORKS_NO_ERROR); + } + virtual void TearDown() { + ANeuralNetworksModel_free(mModel); + ValidationTest::TearDown(); + } + ANeuralNetworksModel* mModel = nullptr; +}; + +class ValidationTestCompilation : public ValidationTestModel { +protected: + virtual void SetUp() { + ValidationTestModel::SetUp(); + + uint32_t dimensions[]{1}; + ANeuralNetworksOperandType tensorType{.type = ANEURALNETWORKS_TENSOR_FLOAT32, + .dimensionCount = 1, + .dimensions = dimensions}; + ASSERT_EQ(ANeuralNetworksModel_addOperand(mModel, &tensorType), ANEURALNETWORKS_NO_ERROR); + ASSERT_EQ(ANeuralNetworksModel_addOperand(mModel, &tensorType), ANEURALNETWORKS_NO_ERROR); + ASSERT_EQ(ANeuralNetworksModel_addOperand(mModel, &tensorType), ANEURALNETWORKS_NO_ERROR); + uint32_t inList[2]{0, 1}; + uint32_t outList[1]{2}; + ASSERT_EQ(ANeuralNetworksModel_addOperation(mModel, ANEURALNETWORKS_ADD, 2, inList, 1, + outList), + ANEURALNETWORKS_NO_ERROR); + ASSERT_EQ(ANeuralNetworksModel_finish(mModel), ANEURALNETWORKS_NO_ERROR); + + ASSERT_EQ(ANeuralNetworksCompilation_create(mModel, &mCompilation), + ANEURALNETWORKS_NO_ERROR); + } + virtual void TearDown() { + ANeuralNetworksCompilation_free(mCompilation); + ValidationTestModel::TearDown(); + } + ANeuralNetworksCompilation* mCompilation = nullptr; +}; + +class ValidationTestExecution : public ValidationTestCompilation { +protected: + virtual void SetUp() { + ValidationTestCompilation::SetUp(); + + ASSERT_EQ(ANeuralNetworksCompilation_finish(mCompilation), ANEURALNETWORKS_NO_ERROR); + + ASSERT_EQ(ANeuralNetworksExecution_create(mCompilation, &mExecution), + ANEURALNETWORKS_NO_ERROR); + } + virtual void TearDown() { + ANeuralNetworksExecution_free(mExecution); + ValidationTestCompilation::TearDown(); + } + ANeuralNetworksExecution* mExecution = nullptr; +}; + +TEST_F(ValidationTest, CreateModel) { + EXPECT_EQ(ANeuralNetworksModel_create(nullptr), ANEURALNETWORKS_UNEXPECTED_NULL); +} + +TEST_F(ValidationTestModel, AddOperand) { + ANeuralNetworksOperandType floatType{ + .type = ANEURALNETWORKS_FLOAT32, .dimensionCount = 0, .dimensions = nullptr}; + EXPECT_EQ(ANeuralNetworksModel_addOperand(nullptr, &floatType), + ANEURALNETWORKS_UNEXPECTED_NULL); + EXPECT_EQ(ANeuralNetworksModel_addOperand(mModel, nullptr), ANEURALNETWORKS_UNEXPECTED_NULL); + // TODO more types, +} + +TEST_F(ValidationTestModel, SetOperandValue) { + ANeuralNetworksOperandType floatType{ + .type = ANEURALNETWORKS_FLOAT32, .dimensionCount = 0, .dimensions = nullptr}; + EXPECT_EQ(ANeuralNetworksModel_addOperand(mModel, &floatType), ANEURALNETWORKS_NO_ERROR); + + char buffer[20]; + EXPECT_EQ(ANeuralNetworksModel_setOperandValue(nullptr, 0, buffer, sizeof(buffer)), + ANEURALNETWORKS_UNEXPECTED_NULL); + EXPECT_EQ(ANeuralNetworksModel_setOperandValue(mModel, 0, nullptr, sizeof(buffer)), + ANEURALNETWORKS_UNEXPECTED_NULL); + + // This should fail, since buffer is not the size of a float32. + EXPECT_EQ(ANeuralNetworksModel_setOperandValue(mModel, 0, buffer, sizeof(buffer)), + ANEURALNETWORKS_BAD_DATA); + + // This should fail, as this operand does not exist. 
+    EXPECT_EQ(ANeuralNetworksModel_setOperandValue(mModel, 1, buffer, 4), ANEURALNETWORKS_BAD_DATA);
+
+    // TODO lots of validation of type
+    // EXPECT_EQ(ANeuralNetworksModel_setOperandValue(mModel, 0, buffer,
+    //           sizeof(buffer)), ANEURALNETWORKS_UNEXPECTED_NULL);
+}
+
+TEST_F(ValidationTestModel, AddOperation) {
+    uint32_t input = 0;
+    uint32_t output = 0;
+    EXPECT_EQ(ANeuralNetworksModel_addOperation(nullptr, ANEURALNETWORKS_AVERAGE_POOL_2D, 1, &input,
+                                                1, &output),
+              ANEURALNETWORKS_UNEXPECTED_NULL);
+    EXPECT_EQ(ANeuralNetworksModel_addOperation(mModel, ANEURALNETWORKS_AVERAGE_POOL_2D, 0, nullptr,
+                                                1, &output),
+              ANEURALNETWORKS_UNEXPECTED_NULL);
+    EXPECT_EQ(ANeuralNetworksModel_addOperation(mModel, ANEURALNETWORKS_AVERAGE_POOL_2D, 1, &input,
+                                                0, nullptr),
+              ANEURALNETWORKS_UNEXPECTED_NULL);
+    // EXPECT_EQ(ANeuralNetworksModel_addOperation(mModel,
+    //                                             ANEURALNETWORKS_AVERAGE_POOL_2D, &inputs,
+    //                                             &outputs),
+    //           ANEURALNETWORKS_UNEXPECTED_NULL);
+}
+
+TEST_F(ValidationTestModel, SetInputsAndOutputs) {
+    uint32_t input = 0;
+    uint32_t output = 0;
+    EXPECT_EQ(ANeuralNetworksModel_identifyInputsAndOutputs(nullptr, 1, &input, 1, &output),
+              ANEURALNETWORKS_UNEXPECTED_NULL);
+    EXPECT_EQ(ANeuralNetworksModel_identifyInputsAndOutputs(mModel, 0, nullptr, 1, &output),
+              ANEURALNETWORKS_UNEXPECTED_NULL);
+    EXPECT_EQ(ANeuralNetworksModel_identifyInputsAndOutputs(mModel, 1, &input, 0, nullptr),
+              ANEURALNETWORKS_UNEXPECTED_NULL);
+}
+
+TEST_F(ValidationTestModel, Finish) {
+    EXPECT_EQ(ANeuralNetworksModel_finish(nullptr), ANEURALNETWORKS_UNEXPECTED_NULL);
+    EXPECT_EQ(ANeuralNetworksModel_finish(mModel), ANEURALNETWORKS_NO_ERROR);
+    EXPECT_EQ(ANeuralNetworksModel_finish(mModel), ANEURALNETWORKS_BAD_STATE);
+}
+
+TEST_F(ValidationTestModel, CreateCompilation) {
+    ANeuralNetworksCompilation* compilation = nullptr;
+    EXPECT_EQ(ANeuralNetworksCompilation_create(nullptr, &compilation),
+              ANEURALNETWORKS_UNEXPECTED_NULL);
+    EXPECT_EQ(ANeuralNetworksCompilation_create(mModel, nullptr), ANEURALNETWORKS_UNEXPECTED_NULL);
+    EXPECT_EQ(ANeuralNetworksCompilation_create(mModel, &compilation), ANEURALNETWORKS_BAD_STATE);
+
+    // EXPECT_EQ(ANeuralNetworksCompilation_create(mModel, ANeuralNetworksCompilation *
+    //                                             *compilation),
+    //           ANEURALNETWORKS_UNEXPECTED_NULL);
+}
+
+TEST_F(ValidationTestCompilation, SetPreference) {
+    EXPECT_EQ(ANeuralNetworksCompilation_setPreference(nullptr, ANEURALNETWORKS_PREFER_LOW_POWER),
+              ANEURALNETWORKS_UNEXPECTED_NULL);
+
+    EXPECT_EQ(ANeuralNetworksCompilation_setPreference(mCompilation, 40), ANEURALNETWORKS_BAD_DATA);
+}
+
+TEST_F(ValidationTestCompilation, CreateExecution) {
+    ANeuralNetworksExecution* execution = nullptr;
+    EXPECT_EQ(ANeuralNetworksExecution_create(nullptr, &execution),
+              ANEURALNETWORKS_UNEXPECTED_NULL);
+    EXPECT_EQ(ANeuralNetworksExecution_create(mCompilation, nullptr),
+              ANEURALNETWORKS_UNEXPECTED_NULL);
+    EXPECT_EQ(ANeuralNetworksExecution_create(mCompilation, &execution),
+              ANEURALNETWORKS_BAD_STATE);
+    // EXPECT_EQ(ANeuralNetworksExecution_create(mCompilation, ANeuralNetworksExecution *
+    //                                           *execution),
+    //           ANEURALNETWORKS_UNEXPECTED_NULL);
+}
+
+TEST_F(ValidationTestCompilation, Finish) {
+    EXPECT_EQ(ANeuralNetworksCompilation_finish(nullptr), ANEURALNETWORKS_UNEXPECTED_NULL);
+    EXPECT_EQ(ANeuralNetworksCompilation_finish(mCompilation), ANEURALNETWORKS_NO_ERROR);
+    EXPECT_EQ(ANeuralNetworksCompilation_setPreference(mCompilation,
+                                                       ANEURALNETWORKS_PREFER_FAST_SINGLE_ANSWER),
+              ANEURALNETWORKS_BAD_STATE);
+    EXPECT_EQ(ANeuralNetworksCompilation_finish(mCompilation), ANEURALNETWORKS_BAD_STATE);
+}
+
+#if 0
+// TODO do more..
+TEST_F(ValidationTestExecution, SetInput) {
+    EXPECT_EQ(ANeuralNetworksExecution_setInput(ANeuralNetworksExecution * execution, int32_t index,
+                                                const ANeuralNetworksOperandType* type,
+                                                const void* buffer, size_t length),
+              ANEURALNETWORKS_UNEXPECTED_NULL);
+}
+
+TEST_F(ValidationTestExecution, SetInputFromMemory) {
+    EXPECT_EQ(ANeuralNetworksExecution_setInputFromMemory(ANeuralNetworksExecution * execution,
+                                                          int32_t index,
+                                                          const ANeuralNetworksOperandType* type,
+                                                          const ANeuralNetworksMemory* buffer,
+                                                          uint32_t offset),
+              ANEURALNETWORKS_UNEXPECTED_NULL);
+}
+
+TEST_F(ValidationTestExecution, SetOutput) {
+    EXPECT_EQ(ANeuralNetworksExecution_setOutput(ANeuralNetworksExecution * execution,
+                                                 int32_t index,
+                                                 const ANeuralNetworksOperandType* type,
+                                                 void* buffer, size_t length),
+              ANEURALNETWORKS_UNEXPECTED_NULL);
+}
+
+TEST_F(ValidationTestExecution, SetOutputFromMemory) {
+    EXPECT_EQ(ANeuralNetworksExecution_setOutputFromMemory(ANeuralNetworksExecution * execution,
+                                                           int32_t index,
+                                                           const ANeuralNetworksOperandType* type,
+                                                           const ANeuralNetworksMemory* buffer,
+                                                           uint32_t offset),
+              ANEURALNETWORKS_UNEXPECTED_NULL);
+}
+
+TEST_F(ValidationTestExecution, StartCompute) {
+    EXPECT_EQ(ANeuralNetworksExecution_startCompute(ANeuralNetworksExecution * execution,
+                                                    ANeuralNetworksEvent * *event),
+              ANEURALNETWORKS_UNEXPECTED_NULL);
+}
+
+TEST_F(ValidationTestEvent, Wait) {
+    EXPECT_EQ(ANeuralNetworksEvent_wait(ANeuralNetworksEvent * event),
+              ANEURALNETWORKS_UNEXPECTED_NULL);
+}
+
+TEST_F(ValidationTestEvent, Free) {
+    EXPECT_EQ(ANeuralNetworksEvent_free(ANeuralNetworksEvent * event),
+              ANEURALNETWORKS_UNEXPECTED_NULL);
+}
+#endif
+
+} // namespace
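
A note on the disabled cases: the stubs under `#if 0` above paste raw function signatures into EXPECT_EQ, so they will not compile if simply enabled. The sketch below shows one way the SetInput and StartCompute cases might be fleshed out against the ANeuralNetworks C API. It is not part of this patch, and the expectation that these calls return ANEURALNETWORKS_UNEXPECTED_NULL for null arguments is an assumption modeled on the enabled validation tests, not something verified against this runtime.

TEST_F(ValidationTestExecution, SetInput) {
    char buffer[20];
    // A null execution or a null data buffer is assumed to be rejected with
    // ANEURALNETWORKS_UNEXPECTED_NULL, mirroring the other validation tests.
    EXPECT_EQ(ANeuralNetworksExecution_setInput(nullptr, 0, nullptr, buffer, sizeof(buffer)),
              ANEURALNETWORKS_UNEXPECTED_NULL);
    EXPECT_EQ(ANeuralNetworksExecution_setInput(mExecution, 0, nullptr, nullptr, sizeof(buffer)),
              ANEURALNETWORKS_UNEXPECTED_NULL);
}

TEST_F(ValidationTestExecution, StartCompute) {
    ANeuralNetworksEvent* event = nullptr;
    // A null execution or a null event out-parameter is assumed to be rejected.
    EXPECT_EQ(ANeuralNetworksExecution_startCompute(nullptr, &event),
              ANEURALNETWORKS_UNEXPECTED_NULL);
    EXPECT_EQ(ANeuralNetworksExecution_startCompute(mExecution, nullptr),
              ANEURALNETWORKS_UNEXPECTED_NULL);
}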