${CMAKE_CURRENT_SOURCE_DIR}/src/TestTrivialModel.cpp
${CMAKE_CURRENT_SOURCE_DIR}/src/TestValidation.cpp)
-set(GENERATED_TEST_SRC ${CMAKE_CURRENT_SOURCE_DIR}/src/TestGenerated_V1_0.cpp
- ${CMAKE_CURRENT_SOURCE_DIR}/src/TestGenerated_V1_1.cpp
- ${CMAKE_CURRENT_SOURCE_DIR}/src/TestGenerated_Ex.cpp)
+set(GENERATED_TEST_SRC ${CMAKE_CURRENT_SOURCE_DIR}/src/TestGenerated.cpp
+ ${CMAKE_CURRENT_SOURCE_DIR}/src/TestGenerated_V1_0.cpp
+ ${CMAKE_CURRENT_SOURCE_DIR}/src/TestGenerated_V1_1.cpp
+ ${CMAKE_CURRENT_SOURCE_DIR}/src/TestGenerated_Ex.cpp)
set(RUNTIME_NNAPI_TEST_SRC ${RUNTIME_NNAPI_TEST_SRC} ${GENERATED_TEST_SRC})
#ifndef ANDROID_ML_NN_TOOLS_TEST_GENERATOR_TEST_HARNESS_H
#define ANDROID_ML_NN_TOOLS_TEST_GENERATOR_TEST_HARNESS_H
+#include <gmock/gmock-matchers.h>
#include <gtest/gtest.h>
#include <cmath>
#include <tuple>
#include <vector>
-namespace generated_tests {
+namespace test_helper {
constexpr const size_t gMaximumNumberOfErrorMessages = 10;
-typedef std::map<int, std::vector<float>> Float32Operands;
-typedef std::map<int, std::vector<int32_t>> Int32Operands;
-typedef std::map<int, std::vector<uint8_t>> Quant8Operands;
-typedef std::tuple<Float32Operands, // ANEURALNETWORKS_TENSOR_FLOAT32
- Int32Operands, // ANEURALNETWORKS_TENSOR_INT32
- Quant8Operands // ANEURALNETWORKS_TENSOR_QUANT8_ASYMM
- >
- MixedTyped;
-typedef std::pair<MixedTyped, MixedTyped> MixedTypedExampleType;
+// TODO: Figure out the build dependency to make including "CpuOperationUtils.h" work.
+// Fix for neurun: comment out convertFloat16ToFloat32
+//inline void convertFloat16ToFloat32(const _Float16* input, std::vector<float>* output) {
+// for (size_t i = 0; i < output->size(); ++i) {
+// (*output)[i] = static_cast<float>(input[i]);
+// }
+//}
-template <typename T>
-struct MixedTypedIndex {};
+// This class is a workaround for two issues our code relies on:
+// 1. sizeof(bool) is implementation defined.
+// 2. vector<bool> does not allow direct pointer access via the data() method.
+class bool8 {
+ public:
+ bool8() : mValue() {}
+ /* implicit */ bool8(bool value) : mValue(value) {}
+ inline operator bool() const { return mValue != 0; }
-template <>
-struct MixedTypedIndex<float> {
- static constexpr size_t index = 0;
-};
-template <>
-struct MixedTypedIndex<int32_t> {
- static constexpr size_t index = 1;
+ private:
+ uint8_t mValue;
};
-template <>
-struct MixedTypedIndex<uint8_t> {
- static constexpr size_t index = 2;
+
+static_assert(sizeof(bool8) == 1, "size of bool8 must be 8 bits");
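+// A minimal usage sketch (illustrative, not from the original sources):
+// std::vector<bool> packs its elements into bits and has no data() member,
+// so its storage cannot be handed to setInput()/setOutput(). With bool8,
+// std::vector<bool8> stores one addressable byte per element:
+//   std::vector<bool8> flags = {true, false, true};
+//   void* raw = flags.data();                     // contiguous bytes
+//   size_t bytes = flags.size() * sizeof(bool8);  // == flags.size()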
+
+typedef std::map<int, std::vector<uint32_t>> OperandDimensions;
+typedef std::map<int, std::vector<float>> Float32Operands;
+typedef std::map<int, std::vector<int32_t>> Int32Operands;
+typedef std::map<int, std::vector<uint8_t>> Quant8AsymmOperands;
+typedef std::map<int, std::vector<int16_t>> Quant16SymmOperands;
+// Fix for neurun: comment out Float16Operands
+//typedef std::map<int, std::vector<_Float16>> Float16Operands;
+typedef std::map<int, std::vector<bool8>> Bool8Operands;
+typedef std::map<int, std::vector<int8_t>> Quant8ChannelOperands;
+typedef std::map<int, std::vector<uint16_t>> Quant16AsymmOperands;
+typedef std::map<int, std::vector<int8_t>> Quant8SymmOperands;
+struct MixedTyped {
+ static constexpr size_t kNumTypes = 9;
+ OperandDimensions operandDimensions;
+ Float32Operands float32Operands;
+ Int32Operands int32Operands;
+ Quant8AsymmOperands quant8AsymmOperands;
+ Quant16SymmOperands quant16SymmOperands;
+    // Fix for neurun: comment out Float16Operands
+ //Float16Operands float16Operands;
+ Bool8Operands bool8Operands;
+ Quant8ChannelOperands quant8ChannelOperands;
+ Quant16AsymmOperands quant16AsymmOperands;
+ Quant8SymmOperands quant8SymmOperands;
};
+typedef std::pair<MixedTyped, MixedTyped> MixedTypedExampleType;
+
+// Mixed-typed examples
+typedef struct {
+ MixedTypedExampleType operands;
+ // Specifies the RANDOM_MULTINOMIAL distribution tolerance.
+ // If set to greater than zero, the input is compared as log-probabilities
+ // to the output and must be within this tolerance to pass.
+    // Fix for neurun: remove the default value (aggregates cannot have default member initializers until c++14)
+ //float expectedMultinomialDistributionTolerance = 0.0;
+ float expectedMultinomialDistributionTolerance;
+} MixedTypedExample;
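+// Hand-written illustration (hypothetical values): since the generator now
+// targets c++11, examples use positional aggregate initialization instead of
+// designated initializers. MixedTyped takes its nine maps in declaration
+// order (dimensions, float32, int32, quant8Asymm, quant16Symm, bool8,
+// quant8Channel, quant16Asymm, quant8Symm):
+//   MixedTypedExample ex = {
+//       {// operands: {input, golden output}
+//        {{}, {{0, {1.0f, 2.0f}}}, {}, {}, {}, {}, {}, {}, {}},
+//        {{}, {{0, {2.0f, 4.0f}}}, {}, {}, {}, {}, {}, {}, {}}},
+//       0.0f};  // expectedMultinomialDistributionTolerance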
// Go through all index-value pairs of a given input type
template <typename T>
-inline void for_each(const MixedTyped& idx_and_data,
+inline void for_each(const std::map<int, std::vector<T>>& idx_and_data,
std::function<void(int, const std::vector<T>&)> execute) {
- for (auto& i : std::get<MixedTypedIndex<T>::index>(idx_and_data)) {
+ for (auto& i : idx_and_data) {
execute(i.first, i.second);
}
}
// non-const variant of for_each
template <typename T>
-inline void for_each(MixedTyped& idx_and_data,
+inline void for_each(std::map<int, std::vector<T>>& idx_and_data,
std::function<void(int, std::vector<T>&)> execute) {
- for (auto& i : std::get<MixedTypedIndex<T>::index>(idx_and_data)) {
+ for (auto& i : idx_and_data) {
execute(i.first, i.second);
}
}
+// Go through all index-value pairs of a given input type
+template <typename T>
+inline void for_each(const std::map<int, std::vector<T>>& golden,
+ std::map<int, std::vector<T>>& test,
+ std::function<void(int, const std::vector<T>&, std::vector<T>&)> execute) {
+ for_each<T>(golden, [&test, &execute](int index, const std::vector<T>& g) {
+ auto& t = test[index];
+ execute(index, g, t);
+ });
+}
+
+// Go through all index-value pairs of a given input type
+template <typename T>
+inline void for_each(
+ const std::map<int, std::vector<T>>& golden, const std::map<int, std::vector<T>>& test,
+ std::function<void(int, const std::vector<T>&, const std::vector<T>&)> execute) {
+ for_each<T>(golden, [&test, &execute](int index, const std::vector<T>& g) {
+ auto t = test.find(index);
+ ASSERT_NE(t, test.end());
+ execute(index, g, t->second);
+ });
+}
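+// Illustrative use of the paired traversal above: walk golden and test
+// outputs together and assert matching sizes (names are examples only):
+//   for_each<float>(golden.float32Operands, test.float32Operands,
+//                   [](int idx, const std::vector<float>& g,
+//                      const std::vector<float>& t) {
+//       EXPECT_EQ(g.size(), t.size()) << "output " << idx;
+//   });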
+
// internal helper for for_all
template <typename T>
-inline void for_all_internal(
- MixedTyped& idx_and_data,
- std::function<void(int, void*, size_t)> execute_this) {
+inline void for_all_internal(std::map<int, std::vector<T>>& idx_and_data,
+ std::function<void(int, void*, size_t)> execute_this) {
for_each<T>(idx_and_data, [&execute_this](int idx, std::vector<T>& m) {
execute_this(idx, static_cast<void*>(m.data()), m.size() * sizeof(T));
});
// expects a functor that takes (int index, void *raw data, size_t sz)
inline void for_all(MixedTyped& idx_and_data,
std::function<void(int, void*, size_t)> execute_this) {
- for_all_internal<float>(idx_and_data, execute_this);
- for_all_internal<int32_t>(idx_and_data, execute_this);
- for_all_internal<uint8_t>(idx_and_data, execute_this);
+ for_all_internal(idx_and_data.float32Operands, execute_this);
+ for_all_internal(idx_and_data.int32Operands, execute_this);
+ for_all_internal(idx_and_data.quant8AsymmOperands, execute_this);
+ for_all_internal(idx_and_data.quant16SymmOperands, execute_this);
+ // Fix for neurun: comment out float16Operands field
+ //for_all_internal(idx_and_data.float16Operands, execute_this);
+ for_all_internal(idx_and_data.bool8Operands, execute_this);
+ for_all_internal(idx_and_data.quant8ChannelOperands, execute_this);
+ for_all_internal(idx_and_data.quant16AsymmOperands, execute_this);
+ for_all_internal(idx_and_data.quant8SymmOperands, execute_this);
+ static_assert(9 == MixedTyped::kNumTypes,
+ "Number of types in MixedTyped changed, but for_all function wasn't updated");
}
// Const variant of internal helper for for_all
template <typename T>
-inline void for_all_internal(
- const MixedTyped& idx_and_data,
- std::function<void(int, const void*, size_t)> execute_this) {
+inline void for_all_internal(const std::map<int, std::vector<T>>& idx_and_data,
+ std::function<void(int, const void*, size_t)> execute_this) {
for_each<T>(idx_and_data, [&execute_this](int idx, const std::vector<T>& m) {
execute_this(idx, static_cast<const void*>(m.data()), m.size() * sizeof(T));
});
// Go through all index-value pairs (const variant)
// expects a functor that takes (int index, const void *raw data, size_t sz)
-inline void for_all(
- const MixedTyped& idx_and_data,
- std::function<void(int, const void*, size_t)> execute_this) {
- for_all_internal<float>(idx_and_data, execute_this);
- for_all_internal<int32_t>(idx_and_data, execute_this);
- for_all_internal<uint8_t>(idx_and_data, execute_this);
+inline void for_all(const MixedTyped& idx_and_data,
+ std::function<void(int, const void*, size_t)> execute_this) {
+ for_all_internal(idx_and_data.float32Operands, execute_this);
+ for_all_internal(idx_and_data.int32Operands, execute_this);
+ for_all_internal(idx_and_data.quant8AsymmOperands, execute_this);
+ for_all_internal(idx_and_data.quant16SymmOperands, execute_this);
+ // Fix for neurun: comment out float16Operands field
+ //for_all_internal(idx_and_data.float16Operands, execute_this);
+ for_all_internal(idx_and_data.bool8Operands, execute_this);
+ for_all_internal(idx_and_data.quant8ChannelOperands, execute_this);
+ for_all_internal(idx_and_data.quant16AsymmOperands, execute_this);
+ for_all_internal(idx_and_data.quant8SymmOperands, execute_this);
+ static_assert(
+ 9 == MixedTyped::kNumTypes,
+ "Number of types in MixedTyped changed, but const for_all function wasn't updated");
}
// Helper template - resize test output per golden
-template <typename ty, size_t tuple_index>
-void resize_accordingly_(const MixedTyped& golden, MixedTyped& test) {
- std::function<void(int, const std::vector<ty>&)> execute =
- [&test](int index, const std::vector<ty>& m) {
- auto& t = std::get<tuple_index>(test);
- t[index].resize(m.size());
- };
- for_each<ty>(golden, execute);
+template <typename T>
+inline void resize_accordingly_(const std::map<int, std::vector<T>>& golden,
+ std::map<int, std::vector<T>>& test) {
+ for_each<T>(golden, test,
+ [](int, const std::vector<T>& g, std::vector<T>& t) { t.resize(g.size()); });
+}
+
+template <>
+inline void resize_accordingly_<uint32_t>(const OperandDimensions& golden,
+ OperandDimensions& test) {
+ for_each<uint32_t>(
+ golden, test,
+ [](int, const std::vector<uint32_t>& g, std::vector<uint32_t>& t) { t = g; });
}
inline void resize_accordingly(const MixedTyped& golden, MixedTyped& test) {
- resize_accordingly_<float, 0>(golden, test);
- resize_accordingly_<int32_t, 1>(golden, test);
- resize_accordingly_<uint8_t, 2>(golden, test);
+ resize_accordingly_(golden.operandDimensions, test.operandDimensions);
+ resize_accordingly_(golden.float32Operands, test.float32Operands);
+ resize_accordingly_(golden.int32Operands, test.int32Operands);
+ resize_accordingly_(golden.quant8AsymmOperands, test.quant8AsymmOperands);
+ resize_accordingly_(golden.quant16SymmOperands, test.quant16SymmOperands);
+    // Fix for neurun: comment out float16Operands field
+ //resize_accordingly_(golden.float16Operands, test.float16Operands);
+ resize_accordingly_(golden.bool8Operands, test.bool8Operands);
+ resize_accordingly_(golden.quant8ChannelOperands, test.quant8ChannelOperands);
+ resize_accordingly_(golden.quant16AsymmOperands, test.quant16AsymmOperands);
+ resize_accordingly_(golden.quant8SymmOperands, test.quant8SymmOperands);
+ static_assert(9 == MixedTyped::kNumTypes,
+ "Number of types in MixedTyped changed, but resize_accordingly function wasn't "
+ "updated");
}
-template <typename ty, size_t tuple_index>
-void filter_internal(const MixedTyped& golden, MixedTyped* filtered,
- std::function<bool(int)> is_ignored) {
- for_each<ty>(golden,
- [filtered, &is_ignored](int index, const std::vector<ty>& m) {
- auto& g = std::get<tuple_index>(*filtered);
- if (!is_ignored(index)) g[index] = m;
- });
+template <typename T>
+void filter_internal(const std::map<int, std::vector<T>>& golden,
+ std::map<int, std::vector<T>>* filtered, std::function<bool(int)> is_ignored) {
+ for_each<T>(golden, [filtered, &is_ignored](int index, const std::vector<T>& m) {
+ auto& g = *filtered;
+ if (!is_ignored(index)) g[index] = m;
+ });
}
inline MixedTyped filter(const MixedTyped& golden,
std::function<bool(int)> is_ignored) {
MixedTyped filtered;
- filter_internal<float, 0>(golden, &filtered, is_ignored);
- filter_internal<int32_t, 1>(golden, &filtered, is_ignored);
- filter_internal<uint8_t, 2>(golden, &filtered, is_ignored);
+ filter_internal(golden.operandDimensions, &filtered.operandDimensions, is_ignored);
+ filter_internal(golden.float32Operands, &filtered.float32Operands, is_ignored);
+ filter_internal(golden.int32Operands, &filtered.int32Operands, is_ignored);
+ filter_internal(golden.quant8AsymmOperands, &filtered.quant8AsymmOperands, is_ignored);
+ filter_internal(golden.quant16SymmOperands, &filtered.quant16SymmOperands, is_ignored);
+ // Fix for neurun: comment out float16Operands field
+ //filter_internal(golden.float16Operands, &filtered.float16Operands, is_ignored);
+ filter_internal(golden.bool8Operands, &filtered.bool8Operands, is_ignored);
+ filter_internal(golden.quant8ChannelOperands, &filtered.quant8ChannelOperands, is_ignored);
+ filter_internal(golden.quant16AsymmOperands, &filtered.quant16AsymmOperands, is_ignored);
+ filter_internal(golden.quant8SymmOperands, &filtered.quant8SymmOperands, is_ignored);
+    static_assert(9 == MixedTyped::kNumTypes,
+                  "Number of types in MixedTyped changed, but filter function wasn't updated");
return filtered;
}
// Compare results
-#define VECTOR_TYPE(x) \
- typename std::tuple_element<x, MixedTyped>::type::mapped_type
-#define VALUE_TYPE(x) VECTOR_TYPE(x)::value_type
-template <size_t tuple_index>
-void compare_(
- const MixedTyped& golden, const MixedTyped& test,
- std::function<void(VALUE_TYPE(tuple_index), VALUE_TYPE(tuple_index))>
- cmp) {
- for_each<VALUE_TYPE(tuple_index)>(
- golden,
- [&test, &cmp](int index, const VECTOR_TYPE(tuple_index) & m) {
- const auto& test_operands = std::get<tuple_index>(test);
- const auto& test_ty = test_operands.find(index);
- ASSERT_NE(test_ty, test_operands.end());
- for (unsigned int i = 0; i < m.size(); i++) {
- SCOPED_TRACE(testing::Message()
- << "When comparing element " << i);
- cmp(m[i], test_ty->second[i]);
- }
- });
-}
-#undef VALUE_TYPE
-#undef VECTOR_TYPE
-inline void compare(const MixedTyped& golden, const MixedTyped& test, float fpRange = 1e-5f) {
- size_t totalNumberOfErrors = 0;
- compare_<0>(golden, test, [&totalNumberOfErrors, fpRange](float g, float t) {
- if (totalNumberOfErrors < gMaximumNumberOfErrorMessages) {
- EXPECT_NEAR(g, t, fpRange);
- }
- if (std::abs(g - t) > fpRange) {
- totalNumberOfErrors++;
- }
- });
- compare_<1>(golden, test, [&totalNumberOfErrors](int32_t g, int32_t t) {
- if (totalNumberOfErrors < gMaximumNumberOfErrorMessages) {
- EXPECT_EQ(g, t);
- }
- if (g != t) {
- totalNumberOfErrors++;
+template <typename T>
+void compare_(const std::map<int, std::vector<T>>& golden,
+ const std::map<int, std::vector<T>>& test, std::function<void(T, T)> cmp) {
+ for_each<T>(golden, test, [&cmp](int index, const std::vector<T>& g, const std::vector<T>& t) {
+ for (unsigned int i = 0; i < g.size(); i++) {
+ SCOPED_TRACE(testing::Message()
+ << "When comparing output " << index << " element " << i);
+ cmp(g[i], t[i]);
}
});
- compare_<2>(golden, test, [&totalNumberOfErrors](uint8_t g, uint8_t t) {
- if (totalNumberOfErrors < gMaximumNumberOfErrorMessages) {
- EXPECT_NEAR(g, t, 1);
+}
+
+// TODO: Allow passing accuracy criteria from spec.
+// Currently we only need relaxed accuracy criteria on mobilenet tests, so we return the quant8
+// tolerance simply based on the current test name.
+inline int getQuant8AllowedError() {
+ const ::testing::TestInfo* const testInfo =
+ ::testing::UnitTest::GetInstance()->current_test_info();
+ const std::string testCaseName = testInfo->test_case_name();
+ const std::string testName = testInfo->name();
+ // We relax the quant8 precision for all tests with mobilenet:
+ // - CTS/VTS GeneratedTest and DynamicOutputShapeTest with mobilenet
+ // - VTS CompilationCachingTest and CompilationCachingSecurityTest except for TOCTOU tests
+ if (testName.find("mobilenet") != std::string::npos ||
+ (testCaseName.find("CompilationCaching") != std::string::npos &&
+ testName.find("TOCTOU") == std::string::npos)) {
+ return 2;
+ } else {
+ return 1;
+ }
+}
+
+inline void compare(const MixedTyped& golden, const MixedTyped& test,
+ float fpAtol = 1e-5f, float fpRtol = 1e-5f) {
+ int quant8AllowedError = getQuant8AllowedError();
+ for_each<uint32_t>(
+ golden.operandDimensions, test.operandDimensions,
+ [](int index, const std::vector<uint32_t>& g, const std::vector<uint32_t>& t) {
+ SCOPED_TRACE(testing::Message()
+ << "When comparing dimensions for output " << index);
+ EXPECT_EQ(g, t);
+ });
+ size_t totalNumberOfErrors = 0;
+ compare_<float>(golden.float32Operands, test.float32Operands,
+ [&totalNumberOfErrors, fpAtol, fpRtol](float expected, float actual) {
+ // Compute the range based on both absolute tolerance and relative tolerance
+ float fpRange = fpAtol + fpRtol * std::abs(expected);
+ if (totalNumberOfErrors < gMaximumNumberOfErrorMessages) {
+ EXPECT_NEAR(expected, actual, fpRange);
+ }
+ if (std::abs(expected - actual) > fpRange) {
+ totalNumberOfErrors++;
+ }
+ });
+ compare_<int32_t>(golden.int32Operands, test.int32Operands,
+ [&totalNumberOfErrors](int32_t expected, int32_t actual) {
+ if (totalNumberOfErrors < gMaximumNumberOfErrorMessages) {
+ EXPECT_EQ(expected, actual);
+ }
+ if (expected != actual) {
+ totalNumberOfErrors++;
+ }
+ });
+ compare_<uint8_t>(golden.quant8AsymmOperands, test.quant8AsymmOperands,
+ [&totalNumberOfErrors, quant8AllowedError](uint8_t expected, uint8_t actual) {
+ if (totalNumberOfErrors < gMaximumNumberOfErrorMessages) {
+ EXPECT_NEAR(expected, actual, quant8AllowedError);
+ }
+ if (std::abs(expected - actual) > quant8AllowedError) {
+ totalNumberOfErrors++;
+ }
+ });
+ compare_<int16_t>(golden.quant16SymmOperands, test.quant16SymmOperands,
+ [&totalNumberOfErrors](int16_t expected, int16_t actual) {
+ if (totalNumberOfErrors < gMaximumNumberOfErrorMessages) {
+ EXPECT_NEAR(expected, actual, 1);
+ }
+ if (std::abs(expected - actual) > 1) {
+ totalNumberOfErrors++;
+ }
+ });
+ // Fix for neurun: comment out _Float16 compare
+ //compare_<_Float16>(golden.float16Operands, test.float16Operands,
+ // [&totalNumberOfErrors, fpAtol, fpRtol](_Float16 expected, _Float16 actual) {
+ // // Compute the range based on both absolute tolerance and relative
+ // // tolerance
+ // float fpRange = fpAtol + fpRtol * std::abs(static_cast<float>(expected));
+ // if (totalNumberOfErrors < gMaximumNumberOfErrorMessages) {
+ // EXPECT_NEAR(expected, actual, fpRange);
+ // }
+ // if (std::abs(static_cast<float>(expected - actual)) > fpRange) {
+ // totalNumberOfErrors++;
+ // }
+ // });
+ compare_<bool8>(golden.bool8Operands, test.bool8Operands,
+ [&totalNumberOfErrors](bool expected, bool actual) {
+ if (totalNumberOfErrors < gMaximumNumberOfErrorMessages) {
+ EXPECT_EQ(expected, actual);
+ }
+ if (expected != actual) {
+ totalNumberOfErrors++;
+ }
+ });
+ compare_<int8_t>(golden.quant8ChannelOperands, test.quant8ChannelOperands,
+ [&totalNumberOfErrors, &quant8AllowedError](int8_t expected, int8_t actual) {
+ if (totalNumberOfErrors < gMaximumNumberOfErrorMessages) {
+ EXPECT_NEAR(expected, actual, quant8AllowedError);
+ }
+ if (std::abs(static_cast<int>(expected) - static_cast<int>(actual)) >
+ quant8AllowedError) {
+ totalNumberOfErrors++;
+ }
+ });
+    compare_<uint16_t>(golden.quant16AsymmOperands, test.quant16AsymmOperands,
+                       [&totalNumberOfErrors](uint16_t expected, uint16_t actual) {
+ if (totalNumberOfErrors < gMaximumNumberOfErrorMessages) {
+ EXPECT_NEAR(expected, actual, 1);
+ }
+ if (std::abs(expected - actual) > 1) {
+ totalNumberOfErrors++;
+ }
+ });
+ compare_<int8_t>(golden.quant8SymmOperands, test.quant8SymmOperands,
+ [&totalNumberOfErrors, quant8AllowedError](int8_t expected, int8_t actual) {
+ if (totalNumberOfErrors < gMaximumNumberOfErrorMessages) {
+ EXPECT_NEAR(expected, actual, quant8AllowedError);
+ }
+ if (std::abs(static_cast<int>(expected) - static_cast<int>(actual)) >
+ quant8AllowedError) {
+ totalNumberOfErrors++;
+ }
+ });
+
+ static_assert(9 == MixedTyped::kNumTypes,
+ "Number of types in MixedTyped changed, but compare function wasn't updated");
+ EXPECT_EQ(size_t{0}, totalNumberOfErrors);
+}
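+// Tolerance note: a float output element passes the check above when
+//   |expected - actual| <= fpAtol + fpRtol * |expected|
+// so with the 1e-5 defaults, an expected value of 100.0f tolerates an
+// absolute error of up to about 1.01e-3.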
+
+// Calculates the expected probability from the unnormalized log-probability of
+// each class in the input and compares it to the actual occurrence of that class
+// in the output.
+inline void expectMultinomialDistributionWithinTolerance(const MixedTyped& test,
+ const MixedTypedExample& example) {
+ // TODO: These should be parameters but aren't currently preserved in the example.
+ const int kBatchSize = 1;
+ const int kNumClasses = 1024;
+ const int kNumSamples = 128;
+
+ std::vector<int32_t> output = test.int32Operands.at(0);
+ std::vector<int> class_counts;
+ class_counts.resize(kNumClasses);
+ for (int index : output) {
+ class_counts[index]++;
+ }
+ std::vector<float> input;
+ Float32Operands float32Operands = example.operands.first.float32Operands;
+ if (!float32Operands.empty()) {
+ input = example.operands.first.float32Operands.at(0);
+ } /*else {
+ // Fix for neurun: comment out convertFloat16ToFloat32
+ std::vector<_Float16> inputFloat16 = example.operands.first.float16Operands.at(0);
+ input.resize(inputFloat16.size());
+ convertFloat16ToFloat32(inputFloat16.data(), &input);
+ }*/
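+    // The loop below forms a softmax over the unnormalized log-probabilities,
+    //   p_i = exp(x_i) / sum_j exp(x_j),
+    // and checks each observed frequency class_counts[i] / kNumSamples
+    // against p_i within the example's tolerance.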
+ for (int b = 0; b < kBatchSize; ++b) {
+ float probability_sum = 0;
+        const int batch_index = kNumClasses * b;  // row offset into the flattened [batch, class] input
+ for (int i = 0; i < kNumClasses; ++i) {
+ probability_sum += expf(input[batch_index + i]);
}
- if (std::abs(g - t) > 1) {
- totalNumberOfErrors++;
+ for (int i = 0; i < kNumClasses; ++i) {
+ float probability =
+ static_cast<float>(class_counts[i]) / static_cast<float>(kNumSamples);
+ float probability_expected = expf(input[batch_index + i]) / probability_sum;
+ EXPECT_THAT(probability,
+ ::testing::FloatNear(probability_expected,
+ example.expectedMultinomialDistributionTolerance));
}
- });
- EXPECT_EQ(size_t{0}, totalNumberOfErrors);
+ }
}
-}; // namespace generated_tests
+}; // namespace test_helper
#endif // ANDROID_ML_NN_TOOLS_TEST_GENERATOR_TEST_HARNESS_H
### Fix for neurun
-- Update path in this README.md file for neurun NNAPI frontend test
+- Update path in this `README.md` file for neurun NNAPI frontend test
- `nn/runtime/test/specs/` => `tests/nnapi/specs/`
- $ANDROID_BUILD_TOP/frameworks/ml/nn/runtime/test/specs => $NNAS_PROJECT_PATH/tests/nnapi/specs
- Rebuild with mm afterwards => Rebuild afterwards (mm is not supported)
- `slicing.py`
- `tests/`
- `include/` (`TestHarness.h` is in `tests/nnapi/include`)
+- Update `cts_generator.py`
+  - Path regular expression used to derive relative #include paths (see the sanity check below):
+ `((frameworks/ml/nn/(runtime/test/)?)|(vendor/google/[a-z]*/test/))` => `(tests/nnapi/src/)`
+ - Support EX operation
+  - Replace c++14-only initialization with c++11-compatible constructors
+ - Comment out `TEST_AVAILABLE_SINCE()` macro generation
+ - Comment out unsupported `FLOAT16` on armv7 32bit architecture
+- Update `test_generator.py`
+ - Comment out dynamic shape output test generation
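+- Sanity check for the updated include-path regex (illustrative path; the
+  generator strips the matched prefix to form the `#include`):
+  ```python
+  import re
+  pathRegex = r".*(tests/nnapi/src/)"
+  path = "/path/to/nnas/tests/nnapi/src/generated/models/add.model.cpp"
+  print(re.sub(pathRegex, "", path))  # -> generated/models/add.model.cpp
+  ```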
---
#include "{model_file}"
}} // namespace {spec_name}\n"""
# This regex is to remove prefix and get relative path for #include
- pathRegex = r".*((frameworks/ml/nn/(runtime/test/)?)|(vendor/google/[a-z]*/test/))"
+ # Fix for neurun: update path
+ pathRegex = r".*(tests/nnapi/src/)"
specFileBase = os.path.basename(tg.FileNames.specFile)
print(fileHeader.format(spec_file=specFileBase), file=model_fd)
print(fileHeader.format(spec_file=specFileBase), file=example_fd)
p.initializer, p.type.GetCppTypeString(), p.type.GetNumberOfElements())
IndentedPrint(paramDef, file=model_fd)
for op in model.operations:
- IndentedPrint("model->addOperation(ANEURALNETWORKS_%s, {%s}, {%s});"%(
- op.optype, tg.GetJointStr(op.ins), tg.GetJointStr(op.outs)), file=model_fd)
+ # Fix for neurun: EX operation
+ if re.search('_EX$', op.optype):
+ IndentedPrint("model->addOperationEx(ANEURALNETWORKS_%s, {%s}, {%s});"%(
+ op.optype, tg.GetJointStr(op.ins), tg.GetJointStr(op.outs)), file=model_fd)
+ else:
+ IndentedPrint("model->addOperation(ANEURALNETWORKS_%s, {%s}, {%s});"%(
+ op.optype, tg.GetJointStr(op.ins), tg.GetJointStr(op.outs)), file=model_fd)
# Phase 3: add inputs and outputs
print (" // Phase 3, inputs and outputs", file=model_fd)
except KeyError as e:
traceback.print_exc()
sys.exit("Cannot dump tensor of type {}".format(operand.type.type))
+    # Fix for neurun: fix designated initializers (not supported in c++11)
+ # comment out FLOAT16 type
mixedTypeTemplate = """\
{{ // See tools/test_generator/include/TestHarness.h:MixedTyped
// int -> Dimensions map
- .operandDimensions = {{{dimensions_map}}},
+ {{{dimensions_map}}},
// int -> FLOAT32 map
- .float32Operands = {{{float32_map}}},
+ {{{float32_map}}},
// int -> INT32 map
- .int32Operands = {{{int32_map}}},
+ {{{int32_map}}},
// int -> QUANT8_ASYMM map
- .quant8AsymmOperands = {{{uint8_map}}},
+ {{{uint8_map}}},
// int -> QUANT16_SYMM map
- .quant16SymmOperands = {{{int16_map}}},
+ {{{int16_map}}},
// int -> FLOAT16 map
- .float16Operands = {{{float16_map}}},
+ //{{{float16_map}}},
// int -> BOOL8 map
- .bool8Operands = {{{bool8_map}}},
+ {{{bool8_map}}},
// int -> QUANT8_SYMM_PER_CHANNEL map
- .quant8ChannelOperands = {{{int8_map}}},
+ {{{int8_map}}},
// int -> QUANT16_ASYMM map
- .quant16AsymmOperands = {{{uint16_map}}},
+ {{{uint16_map}}},
// int -> QUANT8_SYMM map
- .quant8SymmOperands = {{{quant8_symm_map}}},
+ {{{quant8_symm_map}}},
}}"""
return mixedTypeTemplate.format(
dimensions_map=tg.GetJointStr(typedMap.get("DIMENSIONS", [])),
print("std::vector<MixedTypedExample>& get_%s() {" % (example.examplesName), file=example_fd)
print("static std::vector<MixedTypedExample> %s = {" % (example.examplesName), file=example_fd)
for inputFeedDict, outputFeedDict in example.feedDicts:
+        # Fix designated initializer (not supported in c++11)
print ('// Begin of an example', file = example_fd)
- print ('{\n.operands = {', file = example_fd)
+ print ('{\n {', file = example_fd)
inputs = DumpMixedType(example.model.GetInputs(), inputFeedDict)
outputs = DumpMixedType(example.model.GetOutputs(), outputFeedDict)
print ('//Input(s)\n%s,' % inputs , file = example_fd)
print ('//Output(s)\n%s' % outputs, file = example_fd)
print ('},', file = example_fd)
+        # Fix designated initializer (not supported in c++11)
if example.expectedMultinomialDistributionTolerance is not None:
- print ('.expectedMultinomialDistributionTolerance = %f' %
+ print ('%f' %
example.expectedMultinomialDistributionTolerance, file = example_fd)
+ else:
+ print ('0.0', file = example_fd)
print ('}, // End of an example', file = example_fd)
print("};", file=example_fd)
print("return %s;" % (example.examplesName), file=example_fd)
execute({namespace}::{create_model_name},
{namespace}::{is_ignored_name},
{namespace}::get_{examples_name}(){log_file});\n}}\n"""
- if example.model.version is not None:
- testTemplate += """\
-TEST_AVAILABLE_SINCE({version}, {test_name}, {namespace}::{create_model_name})\n"""
+ # Fix for neurun: Remove version check
+ #if example.model.version is not None:
+ #testTemplate += """\
+#TEST_AVAILABLE_SINCE({version}, {test_name}, {namespace}::{create_model_name})\n"""
print(testTemplate.format(
test_case_name="DynamicOutputShapeTest" if example.model.hasDynamicOutputShape \
else "GeneratedTests",
))
else:
assert False
- if Configuration.test_dynamic_output_shape:
- self.variations = [[DefaultVariation(), DynamicOutputShapeConverter()]]
- else:
- self.variations = []
+ # Fix for neurun: disable dynamic shape test generation
+ #if Configuration.test_dynamic_output_shape:
+ #self.variations = [[DefaultVariation(), DynamicOutputShapeConverter()]]
+ #else:
+ self.variations = []
Example.examples.append(self)
@staticmethod
# See the License for the specific language governing permissions and
# limitations under the License.
+set -Eeuo pipefail
+
NNAPI_VERSION="
V1_0
V1_1
spec_dir=$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )
export NNAPI_BASE=$(readlink -f "$spec_dir/../../..")
-[ -z "$TEST_DIR" ] && TEST_DIR="tests/nnapi"
+: ${TEST_DIR:=tests/nnapi}
+: ${FORCE:=""}
+
+echo "NNAPI_BASE: ${NNAPI_BASE}"
+echo "TEST_DIR: ${TEST_DIR}"
+echo "FORCE: ${FORCE}"
function generate_one_testcase {
# Generate one testcase
local LOGFILE=$2
if [ -n "$2" ]; then
- local LOGFILE=", \"$2\""
+ local LOGFILE="-l $2"
fi
- local BASENAME=`basename $1`
- BASENAME=${BASENAME%".mod.py"};
+ local BASENAME=`basename -s .mod.py $1`
+ local MODEL="-m $NNAPI_BASE/$TEST_DIR/src/generated/models/$BASENAME.model.cpp"
local EXAMPLE="-e $NNAPI_BASE/$TEST_DIR/src/generated/examples/$BASENAME.example.cpp"
+ local TEST="-t $NNAPI_BASE/$TEST_DIR/src/generated/tests/$(basename $1).cpp"
- $NNAPI_BASE/$TEST_DIR/nnapi_test_generator/android-p/test_generator.py ./`basename $1`\
- -m $NNAPI_BASE/$TEST_DIR/src/generated/models/$BASENAME.model.cpp $EXAMPLE
+ $NNAPI_BASE/$TEST_DIR/nnapi_test_generator/android-10/cts_generator.py $FORCE ./`basename $1` \
+ $MODEL $EXAMPLE $TEST $LOGFILE
ret=$?
- # Paste these lines into TestGenerated.cpp
- echo
- echo namespace $BASENAME {
- echo std::vector\<MixedTypedExample\> examples \= {
- echo // Generated $BASENAME test
- echo \#include \"generated/examples/$BASENAME.example.cpp\"
- echo }\;
- echo // Generated model constructor
- echo \#include \"generated/models/$BASENAME.model.cpp\"
- echo } // namespace $BASENAME
- echo TEST_F\(GeneratedTests\, $BASENAME\) {
- echo ' execute'\($BASENAME\:\:CreateModel\,
- echo ' '$BASENAME\:\:is_ignored\,
- echo ' '$BASENAME\:\:examples${LOGFILE}\)\;
- echo }
return $ret
}
shift
shift
fi
+ cd $NNAPI_BASE/$TEST_DIR/specs
+ FOUND=0
+
+ CTSONEFILE=$NNAPI_BASE/$TEST_DIR/for-cts/TestGeneratedOneFile.cpp
+ echo "// clang-format off" > $CTSONEFILE
+ echo "// DO NOT EDIT;" >> $CTSONEFILE
+ echo "// Generated by ml/nn/runtime/test/specs/generate_test.sh" >> $CTSONEFILE
+ echo "#include \"../GeneratedUtils.cpp\"" >> $CTSONEFILE
+
+ for ver in $NNAPI_VERSION;
+ do
+ VER_DIR=$NNAPI_BASE/$TEST_DIR/specs/$ver
+ [ ! -d $VER_DIR ] && continue
+ pushd $VER_DIR > /dev/null
+ for f in $@;
+ do
+ if [ -f $(basename $f) ]; then
+ generate_one_testcase $f "$LOGFILE" >> $CTSONEFILE
+ if [ $? -ne 0 ]; then
+ echo "Failed processing $f"
+        return 1
+ fi
+ FOUND=1
+ fi
+ done
+ popd > /dev/null
+ done
+ if [[ $FOUND -eq 0 ]]; then
+    echo "Did not find any files for $@"
+ exit 1
+ fi
+ return $?
+}
+# Process all test spec directories specified by NNAPI_VERSION.
+function generate_spec_dirs {
mkdir -vp $NNAPI_BASE/$TEST_DIR/src/generated/models
mkdir -vp $NNAPI_BASE/$TEST_DIR/src/generated/examples
+ mkdir -vp $NNAPI_BASE/$TEST_DIR/src/generated/tests
cd $NNAPI_BASE/$TEST_DIR/specs
for ver in $NNAPI_VERSION;
do
- OUTFILE=$NNAPI_BASE/$TEST_DIR/src/generated/all_generated_${ver}_cts_tests.cpp
- echo "// DO NOT EDIT;" > $OUTFILE
- echo "// Generated by ${TEST_DIR}/specs/generate_test.sh" >> $OUTFILE
-
+ echo "start to generate ver: ${ver}"
VER_DIR=$NNAPI_BASE/$TEST_DIR/specs/$ver
[ ! -d $VER_DIR ] && continue
- cd $VER_DIR
- for f in $@;
- do
- generate_one_testcase $f $LOGFILE >> $OUTFILE
- if [ $? -ne 0 ]; then
- echo "Failed processing $f"
+ pushd $VER_DIR > /dev/null
+
+ TARGET_MODEL_DIR="-m $NNAPI_BASE/$TEST_DIR/src/generated/models"
+ TARGET_EXAMPLE_DIR="-e $NNAPI_BASE/$TEST_DIR/src/generated/examples"
+ TARGET_TEST_DIR="-t $NNAPI_BASE/$TEST_DIR/src/generated/tests"
+
+ $NNAPI_BASE/$TEST_DIR/nnapi_test_generator/android-10/cts_generator.py $FORCE $VER_DIR \
+ $TARGET_MODEL_DIR $TARGET_EXAMPLE_DIR $TARGET_TEST_DIR
+ if [ $? -ne 0 ]; then
+ echo "Failed processing $VER_DIR"
return $?
- fi
+ fi
+
+ # Workaround for cmake
+ OUTFILE="$NNAPI_BASE/$TEST_DIR/src/generated/all_generated_${ver}_cts_tests.cpp"
+ echo "Generating $OUTFILE"
+ echo "// Generated by tests/nnapi/specs/generate_test.sh" > "$OUTFILE"
+ echo "// DO NOT EDIT" >>"$OUTFILE"
+ echo "// clang-format off" >>"$OUTFILE"
+ for spec in "$NNAPI_BASE/$TEST_DIR/specs/${ver}/"*.mod.py; do
+ BASENAME="$(basename "$spec")"
+ echo "#include "\""generated/tests/${BASENAME}.cpp"\" >>"$OUTFILE"
done
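+    # The aggregate file then reduces to a list of includes, e.g. (illustrative):
+    #   // Generated by tests/nnapi/specs/generate_test.sh
+    #   // DO NOT EDIT
+    #   // clang-format off
+    #   #include "generated/tests/add.mod.py.cpp"
+    #   #include "generated/tests/conv_float.mod.py.cpp"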
+
+ popd > /dev/null
done
return $?
}
# Only run the following when not sourced by another script
if [[ "${BASH_SOURCE[0]}" == "${0}" ]]; then
+ set -eu
+ if [ $# -gt 0 ]; then
+ if [ $1 = "-f" ] || [ $1 = "--force" ]; then
+ FORCE="-f"
+ shift
+ fi
+ fi
if [ $# -eq 0 ]; then
- FILES=*.mod.py
+ generate_spec_dirs $FORCE
else
FILES="$@"
+ generate_wrapper $FILES
fi
- generate_wrapper $FILES
if [ $? -ne 0 ]; then
exit $?
fi
- echo "Generated file in ${TEST_DIR}/src/generated/"`basename $OUTFILE`
fi # [[ "${BASH_SOURCE[0]}" == "${0}" ]]
/*
* Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * generated/all_generated_tests.cpp that is included in the bottom line
- * in file is also modified.
- *
* Copyright (C) 2017 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* limitations under the License.
*/
-// Top level driver for models and examples generated by test_generator.py
-
-#include "NeuralNetworksWrapper.h"
+#include "TestGenerated.h"
#include "TestHarness.h"
#include <gtest/gtest.h>
+
+#include <ftw.h>
+#include <unistd.h>
#include <cassert>
#include <cmath>
#include <fstream>
#include <iostream>
#include <map>
+#include <thread>
-// Uncomment the following line to generate DOT graphs.
-//
-// #define GRAPH GRAPH
+// Systrace is not available from CTS tests due to platform layering
+// constraints. We reuse the NNTEST_ONLY_PUBLIC_API flag, as that should also be
+// the case for CTS (public APIs only).
+// NNFW Fix: Always use NNTEST_ONLY_PUBLIC_API
+//#ifndef NNTEST_ONLY_PUBLIC_API
+//#include "Tracing.h"
+//#else
+#define NNTRACE_FULL_RAW(...)
+#define NNTRACE_APP(...)
+#define NNTRACE_APP_SWITCH(...)
+//#endif
namespace generated_tests {
-using namespace nnfw::rt::wrapper;
+using namespace nnfw::rt::test_wrapper;
+using namespace test_helper;
+namespace {
template <typename T>
-static void print(std::ostream& os, const MixedTyped& test) {
+void print(std::ostream& os, const std::map<int, std::vector<T>>& test) {
// dump T-typed inputs
for_each<T>(test, [&os](int idx, const std::vector<T>& f) {
os << " aliased_output" << idx << ": [";
});
}
-static void printAll(std::ostream& os, const MixedTyped& test) {
- print<float>(os, test);
- print<int32_t>(os, test);
- print<uint8_t>(os, test);
+// Specialized for _Float16 because it requires explicit conversion.
+// Fix for neurun: comment out
+//template <>
+//void print<_Float16>(std::ostream& os, const std::map<int, std::vector<_Float16>>& test) {
+// for_each<_Float16>(test, [&os](int idx, const std::vector<_Float16>& f) {
+// os << " aliased_output" << idx << ": [";
+// for (size_t i = 0; i < f.size(); ++i) {
+// os << (i == 0 ? "" : ", ") << +static_cast<float>(f[i]);
+// }
+// os << "],\n";
+// });
+//}
+
+void printAll(std::ostream& os, const MixedTyped& test) {
+ print(os, test.float32Operands);
+ print(os, test.int32Operands);
+ print(os, test.quant8AsymmOperands);
+ print(os, test.quant16SymmOperands);
+ // Fix for neurun: comment out
+ //print(os, test.float16Operands);
+ print(os, test.bool8Operands);
+ print(os, test.quant8ChannelOperands);
+ print(os, test.quant16AsymmOperands);
+ print(os, test.quant8SymmOperands);
+ static_assert(9 == MixedTyped::kNumTypes,
+ "Number of types in MixedTyped changed, but printAll function wasn't updated");
}
+} // namespace
-// Test driver for those generated from ml/nn/runtime/test/spec
-static void execute(std::function<void(Model*)> createModel,
- std::function<bool(int)> isIgnored,
- std::vector<MixedTypedExampleType>& examples,
- std::string dumpFile = "") {
- Model model;
- createModel(&model);
- model.finish();
- bool dumpToFile = !dumpFile.empty();
+Compilation GeneratedTests::compileModel(const Model* model) {
+ NNTRACE_APP(NNTRACE_PHASE_COMPILATION, "compileModel");
+ if (mTestCompilationCaching) {
+ // Compile the model twice with the same token, so that compilation caching will be
+ // exercised if supported by the driver.
+ Compilation compilation1(model);
+ compilation1.setCaching(mCacheDir, mToken);
+ compilation1.finish();
+ Compilation compilation2(model);
+ compilation2.setCaching(mCacheDir, mToken);
+ compilation2.finish();
+ return compilation2;
+ } else {
+ Compilation compilation(model);
+ compilation.finish();
+ return compilation;
+ }
+}
+void GeneratedTests::executeWithCompilation(const Model* model, Compilation* compilation,
+ std::function<bool(int)> isIgnored,
+ std::vector<MixedTypedExample>& examples,
+ std::string dumpFile) {
+ bool dumpToFile = !dumpFile.empty();
std::ofstream s;
if (dumpToFile) {
s.open(dumpFile, std::ofstream::trunc);
}
int exampleNo = 0;
- Compilation compilation(&model);
- compilation.finish();
-
- const float fpRange = 1e-5f;
+ float fpAtol = 1e-5f;
+ float fpRtol = 5.0f * 1.1920928955078125e-7f;
for (auto& example : examples) {
+ NNTRACE_APP(NNTRACE_PHASE_EXECUTION, "executeWithCompilation example");
SCOPED_TRACE(exampleNo);
// TODO: We leave it as a copy here.
// Should verify if the input gets modified by the test later.
- MixedTyped inputs = example.first;
- const MixedTyped& golden = example.second;
-
- Execution execution(&compilation);
+ MixedTyped inputs = example.operands.first;
+ const MixedTyped& golden = example.operands.second;
- // Set all inputs
- for_all(inputs, [&execution](int idx, const void* p, size_t s) {
- const void* buffer = s == 0 ? nullptr : p;
- ASSERT_EQ(Result::NO_ERROR, execution.setInput(idx, buffer, s));
- });
+ // NNFW Fix: comment out using hasFloat16Inputs
+ //const bool hasFloat16Inputs = !inputs.float16Operands.empty();
+ if (model->isRelaxed()/* || hasFloat16Inputs*/) {
+ // TODO: Adjust the error limit based on testing.
+ // If in relaxed mode, set the absolute tolerance to be 5ULP of FP16.
+ fpAtol = 5.0f * 0.0009765625f;
+ // Set the relative tolerance to be 5ULP of the corresponding FP precision.
+ fpRtol = 5.0f * 0.0009765625f;
+ }
+ Execution execution(compilation);
MixedTyped test;
- // Go through all typed outputs
- resize_accordingly(golden, test);
- for_all(test, [&execution](int idx, void* p, size_t s) {
- void* buffer = s == 0 ? nullptr : p;
- ASSERT_EQ(Result::NO_ERROR, execution.setOutput(idx, buffer, s));
- });
+ {
+ NNTRACE_APP(NNTRACE_PHASE_INPUTS_AND_OUTPUTS, "executeWithCompilation example");
+ // Set all inputs
+ for_all(inputs, [&execution](int idx, const void* p, size_t s) {
+ const void* buffer = s == 0 ? nullptr : p;
+ ASSERT_EQ(Result::NO_ERROR, execution.setInput(idx, buffer, s));
+ });
+
+ // Go through all typed outputs
+ resize_accordingly(golden, test);
+ for_all(test, [&execution](int idx, void* p, size_t s) {
+ void* buffer = s == 0 ? nullptr : p;
+ ASSERT_EQ(Result::NO_ERROR, execution.setOutput(idx, buffer, s));
+ });
+ }
Result r = execution.compute();
ASSERT_EQ(Result::NO_ERROR, r);
+ {
+ NNTRACE_APP(NNTRACE_PHASE_RESULTS, "executeWithCompilation example");
- // Dump all outputs for the slicing tool
- if (dumpToFile) {
- s << "output" << exampleNo << " = {\n";
- printAll(s, test);
- // all outputs are done
- s << "}\n";
- }
+ // Get output dimensions
+ for_each<uint32_t>(
+ test.operandDimensions, [&execution](int idx, std::vector<uint32_t>& t) {
+ ASSERT_EQ(Result::NO_ERROR, execution.getOutputOperandDimensions(idx, &t));
+ });
+
+ // Dump all outputs for the slicing tool
+ if (dumpToFile) {
+ s << "output" << exampleNo << " = {\n";
+ printAll(s, test);
+ // all outputs are done
+ s << "}\n";
+ }
- // Filter out don't cares
- MixedTyped filteredGolden = filter(golden, isIgnored);
- MixedTyped filteredTest = filter(test, isIgnored);
- // We want "close-enough" results for float
+ // Filter out don't cares
+ MixedTyped filteredGolden = filter(golden, isIgnored);
+ MixedTyped filteredTest = filter(test, isIgnored);
+ // We want "close-enough" results for float
- compare(filteredGolden, filteredTest, fpRange);
+ compare(filteredGolden, filteredTest, fpAtol, fpRtol);
+ }
exampleNo++;
+
+ if (example.expectedMultinomialDistributionTolerance > 0) {
+ expectMultinomialDistributionWithinTolerance(test, example);
+ }
+ }
+}
+
+void GeneratedTests::executeOnce(const Model* model, std::function<bool(int)> isIgnored,
+ std::vector<MixedTypedExample>& examples, std::string dumpFile) {
+ NNTRACE_APP(NNTRACE_PHASE_OVERALL, "executeOnce");
+ Compilation compilation = compileModel(model);
+ executeWithCompilation(model, &compilation, isIgnored, examples, dumpFile);
+}
+
+void GeneratedTests::executeMultithreadedOwnCompilation(const Model* model,
+ std::function<bool(int)> isIgnored,
+ std::vector<MixedTypedExample>& examples) {
+ NNTRACE_APP(NNTRACE_PHASE_OVERALL, "executeMultithreadedOwnCompilation");
+ SCOPED_TRACE("MultithreadedOwnCompilation");
+ std::vector<std::thread> threads;
+ for (int i = 0; i < 10; i++) {
+ threads.push_back(std::thread([&]() { executeOnce(model, isIgnored, examples, ""); }));
+ }
+ std::for_each(threads.begin(), threads.end(), [](std::thread& t) { t.join(); });
+}
+
+void GeneratedTests::executeMultithreadedSharedCompilation(
+ const Model* model, std::function<bool(int)> isIgnored,
+ std::vector<MixedTypedExample>& examples) {
+ NNTRACE_APP(NNTRACE_PHASE_OVERALL, "executeMultithreadedSharedCompilation");
+ SCOPED_TRACE("MultithreadedSharedCompilation");
+ Compilation compilation = compileModel(model);
+ std::vector<std::thread> threads;
+ for (int i = 0; i < 10; i++) {
+ threads.push_back(std::thread(
+ [&]() { executeWithCompilation(model, &compilation, isIgnored, examples, ""); }));
}
+ std::for_each(threads.begin(), threads.end(), [](std::thread& t) { t.join(); });
}
-}; // namespace generated_tests
+// Test driver for those generated from ml/nn/runtime/test/spec
+void GeneratedTests::execute(std::function<void(Model*)> createModel,
+ std::function<bool(int)> isIgnored,
+ std::vector<MixedTypedExample>& examples,
+ [[maybe_unused]] std::string dumpFile) {
+ NNTRACE_APP(NNTRACE_PHASE_OVERALL, "execute");
+ Model model;
+ createModel(&model);
+ model.finish();
+ auto executeInternal = [&model, &isIgnored, &examples,
+ this]([[maybe_unused]] std::string dumpFile) {
+ SCOPED_TRACE("TestCompilationCaching = " + std::to_string(mTestCompilationCaching));
+#ifndef NNTEST_MULTITHREADED
+ executeOnce(&model, isIgnored, examples, dumpFile);
+#else // defined(NNTEST_MULTITHREADED)
+ executeMultithreadedOwnCompilation(&model, isIgnored, examples);
+ executeMultithreadedSharedCompilation(&model, isIgnored, examples);
+#endif // !defined(NNTEST_MULTITHREADED)
+ };
+
+ mTestCompilationCaching = false;
+// Fix for neurun: unsupported feature - compilation caching
+// TODO Enable this
+#if 0
+ executeInternal(dumpFile);
+ mTestCompilationCaching = true;
+#endif
+ executeInternal("");
+}
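+
+// For reference, a test emitted by cts_generator.py's template boils down to
+// (names illustrative):
+//   TEST_F(GeneratedTests, add) {
+//       execute(add::CreateModel, add::is_ignored, add::get_examples());
+//   }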
-using namespace nnfw::rt::wrapper;
+void GeneratedTests::SetUp() {
+#ifdef NNTEST_COMPUTE_MODE
+ mOldComputeMode = Execution::setComputeMode(GetParam());
+#endif
+ // Fix for neurun: Fix file path for linux
+ char cacheDirTemp[] = "/tmp/TestCompilationCachingXXXXXX";
+ //char cacheDirTemp[] = "/data/local/tmp/TestCompilationCachingXXXXXX";
+ char* cacheDir = mkdtemp(cacheDirTemp);
+ ASSERT_NE(cacheDir, nullptr);
+ mCacheDir = cacheDir;
+ mToken = std::vector<uint8_t>(ANEURALNETWORKS_BYTE_SIZE_OF_CACHE_TOKEN, 0);
+}
-// Mixed-typed examples
-typedef generated_tests::MixedTypedExampleType MixedTypedExample;
+void GeneratedTests::TearDown() {
+#ifdef NNTEST_COMPUTE_MODE
+ Execution::setComputeMode(mOldComputeMode);
+#endif
+ if (!::testing::Test::HasFailure()) {
+ // TODO: Switch to std::filesystem::remove_all once libc++fs is made available in CTS.
+ // Remove the cache directory specified by path recursively.
+ auto callback = [](const char* child, const struct stat*, int, struct FTW*) {
+ return remove(child);
+ };
+ nftw(mCacheDir.c_str(), callback, 128, FTW_DEPTH | FTW_MOUNT | FTW_PHYS);
+ }
+}
-class GeneratedTests : public ::testing::Test {
- protected:
- virtual void SetUp() {}
-};
+#ifdef NNTEST_COMPUTE_MODE
+INSTANTIATE_TEST_SUITE_P(ComputeMode, GeneratedTests,
+ testing::Values(Execution::ComputeMode::SYNC,
+ Execution::ComputeMode::ASYNC,
+ Execution::ComputeMode::BURST));
+#endif
-// Testcases generated from runtime/test/specs/*.mod.py
-using namespace generated_tests;
-#include "generated/all_generated_tests.cpp"
-// End of testcases generated from runtime/test/specs/*.mod.py
+} // namespace generated_tests
--- /dev/null
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_FRAMEWORK_ML_NN_RUNTIME_TEST_TESTGENERATED_H
+#define ANDROID_FRAMEWORK_ML_NN_RUNTIME_TEST_TESTGENERATED_H
+
+#include <gtest/gtest.h>
+
+// Fix for NNFW: comment out include TestCompliance.h
+//#include "TestCompliance.h"
+#include "TestHarness.h"
+#include "TestNeuralNetworksWrapper.h"
+
+#ifdef NNTEST_CTS
+#define NNTEST_COMPUTE_MODE
+#endif
+
+#ifdef NNTEST_COMPUTE_MODE
+#define GENERATED_TESTS_BASE testing::TestWithParam<Execution::ComputeMode>
+#undef TEST_F
+#define TEST_F TEST_P
+// Only generated tests include the TestGenerated.h header file, so only those
+// tests will be affected by changing their TEST_F to TEST_P. If we
+// accidentally change TEST_F to TEST_P in some other context, we will get a
+// compile-time failure, because TEST_F requires a non-value-parameterized
+// fixture class whereas TEST_P requires a value-parameterized fixture class.
+//
+// Example failure:
+//
+// clang-format off
+// gtest-param-util.h:488:41: error: no type named 'ParamType' in '(anonymous namespace)::MemoryTest'
+// using ParamType = typename TestSuite::ParamType;
+// ~~~~~~~~~~~~~~~~~~~~^~~~~~~~~
+// TestMemory.cpp:43:1: note: in instantiation of template class 'testing::internal::ParameterizedTestSuiteInfo<(anonymous namespace)::MemoryTest>' requested here
+// TEST_P(MemoryTest, TestFd) {
+// ^
+// gtest-param-test.h:428:11: note: expanded from macro 'TEST_P'
+// clang-format on
+#else
+#define GENERATED_TESTS_BASE ::testing::Test
+#endif
+
+using namespace nnfw::rt::test_wrapper;
+using namespace test_helper;
+
+namespace generated_tests {
+
+class GeneratedTests : public GENERATED_TESTS_BASE {
+ protected:
+ virtual void SetUp() override;
+ virtual void TearDown() override;
+
+ Compilation compileModel(const Model* model);
+ void executeWithCompilation(const Model* model, Compilation* compilation,
+ std::function<bool(int)> isIgnored,
+ std::vector<MixedTypedExample>& examples, std::string dumpFile);
+ void executeOnce(const Model* model, std::function<bool(int)> isIgnored,
+ std::vector<MixedTypedExample>& examples, std::string dumpFile);
+ void executeMultithreadedOwnCompilation(const Model* model, std::function<bool(int)> isIgnored,
+ std::vector<MixedTypedExample>& examples);
+ void executeMultithreadedSharedCompilation(const Model* model,
+ std::function<bool(int)> isIgnored,
+ std::vector<MixedTypedExample>& examples);
+ // Test driver for those generated from ml/nn/runtime/test/spec
+ void execute(std::function<void(Model*)> createModel, std::function<bool(int)> isIgnored,
+ std::vector<MixedTypedExample>& examples, std::string dumpFile = "");
+
+ std::string mCacheDir;
+ std::vector<uint8_t> mToken;
+ bool mTestCompilationCaching;
+#ifdef NNTEST_COMPUTE_MODE
+ // SetUp() uses Execution::setComputeMode() to establish a new ComputeMode,
+ // and saves off the previous ComputeMode here; TearDown() restores that
+ // previous ComputeMode, so that subsequent tests will not be affected by
+ // the SetUp() ComputeMode change.
+ Execution::ComputeMode mOldComputeMode;
+#endif
+};
+
+// Tag for the dynamic output shape tests
+class DynamicOutputShapeTest : public GeneratedTests {};
+
+} // namespace generated_tests
+
+using namespace generated_tests;
+
+#endif // ANDROID_FRAMEWORK_ML_NN_RUNTIME_TEST_TESTGENERATED_H
* limitations under the License.
*/
-#include "TestGenerated_common.cpp"
+#include "TestGenerated.h"
using namespace generated_tests;
#include "generated/all_generated_Ex_cts_tests.cpp"
* limitations under the License.
*/
-#include "TestGenerated_common.cpp"
+#include "TestGenerated.h"
using namespace generated_tests;
#include "generated/all_generated_V1_0_cts_tests.cpp"
* limitations under the License.
*/
-#include "TestGenerated_common.cpp"
+#include "TestGenerated.h"
using namespace generated_tests;
#include "generated/all_generated_V1_1_cts_tests.cpp"
+++ /dev/null
-/*
- * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * generated/all_generated_tests.cpp that is included in the bottom line
- * in file is also modified.
- *
- * Copyright (C) 2017 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-// Top level driver for models and examples generated by test_generator.py
-
-#include "NeuralNetworksWrapper.h"
-#include "TestHarness.h"
-
-#include <gtest/gtest.h>
-#include <cassert>
-#include <cmath>
-#include <fstream>
-#include <iostream>
-#include <map>
-
-// Uncomment the following line to generate DOT graphs.
-//
-// #define GRAPH GRAPH
-
-namespace generated_tests {
-using namespace nnfw::rt::wrapper;
-
-template <typename T>
-static void print(std::ostream& os, const MixedTyped& test) {
- // dump T-typed inputs
- for_each<T>(test, [&os](int idx, const std::vector<T>& f) {
- os << " aliased_output" << idx << ": [";
- for (size_t i = 0; i < f.size(); ++i) {
- os << (i == 0 ? "" : ", ") << +f[i];
- }
- os << "],\n";
- });
-}
-
-static void printAll(std::ostream& os, const MixedTyped& test) {
- print<float>(os, test);
- print<int32_t>(os, test);
- print<uint8_t>(os, test);
-}
-
-// Test driver for those generated from ml/nn/runtime/test/spec
-static void execute(std::function<void(Model*)> createModel,
- std::function<bool(int)> isIgnored,
- std::vector<MixedTypedExampleType>& examples,
- std::string dumpFile = "") {
- Model model;
- createModel(&model);
- model.finish();
- bool dumpToFile = !dumpFile.empty();
-
- std::ofstream s;
- if (dumpToFile) {
- s.open(dumpFile, std::ofstream::trunc);
- ASSERT_TRUE(s.is_open());
- }
-
- int exampleNo = 0;
- Compilation compilation(&model);
- compilation.finish();
-
- const float fpRange = 1e-5f;
- for (auto& example : examples) {
- SCOPED_TRACE(exampleNo);
- // TODO: We leave it as a copy here.
- // Should verify if the input gets modified by the test later.
- MixedTyped inputs = example.first;
- const MixedTyped& golden = example.second;
-
- Execution execution(&compilation);
-
- // Set all inputs
- for_all(inputs, [&execution](int idx, const void* p, size_t s) {
- const void* buffer = s == 0 ? nullptr : p;
- ASSERT_EQ(Result::NO_ERROR, execution.setInput(idx, buffer, s));
- });
-
- MixedTyped test;
- // Go through all typed outputs
- resize_accordingly(golden, test);
- for_all(test, [&execution](int idx, void* p, size_t s) {
- void* buffer = s == 0 ? nullptr : p;
- ASSERT_EQ(Result::NO_ERROR, execution.setOutput(idx, buffer, s));
- });
-
- Result r = execution.compute();
- ASSERT_EQ(Result::NO_ERROR, r);
-
- // Dump all outputs for the slicing tool
- if (dumpToFile) {
- s << "output" << exampleNo << " = {\n";
- printAll(s, test);
- // all outputs are done
- s << "}\n";
- }
-
- // Filter out don't cares
- MixedTyped filteredGolden = filter(golden, isIgnored);
- MixedTyped filteredTest = filter(test, isIgnored);
- // We want "close-enough" results for float
-
- compare(filteredGolden, filteredTest, fpRange);
- exampleNo++;
- }
-}
-
-}; // namespace generated_tests
-
-using namespace nnfw::rt::wrapper;
-
-// Mixed-typed examples
-typedef generated_tests::MixedTypedExampleType MixedTypedExample;
-
-class GeneratedTests : public ::testing::Test {
- protected:
- virtual void SetUp() {}
-};
// The burst path is off by default in these tests. This is the first case
// where it is turned on. Both "useCpuOnly" and "allowSyncExecHal" are
// irrelevant here because the burst path is separate from both.
- n |= test(/*useCpuOnly=*/false, Execution::ComputeMode::BURST);
+ // Fix for neurun: disable burst mode
+ //n |= test(/*useCpuOnly=*/false, Execution::ComputeMode::BURST);
return n;
}