using Initializer = std::function<void(int id, Interpreter *, Interpreter *)>;
std::map<TfLiteType, Initializer> initializers;
+ std::map<TfLiteType, Initializer> reseters;
// Generate singed 32-bit integer (s32) input
initializers[kTfLiteInt32] = [&](int id, Interpreter *tfl_interp, Interpreter *nnapi) {
};
};
+ // Reset signed 32-bit integer (s32) output tensors to zero on both interpreters
+ reseters[kTfLiteInt32] = [&](int id, Interpreter *tfl_interp, Interpreter *nnapi) {
+ assert(tfl_interp->tensor(id)->type == kTfLiteInt32);
+ assert(nnapi->tensor(id)->type == kTfLiteInt32);
+
+ auto tfl_interp_view = nnfw::support::tflite::TensorView<int32_t>::make(*tfl_interp, id);
+ auto nnapi_view = nnfw::support::tflite::TensorView<int32_t>::make(*nnapi, id);
+
+ assert(tfl_interp_view.shape() == nnapi_view.shape());
+
+ // Both views get the same known value (0) so a later output comparison
+ // reflects only what Invoke() writes.
+ int32_t value = 0;
+
+ nnfw::util::tensor::iterate(tfl_interp_view.shape())
+ << [&](const nnfw::util::tensor::Index &ind) {
+ // NOTE(review): zero-fill looks intentional for a reseter; the former
+ // "TODO Generate random values" here appears to be copy-paste residue
+ // from the initializers above — confirm.
+ tfl_interp_view.at(ind) = value;
+ nnapi_view.at(ind) = value;
+ };
+ };
+
initializers[kTfLiteUInt8] = [&](int id, Interpreter *tfl_interp, Interpreter *nnapi) {
assert(tfl_interp->tensor(id)->type == kTfLiteUInt8);
assert(nnapi->tensor(id)->type == kTfLiteUInt8);
};
};
+ // Reset unsigned 8-bit integer (u8) output tensors to zero on both interpreters.
+ reseters[kTfLiteUInt8] = [&](int id, Interpreter *tfl_interp, Interpreter *nnapi) {
+   assert(tfl_interp->tensor(id)->type == kTfLiteUInt8);
+   assert(nnapi->tensor(id)->type == kTfLiteUInt8);
+
+   auto tfl_interp_view = nnfw::support::tflite::TensorView<uint8_t>::make(*tfl_interp, id);
+   auto nnapi_view = nnfw::support::tflite::TensorView<uint8_t>::make(*nnapi, id);
+
+   assert(tfl_interp_view.shape() == nnapi_view.shape());
+
+   // Both views get the same known value (0) so a later output comparison
+   // reflects only what Invoke() writes.
+   // (Removed: a RandomGenerator-built 'data' object that was constructed and
+   // shape-asserted but never read — dead work left over from the initializer
+   // this lambda was copied from.)
+   uint8_t value = 0;
+
+   nnfw::util::tensor::iterate(tfl_interp_view.shape())
+     << [&](const nnfw::util::tensor::Index &ind) {
+          tfl_interp_view.at(ind) = value;
+          nnapi_view.at(ind) = value;
+        };
+ };
+
initializers[kTfLiteFloat32] = [&](int id, Interpreter *tfl_interp, Interpreter *nnapi) {
assert(tfl_interp->tensor(id)->type == kTfLiteFloat32);
assert(nnapi->tensor(id)->type == kTfLiteFloat32);
};
};
+ // Reset 32-bit floating-point output tensors to zero on both interpreters.
+ reseters[kTfLiteFloat32] = [&](int id, Interpreter *tfl_interp, Interpreter *nnapi) {
+   assert(tfl_interp->tensor(id)->type == kTfLiteFloat32);
+   assert(nnapi->tensor(id)->type == kTfLiteFloat32);
+
+   auto tfl_interp_view = nnfw::support::tflite::TensorView<float>::make(*tfl_interp, id);
+   auto nnapi_view = nnfw::support::tflite::TensorView<float>::make(*nnapi, id);
+
+   assert(tfl_interp_view.shape() == nnapi_view.shape());
+
+   // Both views get the same known value (0) so a later output comparison
+   // reflects only what Invoke() writes.
+   // (Removed: a RandomGenerator-built 'data' object that was constructed and
+   // shape-asserted but never read — dead work left over from the initializer
+   // this lambda was copied from.)
+   float value = 0;
+
+   nnfw::util::tensor::iterate(tfl_interp_view.shape())
+     << [&](const nnfw::util::tensor::Index &ind) {
+          tfl_interp_view.at(ind) = value;
+          nnapi_view.at(ind) = value;
+        };
+ };
+
// Fill IFM with random numbers
for (const auto id : tfl_interp->inputs())
{
it->second(id, tfl_interp.get(), nnapi.get());
}
+ // Fill OFM with 0 so the post-Invoke comparison sees only values the
+ // interpreters actually produced.
+ for (const auto id : tfl_interp->outputs())
+ {
+   assert(tfl_interp->tensor(id)->type == nnapi->tensor(id)->type);
+
+   auto it = reseters.find(tfl_interp->tensor(id)->type);
+
+   if (it == reseters.end())
+   {
+     // This loop walks outputs — the old message said "input" (copy-paste
+     // from the IFM loop above).
+     throw std::runtime_error{"Not supported output type"};
+   }
+
+   it->second(id, tfl_interp.get(), nnapi.get());
+ }
+
std::cout << "[NNAPI TEST] Run T/F Lite Interpreter without NNAPI" << std::endl;
tfl_interp->Invoke();