for (size_t I = 0; I < Interpreter->outputs().size(); ++I)
OutputsMap[Interpreter->GetOutputName(I)] = I;
+ size_t NumberFeaturesPassed = 0;
for (size_t I = 0; I < InputSpecs.size(); ++I) {
auto &InputSpec = InputSpecs[I];
auto MapI = InputsMap.find(InputSpec.name() + ":" +
return;
std::memset(Input[I]->data.data, 0,
InputSpecs[I].getTotalTensorBufferSize());
+ ++NumberFeaturesPassed;
+ }
+
+ if (NumberFeaturesPassed < Interpreter->inputs().size()) {
+ // We haven't passed all the required features to the model; mark the
+ // evaluator invalid (no exception is thrown — callers check isValid()).
+ errs() << "Required feature(s) have not been passed to the ML model";
+ invalidate();
+ return;
}
for (size_t I = 0; I < OutputSpecsSize; ++I) {
for (auto I = 0; I < 2 * 5; ++I)
EXPECT_FLOAT_EQ(F[I], 3.14 + I);
}
+
+// Verifies graceful failure when required input features are not supplied:
+// constructing the evaluator with no input specs must leave it invalid
+// (isValid() == false) instead of evaluating with uninitialized inputs.
+TEST(TFUtilsTest, MissingFeature) {
+ // Deliberately empty — the model under test expects input features.
+ std::vector<TensorSpec> InputSpecs{};
+ std::vector<TensorSpec> OutputSpecs{
+ TensorSpec::createSpec<float>("StatefulPartitionedCall", {1})};
+
+ TFModelEvaluator Evaluator(getModelPath(), InputSpecs, OutputSpecs);
+ // The missing-feature guard should have invalidated the evaluator.
+ EXPECT_FALSE(Evaluator.isValid());
+}