    } else {
      TF_LITE_ENSURE_EQ(context, bias->type, data_type);
    }
-   TF_LITE_ENSURE_EQ(context, bias->dims->size, 1);
-   TF_LITE_ENSURE_EQ(context, bias->dims->data[0], filter->dims->data[0]);
+   TF_LITE_ENSURE_EQ(context, NumElements(bias), SizeOfDimension(filter, 0));
  }

  int channels_out = filter->dims->data[0];
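Each hunk here makes the same relaxation: rather than requiring a rank-1 bias whose single dimension equals the filter's output-channel count, the kernel now only checks that the bias's total element count matches SizeOfDimension(filter, 0). A minimal standalone sketch of the difference, using hypothetical helpers over a plain shape vector instead of the real TfLiteTensor API:

#include <cassert>
#include <vector>

// Hypothetical stand-in for TFLite's NumElements helper, operating on a
// plain shape vector rather than a TfLiteTensor.
int NumElementsOf(const std::vector<int>& dims) {
  int n = 1;
  for (int d : dims) n *= d;
  return n;
}

// Old checks: bias must be rank 1 and its single dimension must equal
// the filter's output-channel count.
bool OldBiasCheck(const std::vector<int>& bias_dims, int channels_out) {
  return bias_dims.size() == 1 && bias_dims[0] == channels_out;
}

// New check: any rank is accepted as long as the element count matches.
bool NewBiasCheck(const std::vector<int>& bias_dims, int channels_out) {
  return NumElementsOf(bias_dims) == channels_out;
}

int main() {
  const int channels_out = 8;
  // A rank-1 bias of shape {8} passes both the old and the new check.
  assert(OldBiasCheck({8}, channels_out) && NewBiasCheck({8}, channels_out));
  // A rank-2 bias of shape {1, 8} was rejected before the patch but is
  // accepted after it, since its element count still matches.
  assert(!OldBiasCheck({1, 8}, channels_out) && NewBiasCheck({1, 8}, channels_out));
  return 0;
}

The practical effect: a bias exported with shape {1, channels_out} is no longer rejected, while any shape with the wrong element count still fails the check. The remaining hunks below apply the same substitution.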
  TF_LITE_ASSERT_EQ(input_size, batch_size * filter->dims->data[1]);
  if (bias) {
-   TF_LITE_ASSERT_EQ(bias->dims->data[0], num_units);
+   TF_LITE_ENSURE_EQ(context, NumElements(bias), SizeOfDimension(filter, 0));
  }

  TF_LITE_ENSURE_EQ(context, NumDimensions(filter), 2);
- TF_LITE_ENSURE_EQ(context, NumDimensions(bias), 1);
  // Note that quantized inference requires that all tensors have their
  // parameters set. This is usually done during quantized training.
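As background for that note: the "parameters" are TFLite's affine quantization parameters, a scale and a zero point per tensor, with real_value = scale * (quantized_value - zero_point). A minimal sketch with made-up values; the local struct mirrors TfLiteQuantizationParams, but nothing here is taken from the patch:

#include <cstdint>
#include <cstdio>

// Local mirror of TfLiteQuantizationParams; the values used below are
// illustrative, not taken from any real model.
struct QuantParams {
  float scale;
  int32_t zero_point;
};

// Affine dequantization: real = scale * (quantized - zero_point).
float Dequantize(uint8_t q, const QuantParams& p) {
  return p.scale * (static_cast<int32_t>(q) - p.zero_point);
}

int main() {
  const QuantParams params{0.05f, 128};  // hypothetical scale / zero point
  std::printf("%.2f\n", Dequantize(153, params));  // 0.05 * (153 - 128) = 1.25
  return 0;
}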