assert(kernel_size == off);
}
+ // Configure Bias Data (NOTE(review): bias_data below is a runtime-sized stack array — a VLA, non-standard C++; std::vector<float> would be portable)
+ const uint32_t bias_size = bias.size();
+ float bias_data[bias_size] = { 0.0f, };
+
+ // Fill bias data: copy each bias value into the buffer handed to the interpreter
+ for (uint32_t off = 0; off < bias.size(); ++off)
+ {
+ bias_data[off] = bias.at(off);
+ }
+
// Assumption on this example
assert(IFM_C == KER_C);
assert(KER_N == bias.size());
{1 /*N*/, IFM_H, IFM_W, IFM_C} /* dims */,
quantization);
- // NOTE kernel_data should live longer than interpreter!
+ // NOTE kernel_data & bias_data should live longer than interpreter! (SetTensorParametersReadOnly keeps raw pointers to these buffers)
interp.SetTensorParametersReadOnly(2,
kTfLiteFloat32 /* type */,
"filter" /* name */,
quantization,
reinterpret_cast<const char *>(kernel_data), kernel_size * sizeof(float));
- // Configure Bias
- const uint32_t bias_size = bias.size();
- float bias_data[bias_size] = { 0.0f, };
-
- // Fill bias data
- for (uint32_t off = 0; off < bias.size(); ++off)
- {
- bias_data[off] = bias.at(off);
- }
-
interp.SetTensorParametersReadOnly(3,
kTfLiteFloat32 /* type */,
"bias" /* name */,
{ bias.size() } /* dims */,
quantization,
- reinterpret_cast<const char *>(bias_data), sizeof(bias_data));
+ reinterpret_cast<const char *>(bias_data), bias_size * sizeof(float));
// Add Convolution Node
//