#include <nncc/core/ADT/tensor/LexicalLayout.h>
#include <nncc/core/ADT/tensor/Shape.h>
+#include <iostream>
+
using namespace nncc::core::ADT;
namespace tflimport
}
}
+/**
+ * @brief Copy values of tfl tensors into coco::Data if the data was not copied
+ *
+ * @param ctx import context giving access to the tflite graph, the
+ *            tensor-id -> coco::Bag map, and the coco::Data object
+ *
+ * @note Only FLOAT32 tensors are copied; INT32 buffers are skipped with a
+ *       console warning until integer support is implemented (see TODO below).
+ */
+void copy_tensors(GraphBuilderContext *ctx)
+{
+ auto d = ctx->d();
+
+ // for each bag, check if bag is not allocated but tflite tensor has values
+ for (auto &iter : ctx->bags())
+ {
+ auto tfl_tensor_id = iter.first;
+ auto bag = iter.second;
+
+ // raw (ptr, len) view of the tflite buffer backing this tensor
+ auto tfl_buffer = ctx->buffer().tensor_buffer<float>(ctx->graph(), tfl_tensor_id);
+
+ // TODO remove this line when support int32 is ready
+ if (ctx->tensor().type(tfl_tensor_id) == tflite::TensorType::TensorType_INT32)
+ {
+ std::cout << "*** INT32 COPYING IS NOT SUPPORTED ***" << std::endl;
+ continue;
+ }
+
+ assert(ctx->tensor().type(tfl_tensor_id) == tflite::TensorType::TensorType_FLOAT32);
+
+ auto span = d->f32()->weight(bag); // TODO support other type
+
+ // a (nullptr, 0) span means no weight data is attached to this bag yet;
+ // anything else means the data was already copied, so leave it untouched
+ if (!(span.data() == nullptr && span.size() == 0)) // already allocated
+ continue;
+
+ if (tfl_buffer.ptr == nullptr || tfl_buffer.len == 0) // no data to copy
+ continue;
+
+ d->f32()->allocate(bag);
+
+ // re-fetch the span AFTER allocate() so it points at the fresh storage
+ auto ifm_span = d->f32()->weight(bag);
+ for (uint32_t idx = 0; idx < tfl_buffer.len; ++idx)
+ {
+ ifm_span[idx] = tfl_buffer.ptr[idx];
+ }
+ }
+}
+
} // namespace tflimport
Frontend::Frontend(std::unique_ptr<RawModel> &&raw) : _raw{std::move(raw)}
std::string opcodename = opcode_context.opcode_name(op);
throw std::runtime_error{"Not supported: " + opcodename};
}
+
+ // copying unfilled tensor value
+ copy_tensors(&opbuilder_context);
}
// Create "Bundle"
coco::Bag *bag(int32_t tensor_id) { return _bag_ctx[tensor_id]; }
+public:
+ std::map<uint32_t, coco::Bag *>::iterator begin() { return _bag_ctx.begin(); }
+
+ std::map<uint32_t, coco::Bag *>::iterator end() { return _bag_ctx.end(); }
+
private:
std::map<uint32_t, coco::Bag *> _bag_ctx;
};