size_t nodeTid = INVALID_TENSOR_ID;
if (op->getPrevNodes().empty()) {
if (auto* p2const = dynamic_cast<ops::ConstantOp*>(op)) {
- type = OpDescr::Type::CONSTANT;
+ type = OpDescr::Type::ORDINARY;
+
auto* shape = const_cast<Shape*> (&p2const->getOutputShape(0));
- nodeTid = allocateTensor(name, TensorDescription::Type::CONSTANT, shape);
+ /*
+ * FIXME: allocateTensor should accept a const Shape* so the const_cast above can be removed
+ */
+ nodeTid = allocateTensor(name, TensorDescription::Type::ORDINARY, shape);
} else {
// process input op
+ assert(op->getType() == Operation::Type::variable);
Shape inputShape = op->getOutputShape(0);
nodeTid = allocateTensor(name, TensorDescription::Type::IN, &inputShape);
type = OpDescr::Type::IN;
nodeOutputs.push_back(nodeTid);
// process op outputs
// consider op as output if it has no consumers
- if (op->getNextNodes().empty() && (type != OpDescr::Type::CONSTANT)) {
- assert(type == OpDescr::Type::OUT);
+ if (op->getNextNodes().empty() && (type == OpDescr::Type::OUT))
_outputs.push_back(nodeTid);
- }
// process op inputs
vector<size_t> nodeInputs;
for (const IODescriptor &d: op->getPrevNodes()) {
// Set contains pointer to node if it is visited by DFS
set<Operation*> visited;
+ // Collect all inputs and constants
+ vector<Operation*> init_ops(g->collectInputs());
+ vector<Operation*> constant_ops(g->collectConstants());
+ init_ops.insert(init_ops.end(), constant_ops.begin(), constant_ops.end());
+
// Walk all network inputs
- for (Operation* in : g->collectInputs()) {
+ for (Operation* in : init_ops) {
assert(dynamic_cast<ops::VariableOp*>(in) || dynamic_cast<ops::ConstantOp*>(in));
if (!visited.count(in)) {
visited.insert(in);
#include "core/modelIR/operations/ReluOp.h"
#include "core/modelIR/operations/ReshapeOp.h"
#include "core/modelIR/operations/ResizeOp.h"
+#include "core/modelIR/operations/ScaleOp.h"
#include "core/modelIR/operations/SigmoidOp.h"
#include "core/modelIR/operations/SliceOp.h"
#include "core/modelIR/operations/SoftmaxOp.h"
#include "core/modelIR/operations/SqueezeOp.h"
#include "core/modelIR/operations/TanhOp.h"
-#include "core/modelIR/Tensor.h"
-#include "core/modelIR/ShapeRange.h"
#include "pass/PassException.h"
-#include "core/modelIR/Tensor.h"
#include "core/modelIR/Shape.h"
#include "core/modelIR/ShapeRange.h"
+#include "core/modelIR/Tensor.h"
using namespace nnc::mir;
using namespace ::tflite;
checkActivationType(opts->fused_activation_function(), problems_op_set);
}
-std::vector<mir::Operation*> TFLiteOpCreator::convertConv2D(InputOps inputs, InputParams params,
+std::vector<mir::Operation*> TFLiteOpCreator::convertConv2D(InputOps& inputs, InputParams& params,
const Conv2DOptions* opts) {
const auto& input_shape = inputs[0]->getOutputShape(0);
const auto& kernel_shape = params[0].getShape();
}
std::vector<mir::Operation*>
-TFLiteOpCreator::convertDepthwiseConv2D(InputOps inputs, InputParams params,
+TFLiteOpCreator::convertDepthwiseConv2D(InputOps& inputs, InputParams& params,
const DepthwiseConv2DOptions* opts) {
const auto& input_shape = inputs[0]->getOutputShape(0);
const auto& kernel_shape = params[0].getShape();
checkActivationType(opts->fused_activation_function(), problems_op_set);
}
-std::vector<mir::Operation*> TFLiteOpCreator::convertConcatenation(InputOps inputs,
- InputParams params,
+std::vector<mir::Operation*> TFLiteOpCreator::convertConcatenation(InputOps& inputs,
+ InputParams& params,
const ConcatenationOptions* opts) {
std::vector<IODescriptor> descriptors;
for (auto i : inputs)
checkActivationType(opts->fused_activation_function(), problems_op_set);
}
-std::vector<mir::Operation*> TFLiteOpCreator::convertMaxPool2D(InputOps inputs, InputParams params,
+std::vector<mir::Operation*> TFLiteOpCreator::convertMaxPool2D(InputOps& inputs, InputParams& params,
const Pool2DOptions* opts) {
auto& input_shape = inputs[0]->getOutputShape(0);
Shape window_shape{opts->filter_height(), opts->filter_width()};
ops::PoolOp::RoundMode::floor);
}
-std::vector<mir::Operation*> TFLiteOpCreator::convertAveragePool2D(InputOps inputs,
- InputParams params,
+std::vector<mir::Operation*> TFLiteOpCreator::convertAveragePool2D(InputOps& inputs,
+ InputParams& params,
const Pool2DOptions* opts) {
auto& input_shape = inputs[0]->getOutputShape(0);
Shape window_shape{opts->filter_height(), opts->filter_width()};
ops::PoolOp::RoundMode::floor);
}
-std::vector<mir::Operation*> TFLiteOpCreator::createSoftmax(InputOps inputs, InputParams params,
+std::vector<mir::Operation*> TFLiteOpCreator::createSoftmax(InputOps& inputs, InputParams& params,
const SoftmaxOptions* opts) {
// Softmax in TFLite is always 2-D.
assert(inputs[0]->getOutputShape(0).rank() == 2);
return temporary_shape;
}
-std::vector<mir::Operation*> TFLiteOpCreator::createSlice(InputOps inputs, InputParams params,
+std::vector<mir::Operation*> TFLiteOpCreator::createSlice(InputOps& inputs, InputParams& params,
const ::tflite::SliceOptions*) {
auto starts = shapeFromTensor(mir::Tensor<int32_t>(params[0]));
auto sizes = shapeFromTensor(mir::Tensor<int32_t>(params[1]));
starts, sizes);
}
-std::vector<mir::Operation*> TFLiteOpCreator::convertReshape(InputOps inputs, InputParams params,
+std::vector<mir::Operation*> TFLiteOpCreator::convertReshape(InputOps& inputs, InputParams& params,
const ReshapeOptions* opts) {
// TODO: we should also support "-1" values in new_shape, which means that correct
// shape values must be calculated. Better do it in the shape inference module.
}
std::vector<mir::Operation*>
-TFLiteOpCreator::createAdd(InputOps& inputs, InputParams&, const ::tflite::AddOptions* opts) {
+TFLiteOpCreator::createAdd(InputOps& inputs, const InputParams& params,
+ const ::tflite::AddOptions* opts) {
std::vector<IODescriptor> descriptors;
+
for (auto i : inputs)
descriptors.push_back(i->getOutput(0));
+
+ for (const auto& param : params) {
+ auto weights_tensor = createOp<ops::ConstantOp>(ActivationFunctionType_NONE, param);
+ descriptors.push_back(weights_tensor[0]->getOutput(0));
+ }
+
return createOp<ops::ElementwiseOp>(opts->fused_activation_function(), descriptors,
ops::ElementwiseOp::OpType::add);
}
std::vector<mir::Operation*>
-TFLiteOpCreator::createMul(InputOps& inputs, InputParams&, const ::tflite::MulOptions* opts) {
+TFLiteOpCreator::createMul(InputOps& inputs, const InputParams& params,
+ const ::tflite::MulOptions* opts) {
std::vector<IODescriptor> descriptors;
+
for (auto i : inputs)
descriptors.push_back(i->getOutput(0));
+
+ for (const auto& param : params) {
+ auto weights_tensor = createOp<ops::ConstantOp>(ActivationFunctionType_NONE, param);
+ descriptors.push_back(weights_tensor[0]->getOutput(0));
+ }
+
return createOp<ops::ElementwiseOp>(opts->fused_activation_function(), descriptors,
ops::ElementwiseOp::OpType::mul);
}
ops::ElementwiseOp::OpType::max);
}
-std::vector<mir::Operation*> TFLiteOpCreator::convertReducer(InputOps inputs, InputParams params,
+std::vector<mir::Operation*> TFLiteOpCreator::convertReducer(InputOps& inputs, InputParams& params,
ops::ReduceFOp::FuncType ft,
const ::tflite::ReducerOptions* opts) {
assert(params.at(0).getShape().rank() <= 1 && "Must be 1-dim or 0-dim tensor");
}
std::vector<mir::Operation*> TFLiteOpCreator::createSqueeze(
- InputOps inputs, InputParams params, const ::tflite::SqueezeOptions* opts) {
+ InputOps& inputs, InputParams& params, const ::tflite::SqueezeOptions* opts) {
std::vector<int32_t> squeeze_dims{opts->squeeze_dims()->begin(), opts->squeeze_dims()->end()};
squeeze_dims);
}
-std::vector<mir::Operation*> TFLiteOpCreator::createPad(InputOps inputs, InputParams params,
+std::vector<mir::Operation*> TFLiteOpCreator::createPad(InputOps& inputs, InputParams& params,
const ::tflite::PadOptions *opts) {
assert(params.size() == 1); // support pad with one param
std::vector<std::pair<int32_t, int32_t>> paddings;
class TFLiteOpCreator {
public:
- using InputOps = std::vector<mir::Operation*>&;
- using InputParams = std::vector<mir::TensorVariant>&;
+ using InputOps = std::vector<mir::Operation*>;
+ using InputParams = std::vector<mir::TensorVariant>;
explicit TFLiteOpCreator(Graph* g) : _graph(g) {}
- std::vector<mir::Operation*> convertConv2D(InputOps, InputParams, const ::tflite::Conv2DOptions*);
+ std::vector<mir::Operation*> convertConv2D(InputOps&, InputParams&, const ::tflite::Conv2DOptions*);
- std::vector<mir::Operation*> convertDepthwiseConv2D(InputOps, InputParams,
+ std::vector<mir::Operation*> convertDepthwiseConv2D(InputOps&, InputParams&,
const ::tflite::DepthwiseConv2DOptions*);
- std::vector<mir::Operation*> convertConcatenation(InputOps, InputParams,
+ std::vector<mir::Operation*> convertConcatenation(InputOps&, InputParams&,
const ::tflite::ConcatenationOptions*);
- std::vector<mir::Operation*> convertMaxPool2D(InputOps, InputParams,
+ std::vector<mir::Operation*> convertMaxPool2D(InputOps&, InputParams&,
const ::tflite::Pool2DOptions*);
- std::vector<mir::Operation*> convertAveragePool2D(InputOps, InputParams,
+ std::vector<mir::Operation*> convertAveragePool2D(InputOps&, InputParams&,
const ::tflite::Pool2DOptions*);
- std::vector<mir::Operation*> convertReducer(InputOps, InputParams, ops::ReduceFOp::FuncType,
+ std::vector<mir::Operation*> convertReducer(InputOps&, InputParams&, ops::ReduceFOp::FuncType,
const ::tflite::ReducerOptions*);
- std::vector<mir::Operation*> createSoftmax(InputOps, InputParams, const ::tflite::SoftmaxOptions*);
+ std::vector<mir::Operation*> createSoftmax(InputOps&, InputParams&, const ::tflite::SoftmaxOptions*);
- std::vector<mir::Operation*> createSlice(InputOps, InputParams, const ::tflite::SliceOptions*);
-
- std::vector<mir::Operation*> convertReshape(InputOps, InputParams,
+ std::vector<mir::Operation*> createSlice(InputOps&, InputParams&, const ::tflite::SliceOptions*);
+
+ std::vector<mir::Operation*> convertReshape(InputOps&, InputParams&,
const ::tflite::ReshapeOptions*);
- std::vector<mir::Operation*> convertFullyConnected(InputOps, InputParams,
+ std::vector<mir::Operation*> convertFullyConnected(InputOps&, InputParams&,
const ::tflite::FullyConnectedOptions*);
- std::vector<mir::Operation*> convertResizeNN(InputOps, InputParams,
+ std::vector<mir::Operation*> convertResizeNN(InputOps&, InputParams&,
const ::tflite::ResizeNearestNeighborOptions*);
std::vector<mir::Operation*> createLogistic(InputOps& inputs, InputParams& params);
const ::tflite::SqueezeOptions* opts);
/** @brief Elementwise Add */
- std::vector<mir::Operation*> createAdd(InputOps&, InputParams&, const ::tflite::AddOptions*);
+ std::vector<mir::Operation*> createAdd(InputOps&, const InputParams&, const ::tflite::AddOptions*);
/** @brief Elementwise product */
- std::vector<mir::Operation*> createMul(InputOps&, InputParams&, const ::tflite::MulOptions*);
+ std::vector<mir::Operation*> createMul(InputOps&, const InputParams&, const ::tflite::MulOptions*);
/** @brief Elementwise maximum */
std::vector<mir::Operation*> createMax(InputOps&, InputParams&, const ::tflite::MaximumMinimumOptions*);
/** @brief Elementwise division */