public:
enum class OpType {
- prod = 0, sum = 1, max = 2
+ mul,
+ add,
+ max,
+ div
};
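// Editor's sketch, not part of this patch: with the renamed enumerators, an
// elementwise division node can be built the same way the tests below build
// add/mul nodes. The mir::Graph / IODescriptor usage is assumed from that test code.
mir::Operation* buildDivNode(mir::Graph& g, mir::IODescriptor lhs, mir::IODescriptor rhs) {
  std::vector<mir::IODescriptor> inputs{lhs, rhs};
  return g.create<mir::ops::ElementwiseOp>("y_div", inputs, mir::ops::ElementwiseOp::OpType::div);
}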
/**
// On the last iteration the result is saved in the node output.
// Different ACL layers are used to implement different types of elementwise operations.
switch (op.getOpType()) {
- case ops::ElementwiseOp::OpType::prod:
+ case ops::ElementwiseOp::OpType::mul:
in1 = genMultiplication(out->name() + "_" + "multiplication", i - 1, op.getInputShape(i),
in1, in2, i == prev_nodes.size() - 1 ? out : nullptr);
break;
- case ops::ElementwiseOp::OpType::sum:
+ case ops::ElementwiseOp::OpType::add:
in1 = genAddition(out->name() + "_" + "addition", i - 1, op.getInputShape(i),
in1, in2, i == prev_nodes.size() - 1 ? out : nullptr);
break;
ops::ElementwiseOp::OpType optype;
switch (opts.operation()){
case EltwiseParameter_EltwiseOp_PROD:
- optype = ops::ElementwiseOp::OpType::prod;
+ optype = ops::ElementwiseOp::OpType::mul;
break;
case EltwiseParameter_EltwiseOp_SUM:
- optype = ops::ElementwiseOp::OpType::sum;
+ optype = ops::ElementwiseOp::OpType::add;
// TODO TechDebt: When broadcast is implemented this should create Scale Ops before sum args
for (float c: opts.coeff())
assert(c == 1.0f && "Coeff != 1 is not supported");
auto operands = op.getPrevNodes();
std::vector<Tensor<float>> ins;
for (auto &in : operands) {
- ins.push_back(Tensor<float>(var(in.op->getId())[in.index]));
+ ins.emplace_back(var(in.op->getId())[in.index]);
}
float (*func)(float, float); // Another dirty hack: captureless lambdas below convert to plain function pointers
switch (op.getOpType()) {
- case ops::ElementwiseOp::OpType::sum:
+ case ops::ElementwiseOp::OpType::add:
func = [](float a, float b) { return a + b; };
break;
- case ops::ElementwiseOp::OpType::prod:
+ case ops::ElementwiseOp::OpType::mul:
func = [](float a, float b) { return a * b;};
break;
case ops::ElementwiseOp::OpType::max:
func = [](float a, float b) { return std::max(a,b);};
break;
+ case ops::ElementwiseOp::OpType::div:
+ func = [](float a, float b) { return a / b; };
+ break;
default:
- assert(false && "Not supported Optype");
+ assert(false && "Unsupported Optype");
}
+
var(op.getId()) = Fill<float>(op.getOutputShape(0), [&func, &ins, &op](const Index &id) {
float acc = ins[0].at(id);
for (size_t i = 1; i < ins.size() ; i++)
}
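// Editor's sketch, not part of this patch: the interpreter above reduces an N-ary
// elementwise op to a left fold of the selected binary function over the inputs,
// one output element at a time. Standalone equivalent of that accumulation:
#include <cassert>
#include <cstddef>
#include <vector>

inline float foldElementwise(const std::vector<float>& vals, float (*func)(float, float)) {
  assert(!vals.empty() && "elementwise op needs at least one input");
  float acc = vals[0];                      // value taken from the first input
  for (size_t i = 1; i < vals.size(); i++)  // fold the remaining inputs pairwise
    acc = func(acc, vals[i]);
  return acc;
}
// e.g. foldElementwise({8.0f, 2.0f, 2.0f}, [](float a, float b) { return a / b; }) returns 2.0f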
void ModelAnalyzer::visit(mir::ops::ElementwiseOp& op) {
- const char *funcName = nullptr;
+ const char* func_name = nullptr;
switch ( op.getOpType() ) {
- case ops::ElementwiseOp::OpType::sum:
- funcName = "ElementWise<Add>";
+ case ops::ElementwiseOp::OpType::add:
+ func_name = "ElementWise<Add>";
break;
- case ops::ElementwiseOp::OpType::prod:
- funcName = "ElementWise<Mul>";
+ case ops::ElementwiseOp::OpType::mul:
+ func_name = "ElementWise<Mul>";
break;
case ops::ElementwiseOp::OpType::max:
- funcName = "ElementWise<Max>";
+ func_name = "ElementWise<Max>";
+ break;
+ case ops::ElementwiseOp::OpType::div:
+ func_name = "ElementWise<Div>";
break;
default:
assert(false && "unsupported elementwise operation type");
}
- addOpDescr(&op, funcName);
+ addOpDescr(&op, func_name);
}
void ModelAnalyzer::visit(mir::ops::EluOp& op) {
}
}
};
+
+// TODO(23.11.2018): Maybe move this to a separate file, since everything else here is extracted from TF Lite.
+struct Div {
+ static inline void Call(const float* input1_data, const float* input2_data,
+ float* output_data, Dims<4> dims) {
+ auto output = MapAsVector(output_data, dims);
+ output = MapAsVector(input1_data, dims).cwiseQuotient(MapAsVector(input2_data, dims));
+ }
+};
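// Editor's sketch, not part of this patch: MapAsVector in these TF Lite-derived
// helpers wraps a raw buffer in an Eigen map, so Div reduces to Eigen's
// coefficient-wise quotient. A minimal standalone illustration:
#include <Eigen/Core>

inline void divBuffers(const float* a, const float* b, float* out, int n) {
  Eigen::Map<const Eigen::VectorXf> va(a, n), vb(b, n);
  Eigen::Map<Eigen::VectorXf> vo(out, n);
  vo = va.cwiseQuotient(vb);  // out[i] = a[i] / b[i]
}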
case BuiltinOperator_ADD:
case BuiltinOperator_MUL:
case BuiltinOperator_MAXIMUM:
+ case BuiltinOperator_DIV:
case BuiltinOperator_TRANSPOSE_CONV:
case BuiltinOperator_TANH:
case BuiltinOperator_RELU:
case BuiltinOperator_MUL:
outputs = _opCreator->createMul(inputs, params, op->builtin_options_as<MulOptions>());
break;
+ case BuiltinOperator_DIV:
+ outputs = _opCreator->createDiv(inputs, params, op->builtin_options_as<DivOptions>());
+ break;
case BuiltinOperator_MAXIMUM:
outputs = _opCreator->createMax(inputs, params, op->builtin_options_as<MaximumMinimumOptions>());
break;
for (auto i : inputs)
descriptors.push_back(i->getOutput(0));
return createOp<ops::ElementwiseOp>(opts->fused_activation_function(), descriptors,
- ops::ElementwiseOp::OpType::sum);
+ ops::ElementwiseOp::OpType::add);
}
std::vector<mir::Operation*>
for (auto i : inputs)
descriptors.push_back(i->getOutput(0));
return createOp<ops::ElementwiseOp>(opts->fused_activation_function(), descriptors,
- ops::ElementwiseOp::OpType::prod);
+ ops::ElementwiseOp::OpType::mul);
+}
+
+std::vector<mir::Operation*>
+TFLiteOpCreator::createDiv(InputOps& inputs, InputParams&, const ::tflite::DivOptions* opts) {
+ std::vector<IODescriptor> descriptors;
+ for (auto i : inputs)
+ descriptors.push_back(i->getOutput(0));
+ return createOp<ops::ElementwiseOp>(opts->fused_activation_function(), descriptors,
+ ops::ElementwiseOp::OpType::div);
}
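// Editor's note, not part of this patch: createAdd/createMul/createMax/createDiv
// differ only in the OpType they forward to createOp, so supporting DIV costs a
// single new case per backend. A hypothetical shared helper (name and signature
// illustrative only) would look like:
std::vector<mir::Operation*>
TFLiteOpCreator::createElementwise(InputOps& inputs,
                                   ::tflite::ActivationFunctionType activation,
                                   ops::ElementwiseOp::OpType op_type) {
  std::vector<IODescriptor> descriptors;
  for (auto i : inputs)
    descriptors.push_back(i->getOutput(0));
  return createOp<ops::ElementwiseOp>(activation, descriptors, op_type);
}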
std::vector<mir::Operation*>
std::vector<mir::Operation*> createMul(InputOps&, InputParams&, const ::tflite::MulOptions*);
/** @brief Elementwise maximum */
std::vector<mir::Operation*> createMax(InputOps&, InputParams&, const ::tflite::MaximumMinimumOptions*);
+ /** @brief Elementwise division */
+ std::vector<mir::Operation*> createDiv(InputOps&, InputParams&, const ::tflite::DivOptions*);
/// @brief Free-standing ( non-fused ) activation function based on tflite activation
- std::vector<mir::Operation*> createActivation(InputOps&, InputParams&, const ::tflite::ActivationFunctionType);
-
+ std::vector<mir::Operation*> createActivation(InputOps&, InputParams&,
+ const ::tflite::ActivationFunctionType);
/**
* @brief Creates a Transposed convolution
* @param params 0 - output shape (unused), 1 - kernel, 2 - input
fillTensors(input_n_tensors[0], input_a_tensors[0], shape_data, 1.0f);
fillTensors(input_n_tensors[1], input_a_tensors[1], shape_data, 2.0f);
auto op_generator = [](mir::Graph& g, const std::vector<mir::IODescriptor>& inputs) {
- return g.create<mir::ops::ElementwiseOp>("y", inputs, mir::ops::ElementwiseOp::OpType::sum);
+ return g.create<mir::ops::ElementwiseOp>("y", inputs, mir::ops::ElementwiseOp::OpType::add);
};
createAndRunTestGraph(op_generator, ElementWise<Add,Tensor,Tensor>, input_n_tensors, input_a_tensors[0],
fillTensors(input_n_tensors[1], input_a_tensors[1], shape_data, 2.0f);
fillTensors(input_n_tensors[2], input_a_tensors[2], shape_data, 3.0f);
auto opGenerator = [](mir::Graph& g, const std::vector<mir::IODescriptor>& inputs) {
- return g.create<mir::ops::ElementwiseOp>("y", inputs, mir::ops::ElementwiseOp::OpType::prod);
+ return g.create<mir::ops::ElementwiseOp>("y", inputs, mir::ops::ElementwiseOp::OpType::mul);
};
createAndRunTestGraph(opGenerator, ElementWise<Mul,Tensor,Tensor,Tensor>, input_n_tensors, input_a_tensors[0],