outputs = _opCreator->convertSqrt(inputs);
break;
case BuiltinOperator_ADD:
- outputs = _opCreator->createElementwise(
- ops::ElementwiseOp::OpType::add,
- op->builtin_options_as_AddOptions()->fused_activation_function(), inputs);
+ outputs = _opCreator->convertAdd(op->builtin_options_as<AddOptions>(), inputs);
break;
case BuiltinOperator_SUB:
- outputs = _opCreator->createElementwise(
- ops::ElementwiseOp::OpType::sub,
- op->builtin_options_as_SubOptions()->fused_activation_function(), inputs);
+ outputs = _opCreator->convertSub(op->builtin_options_as<SubOptions>(), inputs);
break;
case BuiltinOperator_MUL:
- outputs = _opCreator->createElementwise(
- ops::ElementwiseOp::OpType::mul,
- op->builtin_options_as_MulOptions()->fused_activation_function(), inputs);
+ outputs = _opCreator->convertMul(op->builtin_options_as<MulOptions>(), inputs);
break;
case BuiltinOperator_DIV:
- outputs = _opCreator->createElementwise(
- ops::ElementwiseOp::OpType::div,
- op->builtin_options_as_DivOptions()->fused_activation_function(), inputs);
+ outputs = _opCreator->convertDiv(op->builtin_options_as<DivOptions>(), inputs);
break;
case BuiltinOperator_MAXIMUM:
- outputs = _opCreator->createElementwise(ops::ElementwiseOp::OpType::max,
- ActivationFunctionType_NONE, inputs); // no activation
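+        // MAXIMUM carries no fused activation in TFLite, so convertMax needs no options.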
+ outputs = _opCreator->convertMax(inputs);
break;
case BuiltinOperator_SQUARED_DIFFERENCE:
outputs = _opCreator->convertSquaredDifference(inputs);
break;
return {result->getOutput(0)};
}
-mir::Operation::Output* TFLiteOpCreator::tryConvertToFloatTensor(mir::Operation::Output* arg) {
- auto constant_op = dynamic_cast<mir::ops::ConstantOp*>(arg->getNode());
- if (constant_op != nullptr && constant_op->getValue().getDataType() == mir::DTYPE::INT32) {
- const mir::TensorVariant& int_tensor = constant_op->getValue();
- mir::TensorVariant float_tensor(mir::DTYPE::FLOAT32, int_tensor.getShape());
- mir::Tensor<int32_t> int_tensor_accessor(int_tensor);
- mir::Tensor<float> float_tensor_accessor(float_tensor);
- for (const auto& index : mir::ShapeRange(int_tensor.getShape()))
- float_tensor_accessor.at(index) = static_cast<float>(int_tensor_accessor.at(index));
- return createOp<ops::ConstantOp>(float_tensor)->getOutput(0);
- } else {
- return arg;
+std::vector<mir::Operation::Output*>
+TFLiteOpCreator::convertAdd(const ::tflite::AddOptions* opts,
+ const std::vector<mir::Operation::Output*>& inputs) {
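+  // Create the elementwise op, then wrap its output in the fused activation (if any).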
+ auto result = createOp<ops::ElementwiseOp>(inputs, ops::ElementwiseOp::OpType::add);
+ return {addFusedActivation(result->getOutput(0), opts->fused_activation_function())};
+}
+
+std::vector<mir::Operation::Output*>
+TFLiteOpCreator::convertSub(const ::tflite::SubOptions* opts,
+ const std::vector<mir::Operation::Output*>& inputs) {
+ auto result = createOp<ops::ElementwiseOp>(inputs, ops::ElementwiseOp::OpType::sub);
+ return {addFusedActivation(result->getOutput(0), opts->fused_activation_function())};
+}
+
+std::vector<mir::Operation::Output*>
+TFLiteOpCreator::convertMul(const ::tflite::MulOptions* opts,
+ const std::vector<mir::Operation::Output*>& inputs) {
+  // Try to constant-fold the multiplication: both inputs must be INT32 constants
+  // of the same shape, with no fused activation.
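+  // (Presumably this folding replaces the removed tryConvertToFloatTensor hack by
+  // keeping INT32 shape tensors representable as constants.)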
+ if (inputs.size() == 2 && inputs[0]->getShape() == inputs[1]->getShape() &&
+ opts->fused_activation_function() == ActivationFunctionType_NONE) {
+ auto constant1_op = dynamic_cast<const ops::ConstantOp*>(inputs[0]->getNode());
+ auto constant2_op = dynamic_cast<const ops::ConstantOp*>(inputs[1]->getNode());
+ if (constant1_op != nullptr && constant2_op != nullptr) {
+ const auto& input1_tensor = constant1_op->getValue();
+ const auto& input2_tensor = constant2_op->getValue();
+ if (input1_tensor.getDataType() == mir::DTYPE::INT32 &&
+ input2_tensor.getDataType() == mir::DTYPE::INT32) {
+ const auto& output_shape = inputs[0]->getShape();
+ mir::TensorVariant res_tensor(mir::DTYPE::INT32, output_shape);
+
+ mir::Tensor<int32_t> input1_accessor(input1_tensor);
+ mir::Tensor<int32_t> input2_accessor(input2_tensor);
+ mir::Tensor<int32_t> res_accessor(res_tensor);
+
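+        // The shapes are equal, so a single index addresses both inputs.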
+ for (const auto& idx : mir::ShapeRange(output_shape)) {
+ res_accessor.at(idx) = input1_accessor.at(idx) * input2_accessor.at(idx);
+ }
+
+ return {createOp<ops::ConstantOp>(res_tensor)->getOutput(0)};
+ }
+ }
}
+
+ auto result = createOp<ops::ElementwiseOp>(inputs, ops::ElementwiseOp::OpType::mul);
+ return {addFusedActivation(result->getOutput(0), opts->fused_activation_function())};
}
std::vector<mir::Operation::Output*>
-TFLiteOpCreator::createElementwise(ops::ElementwiseOp::OpType op_type,
- ::tflite::ActivationFunctionType activation,
- const std::vector<mir::Operation::Output*>& inputs) {
- std::vector<mir::Operation::Output*> float_inputs;
- for (auto* input : inputs)
- float_inputs.push_back(tryConvertToFloatTensor(input));
+TFLiteOpCreator::convertDiv(const ::tflite::DivOptions* opts,
+ const std::vector<mir::Operation::Output*>& inputs) {
+ auto result = createOp<ops::ElementwiseOp>(inputs, ops::ElementwiseOp::OpType::div);
+ return {addFusedActivation(result->getOutput(0), opts->fused_activation_function())};
+}
+
- auto result = createOp<ops::ElementwiseOp>(float_inputs, op_type);
- return {addFusedActivation(result->getOutput(0), activation)};
+std::vector<mir::Operation::Output*>
+TFLiteOpCreator::convertMax(const std::vector<mir::Operation::Output*>& inputs) {
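+  // TFLite defines no fused activation for MAXIMUM, so the result is returned directly.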
+ auto result = createOp<ops::ElementwiseOp>(inputs, ops::ElementwiseOp::OpType::max);
+ return {result->getOutput(0)};
}
std::vector<mir::Operation::Output*>
squeeze_dims.push_back(axis);
}
- input = tryConvertToFloatTensor(input);
+  // Try to constant-fold the slice: the input must be an INT32 constant
+  // and no axes may be shrunk.
+ if (shrink_axis_mask == 0) {
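+    // Fold only when no axes are shrunk; otherwise the squeeze applied below
+    // would also have to be replicated on the folded constant.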
+ auto constant_op = dynamic_cast<const ops::ConstantOp*>(input->getNode());
+ if (constant_op != nullptr) {
+ const auto& input_tensor = constant_op->getValue();
+ if (input_tensor.getDataType() == mir::DTYPE::INT32) {
+ mir::Shape output_shape(num_dims);
+ for (int32_t i = 0; i < num_dims; ++i) {
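+          // A size of -1 selects everything from 'start' to the end of this dimension.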
+ if (size.dim(i) == -1) {
+ output_shape.dim(i) = input_shape.dim(i) - start.dim(i);
+ } else {
+ output_shape.dim(i) = size.dim(i);
+ }
+ }
+
+ mir::TensorVariant res_tensor(mir::DTYPE::INT32, output_shape);
+ mir::Tensor<int32_t> input_accessor(input_tensor);
+ mir::Tensor<int32_t> res_accessor(res_tensor);
+
+ mir::Index in_idx(static_cast<std::size_t>(num_dims));
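+        // Copy the selected window: each output index maps to the input index offset by 'start'.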
+ for (const auto& out_idx : mir::ShapeRange(output_shape)) {
+ for (int32_t i = 0; i < num_dims; ++i) {
+ in_idx.at(i) = out_idx.at(i) + start.dim(i);
+ }
+ res_accessor.at(out_idx) = input_accessor.at(in_idx);
+ }
+
+ return {createOp<ops::ConstantOp>(res_tensor)->getOutput(0)};
+ }
+ }
+ }
+
auto result = createOp<ops::SliceOp>(input, start, size);
result = createOp<ops::SqueezeOp>(result->getOutput(0), squeeze_dims);
return {result->getOutput(0)};
const std::vector<mir::Operation::Output*>& inputs);
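+  // Elementwise converters: each creates the ElementwiseOp and applies the fused
+  // activation where the operator defines one.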
std::vector<mir::Operation::Output*>
- createElementwise(ops::ElementwiseOp::OpType op_type,
- ::tflite::ActivationFunctionType activation,
- const std::vector<mir::Operation::Output*>& inputs);
+ convertAdd(const ::tflite::AddOptions* opts,
+ const std::vector<mir::Operation::Output*>& inputs);
+
+ std::vector<mir::Operation::Output*>
+ convertSub(const ::tflite::SubOptions* opts,
+ const std::vector<mir::Operation::Output*>& inputs);
+
+ std::vector<mir::Operation::Output*>
+ convertMul(const ::tflite::MulOptions* opts,
+ const std::vector<mir::Operation::Output*>& inputs);
+
+ std::vector<mir::Operation::Output*>
+ convertDiv(const ::tflite::DivOptions* opts,
+ const std::vector<mir::Operation::Output*>& inputs);
+
+ std::vector<mir::Operation::Output*>
+  convertMax(const std::vector<mir::Operation::Output*>& inputs);
+
std::vector<mir::Operation::Output*>
convertSquaredDifference(const std::vector<mir::Operation::Output*>& inputs);
template<typename OpType, typename... Types>
mir::Operation* createOp(Types&&... args);
- // FIXME This is a temporary hack needed to support SHAPE operator in short term.
- mir::Operation::Output* tryConvertToFloatTensor(mir::Operation::Output* arg);
};
template<typename OpType, typename... Types>