case BuiltinOperator_STRIDED_SLICE:
_opCreator->checkStridedSlice(op->builtin_options_as<StridedSliceOptions>(),
_problemsOpSet);
+ break;
+ case BuiltinOperator_SHAPE:
+ _opCreator->checkShape(op->builtin_options_as<ShapeOptions>(), _problemsOpSet);
+ break;
case BuiltinOperator_SOFTMAX:
case BuiltinOperator_SLICE:
case BuiltinOperator_RESHAPE:
case BuiltinOperator_LEAKY_RELU:
outputs = _opCreator->convertLeakyReLU(op->builtin_options_as<LeakyReluOptions>(), inputs);
break;
+ case BuiltinOperator_SHAPE:
+ outputs = _opCreator->convertShape(op->builtin_options_as<ShapeOptions>(), inputs);
+ break;
default:
assert(false && "All unsupported types should have been found before this pass.");
}
return {result->getOutput(0)};
}
+// FIXME(review): temporary hack to support the SHAPE operator — if `arg` is produced by a
+// ConstantOp holding an INT32 tensor, replace it with a new ConstantOp holding an
+// element-wise FLOAT32 copy of the same shape; any other input is returned unchanged.
+// Presumably needed because downstream ops (e.g. ElementwiseOp) only handle float
+// tensors — TODO confirm against the backends.
+mir::Operation::Output* TFLiteOpCreator::tryConvertToFloatTensor(mir::Operation::Output* arg) {
+ auto constant_op = dynamic_cast<mir::ops::ConstantOp*>(arg->getNode());
+ if (constant_op != nullptr && constant_op->getValue().getDataType() == mir::DTYPE::INT32) {
+ const mir::TensorVariant& int_tensor = constant_op->getValue();
+ // Allocate a float tensor with the same shape, then copy element by element.
+ mir::TensorVariant float_tensor(mir::DTYPE::FLOAT32, int_tensor.getShape());
+ mir::Tensor<int32_t> int_tensor_accessor(int_tensor);
+ mir::Tensor<float> float_tensor_accessor(float_tensor);
+ for (const auto& index : mir::ShapeRange(int_tensor.getShape()))
+ float_tensor_accessor.at(index) = static_cast<float>(int_tensor_accessor.at(index));
+ // NOTE(review): the original INT32 constant is left in the graph; assumes dead
+ // nodes are cleaned up elsewhere — TODO confirm.
+ return createOp<ops::ConstantOp>(float_tensor)->getOutput(0);
+ } else {
+ return arg;
+ }
+}
+
std::vector<mir::Operation::Output*>
TFLiteOpCreator::createElementwise(ops::ElementwiseOp::OpType op_type,
::tflite::ActivationFunctionType activation,
const std::vector<mir::Operation::Output*>& inputs) {
- auto result = createOp<ops::ElementwiseOp>(inputs, op_type);
+ // Elementwise kernels only operate on float tensors, so constant INT32 operands
+ // (e.g. the result of a SHAPE op) are first materialized as FLOAT32 constants;
+ // all other inputs pass through tryConvertToFloatTensor unchanged.
+ std::vector<mir::Operation::Output*> float_inputs;
+ float_inputs.reserve(inputs.size()); // size is known up front — avoid regrowth
+ for (auto* input : inputs)
+ float_inputs.push_back(tryConvertToFloatTensor(input));
+
+ auto result = createOp<ops::ElementwiseOp>(float_inputs, op_type);
return {addFusedActivation(result->getOutput(0), activation)};
}
squeeze_dims.push_back(axis);
}
+ input = tryConvertToFloatTensor(input);
auto result = createOp<ops::SliceOp>(input, start, size);
result = createOp<ops::SqueezeOp>(result->getOutput(0), squeeze_dims);
return {result->getOutput(0)};
return {result->getOutput(0)};
}
+// Validates a SHAPE operator ahead of conversion: only INT32 output tensors are
+// supported; any other out_type is recorded in `problem_ops_set` for reporting.
+void TFLiteOpCreator::checkShape(const ::tflite::ShapeOptions* opts,
+ std::set<std::string>& problem_ops_set) {
+ const auto out_type = opts->out_type();
+ if (out_type == TensorType_INT32)
+ return;
+ problem_ops_set.insert(std::string("SHAPE: Unsupported tensor type: ") +
+ EnumNameTensorType(out_type));
+}
+
+// Converts a SHAPE operator by folding it at import time: the input's static shape
+// is emitted as a rank-1 INT32 ConstantOp of length `rank`. `opts` is unused here
+// (out_type was already validated in checkShape).
+std::vector<mir::Operation::Output*>
+TFLiteOpCreator::convertShape(const ::tflite::ShapeOptions* opts,
+ const std::vector<mir::Operation::Output*>& inputs) {
+ const auto& input_shape = inputs[0]->getShape();
+ const int32_t rank = input_shape.rank();
+ std::vector<int32_t> dims(rank);
+ for (int32_t axis = 0; axis < rank; ++axis)
+ dims[axis] = input_shape.dim(axis);
+ // NOTE(review): assumes TensorVariant copies from the raw pointer rather than
+ // retaining it — TODO confirm, `dims` dies at end of scope.
+ mir::TensorVariant shape_tensor(mir::DTYPE::INT32, Shape{rank}, dims.data());
+ return {createOp<ops::ConstantOp>(shape_tensor)->getOutput(0)};
+}
+
} // namespace nnc
convertLeakyReLU(const ::tflite::LeakyReluOptions* opts,
const std::vector<mir::Operation::Output*>& inputs);
+ std::vector<mir::Operation::Output*>
+ convertShape(const ::tflite::ShapeOptions* opts,
+ const std::vector<mir::Operation::Output*>& inputs);
+
void checkPool2D(const ::tflite::Pool2DOptions* opts,
std::set<std::string>& problem_ops_set);
void checkStridedSlice(const ::tflite::StridedSliceOptions* opts,
std::set<std::string>& problem_ops_set);
+
+ void checkShape(const ::tflite::ShapeOptions* opts,
+ std::set<std::string>& problem_ops_set);
private:
Graph* _graph;
template<typename OpType, typename... Types>
mir::Operation* createOp(Types&&... args);
+
+ // FIXME This is a temporary hack needed to support SHAPE operator in short term.
+ mir::Operation::Output* tryConvertToFloatTensor(mir::Operation::Output* arg);
};
template<typename OpType, typename... Types>