void visit(const ::internal::tflite::op::Mul::Node &node) override;
void visit(const ::internal::tflite::op::Div::Node &node) override;
void visit(const ::internal::tflite::op::Conv2D::implicit::Node &node) override;
+ void visit(const ::internal::tflite::op::Conv2D::Explicit::Node &node) override;
void visit(const ::internal::tflite::op::DepthwiseConv2D::implicit::Node &node) override;
void visit(const ::internal::tflite::op::Dequantize::Node &node) override;
void visit(const ::internal::tflite::op::MaxPool2D::implicit::Node &node) override;
_builder.addStage(stage);
}
+// Plans an explicitly-padded 2D convolution node:
+//   1. resolves all operand indices from the node's parameters,
+//   2. registers tensor shape constraints with the builder,
+//   3. registers constant-data initializers for the kernel and bias,
+//   4. appends an execution stage that configures an ACL CLConvolutionLayer
+//      followed by an optional fused activation.
+void Planner::visit(const ::internal::tflite::op::Conv2D::Explicit::Node &node)
+{
+  // Output feature map operand
+  const ::internal::tflite::operand::Index ofm_index{node.param().ofm_index};
+
+  // Input feature map, kernel (weights) and bias operands
+  const ::internal::tflite::operand::Index ifm_index{node.param().ifm_index};
+  const ::internal::tflite::operand::Index ker_index{node.param().ker_index};
+  const ::internal::tflite::operand::Index bias_index{node.param().bias_index};
+
+  // Scalar operands holding the vertical/horizontal stride values
+  const ::internal::tflite::operand::Index vstride_index{node.param().vstride_index};
+  const ::internal::tflite::operand::Index hstride_index{node.param().hstride_index};
+
+  // Scalar operands holding the four explicit padding amounts
+  const ::internal::tflite::operand::Index padding_left_index{node.param().padding_left_index};
+  const ::internal::tflite::operand::Index padding_right_index{node.param().padding_right_index};
+  const ::internal::tflite::operand::Index padding_top_index{node.param().padding_top_index};
+  const ::internal::tflite::operand::Index padding_bottom_index{node.param().padding_bottom_index};
+
+  // Scalar operand holding the fused activation (FuseCode) value
+  const ::internal::tflite::operand::Index activation_index{node.param().activation_index};
+
+  const auto ofm_shape = _ctx.at(ofm_index).shape().asFeature();
+  const auto ifm_shape = _ctx.at(ifm_index).shape().asFeature();
+  const auto ker_shape = _ctx.at(ker_index).shape().asKernel();
+  const auto bias_size = _ctx.at(bias_index).shape().asVector();
+
+  // Read the explicit padding amounts from their scalar operands
+  const int32_t padding_left = _ctx.at(padding_left_index).asScalar<int32_t>();
+  const int32_t padding_right = _ctx.at(padding_right_index).asScalar<int32_t>();
+  const int32_t padding_top = _ctx.at(padding_top_index).asScalar<int32_t>();
+  const int32_t padding_bottom = _ctx.at(padding_bottom_index).asScalar<int32_t>();
+
+  Stride stride;
+
+  stride.vertical = _ctx.at(vstride_index).asScalar<int32_t>();
+  stride.horizontal = _ctx.at(hstride_index).asScalar<int32_t>();
+
+  // TODO Should move to the place where the operand is handled, if it is possible.
+  // Set Shape Constraints and TensorInfo
+  _builder.addShapeConstr(ofm_index,
+                          asTensorInfo(ofm_shape, _ctx.at(ofm_index).type(),
+                                       _ctx.at(ofm_index).scale(), _ctx.at(ofm_index).zeroPoint()));
+  _builder.addShapeConstr(ifm_index,
+                          asTensorInfo(ifm_shape, _ctx.at(ifm_index).type(),
+                                       _ctx.at(ifm_index).scale(), _ctx.at(ifm_index).zeroPoint()));
+  _builder.addShapeConstr(ker_index,
+                          asTensorInfo(ker_shape, _ctx.at(ker_index).type(),
+                                       _ctx.at(ker_index).scale(), _ctx.at(ker_index).zeroPoint()));
+  _builder.addShapeConstr(bias_index, asTensorInfo(bias_size, _ctx.at(bias_index).type(),
+                                                   _ctx.at(bias_index).scale(),
+                                                   _ctx.at(bias_index).zeroPoint()));
+
+  // Set initializer for kernel
+  // Only float32 and quantized-asymm uint8 kernels are supported here.
+  {
+    auto ker_base = _ctx.at(ker_index).data().base();
+    auto ker_size = _ctx.at(ker_index).data().size();
+    auto ker_type = _ctx.at(ker_index).type();
+
+    switch (ker_type)
+    {
+      case ANEURALNETWORKS_TENSOR_FLOAT32:
+      {
+        auto initializer = std::bind(initKernelTensor<float>, _1, ker_shape, ker_base, ker_size);
+        _builder.addInitializer(ker_index, initializer);
+        break;
+      }
+      case ANEURALNETWORKS_TENSOR_QUANT8_ASYMM:
+      {
+        auto initializer = std::bind(initKernelTensor<uint8_t>, _1, ker_shape, ker_base, ker_size);
+        _builder.addInitializer(ker_index, initializer);
+        break;
+      }
+      default:
+      {
+        throw std::runtime_error("Not supported");
+      }
+    }
+  }
+
+  // Set initializer for bias
+  // Only float32 and int32 biases are supported here.
+  {
+    auto bias_base = _ctx.at(bias_index).data().base();
+    auto bias_type = _ctx.at(bias_index).type();
+
+    switch (bias_type)
+    {
+      case ANEURALNETWORKS_TENSOR_FLOAT32:
+      {
+        auto initializer = std::bind(initVectorTensor<float>, _1, bias_base, bias_size);
+        _builder.addInitializer(bias_index, initializer);
+        break;
+      }
+      case ANEURALNETWORKS_TENSOR_INT32:
+      {
+        auto initializer = std::bind(initVectorTensor<int32_t>, _1, bias_base, bias_size);
+        _builder.addInitializer(bias_index, initializer);
+        break;
+      }
+      default:
+      {
+        throw std::runtime_error("Not supported");
+      }
+    }
+  }
+
+  // Construct operation parameters
+  // Plain-value snapshot of everything the deferred stage lambda needs,
+  // so the lambda does not capture `this` or any operand context.
+  struct Param
+  {
+    int ofm_index;
+    int ifm_index;
+    int ker_index;
+    int bias_index;
+
+    Padding padding;
+    Stride stride;
+
+    FuseCode activation;
+  };
+
+  Param param;
+
+  param.ofm_index = ofm_index.asInt();
+  param.ifm_index = ifm_index.asInt();
+  param.ker_index = ker_index.asInt();
+  param.bias_index = bias_index.asInt();
+
+  param.stride = stride;
+
+  param.padding.left = padding_left;
+  param.padding.right = padding_right;
+  param.padding.top = padding_top;
+  param.padding.bottom = padding_bottom;
+
+  param.activation = static_cast<FuseCode>(_ctx.at(activation_index).asScalar<int32_t>());
+
+  // Deferred stage: runs later with concrete allocations resolved from `ctx`.
+  auto stage = [param](const IAllocationContext &ctx, IExecutionBuilder &builder) {
+    auto ofm_alloc = ctx.at(::internal::tflite::operand::Index{param.ofm_index});
+    auto ifm_alloc = ctx.at(::internal::tflite::operand::Index{param.ifm_index});
+    auto ker_alloc = ctx.at(::internal::tflite::operand::Index{param.ker_index});
+    auto bias_alloc = ctx.at(::internal::tflite::operand::Index{param.bias_index});
+
+    // NOTE(review): `asPadStringInfo` presumably builds an arm_compute
+    // PadStrideInfo from padding + stride — confirm the helper's name/semantics.
+    const auto conv_info = asPadStringInfo(param.padding, param.stride);
+
+    std::unique_ptr<::arm_compute::CLConvolutionLayer> fn{new ::arm_compute::CLConvolutionLayer};
+
+    fn->configure(ifm_alloc, ker_alloc, bias_alloc, ofm_alloc, conv_info);
+
+    builder.append("Conv2D", std::move(fn));
+
+    // Fused activation (if any) is appended as a separate layer after the conv.
+    ActivationBuilder{builder}.append(param.activation, ofm_alloc);
+  };
+
+  _builder.addStage(stage);
+}
+
void Planner::visit(const ::internal::tflite::op::DepthwiseConv2D::implicit::Node &node)
{
const ::internal::tflite::operand::Index ofm_index{node.param().ofm_index};
{
namespace Conv2D
{
+namespace Explicit
+{
+
+// Visitor double-dispatch: hand this node to the given visitor.
+void Node::accept(NodeVisitor &&v) const
+{
+  v.visit(*this);
+}
+
+} // namespace Explicit
+
namespace implicit
{
{
namespace Conv2D
{
+namespace Explicit
+{
+
+// Decodes the NN API operand list of an explicitly-padded Conv2D into
+// named operand indices.
+//
+// @param inputCount  Number of input operands; must be 10
+// @param inputs      Input operand indices, laid out as documented below
+// @param outputCount Number of output operands; must be 1
+// @param outputs     outputs[0] is the output feature map (OFM) index
+Param::Param(uint32_t inputCount, const uint32_t *inputs, uint32_t outputCount,
+             const uint32_t *outputs)
+{
+  assert(inputCount == 10 && outputCount == 1);
+
+  ofm_index = outputs[0];
+
+  // Each input should be interpreted as follows:
+  //
+  // 0 -> IFM Tensor Index
+  // 1 -> Kernel Tensor Index
+  // 2 -> Bias Tensor Index
+  // 3 -> Padding_left index
+  // 4 -> Padding_right index
+  // 5 -> Padding_top index
+  // 6 -> Padding_bottom index
+  // 7 -> Stride (width) Index
+  // 8 -> Stride (height) Index
+  // 9 -> Activation Index
+  ifm_index = inputs[0];
+  ker_index = inputs[1];
+  bias_index = inputs[2];
+  padding_left_index = inputs[3];
+  padding_right_index = inputs[4];
+  padding_top_index = inputs[5];
+  padding_bottom_index = inputs[6];
+  hstride_index = inputs[7];  // width stride -> horizontal
+  vstride_index = inputs[8];  // height stride -> vertical
+  activation_index = inputs[9];
+}
+
+} // namespace Explicit
+
namespace implicit
{