ActivationBuilder{*_execution_builder}.append(activation, ofm_alloc->handle());
}
+// Generates an ACL NEPadLayer kernel for a Pad operation.
+// The paddings operand must be a compile-time constant: its raw data is read
+// here, at kernel-generation time, to build the ACL PaddingList.
+void KernelGenerator::visit(const model::operation::PadNode &node)
+{
+  const auto input_index{node.getInputs().at(model::operation::PadNode::Input::INPUT)};
+  const auto pad_index{node.getInputs().at(model::operation::PadNode::Input::PAD)};
+  const auto output_index{node.getOutputs().at(0)};
+  assert(_ctx.at(pad_index).isConstant());
+
+  // The paddings operand is a [rank][2] int32 table; dim(0) gives the rank
+  // of the tensor being padded.
+  auto rank = _ctx.at(pad_index).shape().dim(0);
+  auto pad_base = _ctx.at(pad_index).data().base();
+
+  auto input = _tensor_builder->at(input_index).get()->handle();
+  auto output = _tensor_builder->at(output_index).get()->handle();
+
+  // Layouts do not vary per axis; look them up once instead of re-fetching
+  // the tensor from the builder on every loop iteration.
+  const auto frontend_layout = _current_subg_layout;
+  const auto backend_layout = _tensor_builder->at(input_index).get()->layout();
+
+  ::arm_compute::PaddingList padding_list;
+  padding_list.resize(rank);
+  for (int32_t n = 0; n < rank; ++n)
+  {
+    // Entry n holds {front, back} padding for frontend axis n.
+    const int32_t *from = reinterpret_cast<const int32_t *>(pad_base) + (n * 2);
+
+    // Map the frontend axis to the backend layout's axis before storing.
+    const auto axis =
+        acl_common::ToARMComputeAxis(rank, n, frontend_layout, backend_layout).value();
+    padding_list[axis] = ::arm_compute::PaddingInfo{from[0], from[1]};
+  }
+
+  // Sanity-check that the backend tensor agrees with the operand's recorded
+  // type/quantization before constructing the fill value from them.
+  const auto input_type = _ctx.at(input_index).typeInfo();
+  UNUSED_RELEASE(input_type);
+  assert(input->info()->data_type() == acl_common::asDataType(input_type.type()));
+  assert(input->info()->quantization_info() ==
+         ::arm_compute::QuantizationInfo(input_type.scale(), input_type.offset()));
+  // Fill value 0, constructed with the input's data type and quantization
+  // info (so quantized tensors pad with their representation of zero).
+  const auto pixel_value =
+      ::arm_compute::PixelValue(0, input->info()->data_type(), input->info()->quantization_info());
+
+  auto fn = nnfw::cpp14::make_unique<::arm_compute::NEPadLayer>();
+  fn->configure(input, output, padding_list, pixel_value);
+
+  _execution_builder->append(asAclFunction(std::move(fn)));
+}
+
void KernelGenerator::visit(const model::operation::ReLUNode &node)
{
const auto output_index{node.getOutputs().at(0)};
void visit(const model::operation::LogisticNode &) override;
void visit(const model::operation::LSTMNode &) override;
void visit(const model::operation::MulNode &) override;
+ void visit(const model::operation::PadNode &) override;
void visit(const model::operation::ReLUNode &) override;
void visit(const model::operation::ReLU1Node &) override;
void visit(const model::operation::ReLU6Node &) override;
// LSTM needs no shape fixing on this backend; intentionally a no-op.
void ShapeFixer::visit(const model::operation::LSTMNode &) { /* DO NOTHING */}
+// Disable dimension correction for Pad's input and output tensors.
+// NOTE(review): dimCorrection(..., false) appears to keep the operands'
+// frontend shapes untouched -- confirm against the TensorBuilder.
+void ShapeFixer::visit(const model::operation::PadNode &node)
+{
+  _tensor_builder->dimCorrection(node.getInputs().at(model::operation::PadNode::Input::INPUT),
+                                 false);
+  _tensor_builder->dimCorrection(node.getOutputs().at(0), false);
+}
+
void ShapeFixer::visit(const model::operation::MulNode &node)
{
const auto lhs_index{node.getInputs().at(model::operation::MulNode::Input::LHS)};
void visit(const model::operation::LogisticNode &) override;
void visit(const model::operation::LSTMNode &) override;
void visit(const model::operation::MulNode &) override;
+ void visit(const model::operation::PadNode &) override;
void visit(const model::operation::ReLUNode &) override;
void visit(const model::operation::ReLU1Node &) override;
void visit(const model::operation::ReLU6Node &) override;