void visit(const ::internal::tflite::op::ReLU1::Node &node) override;
void visit(const ::internal::tflite::op::ReLU6::Node &node) override;
void visit(const ::internal::tflite::op::Tanh::Node &node) override;
+ void visit(const ::internal::tflite::op::Logistic::Node &node) override;
private:
const ::internal::tflite::operand::Set &_ctx;
_builder.addStage(stage);
}
+// Plans a LOGISTIC operation for the ACL CL backend: registers shape
+// constraints for both operands and defers kernel construction to a stage
+// that runs once allocations are available.
+void Planner::visit(const ::internal::tflite::op::Logistic::Node &node)
+{
+  VERBOSE(Logistic) << "Configure Logistic operation" << std::endl;
+
+  const ::internal::tflite::operand::Index ofm_index{node.param().ofm_index};
+  const ::internal::tflite::operand::Index ifm_index{node.param().ifm_index};
+
+  // Set shape constraints
+  _builder.addShapeConstr(ofm_index,
+                          asTensorInfo(_ctx.at(ofm_index).shape(), _ctx.at(ofm_index).type()));
+  _builder.addShapeConstr(ifm_index,
+                          asTensorInfo(_ctx.at(ifm_index).shape(), _ctx.at(ifm_index).type()));
+
+  // Plain-int POD captured by value below, so the stage lambda holds no
+  // references into this visit call once planning has finished.
+  struct Param
+  {
+    int ofm_index;
+    int ifm_index;
+  };
+
+  Param param;
+
+  param.ofm_index = ofm_index.asInt();
+  param.ifm_index = ifm_index.asInt();
+
+  // Deferred stage: resolves the operand allocations and appends a configured
+  // CLActivationLayer to the execution builder.
+  auto stage = [param](const IAllocationContext &ctx, IExecutionBuilder &builder) {
+    auto ofm_alloc = ctx.at(::internal::tflite::operand::Index{param.ofm_index});
+    auto ifm_alloc = ctx.at(::internal::tflite::operand::Index{param.ifm_index});
+
+    // LOGISTIC activation (sigmoid); takes no a/b activation parameters.
+    const ::arm_compute::ActivationLayerInfo act_info{
+        ::arm_compute::ActivationLayerInfo::ActivationFunction::LOGISTIC};
+
+    auto fn = nnfw::make_unique<::arm_compute::CLActivationLayer>();
+
+    fn->configure(ifm_alloc, ofm_alloc, act_info);
+
+    builder.append("Logistic", std::move(fn));
+  };
+
+  _builder.addStage(stage);
+}
+
class AllocationContext final : public IAllocationContext
{
public:
--- /dev/null
+#include "internal/op/Logistic.h"
+#include "internal/op/NodeVisitor.h"
+
+#include <cassert>
+
+namespace internal
+{
+namespace tflite
+{
+namespace op
+{
+namespace Logistic
+{
+
+// Double-dispatch entry point: forwards to the visitor's Logistic overload.
+void Node::accept(NodeVisitor &&v) const { v.visit(*this); }
+
+// Decodes NNAPI-style operand index arrays for LOGISTIC.
+// LOGISTIC takes exactly one input and one output (enforced by the assert).
+Param::Param(uint32_t inputCount, const uint32_t *inputs, uint32_t outputCount,
+             const uint32_t *outputs)
+{
+  assert(inputCount == 1 && outputCount == 1);
+
+  ofm_index = outputs[0];
+
+  // Each input should be interpreted as follows:
+  //
+  // 0 -> Input Tensor Index
+  ifm_index = inputs[0];
+}
+
+} // namespace Logistic
+} // namespace op
+} // namespace tflite
+} // namespace internal
--- /dev/null
+#ifndef __INTERNAL_OP_LOGISTIC_H__
+#define __INTERNAL_OP_LOGISTIC_H__
+
+#include "internal/op/Node.h"
+
+#include <cstdint>
+
+namespace internal
+{
+namespace tflite
+{
+namespace op
+{
+namespace Logistic
+{
+
+// Operand indices for a LOGISTIC operation (one input, one output).
+struct Param
+{
+  int32_t ofm_index; // output feature map operand index (outputs[0])
+
+  int32_t ifm_index; // input feature map operand index (inputs[0])
+
+  Param() = default;
+  // Decodes NNAPI-style index arrays; asserts 1 input / 1 output.
+  Param(uint32_t inputCount, const uint32_t *inputs, uint32_t outputCount, const uint32_t *outputs);
+};
+
+// IR node for LOGISTIC; parameters are immutable after construction.
+class Node final : public op::Node
+{
+public:
+  Node(const Param &param) : _param(param)
+  {
+    // DO NOTHING
+  }
+
+public:
+  virtual ~Node() = default;
+
+public:
+  const Param &param(void) const { return _param; }
+
+public:
+  // Double dispatch into NodeVisitor::visit(const Logistic::Node &).
+  void accept(NodeVisitor &&) const override;
+
+private:
+  const Param _param;
+};
+
+} // namespace Logistic
+} // namespace op
+} // namespace tflite
+} // namespace internal
+
+#endif // __INTERNAL_OP_LOGISTIC_H__
#include "internal/op/ReLU6.h"
#include "internal/op/Tanh.h"
#include "internal/op/Squeeze.h"
+#include "internal/op/Logistic.h"
namespace internal
{
virtual void visit(const ReLU6::Node &) = 0;
virtual void visit(const Tanh::Node &) = 0;
virtual void visit(const Squeeze::Node &) = 0;
+ virtual void visit(const Logistic::Node &) = 0;
};
} // namespace op
break;
}
+    case ANEURALNETWORKS_LOGISTIC:
+    {
+      using internal::tflite::op::Logistic::Param;
+      using internal::tflite::op::Logistic::Node;
+
+      // Add 'operations'
+      auto &operations = model->deref().operations();
+
+      // Param's constructor asserts the 1-input/1-output arity of LOGISTIC.
+      operations.emplace_back<Node>(Param{inputCount, inputs, outputCount, outputs});
+
+      break;
+    }
default:
throw std::runtime_error{"Not supported operation"};
};
GeneratedTests.l2_pool_float_2
GeneratedTests.l2_pool_float_large
GeneratedTests.l2_pool_float
-GeneratedTests.logistic_float_1
-GeneratedTests.logistic_float_2
GeneratedTests.logistic_quant8_1
GeneratedTests.logistic_quant8_2
GeneratedTests.max_pool_float_2