#include <arm_compute/runtime/CL/functions/CLBinaryLogicalOp.h>
#include <arm_compute/runtime/CL/functions/CLNeg.h>
#include <arm_compute/runtime/CL/functions/CLNotEqual.h>
#include <arm_compute/runtime/CL/functions/CLSquaredDifference.h>
#include <arm_compute/runtime/NEON/functions/NESoftmaxLayer.h>
#include <arm_compute/runtime/SubTensor.h>
{
  VERBOSE(LogicalOr) << "Configure LogicalOr operation" << std::endl;

  // Operand indices for the element-wise LOGICAL_OR: out = in1 || in2
  const ::internal::tflite::operand::Index output_index{node.param().output_index};
  const ::internal::tflite::operand::Index input1_index{node.param().input1_index};
  const ::internal::tflite::operand::Index input2_index{node.param().input2_index};

  // Register the output tensor's shape/type constraint with the plan builder
  _builder.addShapeConstr(output_index,
                          asTensorInfo(asTensorShape(_ctx.at(output_index).shape(), false),
                                       _ctx.at(output_index).type(), _ctx.at(output_index).scale(),
                                       _ctx.at(output_index).zeroPoint()));

  // When the input shapes differ, extend both operands to a common rank so the
  // backend kernel can broadcast them.
  // NOTE(review): the const_cast mutates operand shapes held by _ctx — presumably
  // the established broadcasting pattern shared by the other binary ops in this
  // file; confirm against sibling visit() implementations.
  if (!(_ctx.at(input1_index).shape() == _ctx.at(input2_index).shape()))
  {
    const auto broadcast_rank =
        std::max(_ctx.at(input1_index).shape().rank(), _ctx.at(input2_index).shape().rank());
    const_cast<::internal::tflite::operand::Shape &>(_ctx.at(input1_index).shape())
        .extendRank(broadcast_rank);
    const_cast<::internal::tflite::operand::Shape &>(_ctx.at(input2_index).shape())
        .extendRank(broadcast_rank);
  }

  // Register both inputs' shape/type constraints (after any rank extension above)
  _builder.addShapeConstr(input1_index,
                          asTensorInfo(asTensorShape(_ctx.at(input1_index).shape(), false),
                                       _ctx.at(input1_index).type(), _ctx.at(input1_index).scale(),
                                       _ctx.at(input1_index).zeroPoint()));
  _builder.addShapeConstr(input2_index,
                          asTensorInfo(asTensorShape(_ctx.at(input2_index).shape(), false),
                                       _ctx.at(input2_index).type(), _ctx.at(input2_index).scale(),
                                       _ctx.at(input2_index).zeroPoint()));

  // Plain-int snapshot of the operand indices, copied by value into the stage
  // lambda so it stays valid after this visit() returns.
  struct Param
  {
    int output_index;
    int input1_index;
    int input2_index;
  };

  Param param;

  param.output_index = output_index.asInt();
  param.input1_index = input1_index.asInt();
  param.input2_index = input2_index.asInt();

  // Deferred stage: resolves allocations at execution-plan build time and
  // appends the concrete ACL function to the execution builder.
  auto stage = [param](const IAllocationContext &ctx, IExecutionBuilder &builder) {
    auto output_alloc = ctx.at(::internal::tflite::operand::Index{param.output_index});
    auto input1_alloc = ctx.at(::internal::tflite::operand::Index{param.input1_index});
    auto input2_alloc = ctx.at(::internal::tflite::operand::Index{param.input2_index});

    if (::internal::arm_compute::isGpuMode())
    {
      auto fn = nnfw::cpp14::make_unique<::arm_compute::CLBinaryLogicalOp>();

      // Configure the CL kernel as a binary logical op with the OR operation
      fn->configure(CAST_CL(input1_alloc), CAST_CL(input2_alloc), CAST_CL(output_alloc),
                    ::arm_compute::LogicalOperation::OR);

      builder.append("LogicalOr", std::move(fn));
    }
    else
    {
      // TODO Add NEON support

      throw std::runtime_error("Not supported yet");
    }
  };

  _builder.addStage(stage);
}
class AllocationContext final : public IAllocationContext