#include <arm_compute/runtime/CL/functions/CLReduceOperation.h>
#include <arm_compute/runtime/CL/functions/CLSpaceToBatchND.h>
#include <arm_compute/runtime/CL/functions/CLSpaceToDepth.h>
+#include <arm_compute/runtime/CL/functions/CLSplit.h>
#include <arm_compute/runtime/CL/functions/CLSquaredDifference.h>
#include <arm_compute/runtime/CL/functions/CLStridedSliceEx.h>
#include <arm_compute/runtime/CL/functions/CLTopKV2.h>
}
};
- auto add_split_params = [&add_scalar_int32](void* data) {
+ auto add_split_params = [&add_scalar_int32, &augmented_inputs](void* data) {
+ // Swap operands [0] and [1]: per the variable names below, the incoming
+ // order is (axis, tensor); reorder so the tensor comes first and the
+ // axis second before the op consumes them.
+ auto input_tensor = augmented_inputs[1];
+ auto axis = augmented_inputs[0];
+ augmented_inputs[0] = input_tensor;
+ augmented_inputs[1] = axis;
+
 auto builtin = reinterpret_cast<TfLiteSplitParams*>(data);
 add_scalar_int32(builtin->num_splits);
 };
});
}
+// Lowers a SplitNode to an ACL CLSplit stage: one input tensor is divided
+// into node.getOutputs().size() equal chunks along the axis operand.
+void StageGenerator::visit(const model::operation::SplitNode &node)
+{
+  const auto input_index{node.getInputs().at(model::operation::SplitNode::Input::INPUT)};
+  const auto axis_index{node.param().axis_index};
+  const auto num_of_splits_index{node.param().num_of_splits_index};
+
+  // The number of declared outputs must equal the requested split count.
+  assert(_ctx.at(num_of_splits_index).asScalar<unsigned int>() == node.getOutputs().size());
+
+  // Plain-data capture for the deferred stage lambda below.
+  struct Param
+  {
+    model::OperandIndex ifm_index;
+    std::vector<model::OperandIndex> output_indexes;
+    int32_t axis;
+  };
+
+  Param param;
+  param.ifm_index = input_index;
+  param.axis = _ctx.at(axis_index).asScalar<int32_t>();
+  // Normalize a negative axis against the input's actual rank rather than a
+  // hard-coded 4, so inputs of any rank are split along the intended axis.
+  if (param.axis < 0)
+    param.axis += _ctx.at(input_index).shape().rank();
+
+  // NOTE(review): dimension correction is disabled on input and all outputs —
+  // presumably Split needs the uncorrected layout; confirm against similar ops.
+  _tensor_builder->dimCorrection(input_index, false);
+  for (const auto &e : node.getOutputs())
+  {
+    param.output_indexes.emplace_back(e);
+    _tensor_builder->dimCorrection(e, false);
+  }
+
+  auto tensors = _tensor_builder;
+
+  returnStage([tensors, param](IExecutionBuilder &builder) {
+    auto ifm_alloc = tensors->at(param.ifm_index).get();
+    std::vector<arm_compute::ICLTensor *> output_allocs;
+    for (auto ofm_ind : param.output_indexes)
+    {
+      output_allocs.emplace_back(tensors->at(ofm_ind).get()->handle());
+    }
+
+    std::unique_ptr<::arm_compute::IFunction> fn;
+
+    auto l = make_layer<::arm_compute::CLSplit>();
+
+    // Map the frontend axis onto the corresponding ARM Compute axis for the
+    // tensor's runtime dimension count.
+    auto armAxis = ::neurun::backend::acl_common::ToARMComputeAxis(
+                       tensors->at(param.ifm_index).get()->num_dimensions(), param.axis)
+                       .value();
+    l->configure(ifm_alloc->handle(), output_allocs, armAxis);
+
+    fn = std::move(l);
+
+    auto acl_fn = asAclFunction(std::move(fn));
+
+    builder.append(std::move(acl_fn));
+  });
+}
+
} // namespace acl_cl
} // namespace backend
} // namespace neurun
void visit(const model::operation::LocalResponseNormalizationNode &) override;
void visit(const model::operation::DepthToSpaceNode &) override;
void visit(const model::operation::ReduceMinNode &) override;
+ void visit(const model::operation::SplitNode &) override;
private:
const neurun::model::Operands &_ctx;
#include "operation/LocalResponseNormalizationNode.h"
#include "operation/DepthToSpaceNode.h"
#include "operation/ReduceMinNode.h"
+#include "operation/SplitNode.h"
OP(LocalResponseNormalizationNode , true)
OP(DepthToSpaceNode , true)
OP(ReduceMinNode , true)
+OP(SplitNode , true)
OP(PermuteNode , false)
--- /dev/null
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#ifndef __NEURUN_MODEL_OPERATION_SPLIT_NODE_H__
+#define __NEURUN_MODEL_OPERATION_SPLIT_NODE_H__
+
+#include "model/Operation.h"
+
+namespace neurun
+{
+namespace model
+{
+namespace operation
+{
+/**
+ * @brief Split operation node: divides the INPUT tensor into equal chunks
+ *        along an axis. The axis and the number of splits are carried as
+ *        operand indexes in Param rather than as node inputs, so the node
+ *        itself has exactly one input.
+ */
+class SplitNode : public model::Operation
+{
+public:
+  enum Input
+  {
+    INPUT = 0
+  };
+
+  struct Param
+  {
+    OperandIndex axis_index;          // scalar operand: axis to split along
+    OperandIndex num_of_splits_index; // scalar operand: number of output chunks
+  };
+
+public:
+  SplitNode(const OperandIndexSequence &inputs, const OperandIndexSequence &outputs,
+            const Param &param);
+
+public:
+  virtual void accept(OperationVisitor &&) const override;
+  virtual std::string getName() const override { return "Split"; }
+
+public:
+  const Param &param() const { return _param; }
+
+private:
+  Param _param;
+};
+} // namespace operation
+} // namespace model
+} // namespace neurun
+#endif // __NEURUN_MODEL_OPERATION_SPLIT_NODE_H__
--- /dev/null
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#include "model/operation/SplitNode.h"
+#include <cassert>
+#include "model/OperationVisitor.h"
+namespace neurun
+{
+namespace model
+{
+namespace operation
+{
+// Double-dispatch hook: forwards this node to the visitor's SplitNode overload.
+void SplitNode::accept(OperationVisitor &&v) const { v.visit(*this); }
+
+// Exactly one node input (the tensor to split); axis and split count are
+// referenced through Param, not through the input sequence.
+SplitNode::SplitNode(const OperandIndexSequence &inputs, const OperandIndexSequence &outputs,
+                     const Param &param)
+    : model::Operation{OperandConstraint::createExact(1u), inputs, outputs}, _param{param}
+{
+}
+} // namespace operation
+} // namespace model
+} // namespace neurun
return new operation::ReduceMinNode{inputs, outputs, param};
};
+
+ // ANEURALNETWORKS_SPLIT_EX: NN API passes (tensor, axis, num_splits) as the
+ // three inputs; only the tensor becomes a node input — axis and num_splits
+ // are recorded as operand indexes in SplitNode::Param.
+ _map[ANEURALNETWORKS_SPLIT_EX] = [](const OperationFactory::Param &init_param,
+ neurun::model::Operands &) {
+ assert(init_param.input_count == 3);
+ assert(init_param.output_count >= 1); // At least one output tensor and axis
+
+ OperandIndexSequence inputs{init_param.inputs[0]};
+ OperandIndexSequence outputs;
+ // Each declared output operand receives one split chunk.
+ for (uint32_t n = 0; n < init_param.output_count; ++n)
+ {
+ outputs.append(OperandIndex{init_param.outputs[n]});
+ }
+
+ operation::SplitNode::Param param;
+ param.axis_index = OperandIndex{init_param.inputs[1]};
+ param.num_of_splits_index = OperandIndex{init_param.inputs[2]};
+
+ return new operation::SplitNode{inputs, outputs, param};
+ };
}
neurun::model::Operation *OperationFactory::create(ANeuralNetworksOperationType type,
GeneratedTests.batch_to_space*
GeneratedTests.space_to_batch*
# Unexpected result
-GeneratedTests.split*
GeneratedTests.pack*
GeneratedTests.unpack*
# Not support broadcast