#include "backend/acl_cl/StageGenerator.h"
+#include "kernel/acl_cl/CLFunction.h"
+
#include <arm_compute/runtime/CL/functions/CLConvolutionLayer.h>
#include <arm_compute/runtime/CL/functions/CLPoolingLayer.h>
#include <arm_compute/runtime/CL/functions/CLActivationLayer.h>
// Builds a default-constructed layer object of type T and hands back
// exclusive ownership via std::unique_ptr.
template <typename T> std::unique_ptr<T> make_layer(void)
{
  std::unique_ptr<T> layer{new T};
  return layer;
}
+// Adapts a raw ARM Compute CL function into the backend-neutral
+// ::neurun::exec::IFunction interface by wrapping it in a CLFunction.
+// Takes ownership of `layer`; returns the owning wrapper.
+std::unique_ptr<::neurun::kernel::acl_cl::CLFunction>
+make_cl_function(std::unique_ptr<::arm_compute::IFunction> &&layer)
+{
+ return std::unique_ptr<::neurun::kernel::acl_cl::CLFunction>(
+ new ::neurun::kernel::acl_cl::CLFunction(std::move(layer)));
+}
+
::arm_compute::PadStrideInfo asPadStringInfo(const ::internal::Padding &padding,
const ::internal::Stride &stride)
{
fn->configure(ifm_alloc, nullptr, act_info);
- _builder.append(std::move(fn));
+ auto acl_fn = make_cl_function(std::move(fn));
+
+ _builder.append(std::move(acl_fn));
}
void ActivationBuilder::append(FuseCode code, ::arm_compute::ICLTensor *ifm_alloc)
fn->configure(ifm_alloc, ker_alloc, bias_alloc, ofm_alloc, conv_info);
- builder.append(std::move(fn));
+ auto acl_fn = make_cl_function(std::move(fn));
+
+ builder.append(std::move(acl_fn));
ActivationBuilder{builder}.append(param.activation, ofm_alloc);
});
fn->configure(ifm_alloc, ofm_alloc, info);
- builder.append(std::move(fn));
+ auto acl_fn = make_cl_function(std::move(fn));
+
+ builder.append((std::move(acl_fn)));
});
}
fn->configure(ifm_alloc, ofm_alloc, info);
- builder.append(std::move(fn));
+ auto acl_fn = make_cl_function(std::move(fn));
+
+ builder.append((std::move(acl_fn)));
});
}
fn->configure(input_allocs, param.axis, output_alloc);
- builder.append(std::move(fn));
+ auto acl_fn = make_cl_function(std::move(fn));
+
+ builder.append(std::move(acl_fn));
});
}
fn->configure(input_alloc, weight_alloc, bias_alloc, output_alloc);
- builder.append(std::move(fn));
+ auto acl_fn = make_cl_function(std::move(fn));
+
+ builder.append((std::move(acl_fn)));
ActivationBuilder{builder}.append(param.activation, output_alloc);
});
fn->configure(input_alloc, output_alloc);
- builder.append(std::move(fn));
+ auto acl_fn = make_cl_function(std::move(fn));
+
+ builder.append((std::move(acl_fn)));
});
}
fn->configure(input_alloc, output_alloc, param.scale);
- builder.append(std::move(fn));
+ auto acl_fn = make_cl_function(std::move(fn));
+
+ builder.append((std::move(acl_fn)));
});
}
#include <memory>
#include <functional>
-#include <arm_compute/runtime/IFunction.h>
+#include "exec/interface/IFunction.h"
#include "backend/interface/ITensorBuilder.h"
#include "graph/operation/NodeVisitor.h"
{
virtual ~IExecutionBuilder() = default;
- virtual void append(std::unique_ptr<::arm_compute::IFunction> &&f) = 0;
+ virtual void append(std::unique_ptr<::neurun::exec::IFunction> &&f) = 0;
};
using Stage = std::function<void(IExecutionBuilder &)>;
#define __NEURUN_CODEGEN_OPERATION_SEQUENCE_H__
#include <stdint.h>
-#include <arm_compute/runtime/IFunction.h>
+#include "exec/interface/IFunction.h"
#include <memory>
#include <vector>
uint32_t size(void) const { return _functions.size(); }
public:
- Sequence &append(std::unique_ptr<::arm_compute::IFunction> &&func)
+ // Takes ownership of `func` and appends it to the sequence;
+ // returns *this so calls can be chained.
+ Sequence &append(std::unique_ptr<::neurun::exec::IFunction> &&func)
{
_functions.emplace_back(std::move(func));
return (*this);
}
public:
- ::arm_compute::IFunction &at(uint32_t n) const { return *(_functions.at(n)); }
+ // Read-only access to the n-th appended function; std::vector::at
+ // throws std::out_of_range when n is invalid.
+ ::neurun::exec::IFunction &at(uint32_t n) const { return *(_functions.at(n)); }
private:
- std::vector<std::unique_ptr<::arm_compute::IFunction>> _functions;
+ // Owned functions, stored in append (execution) order.
+ std::vector<std::unique_ptr<::neurun::exec::IFunction>> _functions;
};
} // namespace operation
}
public:
- void append(std::unique_ptr<::arm_compute::IFunction> &&f) override
+ // IExecutionBuilder implementation: forwards the generated function
+ // into the plan's operation sequence, transferring ownership.
+ void append(std::unique_ptr<::neurun::exec::IFunction> &&f) override
{
_plan.operations().append(std::move(f));
}
--- /dev/null
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __NEURUN_EXEC_I_FUNCTION_H__
+#define __NEURUN_EXEC_I_FUNCTION_H__
+
+namespace neurun
+{
+namespace exec
+{
+
+// Backend-neutral executable unit. Implementations must provide run();
+// prepare() defaults to a no-op — presumably a hook for one-time setup
+// before execution (NOTE(review): confirm against backend implementations).
+class IFunction
+{
+public:
+ virtual ~IFunction() = default;
+ virtual void run() = 0;
+ virtual void prepare() {}
+};
+
+} // namespace exec
+} // namespace neurun
+
+#endif // __NEURUN_EXEC_I_FUNCTION_H__
--- /dev/null
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __NEURUN_KERNEL_ACL_CL_CL_FUNCTION_H__
+#define __NEURUN_KERNEL_ACL_CL_CL_FUNCTION_H__
+
+#include "exec/interface/IFunction.h"
+#include <arm_compute/runtime/IFunction.h>
+#include <memory>
+
+namespace neurun
+{
+namespace kernel
+{
+namespace acl_cl
+{
+
+// Adapter exposing an ARM Compute CL function through the backend-neutral
+// ::neurun::exec::IFunction interface. Owns the wrapped function.
+class CLFunction : public ::neurun::exec::IFunction
+{
+public:
+ CLFunction() = delete;
+
+public:
+ // Takes ownership of a concrete ACL function. `func` must be non-null:
+ // run()/prepare() dereference it unchecked.
+ // Note: `func` is a named rvalue-reference parameter (an lvalue inside
+ // the ctor), so std::move — not std::forward — is the correct idiom here.
+ CLFunction(std::unique_ptr<::arm_compute::IFunction> &&func)
+ : _func(std::move(func))
+ {
+ // DO NOTHING
+ }
+
+public:
+ void run() override { _func->run(); }
+ void prepare() override { _func->prepare(); }
+
+private:
+ std::unique_ptr<::arm_compute::IFunction> _func;
+};
+
+} // namespace acl_cl
+} // namespace kernel
+} // namespace neurun
+
+#endif // __NEURUN_KERNEL_ACL_CL_CL_FUNCTION_H__
#include <NeuralNetworks.h>
-#include <arm_compute/runtime/IFunction.h>
+#include "exec/interface/IFunction.h"
#include "kernel/cpu/OperationUtils.h"
namespace cpu
{
-class AvgPoolLayer : public ::arm_compute::IFunction
+class AvgPoolLayer : public ::neurun::exec::IFunction
{
public:
AvgPoolLayer();
#include <NeuralNetworks.h>
-#include <arm_compute/runtime/IFunction.h>
+#include "exec/interface/IFunction.h"
#include "kernel/cpu/OperationUtils.h"
namespace cpu
{
-class ConcatLayer : public ::arm_compute::IFunction
+class ConcatLayer : public ::neurun::exec::IFunction
{
public:
ConcatLayer();
#include <NeuralNetworks.h>
-#include <arm_compute/runtime/IFunction.h>
+#include "exec/interface/IFunction.h"
#include "kernel/cpu/OperationUtils.h"
namespace cpu
{
-class ConvolutionLayer : public ::arm_compute::IFunction
+class ConvolutionLayer : public ::neurun::exec::IFunction
{
public:
ConvolutionLayer();
#include <NeuralNetworks.h>
-#include <arm_compute/runtime/IFunction.h>
+#include "exec/interface/IFunction.h"
#include "kernel/cpu/OperationUtils.h"
namespace cpu
{
-class FullyConnectedLayer : public ::arm_compute::IFunction
+class FullyConnectedLayer : public ::neurun::exec::IFunction
{
public:
FullyConnectedLayer();
#include <NeuralNetworks.h>
-#include <arm_compute/runtime/IFunction.h>
+#include "exec/interface/IFunction.h"
#include "kernel/cpu/OperationUtils.h"
namespace cpu
{
-class MaxPoolLayer : public ::arm_compute::IFunction
+class MaxPoolLayer : public ::neurun::exec::IFunction
{
public:
MaxPoolLayer();
#include <NeuralNetworks.h>
-#include <arm_compute/runtime/IFunction.h>
+#include "exec/interface/IFunction.h"
#include <arm_compute/core/ITensor.h>
#include "util/feature/nhwc/View.h"
namespace cpu
{
-class PermuteLayer : public ::arm_compute::IFunction
+class PermuteLayer : public ::neurun::exec::IFunction
{
public:
enum class Type
#include <NeuralNetworks.h>
-#include <arm_compute/runtime/IFunction.h>
+#include "exec/interface/IFunction.h"
#include "kernel/cpu/OperationUtils.h"
namespace cpu
{
-class ReshapeLayer : public ::arm_compute::IFunction
+class ReshapeLayer : public ::neurun::exec::IFunction
{
public:
ReshapeLayer();
#include <NeuralNetworks.h>
-#include <arm_compute/runtime/IFunction.h>
+#include "exec/interface/IFunction.h"
#include "kernel/cpu/OperationUtils.h"
namespace cpu
{
-class SoftMaxLayer : public ::arm_compute::IFunction
+class SoftMaxLayer : public ::neurun::exec::IFunction
{
public:
SoftMaxLayer();