This commit changes the namespace of the kernels from `internal::kernel` to `neurun::kernel` (and renames the cpu kernels' include guards from `__INTERNAL_KERNELS_CPU_*` to `__NEURUN_KERNEL_CPU_*` accordingly).
Signed-off-by: sjsujinkim <sjsujin.kim@samsung.com>
input_allocs.emplace_back(tensors->at(::internal::tflite::operand::Index{ifm_ind}).get());
}
- std::unique_ptr<::internal::kernel::acl_cl::ConcatLayer> fn{
- new ::internal::kernel::acl_cl::ConcatLayer};
+ std::unique_ptr<::neurun::kernel::acl_cl::ConcatLayer> fn{
+ new ::neurun::kernel::acl_cl::ConcatLayer};
fn->configure(input_allocs, param.axis, output_alloc);
auto ker_alloc = tensors->at(::internal::tflite::operand::Index{param.ker_index});
auto bias_alloc = tensors->at(::internal::tflite::operand::Index{param.bias_index});
- std::unique_ptr<::internal::kernel::cpu::ConvolutionLayer> fn{
- new ::internal::kernel::cpu::ConvolutionLayer};
+ std::unique_ptr<::neurun::kernel::cpu::ConvolutionLayer> fn{
+ new ::neurun::kernel::cpu::ConvolutionLayer};
fn->configure(ifm_alloc->buffer(), param.ifm_shape, ker_alloc->buffer(), param.ker_shape,
bias_alloc->buffer(), param.bias_shape, param.padding.left, param.padding.right,
auto ofm_alloc = tensors->at(::internal::tflite::operand::Index{param.ofm_index}).get();
auto ifm_alloc = tensors->at(::internal::tflite::operand::Index{param.ifm_index}).get();
- std::unique_ptr<::internal::kernel::cpu::MaxPoolLayer> fn{
- new ::internal::kernel::cpu::MaxPoolLayer};
+ std::unique_ptr<::neurun::kernel::cpu::MaxPoolLayer> fn{
+ new ::neurun::kernel::cpu::MaxPoolLayer};
fn->configure(ifm_alloc->buffer(), param.ifm_shape, param.padding.left, param.padding.right,
param.padding.top, param.padding.bottom, param.stride.horizontal,
auto ofm_alloc = tensors->at(::internal::tflite::operand::Index{param.ofm_index}).get();
auto ifm_alloc = tensors->at(::internal::tflite::operand::Index{param.ifm_index}).get();
- std::unique_ptr<::internal::kernel::cpu::AvgPoolLayer> fn{
- new ::internal::kernel::cpu::AvgPoolLayer};
+ std::unique_ptr<::neurun::kernel::cpu::AvgPoolLayer> fn{
+ new ::neurun::kernel::cpu::AvgPoolLayer};
fn->configure(ifm_alloc->buffer(), param.ifm_shape, param.padding.left, param.padding.right,
param.padding.top, param.padding.bottom, param.stride.horizontal,
tensors->at(::internal::tflite::operand::Index{ifm_ind}).get()->buffer());
}
- std::unique_ptr<::internal::kernel::cpu::ConcatLayer> fn{
- new ::internal::kernel::cpu::ConcatLayer};
+ std::unique_ptr<::neurun::kernel::cpu::ConcatLayer> fn{new ::neurun::kernel::cpu::ConcatLayer};
fn->configure(input_buffers, param.ifm_shapes, param.axis, output_alloc->buffer(),
param.ofm_shape);
auto weight_alloc = tensors->at(::internal::tflite::operand::Index{param.weight_index}).get();
auto bias_alloc = tensors->at(::internal::tflite::operand::Index{param.bias_index}).get();
- std::unique_ptr<::internal::kernel::cpu::FullyConnectedLayer> fn{
- new ::internal::kernel::cpu::FullyConnectedLayer};
+ std::unique_ptr<::neurun::kernel::cpu::FullyConnectedLayer> fn{
+ new ::neurun::kernel::cpu::FullyConnectedLayer};
fn->configure(input_alloc->buffer(), param.ifm_shape, weight_alloc->buffer(),
param.weight_shape, bias_alloc->buffer(), param.bias_shape, param.activation,
auto output_alloc = tensors->at(::internal::tflite::operand::Index{param.output_index}).get();
auto input_alloc = tensors->at(::internal::tflite::operand::Index{param.input_index}).get();
- std::unique_ptr<::internal::kernel::cpu::ReshapeLayer> fn{
- new ::internal::kernel::cpu::ReshapeLayer};
+ std::unique_ptr<::neurun::kernel::cpu::ReshapeLayer> fn{
+ new ::neurun::kernel::cpu::ReshapeLayer};
fn->configure(input_alloc->buffer(), param.ifm_shape, output_alloc->buffer(), param.ofm_shape);
auto output_alloc = tensors->at(::internal::tflite::operand::Index{param.output_index}).get();
auto input_alloc = tensors->at(::internal::tflite::operand::Index{param.input_index}).get();
- std::unique_ptr<::internal::kernel::cpu::SoftMaxLayer> fn{
- new ::internal::kernel::cpu::SoftMaxLayer};
+ std::unique_ptr<::neurun::kernel::cpu::SoftMaxLayer> fn{
+ new ::neurun::kernel::cpu::SoftMaxLayer};
fn->configure(input_alloc->buffer(), param.ifm_shape, param.scale, output_alloc->buffer(),
param.ofm_shape);
} // namespace {anonymous}
-namespace internal
+namespace neurun
{
namespace kernel
{
} // namespace acl_cl
} // namespace kernel
-} // namespace internal
+} // namespace neurun
// If we remove this, we can also remove cpu kernel library dependency
#include "kernel/cpufallback/OperationUtils.h"
-using namespace internal::kernel::cpu;
+using namespace neurun::kernel::cpu;
-namespace internal
+namespace neurun
{
namespace kernel
{
{
//
-// internal::kernel::acl_cl::ConcatLayer
+// neurun::kernel::acl_cl::ConcatLayer
// A naive implementation of ConcatLayer for ACL
//
} // namespace acl_cl
} // namespace kernel
-} // namespace internal
+} // namespace neurun
#endif // __INTERNAL_KERNEL_ACL_CL_CONCAT_LAYER_H__
#include "logging.h"
-namespace internal
+namespace neurun
{
namespace kernel
{
} // namespace acl_cl
} // namespace kernel
-} // namespace internal
+} // namespace neurun
#include "internal/Model.h"
#include "internal/common/Tensor.h"
-namespace internal
+namespace neurun
{
namespace kernel
{
} // namespace acl_cl
} // namespace kernel
-} // namespace internal
+} // namespace neurun
#endif // __INTERNAL_KERNELS_ACL_CL_TENSOR_CONVERT_FROM_COMMON_LAYER_H__
#include "logging.h"
-namespace internal
+namespace neurun
{
namespace kernel
{
} // namespace acl_cl
} // namespace kernel
-} // namespace internal
+} // namespace neurun
#include "internal/Model.h"
#include "internal/common/Tensor.h"
-namespace internal
+namespace neurun
{
namespace kernel
{
} // namespace acl_cl
} // namespace kernel
-} // namespace internal
+} // namespace neurun
#endif // __INTERNAL_KERNELS_ACL_CL_TENSOR_CONVERT_TO_COMMON_LAYER_H__
#include "tensorflow/contrib/lite/kernels/internal/optimized/optimized_ops.h"
#include "kernel/cpufallback/OperationUtils.h"
-namespace internal
+namespace neurun
{
namespace kernel
{
} // namespace cpu
} // namespace kernel
-} // namespace internal
+} // namespace neurun
-#ifndef __INTERNAL_KERNELS_CPU_AVGPOOLLAYER_H__
-#define __INTERNAL_KERNELS_CPU_AVGPOOLLAYER_H__
+#ifndef __NEURUN_KERNEL_CPU_AVGPOOLLAYER_H__
+#define __NEURUN_KERNEL_CPU_AVGPOOLLAYER_H__
#include <NeuralNetworks.h>
#include "internal/Model.h"
#include "kernel/cpufallback/OperationUtils.h"
-using namespace internal::kernel::cpu;
+using namespace neurun::kernel::cpu;
-namespace internal
+namespace neurun
{
namespace kernel
{
} // namespace cpu
} // namespace kernel
-} // namespace internal
+} // namespace neurun
-#endif // __INTERNAL_KERNELS_CPU_AVGPOOLLAYER_H__
+#endif // __NEURUN_KERNEL_CPU_AVGPOOLLAYER_H__
#include "tensorflow/contrib/lite/kernels/internal/optimized/optimized_ops.h"
#include "kernel/cpufallback/OperationUtils.h"
-namespace internal
+namespace neurun
{
namespace kernel
{
} // namespace cpu
} // namespace kernel
-} // namespace internal
+} // namespace neurun
* limitations under the License.
*/
-#ifndef __INTERNAL_KERNELS_CPU_CONCATLAYER_H__
-#define __INTERNAL_KERNELS_CPU_CONCATLAYER_H__
+#ifndef __NEURUN_KERNEL_CPU_CONCATLAYER_H__
+#define __NEURUN_KERNEL_CPU_CONCATLAYER_H__
#include <NeuralNetworks.h>
#include "internal/Model.h"
#include "kernel/cpufallback/OperationUtils.h"
-using namespace internal::kernel::cpu;
+using namespace neurun::kernel::cpu;
-namespace internal
+namespace neurun
{
namespace kernel
{
} // namespace cpu
} // namespace kernel
-} // namespace internal
+} // namespace neurun
-#endif // __INTERNAL_KERNELS_CPU_CONCATLAYER_H__
+#endif // __NEURUN_KERNEL_CPU_CONCATLAYER_H__
#include "tensorflow/contrib/lite/kernels/internal/optimized/optimized_ops.h"
#include "kernel/cpufallback/OperationUtils.h"
-namespace internal
+namespace neurun
{
namespace kernel
{
} // namespace cpu
} // namespace kernel
-} // namespace internal
+} // namespace neurun
-#ifndef __INTERNAL_KERNELS_CPU_CONVOLUTIONLAYER_H__
-#define __INTERNAL_KERNELS_CPU_CONVOLUTIONLAYER_H__
+#ifndef __NEURUN_KERNEL_CPU_CONVOLUTIONLAYER_H__
+#define __NEURUN_KERNEL_CPU_CONVOLUTIONLAYER_H__
#include <NeuralNetworks.h>
#include "internal/Model.h"
#include "kernel/cpufallback/OperationUtils.h"
-using namespace internal::kernel::cpu;
+using namespace neurun::kernel::cpu;
-namespace internal
+namespace neurun
{
namespace kernel
{
} // namespace cpu
} // namespace kernel
-} // namespace internal
+} // namespace neurun
-#endif // __INTERNAL_KERNELS_CPU_CONVOLUTIONLAYER_H__
+#endif // __NEURUN_KERNEL_CPU_CONVOLUTIONLAYER_H__
#include <mutex>
-namespace internal
+namespace neurun
{
namespace kernel
{
} // namespace cpu
} // namespace kernel
-} // namespace internal
+} // namespace neurun
-#ifndef __INTERNAL_KERNELS_CPU_FULLYCONNECTEDLAYER_H__
-#define __INTERNAL_KERNELS_CPU_FULLYCONNECTEDLAYER_H__
+#ifndef __NEURUN_KERNEL_CPU_FULLYCONNECTEDLAYER_H__
+#define __NEURUN_KERNEL_CPU_FULLYCONNECTEDLAYER_H__
#include <NeuralNetworks.h>
#include "internal/Model.h"
#include "kernel/cpufallback/OperationUtils.h"
-using namespace internal::kernel::cpu;
+using namespace neurun::kernel::cpu;
-namespace internal
+namespace neurun
{
namespace kernel
{
} // namespace cpu
} // namespace kernel
-} // namespace internal
+} // namespace neurun
-#endif // __INTERNAL_KERNELS_CPU_FULLYCONNECTEDLAYER_H__
+#endif // __NEURUN_KERNEL_CPU_FULLYCONNECTEDLAYER_H__
#include "tensorflow/contrib/lite/kernels/internal/optimized/optimized_ops.h"
#include "kernel/cpufallback/OperationUtils.h"
-namespace internal
+namespace neurun
{
namespace kernel
{
} // namespace cpu
} // namespace kernel
-} // namespace internal
+} // namespace neurun
-#ifndef __INTERNAL_KERNELS_CPU_MAXPOOLLAYER_H__
-#define __INTERNAL_KERNELS_CPU_MAXPOOLLAYER_H__
+#ifndef __NEURUN_KERNEL_CPU_MAXPOOLLAYER_H__
+#define __NEURUN_KERNEL_CPU_MAXPOOLLAYER_H__
#include <NeuralNetworks.h>
#include "internal/Model.h"
#include "kernel/cpufallback/OperationUtils.h"
-using namespace internal::kernel::cpu;
+using namespace neurun::kernel::cpu;
-namespace internal
+namespace neurun
{
namespace kernel
{
} // namespace cpu
} // namespace kernel
-} // namespace internal
+} // namespace neurun
-#endif // __INTERNAL_KERNELS_CPU_MAXPOOLLAYER_H__
+#endif // __NEURUN_KERNEL_CPU_MAXPOOLLAYER_H__
#include <algorithm>
#include <cassert>
-namespace internal
+namespace neurun
{
namespace kernel
{
} // namespace cpu
} // namespace kernel
-} // namespace internal
+} // namespace neurun
#include "internal/Model.h"
#include "tensorflow/contrib/lite/kernels/internal/types.h"
-namespace internal
+namespace neurun
{
namespace kernel
{
} // namespace cpu
} // namespace kernel
-} // namespace internal
+} // namespace neurun
#endif // __NNFW_SUPPORT_NNAPI_OPERATION_UTILS_H__
#include "tensorflow/contrib/lite/kernels/internal/reference/reference_ops.h"
#include "kernel/cpufallback/OperationUtils.h"
-namespace internal
+namespace neurun
{
namespace kernel
{
} // namespace cpu
} // namespace kernel
-} // namespace internal
+} // namespace neurun
-#ifndef __INTERNAL_KERNELS_CPU_RESHAPELAYER_H__
-#define __INTERNAL_KERNELS_CPU_RESHAPELAYER_H__
+#ifndef __NEURUN_KERNEL_CPU_RESHAPELAYER_H__
+#define __NEURUN_KERNEL_CPU_RESHAPELAYER_H__
#include <NeuralNetworks.h>
#include "internal/Model.h"
#include "kernel/cpufallback/OperationUtils.h"
-using namespace internal::kernel::cpu;
+using namespace neurun::kernel::cpu;
-namespace internal
+namespace neurun
{
namespace kernel
{
} // namespace cpu
} // namespace kernel
-} // namespace internal
+} // namespace neurun
-#endif // __INTERNAL_KERNELS_CPU_RESHAPELAYER_H__
+#endif // __NEURUN_KERNEL_CPU_RESHAPELAYER_H__
#include "tensorflow/contrib/lite/kernels/internal/optimized/optimized_ops.h"
#include "kernel/cpufallback/OperationUtils.h"
-namespace internal
+namespace neurun
{
namespace kernel
{
} // namespace cpu
} // namespace kernel
-} // namespace internal
+} // namespace neurun
-#ifndef __INTERNAL_KERNELS_CPU_SOFTMAXLAYER_H__
-#define __INTERNAL_KERNELS_CPU_SOFTMAXLAYER_H__
+#ifndef __NEURUN_KERNEL_CPU_SOFTMAXLAYER_H__
+#define __NEURUN_KERNEL_CPU_SOFTMAXLAYER_H__
#include <NeuralNetworks.h>
#include "internal/Model.h"
#include "kernel/cpufallback/OperationUtils.h"
-using namespace internal::kernel::cpu;
+using namespace neurun::kernel::cpu;
-namespace internal
+namespace neurun
{
namespace kernel
{
} // namespace cpu
} // namespace kernel
-} // namespace internal
+} // namespace neurun
-#endif // __INTERNAL_KERNELS_CPU_SOFTMAXLAYER_H__
+#endif // __NEURUN_KERNEL_CPU_SOFTMAXLAYER_H__
#include "logging.h"
-namespace internal
+namespace neurun
{
namespace kernel
{
} // namespace cpu
} // namespace kernel
-} // namespace internal
+} // namespace neurun
* limitations under the License.
*/
-#ifndef __INTERNAL_KERNELS_CPU_TENSOR_CONVERT_FROM_COMMON_LAYER_H__
-#define __INTERNAL_KERNELS_CPU_TENSOR_CONVERT_FROM_COMMON_LAYER_H__
+#ifndef __NEURUN_KERNEL_CPU_TENSOR_CONVERT_FROM_COMMON_LAYER_H__
+#define __NEURUN_KERNEL_CPU_TENSOR_CONVERT_FROM_COMMON_LAYER_H__
#include <NeuralNetworks.h>
#include "internal/Model.h"
#include "internal/common/Tensor.h"
-namespace internal
+namespace neurun
{
namespace kernel
{
} // namespace cpu
} // namespace kernel
-} // namespace internal
+} // namespace neurun
-#endif // __INTERNAL_KERNELS_CPU_TENSOR_CONVERT_FROM_COMMON_LAYER_H__
+#endif // __NEURUN_KERNEL_CPU_TENSOR_CONVERT_FROM_COMMON_LAYER_H__
#include "logging.h"
-namespace internal
+namespace neurun
{
namespace kernel
{
} // namespace cpu
} // namespace kernel
-} // namespace internal
+} // namespace neurun
* limitations under the License.
*/
-#ifndef __INTERNAL_KERNELS_CPU_TENSOR_CONVERT_TO_COMMON_LAYER_H__
-#define __INTERNAL_KERNELS_CPU_TENSOR_CONVERT_TO_COMMON_LAYER_H__
+#ifndef __NEURUN_KERNEL_CPU_TENSOR_CONVERT_TO_COMMON_LAYER_H__
+#define __NEURUN_KERNEL_CPU_TENSOR_CONVERT_TO_COMMON_LAYER_H__
#include <NeuralNetworks.h>
#include "internal/Model.h"
#include "internal/common/Tensor.h"
-namespace internal
+namespace neurun
{
namespace kernel
{
} // namespace cpu
} // namespace kernel
-} // namespace internal
+} // namespace neurun
-#endif // __INTERNAL_KERNELS_CPU_TENSOR_CONVERT_TO_COMMON_LAYER_H__
+#endif // __NEURUN_KERNEL_CPU_TENSOR_CONVERT_TO_COMMON_LAYER_H__