if (::internal::arm_compute::isGpuMode())
{
- auto fn = nnfw::make_unique<::arm_compute::CLActivationLayer>();
+ auto fn = nnfw::cpp14::make_unique<::arm_compute::CLActivationLayer>();
fn->configure(CAST_CL(ifm_alloc), nullptr, act_info);
}
else
{
- auto fn = nnfw::make_unique<::arm_compute::NEActivationLayer>();
+ auto fn = nnfw::cpp14::make_unique<::arm_compute::NEActivationLayer>();
fn->configure(ifm_alloc, nullptr, act_info);
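// NOTE Every hunk below makes the same substitution: nnfw::make_unique becomes
// nnfw::cpp14::make_unique. A minimal sketch of that helper -- the usual C++14
// std::make_unique backport -- follows; the exact header and definition are an
// assumption, not part of this change:
#include <memory>
#include <utility>
namespace nnfw
{
namespace cpp14
{
template <typename T, typename... Args> std::unique_ptr<T> make_unique(Args &&... args)
{
  return std::unique_ptr<T>(new T(std::forward<Args>(args)...));
}
} // namespace cpp14
} // namespace nnfw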
if (::internal::arm_compute::isGpuMode())
{
- auto fn = nnfw::make_unique<::arm_compute::CLActivationLayer>();
+ auto fn = nnfw::cpp14::make_unique<::arm_compute::CLActivationLayer>();
fn->configure(CAST_CL(ifm_alloc), nullptr, act_info);
}
else
{
- auto fn = nnfw::make_unique<::arm_compute::NEActivationLayer>();
+ auto fn = nnfw::cpp14::make_unique<::arm_compute::NEActivationLayer>();
fn->configure(ifm_alloc, nullptr, act_info);
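// The CL/NEON dispatch above recurs for every operation: build the CL function
// under isGpuMode(), otherwise the NEON one, then erase the concrete type behind
// ::arm_compute::IFunction. A condensed sketch of that shared shape (the helper
// name make_activation is hypothetical):
std::unique_ptr<::arm_compute::IFunction>
make_activation(::arm_compute::ITensor *ifm, ::arm_compute::ITensor *ofm,
                const ::arm_compute::ActivationLayerInfo &act_info)
{
  if (::internal::arm_compute::isGpuMode())
  {
    auto fn = nnfw::cpp14::make_unique<::arm_compute::CLActivationLayer>();
    fn->configure(CAST_CL(ifm), CAST_CL(ofm), act_info); // CL variant needs ICLTensor*
    return std::move(fn);
  }
  auto fn = nnfw::cpp14::make_unique<::arm_compute::NEActivationLayer>();
  fn->configure(ifm, ofm, act_info); // NEON variant takes plain ITensor*
  return std::move(fn);
}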
if (::internal::arm_compute::isGpuMode())
{
- auto fn = nnfw::make_unique<::arm_compute::CLActivationLayer>();
+ auto fn = nnfw::cpp14::make_unique<::arm_compute::CLActivationLayer>();
fn->configure(CAST_CL(ifm_alloc), nullptr, act_info);
}
else
{
- auto fn = nnfw::make_unique<::arm_compute::NEActivationLayer>();
+ auto fn = nnfw::cpp14::make_unique<::arm_compute::NEActivationLayer>();
fn->configure(ifm_alloc, nullptr, act_info);
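// CAST_CL appears wherever a CL function needs its ICLTensor-typed arguments.
// Its definition is not part of this diff; a plausible shape, assuming the
// allocations are CL-backed whenever isGpuMode() holds, is a plain downcast:
//
//   #define CAST_CL(tensor) (static_cast<::arm_compute::ICLTensor *>(tensor))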
// NOTE SimpleArithmeticAddition does not support broadcasting
assert(lhs_shape == rhs_shape);
- auto l = nnfw::make_unique<SimpleArithmeticAddition>();
+ auto l = nnfw::cpp14::make_unique<SimpleArithmeticAddition>();
l->configure(lhs_alloc, rhs_alloc, ofm_alloc);
{
if (::internal::arm_compute::isGpuMode())
{
- auto l = nnfw::make_unique<::arm_compute::CLArithmeticAddition>();
+ auto l = nnfw::cpp14::make_unique<::arm_compute::CLArithmeticAddition>();
// TODO Decide ConvertPolicy (WRAP? SATURATE?) according to NN API specification
l->configure(CAST_CL(lhs_alloc), CAST_CL(rhs_alloc), CAST_CL(ofm_alloc),
}
else // NEON
{
- auto l = nnfw::make_unique<::arm_compute::NEArithmeticAddition>();
+ auto l = nnfw::cpp14::make_unique<::arm_compute::NEArithmeticAddition>();
// TODO Decide ConvertPolicy (WRAP? SATURATE?) according to NN API specification
l->configure(lhs_alloc, rhs_alloc, ofm_alloc, ::arm_compute::ConvertPolicy::SATURATE);
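// For reference, ::arm_compute::ConvertPolicy has exactly two values: WRAP lets
// an overflowing result wrap around, while SATURATE clamps it to the output
// type's range. The TODO above asks which of the two the NN API mandates; the
// SATURATE default used here is the conservative choice.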
if (::internal::arm_compute::isGpuMode())
{
- auto fn = nnfw::make_unique<::arm_compute::CLArithmeticSubtractionEx>();
+ auto fn = nnfw::cpp14::make_unique<::arm_compute::CLArithmeticSubtractionEx>();
// TODO Decide ConvertPolicy (WRAP? SATURATE?) according to NN API specification
fn->configure(CAST_CL(lhs_alloc), CAST_CL(rhs_alloc), CAST_CL(ofm_alloc),
}
else // NEON
{
- auto fn = nnfw::make_unique<::arm_compute::NEArithmeticSubtraction>();
+ auto fn = nnfw::cpp14::make_unique<::arm_compute::NEArithmeticSubtraction>();
// TODO Decide ConvertPolicy (WRAP? SATURATE?) according to NN API specification
fn->configure(lhs_alloc, rhs_alloc, ofm_alloc, ::arm_compute::ConvertPolicy::SATURATE);
if (::internal::arm_compute::isGpuMode())
{
- auto fn = nnfw::make_unique<::arm_compute::CLPixelWiseMultiplication>();
+ auto fn = nnfw::cpp14::make_unique<::arm_compute::CLPixelWiseMultiplication>();
fn->configure(CAST_CL(lhs_input_alloc), CAST_CL(rhs_input_alloc), CAST_CL(output_alloc),
1.0, // scale
}
else // NEON
{
- auto fn = nnfw::make_unique<::arm_compute::NEPixelWiseMultiplication>();
+ auto fn = nnfw::cpp14::make_unique<::arm_compute::NEPixelWiseMultiplication>();
fn->configure(lhs_input_alloc, rhs_input_alloc, output_alloc,
1.0, // scale
if (::internal::arm_compute::isGpuMode())
{
- auto fn = nnfw::make_unique<::arm_compute::CLPixelWiseDivision>();
+ auto fn = nnfw::cpp14::make_unique<::arm_compute::CLPixelWiseDivision>();
fn->configure(CAST_CL(lhs_alloc), CAST_CL(rhs_alloc), CAST_CL(ofm_alloc),
1.0, // scale
if (::internal::arm_compute::isGpuMode())
{
- auto fn = nnfw::make_unique<::arm_compute::CLDepthwiseConvolutionLayer>();
+ auto fn = nnfw::cpp14::make_unique<::arm_compute::CLDepthwiseConvolutionLayer>();
fn->configure(CAST_CL(ifm_alloc), CAST_CL(ker_alloc), CAST_CL(bias_alloc), CAST_CL(ofm_alloc),
conv_info, param.multipler);
}
else
{
- auto fn = nnfw::make_unique<::arm_compute::NEDepthwiseConvolutionLayer>();
+ auto fn = nnfw::cpp14::make_unique<::arm_compute::NEDepthwiseConvolutionLayer>();
fn->configure(ifm_alloc, ker_alloc, bias_alloc, ofm_alloc, conv_info, param.multipler);
if (::internal::arm_compute::isGpuMode())
{
- auto fn = nnfw::make_unique<::arm_compute::CLDepthwiseConvolutionLayer>();
+ auto fn = nnfw::cpp14::make_unique<::arm_compute::CLDepthwiseConvolutionLayer>();
fn->configure(CAST_CL(ifm_alloc), CAST_CL(ker_alloc), CAST_CL(bias_alloc), CAST_CL(ofm_alloc),
conv_info, param.multipler);
}
else
{
- auto fn = nnfw::make_unique<::arm_compute::NEDepthwiseConvolutionLayer>();
+ auto fn = nnfw::cpp14::make_unique<::arm_compute::NEDepthwiseConvolutionLayer>();
fn->configure(ifm_alloc, ker_alloc, bias_alloc, ofm_alloc, conv_info, param.multipler);
if (from_env<bool>(std::getenv("USE_SIMPLE_CAST")))
{
// Use the CPU version of CAST operation
- auto l = nnfw::make_unique<SimpleCastLayer>();
+ auto l = nnfw::cpp14::make_unique<SimpleCastLayer>();
l->configure(input_alloc, output_alloc);
fn = std::move(l);
{
if (::internal::arm_compute::isGpuMode())
{
- auto l = nnfw::make_unique<::arm_compute::CLCast>();
+ auto l = nnfw::cpp14::make_unique<::arm_compute::CLCast>();
l->configure(CAST_CL(input_alloc), CAST_CL(output_alloc));
fn = std::move(l);
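// The USE_SIMPLE_* switches are read via from_env<bool>(std::getenv(...)).
// A minimal sketch of such a helper, assuming an unset variable or "0" means
// false (the real nnfw parsing rules may differ):
#include <cstring>
template <typename T> T from_env(const char *value);
template <> inline bool from_env<bool>(const char *value)
{
  // Unset variable -> false; "0" -> false; any other value -> true.
  return value != nullptr && std::strcmp(value, "0") != 0;
}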
auto weight_alloc = ctx.at(::internal::tflite::operand::Index{param.weight_index});
auto bias_alloc = ctx.at(::internal::tflite::operand::Index{param.bias_index});
- auto fn = nnfw::make_unique<GenericFullyConnectedLayer>();
+ auto fn = nnfw::cpp14::make_unique<GenericFullyConnectedLayer>();
fn->configure(input_alloc, weight_alloc, bias_alloc, output_alloc, needs_reshape,
asTensorShape(reshape));
if (::internal::arm_compute::isGpuMode())
{
- auto fn = nnfw::make_unique<::arm_compute::CLScale>();
+ auto fn = nnfw::cpp14::make_unique<::arm_compute::CLScale>();
fn->configure(CAST_CL(ifm_alloc), CAST_CL(ofm_alloc),
::arm_compute::InterpolationPolicy::BILINEAR,
if (::internal::arm_compute::isGpuMode())
{
// GenericReshape first applies an NCHW->NHWC permutation, then a reshape
- auto fn = nnfw::make_unique<GenericReshapeLayer>();
+ auto fn = nnfw::cpp14::make_unique<GenericReshapeLayer>();
fn->configure(CAST_CL(input_alloc), CAST_CL(output_alloc));
}
else
{
- auto fn = nnfw::make_unique<GenericReshapeLayer>();
+ auto fn = nnfw::cpp14::make_unique<GenericReshapeLayer>();
fn->configure(input_alloc, output_alloc);
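// NOTE Both branches build a GenericReshapeLayer rather than the backend
// CLReshapeLayer/NEReshapeLayer directly, because of the extra permutation step
// noted above; only the tensor cast differs between the GPU and CPU paths.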
if (::internal::arm_compute::isGpuMode())
{
- auto fn = nnfw::make_unique<::arm_compute::CLReshapeLayer>();
+ auto fn = nnfw::cpp14::make_unique<::arm_compute::CLReshapeLayer>();
fn->configure(CAST_CL(input_alloc), CAST_CL(output_alloc));
}
else
{
- auto fn = nnfw::make_unique<::arm_compute::NEReshapeLayer>();
+ auto fn = nnfw::cpp14::make_unique<::arm_compute::NEReshapeLayer>();
fn->configure(input_alloc, output_alloc);
if (::internal::arm_compute::isGpuMode())
{
- auto fn = nnfw::make_unique<::arm_compute::CLSoftmaxLayer>();
+ auto fn = nnfw::cpp14::make_unique<::arm_compute::CLSoftmaxLayer>();
fn->configure(CAST_CL(input_alloc), CAST_CL(output_alloc), param.scale);
}
else
{
- auto fn = nnfw::make_unique<::arm_compute::NESoftmaxLayer>();
+ auto fn = nnfw::cpp14::make_unique<::arm_compute::NESoftmaxLayer>();
fn->configure(input_alloc, output_alloc, param.scale);
if (::internal::arm_compute::isGpuMode())
{
- auto fn = nnfw::make_unique<::arm_compute::CLStridedSlice>();
+ auto fn = nnfw::cpp14::make_unique<::arm_compute::CLStridedSlice>();
fn->configure(CAST_CL(inputData_alloc), CAST_CL(outputData_alloc), CAST_CL(startData_alloc),
CAST_CL(endData_alloc), CAST_CL(stridesData_alloc), param.beginMask,
if (::internal::arm_compute::isGpuMode())
{
- auto fn = nnfw::make_unique<::arm_compute::CLReduceOperation>();
+ auto fn = nnfw::cpp14::make_unique<::arm_compute::CLReduceOperation>();
fn->configure(CAST_CL(ifm_alloc), CAST_CL(ofm_alloc), param.axis,
::arm_compute::ReduceOperation::MIN);
if (::internal::arm_compute::isGpuMode())
{
- auto fn = nnfw::make_unique<::arm_compute::CLReduceOperation>();
+ auto fn = nnfw::cpp14::make_unique<::arm_compute::CLReduceOperation>();
fn->configure(CAST_CL(ifm_alloc), CAST_CL(ofm_alloc), param.axis,
::arm_compute::ReduceOperation::MAX);
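// CLReduceOperation backs several NN ops; only the ReduceOperation selector
// changes. In this file it is instantiated with MIN and MAX here, and with
// MEAN and SUM further below.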
if (from_env<bool>(std::getenv("USE_SIMPLE_CAST")))
{
// Use the CPU version of CAST operation
- auto l = nnfw::make_unique<SimpleCastLayer>();
+ auto l = nnfw::cpp14::make_unique<SimpleCastLayer>();
l->configure(input_alloc, output_alloc);
fn = std::move(l);
{
if (::internal::arm_compute::isGpuMode())
{
- auto l = nnfw::make_unique<::arm_compute::CLCast>();
+ auto l = nnfw::cpp14::make_unique<::arm_compute::CLCast>();
l->configure(CAST_CL(input_alloc), CAST_CL(output_alloc));
fn = std::move(l);
if (::internal::arm_compute::isGpuMode())
{
- auto fn = nnfw::make_unique<::arm_compute::CLTopKV2>();
+ auto fn = nnfw::cpp14::make_unique<::arm_compute::CLTopKV2>();
fn->configure(CAST_CL(input_alloc), param.k, CAST_CL(values_alloc), CAST_CL(indices_alloc));
{
std::unique_ptr<::arm_compute::IFunction> fn;
- auto l = nnfw::make_unique<::arm_compute::CLGather>();
+ auto l = nnfw::cpp14::make_unique<::arm_compute::CLGather>();
l->configure(CAST_CL(lhs_alloc), CAST_CL(rhs_alloc), CAST_CL(ofm_alloc));
fn = std::move(l);
builder.append("Gather", std::move(fn));
if (::internal::arm_compute::isGpuMode())
{
- auto fn = nnfw::make_unique<::arm_compute::CLActivationLayer>();
+ auto fn = nnfw::cpp14::make_unique<::arm_compute::CLActivationLayer>();
fn->configure(CAST_CL(ifm_alloc), CAST_CL(ofm_alloc), act_info);
}
else
{
- auto fn = nnfw::make_unique<::arm_compute::NEActivationLayer>();
+ auto fn = nnfw::cpp14::make_unique<::arm_compute::NEActivationLayer>();
fn->configure(ifm_alloc, ofm_alloc, act_info);
if (::internal::arm_compute::isGpuMode())
{
- auto fn = nnfw::make_unique<::arm_compute::CLActivationLayer>();
+ auto fn = nnfw::cpp14::make_unique<::arm_compute::CLActivationLayer>();
fn->configure(CAST_CL(ifm_alloc), CAST_CL(ofm_alloc), act_info);
}
else
{
- auto fn = nnfw::make_unique<::arm_compute::NEActivationLayer>();
+ auto fn = nnfw::cpp14::make_unique<::arm_compute::NEActivationLayer>();
fn->configure(ifm_alloc, ofm_alloc, act_info);
if (::internal::arm_compute::isGpuMode())
{
- auto fn = nnfw::make_unique<::arm_compute::CLActivationLayer>();
+ auto fn = nnfw::cpp14::make_unique<::arm_compute::CLActivationLayer>();
fn->configure(CAST_CL(ifm_alloc), CAST_CL(ofm_alloc), act_info);
}
else
{
- auto fn = nnfw::make_unique<::arm_compute::NEActivationLayer>();
+ auto fn = nnfw::cpp14::make_unique<::arm_compute::NEActivationLayer>();
fn->configure(ifm_alloc, ofm_alloc, act_info);
if (::internal::arm_compute::isGpuMode())
{
- auto fn = nnfw::make_unique<::arm_compute::CLActivationLayer>();
+ auto fn = nnfw::cpp14::make_unique<::arm_compute::CLActivationLayer>();
fn->configure(CAST_CL(ifm_alloc), CAST_CL(ofm_alloc), act_info);
}
else
{
- auto fn = nnfw::make_unique<::arm_compute::NEActivationLayer>();
+ auto fn = nnfw::cpp14::make_unique<::arm_compute::NEActivationLayer>();
fn->configure(ifm_alloc, ofm_alloc, act_info);
if (::internal::arm_compute::isGpuMode())
{
- auto fn = nnfw::make_unique<::arm_compute::CLActivationLayer>();
+ auto fn = nnfw::cpp14::make_unique<::arm_compute::CLActivationLayer>();
fn->configure(CAST_CL(ifm_alloc), CAST_CL(ofm_alloc), act_info);
}
else
{
- auto fn = nnfw::make_unique<::arm_compute::NEActivationLayer>();
+ auto fn = nnfw::cpp14::make_unique<::arm_compute::NEActivationLayer>();
fn->configure(ifm_alloc, ofm_alloc, act_info);
if (::internal::arm_compute::isGpuMode())
{
- auto fn = nnfw::make_unique<::arm_compute::CLReduceOperation>();
+ auto fn = nnfw::cpp14::make_unique<::arm_compute::CLReduceOperation>();
fn->configure(CAST_CL(ifm_alloc), CAST_CL(ofm_alloc), param.axis,
::arm_compute::ReduceOperation::MEAN);
if (::internal::arm_compute::isGpuMode())
{
- auto fn = nnfw::make_unique<::arm_compute::CLPermuteEx>();
+ auto fn = nnfw::cpp14::make_unique<::arm_compute::CLPermuteEx>();
fn->configure(CAST_CL(ifm_alloc), CAST_CL(ofm_alloc),
getARMComputePermutationVector(param.rank, param.pv));
if (::internal::arm_compute::isGpuMode())
{
- auto fn = nnfw::make_unique<::arm_compute::CLFloor>();
+ auto fn = nnfw::cpp14::make_unique<::arm_compute::CLFloor>();
fn->configure(CAST_CL(ifm_alloc), CAST_CL(ofm_alloc));
}
else
{
- auto fn = nnfw::make_unique<::arm_compute::NEFloor>();
+ auto fn = nnfw::cpp14::make_unique<::arm_compute::NEFloor>();
fn->configure(ifm_alloc, ofm_alloc);
auto ofm_alloc = ctx.at(::internal::tflite::operand::Index{param.ofm_index});
auto ifm_alloc = ctx.at(::internal::tflite::operand::Index{param.ifm_index});
- auto fn = nnfw::make_unique<SimpleArgMinMax>();
+ auto fn = nnfw::cpp14::make_unique<SimpleArgMinMax>();
bool is_min = false, is_max = true;
fn->configure(CAST_CL(ifm_alloc), CAST_CL(ofm_alloc), param.axis, param.rank, is_min, is_max);
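// NOTE With is_min == false and is_max == true this instance computes ArgMax
// along param.axis; SimpleArgMinMax evidently covers both reductions via these
// two flags.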
if (from_env<bool>(std::getenv("USE_SIMPLE_SQRT")))
{
// USE CPU VERSION OF SQRT
- auto fn = nnfw::make_unique<SimpleSQRT>();
+ auto fn = nnfw::cpp14::make_unique<SimpleSQRT>();
fn->configure(input_alloc, output_alloc);
{
if (::internal::arm_compute::isGpuMode())
{
- auto fn = nnfw::make_unique<::arm_compute::CLActivationLayer>();
+ auto fn = nnfw::cpp14::make_unique<::arm_compute::CLActivationLayer>();
fn->configure(CAST_CL(input_alloc), CAST_CL(output_alloc), act_info);
}
else
{
- auto fn = nnfw::make_unique<::arm_compute::NEActivationLayer>();
+ auto fn = nnfw::cpp14::make_unique<::arm_compute::NEActivationLayer>();
fn->configure(input_alloc, output_alloc, act_info);
if (::internal::arm_compute::isGpuMode())
{
- auto fn = nnfw::make_unique<::arm_compute::CLActivationLayerEx>();
+ auto fn = nnfw::cpp14::make_unique<::arm_compute::CLActivationLayerEx>();
fn->configure(CAST_CL(input_alloc), CAST_CL(output_alloc), act_info);
if (::internal::arm_compute::isGpuMode())
{
- auto fn = nnfw::make_unique<::arm_compute::CLEqual>();
+ auto fn = nnfw::cpp14::make_unique<::arm_compute::CLEqual>();
fn->configure(CAST_CL(input1_alloc), CAST_CL(input2_alloc), CAST_CL(output_alloc));
auto ifm_alloc = ctx.at(::internal::tflite::operand::Index{param.ifm_index});
auto ker_alloc = ctx.at(::internal::tflite::operand::Index{param.ker_index});
- auto fn = nnfw::make_unique<SimpleTransposeConv>();
+ auto fn = nnfw::cpp14::make_unique<SimpleTransposeConv>();
// Only rank 4 is supported
const int rank = 4;
if (::internal::arm_compute::isGpuMode())
{
- auto fn = nnfw::make_unique<::arm_compute::CLSquaredDifference>();
+ auto fn = nnfw::cpp14::make_unique<::arm_compute::CLSquaredDifference>();
fn->configure(CAST_CL(lhs_alloc), CAST_CL(rhs_alloc), CAST_CL(ofm_alloc));
builder.append("SquaredDifference", std::move(fn));
{
// USE CPU VERSION OF PADLAYER
auto rank = 4;
- auto fn = nnfw::make_unique<SimplePadLayer>();
+ auto fn = nnfw::cpp14::make_unique<SimplePadLayer>();
fn->configure(ifm_alloc, ofm_alloc, pad_alloc, getARMComputeAxises(rank));
{
if (::internal::arm_compute::isGpuMode()) // GPU
{
- auto fn = nnfw::make_unique<::arm_compute::CLPadLayer>();
+ auto fn = nnfw::cpp14::make_unique<::arm_compute::CLPadLayer>();
fn->configure(CAST_CL(ifm_alloc), CAST_CL(ofm_alloc), CAST_CL(pad_alloc));
{
// USE CPU VERSION OF SPACETODEPTH
auto rank = 4;
- auto fn = nnfw::make_unique<SimpleSpaceToDepth>();
+ auto fn = nnfw::cpp14::make_unique<SimpleSpaceToDepth>();
fn->configure(input_alloc, output_alloc, param.block_size, getARMComputeAxises(rank));
{
if (::internal::arm_compute::isGpuMode()) // GPU
{
- auto fn = nnfw::make_unique<::arm_compute::CLSpaceToDepth>();
+ auto fn = nnfw::cpp14::make_unique<::arm_compute::CLSpaceToDepth>();
fn->configure(CAST_CL(input_alloc), CAST_CL(output_alloc), param.block_size);
// NOTE SimpleSpaceToBatchND is quite slow
if (from_env<bool>(std::getenv("USE_SIMPLE_SPACE_TO_BATCH_ND")))
{
- auto fn = nnfw::make_unique<SimpleSpaceToBatchND>();
+ auto fn = nnfw::cpp14::make_unique<SimpleSpaceToBatchND>();
fn->configure(input_alloc, block_size_alloc, padding_size_alloc, output_alloc,
getARMComputeAxises(param.rank));
}
else if (::internal::arm_compute::isGpuMode())
{
- auto fn = nnfw::make_unique<::arm_compute::CLSpaceToBatchND>();
+ auto fn = nnfw::cpp14::make_unique<::arm_compute::CLSpaceToBatchND>();
fn->configure(CAST_CL(input_alloc), CAST_CL(block_size_alloc), CAST_CL(padding_size_alloc),
CAST_CL(output_alloc));
// NOTE SimpleBatchToSpaceND is quite slow, but may be useful for debugging
if (from_env<bool>(std::getenv("USE_SIMPLE_BATCH_TO_SPACE_ND")))
{
- auto fn = nnfw::make_unique<SimpleBatchToSpaceND>();
+ auto fn = nnfw::cpp14::make_unique<SimpleBatchToSpaceND>();
fn->configure(input_alloc, output_alloc, param.block_size, getARMComputeAxises(param.rank));
builder.append("BatchToSpaceND", std::move(fn));
}
else if (::internal::arm_compute::isGpuMode())
{
- auto fn = nnfw::make_unique<::arm_compute::CLBatchToSpaceND>();
+ auto fn = nnfw::cpp14::make_unique<::arm_compute::CLBatchToSpaceND>();
fn->configure(CAST_CL(input_alloc), CAST_CL(output_alloc), param.block_size);
builder.append("BatchToSpaceND", std::move(fn));
if (::internal::arm_compute::isGpuMode())
{
- auto fn = nnfw::make_unique<::arm_compute::CLNormalizationLayerEx>();
+ auto fn = nnfw::cpp14::make_unique<::arm_compute::CLNormalizationLayerEx>();
fn->configure(CAST_CL(ifm_alloc), CAST_CL(ofm_alloc), norm_info);
}
else
{
- auto fn = nnfw::make_unique<::arm_compute::NENormalizationLayerEx>();
+ auto fn = nnfw::cpp14::make_unique<::arm_compute::NENormalizationLayerEx>();
fn->configure(ifm_alloc, ofm_alloc, norm_info);
if (from_env<bool>(std::getenv("USE_SIMPLE_EMBEDDINGLOOKUP")))
{
- auto fn = nnfw::make_unique<SimpleEmbeddingLookup>();
+ auto fn = nnfw::cpp14::make_unique<SimpleEmbeddingLookup>();
fn->configure(lookups_alloc, values_alloc, output_alloc);
}
else if (::internal::arm_compute::isGpuMode())
{
- auto fn = nnfw::make_unique<::arm_compute::CLEmbeddingLookup>();
+ auto fn = nnfw::cpp14::make_unique<::arm_compute::CLEmbeddingLookup>();
fn->configure(CAST_CL(values_alloc), CAST_CL(output_alloc), CAST_CL(lookups_alloc));
if (from_env<bool>(std::getenv("USE_SIMPLE_HASHTABLELOOKUP")))
{
- auto fn = nnfw::make_unique<SimpleHashtableLookupLayer>();
+ auto fn = nnfw::cpp14::make_unique<SimpleHashtableLookupLayer>();
fn->configure(lookups_alloc, keys_alloc, values_alloc, output_alloc, hits_alloc);
}
else if (::internal::arm_compute::isGpuMode()) // GPU
{
- auto fn = nnfw::make_unique<::arm_compute::CLHashtableLookup>();
+ auto fn = nnfw::cpp14::make_unique<::arm_compute::CLHashtableLookup>();
fn->configure(CAST_CL(lookups_alloc), CAST_CL(keys_alloc), CAST_CL(values_alloc),
CAST_CL(output_alloc), CAST_CL(hits_alloc));
param.alpha, param.beta, param.bias, false);
if (::internal::arm_compute::isGpuMode())
{
- auto fn = nnfw::make_unique<::arm_compute::CLNormalizationLayerEx>();
+ auto fn = nnfw::cpp14::make_unique<::arm_compute::CLNormalizationLayerEx>();
fn->configure(CAST_CL(ifm_alloc), CAST_CL(ofm_alloc), norm_info);
}
else
{
- auto fn = nnfw::make_unique<::arm_compute::NENormalizationLayerEx>();
+ auto fn = nnfw::cpp14::make_unique<::arm_compute::NENormalizationLayerEx>();
fn->configure(ifm_alloc, ofm_alloc, norm_info);
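// The dangling argument list above (param.alpha, param.beta, param.bias, false)
// lines up with the tail of ACL's NormalizationLayerInfo constructor; a hedged
// reconstruction, with the NormType and radius as assumptions:
//
//   const ::arm_compute::NormalizationLayerInfo norm_info{
//       ::arm_compute::NormType::CROSS_MAP, radius, param.alpha, param.beta,
//       param.bias /* kappa */, false /* is_scaled */};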
{
// USE CPU VERSION OF DEPTHTOSPACE
auto rank = 4;
- auto fn = nnfw::make_unique<SimpleDepthToSpace>();
+ auto fn = nnfw::cpp14::make_unique<SimpleDepthToSpace>();
fn->configure(input_alloc, output_alloc, param.block_size, getARMComputeAxises(rank));
{
if (::internal::arm_compute::isGpuMode()) // GPU
{
- auto fn = nnfw::make_unique<::arm_compute::CLDepthToSpace>();
+ auto fn = nnfw::cpp14::make_unique<::arm_compute::CLDepthToSpace>();
fn->configure(CAST_CL(input_alloc), CAST_CL(output_alloc), param.block_size);
if (::internal::arm_compute::isGpuMode())
{
- auto fn = nnfw::make_unique<SimpleUnpackLayer>();
+ auto fn = nnfw::cpp14::make_unique<SimpleUnpackLayer>();
std::vector<::arm_compute::ICLTensor *> outputs;
for (const auto &index : param.ofm_indexes)
{
if (::internal::arm_compute::isGpuMode())
{
- auto fn = nnfw::make_unique<SimplePackLayer>();
+ auto fn = nnfw::cpp14::make_unique<SimplePackLayer>();
std::vector<::arm_compute::ICLTensor *> inputs;
for (const auto &index : param.ifm_indexes)
{
// NOTE SimpleNeg is quite slow, but may be useful for debugging
if (from_env<bool>(std::getenv("USE_SIMPLE_NEG")))
{
- auto fn = nnfw::make_unique<SimpleNeg>();
+ auto fn = nnfw::cpp14::make_unique<SimpleNeg>();
fn->configure(ifm_alloc, ofm_alloc);
builder.append("Neg", std::move(fn));
}
else if (::internal::arm_compute::isGpuMode())
{
- auto fn = nnfw::make_unique<::arm_compute::CLNeg>();
+ auto fn = nnfw::cpp14::make_unique<::arm_compute::CLNeg>();
fn->configure(CAST_CL(ifm_alloc), CAST_CL(ofm_alloc));
builder.append("Neg", std::move(fn));
if (::internal::arm_compute::isGpuMode())
{
- auto fn = nnfw::make_unique<::arm_compute::CLExp>();
+ auto fn = nnfw::cpp14::make_unique<::arm_compute::CLExp>();
fn->configure(CAST_CL(ifm_alloc), CAST_CL(ofm_alloc));
if (::internal::arm_compute::isGpuMode())
{
- auto fn = nnfw::make_unique<::arm_compute::CLReduceOperation>();
+ auto fn = nnfw::cpp14::make_unique<::arm_compute::CLReduceOperation>();
fn->configure(CAST_CL(ifm_alloc), CAST_CL(ofm_alloc), param.axis,
::arm_compute::ReduceOperation::SUM);
if (::internal::arm_compute::isGpuMode())
{
- auto fn = nnfw::make_unique<::arm_compute::CLNotEqual>();
+ auto fn = nnfw::cpp14::make_unique<::arm_compute::CLNotEqual>();
fn->configure(CAST_CL(input1_alloc), CAST_CL(input2_alloc), CAST_CL(output_alloc));