if (!_batch_bcast->IsValid())
return;
- auto x_reshaped = _batch_bcast->x_reshape();
- auto y_reshaped = _batch_bcast->y_reshape();
+ const auto &x_reshaped = _batch_bcast->x_reshape();
+ const auto &y_reshaped = _batch_bcast->y_reshape();
auto output_shape = _batch_bcast->output_shape();
_x_batch_size = std::accumulate(x_reshaped.cbegin(), x_reshaped.cend(), INT32_C(1),
std::multiplies<int32_t>());
- _y_batch_size = std::accumulate(x_reshaped.cbegin(), x_reshaped.cend(), INT32_C(1),
+ _y_batch_size = std::accumulate(y_reshaped.cbegin(), y_reshaped.cend(), INT32_C(1),
std::multiplies<int32_t>());
_output_shape.ReplaceWith(output_shape.size(), output_shape.data());
_output_batch_size = _output_shape.FlatSize();
Result::Result(const Phases &phases)
{
- const auto option = phases.option();
+ const auto &option = phases.option();
{
for (int i = PhaseEnum::MODEL_LOAD; i <= PhaseEnum::PREPARE; ++i)
{
- auto phase = phases.at(gPhaseStrings[i]);
+ const auto &phase = phases.at(gPhaseStrings[i]);
time[i][FigureType::MEAN] = averageTimeMs(phase);
}
int i = PhaseEnum::EXECUTE;
- auto exec_phase = phases.at(gPhaseStrings[i]);
+ const auto &exec_phase = phases.at(gPhaseStrings[i]);
time[i][FigureType::MEAN] = averageTimeMs(exec_phase);
time[i][FigureType::MAX] = maxTimeMs(exec_phase);
time[i][FigureType::MIN] = minTimeMs(exec_phase);
const auto stride = node.param().stride;
const auto activation = node.param().activation;
- const auto param_padding = node.param().padding;
+ const auto &param_padding = node.param().padding;
const auto dilation = node.param().dilation;
auto fn = std::make_unique<ops::ConvolutionLayer>();
for (const auto &ifm_idx : node.getInputs())
input_tensors.emplace_back(_tensor_reg->getPortableTensor(ifm_idx));
- const auto equation = node.param().equation;
+ const auto &equation = node.param().equation;
auto fn = std::make_unique<ops::EinsumLayer>();
absl::Status KernelGenerator::readConstTensor(const ir::OperandIndex &index,
tflite::gpu::TensorOrScalar *param)
{
- const auto shape = _ctx.at(index).shape();
+ const auto &shape = _ctx.at(index).shape();
if (shape.rank() == 0 && shape.num_elements() == 1)
{
tflite::gpu::Tensor<tflite::gpu::Scalar, tflite::gpu::DataType::FLOAT32> tensor;
{
std::unique_ptr<tflite::gpu::GPUOperation> gpu_op_1;
tflite::gpu::OperationDef op_def_1;
- const auto shape = _ctx.at(ofm_index).shape();
+ const auto &shape = _ctx.at(ofm_index).shape();
auto new_ind = _tensor_reg->addNewClTensor(shape);
addClNode({ifm_index}, {new_ind}, std::move(gpu_op));
// However, it is not applied here, so input/output have the same layout of frontend. Because
// "ExecutorFactory" would convert shape of input/output accoding to the layouts when registering
// operand info to "TensorBuilder" after calling "StaticShapeInferer"
- const auto new_shape = input.info().shape();
+ const auto &new_shape = input.info().shape();
output.info().shape(new_shape);
}
: Node{"operand" + std::to_string(index.value())}
{
{
- auto type_to_shape = [](Type type) {
+ auto type_to_shape = [](Type type) -> const auto & {
switch (type)
{
case Type::MODEL_INPUT: