This commit updates .clang-format in line with internal policy.

The key change is IndentCaseLabels, which moves from false to true;
the bulk of this patch is the mechanical re-indentation of switch
statements that follows from it. BasedOnStyle, JavaScriptQuotes, and
JavaScriptWrapImports entries are also added.
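For illustration only (a minimal sketch with hypothetical names, not
code from this repository), clang-format renders a switch as follows
under the new setting:

    #include <stdexcept>

    enum class Axis
    {
      Batch,
      Depth
    };

    // With "IndentCaseLabels: true", case labels sit one level deeper
    // than the switch keyword, and their bodies one level deeper still,
    // matching the hunks below.
    inline int axis_to_index(Axis axis)
    {
      switch (axis)
      {
        case Axis::Batch:
          return 0;
        case Axis::Depth:
          return 1;
      }
      throw std::invalid_argument{"axis"};
    }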
Signed-off-by: Jonghyun Park <jh1302.park@samsung.com>
---
Language: Cpp
+BasedOnStyle: Google
AccessModifierOffset: -2
AlignAfterOpenBracket: Align
AlignEscapedNewlinesLeft: true
Priority: 3
- Regex: '.*'
Priority: 1
-IndentCaseLabels: false
+IndentCaseLabels: true
IndentWidth: 2
IndentWrappedFunctionNames: false
+JavaScriptQuotes: Leave
+JavaScriptWrapImports: true
KeepEmptyLinesAtTheStartOfBlocks: true
MacroBlockBegin: ''
MacroBlockEnd: ''
{
switch (dtype)
{
- case ann::DType::S32:
- return "ANEURALNETWORKS_INT32";
- default:
- break;
+ case ann::DType::S32:
+ return "ANEURALNETWORKS_INT32";
+ default:
+ break;
};
throw std::invalid_argument("dtype");
{
switch (dtype)
{
- case ann::DType::S32:
- return "ANEURALNETWORKS_TENSOR_INT32";
- case ann::DType::F32:
- return "ANEURALNETWORKS_TENSOR_FLOAT32";
- default:
- break;
+ case ann::DType::S32:
+ return "ANEURALNETWORKS_TENSOR_INT32";
+ case ann::DType::F32:
+ return "ANEURALNETWORKS_TENSOR_FLOAT32";
+ default:
+ break;
};
throw std::invalid_argument("dtype");
return #ENUM;
#include "ANN/IR/Operation.def"
#undef ANN_OPERATION
- default:
- throw std::invalid_argument{"code"};
+ default:
+ throw std::invalid_argument{"code"};
};
}
{
switch (axis)
{
- case coco::ConcatF::Axis::Batch:
- return 0;
- case coco::ConcatF::Axis::Depth:
- return 1;
- case coco::ConcatF::Axis::Height:
- return 2;
- case coco::ConcatF::Axis::Width:
- return 3;
- default:
- break;
+ case coco::ConcatF::Axis::Batch:
+ return 0;
+ case coco::ConcatF::Axis::Depth:
+ return 1;
+ case coco::ConcatF::Axis::Height:
+ return 2;
+ case coco::ConcatF::Axis::Width:
+ return 3;
+ default:
+ break;
};
throw std::invalid_argument{"axis is unknown value"};
switch (act)
{
- case tflite::ActivationFunctionType::ActivationFunctionType_NONE:
- {
- // Create Copy Instr (copying from ifm to output_obj),
- // redundant layer but optimized by backend
- auto copy_ins = instr_builder(m).copy(output_obj, ifm);
-
- // Append the instruction to the block
- block->instr()->append(copy_ins);
- break;
- }
- case tflite::ActivationFunctionType::ActivationFunctionType_RELU:
- {
- // Create Eval(output_obj, ReLU(load(ifm)))
- auto load_op = op_builder(m).load(ifm).pop();
- auto relu_op = m->entity()->op()->create<coco::ReLU>();
- relu_op->arg(load_op);
-
- auto eval_ins = instr_builder(m).eval(output_obj, relu_op);
-
- // Append the instruction to the block
- block->instr()->append(eval_ins);
- break;
- }
- case tflite::ActivationFunctionType::ActivationFunctionType_RELU6:
- {
- // Create Eval(output_obj, ReLU6(load(ifm)))
- auto load_op = op_builder(m).load(ifm).pop();
- auto relu6_op = m->entity()->op()->create<coco::ReLU6>();
- relu6_op->arg(load_op);
-
- auto eval_ins = instr_builder(m).eval(output_obj, relu6_op);
-
- // Append the instruction to the block
- block->instr()->append(eval_ins);
- break;
- }
- default:
- // TODO support other fused activations
- assert(false);
- break;
+ case tflite::ActivationFunctionType::ActivationFunctionType_NONE:
+ {
+ // Create Copy Instr (copying from ifm to output_obj),
+ // redundant layer but optimized by backend
+ auto copy_ins = instr_builder(m).copy(output_obj, ifm);
+
+ // Append the instruction to the block
+ block->instr()->append(copy_ins);
+ break;
+ }
+ case tflite::ActivationFunctionType::ActivationFunctionType_RELU:
+ {
+ // Create Eval(output_obj, ReLU(load(ifm)))
+ auto load_op = op_builder(m).load(ifm).pop();
+ auto relu_op = m->entity()->op()->create<coco::ReLU>();
+ relu_op->arg(load_op);
+
+ auto eval_ins = instr_builder(m).eval(output_obj, relu_op);
+
+ // Append the instruction to the block
+ block->instr()->append(eval_ins);
+ break;
+ }
+ case tflite::ActivationFunctionType::ActivationFunctionType_RELU6:
+ {
+ // Create Eval(output_obj, ReLU6(load(ifm)))
+ auto load_op = op_builder(m).load(ifm).pop();
+ auto relu6_op = m->entity()->op()->create<coco::ReLU6>();
+ relu6_op->arg(load_op);
+
+ auto eval_ins = instr_builder(m).eval(output_obj, relu6_op);
+
+ // Append the instruction to the block
+ block->instr()->append(eval_ins);
+ break;
+ }
+ default:
+ // TODO support other fused activations
+ assert(false);
+ break;
}
return output_obj;
switch (axis)
{
- case 0:
- res = coco::ConcatF::Axis::Batch;
- break;
- case 1:
- res = coco::ConcatF::Axis::Height;
- break;
- case 2:
- res = coco::ConcatF::Axis::Width;
- break;
- case 3:
- res = coco::ConcatF::Axis::Depth;
- break;
- default:
- break;
+ case 0:
+ res = coco::ConcatF::Axis::Batch;
+ break;
+ case 1:
+ res = coco::ConcatF::Axis::Height;
+ break;
+ case 2:
+ res = coco::ConcatF::Axis::Width;
+ break;
+ case 3:
+ res = coco::ConcatF::Axis::Depth;
+ break;
+ default:
+ break;
}
return res;
tflite::Padding padding = getOpPadding(node->pad());
switch (padding)
{
- case tflite::Padding_SAME:
- {
- auto height = static_cast<uint32_t>(pred_shape._dims[1]);
- auto width = static_cast<uint32_t>(pred_shape._dims[2]);
+ case tflite::Padding_SAME:
+ {
+ auto height = static_cast<uint32_t>(pred_shape._dims[1]);
+ auto width = static_cast<uint32_t>(pred_shape._dims[2]);
- int32_t proposed_res_height = ceil_div(height, node->stride()->vertical());
- int32_t proposed_res_width = ceil_div(width, node->stride()->horizontal());
+ int32_t proposed_res_height = ceil_div(height, node->stride()->vertical());
+ int32_t proposed_res_width = ceil_div(width, node->stride()->horizontal());
- shape._dims[1] = pred_shape._dims[1] == -1 ? -1 : proposed_res_height;
- shape._dims[2] = pred_shape._dims[2] == -1 ? -1 : proposed_res_width;
- break;
- }
- case tflite::Padding_VALID:
- {
- auto padded_h = static_cast<uint32_t>(pred_shape._dims[1] - (node->window()->vertical() - 1));
- auto padded_w = static_cast<uint32_t>(pred_shape._dims[2] - (node->window()->horizontal() - 1));
+ shape._dims[1] = pred_shape._dims[1] == -1 ? -1 : proposed_res_height;
+ shape._dims[2] = pred_shape._dims[2] == -1 ? -1 : proposed_res_width;
+ break;
+ }
+ case tflite::Padding_VALID:
+ {
+ auto padded_h = static_cast<uint32_t>(pred_shape._dims[1] - (node->window()->vertical() - 1));
+ auto padded_w =
+ static_cast<uint32_t>(pred_shape._dims[2] - (node->window()->horizontal() - 1));
- int32_t proposed_height = ceil_div(padded_h, node->stride()->vertical());
- int32_t proposed_width = ceil_div(padded_w, node->stride()->horizontal());
+ int32_t proposed_height = ceil_div(padded_h, node->stride()->vertical());
+ int32_t proposed_width = ceil_div(padded_w, node->stride()->horizontal());
- shape._dims[1] = pred_shape._dims[1] == -1 ? -1 : proposed_height;
- shape._dims[2] = pred_shape._dims[2] == -1 ? -1 : proposed_width;
- break;
- }
- default:
- assert(false && "unknown padding type");
+ shape._dims[1] = pred_shape._dims[1] == -1 ? -1 : proposed_height;
+ shape._dims[2] = pred_shape._dims[2] == -1 ? -1 : proposed_width;
+ break;
+ }
+ default:
+ assert(false && "unknown padding type");
}
return shape;
}
tflite::Padding padding = getOpPadding(node->pad());
switch (padding)
{
- case tflite::Padding_SAME:
- {
- auto height = static_cast<uint32_t>(ifm_shape._dims[1]);
- auto width = static_cast<uint32_t>(ifm_shape._dims[2]);
+ case tflite::Padding_SAME:
+ {
+ auto height = static_cast<uint32_t>(ifm_shape._dims[1]);
+ auto width = static_cast<uint32_t>(ifm_shape._dims[2]);
- int32_t proposed_res_height = ceil_div(height, node->stride()->vertical());
- int32_t proposed_res_width = ceil_div(width, node->stride()->horizontal());
+ int32_t proposed_res_height = ceil_div(height, node->stride()->vertical());
+ int32_t proposed_res_width = ceil_div(width, node->stride()->horizontal());
- shape._dims[1] = ifm_shape._dims[1] == -1 ? -1 : proposed_res_height;
- shape._dims[2] = ifm_shape._dims[2] == -1 ? -1 : proposed_res_width;
- break;
- }
- case tflite::Padding_VALID:
- {
- auto padded_h = static_cast<uint32_t>(ifm_shape._dims[1] - (node->window()->vertical() - 1));
- auto padded_w = static_cast<uint32_t>(ifm_shape._dims[2] - (node->window()->horizontal() - 1));
+ shape._dims[1] = ifm_shape._dims[1] == -1 ? -1 : proposed_res_height;
+ shape._dims[2] = ifm_shape._dims[2] == -1 ? -1 : proposed_res_width;
+ break;
+ }
+ case tflite::Padding_VALID:
+ {
+ auto padded_h = static_cast<uint32_t>(ifm_shape._dims[1] - (node->window()->vertical() - 1));
+ auto padded_w =
+ static_cast<uint32_t>(ifm_shape._dims[2] - (node->window()->horizontal() - 1));
- int32_t proposed_height = ceil_div(padded_h, node->stride()->vertical());
- int32_t proposed_width = ceil_div(padded_w, node->stride()->horizontal());
+ int32_t proposed_height = ceil_div(padded_h, node->stride()->vertical());
+ int32_t proposed_width = ceil_div(padded_w, node->stride()->horizontal());
- shape._dims[1] = ifm_shape._dims[1] == -1 ? -1 : proposed_height;
- shape._dims[2] = ifm_shape._dims[2] == -1 ? -1 : proposed_width;
- break;
- }
- default:
- assert(false && "unknown padding type");
+ shape._dims[1] = ifm_shape._dims[1] == -1 ? -1 : proposed_height;
+ shape._dims[2] = ifm_shape._dims[2] == -1 ? -1 : proposed_width;
+ break;
+ }
+ default:
+ assert(false && "unknown padding type");
}
return shape;
}
tflite::Padding padding = getOpPadding(node->pad());
switch (padding)
{
- case tflite::Padding_SAME:
- {
- auto height = static_cast<uint32_t>(ifm_shape._dims[1]);
- auto width = static_cast<uint32_t>(ifm_shape._dims[2]);
+ case tflite::Padding_SAME:
+ {
+ auto height = static_cast<uint32_t>(ifm_shape._dims[1]);
+ auto width = static_cast<uint32_t>(ifm_shape._dims[2]);
- int32_t proposed_res_height = ceil_div(height, node->stride()->vertical());
- int32_t proposed_res_width = ceil_div(width, node->stride()->horizontal());
+ int32_t proposed_res_height = ceil_div(height, node->stride()->vertical());
+ int32_t proposed_res_width = ceil_div(width, node->stride()->horizontal());
- shape._dims[1] = ifm_shape._dims[1] == -1 ? -1 : proposed_res_height;
- shape._dims[2] = ifm_shape._dims[2] == -1 ? -1 : proposed_res_width;
- break;
- }
- case tflite::Padding_VALID:
- {
- auto padded_h = static_cast<uint32_t>(ifm_shape._dims[1] - (ker_shape._dims[1] - 1));
- auto padded_w = static_cast<uint32_t>(ifm_shape._dims[2] - (ker_shape._dims[2] - 1));
+ shape._dims[1] = ifm_shape._dims[1] == -1 ? -1 : proposed_res_height;
+ shape._dims[2] = ifm_shape._dims[2] == -1 ? -1 : proposed_res_width;
+ break;
+ }
+ case tflite::Padding_VALID:
+ {
+ auto padded_h = static_cast<uint32_t>(ifm_shape._dims[1] - (ker_shape._dims[1] - 1));
+ auto padded_w = static_cast<uint32_t>(ifm_shape._dims[2] - (ker_shape._dims[2] - 1));
- int32_t proposed_height = ceil_div(padded_h, node->stride()->vertical());
- int32_t proposed_width = ceil_div(padded_w, node->stride()->horizontal());
+ int32_t proposed_height = ceil_div(padded_h, node->stride()->vertical());
+ int32_t proposed_width = ceil_div(padded_w, node->stride()->horizontal());
- shape._dims[1] = ifm_shape._dims[1] == -1 ? -1 : proposed_height;
- shape._dims[2] = ifm_shape._dims[2] == -1 ? -1 : proposed_width;
- break;
- }
- default:
- assert(false && "unknown padding type");
+ shape._dims[1] = ifm_shape._dims[1] == -1 ? -1 : proposed_height;
+ shape._dims[2] = ifm_shape._dims[2] == -1 ? -1 : proposed_width;
+ break;
+ }
+ default:
+ assert(false && "unknown padding type");
}
return shape;
}
switch (model->operator_codes()->Get(opcode_index)->builtin_code())
{
- case tflite::BuiltinOperator_RELU:
- ASSERT_EQ(relu_exeuction_index, -1);
- relu_exeuction_index = static_cast<int64_t>(n);
- break;
- case tflite::BuiltinOperator_MAX_POOL_2D:
- ASSERT_EQ(maxpool_execution_index, -1);
- maxpool_execution_index = static_cast<int64_t>(n);
- break;
- default:
- break;
+ case tflite::BuiltinOperator_RELU:
+ ASSERT_EQ(relu_exeuction_index, -1);
+ relu_exeuction_index = static_cast<int64_t>(n);
+ break;
+ case tflite::BuiltinOperator_MAX_POOL_2D:
+ ASSERT_EQ(maxpool_execution_index, -1);
+ maxpool_execution_index = static_cast<int64_t>(n);
+ break;
+ default:
+ break;
}
}
{
switch (dtype)
{
- case loco::DataType::U8:
- return tflite::TensorType_UINT8;
- // case loco::DataType::U16: unsupported
- // case loco::DataType::U32: unsupported
- // case loco::DataType::U64: unsupported
- case loco::DataType::S8:
- return tflite::TensorType_INT8;
- case loco::DataType::S16:
- return tflite::TensorType_INT16;
- case loco::DataType::S32:
- return tflite::TensorType_INT32;
- case loco::DataType::S64:
- return tflite::TensorType_INT64;
- case loco::DataType::FLOAT16:
- return tflite::TensorType_FLOAT16;
- case loco::DataType::FLOAT32:
- return tflite::TensorType_FLOAT32;
- // case loco::DataType::FLOAT64: unsupported
- default:
- assert(false && "unsupported data type");
+ case loco::DataType::U8:
+ return tflite::TensorType_UINT8;
+ // case loco::DataType::U16: unsupported
+ // case loco::DataType::U32: unsupported
+ // case loco::DataType::U64: unsupported
+ case loco::DataType::S8:
+ return tflite::TensorType_INT8;
+ case loco::DataType::S16:
+ return tflite::TensorType_INT16;
+ case loco::DataType::S32:
+ return tflite::TensorType_INT32;
+ case loco::DataType::S64:
+ return tflite::TensorType_INT64;
+ case loco::DataType::FLOAT16:
+ return tflite::TensorType_FLOAT16;
+ case loco::DataType::FLOAT32:
+ return tflite::TensorType_FLOAT32;
+ // case loco::DataType::FLOAT64: unsupported
+ default:
+ assert(false && "unsupported data type");
}
}
switch (dtype)
{
- case DataType::FLOAT32:
- {
- auto lhs_vector = as_float_vector(lhs_dataset);
- auto rhs_vector = as_float_vector(rhs_dataset);
-
- assert(lhs_vector.size() == rhs_vector.size());
+ case DataType::FLOAT32:
+ {
+ auto lhs_vector = as_float_vector(lhs_dataset);
+ auto rhs_vector = as_float_vector(rhs_dataset);
- LexicalLayout layout;
+ assert(lhs_vector.size() == rhs_vector.size());
- for (TensorIndexEnumerator e{shape}; e.valid(); e.advance())
- {
- const auto &ind = e.current();
- auto lhs_value = lhs_vector.at(layout.offset(shape, ind));
- auto rhs_value = rhs_vector.at(layout.offset(shape, ind));
+ LexicalLayout layout;
- // TODO Abstract equality criterion
- if (std::abs(lhs_value - rhs_value) >= 0.001f)
+ for (TensorIndexEnumerator e{shape}; e.valid(); e.advance())
{
- ErrorDetail<ErrorCode::ValueMismatch> error{};
- mux.notify(error);
- continue;
+ const auto &ind = e.current();
+ auto lhs_value = lhs_vector.at(layout.offset(shape, ind));
+ auto rhs_value = rhs_vector.at(layout.offset(shape, ind));
+
+ // TODO Abstract equality criterion
+ if (std::abs(lhs_value - rhs_value) >= 0.001f)
+ {
+ ErrorDetail<ErrorCode::ValueMismatch> error{};
+ mux.notify(error);
+ continue;
+ }
}
- }
- break;
- }
- default:
- throw std::runtime_error{"Not supported, yet"};
+ break;
+ }
+ default:
+ throw std::runtime_error{"Not supported, yet"};
};
}
} while (false);
{
switch (dtype)
{
- case loco::DataType::U8:
- return tflite::TensorType_UINT8;
- // case loco::DataType::U16: unsupported
- // case loco::DataType::U32: unsupported
- // case loco::DataType::U64: unsupported
- case loco::DataType::S8:
- return tflite::TensorType_INT8;
- case loco::DataType::S16:
- return tflite::TensorType_INT16;
- case loco::DataType::S32:
- return tflite::TensorType_INT32;
- case loco::DataType::S64:
- return tflite::TensorType_INT64;
- case loco::DataType::FLOAT16:
- return tflite::TensorType_FLOAT16;
- case loco::DataType::FLOAT32:
- return tflite::TensorType_FLOAT32;
- // case loco::DataType::FLOAT64: unsupported
- default:
- assert(false && "unsupported data type");
+ case loco::DataType::U8:
+ return tflite::TensorType_UINT8;
+ // case loco::DataType::U16: unsupported
+ // case loco::DataType::U32: unsupported
+ // case loco::DataType::U64: unsupported
+ case loco::DataType::S8:
+ return tflite::TensorType_INT8;
+ case loco::DataType::S16:
+ return tflite::TensorType_INT16;
+ case loco::DataType::S32:
+ return tflite::TensorType_INT32;
+ case loco::DataType::S64:
+ return tflite::TensorType_INT64;
+ case loco::DataType::FLOAT16:
+ return tflite::TensorType_FLOAT16;
+ case loco::DataType::FLOAT32:
+ return tflite::TensorType_FLOAT32;
+ // case loco::DataType::FLOAT64: unsupported
+ default:
+ assert(false && "unsupported data type");
}
}
tflite::Padding padding = getOpPadding(node->pad());
switch (padding)
{
- case tflite::Padding_SAME:
- {
- auto height = static_cast<uint32_t>(pred_shape._dims[1]);
- auto width = static_cast<uint32_t>(pred_shape._dims[2]);
+ case tflite::Padding_SAME:
+ {
+ auto height = static_cast<uint32_t>(pred_shape._dims[1]);
+ auto width = static_cast<uint32_t>(pred_shape._dims[2]);
- int32_t proposed_res_height = ceil_div(height, node->stride()->vertical());
- int32_t proposed_res_width = ceil_div(width, node->stride()->horizontal());
+ int32_t proposed_res_height = ceil_div(height, node->stride()->vertical());
+ int32_t proposed_res_width = ceil_div(width, node->stride()->horizontal());
- shape._dims[1] = pred_shape._dims[1] == -1 ? -1 : proposed_res_height;
- shape._dims[2] = pred_shape._dims[2] == -1 ? -1 : proposed_res_width;
- break;
- }
- case tflite::Padding_VALID:
- {
- auto padded_h = static_cast<uint32_t>(pred_shape._dims[1] - (node->window()->vertical() - 1));
- auto padded_w = static_cast<uint32_t>(pred_shape._dims[2] - (node->window()->horizontal() - 1));
+ shape._dims[1] = pred_shape._dims[1] == -1 ? -1 : proposed_res_height;
+ shape._dims[2] = pred_shape._dims[2] == -1 ? -1 : proposed_res_width;
+ break;
+ }
+ case tflite::Padding_VALID:
+ {
+ auto padded_h = static_cast<uint32_t>(pred_shape._dims[1] - (node->window()->vertical() - 1));
+ auto padded_w =
+ static_cast<uint32_t>(pred_shape._dims[2] - (node->window()->horizontal() - 1));
- int32_t proposed_height = ceil_div(padded_h, node->stride()->vertical());
- int32_t proposed_width = ceil_div(padded_w, node->stride()->horizontal());
+ int32_t proposed_height = ceil_div(padded_h, node->stride()->vertical());
+ int32_t proposed_width = ceil_div(padded_w, node->stride()->horizontal());
- shape._dims[1] = pred_shape._dims[1] == -1 ? -1 : proposed_height;
- shape._dims[2] = pred_shape._dims[2] == -1 ? -1 : proposed_width;
- break;
- }
- default:
- assert(false && "unknown padding type");
+ shape._dims[1] = pred_shape._dims[1] == -1 ? -1 : proposed_height;
+ shape._dims[2] = pred_shape._dims[2] == -1 ? -1 : proposed_width;
+ break;
+ }
+ default:
+ assert(false && "unknown padding type");
}
return shape;
}
tflite::Padding padding = getOpPadding(node->pad());
switch (padding)
{
- case tflite::Padding_SAME:
- {
- auto height = static_cast<uint32_t>(ifm_shape._dims[1]);
- auto width = static_cast<uint32_t>(ifm_shape._dims[2]);
+ case tflite::Padding_SAME:
+ {
+ auto height = static_cast<uint32_t>(ifm_shape._dims[1]);
+ auto width = static_cast<uint32_t>(ifm_shape._dims[2]);
- int32_t proposed_res_height = ceil_div(height, node->stride()->vertical());
- int32_t proposed_res_width = ceil_div(width, node->stride()->horizontal());
+ int32_t proposed_res_height = ceil_div(height, node->stride()->vertical());
+ int32_t proposed_res_width = ceil_div(width, node->stride()->horizontal());
- shape._dims[1] = ifm_shape._dims[1] == -1 ? -1 : proposed_res_height;
- shape._dims[2] = ifm_shape._dims[2] == -1 ? -1 : proposed_res_width;
- break;
- }
- case tflite::Padding_VALID:
- {
- auto padded_h = static_cast<uint32_t>(ifm_shape._dims[1] - (node->window()->vertical() - 1));
- auto padded_w = static_cast<uint32_t>(ifm_shape._dims[2] - (node->window()->horizontal() - 1));
+ shape._dims[1] = ifm_shape._dims[1] == -1 ? -1 : proposed_res_height;
+ shape._dims[2] = ifm_shape._dims[2] == -1 ? -1 : proposed_res_width;
+ break;
+ }
+ case tflite::Padding_VALID:
+ {
+ auto padded_h = static_cast<uint32_t>(ifm_shape._dims[1] - (node->window()->vertical() - 1));
+ auto padded_w =
+ static_cast<uint32_t>(ifm_shape._dims[2] - (node->window()->horizontal() - 1));
- int32_t proposed_height = ceil_div(padded_h, node->stride()->vertical());
- int32_t proposed_width = ceil_div(padded_w, node->stride()->horizontal());
+ int32_t proposed_height = ceil_div(padded_h, node->stride()->vertical());
+ int32_t proposed_width = ceil_div(padded_w, node->stride()->horizontal());
- shape._dims[1] = ifm_shape._dims[1] == -1 ? -1 : proposed_height;
- shape._dims[2] = ifm_shape._dims[2] == -1 ? -1 : proposed_width;
- break;
- }
- default:
- assert(false && "unknown padding type");
+ shape._dims[1] = ifm_shape._dims[1] == -1 ? -1 : proposed_height;
+ shape._dims[2] = ifm_shape._dims[2] == -1 ? -1 : proposed_width;
+ break;
+ }
+ default:
+ assert(false && "unknown padding type");
}
return shape;
}
tflite::Padding padding = getOpPadding(node->pad());
switch (padding)
{
- case tflite::Padding_SAME:
- {
- auto height = static_cast<uint32_t>(ifm_shape._dims[1]);
- auto width = static_cast<uint32_t>(ifm_shape._dims[2]);
+ case tflite::Padding_SAME:
+ {
+ auto height = static_cast<uint32_t>(ifm_shape._dims[1]);
+ auto width = static_cast<uint32_t>(ifm_shape._dims[2]);
- int32_t proposed_res_height = ceil_div(height, node->stride()->vertical());
- int32_t proposed_res_width = ceil_div(width, node->stride()->horizontal());
+ int32_t proposed_res_height = ceil_div(height, node->stride()->vertical());
+ int32_t proposed_res_width = ceil_div(width, node->stride()->horizontal());
- shape._dims[1] = ifm_shape._dims[1] == -1 ? -1 : proposed_res_height;
- shape._dims[2] = ifm_shape._dims[2] == -1 ? -1 : proposed_res_width;
- break;
- }
- case tflite::Padding_VALID:
- {
- auto padded_h = static_cast<uint32_t>(ifm_shape._dims[1] - (ker_shape._dims[1] - 1));
- auto padded_w = static_cast<uint32_t>(ifm_shape._dims[2] - (ker_shape._dims[2] - 1));
+ shape._dims[1] = ifm_shape._dims[1] == -1 ? -1 : proposed_res_height;
+ shape._dims[2] = ifm_shape._dims[2] == -1 ? -1 : proposed_res_width;
+ break;
+ }
+ case tflite::Padding_VALID:
+ {
+ auto padded_h = static_cast<uint32_t>(ifm_shape._dims[1] - (ker_shape._dims[1] - 1));
+ auto padded_w = static_cast<uint32_t>(ifm_shape._dims[2] - (ker_shape._dims[2] - 1));
- int32_t proposed_height = ceil_div(padded_h, node->stride()->vertical());
- int32_t proposed_width = ceil_div(padded_w, node->stride()->horizontal());
+ int32_t proposed_height = ceil_div(padded_h, node->stride()->vertical());
+ int32_t proposed_width = ceil_div(padded_w, node->stride()->horizontal());
- shape._dims[1] = ifm_shape._dims[1] == -1 ? -1 : proposed_height;
- shape._dims[2] = ifm_shape._dims[2] == -1 ? -1 : proposed_width;
- break;
- }
- default:
- assert(false && "unknown padding type");
+ shape._dims[1] = ifm_shape._dims[1] == -1 ? -1 : proposed_height;
+ shape._dims[2] = ifm_shape._dims[2] == -1 ? -1 : proposed_width;
+ break;
+ }
+ default:
+ assert(false && "unknown padding type");
}
return shape;
}
#include "CanonicalNodes.lst"
#undef CANONICAL_NODE
- default:
- break;
+ default:
+ break;
}
throw std::runtime_error{"NYI"};
#include "CanonicalNodes.lst"
#undef CANONICAL_NODE
- default:
- break;
+ default:
+ break;
}
throw std::runtime_error{"NYI"};
{
switch (axis)
{
- case FeatureAxis::Count:
- return true;
- case FeatureAxis::Depth:
- return true;
- case FeatureAxis::Height:
- return true;
- case FeatureAxis::Width:
- return true;
- default:
- break;
+ case FeatureAxis::Count:
+ return true;
+ case FeatureAxis::Depth:
+ return true;
+ case FeatureAxis::Height:
+ return true;
+ case FeatureAxis::Width:
+ return true;
+ default:
+ break;
}
return false;
{
switch (axis)
{
- case FilterAxis::Count:
- return true;
- case FilterAxis::Depth:
- return true;
- case FilterAxis::Height:
- return true;
- case FilterAxis::Width:
- return true;
- default:
- break;
+ case FilterAxis::Count:
+ return true;
+ case FilterAxis::Depth:
+ return true;
+ case FilterAxis::Height:
+ return true;
+ case FilterAxis::Width:
+ return true;
+ default:
+ break;
}
return false;
{
switch (axis)
{
- case DepthwiseFilterAxis::Depth:
- return true;
- case DepthwiseFilterAxis::Multiplier:
- return true;
- case DepthwiseFilterAxis::Height:
- return true;
- case DepthwiseFilterAxis::Width:
- return true;
- default:
- break;
+ case DepthwiseFilterAxis::Depth:
+ return true;
+ case DepthwiseFilterAxis::Multiplier:
+ return true;
+ case DepthwiseFilterAxis::Height:
+ return true;
+ case DepthwiseFilterAxis::Width:
+ return true;
+ default:
+ break;
}
return false;
switch (ifm_data->dtype())
{
- case loco::DataType::FLOAT32:
- {
- auto ifm_buf = ifm_data->as_f32_bufptr();
+ case loco::DataType::FLOAT32:
+ {
+ auto ifm_buf = ifm_data->as_f32_bufptr();
- auto avgpool2d_buf = avgPool2D<float>(avgpool2d, ifm_buf);
+ auto avgpool2d_buf = avgPool2D<float>(avgpool2d, ifm_buf);
- avgpool2d_data = make_data(avgpool2d_buf);
- break;
- }
- default:
- throw std::runtime_error("NYI for this DataType");
+ avgpool2d_data = make_data(avgpool2d_buf);
+ break;
+ }
+ default:
+ throw std::runtime_error("NYI for this DataType");
}
assert(avgpool2d_data != nullptr);
switch (input_data->dtype())
{
- case loco::DataType::FLOAT32:
- {
- auto input_bufptr = input_data->as_f32_bufptr();
- auto bias_bufptr = bias_data->as_f32_bufptr();
- auto bias_add_buf = make_buffer<float, LexicalLayout>(*input_data->shape());
+ case loco::DataType::FLOAT32:
+ {
+ auto input_bufptr = input_data->as_f32_bufptr();
+ auto bias_bufptr = bias_data->as_f32_bufptr();
+ auto bias_add_buf = make_buffer<float, LexicalLayout>(*input_data->shape());
- auto *shape = input_data->shape();
+ auto *shape = input_data->shape();
- for (IndexEnumerator e{*shape}; e.valid(); e.advance())
- {
- const auto &index = e.current();
- nncc::core::ADT::tensor::Index bias_index({index.at(axis)});
- bias_add_buf.at(index) = input_bufptr->at(index) + bias_bufptr->at(bias_index);
- }
+ for (IndexEnumerator e{*shape}; e.valid(); e.advance())
+ {
+ const auto &index = e.current();
+ nncc::core::ADT::tensor::Index bias_index({index.at(axis)});
+ bias_add_buf.at(index) = input_bufptr->at(index) + bias_bufptr->at(bias_index);
+ }
- bias_add_data = make_data(bias_add_buf);
- break;
- }
- default:
- throw std::runtime_error("NYI for this DataType");
+ bias_add_data = make_data(bias_add_buf);
+ break;
+ }
+ default:
+ throw std::runtime_error("NYI for this DataType");
}
assert(bias_add_data != nullptr);
switch (input_data->dtype())
{
- case loco::DataType::S32:
- {
- auto input_bufptr = input_data->as_s32_bufptr();
- bias_enc_data = make_data(*input_bufptr);
- break;
- }
- case loco::DataType::FLOAT32:
- {
- auto input_bufptr = input_data->as_f32_bufptr();
- bias_enc_data = make_data(*input_bufptr);
- break;
- }
- default:
- throw std::runtime_error("NYI for this DataType");
+ case loco::DataType::S32:
+ {
+ auto input_bufptr = input_data->as_s32_bufptr();
+ bias_enc_data = make_data(*input_bufptr);
+ break;
+ }
+ case loco::DataType::FLOAT32:
+ {
+ auto input_bufptr = input_data->as_f32_bufptr();
+ bias_enc_data = make_data(*input_bufptr);
+ break;
+ }
+ default:
+ throw std::runtime_error("NYI for this DataType");
}
assert(bias_enc_data != nullptr);
switch (constgen->dtype())
{
- case loco::DataType::S32:
- {
- assert(volume == constgen->size<loco::DataType::S32>());
+ case loco::DataType::S32:
+ {
+ assert(volume == constgen->size<loco::DataType::S32>());
- auto buf = make_buffer<int32_t, LexicalLayout>(shape);
+ auto buf = make_buffer<int32_t, LexicalLayout>(shape);
- for (IndexEnumerator e{shape}; e.valid(); e.advance())
- {
- const auto &index = e.current();
- uint32_t offset = ::offset_by_index(shape, index);
- buf.at(index) = constgen->at<loco::DataType::S32>(offset);
+ for (IndexEnumerator e{shape}; e.valid(); e.advance())
+ {
+ const auto &index = e.current();
+ uint32_t offset = ::offset_by_index(shape, index);
+ buf.at(index) = constgen->at<loco::DataType::S32>(offset);
+ }
+
+ data = locomotiv::make_data(buf);
+ break;
}
+ case loco::DataType::FLOAT32:
+ {
+ assert(volume == constgen->size<loco::DataType::FLOAT32>());
- data = locomotiv::make_data(buf);
- break;
- }
- case loco::DataType::FLOAT32:
- {
- assert(volume == constgen->size<loco::DataType::FLOAT32>());
+ auto buf = make_buffer<float, LexicalLayout>(shape);
- auto buf = make_buffer<float, LexicalLayout>(shape);
+ for (IndexEnumerator e{shape}; e.valid(); e.advance())
+ {
+ const auto &index = e.current();
+ uint32_t offset = ::offset_by_index(shape, index);
+ buf.at(index) = constgen->at<loco::DataType::FLOAT32>(offset);
+ }
- for (IndexEnumerator e{shape}; e.valid(); e.advance())
- {
- const auto &index = e.current();
- uint32_t offset = ::offset_by_index(shape, index);
- buf.at(index) = constgen->at<loco::DataType::FLOAT32>(offset);
+ data = locomotiv::make_data(buf);
+ break;
}
-
- data = locomotiv::make_data(buf);
- break;
- }
- default:
- throw std::runtime_error("NYI for this DataType");
+ default:
+ throw std::runtime_error("NYI for this DataType");
}
assert(data != nullptr);
switch (input_data->dtype())
{
- case loco::DataType::FLOAT32:
- {
- auto input_buf = input_data->as_f32_bufptr();
- enc_data = dw_filter_encode<float>(enc, input_buf);
- break;
- }
- default:
- throw std::runtime_error("NYI for this DataType");
+ case loco::DataType::FLOAT32:
+ {
+ auto input_buf = input_data->as_f32_bufptr();
+ enc_data = dw_filter_encode<float>(enc, input_buf);
+ break;
+ }
+ default:
+ throw std::runtime_error("NYI for this DataType");
}
assert(enc_data != nullptr);
switch (input_data->dtype())
{
- case loco::DataType::S32:
- {
- auto input_buf = input_data->as_s32_bufptr();
- dec_data = feature_decode<int32_t>(dec, input_buf);
- break;
- }
- case loco::DataType::FLOAT32:
- {
- auto input_buf = input_data->as_f32_bufptr();
- dec_data = feature_decode<float>(dec, input_buf);
- break;
- }
- default:
- throw std::runtime_error("NYI for this DataType");
+ case loco::DataType::S32:
+ {
+ auto input_buf = input_data->as_s32_bufptr();
+ dec_data = feature_decode<int32_t>(dec, input_buf);
+ break;
+ }
+ case loco::DataType::FLOAT32:
+ {
+ auto input_buf = input_data->as_f32_bufptr();
+ dec_data = feature_decode<float>(dec, input_buf);
+ break;
+ }
+ default:
+ throw std::runtime_error("NYI for this DataType");
}
assert(dec_data != nullptr);
switch (input_data->dtype())
{
- case loco::DataType::S32:
- {
- auto input_buf = input_data->as_s32_bufptr();
- enc_data = feature_encode<int32_t>(enc, input_buf);
- break;
- }
- case loco::DataType::FLOAT32:
- {
- auto input_buf = input_data->as_f32_bufptr();
- enc_data = feature_encode<float>(enc, input_buf);
- break;
- }
- default:
- throw std::runtime_error("NYI for this DataType");
+ case loco::DataType::S32:
+ {
+ auto input_buf = input_data->as_s32_bufptr();
+ enc_data = feature_encode<int32_t>(enc, input_buf);
+ break;
+ }
+ case loco::DataType::FLOAT32:
+ {
+ auto input_buf = input_data->as_f32_bufptr();
+ enc_data = feature_encode<float>(enc, input_buf);
+ break;
+ }
+ default:
+ throw std::runtime_error("NYI for this DataType");
}
assert(enc_data != nullptr);
switch (input_data->dtype())
{
- case loco::DataType::S32:
- {
- auto input_buf = input_data->as_s32_bufptr();
- enc_data = filter_encode<int32_t>(enc, input_buf);
- break;
- }
- case loco::DataType::FLOAT32:
- {
- auto input_buf = input_data->as_f32_bufptr();
- enc_data = filter_encode<float>(enc, input_buf);
- break;
- }
- default:
- throw std::runtime_error("NYI for this DataType");
+ case loco::DataType::S32:
+ {
+ auto input_buf = input_data->as_s32_bufptr();
+ enc_data = filter_encode<int32_t>(enc, input_buf);
+ break;
+ }
+ case loco::DataType::FLOAT32:
+ {
+ auto input_buf = input_data->as_f32_bufptr();
+ enc_data = filter_encode<float>(enc, input_buf);
+ break;
+ }
+ default:
+ throw std::runtime_error("NYI for this DataType");
}
assert(enc_data != nullptr);
switch (input_data->dtype())
{
- case loco::DataType::S32:
- {
- auto input_bufptr = input_data->as_s32_bufptr();
- forward_data = make_data(*input_bufptr);
- break;
- }
- case loco::DataType::FLOAT32:
- {
- auto input_bufptr = input_data->as_f32_bufptr();
- forward_data = make_data(*input_bufptr);
- break;
- }
- default:
- throw std::runtime_error("NYI for this DataType");
+ case loco::DataType::S32:
+ {
+ auto input_bufptr = input_data->as_s32_bufptr();
+ forward_data = make_data(*input_bufptr);
+ break;
+ }
+ case loco::DataType::FLOAT32:
+ {
+ auto input_bufptr = input_data->as_f32_bufptr();
+ forward_data = make_data(*input_bufptr);
+ break;
+ }
+ default:
+ throw std::runtime_error("NYI for this DataType");
}
assert(forward_data != nullptr);
switch (ifm_data->dtype())
{
- case loco::DataType::FLOAT32:
- {
- auto ifm_buf = ifm_data->as_f32_bufptr();
+ case loco::DataType::FLOAT32:
+ {
+ auto ifm_buf = ifm_data->as_f32_bufptr();
- auto maxpool2d_buf = maxPool2D<float>(maxpool2d, ifm_buf);
+ auto maxpool2d_buf = maxPool2D<float>(maxpool2d, ifm_buf);
- maxpool2d_data = make_data(maxpool2d_buf);
- break;
- }
- default:
- throw std::runtime_error("NYI for this DataType");
+ maxpool2d_data = make_data(maxpool2d_buf);
+ break;
+ }
+ default:
+ throw std::runtime_error("NYI for this DataType");
}
assert(maxpool2d_data != nullptr);
switch (from_data->dtype())
{
- case loco::DataType::S32:
- {
- auto from_bufptr = from_data->as_s32_bufptr();
- push_data = make_data(*from_bufptr);
- break;
- }
- case loco::DataType::FLOAT32:
- {
- auto from_bufptr = from_data->as_f32_bufptr();
- push_data = make_data(*from_bufptr);
- break;
- }
- default:
- throw std::runtime_error("NYI for this DataType");
+ case loco::DataType::S32:
+ {
+ auto from_bufptr = from_data->as_s32_bufptr();
+ push_data = make_data(*from_bufptr);
+ break;
+ }
+ case loco::DataType::FLOAT32:
+ {
+ auto from_bufptr = from_data->as_f32_bufptr();
+ push_data = make_data(*from_bufptr);
+ break;
+ }
+ default:
+ throw std::runtime_error("NYI for this DataType");
}
assert(push_data != nullptr);
switch (input_data->dtype())
{
- case loco::DataType::FLOAT32:
- {
- auto input_bufptr = input_data->as_f32_bufptr();
- auto relu_buf = make_buffer<float, LexicalLayout>(*input_data->shape());
- auto *shape = input_data->shape();
-
- for (IndexEnumerator e{*shape}; e.valid(); e.advance())
+ case loco::DataType::FLOAT32:
{
- const auto &index = e.current();
- relu_buf.at(index) = relu_ew(input_bufptr->at(index));
+ auto input_bufptr = input_data->as_f32_bufptr();
+ auto relu_buf = make_buffer<float, LexicalLayout>(*input_data->shape());
+ auto *shape = input_data->shape();
+
+ for (IndexEnumerator e{*shape}; e.valid(); e.advance())
+ {
+ const auto &index = e.current();
+ relu_buf.at(index) = relu_ew(input_bufptr->at(index));
+ }
+
+ relu_data = make_data(relu_buf);
+ break;
}
-
- relu_data = make_data(relu_buf);
- break;
- }
- default:
- throw std::runtime_error("NYI for this DataType");
+ default:
+ throw std::runtime_error("NYI for this DataType");
}
assert(relu_data != nullptr);
switch (input_data->dtype())
{
- case loco::DataType::FLOAT32:
- {
- auto input_bufptr = input_data->as_f32_bufptr();
- auto *shape = input_data->shape();
- auto relu6_buf = make_buffer<float, LexicalLayout>(*shape);
-
- for (IndexEnumerator e{*shape}; e.valid(); e.advance())
+ case loco::DataType::FLOAT32:
{
- const auto &index = e.current();
- relu6_buf.at(index) = relu6_ew(input_bufptr->at(index));
+ auto input_bufptr = input_data->as_f32_bufptr();
+ auto *shape = input_data->shape();
+ auto relu6_buf = make_buffer<float, LexicalLayout>(*shape);
+
+ for (IndexEnumerator e{*shape}; e.valid(); e.advance())
+ {
+ const auto &index = e.current();
+ relu6_buf.at(index) = relu6_ew(input_bufptr->at(index));
+ }
+
+ relu6_data = make_data(relu6_buf);
+ break;
}
-
- relu6_data = make_data(relu6_buf);
- break;
- }
- default:
- throw std::runtime_error("NYI for this DataType");
+ default:
+ throw std::runtime_error("NYI for this DataType");
}
assert(relu6_data != nullptr);
switch (input_data->dtype())
{
- case loco::DataType::FLOAT32:
- {
- auto input_bufptr = input_data->as_f32_bufptr();
- auto *input_shape = input_data->shape();
+ case loco::DataType::FLOAT32:
+ {
+ auto input_bufptr = input_data->as_f32_bufptr();
+ auto *input_shape = input_data->shape();
- using Shape = nncc::core::ADT::tensor::Shape;
- std::unique_ptr<Shape> output_shape(new Shape());
+ using Shape = nncc::core::ADT::tensor::Shape;
+ std::unique_ptr<Shape> output_shape(new Shape());
- output_shape->resize(reshape->rank());
- for (uint32_t axis = 0; axis < output_shape->rank(); ++axis)
- {
- output_shape->dim(axis) = reshape->dim(axis).value();
- }
+ output_shape->resize(reshape->rank());
+ for (uint32_t axis = 0; axis < output_shape->rank(); ++axis)
+ {
+ output_shape->dim(axis) = reshape->dim(axis).value();
+ }
- auto reshape_bufptr = make_buffer<float, LexicalLayout>(*output_shape);
+ auto reshape_bufptr = make_buffer<float, LexicalLayout>(*output_shape);
- float *input_ptr = const_cast<float *>(input_bufptr->base());
- uint64_t input_len = num_elements(*input_shape) * sizeof(float);
+ float *input_ptr = const_cast<float *>(input_bufptr->base());
+ uint64_t input_len = num_elements(*input_shape) * sizeof(float);
- float *output_ptr = reshape_bufptr.base();
- uint64_t output_len = num_elements(*output_shape) * sizeof(float);
+ float *output_ptr = reshape_bufptr.base();
+ uint64_t output_len = num_elements(*output_shape) * sizeof(float);
- assert(input_len == output_len);
- memcpy(output_ptr, input_ptr, input_len);
+ assert(input_len == output_len);
+ memcpy(output_ptr, input_ptr, input_len);
- reshape_data = make_data(reshape_bufptr);
- break;
- }
- default:
- throw std::runtime_error("NYI for this DataType");
+ reshape_data = make_data(reshape_bufptr);
+ break;
+ }
+ default:
+ throw std::runtime_error("NYI for this DataType");
}
assert(reshape_data != nullptr);
std::unique_ptr<NodeData> concat_data = nullptr;
switch (lhs_data->dtype())
{
- case loco::DataType::FLOAT32:
- {
- auto lhs_bufptr = lhs_data->as_f32_bufptr();
- auto rhs_bufptr = rhs_data->as_f32_bufptr();
- auto concat_buf = make_buffer<float, LexicalLayout>(concat_shape);
-
- for (IndexEnumerator e{concat_shape}; e.valid(); e.advance())
+ case loco::DataType::FLOAT32:
{
- const auto &e_index = e.current();
+ auto lhs_bufptr = lhs_data->as_f32_bufptr();
+ auto rhs_bufptr = rhs_data->as_f32_bufptr();
+ auto concat_buf = make_buffer<float, LexicalLayout>(concat_shape);
- if (e_index.at(axis) < left_dim_size)
- {
- // Left index is same as output index
- concat_buf.at(e_index) = lhs_bufptr->at(e_index);
- }
- else
+ for (IndexEnumerator e{concat_shape}; e.valid(); e.advance())
{
- // Adjust right index to valid range
- Index r_index = e_index;
- r_index.at(axis) -= left_dim_size;
- concat_buf.at(e_index) = rhs_bufptr->at(r_index);
+ const auto &e_index = e.current();
+
+ if (e_index.at(axis) < left_dim_size)
+ {
+ // Left index is same as output index
+ concat_buf.at(e_index) = lhs_bufptr->at(e_index);
+ }
+ else
+ {
+ // Adjust right index to valid range
+ Index r_index = e_index;
+ r_index.at(axis) -= left_dim_size;
+ concat_buf.at(e_index) = rhs_bufptr->at(r_index);
+ }
}
- }
- concat_data = make_data(concat_buf);
- break;
- }
- default:
- throw std::runtime_error("NYI for this DataType");
+ concat_data = make_data(concat_buf);
+ break;
+ }
+ default:
+ throw std::runtime_error("NYI for this DataType");
}
assert(concat_data != nullptr);
return "canonical." #OPCODE;
#include "loco/IR/CanonicalNodes.lst"
#undef CANONICAL_NODE
- default:
- break;
+ default:
+ break;
};
return "canonical."
switch (dtype)
{
- case loco::DataType::S32:
- {
- uint32_t input_elements = node->size<loco::DataType::S32>();
- const_node->size<loco::DataType::S32>(input_elements);
- for (uint32_t i = 0; i < input_elements; ++i)
+ case loco::DataType::S32:
{
- const_node->at<loco::DataType::S32>(i) = node->at<loco::DataType::S32>(i);
+ uint32_t input_elements = node->size<loco::DataType::S32>();
+ const_node->size<loco::DataType::S32>(input_elements);
+ for (uint32_t i = 0; i < input_elements; ++i)
+ {
+ const_node->at<loco::DataType::S32>(i) = node->at<loco::DataType::S32>(i);
+ }
+ break;
}
- break;
- }
- case loco::DataType::FLOAT32:
- {
- uint32_t input_elements = node->size<loco::DataType::FLOAT32>();
- const_node->size<loco::DataType::FLOAT32>(input_elements);
- for (uint32_t i = 0; i < input_elements; ++i)
+ case loco::DataType::FLOAT32:
{
- const_node->at<loco::DataType::FLOAT32>(i) = node->at<loco::DataType::FLOAT32>(i);
+ uint32_t input_elements = node->size<loco::DataType::FLOAT32>();
+ const_node->size<loco::DataType::FLOAT32>(input_elements);
+ for (uint32_t i = 0; i < input_elements; ++i)
+ {
+ const_node->at<loco::DataType::FLOAT32>(i) = node->at<loco::DataType::FLOAT32>(i);
+ }
+ break;
}
- break;
- }
- default:
- throw std::runtime_error("NYI for this DataType");
+ default:
+ throw std::runtime_error("NYI for this DataType");
}
// update graph
{
switch (dtype)
{
- case tensorflow::DT_UINT8:
- return loco::DataType::U8;
- case tensorflow::DT_FLOAT:
- return loco::DataType::FLOAT32;
- case tensorflow::DT_BOOL:
- return loco::DataType::U8;
- case tensorflow::DT_INT32:
- return loco::DataType::S32;
- case tensorflow::DT_INT64:
- return loco::DataType::S64;
- case tensorflow::DT_STRING:
- case tensorflow::DT_COMPLEX64:
- default:
- break;
+ case tensorflow::DT_UINT8:
+ return loco::DataType::U8;
+ case tensorflow::DT_FLOAT:
+ return loco::DataType::FLOAT32;
+ case tensorflow::DT_BOOL:
+ return loco::DataType::U8;
+ case tensorflow::DT_INT32:
+ return loco::DataType::S32;
+ case tensorflow::DT_INT64:
+ return loco::DataType::S64;
+ case tensorflow::DT_STRING:
+ case tensorflow::DT_COMPLEX64:
+ default:
+ break;
}
throw std::runtime_error{"Unsupported tensorflow dtype: " + tensorflow::DataType_Name(dtype)};
}
#include "TFNodes.lst"
#undef TENSORFLOW_NODE
- default:
- break;
+ default:
+ break;
}
throw std::runtime_error{"NYI"};
#include "TFNodes.lst"
#undef TENSORFLOW_NODE
- default:
- break;
+ default:
+ break;
}
throw std::runtime_error{"NYI"};
{
switch (shapedata->domain())
{
- case loco::Domain::Tensor:
- {
- loco::TensorShape shape = shapedata->tensor_shape();
- std::unique_ptr<NodeShape> node_shape = stdex::make_unique<NodeShape>(shape);
- return std::move(node_shape);
- }
- break;
-
- case loco::Domain::Feature:
- {
- loco::FeatureShape shape = shapedata->feature_shape();
- std::unique_ptr<NodeShape> node_shape = stdex::make_unique<NodeShape>(shape);
- return std::move(node_shape);
- }
- break;
-
- case loco::Domain::Filter:
- {
- loco::FilterShape shape = shapedata->filter_shape();
- std::unique_ptr<NodeShape> node_shape = stdex::make_unique<NodeShape>(shape);
- return std::move(node_shape);
- }
- break;
-
- case loco::Domain::Bias:
- {
- loco_tobe::BiasShape shape = shapedata->bias_shape();
- std::unique_ptr<NodeShape> node_shape = stdex::make_unique<NodeShape>(shape);
- return std::move(node_shape);
- }
- break;
-
- default:
- throw std::runtime_error("Not supported loco::Domain");
+ case loco::Domain::Tensor:
+ {
+ loco::TensorShape shape = shapedata->tensor_shape();
+ std::unique_ptr<NodeShape> node_shape = stdex::make_unique<NodeShape>(shape);
+ return std::move(node_shape);
+ }
+ break;
+
+ case loco::Domain::Feature:
+ {
+ loco::FeatureShape shape = shapedata->feature_shape();
+ std::unique_ptr<NodeShape> node_shape = stdex::make_unique<NodeShape>(shape);
+ return std::move(node_shape);
+ }
+ break;
+
+ case loco::Domain::Filter:
+ {
+ loco::FilterShape shape = shapedata->filter_shape();
+ std::unique_ptr<NodeShape> node_shape = stdex::make_unique<NodeShape>(shape);
+ return std::move(node_shape);
+ }
+ break;
+
+ case loco::Domain::Bias:
+ {
+ loco_tobe::BiasShape shape = shapedata->bias_shape();
+ std::unique_ptr<NodeShape> node_shape = stdex::make_unique<NodeShape>(shape);
+ return std::move(node_shape);
+ }
+ break;
+
+ default:
+ throw std::runtime_error("Not supported loco::Domain");
}
}
switch (dtype)
{
- case loco::DataType::S32:
- read_value_int32(const_node, num_elements, input_tensor);
- break;
+ case loco::DataType::S32:
+ read_value_int32(const_node, num_elements, input_tensor);
+ break;
- case loco::DataType::FLOAT32:
- read_value_float32(const_node, num_elements, input_tensor);
- break;
+ case loco::DataType::FLOAT32:
+ read_value_float32(const_node, num_elements, input_tensor);
+ break;
- // TODO support other types
+ // TODO support other types
- default:
- throw std::runtime_error{"Error: Unsupported data type for " + node.name()};
+ default:
+ throw std::runtime_error{"Error: Unsupported data type for " + node.name()};
}
// register string-name to node
switch (dtype)
{
- case loco::DataType::S32:
- read_value_int32(const_node, num_elements, input_tensor);
- break;
+ case loco::DataType::S32:
+ read_value_int32(const_node, num_elements, input_tensor);
+ break;
- case loco::DataType::FLOAT32:
- read_value_float32(const_node, num_elements, input_tensor);
- break;
+ case loco::DataType::FLOAT32:
+ read_value_float32(const_node, num_elements, input_tensor);
+ break;
- // TODO support other types
+ // TODO support other types
- default:
- throw std::runtime_error{"Error: Unsupported data type for " + node.name()};
+ default:
+ throw std::runtime_error{"Error: Unsupported data type for " + node.name()};
}
// register string-name to node
auto dtype = node->dtype();
switch (dtype)
{
- case loco::DataType::S32:
- ss << node->size<loco::DataType::S32>();
- break;
- case loco::DataType::FLOAT32:
- ss << node->size<loco::DataType::FLOAT32>();
- break;
- default:
- throw std::runtime_error("NYI for this DataType");
+ case loco::DataType::S32:
+ ss << node->size<loco::DataType::S32>();
+ break;
+ case loco::DataType::FLOAT32:
+ ss << node->size<loco::DataType::FLOAT32>();
+ break;
+ default:
+ throw std::runtime_error("NYI for this DataType");
}
s.args().append("size", ss.str());
s.state(locop::NodeSummary::State::PartiallyKnown);
{
switch (_direction)
{
- case Direction::Forward:
- {
- return _lines.at(n);
- }
- case Direction::Reverse:
- {
- return _lines.at(lines() - n - 1);
- }
+ case Direction::Forward:
+ {
+ return _lines.at(n);
+ }
+ case Direction::Reverse:
+ {
+ return _lines.at(lines() - n - 1);
+ }
}
throw std::runtime_error{"unreachable"};
{
switch (info->kind())
{
- case nnkit::support::tftestinfo::ParsedTensor::Kind::Input:
- sig.add_input(moco::tf::TensorName{info->name()});
- break;
+ case nnkit::support::tftestinfo::ParsedTensor::Kind::Input:
+ sig.add_input(moco::tf::TensorName{info->name()});
+ break;
- case nnkit::support::tftestinfo::ParsedTensor::Kind::Output:
- sig.add_output(moco::tf::TensorName{info->name()});
- break;
+ case nnkit::support::tftestinfo::ParsedTensor::Kind::Output:
+ sig.add_output(moco::tf::TensorName{info->name()});
+ break;
- default:
- throw std::runtime_error{"Unknown kind"};
+ default:
+ throw std::runtime_error{"Unknown kind"};
}
}
}
switch (dtype)
{
- case tensorflow::DT_FLOAT:
- pack<float>(tensor);
- break;
- case tensorflow::DT_INT32:
- pack<int32_t>(tensor);
- break;
- default:
- throw std::runtime_error{"Unsupported dtype"};
+ case tensorflow::DT_FLOAT:
+ pack<float>(tensor);
+ break;
+ case tensorflow::DT_INT32:
+ pack<int32_t>(tensor);
+ break;
+ default:
+ throw std::runtime_error{"Unsupported dtype"};
}
}
}
switch (dtype)
{
- case tensorflow::DT_FLOAT:
- unpack<float>(tensor);
- break;
- case tensorflow::DT_INT32:
- unpack<int32_t>(tensor);
- break;
- default:
- throw std::runtime_error{"Unsupported dtype"};
+ case tensorflow::DT_FLOAT:
+ unpack<float>(tensor);
+ break;
+ case tensorflow::DT_INT32:
+ unpack<int32_t>(tensor);
+ break;
+ default:
+ throw std::runtime_error{"Unsupported dtype"};
}
}
}
{
switch (value)
{
- case tflchef::SAME:
- return tflite::Padding_SAME;
- case tflchef::VALID:
- return tflite::Padding_VALID;
- default:
- break;
+ case tflchef::SAME:
+ return tflite::Padding_SAME;
+ case tflchef::VALID:
+ return tflite::Padding_VALID;
+ default:
+ break;
}
throw std::runtime_error{"Unknown padding value"};
{
switch (value)
{
- case tflchef::NONE:
- return tflite::ActivationFunctionType_NONE;
- case tflchef::RELU:
- return tflite::ActivationFunctionType_RELU;
- case tflchef::RELU6:
- return tflite::ActivationFunctionType_RELU6;
- default:
- break;
+ case tflchef::NONE:
+ return tflite::ActivationFunctionType_NONE;
+ case tflchef::RELU:
+ return tflite::ActivationFunctionType_RELU;
+ case tflchef::RELU6:
+ return tflite::ActivationFunctionType_RELU6;
+ default:
+ break;
}
throw std::runtime_error{"Unknown activation"};
{
switch (value)
{
- case tflchef::FLOAT32:
- return tflite::TensorType_FLOAT32;
- case tflchef::INT32:
- return tflite::TensorType_INT32;
- default:
- break;
+ case tflchef::FLOAT32:
+ return tflite::TensorType_FLOAT32;
+ case tflchef::INT32:
+ return tflite::TensorType_INT32;
+ default:
+ break;
}
throw std::runtime_error{"Unknown tensor type"};
switch (type)
{
- case tflchef::INT32:
- return s32;
- case tflchef::FLOAT32:
- return fp32;
- default:
- break;
+ case tflchef::INT32:
+ return s32;
+ case tflchef::FLOAT32:
+ return fp32;
+ default:
+ break;
}
throw std::runtime_error{"Unknown tensor type"};
{
switch (type)
{
- case tflite::TensorType_FLOAT32:
- return tflchef::FLOAT32;
- case tflite::TensorType_INT32:
- return tflchef::INT32;
- // TODO handle other types
- // TensorType_FLOAT16
- // TensorType_UINT8
- // TensorType_INT64
- // TensorType_STRING
- // TensorType_BOOL
- // TensorType_INT16
- // TensorType_COMPLEX64
- default:
- throw std::runtime_error{"unsupported tensor type"};
+ case tflite::TensorType_FLOAT32:
+ return tflchef::FLOAT32;
+ case tflite::TensorType_INT32:
+ return tflchef::INT32;
+ // TODO handle other types
+ // TensorType_FLOAT16
+ // TensorType_UINT8
+ // TensorType_INT64
+ // TensorType_STRING
+ // TensorType_BOOL
+ // TensorType_INT16
+ // TensorType_COMPLEX64
+ default:
+ throw std::runtime_error{"unsupported tensor type"};
}
}
{
switch (type)
{
- case tflite::ActivationFunctionType_NONE:
- return tflchef::NONE;
- case tflite::ActivationFunctionType_RELU:
- return tflchef::RELU;
- case tflite::ActivationFunctionType_RELU6:
- return tflchef::RELU6;
- // TODO handle other types
- // ActivationFunctionType_RELU_N1_TO_1
- // ActivationFunctionType_TANH
- // ActivationFunctionType_SIGN_BIT
- default:
- throw std::runtime_error{"unsupported activation type"};
+ case tflite::ActivationFunctionType_NONE:
+ return tflchef::NONE;
+ case tflite::ActivationFunctionType_RELU:
+ return tflchef::RELU;
+ case tflite::ActivationFunctionType_RELU6:
+ return tflchef::RELU6;
+ // TODO handle other types
+ // ActivationFunctionType_RELU_N1_TO_1
+ // ActivationFunctionType_TANH
+ // ActivationFunctionType_SIGN_BIT
+ default:
+ throw std::runtime_error{"unsupported activation type"};
}
}
{
switch (padding)
{
- case tflite::Padding_SAME:
- return tflchef::SAME;
- case tflite::Padding_VALID:
- return tflchef::VALID;
- default:
- throw std::runtime_error{"unsupported padding"};
+ case tflite::Padding_SAME:
+ return tflchef::SAME;
+ case tflite::Padding_VALID:
+ return tflchef::VALID;
+ default:
+ throw std::runtime_error{"unsupported padding"};
}
}