switch (weight_type)
{
- case ::neurun::internal::operand::DataType::NEURUN_TENSOR_FLOAT32:
+ case ::neurun::internal::operand::DataType::TENSOR_FLOAT32:
{
return [num_output, ifm_shape, weight_base, weight_size](::arm_compute::ITensor &tensor) {
const ::nnfw::util::kernel::Shape ker_shape{num_output, ifm_shape.C, ifm_shape.H,
};
};
}
- case ::neurun::internal::operand::DataType::NEURUN_TENSOR_QUANT8_ASYMM:
+ case ::neurun::internal::operand::DataType::TENSOR_QUANT8_ASYMM:
{
return [num_output, ifm_shape, weight_base, weight_size](::arm_compute::ITensor &tensor) {
const ::nnfw::util::kernel::Shape ker_shape{num_output, ifm_shape.C, ifm_shape.H,
switch (bias_type)
{
- case ::neurun::internal::operand::DataType::NEURUN_TENSOR_FLOAT32:
+ case ::neurun::internal::operand::DataType::TENSOR_FLOAT32:
{
return [bias_base, bias_size](::arm_compute::ITensor &tensor) {
for (uint32_t n = 0; n < bias_size; ++n)
}
};
}
- case ::neurun::internal::operand::DataType::NEURUN_TENSOR_QUANT8_ASYMM:
+ case ::neurun::internal::operand::DataType::TENSOR_QUANT8_ASYMM:
{
return [bias_base, bias_size](::arm_compute::ITensor &tensor) {
for (uint32_t n = 0; n < bias_size; ++n)
// Tags describing the element type of an operand (scalar vs. tensor,
// float vs. integer vs. asymmetric-quantized uint8).
// The diff hunk below renames the NEURUN_-prefixed enumerators to the
// shorter spellings used by the switch cases earlier in this patch
// (e.g. DataType::TENSOR_FLOAT32); the numeric values (0..5) are kept
// identical on the '+' lines, so any stored/serialized values remain
// compatible across the rename.
// NOTE(review): the value assignments presumably track the NNAPI
// operand-type codes (FLOAT32=0 ... TENSOR_QUANT8_ASYMM=5) — confirm
// against the NeuralNetworks.h header before relying on that mapping.
enum class DataType
{
- NEURUN_SCALAR_FLOAT32 = 0,
- NEURUN_SCALAR_INT32 = 1,
- NEURUN_SCALAR_UINT32 = 2,
+ SCALAR_FLOAT32 = 0,
+ SCALAR_INT32 = 1,
+ SCALAR_UINT32 = 2,
- NEURUN_TENSOR_FLOAT32 = 3,
- NEURUN_TENSOR_INT32 = 4,
+ TENSOR_FLOAT32 = 3,
+ TENSOR_INT32 = 4,
- NEURUN_TENSOR_QUANT8_ASYMM = 5,
+ TENSOR_QUANT8_ASYMM = 5,
};
} // namespace operand