const auto num_output = _ctx.at(weight_index).shape().dim(0);
auto weight_base = _ctx.at(weight_index).data().base();
auto weight_size = _ctx.at(weight_index).data().size();
- auto weight_type = _ctx.at(weight_index).shape().type();
+ auto weight_type = _ctx.at(weight_index).typeInfo().type();
// NOTE We assume that input is a feature map
// TODO Remove this restriction!
switch (weight_type)
{
- case ANEURALNETWORKS_TENSOR_FLOAT32:
+ case ::neurun::internal::operand::DataType::NEURUN_TENSOR_FLOAT32:
{
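// Return an initializer that reinterprets the captured FP32 weight blob as an
// NCHW kernel and writes it into the given arm_compute tensor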
return [num_output, ifm_shape, weight_base, weight_size](::arm_compute::ITensor &tensor) {
const ::nnfw::util::kernel::Shape ker_shape{num_output, ifm_shape.C, ifm_shape.H, ifm_shape.W};
};
}
- case ANEURALNETWORKS_TENSOR_QUANT8_ASYMM:
+ case ::neurun::internal::operand::DataType::NEURUN_TENSOR_QUANT8_ASYMM:
{
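// Same shape handling as the FP32 path above; here the kernel data consists of
// uint8 quantized values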
return [num_output, ifm_shape, weight_base, weight_size](::arm_compute::ITensor &tensor) {
const ::nnfw::util::kernel::Shape ker_shape{num_output, ifm_shape.C, ifm_shape.H, ifm_shape.W};
};
}
const ::neurun::graph::operand::Index bias_index{node.param().bias_index};
auto bias_base = _ctx.at(bias_index).data().base();
- auto bias_type = _ctx.at(bias_index).shape().type();
+ auto bias_type = _ctx.at(bias_index).typeInfo().type();
const auto bias_size = _ctx.at(bias_index).shape().asVector();
switch (bias_type)
{
- case ANEURALNETWORKS_TENSOR_FLOAT32:
+ case ::neurun::internal::operand::DataType::NEURUN_TENSOR_FLOAT32:
{
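// Return an initializer that copies each FP32 bias element into the tensor,
// one coordinate at a time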
return [bias_base, bias_size](::arm_compute::ITensor &tensor) {
  auto base = reinterpret_cast<const float *>(bias_base);
  for (uint32_t n = 0; n < bias_size; ++n)
  {
    const ::arm_compute::Coordinates coordinate{n};
    *reinterpret_cast<float *>(tensor.ptr_to_element(coordinate)) = base[n];
  }
};
}
- case ANEURALNETWORKS_TENSOR_QUANT8_ASYMM:
+ case ::neurun::internal::operand::DataType::NEURUN_TENSOR_QUANT8_ASYMM:
{
return [bias_base, bias_size](::arm_compute::ITensor &tensor) {
  auto base = reinterpret_cast<const uint8_t *>(bias_base);
  for (uint32_t n = 0; n < bias_size; ++n)
  {
    const ::arm_compute::Coordinates coordinate{n};
    *reinterpret_cast<uint8_t *>(tensor.ptr_to_element(coordinate)) = base[n];
  }
};
}
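
For reference, this change moves the type dispatch off the NN API `ANEURALNETWORKS_*` constants and onto the runtime's own operand type info, read via `typeInfo().type()` instead of `shape().type()`. Below is a minimal sketch of the internal declarations the diff assumes. Only `DataType::NEURUN_TENSOR_FLOAT32`, `DataType::NEURUN_TENSOR_QUANT8_ASYMM`, and the `typeInfo()` accessor appear in the diff itself; the enumerator set, the quantization fields, and the `TypeInfo` layout are illustrative guesses, not the actual headers.

#include <cstdint>

namespace neurun
{
namespace internal
{
namespace operand
{

// Hypothetical sketch: everything beyond the two enumerators and type() used
// in the diff above is an assumption, not the real neurun header.
enum class DataType
{
  NEURUN_TENSOR_FLOAT32,
  NEURUN_TENSOR_QUANT8_ASYMM,
};

class TypeInfo
{
public:
  TypeInfo(DataType type, float scale, int32_t offset)
      : _type{type}, _scale{scale}, _offset{offset}
  {
  }

  DataType type() const { return _type; }    // what _ctx.at(index).typeInfo().type() returns
  float scale() const { return _scale; }     // quantization scale (illustrative field)
  int32_t offset() const { return _offset; } // quantization zero point (illustrative field)

private:
  DataType _type;
  float _scale;
  int32_t _offset;
};

} // namespace operand
} // namespace internal
} // namespace neurun

Dispatching on an internal enum keeps this backend code independent of the NN API header, which is presumably the motivation for the change.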