else
{
// fill element of output kernel with zero element
- assert(folded_kernel.getDataType() == mir::DTYPE::FLOAT32 &&
+ assert(folded_kernel.getDataType() == mir::DataType::FLOAT32 &&
"unsupported data type, add appropriate zero element creation");
auto elem = reinterpret_cast<float *>(unfold_kernel.at(idx));
*elem = 0.0f;
{
const void *src_data;
- DTYPE dtype;
+ mir::DataType dtype;
if (blob.data_size() != 0)
{
assert(blob.double_data_size() == 0);
- dtype = DTYPE::FLOAT32;
+ dtype = mir::DataType::FLOAT32;
src_data = blob.data().data();
}
else if (blob.double_data_size() != 0)
{
- dtype = DTYPE::FLOAT64;
+ dtype = mir::DataType::FLOAT64;
src_data = blob.double_data().data();
}
else
{
if (opts.coeff().Get(i) != 1.0f)
{
- TensorVariant coeff_tensor(DTYPE::FLOAT32, Shape{1}, &opts.coeff().Get(i));
+ TensorVariant coeff_tensor(mir::DataType::FLOAT32, Shape{1}, &opts.coeff().Get(i));
auto coeff_const = createOp<ops::ConstantOp>(layer.name() + "_const", coeff_tensor);
std::vector<mir::Operation::Output *> mul_inputs;
mul_inputs.push_back(coeff_const->getOutput(0));
/// @brief Build a TensorVariant of the given shape filled with zeros.
/// @param shape element count/extents of the tensor to create
/// @return a float32 TensorVariant whose every element is 0.0f
/// @note TensorVariant copies the buffer, so the local vector is safe to free.
static TensorVariant createZeroedTensor(const mir::Shape &shape)
{
  // TODO For now it is hardcoded float32.
  auto elem_type = mir::DataType::FLOAT32;
  std::vector<float> zeros(static_cast<std::size_t>(shape.numElements()), 0.0f);
  return TensorVariant(elem_type, shape, zeros.data());
}