Replace deprecated `DTYPE` with `DataType`.
Signed-off-by: Sergei Barannikov <s.barannikov@samsung.com>
#endif // NNC_HDF5_SUPPORTED
-static TensorVariant readTensorFromFile(const std::string& filename, DTYPE dtype,
+static TensorVariant readTensorFromFile(const std::string& filename, DataType data_type,
const Shape& shape) {
- assert(dtype == DTYPE::FLOAT32);
+ assert(data_type == DataType::FLOAT32);
std::size_t input_data_size = shape.numElements() * sizeof(float);
std::ifstream stream(filename, std::ios::in | std::ios::binary);
if (stream.fail())
throw PassException("Couldn't read file \"" + filename + "\".");
- return TensorVariant(dtype, shape, data.get());
+ return TensorVariant(data_type, shape, data.get());
}
PassData InterpreterPass::run(PassData data) {
std::string tensor_name = input_op->getName();
std::replace(tensor_name.begin(), tensor_name.end(), '/', '_');
std::string filename = cli::interInputDataDir + "/" + tensor_name + ".dat";
- auto tensor = readTensorFromFile(filename, DTYPE::FLOAT32, input_op->getOutputShape(0));
+ auto tensor = readTensorFromFile(filename, DataType::FLOAT32, input_op->getOutputShape(0));
interpreter.setInput(input_op->getName(), tensor);
}
protected:
mir::TensorVariant allocate_tensor(const mir::Shape& shape) {
- // Use hardcoded DTYPE for now, since theres no support for operations on types other than
+  // Use hardcoded DataType for now, since there's no support for operations on types other than
// floats
std::vector<float> zeros(static_cast<std::size_t>(shape.numElements()), 0.0f);
- return mir::TensorVariant(mir::DTYPE::FLOAT32, shape, zeros.data());
+ return mir::TensorVariant(mir::DataType::FLOAT32, shape, zeros.data());
}
};
assert(const1_val.getShape().dim(0) == const2_val.getShape().dim(0));
// Create and fill TensorVariant for new ConstantOp
- TensorVariant new_const_val(DTYPE::FLOAT32, const1_val.getShape());
+ TensorVariant new_const_val(DataType::FLOAT32, const1_val.getShape());
Tensor<float> const1_accessor(const1_val);
Tensor<float> const2_accessor(const2_val);
Tensor<float> new_const_accessor(new_const_val);
float* data_ptr = data.get();
for (int32_t i = 0; i < num_elems; ++i)
data_ptr[i] = i;
- return TensorVariant(DTYPE::FLOAT32, shape, data_ptr);
+ return TensorVariant(DataType::FLOAT32, shape, data_ptr);
}
}
// Create graph: 'input->conv->bias->scale->scale->bias'
auto input = g.create<ops::InputOp>("input", Shape{1, 299, 299, 3});
- auto conv_const = g.create<ops::ConstantOp>("", TensorVariant(DTYPE::FLOAT32, {10, 3, 3, 3}));
+ auto conv_const = g.create<ops::ConstantOp>("", TensorVariant(DataType::FLOAT32, {10, 3, 3, 3}));
std::vector<int32_t> padding{0, 0};
auto conv = g.create<ops::Conv2DOp>("conv", input->getOutput(0), conv_const->getOutput(0),
Shape{1, 1}, padding, padding);
- auto bias1_const = g.create<ops::ConstantOp>("", TensorVariant(DTYPE::FLOAT32, {10}));
+ auto bias1_const = g.create<ops::ConstantOp>("", TensorVariant(DataType::FLOAT32, {10}));
auto bias1 = g.create<ops::BiasAddOp>("bias1", conv->getOutput(0), bias1_const->getOutput(0));
- auto scale1_const = g.create<ops::ConstantOp>("", TensorVariant(DTYPE::FLOAT32, {10}));
+ auto scale1_const = g.create<ops::ConstantOp>("", TensorVariant(DataType::FLOAT32, {10}));
auto scale1 = g.create<ops::ScaleOp>("scale1", bias1->getOutput(0), scale1_const->getOutput(0));
- auto scale2_const = g.create<ops::ConstantOp>("", TensorVariant(DTYPE::FLOAT32, {10}));
+ auto scale2_const = g.create<ops::ConstantOp>("", TensorVariant(DataType::FLOAT32, {10}));
auto scale2 = g.create<ops::ScaleOp>("scale2", scale1->getOutput(0), scale2_const->getOutput(0));
- auto scale3_const = g.create<ops::ConstantOp>("", TensorVariant(DTYPE::FLOAT32, {10}));
+ auto scale3_const = g.create<ops::ConstantOp>("", TensorVariant(DataType::FLOAT32, {10}));
auto scale3 = g.create<ops::ScaleOp>("scale3", scale2->getOutput(0), scale3_const->getOutput(0));
- auto bias2_const = g.create<ops::ConstantOp>("", TensorVariant(DTYPE::FLOAT32, {10}));
+ auto bias2_const = g.create<ops::ConstantOp>("", TensorVariant(DataType::FLOAT32, {10}));
g.create<ops::BiasAddOp>("", scale3->getOutput(0), bias2_const->getOutput(0));
// Check that layout is desired
* ||
* [relu]
*/
- Operation* C0 = g.create<ops::ConstantOp>("C0", TensorVariant(DTYPE::FLOAT32, {2, 2}));
+ Operation* C0 = g.create<ops::ConstantOp>("C0", TensorVariant(DataType::FLOAT32, {2, 2}));
Operation* input = g.create<ops::InputOp>("input", Shape{1, 2, 3});
- Operation* C1 = g.create<ops::ConstantOp>("C1", TensorVariant(DTYPE::FLOAT32, {2, 2}));
- Operation* C2 = g.create<ops::ConstantOp>("C2", TensorVariant(DTYPE::FLOAT32, {2, 2}));
+ Operation* C1 = g.create<ops::ConstantOp>("C1", TensorVariant(DataType::FLOAT32, {2, 2}));
+ Operation* C2 = g.create<ops::ConstantOp>("C2", TensorVariant(DataType::FLOAT32, {2, 2}));
Operation* relu = g.create<ops::ReluOp>("relu", input->getOutput(0));
std::stringstream ss;
mir::Shape nshape;
fillShapes(nshape, ashape, shape);
atensor.reshape(ashape);
- ntensor.reset(new mir::TensorVariant(mir::DTYPE::FLOAT32, nshape));
+ ntensor.reset(new mir::TensorVariant(mir::DataType::FLOAT32, nshape));
fillNTensor(*ntensor, start);
copyATensorFromNTensor(atensor, *ntensor);
}
*/
void compareResults(const mir::TensorVariant &ref_nnc_tensor, const Tensor &test_art_tensor) {
assert(ref_nnc_tensor.getElementSize() == 4L &&
- ref_nnc_tensor.getDataType() == mir::DTYPE::FLOAT32);
+ ref_nnc_tensor.getDataType() == mir::DataType::FLOAT32);
const mir::Shape &nnc_shape = ref_nnc_tensor.getShape();
const Shape &art_shape = test_art_tensor.getShape();
float const_value = 0.0;
mir::Scalar constant_value(reinterpret_cast<char*>(&const_value),
- mir::DTYPE::FLOAT32, sizeof(float));
+ mir::DataType::FLOAT32, sizeof(float));
auto op_generator = [num_dims, &paddings, &constant_value]
(mir::Graph& g, const std::vector<mir::Operation::Output*>& inputs) {