[nnc] Rename DTYPE to DataType (#6248)
author: Сергей Баранников/AI Tools Lab /SRR/Engineer/삼성전자 <s.barannikov@samsung.com>
Mon, 5 Aug 2019 17:40:16 +0000 (20:40 +0300)
committer: Efimov Alexander/AI Tools Lab/./Samsung Electronics <a.efimov@samsung.com>
Mon, 5 Aug 2019 17:40:16 +0000 (20:40 +0300)
Replace deprecated `DTYPE` with `DataType`.

Signed-off-by: Sergei Barannikov <s.barannikov@samsung.com>
compiler/nnc/passes/interpreter/interpreter_pass.cpp
compiler/nnc/passes/interpreter/ops/OperationImpl.h
compiler/nnc/passes/optimizations/FuseArithmeticOps.cpp
compiler/nnc/unittests/acl_backend/MIRToDOM.cpp
compiler/nnc/unittests/optimizations/FuseArithmeticOps.cpp
compiler/nnc/unittests/optimizations/RemoveDeadEnds.cpp
compiler/nnc/unittests/soft_backend/CPPOperations.cpp

index e6b6477..ad9f8c4 100644 (file)
@@ -88,9 +88,9 @@ static void writeTensorToHDF5File(const TensorVariant& tensor,
 
 #endif  // NNC_HDF5_SUPPORTED
 
-static TensorVariant readTensorFromFile(const std::string& filename, DTYPE dtype,
+static TensorVariant readTensorFromFile(const std::string& filename, DataType data_type,
                                         const Shape& shape) {
-  assert(dtype == DTYPE::FLOAT32);
+  assert(data_type == DataType::FLOAT32);
   std::size_t input_data_size = shape.numElements() * sizeof(float);
 
   std::ifstream stream(filename, std::ios::in | std::ios::binary);
@@ -113,7 +113,7 @@ static TensorVariant readTensorFromFile(const std::string& filename, DTYPE dtype
   if (stream.fail())
     throw PassException("Couldn't read file \"" + filename + "\".");
 
-  return TensorVariant(dtype, shape, data.get());
+  return TensorVariant(data_type, shape, data.get());
 }
 
 PassData InterpreterPass::run(PassData data) {
@@ -126,7 +126,7 @@ PassData InterpreterPass::run(PassData data) {
     std::string tensor_name = input_op->getName();
     std::replace(tensor_name.begin(), tensor_name.end(), '/', '_');
     std::string filename = cli::interInputDataDir + "/" + tensor_name + ".dat";
-    auto tensor = readTensorFromFile(filename, DTYPE::FLOAT32, input_op->getOutputShape(0));
+    auto tensor = readTensorFromFile(filename, DataType::FLOAT32, input_op->getOutputShape(0));
     interpreter.setInput(input_op->getName(), tensor);
   }
 
index 105f28b..9476f22 100644 (file)
@@ -30,10 +30,10 @@ public:
 
 protected:
   mir::TensorVariant allocate_tensor(const mir::Shape& shape) {
-    // Use hardcoded DTYPE for now, since theres no support for operations on types other than
+    // Use hardcoded DataType for now, since theres no support for operations on types other than
     // floats
     std::vector<float> zeros(static_cast<std::size_t>(shape.numElements()), 0.0f);
-    return mir::TensorVariant(mir::DTYPE::FLOAT32, shape, zeros.data());
+    return mir::TensorVariant(mir::DataType::FLOAT32, shape, zeros.data());
   }
 };
 
index a99a033..d615a6c 100644 (file)
@@ -93,7 +93,7 @@ Operation* mergeConstantOps(Graph* g, const ops::ConstantOp* const1_op,
   assert(const1_val.getShape().dim(0) == const2_val.getShape().dim(0));
 
   // Create and fill TensorVariant for new ConstantOp
-  TensorVariant new_const_val(DTYPE::FLOAT32, const1_val.getShape());
+  TensorVariant new_const_val(DataType::FLOAT32, const1_val.getShape());
   Tensor<float> const1_accessor(const1_val);
   Tensor<float> const2_accessor(const2_val);
   Tensor<float> new_const_accessor(new_const_val);
index 2306035..1e75f81 100644 (file)
@@ -200,7 +200,7 @@ TensorVariant createTensorVariant(const Shape& shape) {
   float* data_ptr = data.get();
   for (int32_t i = 0; i < num_elems; ++i)
     data_ptr[i] = i;
-  return TensorVariant(DTYPE::FLOAT32, shape, data_ptr);
+  return TensorVariant(DataType::FLOAT32, shape, data_ptr);
 }
 
 }
index e738c0b..f8bbd3b 100644 (file)
@@ -35,19 +35,19 @@ TEST(OptPass, fuseConvBiasScaleScaleBias) {
 
   // Create graph: 'input->conv->bias->scale->scale->bias'
   auto input = g.create<ops::InputOp>("input", Shape{1, 299, 299, 3});
-  auto conv_const = g.create<ops::ConstantOp>("", TensorVariant(DTYPE::FLOAT32, {10, 3, 3, 3}));
+  auto conv_const = g.create<ops::ConstantOp>("", TensorVariant(DataType::FLOAT32, {10, 3, 3, 3}));
   std::vector<int32_t> padding{0, 0};
   auto conv = g.create<ops::Conv2DOp>("conv", input->getOutput(0), conv_const->getOutput(0),
                                             Shape{1, 1}, padding, padding);
-  auto bias1_const = g.create<ops::ConstantOp>("", TensorVariant(DTYPE::FLOAT32, {10}));
+  auto bias1_const = g.create<ops::ConstantOp>("", TensorVariant(DataType::FLOAT32, {10}));
   auto bias1 = g.create<ops::BiasAddOp>("bias1", conv->getOutput(0), bias1_const->getOutput(0));
-  auto scale1_const = g.create<ops::ConstantOp>("", TensorVariant(DTYPE::FLOAT32, {10}));
+  auto scale1_const = g.create<ops::ConstantOp>("", TensorVariant(DataType::FLOAT32, {10}));
   auto scale1 = g.create<ops::ScaleOp>("scale1", bias1->getOutput(0), scale1_const->getOutput(0));
-  auto scale2_const = g.create<ops::ConstantOp>("", TensorVariant(DTYPE::FLOAT32, {10}));
+  auto scale2_const = g.create<ops::ConstantOp>("", TensorVariant(DataType::FLOAT32, {10}));
   auto scale2 = g.create<ops::ScaleOp>("scale2", scale1->getOutput(0), scale2_const->getOutput(0));
-  auto scale3_const = g.create<ops::ConstantOp>("", TensorVariant(DTYPE::FLOAT32, {10}));
+  auto scale3_const = g.create<ops::ConstantOp>("", TensorVariant(DataType::FLOAT32, {10}));
   auto scale3 = g.create<ops::ScaleOp>("scale3", scale2->getOutput(0), scale3_const->getOutput(0));
-  auto bias2_const = g.create<ops::ConstantOp>("", TensorVariant(DTYPE::FLOAT32, {10}));
+  auto bias2_const = g.create<ops::ConstantOp>("", TensorVariant(DataType::FLOAT32, {10}));
   g.create<ops::BiasAddOp>("", scale3->getOutput(0), bias2_const->getOutput(0));
 
   // Check that layout is desired
index 789b803..9d0f440 100644 (file)
@@ -35,10 +35,10 @@ TEST(OptPass, removeDeadEndConstants) {
    *        ||
    *      [relu]
    */
-  Operation* C0 = g.create<ops::ConstantOp>("C0", TensorVariant(DTYPE::FLOAT32, {2, 2}));
+  Operation* C0 = g.create<ops::ConstantOp>("C0", TensorVariant(DataType::FLOAT32, {2, 2}));
   Operation* input = g.create<ops::InputOp>("input", Shape{1, 2, 3});
-  Operation* C1 = g.create<ops::ConstantOp>("C1", TensorVariant(DTYPE::FLOAT32, {2, 2}));
-  Operation* C2 = g.create<ops::ConstantOp>("C2", TensorVariant(DTYPE::FLOAT32, {2, 2}));
+  Operation* C1 = g.create<ops::ConstantOp>("C1", TensorVariant(DataType::FLOAT32, {2, 2}));
+  Operation* C2 = g.create<ops::ConstantOp>("C2", TensorVariant(DataType::FLOAT32, {2, 2}));
   Operation* relu = g.create<ops::ReluOp>("relu", input->getOutput(0));
 
   std::stringstream ss;
index ffc06e7..61fe5b9 100644 (file)
@@ -207,7 +207,7 @@ void fillTensors(unique_ptr<mir::TensorVariant> &ntensor,
   mir::Shape nshape;
   fillShapes(nshape, ashape, shape);
   atensor.reshape(ashape);
-  ntensor.reset(new mir::TensorVariant(mir::DTYPE::FLOAT32, nshape));
+  ntensor.reset(new mir::TensorVariant(mir::DataType::FLOAT32, nshape));
   fillNTensor(*ntensor, start);
   copyATensorFromNTensor(atensor, *ntensor);
 }
@@ -268,7 +268,7 @@ bool areFloatsNear(float a, float b, int32_t ulp, float eps) {
  */
 void compareResults(const mir::TensorVariant &ref_nnc_tensor, const Tensor &test_art_tensor) {
   assert(ref_nnc_tensor.getElementSize() == 4L &&
-         ref_nnc_tensor.getDataType() == mir::DTYPE::FLOAT32);
+         ref_nnc_tensor.getDataType() == mir::DataType::FLOAT32);
 
   const mir::Shape &nnc_shape = ref_nnc_tensor.getShape();
   const Shape &art_shape = test_art_tensor.getShape();
@@ -969,7 +969,7 @@ TEST(cpp_operations_test, pad) {
   float const_value = 0.0;
 
   mir::Scalar constant_value(reinterpret_cast<char*>(&const_value),
-                             mir::DTYPE::FLOAT32, sizeof(float));
+                             mir::DataType::FLOAT32, sizeof(float));
 
   auto op_generator = [num_dims, &paddings, &constant_value]
       (mir::Graph& g, const std::vector<mir::Operation::Output*>& inputs) {