[nnpkg-run] generate random input for qint8 type (#9033)
author이상규/On-Device Lab(SR)/Principal Engineer/삼성전자 <sg5.lee@samsung.com>
Fri, 29 Nov 2019 03:57:21 +0000 (12:57 +0900)
committer이춘석/On-Device Lab(SR)/Staff Engineer/삼성전자 <chunseok.lee@samsung.com>
Fri, 29 Nov 2019 03:57:21 +0000 (12:57 +0900)
nnpackage_run can generate random input for qint8-type input tensors.
Note that load and dump need more work.

Signed-off-by: Sanggyu Lee <sg5.lee@samsung.com>
tests/tools/nnpackage_run/src/nnpackage_run.cc

index 6952c51..3745250 100644 (file)
@@ -53,14 +53,34 @@ uint64_t num_elems(const nnfw_tensorinfo *ti)
   return n;
 }
 
-template <class T> std::vector<T> randomData(RandomGenerator &randgen, uint64_t size)
+uint64_t bufsize_for(const nnfw_tensorinfo *ti)
+{
+  static int elmsize[] = {
+      sizeof(float), /* NNFW_TYPE_TENSOR_FLOAT32 */
+      sizeof(int),   /* NNFW_TYPE_TENSOR_INT32 */
+      sizeof(char),  /* NNFW_TYPE_TENSOR_QUANT8_ASYMM */
+      sizeof(bool),  /* NNFW_TYPE_TENSOR_BOOL = 3 */
+  };
+  return elmsize[ti->dtype] * num_elems(ti);
+}
+
+template <class T> void randomData(RandomGenerator &randgen, void *data, uint64_t size)
 {
-  std::vector<T> vec(size);
   for (uint64_t i = 0; i < size; i++)
-    vec[i] = randgen.generate<T>();
-  return vec;
+    reinterpret_cast<T *>(data)[i] = randgen.generate<T>();
 }
 
+class Allocation
+{
+public:
+  Allocation() : data_(nullptr) {}
+  ~Allocation() { free(data_); }
+  void *data() const { return data_; }
+  void *alloc(uint64_t sz) { return data_ = malloc(sz); }
+private:
+  void *data_;
+};
+
 } // unnamed namespace
 
 // TODO Replace this with nnfw::misc::benchmark::Accumulator
@@ -147,9 +167,9 @@ int main(const int argc, char **argv)
     {
       nnfw_tensorinfo ti;
       NNPR_ENSURE_STATUS(nnfw_input_tensorinfo(session, i, &ti));
-      if (ti.dtype != NNFW_TYPE_TENSOR_FLOAT32)
+      if (ti.dtype != NNFW_TYPE_TENSOR_FLOAT32 && ti.dtype != NNFW_TYPE_TENSOR_QUANT8_ASYMM)
       {
-        std::cerr << "Only float 32bit is supported." << std::endl;
+        std::cerr << "Only FLOAT32 and QUANT8_ASYMM are supported." << std::endl;
         exit(-1);
       }
     }
@@ -163,9 +183,9 @@ int main(const int argc, char **argv)
     {
       nnfw_tensorinfo ti;
       NNPR_ENSURE_STATUS(nnfw_output_tensorinfo(session, i, &ti));
-      if (ti.dtype != NNFW_TYPE_TENSOR_FLOAT32)
+      if (ti.dtype != NNFW_TYPE_TENSOR_FLOAT32 && ti.dtype != NNFW_TYPE_TENSOR_QUANT8_ASYMM)
       {
-        std::cerr << "Only float 32bit is supported." << std::endl;
+        std::cerr << "Only FLOAT32 and QUANT8_ASYMM are supported." << std::endl;
         exit(-1);
       }
     }
@@ -185,7 +205,7 @@ int main(const int argc, char **argv)
 
   // prepare input
 
-  std::vector<std::vector<float>> inputs(num_inputs);
+  std::vector<Allocation> inputs(num_inputs);
 
   auto loadInputs = [session, num_inputs, &inputs](const std::string &filename) {
     try
@@ -212,7 +232,7 @@ int main(const int argc, char **argv)
 
         // allocate memory for data
         auto sz = num_elems(&ti);
-        inputs[i].resize(sz);
+        inputs[i].alloc(sz * sizeof(float));
         // read data
         data_set.read(inputs[i].data(), H5::PredType::NATIVE_FLOAT);
 
@@ -241,10 +261,22 @@ int main(const int argc, char **argv)
     {
       nnfw_tensorinfo ti;
       NNPR_ENSURE_STATUS(nnfw_input_tensorinfo(session, i, &ti));
-      auto input_num_elements = num_elems(&ti);
-      inputs[i] = randomData<float>(randgen, input_num_elements);
-      NNPR_ENSURE_STATUS(nnfw_set_input(session, i, NNFW_TYPE_TENSOR_FLOAT32, inputs[i].data(),
-                                        sizeof(float) * input_num_elements));
+      auto input_size_in_bytes = bufsize_for(&ti);
+      inputs[i].alloc(input_size_in_bytes);
+      switch (ti.dtype)
+      {
+        case NNFW_TYPE_TENSOR_FLOAT32:
+          randomData<float>(randgen, inputs[i].data(), num_elems(&ti));
+          break;
+        case NNFW_TYPE_TENSOR_QUANT8_ASYMM:
+          randomData<char>(randgen, inputs[i].data(), num_elems(&ti));
+          break;
+        default:
+          std::cerr << "Not supported input type" << std::endl;
+          std::exit(-1);
+      }
+      NNPR_ENSURE_STATUS(
+          nnfw_set_input(session, i, ti.dtype, inputs[i].data(), input_size_in_bytes));
       NNPR_ENSURE_STATUS(nnfw_set_input_layout(session, i, NNFW_LAYOUT_CHANNELS_LAST));
     }
   };
@@ -258,16 +290,16 @@ int main(const int argc, char **argv)
 
   uint32_t num_outputs = 0;
   NNPR_ENSURE_STATUS(nnfw_output_size(session, &num_outputs));
-  std::vector<std::vector<float>> outputs(num_outputs);
+  std::vector<Allocation> outputs(num_outputs);
 
   for (uint32_t i = 0; i < num_outputs; i++)
   {
     nnfw_tensorinfo ti;
     NNPR_ENSURE_STATUS(nnfw_output_tensorinfo(session, i, &ti));
-    auto output_num_elements = num_elems(&ti);
-    outputs[i].resize(output_num_elements);
-    NNPR_ENSURE_STATUS(nnfw_set_output(session, i, NNFW_TYPE_TENSOR_FLOAT32, outputs[i].data(),
-                                       sizeof(float) * output_num_elements));
+    auto output_size_in_bytes = bufsize_for(&ti);
+    outputs[i].alloc(output_size_in_bytes);
+    NNPR_ENSURE_STATUS(
+        nnfw_set_output(session, i, ti.dtype, outputs[i].data(), output_size_in_bytes));
     NNPR_ENSURE_STATUS(nnfw_set_output_layout(session, i, NNFW_LAYOUT_CHANNELS_LAST));
   }