nnpackage_run grows bigger and bigger.
This patch factors out the feature implementation into separate files.
Signed-off-by: Sanggyu Lee <sg5.lee@samsung.com>
list(APPEND NNPACKAGE_RUN_SRCS "src/nnpackage_run.cc")
list(APPEND NNPACKAGE_RUN_SRCS "src/args.cc")
+list(APPEND NNPACKAGE_RUN_SRCS "src/h5formatter.cc")
+list(APPEND NNPACKAGE_RUN_SRCS "src/nnfw_util.cc")
nnas_find_package(Boost REQUIRED)
--- /dev/null
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __NNPACKAGE_RUN_ALLOCATION_H__
+#define __NNPACKAGE_RUN_ALLOCATION_H__
+
+#include <cstdlib>
+#include <cstdint>
+
+namespace NNPackageRun
+{
+// RAII owner of a single malloc'd buffer used for model input/output storage.
+// NOTE(review): copy construction/assignment are compiler-generated, so copying
+// an Allocation produces two owners of the same pointer and a double free() on
+// destruction; std::vector<Allocation> is only safe while elements are never
+// copied (e.g. the vector never reallocates) -- consider deleting the copy
+// operations (Rule of Five). TODO confirm with callers.
+class Allocation
+{
+public:
+  // Starts empty; data() is nullptr until alloc() is called.
+  Allocation() : data_(nullptr) {}
+  // free(nullptr) is a no-op, so destroying a never-allocated instance is safe.
+  ~Allocation() { free(data_); }
+  void *data() const { return data_; }
+  // Allocates sz bytes and takes ownership, returning the new buffer.
+  // NOTE(review): a previously held buffer is leaked if alloc() is called
+  // twice, and a nullptr result (allocation failure) is not checked here.
+  void *alloc(uint64_t sz) { return data_ = malloc(sz); }
+private:
+  void *data_; // owned heap buffer (malloc/free)
+};
+} // end of namespace
+
+#endif // __NNPACKAGE_RUN_ALLOCATION_H__
--- /dev/null
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "h5formatter.h"
+#include "nnfw.h"
+#include "nnfw_util.h"
+
+#include <cassert>
+#include <iostream>
+#include <stdexcept>
+#include <cstdlib>
+#include <H5Cpp.h>
+
+namespace NNPackageRun
+{
+static const char *h5_value_grpname = "value";
+
+// Reads input tensors from an HDF5 file and binds them to the session.
+//
+// The file is expected to contain one dataset per model input under the
+// "value" group, named by input index ("/value/<i>"), each of float32 type
+// (either endianness; HDF5 converts to native order on read). Buffers are
+// owned by the caller-provided `inputs` vector. On any HDF5 or validation
+// error the process exits with -1.
+//
+// @param filename  path of the HDF5 file to read
+// @param inputs    pre-sized vector receiving one allocation per input
+void H5Formatter::loadInputs(const std::string &filename, std::vector<Allocation> &inputs)
+{
+  uint32_t num_inputs;
+  NNPR_ENSURE_STATUS(nnfw_input_size(session, &num_inputs));
+  try
+  {
+    // Turn off the automatic error printing.
+    H5::Exception::dontPrint();
+
+    H5::H5File file(filename, H5F_ACC_RDONLY);
+    H5::Group value_group = file.openGroup(h5_value_grpname);
+    for (uint32_t i = 0; i < num_inputs; ++i)
+    {
+      nnfw_tensorinfo ti;
+      NNPR_ENSURE_STATUS(nnfw_input_tensorinfo(session, i, &ti));
+
+      H5::DataSet data_set = value_group.openDataSet(std::to_string(i));
+
+      // check type: accept float32 regardless of on-disk endianness
+      H5::DataType type = data_set.getDataType();
+      if (!(type == H5::PredType::IEEE_F32BE || type == H5::PredType::IEEE_F32LE))
+      {
+        throw std::runtime_error("h5 input has non-float32 type. nnpkg_run supports float32 only.");
+      }
+
+      // allocate memory for data (byte size computed once, reused below)
+      uint64_t bufsz = num_elems(&ti) * sizeof(float);
+      inputs[i].alloc(bufsz);
+      // read data; HDF5 converts the stored values to native float order
+      data_set.read(inputs[i].data(), H5::PredType::NATIVE_FLOAT);
+
+      NNPR_ENSURE_STATUS(
+          nnfw_set_input(session, i, NNFW_TYPE_TENSOR_FLOAT32, inputs[i].data(), bufsz));
+      NNPR_ENSURE_STATUS(nnfw_set_input_layout(session, i, NNFW_LAYOUT_CHANNELS_LAST));
+    }
+  }
+  catch (const H5::Exception &e)
+  {
+    H5::Exception::printErrorStack();
+    std::exit(-1);
+  }
+  catch (const std::exception &e)
+  {
+    std::cerr << e.what() << std::endl;
+    std::exit(-1);
+  }
+}
+
+// Dumps every session output tensor into an HDF5 file.
+//
+// Each output is written as a big-endian float32 dataset "/value/<i>" shaped
+// by the rank/dims reported by nnfw_output_tensorinfo. A negative dimension
+// aborts with an error message; HDF5 failures print the error stack and the
+// process exits with -1 in both cases.
+//
+// @param filename  path of the HDF5 file to create (truncated if it exists)
+// @param outputs   buffers previously filled by running the session
+void H5Formatter::dumpOutputs(const std::string &filename, std::vector<Allocation> &outputs)
+{
+  uint32_t num_outputs;
+  NNPR_ENSURE_STATUS(nnfw_output_size(session, &num_outputs));
+  try
+  {
+    // Turn off the automatic error printing.
+    H5::Exception::dontPrint();
+
+    H5::H5File file(filename, H5F_ACC_TRUNC);
+    H5::Group value_group = file.createGroup(h5_value_grpname);
+    for (uint32_t i = 0; i < num_outputs; i++)
+    {
+      nnfw_tensorinfo ti;
+      NNPR_ENSURE_STATUS(nnfw_output_tensorinfo(session, i, &ti));
+      std::vector<hsize_t> dims(ti.rank);
+      for (uint32_t j = 0; j < ti.rank; ++j)
+      {
+        if (ti.dims[j] >= 0)
+          dims[j] = static_cast<hsize_t>(ti.dims[j]);
+        else
+        {
+          std::cerr << "Negative dimension in output tensor" << std::endl;
+          std::exit(-1); // was bare exit(); std::exit for consistency with the rest of this file
+        }
+      }
+      H5::DataSpace data_space(ti.rank, dims.data());
+      H5::DataSet data_set =
+          value_group.createDataSet(std::to_string(i), H5::PredType::IEEE_F32BE, data_space);
+      // HDF5 converts from native float order to the big-endian on-disk type
+      data_set.write(outputs[i].data(), H5::PredType::NATIVE_FLOAT);
+    }
+  }
+  catch (const H5::Exception &e)
+  {
+    H5::Exception::printErrorStack();
+    std::exit(-1);
+  }
+}
+
+} // end of namespace NNPackageRun
--- /dev/null
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __NNPACKAGE_RUN_H5FORMATTER_H__
+#define __NNPACKAGE_RUN_H5FORMATTER_H__
+
+#include <string>
+#include <vector>
+
+#include "allocation.h"
+
+struct nnfw_session;
+
+namespace NNPackageRun
+{
+// Loads model inputs from, and dumps model outputs to, HDF5 files for the
+// nnfw session given at construction. Does not own the session.
+class H5Formatter
+{
+public:
+  // sess must outlive this formatter; ownership stays with the caller.
+  // NOTE(review): single-argument ctor is not explicit -- consider marking it.
+  H5Formatter(nnfw_session *sess) : session(sess) {}
+  // Reads "/value/<i>" float32 datasets from filename into inputs and binds
+  // them to the session (see h5formatter.cc).
+  void loadInputs(const std::string &filename, std::vector<Allocation> &inputs);
+  // Writes each output buffer to "/value/<i>" in filename as float32.
+  void dumpOutputs(const std::string &filename, std::vector<Allocation> &outputs);
+
+private:
+  nnfw_session *session; // non-owning
+};
+} // end of namespace
+
+#endif // __NNPACKAGE_RUN_H5FORMATTER_H__
--- /dev/null
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <cassert>
+#include <string>
+#include "nnfw.h"
+
+namespace NNPackageRun
+{
+// Returns the number of elements in the tensor described by ti
+// (product of all dimensions; 1 for a rank-0 tensor).
+uint64_t num_elems(const nnfw_tensorinfo *ti)
+{
+  uint64_t n = 1;
+  for (uint32_t i = 0; i < ti->rank; ++i)
+  {
+    // NOTE(review): negative dims presumably mark unknown/dynamic shapes
+    // (dumpOutputs treats them as errors); here the guard is debug-only.
+    assert(ti->dims[i] >= 0);
+    n *= ti->dims[i];
+  }
+  return n;
+}
+
+// Returns the byte size of a buffer large enough to hold the tensor's data:
+// per-element size (by dtype) times the element count.
+uint64_t bufsize_for(const nnfw_tensorinfo *ti)
+{
+  // Indexed by NNFW_TYPE_TENSOR_* dtype values 0..3.
+  static int elmsize[] = {
+    sizeof(float), /* NNFW_TYPE_TENSOR_FLOAT32 */
+    sizeof(int),   /* NNFW_TYPE_TENSOR_INT32 */
+    sizeof(char),  /* NNFW_TYPE_TENSOR_QUANT8_ASYMM */
+    sizeof(bool),  /* NNFW_TYPE_TENSOR_BOOL = 3 */
+  };
+  // Guard against out-of-range dtype values (debug builds only): the table
+  // above silently goes stale if new NNFW_TYPE_* enumerators are added.
+  assert(static_cast<size_t>(ti->dtype) < sizeof(elmsize) / sizeof(elmsize[0]));
+  return elmsize[ti->dtype] * num_elems(ti);
+}
+
+} // end of namespace
\ No newline at end of file
--- /dev/null
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __NNPACKAGE_RUN_NNFW_UTIL_H__
+#define __NNPACKAGE_RUN_NNFW_UTIL_H__
+
+#include "nnfw.h"
+
+// Exits the process with -1 when an nnfw API call does not return
+// NNFW_STATUS_NO_ERROR. do/while(0) makes the macro statement-safe.
+#define NNPR_ENSURE_STATUS(a) \
+  do \
+  { \
+    if ((a) != NNFW_STATUS_NO_ERROR) \
+    { \
+      exit(-1); \
+    } \
+  } while (0)
+
+namespace NNPackageRun
+{
+// Number of elements in the tensor (product of dims).
+uint64_t num_elems(const nnfw_tensorinfo *ti);
+// Byte size of a buffer holding the tensor's data (element size * count).
+uint64_t bufsize_for(const nnfw_tensorinfo *ti);
+} // namespace NNPackageRun
+
+#endif // __NNPACKAGE_RUN_NNFW_UTIL_H__
* limitations under the License.
*/
+#include "allocation.h"
#include "args.h"
+#include "benchmark.h"
+#include "h5formatter.h"
#include "tflite/Diff.h"
#include "nnfw.h"
-#include "benchmark.h"
-
-#include <H5Cpp.h>
+#include "nnfw_util.h"
#include <cassert>
#include <chrono>
#include <boost/filesystem.hpp>
-#define NNPR_ENSURE_STATUS(a) \
- do \
- { \
- if ((a) != NNFW_STATUS_NO_ERROR) \
- { \
- exit(-1); \
- } \
- } while (0)
-
-namespace
-{
-
-uint64_t num_elems(const nnfw_tensorinfo *ti)
+namespace NNPackageRun
{
- uint64_t n = 1;
- for (uint32_t i = 0; i < ti->rank; ++i)
- {
- assert(ti->dims[i] >= 0);
- n *= ti->dims[i];
- }
- return n;
-}
-
-uint64_t bufsize_for(const nnfw_tensorinfo *ti)
-{
- static int elmsize[] = {
- sizeof(float), /* NNFW_TYPE_TENSOR_FLOAT32 */
- sizeof(int), /* NNFW_TYPE_TENSOR_INT32 */
- sizeof(char), /* NNFW_TYPE_TENSOR_QUANT8_ASYMM */
- sizeof(bool), /* NNFW_TYPE_TENSOR_BOOL = 3 */
- };
- return elmsize[ti->dtype] * num_elems(ti);
-}
template <class T> void randomData(RandomGenerator &randgen, void *data, uint64_t size)
{
for (uint64_t i = 0; i < size; i++)
reinterpret_cast<T *>(data)[i] = randgen.generate<T>();
}
-
-class Allocation
-{
-public:
- Allocation() : data_(nullptr) {}
- ~Allocation() { free(data_); }
- void *data() const { return data_; }
- void *alloc(uint64_t sz) { return data_ = malloc(sz); }
-private:
- void *data_;
-};
-
-} // unnamed namespace
+}
// TODO Replace this with nnfw::misc::benchmark::Accumulator
namespace benchmark
} // namespace benchmark
-static const char *h5_value_grpname = "value";
static const char *default_backend_cand = "acl_cl";
NNFW_STATUS resolve_op_backend(nnfw_session *session)
int main(const int argc, char **argv)
{
- NNPackageRun::Args args(argc, argv);
+ using namespace NNPackageRun;
+ Args args(argc, argv);
auto nnpackage_path = args.getPackageFilename();
std::unique_ptr<benchmark::MemoryPoller> mp{nullptr};
mp_results[0] = mp->End("Compiling");
// prepare input
-
std::vector<Allocation> inputs(num_inputs);
- auto loadInputs = [session, num_inputs, &inputs](const std::string &filename) {
- try
- {
- // Turn off the automatic error printing.
- H5::Exception::dontPrint();
-
- H5::H5File file(filename, H5F_ACC_RDONLY);
- H5::Group value_group = file.openGroup(h5_value_grpname);
- for (uint32_t i = 0; i < num_inputs; ++i)
- {
- nnfw_tensorinfo ti;
- NNPR_ENSURE_STATUS(nnfw_input_tensorinfo(session, i, &ti));
-
- H5::DataSet data_set = value_group.openDataSet(std::to_string(i));
-
- // check type
- H5::DataType type = data_set.getDataType();
- if (!(type == H5::PredType::IEEE_F32BE || type == H5::PredType::IEEE_F32LE))
- {
- throw std::runtime_error(
- "h5 input has non-float32 type. nnpkg_run supports float32 only.");
- }
-
- // allocate memory for data
- auto sz = num_elems(&ti);
- inputs[i].alloc(sz * sizeof(float));
- // read data
- data_set.read(inputs[i].data(), H5::PredType::NATIVE_FLOAT);
-
- NNPR_ENSURE_STATUS(nnfw_set_input(session, i, NNFW_TYPE_TENSOR_FLOAT32, inputs[i].data(),
- sizeof(float) * num_elems(&ti)));
- NNPR_ENSURE_STATUS(nnfw_set_input_layout(session, i, NNFW_LAYOUT_CHANNELS_LAST));
- }
- }
- catch (const H5::Exception &e)
- {
- H5::Exception::printErrorStack();
- std::exit(-1);
- }
- catch (const std::exception &e)
- {
- std::cerr << e.what() << std::endl;
- std::exit(-1);
- }
- };
-
auto generateInputs = [session, num_inputs, &inputs]() {
// generate random data
const int seed = 1;
NNPR_ENSURE_STATUS(nnfw_set_input_layout(session, i, NNFW_LAYOUT_CHANNELS_LAST));
}
};
-
if (!args.getLoadFilename().empty())
- loadInputs(args.getLoadFilename());
+ H5Formatter(session).loadInputs(args.getLoadFilename(), inputs);
else
generateInputs();
}
// dump output tensors
-
- auto dumpOutputs = [session, num_outputs, &outputs](const std::string &filename) {
- try
- {
- // Turn off the automatic error printing.
- H5::Exception::dontPrint();
-
- H5::H5File file(filename, H5F_ACC_TRUNC);
- H5::Group value_group = file.createGroup(h5_value_grpname);
- for (uint32_t i = 0; i < num_outputs; i++)
- {
- nnfw_tensorinfo ti;
- NNPR_ENSURE_STATUS(nnfw_output_tensorinfo(session, i, &ti));
- std::vector<hsize_t> dims(ti.rank);
- for (uint32_t j = 0; j < ti.rank; ++j)
- {
- assert(ti.dims[j] >= 0);
- dims[j] = ti.dims[j];
- }
- H5::DataSpace data_space(ti.rank, dims.data());
- H5::DataSet data_set =
- value_group.createDataSet(std::to_string(i), H5::PredType::IEEE_F32BE, data_space);
- data_set.write(outputs[i].data(), H5::PredType::NATIVE_FLOAT);
- }
- }
- catch (const H5::Exception &e)
- {
- H5::Exception::printErrorStack();
- std::exit(-1);
- }
- };
-
if (!args.getDumpFilename().empty())
- dumpOutputs(args.getDumpFilename());
+ H5Formatter(session).dumpOutputs(args.getDumpFilename(), outputs);
NNPR_ENSURE_STATUS(nnfw_close_session(session));