set(NNC_HDF5_SUPPORTED OFF)
endif()
+# We need protobuf and pytorch sources to generate caffe2.pb.h and caffe2.pb.cc
+nncc_find_package(PytorchSource QUIET)
+nncc_find_package(Protobuf QUIET)
+if (Protobuf_FOUND AND PytorchSource_FOUND)
+ set(NNC_FRONTEND_CAFFE2_ENABLED ON)
+else()
+ set(NNC_FRONTEND_CAFFE2_ENABLED OFF)
+endif()
+
# Try to get compiled caffe proto and return if not successful
# Note: this creates a target called "caffeproto" that contains compiled caffe.proto sources,
# and after linking with it caffe.pb.h will be available as "caffe/proto/caffe.pb.h"
set(NNC_SOFT_BACKEND_DIR ${CMAKE_CURRENT_SOURCE_DIR}/passes/soft_backend)
set(NNC_INTERPRETER_DIR ${CMAKE_CURRENT_SOURCE_DIR}/passes/interpreter)
set(NNC_CAFFE_FRONTEND_DIR ${CMAKE_CURRENT_SOURCE_DIR}/passes/caffe_frontend)
+set(NNC_CAFFE2_FRONTEND_DIR ${CMAKE_CURRENT_SOURCE_DIR}/passes/caffe2_frontend)
set(NNC_TFLITE_FRONTEND_DIR ${CMAKE_CURRENT_SOURCE_DIR}/passes/tflite_frontend)
set(NNC_CORE_DIR ${CMAKE_CURRENT_SOURCE_DIR}/core)
set(NNC_SUPPORT_DIR ${CMAKE_CURRENT_SOURCE_DIR}/support)
#include "pass/PassData.h"
#include "passes/caffe_frontend/caffe_importer.h"
+#include "passes/caffe2_frontend/caffe2_importer.h"
#include "passes/tflite_frontend/tflite_importer.h"
#include "passes/interpreter/InterpreterPass.h"
#include "passes/soft_backend/CPPGenerator.h"
} // runPasses
+/**
+ * @brief Build a human-readable list of the frontend option names that were
+ *        compiled into this binary (used when reporting frontend-selection errors).
+ * @return string like " '--caffe'  '--tflite' " containing one quoted name per
+ *         enabled frontend
+ */
+static std::string getFrontendOptionsString() {
+  std::string res;
+
+#ifdef NNC_FRONTEND_CAFFE_ENABLED
+  res += " '" + cli::caffeFrontend.getNames()[0] + "' ";
+#endif // NNC_FRONTEND_CAFFE_ENABLED
+
+#ifdef NNC_FRONTEND_CAFFE2_ENABLED
+  res += " '" + cli::caffe2Frontend.getNames()[0] + "' ";
+#endif // NNC_FRONTEND_CAFFE2_ENABLED
+
+#ifdef NNC_FRONTEND_ONNX_ENABLED
+  // Fixed: the option object is declared as 'onnxFrontend' (see the extern
+  // declarations in Options.h); 'ONNXFrontend' would fail to compile whenever
+  // ONNX support is actually enabled.
+  res += " '" + cli::onnxFrontend.getNames()[0] + "' ";
+#endif // NNC_FRONTEND_ONNX_ENABLED
+
+#ifdef NNC_FRONTEND_TFLITE_ENABLED
+  res += " '" + cli::tflFrontend.getNames()[0] + "' ";
+#endif // NNC_FRONTEND_TFLITE_ENABLED
+
+  return res;
+}
+
/**
* @brief Register frontend pass
* @throw DriverException if errors occurred
std::unique_ptr<Pass> pass;
- if (cli::caffeFrontend.isDisabled() && cli::tflFrontend.isDisabled()) {
- throw DriverException("frontends are not available");
- }
-
- if (cli::caffeFrontend && cli::tflFrontend) {
- throw DriverException("only one of the following options are allowed"
- " to be set in the same time: '"
- + cli::caffeFrontend.getNames()[0] + "', '"
- + cli::tflFrontend.getNames()[0] + "'");
- }
+ // For bool, the value false is converted to zero and the value true is converted to one
+ if (cli::caffeFrontend + cli::caffe2Frontend + cli::tflFrontend + cli::onnxFrontend != 1)
+ throw DriverException("One and only one of the following options are allowed and have to be set"
+ " to be set in the same time: " + getFrontendOptionsString());
if (cli::caffeFrontend) {
#ifdef NNC_FRONTEND_CAFFE_ENABLED
- pass = std::move(std::unique_ptr<Pass>(new CaffeImporter(cli::inputFiles[0])));
+ pass = std::move(std::unique_ptr<Pass>(new CaffeImporter(cli::inputFile)));
#endif // NNC_FRONTEND_CAFFE_ENABLED
- }
- else if ( cli::onnxFrontend )
- {
+ } else if (cli::caffe2Frontend) {
+#ifdef NNC_FRONTEND_CAFFE2_ENABLED
+    // FIXME: caffe2 input shapes are not provided by the model and must be set from cli;
+    // the current 'inputShapes' option provides only one shape, while a model may have several inputs
+ pass = std::move(std::unique_ptr<Pass>(new Caffe2Importer(cli::inputFile, cli::initNet,
+ {cli::inputShapes})));
+#endif // NNC_FRONTEND_CAFFE2_ENABLED
+ } else if ( cli::onnxFrontend ) {
#ifdef NNC_FRONTEND_ONNX_ENABLED
pass = std::move(std::unique_ptr<Pass>(new ONNXImporter()));
#endif // NNC_FRONTEND_ONNX_ENABLED
}
- else if ( cli::tflFrontend )
- {
+ else if ( cli::tflFrontend ) {
#ifdef NNC_FRONTEND_TFLITE_ENABLED
- pass = std::move(std::unique_ptr<Pass>(new TfliteImporter(cli::inputFiles[0])));
+ pass = std::move(std::unique_ptr<Pass>(new TfliteImporter(cli::inputFile)));
#endif // NNC_FRONTEND_TFLITE_ENABLED
} else {
- throw DriverException("one of the following options must be defined: '"
- + cli::caffeFrontend.getNames()[0] + "', '"
- + cli::tflFrontend.getNames()[0] + "'");
+ throw DriverException("One of the following options must be defined: '"
+ + getFrontendOptionsString());
}
_passManager.registerPass(std::move(pass));
showopt(false)
#endif // NNC_FRONTEND_ONNX_ENABLED
);
+
+Option<bool> caffe2Frontend(optname("--caffe2"),
+ overview("treat input file as Caffe2 model (predict_net.pb)"),
+ false,
+ optional(true),
+ optvalues(""),
+ nullptr,
+ separators(""),
+#ifdef NNC_FRONTEND_CAFFE2_ENABLED
+ showopt(true),
+#else
+ showopt(false),
+#endif // NNC_FRONTEND_CAFFE2_ENABLED
+ IOption::Group::caffe2
+ );
+
+Option<std::vector<int>> inputShapes(optname("--input-shape"),
+ overview("Shape of caffe2 input"),
+ std::vector<int>{},
+ optional(false),
+ optvalues(""),
+ nullptr,
+ separators(""),
+#ifdef NNC_FRONTEND_CAFFE2_ENABLED
+ showopt(true),
+#else
+ showopt(false),
+#endif // NNC_FRONTEND_CAFFE2_ENABLED
+ IOption::Group::caffe2
+ );
+
+Option<std::string> initNet(optname("--init-net"),
+ overview("path to Caffe2 model weights (init_net.pb)"),
+ std::string(),
+ optional(false),
+ optvalues(""),
+ nullptr,
+ separators(""),
+#ifdef NNC_FRONTEND_CAFFE2_ENABLED
+ showopt(true),
+#else
+ showopt(false),
+#endif // NNC_FRONTEND_CAFFE2_ENABLED
+ IOption::Group::caffe2
+ );
+
Option<bool> tflFrontend(optname("--tflite"),
overview("treat input file as Tensor Flow Lite model"),
false,
/**
* Options for *frontend*
*/
-Option<std::vector<std::string>> inputFiles(optname("--nnmodel, -m"),
- overview("specify input files with serialized NN models: "
- "single model file must be provided for caffe, tflite and onnx frameworks; "
- "two model files must be specified for caffe2 framework (init_net and predict_net)"),
- std::vector<std::string>{},
- optional(false),
- optvalues(""),
- checkModelFiles);
+Option<std::string> inputFile(optname("--nnmodel, -m"),
+ overview("specify input file with serialized NN models"),
+ std::string(),
+ optional(false),
+ optvalues(""),
+ checkInFile);
/**
* Options for *backend*
#cmakedefine NNC_FRONTEND_CAFFE_ENABLED
/**
+ * @brief define that CAFFE2 frontend is enabled
+ */
+#cmakedefine NNC_FRONTEND_CAFFE2_ENABLED
+
+/**
* @brief define that ONNX frontend is enabled
*/
#cmakedefine NNC_FRONTEND_ONNX_ENABLED
/**
* Options for compiler driver
*/
+extern Option<bool> caffe2Frontend; // frontend for CAFFE2 AI framework
+extern Option<std::vector<int>> inputShapes;
+extern Option<std::string> initNet;
+
extern Option<bool> caffeFrontend; // frontend for CAFFE AI framework
extern Option<bool> tflFrontend; // frontend for TensorFlow Lite AI framework
-extern Option<bool> onnxFrontend; // frontend for ONNX AI framework
+extern Option<bool> onnxFrontend; // frontend for ONNX AI framework
// valid values for target option
#define NNC_TARGET_X86_CPP "x86-c++"
/**
* Frontend options
*/
-extern Option<std::vector<std::string>> inputFiles; // files contains model of specific AI framework
+extern Option<std::string> inputFile; // files contains model of specific AI framework
/**
* Options for backend
--- /dev/null
+
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef NNCC_CAFFE2_IMPORTER_H
+#define NNCC_CAFFE2_IMPORTER_H
+
+#include <set>
+#include <string>
+#include <memory>
+
+#include "passes/common_frontend/nn_importer.h"
+
+#include "pass/Pass.h"
+#include "pass/PassData.h"
+
+// Use forward declarations for non interface classes
+namespace caffe2 {
+class OperatorDef;
+class NetDef;
+}
+namespace nnc {
+// class Caffe2OpCreator;
+enum class SupportedCaffe2OpType : uint8_t;
+}
+
+namespace nnc {
+
+using MIRTensor = nnc::mir::TensorVariant;
+
+class Caffe2Importer : public NNImporter, public Pass {
+public:
+ explicit Caffe2Importer(std::string predictNet, std::string initNet,
+ std::vector<std::vector<int>> inputShapes);
+
+ /**
+ * @brief Import model from file, must be called before 'createIR' method
+ * @throw PassException in case, if model couldn't be parsed or NNC doesn't support it
+ */
+ void import() override;
+
+ /**
+ * @brief Create MIR graph from caffe model, must be called after 'import' method
+ * @return MIR graph, corresponding to processed caffe model
+ */
+ mir::Graph* createIR() override;
+
+ PassData run(PassData) override;
+ void cleanup() override;
+
+ ~Caffe2Importer();
+
+private:
+ std::string _predictNet;
+ std::string _initNet;
+ mir::Graph* _graph;
+ std::unique_ptr<::caffe2::NetDef> _net;
+ // std::unique_ptr<Caffe2OpCreator> _opCreator;
+ std::vector<mir::Shape> _inputShapes;
+
+ static const std::map<std::string, SupportedCaffe2OpType> _operatorTypes;
+ std::set<std::string> _problemsOpSet;
+
+ // This map maps caffe2 operators names to MIR operators
+ // that correspond to previous caffe2 operators
+ std::map<std::string, mir::IODescriptor> _blobNameToIODescriptor;
+ mir::Operation* _lastNode;
+
+ std::map<std::string, std::shared_ptr<MIRTensor>> _MIRTensors;
+
+ /**
+ * @brief Pass through caffe2 graph and collect ops unsupported by NNC
+ * @throw PassException with message, containing detected problems
+ */
+ // void collectUnsupportedOps();
+
+ /**
+ * @brief Collecting unsupported parts of caffe2 operator
+ */
+ // void collectUnsupportedOp(const ::caffe2::OperatorDef&);
+
+ /**
+ * @brief Creating MIR node from single caffe2 operator
+ */
+ // void createMIRNodesFromOp(const ::caffe2::OperatorDef&);
+
+ /**
+   * @brief Since caffe2 tensor values are stored separately (in init_net), preload them into _MIRTensors
+ */
+ // void preloadAllTensors();
+
+ /**
+ * @brief Creates MIR tensor from caffe2 givenTensorFill op
+ */
+ // std::shared_ptr<mir::TensorVariant> createTensor(const ::caffe2::OperatorDef&);
+
+ /**
+ * @brief Returns MIR ops, under given caffe2 op
+ */
+ // std::vector<mir::IODescriptor> getInputMIROps(const ::caffe2::OperatorDef&);
+
+ /**
+ * @brief create MIR inputs with given names and shapes
+ */
+ // void createGraphInputs(const std::vector<std::string>&, const std::vector<mir::Shape>&);
+
+ /**
+ * @brief Mark output MIR nodes
+ */
+ // void setGraphOutputs();
+
+ /**
+ * @brief Set MIR node names
+ */
+ // void setIrNodeNames();
+};
+
+} // namespace nnc
+
+#endif // NNCC_CAFFE2_IMPORTER_H
#include <string>
#include <cstdint>
-namespace nnc
-{
+namespace nnc {
// Class that can be used to memory map a file with NN model
-class ModelAllocation
-{
+class ModelAllocation {
public:
explicit ModelAllocation(std::string filename);
virtual ~ModelAllocation();
size_t getNumBytes();
private:
- enum MmapState
- {
- MAPPED,
- UNMAPPED
+ enum MmapState {
+ mapped,
+ unmapped
};
- MmapState mmapState = UNMAPPED;
- void *dataPnt = nullptr;
- size_t numBytes = 0;
+ MmapState _mmapState = unmapped;
+ void* _dataPnt = nullptr;
+ size_t _numBytes = 0;
- int fd = -1;
+ int _fd = -1;
};
} // namespace nnc
--- /dev/null
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef NNCC_PROTO_HELPER_H
+#define NNCC_PROTO_HELPER_H
+
+#include <fcntl.h>
+#include <unistd.h>
+
+#include <climits>
+#include <iostream>
+#include <memory>
+
+#include "google/protobuf/io/coded_stream.h"
+#include "google/protobuf/io/zero_copy_stream_impl.h"
+#include "google/protobuf/text_format.h"
+
+#include "passes/common_frontend/model_allocation.h"
+
+namespace nnc {
+
+const int protoBytesLimit = INT_MAX;
+const int protoBytesWarningLimit = 1024 * 1024 * 512;
+
+/**
+ * @brief Parse a text-format protobuf message from a memory-mapped file.
+ * @param filename path to the text .prototxt file
+ * @param proto message to fill
+ * @return true on successful parse, false if the file could not be mapped or parsed
+ */
+template <typename protoType>
+bool readProtoFromTextFile(const char* filename, protoType* proto) {
+  // Fixed: the original passed the undeclared identifier 'unknown' instead of
+  // 'filename', which could not compile.
+  std::unique_ptr<ModelAllocation> protoMap(new ModelAllocation(filename));
+  if (protoMap->getDataPnt() == nullptr)
+    return false; // mmap failed (missing/empty file)
+
+  // Fixed: TextFormat::Parse expects a ZeroCopyInputStream, not a
+  // CodedInputStream; feed it the mapped bytes through an ArrayInputStream.
+  google::protobuf::io::ArrayInputStream raw_input(
+      protoMap->getDataPnt(), static_cast<int>(protoMap->getNumBytes()));
+
+  return google::protobuf::TextFormat::Parse(&raw_input, proto);
+}
+
+/**
+ * @brief Parse a binary protobuf message from a memory-mapped file.
+ * @param filename path to the binary .pb file
+ * @param proto message to fill
+ * @return true on successful parse, false if the file could not be mapped or parsed
+ */
+template <typename protoType>
+bool readProtoFromBinaryFile(const char* filename, protoType* proto) {
+  // Fixed: the original passed the undeclared identifier 'unknown' instead of
+  // 'filename', which could not compile.
+  std::unique_ptr<ModelAllocation> protoMap(new ModelAllocation(filename));
+  if (protoMap->getDataPnt() == nullptr)
+    return false; // mmap failed (missing/empty file)
+
+  google::protobuf::io::CodedInputStream coded_input(
+      (const google::protobuf::uint8*)protoMap->getDataPnt(), protoMap->getNumBytes());
+  // Raise the default 64MB message-size limit so large models parse.
+  coded_input.SetTotalBytesLimit(protoBytesLimit, protoBytesWarningLimit);
+
+  return proto->ParseFromCodedStream(&coded_input);
+}
+
+} // namespace nnc
+
+#endif // NNCC_PROTO_HELPER_H
{
none = 0,
caffe2 = 1,
- onnx = 2
+ onnx = 2 // 'onnx' is currently unused
};
/**
_checker = checker;
_is_enabled = enabled;
- assert((_is_enabled || _is_optional) && "disabled option can't be required");
+ assert((_is_enabled || _is_optional || group != IOption::Group::none) && "disabled non-group option can't be required");
_group = group;
- _can_have_several_vals = std::is_same<T, std::vector<std::string>>::value;
+ _can_have_several_vals = std::is_same<T, std::vector<std::string>>::value
+ || std::is_same<T, std::vector<int>>::value;
assert(!(_can_have_several_vals && !_seps.empty()) && "option with several values can't have separators");
// register new option for parser
// prototypes of option checker functions
//
void checkInFile(const Option<std::string> &);
-void checkModelFiles(const Option<std::vector<std::string>> &);
void checkOutFile(const Option<std::string> &);
void checkOutDir(const Option<std::string> &);
void checkDebugFile(const Option<std::string> &);
add_subdirectory(tflite_frontend)
add_subdirectory(caffe_frontend)
add_subdirectory(onnx_frontend)
+add_subdirectory(caffe2_frontend)
#
# BACKENDs
--- /dev/null
+if (NOT NNC_FRONTEND_CAFFE2_ENABLED)
+  return()
+endif()
+
+###################
+#  Caffe2 proto   #
+###################
+
+# Compile caffe2.proto from pytorch sources.
+# Produces CAFFE2_PROTO_SOURCES and CAFFE2_PROTO_INCLUDE_DIRS variables.
+Protobuf_Generate(CAFFE2_PROTO "${CMAKE_CURRENT_BINARY_DIR}/generated/caffe2"
+                  "${PytorchSource_DIR}" "caffe2/proto/caffe2.proto")
+
+add_library(caffe2proto STATIC ${CAFFE2_PROTO_SOURCES})
+set_target_properties(caffe2proto PROPERTIES POSITION_INDEPENDENT_CODE ON)
+target_include_directories(caffe2proto PUBLIC ${CAFFE2_PROTO_INCLUDE_DIRS})
+# PUBLIC: anything linking caffe2proto also needs the protobuf runtime
+target_link_libraries(caffe2proto PUBLIC libprotobuf)
+
+###################
+# Caffe2 importer #
+###################
+
+file(GLOB caffe2_importer_sources *.cpp)
+# Collect headers too; this variable was previously referenced but never defined
+file(GLOB caffe2_importer_headers *.h)
+
+add_nnc_library(caffe2_importer SHARED ${caffe2_importer_sources}
+                ${caffe2_importer_headers})
+
+target_link_libraries(caffe2_importer PUBLIC caffe2proto)
+target_link_libraries(caffe2_importer PUBLIC nn_import_common)
+target_link_libraries(caffe2_importer PRIVATE nnc_support)
+target_link_libraries(caffe2_importer PRIVATE nnc_core)
+
+# install caffe2 frontend library
+install_nnc_library(caffe2_importer)
--- /dev/null
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <vector>
+#include <fstream>
+#include <sstream>
+#include <cassert>
+
+#include "passes/caffe2_frontend/caffe2_importer.h"
+#include "passes/common_frontend/shape_helper.h"
+#include "passes/common_frontend/proto_helper.h"
+
+#include "caffe2/proto/caffe2.pb.h"
+
+#include "caffe2_op_types.h"
+// #include "caffe2_op_creator.h"
+
+#include "core/modelIR/Shape.h"
+#include "core/modelIR/operations/VariableOp.h"
+#include "pass/PassException.h"
+
+#include "caffe2_proto_helper.h"
+
+namespace nnc {
+
+using namespace ::caffe2;
+using VariableOp = nnc::mir::ops::VariableOp;
+using nnc::mir::Shape;
+
+Caffe2Importer::Caffe2Importer(std::string predictNet, std::string initNet,
+ std::vector<std::vector<int>> shapes) :
+ _predictNet(std::move(predictNet)),
+ _initNet(std::move(initNet)),
+ _graph(new mir::Graph())/*,
+ _opCreator(new Caffe2OpCreator(_graph))*/ {
+ for(auto& shape : shapes)
+ _inputShapes.emplace_back(shape);
+}
+
+Caffe2Importer::~Caffe2Importer()=default;
+
+PassData Caffe2Importer::run(PassData) {
+ import();
+ return createIR();
+}
+
+void Caffe2Importer::cleanup() {
+  // Release the graph owned by the importer; reset the pointer so a repeated
+  // cleanup() (or cleanup() after a failed run) cannot double-delete it.
+  delete _graph;
+  _graph = nullptr;
+}
+
+/**
+ * @brief Load the serialized caffe2 model from disk.
+ *
+ * Parses the predict net (graph topology) into _net, then parses the init net
+ * (weights) into a temporary NetDef and merges it into _net, so later stages
+ * see a single NetDef containing both ops and tensor fills.
+ * @throw PassException if either protobuf file cannot be read or parsed
+ */
+void Caffe2Importer::import() {
+  GOOGLE_PROTOBUF_VERIFY_VERSION;
+
+  _net.reset(new NetDef());
+  if (!readProtoFromBinaryFile<::caffe2::NetDef>(_predictNet.c_str(), _net.get()))
+    throw PassException("Could not load model: " + _predictNet+ "\n");
+
+  std::unique_ptr<NetDef> net2;
+  net2.reset(new NetDef());
+  if (!readProtoFromBinaryFile<::caffe2::NetDef>(_initNet.c_str(), net2.get()))
+    throw PassException("Could not load model: " + _initNet+ "\n");
+  _net->MergeFrom(*net2);
+
+  // Unsupported-op detection and tensor preloading are not wired up yet:
+  // collectUnsupportedOps();
+
+  // preloadAllTensors();
+}
+
+mir::Graph* Caffe2Importer::createIR() {
+  // Graph construction is not implemented yet; everything after the throw
+  // (including the return) is intentionally unreachable until the op-creator
+  // code below is enabled.
+  throw PassException("Caffe2: NYI");
+  /*
+  for (auto& op : _net->op())
+    createMIRNodesFromOp(op);
+
+  setIrNodeNames();
+  setGraphOutputs();
+  */
+
+  return _graph;
+}
+
+const std::map<std::string, SupportedCaffe2OpType> Caffe2Importer::_operatorTypes = {
+ {"AveragePool", SupportedCaffe2OpType::averagePool},
+ {"Conv", SupportedCaffe2OpType::conv},
+ {"Dropout", SupportedCaffe2OpType::dropout},
+ {"FC", SupportedCaffe2OpType::FC},
+ {"GivenTensorFill", SupportedCaffe2OpType::givenTensorFill},
+ {"MaxPool", SupportedCaffe2OpType::maxPool},
+ {"Relu", SupportedCaffe2OpType::relu},
+ {"Softmax", SupportedCaffe2OpType::softmax},
+ {"Sum", SupportedCaffe2OpType::sum}
+};
+
+} // namespace nnc
--- /dev/null
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef NNCC_CAFFE2_OP_TYPES_H
+#define NNCC_CAFFE2_OP_TYPES_H
+
+namespace nnc {
+
+enum class SupportedCaffe2OpType : uint8_t {
+ averagePool,
+ conv,
+ dropout,
+ FC,
+ givenTensorFill,
+ maxPool,
+ relu,
+ softmax,
+ sum
+};
+
+} // namespace nnc
+
+#endif // NNCC_CAFFE2_OP_TYPES_H
* limitations under the License.
*/
-#ifndef NNCC_PROTO_READER_H
-#define NNCC_PROTO_READER_H
+#include "caffe2/proto/caffe2.pb.h"
-#include "caffe/proto/caffe.pb.h"
+#include "pass/PassException.h"
-#include "google/protobuf/io/coded_stream.h"
-#include "google/protobuf/io/zero_copy_stream_impl.h"
-#include "google/protobuf/text_format.h"
+#include "caffe2_proto_helper.h"
namespace nnc {
-using google::protobuf::io::FileInputStream;
-using google::protobuf::io::ZeroCopyInputStream;
-using google::protobuf::io::CodedInputStream;
+// Returns the first argument in 'args' whose name() equals 'name'.
+// Throws PassException when no matching argument exists — callers that only
+// want to probe for presence should use hasArgument() instead.
+const ::caffe2::Argument& findArgumentByName(RepArgument args, std::string name) {
+  for (auto& arg : args)
+    if (arg.name() == name)
+      return arg;
+  throw PassException("Can't find argument with name: " + name);
+}
-bool readProtoFromTextFile(const char* filename, ::caffe::NetParameter* proto);
-
-bool readProtoFromBinaryFile(const char* filename, ::caffe::NetParameter* proto);
+// Linear scan for an argument with the given name.
+// Fixed: dropped the meaningless top-level 'const' on the by-value bool return
+// (it is ignored by the language and triggers -Wignored-qualifiers); the
+// function type is unchanged, so any prior declaration still matches.
+bool hasArgument(RepArgument args, std::string name) {
+  for (auto& arg : args)
+    if (arg.name() == name)
+      return true;
+  return false;
+}
} // namespace nnc
-
-#endif // NNCC_PROTO_READER_H
--- /dev/null
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef NNCC_CAFFE2_PROTO_HELPER_H
+#define NNCC_CAFFE2_PROTO_HELPER_H
+
+#include <string>
+
+// Needed for RepeatedPtrField; previously this header compiled only when the
+// includer had already pulled in caffe2.pb.h.
+#include "google/protobuf/repeated_field.h"
+
+// Forward declaration suffices: the helpers only traverse references.
+namespace caffe2 {
+class Argument;
+}
+
+namespace nnc {
+
+using RepArgument = const ::google::protobuf::RepeatedPtrField<::caffe2::Argument>&;
+
+/**
+ * @brief Find the argument with the given name in an operator's argument list
+ * @throw PassException if no such argument exists
+ */
+const ::caffe2::Argument& findArgumentByName(RepArgument args, std::string name);
+
+/**
+ * @brief Check whether an argument with the given name is present
+ */
+bool hasArgument(RepArgument args, std::string name);
+
+} // namespace nnc
+
+#endif // NNCC_CAFFE2_PROTO_HELPER_H
#include "passes/caffe_frontend/caffe_importer.h"
#include "caffe_op_creator.h"
#include "caffe_op_types.h"
-#include "proto_reader.h"
#include "core/modelIR/Shape.h"
#include "core/modelIR/TensorUtil.h"
#include "pass/PassException.h"
#include "passes/common_frontend/shape_helper.h"
+#include "passes/common_frontend/proto_helper.h"
namespace nnc {
GOOGLE_PROTOBUF_VERIFY_VERSION;
_net.reset(new NetParameter());
- if (!readProtoFromBinaryFile(_modelFilename.c_str(), _net.get()))
+ if (!readProtoFromBinaryFile<::caffe::NetParameter>(_modelFilename.c_str(), _net.get()))
throw PassException("Could not load model: " + _modelFilename + "\n");
collectUnsupportedLayers();
+++ /dev/null
-/*
- * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include <iostream>
-#include <fcntl.h>
-#include <unistd.h>
-#include <memory>
-
-#include "caffe/proto/caffe.pb.h"
-
-#include "proto_reader.h"
-
-namespace nnc {
-
-const int protoBytesLimit = INT_MAX;
-const int protoBytesWarningLimit = 1024 * 1024 * 512;
-
-bool readProtoFromTextFile(const char* filename, ::caffe::NetParameter* proto) {
- int fd = open(filename, O_RDONLY);
- if (fd == -1) {
- std::cout << "File not found: " << filename << std::endl;
- return false;
- }
-
- FileInputStream input{fd};
-
- bool success = google::protobuf::TextFormat::Parse(&input, proto);
-
- close(fd);
-
- return success;
-}
-
-bool readProtoFromBinaryFile(const char* filename, ::caffe::NetParameter* proto) {
- int fd = open(filename, O_RDONLY);
- if (fd == -1) {
- std::cout << "File not found: " << filename << std::endl;
- return false;
- }
-
- FileInputStream raw_input{fd};
- CodedInputStream coded_input{&raw_input};
- coded_input.SetTotalBytesLimit(protoBytesLimit, protoBytesWarningLimit);
-
- bool success = proto->ParseFromCodedStream(&coded_input);
-
- close(fd);
-
- return success;
-}
-
-} // namespace nnc
add_library(nn_import_common STATIC ${COMMON_SOURCES})
set_target_properties(nn_import_common PROPERTIES POSITION_INDEPENDENT_CODE ON)
-target_link_libraries(nn_import_common PRIVATE nnc_core nnc_support)
\ No newline at end of file
+target_link_libraries(nn_import_common PRIVATE nnc_core nnc_support)
#include "passes/common_frontend/model_allocation.h"
-namespace nnc
-{
+namespace nnc {
-ModelAllocation::ModelAllocation(std::string filename)
-{
+ModelAllocation::ModelAllocation(std::string filename) {
using stat = struct stat;
- fd = open(filename.c_str(), O_RDONLY);
+ _fd = open(filename.c_str(), O_RDONLY);
- if (fd == -1)
- {
+ if (_fd == -1)
return;
- }
stat st{};
- int flag = fstat(fd, &st);
+ int flag = fstat(_fd, &st);
if (flag == -1)
- {
return;
- }
- numBytes = st.st_size;
+ _numBytes = st.st_size;
- dataPnt = mmap(nullptr, numBytes, PROT_READ, MAP_SHARED, fd, 0);
+ _dataPnt = mmap(nullptr, _numBytes, PROT_READ, MAP_SHARED, _fd, 0);
- if (dataPnt != MAP_FAILED)
- {
- mmapState = MAPPED;
- }
+ if (_dataPnt != MAP_FAILED)
+ _mmapState = mapped;
}
-ModelAllocation::~ModelAllocation()
-{
- if (mmapState == MAPPED)
- {
- munmap(dataPnt, numBytes);
- mmapState = UNMAPPED;
+ModelAllocation::~ModelAllocation() {
+ if (_mmapState == mapped) {
+ munmap(_dataPnt, _numBytes);
+ _mmapState = unmapped;
}
- if (fd != -1)
- {
- close(fd);
- }
+ if (_fd != -1)
+ close(_fd);
}
-const void *ModelAllocation::getDataPnt() { return mmapState == MAPPED ? dataPnt : nullptr; }
+const void *ModelAllocation::getDataPnt() { return _mmapState == mapped ? _dataPnt : nullptr; }
-size_t ModelAllocation::getNumBytes() { return mmapState == MAPPED ? numBytes : 0; }
+size_t ModelAllocation::getNumBytes() { return _mmapState == mapped ? _numBytes : 0; }
} // namespace nnc
fclose(f);
} // checkInFile
-void checkModelFiles(const Option<std::vector<std::string>> &in_files) {
- if (in_files.empty())
- throw BadOption("Model file names should not be empty");
-
- if ((tflFrontend || caffeFrontend || onnxFrontend) && in_files.size() != 1)
- throw BadOption("For caffe, tflite and onnx frameworks single model file must be specified");
- // else if (cli::caffe2Frontend && in_files.size() != 2)
- // throw BadOption("For caffe2 framework two model files must be specified (init_net and predict_net)");
-
- for (auto& f_name : in_files) {
- auto f = fopen(f_name.c_str(), "rb");
- if (!f)
- throw BadOption("Cannot open file <" + f_name + ">");
- fclose(f);
- }
-} // checkModelFiles
-
void checkOutFile(const Option<std::string> &out_file) {
if ( out_file.empty() )
throw BadOption("Output file name should not be empty");
this->push_back(val);
}
+// vector of ints: each call appends one parsed element (the parser invokes
+// setValue once per value).
+// NOTE(review): std::stoi throws std::invalid_argument / std::out_of_range on
+// malformed input — confirm the parser catches these and reports a BadOption.
+template <>
+void Option<std::vector<int>>::setValue(const std::string &val)
+{
+  if (!val.empty())
+    this->push_back(stoi(val));
+}
+
// bool
template <>
void Option<bool>::setValue(const std::string &val)
return 1;
cli::CommandLine::getParser()->parseCommandLine(argc, argv);
- std::string modelName = cli::inputFiles[0];
- nnc::CaffeImporter importer{modelName};
+ nnc::CaffeImporter importer{cli::inputFile};
importer.import();
importer.createIR();
}
catch (...) {
- std::cout << "Could not create IR for model \"" << modelName << "\"" << std::endl;
+ std::cout << "Could not create IR for model \"" << cli::inputFile << "\"" << std::endl;
return 1;
}
}
cli::CommandLine::getParser()->parseCommandLine(argc, argv);
- std::string modelName = cli::inputFiles[0];
-
- nnc::TfliteImporter importer{modelName};
+ nnc::TfliteImporter importer{cli::inputFile};
importer.import();
}
catch (...)
{
- std::cout << "Could not create IR for model \"" << modelName << "\"" << std::endl;
+ std::cout << "Could not create IR for model \"" << cli::inputFile << "\"" << std::endl;
return 1;
}
# dumpers of NN models
add_subdirectory(tflite_dot_dumper)
add_subdirectory(caffe_dot_dumper)
+add_subdirectory(caffe2_dot_dumper)
--- /dev/null
+if (NOT TARGET caffe2_importer)
+ return()
+endif()
+
+add_nncc_example_executable(caffe2_model_dumper ${OPTIONS_SRC} model_dump.cpp)
+nncc_target_link_libraries(caffe2_model_dumper nnc_support caffe2_importer)
+target_include_directories(caffe2_model_dumper PRIVATE ${NNC_CAFFE2_FRONTEND_DIR})
--- /dev/null
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <iostream>
+
+#include "support/CommandLine.h"
+#include "option/Options.h"
+#include "passes/caffe2_frontend/caffe2_importer.h"
+#include "core/modelIR/Graph.h"
+#include "core/modelIR/IrDotDumper.h"
+#include "core/modelIR/ShapeInference.h"
+#include "pass/PassException.h"
+
+using namespace nnc;
+using namespace nnc::mir;
+using namespace nnc::cli;
+
+int main(int argc, const char **argv) {
+ cli::CommandLine::getParser()->parseCommandLine(argc, argv, false);
+
+ // FIXME: caffe2 input shapes are not provided by model and must be set from cli
+ nnc::Caffe2Importer importer{cli::inputFile, cli::initNet, {cli::inputShapes}};
+
+ try {
+ importer.import();
+ IrDotDumper dotDumper;
+ ShapeInference inf;
+ auto g = static_cast<Graph *>(importer.createIR());
+ g->accept(&inf);
+ g->accept(&dotDumper);
+ dotDumper.writeDot(std::cout);
+ }
+ catch (PassException &e) {
+ std::cout << "Error: " << e.what() << std::endl;
+ return -1;
+ }
+
+ return 0;
+}
int main(int argc, const char **argv) {
cli::CommandLine::getParser()->parseCommandLine(argc, argv, false);
- std::string model = cli::inputFiles[0];
-
- nnc::CaffeImporter importer{model};
+ nnc::CaffeImporter importer{cli::inputFile};
try {
importer.import();
int main(int argc, const char **argv) {
cli::CommandLine::getParser()->parseCommandLine(argc, argv, false);
- std::string model = cli::inputFiles[0];
- nnc::TfliteImporter importer{model};
+ nnc::TfliteImporter importer{cli::inputFile};
try {
importer.import();