From 9ecc6a78552d9ddf1e4516b53c45f8b2778c4a2f Mon Sep 17 00:00:00 2001 From: Parichay Kapoor Date: Thu, 15 Jul 2021 11:05:59 +0900 Subject: [PATCH] [LayerV1] Delete LayerV1 This patch deletes the LayerV1 header and its implementation. Some of the relevant code from LayerV1 is moved to either LayerNode or LayerDevel. Signed-off-by: Parichay Kapoor --- .../Custom/LayerPlugin/layer_plugin_test.cpp | 1 - Applications/ProductRatings/jni/main.cpp | 1 - debian/nntrainer-dev.install | 1 - jni/Android.mk | 1 - nntrainer/app_context.h | 5 +- nntrainer/layers/activation_layer.cpp | 7 +- nntrainer/layers/addition_layer.cpp | 1 - nntrainer/layers/bn_layer.cpp | 7 +- nntrainer/layers/concat_layer.cpp | 1 - nntrainer/layers/conv2d_layer.cpp | 7 +- nntrainer/layers/embedding.cpp | 7 +- nntrainer/layers/entropy_layer.h | 1 - nntrainer/layers/gru.cpp | 8 +- nntrainer/layers/input_layer.cpp | 7 +- nntrainer/layers/layer.cpp | 313 --------- nntrainer/layers/layer_devel.h | 79 +++ nntrainer/layers/layer_impl.cpp | 15 +- nntrainer/layers/layer_internal.h | 704 --------------------- nntrainer/layers/layer_node.cpp | 6 +- nntrainer/layers/layer_node.h | 17 +- nntrainer/layers/lstm.cpp | 8 +- nntrainer/layers/meson.build | 4 +- nntrainer/layers/nnstreamer_layer.cpp | 7 +- nntrainer/layers/output_layer.cpp | 1 - nntrainer/layers/plugged_layer.h | 2 +- nntrainer/layers/pooling2d_layer.cpp | 7 +- nntrainer/layers/preprocess_flip_layer.cpp | 7 +- nntrainer/layers/preprocess_translate_layer.cpp | 7 +- nntrainer/layers/rnn.cpp | 8 +- nntrainer/layers/split_layer.cpp | 7 +- nntrainer/layers/tflite_layer.cpp | 7 +- nntrainer/layers/time_dist.cpp | 1 - nntrainer/models/dynamic_training_optimization.cpp | 1 - nntrainer/models/dynamic_training_optimization.h | 3 +- nntrainer/models/neuralnet.cpp | 10 +- nntrainer/models/neuralnet.h | 4 +- nntrainer/utils/node_exporter.cpp | 3 +- nntrainer/utils/node_exporter.h | 8 - nntrainer/utils/parse_util.cpp | 3 +- packaging/nntrainer.spec | 1 - test/unittest/unittest_nntrainer_appcontext.cpp | 8 +- test/unittest/unittest_nntrainer_layers.cpp | 11 +- 42 files changed, 171 insertions(+), 1136 deletions(-) delete mode 100644 nntrainer/layers/layer.cpp delete mode 100644 nntrainer/layers/layer_internal.h diff --git a/Applications/Custom/LayerPlugin/layer_plugin_test.cpp b/Applications/Custom/LayerPlugin/layer_plugin_test.cpp index 85040c4..1ba6cef 100644 --- a/Applications/Custom/LayerPlugin/layer_plugin_test.cpp +++ b/Applications/Custom/LayerPlugin/layer_plugin_test.cpp @@ -19,7 +19,6 @@ #include #include -#include const char *NNTRAINER_PATH = std::getenv("NNTRAINER_PATH"); diff --git a/Applications/ProductRatings/jni/main.cpp b/Applications/ProductRatings/jni/main.cpp index 1b0ba6a..23e70b2 100644 --- a/Applications/ProductRatings/jni/main.cpp +++ b/Applications/ProductRatings/jni/main.cpp @@ -22,7 +22,6 @@ #include #include -#include #include #include #include diff --git a/debian/nntrainer-dev.install b/debian/nntrainer-dev.install index 8aed5e1..e7513da 100644 --- a/debian/nntrainer-dev.install +++ b/debian/nntrainer-dev.install @@ -15,7 +15,6 @@ /usr/include/nntrainer/databuffer_factory.h /usr/include/nntrainer/layer_context.h /usr/include/nntrainer/layer_devel.h -/usr/include/nntrainer/layer_internal.h /usr/include/nntrainer/neuralnet.h /usr/include/nntrainer/tensor.h /usr/include/nntrainer/tensor_dim.h diff --git a/jni/Android.mk b/jni/Android.mk index 83e4138..4f07dae 100644 --- a/jni/Android.mk +++ b/jni/Android.mk @@ -135,7 +135,6 @@ NNTRAINER_SRCS := 
$(NNTRAINER_ROOT)/nntrainer/models/neuralnet.cpp \ $(NNTRAINER_ROOT)/nntrainer/tensor/weight.cpp \ $(NNTRAINER_ROOT)/nntrainer/tensor/tensor_dim.cpp \ $(NNTRAINER_ROOT)/nntrainer/tensor/blas_interface.cpp \ - $(NNTRAINER_ROOT)/nntrainer/layers/layer.cpp \ $(NNTRAINER_ROOT)/nntrainer/layers/layer_node.cpp \ $(NNTRAINER_ROOT)/nntrainer/layers/input_layer.cpp \ $(NNTRAINER_ROOT)/nntrainer/layers/output_layer.cpp \ diff --git a/nntrainer/app_context.h b/nntrainer/app_context.h index 8d3fe51..9c5c758 100644 --- a/nntrainer/app_context.h +++ b/nntrainer/app_context.h @@ -24,8 +24,8 @@ #include #include +#include #include -#include #include #include @@ -293,8 +293,7 @@ public: } private: - FactoryMap - factory_map; + FactoryMap factory_map; std::string working_path_base; }; diff --git a/nntrainer/layers/activation_layer.cpp b/nntrainer/layers/activation_layer.cpp index a798851..e0bd297 100644 --- a/nntrainer/layers/activation_layer.cpp +++ b/nntrainer/layers/activation_layer.cpp @@ -20,7 +20,6 @@ #include #include -#include #include #include #include @@ -73,9 +72,9 @@ void ActivationLayer::setProperty(const std::vector &values) { void ActivationLayer::setProperty(const std::string &type_str, const std::string &value) { - using PropertyType = LayerV1::PropertyType; - LayerV1::PropertyType type = - static_cast(parseLayerProperty(type_str)); + using PropertyType = nntrainer::Layer::PropertyType; + nntrainer::Layer::PropertyType type = + static_cast(parseLayerProperty(type_str)); switch (type) { case PropertyType::activation: { diff --git a/nntrainer/layers/addition_layer.cpp b/nntrainer/layers/addition_layer.cpp index 893313c..18c411d 100644 --- a/nntrainer/layers/addition_layer.cpp +++ b/nntrainer/layers/addition_layer.cpp @@ -12,7 +12,6 @@ */ #include -#include #include #include #include diff --git a/nntrainer/layers/bn_layer.cpp b/nntrainer/layers/bn_layer.cpp index 2a475bc..0975ca4 100644 --- a/nntrainer/layers/bn_layer.cpp +++ b/nntrainer/layers/bn_layer.cpp @@ -22,7 +22,6 @@ */ #include -#include #include #include #include @@ -102,10 +101,10 @@ void BatchNormalizationLayer::setProperty( void BatchNormalizationLayer::setProperty(const std::string &type_str, const std::string &value) { - using PropertyType = LayerV1::PropertyType; + using PropertyType = nntrainer::Layer::PropertyType; int status = ML_ERROR_NONE; - LayerV1::PropertyType type = - static_cast(parseLayerProperty(type_str)); + nntrainer::Layer::PropertyType type = + static_cast(parseLayerProperty(type_str)); switch (type) { case PropertyType::epsilon: diff --git a/nntrainer/layers/concat_layer.cpp b/nntrainer/layers/concat_layer.cpp index 14cc771..e07b229 100644 --- a/nntrainer/layers/concat_layer.cpp +++ b/nntrainer/layers/concat_layer.cpp @@ -13,7 +13,6 @@ #include #include -#include #include #include #include diff --git a/nntrainer/layers/conv2d_layer.cpp b/nntrainer/layers/conv2d_layer.cpp index a7262fe..d7ec447 100644 --- a/nntrainer/layers/conv2d_layer.cpp +++ b/nntrainer/layers/conv2d_layer.cpp @@ -18,7 +18,6 @@ #include #include -#include #include #include #include @@ -507,10 +506,10 @@ void Conv2DLayer::setProperty(const std::vector &values) { void Conv2DLayer::setProperty(const std::string &type_str, const std::string &value) { - using PropertyType = LayerV1::PropertyType; + using PropertyType = nntrainer::Layer::PropertyType; int status = ML_ERROR_NONE; - LayerV1::PropertyType type = - static_cast(parseLayerProperty(type_str)); + nntrainer::Layer::PropertyType type = + static_cast(parseLayerProperty(type_str)); switch 
(type) { case PropertyType::filters: { diff --git a/nntrainer/layers/embedding.cpp b/nntrainer/layers/embedding.cpp index 96f87db..cf05ea2 100644 --- a/nntrainer/layers/embedding.cpp +++ b/nntrainer/layers/embedding.cpp @@ -12,7 +12,6 @@ */ #include -#include #include #include #include @@ -75,10 +74,10 @@ void EmbeddingLayer::setProperty(const std::vector &values) { void EmbeddingLayer::setProperty(const std::string &type_str, const std::string &value) { - using PropertyType = LayerV1::PropertyType; + using PropertyType = nntrainer::Layer::PropertyType; int status = ML_ERROR_NONE; - LayerV1::PropertyType type = - static_cast(parseLayerProperty(type_str)); + nntrainer::Layer::PropertyType type = + static_cast(parseLayerProperty(type_str)); switch (type) { case PropertyType::in_dim: { diff --git a/nntrainer/layers/entropy_layer.h b/nntrainer/layers/entropy_layer.h index 2cc7eba..ebf0f0d 100644 --- a/nntrainer/layers/entropy_layer.h +++ b/nntrainer/layers/entropy_layer.h @@ -15,7 +15,6 @@ #define __LOSS_LAYER_H__ #ifdef __cplusplus -#include #include namespace nntrainer { diff --git a/nntrainer/layers/gru.cpp b/nntrainer/layers/gru.cpp index 395b7bb..3c0033d 100644 --- a/nntrainer/layers/gru.cpp +++ b/nntrainer/layers/gru.cpp @@ -28,10 +28,10 @@ #include #include -#include #include #include #include +#include #include #include @@ -151,10 +151,10 @@ void GRULayer::setProperty(const std::vector &values) { void GRULayer::setProperty(const std::string &type_str, const std::string &value) { - using PropertyType = LayerV1::PropertyType; + using PropertyType = nntrainer::Layer::PropertyType; int status = ML_ERROR_NONE; - LayerV1::PropertyType type = - static_cast(parseLayerProperty(type_str)); + nntrainer::Layer::PropertyType type = + static_cast(parseLayerProperty(type_str)); // TODO : Add return_state property & api to get the hidden input switch (type) { diff --git a/nntrainer/layers/input_layer.cpp b/nntrainer/layers/input_layer.cpp index f2003e8..3cbfd63 100644 --- a/nntrainer/layers/input_layer.cpp +++ b/nntrainer/layers/input_layer.cpp @@ -22,7 +22,6 @@ */ #include -#include #include #include #include @@ -54,10 +53,10 @@ void InputLayer::setProperty(const std::vector &values) { void InputLayer::setProperty(const std::string &type_str, const std::string &value) { - using PropertyType = LayerV1::PropertyType; + using PropertyType = nntrainer::Layer::PropertyType; int status = ML_ERROR_NONE; - LayerV1::PropertyType type = - static_cast(parseLayerProperty(type_str)); + nntrainer::Layer::PropertyType type = + static_cast(parseLayerProperty(type_str)); switch (type) { case PropertyType::normalization: { diff --git a/nntrainer/layers/layer.cpp b/nntrainer/layers/layer.cpp deleted file mode 100644 index 907108f..0000000 --- a/nntrainer/layers/layer.cpp +++ /dev/null @@ -1,313 +0,0 @@ -/** - * Copyright (C) 2019 Samsung Electronics Co., Ltd. All Rights Reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * http://www.apache.org/licenses/LICENSE-2.0 - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- * - * - * @file layer.cpp - * @date 04 December 2019 - * @brief This is Layers Classes for Neural Network - * @see https://github.com/nnstreamer/nntrainer - * @author Jijoong Moon - * @bug No known bugs except for NYI items - * - */ -#include -#include - -#include -#include -#include -#include -#include -#include - -namespace nntrainer { - -int LayerV1::checkValidation() { return ML_ERROR_NONE; } - -void LayerV1::setBatch(unsigned int batch) { - for (unsigned int idx = 0; idx < getNumInputs(); ++idx) - input_dim[idx].setTensorDim(0, batch); - - for (unsigned int idx = 0; idx < getNumOutputs(); ++idx) - output_dim[idx].setTensorDim(0, batch); -} - -std::vector LayerV1::getOutputs() { - std::vector ret; - for (unsigned int i = 0; i < getNumOutputs(); ++i) { - ret.push_back(net_hidden[i]->getVariableRef()); - } - return ret; -} - -std::vector LayerV1::getDerivatives() { - std::vector ret; - for (unsigned int i = 0; i < getNumInputs(); ++i) { - ret.push_back(net_input[i]->getGradientRef()); - } - return ret; -} - -void LayerV1::copy(std::shared_ptr l) { - for (auto const &w : l->weights) - weights.push_back(w.clone()); - - this->input_dim = l->input_dim; - this->output_dim = l->output_dim; - this->loss = l->loss; - this->weight_regularizer = l->weight_regularizer; - this->weight_regularizer_constant = l->weight_regularizer_constant; - this->weight_initializer = l->weight_initializer; -} - -sharedConstTensors LayerV1::forwarding_with_val(sharedConstTensors input, - sharedConstTensors label, - bool training) { - - if (getNumInputs() != input.size()) { - std::stringstream ss; - ss << "Number of inputs mismatched, given: " << input.size() - << " expected: " << getNumInputs(); - throw std::invalid_argument(ss.str().c_str()); - } - - for (unsigned int i = 0; i < getNumInputs(); ++i) { - net_input[i]->getVariableRef() = input[i]->clone(); - } - - if (!label.empty()) { - for (unsigned int i = 0; i < getNumOutputs(); ++i) { - net_hidden[i]->getGradientRef() = label[i]->clone(); - } - } - - forwarding(training); - - nntrainer::sharedConstTensors out; - - for (unsigned int i = 0; i < getNumOutputs(); ++i) { - out.push_back(MAKE_SHARED_TENSOR(net_hidden[i]->getVariable())); - } - - return out; -} - -sharedConstTensors LayerV1::backwarding_with_val(sharedConstTensors label) { - - for (unsigned int i = 0; i < getNumOutputs(); ++i) { - net_hidden[i]->getGradientRef() = label[i]->clone(); - } - - backwarding(); - - nntrainer::sharedConstTensors out; - - for (unsigned int i = 0; i < getNumInputs(); ++i) { - out.push_back(MAKE_SHARED_TENSOR(net_input[i]->getGradient())); - } - - return out; -} - -void LayerV1::read(std::ifstream &file) { - for (auto &weight : weights) { - weight.getVariableRef().read(file); - } -} - -void LayerV1::save(std::ofstream &file) { - for (auto &weight : weights) { - weight.getVariableRef().save(file); - } -} - -int LayerV1::setProperty(std::vector values) { - int status = ML_ERROR_NONE; - - try { - values = loadProperties(values, layer_props); - } catch (std::invalid_argument &e) { - ml_loge("parsing property failed, reason: %s", e.what()); - return ML_ERROR_INVALID_PARAMETER; - } - - /// @todo: deprecate this in favor of loadProperties - for (unsigned int i = 0; i < values.size(); ++i) { - std::string key; - std::string value; - - status = getKeyValue(values[i], key, value); - NN_RETURN_STATUS(); - - unsigned int type = parseLayerProperty(key); - - if (value.empty()) { - ml_logd("value is empty: key: %s, value: %s", key.c_str(), value.c_str()); - return ML_ERROR_INVALID_PARAMETER; 
- } - - try { - /// @note this calls derived setProperty if available - setProperty(static_cast(type), value); - } catch (...) { - ml_logd("value or key is not valid, key: %s, value: %s", key.c_str(), - value.c_str()); - return ML_ERROR_INVALID_PARAMETER; - } - } - return status; -} - -void LayerV1::setProperty(const PropertyType type, const std::string &value) { - int status = ML_ERROR_NONE; - - switch (type) { - case PropertyType::weight_regularizer: - if (!value.empty()) { - weight_regularizer = - (WeightRegularizer)parseType(value, TOKEN_WEIGHT_REGULARIZER); - if (weight_regularizer == WeightRegularizer::UNKNOWN) { - throw std::invalid_argument("[Layer] Unknown Weight decay"); - } - } - break; - case PropertyType::weight_regularizer_constant: - if (!value.empty()) { - status = setFloat(weight_regularizer_constant, value); - throw_status(status); - } - break; - case PropertyType::weight_initializer: - if (!value.empty()) { - weight_initializer = - (WeightInitializer)parseType(value, TOKEN_WEIGHT_INIT); - } - break; - case PropertyType::bias_initializer: - if (!value.empty()) { - bias_initializer = (WeightInitializer)parseType(value, TOKEN_WEIGHT_INIT); - } - break; - default: - std::string msg = - "[Layer] Unknown Layer Property Key for value " + std::string(value); - throw exception::not_supported(msg); - } -} - -template -void LayerV1::printIfValid(std::ostream &out, const PropertyType type, - const T target) { - try { - setProperty(type); - } catch (exception::not_supported &e) { - return; - } - - out << propToStr(static_cast(type)) << ": " << target - << std::endl; -} - -void LayerV1::printShapeInfo(std::ostream &out) { - for (unsigned int idx = 0; idx < getNumInputs(); ++idx) { - out << "input " << input_dim[idx]; - for (unsigned int i = 0; i < weights.size(); i++) - out << "inner" << i << " " << weightAt(i).getVariable().getDim(); - } - for (unsigned int idx = 0; idx < getNumOutputs(); ++idx) { - out << "output " << output_dim[idx]; - } -} - -void LayerV1::printPropertiesMeta(std::ostream &out) { - // printIfValid( - // out, PropertyType::activation, - // static_cast::type>(activation_type)); -} - -void LayerV1::printProperties(std::ostream &out) { - // out << "Trainable: " << trainable << std::endl; - printIfValid(out, PropertyType::weight_regularizer, - static_cast(weight_regularizer)); - printIfValid(out, PropertyType::weight_regularizer_constant, - weight_regularizer_constant); -} - -void LayerV1::printMetric(std::ostream &out) { - if (loss > 0) { - out << "Weight regularization loss: " << loss; - } -} - -void LayerV1::printPreset(std::ostream &out, PrintPreset preset) { - unsigned int flags = 0; - switch (preset) { - case PrintPreset::PRINT_ALL: - flags = PRINT_WEIGHTS | PRINT_METRIC; - /// fall through intended - case PrintPreset::PRINT_SUMMARY_META: - flags |= PRINT_PROP_META; - /// fall through intended - case PrintPreset::PRINT_SUMMARY: - flags |= PRINT_INST_INFO | PRINT_SHAPE_INFO | PRINT_PROP | PRINT_PROP_META; - break; - case PrintPreset::PRINT_NONE: - return; - default: - throw ::std::invalid_argument("undefined preset given"); - } - print(out, flags); -} - -void LayerV1::print(std::ostream &out, unsigned int flags) { - /** @todo properly move print to LayerNode */ - if (flags & PRINT_INST_INFO) { - out << "==================="; - // if (getName().empty()) - // printInstance(out, this); - // else - // out << "<" << getName() << ">" << std::endl; - - out << "Layer Type: " << getType() << std::endl; - } - - if (flags & PRINT_SHAPE_INFO) { - out << "======shape 
information: " << std::endl; - printShapeInfo(out); - } - - if (flags & PRINT_PROP_META) { - out << "======meta properties: " << std::endl; - printPropertiesMeta(out); - } - - if (flags & PRINT_PROP) { - out << "======properties: " << std::endl; - printProperties(out); - } - - if (flags & PRINT_WEIGHTS) { - out << "======weights: " << std::endl; - for (auto const &weight : weights) { - out << '[' << weight.getName() << ']' << std::endl; - out << weight.getVariable(); - } - } - - if (flags & PRINT_METRIC) { - out << "======metrics: " << std::endl; - printMetric(out); - } -}; - -} /* namespace nntrainer */ diff --git a/nntrainer/layers/layer_devel.h b/nntrainer/layers/layer_devel.h index 17fa91f..28a21c4 100644 --- a/nntrainer/layers/layer_devel.h +++ b/nntrainer/layers/layer_devel.h @@ -51,6 +51,85 @@ class Layer { public: /** + * @brief Property Enumeration + * 0. input shape : string + * 1. normalization : bool + * 2. standardization : bool + * 3. activation : string (type) + * 4. epsilon : float + * 5. weight_regularizer : string (type) + * 6. weight_regularizer_constant : float + * 7. unit : int + * 8. weight_initializer : string (type) + * 9. bias initializer : string (type) + * 10. filter_size : int + * 11. kernel_size : ( n , m ) + * 12. stride : ( n, m ) + * 13. padding : ( n, m ) + * 14. pool_size : ( n,m ) + * 15. pooling : max, average, global_max, global_average + * 16. flatten : bool + * 17. name : string (type) + * 18. momentum : float, + * 19. moving_mean_initializer : string (type), + * 20. moving_variance_initializer : string (type), + * 21. gamma_initializer : string (type), + * 22. beta_initializer" : string (type) + * 23. modelfile : model file for loading config for backbone layer + * 24. input_layers : string (type) + * 25. output_layers : string (type) + * 26. trainable : + * 27. flip_direction + * 28. random_translate + * 29. in_dim : int ( input dimension for embedding layer ) + * 30. out_dim : int ( output dimesion for embedding layer ) + * 31. recurrent_activation : string (type) - lstm + * 32. distribute : bool + * 33. split_dimension : string (type) + * 34. return_sequences : bool (type) - lstm + * 35. 
hidden_state_activation : string (type) - lstm + */ + enum class PropertyType { + input_shape = 0, + normalization = 1, + standardization = 2, + activation = 3, + epsilon = 4, + weight_regularizer = 5, + weight_regularizer_constant = 6, + unit = 7, + weight_initializer = 8, + bias_initializer = 9, + filters = 10, + kernel_size = 11, + stride = 12, + padding = 13, + pool_size = 14, + pooling = 15, + flatten = 16, + name = 17, + momentum = 18, + moving_mean_initializer = 19, + moving_variance_initializer = 20, + gamma_initializer = 21, + beta_initializer = 22, + modelfile = 23, /** model file for loading config for backbone layer */ + input_layers = 24, + output_layers = 25, + trainable = 26, + flip_direction = 27, + random_translate = 28, + in_dim = 29, + out_dim = 30, + recurrent_activation = 31, + distribute = 32, + split_dimension = 33, + return_sequences = 34, + hidden_state_activation = 35, + unknown + }; + + /** * @brief Destructor of Layer Class */ virtual ~Layer() = default; diff --git a/nntrainer/layers/layer_impl.cpp b/nntrainer/layers/layer_impl.cpp index 460850e..caa4f7e 100644 --- a/nntrainer/layers/layer_impl.cpp +++ b/nntrainer/layers/layer_impl.cpp @@ -16,7 +16,6 @@ #include #include -#include #include #include #include @@ -55,12 +54,14 @@ void LayerImpl::setProperty(const std::vector &values) { void LayerImpl::setProperty(const std::string &type_str, const std::string &value) { + using PropertyType = nntrainer::Layer::PropertyType; + int status = ML_ERROR_NONE; - LayerV1::PropertyType type = - static_cast(parseLayerProperty(type_str)); + nntrainer::Layer::PropertyType type = + static_cast(parseLayerProperty(type_str)); switch (type) { - case LayerV1::PropertyType::weight_regularizer: + case PropertyType::weight_regularizer: if (!value.empty()) { weight_regularizer = (WeightRegularizer)parseType(value, TOKEN_WEIGHT_REGULARIZER); @@ -69,19 +70,19 @@ void LayerImpl::setProperty(const std::string &type_str, } } break; - case LayerV1::PropertyType::weight_regularizer_constant: + case PropertyType::weight_regularizer_constant: if (!value.empty()) { status = setFloat(weight_regularizer_constant, value); throw_status(status); } break; - case LayerV1::PropertyType::weight_initializer: + case PropertyType::weight_initializer: if (!value.empty()) { weight_initializer = (WeightInitializer)parseType(value, TOKEN_WEIGHT_INIT); } break; - case LayerV1::PropertyType::bias_initializer: + case PropertyType::bias_initializer: if (!value.empty()) { bias_initializer = (WeightInitializer)parseType(value, TOKEN_WEIGHT_INIT); } diff --git a/nntrainer/layers/layer_internal.h b/nntrainer/layers/layer_internal.h deleted file mode 100644 index 981cee5..0000000 --- a/nntrainer/layers/layer_internal.h +++ /dev/null @@ -1,704 +0,0 @@ -/** - * Copyright (C) 2019 Samsung Electronics Co., Ltd. All Rights Reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * http://www.apache.org/licenses/LICENSE-2.0 - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- * - * @file layer_internal.h - * @date 04 December 2019 - * @brief This is Layer classes of Neural Network - * @see https://github.com/nnstreamer/nntrainer - * @author Jijoong Moon - * @bug No known bugs except for NYI items - * - */ -#ifndef __LAYER_H__ -#define __LAYER_H__ -#ifdef __cplusplus - -#include -#include -#include -#include - -#include -#include -#include -#include -#include -#include -#include -#include -#include - -namespace nntrainer { - -/** - * @class Layer Base class for layers - * @brief Base class for all layers - * - * @details nntrainer::Layer inherits ml::train::Layer but has been ommitted to - * disallow static_cast between nntrainer::Layer and ml::train::Layer objects. - */ -class LayerV1 { - - /** model classes can call private methods which arent exposed to public */ - friend class NeuralNetwork; - friend class NetworkGraph; - -public: - /** - * @brief Constructor of Layer Class - */ - LayerV1( - WeightRegularizer weight_regularizer_ = WeightRegularizer::NONE, - const float weight_regularizer_constant_ = 1.0f, - WeightInitializer weight_initializer_ = - WeightInitializer::WEIGHT_XAVIER_UNIFORM, - WeightInitializer bias_initializer_ = WeightInitializer::WEIGHT_ZEROS) : - layer_props(), - loss(0.0f), - weight_regularizer(weight_regularizer_), - weight_regularizer_constant(weight_regularizer_constant_), - weight_initializer(weight_initializer_), - bias_initializer(bias_initializer_) { - setNumInputs(1); - setNumOutputs(1); - } - - /** - * @brief Destructor of Layer Class - */ - virtual ~LayerV1() = default; - - /** - * @brief Move constructor of Layer. - * @param[in] Layer && - */ - LayerV1(LayerV1 &&rhs) noexcept = default; - - /** - * @brief Move assignment operator. - * @parma[in] rhs Layer to be moved. - */ - virtual LayerV1 &operator=(LayerV1 &&rhs) = default; - - /** - * @brief Get the layer type - * @return const std::string type representation - */ - virtual const std::string getType() const = 0; - - /** - * @brief Forward Propagation of a layer - * @param[in] in List of Input Tensors taken by this layer - * @retval List of Output Tensors - */ - virtual void forwarding(bool training = true) = 0; - - /** - * @brief Forward Propagation of a layer - * @param[in] input List of Input Tensors taken by this layer - * @param[in] label List of Label Tensors taken by this layer - * @param[in] training if training, pass true else false. some layers have - * diffrent behavior depending on this - * @retval List of Output Tensors - */ - virtual sharedConstTensors forwarding_with_val(sharedConstTensors input, - sharedConstTensors label = {}, - bool training = true); - - /** - * @brief calc the derivative to be passed to the previous layer - * @retval Derivative List of Tensor for the previous layer - */ - virtual void calcDerivative() = 0; - - /** - * @brief check if this layer requires label to be passed - * @note if requireLabel() == true means, for now, that it is endpoint of a - * graph(numOutlayers == 0). 
label will be fed to the gradient of hidden if - * requireLabel is true - * @todo If we get to have a use case for requireLabel(true) but in the - * middle of a graph, change the semantics - * - * @retval true requires a label when training - * @retval false does not require a label - */ - virtual bool requireLabel() const { return false; } - - /** - * @brief Calculate the derivative of a layer - */ - virtual void calcGradient(){}; - - /** - * @brief Apply the gradient for the layer - * @param[in] iteration Iteration value for the Optimizer - * @param[in] optimizer Optimizer to apply the gradient - * @note This function is no-op if optimizer is nullptr - */ - virtual void applyGradient(unsigned int iteration, - std::shared_ptr optimizer) { - if (optimizer) { - for (auto &weight : weights) - optimizer->applyGradient(weight, iteration); - } - } - - /** - * @brief Back Propagate the derivative to the previous layer - * @retval Derivative List of Tensor for the previous layer - */ - virtual void backwarding() { - calcGradient(); - calcDerivative(); - } - - /** - * @brief Backward to calculate the gradient for the layer and apply it - * @param[in] iteration Iteration value for the Optimizer - * @param[in] deriv Derivative for the layer - * @param[in] optimizer Optimizer to apply the gradient - */ - virtual sharedConstTensors - backwarding_with_val(int iteration, sharedConstTensors deriv, - std::shared_ptr optimizer = nullptr) { - auto ret = backwarding_with_val(deriv); - applyGradient(iteration, optimizer); - return ret; - }; - - /** - * @brief Backward to calculate the gradient for the layer - * @param[in] deriv Derivative for the layer - */ - virtual sharedConstTensors backwarding_with_val(sharedConstTensors deriv); - - /** - * @brief read layer Weight & Bias data from file - * @note derived class can call this to get/save weights - * @param[in] file input file stream - */ - virtual void read(std::ifstream &file); - - /** - * @brief save layer Weight & Bias data from file - * @note derived class can call this to get/save weights - * @param[in] file output file stream - */ - virtual void save(std::ofstream &file); - - /** - * @brief set Property of layer - * @param[in] values values of property - * @retval #ML_ERROR_NONE Successful. - * @retval #ML_ERROR_INVALID_PARAMETER invalid parameter. - * @note this shouldn't be virtual, this became virtual to support custom - * layer. should be reverted after layer.h can fully support custom layer - */ - virtual int setProperty(std::vector values); - - /** - * @brief Property Enumeration - * 0. input shape : string - * 1. bias zero : bool - * 2. normalization : bool - * 3. standardization : bool - * 4. activation : string (type) - * 5. epsilon : float - * 6. weight_regularizer : string (type) - * 7. weight_regularizer_constant : float - * 8. unit : int - * 9. weight_initializer : string (type) - * 10. filter_size : int - * 11. kernel_size : ( n , m ) - * 12. stride : ( n, m ) - * 13. padding : ( n, m ) - * 14. pool_size : ( n,m ) - * 15. pooling : max, average, global_max, global_average - * 16. flatten : bool - * 17. name : string (type) - * 18. momentum : float, - * 19. moving_mean_initializer : string (type), - * 20. moving_variance_initializer : string (type), - * 21. gamma_initializer : string (type), - * 22. beta_initializer" : string (type) - * 23. modelfile : model file for loading config for backbone layer - * 24. input_layers : string (type) - * 25. output_layers : string (type) - * 26. trainable : - * 27. flip_direction - * 28. 
random_translate - * 29. in_dim : int ( input dimension for embedding layer ) - * 30. out_dim : int ( output dimesion for embedding layer ) - * 31. recurrent_activation : string (type) - lstm - * 32. distribute : bool - * 33. split_dimension : string (type) - * 34. return_sequences : bool (type) - lstm - * 35. hidden_state_activation : string (type) - lstm - * 36. dropout : float (type) - drop out rate - */ - enum class PropertyType { - input_shape = 0, - normalization = 1, - standardization = 2, - activation = 3, - epsilon = 4, - weight_regularizer = 5, - weight_regularizer_constant = 6, - unit = 7, - weight_initializer = 8, - bias_initializer = 9, - filters = 10, - kernel_size = 11, - stride = 12, - padding = 13, - pool_size = 14, - pooling = 15, - flatten = 16, - name = 17, - momentum = 18, - moving_mean_initializer = 19, - moving_variance_initializer = 20, - gamma_initializer = 21, - beta_initializer = 22, - modelfile = 23, /** model file for loading config for backbone layer */ - input_layers = 24, - output_layers = 25, - trainable = 26, - flip_direction = 27, - random_translate = 28, - in_dim = 29, - out_dim = 30, - recurrent_activation = 31, - distribute = 32, - split_dimension = 33, - return_sequences = 34, - hidden_state_activation = 35, - dropout = 36, - unknown - }; - - /** - * @brief setProperty by PropertyType - * @note By passing empty string, this can validate if @a type is valid - * @param[in] type property type to be passed - * @param[in] value value to be passed, if empty string is passed, do nothing - * but throws error when @a type is invalid - * @exception exception::not_supported when property type is not valid for - * the particular layer - * @exception std::invalid_argument invalid argument - */ - virtual void setProperty(const PropertyType type, - const std::string &value = ""); - - /** - * @brief Copy Layer - * @param[in] l Layer to be copied - */ - virtual void copy(std::shared_ptr l); - - /** - * @brief check hyper parameter for the layer - * @retval #ML_ERROR_NONE Successful. - * @retval #ML_ERROR_INVALID_PARAMETER invalid parameter. - */ - virtual int checkValidation(); - - /** - * @brief Get the output dimension - * @return TensorDim dimension of the output - */ - virtual std::vector getOutputDimension() { return output_dim; } - - /** - * @brief Get the input dimension - * @return TensorDim dimension of the input - */ - virtual std::vector getInputDimension() { return input_dim; } - - /** - * @brief this function helps exporting the layer in a predefined format, - * while workarounding issue caused by templated function type eraser - * - * @param exporter exporter that conatins exporting logic - * @param method enum value to identify how it should be exported to - * @todo remove this when name is moved to layer_node - */ - virtual void - export_to(Exporter &exporter, - ExportMethods method = ExportMethods::METHOD_STRINGVECTOR) const { - exporter.saveResult(layer_props, method, this); - }; - - /** - * @brief get the loss value added by this layer - * @retval loss value - */ - virtual float getLoss() { return loss; } - - /** - * @brief check if this layer supports backwarding - * @note support backwarding primarily means that the layer can process the - * derivatives and return back the gradients to the previous layer. 
- * @return true if supports backwarding, else false - */ - virtual bool supportBackwarding() const { return true; }; - /** - * @brief get all weights of the layer - * @retval vector of all params - */ - virtual std::vector getWeights() { return weights; } - - /** - * @brief Preset modes for printing summary for the layer - */ - enum class PrintPreset { - PRINT_NONE = 0, /**< Print nothing */ - PRINT_SUMMARY, /**< Print preset including summary information */ - PRINT_SUMMARY_META, /**< Print summary preset that includes meta information - */ - PRINT_ALL /**< Print everything possible */ - }; - - /** - * @brief print using PrintPreset - * - * @param out oustream - * @param preset preset to be used - */ - virtual void printPreset(std::ostream &out, - PrintPreset preset = PrintPreset::PRINT_SUMMARY); - - /** - * @brief get data alias at param position. - * @exception std::out_of_range for index out of range - */ - virtual Weight &weightAt(const unsigned int position) { - return weights[position]; - } - - /** - * @brief Get the number of weights - * - * @return unsigned int number of weights - */ - virtual unsigned int getNumWeights() { return weights.size(); } - - /** - * @brief Set the batch for the layer - * @param batch Batch value to be set - */ - virtual void setBatch(unsigned int batch); - - /** - * @brief Get hidden tensors - * - * @return std::vector get outputs - */ - virtual std::vector getOutputs(); - - /** - * @brief Get derivatives tensors - * - * @return std::vector get derivatives - */ - virtual std::vector getDerivatives(); - - /** - * @brief Get the Input Ref object - * - * @return std::vector>& - */ - virtual const std::vector> &getInputRef() const { - return net_input; - } - - /** - * @brief Get the Output Ref object - * - * @return std::vector>& - */ - virtual const std::vector> &getOutputRef() const { - return net_hidden; - } - - /** - * @brief Get reference to the weights - * @retval Reference of the list of weights in the layer - */ - virtual std::vector &getWeightsRef() { return weights; } - - /** - * @brief Get the Weights Ref object - * - * @return const std::vector& refs of weights - */ - virtual const std::vector &getWeightsRef() const { return weights; } - - /** - * @brief Set the Input Buffers object - * - * @param inputs inputs to set - */ - virtual void setInputBuffers(std::vector> inputs) { - net_input = inputs; - } - - /** - * @brief Set output Buffers - * - * @param outputs output to set - */ - virtual void - setOutputBuffers(std::vector> outputs) { - net_hidden = outputs; - } - - /** - * @brief Initialize the layer - * - Weight(Height, Width), Bias(1, Width) - * @retval #ML_ERROR_NONE Successful. - * @retval #ML_ERROR_INVALID_PARAMETER invalid parameter. 
- */ - virtual int initialize(Manager &manager) = 0; - - /** - * @brief get number of input layers - * - * @return unsigned int input size - */ - virtual unsigned int getNumInputs() { return input_dim.size(); } - - /** - * @brief get number of output layers - * - * @return unsigned int output size - */ - virtual unsigned int getNumOutputs() { return output_dim.size(); } - - /** - * @brief set Number of Input Layers - * - * @param size size of inputs - */ - void setNumInputs(unsigned int size) { - if (size < 1) - throw std::invalid_argument("Minimum number of inputs must be 1"); - if (input_dim.size() != size) { - /** clear is intentional to clear any previously set input dimensions */ - input_dim.clear(); - input_dim.resize(size); - } - net_input.resize(size); - } - - /** - * @brief set Number of Output Layers - * - * @param size size of outputs - */ - void setNumOutputs(unsigned int size) { - if (size < 1) - throw std::invalid_argument("Minimum number of outputs must be 1"); - if (output_dim.size() != size) { - /** clear is intentional to clear any previously set output dimensions */ - output_dim.clear(); - output_dim.resize(size); - } - net_hidden.resize(size); - } - - /** - - * @brief Set the input dimension - * @param[in] d dimension to be set - */ - void setInputDimension(const std::vector &d) { input_dim = d; } - - /** - * @brief Set the input dimension - * @param[in] d dimension to be set - * @param[in] i axis - */ - void setInputDimension(const TensorDim &d, unsigned int i) { - if (i >= getNumInputs()) - throw std::out_of_range("Setting dimensions out of bounds"); - input_dim[i] = d; - } - - /** - * @brief If the current layer can support in-place - * - * @return true if inplace, else false - * @details all layers default to out of place execution - * @note all layers default to out of place execution - */ - virtual bool supportInPlace() const { return false; } - -protected: - /** - * @brief Print Options when printing layer info - */ - typedef enum { - // clang-format off - PRINT_INST_INFO = (1 << 0), /**< Option to print type & instance address info */ - PRINT_SHAPE_INFO = (1 << 1), /**< Option to print shape information, invalid before initiation*/ - PRINT_PROP = (1 << 2), /**< Option to print properties */ - PRINT_PROP_META = (1 << 3), /**< Option to print properties that describe meta info - e.g) layer activation type for non-activation layer. */ - PRINT_WEIGHTS = (1 << 4), /**< Option to print weights */ - PRINT_METRIC = (1 << 5) /**< Option to print metrics (currently loss only) */ - // clang-format on - } PrintOption; - - std::tuple<> layer_props; /**< supported properties of layer */ - - /** - * @brief Input Tensors - */ - std::vector> net_input; - - /** - * @brief Output Tensors - */ - std::vector> net_hidden; - - /** - * @brief Dimension of input activation - */ - std::vector input_dim; - - /** - * @brief Dimension of output activation - */ - std::vector output_dim; - - /** - * @brief Loss value added by this layer - */ - float loss; - - WeightRegularizer weight_regularizer; - - float weight_regularizer_constant; - - /** - * @brief initializer for weights - */ - WeightInitializer weight_initializer; - - /** - * @brief initializer for bias - */ - WeightInitializer bias_initializer; - - /** - * @brief weight_list in this layer. This contains all weights of the - * layer. 
- */ - std::vector weights; - -private: - /** - * @brief check if @a type is valid and print if prop is valid to @a out - */ - template - void printIfValid(std::ostream &out, const PropertyType type, T target); - - /** - * @brief anchor point to override if PRINT_SHAPE_INFO is enabled for - * Layer::print() - */ - virtual void printShapeInfo(std::ostream &out); - - /** - * @brief anchor point to override if PRINT_PROP_META is enabled for - * Layer::print() - */ - virtual void printPropertiesMeta(std::ostream &out); - - /** - * @brief anchor point to override if PRINT_PROP is enabled for Layer::print() - */ - virtual void printProperties(std::ostream &out); - - /** - * @brief anchor point to override if PRINT_METRIC is enabled for - * Layer::print() - */ - virtual void printMetric(std::ostream &out); - - /** - * @brief set Weight Initialization Type - * @param[in] wini WeightInitializer - */ - void setWeightInit(WeightInitializer wini) { weight_initializer = wini; } - - /** - * @brief Print layer related information. Do not override without clear - * reason. It is recommended to override printShapeInfo, printPropertiesMeta, - * printProperties, printMetric instead - * @param[in] out outstream - * @param[in] flags combination of LayerPrintOption - */ - virtual void print(std::ostream &out, unsigned int flags = 0); -}; - -/** - * @brief Overriding output stream for layers and it's derived class - */ -template ::value, T> * = nullptr> -std::ostream &operator<<(std::ostream &out, T &l) { - l.printPreset(out, LayerV1::PrintPreset::PRINT_SUMMARY); - return out; -} - -using CreateLayerV1Func = nntrainer::LayerV1 *(*)(); -using DestroyLayerV1Func = void (*)(nntrainer::LayerV1 *); - -/** - * @brief Layer Pluggable struct that enables pluggable layer - * - */ -typedef struct { - CreateLayerV1Func createfunc; /**< create layer function */ - DestroyLayerV1Func destroyfunc; /**< destory function */ -} LayerV1Pluggable; - -/** - * @brief pluggable layer must have this structure defined - */ -extern "C" LayerV1Pluggable ml_train_layerv1_pluggable; - -/** - * @brief General Layer Factory function to register Layer - * - * @param props property representation - * @return std::unique_ptr created object - */ -template ::value, T> * = nullptr> -std::unique_ptr -createLayer(const std::vector &props = {}) { - std::unique_ptr ptr = std::make_unique(); - - if (ptr->setProperty(props) != ML_ERROR_NONE) { - throw std::invalid_argument("Set properties failed for layer"); - } - return ptr; -} - -} // namespace nntrainer - -#endif /* __cplusplus */ -#endif /* __LAYER_H__ */ diff --git a/nntrainer/layers/layer_node.cpp b/nntrainer/layers/layer_node.cpp index 99d6aac..0039cae 100644 --- a/nntrainer/layers/layer_node.cpp +++ b/nntrainer/layers/layer_node.cpp @@ -15,6 +15,7 @@ #include #include #include +#include #include #include @@ -147,7 +148,7 @@ int LayerNode::setProperty(std::vector properties) { } bool LayerNode::setProperty(const std::string &key, const std::string &value) { - using PropertyType = nntrainer::LayerV1::PropertyType; + using PropertyType = nntrainer::Layer::PropertyType; PropertyType type = static_cast(parseLayerProperty(key)); switch (type) { @@ -387,8 +388,7 @@ typedef enum { // clang-format on } PrintOption; -void LayerNode::printPreset(std::ostream &out, LayerV1::PrintPreset preset) { - using PrintPreset = LayerV1::PrintPreset; +void LayerNode::printPreset(std::ostream &out, PrintPreset preset) { unsigned int flags = 0; switch (preset) { diff --git a/nntrainer/layers/layer_node.h 
b/nntrainer/layers/layer_node.h index f081c0a..2c4d4bd 100644 --- a/nntrainer/layers/layer_node.h +++ b/nntrainer/layers/layer_node.h @@ -33,7 +33,6 @@ #include #include #include -#include namespace nntrainer { @@ -73,7 +72,6 @@ public: */ ~LayerNode(); -public: /** * Support all the interface requirements by ml::train::Layer */ @@ -572,13 +570,24 @@ public: } /** + * @brief Preset modes for printing summary for the layer + */ + enum class PrintPreset { + PRINT_NONE = 0, /**< Print nothing */ + PRINT_SUMMARY, /**< Print preset including summary information */ + PRINT_SUMMARY_META, /**< Print summary preset that includes meta information + */ + PRINT_ALL /**< Print everything possible */ + }; + + /** * @brief print using PrintPreset * * @param out oustream * @param preset preset to be used */ - void printPreset(std::ostream &out, LayerV1::PrintPreset preset = - LayerV1::PrintPreset::PRINT_SUMMARY); + void printPreset(std::ostream &out, + PrintPreset preset = PrintPreset::PRINT_SUMMARY); private: std::unique_ptr diff --git a/nntrainer/layers/lstm.cpp b/nntrainer/layers/lstm.cpp index a7c3650..2b33b17 100644 --- a/nntrainer/layers/lstm.cpp +++ b/nntrainer/layers/lstm.cpp @@ -12,11 +12,11 @@ */ #include -#include #include #include #include #include +#include #include #include @@ -132,10 +132,10 @@ void LSTMLayer::setProperty(const std::vector &values) { void LSTMLayer::setProperty(const std::string &type_str, const std::string &value) { - using PropertyType = LayerV1::PropertyType; + using PropertyType = nntrainer::Layer::PropertyType; int status = ML_ERROR_NONE; - LayerV1::PropertyType type = - static_cast(parseLayerProperty(type_str)); + nntrainer::Layer::PropertyType type = + static_cast(parseLayerProperty(type_str)); // TODO : Add return_state property & api to get the hidden input switch (type) { diff --git a/nntrainer/layers/meson.build b/nntrainer/layers/meson.build index 440e23b..498be0f 100644 --- a/nntrainer/layers/meson.build +++ b/nntrainer/layers/meson.build @@ -12,7 +12,6 @@ layer_sources = [ 'flatten_layer.cpp', 'input_layer.cpp', 'output_layer.cpp', - 'layer.cpp', 'layer_node.cpp', 'pooling2d_layer.cpp', 'preprocess_flip_layer.cpp', @@ -33,10 +32,9 @@ layer_sources = [ layer_headers = [ 'layer_context.h', 'layer_devel.h', - 'layer_factory.h', - 'layer_internal.h', 'acti_func.h', 'layer_node.h', + 'layer_devel.h', 'common_properties.h', ] diff --git a/nntrainer/layers/nnstreamer_layer.cpp b/nntrainer/layers/nnstreamer_layer.cpp index dbb5b01..8836b2f 100644 --- a/nntrainer/layers/nnstreamer_layer.cpp +++ b/nntrainer/layers/nnstreamer_layer.cpp @@ -14,7 +14,6 @@ * nnstreamer data */ -#include #include #include #include @@ -163,9 +162,9 @@ void NNStreamerLayer::setProperty(const std::vector &values) { void NNStreamerLayer::setProperty(const std::string &type_str, const std::string &value) { - using PropertyType = LayerV1::PropertyType; - LayerV1::PropertyType type = - static_cast(parseLayerProperty(type_str)); + using PropertyType = nntrainer::Layer::PropertyType; + nntrainer::Layer::PropertyType type = + static_cast(parseLayerProperty(type_str)); switch (type) { case PropertyType::modelfile: { diff --git a/nntrainer/layers/output_layer.cpp b/nntrainer/layers/output_layer.cpp index 64244e7..c720d7b 100644 --- a/nntrainer/layers/output_layer.cpp +++ b/nntrainer/layers/output_layer.cpp @@ -12,7 +12,6 @@ */ #include -#include #include #include #include diff --git a/nntrainer/layers/plugged_layer.h b/nntrainer/layers/plugged_layer.h index bc09a48..ea7042d 100644 --- 
a/nntrainer/layers/plugged_layer.h +++ b/nntrainer/layers/plugged_layer.h @@ -36,7 +36,7 @@ public: layerImpl(pluggable->createfunc()), destroy_func(pluggable->destroyfunc) { NNTR_THROW_IF(layerImpl == nullptr, std::invalid_argument) - << "either create_func_ failed or cannot dynamic cast to layer_internal"; + << "either create_func_ failed or cannot dynamic cast to layer"; } /** diff --git a/nntrainer/layers/pooling2d_layer.cpp b/nntrainer/layers/pooling2d_layer.cpp index 564c1b3..82e434d 100644 --- a/nntrainer/layers/pooling2d_layer.cpp +++ b/nntrainer/layers/pooling2d_layer.cpp @@ -14,7 +14,6 @@ #include #include -#include #include #include #include @@ -241,10 +240,10 @@ void Pooling2DLayer::setProperty(const std::vector &values) { void Pooling2DLayer::setProperty(const std::string &type_str, const std::string &value) { - using PropertyType = LayerV1::PropertyType; + using PropertyType = nntrainer::Layer::PropertyType; int status = ML_ERROR_NONE; - LayerV1::PropertyType type = - static_cast(parseLayerProperty(type_str)); + nntrainer::Layer::PropertyType type = + static_cast(parseLayerProperty(type_str)); switch (type) { case PropertyType::pooling: diff --git a/nntrainer/layers/preprocess_flip_layer.cpp b/nntrainer/layers/preprocess_flip_layer.cpp index b421d2d..6408df5 100644 --- a/nntrainer/layers/preprocess_flip_layer.cpp +++ b/nntrainer/layers/preprocess_flip_layer.cpp @@ -13,7 +13,6 @@ #include -#include #include #include #include @@ -57,9 +56,9 @@ void PreprocessFlipLayer::setProperty(const std::vector &values) { void PreprocessFlipLayer::setProperty(const std::string &type_str, const std::string &value) { - using PropertyType = LayerV1::PropertyType; - LayerV1::PropertyType type = - static_cast(parseLayerProperty(type_str)); + using PropertyType = nntrainer::Layer::PropertyType; + nntrainer::Layer::PropertyType type = + static_cast(parseLayerProperty(type_str)); switch (type) { case PropertyType::flip_direction: { diff --git a/nntrainer/layers/preprocess_translate_layer.cpp b/nntrainer/layers/preprocess_translate_layer.cpp index e658649..7a35350 100644 --- a/nntrainer/layers/preprocess_translate_layer.cpp +++ b/nntrainer/layers/preprocess_translate_layer.cpp @@ -14,7 +14,6 @@ #include -#include #include #include #include @@ -82,10 +81,10 @@ void PreprocessTranslateLayer::setProperty( void PreprocessTranslateLayer::setProperty(const std::string &type_str, const std::string &value) { - using PropertyType = LayerV1::PropertyType; + using PropertyType = nntrainer::Layer::PropertyType; int status = ML_ERROR_NONE; - LayerV1::PropertyType type = - static_cast(parseLayerProperty(type_str)); + nntrainer::Layer::PropertyType type = + static_cast(parseLayerProperty(type_str)); switch (type) { case PropertyType::random_translate: { diff --git a/nntrainer/layers/rnn.cpp b/nntrainer/layers/rnn.cpp index 538ad0e..6fbb557 100644 --- a/nntrainer/layers/rnn.cpp +++ b/nntrainer/layers/rnn.cpp @@ -12,10 +12,10 @@ */ #include -#include #include #include #include +#include #include #include #include @@ -121,10 +121,10 @@ void RNNLayer::setProperty(const std::vector &values) { void RNNLayer::setProperty(const std::string &type_str, const std::string &value) { - using PropertyType = LayerV1::PropertyType; + using PropertyType = nntrainer::Layer::PropertyType; int status = ML_ERROR_NONE; - LayerV1::PropertyType type = - static_cast(parseLayerProperty(type_str)); + nntrainer::Layer::PropertyType type = + static_cast(parseLayerProperty(type_str)); // TODO : Add return_state property & api to get the hidden 
input switch (type) { diff --git a/nntrainer/layers/split_layer.cpp b/nntrainer/layers/split_layer.cpp index 169deab..0f39aee 100644 --- a/nntrainer/layers/split_layer.cpp +++ b/nntrainer/layers/split_layer.cpp @@ -12,7 +12,6 @@ */ #include -#include #include #include #include @@ -171,10 +170,10 @@ void SplitLayer::setProperty(const std::vector &values) { void SplitLayer::setProperty(const std::string &type_str, const std::string &value) { - using PropertyType = LayerV1::PropertyType; + using PropertyType = nntrainer::Layer::PropertyType; int status = ML_ERROR_NONE; - LayerV1::PropertyType type = - static_cast(parseLayerProperty(type_str)); + nntrainer::Layer::PropertyType type = + static_cast(parseLayerProperty(type_str)); switch (type) { case PropertyType::split_dimension: { diff --git a/nntrainer/layers/tflite_layer.cpp b/nntrainer/layers/tflite_layer.cpp index 26023e8..b2e9854 100644 --- a/nntrainer/layers/tflite_layer.cpp +++ b/nntrainer/layers/tflite_layer.cpp @@ -10,7 +10,6 @@ * @bug No known bugs except for NYI items */ -#include #include #include #include @@ -94,9 +93,9 @@ void TfLiteLayer::setProperty(const std::vector &values) { void TfLiteLayer::setProperty(const std::string &type_str, const std::string &value) { - using PropertyType = LayerV1::PropertyType; - LayerV1::PropertyType type = - static_cast(parseLayerProperty(type_str)); + using PropertyType = nntrainer::Layer::PropertyType; + nntrainer::Layer::PropertyType type = + static_cast(parseLayerProperty(type_str)); switch (type) { case PropertyType::modelfile: { diff --git a/nntrainer/layers/time_dist.cpp b/nntrainer/layers/time_dist.cpp index 522fb23..9d29aac 100644 --- a/nntrainer/layers/time_dist.cpp +++ b/nntrainer/layers/time_dist.cpp @@ -11,7 +11,6 @@ * */ -#include #include #include #include diff --git a/nntrainer/models/dynamic_training_optimization.cpp b/nntrainer/models/dynamic_training_optimization.cpp index 5d9084d..b47279f 100644 --- a/nntrainer/models/dynamic_training_optimization.cpp +++ b/nntrainer/models/dynamic_training_optimization.cpp @@ -15,7 +15,6 @@ #include #include -#include #include #include diff --git a/nntrainer/models/dynamic_training_optimization.h b/nntrainer/models/dynamic_training_optimization.h index b270d65..e0e9511 100644 --- a/nntrainer/models/dynamic_training_optimization.h +++ b/nntrainer/models/dynamic_training_optimization.h @@ -39,7 +39,8 @@ #include #include -#include +#include +#include #include namespace nntrainer { diff --git a/nntrainer/models/neuralnet.cpp b/nntrainer/models/neuralnet.cpp index c9cf8b1..176bc0e 100644 --- a/nntrainer/models/neuralnet.cpp +++ b/nntrainer/models/neuralnet.cpp @@ -821,7 +821,7 @@ void NeuralNetwork::printPreset(std::ostream &out, unsigned int preset) { if (preset > ML_TRAIN_SUMMARY_TENSOR) return; - LayerV1::PrintPreset layer_preset = LayerV1::PrintPreset::PRINT_NONE; + LayerNode::PrintPreset layer_preset = LayerNode::PrintPreset::PRINT_NONE; ///@todo match flags with preset unsigned int flags = PRINT_INST_INFO | PRINT_GRAPH_INFO | PRINT_PROP | @@ -829,11 +829,11 @@ void NeuralNetwork::printPreset(std::ostream &out, unsigned int preset) { switch (preset) { case ML_TRAIN_SUMMARY_TENSOR: - layer_preset = LayerV1::PrintPreset::PRINT_ALL; + layer_preset = LayerNode::PrintPreset::PRINT_ALL; break; case ML_TRAIN_SUMMARY_LAYER: - layer_preset = initialized ? LayerV1::PrintPreset::PRINT_SUMMARY - : LayerV1::PrintPreset::PRINT_SUMMARY_META; + layer_preset = initialized ? 
LayerNode::PrintPreset::PRINT_SUMMARY + : LayerNode::PrintPreset::PRINT_SUMMARY_META; break; case ML_TRAIN_SUMMARY_MODEL: break; @@ -845,7 +845,7 @@ void NeuralNetwork::printPreset(std::ostream &out, unsigned int preset) { } void NeuralNetwork::print(std::ostream &out, unsigned int flags, - LayerV1::PrintPreset layerPrintPreset) { + LayerNode::PrintPreset layerPrintPreset) { if (flags & PRINT_INST_INFO) { out << "==================="; printInstance(out, this); diff --git a/nntrainer/models/neuralnet.h b/nntrainer/models/neuralnet.h index 78b4749..c94b21f 100644 --- a/nntrainer/models/neuralnet.h +++ b/nntrainer/models/neuralnet.h @@ -555,8 +555,8 @@ private: * @param[in] Layer::PrintPreset print preset when to print layer properties */ void print(std::ostream &out, unsigned int flags = 0, - LayerV1::PrintPreset layerPrintPreset = - LayerV1::PrintPreset::PRINT_SUMMARY); + LayerNode::PrintPreset layerPrintPreset = + LayerNode::PrintPreset::PRINT_SUMMARY); /** * @brief Set Loss diff --git a/nntrainer/utils/node_exporter.cpp b/nntrainer/utils/node_exporter.cpp index 4d1afac..9479bac 100644 --- a/nntrainer/utils/node_exporter.cpp +++ b/nntrainer/utils/node_exporter.cpp @@ -46,7 +46,8 @@ Exporter::getResult() noexcept { } template <> -void Exporter::saveTflResult(const std::tuple<> &props, const LayerV1 *self) { +void Exporter::saveTflResult(const std::tuple<> &props, + const nntrainer::Layer *self) { createIfNull(tf_node); } diff --git a/nntrainer/utils/node_exporter.h b/nntrainer/utils/node_exporter.h index 72b6971..f7b8780 100644 --- a/nntrainer/utils/node_exporter.h +++ b/nntrainer/utils/node_exporter.h @@ -208,14 +208,6 @@ class Distribute; class Trainable; } // namespace props -class LayerV1; -/** - * @copydoc template void - * Exporter::saveTflResult(const PropsType &props, const NodeType *self); - */ -template <> -void Exporter::saveTflResult(const std::tuple<> &props, const LayerV1 *self); - class LayerNode; /** * @copydoc template void diff --git a/nntrainer/utils/parse_util.cpp b/nntrainer/utils/parse_util.cpp index c1ff734..afee2a8 100644 --- a/nntrainer/utils/parse_util.cpp +++ b/nntrainer/utils/parse_util.cpp @@ -28,7 +28,6 @@ #include #include -#include #include #include #include @@ -280,7 +279,7 @@ unsigned int parseLayerProperty(std::string property) { } } - return (unsigned int)LayerV1::PropertyType::unknown; + return (unsigned int)Layer::PropertyType::unknown; } std::string propToStr(unsigned int type) { return property_string[type]; } diff --git a/packaging/nntrainer.spec b/packaging/nntrainer.spec index 26cd99c..5437c76 100644 --- a/packaging/nntrainer.spec +++ b/packaging/nntrainer.spec @@ -448,7 +448,6 @@ cp -r result %{buildroot}%{_datadir}/nntrainer/unittest/ %{_includedir}/nntrainer/databuffer_factory.h %{_includedir}/nntrainer/layer_context.h %{_includedir}/nntrainer/layer_devel.h -%{_includedir}/nntrainer/layer_internal.h %{_includedir}/nntrainer/neuralnet.h %{_includedir}/nntrainer/tensor.h %{_includedir}/nntrainer/tensor_dim.h diff --git a/test/unittest/unittest_nntrainer_appcontext.cpp b/test/unittest/unittest_nntrainer_appcontext.cpp index 236c36e..208aa62 100644 --- a/test/unittest/unittest_nntrainer_appcontext.cpp +++ b/test/unittest/unittest_nntrainer_appcontext.cpp @@ -148,15 +148,11 @@ public: * * @todo solidify the api signature */ -class CustomLayer : public nntrainer::LayerV1 { +class CustomLayer : public nntrainer::Layer { public: inline static const std::string type = "identity_layer"; - int setProperty(std::vector values) override { return 1; } - - int 
checkValidation() override { return 1; } - - float getLoss() override { return 0.0f; } + void setProperty(const std::vector &values) override {} const std::string getType() const override { return CustomLayer::type; } }; diff --git a/test/unittest/unittest_nntrainer_layers.cpp b/test/unittest/unittest_nntrainer_layers.cpp index dc27d0d..a0bec72 100644 --- a/test/unittest/unittest_nntrainer_layers.cpp +++ b/test/unittest/unittest_nntrainer_layers.cpp @@ -23,7 +23,6 @@ #include #include #include -#include #include #include #include @@ -260,7 +259,7 @@ TEST_F(nntrainer_InputLayer, initialize_01_p) { TEST_F(nntrainer_InputLayer, set_property_01_n) { EXPECT_THROW( - layer.setProperty(nntrainer::LayerV1::PropertyType::input_shape, "0:3:2:1"), + layer.setProperty(nntrainer::Layer::PropertyType::input_shape, "0:3:2:1"), std::invalid_argument); } @@ -907,7 +906,7 @@ protected: sharedTensor label; std::vector new_w; std::vector grad; - std::vector> layers; + std::vector> layers; nntrainer::LossType loss_type = nntrainer::LossType::LOSS_UNKNOWN; }; @@ -1363,7 +1362,7 @@ protected: TEST_F(nntrainer_Conv2DLayer, print_01_p) { std::stringstream ss, ss2; - layer.printPreset(ss, nntrainer::LayerV1::PrintPreset::PRINT_ALL); + layer.printPreset(ss, nntrainer::Layer::PrintPreset::PRINT_ALL); ss2 << layer; EXPECT_GT(ss.str().size(), 100u); EXPECT_GT(ss2.str().size(), 100u); @@ -2083,13 +2082,13 @@ TEST(nntrainer_LossLayer, setProperty_through_vector_n) { TEST(nntrainer_LossLayer, setProperty_individual_01_n) { nntrainer::LossLayer layer; EXPECT_THROW( - layer.setProperty(nntrainer::LayerV1::PropertyType::filters, "1:2"), + layer.setProperty(nntrainer::Layer::PropertyType::filters, "1:2"), nntrainer::exception::not_supported); } TEST(nntrainer_LossLayer, setProperty_individual_02_n) { nntrainer::LossLayer layer; - EXPECT_THROW(layer.setProperty(nntrainer::LayerV1::PropertyType::filters, + EXPECT_THROW(layer.setProperty(nntrainer::Layer::PropertyType::filters, "invalid_string"), nntrainer::exception::not_supported); } -- 2.7.4
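Note for downstream custom-layer authors: the CustomLayer change in test/unittest/unittest_nntrainer_appcontext.cpp above is the whole migration in miniature. The sketch below restates it with the std::string template arguments spelled out (the rendering of the diff drops them); it is the identity stub from that test, not a complete layer, and it assumes only what this patch itself provides, namely nntrainer::Layer in layer_devel.h.

  #include <string>
  #include <vector>

  #include <layer_devel.h> /* home of nntrainer::Layer after this patch */

  class CustomLayer : public nntrainer::Layer {
  public:
    inline static const std::string type = "identity_layer";

    /* LayerV1 took std::vector<std::string> by value and returned a
     * status int; the new interface takes a const reference and returns
     * void, reporting bad input through exceptions instead of codes. */
    void setProperty(const std::vector<std::string> &values) override {}

    /* The checkValidation() and getLoss() overrides of the LayerV1
     * version are simply gone; neither remains part of the
     * developer-facing layer interface. */
    const std::string getType() const override { return CustomLayer::type; }
  };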
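Most of the churn across the layer sources above is one mechanical substitution: each string-keyed setProperty() helper now switches on nntrainer::Layer::PropertyType from layer_devel.h instead of LayerV1::PropertyType. Below is a sketch of the resulting shape with the static_cast target restored (the diff rendering drops it). SomeLayer is hypothetical and its class declaration is elided; parseLayerProperty() is the existing helper implemented in nntrainer/utils/parse_util.cpp.

  void SomeLayer::setProperty(const std::string &type_str,
                              const std::string &value) {
    /* was: using PropertyType = LayerV1::PropertyType; */
    using PropertyType = nntrainer::Layer::PropertyType;
    nntrainer::Layer::PropertyType type =
      static_cast<PropertyType>(parseLayerProperty(type_str));

    switch (type) {
    case PropertyType::unit:
      /* parse `value` and store it on the layer here */
      break;
    default:
      /* unmatched keys are handled per layer: forwarded to the parent
       * implementation or rejected with an exception */
      break;
    }
  }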
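Printing moved together with its enum: PrintPreset now lives on LayerNode (see the layer_node.h hunk above), and NeuralNetwork::print() accordingly takes a LayerNode::PrintPreset. A minimal usage sketch; dumpNode() is a hypothetical caller.

  #include <iostream>

  #include <layer_node.h>

  void dumpNode(nntrainer::LayerNode &node) {
    /* was: nntrainer::LayerV1::PrintPreset::PRINT_SUMMARY */
    node.printPreset(std::cout,
                     nntrainer::LayerNode::PrintPreset::PRINT_SUMMARY);
  }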