This patch deletes the LayerV1 headers and their implementations.
Some of the relevant code from LayerV1 is moved to either LayerNode
or LayerDevel.
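
The bulk of the mechanical churn is re-scoping the property enum:
every setProperty() that named LayerV1::PropertyType now names the
same enum declared on nntrainer::Layer. Roughly, in each layer:

    // before
    using PropertyType = LayerV1::PropertyType;
    LayerV1::PropertyType type =
      static_cast<LayerV1::PropertyType>(parseLayerProperty(type_str));

    // after
    using PropertyType = nntrainer::Layer::PropertyType;
    nntrainer::Layer::PropertyType type =
      static_cast<nntrainer::Layer::PropertyType>(parseLayerProperty(type_str));
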
Signed-off-by: Parichay Kapoor <pk.kapoor@samsung.com>
#include <app_context.h>
#include <layer.h>
-#include <layer_internal.h>
const char *NNTRAINER_PATH = std::getenv("NNTRAINER_PATH");
#include <time.h>
#include <dataset.h>
-#include <layer_internal.h>
#include <ml-api-common.h>
#include <neuralnet.h>
#include <tensor.h>
/usr/include/nntrainer/databuffer_factory.h
/usr/include/nntrainer/layer_context.h
/usr/include/nntrainer/layer_devel.h
-/usr/include/nntrainer/layer_internal.h
/usr/include/nntrainer/neuralnet.h
/usr/include/nntrainer/tensor.h
/usr/include/nntrainer/tensor_dim.h
$(NNTRAINER_ROOT)/nntrainer/tensor/weight.cpp \
$(NNTRAINER_ROOT)/nntrainer/tensor/tensor_dim.cpp \
$(NNTRAINER_ROOT)/nntrainer/tensor/blas_interface.cpp \
- $(NNTRAINER_ROOT)/nntrainer/layers/layer.cpp \
$(NNTRAINER_ROOT)/nntrainer/layers/layer_node.cpp \
$(NNTRAINER_ROOT)/nntrainer/layers/input_layer.cpp \
$(NNTRAINER_ROOT)/nntrainer/layers/output_layer.cpp \
#include <unordered_map>
#include <vector>
+#include <layer.h>
#include <layer_devel.h>
-#include <layer_internal.h>
#include <optimizer.h>
#include <nntrainer_error.h>
}
private:
- FactoryMap<ml::train::Optimizer, nntrainer::LayerV1, nntrainer::Layer>
- factory_map;
+ FactoryMap<ml::train::Optimizer, nntrainer::Layer> factory_map;
std::string working_path_base;
};
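
For context on the FactoryMap change above: the map presumably keeps one
name-to-factory registry per creatable interface type, so dropping
nntrainer::LayerV1 from the template arguments deletes its registry
wholesale. A rough, illustrative sketch of the shape such a variadic
registry could take (the real definition lives elsewhere in app_context.h;
this sketch is an assumption, not the project's code):

    #include <functional>
    #include <map>
    #include <memory>
    #include <string>
    #include <tuple>

    // hypothetical: one name -> factory-function map per registered type
    template <typename... Ts>
    using FactoryMap =
      std::tuple<std::map<std::string, std::function<std::unique_ptr<Ts>()>>...>;

    // after this patch the map only tracks the two remaining types:
    // FactoryMap<ml::train::Optimizer, nntrainer::Layer> factory_map;
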
#include <activation_layer.h>
#include <blas_interface.h>
-#include <layer_internal.h>
#include <lazy_tensor.h>
#include <nntrainer_error.h>
#include <nntrainer_log.h>
void ActivationLayer::setProperty(const std::string &type_str,
const std::string &value) {
- using PropertyType = LayerV1::PropertyType;
- LayerV1::PropertyType type =
- static_cast<LayerV1::PropertyType>(parseLayerProperty(type_str));
+ using PropertyType = nntrainer::Layer::PropertyType;
+ nntrainer::Layer::PropertyType type =
+ static_cast<nntrainer::Layer::PropertyType>(parseLayerProperty(type_str));
switch (type) {
case PropertyType::activation: {
*/
#include <addition_layer.h>
-#include <layer_internal.h>
#include <nntrainer_error.h>
#include <nntrainer_log.h>
#include <parse_util.h>
*/
#include <bn_layer.h>
-#include <layer_internal.h>
#include <lazy_tensor.h>
#include <nntrainer_error.h>
#include <nntrainer_log.h>
void BatchNormalizationLayer::setProperty(const std::string &type_str,
const std::string &value) {
- using PropertyType = LayerV1::PropertyType;
+ using PropertyType = nntrainer::Layer::PropertyType;
int status = ML_ERROR_NONE;
- LayerV1::PropertyType type =
- static_cast<LayerV1::PropertyType>(parseLayerProperty(type_str));
+ nntrainer::Layer::PropertyType type =
+ static_cast<nntrainer::Layer::PropertyType>(parseLayerProperty(type_str));
switch (type) {
case PropertyType::epsilon:
#include <concat_layer.h>
#include <cstring>
-#include <layer_internal.h>
#include <nntrainer_error.h>
#include <nntrainer_log.h>
#include <parse_util.h>
#include <blas_interface.h>
#include <conv2d_layer.h>
-#include <layer_internal.h>
#include <lazy_tensor.h>
#include <nntrainer_error.h>
#include <nntrainer_log.h>
void Conv2DLayer::setProperty(const std::string &type_str,
const std::string &value) {
- using PropertyType = LayerV1::PropertyType;
+ using PropertyType = nntrainer::Layer::PropertyType;
int status = ML_ERROR_NONE;
- LayerV1::PropertyType type =
- static_cast<LayerV1::PropertyType>(parseLayerProperty(type_str));
+ nntrainer::Layer::PropertyType type =
+ static_cast<nntrainer::Layer::PropertyType>(parseLayerProperty(type_str));
switch (type) {
case PropertyType::filters: {
*/
#include <embedding.h>
-#include <layer_internal.h>
#include <lazy_tensor.h>
#include <nntrainer_error.h>
#include <nntrainer_log.h>
void EmbeddingLayer::setProperty(const std::string &type_str,
const std::string &value) {
- using PropertyType = LayerV1::PropertyType;
+ using PropertyType = nntrainer::Layer::PropertyType;
int status = ML_ERROR_NONE;
- LayerV1::PropertyType type =
- static_cast<LayerV1::PropertyType>(parseLayerProperty(type_str));
+ nntrainer::Layer::PropertyType type =
+ static_cast<nntrainer::Layer::PropertyType>(parseLayerProperty(type_str));
switch (type) {
case PropertyType::in_dim: {
#define __LOSS_LAYER_H__
#ifdef __cplusplus
-#include <layer_internal.h>
#include <tensor.h>
namespace nntrainer {
#include <cmath>
#include <gru.h>
-#include <layer_internal.h>
#include <lazy_tensor.h>
#include <nntrainer_error.h>
#include <nntrainer_log.h>
+#include <node_exporter.h>
#include <parse_util.h>
#include <util_func.h>
void GRULayer::setProperty(const std::string &type_str,
const std::string &value) {
- using PropertyType = LayerV1::PropertyType;
+ using PropertyType = nntrainer::Layer::PropertyType;
int status = ML_ERROR_NONE;
- LayerV1::PropertyType type =
- static_cast<LayerV1::PropertyType>(parseLayerProperty(type_str));
+ nntrainer::Layer::PropertyType type =
+ static_cast<nntrainer::Layer::PropertyType>(parseLayerProperty(type_str));
// TODO : Add return_state property & api to get the hidden input
switch (type) {
*/
#include <input_layer.h>
-#include <layer_internal.h>
#include <nntrainer_error.h>
#include <nntrainer_log.h>
#include <parse_util.h>
void InputLayer::setProperty(const std::string &type_str,
const std::string &value) {
- using PropertyType = LayerV1::PropertyType;
+ using PropertyType = nntrainer::Layer::PropertyType;
int status = ML_ERROR_NONE;
- LayerV1::PropertyType type =
- static_cast<LayerV1::PropertyType>(parseLayerProperty(type_str));
+ nntrainer::Layer::PropertyType type =
+ static_cast<nntrainer::Layer::PropertyType>(parseLayerProperty(type_str));
switch (type) {
case PropertyType::normalization: {
+++ /dev/null
-/**
- * Copyright (C) 2019 Samsung Electronics Co., Ltd. All Rights Reserved.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * http://www.apache.org/licenses/LICENSE-2.0
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- *
- * @file layer.cpp
- * @date 04 December 2019
- * @brief This is Layers Classes for Neural Network
- * @see https://github.com/nnstreamer/nntrainer
- * @author Jijoong Moon <jijoong.moon@samsung.com>
- * @bug No known bugs except for NYI items
- *
- */
-#include <ostream>
-#include <sstream>
-
-#include <layer_internal.h>
-#include <layer_node.h>
-#include <nntrainer_error.h>
-#include <nntrainer_log.h>
-#include <parse_util.h>
-#include <util_func.h>
-
-namespace nntrainer {
-
-int LayerV1::checkValidation() { return ML_ERROR_NONE; }
-
-void LayerV1::setBatch(unsigned int batch) {
- for (unsigned int idx = 0; idx < getNumInputs(); ++idx)
- input_dim[idx].setTensorDim(0, batch);
-
- for (unsigned int idx = 0; idx < getNumOutputs(); ++idx)
- output_dim[idx].setTensorDim(0, batch);
-}
-
-std::vector<Tensor> LayerV1::getOutputs() {
- std::vector<Tensor> ret;
- for (unsigned int i = 0; i < getNumOutputs(); ++i) {
- ret.push_back(net_hidden[i]->getVariableRef());
- }
- return ret;
-}
-
-std::vector<Tensor> LayerV1::getDerivatives() {
- std::vector<Tensor> ret;
- for (unsigned int i = 0; i < getNumInputs(); ++i) {
- ret.push_back(net_input[i]->getGradientRef());
- }
- return ret;
-}
-
-void LayerV1::copy(std::shared_ptr<LayerV1> l) {
- for (auto const &w : l->weights)
- weights.push_back(w.clone());
-
- this->input_dim = l->input_dim;
- this->output_dim = l->output_dim;
- this->loss = l->loss;
- this->weight_regularizer = l->weight_regularizer;
- this->weight_regularizer_constant = l->weight_regularizer_constant;
- this->weight_initializer = l->weight_initializer;
-}
-
-sharedConstTensors LayerV1::forwarding_with_val(sharedConstTensors input,
- sharedConstTensors label,
- bool training) {
-
- if (getNumInputs() != input.size()) {
- std::stringstream ss;
- ss << "Number of inputs mismatched, given: " << input.size()
- << " expected: " << getNumInputs();
- throw std::invalid_argument(ss.str().c_str());
- }
-
- for (unsigned int i = 0; i < getNumInputs(); ++i) {
- net_input[i]->getVariableRef() = input[i]->clone();
- }
-
- if (!label.empty()) {
- for (unsigned int i = 0; i < getNumOutputs(); ++i) {
- net_hidden[i]->getGradientRef() = label[i]->clone();
- }
- }
-
- forwarding(training);
-
- nntrainer::sharedConstTensors out;
-
- for (unsigned int i = 0; i < getNumOutputs(); ++i) {
- out.push_back(MAKE_SHARED_TENSOR(net_hidden[i]->getVariable()));
- }
-
- return out;
-}
-
-sharedConstTensors LayerV1::backwarding_with_val(sharedConstTensors label) {
-
- for (unsigned int i = 0; i < getNumOutputs(); ++i) {
- net_hidden[i]->getGradientRef() = label[i]->clone();
- }
-
- backwarding();
-
- nntrainer::sharedConstTensors out;
-
- for (unsigned int i = 0; i < getNumInputs(); ++i) {
- out.push_back(MAKE_SHARED_TENSOR(net_input[i]->getGradient()));
- }
-
- return out;
-}
-
-void LayerV1::read(std::ifstream &file) {
- for (auto &weight : weights) {
- weight.getVariableRef().read(file);
- }
-}
-
-void LayerV1::save(std::ofstream &file) {
- for (auto &weight : weights) {
- weight.getVariableRef().save(file);
- }
-}
-
-int LayerV1::setProperty(std::vector<std::string> values) {
- int status = ML_ERROR_NONE;
-
- try {
- values = loadProperties(values, layer_props);
- } catch (std::invalid_argument &e) {
- ml_loge("parsing property failed, reason: %s", e.what());
- return ML_ERROR_INVALID_PARAMETER;
- }
-
- /// @todo: deprecate this in favor of loadProperties
- for (unsigned int i = 0; i < values.size(); ++i) {
- std::string key;
- std::string value;
-
- status = getKeyValue(values[i], key, value);
- NN_RETURN_STATUS();
-
- unsigned int type = parseLayerProperty(key);
-
- if (value.empty()) {
- ml_logd("value is empty: key: %s, value: %s", key.c_str(), value.c_str());
- return ML_ERROR_INVALID_PARAMETER;
- }
-
- try {
- /// @note this calls derived setProperty if available
- setProperty(static_cast<PropertyType>(type), value);
- } catch (...) {
- ml_logd("value or key is not valid, key: %s, value: %s", key.c_str(),
- value.c_str());
- return ML_ERROR_INVALID_PARAMETER;
- }
- }
- return status;
-}
-
-void LayerV1::setProperty(const PropertyType type, const std::string &value) {
- int status = ML_ERROR_NONE;
-
- switch (type) {
- case PropertyType::weight_regularizer:
- if (!value.empty()) {
- weight_regularizer =
- (WeightRegularizer)parseType(value, TOKEN_WEIGHT_REGULARIZER);
- if (weight_regularizer == WeightRegularizer::UNKNOWN) {
- throw std::invalid_argument("[Layer] Unknown Weight decay");
- }
- }
- break;
- case PropertyType::weight_regularizer_constant:
- if (!value.empty()) {
- status = setFloat(weight_regularizer_constant, value);
- throw_status(status);
- }
- break;
- case PropertyType::weight_initializer:
- if (!value.empty()) {
- weight_initializer =
- (WeightInitializer)parseType(value, TOKEN_WEIGHT_INIT);
- }
- break;
- case PropertyType::bias_initializer:
- if (!value.empty()) {
- bias_initializer = (WeightInitializer)parseType(value, TOKEN_WEIGHT_INIT);
- }
- break;
- default:
- std::string msg =
- "[Layer] Unknown Layer Property Key for value " + std::string(value);
- throw exception::not_supported(msg);
- }
-}
-
-template <typename T>
-void LayerV1::printIfValid(std::ostream &out, const PropertyType type,
- const T target) {
- try {
- setProperty(type);
- } catch (exception::not_supported &e) {
- return;
- }
-
- out << propToStr(static_cast<unsigned int>(type)) << ": " << target
- << std::endl;
-}
-
-void LayerV1::printShapeInfo(std::ostream &out) {
- for (unsigned int idx = 0; idx < getNumInputs(); ++idx) {
- out << "input " << input_dim[idx];
- for (unsigned int i = 0; i < weights.size(); i++)
- out << "inner" << i << " " << weightAt(i).getVariable().getDim();
- }
- for (unsigned int idx = 0; idx < getNumOutputs(); ++idx) {
- out << "output " << output_dim[idx];
- }
-}
-
-void LayerV1::printPropertiesMeta(std::ostream &out) {
- // printIfValid(
- // out, PropertyType::activation,
- // static_cast<std::underlying_type<ActivationType>::type>(activation_type));
-}
-
-void LayerV1::printProperties(std::ostream &out) {
- // out << "Trainable: " << trainable << std::endl;
- printIfValid(out, PropertyType::weight_regularizer,
- static_cast<int>(weight_regularizer));
- printIfValid(out, PropertyType::weight_regularizer_constant,
- weight_regularizer_constant);
-}
-
-void LayerV1::printMetric(std::ostream &out) {
- if (loss > 0) {
- out << "Weight regularization loss: " << loss;
- }
-}
-
-void LayerV1::printPreset(std::ostream &out, PrintPreset preset) {
- unsigned int flags = 0;
- switch (preset) {
- case PrintPreset::PRINT_ALL:
- flags = PRINT_WEIGHTS | PRINT_METRIC;
- /// fall through intended
- case PrintPreset::PRINT_SUMMARY_META:
- flags |= PRINT_PROP_META;
- /// fall through intended
- case PrintPreset::PRINT_SUMMARY:
- flags |= PRINT_INST_INFO | PRINT_SHAPE_INFO | PRINT_PROP | PRINT_PROP_META;
- break;
- case PrintPreset::PRINT_NONE:
- return;
- default:
- throw ::std::invalid_argument("undefined preset given");
- }
- print(out, flags);
-}
-
-void LayerV1::print(std::ostream &out, unsigned int flags) {
- /** @todo properly move print to LayerNode */
- if (flags & PRINT_INST_INFO) {
- out << "===================";
- // if (getName().empty())
- // printInstance(out, this);
- // else
- // out << "<" << getName() << ">" << std::endl;
-
- out << "Layer Type: " << getType() << std::endl;
- }
-
- if (flags & PRINT_SHAPE_INFO) {
- out << "======shape information: " << std::endl;
- printShapeInfo(out);
- }
-
- if (flags & PRINT_PROP_META) {
- out << "======meta properties: " << std::endl;
- printPropertiesMeta(out);
- }
-
- if (flags & PRINT_PROP) {
- out << "======properties: " << std::endl;
- printProperties(out);
- }
-
- if (flags & PRINT_WEIGHTS) {
- out << "======weights: " << std::endl;
- for (auto const &weight : weights) {
- out << '[' << weight.getName() << ']' << std::endl;
- out << weight.getVariable();
- }
- }
-
- if (flags & PRINT_METRIC) {
- out << "======metrics: " << std::endl;
- printMetric(out);
- }
-};
-
-} /* namespace nntrainer */
public:
/**
+ * @brief Property Enumeration
+ * 0. input shape : string
+ * 1. normalization : bool
+ * 2. standardization : bool
+ * 3. activation : string (type)
+ * 4. epsilon : float
+ * 5. weight_regularizer : string (type)
+ * 6. weight_regularizer_constant : float
+ * 7. unit : int
+ * 8. weight_initializer : string (type)
+ * 9. bias_initializer : string (type)
+ * 10. filters : int
+ * 11. kernel_size : ( n , m )
+ * 12. stride : ( n, m )
+ * 13. padding : ( n, m )
+ * 14. pool_size : ( n,m )
+ * 15. pooling : max, average, global_max, global_average
+ * 16. flatten : bool
+ * 17. name : string (type)
+ * 18. momentum : float,
+ * 19. moving_mean_initializer : string (type),
+ * 20. moving_variance_initializer : string (type),
+ * 21. gamma_initializer : string (type),
+ * 22. beta_initializer" : string (type)
+ * 23. modelfile : model file for loading config for backbone layer
+ * 24. input_layers : string (type)
+ * 25. output_layers : string (type)
+ * 26. trainable :
+ * 27. flip_direction
+ * 28. random_translate
+ * 29. in_dim : int ( input dimension for embedding layer )
+ * 30. out_dim : int ( output dimension for embedding layer )
+ * 31. recurrent_activation : string (type) - lstm
+ * 32. distribute : bool
+ * 33. split_dimension : string (type)
+ * 34. return_sequences : bool (type) - lstm
+ * 35. hidden_state_activation : string (type) - lstm
+ */
+ enum class PropertyType {
+ input_shape = 0,
+ normalization = 1,
+ standardization = 2,
+ activation = 3,
+ epsilon = 4,
+ weight_regularizer = 5,
+ weight_regularizer_constant = 6,
+ unit = 7,
+ weight_initializer = 8,
+ bias_initializer = 9,
+ filters = 10,
+ kernel_size = 11,
+ stride = 12,
+ padding = 13,
+ pool_size = 14,
+ pooling = 15,
+ flatten = 16,
+ name = 17,
+ momentum = 18,
+ moving_mean_initializer = 19,
+ moving_variance_initializer = 20,
+ gamma_initializer = 21,
+ beta_initializer = 22,
+ modelfile = 23, /** model file for loading config for backbone layer */
+ input_layers = 24,
+ output_layers = 25,
+ trainable = 26,
+ flip_direction = 27,
+ random_translate = 28,
+ in_dim = 29,
+ out_dim = 30,
+ recurrent_activation = 31,
+ distribute = 32,
+ split_dimension = 33,
+ return_sequences = 34,
+ hidden_state_activation = 35,
+ unknown
+ };
+
+ /**
* @brief Destructor of Layer Class
*/
virtual ~Layer() = default;
#include <vector>
#include <common_properties.h>
-#include <layer_internal.h>
#include <nntrainer_error.h>
#include <nntrainer_log.h>
#include <node_exporter.h>
void LayerImpl::setProperty(const std::string &type_str,
const std::string &value) {
+ using PropertyType = nntrainer::Layer::PropertyType;
+
int status = ML_ERROR_NONE;
- LayerV1::PropertyType type =
- static_cast<LayerV1::PropertyType>(parseLayerProperty(type_str));
+ nntrainer::Layer::PropertyType type =
+ static_cast<nntrainer::Layer::PropertyType>(parseLayerProperty(type_str));
switch (type) {
- case LayerV1::PropertyType::weight_regularizer:
+ case PropertyType::weight_regularizer:
if (!value.empty()) {
weight_regularizer =
(WeightRegularizer)parseType(value, TOKEN_WEIGHT_REGULARIZER);
}
}
break;
- case LayerV1::PropertyType::weight_regularizer_constant:
+ case PropertyType::weight_regularizer_constant:
if (!value.empty()) {
status = setFloat(weight_regularizer_constant, value);
throw_status(status);
}
break;
- case LayerV1::PropertyType::weight_initializer:
+ case PropertyType::weight_initializer:
if (!value.empty()) {
weight_initializer =
(WeightInitializer)parseType(value, TOKEN_WEIGHT_INIT);
}
break;
- case LayerV1::PropertyType::bias_initializer:
+ case PropertyType::bias_initializer:
if (!value.empty()) {
bias_initializer = (WeightInitializer)parseType(value, TOKEN_WEIGHT_INIT);
}
+++ /dev/null
-/**
- * Copyright (C) 2019 Samsung Electronics Co., Ltd. All Rights Reserved.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * http://www.apache.org/licenses/LICENSE-2.0
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- * @file layer_internal.h
- * @date 04 December 2019
- * @brief This is Layer classes of Neural Network
- * @see https://github.com/nnstreamer/nntrainer
- * @author Jijoong Moon <jijoong.moon@samsung.com>
- * @bug No known bugs except for NYI items
- *
- */
-#ifndef __LAYER_H__
-#define __LAYER_H__
-#ifdef __cplusplus
-
-#include <memory>
-#include <set>
-#include <tuple>
-#include <vector>
-
-#include <acti_func.h>
-#include <common_properties.h>
-#include <layer.h>
-#include <manager.h>
-#include <node_exporter.h>
-#include <optimizer_devel.h>
-#include <tensor.h>
-#include <tensor_dim.h>
-#include <weight.h>
-
-namespace nntrainer {
-
-/**
- * @class Layer Base class for layers
- * @brief Base class for all layers
- *
- * @details nntrainer::Layer inherits ml::train::Layer but has been ommitted to
- * disallow static_cast between nntrainer::Layer and ml::train::Layer objects.
- */
-class LayerV1 {
-
- /** model classes can call private methods which arent exposed to public */
- friend class NeuralNetwork;
- friend class NetworkGraph;
-
-public:
- /**
- * @brief Constructor of Layer Class
- */
- LayerV1(
- WeightRegularizer weight_regularizer_ = WeightRegularizer::NONE,
- const float weight_regularizer_constant_ = 1.0f,
- WeightInitializer weight_initializer_ =
- WeightInitializer::WEIGHT_XAVIER_UNIFORM,
- WeightInitializer bias_initializer_ = WeightInitializer::WEIGHT_ZEROS) :
- layer_props(),
- loss(0.0f),
- weight_regularizer(weight_regularizer_),
- weight_regularizer_constant(weight_regularizer_constant_),
- weight_initializer(weight_initializer_),
- bias_initializer(bias_initializer_) {
- setNumInputs(1);
- setNumOutputs(1);
- }
-
- /**
- * @brief Destructor of Layer Class
- */
- virtual ~LayerV1() = default;
-
- /**
- * @brief Move constructor of Layer.
- * @param[in] Layer &&
- */
- LayerV1(LayerV1 &&rhs) noexcept = default;
-
- /**
- * @brief Move assignment operator.
- * @parma[in] rhs Layer to be moved.
- */
- virtual LayerV1 &operator=(LayerV1 &&rhs) = default;
-
- /**
- * @brief Get the layer type
- * @return const std::string type representation
- */
- virtual const std::string getType() const = 0;
-
- /**
- * @brief Forward Propagation of a layer
- * @param[in] in List of Input Tensors taken by this layer
- * @retval List of Output Tensors
- */
- virtual void forwarding(bool training = true) = 0;
-
- /**
- * @brief Forward Propagation of a layer
- * @param[in] input List of Input Tensors taken by this layer
- * @param[in] label List of Label Tensors taken by this layer
- * @param[in] training if training, pass true else false. some layers have
- * diffrent behavior depending on this
- * @retval List of Output Tensors
- */
- virtual sharedConstTensors forwarding_with_val(sharedConstTensors input,
- sharedConstTensors label = {},
- bool training = true);
-
- /**
- * @brief calc the derivative to be passed to the previous layer
- * @retval Derivative List of Tensor for the previous layer
- */
- virtual void calcDerivative() = 0;
-
- /**
- * @brief check if this layer requires label to be passed
- * @note if requireLabel() == true means, for now, that it is endpoint of a
- * graph(numOutlayers == 0). label will be fed to the gradient of hidden if
- * requireLabel is true
- * @todo If we get to have a use case for requireLabel(true) but in the
- * middle of a graph, change the semantics
- *
- * @retval true requires a label when training
- * @retval false does not require a label
- */
- virtual bool requireLabel() const { return false; }
-
- /**
- * @brief Calculate the derivative of a layer
- */
- virtual void calcGradient(){};
-
- /**
- * @brief Apply the gradient for the layer
- * @param[in] iteration Iteration value for the Optimizer
- * @param[in] optimizer Optimizer to apply the gradient
- * @note This function is no-op if optimizer is nullptr
- */
- virtual void applyGradient(unsigned int iteration,
- std::shared_ptr<Optimizer> optimizer) {
- if (optimizer) {
- for (auto &weight : weights)
- optimizer->applyGradient(weight, iteration);
- }
- }
-
- /**
- * @brief Back Propagate the derivative to the previous layer
- * @retval Derivative List of Tensor for the previous layer
- */
- virtual void backwarding() {
- calcGradient();
- calcDerivative();
- }
-
- /**
- * @brief Backward to calculate the gradient for the layer and apply it
- * @param[in] iteration Iteration value for the Optimizer
- * @param[in] deriv Derivative for the layer
- * @param[in] optimizer Optimizer to apply the gradient
- */
- virtual sharedConstTensors
- backwarding_with_val(int iteration, sharedConstTensors deriv,
- std::shared_ptr<Optimizer> optimizer = nullptr) {
- auto ret = backwarding_with_val(deriv);
- applyGradient(iteration, optimizer);
- return ret;
- };
-
- /**
- * @brief Backward to calculate the gradient for the layer
- * @param[in] deriv Derivative for the layer
- */
- virtual sharedConstTensors backwarding_with_val(sharedConstTensors deriv);
-
- /**
- * @brief read layer Weight & Bias data from file
- * @note derived class can call this to get/save weights
- * @param[in] file input file stream
- */
- virtual void read(std::ifstream &file);
-
- /**
- * @brief save layer Weight & Bias data from file
- * @note derived class can call this to get/save weights
- * @param[in] file output file stream
- */
- virtual void save(std::ofstream &file);
-
- /**
- * @brief set Property of layer
- * @param[in] values values of property
- * @retval #ML_ERROR_NONE Successful.
- * @retval #ML_ERROR_INVALID_PARAMETER invalid parameter.
- * @note this shouldn't be virtual, this became virtual to support custom
- * layer. should be reverted after layer.h can fully support custom layer
- */
- virtual int setProperty(std::vector<std::string> values);
-
- /**
- * @brief Property Enumeration
- * 0. input shape : string
- * 1. bias zero : bool
- * 2. normalization : bool
- * 3. standardization : bool
- * 4. activation : string (type)
- * 5. epsilon : float
- * 6. weight_regularizer : string (type)
- * 7. weight_regularizer_constant : float
- * 8. unit : int
- * 9. weight_initializer : string (type)
- * 10. filter_size : int
- * 11. kernel_size : ( n , m )
- * 12. stride : ( n, m )
- * 13. padding : ( n, m )
- * 14. pool_size : ( n,m )
- * 15. pooling : max, average, global_max, global_average
- * 16. flatten : bool
- * 17. name : string (type)
- * 18. momentum : float,
- * 19. moving_mean_initializer : string (type),
- * 20. moving_variance_initializer : string (type),
- * 21. gamma_initializer : string (type),
- * 22. beta_initializer" : string (type)
- * 23. modelfile : model file for loading config for backbone layer
- * 24. input_layers : string (type)
- * 25. output_layers : string (type)
- * 26. trainable :
- * 27. flip_direction
- * 28. random_translate
- * 29. in_dim : int ( input dimension for embedding layer )
- * 30. out_dim : int ( output dimesion for embedding layer )
- * 31. recurrent_activation : string (type) - lstm
- * 32. distribute : bool
- * 33. split_dimension : string (type)
- * 34. return_sequences : bool (type) - lstm
- * 35. hidden_state_activation : string (type) - lstm
- * 36. dropout : float (type) - drop out rate
- */
- enum class PropertyType {
- input_shape = 0,
- normalization = 1,
- standardization = 2,
- activation = 3,
- epsilon = 4,
- weight_regularizer = 5,
- weight_regularizer_constant = 6,
- unit = 7,
- weight_initializer = 8,
- bias_initializer = 9,
- filters = 10,
- kernel_size = 11,
- stride = 12,
- padding = 13,
- pool_size = 14,
- pooling = 15,
- flatten = 16,
- name = 17,
- momentum = 18,
- moving_mean_initializer = 19,
- moving_variance_initializer = 20,
- gamma_initializer = 21,
- beta_initializer = 22,
- modelfile = 23, /** model file for loading config for backbone layer */
- input_layers = 24,
- output_layers = 25,
- trainable = 26,
- flip_direction = 27,
- random_translate = 28,
- in_dim = 29,
- out_dim = 30,
- recurrent_activation = 31,
- distribute = 32,
- split_dimension = 33,
- return_sequences = 34,
- hidden_state_activation = 35,
- dropout = 36,
- unknown
- };
-
- /**
- * @brief setProperty by PropertyType
- * @note By passing empty string, this can validate if @a type is valid
- * @param[in] type property type to be passed
- * @param[in] value value to be passed, if empty string is passed, do nothing
- * but throws error when @a type is invalid
- * @exception exception::not_supported when property type is not valid for
- * the particular layer
- * @exception std::invalid_argument invalid argument
- */
- virtual void setProperty(const PropertyType type,
- const std::string &value = "");
-
- /**
- * @brief Copy Layer
- * @param[in] l Layer to be copied
- */
- virtual void copy(std::shared_ptr<LayerV1> l);
-
- /**
- * @brief check hyper parameter for the layer
- * @retval #ML_ERROR_NONE Successful.
- * @retval #ML_ERROR_INVALID_PARAMETER invalid parameter.
- */
- virtual int checkValidation();
-
- /**
- * @brief Get the output dimension
- * @return TensorDim dimension of the output
- */
- virtual std::vector<TensorDim> getOutputDimension() { return output_dim; }
-
- /**
- * @brief Get the input dimension
- * @return TensorDim dimension of the input
- */
- virtual std::vector<TensorDim> getInputDimension() { return input_dim; }
-
- /**
- * @brief this function helps exporting the layer in a predefined format,
- * while workarounding issue caused by templated function type eraser
- *
- * @param exporter exporter that conatins exporting logic
- * @param method enum value to identify how it should be exported to
- * @todo remove this when name is moved to layer_node
- */
- virtual void
- export_to(Exporter &exporter,
- ExportMethods method = ExportMethods::METHOD_STRINGVECTOR) const {
- exporter.saveResult(layer_props, method, this);
- };
-
- /**
- * @brief get the loss value added by this layer
- * @retval loss value
- */
- virtual float getLoss() { return loss; }
-
- /**
- * @brief check if this layer supports backwarding
- * @note support backwarding primarily means that the layer can process the
- * derivatives and return back the gradients to the previous layer.
- * @return true if supports backwarding, else false
- */
- virtual bool supportBackwarding() const { return true; };
- /**
- * @brief get all weights of the layer
- * @retval vector of all params
- */
- virtual std::vector<Weight> getWeights() { return weights; }
-
- /**
- * @brief Preset modes for printing summary for the layer
- */
- enum class PrintPreset {
- PRINT_NONE = 0, /**< Print nothing */
- PRINT_SUMMARY, /**< Print preset including summary information */
- PRINT_SUMMARY_META, /**< Print summary preset that includes meta information
- */
- PRINT_ALL /**< Print everything possible */
- };
-
- /**
- * @brief print using PrintPreset
- *
- * @param out oustream
- * @param preset preset to be used
- */
- virtual void printPreset(std::ostream &out,
- PrintPreset preset = PrintPreset::PRINT_SUMMARY);
-
- /**
- * @brief get data alias at param position.
- * @exception std::out_of_range for index out of range
- */
- virtual Weight &weightAt(const unsigned int position) {
- return weights[position];
- }
-
- /**
- * @brief Get the number of weights
- *
- * @return unsigned int number of weights
- */
- virtual unsigned int getNumWeights() { return weights.size(); }
-
- /**
- * @brief Set the batch for the layer
- * @param batch Batch value to be set
- */
- virtual void setBatch(unsigned int batch);
-
- /**
- * @brief Get hidden tensors
- *
- * @return std::vector<Tensor> get outputs
- */
- virtual std::vector<Tensor> getOutputs();
-
- /**
- * @brief Get derivatives tensors
- *
- * @return std::vector<Tensor> get derivatives
- */
- virtual std::vector<Tensor> getDerivatives();
-
- /**
- * @brief Get the Input Ref object
- *
- * @return std::vector<std::shared_ptr<Var_Grad>>&
- */
- virtual const std::vector<std::shared_ptr<Var_Grad>> &getInputRef() const {
- return net_input;
- }
-
- /**
- * @brief Get the Output Ref object
- *
- * @return std::vector<std::shared_ptr<Var_Grad>>&
- */
- virtual const std::vector<std::shared_ptr<Var_Grad>> &getOutputRef() const {
- return net_hidden;
- }
-
- /**
- * @brief Get reference to the weights
- * @retval Reference of the list of weights in the layer
- */
- virtual std::vector<Weight> &getWeightsRef() { return weights; }
-
- /**
- * @brief Get the Weights Ref object
- *
- * @return const std::vector<Weight>& refs of weights
- */
- virtual const std::vector<Weight> &getWeightsRef() const { return weights; }
-
- /**
- * @brief Set the Input Buffers object
- *
- * @param inputs inputs to set
- */
- virtual void setInputBuffers(std::vector<std::shared_ptr<Var_Grad>> inputs) {
- net_input = inputs;
- }
-
- /**
- * @brief Set output Buffers
- *
- * @param outputs output to set
- */
- virtual void
- setOutputBuffers(std::vector<std::shared_ptr<Var_Grad>> outputs) {
- net_hidden = outputs;
- }
-
- /**
- * @brief Initialize the layer
- * - Weight(Height, Width), Bias(1, Width)
- * @retval #ML_ERROR_NONE Successful.
- * @retval #ML_ERROR_INVALID_PARAMETER invalid parameter.
- */
- virtual int initialize(Manager &manager) = 0;
-
- /**
- * @brief get number of input layers
- *
- * @return unsigned int input size
- */
- virtual unsigned int getNumInputs() { return input_dim.size(); }
-
- /**
- * @brief get number of output layers
- *
- * @return unsigned int output size
- */
- virtual unsigned int getNumOutputs() { return output_dim.size(); }
-
- /**
- * @brief set Number of Input Layers
- *
- * @param size size of inputs
- */
- void setNumInputs(unsigned int size) {
- if (size < 1)
- throw std::invalid_argument("Minimum number of inputs must be 1");
- if (input_dim.size() != size) {
- /** clear is intentional to clear any previously set input dimensions */
- input_dim.clear();
- input_dim.resize(size);
- }
- net_input.resize(size);
- }
-
- /**
- * @brief set Number of Output Layers
- *
- * @param size size of outputs
- */
- void setNumOutputs(unsigned int size) {
- if (size < 1)
- throw std::invalid_argument("Minimum number of outputs must be 1");
- if (output_dim.size() != size) {
- /** clear is intentional to clear any previously set output dimensions */
- output_dim.clear();
- output_dim.resize(size);
- }
- net_hidden.resize(size);
- }
-
- /**
-
- * @brief Set the input dimension
- * @param[in] d dimension to be set
- */
- void setInputDimension(const std::vector<TensorDim> &d) { input_dim = d; }
-
- /**
- * @brief Set the input dimension
- * @param[in] d dimension to be set
- * @param[in] i axis
- */
- void setInputDimension(const TensorDim &d, unsigned int i) {
- if (i >= getNumInputs())
- throw std::out_of_range("Setting dimensions out of bounds");
- input_dim[i] = d;
- }
-
- /**
- * @brief If the current layer can support in-place
- *
- * @return true if inplace, else false
- * @details all layers default to out of place execution
- * @note all layers default to out of place execution
- */
- virtual bool supportInPlace() const { return false; }
-
-protected:
- /**
- * @brief Print Options when printing layer info
- */
- typedef enum {
- // clang-format off
- PRINT_INST_INFO = (1 << 0), /**< Option to print type & instance address info */
- PRINT_SHAPE_INFO = (1 << 1), /**< Option to print shape information, invalid before initiation*/
- PRINT_PROP = (1 << 2), /**< Option to print properties */
- PRINT_PROP_META = (1 << 3), /**< Option to print properties that describe meta info
- e.g) layer activation type for non-activation layer. */
- PRINT_WEIGHTS = (1 << 4), /**< Option to print weights */
- PRINT_METRIC = (1 << 5) /**< Option to print metrics (currently loss only) */
- // clang-format on
- } PrintOption;
-
- std::tuple<> layer_props; /**< supported properties of layer */
-
- /**
- * @brief Input Tensors
- */
- std::vector<std::shared_ptr<Var_Grad>> net_input;
-
- /**
- * @brief Output Tensors
- */
- std::vector<std::shared_ptr<Var_Grad>> net_hidden;
-
- /**
- * @brief Dimension of input activation
- */
- std::vector<TensorDim> input_dim;
-
- /**
- * @brief Dimension of output activation
- */
- std::vector<TensorDim> output_dim;
-
- /**
- * @brief Loss value added by this layer
- */
- float loss;
-
- WeightRegularizer weight_regularizer;
-
- float weight_regularizer_constant;
-
- /**
- * @brief initializer for weights
- */
- WeightInitializer weight_initializer;
-
- /**
- * @brief initializer for bias
- */
- WeightInitializer bias_initializer;
-
- /**
- * @brief weight_list in this layer. This contains all weights of the
- * layer.
- */
- std::vector<Weight> weights;
-
-private:
- /**
- * @brief check if @a type is valid and print if prop is valid to @a out
- */
- template <typename T>
- void printIfValid(std::ostream &out, const PropertyType type, T target);
-
- /**
- * @brief anchor point to override if PRINT_SHAPE_INFO is enabled for
- * Layer::print()
- */
- virtual void printShapeInfo(std::ostream &out);
-
- /**
- * @brief anchor point to override if PRINT_PROP_META is enabled for
- * Layer::print()
- */
- virtual void printPropertiesMeta(std::ostream &out);
-
- /**
- * @brief anchor point to override if PRINT_PROP is enabled for Layer::print()
- */
- virtual void printProperties(std::ostream &out);
-
- /**
- * @brief anchor point to override if PRINT_METRIC is enabled for
- * Layer::print()
- */
- virtual void printMetric(std::ostream &out);
-
- /**
- * @brief set Weight Initialization Type
- * @param[in] wini WeightInitializer
- */
- void setWeightInit(WeightInitializer wini) { weight_initializer = wini; }
-
- /**
- * @brief Print layer related information. Do not override without clear
- * reason. It is recommended to override printShapeInfo, printPropertiesMeta,
- * printProperties, printMetric instead
- * @param[in] out outstream
- * @param[in] flags combination of LayerPrintOption
- */
- virtual void print(std::ostream &out, unsigned int flags = 0);
-};
-
-/**
- * @brief Overriding output stream for layers and it's derived class
- */
-template <typename T, typename std::enable_if_t<
- std::is_base_of<LayerV1, T>::value, T> * = nullptr>
-std::ostream &operator<<(std::ostream &out, T &l) {
- l.printPreset(out, LayerV1::PrintPreset::PRINT_SUMMARY);
- return out;
-}
-
-using CreateLayerV1Func = nntrainer::LayerV1 *(*)();
-using DestroyLayerV1Func = void (*)(nntrainer::LayerV1 *);
-
-/**
- * @brief Layer Pluggable struct that enables pluggable layer
- *
- */
-typedef struct {
- CreateLayerV1Func createfunc; /**< create layer function */
- DestroyLayerV1Func destroyfunc; /**< destory function */
-} LayerV1Pluggable;
-
-/**
- * @brief pluggable layer must have this structure defined
- */
-extern "C" LayerV1Pluggable ml_train_layerv1_pluggable;
-
-/**
- * @brief General Layer Factory function to register Layer
- *
- * @param props property representation
- * @return std::unique_ptr<ml::train::Layer> created object
- */
-template <typename T,
- std::enable_if_t<std::is_base_of<LayerV1, T>::value, T> * = nullptr>
-std::unique_ptr<LayerV1>
-createLayer(const std::vector<std::string> &props = {}) {
- std::unique_ptr<LayerV1> ptr = std::make_unique<T>();
-
- if (ptr->setProperty(props) != ML_ERROR_NONE) {
- throw std::invalid_argument("Set properties failed for layer");
- }
- return ptr;
-}
-
-} // namespace nntrainer
-
-#endif /* __cplusplus */
-#endif /* __LAYER_H__ */
#include <layer_node.h>
#include <nntrainer_error.h>
#include <nntrainer_log.h>
+#include <node_exporter.h>
#include <time_dist.h>
#include <base_properties.h>
}
bool LayerNode::setProperty(const std::string &key, const std::string &value) {
- using PropertyType = nntrainer::LayerV1::PropertyType;
+ using PropertyType = nntrainer::Layer::PropertyType;
PropertyType type = static_cast<PropertyType>(parseLayerProperty(key));
switch (type) {
// clang-format on
} PrintOption;
-void LayerNode::printPreset(std::ostream &out, LayerV1::PrintPreset preset) {
- using PrintPreset = LayerV1::PrintPreset;
+void LayerNode::printPreset(std::ostream &out, PrintPreset preset) {
unsigned int flags = 0;
switch (preset) {
#include <layer.h>
#include <layer_context.h>
#include <layer_devel.h>
-#include <layer_internal.h>
namespace nntrainer {
*/
~LayerNode();
-public:
/**
* Support all the interface requirements by ml::train::Layer
*/
}
/**
+ * @brief Preset modes for printing summary for the layer
+ */
+ enum class PrintPreset {
+ PRINT_NONE = 0, /**< Print nothing */
+ PRINT_SUMMARY, /**< Print preset including summary information */
+ PRINT_SUMMARY_META, /**< Print summary preset that includes meta information
+ */
+ PRINT_ALL /**< Print everything possible */
+ };
+
+ /**
* @brief print using PrintPreset
*
* @param out oustream
* @param preset preset to be used
*/
- void printPreset(std::ostream &out, LayerV1::PrintPreset preset =
- LayerV1::PrintPreset::PRINT_SUMMARY);
+ void printPreset(std::ostream &out,
+ PrintPreset preset = PrintPreset::PRINT_SUMMARY);
private:
std::unique_ptr<nntrainer::Layer>
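
printPreset() and its PrintPreset enum now belong to LayerNode, so the
model-level summary code further down selects presets through the node.
A minimal usage sketch, assuming an already constructed LayerNode `node`:

    #include <iostream>
    #include <layer_node.h>

    // print everything the node can report about its layer
    node.printPreset(std::cout, nntrainer::LayerNode::PrintPreset::PRINT_ALL);
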
*/
#include <cmath>
-#include <layer_internal.h>
#include <lazy_tensor.h>
#include <lstm.h>
#include <nntrainer_error.h>
#include <nntrainer_log.h>
+#include <node_exporter.h>
#include <parse_util.h>
#include <util_func.h>
void LSTMLayer::setProperty(const std::string &type_str,
const std::string &value) {
- using PropertyType = LayerV1::PropertyType;
+ using PropertyType = nntrainer::Layer::PropertyType;
int status = ML_ERROR_NONE;
- LayerV1::PropertyType type =
- static_cast<LayerV1::PropertyType>(parseLayerProperty(type_str));
+ nntrainer::Layer::PropertyType type =
+ static_cast<nntrainer::Layer::PropertyType>(parseLayerProperty(type_str));
// TODO : Add return_state property & api to get the hidden input
switch (type) {
'flatten_layer.cpp',
'input_layer.cpp',
'output_layer.cpp',
- 'layer.cpp',
'layer_node.cpp',
'pooling2d_layer.cpp',
'preprocess_flip_layer.cpp',
layer_headers = [
'layer_context.h',
'layer_devel.h',
- 'layer_factory.h',
- 'layer_internal.h',
'acti_func.h',
'layer_node.h',
'common_properties.h',
]
* nnstreamer data
*/
-#include <layer_internal.h>
#include <lazy_tensor.h>
#include <nnstreamer_layer.h>
#include <nntrainer_error.h>
void NNStreamerLayer::setProperty(const std::string &type_str,
const std::string &value) {
- using PropertyType = LayerV1::PropertyType;
- LayerV1::PropertyType type =
- static_cast<LayerV1::PropertyType>(parseLayerProperty(type_str));
+ using PropertyType = nntrainer::Layer::PropertyType;
+ nntrainer::Layer::PropertyType type =
+ static_cast<nntrainer::Layer::PropertyType>(parseLayerProperty(type_str));
switch (type) {
case PropertyType::modelfile: {
*/
#include <cstring>
-#include <layer_internal.h>
#include <nntrainer_error.h>
#include <nntrainer_log.h>
#include <output_layer.h>
layerImpl(pluggable->createfunc()),
destroy_func(pluggable->destroyfunc) {
NNTR_THROW_IF(layerImpl == nullptr, std::invalid_argument)
- << "either create_func_ failed or cannot dynamic cast to layer_internal";
+ << "either create_func_ failed or cannot dynamic cast to layer";
}
/**
#include <cstring>
#include <limits>
-#include <layer_internal.h>
#include <lazy_tensor.h>
#include <nntrainer_error.h>
#include <nntrainer_log.h>
void Pooling2DLayer::setProperty(const std::string &type_str,
const std::string &value) {
- using PropertyType = LayerV1::PropertyType;
+ using PropertyType = nntrainer::Layer::PropertyType;
int status = ML_ERROR_NONE;
- LayerV1::PropertyType type =
- static_cast<LayerV1::PropertyType>(parseLayerProperty(type_str));
+ nntrainer::Layer::PropertyType type =
+ static_cast<nntrainer::Layer::PropertyType>(parseLayerProperty(type_str));
switch (type) {
case PropertyType::pooling:
#include <random>
-#include <layer_internal.h>
#include <nntrainer_error.h>
#include <nntrainer_log.h>
#include <parse_util.h>
void PreprocessFlipLayer::setProperty(const std::string &type_str,
const std::string &value) {
- using PropertyType = LayerV1::PropertyType;
- LayerV1::PropertyType type =
- static_cast<LayerV1::PropertyType>(parseLayerProperty(type_str));
+ using PropertyType = nntrainer::Layer::PropertyType;
+ nntrainer::Layer::PropertyType type =
+ static_cast<nntrainer::Layer::PropertyType>(parseLayerProperty(type_str));
switch (type) {
case PropertyType::flip_direction: {
#include <random>
-#include <layer_internal.h>
#include <nntrainer_error.h>
#include <nntrainer_log.h>
#include <parse_util.h>
void PreprocessTranslateLayer::setProperty(const std::string &type_str,
const std::string &value) {
- using PropertyType = LayerV1::PropertyType;
+ using PropertyType = nntrainer::Layer::PropertyType;
int status = ML_ERROR_NONE;
- LayerV1::PropertyType type =
- static_cast<LayerV1::PropertyType>(parseLayerProperty(type_str));
+ nntrainer::Layer::PropertyType type =
+ static_cast<nntrainer::Layer::PropertyType>(parseLayerProperty(type_str));
switch (type) {
case PropertyType::random_translate: {
*/
#include <cmath>
-#include <layer_internal.h>
#include <lazy_tensor.h>
#include <nntrainer_error.h>
#include <nntrainer_log.h>
+#include <node_exporter.h>
#include <parse_util.h>
#include <rnn.h>
#include <util_func.h>
void RNNLayer::setProperty(const std::string &type_str,
const std::string &value) {
- using PropertyType = LayerV1::PropertyType;
+ using PropertyType = nntrainer::Layer::PropertyType;
int status = ML_ERROR_NONE;
- LayerV1::PropertyType type =
- static_cast<LayerV1::PropertyType>(parseLayerProperty(type_str));
+ nntrainer::Layer::PropertyType type =
+ static_cast<nntrainer::Layer::PropertyType>(parseLayerProperty(type_str));
// TODO : Add return_state property & api to get the hidden input
switch (type) {
*/
#include <cstring>
-#include <layer_internal.h>
#include <nntrainer_error.h>
#include <nntrainer_log.h>
#include <parse_util.h>
void SplitLayer::setProperty(const std::string &type_str,
const std::string &value) {
- using PropertyType = LayerV1::PropertyType;
+ using PropertyType = nntrainer::Layer::PropertyType;
int status = ML_ERROR_NONE;
- LayerV1::PropertyType type =
- static_cast<LayerV1::PropertyType>(parseLayerProperty(type_str));
+ nntrainer::Layer::PropertyType type =
+ static_cast<nntrainer::Layer::PropertyType>(parseLayerProperty(type_str));
switch (type) {
case PropertyType::split_dimension: {
* @bug No known bugs except for NYI items
*/
-#include <layer_internal.h>
#include <nntrainer_error.h>
#include <nntrainer_log.h>
#include <tflite_layer.h>
void TfLiteLayer::setProperty(const std::string &type_str,
const std::string &value) {
- using PropertyType = LayerV1::PropertyType;
- LayerV1::PropertyType type =
- static_cast<LayerV1::PropertyType>(parseLayerProperty(type_str));
+ using PropertyType = nntrainer::Layer::PropertyType;
+ nntrainer::Layer::PropertyType type =
+ static_cast<nntrainer::Layer::PropertyType>(parseLayerProperty(type_str));
switch (type) {
case PropertyType::modelfile: {
*
*/
-#include <layer_internal.h>
#include <nntrainer_error.h>
#include <nntrainer_log.h>
#include <parse_util.h>
#include <vector>
#include <dynamic_training_optimization.h>
-#include <layer_internal.h>
#include <tensor.h>
#include <util_func.h>
#include <random>
#include <vector>
-#include <layer_internal.h>
+#include <layer_devel.h>
+#include <optimizer_devel.h>
#include <tensor.h>
namespace nntrainer {
if (preset > ML_TRAIN_SUMMARY_TENSOR)
return;
- LayerV1::PrintPreset layer_preset = LayerV1::PrintPreset::PRINT_NONE;
+ LayerNode::PrintPreset layer_preset = LayerNode::PrintPreset::PRINT_NONE;
///@todo match flags with preset
unsigned int flags = PRINT_INST_INFO | PRINT_GRAPH_INFO | PRINT_PROP |
switch (preset) {
case ML_TRAIN_SUMMARY_TENSOR:
- layer_preset = LayerV1::PrintPreset::PRINT_ALL;
+ layer_preset = LayerNode::PrintPreset::PRINT_ALL;
break;
case ML_TRAIN_SUMMARY_LAYER:
- layer_preset = initialized ? LayerV1::PrintPreset::PRINT_SUMMARY
- : LayerV1::PrintPreset::PRINT_SUMMARY_META;
+ layer_preset = initialized ? LayerNode::PrintPreset::PRINT_SUMMARY
+ : LayerNode::PrintPreset::PRINT_SUMMARY_META;
break;
case ML_TRAIN_SUMMARY_MODEL:
break;
}
void NeuralNetwork::print(std::ostream &out, unsigned int flags,
- LayerV1::PrintPreset layerPrintPreset) {
+ LayerNode::PrintPreset layerPrintPreset) {
if (flags & PRINT_INST_INFO) {
out << "===================";
printInstance(out, this);
* @param[in] Layer::PrintPreset print preset when to print layer properties
*/
void print(std::ostream &out, unsigned int flags = 0,
- LayerV1::PrintPreset layerPrintPreset =
- LayerV1::PrintPreset::PRINT_SUMMARY);
+ LayerNode::PrintPreset layerPrintPreset =
+ LayerNode::PrintPreset::PRINT_SUMMARY);
/**
* @brief Set Loss
}
template <>
-void Exporter::saveTflResult(const std::tuple<> &props, const LayerV1 *self) {
+void Exporter::saveTflResult(const std::tuple<> &props,
+ const nntrainer::Layer *self) {
createIfNull(tf_node);
}
class Trainable;
} // namespace props
-class LayerV1;
-/**
- * @copydoc template <typename PropsType, typename NodeType> void
- * Exporter::saveTflResult(const PropsType &props, const NodeType *self);
- */
-template <>
-void Exporter::saveTflResult(const std::tuple<> &props, const LayerV1 *self);
-
class LayerNode;
/**
* @copydoc template <typename PropsType, typename NodeType> void
#include <string>
#include <acti_func.h>
-#include <layer_internal.h>
#include <neuralnet.h>
#include <nntrainer_error.h>
#include <nntrainer_log.h>
}
}
- return (unsigned int)LayerV1::PropertyType::unknown;
+ return (unsigned int)Layer::PropertyType::unknown;
}
std::string propToStr(unsigned int type) { return property_string[type]; }
%{_includedir}/nntrainer/databuffer_factory.h
%{_includedir}/nntrainer/layer_context.h
%{_includedir}/nntrainer/layer_devel.h
-%{_includedir}/nntrainer/layer_internal.h
%{_includedir}/nntrainer/neuralnet.h
%{_includedir}/nntrainer/tensor.h
%{_includedir}/nntrainer/tensor_dim.h
*
* @todo solidify the api signature
*/
-class CustomLayer : public nntrainer::LayerV1 {
+class CustomLayer : public nntrainer::Layer {
public:
inline static const std::string type = "identity_layer";
- int setProperty(std::vector<std::string> values) override { return 1; }
-
- int checkValidation() override { return 1; }
-
- float getLoss() override { return 0.0f; }
+ void setProperty(const std::vector<std::string> &values) override {}
const std::string getType() const override { return CustomLayer::type; }
};
#include <fc_layer.h>
#include <flatten_layer.h>
#include <input_layer.h>
-#include <layer_internal.h>
#include <layer_node.h>
#include <loss_layer.h>
#include <lstm.h>
TEST_F(nntrainer_InputLayer, set_property_01_n) {
EXPECT_THROW(
- layer.setProperty(nntrainer::LayerV1::PropertyType::input_shape, "0:3:2:1"),
+ layer.setProperty(nntrainer::Layer::PropertyType::input_shape, "0:3:2:1"),
std::invalid_argument);
}
sharedTensor label;
std::vector<nntrainer::Tensor> new_w;
std::vector<nntrainer::Tensor> grad;
- std::vector<std::shared_ptr<nntrainer::LayerV1>> layers;
+ std::vector<std::shared_ptr<nntrainer::Layer>> layers;
nntrainer::LossType loss_type = nntrainer::LossType::LOSS_UNKNOWN;
};
TEST_F(nntrainer_Conv2DLayer, print_01_p) {
std::stringstream ss, ss2;
- layer.printPreset(ss, nntrainer::LayerV1::PrintPreset::PRINT_ALL);
+ layer.printPreset(ss, nntrainer::Layer::PrintPreset::PRINT_ALL);
ss2 << layer;
EXPECT_GT(ss.str().size(), 100u);
EXPECT_GT(ss2.str().size(), 100u);
TEST(nntrainer_LossLayer, setProperty_individual_01_n) {
nntrainer::LossLayer layer;
EXPECT_THROW(
- layer.setProperty(nntrainer::LayerV1::PropertyType::filters, "1:2"),
+ layer.setProperty(nntrainer::Layer::PropertyType::filters, "1:2"),
nntrainer::exception::not_supported);
}
TEST(nntrainer_LossLayer, setProperty_individual_02_n) {
nntrainer::LossLayer layer;
- EXPECT_THROW(layer.setProperty(nntrainer::LayerV1::PropertyType::filters,
+ EXPECT_THROW(layer.setProperty(nntrainer::Layer::PropertyType::filters,
"invalid_string"),
nntrainer::exception::not_supported);
}