From: Jihoon Lee Date: Fri, 19 Nov 2021 04:21:59 +0000 (+0900) Subject: [layer devel] clean up header dependency X-Git-Tag: accepted/tizen/unified/20220323.062643~179 X-Git-Url: http://review.tizen.org/git/?a=commitdiff_plain;h=e9a0d1c75358a7577c633545cfba161a8d79a020;p=platform%2Fcore%2Fml%2Fnntrainer.git [layer devel] clean up header dependency This patch cleans up a header dependency related to layer devel, which is being included in so many places and does not need layer_context to be included in its translation unit **Self evaluation:** 1. Build test: [X]Passed [ ]Failed [ ]Skipped 2. Run test: [X]Passed [ ]Failed [ ]Skipped Signed-off-by: Jihoon Lee --- diff --git a/Applications/TransferLearning/Draw_Classification/jni/main.cpp b/Applications/TransferLearning/Draw_Classification/jni/main.cpp index fda6067..0ce4948 100644 --- a/Applications/TransferLearning/Draw_Classification/jni/main.cpp +++ b/Applications/TransferLearning/Draw_Classification/jni/main.cpp @@ -29,6 +29,7 @@ #define APP_VALIDATE #endif +#include #include #include #include diff --git a/nntrainer/layers/activation_layer.cpp b/nntrainer/layers/activation_layer.cpp index 3116fa3..5c0ffd4 100644 --- a/nntrainer/layers/activation_layer.cpp +++ b/nntrainer/layers/activation_layer.cpp @@ -21,7 +21,7 @@ #include #include #include -#include +#include #include #include #include diff --git a/nntrainer/layers/addition_layer.cpp b/nntrainer/layers/addition_layer.cpp index 564cf6f..bdae68e 100644 --- a/nntrainer/layers/addition_layer.cpp +++ b/nntrainer/layers/addition_layer.cpp @@ -16,6 +16,8 @@ #include #include +#include + namespace nntrainer { static constexpr size_t SINGLE_INOUT_IDX = 0; diff --git a/nntrainer/layers/attention_layer.cpp b/nntrainer/layers/attention_layer.cpp index eaf3b96..1f5edb2 100644 --- a/nntrainer/layers/attention_layer.cpp +++ b/nntrainer/layers/attention_layer.cpp @@ -12,6 +12,7 @@ */ #include +#include #include #include diff --git a/nntrainer/layers/bn_layer.cpp
b/nntrainer/layers/bn_layer.cpp index 7d8b978..f9bae35 100644 --- a/nntrainer/layers/bn_layer.cpp +++ b/nntrainer/layers/bn_layer.cpp @@ -22,6 +22,7 @@ */ #include +#include #include #include #include diff --git a/nntrainer/layers/bn_layer.h b/nntrainer/layers/bn_layer.h index d0f47b5..5ecee09 100644 --- a/nntrainer/layers/bn_layer.h +++ b/nntrainer/layers/bn_layer.h @@ -28,6 +28,7 @@ #include #include +#include #include namespace nntrainer { diff --git a/nntrainer/layers/centroid_knn.cpp b/nntrainer/layers/centroid_knn.cpp index 153d5b2..734ce0d 100644 --- a/nntrainer/layers/centroid_knn.cpp +++ b/nntrainer/layers/centroid_knn.cpp @@ -18,6 +18,7 @@ #include #include +#include #include #include #include diff --git a/nntrainer/layers/centroid_knn.h b/nntrainer/layers/centroid_knn.h index 018240f..93f0166 100644 --- a/nntrainer/layers/centroid_knn.h +++ b/nntrainer/layers/centroid_knn.h @@ -16,7 +16,6 @@ #include #include -#include #include namespace nntrainer { diff --git a/nntrainer/layers/concat_layer.cpp b/nntrainer/layers/concat_layer.cpp index 6c3662b..5b74273 100644 --- a/nntrainer/layers/concat_layer.cpp +++ b/nntrainer/layers/concat_layer.cpp @@ -14,6 +14,7 @@ #include #include +#include #include #include #include diff --git a/nntrainer/layers/concat_layer.h b/nntrainer/layers/concat_layer.h index 11764e4..f484365 100644 --- a/nntrainer/layers/concat_layer.h +++ b/nntrainer/layers/concat_layer.h @@ -15,8 +15,9 @@ #define __CONCAT_LAYER_H__ #ifdef __cplusplus +#include #include - +#include namespace nntrainer { /** diff --git a/nntrainer/layers/conv1d_layer.cpp b/nntrainer/layers/conv1d_layer.cpp index f47946b..3d257c0 100644 --- a/nntrainer/layers/conv1d_layer.cpp +++ b/nntrainer/layers/conv1d_layer.cpp @@ -17,6 +17,8 @@ #include #include +#include +#include #include #include #include @@ -35,6 +37,8 @@ Conv1DLayer::Conv1DLayer(const std::array &padding_) : conv2d_layer = std::make_unique(); } +Conv1DLayer::~Conv1DLayer() {} + void 
Conv1DLayer::finalize(InitLayerContext &context) { if (context.getNumInputs() != 1) { throw std::invalid_argument("Convolution layer takes only one input"); diff --git a/nntrainer/layers/conv1d_layer.h b/nntrainer/layers/conv1d_layer.h index 2fb6746..c51f44d 100644 --- a/nntrainer/layers/conv1d_layer.h +++ b/nntrainer/layers/conv1d_layer.h @@ -15,12 +15,14 @@ #define __CONV1D_LAYER_H_ #ifdef __cplusplus -#include +#include #include #include namespace nntrainer { +class Conv2DLayer; + /** * @class Convolution 1D Layer * @brief Convolution 1D Layer @@ -35,7 +37,7 @@ public: /** * @brief Destructor of Conv 1D Layer */ - ~Conv1DLayer() = default; + ~Conv1DLayer(); /** * @brief Move constructor of Conv 1D Layer. diff --git a/nntrainer/layers/conv2d_layer.cpp b/nntrainer/layers/conv2d_layer.cpp index bc462da..7365ace 100644 --- a/nntrainer/layers/conv2d_layer.cpp +++ b/nntrainer/layers/conv2d_layer.cpp @@ -18,11 +18,13 @@ #include #include +#include #include #include #include #include #include +#include #include namespace nntrainer { diff --git a/nntrainer/layers/conv2d_layer.h b/nntrainer/layers/conv2d_layer.h index 9577efb..0497e52 100644 --- a/nntrainer/layers/conv2d_layer.h +++ b/nntrainer/layers/conv2d_layer.h @@ -17,6 +17,7 @@ #include +#include #include namespace nntrainer { diff --git a/nntrainer/layers/dropout.cpp b/nntrainer/layers/dropout.cpp index e504352..76f2d8d 100644 --- a/nntrainer/layers/dropout.cpp +++ b/nntrainer/layers/dropout.cpp @@ -12,8 +12,10 @@ */ #include +#include #include #include +#include #include namespace nntrainer { diff --git a/nntrainer/layers/dropout.h b/nntrainer/layers/dropout.h index 1fece43..aa2cb71 100644 --- a/nntrainer/layers/dropout.h +++ b/nntrainer/layers/dropout.h @@ -17,7 +17,6 @@ #include #include -#include namespace nntrainer { diff --git a/nntrainer/layers/embedding.cpp b/nntrainer/layers/embedding.cpp index c63e51c..730a81a 100644 --- a/nntrainer/layers/embedding.cpp +++ b/nntrainer/layers/embedding.cpp @@ -12,6 +12,7 
@@ */ #include +#include #include #include #include diff --git a/nntrainer/layers/embedding.h b/nntrainer/layers/embedding.h index 940a128..d182cc1 100644 --- a/nntrainer/layers/embedding.h +++ b/nntrainer/layers/embedding.h @@ -15,6 +15,7 @@ #define __EMBEDDING_H__ #ifdef __cplusplus +#include #include namespace nntrainer { diff --git a/nntrainer/layers/fc_layer.cpp b/nntrainer/layers/fc_layer.cpp index 4368fb3..070c874 100644 --- a/nntrainer/layers/fc_layer.cpp +++ b/nntrainer/layers/fc_layer.cpp @@ -22,6 +22,7 @@ */ #include +#include #include #include #include diff --git a/nntrainer/layers/flatten_layer.cpp b/nntrainer/layers/flatten_layer.cpp index 06fa7cb..cb11adc 100644 --- a/nntrainer/layers/flatten_layer.cpp +++ b/nntrainer/layers/flatten_layer.cpp @@ -13,8 +13,10 @@ */ #include +#include #include #include +#include namespace nntrainer { diff --git a/nntrainer/layers/gru.cpp b/nntrainer/layers/gru.cpp index 59d35d4..85c25cb 100644 --- a/nntrainer/layers/gru.cpp +++ b/nntrainer/layers/gru.cpp @@ -28,7 +28,7 @@ #include #include -#include +#include #include #include #include diff --git a/nntrainer/layers/input_layer.cpp b/nntrainer/layers/input_layer.cpp index 5d3b58f..d2a890e 100644 --- a/nntrainer/layers/input_layer.cpp +++ b/nntrainer/layers/input_layer.cpp @@ -22,6 +22,7 @@ */ #include +#include #include #include #include diff --git a/nntrainer/layers/input_layer.h b/nntrainer/layers/input_layer.h index 5634cf8..259383b 100644 --- a/nntrainer/layers/input_layer.h +++ b/nntrainer/layers/input_layer.h @@ -25,9 +25,7 @@ #ifdef __cplusplus #include -#include #include -#include namespace nntrainer { diff --git a/nntrainer/layers/layer_devel.h b/nntrainer/layers/layer_devel.h index 32cfff0..af81cfa 100644 --- a/nntrainer/layers/layer_devel.h +++ b/nntrainer/layers/layer_devel.h @@ -27,15 +27,14 @@ #include #include -#include -#include - namespace ml::train { class Layer; } namespace nntrainer { +class InitLayerContext; +class RunLayerContext; class Exporter; 
enum class ExportMethods; diff --git a/nntrainer/layers/layer_impl.h b/nntrainer/layers/layer_impl.h index c7b8aa0..26f87a3 100644 --- a/nntrainer/layers/layer_impl.h +++ b/nntrainer/layers/layer_impl.h @@ -31,6 +31,13 @@ class InitLayerContext; class RunLayerContext; class Exporter; +namespace props { +class WeightRegularizer; +class WeightRegularizerConstant; +class WeightInitializer; +class BiasInitializer; +} // namespace props + enum class ExportMethods; /** diff --git a/nntrainer/layers/loss/constant_derivative_loss_layer.cpp b/nntrainer/layers/loss/constant_derivative_loss_layer.cpp index ab8bf0b..2b6c27d 100644 --- a/nntrainer/layers/loss/constant_derivative_loss_layer.cpp +++ b/nntrainer/layers/loss/constant_derivative_loss_layer.cpp @@ -14,6 +14,8 @@ #include +#include + namespace nntrainer { static constexpr int SINGLE_INOUT_IDX = 0; @@ -21,7 +23,7 @@ static constexpr int SINGLE_INOUT_IDX = 0; static constexpr float value = 1.0f; ConstantDerivativeLossLayer::ConstantDerivativeLossLayer() : LossLayer() {} -ConstantDerivativeLossLayer::~ConstantDerivativeLossLayer() = default; +ConstantDerivativeLossLayer::~ConstantDerivativeLossLayer(){}; void ConstantDerivativeLossLayer::forwarding(RunLayerContext &context, bool training) { diff --git a/nntrainer/layers/loss/cross_entropy_loss_layer.h b/nntrainer/layers/loss/cross_entropy_loss_layer.h index e05d92e..07e474a 100644 --- a/nntrainer/layers/loss/cross_entropy_loss_layer.h +++ b/nntrainer/layers/loss/cross_entropy_loss_layer.h @@ -16,6 +16,7 @@ #ifdef __cplusplus #include +#include namespace nntrainer { diff --git a/nntrainer/layers/loss/cross_entropy_sigmoid_loss_layer.cpp b/nntrainer/layers/loss/cross_entropy_sigmoid_loss_layer.cpp index d0b205d..1820c0b 100644 --- a/nntrainer/layers/loss/cross_entropy_sigmoid_loss_layer.cpp +++ b/nntrainer/layers/loss/cross_entropy_sigmoid_loss_layer.cpp @@ -16,6 +16,7 @@ #include #include +#include #include #include diff --git 
a/nntrainer/layers/loss/cross_entropy_softmax_loss_layer.cpp b/nntrainer/layers/loss/cross_entropy_softmax_loss_layer.cpp index 5312f46..95a6048 100644 --- a/nntrainer/layers/loss/cross_entropy_softmax_loss_layer.cpp +++ b/nntrainer/layers/loss/cross_entropy_softmax_loss_layer.cpp @@ -16,6 +16,7 @@ #include #include +#include #include #include diff --git a/nntrainer/layers/loss/loss_layer.cpp b/nntrainer/layers/loss/loss_layer.cpp index 2be4ee6..84f767f 100644 --- a/nntrainer/layers/loss/loss_layer.cpp +++ b/nntrainer/layers/loss/loss_layer.cpp @@ -11,9 +11,13 @@ * */ +#include #include namespace nntrainer { +void LossLayer::finalize(InitLayerContext &context) { + context.setOutputDimensions(context.getInputDimensions()); +} void LossLayer::updateLoss(RunLayerContext &context, const Tensor &l) { float loss_sum = 0.0f; diff --git a/nntrainer/layers/loss/loss_layer.h b/nntrainer/layers/loss/loss_layer.h index 8c194ee..00b520f 100644 --- a/nntrainer/layers/loss/loss_layer.h +++ b/nntrainer/layers/loss/loss_layer.h @@ -17,6 +17,8 @@ #include +#include + namespace nntrainer { /** @@ -33,9 +35,7 @@ public: /** * @copydoc Layer::finalize(InitLayerContext &context) */ - virtual void finalize(InitLayerContext &context) override { - context.setOutputDimensions(context.getInputDimensions()); - } + virtual void finalize(InitLayerContext &context) override; /** * @copydoc Layer::setProperty(const std::vector &values) diff --git a/nntrainer/layers/loss/mse_loss_layer.cpp b/nntrainer/layers/loss/mse_loss_layer.cpp index 9691aa3..f6ca1dc 100644 --- a/nntrainer/layers/loss/mse_loss_layer.cpp +++ b/nntrainer/layers/loss/mse_loss_layer.cpp @@ -11,6 +11,7 @@ * */ +#include #include #include diff --git a/nntrainer/layers/lstm.cpp b/nntrainer/layers/lstm.cpp index 25f3c4e..49cd2da 100644 --- a/nntrainer/layers/lstm.cpp +++ b/nntrainer/layers/lstm.cpp @@ -12,7 +12,7 @@ */ #include -#include +#include #include #include #include diff --git a/nntrainer/layers/lstmcell.cpp 
b/nntrainer/layers/lstmcell.cpp index 588b4a8..e25fe1f 100644 --- a/nntrainer/layers/lstmcell.cpp +++ b/nntrainer/layers/lstmcell.cpp @@ -12,7 +12,7 @@ */ #include -#include +#include #include #include #include diff --git a/nntrainer/layers/multiout_layer.cpp b/nntrainer/layers/multiout_layer.cpp index 0e720c9..10673ec 100644 --- a/nntrainer/layers/multiout_layer.cpp +++ b/nntrainer/layers/multiout_layer.cpp @@ -12,6 +12,7 @@ */ #include +#include #include #include #include diff --git a/nntrainer/layers/nnstreamer_layer.cpp b/nntrainer/layers/nnstreamer_layer.cpp index 18b315e..b8fa458 100644 --- a/nntrainer/layers/nnstreamer_layer.cpp +++ b/nntrainer/layers/nnstreamer_layer.cpp @@ -16,6 +16,7 @@ #include #include +#include #include #include #include @@ -60,8 +61,7 @@ NNStreamerLayer::NNStreamerLayer() : static constexpr size_t SINGLE_INOUT_IDX = 0; -int NNStreamerLayer::nnst_info_to_tensor_dim(ml_tensors_info_h &out_res, - TensorDim &dim) { +static int nnst_info_to_tensor_dim(ml_tensors_info_h &out_res, TensorDim &dim) { int status = ML_ERROR_NONE; unsigned int count; ml_tensor_type_e type; diff --git a/nntrainer/layers/nnstreamer_layer.h b/nntrainer/layers/nnstreamer_layer.h index 9d09bf2..661ebd1 100644 --- a/nntrainer/layers/nnstreamer_layer.h +++ b/nntrainer/layers/nnstreamer_layer.h @@ -98,15 +98,6 @@ private: * @brief release the layer resources */ void release() noexcept; - - /** - * @brief convert nnstreamer's tensor_info to nntrainer's tensor_dim - * @param[in] out_res nnstreamer's tensor_info - * @param[out] dim nntrainer's tensor_dim - * @retval 0 on success, -errno on failure - */ - static int nnst_info_to_tensor_dim(ml_tensors_info_h &out_res, - TensorDim &dim); }; } // namespace nntrainer diff --git a/nntrainer/layers/permute_layer.cpp b/nntrainer/layers/permute_layer.cpp index 201a6ea..9c53cc2 100644 --- a/nntrainer/layers/permute_layer.cpp +++ b/nntrainer/layers/permute_layer.cpp @@ -13,8 +13,10 @@ #include #include +#include #include #include 
+#include #include #include #include diff --git a/nntrainer/layers/permute_layer.h b/nntrainer/layers/permute_layer.h index a526a9a..0496f31 100644 --- a/nntrainer/layers/permute_layer.h +++ b/nntrainer/layers/permute_layer.h @@ -19,7 +19,7 @@ #include #include -#include + namespace nntrainer { namespace props { diff --git a/nntrainer/layers/pooling2d_layer.cpp b/nntrainer/layers/pooling2d_layer.cpp index 02180b4..b3ee156 100644 --- a/nntrainer/layers/pooling2d_layer.cpp +++ b/nntrainer/layers/pooling2d_layer.cpp @@ -14,13 +14,13 @@ #include #include -#include +#include +#include #include #include #include #include #include - namespace nntrainer { static constexpr size_t SINGLE_INOUT_IDX = 0; @@ -405,4 +405,12 @@ void Pooling2DLayer::pooling2d(Tensor &in, bool training, Tensor &output, } } +void Pooling2DLayer::setBatch(RunLayerContext &context, unsigned int batch) { + context.updateTensor(pool_helper_idx, batch); + props::PoolingTypeInfo::Enum pooling_type = + std::get(pooling2d_props).get(); + if (pooling_type == props::PoolingTypeInfo::Enum::global_max) + pool_helper_size.resize(batch * context.getInput(0).channel()); +} + } /* namespace nntrainer */ diff --git a/nntrainer/layers/pooling2d_layer.h b/nntrainer/layers/pooling2d_layer.h index 2b1fa23..94dcbb5 100644 --- a/nntrainer/layers/pooling2d_layer.h +++ b/nntrainer/layers/pooling2d_layer.h @@ -19,6 +19,7 @@ #include #include +#include #include namespace nntrainer { @@ -105,13 +106,7 @@ public: /** * @copydoc Layer::setBatch(RunLayerContext &context, unsigned int batch) */ - void setBatch(RunLayerContext &context, unsigned int batch) override { - context.updateTensor(pool_helper_idx, batch); - props::PoolingTypeInfo::Enum pooling_type = - std::get(pooling2d_props).get(); - if (pooling_type == props::PoolingTypeInfo::Enum::global_max) - pool_helper_size.resize(batch * context.getInput(0).channel()); - } + void setBatch(RunLayerContext &context, unsigned int batch) override; private: std::array padding; diff 
--git a/nntrainer/layers/preprocess_flip_layer.cpp b/nntrainer/layers/preprocess_flip_layer.cpp index b9e711d..b95066c 100644 --- a/nntrainer/layers/preprocess_flip_layer.cpp +++ b/nntrainer/layers/preprocess_flip_layer.cpp @@ -13,6 +13,8 @@ #include +#include +#include #include #include #include diff --git a/nntrainer/layers/preprocess_flip_layer.h b/nntrainer/layers/preprocess_flip_layer.h index aef292f..594bb75 100644 --- a/nntrainer/layers/preprocess_flip_layer.h +++ b/nntrainer/layers/preprocess_flip_layer.h @@ -17,6 +17,7 @@ #include +#include #include namespace nntrainer { diff --git a/nntrainer/layers/preprocess_l2norm_layer.cpp b/nntrainer/layers/preprocess_l2norm_layer.cpp index 141376d..4c499a2 100644 --- a/nntrainer/layers/preprocess_l2norm_layer.cpp +++ b/nntrainer/layers/preprocess_l2norm_layer.cpp @@ -16,9 +16,10 @@ #include #include +#include #include #include - +#include #include namespace nntrainer { diff --git a/nntrainer/layers/preprocess_l2norm_layer.h b/nntrainer/layers/preprocess_l2norm_layer.h index 62fdff7..ed3c22a 100644 --- a/nntrainer/layers/preprocess_l2norm_layer.h +++ b/nntrainer/layers/preprocess_l2norm_layer.h @@ -17,9 +17,7 @@ #define __PREPROCESS_L2NORM_LAYER_H__ #include -#include #include -#include namespace nntrainer { diff --git a/nntrainer/layers/preprocess_translate_layer.cpp b/nntrainer/layers/preprocess_translate_layer.cpp index 314914f..eae17dd 100644 --- a/nntrainer/layers/preprocess_translate_layer.cpp +++ b/nntrainer/layers/preprocess_translate_layer.cpp @@ -14,6 +14,7 @@ #include +#include #include #include #include diff --git a/nntrainer/layers/preprocess_translate_layer.h b/nntrainer/layers/preprocess_translate_layer.h index da69f2a..2f962b0 100644 --- a/nntrainer/layers/preprocess_translate_layer.h +++ b/nntrainer/layers/preprocess_translate_layer.h @@ -21,6 +21,7 @@ #include #endif +#include #include namespace nntrainer { diff --git a/nntrainer/layers/reshape_layer.cpp b/nntrainer/layers/reshape_layer.cpp index 
aa9d613..d2e2c9d 100644 --- a/nntrainer/layers/reshape_layer.cpp +++ b/nntrainer/layers/reshape_layer.cpp @@ -12,11 +12,11 @@ * @todo Update flatten to work in-place properly. */ +#include #include #include #include #include - namespace nntrainer { static constexpr size_t SINGLE_INOUT_IDX = 0; diff --git a/nntrainer/layers/rnn.cpp b/nntrainer/layers/rnn.cpp index 08045c8..f3c04af 100644 --- a/nntrainer/layers/rnn.cpp +++ b/nntrainer/layers/rnn.cpp @@ -12,7 +12,7 @@ */ #include -#include +#include #include #include #include diff --git a/nntrainer/layers/rnncell.cpp b/nntrainer/layers/rnncell.cpp index df23084..6b1143c 100644 --- a/nntrainer/layers/rnncell.cpp +++ b/nntrainer/layers/rnncell.cpp @@ -13,7 +13,7 @@ #include -#include +#include #include #include #include diff --git a/nntrainer/layers/split_layer.cpp b/nntrainer/layers/split_layer.cpp index aa78092..92a4c40 100644 --- a/nntrainer/layers/split_layer.cpp +++ b/nntrainer/layers/split_layer.cpp @@ -12,6 +12,7 @@ */ #include +#include #include #include #include diff --git a/nntrainer/layers/split_layer.h b/nntrainer/layers/split_layer.h index 517906d..e0e389c 100644 --- a/nntrainer/layers/split_layer.h +++ b/nntrainer/layers/split_layer.h @@ -17,7 +17,9 @@ #define __SPLIT_LAYER_H__ #ifdef __cplusplus +#include #include +#include namespace nntrainer { diff --git a/nntrainer/layers/tflite_layer.cpp b/nntrainer/layers/tflite_layer.cpp index 83f7af4..38183ca 100644 --- a/nntrainer/layers/tflite_layer.cpp +++ b/nntrainer/layers/tflite_layer.cpp @@ -11,6 +11,7 @@ */ #include +#include #include #include #include diff --git a/nntrainer/layers/tflite_layer.h b/nntrainer/layers/tflite_layer.h index 1706286..cb1c17e 100644 --- a/nntrainer/layers/tflite_layer.h +++ b/nntrainer/layers/tflite_layer.h @@ -16,11 +16,16 @@ #ifdef __cplusplus #include +#include #include #include #include +namespace ml::train { +class TensorDim; +} + namespace nntrainer { class PropsTflModelPath; @@ -88,7 +93,7 @@ private: * @param is_output 
check if output */ void setDimensions(const std::vector &tensor_idx_list, - std::vector &dim, bool is_output); + std::vector &dim, bool is_output); }; } // namespace nntrainer diff --git a/nntrainer/layers/time_dist.cpp b/nntrainer/layers/time_dist.cpp index 64726dc..8e6655b 100644 --- a/nntrainer/layers/time_dist.cpp +++ b/nntrainer/layers/time_dist.cpp @@ -11,6 +11,7 @@ * */ +#include #include #include #include diff --git a/nntrainer/layers/time_dist.h b/nntrainer/layers/time_dist.h index c5c1f4f..0fc6645 100644 --- a/nntrainer/layers/time_dist.h +++ b/nntrainer/layers/time_dist.h @@ -16,6 +16,7 @@ #ifdef __cplusplus #include +#include namespace nntrainer { diff --git a/nntrainer/models/dynamic_training_optimization.h b/nntrainer/models/dynamic_training_optimization.h index e0e9511..ad2b4bb 100644 --- a/nntrainer/models/dynamic_training_optimization.h +++ b/nntrainer/models/dynamic_training_optimization.h @@ -45,6 +45,9 @@ namespace nntrainer { +class Weight; +class Var_Grad; + /** * @class DynamicTraining Optimization * @brief Dynamic Training Optimization diff --git a/test/unittest/layers/layers_standalone_common_tests.cpp b/test/unittest/layers/layers_standalone_common_tests.cpp index d5f60ee..38b1c9b 100644 --- a/test/unittest/layers/layers_standalone_common_tests.cpp +++ b/test/unittest/layers/layers_standalone_common_tests.cpp @@ -12,7 +12,10 @@ #include +#include #include +#include +#include constexpr unsigned SAMPLE_TRIES = 10; @@ -36,8 +39,8 @@ TEST_P(LayerSemantics, DISABLED_setPropertiesValidInvalidOnly_n) { } TEST_P(LayerSemantics, finalizeValidate_p) { - nntrainer::TensorDim in_dim({1, 1, 1, 1}); - std::vector input_dims(num_inputs, in_dim); + ml::train::TensorDim in_dim({1, 1, 1, 1}); + std::vector input_dims(num_inputs, in_dim); nntrainer::InitLayerContext init_context = nntrainer::InitLayerContext(input_dims, 1, false, "layer"); EXPECT_EQ(init_context.validate(), true); @@ -77,8 +80,8 @@ TEST_P(LayerSemantics, gettersValidate_p) { } 
TEST_P(LayerSemantics, setBatchValidate_p) { - nntrainer::TensorDim in_dim({1, 1, 1, 1}); - std::vector input_dims(num_inputs, in_dim); + ml::train::TensorDim in_dim({1, 1, 1, 1}); + std::vector input_dims(num_inputs, in_dim); nntrainer::InitLayerContext init_context = nntrainer::InitLayerContext(input_dims, 1, false, "layer"); EXPECT_EQ(init_context.validate(), true);