return createLayer(LayerType::LAYER_RESHAPE, properties);
}
-/**
- * @brief Helper function to create LSTMCell layer
- */
-inline std::unique_ptr<Layer>
-LSTMCell(const std::vector<std::string> &properties = {}) {
- return createLayer(LayerType::LAYER_LSTMCELL, properties);
-}
-
/**
* @brief Helper function to create addition layer
*/
return createLayer(LayerType::LAYER_LSTM, properties);
}
+/**
+ * @brief Helper function to create LSTMCell layer
+ */
+inline std::unique_ptr<Layer>
+LSTMCell(const std::vector<std::string> &properties = {}) {
+ return createLayer(LayerType::LAYER_LSTMCELL, properties);
+}
+
/**
* @brief Helper function to create GRU layer
*/
'rnn.cpp',
'acti_func.cpp',
'lstm.cpp',
+ 'lstmcell.cpp',
'time_dist.cpp',
'common_properties.cpp',
'split_layer.cpp',
'centroid_knn.cpp',
'layer_context.cpp',
'reshape_layer.cpp',
- 'lstmcell.cpp'
]
layer_headers = [
// SPDX-License-Identifier: Apache-2.0
/**
- * Copyright (C) 2020 Jijoong Moon <jijoong.moon@samsung.com>
+ * Copyright (C) 2021 Jijoong Moon <jijoong.moon@samsung.com>
*
* @file rnn.cpp
* @date 17 March 2021
dim_hh.height(unit);
dim_hh.batch(1);
- // weight_initializer can be set sepeartely. weight_xh initializer,
+ // weight_initializer can be set separately. weight_xh initializer,
// weight_hh initializer kernel initializer & recurrent_initializer in keras
// for now, it is set same way.
bias_dim, bias_initializer, WeightRegularizer::NONE, 1.0f, "bias_h", true);
// We do not need this if we reuse net_hidden[0]. But if we do, then the unit
- // test will fail. Becuase it modifies the date during gradient calculation
+ // test will fail. Because it modifies the data during gradient calculation
// TODO : We could control with something like #define test to save memory
TensorDim d = input_dim;
d.width(unit);
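The rnn.cpp comment above notes that Keras exposes separate kernel and recurrent initializers, while nntrainer currently applies one weight_initializer to both weight_xh and weight_hh. A minimal sketch of the Keras side of that comparison (layer choice and initializer values here are illustrative, not taken from the patch):

import tensorflow as tf

# In Keras the input-to-hidden and hidden-to-hidden weights can be initialized
# independently; per the comment above, rnn.cpp sets both the same way for now.
rnn = tf.keras.layers.SimpleRNN(
    units=4,
    kernel_initializer="glorot_uniform",   # analogous to weight_xh
    recurrent_initializer="orthogonal",    # analogous to weight_hh
)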
# loss = self.loss(output, labels[0])
return output, loss
-
class LSTMStacked(torch.nn.Module):
def __init__(self, unroll_for=2, num_lstm=1):
super().__init__()
# self.lstm.weight_hh.data.fill_(1.0)
# self.lstm.weight_ih.data.fill_(1.0)
# self.lstm.bias_hh.data.fill_(1.0)
+ for lstm in self.lstms:
+ lstm.bias_ih.data.fill_(0.0)
+ lstm.bias_ih.requires_grad = False
self.unroll_for = unroll_for
self.loss = torch.nn.MSELoss()
def forward(self, inputs, labels):
# second bias is always set to make it always zero grad.
- # this is because that we are only keepting one bias
+ # this is because we are only keeping one bias
for lstm in self.lstms:
lstm.bias_ih.data.fill_(0.0)
loss = self.loss(ret, labels[0])
return ret, loss
-
if __name__ == "__main__":
record_v2(
FCUnroll(unroll_for=5),
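The comment in the recurrent-model script above explains that the second PyTorch bias is zeroed and excluded from gradients because only one bias is kept. A minimal sketch of that technique on a standalone torch.nn.LSTMCell (sizes and variable names here are illustrative):

import torch

# torch.nn.LSTMCell carries two bias vectors (bias_ih and bias_hh); pinning
# bias_ih to zero and freezing it leaves bias_hh as the single effective bias,
# matching the comment about keeping only one bias.
cell = torch.nn.LSTMCell(input_size=2, hidden_size=3)
cell.bias_ih.data.fill_(0.0)
cell.bias_ih.requires_grad = False

x = torch.randn(1, 2)
h, c = cell(x)  # forward pass now depends on bias_hh alone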
/**
* Copyright (C) 2021 Parichay Kapoor <pk.kapoor@samsung.com>
*
- * @file unittest_layers_lstm.cpp
+ * @file unittest_layers_lstmcell.cpp
* @date 22 October 2021
- * @brief LSTM Layer Test
+ * @brief LSTMCell Layer Test
* @see https://github.com/nnstreamer/nntrainer
* @author Parichay Kapoor <pk.kapoor@samsung.com>
* @bug No known bugs except for NYI items
auto outer_graph = makeGraph({
{"input", {"name=input", "input_shape=1:1:2"}},
- /// here lstm_cells is being inserted
+ /// here lstm is being inserted
{"mse", {"name=loss", "input_layers=lstm_scope/a1"}},
});
for (auto &node : outer_graph) {
auto outer_graph = makeGraph({
{"input", {"name=input", "input_shape=1:1:2"}},
- /// here lstm_cells is being inserted
+ /// here lstm is being inserted
{"mse", {"name=loss", "input_layers=lstm_scope/a2"}},
});
for (auto &node : outer_graph) {