[Application] Merge LSTM to Layers
authorDongHak Park <donghak.park@samsung.com>
Thu, 9 Feb 2023 02:35:54 +0000 (11:35 +0900)
committerJijoong Moon <jijoong.moon@samsung.com>
Thu, 23 Feb 2023 11:24:47 +0000 (20:24 +0900)
Merge LSTM dir into Layers dir
- both the PyTorch and TensorFlow files are single LSTM layer test code
- the Layers dir already contains single and simple layer tests
- an NNTrainer LSTM example also exists in the Layers dir
- users can find the various simple examples more intuitively in one place

related PR: #2101

Signed-off-by: DongHak Park <donghak.park@samsung.com>
Applications/LSTM/README.md [deleted file]
Applications/LSTM/tensorflow/tensor_lstm.py [deleted file]
Applications/Layers/PyTorch/LSTM.py [moved from Applications/LSTM/pytorch/torch_lstm.py with 100% similarity]

diff --git a/Applications/LSTM/README.md b/Applications/LSTM/README.md
deleted file mode 100644 (file)
index 42714b2..0000000
+++ /dev/null
@@ -1,23 +0,0 @@
-# LSTM
-
-In this example, we demonstrate training an LSTM (Long Short-Term Memory) model with a dummy dataset.
-
-### LSTM Model Configuration
-
-This is an example of a simple recurrent neural network model consisting of one LSTM cell.
-
-### Comparison with other frameworks
-
-For comparison, we provide PyTorch and TensorFlow code for the same model in ```pytorch/torch_lstm.py``` and ```tensorflow/tensor_lstm.py```, respectively.
-
-You can run the PyTorch code with
-
-``` bash
-$ python3 ./pytorch/torch_lstm.py
-```
-
-You can run the TensorFlow code with
-
-``` bash
-$ python3 ./tensorflow/tensor_lstm.py
-```
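The PyTorch counterpart (`Applications/Layers/PyTorch/LSTM.py`, moved unchanged from `Applications/LSTM/pytorch/torch_lstm.py`) is not shown in this diff. For orientation, below is a minimal sketch of what a matching single-LSTM-layer PyTorch example could look like, mirroring the constants of the TensorFlow script deleted below; all names here are illustrative assumptions, not the contents of the moved file.

```python
import torch
import torch.nn as nn

EPOCH = 10
DB_SIZE = 64
BATCH_SIZE = 64
IMG_SIZE = 224 * 224 * 3
OUTPUT_SIZE = 10


class LSTMModel(nn.Module):
    def __init__(self):
        super().__init__()
        # single LSTM layer mapping each time step to OUTPUT_SIZE features
        self.lstm = nn.LSTM(input_size=IMG_SIZE,
                            hidden_size=OUTPUT_SIZE,
                            batch_first=True)

    def forward(self, x):
        output, _ = self.lstm(x)  # output shape: (batch, seq_len, OUTPUT_SIZE)
        return output


if __name__ == "__main__":
    torch.manual_seed(0)
    model = LSTMModel()
    loss_fn = nn.MSELoss()
    optimizer = torch.optim.SGD(model.parameters(), lr=0.01)

    for epoch in range(EPOCH):
        # dummy data with sequence length 1, like the TensorFlow script
        X = torch.randn(DB_SIZE, 1, IMG_SIZE)
        y = torch.randn(DB_SIZE, 1, OUTPUT_SIZE)

        optimizer.zero_grad()
        loss = loss_fn(model(X), y)
        loss.backward()
        optimizer.step()
        print(f"loss: {loss.item():>7f}  [{epoch+1:>5d}/{EPOCH}]")
```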
diff --git a/Applications/LSTM/tensorflow/tensor_lstm.py b/Applications/LSTM/tensorflow/tensor_lstm.py
deleted file mode 100644 (file)
index 68a2130..0000000
+++ /dev/null
@@ -1,69 +0,0 @@
-# SPDX-License-Identifier: Apache-2.0
-# Copyright (C) 2023 Seungbaek Hong <sb92.hong@samsung.com>
-#
-# @file   tensor_lstm.py
-# @date   30 Jan 2023
-# @see    https://github.com/nnstreamer/nntrainer
-# @author Seungbaek Hong <sb92.hong@samsung.com>
-# @bug   No known bugs except for NYI items
-# @brief  This is LSTM Example for Tensorflow (only training with dummy data)
-
-import tensorflow as tf
-
-print(f"Tensorflow version: {tf.__version__}")
-
-seed = 0
-tf.random.set_seed(seed)
-tf.config.threading.set_intra_op_parallelism_threads(1)
-
-EPOCH = 10
-DB_SIZE = 64
-BATCH_SIZE = 64
-IMG_SIZE = 224*224*3
-OUTPUT_SIZE = 10
-
-
-class LSTM(tf.keras.Model):
-    def __init__(self):
-        super(LSTM, self).__init__()
-        self.lstm = tf.keras.layers.LSTM(OUTPUT_SIZE, batch_input_shape=(BATCH_SIZE, 1, IMG_SIZE), return_sequences=True)
-
-    def call(self, x):
-        output = self.lstm(x)
-        return output
-
-
-def train(dataloader, model, loss_fn, optimizer):
-    epoch_loss, num_of_batch = 0, len(dataloader)
-
-    for X_batch, y_batch in dataloader:
-        # Compute prediction error
-        with tf.GradientTape() as tape:
-            pred = model(X_batch)
-            loss = loss_fn(y_batch, pred)
-
-        # Backpropagation
-        gradients = tape.gradient(loss, model.trainable_variables)
-        optimizer.apply_gradients(zip(gradients, model.trainable_variables))
-
-        epoch_loss += loss / num_of_batch
-
-    return epoch_loss
-
-def make_dummy_database(num_of_samples):
-    X = tf.random.normal((num_of_samples, 1, IMG_SIZE))
-    y = tf.random.normal((num_of_samples, 1, OUTPUT_SIZE))
-    return X, y
-
-
-if __name__ == '__main__':
-    model = LSTM()
-    loss_fn = tf.keras.losses.MeanSquaredError()
-    optimizer = tf.keras.optimizers.SGD()
-
-    for epoch in range(EPOCH):
-        X_train, y_train = make_dummy_database(DB_SIZE)
-        dataset = tf.data.Dataset.from_tensor_slices((X_train, y_train)).batch(BATCH_SIZE)
-        epoch_loss = train(dataset, model, loss_fn, optimizer)
-        print(f"loss: {epoch_loss:>7f}  [{epoch+1:>5d}/{EPOCH}]")
-        del X_train, y_train, dataset, epoch_loss
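For a quick sanity check of the TensorFlow script above, one could append an inference step after the training loop; this snippet is an illustrative addition under the script's existing names, not part of the deleted file.

```python
# Illustrative only: feed one fresh dummy batch through the trained model.
sample = tf.random.normal((BATCH_SIZE, 1, IMG_SIZE))
pred = model(sample)
# return_sequences=True keeps the time axis: (BATCH_SIZE, 1, OUTPUT_SIZE)
print(pred.shape)  # (64, 1, 10)
```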