debug=["name", "summary"],
)
+ lstm_layer_tc = lambda lstm_layer: partial(
+ record,
+ model=[
+ K.Input(batch_shape=(1, 2, 1)),
+ lstm_layer,
+ K.layers.Dense(1)
+ ],
+ optimizer=opt.SGD(learning_rate=0.1),
+ iteration=1,
+ input_shape=(1,2,1),
+ label_shape=(1,1),
+ is_onehot=False,
+ loss_fn_str="mse"
+ )
+ lstm = K.layers.LSTM(2)
+ lstm_layer_tc(lstm)(file_name="lstm_basic.info", debug=["summary", "initial_weights", "dx", "output", "layer_name", "label"],)
##
# @brief generate data using uniform data from a function and save to the file.
# @note one-hot label is supported for now, this could be extended if needed.
-def prepare_data(model, input_shape, label_shape, writer_fn, **kwargs):
+def prepare_data(model, input_shape, label_shape, writer_fn, is_onehot, **kwargs):
initial_input = _rand_like(input_shape)
- label = tf.one_hot(
- indices=np.random.randint(0, label_shape[1] - 1, label_shape[0]),
- depth=label_shape[1],
- )
+ if is_onehot:
+ label = tf.one_hot(
+ # np.random.randint's upper bound is exclusive, so using
+ # label_shape[1] - 1 here could never sample the last class index.
+ indices=np.random.randint(0, label_shape[1], label_shape[0]),
+ depth=label_shape[1],
+ )
+ else:
+ label = _rand_like(label_shape)
initial_weights = []
for layer in iter_model(model):
# @param inputs keras inputs to build a model
# @param outputs keras outputs to build a model
def generate_recordable_model(
- loss_fn_str, model=None, inputs=None, outputs=None, **kwargs
+ loss_fn_str, model=None, inputs=None, outputs=None, is_onehot=False, **kwargs
):
if isinstance(model, list):
model = [attach_trans_layer(layer) for layer in model]
model=None,
inputs=None,
outputs=None,
+ is_onehot=True,
**kwargs
):
if os.path.isfile(file_name):
print("Warning: the file %s is being truncated and overwritten" % file_name)
loss_fn = _get_loss_fn(loss_fn_str)
- model = generate_recordable_model(loss_fn_str, model, inputs, outputs, **kwargs)
+ model = generate_recordable_model(loss_fn_str, model, inputs, outputs, is_onehot, **kwargs)
with open(file_name, "wb") as f:
write = _get_writer(f)
initial_input, label = prepare_data(
- model, input_shape, label_shape, write, **kwargs
+ model, input_shape, label_shape, write, is_onehot, **kwargs
)
-
for _ in range(iteration):
_debug_print(
iteration="\033[1;33m[%d/%d]\033[0m" % (_ + 1, iteration),
static std::string input_base = "type = input";
static std::string fc_base = "type = Fully_connected";
static std::string conv_base = "type = conv2d | stride = 1,1 | padding = 0,0";
+static std::string lstm_base = "type = lstm";
static std::string pooling_base = "type = pooling2d | padding = 0,0";
static std::string preprocess_flip_base = "type = preprocess_flip";
static std::string preprocess_translate_base = "type = preprocess_translate";
}
);
+INI lstm_basic(
+ "lstm_basic",
+ {
+ nn_base + "loss=mse | batch_size=1",
+ sgd_base + "learning_rate = 0.1",
+ I("input") + input_base + "input_shape=1:2:1",
+ I("lstm") + lstm_base +
+ "unit = 2" + "input_layers=input",
+ I("outputlayer") + fc_base + "unit = 1" + "input_layers=lstm"
+ }
+);
+
INSTANTIATE_TEST_CASE_P(
nntrainerModelAutoTests, nntrainerModelTest, ::testing::Values(
mkModelTc(fc_sigmoid_mse, "3:1:1:10", 10),
/**< Addition test */
- mkModelTc(addition_resnet_like_validate, "3:1:1:10", 10)
+ mkModelTc(addition_resnet_like_validate, "3:1:1:10", 10),
+ mkModelTc(preprocess_flip_validate, "3:1:1:10", 10),
/// #1192 time distribution inference bug
// mkModelTc(fc_softmax_mse_distribute_validate, "3:1:5:3", 1),
// mkModelTc(fc_softmax_cross_distribute_validate, "3:1:5:3", 1),
// mkModelTc(fc_sigmoid_cross_distribute_validate, "3:1:5:3", 1)
+ mkModelTc(lstm_basic, "1:1:1:1", 1)
// / #if gtest_version <= 1.7.0
));
/// #else gtest_version > 1.8.0