[inputlayer]
Type = input
Input_Shape = 32:1:1:62720 # Input Layer Dimension
-Bias_zero = true # Zero Bias
+Bias_init_zero = true # Zero Bias
Normalization = true
[outputlayer]
Type = fully_connected
Unit = 10 # Output Layer Dimension ( = Weight Width )
-Bias_zero = true
+Bias_init_zero = true
Activation = softmax # activation : sigmoid, softmax
Weight_Decay = l2norm
weight_Decay_Lambda = 0.005
[inputlayer]
Type = input
HiddenSize = 62720 # Input Layer Dimension
-Bias_zero = true # Zero Bias
+Bias_init_zero = true # Zero Bias
Normalization = true
Activation = sigmoid # activation : sigmoid, tanh
[outputlayer]
Type = fully_connected
HiddenSize = 10 # Output Layer Dimension ( = Weight Width )
-Bias_zero = true
+Bias_init_zero = true
Activation = softmax # activation : sigmoid, softmax
Weight_Decay = l2norm
weight_Decay_Lambda = 0.005
Type = InputLayer
Id = 0 # Layer Id
HiddenSize = 62720 # Input Layer Dimension
-Bias_zero = true # Zero Bias
+Bias_init_zero = true # Zero Bias
[fc1layer]
Type = FullyConnectedLayer
Id = 1
HiddenSize = 128 # Hidden Layer Dimension ( = Weight Width )
-Bias_zero = true
+Bias_init_zero = true
[batchnormalization]
Type = BatchNormalizationLayer
Type = OutputLayer
Id = 3
HiddenSize = 10 # Output Layer Dimension ( = Weight Width )
-Bias_zero = true
+Bias_init_zero = true
Softmax = true
[inputlayer]
Type = input
Input_Shape = 1:1:1:2
-Bias_zero = true # Zero Bias
+Bias_init_zero = true # Zero Bias
Activation = sigmoid
[outputlayer]
Type = fully_connected
Unit = 1
-Bias_zero = true
+Bias_init_zero = true
Activation = sigmoid
[inputlayer]
Type = input
Input_Shape = 32:1:1:4 # Input Layer Dimension
-Bias_zero = true # Zero Bias
+Bias_init_zero = true # Zero Bias
Activation = tanh # activation : sigmoid, tanh
[fc1layer]
Type = fully_connected
Unit = 50 # Hidden Layer Dimension ( = Weight Width )
-Bias_zero = true
+Bias_init_zero = true
Activation = tanh # activation : sigmoid, tanh
[fc2layer]
Type = fully_connected
Unit = 50
-Bias_zero = true
+Bias_init_zero = true
Activation = tanh # activation : sigmoid, tanh
[outputlayer]
Type = fully_connected
Unit = 2 # Output Layer Dimension ( = Weight Width )
-Bias_zero = true
+Bias_init_zero = true
Activation = sigmoid # activation : sigmoid, tanh
[inputlayer]
Type = input
Input_Shape = 32:1:1:62720 # Input Layer Dimension
-Bias_zero = true # Zero Bias
+Bias_init_zero = true # Zero Bias
Normalization = true
Activation = sigmoid # activation : sigmoid, tanh
[outputlayer]
Type = fully_connected
Unit = 10 # Output Layer Dimension ( = Weight Width )
-Bias_zero = true
+Bias_init_zero = true
Activation = softmax # activation : sigmoid, softmax
Weight_Decay = l2norm
weight_Decay_Lambda = 0.005
/* set property for input layer */
status =
ml_nnlayer_set_property (layers[0], "input_shape= 32:1:1:62720",
- "normalization=true", "bias_zero=true", NULL);
+ "normalization=true", "bias_init_zero=true", NULL);
NN_RETURN_STATUS ();
/* add input layer into model */
/* set property for fc layer */
status = ml_nnlayer_set_property (layers[1], "unit= 10", "activation=softmax",
- "bias_zero=true", "weight_decay=l2norm",
+ "bias_init_zero=true", "weight_decay=l2norm",
"weight_decay_lambda=0.005", "weight_ini=xavier_uniform", NULL);
NN_RETURN_STATUS ();
/* set property for input layer */
status =
ml_nnlayer_set_property (layers[0], "input_shape= 32:1:1:62720",
- "normalization=true", "bias_zero=true", NULL);
+ "normalization=true", "bias_init_zero=true", NULL);
NN_RETURN_STATUS ();
/* add input layer into model */
/* set property for fc layer */
status = ml_nnlayer_set_property (layers[1], "unit= 10", "activation=softmax",
- "bias_zero=true", "weight_decay=l2norm",
+ "bias_init_zero=true", "weight_decay=l2norm",
"weight_decay_lambda=0.005", "weight_ini=xavier_uniform", NULL);
NN_RETURN_STATUS ();
[inputlayer]
Type = input
Input_Shape = 1:1:1:128 # Input Layer Dimension
-Bias_zero = true # Zero Bias
+Bias_init_zero = true # Zero Bias
Activation = sigmoid
[fc1layer]
Type = fully_connected
Unit = 20 # Hidden Layer Dimension ( = Weight Width )
-Bias_zero = true
+Bias_init_zero = true
Activation = sigmoid
[outputlayer]
Type = fully_connected
Unit = 3 # Output Layer Dimension ( = Weight Width )
-Bias_zero = true
+Bias_init_zero = true
Activation = sigmoid
$(NNTRAINER_ROOT)/nntrainer/src/tensor_dim.cpp \
$(NNTRAINER_ROOT)/nntrainer/src/conv2d_layer.cpp \
$(NNTRAINER_ROOT)/nntrainer/src/pooling2d_layer.cpp \
- $(NNTRAINER_ROOT)/nntrainer/src/activation_layer.cpp
+ $(NNTRAINER_ROOT)/nntrainer/src/activation_layer.cpp \
+ $(NNTRAINER_ROOT)/nntrainer/src/flatten_layer.cpp
NNTRAINER_INCLUDES := $(NNTRAINER_ROOT)/nntrainer/include
#include <fstream>
#include <iostream>
#include <layer.h>
-#include <nntrainer_error.h>
#include <tensor.h>
#include <vector>
* @param[in] values values of property
- * @retval #ML_ERROR_NOT_SUPPORTED Successful.
+ * @retval #ML_ERROR_NOT_SUPPORTED Not supported.
*/
- int setProperty(std::vector<std::string> values) {
- return ML_ERROR_NOT_SUPPORTED;
- };
+ int setProperty(std::vector<std::string> values);
/**
* @brief copy layer
public:
Layer()
: last_layer(false),
- init_zero(false),
+ bias_init_zero(false),
type(LAYER_UNKNOWN),
loss(0.0),
cost(COST_UNKNOWN),
activation_type(ACT_NONE),
bn_follow(false),
weight_decay(),
- weight_ini_type(WEIGHT_UNKNOWN) {}
+ weight_ini_type(WEIGHT_UNKNOWN),
+ flatten(false) {}
/**
* @brief Destructor of Layer Class
* @brief set bias initialize with zero
* @param[in] zero true/false
*/
- void setBiasZero(bool zero) { init_zero = zero; }
+ void setBiasZero(bool zero) { bias_init_zero = zero; }
/**
* @brief set Weight Initialization Type
std::shared_ptr<std::vector<Tensor>> getWeights() { return getObjFromRef(weights); }
/**
+ * @brief get whether the output of this layer must be flattened
+ * @retval flatten value
+ */
+ bool getFlatten() { return flatten; }
+
+ /**
* @brief Property Enumeration
* 0. input shape : string
- * 1. bias zero : bool
+ * 1. bias_init_zero : bool
*/
enum class PropertyType {
input_shape = 0,
- bias_zero = 1,
+ bias_init_zero = 1,
normalization = 2,
standardization = 3,
activation = 4,
padding = 13,
pooling_size = 14,
pooling = 15,
- unknown = 16
+ flatten = 16,
+ unknown = 17
};
protected:
/**
* @brief Boolean for the Bias to set zero
*/
- bool init_zero;
+ bool bias_init_zero;
/**
* @brief Layer type
WeightIniType weight_ini_type;
/**
+ * @brief Output of this layer should be flattened
+ */
+ bool flatten;
+
+ /**
* @brief Gradient for the weights in this layer
*/
std::vector<std::reference_wrapper<Tensor>> gradients;
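Not part of the patch, just an illustrative sketch of how the renamed key and the new flag fit together: a layer type that parses these keys (such as the fully connected layer changed below) can be configured through the generic key=value interface and then queried with getFlatten(). The `layer` pointer and the surrounding error handling here are hypothetical.
/* Illustrative only: drive the renamed bias key and the new flatten key
 * through the key=value property interface, then query the flag. */
std::vector<std::string> props;
props.push_back("bias_init_zero=true"); /* formerly "bias_zero=true" */
props.push_back("flatten=true");        /* request that this layer's output be flattened */
int status = layer->setProperty(props); /* `layer` is assumed to be a layer type handling these keys */
if (status == ML_ERROR_NONE && layer->getFlatten()) {
  /* NeuralNetwork::init() will insert a FlattenLayer right after this layer */
}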
#include <conv2d_layer.h>
#include <databuffer.h>
#include <fc_layer.h>
+#include <flatten_layer.h>
#include <fstream>
#include <input_layer.h>
#include <iostream>
* @returns Create activation layer
*/
std::shared_ptr<Layer> _make_act_layer(ActiType act, std::shared_ptr<Layer>);
+
+ /**
+ * @brief Add flatten layer to layers
+ * @param[in/out] position position at which to insert the flatten layer;
+ *                position is incremented once the layer is inserted.
+ * @note the layer is inserted right after the layer at the given position
+ */
+ int initFlattenLayer(unsigned int &position);
+
+ /**
+ * @brief Add flatten layer to layers
+ * @note layer is inserted at the back of layers
+ */
+ int initFlattenLayer();
};
} /* namespace nntrainer */
* 5. WEIGHTINI ( Weight Initialization Token )
* 7. WEIGHT_DECAY ( Weight Decay Token )
* 8. PADDING ( Padding Token )
+ * 9. POOLING ( Pooling Token )
- * 9. UNKNOWN
+ * 10. UNKNOWN
*/
typedef enum {
case PropertyType::input_shape:
status = dim.setTensorDim(values[0].c_str());
break;
- case PropertyType::bias_zero: {
- status = setBoolean(init_zero, value);
+ case PropertyType::bias_init_zero: {
+ status = setBoolean(bias_init_zero, value);
NN_RETURN_STATUS();
} break;
case PropertyType::epsilon:
filters.push_back(Knl);
Tensor B(input_dim.batch(), 1, 1, 1);
- if (!init_zero) {
+ if (!bias_init_zero) {
B.apply([&](float x) { return random(); });
}
bias.push_back(B);
status = input_dim.setTensorDim(value.c_str());
NN_RETURN_STATUS();
break;
- case PropertyType::bias_zero:
- status = setBoolean(init_zero, value);
+ case PropertyType::bias_init_zero:
+ status = setBoolean(bias_init_zero, value);
NN_RETURN_STATUS();
break;
case PropertyType::activation:
status = setActivation((ActiType)parseType(value, TOKEN_ACTI));
NN_RETURN_STATUS();
break;
+ case PropertyType::flatten:
+ status = setBoolean(flatten, value);
+ NN_RETURN_STATUS();
+ break;
case PropertyType::weight_decay:
weight_decay.type = (WeightDecayType)parseType(value, TOKEN_WEIGHT_DECAY);
if (weight_decay.type == WeightDecayType::unknown) {
output_dim = input_dim;
output_dim.width(unit);
- if (init_zero) {
+ if (bias_init_zero) {
bias.setZero();
} else {
bias.setRandUniform(-0.5, 0.5);
unit = width;
output_dim.width(unit);
} break;
- case PropertyType::bias_zero: {
- status = setBoolean(init_zero, value);
+ case PropertyType::bias_init_zero: {
+ status = setBoolean(this->bias_init_zero, value);
NN_RETURN_STATUS();
} break;
case PropertyType::activation:
status = setActivation((ActiType)parseType(value, TOKEN_ACTI));
NN_RETURN_STATUS();
break;
+ case PropertyType::flatten:
+ status = setBoolean(flatten, value);
+ NN_RETURN_STATUS();
+ break;
case PropertyType::weight_decay:
weight_decay.type = (WeightDecayType)parseType(value, TOKEN_WEIGHT_DECAY);
if (weight_decay.type == WeightDecayType::unknown) {
this->last_layer = from->last_layer;
}
+int FlattenLayer::setProperty(std::vector<std::string> values) {
+ return ML_ERROR_NOT_SUPPORTED;
+}
+
} /* namespace nntrainer */
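The flatten layer's dimension handling does not appear in this excerpt; the following is only a rough sketch of what flattening implies. The initialize(bool) signature is inferred from the call in NeuralNetwork::initFlattenLayer below, and the batch/channel/height setters on TensorDim are assumed (only the width setter is visible above).
/* Hypothetical sketch, not the patch's actual code: keep the batch size and
 * collapse channel, height and width into the width dimension. */
int FlattenLayer::initialize(bool last) {
  this->last_layer = last;
  output_dim.batch(input_dim.batch());
  output_dim.channel(1);
  output_dim.height(1);
  output_dim.width(input_dim.channel() * input_dim.height() * input_dim.width());
  return ML_ERROR_NONE;
}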
status = input_dim.setTensorDim(value.c_str());
NN_RETURN_STATUS();
break;
- case PropertyType::bias_zero:
- status = setBoolean(init_zero, value);
+ case PropertyType::bias_init_zero:
+ status = setBoolean(bias_init_zero, value);
NN_RETURN_STATUS();
break;
case PropertyType::normalization:
iniparser_getstring(ini, (layer_name + ":Type").c_str(), unknown);
LayerType layer_type = (LayerType) parseType(layer_type_str, TOKEN_LAYER);
bool b_zero =
- iniparser_getboolean(ini, (layer_name + ":Bias_zero").c_str(), true);
+ iniparser_getboolean(ini, (layer_name + ":bias_init_zero").c_str(), true);
last = (section_names_iter + 1) == section_names.end();
std::make_shared<Conv2DLayer>();
std::string input_shape_str = iniparser_getstring(
- ini, (layer_name + ":Input_Shape").c_str(), unknown);
+ ini, (layer_name + ":Input_Shape").c_str(), unknown);
if (input_shape_str.compare("Unknown") != 0) {
TensorDim d;
status =
getValues(CONV2D_DIM,
- iniparser_getstring(
- ini, (layer_name + ":kernel_size").c_str(), unknown),
- (int *)size);
+ iniparser_getstring(
+ ini, (layer_name + ":kernel_size").c_str(), unknown),
+ (int *)size);
NN_INI_RETURN_STATUS();
status = conv2d_layer->setSize(size, Layer::PropertyType::kernel_size);
NN_INI_RETURN_STATUS();
status = getValues(
- CONV2D_DIM,
- iniparser_getstring(ini, (layer_name + ":stride").c_str(), unknown),
- (int *)size);
+ CONV2D_DIM,
+ iniparser_getstring(ini, (layer_name + ":stride").c_str(), unknown),
+ (int *)size);
NN_INI_RETURN_STATUS();
status = conv2d_layer->setSize(size, Layer::PropertyType::stride);
NN_INI_RETURN_STATUS();
status = getValues(CONV2D_DIM,
- iniparser_getstring(
- ini, (layer_name + ":padding").c_str(), unknown),
- (int *)size);
+ iniparser_getstring(
+ ini, (layer_name + ":padding").c_str(), unknown),
+ (int *)size);
NN_INI_RETURN_STATUS();
status = conv2d_layer->setSize(size, Layer::PropertyType::padding);
NN_INI_RETURN_STATUS();
status = conv2d_layer->setFilter(
- iniparser_getint(ini, (layer_name + ":filter").c_str(), 0));
+ iniparser_getint(ini, (layer_name + ":filter").c_str(), 0));
NN_INI_RETURN_STATUS();
conv2d_layer->setBiasZero(b_zero);
conv2d_layer->setWeightInit((WeightIniType)parseType(
- iniparser_getstring(ini, (layer_name + ":WeightIni").c_str(),
- unknown),
- TOKEN_WEIGHTINI));
+ iniparser_getstring(ini, (layer_name + ":WeightIni").c_str(),
+ unknown),
+ TOKEN_WEIGHTINI));
status = parseWeightDecay(ini, layer_name, weight_decay);
NN_INI_RETURN_STATUS();
NN_INI_RETURN_STATUS();
break;
}
+
+ /** Add activation layer */
const char *acti_str = iniparser_getstring(
ini, (layer_name + ":Activation").c_str(), unknown);
ActiType act = (ActiType)parseType(acti_str, TOKEN_ACTI);
status = initActivationLayer(act);
NN_INI_RETURN_STATUS();
+ /** Add flatten layer */
+ bool flatten =
+ iniparser_getboolean(ini, (layer_name + ":Flatten").c_str(), false);
+ if (flatten) {
+ status = initFlattenLayer();
+ NN_INI_RETURN_STATUS();
+ }
previous_dim = layers.back()->getOutputDimension();
}
default:
break;
}
- status = initActivationLayer(layers[i]->getActivationType(), i);
+ std::shared_ptr<Layer> last_layer = layers[i];
+ status = initActivationLayer(last_layer->getActivationType(), i);
NN_RETURN_STATUS();
+ if (last_layer->getFlatten()) {
+ status = initFlattenLayer(i);
+ NN_RETURN_STATUS();
+ }
previous_dim = layers[i]->getOutputDimension();
}
return ML_ERROR_INVALID_PARAMETER;
}
+int NeuralNetwork::initFlattenLayer(unsigned int &position) {
+ std::shared_ptr<FlattenLayer> flatten_layer =
+ std::make_shared<FlattenLayer>();
+
+ flatten_layer->setInputDimension(layers[position]->getOutputDimension());
+ flatten_layer->initialize(layers[position]->getLast());
+ layers.insert(layers.begin() + position + 1, flatten_layer);
+ position++;
+ return ML_ERROR_NONE;
+}
+
+int NeuralNetwork::initFlattenLayer() {
+ unsigned int position = layers.end() - layers.begin() - 1;
+ return initFlattenLayer(position);
+}
+
} /* namespace nntrainer */
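Putting the INI path and initFlattenLayer() together: a layer section that sets Flatten = true gets a FlattenLayer inserted directly after it, so its four dimensional output reaches the next fully connected layer as batch:1:1:(channel*height*width). A minimal hypothetical section is shown below; the section name, the conv2d type token and the filter settings are illustrative, not taken from the patch.
[conv2dlayer]
Type = conv2d            # assumed type token for the convolution layer
Filter = 6
Kernel_size = 5,5
Stride = 1,1
Bias_init_zero = true
Activation = sigmoid
Flatten = true           # a FlattenLayer is inserted right after this layer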
/**
* @brief Layer Properties
* input_shape = 0,
- * bias_zero = 1,
+ * bias_init_zero = 1,
* normalization = 2,
* standardization = 3,
* activation = 4,
* padding = 13
* pooling_size = 14
* pooling = 15
+ * flatten = 16
*
* InputLayer has 0, 1, 2, 3 properties.
* FullyConnectedLayer has 1, 4, 6, 7, 8, 9 properties.
* Pooling2DLayer has 12, 13, 14, 15 properties.
* BatchNormalizationLayer has 0, 1, 5, 6, 7 properties.
*/
- std::array<std::string, 17> property_string = {
- "input_shape", "bias_zero", "normalization", "standardization",
+ std::array<std::string, 18> property_string = {
+ "input_shape", "bias_init_zero", "normalization", "standardization",
"activation", "epsilon", "weight_decay", "weight_decay_lambda",
"unit", "weight_ini", "filter", "kernel_size",
"stride", "padding", "pooling_size", "pooling",
- "unknown"};
+ "flatten", "unknown"};
for (i = 0; i < property_string.size(); i++) {
unsigned int size = (property_string[i].size() > property.size())
"\n"
"Input_Shape = 32:1:1:62720"
"\n"
- "Bias_zero = true"
+ "bias_init_zero = true"
"\n"
"Normalization = true"
"\n"
"\n"
"Unit = 10"
"\n"
- "Bias_zero = true"
+ "bias_init_zero = true"
"\n"
"Activation = softmax"
"\n";
"\n"
"Input_Shape = 32:3:28:28"
"\n"
- "Bias_zero = true"
+ "bias_init_zero = true"
"\n"
"Activation = sigmoid"
"\n"
"\n"
"weightIni=xavier_uniform"
"\n"
+ "flatten = false"
+ "\n"
"[outputlayer]"
"\n"
"Type = fully_connected"
"\n"
"Unit = 10"
"\n"
- "Bias_zero = true"
+ "bias_init_zero = true"
"\n"
"Activation = softmax"
"\n";
Type = input
Id = 0 # Layer Id
HiddenSize = 128 # Input Layer Dimension
-Bias_zero = true # Zero Bias
+Bias_init_zero = true # Zero Bias
Activation = sigmoid
[fc1layer]
Type = fully_connected
Id = 1
HiddenSize = 20 # Hidden Layer Dimension ( = Weight Width )
-Bias_zero = true
+Bias_init_zero = true
Activation = sigmoid
[outputlayer]
Type = output
Id = 3
HiddenSize = 3 # Output Layer Dimension ( = Weight Width )
-Bias_zero = true
+Bias_init_zero = true
Activation = sigmoid
status =
ml_nnlayer_set_property(layers[0], "input_shape= 32:1:1:62720",
- "normalization=true", "bias_zero=true", NULL);
+ "normalization=true", "bias_init_zero=true", NULL);
EXPECT_EQ(status, ML_ERROR_NONE);
status = ml_nnmodel_add_layer(model, layers[0]);
EXPECT_EQ(status, ML_ERROR_NONE);
status = ml_nnlayer_set_property(layers[1], "unit= 10", "activation=softmax",
- "bias_zero=true", "weight_decay=l2norm",
+ "bias_init_zero=true", "weight_decay=l2norm",
"weight_decay_lambda=0.005",
"weight_ini=xavier_uniform", NULL);
EXPECT_EQ(status, ML_ERROR_NONE);
status =
ml_nnlayer_set_property(layers[0], "input_shape= 32:1:1:62720",
- "normalization=true", "bias_zero=true", NULL);
+ "normalization=true", "bias_init_zero=true", NULL);
EXPECT_EQ(status, ML_ERROR_NONE);
status = ml_nnmodel_add_layer(model, layers[0]);
EXPECT_EQ(status, ML_ERROR_NONE);
status = ml_nnlayer_set_property(layers[1], "unit= 10", "activation=softmax",
- "bias_zero=true", "weight_decay=l2norm",
+ "bias_init_zero=true", "weight_decay=l2norm",
"weight_decay_lambda=0.005", NULL);
EXPECT_EQ(status, ML_ERROR_NONE);
EXPECT_EQ(status, ML_ERROR_NONE);
status = ml_nnlayer_set_property(layer, "unit= 10", "activation=softmax",
- "bias_zero=true", "weight_decay=l2norm",
+ "bias_init_zero=true", "weight_decay=l2norm",
"weight_decay_lambda=0.005", NULL);
EXPECT_EQ(status, ML_ERROR_NONE);
status =
ml_nnlayer_set_property(layers[0], "input_shape= 32:1:1:62720",
- "normalization=true", "bias_zero=true", NULL);
+ "normalization=true", "bias_init_zero=true", NULL);
EXPECT_EQ(status, ML_ERROR_NONE);
status = ml_nnmodel_add_layer(model, layers[0]);
EXPECT_EQ(status, ML_ERROR_NONE);
status = ml_nnlayer_set_property(layers[1], "unit= 10", "activation=softmax",
- "bias_zero=true", "weight_decay=l2norm",
+ "bias_init_zero=true", "weight_decay=l2norm",
"weight_decay_lambda=0.005", NULL);
EXPECT_EQ(status, ML_ERROR_NONE);
status =
ml_nnlayer_set_property(layers[0], "input_shape= 16:1:1:62720",
- "normalization=true", "bias_zero=true", NULL);
+ "normalization=true", "bias_init_zero=true", NULL);
EXPECT_EQ(status, ML_ERROR_NONE);
status = ml_nnmodel_add_layer(model, layers[0]);
EXPECT_EQ(status, ML_ERROR_NONE);
status = ml_nnlayer_set_property(layers[1], "unit= 10", "activation=softmax",
- "bias_zero=true", "weight_decay=l2norm",
+ "bias_init_zero=true", "weight_decay=l2norm",
"weight_decay_lambda=0.005",
"weight_ini=xavier_uniform", NULL);
EXPECT_EQ(status, ML_ERROR_NONE);
status =
ml_nnlayer_set_property(layers[0], "input_shape= 16:1:1:62720",
- "normalization=true", "bias_zero=true", NULL);
+ "normalization=true", "bias_init_zero=true", NULL);
EXPECT_EQ(status, ML_ERROR_NONE);
status = ml_nnmodel_add_layer(model, layers[0]);
EXPECT_EQ(status, ML_ERROR_NONE);
status = ml_nnlayer_set_property(layers[1], "unit= 10", "activation=softmax",
- "bias_zero=true", "weight_decay=l2norm",
+ "bias_init_zero=true", "weight_decay=l2norm",
"weight_decay_lambda=0.005",
"weight_ini=xavier_uniform", NULL);
EXPECT_EQ(status, ML_ERROR_NONE);
status = ml_nnlayer_set_property(handle, "unit=10", NULL);
EXPECT_EQ(status, ML_ERROR_NONE);
- status = ml_nnlayer_set_property(handle, "bias_zero=true", NULL);
+ status = ml_nnlayer_set_property(handle, "bias_init_zero=true", NULL);
EXPECT_EQ(status, ML_ERROR_NONE);
status = ml_nnlayer_set_property(handle, "activation =sigmoid", NULL);
}
/**
+ * @brief Flatten Layer
+ */
+TEST(nntrainer_Conv2DLayer, initialize_02_p) {
+ int status = ML_ERROR_NONE;
+ std::string config_file = "./test.ini";
+ RESET_CONFIG(config_file.c_str());
+ replaceString("flatten = false", "flatten = true", config_file, config_str2);
+ nntrainer::NeuralNetwork NN;
+ status = NN.setConfig(config_file);
+ EXPECT_EQ(status, ML_ERROR_NONE);
+ status = NN.init();
+ EXPECT_EQ(status, ML_ERROR_NONE);
+}
+
+/**
* @brief Main gtest
*/
int main(int argc, char **argv) {
std::vector<std::string> input_str;
input_str.push_back("input_shape=32:3:28:28");
- input_str.push_back("bias_zero=true");
+ input_str.push_back("bias_init_zero=true");
input_str.push_back("activation=sigmoid");
input_str.push_back("weight_decay=l2norm");
input_str.push_back("weight_decay_lambda = 0.005");
previous_dim.setTensorDim("32:3:28:28");
input_str.push_back("input_shape=32:3:28:28");
- input_str.push_back("bias_zero=true");
+ input_str.push_back("bias_init_zero=true");
input_str.push_back("activation=sigmoid");
input_str.push_back("weight_decay=l2norm");
input_str.push_back("weight_decay_lambda = 0.005");
previous_dim.setTensorDim("32:3:28:28");
input_str.push_back("input_shape=32:3:28:28");
- input_str.push_back("bias_zero=true");
+ input_str.push_back("bias_init_zero=true");
input_str.push_back("activation=sigmoid");
input_str.push_back("weight_decay=l2norm");
input_str.push_back("weight_decay_lambda = 0.005");
previous_dim.setTensorDim("1:3:7:7");
input_str.push_back("input_shape=1:3:7:7");
- input_str.push_back("bias_zero=true");
+ input_str.push_back("bias_init_zero=true");
input_str.push_back("weight_decay=l2norm");
input_str.push_back("weight_decay_lambda = 0.005");
input_str.push_back("weight_ini=xavier_uniform");
previous_dim.setTensorDim("2:3:7:7");
input_str.push_back("input_shape=2:3:7:7");
- input_str.push_back("bias_zero=true");
+ input_str.push_back("bias_init_zero=true");
input_str.push_back("weight_decay=l2norm");
input_str.push_back("weight_decay_lambda = 0.005");
input_str.push_back("weight_ini=xavier_uniform");
previous_dim.setTensorDim("1:3:7:7");
input_str.push_back("input_shape=1:3:7:7");
- input_str.push_back("bias_zero=true");
+ input_str.push_back("bias_init_zero=true");
input_str.push_back("weight_decay=l2norm");
input_str.push_back("weight_decay_lambda = 0.005");
input_str.push_back("weight_ini=xavier_uniform");