*/
int setProperty(std::vector<std::string> values);
+ /**
+ * @brief get the base name for the layer
+ * @retval base name of the layer
+ */
+ std::string getBaseName() { return "Activation"; };
+
private:
std::function<Tensor(Tensor const &)> _act_fn;
std::function<Tensor(Tensor const &)> _act_prime_fn;
*/
int setProperty(std::vector<std::string> values);
+ /**
+ * @brief get the base name for the layer
+ * @retval base name of the layer
+ */
+ std::string getBaseName() { return "BatchNormalization"; };
+
private:
Tensor weight;
Tensor bias;
/* unknown = 3, */
/* }; */
+ /**
+ * @brief get the base name for the layer
+ * @retval base name of the layer
+ */
+ std::string getBaseName() { return "Convolution2D"; };
+
private:
unsigned int filter_size;
unsigned int kernel_size[CONV2D_DIM];
*/
int setOptimizer(Optimizer &opt);
+ /**
+ * @brief get the base name for the layer
+ * @retval base name of the layer
+ */
+ std::string getBaseName() { return "FullyConnected"; };
+
private:
unsigned int unit;
Tensor weight;
* @param[in] l layer to copy
*/
void copy(std::shared_ptr<Layer> l);
+
+ /**
+ * @brief get the base name for the layer
+ * @retval base name of the layer
+ */
+ std::string getBaseName() { return "Flatten"; };
};
} // namespace nntrainer
*/
int setProperty(std::vector<std::string> values);
+ /**
+ * @brief get the base name for the layer
+ * @retval base name of the layer
+ */
+ std::string getBaseName() { return "Input"; };
+
private:
bool normalization;
bool standardization;
#include <fstream>
#include <iostream>
#include <optimizer.h>
+#include <set>
#include <tensor.h>
#include <tensor_dim.h>
#include <vector>
bool getFlatten() { return flatten; }
/**
+ * @brief Set name of the layer
+ */
+ int setName(std::string name);
+
+ /**
+ * @brief Get name of the layer
+ */
+ std::string getName();
+
+ /**
+ * @brief Get base name of the layer
+ */
+ virtual std::string getBaseName() = 0;
+
+ /**
* @brief Property Enumeration
* 0. input shape : string
* 1. bias zero : bool
* 14. pooling_size : ( n,m )
* 15. pooling : max, average, global_max, global_average
* 16. flatten : bool
+ * 17. name : string (type)
*/
enum class PropertyType {
input_shape = 0,
pooling_size = 14,
pooling = 15,
flatten = 16,
- unknown = 17
+ name = 17,
+ unknown = 18
};
protected:
/**
+ * @brief Name of the layer (works as the identifier)
+ */
+ std::string name;
+
+ /**
* @brief check if current layer's weight decay type is l2norm
* @return bool is weightdecay type is L2 Norm
*/
private:
/**
+ * @brief Set containing all the names of layers
+ */
+ static std::set<std::string> layer_names;
+
+ /**
+ * @brief Count assigned to layer names declared by default
+ */
+ static int def_name_count;
+
+ /**
+ * @brief Ensure that layer has a name
+ */
+ void ensureName();
+
+ /**
* @brief Convert vector of reference to vector of objects
*/
std::shared_ptr<std::vector<Tensor>>
*/
int initialize(bool last);
+ /**
+ * @brief get the base name for the layer
+ * @retval base name of the layer
+ */
+ std::string getBaseName() { return "Loss"; };
+
private:
/**
* @brief update loss
*/
void save(std::ofstream &file);
+ /**
+ * @brief get the base name for the layer
+ * @retval base name of the layer
+ * @note NOTE(review): this returns "Optimizer" from what does not appear
+ * to be a Layer subclass (surrounding hunk shows save()/Optimizer Type) --
+ * confirm this member is intended here and not a copy-paste from the
+ * layer headers
+ */
+ std::string getBaseName() { return "Optimizer"; };
+
private:
/**
* @brief Optimizer Type
unknown = 3,
};
+ /**
+ * @brief get the base name for the layer
+ * @retval base name of the layer
+ */
+ std::string getBaseName() { return "Pooling2D"; };
+
private:
unsigned int pooling_size[POOLING2D_DIM];
unsigned int stride[POOLING2D_DIM];
#define __UTIL_FUNC_H__
#ifdef __cplusplus
+#include <layer.h>
#include <tensor.h>
namespace nntrainer {
namespace nntrainer {
+int Layer::def_name_count = 0;
+std::set<std::string> Layer::layer_names;
+
int Layer::setActivation(ActiType acti) {
int status = ML_ERROR_NONE;
if (acti == ACT_UNKNOWN) {
unsigned int type = parseLayerProperty(key);
switch (static_cast<PropertyType>(type)) {
+ case PropertyType::name:
+ status = setName(value);
+ NN_RETURN_STATUS();
+ break;
case PropertyType::input_shape:
status = input_dim.setTensorDim(value.c_str());
NN_RETURN_STATUS();
return status;
}
+/**
+ * @brief Set the name of the layer; the name works as the unique identifier
+ * @param[in] name name to assign (must be non-empty and globally unique)
+ * @retval ML_ERROR_NONE on success, ML_ERROR_INVALID_PARAMETER on
+ *         empty or duplicate name
+ */
+int Layer::setName(std::string name) {
+  int status = ML_ERROR_NONE;
+
+  /** Reject an empty name before touching any state. The original fell
+   *  through here, registering "" and assigning this->name = "" while
+   *  still returning an error. */
+  if (name.empty())
+    return ML_ERROR_INVALID_PARAMETER;
+
+  /** Re-setting the current name is a harmless no-op */
+  if (name == this->name)
+    return status;
+
+  /** Names must be unique across all layers */
+  std::pair<std::set<std::string>::iterator, bool> ret =
+    layer_names.insert(name);
+  if (ret.second == false) {
+    status = ML_ERROR_INVALID_PARAMETER;
+  } else {
+    /** Release the previous name (if any) so it can be reused later */
+    if (!this->name.empty())
+      layer_names.erase(this->name);
+    this->name = name;
+  }
+
+  return status;
+}
+
+/**
+ * @brief Get the name of the layer, generating a default one when unset
+ * @retval name of the layer (never empty)
+ */
+std::string Layer::getName() {
+  /** Guarantee a valid identifier before handing it out */
+  ensureName();
+  return this->name;
+}
+
+/**
+ * @brief Ensure the layer has a name; when unset, derive a default from
+ *        the base name plus a running counter and register it
+ */
+void Layer::ensureName() {
+  if (!name.empty())
+    return;
+
+  /** Keep generating "<BaseName><count>" until an unused name is found */
+  do {
+    name = getBaseName() + std::to_string(def_name_count++);
+  } while (layer_names.find(name) != layer_names.end());
+
+  layer_names.insert(name);
+}
+
} /* namespace nntrainer */
ini, (layer_name + ":Normalization").c_str(), false));
input_layer->setStandardization(iniparser_getboolean(
ini, (layer_name + ":Standardization").c_str(), false));
- layers.push_back(input_layer);
+ addLayer(input_layer);
} break;
case LAYER_CONV2D: {
int size[CONV2D_DIM];
status = conv2d_layer->setOptimizer(opt);
NN_INI_RETURN_STATUS();
- layers.push_back(conv2d_layer);
+ addLayer(conv2d_layer);
} break;
case LAYER_POOLING2D: {
status = pooling2d_layer->initialize(last);
NN_INI_RETURN_STATUS();
- layers.push_back(pooling2d_layer);
+ addLayer(pooling2d_layer);
} break;
case LAYER_FLATTEN: {
status = flatten_layer->initialize(last);
NN_INI_RETURN_STATUS();
- layers.push_back(flatten_layer);
+ addLayer(flatten_layer);
} break;
case LAYER_FC: {
status = fc_layer->setOptimizer(opt);
NN_INI_RETURN_STATUS();
- layers.push_back(fc_layer);
+ addLayer(fc_layer);
} break;
case LAYER_BN: {
std::shared_ptr<BatchNormalizationLayer> bn_layer =
// fixme: deprecate this.
layers.back()->setBNfollow(true);
- layers.push_back(bn_layer);
+ addLayer(bn_layer);
NN_INI_RETURN_STATUS();
} break;
case LAYER_UNKNOWN:
status = loss_layer->setCost(updated_cost);
NN_RETURN_STATUS();
- layers.push_back(loss_layer);
+ addLayer(loss_layer);
return status;
}
if (initialized) {
return ML_ERROR_NOT_SUPPORTED;
- } else {
- layers.push_back(layer);
}
+ /** @todo This might be redundant. Remove this after testing */
+ for (auto iter = layers.begin(); iter != layers.end(); ++iter) {
+ if ((*iter)->getName() == layer->getName()) {
+ ml_loge("Layer with name %s already exists in the model.",
+ layer->getName().c_str());
+ return ML_ERROR_INVALID_PARAMETER;
+ }
+ }
+
+ layers.push_back(layer);
+
return status;
}
}
/**
+ * @brief Flatten Layer
+ */
+TEST(nntrainer_Layer, initialize_03_p) {
+  int status = ML_ERROR_NONE;
+  std::string config_file = "./test.ini";
+  RESET_CONFIG(config_file.c_str());
+  /** Turn flatten on in the generated ini so the flatten path is exercised */
+  replaceString("flatten = false", "flatten = true", config_file, config_str2);
+  nntrainer::NeuralNetwork NN;
+  status = NN.setConfig(config_file);
+  EXPECT_EQ(status, ML_ERROR_NONE);
+  /** Network initialization is expected to succeed with flatten enabled */
+  status = NN.init();
+  EXPECT_EQ(status, ML_ERROR_NONE);
+}
+
+/**
* @brief Main gtest
*/
int main(int argc, char **argv) {
int status = ML_ERROR_NONE;
nntrainer::FullyConnectedLayer layer;
nntrainer::TensorDim d;
+ std::string layer_name;
+
d.setTensorDim("32:1:28:28");
layer.setProperty({"unit=1"});
layer.setInputDimension(d);
+
+ /** Layer name can be set */
+ layer_name = "FCLayer0";
+ status = layer.setName(layer_name);
+ EXPECT_EQ(status, ML_ERROR_NONE);
+ EXPECT_EQ(layer.getName(), layer_name);
+
status = layer.initialize(false);
EXPECT_EQ(status, ML_ERROR_NONE);
+
+ /** Layer name can be updated */
+ layer_name = "FCLayer1";
+ status = layer.setName(layer_name);
+ EXPECT_EQ(status, ML_ERROR_NONE);
+ EXPECT_EQ(layer.getName(), layer_name);
+}
+
+/**
+ * @brief FullyConnected Layer
+ */
+TEST(nntrainer_FullyConnectedLayer, initialize_05_n) {
+  int status = ML_ERROR_NONE;
+  nntrainer::FullyConnectedLayer layer0, layer1;
+  std::string layer_name;
+
+  /** Default name is lazily assigned on the first getName() call */
+  layer_name = layer0.getName();
+  EXPECT_GT(layer_name.length(), 0);
+
+  /** Re-setting the layer's own current name must succeed (no-op) */
+  status = layer0.setName(layer_name);
+  EXPECT_EQ(status, ML_ERROR_NONE);
+  EXPECT_EQ(layer0.getName(), layer_name);
+
+  /** A name already taken by another layer must be rejected */
+  status = layer1.setName(layer_name);
+  EXPECT_EQ(status, ML_ERROR_INVALID_PARAMETER);
+
+  /** A default name is still generated after the failed setName() */
+  layer_name = layer1.getName();
+  EXPECT_GT(layer_name.length(), 0);
}
/**