* where you would like to look for the layers, while NNTRAINER_CONF_PATH is a
- * (buildtime hardcoded @a file path) to locate configuration file *.ini file
+ * (build-time hardcoded @a file path) to locate the configuration *.ini file
*/
- /*** @note for now, NNTRAINER_PATH is a SINGLE PATH rather than serise of path
+ /*** @note for now, NNTRAINER_PATH is a SINGLE PATH rather than a series of paths
- * like PATH environment variable. this could be improved but for now, it is
+ * like the PATH environment variable. This could be improved, but for now it is
* enough
*/
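/**
 * @note a minimal sketch of how the two knobs could be consumed, assuming
 * NNTRAINER_PATH is read from the environment while NNTRAINER_CONF_PATH is a
 * compile-time constant; resolveLayerPluginDir() is a hypothetical helper.
 * @code
 * #include <cstdlib>
 * #include <string>
 *
 * static std::string resolveLayerPluginDir() {
 *   // a single directory, not a PATH-like list (see the note above)
 *   if (const char *env = std::getenv("NNTRAINER_PATH"))
 *     return std::string(env);
 *   return std::string();
 * }
 * @endcode
 */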
std::mutex factory_mutex;
/**
- * @brief finialize global context
+ * @brief finalize global context
*
*/
static void fini_global_context_nntrainer(void) __attribute__((destructor));
std::once_flag global_app_context_init_flag;
static void add_default_object(AppContext &ac) {
- /// @note all layers should be added to the app_context to gaurantee that
+ /// @note all layers should be added to the app_context to guarantee that
- /// createLayer/createOptimizer class is created
+ /// the corresponding class can be created via createLayer/createOptimizer
using OptType = ml::train::OptimizerType;
ac.registerFactory(nntrainer::createOptimizer<SGD>, SGD::type, OptType::SGD);
ac.registerFactory(nntrainer::createLayer<CentroidKNN>, CentroidKNN::type,
LayerType::LAYER_CENTROID_KNN);
- /** proprocess layers */
+ /** preprocess layers */
ac.registerFactory(nntrainer::createLayer<PreprocessFlipLayer>,
PreprocessFlipLayer::type,
LayerType::LAYER_PREPROCESS_FLIP);
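/// @note a sketch of how an additional, user-defined layer would follow the
/// same registration pattern; CustomLayer below is hypothetical, only the
/// registerFactory/createLayer usage mirrors the calls above.
/// @code
/// ac.registerFactory(nntrainer::createLayer<CustomLayer>, CustomLayer::type,
///                    LayerType::LAYER_UNKNOWN);
/// @endcode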
* @brief Construct a new Sample object
* @note the batch dimension will be ignored to make a single sample
*
- * @param iter iteration obejcts
+ * @param iter iteration object
* @param batch nth batch to create the sample
*/
Sample(const Iteration &iter, unsigned int batch);
}
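/**
 * @note a usage sketch, assuming @a iter is an Iteration already filled by the
 * data pipeline; the second argument selects which row of the batch to view.
 * @code
 * Sample first(iter, 0); // sample at batch index 0, batch dimension dropped
 * @endcode
 */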
/**
- * @brief this function helps exporting the dataproducer in a predefined
+ * @brief this function helps export the data producer in a predefined
- * format, while workarounding issue caused by templated function type eraser
+ * format, while working around the issue caused by templated function type erasure
*
- * @param exporter exporter that conatins exporting logic
+ * @param exporter exporter that contains exporting logic
- * @param method enum value to identify how it should be exported to
+ * @param method enum value to identify how it should be exported
*/
virtual void exportTo(Exporter &exporter,
/**
* @brief denote if given producer is thread safe and can be parallelized.
- * @note if size() == SIZE_UNDEFIEND, thread safe shall be false
+ * @note if size() == SIZE_UNDEFINED, this shall return false
*
* @return bool true if thread safe.
*/
- * @brief this function helps exporting the dataset in a predefined format,
+ * @brief this function helps export the dataset in a predefined format,
- * while workarounding issue caused by templated function type eraser
+ * while working around the issue caused by templated function type erasure
*
- * @param exporter exporter that conatins exporting logic
+ * @param exporter exporter that contains exporting logic
- * @param method enum value to identify how it should be exported to
+ * @param method enum value to identify how it should be exported
*/
void exportTo(Exporter &exporter,
namespace nntrainer {
DirDataProducer::DirDataProducer() :
- dir_data_props(new Props()),
- num_class(0),
- num_data_total(0) {}
+ dir_data_props(new Props()), num_class(0), num_data_total(0) {}
DirDataProducer::DirDataProducer(const std::string &dir_path) :
dir_data_props(new Props(props::DirPath(dir_path))),
auto sz = size(input_dims, label_dims);
NNTR_THROW_IF(sz == 0, std::invalid_argument)
- << "size is zero, dataproducer does not provide anything";
+ << "size is zero, data producer does not provide anything";
return [sz, input_dims, this](unsigned int idx, std::vector<Tensor> &inputs,
std::vector<Tensor> &labels) {
/// @todo move this to higher order component
NNTR_THROW_IF(size(input_dims, label_dims) == 0, std::invalid_argument)
- << "size is zero, dataproducer does not provide anything";
+ << "size is zero, data producer does not provide anything";
/** prepare states for the generator */
std::vector<std::uniform_int_distribution<unsigned int>> label_chooser_;
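/**
 * @note a usage sketch of the generator callable returned above, assuming
 * @a gen holds that callable, @a sz is its size and the tensors are already
 * allocated to match input_dims / label_dims; names are illustrative only.
 * @code
 * std::vector<Tensor> inputs, labels; // sized to input_dims / label_dims
 * for (unsigned int idx = 0; idx < sz; ++idx)
 *   gen(idx, inputs, labels); // fills exactly one sample per call
 * @endcode
 */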
public:
/**
* @brief Construct a new L2norm Layer object
- * that normlizes given feature with l2norm
+ * that normalizes the given feature with l2norm
*/
PreprocessL2NormLayer() : Layer() {}
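/**
 * @note a minimal sketch of the normalization this layer performs, written
 * against a plain float buffer; the actual layer operates on Tensor objects.
 * @code
 * #include <cmath>
 * #include <vector>
 *
 * void l2normalize(std::vector<float> &feature) {
 *   float squared_sum = 0.0f;
 *   for (float v : feature)
 *     squared_sum += v * v;
 *   const float norm = std::sqrt(squared_sum);
 *   if (norm > 0.0f)
 *     for (float &v : feature)
 *       v /= norm; // x <- x / ||x||_2
 * }
 * @endcode
 */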
/**
* The split is only done along the split_dimension dimension.
- * (Assumes input data is continous)
+ * (Assumes input data is contiguous)
* For example, consider input dimension [b,c,h,w], split_number = n
* 1. axis = 1, output_dim = [b,c//n,h,w], num_outputs = n
* 2. axis = 2, output_dim = [b,c,h//n,w], num_outputs = n
* to facilitate easier processing.
*
* The helper shape consolidates all the dimensions before the split_dimension
- * together and all the dimensions after the split_dimension to faciliate
+ * together and all the dimensions after the split_dimension to facilitate
* easier splitting of the data.
*/
leading_helper_dim = 1;
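/**
 * @note a worked sketch of the helper-shape consolidation described above for
 * an input of [b, c, h, w]: every dimension before the split axis collapses
 * into one leading dimension, e.g. axis = 2 gives a helper shape of [b*c, h, w].
 * The dim() accessor below is illustrative, not the actual Tensor API.
 * @code
 * unsigned int leading = 1;
 * for (unsigned int i = 0; i < split_dimension; ++i)
 *   leading *= in_dim.dim(i); // product of all dims before the split axis
 * @endcode
 */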