};
/**
+ * @brief FilterSize property, the number of convolution filters the layer
+ * applies (i.e. the number of output feature maps / output channels)
+ *
+ */
+class FilterSize : public nntrainer::PositiveIntegerProperty {
+public:
+ static constexpr const char *key = "filters"; /**< unique key to access */
+ using prop_tag = uint_prop_tag; /**< property type */
+};
+
+/**
+ * @brief KernelSize property, one spatial extent (height or width) of the
+ * convolution kernel; the layer holds CONV2D_DIM of these, one per axis
+ *
+ */
+class KernelSize : public nntrainer::PositiveIntegerProperty {
+public:
+ static constexpr const char *key = "kernel_size"; /**< unique key to access */
+ using prop_tag = uint_prop_tag; /**< property type */
+};
+
+/**
+ * @brief Stride property, the step size by which the filter window slides
+ * over the input along one spatial axis
+ *
+ */
+class Stride : public nntrainer::PositiveIntegerProperty {
+public:
+ /**
+ * @brief Construct a new Stride object with a default value 1
+ *
+ */
+ Stride(unsigned int value = 1);
+ static constexpr const char *key = "stride"; /**< unique key to access */
+ using prop_tag = uint_prop_tag; /**< property type */
+};
+
+/**
* @brief Padding2D property, this is used to calculate padding2D
* @details Padding2D is saved as a string. Upon calling Padding2D::compute,
* returns std::vector<unsigned int> which has computed padding2Ds, below
Padding2D(const std::string &value = "valid") :
nntrainer::Property<std::string>(value) {} /**< default value if any */
bool isValid(const std::string &v) const override;
- using prop_tag = str_prop_tag; /**< property type */
+ static constexpr const char *key = "padding"; /**< unique key to access */
+ using prop_tag = str_prop_tag; /**< property type */
/**
* @brief compute actual padding2D from the underlying data
#include <lazy_tensor.h>
#include <nntrainer_error.h>
#include <nntrainer_log.h>
+#include <node_exporter.h>
#include <parse_util.h>
#include <profiler.h>
#include <util_func.h>
*/
static void col2im(const Tensor &col_matrix, const TensorDim &kdim,
const std::array<unsigned, 4> &padding,
- const std::array<unsigned, CONV2D_DIM> &mstride,
+ const std::array<props::Stride, CONV2D_DIM> &mstride,
const std::array<unsigned, CONV2D_DIM> &dilation,
Tensor &image) {
unsigned pt = padding[0];
static TensorDim
calcIm2ColOutputDim(const TensorDim &in, const TensorDim &kdim,
const std::array<unsigned int, CONV2D_DIM * 2> &padding,
- const std::array<unsigned int, CONV2D_DIM> &mstride,
+ const std::array<props::Stride, CONV2D_DIM> &mstride,
const std::array<unsigned int, CONV2D_DIM> &dilation) {
unsigned pt = padding[0];
*/
static void im2col(const Tensor &in, const TensorDim &kdim,
const std::array<unsigned int, 4> &padding,
- const std::array<unsigned int, CONV2D_DIM> &mstride,
+ const std::array<props::Stride, CONV2D_DIM> &mstride,
const std::array<unsigned int, CONV2D_DIM> &dilation,
Tensor &out) {
/// for channel last mode, this is deprecated for now, leaving here on
enum ConvParams { weight, bias, im2col_result, col2im_result };
+/**
+ * @brief Construct Conv2DLayer with explicit padding; the tuple-backed
+ * properties (filters, kernel_size, stride, padding) start at their
+ * defaults and are populated later through setProperty()
+ */
+Conv2DLayer::Conv2DLayer(
+ const std::array<unsigned int, CONV2D_DIM * 2> &padding_) :
+ LayerImpl(),
+ padding(padding_),
+ conv_props(props::FilterSize(), std::array<props::KernelSize, CONV2D_DIM>(),
+ std::array<props::Stride, CONV2D_DIM>(), props::Padding2D()),
+ wt_idx({0}) {}
+
void Conv2DLayer::finalize(InitLayerContext &context) {
if (context.getNumInputs() != 1) {
throw std::invalid_argument("Convolution layer takes only one input");
const TensorDim &in_dim = context.getInputDimensions()[0];
+ unsigned int filter_size = std::get<props::FilterSize>(conv_props);
+ auto &kernel_size =
+ std::get<std::array<props::KernelSize, CONV2D_DIM>>(conv_props);
+ auto &stride = std::get<std::array<props::Stride, CONV2D_DIM>>(conv_props);
+
TensorDim dim =
TensorDim(filter_size, in_dim.channel(), kernel_size[0], kernel_size[1]);
TensorDim bias_dim = TensorDim(1, filter_size, 1, 1);
void Conv2DLayer::forwarding(RunLayerContext &context, bool training) {
int status = ML_ERROR_NONE;
+ unsigned int filter_size = std::get<props::FilterSize>(conv_props);
+ auto &stride = std::get<std::array<props::Stride, CONV2D_DIM>>(conv_props);
+
Tensor &input_ = context.getInput(SINGLE_INOUT_IDX);
Tensor &hidden_ = context.getOutput(SINGLE_INOUT_IDX);
}
void Conv2DLayer::calcDerivative(RunLayerContext &context) {
+ unsigned int filter_size = std::get<props::FilterSize>(conv_props);
+ auto &stride = std::get<std::array<props::Stride, CONV2D_DIM>>(conv_props);
+
Tensor &derivative = context.getIncomingDerivative(SINGLE_INOUT_IDX);
Tensor &input_derivative = context.getOutgoingDerivative(SINGLE_INOUT_IDX);
Tensor &filter_kernel = context.getWeight(wt_idx[ConvParams::weight]);
}
void Conv2DLayer::calcGradient(RunLayerContext &context) {
+ unsigned int filter_size = std::get<props::FilterSize>(conv_props);
+ auto &stride = std::get<std::array<props::Stride, CONV2D_DIM>>(conv_props);
+
Tensor &derivative = context.getIncomingDerivative(SINGLE_INOUT_IDX);
Tensor &input_ = context.getInput(SINGLE_INOUT_IDX);
delBias = derivative.sum({0, 2, 3});
}
-void Conv2DLayer::setProperty(const std::vector<std::string> &values) {
- /// @todo: deprecate this in favor of loadProperties
- for (unsigned int i = 0; i < values.size(); ++i) {
- std::string key;
- std::string value;
- std::stringstream ss;
-
- if (getKeyValue(values[i], key, value) != ML_ERROR_NONE) {
- throw std::invalid_argument("Error parsing the property: " + values[i]);
- }
-
- if (value.empty()) {
- ss << "value is empty: key: " << key << ", value: " << value;
- throw std::invalid_argument(ss.str());
- }
-
- /// @note this calls derived setProperty if available
- setProperty(key, value);
- }
+void Conv2DLayer::exportTo(Exporter &exporter,
+ const ExportMethods &method) const {
+ // export the base-class (LayerImpl) properties first, then the
+ // conv2d-specific property tuple
+ LayerImpl::exportTo(exporter, method);
+ exporter.saveResult(conv_props, method, this);
}
-void Conv2DLayer::setProperty(const std::string &type_str,
- const std::string &value) {
- using PropertyType = nntrainer::Layer::PropertyType;
- int status = ML_ERROR_NONE;
- nntrainer::Layer::PropertyType type =
- static_cast<nntrainer::Layer::PropertyType>(parseLayerProperty(type_str));
-
- switch (type) {
- case PropertyType::filters: {
- status = setUint(filter_size, value);
- throw_status(status);
- } break;
- case PropertyType::kernel_size:
- status = getValues(CONV2D_DIM, value, (int *)(kernel_size.data()));
- throw_status(status);
- if (kernel_size[0] == 0 || kernel_size[1] == 0) {
- throw std::invalid_argument(
- "[Conv2DLayer] kernel_size must be greater than 0");
- }
- break;
- case PropertyType::stride:
- status = getValues(CONV2D_DIM, value, (int *)(stride.data()));
- throw_status(status);
- if (stride[0] == 0 || stride[1] == 0) {
- throw std::invalid_argument(
- "[Conv2DLayer] stride must be greater than 0");
- }
- break;
- case PropertyType::padding:
- from_string(value, std::get<props::Padding2D>(conv_props));
- break;
- default:
- LayerImpl::setProperty(type_str, value);
- break;
- }
+void Conv2DLayer::setProperty(const std::vector<std::string> &values) {
+ // consume the conv2d-specific keys into conv_props; any keys this layer
+ // does not recognize are forwarded to LayerImpl for handling
+ auto remain_props = loadProperties(values, conv_props);
+ LayerImpl::setProperty(remain_props);
}
} /* namespace nntrainer */
/**
* @brief Constructor of Conv 2D Layer
*/
- Conv2DLayer(unsigned int filter_size_ = 0,
- const std::array<unsigned int, CONV2D_DIM> &kernel_size_ = {0, 0},
- const std::array<unsigned int, CONV2D_DIM> &stride_ = {1, 1},
- const std::array<unsigned int, CONV2D_DIM * 2> &padding_ = {0, 0,
- 0,
- 0}) :
- LayerImpl(),
- filter_size(filter_size_),
- kernel_size(kernel_size_),
- stride(stride_),
- padding(padding_),
- wt_idx({0}) {}
+ Conv2DLayer(const std::array<unsigned int, CONV2D_DIM * 2> &padding_ = {
+ 0, 0, 0, 0});
/**
* @brief Destructor of Conv 2D Layer
/**
* @copydoc Layer::exportTo(Exporter &exporter, ExportMethods method)
*/
- void exportTo(Exporter &exporter,
- const ExportMethods &method) const override {
- Layer::exportTo(exporter, method);
- }
+ void exportTo(Exporter &exporter, const ExportMethods &method) const override;
/**
* @copydoc Layer::getType()
inline static const std::string type = "conv2d";
private:
- unsigned int filter_size;
- std::array<unsigned int, CONV2D_DIM> kernel_size;
- std::array<unsigned int, CONV2D_DIM> stride;
std::array<unsigned int, CONV2D_DIM * 2> padding;
- std::tuple<props::Padding2D> conv_props;
+ std::tuple<props::FilterSize, std::array<props::KernelSize, CONV2D_DIM>,
+ std::array<props::Stride, CONV2D_DIM>, props::Padding2D>
+ conv_props;
std::array<unsigned int, 5> wt_idx; /**< indices of the weights and tensors */
-
- /**
- * @brief setProperty by type and value separated
- * @param[in] type property type to be passed
- * @param[in] value value to be passed
- * @exception exception::not_supported when property type is not valid for
- * the particular layer
- * @exception std::invalid_argument invalid argument
- */
- void setProperty(const std::string &type, const std::string &value);
};
} // namespace nntrainer