This patch also contains the unit tests used to evaluate the changes.
**Self evaluation:**
1. Build test: [X]Passed [ ]Failed [ ]Skipped
2. Run test: [X]Passed [ ]Failed [ ]Skipped
Signed-off-by: Adwaith Anand <adwaith.a@samsung.com>
context.setEffDimFlagInputDimension(0, 0b1001);
context.setDynDimFlagInputDimension(0, 0b1000);
+ bool is_nchw = (getTensorType() == Tformat::NCHW) ? true : false;
/** set output dimensions */
auto const &in_dim = context.getInputDimensions()[0];
output_dims[0] = in_dim;
- output_dims[0].width(unit);
+ is_nchw ? output_dims[0].width(unit) : output_dims[0].channel(unit);
context.setOutputDimensions(output_dims);
/** set weight specifications */
// @todo : This NCHW format setting is just temporary, it needs to be set by
// global configuration
- TensorDim bias_dim(1, 1, 1, unit, getTensorType(), 0b0001);
- TensorDim weight_dim(1, 1, in_dim.width(), unit, getTensorType(), 0b0011);
+
+ TensorDim bias_dim(1, is_nchw ? 1 : unit, 1, is_nchw ? unit : 1,
+ getTensorType(), is_nchw ? 0b0001 : 0b0100);
+ TensorDim weight_dim(1, is_nchw ? 1 : unit, is_nchw ? in_dim.width() : 1,
+ is_nchw ? unit : in_dim.channel(), getTensorType(),
+ is_nchw ? 0b0011 : 0b0101);
weight_idx[FCParams::weight] = context.requestWeight(
weight_dim, weight_initializer, weight_regularizer,
* @param Tensor Type : NCHW or NHWC
*/
void setTensorType(const std::string &values) {
- if (values.compare("NCHW") || values.compare("nchw")) {
- tensor_type = ml::train::TensorDim::Format::NCHW;
- } else {
- tensor_type = ml::train::TensorDim::Format::NHWC;
- }
+ tensor_type = (values.compare("NCHW") == 0 || values.compare("nchw") == 0)
+ ? ml::train::TensorDim::Format::NCHW
+ : ml::train::TensorDim::Format::NHWC;
}
/**
}
void LayerNode::setTensorType(const std::string &type_) {
- TensorDim::Format type = (type_.compare("NCHW") || type_.compare("nchw"))
- ? TensorDim::Format::NCHW
- : TensorDim::Format::NHWC;
+ TensorDim::Format type =
+ (type_.compare("NCHW") == 0 || type_.compare("nchw") == 0)
+ ? TensorDim::Format::NCHW
+ : TensorDim::Format::NHWC;
getLayer()->setTensorType(type);
}
std::vector<shape_parser_> parsed;
from_string(input_shape_str, parsed);
+ for (auto &p : parsed) {
+ p.get().setFormat(layer->getTensorType());
+ }
+
InitLayerContext context({parsed.begin(), parsed.end()}, {true}, false,
"golden_test");
layer->finalize(context);
{"unit=5", "weight_decay=0.0", "bias_decay=0.0"}, "3:1:1:10",
"fc_plain.nnlayergolden", LayerGoldenTestParamOptions::DEFAULT, "nchw");
+auto fc_basic_plain_nhwc = LayerGoldenTestParamType(
+ nntrainer::createLayer<nntrainer::FullyConnectedLayer>, {"unit=5"},
+ "3:10:1:1", "fc_plain.nnlayergolden",
+ LayerGoldenTestParamOptions::SKIP_CALC_DERIV |
+ LayerGoldenTestParamOptions::SKIP_CALC_GRAD,
+ "nhwc");
+
+auto fc_basic_single_batch_nhwc = LayerGoldenTestParamType(
+ nntrainer::createLayer<nntrainer::FullyConnectedLayer>, {"unit=4"},
+ "1:10:1:1", "fc_single_batch.nnlayergolden",
+ LayerGoldenTestParamOptions::SKIP_CALC_DERIV |
+ LayerGoldenTestParamOptions::SKIP_CALC_GRAD,
+ "nhwc");
+
+auto fc_basic_no_decay_nhwc = LayerGoldenTestParamType(
+ nntrainer::createLayer<nntrainer::FullyConnectedLayer>,
+ {"unit=5", "weight_decay=0.0", "bias_decay=0.0"}, "3:10:1:1",
+ "fc_plain.nnlayergolden",
+ LayerGoldenTestParamOptions::SKIP_CALC_DERIV |
+ LayerGoldenTestParamOptions::SKIP_CALC_GRAD,
+ "nhwc");
+
GTEST_PARAMETER_TEST(FullyConnected, LayerGoldenTest,
::testing::Values(fc_basic_plain, fc_basic_single_batch,
- fc_basic_no_decay));
+ fc_basic_no_decay, fc_basic_plain_nhwc,
+ fc_basic_single_batch_nhwc,
+ fc_basic_no_decay_nhwc));