virtual void setProperty(const std::vector<std::string> &values) = 0;
/**
+ * @brief Set Tensor Type : NCHW or NHWC
+ * @note This is used mainly for the unittest case which does not have
+ * model.
+ */
+  /** default no-op; overridden where a layer honors the tensor format */
+  virtual void setTensorType(const std::string &values) {}
+
+ /**
* @brief Get name of the layer
* @retval name of the layer
* @note This name is unique to this layer in a model
inPlaceOptimize();
+  /// Resolve the model-wide tensor format. std::string::compare() returns 0
+  /// on equality, so equality must be tested with operator== — OR-ing raw
+  /// compare() results is non-zero for every input, which made NHWC
+  /// unreachable here.
+  TensorDim::Format type =
+    (getModelTensorType() == "NCHW" || getModelTensorType() == "nchw" ||
+     getModelTensorType().empty())
+      ? TensorDim::Format::NCHW
+      : TensorDim::Format::NHWC;
+
+  for (auto iter = cbegin(); iter != cend(); iter++) {
+    auto lnode = (*iter);
+    /// @todo later, we can set layer tensor type differently with model
+    /// tensor type
+    lnode->setTensorType(getModelTensorType());
+  }
+
status = checkCompiledGraph();
NN_RETURN_STATUS();
backward_iter_end(nullptr),
forward_iter_end(nullptr),
optimize_memory(true),
- exec_mode(ExecutionMode::TRAIN) {}
+ exec_mode(ExecutionMode::TRAIN),
+ model_tensor_type("NCHW") {}
/**
 * @brief Constructor of NeuralNetwork Graph Class
 * @param[in] enable_swap enable memory swap for the weight/tensor pools
 * @param[in] swap_path memory swap file path when the swap is enabled
+ * @param[in] lookahead lookahead for memory swap
+ * @param[in] tensor_type model tensor format, "NCHW" (default) or "NHWC"
 */
NetworkGraph(bool enable_swap, const std::string &swap_path = "",
-             unsigned int lookahead = 0) :
-  tensor_manager(
-    std::make_shared<Manager>(enable_swap, swap_path, lookahead)),
+             unsigned int lookahead = 0,
+             const std::string &tensor_type = "NCHW") :
+  tensor_manager(std::make_shared<Manager>(enable_swap, swap_path, lookahead,
+                                           tensor_type)),
  graph(),
  compiled(false),
  batch_size(0),
  backward_iter_end(nullptr),
  forward_iter_end(nullptr),
  optimize_memory(true),
-  exec_mode(ExecutionMode::TRAIN) {}
+  exec_mode(ExecutionMode::TRAIN),
+  model_tensor_type(tensor_type) {}
/**
* @brief Destructor of the NeuralNetwork Graph class
std::vector<Tensor> getOutputTensors() const;
/**
+ * @brief return model tensor type
+ *
+ * @return std::string model tensor format, "NCHW" or "NHWC"
+ */
+  std::string getModelTensorType() const { return model_tensor_type; }
+
+ /**
* @brief Flush data to the device
*
*/
ExecutionMode exec_mode; /**< execution mode with which the graph has been
currently set or previously set */
+ std::string model_tensor_type; /**< Model Tensor Type: NCHW or NHWC */
+
std::unordered_map<std::string, int>
profile_keys; /**< profile keys based on the layer type */
std::vector<Weight *>
/** set weight specifications */
// @todo : This NCHW format setting is just temporal, it needs to be set by
// global configuration
- TensorDim bias_dim(1, 1, 1, unit, ml::train::TensorDim::Format::NCHW, 0b0001);
- TensorDim weight_dim(1, 1, in_dim.width(), unit,
- ml::train::TensorDim::Format::NCHW, 0b0011);
+ TensorDim bias_dim(1, 1, 1, unit, getTensorType(), 0b0001);
+ TensorDim weight_dim(1, 1, in_dim.width(), unit, getTensorType(), 0b0011);
weight_idx[FCParams::weight] = context.requestWeight(
weight_dim, weight_initializer, weight_regularizer,
#include <vector>
#include <common.h>
+#include <tensor_dim.h>
namespace ml::train {
class Layer;
* @return true if supports backwarding, else false
*/
virtual bool supportBackwarding() const = 0;
+
+  /**
+   * @brief Set the Tensor Type for the layer
+   * @param type tensor format, TensorDim::Format::NCHW or
+   *             TensorDim::Format::NHWC (defaults to NCHW)
+   */
+  virtual void setTensorType(
+    ml::train::TensorDim::Format type = ml::train::TensorDim::Format::NCHW) {
+    tensor_type = type;
+  }
+
+  /**
+   * @brief set the Tensor Type for the layer
+   * @param values tensor format string: "NCHW"/"nchw" selects NCHW,
+   *               anything else selects NHWC
+   */
+  void setTensorType(const std::string &values) {
+    /// std::string::compare() returns 0 on a match, so the original
+    /// "compare() || compare()" condition was true for every input and
+    /// NHWC was unreachable; use operator== instead.
+    if (values == "NCHW" || values == "nchw") {
+      tensor_type = ml::train::TensorDim::Format::NCHW;
+    } else {
+      tensor_type = ml::train::TensorDim::Format::NHWC;
+    }
+  }
+
+  /**
+   * @brief get the Tensor Type for the layer
+   * @return Tensor Type : TensorDim::Format::NCHW or
+   * TensorDim::Format::NHWC
+   */
+  virtual ml::train::TensorDim::Format getTensorType() { return tensor_type; }
+
+private:
+  /// default-initialized to NCHW so getTensorType() never reads an
+  /// indeterminate value when setTensorType() was not called
+  ml::train::TensorDim::Format tensor_type =
+    ml::train::TensorDim::Format::NCHW;
};
/// @todo Decide where to put and how to implement(#986)
con = std::make_unique<Connection>(name, index);
}
+void LayerNode::setTensorType(const std::string type_) {
+  /// std::string::compare() returns 0 when strings are equal, so select the
+  /// format with operator== — the previous "compare() || compare()" chain
+  /// evaluated true for every input and NHWC could never be chosen.
+  TensorDim::Format type = (type_ == "NCHW" || type_ == "nchw")
+                             ? TensorDim::Format::NCHW
+                             : TensorDim::Format::NHWC;
+  getLayer()->setTensorType(type);
+}
+
/**
 * @brief Get the name of this LayerNode; returns "" when no name is set.
 */
const std::string LayerNode::getName() const noexcept {
  auto &name = std::get<props::Name>(*layer_node_props);
  return name.empty() ? "" : name.get();
}
*/
bool needsCalcGradient() { return needs_calc_gradient; }
+  /**
+   * @brief Set Tensor type for layer
+   *
+   * @param type_ tensor format string, "NCHW"/"nchw" or "NHWC"
+   */
+  void setTensorType(const std::string type_ = "NCHW");
+
private:
/**
* @brief Get the Input Layers object
std::vector<std::unique_ptr<Connection>>
output_connections; /**< output layer names */
+  TensorDim::Format tensor_type; /**< NOTE(review): not written by
+                                    LayerNode::setTensorType() (which forwards
+                                    to the layer) and has no initializer —
+                                    confirm this member is needed */
+
#ifdef ENABLE_TEST
/**
* @brief Init context which is stored for debugging issue
set(value);
}
+/**
+ * @brief ModelTensorType property constructor; stores the given format
+ * string (expected "NCHW" or "NHWC").
+ */
+ModelTensorType::ModelTensorType(const std::string &value) { set(value); }
+
} // namespace nntrainer::props
MemorySwapLookahead(const unsigned int &value = 0);
};
+/**
+ * @brief model tensor type : NCHW or NHWC
+ *
+ */
+class ModelTensorType : public Property<std::string> {
+public:
+  static constexpr const char *key = "tensor_type"; /**< unique key to access */
+  using prop_tag = str_prop_tag; /**< property type */
+
+  /**
+   * @brief Constructor
+   *
+   * @param value value to set, defaults to "NCHW"
+   */
+  ModelTensorType(const std::string &value = "NCHW");
+};
+
} // namespace nntrainer::props
#endif
NeuralNetwork::NeuralNetwork() :
model_props(props::LossType(), {}, {}, props::ClipGradByGlobalNorm()),
- model_flex_props(
- props::Epochs(), props::TrainingBatchSize(), props::SavePath(),
- props::ContinueTrain(), props::SaveBestPath(), props::MemoryOptimization(),
- props::MemorySwap(), props::MemorySwapPath(), props::MemorySwapLookahead()),
+ model_flex_props(props::Epochs(), props::TrainingBatchSize(),
+ props::SavePath(), props::ContinueTrain(),
+ props::SaveBestPath(), props::MemoryOptimization(),
+ props::MemorySwap(), props::MemorySwapPath(),
+ props::MemorySwapLookahead(), props::ModelTensorType()),
load_path(std::string()),
epoch_idx(0),
iter(0),
NeuralNetwork::NeuralNetwork(AppContext app_context_) :
model_props(props::LossType(), {}, {}, props::ClipGradByGlobalNorm()),
- model_flex_props(
- props::Epochs(), props::TrainingBatchSize(), props::SavePath(),
- props::ContinueTrain(), props::SaveBestPath(), props::MemoryOptimization(),
- props::MemorySwap(), props::MemorySwapPath(), props::MemorySwapLookahead()),
+ model_flex_props(props::Epochs(), props::TrainingBatchSize(),
+ props::SavePath(), props::ContinueTrain(),
+ props::SaveBestPath(), props::MemoryOptimization(),
+ props::MemorySwap(), props::MemorySwapPath(),
+ props::MemorySwapLookahead(), props::ModelTensorType()),
load_path(std::string()),
epoch_idx(0),
iter(0),
std::get<props::MemorySwapPath>(model_flex_props);
unsigned int lookahead =
std::get<props::MemorySwapLookahead>(model_flex_props);
- model_graph = NetworkGraph(memory_swap, memory_swap_path, lookahead);
+
+ const std::string tensor_type =
+ std::get<props::ModelTensorType>(model_flex_props);
+
+ model_graph =
+ NetworkGraph(memory_swap, memory_swap_path, lookahead, tensor_type);
model_graph.setMemoryOptimizations(
std::get<props::MemoryOptimization>(model_flex_props));
std::tuple<props::Epochs, props::TrainingBatchSize, props::SavePath,
props::ContinueTrain, props::SaveBestPath,
props::MemoryOptimization, props::MemorySwap,
- props::MemorySwapPath, props::MemorySwapLookahead>;
+ props::MemorySwapPath, props::MemorySwapLookahead,
+ props::ModelTensorType>;
using RigidPropTypes =
std::tuple<props::LossType, std::vector<props::InputConnection>,
std::vector<props::LabelLayer>, props::ClipGradByGlobalNorm>;
/**
* @brief Constructor of Manager
*/
-  Manager() : enable_optimizations(true), swap_lookahead(0) {}
+  // NOTE(review): default format here is lowercase "nchw" while
+  // NetworkGraph/props default to "NCHW" — confirm consumers treat the
+  // value case-insensitively.
+  Manager() :
+    enable_optimizations(true),
+    swap_lookahead(0),
+    tensor_type("nchw") {}
/**
* @brief Constructor of Manager
*/
Manager(bool enable_swap, const std::string &swap_path = "",
- unsigned int lookahead = 0) :
+ unsigned int lookahead = 0, const std::string tensor_type_ = "nchw") :
weight_pool(enable_swap, swap_path, "weight_pool"),
tensor_pool(enable_swap, swap_path, "tensor_pool"),
enable_optimizations(true),
- swap_lookahead(lookahead) {}
+ swap_lookahead(lookahead),
+ tensor_type(tensor_type_) {}
/**
* @brief Construct a new Manager object (deleted)
unsigned int swap_lookahead; /** lookahead for memory swap */
+  std::string tensor_type; /**< model tensor format string
+                              (e.g. "nchw"/"NCHW") */
+
/**
* @brief Finalize the given tensor pool
*
std::vector<std::string> /**< Properties */,
const char *, /**< Input Tensor dimensions representation */
const char * /**< Golden file name */,
- int /**< LayerGoldenTestParamOptions */>;
+ int /**< LayerGoldenTestParamOptions */,
+ std::string /**< tensor format: "nchw" or "nhwc" */>;
/**
* @brief Golden Layer Test with designated format
TEST_P(LayerGoldenTest, run) {
auto f = std::get<0>(GetParam());
auto layer = f(std::get<1>(GetParam()));
+ std::string type = std::get<5>(GetParam());
+ layer->setTensorType(type);
auto golden_file = checkedOpenStream<std::ifstream>(
getGoldenPath(std::get<3>(GetParam())), std::ios::in | std::ios::binary);
auto &input_dims = std::get<2>(GetParam());
auto attention_shared_kv = LayerGoldenTestParamType(
nntrainer::createLayer<nntrainer::AttentionLayer>, {}, "1:1:5:7,1:1:3:7",
- "attention_shared_kv.nnlayergolden", LayerGoldenTestParamOptions::DEFAULT);
+ "attention_shared_kv.nnlayergolden", LayerGoldenTestParamOptions::DEFAULT,
+ "nchw");
auto attention_shared_kv_batched = LayerGoldenTestParamType(
nntrainer::createLayer<nntrainer::AttentionLayer>, {}, "2:1:5:7,2:1:3:7",
"attention_shared_kv_batched.nnlayergolden",
- LayerGoldenTestParamOptions::DEFAULT);
+ LayerGoldenTestParamOptions::DEFAULT, "nchw");
auto attention_batched = LayerGoldenTestParamType(
nntrainer::createLayer<nntrainer::AttentionLayer>, {},
"2:1:5:7,2:1:3:7,2:1:3:7", "attention_batched.nnlayergolden",
- LayerGoldenTestParamOptions::DEFAULT);
+ LayerGoldenTestParamOptions::DEFAULT, "nchw");
GTEST_PARAMETER_TEST(Attention, LayerGoldenTest,
::testing::Values(attention_shared_kv,
auto bn_basic_channels_training = LayerGoldenTestParamType(
nntrainer::createLayer<nntrainer::BatchNormalizationLayer>, {}, "2:4:2:3",
- "bn_channels_training.nnlayergolden", LayerGoldenTestParamOptions::DEFAULT);
+ "bn_channels_training.nnlayergolden", LayerGoldenTestParamOptions::DEFAULT,
+ "nchw");
auto bn_basic_channels_inference = LayerGoldenTestParamType(
nntrainer::createLayer<nntrainer::BatchNormalizationLayer>, {}, "2:4:2:3",
- "bn_channels_inference.nnlayergolden", bn_inference_option);
+ "bn_channels_inference.nnlayergolden", bn_inference_option, "nchw");
auto bn_basic_width_training = LayerGoldenTestParamType(
nntrainer::createLayer<nntrainer::BatchNormalizationLayer>, {}, "2:1:1:10",
- "bn_width_training.nnlayergolden", LayerGoldenTestParamOptions::DEFAULT);
+ "bn_width_training.nnlayergolden", LayerGoldenTestParamOptions::DEFAULT,
+ "nchw");
auto bn_basic_width_inference = LayerGoldenTestParamType(
nntrainer::createLayer<nntrainer::BatchNormalizationLayer>, {}, "2:1:1:10",
- "bn_width_inference.nnlayergolden", bn_inference_option);
+ "bn_width_inference.nnlayergolden", bn_inference_option, "nchw");
GTEST_PARAMETER_TEST(BatchNormalization, LayerGoldenTest,
::testing::Values(bn_basic_channels_training,
auto concat_dim3 = LayerGoldenTestParamType(
nntrainer::createLayer<nntrainer::ConcatLayer>, {"axis=3"},
"2:3:3:2, 2:3:3:3", "concat_dim3.nnlayergolden",
- LayerGoldenTestParamOptions::DEFAULT);
+ LayerGoldenTestParamOptions::DEFAULT, "nchw");
auto concat_dim2 = LayerGoldenTestParamType(
nntrainer::createLayer<nntrainer::ConcatLayer>, {"axis=2"},
"2:3:2:3, 2:3:3:3", "concat_dim2.nnlayergolden",
- LayerGoldenTestParamOptions::DEFAULT);
+ LayerGoldenTestParamOptions::DEFAULT, "nchw");
auto concat_dim1 = LayerGoldenTestParamType(
nntrainer::createLayer<nntrainer::ConcatLayer>, {"axis=1"},
"2:2:3:3, 2:3:3:3", "concat_dim1.nnlayergolden",
- LayerGoldenTestParamOptions::DEFAULT);
+ LayerGoldenTestParamOptions::DEFAULT, "nchw");
GTEST_PARAMETER_TEST(Concat, LayerGoldenTest,
::testing::Values(concat_dim3, concat_dim2, concat_dim1));
auto conv1d_sb_minimum = LayerGoldenTestParamType(
nntrainer::createLayer<nntrainer::Conv1DLayer>,
{"filters=3", "kernel_size=2"}, "1:1:1:4", "conv1d_sb_minimum.nnlayergolden",
- LayerGoldenTestParamOptions::DEFAULT);
+ LayerGoldenTestParamOptions::DEFAULT, "nchw");
auto conv1d_mb_minimum = LayerGoldenTestParamType(
nntrainer::createLayer<nntrainer::Conv1DLayer>,
{"filters=3", "kernel_size=2"}, "3:1:1:4", "conv1d_mb_minimum.nnlayergolden",
- LayerGoldenTestParamOptions::DEFAULT);
+ LayerGoldenTestParamOptions::DEFAULT, "nchw");
-auto conv1d_sb_same_remain = LayerGoldenTestParamType(
- nntrainer::createLayer<nntrainer::Conv1DLayer>,
- {"filters=2", "kernel_size=3", "padding=same"}, "1:1:1:4",
- "conv1d_sb_same_remain.nnlayergolden", LayerGoldenTestParamOptions::DEFAULT);
+auto conv1d_sb_same_remain =
+ LayerGoldenTestParamType(nntrainer::createLayer<nntrainer::Conv1DLayer>,
+ {"filters=2", "kernel_size=3", "padding=same"},
+ "1:1:1:4", "conv1d_sb_same_remain.nnlayergolden",
+ LayerGoldenTestParamOptions::DEFAULT, "nchw");
-auto conv1d_mb_same_remain = LayerGoldenTestParamType(
- nntrainer::createLayer<nntrainer::Conv1DLayer>,
- {"filters=2", "kernel_size=3", "padding=same"}, "3:1:1:4",
- "conv1d_mb_same_remain.nnlayergolden", LayerGoldenTestParamOptions::DEFAULT);
+auto conv1d_mb_same_remain =
+ LayerGoldenTestParamType(nntrainer::createLayer<nntrainer::Conv1DLayer>,
+ {"filters=2", "kernel_size=3", "padding=same"},
+ "3:1:1:4", "conv1d_mb_same_remain.nnlayergolden",
+ LayerGoldenTestParamOptions::DEFAULT, "nchw");
auto conv1d_sb_same_uneven_remain_1 = LayerGoldenTestParamType(
nntrainer::createLayer<nntrainer::Conv1DLayer>,
"padding=same",
},
"1:3:1:4", "conv1d_sb_same_uneven_remain.nnlayergolden",
- LayerGoldenTestParamOptions::DEFAULT);
+ LayerGoldenTestParamOptions::DEFAULT, "nchw");
auto conv1d_sb_same_uneven_remain_2 = LayerGoldenTestParamType(
nntrainer::createLayer<nntrainer::Conv1DLayer>,
"padding=0,1",
},
"1:3:1:4", "conv1d_sb_same_uneven_remain.nnlayergolden",
- LayerGoldenTestParamOptions::DEFAULT);
+ LayerGoldenTestParamOptions::DEFAULT, "nchw");
auto conv1d_mb_same_uneven_remain_1 = LayerGoldenTestParamType(
nntrainer::createLayer<nntrainer::Conv1DLayer>,
"padding=same",
},
"3:3:1:4", "conv1d_mb_same_uneven_remain.nnlayergolden",
- LayerGoldenTestParamOptions::DEFAULT);
+ LayerGoldenTestParamOptions::DEFAULT, "nchw");
auto conv1d_mb_same_uneven_remain_2 = LayerGoldenTestParamType(
nntrainer::createLayer<nntrainer::Conv1DLayer>,
"padding=0,1",
},
"3:3:1:4", "conv1d_mb_same_uneven_remain.nnlayergolden",
- LayerGoldenTestParamOptions::DEFAULT);
+ LayerGoldenTestParamOptions::DEFAULT, "nchw");
auto conv1d_sb_valid_drop_last =
LayerGoldenTestParamType(nntrainer::createLayer<nntrainer::Conv1DLayer>,
"padding=valid",
},
"1:3:1:7", "conv1d_sb_valid_drop_last.nnlayergolden",
- LayerGoldenTestParamOptions::DEFAULT);
+ LayerGoldenTestParamOptions::DEFAULT, "nchw");
auto conv1d_mb_valid_drop_last =
LayerGoldenTestParamType(nntrainer::createLayer<nntrainer::Conv1DLayer>,
"padding=valid",
},
"3:3:1:7", "conv1d_mb_valid_drop_last.nnlayergolden",
- LayerGoldenTestParamOptions::DEFAULT);
+ LayerGoldenTestParamOptions::DEFAULT, "nchw");
-auto conv1d_sb_no_overlap = LayerGoldenTestParamType(
- nntrainer::createLayer<nntrainer::Conv1DLayer>,
- {"filters=3", "kernel_size=2", "stride=3"}, "1:2:1:5",
- "conv1d_sb_no_overlap.nnlayergolden", LayerGoldenTestParamOptions::DEFAULT);
+auto conv1d_sb_no_overlap =
+ LayerGoldenTestParamType(nntrainer::createLayer<nntrainer::Conv1DLayer>,
+ {"filters=3", "kernel_size=2", "stride=3"},
+ "1:2:1:5", "conv1d_sb_no_overlap.nnlayergolden",
+ LayerGoldenTestParamOptions::DEFAULT, "nchw");
auto conv1d_mb_no_overlap =
LayerGoldenTestParamType(nntrainer::createLayer<nntrainer::Conv1DLayer>,
"stride=3",
},
"3:2:1:5", "conv1d_mb_no_overlap.nnlayergolden",
- LayerGoldenTestParamOptions::DEFAULT);
+ LayerGoldenTestParamOptions::DEFAULT, "nchw");
-auto conv1d_sb_causal = LayerGoldenTestParamType(
- nntrainer::createLayer<nntrainer::Conv1DLayer>,
- {"filters=3", "kernel_size=2", "padding=causal"}, "1:1:1:4",
- "conv1d_sb_causal.nnlayergolden", LayerGoldenTestParamOptions::DEFAULT);
+auto conv1d_sb_causal =
+ LayerGoldenTestParamType(nntrainer::createLayer<nntrainer::Conv1DLayer>,
+ {"filters=3", "kernel_size=2", "padding=causal"},
+ "1:1:1:4", "conv1d_sb_causal.nnlayergolden",
+ LayerGoldenTestParamOptions::DEFAULT, "nchw");
-auto conv1d_mb_causal = LayerGoldenTestParamType(
- nntrainer::createLayer<nntrainer::Conv1DLayer>,
- {"filters=3", "kernel_size=2", "padding=causal"}, "3:1:1:4",
- "conv1d_mb_causal.nnlayergolden", LayerGoldenTestParamOptions::DEFAULT);
+auto conv1d_mb_causal =
+ LayerGoldenTestParamType(nntrainer::createLayer<nntrainer::Conv1DLayer>,
+ {"filters=3", "kernel_size=2", "padding=causal"},
+ "3:1:1:4", "conv1d_mb_causal.nnlayergolden",
+ LayerGoldenTestParamOptions::DEFAULT, "nchw");
-auto conv1d_sb_1x1_kernel = LayerGoldenTestParamType(
- nntrainer::createLayer<nntrainer::Conv1DLayer>,
- {"filters=3", "kernel_size=1", "stride=2"}, "1:2:1:5",
- "conv1d_sb_1x1_kernel.nnlayergolden", LayerGoldenTestParamOptions::DEFAULT);
+auto conv1d_sb_1x1_kernel =
+ LayerGoldenTestParamType(nntrainer::createLayer<nntrainer::Conv1DLayer>,
+ {"filters=3", "kernel_size=1", "stride=2"},
+ "1:2:1:5", "conv1d_sb_1x1_kernel.nnlayergolden",
+ LayerGoldenTestParamOptions::DEFAULT, "nchw");
auto conv1d_mb_1x1_kernel =
LayerGoldenTestParamType(nntrainer::createLayer<nntrainer::Conv1DLayer>,
"stride=2",
},
"3:2:1:5", "conv1d_mb_1x1_kernel.nnlayergolden",
- LayerGoldenTestParamOptions::DEFAULT);
+ LayerGoldenTestParamOptions::DEFAULT, "nchw");
auto conv1d_sb_dilation =
LayerGoldenTestParamType(nntrainer::createLayer<nntrainer::Conv1DLayer>,
"dilation=2",
},
"1:3:1:11", "conv1d_sb_dilation.nnlayergolden",
- LayerGoldenTestParamOptions::DEFAULT);
+ LayerGoldenTestParamOptions::DEFAULT, "nchw");
auto conv1d_mb_dilation =
LayerGoldenTestParamType(nntrainer::createLayer<nntrainer::Conv1DLayer>,
"dilation=2",
},
"3:3:1:11", "conv1d_mb_dilation.nnlayergolden",
- LayerGoldenTestParamOptions::DEFAULT);
+ LayerGoldenTestParamOptions::DEFAULT, "nchw");
auto conv1d_sb_same_dilation =
LayerGoldenTestParamType(nntrainer::createLayer<nntrainer::Conv1DLayer>,
"dilation=2",
},
"1:3:1:11", "conv1d_sb_same_dilation.nnlayergolden",
- LayerGoldenTestParamOptions::DEFAULT);
+ LayerGoldenTestParamOptions::DEFAULT, "nchw");
auto conv1d_mb_same_dilation =
LayerGoldenTestParamType(nntrainer::createLayer<nntrainer::Conv1DLayer>,
"dilation=2",
},
"3:3:1:11", "conv1d_mb_same_dilation.nnlayergolden",
- LayerGoldenTestParamOptions::DEFAULT);
+ LayerGoldenTestParamOptions::DEFAULT, "nchw");
auto conv1d_sb_causal_dilation = LayerGoldenTestParamType(
nntrainer::createLayer<nntrainer::Conv1DLayer>,
{"filters=3", "kernel_size=2", "padding=causal", "dilation=2"}, "1:1:1:4",
"conv1d_sb_causal_dilation.nnlayergolden",
- LayerGoldenTestParamOptions::DEFAULT);
+ LayerGoldenTestParamOptions::DEFAULT, "nchw");
auto conv1d_mb_causal_dilation = LayerGoldenTestParamType(
nntrainer::createLayer<nntrainer::Conv1DLayer>,
{"filters=3", "kernel_size=2", "padding=causal", "dilation=2"}, "3:1:1:4",
"conv1d_mb_causal_dilation.nnlayergolden",
- LayerGoldenTestParamOptions::DEFAULT);
+ LayerGoldenTestParamOptions::DEFAULT, "nchw");
GTEST_PARAMETER_TEST(
Convolution1D, LayerGoldenTest,
GTEST_PARAMETER_TEST(Convolution2D, LayerSemantics,
::testing::Values(semantic_conv2d));
-auto conv2d_sb_minimum = LayerGoldenTestParamType(
- nntrainer::createLayer<nntrainer::Conv2DLayer>,
- {"filters=3", "kernel_size=2,2"}, "1:1:4:4",
- "conv2d_sb_minimum.nnlayergolden", LayerGoldenTestParamOptions::DEFAULT);
+auto conv2d_sb_minimum =
+ LayerGoldenTestParamType(nntrainer::createLayer<nntrainer::Conv2DLayer>,
+ {"filters=3", "kernel_size=2,2"}, "1:1:4:4",
+ "conv2d_sb_minimum.nnlayergolden",
+ LayerGoldenTestParamOptions::DEFAULT, "nchw");
-auto conv2d_mb_minimum = LayerGoldenTestParamType(
- nntrainer::createLayer<nntrainer::Conv2DLayer>,
- {"filters=3", "kernel_size=2,2"}, "3:1:4:4",
- "conv2d_mb_minimum.nnlayergolden", LayerGoldenTestParamOptions::DEFAULT);
+auto conv2d_mb_minimum =
+ LayerGoldenTestParamType(nntrainer::createLayer<nntrainer::Conv2DLayer>,
+ {"filters=3", "kernel_size=2,2"}, "3:1:4:4",
+ "conv2d_mb_minimum.nnlayergolden",
+ LayerGoldenTestParamOptions::DEFAULT, "nchw");
-auto conv2d_sb_same_remain = LayerGoldenTestParamType(
- nntrainer::createLayer<nntrainer::Conv2DLayer>,
- {"filters=2", "kernel_size=3,3", "padding=same"}, "1:1:4:4",
- "conv2d_sb_same_remain.nnlayergolden", LayerGoldenTestParamOptions::DEFAULT);
+auto conv2d_sb_same_remain =
+ LayerGoldenTestParamType(nntrainer::createLayer<nntrainer::Conv2DLayer>,
+ {"filters=2", "kernel_size=3,3", "padding=same"},
+ "1:1:4:4", "conv2d_sb_same_remain.nnlayergolden",
+ LayerGoldenTestParamOptions::DEFAULT, "nchw");
-auto conv2d_mb_same_remain = LayerGoldenTestParamType(
- nntrainer::createLayer<nntrainer::Conv2DLayer>,
- {"filters=2", "kernel_size=3,3", "padding=same"}, "3:1:4:4",
- "conv2d_mb_same_remain.nnlayergolden", LayerGoldenTestParamOptions::DEFAULT);
+auto conv2d_mb_same_remain =
+ LayerGoldenTestParamType(nntrainer::createLayer<nntrainer::Conv2DLayer>,
+ {"filters=2", "kernel_size=3,3", "padding=same"},
+ "3:1:4:4", "conv2d_mb_same_remain.nnlayergolden",
+ LayerGoldenTestParamOptions::DEFAULT, "nchw");
auto conv2d_sb_same_uneven_remain_1 = LayerGoldenTestParamType(
nntrainer::createLayer<nntrainer::Conv2DLayer>,
"padding=same",
},
"1:3:4:4", "conv2d_sb_same_uneven_remain.nnlayergolden",
- LayerGoldenTestParamOptions::DEFAULT);
+ LayerGoldenTestParamOptions::DEFAULT, "nchw");
auto conv2d_sb_same_uneven_remain_2 = LayerGoldenTestParamType(
nntrainer::createLayer<nntrainer::Conv2DLayer>,
"padding=0,1,0,1",
},
"1:3:4:4", "conv2d_sb_same_uneven_remain.nnlayergolden",
- LayerGoldenTestParamOptions::DEFAULT);
+ LayerGoldenTestParamOptions::DEFAULT, "nchw");
auto conv2d_mb_same_uneven_remain_1 = LayerGoldenTestParamType(
nntrainer::createLayer<nntrainer::Conv2DLayer>,
"padding=same",
},
"3:3:4:4", "conv2d_mb_same_uneven_remain.nnlayergolden",
- LayerGoldenTestParamOptions::DEFAULT);
+ LayerGoldenTestParamOptions::DEFAULT, "nchw");
auto conv2d_mb_same_uneven_remain_2 = LayerGoldenTestParamType(
nntrainer::createLayer<nntrainer::Conv2DLayer>,
"padding=0,1,0,1",
},
"3:3:4:4", "conv2d_mb_same_uneven_remain.nnlayergolden",
- LayerGoldenTestParamOptions::DEFAULT);
+ LayerGoldenTestParamOptions::DEFAULT, "nchw");
auto conv2d_sb_valid_drop_last =
LayerGoldenTestParamType(nntrainer::createLayer<nntrainer::Conv2DLayer>,
"padding=valid",
},
"1:3:7:7", "conv2d_sb_valid_drop_last.nnlayergolden",
- LayerGoldenTestParamOptions::DEFAULT);
+ LayerGoldenTestParamOptions::DEFAULT, "nchw");
auto conv2d_mb_valid_drop_last =
LayerGoldenTestParamType(nntrainer::createLayer<nntrainer::Conv2DLayer>,
"padding=valid",
},
"3:3:7:7", "conv2d_mb_valid_drop_last.nnlayergolden",
- LayerGoldenTestParamOptions::DEFAULT);
+ LayerGoldenTestParamOptions::DEFAULT, "nchw");
-auto conv2d_sb_no_overlap = LayerGoldenTestParamType(
- nntrainer::createLayer<nntrainer::Conv2DLayer>,
- {"filters=3", "kernel_size=2,2", "stride=3,3"}, "1:2:5:5",
- "conv2d_sb_no_overlap.nnlayergolden", LayerGoldenTestParamOptions::DEFAULT);
+auto conv2d_sb_no_overlap =
+ LayerGoldenTestParamType(nntrainer::createLayer<nntrainer::Conv2DLayer>,
+ {"filters=3", "kernel_size=2,2", "stride=3,3"},
+ "1:2:5:5", "conv2d_sb_no_overlap.nnlayergolden",
+ LayerGoldenTestParamOptions::DEFAULT, "nchw");
auto conv2d_mb_no_overlap =
LayerGoldenTestParamType(nntrainer::createLayer<nntrainer::Conv2DLayer>,
"stride=3,3",
},
"3:2:5:5", "conv2d_mb_no_overlap.nnlayergolden",
- LayerGoldenTestParamOptions::DEFAULT);
+ LayerGoldenTestParamOptions::DEFAULT, "nchw");
-auto conv2d_sb_1x1_kernel = LayerGoldenTestParamType(
- nntrainer::createLayer<nntrainer::Conv2DLayer>,
- {"filters=3", "kernel_size=1,1", "stride=2,2"}, "1:2:5:5",
- "conv2d_sb_1x1_kernel.nnlayergolden", LayerGoldenTestParamOptions::DEFAULT);
+auto conv2d_sb_1x1_kernel =
+ LayerGoldenTestParamType(nntrainer::createLayer<nntrainer::Conv2DLayer>,
+ {"filters=3", "kernel_size=1,1", "stride=2,2"},
+ "1:2:5:5", "conv2d_sb_1x1_kernel.nnlayergolden",
+ LayerGoldenTestParamOptions::DEFAULT, "nchw");
auto conv2d_mb_1x1_kernel =
LayerGoldenTestParamType(nntrainer::createLayer<nntrainer::Conv2DLayer>,
"stride=2,2",
},
"3:2:5:5", "conv2d_mb_1x1_kernel.nnlayergolden",
- LayerGoldenTestParamOptions::DEFAULT);
+ LayerGoldenTestParamOptions::DEFAULT, "nchw");
auto conv2d_sb_dilation =
LayerGoldenTestParamType(nntrainer::createLayer<nntrainer::Conv2DLayer>,
"dilation=2,2",
},
"1:3:11:11", "conv2d_sb_dilation.nnlayergolden",
- LayerGoldenTestParamOptions::DEFAULT);
+ LayerGoldenTestParamOptions::DEFAULT, "nchw");
auto conv2d_mb_dilation =
LayerGoldenTestParamType(nntrainer::createLayer<nntrainer::Conv2DLayer>,
"dilation=2,2",
},
"3:3:11:11", "conv2d_mb_dilation.nnlayergolden",
- LayerGoldenTestParamOptions::DEFAULT);
+ LayerGoldenTestParamOptions::DEFAULT, "nchw");
auto conv2d_sb_same_dilation =
LayerGoldenTestParamType(nntrainer::createLayer<nntrainer::Conv2DLayer>,
"dilation=2,2",
},
"1:3:11:11", "conv2d_sb_same_dilation.nnlayergolden",
- LayerGoldenTestParamOptions::DEFAULT);
+ LayerGoldenTestParamOptions::DEFAULT, "nchw");
auto conv2d_mb_same_dilation =
LayerGoldenTestParamType(nntrainer::createLayer<nntrainer::Conv2DLayer>,
"dilation=2,2",
},
"3:3:11:11", "conv2d_mb_same_dilation.nnlayergolden",
- LayerGoldenTestParamOptions::DEFAULT);
+ LayerGoldenTestParamOptions::DEFAULT, "nchw");
GTEST_PARAMETER_TEST(
Convolution2D, LayerGoldenTest,
nntrainer::createLayer<nntrainer::DropOutLayer>, {"dropout_rate=0.2"},
"2:3:2:3", "dropout_20_training.nnlayergolden",
LayerGoldenTestParamOptions::DEFAULT |
- LayerGoldenTestParamOptions::DROPOUT_MATCH_60_PERCENT);
+ LayerGoldenTestParamOptions::DROPOUT_MATCH_60_PERCENT,
+ "nchw");
auto dropout_20_inference = LayerGoldenTestParamType(
nntrainer::createLayer<nntrainer::DropOutLayer>, {"dropout_rate=0.2"},
- "2:3:2:3", "dropout_20_inference.nnlayergolden", dropout_inference_option);
+ "2:3:2:3", "dropout_20_inference.nnlayergolden", dropout_inference_option,
+ "nchw");
auto dropout_0_training = LayerGoldenTestParamType(
nntrainer::createLayer<nntrainer::DropOutLayer>, {"dropout_rate=0.0"},
"2:3:2:3", "dropout_0_training.nnlayergolden",
- LayerGoldenTestParamOptions::DEFAULT);
+ LayerGoldenTestParamOptions::DEFAULT, "nchw");
auto dropout_100_training = LayerGoldenTestParamType(
nntrainer::createLayer<nntrainer::DropOutLayer>, {"dropout_rate=1.0"},
"2:3:2:3", "dropout_100_training.nnlayergolden",
- LayerGoldenTestParamOptions::DEFAULT);
+ LayerGoldenTestParamOptions::DEFAULT, "nchw");
GTEST_PARAMETER_TEST(Dropout, LayerGoldenTest,
::testing::Values(dropout_20_training, dropout_0_training,
auto fc_basic_plain = LayerGoldenTestParamType(
nntrainer::createLayer<nntrainer::FullyConnectedLayer>, {"unit=5"},
- "3:1:1:10", "fc_plain.nnlayergolden", LayerGoldenTestParamOptions::DEFAULT);
+ "3:1:1:10", "fc_plain.nnlayergolden", LayerGoldenTestParamOptions::DEFAULT,
+ "nchw");
auto fc_basic_single_batch = LayerGoldenTestParamType(
nntrainer::createLayer<nntrainer::FullyConnectedLayer>, {"unit=4"},
"1:1:1:10", "fc_single_batch.nnlayergolden",
- LayerGoldenTestParamOptions::DEFAULT);
+ LayerGoldenTestParamOptions::DEFAULT, "nchw");
auto fc_basic_no_decay = LayerGoldenTestParamType(
nntrainer::createLayer<nntrainer::FullyConnectedLayer>,
{"unit=5", "weight_decay=0.0", "bias_decay=0.0"}, "3:1:1:10",
- "fc_plain.nnlayergolden", LayerGoldenTestParamOptions::DEFAULT);
+ "fc_plain.nnlayergolden", LayerGoldenTestParamOptions::DEFAULT, "nchw");
GTEST_PARAMETER_TEST(FullyConnected, LayerGoldenTest,
::testing::Values(fc_basic_plain, fc_basic_single_batch,
auto gru_single_step = LayerGoldenTestParamType(
nntrainer::createLayer<nntrainer::GRULayer>,
{"unit=5", "integrate_bias=true", "reset_after=false"}, "3:1:1:7",
- "gru_single_step.nnlayergolden", LayerGoldenTestParamOptions::DEFAULT);
+ "gru_single_step.nnlayergolden", LayerGoldenTestParamOptions::DEFAULT,
+ "nchw");
auto gru_multi_step = LayerGoldenTestParamType(
nntrainer::createLayer<nntrainer::GRULayer>,
{"unit=5", "integrate_bias=true", "reset_after=false"}, "3:1:4:7",
- "gru_multi_step.nnlayergolden", LayerGoldenTestParamOptions::DEFAULT);
+ "gru_multi_step.nnlayergolden", LayerGoldenTestParamOptions::DEFAULT, "nchw");
auto gru_single_step_seq =
LayerGoldenTestParamType(nntrainer::createLayer<nntrainer::GRULayer>,
{"unit=5", "return_sequences=true",
"integrate_bias=true", "reset_after=false"},
"3:1:1:7", "gru_single_step_seq.nnlayergolden",
- LayerGoldenTestParamOptions::DEFAULT);
+ LayerGoldenTestParamOptions::DEFAULT, "nchw");
auto gru_multi_step_seq =
LayerGoldenTestParamType(nntrainer::createLayer<nntrainer::GRULayer>,
{"unit=5", "return_sequences=true",
"integrate_bias=true", "reset_after=false"},
"3:1:4:7", "gru_multi_step_seq.nnlayergolden",
- LayerGoldenTestParamOptions::DEFAULT);
+ LayerGoldenTestParamOptions::DEFAULT, "nchw");
auto gru_multi_step_seq_act_orig = LayerGoldenTestParamType(
nntrainer::createLayer<nntrainer::GRULayer>,
{"unit=5", "return_sequences=true", "hidden_state_activation=tanh",
"recurrent_activation=sigmoid", "integrate_bias=true", "reset_after=false"},
"3:1:4:7", "gru_multi_step_seq.nnlayergolden",
- LayerGoldenTestParamOptions::DEFAULT);
+ LayerGoldenTestParamOptions::DEFAULT, "nchw");
auto gru_multi_step_seq_act = LayerGoldenTestParamType(
nntrainer::createLayer<nntrainer::GRULayer>,
{"unit=5", "return_sequences=true", "hidden_state_activation=sigmoid",
"recurrent_activation=tanh", "integrate_bias=true", "reset_after=false"},
"3:1:4:7", "gru_multi_step_seq_act.nnlayergolden",
- LayerGoldenTestParamOptions::DEFAULT);
+ LayerGoldenTestParamOptions::DEFAULT, "nchw");
// Check reset_after
auto gru_reset_after_single_step = LayerGoldenTestParamType(
nntrainer::createLayer<nntrainer::GRULayer>,
{"unit=5", "integrate_bias=false", "reset_after=true"}, "3:1:1:7",
"gru_reset_after_single_step.nnlayergolden",
- LayerGoldenTestParamOptions::DEFAULT);
+ LayerGoldenTestParamOptions::DEFAULT, "nchw");
auto gru_reset_after_multi_step = LayerGoldenTestParamType(
nntrainer::createLayer<nntrainer::GRULayer>,
{"unit=5", "integrate_bias=false", "reset_after=true"}, "3:1:4:7",
"gru_reset_after_multi_step.nnlayergolden",
- LayerGoldenTestParamOptions::DEFAULT);
+ LayerGoldenTestParamOptions::DEFAULT, "nchw");
auto gru_reset_after_single_step_seq = LayerGoldenTestParamType(
nntrainer::createLayer<nntrainer::GRULayer>,
{"unit=5", "return_sequences=true", "integrate_bias=false",
"reset_after=true"},
"3:1:1:7", "gru_reset_after_single_step_seq.nnlayergolden",
- LayerGoldenTestParamOptions::DEFAULT);
+ LayerGoldenTestParamOptions::DEFAULT, "nchw");
auto gru_reset_after_multi_step_seq = LayerGoldenTestParamType(
nntrainer::createLayer<nntrainer::GRULayer>,
{"unit=5", "return_sequences=true", "integrate_bias=false",
"reset_after=true"},
"3:1:4:7", "gru_reset_after_multi_step_seq.nnlayergolden",
- LayerGoldenTestParamOptions::DEFAULT);
+ LayerGoldenTestParamOptions::DEFAULT, "nchw");
auto gru_reset_after_multi_step_seq_act_orig = LayerGoldenTestParamType(
nntrainer::createLayer<nntrainer::GRULayer>,
{"unit=5", "return_sequences=true", "hidden_state_activation=tanh",
"recurrent_activation=sigmoid", "integrate_bias=false", "reset_after=true"},
"3:1:4:7", "gru_reset_after_multi_step_seq.nnlayergolden",
- LayerGoldenTestParamOptions::DEFAULT);
+ LayerGoldenTestParamOptions::DEFAULT, "nchw");
auto gru_reset_after_multi_step_seq_act = LayerGoldenTestParamType(
nntrainer::createLayer<nntrainer::GRULayer>,
{"unit=5", "return_sequences=true", "hidden_state_activation=sigmoid",
"recurrent_activation=tanh", "integrate_bias=false", "reset_after=true"},
"3:1:4:7", "gru_reset_after_multi_step_seq_act.nnlayergolden",
- LayerGoldenTestParamOptions::DEFAULT);
+ LayerGoldenTestParamOptions::DEFAULT, "nchw");
GTEST_PARAMETER_TEST(
GRU, LayerGoldenTest,
auto grucell_single_step = LayerGoldenTestParamType(
nntrainer::createLayer<nntrainer::GRUCellLayer>,
{"unit=5", "integrate_bias=true", "reset_after=false"}, "3:1:1:7,3:1:1:5",
- "grucell_single_step.nnlayergolden", LayerGoldenTestParamOptions::DEFAULT);
+ "grucell_single_step.nnlayergolden", LayerGoldenTestParamOptions::DEFAULT,
+ "nchw");
auto grucell_reset_after_single_step = LayerGoldenTestParamType(
nntrainer::createLayer<nntrainer::GRUCellLayer>,
{"unit=5", "integrate_bias=false", "reset_after=true"}, "3:1:1:7,3:1:1:5",
"grucell_reset_after_single_step.nnlayergolden",
- LayerGoldenTestParamOptions::DEFAULT);
+ LayerGoldenTestParamOptions::DEFAULT, "nchw");
auto grucell_single_step_act = LayerGoldenTestParamType(
nntrainer::createLayer<nntrainer::GRUCellLayer>,
{"unit=5", "integrate_bias=true", "reset_after=false",
"hidden_state_activation=sigmoid", "recurrent_activation=tanh"},
"3:1:1:7,3:1:1:5", "grucell_single_step_act.nnlayergolden",
- LayerGoldenTestParamOptions::DEFAULT);
+ LayerGoldenTestParamOptions::DEFAULT, "nchw");
GTEST_PARAMETER_TEST(GRUCell, LayerGoldenTest,
::testing::Values(grucell_single_step,
auto ln_axis_1 = LayerGoldenTestParamType(
nntrainer::createLayer<nntrainer::LayerNormalizationLayer>, {"axis=1"},
- "2:4:2:3", "ln_axis_1.nnlayergolden", LayerGoldenTestParamOptions::DEFAULT);
+ "2:4:2:3", "ln_axis_1.nnlayergolden", LayerGoldenTestParamOptions::DEFAULT,
+ "nchw");
auto ln_axis_2 = LayerGoldenTestParamType(
nntrainer::createLayer<nntrainer::LayerNormalizationLayer>, {"axis=2"},
- "2:4:2:3", "ln_axis_2.nnlayergolden", LayerGoldenTestParamOptions::DEFAULT);
+ "2:4:2:3", "ln_axis_2.nnlayergolden", LayerGoldenTestParamOptions::DEFAULT,
+ "nchw");
auto ln_axis_3 = LayerGoldenTestParamType(
nntrainer::createLayer<nntrainer::LayerNormalizationLayer>, {"axis=3"},
- "2:4:2:3", "ln_axis_3.nnlayergolden", LayerGoldenTestParamOptions::DEFAULT);
+ "2:4:2:3", "ln_axis_3.nnlayergolden", LayerGoldenTestParamOptions::DEFAULT,
+ "nchw");
auto ln_axis_1_2 = LayerGoldenTestParamType(
nntrainer::createLayer<nntrainer::LayerNormalizationLayer>, {"axis=1, 2"},
- "2:4:2:3", "ln_axis_1_2.nnlayergolden", LayerGoldenTestParamOptions::DEFAULT);
+ "2:4:2:3", "ln_axis_1_2.nnlayergolden", LayerGoldenTestParamOptions::DEFAULT,
+ "nchw");
auto ln_axis_2_3 = LayerGoldenTestParamType(
nntrainer::createLayer<nntrainer::LayerNormalizationLayer>, {"axis=2, 3"},
- "2:4:2:3", "ln_axis_2_3.nnlayergolden", LayerGoldenTestParamOptions::DEFAULT);
+ "2:4:2:3", "ln_axis_2_3.nnlayergolden", LayerGoldenTestParamOptions::DEFAULT,
+ "nchw");
auto ln_axis_1_3 = LayerGoldenTestParamType(
nntrainer::createLayer<nntrainer::LayerNormalizationLayer>, {"axis=1, 3"},
- "2:4:2:3", "ln_axis_1_3.nnlayergolden", LayerGoldenTestParamOptions::DEFAULT);
+ "2:4:2:3", "ln_axis_1_3.nnlayergolden", LayerGoldenTestParamOptions::DEFAULT,
+ "nchw");
auto ln_axis_1_2_3 = LayerGoldenTestParamType(
nntrainer::createLayer<nntrainer::LayerNormalizationLayer>, {"axis=1, 2, 3"},
"2:4:2:3", "ln_axis_1_2_3.nnlayergolden",
- LayerGoldenTestParamOptions::DEFAULT);
+ LayerGoldenTestParamOptions::DEFAULT, "nchw");
GTEST_PARAMETER_TEST(LayerNormalization, LayerGoldenTest,
::testing::Values(ln_axis_1, ln_axis_2, ln_axis_3,
GTEST_PARAMETER_TEST(LSTM, LayerSemantics, ::testing::Values(semantic_lstm));
-auto lstm_single_step = LayerGoldenTestParamType(
- nntrainer::createLayer<nntrainer::LSTMLayer>,
- {"unit=5", "integrate_bias=true"}, "3:1:1:7",
- "lstm_single_step.nnlayergolden", LayerGoldenTestParamOptions::DEFAULT);
+auto lstm_single_step =
+ LayerGoldenTestParamType(nntrainer::createLayer<nntrainer::LSTMLayer>,
+ {"unit=5", "integrate_bias=true"}, "3:1:1:7",
+ "lstm_single_step.nnlayergolden",
+ LayerGoldenTestParamOptions::DEFAULT, "nchw");
auto lstm_multi_step = LayerGoldenTestParamType(
nntrainer::createLayer<nntrainer::LSTMLayer>,
{"unit=5", "integrate_bias=true"}, "3:1:4:7", "lstm_multi_step.nnlayergolden",
- LayerGoldenTestParamOptions::DEFAULT);
+ LayerGoldenTestParamOptions::DEFAULT, "nchw");
auto lstm_single_step_seq = LayerGoldenTestParamType(
nntrainer::createLayer<nntrainer::LSTMLayer>,
{"unit=5", "integrate_bias=true", "return_sequences=true"}, "3:1:1:7",
- "lstm_single_step_seq.nnlayergolden", LayerGoldenTestParamOptions::DEFAULT);
+ "lstm_single_step_seq.nnlayergolden", LayerGoldenTestParamOptions::DEFAULT,
+ "nchw");
auto lstm_multi_step_seq = LayerGoldenTestParamType(
nntrainer::createLayer<nntrainer::LSTMLayer>,
{"unit=5", "integrate_bias=true", "return_sequences=true"}, "3:1:4:7",
- "lstm_multi_step_seq.nnlayergolden", LayerGoldenTestParamOptions::DEFAULT);
+ "lstm_multi_step_seq.nnlayergolden", LayerGoldenTestParamOptions::DEFAULT,
+ "nchw");
auto lstm_multi_step_seq_act_orig = LayerGoldenTestParamType(
nntrainer::createLayer<nntrainer::LSTMLayer>,
{"unit=5", "integrate_bias=true", "return_sequences=true",
"hidden_state_activation=tanh", "recurrent_activation=sigmoid"},
"3:1:4:7", "lstm_multi_step_seq.nnlayergolden",
- LayerGoldenTestParamOptions::DEFAULT);
+ LayerGoldenTestParamOptions::DEFAULT, "nchw");
auto lstm_multi_step_seq_act = LayerGoldenTestParamType(
nntrainer::createLayer<nntrainer::LSTMLayer>,
{"unit=5", "integrate_bias=true", "return_sequences=true",
"hidden_state_activation=sigmoid", "recurrent_activation=tanh"},
"3:1:4:7", "lstm_multi_step_seq_act.nnlayergolden",
- LayerGoldenTestParamOptions::DEFAULT);
+ LayerGoldenTestParamOptions::DEFAULT, "nchw");
GTEST_PARAMETER_TEST(LSTM, LayerGoldenTest,
::testing::Values(lstm_single_step, lstm_multi_step,
auto lstmcell_single_step = LayerGoldenTestParamType(
nntrainer::createLayer<nntrainer::LSTMCellLayer>,
{"unit=5", "integrate_bias=true"}, "3:1:1:7,3:1:1:5,3:1:1:5",
- "lstmcell_single_step.nnlayergolden", LayerGoldenTestParamOptions::DEFAULT);
+ "lstmcell_single_step.nnlayergolden", LayerGoldenTestParamOptions::DEFAULT,
+ "nchw");
GTEST_PARAMETER_TEST(LSTMCell, LayerGoldenTest,
::testing::Values(lstmcell_single_step));
nntrainer::createLayer<nntrainer::MultiHeadAttentionLayer>,
{"num_heads=2", "projected_key_dim=3"}, "1:1:5:7,1:1:3:7,1:1:3:7",
"multi_head_attention_single_batch.nnlayergolden",
- LayerGoldenTestParamOptions::DEFAULT);
+ LayerGoldenTestParamOptions::DEFAULT, "nchw");
auto multi_head_attention = LayerGoldenTestParamType(
nntrainer::createLayer<nntrainer::MultiHeadAttentionLayer>,
{"num_heads=2", "projected_key_dim=3"}, "2:1:5:7,2:1:3:7,2:1:3:7",
- "multi_head_attention.nnlayergolden", LayerGoldenTestParamOptions::DEFAULT);
+ "multi_head_attention.nnlayergolden", LayerGoldenTestParamOptions::DEFAULT,
+ "nchw");
auto multi_head_attention_return_attention_scores = LayerGoldenTestParamType(
nntrainer::createLayer<nntrainer::MultiHeadAttentionLayer>,
"average_attention_weight=false"},
"2:1:5:7,2:1:3:7,2:1:3:7",
"multi_head_attention_return_attention_scores.nnlayergolden",
- LayerGoldenTestParamOptions::DEFAULT);
+ LayerGoldenTestParamOptions::DEFAULT, "nchw");
auto multi_head_attention_value_dim = LayerGoldenTestParamType(
nntrainer::createLayer<nntrainer::MultiHeadAttentionLayer>,
{"num_heads=2", "projected_key_dim=3", "projected_value_dim=5"},
"2:1:5:7,2:1:3:7,2:1:3:7", "multi_head_attention_value_dim.nnlayergolden",
- LayerGoldenTestParamOptions::DEFAULT);
+ LayerGoldenTestParamOptions::DEFAULT, "nchw");
auto multi_head_attention_output_shape = LayerGoldenTestParamType(
nntrainer::createLayer<nntrainer::MultiHeadAttentionLayer>,
{"num_heads=2", "projected_key_dim=3", "output_shape=5"},
"2:1:5:7,2:1:3:7,2:1:3:7", "multi_head_attention_output_shape.nnlayergolden",
- LayerGoldenTestParamOptions::DEFAULT);
+ LayerGoldenTestParamOptions::DEFAULT, "nchw");
GTEST_PARAMETER_TEST(
MultiHeadAttention, LayerGoldenTest,
auto positional_encoding_partial = LayerGoldenTestParamType(
nntrainer::createLayer<nntrainer::PositionalEncodingLayer>,
{"max_timestep=10"}, "3:1:7:6", "positional_encoding_partial.nnlayergolden",
- LayerGoldenTestParamOptions::DEFAULT);
+ LayerGoldenTestParamOptions::DEFAULT, "nchw");
auto positional_encoding = LayerGoldenTestParamType(
nntrainer::createLayer<nntrainer::PositionalEncodingLayer>,
{"max_timestep=10"}, "3:1:10:6", "positional_encoding.nnlayergolden",
- LayerGoldenTestParamOptions::DEFAULT);
+ LayerGoldenTestParamOptions::DEFAULT, "nchw");
INSTANTIATE_TEST_CASE_P(PositionalEncoding, LayerGoldenTest,
::testing::Values(positional_encoding_partial,
auto rnn_single_step = LayerGoldenTestParamType(
nntrainer::createLayer<nntrainer::RNNLayer>,
{"unit=5", "return_sequences=false", "integrate_bias=true"}, "3:1:1:7",
- "rnn_single_step.nnlayergolden", LayerGoldenTestParamOptions::DEFAULT);
+ "rnn_single_step.nnlayergolden", LayerGoldenTestParamOptions::DEFAULT,
+ "nchw");
GTEST_PARAMETER_TEST(RNN, LayerGoldenTest, ::testing::Values(rnn_single_step));
GTEST_PARAMETER_TEST(RNNCell, LayerSemantics,
::testing::Values(semantic_rnncell));
-auto rnncell_single_step = LayerGoldenTestParamType(
- nntrainer::createLayer<nntrainer::RNNCellLayer>,
- {"unit=5", "integrate_bias=true"}, "3:1:1:7,3:1:1:5",
- "rnncell_single_step.nnlayergolden", LayerGoldenTestParamOptions::DEFAULT);
+auto rnncell_single_step =
+ LayerGoldenTestParamType(nntrainer::createLayer<nntrainer::RNNCellLayer>,
+ {"unit=5", "integrate_bias=true"}, "3:1:1:7,3:1:1:5",
+ "rnncell_single_step.nnlayergolden",
+ LayerGoldenTestParamOptions::DEFAULT, "nchw");
GTEST_PARAMETER_TEST(RNNCell, LayerGoldenTest,
::testing::Values(rnncell_single_step));
{"unit=5", "integrate_bias=true", "hidden_state_zoneout_rate=0.1",
"cell_state_zoneout_rate=0.0"},
"3:1:1:7,3:1:1:5,3:1:1:5", "zoneout_lstmcell_single_step.nnlayergolden",
- LayerGoldenTestParamOptions::DEFAULT);
+ LayerGoldenTestParamOptions::DEFAULT, "nchw");
INSTANTIATE_TEST_CASE_P(LSTMCell, LayerGoldenTest,
::testing::Values(zoneout_lstmcell_single_step));
static nntrainer::IniSection bn_base("bn", "Type=batch_normalization");
static nntrainer::IniSection sgd_base("optimizer", "Type = sgd");
+static nntrainer::IniSection nn_base_nhwc = nn_base + "tensor_type=NHWC";
+static nntrainer::IniSection nn_base_nchw = nn_base + "tensor_type=NCHW";
+
using I = nntrainer::IniSection;
using INI = nntrainer::IniWrapper;
INI fc_sigmoid_baseline(
"fc_sigmoid",
- {nn_base + "batch_size = 3",
+ {nn_base_nchw + "batch_size = 3",
sgd_base + "learning_rate = 1",
I("input") + input_base + "input_shape = 1:1:3",
I("dense") + fc_base + "unit = 5",