Previously, nntrainer only supported fp32, so it never needed to change
a tensor's data type; supporting fp16 now requires this. If the tensor
type is fp16, obtain input_dim from InitLayerContext through
getInputDimensions and call setDataType on each dimension. Since
getInputDimensions returns a const object whose elements cannot be
modified, a getMutableInputDimensions function was added.
Signed-off-by: SeoHyungjun <hyungjun.seo@samsung.com>
*/
/**
 * @brief Get the Input Dimensions object (read-only)
 *
 * @return const std::vector<TensorDim>& Input dimensions; elements cannot
 *         be modified through this accessor — use getMutableInputDimensions
 *         when in-place modification (e.g. setDataType) is required
 */
const std::vector<TensorDim> &getInputDimensions() const { return input_dim; }
+ /**
+ * @brief Get the Mutable Input Dimensions object
+ *
+ * @return std::vector<TensorDim>& Input dimensions
+ */
+ std::vector<TensorDim> &getMutableInputDimensions() { return input_dim; }
+
/**
* @brief Set Data Type for Input Dimensions
*
"golden_test", "", 0.0, tensor_type);
layer->finalize(context);
- for (auto dim : context.getInputDimensions()) {
+ for (auto &dim : context.getMutableInputDimensions()) {
if (tensor_type[2] == "fp16") {
dim.setDataType(ml::train::TensorDim::DataType::FP16);
}