nntrainer::Tformat fm, nntrainer::Tdatatype d_type) {
  nntrainer::TensorDim::TensorType t_type(fm, d_type);
  nntrainer::Tensor t(batch, channel, height, width, t_type);
  // Fill the tensor with sequential values 0, 1, 2, ... using the apply
  // overload that matches the tensor's data type, so FP16 tensors are not
  // silently routed through the float path.
  // NOTE(review): resolved leftover diff markers here — the '+' hunk
  // (real FP32/FP16 branches) is the intended code; the '-' hunk was the
  // old commented-out version.
  if (t_type.data_type == nntrainer::Tdatatype::FP32) {
    float i = 0;
    t = t.apply((std::function<float(float)>)[&](float in) { return i++; });
  } else if (t_type.data_type == nntrainer::Tdatatype::FP16) {
    // NOTE(review): _FP16 is a project typedef; confirm this branch is
    // compiled only when FP16 support is enabled in the build.
    _FP16 i = 0;
    t = t.apply((std::function<_FP16(_FP16)>)[&](_FP16 in) { return i++; });
  }
  return t;
}
unsigned int height, unsigned int width,
                             float min, float max, nntrainer::Tformat fm,
                             nntrainer::Tdatatype d_type) {
  // Build a tensor of the requested geometry/format/dtype and fill it with
  // values drawn uniformly from [min, max].
  // NOTE(review): removed leftover diff markers — the '-'/'+' pair here was a
  // whitespace-only hunk; the declaration itself is unchanged.
  nntrainer::TensorDim::TensorType t_type(fm, d_type);
  nntrainer::Tensor t(batch, channel, height, width, t_type);
  t.setRandUniform(min, max);
  return t;
nntrainer::Tensor t = constant(1.0, 2, 2, 2, 2, nntrainer::Tformat::NCHW,
nntrainer::Tdatatype::FP16);
int idx = 0;
- std::function<float(float)> f = [&](float in) { return idx++ % 2; };
+ std::function<_FP16(_FP16)> f = [&](_FP16 in) {
+ return static_cast<_FP16>(idx++ % 2);
+ };
t = t.apply(f);
nntrainer::Tensor actual, expected;