From: Andrey Malevich
Date: Wed, 13 Mar 2019 05:57:44 +0000 (-0700)
Subject: Fix half-float conversion ops to handle tensors larger than 2B of params (#17952)
X-Git-Tag: accepted/tizen/6.5/unified/20211028.231830~839
X-Git-Url: http://review.tizen.org/git/?a=commitdiff_plain;h=c8f9072ab635e008ba88f4ad62478f8ac3379449;p=platform%2Fupstream%2Fpytorch.git

Fix half-float conversion ops to handle tensors larger than 2B of params (#17952)

Summary:
Pull Request resolved: https://github.com/pytorch/pytorch/pull/17952
As described in the title: the conversion loops indexed with `auto i = 0`, which deduces a 32-bit `int` and overflows once a tensor holds more than 2^31 - 1 elements; index with `size_t` instead.

Reviewed By: hyuen

Differential Revision: D14435092

fbshipit-source-id: dc614ba16ad531101d04d01aec8f1fbd534ebec5
---

diff --git a/caffe2/operators/half_float_ops.cc b/caffe2/operators/half_float_ops.cc
index 3745121..b186ec6 100644
--- a/caffe2/operators/half_float_ops.cc
+++ b/caffe2/operators/half_float_ops.cc
@@ -12,7 +12,7 @@ bool FloatToHalfOp<CPUContext>::RunOnDevice() {
   at::Half* out = output->template mutable_data<at::Half>();
   auto N = input.numel();
 
-  for (auto i = 0; i < N; i++) {
+  for (size_t i = 0; i < N; i++) {
     out[i] = data[i];
   }
 
@@ -28,7 +28,7 @@ bool HalfToFloatOp<CPUContext>::RunOnDevice() {
   float* out = output->template mutable_data<float>();
   auto N = input.numel();
 
-  for (auto i = 0; i < N; i++) {
+  for (size_t i = 0; i < N; i++) {
     out[i] = data[i];
   }
   return true;
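
For background, a minimal standalone C++ sketch of the failure mode the patch fixes: `auto i = 0` deduces a plain 32-bit `int`, while `input.numel()` can exceed INT_MAX for tensors of the size named in the title. This is illustrative only, not repository code; the constant `N` and the printouts are assumptions chosen to show the boundary.

#include <cstdint>
#include <cstdio>
#include <limits>
#include <type_traits>

int main() {
  // A count like the one input.numel() would return for a "2B params"
  // tensor: just past the 2^31 - 1 boundary (illustrative value).
  const int64_t N = (int64_t{1} << 31) + 3;

  auto i = 0;  // deduced as plain 32-bit int, exactly as in the old loop header
  static_assert(std::is_same<decltype(i), int>::value,
                "auto from the literal 0 deduces int");
  (void)i;

  // In `i < N` the int index is promoted to int64_t, so the comparison is
  // fine; the bug is that incrementing a 32-bit int past INT_MAX is signed
  // overflow (undefined behavior), so indices at or beyond 2^31 were never
  // safely reachable before the fix.
  std::printf("N = %lld, INT_MAX = %d, N fits in int: %s\n",
              static_cast<long long>(N), std::numeric_limits<int>::max(),
              N <= std::numeric_limits<int>::max() ? "yes" : "no");

  // size_t is 64 bits wide on the platforms that can hold such tensors, so
  // the patched header `for (size_t i = 0; i < N; i++)` can represent every
  // index up to N - 1.
  std::printf("sizeof(int) = %zu, sizeof(size_t) = %zu\n",
              sizeof(int), sizeof(size_t));
  return 0;
}

The same reasoning explains why the fix touches only the loop headers: `out[i] = data[i]` is already safe once `i` has a 64-bit type, and `N` itself was never the problem because `numel()` returns a 64-bit count.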