From ba287eebca61a54bd0e6ec57b2761abf1110ffdd Mon Sep 17 00:00:00 2001
From: Huan Gui
Date: Wed, 5 Dec 2018 22:51:23 -0800
Subject: [PATCH] Fix clip gradient with empty input (#14709)

Summary:
Pull Request resolved: https://github.com/pytorch/pytorch/pull/14709

As titled

Reviewed By: Wakeupbuddy

Differential Revision: D13305554

fbshipit-source-id: 380062d4b0e4f9dc0207a27766cac7b8d05384d5
---
 caffe2/operators/clip_op.cc                 |  2 +-
 caffe2/operators/clip_op.cu                 |  4 ++--
 caffe2/python/operator_test/clip_op_test.py | 18 +++++++++++-------
 3 files changed, 14 insertions(+), 10 deletions(-)

diff --git a/caffe2/operators/clip_op.cc b/caffe2/operators/clip_op.cc
index b056005..80f4451 100644
--- a/caffe2/operators/clip_op.cc
+++ b/caffe2/operators/clip_op.cc
@@ -20,7 +20,7 @@ bool ClipGradientOp<float, CPUContext>::RunOnDevice() {
   auto& Y = Input(0);
   auto& dY = Input(1);
   auto* dX = Output(0);
-  CAFFE_ENFORCE_GT(Y.numel(), 0);
+  CAFFE_ENFORCE_GE(Y.numel(), 0);
   CAFFE_ENFORCE_EQ(dY.numel(), Y.numel());
   dX->ResizeLike(Y);
   const float* Ydata = Y.data<float>();
diff --git a/caffe2/operators/clip_op.cu b/caffe2/operators/clip_op.cu
index 167ef21..46422a9 100644
--- a/caffe2/operators/clip_op.cu
+++ b/caffe2/operators/clip_op.cu
@@ -44,7 +44,7 @@ template <>
 bool ClipOp<float, CUDAContext>::RunOnDevice() {
   auto& X = Input(0);
   auto* Y = Output(0);
-  CAFFE_ENFORCE_GT(X.size(), 0);
+  CAFFE_ENFORCE_GE(X.size(), 0);
   Y->ResizeLike(X);
   ClipKernel<<<
       CAFFE_GET_BLOCKS(X.size()),
@@ -60,7 +60,7 @@ bool ClipGradientOp<float, CUDAContext>::RunOnDevice() {
   auto& Y = Input(0);
   auto& dY = Input(1);
   auto* dX = Output(0);
-  CAFFE_ENFORCE_GT(Y.size(), 0);
+  CAFFE_ENFORCE_GE(Y.size(), 0);
   CAFFE_ENFORCE_EQ(dY.size(), Y.size());
   dX->ResizeLike(Y);
   ClipGradientKernel<<<
diff --git a/caffe2/python/operator_test/clip_op_test.py b/caffe2/python/operator_test/clip_op_test.py
index 46163d3..0c981ce 100644
--- a/caffe2/python/operator_test/clip_op_test.py
+++ b/caffe2/python/operator_test/clip_op_test.py
@@ -14,16 +14,18 @@ import caffe2.python.serialized_test.serialized_test_util as serial
 
 
 class TestClip(serial.SerializedTestCase):
-    @serial.given(X=hu.tensor(),
+    @serial.given(X=hu.tensor(min_dim=0),
                   min_=st.floats(min_value=-2, max_value=0),
                   max_=st.floats(min_value=0, max_value=2),
                   inplace=st.booleans(),
                   **hu.gcs)
     def test_clip(self, X, min_, max_, inplace, gc, dc):
         # go away from the origin point to avoid kink problems
-
-        X[np.abs(X - min_) < 0.05] += 0.1
-        X[np.abs(X - max_) < 0.05] += 0.1
+        if np.isscalar(X):
+            X = np.array([], dtype=np.float32)
+        else:
+            X[np.abs(X - min_) < 0.05] += 0.1
+            X[np.abs(X - max_) < 0.05] += 0.1
 
         def clip_ref(X):
             X = X.clip(min_, max_)
@@ -40,13 +42,15 @@ class TestClip(serial.SerializedTestCase):
         # Gradient check wrt X
         self.assertGradientChecks(gc, op, [X], 0, [0])
 
-    @given(X=hu.tensor(),
+    @given(X=hu.tensor(min_dim=0),
            inplace=st.booleans(),
            **hu.gcs)
     def test_clip_default(self, X, inplace, gc, dc):
         # go away from the origin point to avoid kink problems
-        X += 0.04 * np.sign(X)
-
+        if np.isscalar(X):
+            X = np.array([], dtype=np.float32)
+        else:
+            X += 0.04 * np.sign(X)
         def clip_ref(X):
             return (X,)
-- 
2.7.4
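As a quick way to exercise the empty-input path this patch unblocks, the sketch below runs Clip and ClipGradient on a zero-element tensor through the caffe2 Python workspace. It is an illustrative snippet rather than part of the change: the blob names and the min/max values are made up, and it assumes a build that already contains this fix (without it, the gradient op's CAFFE_ENFORCE_GT check rejects the empty tensor).

import numpy as np
from caffe2.python import core, workspace

# Empty float32 tensors: numel() == 0. Before this patch ClipGradient enforced
# numel() > 0 and would abort here; the relaxed GE(numel(), 0) lets it through.
workspace.FeedBlob("X", np.array([], dtype=np.float32))
workspace.FeedBlob("dY", np.array([], dtype=np.float32))

# Forward clip; min/max values are arbitrary for the illustration.
workspace.RunOperatorOnce(
    core.CreateOperator("Clip", ["X"], ["Y"], min=-1.0, max=1.0))

# Backward: ClipGradient reads Y (Input(0)) and dY (Input(1)) and writes dX
# (Output(0)), matching the operator body shown in clip_op.cc above.
workspace.RunOperatorOnce(
    core.CreateOperator("ClipGradient", ["Y", "dY"], ["dX"], min=-1.0, max=1.0))

print(workspace.FetchBlob("Y").shape)   # (0,)
print(workspace.FetchBlob("dX").shape)  # (0,)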