auto& Y = Input(0);
auto& dY = Input(1);
auto* dX = Output(0);
- CAFFE_ENFORCE_GT(Y.numel(), 0);
+ CAFFE_ENFORCE_GE(Y.numel(), 0);
CAFFE_ENFORCE_EQ(dY.numel(), Y.numel());
dX->ResizeLike(Y);
const float* Ydata = Y.data<float>();
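// Context for the GT -> GE relaxation above: a zero-element tensor is a
// legal Clip input, so the gradient op must accept numel() == 0 as well.
// A minimal sketch of how the rest of this CPU gradient typically proceeds
// (the remainder of the hunk is elided here, so the exact body is an
// assumption, not a quote from the file) -- the loop simply never executes
// for an empty tensor:
//
//   const float* dYdata = dY.data<float>();
//   float* dXdata = dX->mutable_data<float>();
//   for (int i = 0; i < Y.numel(); ++i) {
//     // Pass the gradient through only where the input was not clipped.
//     dXdata[i] = dYdata[i] * (Ydata[i] > min_ && Ydata[i] < max_);
//   }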
bool ClipOp<float, CUDAContext>::RunOnDevice() {
auto& X = Input(0);
auto* Y = Output(0);
- CAFFE_ENFORCE_GT(X.size(), 0);
+ CAFFE_ENFORCE_GE(X.size(), 0);
Y->ResizeLike(X);
ClipKernel<<<
CAFFE_GET_BLOCKS(X.size()),
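// Why relaxing to GE is safe for this launch: CAFFE_GET_BLOCKS clamps its
// result to at least one block (CUDA forbids zero-block launches), and the
// grid-stride loop inside the kernel does no work when its bound is 0, so an
// empty X yields an empty Y without touching memory. A hedged sketch of the
// kernel shape this launch assumes (illustrative, not quoted from the file):
//
//   __global__ void ClipKernel(
//       const int N, const float minval, const float maxval,
//       const float* X, float* Y) {
//     CUDA_1D_KERNEL_LOOP(i, N) { // zero iterations when N == 0
//       Y[i] = fminf(fmaxf(X[i], minval), maxval);
//     }
//   }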
auto& Y = Input(0);
auto& dY = Input(1);
auto* dX = Output(0);
- CAFFE_ENFORCE_GT(Y.size(), 0);
+ CAFFE_ENFORCE_GE(Y.size(), 0);
CAFFE_ENFORCE_EQ(dY.size(), Y.size());
dX->ResizeLike(Y);
ClipGradientKernel<<<
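// The gradient launch follows the same pattern: with Y.size() == 0 the
// kernel runs on one idle block and returns immediately. A sketch of the
// expected kernel body (an assumption for illustration, not the file's
// definition):
//
//   __global__ void ClipGradientKernel(
//       const int N, const float minval, const float maxval,
//       const float* Y, const float* dY, float* dX) {
//     CUDA_1D_KERNEL_LOOP(i, N) {
//       // Gradient passes through only inside the (minval, maxval) band.
//       dX[i] = dY[i] * (Y[i] > minval && Y[i] < maxval);
//     }
//   }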
class TestClip(serial.SerializedTestCase):
- @serial.given(X=hu.tensor(),
+ @serial.given(X=hu.tensor(min_dim=0),
min_=st.floats(min_value=-2, max_value=0),
max_=st.floats(min_value=0, max_value=2),
inplace=st.booleans(),
**hu.gcs)
def test_clip(self, X, min_, max_, inplace, gc, dc):
# go away from the origin point to avoid kink problems
-
- X[np.abs(X - min_) < 0.05] += 0.1
- X[np.abs(X - max_) < 0.05] += 0.1
+ if np.isscalar(X):
+     X = np.array([], dtype=np.float32)
+ else:
+     X[np.abs(X - min_) < 0.05] += 0.1
+     X[np.abs(X - max_) < 0.05] += 0.1
def clip_ref(X):
    X = X.clip(min_, max_)
    return (X,)
# Gradient check wrt X
self.assertGradientChecks(gc, op, [X], 0, [0])
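# What the min_dim=0 change above buys: hu.tensor(min_dim=0) may now draw a
# 0-d tensor, which hypothesis hands over as a numpy scalar; the test turns
# that case into an empty float32 array, so the zero-element path enabled by
# the GT -> GE change gets covered. A standalone numpy illustration (names
# and values here are ours, not the test's):
#
#   import numpy as np
#   X = np.array([], dtype=np.float32)
#   assert X.clip(-1.0, 1.0).size == 0  # clipping an empty array is a no-op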
- @given(X=hu.tensor(),
+ @given(X=hu.tensor(min_dim=0),
inplace=st.booleans(),
**hu.gcs)
def test_clip_default(self, X, inplace, gc, dc):
# go away from the origin point to avoid kink problems
- X += 0.04 * np.sign(X)
-
+ if np.isscalar(X):
+     X = np.array([], dtype=np.float32)
+ else:
+     X += 0.04 * np.sign(X)
def clip_ref(X):
    return (X,)
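# With no min/max arguments Clip defaults to the full float range, so the op
# is the identity and the reference simply returns X unchanged. A hedged
# sketch of how this test typically drives the operator (following the
# Caffe2 harness conventions; the exact calls in the elided lines are an
# assumption):
#
#   op = core.CreateOperator("Clip", ["X"], ["X" if inplace else "Y"])
#   self.assertReferenceChecks(gc, op, [X], clip_ref)
#   self.assertDeviceChecks(dc, op, [X], [0])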