y = 1 if x > threshold
y = 0 if x <= threshold

y' is undefined (the threshold function is not differentiable),
so this layer has no backward pass.
*/
// Binarizes its input: outputs 1 where bottom > threshold_, else 0.
template <typename Dtype>
class ThresholdLayer : public NeuronLayer<Dtype> {
 protected:
  // CPU forward: top[i] = bottom[i] > threshold_ ? 1 : 0.
  virtual Dtype Forward_cpu(const vector<Blob<Dtype>*>& bottom,
      vector<Blob<Dtype>*>* top);
  // GPU forward: same thresholding, computed by a CUDA kernel.
  virtual Dtype Forward_gpu(const vector<Blob<Dtype>*>& bottom,
      vector<Blob<Dtype>*>* top);
  // The threshold function has a zero/undefined derivative, so no
  // gradient can be propagated; backward is deliberately unimplemented.
  virtual void Backward_cpu(const vector<Blob<Dtype>*>& top,
      const bool propagate_down, vector<Blob<Dtype>*>* bottom) {
    NOT_IMPLEMENTED;
  }

  // Comparison threshold; presumably read from ThresholdParameter in
  // SetUp (not visible in this chunk) — confirm against the layer setup.
  Dtype threshold_;
};
--- /dev/null
+// Copyright 2014 BVLC and contributors.
+
+#include <algorithm>
+#include <vector>
+
+#include "caffe/layer.hpp"
+#include "caffe/vision_layers.hpp"
+
+using std::max;
+
+namespace caffe {
+
+template <typename Dtype>
+__global__ void ThresholdForward(const int n, const, threshold, const Dtype* in, Dtype* out) {
+ CUDA_KERNEL_LOOP(index, n) {
+ out[index] = in[index] > threshold ? 1 : 0;
+ }
+}
+
+template <typename Dtype>
+Dtype ThresholdLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
+ vector<Blob<Dtype>*>* top) {
+ const Dtype* bottom_data = bottom[0]->gpu_data();
+ Dtype* top_data = (*top)[0]->mutable_gpu_data();
+ const int count = bottom[0]->count();
+ // NOLINT_NEXT_LINE(whitespace/operators)
+ ReLUForward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>(
+ count, threshold_, bottom_data, top_data);
+ CUDA_POST_KERNEL_CHECK;
+
+ return Dtype(0);
+}
+
+
+INSTANTIATE_CLASS(ReLULayer);
+
+
+} // namespace caffe
}
}
+TYPED_TEST(ThresholdLayerTest, TestGPU) {
+ LayerParameter layer_param;
+ Caffe::set_mode(Caffe::GPU);
+ ThresholdLayer<TypeParam> layer(layer_param);
+ layer.SetUp(this->blob_bottom_vec_, &(this->blob_top_vec_));
+ layer.Forward(this->blob_bottom_vec_, &(this->blob_top_vec_));
+ // Now, check values
+ const TypeParam* bottom_data = this->blob_bottom_->cpu_data();
+ const TypeParam* top_data = this->blob_top_->cpu_data();
+ const TypeParam threshold_ = layer_param.threshold_param().threshold();
+ for (int i = 0; i < this->blob_bottom_->count(); ++i) {
+ EXPECT_GE(top_data[i], 0.);
+ EXPECT_LE(top_data[i], 1.);
+ if (top_data[i] == 0) {
+ EXPECT_LE(bottom_data[i],threshold_);
+ }
+ if (top_data[i] == 1) {
+ EXPECT_GT(bottom_data[i],threshold_);
+ }
+ }
+}
+
+TYPED_TEST(ThresholdLayerTest, TestGPU2) {
+ LayerParameter layer_param;
+ Caffe::set_mode(Caffe::GPU);
+ ThresholdParameter* threshold_param =
+ layer_param.mutable_threshold_param();
+ threshold_param->set_threshold(0.5);
+ ThresholdLayer<TypeParam> layer(layer_param);
+ layer.SetUp(this->blob_bottom_vec_, &(this->blob_top_vec_));
+ layer.Forward(this->blob_bottom_vec_, &(this->blob_top_vec_));
+ // Now, check values
+ const TypeParam* bottom_data = this->blob_bottom_->cpu_data();
+ const TypeParam* top_data = this->blob_top_->cpu_data();
+ const TypeParam threshold_ = layer_param.threshold_param().threshold();
+ EXPECT_FLOAT_EQ(threshold_,0.5);
+ for (int i = 0; i < this->blob_bottom_->count(); ++i) {
+ EXPECT_GE(top_data[i], 0.);
+ EXPECT_LE(top_data[i], 1.);
+ if (top_data[i] == 0) {
+ EXPECT_LE(bottom_data[i],threshold_);
+ }
+ if (top_data[i] == 1) {
+ EXPECT_TRUE(bottom_data[i] > threshold_);
+ }
+ }
+}
+
} // namespace caffe