void ConvolutionLayer<Dtype>::LayerSetUp(const vector<Blob<Dtype>*>& bottom,
vector<Blob<Dtype>*>* top) {
ConvolutionParameter conv_param = this->layer_param_.convolution_param();
- // Check configuration.
CHECK(!conv_param.has_kernel_size() !=
!(conv_param.has_kernel_h() && conv_param.has_kernel_w()))
<< "Filter size is kernel_size OR kernel_h and kernel_w; not both";
if (bias_term_) {
bias_multiplier_.Reshape(1, 1, 1, N_);
caffe_set(N_, Dtype(1), bias_multiplier_.mutable_cpu_data());
}
this->param_propagate_down_.resize(this->blobs_.size(), true);
- // Default computation engine.
-#ifdef CAFFE_ENGINE
- if (conv_param.engine() == ConvolutionParameter_Engine_DEFAULT) {
- conv_param.set_engine(ConvolutionParameter_Engine_CAFFE);
- }
-#endif
}
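// Why the #ifdef CAFFE_ENGINE block above was dead code: conv_param is a
// by-value copy of the layer's stored ConvolutionParameter, so calling
// set_engine() on it never changed the layer's actual configuration. The
// same holds for the engine blocks removed from the layers below. A
// minimal, self-contained illustration of that copy semantics (all names
// here are hypothetical stand-ins, not the Caffe/protobuf API):
#include <cassert>
struct Param {
  int engine = 0;
  void set_engine(int e) { engine = e; }
};
struct LayerParam {
  Param conv;
  const Param& convolution_param() const { return conv; }
};
int main() {
  LayerParam lp;
  Param local = lp.convolution_param();  // by-value copy, like conv_param
  local.set_engine(1);                   // mutates only the local copy
  assert(lp.convolution_param().engine == 0);  // stored config is unchanged
  return 0;
}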
template <typename Dtype>
void PoolingLayer<Dtype>::LayerSetUp(const vector<Blob<Dtype>*>& bottom,
vector<Blob<Dtype>*>* top) {
// If stochastic pooling, initialize the random index blob.
if (this->layer_param_.pooling_param().pool() ==
PoolingParameter_PoolMethod_STOCHASTIC) {
rand_idx_.Reshape(bottom[0]->num(), channels_, pooled_height_,
pooled_width_);
}
- // Default computation engine.
-#ifdef CAFFE_ENGINE
- if (pool_param.engine() == PoolingParameter_Engine_DEFAULT) {
- pool_param.set_engine(PoolingParameter_Engine_CAFFE);
- }
-#endif
}
// TODO(Yangqing): Is there a faster way to do pooling in the channel-first case?
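// Background on rand_idx_: stochastic pooling picks one activation per
// pooling window with probability proportional to its magnitude, and the
// sampled index is stored so Backward can route the gradient to it. A
// simplified sketch of that sampling rule (sample_proportional is a
// hypothetical helper, not part of this diff):
template <typename Dtype>
int sample_proportional(const Dtype* value, int n, Dtype r) {
  // r is a uniform draw in [0, 1); values are assumed non-negative.
  Dtype sum = 0;
  for (int i = 0; i < n; ++i) sum += value[i];
  Dtype threshold = r * sum;
  Dtype cumsum = 0;
  for (int i = 0; i < n; ++i) {
    cumsum += value[i];
    if (cumsum >= threshold) return i;
  }
  return n - 1;  // guard against floating-point round-off
}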
template <typename Dtype>
void ReLULayer<Dtype>::Forward_cpu(const vector<Blob<Dtype>*>& bottom,
vector<Blob<Dtype>*>* top) {
- ReLUParameter relu_param = this->layer_param_.relu_param();
const Dtype* bottom_data = bottom[0]->cpu_data();
Dtype* top_data = (*top)[0]->mutable_cpu_data();
const int count = bottom[0]->count();
- Dtype negative_slope = relu_param.negative_slope();
+ Dtype negative_slope = this->layer_param_.relu_param().negative_slope();
for (int i = 0; i < count; ++i) {
top_data[i] = std::max(bottom_data[i], Dtype(0))
+ negative_slope * std::min(bottom_data[i], Dtype(0));
}
- // Default computation engine.
-#ifdef CAFFE_ENGINE
- if (relu_param.engine() == ReLUParameter_Engine_DEFAULT) {
- relu_param.set_engine(ReLUParameter_Engine_CAFFE);
- }
-#endif
}
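// For reference, the loop above computes the leaky ReLU
//   f(x) = max(x, 0) + negative_slope * min(x, 0),
// which reduces to the standard ReLU max(x, 0) when negative_slope == 0.
// A standalone sketch (leaky_relu is a hypothetical helper, not part of
// this diff):
#include <algorithm>
template <typename Dtype>
inline Dtype leaky_relu(Dtype x, Dtype negative_slope) {
  return std::max(x, Dtype(0)) + negative_slope * std::min(x, Dtype(0));
}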
template <typename Dtype>
void SigmoidLayer<Dtype>::Forward_cpu(const vector<Blob<Dtype>*>& bottom,
vector<Blob<Dtype>*>* top) {
- SigmoidParameter sigmoid_param = this->layer_param_.sigmoid_param();
const Dtype* bottom_data = bottom[0]->cpu_data();
Dtype* top_data = (*top)[0]->mutable_cpu_data();
const int count = bottom[0]->count();
for (int i = 0; i < count; ++i) {
top_data[i] = sigmoid(bottom_data[i]);
}
- // Default computation engine.
-#ifdef CAFFE_ENGINE
- if (sigmoid_param.engine() == SigmoidParameter_Engine_DEFAULT) {
- sigmoid_param.set_engine(SigmoidParameter_Engine_CAFFE);
- }
-#endif
}
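// For reference, sigmoid() above is the logistic function
// 1 / (1 + exp(-x)); the helper itself is defined elsewhere in the file.
// A standalone sketch under that assumption:
#include <cmath>
template <typename Dtype>
inline Dtype sigmoid_sketch(Dtype x) {
  return Dtype(1) / (Dtype(1) + std::exp(-x));
}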
template <typename Dtype>
void SoftmaxLayer<Dtype>::LayerSetUp(const vector<Blob<Dtype>*>& bottom,
vector<Blob<Dtype>*>* top) {
- SoftmaxParameter softmax_param = this->layer_param_.softmax_param();
(*top)[0]->Reshape(bottom[0]->num(), bottom[0]->channels(),
bottom[0]->height(), bottom[0]->width());
sum_multiplier_.Reshape(1, bottom[0]->channels(), 1, 1);
Dtype* multiplier_data = sum_multiplier_.mutable_cpu_data();
for (int i = 0; i < sum_multiplier_.count(); ++i) {
multiplier_data[i] = 1.;
}
scale_.Reshape(bottom[0]->num(), 1, bottom[0]->height(), bottom[0]->width());
- // Default computation engine.
-#ifdef CAFFE_ENGINE
- if (softmax_param.engine() == SoftmaxParameter_Engine_DEFAULT) {
- softmax_param.set_engine(SoftmaxParameter_Engine_CAFFE);
- }
-#endif
}
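// Why sum_multiplier_ is filled with ones: a (channels x spatial) matrix
// times a ones vector sums over channels in a single BLAS call, which the
// forward pass uses to normalize each spatial position. A loop-based
// sketch of that reduction (sum_over_channels is a hypothetical helper,
// not part of this diff):
template <typename Dtype>
void sum_over_channels(const Dtype* data, int channels, int spatial,
                       const Dtype* ones, Dtype* out) {
  for (int s = 0; s < spatial; ++s) {
    Dtype acc = 0;
    for (int c = 0; c < channels; ++c) {
      acc += ones[c] * data[c * spatial + s];  // ones[c] == 1
    }
    out[s] = acc;
  }
}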
template <typename Dtype>
void TanHLayer<Dtype>::Forward_cpu(const vector<Blob<Dtype>*>& bottom,
vector<Blob<Dtype>*>* top) {
- TanHParameter tanh_param = this->layer_param_.tanh_param();
const Dtype* bottom_data = bottom[0]->cpu_data();
Dtype* top_data = (*top)[0]->mutable_cpu_data();
Dtype exp2x;
const int count = bottom[0]->count();
for (int i = 0; i < count; ++i) {
exp2x = exp(2 * bottom_data[i]);
top_data[i] = (exp2x - Dtype(1)) / (exp2x + Dtype(1));
}
- // Default computation engine.
-#ifdef CAFFE_ENGINE
- if (tanh_param.engine() == TanHParameter_Engine_DEFAULT) {
- tanh_param.set_engine(TanHParameter_Engine_CAFFE);
- }
-#endif
}
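// Numerical note: (exp2x - 1) / (exp2x + 1) equals tanh(x) algebraically,
// but exp(2 * x) overflows to infinity for large positive x, making the
// quotient inf/inf == NaN. std::tanh sidesteps this; a sketch of the
// safer form (tanh_safe is a hypothetical helper, not part of this diff):
#include <cmath>
template <typename Dtype>
inline Dtype tanh_safe(Dtype x) {
  return std::tanh(x);  // well-behaved for large |x|
}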
template <typename Dtype>