int pooled_height_;
int pooled_width_;
Blob<Dtype> rand_idx_;
- shared_ptr<SyncedMemory> max_idx_;
+ shared_ptr<Blob<int> > max_idx_;
};
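// Editor's aside (not part of the patch): the member change above is what
// makes the cast removals in the hunks below possible. A SyncedMemory is an
// untyped byte buffer, so callers must size it by hand and cast on every
// access:
//
//   max_idx_.reset(new SyncedMemory(count * sizeof(int)));
//   int* mask = static_cast<int*>(max_idx_->mutable_cpu_data());
//
// whereas Blob<int> is sized in elements, carries its own shape, and hands
// back typed pointers directly:
//
//   max_idx_.reset(new Blob<int>(num, channels, height, width));
//   int* mask = max_idx_->mutable_cpu_data();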
/* SoftmaxLayer
diff_ = other.diff();
}
+template <>
+void Blob<int>::Update() {
+ // The "update" method is used for parameter blobs in a Net, which are stored
+ // as Blob<float>s or Blob<double>s -- hence we do not define it for
+ // Blob<int>s.
+ NOT_IMPLEMENTED;
+}
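// Editor's aside (not part of the patch): the hunk above relies on full
// specialization of a single member function of a class template. A minimal
// self-contained sketch of the same pattern, with hypothetical names:
//
//   #include <iostream>
//
//   template <typename T>
//   struct Buffer {
//     void Update() { std::cout << "generic Update\n"; }
//   };
//
//   // Full specialization: replaces Buffer<int>::Update only; every other
//   // member of Buffer<int> still comes from the primary template.
//   template <>
//   void Buffer<int>::Update() { std::cout << "Update disabled for int\n"; }
//
//   int main() {
//     Buffer<double>().Update();  // prints "generic Update"
//     Buffer<int>().Update();     // prints "Update disabled for int"
//   }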
+
template <typename Dtype>
void Blob<Dtype>::Update() {
// We will perform update based on where the data is located.
}
INSTANTIATE_CLASS(Blob);
+template class Blob<int>;
} // namespace caffe
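// Editor's aside (not part of the patch): INSTANTIATE_CLASS(Blob) expands to
// explicit instantiations for float and double; the added line extends the
// same mechanism to int, so all of Blob<int>'s members are compiled into
// this translation unit and the pooling code can link against them. Sketch
// of the mechanism, with hypothetical names:
//
//   template <typename T>
//   struct Buffer {
//     T value;
//     T Get() const { return value; }
//   };
//
//   // Explicit instantiation definitions: force the compiler to emit all
//   // members of these specializations here, even if unused in this file.
//   template struct Buffer<float>;
//   template struct Buffer<double>;
//   template struct Buffer<int>;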
// If max pooling, we will initialize the vector index part.
if (this->layer_param_.pooling_param().pool() ==
PoolingParameter_PoolMethod_MAX) {
- max_idx_.reset(new SyncedMemory((*top)[0]->count() * sizeof(int)));
+ max_idx_.reset(new Blob<int>(bottom[0]->num(), channels_,
+ pooled_height_, pooled_width_));
}
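// Editor's aside (not part of the patch): the mask blob is given the same
// N x C x pooled_H x pooled_W shape as the pooled output, so the argmax
// index for each output element lives at the same flat offset as the output
// value itself. Hedged sketch of that offset under Caffe's NCHW layout:
//
//   int pooled_offset(int n, int c, int ph, int pw,
//                     int channels, int pooled_height, int pooled_width) {
//     return ((n * channels + c) * pooled_height + ph) * pooled_width + pw;
//   }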
// If stochastic pooling, we will initialize the random index part.
if (this->layer_param_.pooling_param().pool() ==
switch (this->layer_param_.pooling_param().pool()) {
case PoolingParameter_PoolMethod_MAX:
// Initialize
- mask = static_cast<int*>(max_idx_->mutable_cpu_data());
+ mask = max_idx_->mutable_cpu_data();
for (int i = 0; i < top_count; ++i) {
top_data[i] = -FLT_MAX;
mask[i] = -1;
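// Editor's aside (not part of the patch): seeding top_data with -FLT_MAX and
// mask with -1 means the first real input examined always wins the max, and
// -1 can never collide with a valid index. A hedged sketch of the
// compare-and-record step the pooling loop performs per window element:
//
//   #include <cfloat>
//
//   inline void consider(const float* bottom, int index,
//                        float* top_val, int* mask) {
//     if (bottom[index] > *top_val) {
//       *top_val = bottom[index];  // new maximum for this window
//       *mask = index;             // remember where it came from
//     }
//   }
//
//   // Usage: seed exactly as the loop above does, then scan the window:
//   //   float best = -FLT_MAX; int where = -1;
//   //   consider(bottom, idx, &best, &where);  // for each idx in window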
switch (this->layer_param_.pooling_param().pool()) {
case PoolingParameter_PoolMethod_MAX:
// The main loop
- mask = static_cast<const int*>(max_idx_->cpu_data());
+ mask = max_idx_->cpu_data();
for (int n = 0; n < top[0]->num(); ++n) {
for (int c = 0; c < channels_; ++c) {
for (int ph = 0; ph < pooled_height_; ++ph) {
int* mask;
switch (this->layer_param_.pooling_param().pool()) {
case PoolingParameter_PoolMethod_MAX:
- mask = static_cast<int*>(max_idx_->mutable_gpu_data());
+ mask = max_idx_->mutable_gpu_data();
// NOLINT_NEXT_LINE(whitespace/operators)
MaxPoolForward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>(
count, bottom_data, bottom[0]->num(), channels_,
const int* mask;
switch (this->layer_param_.pooling_param().pool()) {
case PoolingParameter_PoolMethod_MAX:
- mask = static_cast<const int*>(max_idx_->gpu_data());
+ mask = max_idx_->gpu_data();
caffe_gpu_set(count, Dtype(0.), bottom_diff);
// NOLINT_NEXT_LINE(whitespace/operators)
MaxPoolBackward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>(
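// Editor's aside (not part of the patch): MaxPoolBackward routes each top
// gradient back to the single bottom element whose index the mask recorded
// during the forward pass, after bottom_diff is zeroed by the caffe_gpu_set
// call above. A hedged, simplified CPU analogue over one channel slice (the
// CUDA kernel organizes the same computation for parallel execution):
//
//   void max_pool_backward_slice(const float* top_diff, const int* mask,
//                                int top_count, float* bottom_diff) {
//     // bottom_diff is assumed pre-zeroed, matching caffe_gpu_set above.
//     for (int i = 0; i < top_count; ++i) {
//       bottom_diff[mask[i]] += top_diff[i];  // windows may overlap
//     }
//   }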