// Copyright 2013 Yangqing Jia

#include <algorithm>
#include <cmath>
#include <cstring>

#include "caffe/layer.hpp"
#include "caffe/vision_layers.hpp"
#include "caffe/util/math_functions.hpp"

using std::max;

namespace caffe {
template <typename Dtype>
void SoftmaxLayer<Dtype>::SetUp(const vector<Blob<Dtype>*>& bottom,
      vector<Blob<Dtype>*>* top) {
  CHECK_EQ(bottom.size(), 1) << "Softmax Layer takes a single blob as input.";
  CHECK_EQ(top->size(), 1) << "Softmax Layer takes a single blob as output.";
  // The output has the same shape as the input.
  (*top)[0]->Reshape(bottom[0]->num(), bottom[0]->channels(),
      bottom[0]->height(), bottom[0]->width());
  // sum_multiplier_ is a vector of ones, used by the BLAS calls below to sum
  // over the softmax dimension.
  sum_multiplier_.Reshape(1, bottom[0]->channels(),
      bottom[0]->height(), bottom[0]->width());
  Dtype* multiplier_data = sum_multiplier_.mutable_cpu_data();
  for (int i = 0; i < sum_multiplier_.count(); ++i) {
    multiplier_data[i] = 1.;
  }
  // scale_ holds one intermediate scalar (the max, then the sum) per example.
  scale_.Reshape(bottom[0]->num(), 1, 1, 1);
}
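
// Added illustrative sketch (not part of the original layer): the reason
// sum_multiplier_ is all ones is that multiplying a matrix by a ones-vector
// sums each row, so the BLAS calls in Forward_cpu can do per-example
// reductions. The hypothetical helper below spells out the same row-sum that
// caffe_cpu_gemv computes against sum_multiplier_.
template <typename Dtype>
void rowwise_sum_sketch(const int num, const int dim, const Dtype* data,
    Dtype* out) {
  for (int i = 0; i < num; ++i) {
    out[i] = 0;
    for (int j = 0; j < dim; ++j) {
      out[i] += data[i * dim + j];  // dot(row_i, ones) == sum of row_i
    }
  }
}
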
template <typename Dtype>
void SoftmaxLayer<Dtype>::Forward_cpu(const vector<Blob<Dtype>*>& bottom,
    vector<Blob<Dtype>*>* top) {
  const Dtype* bottom_data = bottom[0]->cpu_data();
  Dtype* top_data = (*top)[0]->mutable_cpu_data();
  Dtype* scale_data = scale_.mutable_cpu_data();
  int num = bottom[0]->num();
  int dim = bottom[0]->count() / bottom[0]->num();
  memcpy(top_data, bottom_data, sizeof(Dtype) * bottom[0]->count());
  // We need to subtract the max to avoid numerical issues, compute the exp,
  // and then normalize.
  // Compute the per-example max.
  for (int i = 0; i < num; ++i) {
    scale_data[i] = bottom_data[i * dim];
    for (int j = 0; j < dim; ++j) {
      scale_data[i] = max(scale_data[i], bottom_data[i * dim + j]);
    }
  }
  // Subtract the max from each example via a rank-one update:
  // top_data -= scale_data * ones^T.
  caffe_cpu_gemm<Dtype>(CblasNoTrans, CblasNoTrans, num, dim, 1, -1.,
      scale_data, sum_multiplier_.cpu_data(), 1., top_data);
  // Perform exponentiation.
  caffe_exp<Dtype>(num * dim, top_data, top_data);
  // Sum after exp: scale_data[i] becomes the normalizer of example i.
  caffe_cpu_gemv<Dtype>(CblasNoTrans, num, dim, 1., top_data,
      sum_multiplier_.cpu_data(), 0., scale_data);
  // Divide each example by its normalizer.
  for (int i = 0; i < num; ++i) {
    caffe_scal<Dtype>(dim, Dtype(1.) / scale_data[i], top_data + i * dim);
  }
}
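
// Added reference sketch (not used by the layer): the scalar computation that
// the BLAS pipeline in Forward_cpu performs for a single example. Subtracting
// the max is mathematically a no-op, since
// exp(x_j - m) / sum_k exp(x_k - m) == exp(x_j) / sum_k exp(x_k),
// but it keeps exp() from overflowing for large inputs.
template <typename Dtype>
void softmax_forward_sketch(const int dim, const Dtype* in, Dtype* out) {
  Dtype m = in[0];
  for (int j = 1; j < dim; ++j) m = max(m, in[j]);  // per-example max
  Dtype sum = 0;
  for (int j = 0; j < dim; ++j) {
    out[j] = std::exp(in[j] - m);  // shifted exponentiation
    sum += out[j];
  }
  for (int j = 0; j < dim; ++j) out[j] /= sum;  // normalize to sum to 1
}
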
template <typename Dtype>
Dtype SoftmaxLayer<Dtype>::Backward_cpu(const vector<Blob<Dtype>*>& top,
    const bool propagate_down,
    vector<Blob<Dtype>*>* bottom) {
  const Dtype* top_diff = top[0]->cpu_diff();
  const Dtype* top_data = top[0]->cpu_data();
  Dtype* bottom_diff = (*bottom)[0]->mutable_cpu_diff();
  Dtype* scale_data = scale_.mutable_cpu_data();
  int num = top[0]->num();
  int dim = top[0]->count() / top[0]->num();
  memcpy(bottom_diff, top_diff, sizeof(Dtype) * top[0]->count());
  // Compute inner1d(top_diff, top_data) per example and subtract it from the
  // bottom diff.
  for (int i = 0; i < num; ++i) {
    scale_data[i] = caffe_cpu_dot<Dtype>(dim, top_diff + i * dim,
        top_data + i * dim);
  }
  // Rank-one update: bottom_diff -= scale_data * ones^T.
  caffe_cpu_gemm<Dtype>(CblasNoTrans, CblasNoTrans, num, dim, 1, -1.,
      scale_data, sum_multiplier_.cpu_data(), 1., bottom_diff);
  // Elementwise multiplication by the softmax output finishes the gradient.
  caffe_mul<Dtype>(top[0]->count(), bottom_diff, top_data, bottom_diff);
  // Softmax computes no loss of its own.
  return Dtype(0);
}
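
// Added gradient sketch (not used by the layer): with y = softmax(x), the
// chain rule gives dL/dx_j = y_j * (dL/dy_j - sum_k dL/dy_k * y_k), which is
// exactly what Backward_cpu evaluates with a dot product, a rank-one
// subtraction, and an elementwise product. The loop form for one example:
template <typename Dtype>
void softmax_backward_sketch(const int dim, const Dtype* top_data,
    const Dtype* top_diff, Dtype* bottom_diff) {
  Dtype dot = 0;
  for (int k = 0; k < dim; ++k) {
    dot += top_diff[k] * top_data[k];  // inner1d(top_diff, top_data)
  }
  for (int j = 0; j < dim; ++j) {
    bottom_diff[j] = top_data[j] * (top_diff[j] - dot);
  }
}
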
// TODO: implement the GPU version of softmax.

INSTANTIATE_CLASS(SoftmaxLayer);

}  // namespace caffe
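
// Added usage sketch (hypothetical; assumes the Layer interface declared in
// caffe/layer.hpp and a default-constructed LayerParameter):
//
//   caffe::LayerParameter param;
//   caffe::SoftmaxLayer<float> layer(param);
//   caffe::Blob<float> input(2, 10, 1, 1);   // 2 examples, 10 classes each
//   caffe::Blob<float> output;
//   std::vector<caffe::Blob<float>*> bottom(1, &input);
//   std::vector<caffe::Blob<float>*> top(1, &output);
//   layer.SetUp(bottom, &top);    // output reshaped to 2 x 10 x 1 x 1
//   layer.Forward(bottom, &top);  // each row of output now sums to 1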