Add gradient checks for infogain loss layer, letting it take the
infogain matrix as the third bottom blob.

author    Jeff Donahue <jeff.donahue@gmail.com>
          Thu, 24 Jul 2014 02:22:11 +0000 (19:22 -0700)
committer Jeff Donahue <jeff.donahue@gmail.com>
          Fri, 25 Jul 2014 21:34:34 +0000 (14:34 -0700)
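With this change, the infogain matrix H may be supplied at runtime as a third
bottom blob instead of being loaded from the infogain_loss_param source file.
A minimal usage sketch (illustrative only, not part of this commit; it assumes
this era's SetUp(bottom, top) layer interface, and the blob shapes mirror
those in the new test):

    #include <vector>
    #include "caffe/blob.hpp"
    #include "caffe/loss_layers.hpp"

    using caffe::Blob;
    using caffe::InfogainLossLayer;
    using caffe::LayerParameter;

    void ThreeBottomInfogainExample() {
      Blob<float> prob(10, 5, 1, 1);     // predicted probabilities: N=10, D=5
      Blob<float> label(10, 1, 1, 1);    // ground-truth class indices
      Blob<float> infogain(1, 1, 5, 5);  // the D x D infogain matrix H
      std::vector<Blob<float>*> bottom;
      bottom.push_back(&prob);
      bottom.push_back(&label);
      bottom.push_back(&infogain);       // third bottom: no source file needed
      std::vector<Blob<float>*> top;
      LayerParameter param;              // infogain_loss_param source left unset
      InfogainLossLayer<float> layer(param);
      layer.SetUp(bottom, &top);         // shape checks run in FurtherSetUp
    }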

src/caffe/layers/infogain_loss_layer.cpp
src/caffe/test/test_infogain_loss_layer.cpp [new file with mode: 0644]

diff --git a/src/caffe/layers/infogain_loss_layer.cpp b/src/caffe/layers/infogain_loss_layer.cpp
index 204f6c3..4b02f4e 100644
--- a/src/caffe/layers/infogain_loss_layer.cpp
+++ b/src/caffe/layers/infogain_loss_layer.cpp
@@ -18,14 +18,24 @@ void InfogainLossLayer<Dtype>::FurtherSetUp(
   CHECK_EQ(bottom[1]->channels(), 1);
   CHECK_EQ(bottom[1]->height(), 1);
   CHECK_EQ(bottom[1]->width(), 1);
-
-  BlobProto blob_proto;
-  ReadProtoFromBinaryFile(
-    this->layer_param_.infogain_loss_param().source(), &blob_proto);
-  infogain_.FromProto(blob_proto);
-  CHECK_EQ(infogain_.num(), 1);
-  CHECK_EQ(infogain_.channels(), 1);
-  CHECK_EQ(infogain_.height(), infogain_.width());
+  Blob<Dtype>* infogain = NULL;
+  if (bottom.size() < 3) {
+    CHECK(this->layer_param_.infogain_loss_param().has_source())
+        << "Infogain matrix source must be specified.";
+    BlobProto blob_proto;
+    ReadProtoFromBinaryFile(
+      this->layer_param_.infogain_loss_param().source(), &blob_proto);
+    infogain_.FromProto(blob_proto);
+    infogain = &infogain_;
+  } else {
+    infogain = bottom[2];
+  }
+  const int num = bottom[0]->num();
+  const int dim = bottom[0]->count() / num;
+  CHECK_EQ(infogain->num(), 1);
+  CHECK_EQ(infogain->channels(), 1);
+  CHECK_EQ(infogain->height(), dim);
+  CHECK_EQ(infogain->width(), dim);
 }
 
 
@@ -34,10 +44,14 @@ Dtype InfogainLossLayer<Dtype>::Forward_cpu(const vector<Blob<Dtype>*>& bottom,
     vector<Blob<Dtype>*>* top) {
   const Dtype* bottom_data = bottom[0]->cpu_data();
   const Dtype* bottom_label = bottom[1]->cpu_data();
-  const Dtype* infogain_mat = infogain_.cpu_data();
+  const Dtype* infogain_mat = NULL;
+  if (bottom.size() < 3) {
+    infogain_mat = infogain_.cpu_data();
+  } else {
+    infogain_mat = bottom[2]->cpu_data();
+  }
   int num = bottom[0]->num();
   int dim = bottom[0]->count() / bottom[0]->num();
-  CHECK_EQ(infogain_.height(), dim);
   Dtype loss = 0;
   for (int i = 0; i < num; ++i) {
     int label = static_cast<int>(bottom_label[i]);
@@ -46,10 +60,11 @@ Dtype InfogainLossLayer<Dtype>::Forward_cpu(const vector<Blob<Dtype>*>& bottom,
       loss -= infogain_mat[label * dim + j] * log(prob);
     }
   }
+  loss /= num;
   if (top->size() == 1) {
-    (*top)[0]->mutable_cpu_data()[0] = loss / num;
+    (*top)[0]->mutable_cpu_data()[0] = loss;
   }
-  return loss / num;
+  return loss;
 }
 
 template <typename Dtype>
@@ -60,14 +75,22 @@ void InfogainLossLayer<Dtype>::Backward_cpu(const vector<Blob<Dtype>*>& top,
     LOG(FATAL) << this->type_name()
                << " Layer cannot backpropagate to label inputs.";
   }
+  if (propagate_down.size() > 2 && propagate_down[2]) {
+    LOG(FATAL) << this->type_name()
+               << " Layer cannot backpropagate to infogain inputs.";
+  }
   if (propagate_down[0]) {
     const Dtype* bottom_data = (*bottom)[0]->cpu_data();
     const Dtype* bottom_label = (*bottom)[1]->cpu_data();
-    const Dtype* infogain_mat = infogain_.cpu_data();
+    const Dtype* infogain_mat = NULL;
+    if (bottom->size() < 3) {
+      infogain_mat = infogain_.cpu_data();
+    } else {
+      infogain_mat = (*bottom)[2]->cpu_data();
+    }
     Dtype* bottom_diff = (*bottom)[0]->mutable_cpu_diff();
     int num = (*bottom)[0]->num();
     int dim = (*bottom)[0]->count() / (*bottom)[0]->num();
-    CHECK_EQ(infogain_.height(), dim);
     for (int i = 0; i < num; ++i) {
       int label = static_cast<int>(bottom_label[i]);
       for (int j = 0; j < dim; ++j) {
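For reference, the forward pass above computes the batch-averaged infogain loss

    L = -(1/N) * sum_{i=1..N} sum_{j=1..D} H[l_i][j] * log(p_{ij})

where N = num, D = dim, l_i is the label of sample i, p_{ij} is the predicted
probability of class j for sample i, and H is the infogain matrix (whether
read from the source file or taken from bottom[2]). Differentiating gives the
gradient the backward pass must write into bottom_diff,

    dL/dp_{ij} = -H[l_i][j] / (N * p_{ij})

and it is this quantity that the gradient checker in the new test below
verifies numerically against finite differences.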
diff --git a/src/caffe/test/test_infogain_loss_layer.cpp b/src/caffe/test/test_infogain_loss_layer.cpp
new file mode 100644
index 0000000..99bad26
--- /dev/null
+++ b/src/caffe/test/test_infogain_loss_layer.cpp
@@ -0,0 +1,67 @@
+// Copyright 2014 BVLC and contributors.
+
+#include <cmath>
+#include <cstdlib>
+#include <cstring>
+#include <vector>
+
+#include "gtest/gtest.h"
+#include "caffe/blob.hpp"
+#include "caffe/common.hpp"
+#include "caffe/filler.hpp"
+#include "caffe/loss_layers.hpp"
+#include "caffe/test/test_gradient_check_util.hpp"
+
+#include "caffe/test/test_caffe_main.hpp"
+
+namespace caffe {
+
+template <typename TypeParam>
+class InfogainLossLayerTest : public MultiDeviceTest<TypeParam> {
+  typedef typename TypeParam::Dtype Dtype;
+
+ protected:
+  InfogainLossLayerTest()
+      : blob_bottom_data_(new Blob<Dtype>(10, 5, 1, 1)),
+        blob_bottom_label_(new Blob<Dtype>(10, 1, 1, 1)),
+        blob_bottom_infogain_(new Blob<Dtype>(1, 1, 5, 5)) {
+    Caffe::set_random_seed(1701);
+    FillerParameter filler_param;
+    PositiveUnitballFiller<Dtype> filler(filler_param);
+    filler.Fill(this->blob_bottom_data_);
+    blob_bottom_vec_.push_back(blob_bottom_data_);
+    for (int i = 0; i < blob_bottom_label_->count(); ++i) {
+      blob_bottom_label_->mutable_cpu_data()[i] = caffe_rng_rand() % 5;
+    }
+    blob_bottom_vec_.push_back(blob_bottom_label_);
+    filler_param.set_min(0.1);
+    filler_param.set_max(2.0);
+    UniformFiller<Dtype> infogain_filler(filler_param);
+    infogain_filler.Fill(this->blob_bottom_infogain_);
+    blob_bottom_vec_.push_back(blob_bottom_infogain_);
+  }
+  virtual ~InfogainLossLayerTest() {
+    delete blob_bottom_data_;
+    delete blob_bottom_label_;
+    delete blob_bottom_infogain_;
+  }
+  Blob<Dtype>* const blob_bottom_data_;
+  Blob<Dtype>* const blob_bottom_label_;
+  Blob<Dtype>* const blob_bottom_infogain_;
+  vector<Blob<Dtype>*> blob_bottom_vec_;
+  vector<Blob<Dtype>*> blob_top_vec_;
+};
+
+TYPED_TEST_CASE(InfogainLossLayerTest, TestDtypesAndDevices);
+
+
+TYPED_TEST(InfogainLossLayerTest, TestGradient) {
+  typedef typename TypeParam::Dtype Dtype;
+  LayerParameter layer_param;
+  InfogainLossLayer<Dtype> layer(layer_param);
+  GradientChecker<Dtype> checker(1e-4, 2e-2, 1701, 1, 0.01);
+  checker.CheckGradientSingle(&layer, &(this->blob_bottom_vec_),
+      &(this->blob_top_vec_), 0, -1, -1);
+}
+
+}  // namespace caffe
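The new test covers only the gradient; a forward-value check in the same style
might look like the sketch below (not part of this commit; it reuses the
fixture above, recomputes the expected loss directly from the bottom blobs,
and assumes this era's Forward(bottom, top) returns the loss, as Forward_cpu
above does):

    TYPED_TEST(InfogainLossLayerTest, TestForwardSketch) {
      typedef typename TypeParam::Dtype Dtype;
      LayerParameter layer_param;
      InfogainLossLayer<Dtype> layer(layer_param);
      layer.SetUp(this->blob_bottom_vec_, &(this->blob_top_vec_));
      const Dtype layer_loss =
          layer.Forward(this->blob_bottom_vec_, &(this->blob_top_vec_));
      const int num = this->blob_bottom_data_->num();
      const int dim = this->blob_bottom_data_->count() / num;
      const Dtype* data = this->blob_bottom_data_->cpu_data();
      const Dtype* labels = this->blob_bottom_label_->cpu_data();
      const Dtype* infogain = this->blob_bottom_infogain_->cpu_data();
      // Recompute the batch-averaged loss exactly as Forward_cpu does.
      Dtype expected = 0;
      for (int i = 0; i < num; ++i) {
        const int label = static_cast<int>(labels[i]);
        for (int j = 0; j < dim; ++j) {
          expected -= infogain[label * dim + j] * log(data[i * dim + j]);
        }
      }
      expected /= num;
      EXPECT_NEAR(layer_loss, expected, 1e-4);
    }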