*_pb2.py
# test files
-src/test_caffeine
+*.testbin
# vim swp files
*.swp
TEST_NAME := test_$(PROJECT)
CXX_SRCS := $(shell find caffeine ! -name "test_*.cpp" -name "*.cpp")
CU_SRCS := $(shell find caffeine -name "*.cu")
-TEST_SRCS := $(shell find caffeine -name "test_*.cpp") gtest/gtest-all.cpp
+TEST_SRCS := $(shell find caffeine -name "test_*.cpp")
+GTEST_SRC := gtest/gtest-all.cpp
PROTO_SRCS := $(wildcard caffeine/proto/*.proto)
PROTO_GEN_HEADER := ${PROTO_SRCS:.proto=.pb.h}
PROTO_GEN_CC := ${PROTO_SRCS:.proto=.pb.cc}
PROTO_OBJS := ${PROTO_SRCS:.proto=.pb.o}
OBJS := $(PROTO_OBJS) $(CXX_OBJS) $(CU_OBJS)
TEST_OBJS := ${TEST_SRCS:.cpp=.o}
+GTEST_OBJ := ${GTEST_SRC:.cpp=.o}
+TEST_BINS := ${TEST_OBJS:.o=.testbin}
CUDA_DIR := /usr/local/cuda
CUDA_ARCH := -arch=sm_20
linecount: clean
cloc --read-lang-def=caffeine.cloc caffeine/
-test: $(TEST_NAME)
-
-$(TEST_NAME): $(OBJS) $(TEST_OBJS)
- $(CXX) $(OBJS) $(TEST_OBJS) -o $(TEST_NAME) $(LDFLAGS) $(WARNINGS)
+# `test` builds every per-file test binary (one binary per test_*.cpp,
+# each linked against the library objects plus gtest).
+.PHONY: test
+test: $(OBJS) $(GTEST_OBJ) $(TEST_BINS)
+
+# Static pattern rule: link each test object into its own test binary.
+# The target pattern must be %.testbin to match the suffix used when
+# deriving TEST_BINS (TEST_BINS := ${TEST_OBJS:.o=.testbin}); with the
+# previous %.test pattern make fails with
+# "target `...testbin' does not match the target pattern".
+$(TEST_BINS): %.testbin : %.o
+	$(CXX) $< $(OBJS) $(GTEST_OBJ) -o $@ $(LDFLAGS) $(WARNINGS)
$(NAME): $(PROTO_GEN_CC) $(OBJS)
$(LINK) -shared $(OBJS) -o $(NAME)
protoc $(PROTO_SRCS) --cpp_out=. --python_out=.
clean:
-	@- $(RM) $(NAME) $(TEST_NAME)
-	@- $(RM) $(OBJS) $(TEST_OBJS)
+	@- $(RM) $(NAME) $(TEST_BINS)
+# Also remove the gtest object introduced by this change, or it lingers
+# after `make clean` and can mask stale-gtest link problems.
+	@- $(RM) $(OBJS) $(TEST_OBJS) $(GTEST_OBJ)
 	@- $(RM) $(PROTO_GEN_HEADER) $(PROTO_GEN_CC) $(PROTO_GEN_PY)
distclean: clean
#include "caffeine/blob.hpp"
#include "caffeine/filler.hpp"
+#include "caffeine/test/test_caffeine_main.hpp"
+
namespace caffeine {
template <typename Dtype>
--- /dev/null
+// The main caffeine test code. Your test cpp code should include this hpp
+// to allow a main function to be compiled into the binary.
+#ifndef CAFFEINE_TEST_TEST_CAFFEINE_MAIN_HPP_
+#define CAFFEINE_TEST_TEST_CAFFEINE_MAIN_HPP_
+
+#include <cstdlib>
+#include <cstdio>
+#include <iostream>
+
+#include <cuda_runtime.h>
+#include <glog/logging.h>
+#include <gtest/gtest.h>
+
+namespace caffeine {
+
+cudaDeviceProp CAFFEINE_TEST_CUDA_PROP;
+
+} // namespace caffeine
+
+using namespace caffeine;
+using namespace std;
+
+int main(int argc, char** argv) {
+ ::testing::InitGoogleTest(&argc, argv);
+ ::google::InitGoogleLogging(argv[0]);
+  // Before starting testing, let's first print out a few cuda device info.
+ int device;
+ cudaGetDeviceCount(&device);
+ cout << "Cuda number of devices: " << device << endl;
+ if (argc > 1) {
+ // Use the given device
+ device = atoi(argv[1]);
+ cudaSetDevice(device);
+ cout << "Setting to use device " << device << endl;
+ }
+ cudaGetDevice(&device);
+ cout << "Current device id: " << device << endl;
+ cudaGetDeviceProperties(&CAFFEINE_TEST_CUDA_PROP, device);
+ // invoke the test.
+ return RUN_ALL_TESTS();
+}
+
+#endif // CAFFEINE_TEST_TEST_CAFFEINE_MAIN_HPP_
#include "caffeine/common.hpp"
#include "caffeine/syncedmem.hpp"
+#include "caffeine/test/test_caffeine_main.hpp"
+
namespace caffeine {
class CommonTest : public ::testing::Test {};
#include "gtest/gtest.h"
#include "caffeine/filler.hpp"
+#include "caffeine/test/test_caffeine_main.hpp"
+
namespace caffeine {
typedef ::testing::Types<float, double> Dtypes;
+++ /dev/null
-#include <algorithm>
-#include <cmath>
-#include <glog/logging.h>
-#include <gtest/gtest.h>
-#include "caffeine/test/test_gradient_check_util.hpp"
-
-using std::max;
-
-namespace caffeine {
-
-template <typename Dtype>
-void GradientChecker<Dtype>::CheckGradientSingle(Layer<Dtype>& layer,
- vector<Blob<Dtype>*>& bottom, vector<Blob<Dtype>*>& top,
- int check_bottom, int top_id, int top_data_id) {
- // First, figure out what blobs we need to check against.
- vector<Blob<Dtype>*> blobs_to_check;
- for (int i = 0; i < layer.params().size(); ++i) {
- blobs_to_check.push_back(&layer.params()[i]);
- }
- if (check_bottom < 0) {
- for (int i = 0; i < bottom.size(); ++i) {
- blobs_to_check.push_back(bottom[i]);
- }
- } else {
- CHECK(check_bottom < bottom.size());
- blobs_to_check.push_back(bottom[check_bottom]);
- }
- // go through the bottom and parameter blobs
- //LOG(ERROR) << "Checking " << blobs_to_check.size() << " blobs.";
- for (int blobid = 0; blobid < blobs_to_check.size(); ++blobid) {
- Blob<Dtype>* current_blob = blobs_to_check[blobid];
- //LOG(ERROR) << "Blob " << blobid << ": checking " << current_blob->count()
- // << " parameters.";
- // go through the values
- for (int feat_id = 0; feat_id < current_blob->count(); ++feat_id) {
- // First, obtain the original data
- Caffeine::set_random_seed(seed_);
- layer.Forward(bottom, &top);
- Dtype computed_objective = GetObjAndGradient(top, top_id, top_data_id);
- // Get any additional loss from the layer
- computed_objective += layer.Backward(top, true, &bottom);
- Dtype computed_gradient = current_blob->cpu_diff()[feat_id];
- // compute score by adding stepsize
- current_blob->mutable_cpu_data()[feat_id] += stepsize_;
- Caffeine::set_random_seed(seed_);
- layer.Forward(bottom, &top);
- Dtype positive_objective = GetObjAndGradient(top, top_id, top_data_id);
- positive_objective += layer.Backward(top, true, &bottom);
- // compute score by subtracting stepsize
- current_blob->mutable_cpu_data()[feat_id] -= stepsize_ * 2;
- Caffeine::set_random_seed(seed_);
- layer.Forward(bottom, &top);
- Dtype negative_objective = GetObjAndGradient(top, top_id, top_data_id);
- negative_objective += layer.Backward(top, true, &bottom);
- // Recover stepsize
- current_blob->mutable_cpu_data()[feat_id] += stepsize_;
- Dtype estimated_gradient = (positive_objective - negative_objective) /
- stepsize_ / 2.;
- Dtype feature = current_blob->cpu_data()[feat_id];
- //LOG(ERROR) << "debug: " << current_blob->cpu_data()[feat_id] << " "
- // << current_blob->cpu_diff()[feat_id];
- if (kink_ - kink_range_ > feature || feature > kink_ + kink_range_) {
- // We check relative accuracy, but for too small values, we threshold
- // the scale factor by 1.
- Dtype scale = max(max(fabs(computed_gradient), fabs(estimated_gradient)),
- 1.);
- EXPECT_GT(computed_gradient, estimated_gradient - threshold_ * scale);
- EXPECT_LT(computed_gradient, estimated_gradient + threshold_ * scale);
- }
- //LOG(ERROR) << "Feature: " << current_blob->cpu_data()[feat_id];
- //LOG(ERROR) << "computed gradient: " << computed_gradient
- // << " estimated_gradient: " << estimated_gradient;
- }
- }
-}
-
-template <typename Dtype>
-void GradientChecker<Dtype>::CheckGradientExhaustive(Layer<Dtype>& layer,
- vector<Blob<Dtype>*>& bottom, vector<Blob<Dtype>*>& top, int check_bottom) {
- layer.SetUp(bottom, &top);
- //LOG(ERROR) << "Exhaustive Mode.";
- for (int i = 0; i < top.size(); ++i) {
- //LOG(ERROR) << "Exhaustive: blob " << i << " size " << top[i]->count();
- for (int j = 0; j < top[i]->count(); ++j) {
- //LOG(ERROR) << "Exhaustive: blob " << i << " data " << j;
- CheckGradientSingle(layer, bottom, top, check_bottom, i, j);
- }
- }
-}
-
-template <typename Dtype>
-Dtype GradientChecker<Dtype>::GetObjAndGradient(vector<Blob<Dtype>*>& top,
- int top_id, int top_data_id) {
- Dtype loss = 0;
- if (top_id < 0) {
- // the loss will be half of the sum of squares of all outputs
- for (int i = 0; i < top.size(); ++i) {
- Blob<Dtype>* top_blob = top[i];
- const Dtype* top_blob_data = top_blob->cpu_data();
- Dtype* top_blob_diff = top_blob->mutable_cpu_diff();
- int count = top_blob->count();
- for (int j = 0; j < count; ++j) {
- loss += top_blob_data[j] * top_blob_data[j];
- }
- // set the diff: simply the data.
- memcpy(top_blob_diff, top_blob_data, sizeof(Dtype) * top_blob->count());
- }
- loss /= 2.;
- } else {
- // the loss will be the top_data_id-th element in the top_id-th blob.
- for (int i = 0; i < top.size(); ++i) {
- Blob<Dtype>* top_blob = top[i];
- Dtype* top_blob_diff = top_blob->mutable_cpu_diff();
- memset(top_blob_diff, 0, sizeof(Dtype) * top_blob->count());
- }
- loss = top[top_id]->cpu_data()[top_data_id];
- top[top_id]->mutable_cpu_diff()[top_data_id] = 1.;
- }
- return loss;
-}
-
-INSTANTIATE_CLASS(GradientChecker);
-
-} // namespace caffeine
#ifndef CAFFEINE_TEST_GRADIENT_CHECK_UTIL_H_
#define CAFFEINE_TEST_GRADIENT_CHECK_UTIL_H_
+#include <algorithm>
+#include <cmath>
+#include <glog/logging.h>
+#include <gtest/gtest.h>
#include "caffeine/layer.hpp"
+using std::max;
+
namespace caffeine {
// The gradient checker adds a L2 normalization loss function on top of the
Dtype kink_range_;
};
+
+// Detailed implementations are as follows.
+
+
+template <typename Dtype>
+void GradientChecker<Dtype>::CheckGradientSingle(Layer<Dtype>& layer,
+ vector<Blob<Dtype>*>& bottom, vector<Blob<Dtype>*>& top,
+ int check_bottom, int top_id, int top_data_id) {
+ // First, figure out what blobs we need to check against.
+ vector<Blob<Dtype>*> blobs_to_check;
+ for (int i = 0; i < layer.params().size(); ++i) {
+ blobs_to_check.push_back(&layer.params()[i]);
+ }
+ if (check_bottom < 0) {
+ for (int i = 0; i < bottom.size(); ++i) {
+ blobs_to_check.push_back(bottom[i]);
+ }
+ } else {
+ CHECK(check_bottom < bottom.size());
+ blobs_to_check.push_back(bottom[check_bottom]);
+ }
+ // go through the bottom and parameter blobs
+ //LOG(ERROR) << "Checking " << blobs_to_check.size() << " blobs.";
+ for (int blobid = 0; blobid < blobs_to_check.size(); ++blobid) {
+ Blob<Dtype>* current_blob = blobs_to_check[blobid];
+ //LOG(ERROR) << "Blob " << blobid << ": checking " << current_blob->count()
+ // << " parameters.";
+ // go through the values
+ for (int feat_id = 0; feat_id < current_blob->count(); ++feat_id) {
+ // First, obtain the original data
+ Caffeine::set_random_seed(seed_);
+ layer.Forward(bottom, &top);
+ Dtype computed_objective = GetObjAndGradient(top, top_id, top_data_id);
+ // Get any additional loss from the layer
+ computed_objective += layer.Backward(top, true, &bottom);
+ Dtype computed_gradient = current_blob->cpu_diff()[feat_id];
+ // compute score by adding stepsize
+ current_blob->mutable_cpu_data()[feat_id] += stepsize_;
+ Caffeine::set_random_seed(seed_);
+ layer.Forward(bottom, &top);
+ Dtype positive_objective = GetObjAndGradient(top, top_id, top_data_id);
+ positive_objective += layer.Backward(top, true, &bottom);
+ // compute score by subtracting stepsize
+ current_blob->mutable_cpu_data()[feat_id] -= stepsize_ * 2;
+ Caffeine::set_random_seed(seed_);
+ layer.Forward(bottom, &top);
+ Dtype negative_objective = GetObjAndGradient(top, top_id, top_data_id);
+ negative_objective += layer.Backward(top, true, &bottom);
+ // Recover stepsize
+ current_blob->mutable_cpu_data()[feat_id] += stepsize_;
+ Dtype estimated_gradient = (positive_objective - negative_objective) /
+ stepsize_ / 2.;
+ Dtype feature = current_blob->cpu_data()[feat_id];
+ //LOG(ERROR) << "debug: " << current_blob->cpu_data()[feat_id] << " "
+ // << current_blob->cpu_diff()[feat_id];
+ if (kink_ - kink_range_ > feature || feature > kink_ + kink_range_) {
+ // We check relative accuracy, but for too small values, we threshold
+ // the scale factor by 1.
+ Dtype scale = max(max(fabs(computed_gradient), fabs(estimated_gradient)),
+ 1.);
+ EXPECT_GT(computed_gradient, estimated_gradient - threshold_ * scale);
+ EXPECT_LT(computed_gradient, estimated_gradient + threshold_ * scale);
+ }
+ //LOG(ERROR) << "Feature: " << current_blob->cpu_data()[feat_id];
+ //LOG(ERROR) << "computed gradient: " << computed_gradient
+ // << " estimated_gradient: " << estimated_gradient;
+ }
+ }
+}
+
+template <typename Dtype>
+void GradientChecker<Dtype>::CheckGradientExhaustive(Layer<Dtype>& layer,
+ vector<Blob<Dtype>*>& bottom, vector<Blob<Dtype>*>& top, int check_bottom) {
+ layer.SetUp(bottom, &top);
+ //LOG(ERROR) << "Exhaustive Mode.";
+ for (int i = 0; i < top.size(); ++i) {
+ //LOG(ERROR) << "Exhaustive: blob " << i << " size " << top[i]->count();
+ for (int j = 0; j < top[i]->count(); ++j) {
+ //LOG(ERROR) << "Exhaustive: blob " << i << " data " << j;
+ CheckGradientSingle(layer, bottom, top, check_bottom, i, j);
+ }
+ }
+}
+
+template <typename Dtype>
+Dtype GradientChecker<Dtype>::GetObjAndGradient(vector<Blob<Dtype>*>& top,
+ int top_id, int top_data_id) {
+ Dtype loss = 0;
+ if (top_id < 0) {
+ // the loss will be half of the sum of squares of all outputs
+ for (int i = 0; i < top.size(); ++i) {
+ Blob<Dtype>* top_blob = top[i];
+ const Dtype* top_blob_data = top_blob->cpu_data();
+ Dtype* top_blob_diff = top_blob->mutable_cpu_diff();
+ int count = top_blob->count();
+ for (int j = 0; j < count; ++j) {
+ loss += top_blob_data[j] * top_blob_data[j];
+ }
+ // set the diff: simply the data.
+ memcpy(top_blob_diff, top_blob_data, sizeof(Dtype) * top_blob->count());
+ }
+ loss /= 2.;
+ } else {
+ // the loss will be the top_data_id-th element in the top_id-th blob.
+ for (int i = 0; i < top.size(); ++i) {
+ Blob<Dtype>* top_blob = top[i];
+ Dtype* top_blob_diff = top_blob->mutable_cpu_diff();
+ memset(top_blob_diff, 0, sizeof(Dtype) * top_blob->count());
+ }
+ loss = top[top_id]->cpu_data()[top_data_id];
+ top[top_id]->mutable_cpu_diff()[top_data_id] = 1.;
+ }
+ return loss;
+}
+
} // namespace caffeine
#endif // CAFFEINE_TEST_GRADIENT_CHECK_UTIL_H_
#include "caffeine/vision_layers.hpp"
#include "caffeine/test/test_gradient_check_util.hpp"
+#include "caffeine/test/test_caffeine_main.hpp"
namespace caffeine {
#include "caffeine/vision_layers.hpp"
#include "caffeine/test/test_gradient_check_util.hpp"
+#include "caffeine/test/test_caffeine_main.hpp"
+
namespace caffeine {
extern cudaDeviceProp CAFFEINE_TEST_CUDA_PROP;
#include "caffeine/vision_layers.hpp"
#include "caffeine/test/test_gradient_check_util.hpp"
+#include "caffeine/test/test_caffeine_main.hpp"
+
using std::min;
using std::max;
#include "caffeine/vision_layers.hpp"
#include "caffeine/test/test_gradient_check_util.hpp"
+#include "caffeine/test/test_caffeine_main.hpp"
namespace caffeine {
#include "caffeine/vision_layers.hpp"
#include "caffeine/test/test_gradient_check_util.hpp"
+#include "caffeine/test/test_caffeine_main.hpp"
namespace caffeine {
#include <cuda_runtime.h>
#include <glog/logging.h>
#include <gtest/gtest.h>
+#include "caffeine/test/test_caffeine_main.hpp"
namespace caffeine {
-cudaDeviceProp CAFFEINE_TEST_CUDA_PROP;
+extern cudaDeviceProp CAFFEINE_TEST_CUDA_PROP;
-} // namespace caffeine
-
-using namespace caffeine;
-using namespace std;
+class PlatformTest : public ::testing::Test {};
-int main(int argc, char** argv) {
- ::testing::InitGoogleTest(&argc, argv);
- ::google::InitGoogleLogging(argv[0]);
- // Before starting testing, let's first print out a few cuda defice info.
- int device;
- cudaGetDeviceCount(&device);
- cout << "Cuda number of devices: " << device << endl;
- if (argc > 1) {
- // Use the given device
- device = atoi(argv[1]);
- cudaSetDevice(device);
- cout << "Setting to use device " << device << endl;
- }
- cudaGetDevice(&device);
- cout << "Current device id: " << device << endl;
- cudaGetDeviceProperties(&CAFFEINE_TEST_CUDA_PROP, device);
+TEST_F(PlatformTest, TestInitialization) {
printf("Major revision number: %d\n", CAFFEINE_TEST_CUDA_PROP.major);
printf("Minor revision number: %d\n", CAFFEINE_TEST_CUDA_PROP.minor);
printf("Name: %s\n", CAFFEINE_TEST_CUDA_PROP.name);
printf("Concurrent copy and execution: %s\n", (CAFFEINE_TEST_CUDA_PROP.deviceOverlap ? "Yes" : "No"));
printf("Number of multiprocessors: %d\n", CAFFEINE_TEST_CUDA_PROP.multiProcessorCount);
printf("Kernel execution timeout: %s\n", (CAFFEINE_TEST_CUDA_PROP.kernelExecTimeoutEnabled ? "Yes" : "No"));
-
- return RUN_ALL_TESTS();
+ EXPECT_TRUE(true);
}
+} // namespace caffeine
#include "caffeine/common.hpp"
#include "caffeine/syncedmem.hpp"
+#include "caffeine/test/test_caffeine_main.hpp"
namespace caffeine {
#include "caffeine/blob.hpp"
#include "caffeine/util/math_functions.hpp"
+#include "caffeine/test/test_caffeine_main.hpp"
+
namespace caffeine {
extern cudaDeviceProp CAFFEINE_TEST_CUDA_PROP;