test update
author Yangqing Jia <jiayq84@gmail.com>
Mon, 16 Sep 2013 19:53:26 +0000 (12:53 -0700)
committer Yangqing Jia <jiayq84@gmail.com>
Mon, 16 Sep 2013 19:53:26 +0000 (12:53 -0700)
src/caffeine/test/test_caffeine_main.cpp
src/caffeine/test/test_neuron_layer.cpp

index 450e18c..ae9d179 100644
@@ -1,8 +1,41 @@
+#include <iostream>
+
+#include <cuda_runtime.h>
 #include <glog/logging.h>
 #include <gtest/gtest.h>
 
+using namespace std;
+
 int main(int argc, char** argv) {
   ::testing::InitGoogleTest(&argc, argv);
   ::google::InitGoogleLogging(argv[0]);
+  // Before starting the tests, print out some basic CUDA device info.
+  int device;
+  cudaGetDeviceCount(&device);
+  cout << "Number of CUDA devices: " << device << endl;
+  cudaGetDevice(&device);
+  cout << "Current device id: " << device << endl;
+  cudaDeviceProp prop;
+  cudaGetDeviceProperties(&prop, device);
+  printf("Major revision number:         %d\n",  prop.major);
+  printf("Minor revision number:         %d\n",  prop.minor);
+  printf("Name:                          %s\n",  prop.name);
+  printf("Total global memory:           %u\n",  prop.totalGlobalMem);
+  printf("Total shared memory per block: %u\n",  prop.sharedMemPerBlock);
+  printf("Total registers per block:     %d\n",  prop.regsPerBlock);
+  printf("Warp size:                     %d\n",  prop.warpSize);
+  printf("Maximum memory pitch:          %u\n",  prop.memPitch);
+  printf("Maximum threads per block:     %d\n",  prop.maxThreadsPerBlock);
+  for (int i = 0; i < 3; ++i)
+    printf("Maximum dimension %d of block:  %d\n", i, prop.maxThreadsDim[i]);
+  for (int i = 0; i < 3; ++i)
+    printf("Maximum dimension %d of grid:   %d\n", i, prop.maxGridSize[i]);
+  printf("Clock rate:                    %d\n",  prop.clockRate);
+  printf("Total constant memory:         %u\n",  prop.totalConstMem);
+  printf("Texture alignment:             %u\n",  prop.textureAlignment);
+  printf("Concurrent copy and execution: %s\n",  (prop.deviceOverlap ? "Yes" : "No"));
+  printf("Number of multiprocessors:     %d\n",  prop.multiProcessorCount);
+  printf("Kernel execution timeout:      %s\n",  (prop.kernelExecTimeoutEnabled ? "Yes" : "No"));
+
   return RUN_ALL_TESTS();
 }
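
Note: the cudaGetDeviceCount / cudaGetDevice / cudaGetDeviceProperties calls added above ignore their cudaError_t return values, so on a machine without a working GPU the code would still fall through and print uninitialized properties. Below is a minimal sketch of how these queries could be wrapped with error checking; the checkCuda helper is hypothetical and not part of this commit.

#include <cstdio>
#include <cstdlib>

#include <cuda_runtime.h>

// Hypothetical helper (not in this commit): abort with a readable message
// if a CUDA runtime call fails, instead of printing garbage properties.
static void checkCuda(cudaError_t err, const char* what) {
  if (err != cudaSuccess) {
    fprintf(stderr, "%s failed: %s\n", what, cudaGetErrorString(err));
    exit(EXIT_FAILURE);
  }
}

int main() {
  int device_count = 0;
  checkCuda(cudaGetDeviceCount(&device_count), "cudaGetDeviceCount");
  int device = 0;
  checkCuda(cudaGetDevice(&device), "cudaGetDevice");
  cudaDeviceProp prop;
  checkCuda(cudaGetDeviceProperties(&prop, device), "cudaGetDeviceProperties");
  printf("Device %d of %d: %s (compute capability %d.%d)\n",
         device, device_count, prop.name, prop.major, prop.minor);
  return 0;
}

In the test main above, the same kind of wrapper could guard each query before RUN_ALL_TESTS() is invoked.
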
src/caffeine/test/test_neuron_layer.cpp
index db33f7b..92a50a5 100644
@@ -32,8 +32,23 @@ class NeuronLayerTest : public ::testing::Test {
 typedef ::testing::Types<float, double> Dtypes;
 TYPED_TEST_CASE(NeuronLayerTest, Dtypes);
 
-TYPED_TEST(NeuronLayerTest, TestReLU) {
+TYPED_TEST(NeuronLayerTest, TestReLUCPU) {
   LayerParameter layer_param;
+  Caffeine::set_mode(Caffeine::CPU);
+  ReLULayer<TypeParam> layer(layer_param);
+  layer.Forward(this->blob_bottom_vec_, &(this->blob_top_vec_));
+  // Now, check values
+  const TypeParam* bottom_data = this->blob_bottom_->cpu_data();
+  const TypeParam* top_data = this->blob_top_->cpu_data();
+  for (int i = 0; i < this->blob_bottom_->count(); ++i) {
+    EXPECT_GE(top_data[i], 0.);
+    EXPECT_TRUE(top_data[i] == 0 || top_data[i] == bottom_data[i]);
+  }
+}
+
+TYPED_TEST(NeuronLayerTest, TestReLUGPU) {
+  LayerParameter layer_param;
+  Caffeine::set_mode(Caffeine::GPU);
   ReLULayer<TypeParam> layer(layer_param);
   layer.Forward(this->blob_bottom_vec_, &(this->blob_top_vec_));
   // Now, check values