// because CUDA does not (at least currently) work well with C++11 features.
using boost::shared_ptr;
+// Common functions and classes from std that caffe often uses.
+using std::fstream;
+using std::ios;
+using std::iterator;
+using std::make_pair;
+using std::map;
+using std::ostringstream;
+using std::pair;
+using std::set;
+using std::string;
+using std::vector;
// A singleton class to hold common caffe stuff, such as the handler that
// caffe is going to use for cublas, curand, etc.
namespace caffe {
-using std::string;
-using std::vector;
-
template <typename Dtype>
class Layer {
public:
namespace caffe {
-using std::map;
-using std::pair;
-using std::set;
-using std::string;
-using std::vector;
-
template <typename Dtype>
class Net {
public:
#include "caffe/layer.hpp"
#include "caffe/net.hpp"
-using std::max;
-
namespace caffe {
// The gradient checker adds an L2 normalization loss function on top of the
|| fabs(feature) > kink_ + kink_range_) {
// We check relative accuracy, but for too small values, we threshold
// the scale factor by 1.
- Dtype scale = max(
- max(fabs(computed_gradient), fabs(estimated_gradient)), 1.);
+ Dtype scale = std::max(
+ std::max(fabs(computed_gradient), fabs(estimated_gradient)), 1.);
EXPECT_NEAR(computed_gradient, estimated_gradient, threshold_ * scale)
<< "debug: (top_id, top_data_id, blob_id, feat_id)="
<< top_id << "," << top_data_id << "," << blob_id << "," << feat_id;
namespace caffe {
-using std::pair;
-using std::string;
-
// Copy NetParameters with SplitLayers added to replace any shared bottom
// blobs with unique bottom blobs provided by the SplitLayer.
void InsertSplits(const NetParameter& param, NetParameter* param_split);
namespace caffe {
-using std::string;
using ::google::protobuf::Message;
bool ReadProtoFromTextFile(const char* filename, Message* proto);
namespace caffe {
-using std::string;
-
// Return true iff any layer contains parameters specified using
// deprecated V0LayerParameter.
bool NetNeedsUpgrade(const NetParameter& net_param);
namespace caffe {
-using std::string;
-
// A function to get a specific layer from the specification given in
// LayerParameter. Ideally this would be replaced by a factory pattern,
// but we will leave it this way for now.
namespace caffe {
-using std::max;
-
template <typename Dtype>
void AccuracyLayer<Dtype>::SetUp(
const vector<Blob<Dtype>*>& bottom, vector<Blob<Dtype>*>* top) {
namespace caffe {
-using std::min;
-
const float kBNLL_THRESHOLD = 50.;
template <typename Dtype>
const int count = (*bottom)[0]->count();
Dtype expval;
for (int i = 0; i < count; ++i) {
- expval = exp(min(bottom_data[i], Dtype(kBNLL_THRESHOLD)));
+ expval = exp(std::min(bottom_data[i], Dtype(kBNLL_THRESHOLD)));
bottom_diff[i] = top_diff[i] * expval / (expval + 1.);
}
}
namespace caffe {
-using std::max;
-
const float kBNLL_THRESHOLD = 50.;
template <typename Dtype>
#include "caffe/vision_layers.hpp"
#include "caffe/util/math_functions.hpp"
-using std::max;
-
namespace caffe {
#include "caffe/vision_layers.hpp"
namespace caffe {
-using std::vector;
template <typename Dtype>
HDF5OutputLayer<Dtype>::HDF5OutputLayer(const LayerParameter& param)
#include "caffe/vision_layers.hpp"
namespace caffe {
-using std::vector;
template <typename Dtype>
Dtype HDF5OutputLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
#include "caffe/util/math_functions.hpp"
#include "caffe/util/io.hpp"
-using std::max;
-
namespace caffe {
template <typename Dtype>
}
for (int i = 0; i < num; ++i) {
for (int j = 0; j < dim; ++j) {
- bottom_diff[i * dim + j] = max(Dtype(0), 1 + bottom_diff[i * dim + j]);
+ bottom_diff[i * dim + j] = std::max(
+ Dtype(0), 1 + bottom_diff[i * dim + j]);
}
}
switch (this->layer_param_.hinge_loss_param().norm()) {
namespace caffe {
-using std::iterator;
-using std::pair;
-
template <typename Dtype>
void* ImageDataLayerPrefetch(void* layer_pointer) {
CHECK(layer_pointer);
namespace caffe {
-using std::pair;
-
template <typename Dtype>
Dtype ImageDataLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
vector<Blob<Dtype>*>* top) {
#include "caffe/util/math_functions.hpp"
#include "caffe/util/io.hpp"
-using std::max;
-
namespace caffe {
template <typename Dtype>
for (int i = 0; i < num; ++i) {
int label = static_cast<int>(bottom_label[i]);
for (int j = 0; j < dim; ++j) {
- Dtype prob = max(bottom_data[i * dim + j], Dtype(kLOG_THRESHOLD));
+ Dtype prob = std::max(bottom_data[i * dim + j], Dtype(kLOG_THRESHOLD));
loss -= infogain_mat[label * dim + j] * log(prob);
}
}
for (int i = 0; i < num; ++i) {
int label = static_cast<int>(bottom_label[i]);
for (int j = 0; j < dim; ++j) {
- Dtype prob = max(bottom_data[i * dim + j], Dtype(kLOG_THRESHOLD));
+ Dtype prob = std::max(bottom_data[i * dim + j], Dtype(kLOG_THRESHOLD));
bottom_diff[i * dim + j] = - infogain_mat[label * dim + j] / prob / num;
}
}
#include "caffe/util/math_functions.hpp"
#include "caffe/util/io.hpp"
-using std::max;
-
namespace caffe {
template <typename Dtype>
#include "caffe/util/math_functions.hpp"
#include "caffe/util/io.hpp"
-using std::max;
-
namespace caffe {
template <typename Dtype>
Dtype loss = 0;
for (int i = 0; i < num; ++i) {
int label = static_cast<int>(bottom_label[i]);
- Dtype prob = max(bottom_data[i * dim + label], Dtype(kLOG_THRESHOLD));
+ Dtype prob = std::max(
+ bottom_data[i * dim + label], Dtype(kLOG_THRESHOLD));
loss -= log(prob);
}
if (top->size() == 1) {
caffe_set((*bottom)[0]->count(), Dtype(0), bottom_diff);
for (int i = 0; i < num; ++i) {
int label = static_cast<int>(bottom_label[i]);
- Dtype prob = max(bottom_data[i * dim + label], Dtype(kLOG_THRESHOLD));
+ Dtype prob = std::max(
+ bottom_data[i * dim + label], Dtype(kLOG_THRESHOLD));
bottom_diff[i * dim + label] = -1. / prob / num;
}
}
#include "caffe/syncedmem.hpp"
#include "caffe/util/math_functions.hpp"
-using std::max;
-using std::min;
-
namespace caffe {
+using std::min;
+using std::max;
+
template <typename Dtype>
void PoolingLayer<Dtype>::SetUp(const vector<Blob<Dtype>*>& bottom,
vector<Blob<Dtype>*>* top) {
#include "caffe/vision_layers.hpp"
#include "caffe/util/math_functions.hpp"
-using std::max;
-using std::min;
-
namespace caffe {
template <typename Dtype>
#include "caffe/vision_layers.hpp"
#include "caffe/util/math_functions.hpp"
-using std::max;
-
namespace caffe {
template <typename Dtype>
#include "caffe/vision_layers.hpp"
#include "caffe/util/math_functions.hpp"
-using std::max;
-
namespace caffe {
template <typename Dtype>
#include "caffe/layer.hpp"
#include "caffe/vision_layers.hpp"
-using std::max;
-
namespace caffe {
template <typename Dtype>
Dtype* top_data = (*top)[0]->mutable_cpu_data();
const int count = bottom[0]->count();
for (int i = 0; i < count; ++i) {
- top_data[i] = max(bottom_data[i], Dtype(0));
+ top_data[i] = std::max(bottom_data[i], Dtype(0));
}
return Dtype(0);
}
#include "caffe/layer.hpp"
#include "caffe/vision_layers.hpp"
-using std::max;
-
namespace caffe {
template <typename Dtype>
#include "caffe/vision_layers.hpp"
#include "caffe/util/math_functions.hpp"
-using std::max;
-
namespace caffe {
template <typename Dtype>
#include "caffe/vision_layers.hpp"
#include "caffe/util/math_functions.hpp"
-using std::max;
-
namespace caffe {
template <typename Dtype>
#include "caffe/layer.hpp"
#include "caffe/vision_layers.hpp"
-using std::max;
-
namespace caffe {
template <typename Dtype>
#include "caffe/vision_layers.hpp"
#include "caffe/util/math_functions.hpp"
-using std::max;
-
namespace caffe {
template <typename Dtype>
for (int i = 0; i < num; ++i) {
scale_data[i] = bottom_data[i*dim];
for (int j = 0; j < dim; ++j) {
- scale_data[i] = max(scale_data[i], bottom_data[i * dim + j]);
+ scale_data[i] = std::max(scale_data[i], bottom_data[i * dim + j]);
}
}
// subtraction
#include "caffe/vision_layers.hpp"
#include "caffe/util/math_functions.hpp"
-using std::max;
-
namespace caffe {
template <typename Dtype>
#include "caffe/vision_layers.hpp"
#include "caffe/util/math_functions.hpp"
-using std::max;
-
namespace caffe {
template <typename Dtype>
int dim = prob_.count() / num;
Dtype loss = 0;
for (int i = 0; i < num; ++i) {
- loss += -log(max(prob_data[i * dim + static_cast<int>(label[i])],
- Dtype(FLT_MIN)));
+ loss += -log(std::max(prob_data[i * dim + static_cast<int>(label[i])],
+ Dtype(FLT_MIN)));
}
if (top->size() >= 1) {
(*top)[0]->mutable_cpu_data()[0] = loss / num;
#include "caffe/vision_layers.hpp"
#include "caffe/util/math_functions.hpp"
-using std::max;
-
namespace caffe {
template <typename Dtype>
#include "caffe/layer.hpp"
#include "caffe/vision_layers.hpp"
-using std::max;
-
namespace caffe {
template <typename Dtype>
namespace caffe {
-using std::map;
-using std::pair;
-
template <typename Dtype>
void* WindowDataLayerPrefetch(void* layer_pointer) {
WindowDataLayer<Dtype>* layer =
namespace caffe {
-using std::map;
-using std::pair;
-
template <typename Dtype>
Dtype WindowDataLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
vector<Blob<Dtype>*>* top) {
#include "caffe/util/math_functions.hpp"
#include "caffe/util/upgrade_proto.hpp"
-using std::make_pair;
-using std::map;
-using std::pair;
-using std::set;
namespace caffe {
#include "caffe/util/io.hpp"
#include "caffe/util/math_functions.hpp"
-using std::max;
-using std::min;
-
namespace caffe {
template <typename Dtype>
if (isnan(expected_value)) {
EXPECT_TRUE(isnan(top_data[i]));
} else {
- Dtype precision = max(Dtype(abs(expected_value * 0.0001)),
- min_precision);
+ Dtype precision = std::max(
+ Dtype(std::abs(expected_value * Dtype(1e-4))), min_precision);
EXPECT_NEAR(expected_value, top_data[i], precision);
}
}
#include "caffe/common.hpp"
#include "caffe/util/insert_splits.hpp"
-using std::map;
-using std::ostringstream;
-using std::pair;
-using std::make_pair;
-
namespace caffe {
void InsertSplits(const NetParameter& param, NetParameter* param_split) {
#include "caffe/util/io.hpp"
#include "caffe/proto/caffe.pb.h"
-using std::fstream;
-using std::ios;
-using std::max;
-using std::string;
+namespace caffe {
+
using google::protobuf::io::FileInputStream;
using google::protobuf::io::FileOutputStream;
using google::protobuf::io::ZeroCopyInputStream;
using google::protobuf::io::CodedOutputStream;
using google::protobuf::Message;
-namespace caffe {
-
bool ReadProtoFromTextFile(const char* filename, Message* proto) {
int fd = open(filename, O_RDONLY);
CHECK_NE(fd, -1) << "File not found: " << filename;
#include "caffe/util/upgrade_proto.hpp"
#include "caffe/proto/caffe.pb.h"
-using std::map;
-using std::string;
-
namespace caffe {
bool NetNeedsUpgrade(const NetParameter& net_param) {