// The main caffe test code. Your test cpp code should include this hpp
// to allow a main function to be compiled into the binary.
+#include "caffe/caffe.hpp"
#include "caffe/test/test_caffe_main.hpp"
namespace caffe {
int main(int argc, char** argv) {
::testing::InitGoogleTest(&argc, argv);
- ::google::InitGoogleLogging(argv[0]);
+ caffe::GlobalInit(&argc, &argv);
#ifndef CPU_ONLY
// Before starting testing, let's first print out a few CUDA device info.
int device;
#include "caffe/test/test_caffe_main.hpp"
-using std::string;
-using std::stringstream;
-
namespace caffe {
template <typename Dtype>
#include "caffe/test/test_caffe_main.hpp"
-using std::string;
-using std::vector;
-
namespace caffe {
template<typename TypeParam>
#include "caffe/test/test_caffe_main.hpp"
-using std::string;
-
namespace caffe {
template <typename TypeParam>
#include "caffe/test/test_caffe_main.hpp"
-using std::string;
-
namespace caffe {
class PaddingLayerUpgradeTest : public ::testing::Test {
using caffe::Datum;
using caffe::BlobProto;
-using std::string;
using std::max;
int main(int argc, char** argv) {
return 1;
}
- string db_backend = "lmdb";
+ std::string db_backend = "lmdb";
if (argc == 4) {
- db_backend = string(argv[3]);
+ db_backend = std::string(argv[3]);
}
// leveldb
for (it->SeekToFirst(); it->Valid(); it->Next()) {
// just a dummy operation
datum.ParseFromString(it->value().ToString());
- const string& data = datum.data();
+ const std::string& data = datum.data();
size_in_datum = std::max<int>(datum.data().size(),
datum.float_data_size());
CHECK_EQ(size_in_datum, data_size) << "Incorrect data field size " <<
do {
// just a dummy operation
datum.ParseFromArray(mdb_value.mv_data, mdb_value.mv_size);
- const string& data = datum.data();
+ const std::string& data = datum.data();
size_in_datum = std::max<int>(datum.data().size(),
datum.float_data_size());
CHECK_EQ(size_in_datum, data_size) << "Incorrect data field size " <<
using namespace caffe; // NOLINT(build/namespaces)
using std::pair;
-using std::string;
DEFINE_bool(gray, false,
"When this option is on, treat images as grayscale ones");
bool is_color = !FLAGS_gray;
bool check_size = FLAGS_check_size;
std::ifstream infile(argv[2]);
- std::vector<std::pair<string, int> > lines;
- string filename;
+ std::vector<std::pair<std::string, int> > lines;
+ std::string filename;
int label;
while (infile >> filename >> label) {
lines.push_back(std::make_pair(filename, label));
}
LOG(INFO) << "A total of " << lines.size() << " images.";
- const string& db_backend = FLAGS_backend;
+ const std::string& db_backend = FLAGS_backend;
const char* db_path = argv[3];
int resize_height = std::max<int>(0, FLAGS_resize_height);
}
// Storing to db
- string root_folder(argv[1]);
+ std::string root_folder(argv[1]);
Datum datum;
int count = 0;
const int kMaxKeyLength = 256;
data_size = datum.channels() * datum.height() * datum.width();
data_size_initialized = true;
} else {
- const string& data = datum.data();
+ const std::string& data = datum.data();
CHECK_EQ(data.size(), data_size) << "Incorrect data field size "
<< data.size();
}
// sequential
snprintf(key_cstr, kMaxKeyLength, "%08d_%s", line_id,
lines[line_id].first.c_str());
- string value;
+ std::string value;
datum.SerializeToString(&value);
- string keystr(key_cstr);
+ std::string keystr(key_cstr);
// Put in db
if (db_backend == "leveldb") { // leveldb
#include "caffe/solver.hpp"
#include "caffe/util/io.hpp"
-using namespace caffe; // NOLINT(build/namespaces)
+using boost::shared_ptr;
+using caffe::Blob;
+using caffe::BlobProto;
+using caffe::Caffe;
+using caffe::Net;
+using caffe::NetParameter;
int main(int argc, char** argv) {
Caffe::set_mode(Caffe::GPU);
}
caffe_net->CopyTrainedLayersFrom(argv[2]);
- vector<Blob<float>* > input_vec;
+ std::vector<Blob<float>* > input_vec;
shared_ptr<Blob<float> > input_blob(new Blob<float>());
if (strcmp(argv[3], "none") != 0) {
BlobProto input_blob_proto;
input_vec.push_back(input_blob.get());
}
- string output_prefix(argv[4]);
+ std::string output_prefix(argv[4]);
// Run the network without training.
LOG(ERROR) << "Performing Forward";
caffe_net->Forward(input_vec);
}
// Now, let's dump all the layers
- const vector<string>& blob_names = caffe_net->blob_names();
- const vector<shared_ptr<Blob<float> > >& blobs = caffe_net->blobs();
+ const std::vector<std::string>& blob_names = caffe_net->blob_names();
+ const std::vector<shared_ptr<Blob<float> > >& blobs = caffe_net->blobs();
for (int blobid = 0; blobid < caffe_net->blobs().size(); ++blobid) {
// Serialize blob
LOG(ERROR) << "Dumping " << blob_names[blobid];
#include "caffe/util/io.hpp"
#include "caffe/vision_layers.hpp"
-using namespace caffe; // NOLINT(build/namespaces)
+using boost::shared_ptr;
+using caffe::Blob;
+using caffe::Caffe;
+using caffe::Datum;
+using caffe::Net;
template<typename Dtype>
int feature_extraction_pipeline(int argc, char** argv);
Caffe::set_phase(Caffe::TEST);
arg_pos = 0; // the name of the executable
- string pretrained_binary_proto(argv[++arg_pos]);
+ std::string pretrained_binary_proto(argv[++arg_pos]);
// Expected prototxt contains at least one data layer such as
// the layer data_layer_name and one feature blob such as the
top: "fc7"
}
*/
- string feature_extraction_proto(argv[++arg_pos]);
+ std::string feature_extraction_proto(argv[++arg_pos]);
shared_ptr<Net<Dtype> > feature_extraction_net(
new Net<Dtype>(feature_extraction_proto));
feature_extraction_net->CopyTrainedLayersFrom(pretrained_binary_proto);
- string extract_feature_blob_names(argv[++arg_pos]);
- vector<string> blob_names;
+ std::string extract_feature_blob_names(argv[++arg_pos]);
+ std::vector<std::string> blob_names;
boost::split(blob_names, extract_feature_blob_names, boost::is_any_of(","));
- string save_feature_leveldb_names(argv[++arg_pos]);
- vector<string> leveldb_names;
+ std::string save_feature_leveldb_names(argv[++arg_pos]);
+ std::vector<std::string> leveldb_names;
boost::split(leveldb_names, save_feature_leveldb_names,
boost::is_any_of(","));
CHECK_EQ(blob_names.size(), leveldb_names.size()) <<
options.error_if_exists = true;
options.create_if_missing = true;
options.write_buffer_size = 268435456;
- vector<shared_ptr<leveldb::DB> > feature_dbs;
+ std::vector<shared_ptr<leveldb::DB> > feature_dbs;
for (size_t i = 0; i < num_features; ++i) {
LOG(INFO)<< "Opening leveldb " << leveldb_names[i];
leveldb::DB* db;
LOG(ERROR)<< "Extacting Features";
Datum datum;
- vector<shared_ptr<leveldb::WriteBatch> > feature_batches(
+ std::vector<shared_ptr<leveldb::WriteBatch> > feature_batches(
num_features,
shared_ptr<leveldb::WriteBatch>(new leveldb::WriteBatch()));
const int kMaxKeyStrLength = 100;
char key_str[kMaxKeyStrLength];
- vector<Blob<float>*> input_vec;
- vector<int> image_indices(num_features, 0);
+ std::vector<Blob<float>*> input_vec;
+ std::vector<int> image_indices(num_features, 0);
for (int batch_index = 0; batch_index < num_mini_batches; ++batch_index) {
feature_extraction_net->Forward(input_vec);
for (int i = 0; i < num_features; ++i) {
for (int d = 0; d < dim_features; ++d) {
datum.add_float_data(feature_blob_data[d]);
}
- string value;
+ std::string value;
datum.SerializeToString(&value);
snprintf(key_str, kMaxKeyStrLength, "%d", image_indices[i]);
- feature_batches[i]->Put(string(key_str), value);
+ feature_batches[i]->Put(std::string(key_str), value);
++image_indices[i];
if (image_indices[i] % 1000 == 0) {
feature_dbs[i]->Write(leveldb::WriteOptions(),