// NOTE(review): incomplete fragment — this looks like the interior of a CPU
// backward pass in a Caffe-style layer (Blob cpu_diff/mutable_cpu_diff API).
// The enclosing function signature, the loop body after L7, and the closing
// braces are NOT visible here; do not edit this span in isolation.
const Dtype* top_diff = top[0]->cpu_diff();          // gradient w.r.t. the top blob (read-only)
Dtype* bottom_diff = (*bottom)[0]->mutable_cpu_diff();  // gradient to fill for the bottom blob
const int count = (*bottom)[0]->count();             // total number of elements to process
// NOTE(review): the leading "- " below is diff/patch residue, not valid C++ —
// the line was presumably "Dtype exp2x;" before this text was mangled. Confirm
// against the original file before compiling.
- Dtype exp2x;
Dtype tanhx;
for (int i = 0; i < count; ++i) {
  // Reads the cached forward output; "top_data" is not declared in the visible
  // span — presumably defined just above this fragment. TODO: confirm.
  tanhx = top_data[i];
// NOTE(review): incomplete fragment — this appears to be the interior of a
// Caffe-style Net initialization routine (reads a NetParameter-like "param",
// sizes per-layer bottom/top bookkeeping, then iterates over layers). The
// fragment is cut at both ends and mixes lines from more than one scope (see
// notes below); treat it as reference text, not compilable code.
name_ = param.name();
map<string, int> blob_name_to_idx;   // blob name -> index into blobs_
set<string> available_blobs;         // blobs produced but not yet consumed
// NOTE(review): "- " prefix below is diff/patch residue, not valid C++.
- int num_layers = param.layers_size();
// Each input blob must come with exactly 4 dimension entries (num/channels/
// height/width), per the CHECK on the next line.
CHECK_EQ(param.input_size() * 4, param.input_dim_size())
    << "Incorrect input blob dimension specifications.";
memory_used_ = 0;
bottom_vecs_.resize(param.layers_size());
top_id_vecs_.resize(param.layers_size());
// NOTE(review): the line above resizes top_id_vecs_ but the visible line at
// the same position in the original (L15) resizes bottom_id_vecs_ — whoever
// reassembles this should verify both vectors are resized. TODO confirm.
for (int layer_id = 0; layer_id < param.layers_size(); ++layer_id) {
  // NOTE(review): diff residue again on the next line ("- " prefix).
  - bool in_place = false;
  const LayerParameter& layer_param = param.layers(layer_id);
  layers_.push_back(shared_ptr<Layer<Dtype> >(GetLayer<Dtype>(layer_param)));
  layer_names_.push_back(layer_param.name());
  // NOTE(review): the lines from here down look spliced in from a different
  // helper (an AppendBottom-style function): "blob_id"/"blob_name" are not
  // declared in the visible span, "available_blobs" is dereferenced with "->"
  // although it was declared above as a value (L10), and "return blob_id;"
  // cannot belong to the same void-ish init loop. Confirm against the
  // original source before using any of this.
  bottom_vecs_[layer_id].push_back(blobs_[blob_id].get());
  bottom_id_vecs_[layer_id].push_back(blob_id);
  available_blobs->erase(blob_name);
  - bool need_backward = param.force_backward() || blob_need_backward_[blob_id];
  return blob_id;
}
// NOTE(review): incomplete fragment — looks like the interior of a dataset
// conversion / feature-extraction tool that writes records to LevelDB in
// batches. The loop opened on the last line has no visible body or close, and
// "num_mini_batches" is not declared in view.
// NOTE(review): raw owning pointer — the visible span never deletes "batch";
// the matching delete (and the db->Write call) are presumably below this
// fragment. TODO confirm, or switch to a smart pointer when reassembling.
leveldb::WriteBatch* batch = new leveldb::WriteBatch();
const int kMaxKeyStrLength = 100;
char key_str[kMaxKeyStrLength];      // buffer for snprintf-formatted record keys
// NOTE(review): "- " prefix below is diff/patch residue, not valid C++.
- int num_bytes_of_binary_code = sizeof(Dtype);
vector<Blob<float>*> input_vec;      // non-owning input blob pointers for Forward()
int image_index = 0;                 // running index used to key each record
for (int batch_index = 0; batch_index < num_mini_batches; ++batch_index) {