// Run forward with the input blobs already fed separately. You can get the
// input blobs using input_blobs().
- const vector<Blob<Dtype>*>& ForwardPrefilled(Dtype* loss);
- const vector<Blob<Dtype>*>& ForwardPrefilled();
+ const vector<Blob<Dtype>*>& ForwardPrefilled(Dtype* loss = NULL);
// Run forward using a set of bottom blobs, and return the result.
const vector<Blob<Dtype>*>& Forward(const vector<Blob<Dtype>* > & bottom,
- Dtype* loss);
- const vector<Blob<Dtype>*>& Forward(const vector<Blob<Dtype>* > & bottom);
+ Dtype* loss = NULL);
// Run forward using a serialized BlobProtoVector and return the result
// as a serialized BlobProtoVector
- string Forward(const string& input_blob_protos, Dtype* loss);
+ string Forward(const string& input_blob_protos, Dtype* loss = NULL);
// The network backward should take no input and output, since it solely
// computes the gradient w.r.t the parameters, and the data has already
}
template <typename Dtype>
-const vector<Blob<Dtype>*>& Net<Dtype>::ForwardPrefilled() {
- Dtype ignored_loss;
- return ForwardPrefilled(&ignored_loss);
-}
-
-template <typename Dtype>
const vector<Blob<Dtype>*>& Net<Dtype>::ForwardPrefilled(Dtype* loss) {
+ // The zero-arg overload is gone: `loss` now has a NULL default (see the
+ // matching declaration change), so callers that don't care about the loss
+ // simply omit the argument and all loss bookkeeping below is skipped.
- *loss = Dtype(0.);
+ if (loss != NULL) {
+ *loss = Dtype(0.);
+ }
 for (int i = 0; i < layers_.size(); ++i) {
 // LOG(ERROR) << "Forwarding " << layer_names_[i];
- *loss += layers_[i]->Forward(bottom_vecs_[i], &top_vecs_[i]);
+ // Each layer's Forward() returns its contribution to the total loss;
+ // accumulate it only when the caller supplied an output pointer.
+ Dtype layer_loss = layers_[i]->Forward(bottom_vecs_[i], &top_vecs_[i]);
+ if (loss != NULL) {
+ *loss += layer_loss;
+ }
 }
 return net_output_blobs_;
}
template <typename Dtype>
const vector<Blob<Dtype>*>& Net<Dtype>::Forward(
- const vector<Blob<Dtype>*> & bottom) {
- Dtype ignored_loss;
- return Forward(bottom, &ignored_loss);
-}
-
-template <typename Dtype>
-const vector<Blob<Dtype>*>& Net<Dtype>::Forward(
const vector<Blob<Dtype>*> & bottom, Dtype* loss) {
// Copy bottom to internal bottom
for (int i = 0; i < bottom.size(); ++i) {