explicit ArgMaxLayer(const LayerParameter& param)
: Layer<Dtype>(param) {}
virtual void LayerSetUp(const vector<Blob<Dtype>*>& bottom,
- vector<Blob<Dtype>*>* top);
+ const vector<Blob<Dtype>*>& top);
virtual void Reshape(const vector<Blob<Dtype>*>& bottom,
- vector<Blob<Dtype>*>* top);
+ const vector<Blob<Dtype>*>& top);
virtual inline LayerParameter_LayerType type() const {
return LayerParameter_LayerType_ARGMAX;
* @f$ (for @f$ K = 1 @f$).
*/
virtual void Forward_cpu(const vector<Blob<Dtype>*>& bottom,
- vector<Blob<Dtype>*>* top);
+ const vector<Blob<Dtype>*>& top);
/// @brief Not implemented (non-differentiable function)
virtual void Backward_cpu(const vector<Blob<Dtype>*>& top,
- const vector<bool>& propagate_down, vector<Blob<Dtype>*>* bottom) {
+ const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
NOT_IMPLEMENTED;
}
bool out_max_val_;
explicit ConcatLayer(const LayerParameter& param)
: Layer<Dtype>(param) {}
virtual void LayerSetUp(const vector<Blob<Dtype>*>& bottom,
- vector<Blob<Dtype>*>* top);
+ const vector<Blob<Dtype>*>& top);
virtual void Reshape(const vector<Blob<Dtype>*>& bottom,
- vector<Blob<Dtype>*>* top);
+ const vector<Blob<Dtype>*>& top);
virtual inline LayerParameter_LayerType type() const {
return LayerParameter_LayerType_CONCAT;
* @f$
*/
virtual void Forward_cpu(const vector<Blob<Dtype>*>& bottom,
- vector<Blob<Dtype>*>* top);
+ const vector<Blob<Dtype>*>& top);
virtual void Forward_gpu(const vector<Blob<Dtype>*>& bottom,
- vector<Blob<Dtype>*>* top);
+ const vector<Blob<Dtype>*>& top);
/**
* @brief Computes the error gradient w.r.t. the concatenated inputs.
* @f$
*/
virtual void Backward_cpu(const vector<Blob<Dtype>*>& top,
- const vector<bool>& propagate_down, vector<Blob<Dtype>*>* bottom);
+ const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom);
virtual void Backward_gpu(const vector<Blob<Dtype>*>& top,
- const vector<bool>& propagate_down, vector<Blob<Dtype>*>* bottom);
+ const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom);
Blob<Dtype> col_bob_;
int count_;
explicit EltwiseLayer(const LayerParameter& param)
: Layer<Dtype>(param) {}
virtual void LayerSetUp(const vector<Blob<Dtype>*>& bottom,
- vector<Blob<Dtype>*>* top);
+ const vector<Blob<Dtype>*>& top);
virtual void Reshape(const vector<Blob<Dtype>*>& bottom,
- vector<Blob<Dtype>*>* top);
+ const vector<Blob<Dtype>*>& top);
virtual inline LayerParameter_LayerType type() const {
return LayerParameter_LayerType_ELTWISE;
protected:
virtual void Forward_cpu(const vector<Blob<Dtype>*>& bottom,
- vector<Blob<Dtype>*>* top);
+ const vector<Blob<Dtype>*>& top);
virtual void Forward_gpu(const vector<Blob<Dtype>*>& bottom,
- vector<Blob<Dtype>*>* top);
+ const vector<Blob<Dtype>*>& top);
virtual void Backward_cpu(const vector<Blob<Dtype>*>& top,
- const vector<bool>& propagate_down, vector<Blob<Dtype>*>* bottom);
+ const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom);
virtual void Backward_gpu(const vector<Blob<Dtype>*>& top,
- const vector<bool>& propagate_down, vector<Blob<Dtype>*>* bottom);
+ const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom);
EltwiseParameter_EltwiseOp op_;
vector<Dtype> coeffs_;
explicit FlattenLayer(const LayerParameter& param)
: Layer<Dtype>(param) {}
virtual void Reshape(const vector<Blob<Dtype>*>& bottom,
- vector<Blob<Dtype>*>* top);
+ const vector<Blob<Dtype>*>& top);
virtual inline LayerParameter_LayerType type() const {
return LayerParameter_LayerType_FLATTEN;
* the outputs -- i.e., the (virtually) copied, flattened inputs
*/
virtual void Forward_cpu(const vector<Blob<Dtype>*>& bottom,
- vector<Blob<Dtype>*>* top);
+ const vector<Blob<Dtype>*>& top);
virtual void Forward_gpu(const vector<Blob<Dtype>*>& bottom,
- vector<Blob<Dtype>*>* top);
+ const vector<Blob<Dtype>*>& top);
/**
* @brief Computes the error gradient w.r.t. the flattened inputs.
* gradient is (virtually) copied
*/
virtual void Backward_cpu(const vector<Blob<Dtype>*>& top,
- const vector<bool>& propagate_down, vector<Blob<Dtype>*>* bottom);
+ const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom);
virtual void Backward_gpu(const vector<Blob<Dtype>*>& top,
- const vector<bool>& propagate_down, vector<Blob<Dtype>*>* bottom);
+ const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom);
int count_;
};
explicit InnerProductLayer(const LayerParameter& param)
: Layer<Dtype>(param) {}
virtual void LayerSetUp(const vector<Blob<Dtype>*>& bottom,
- vector<Blob<Dtype>*>* top);
+ const vector<Blob<Dtype>*>& top);
virtual void Reshape(const vector<Blob<Dtype>*>& bottom,
- vector<Blob<Dtype>*>* top);
+ const vector<Blob<Dtype>*>& top);
virtual inline LayerParameter_LayerType type() const {
return LayerParameter_LayerType_INNER_PRODUCT;
protected:
virtual void Forward_cpu(const vector<Blob<Dtype>*>& bottom,
- vector<Blob<Dtype>*>* top);
+ const vector<Blob<Dtype>*>& top);
virtual void Forward_gpu(const vector<Blob<Dtype>*>& bottom,
- vector<Blob<Dtype>*>* top);
+ const vector<Blob<Dtype>*>& top);
virtual void Backward_cpu(const vector<Blob<Dtype>*>& top,
- const vector<bool>& propagate_down, vector<Blob<Dtype>*>* bottom);
+ const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom);
virtual void Backward_gpu(const vector<Blob<Dtype>*>& top,
- const vector<bool>& propagate_down, vector<Blob<Dtype>*>* bottom);
+ const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom);
int M_;
int K_;
explicit MVNLayer(const LayerParameter& param)
: Layer<Dtype>(param) {}
virtual void Reshape(const vector<Blob<Dtype>*>& bottom,
- vector<Blob<Dtype>*>* top);
+ const vector<Blob<Dtype>*>& top);
virtual inline LayerParameter_LayerType type() const {
return LayerParameter_LayerType_MVN;
protected:
virtual void Forward_cpu(const vector<Blob<Dtype>*>& bottom,
- vector<Blob<Dtype>*>* top);
+ const vector<Blob<Dtype>*>& top);
virtual void Forward_gpu(const vector<Blob<Dtype>*>& bottom,
- vector<Blob<Dtype>*>* top);
+ const vector<Blob<Dtype>*>& top);
virtual void Backward_cpu(const vector<Blob<Dtype>*>& top,
- const vector<bool>& propagate_down, vector<Blob<Dtype>*>* bottom);
+ const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom);
virtual void Backward_gpu(const vector<Blob<Dtype>*>& top,
- const vector<bool>& propagate_down, vector<Blob<Dtype>*>* bottom);
+ const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom);
Blob<Dtype> mean_, variance_, temp_;
explicit SilenceLayer(const LayerParameter& param)
: Layer<Dtype>(param) {}
virtual void Reshape(const vector<Blob<Dtype>*>& bottom,
- vector<Blob<Dtype>*>* top) {}
+ const vector<Blob<Dtype>*>& top) {}
virtual inline LayerParameter_LayerType type() const {
return LayerParameter_LayerType_SILENCE;
protected:
virtual void Forward_cpu(const vector<Blob<Dtype>*>& bottom,
- vector<Blob<Dtype>*>* top) {}
+ const vector<Blob<Dtype>*>& top) {}
// We can't define Forward_gpu here, since STUB_GPU will provide
// its own definition for CPU_ONLY mode.
virtual void Forward_gpu(const vector<Blob<Dtype>*>& bottom,
- vector<Blob<Dtype>*>* top);
+ const vector<Blob<Dtype>*>& top);
virtual void Backward_cpu(const vector<Blob<Dtype>*>& top,
- const vector<bool>& propagate_down, vector<Blob<Dtype>*>* bottom);
+ const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom);
virtual void Backward_gpu(const vector<Blob<Dtype>*>& top,
- const vector<bool>& propagate_down, vector<Blob<Dtype>*>* bottom);
+ const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom);
};
/**
explicit SoftmaxLayer(const LayerParameter& param)
: Layer<Dtype>(param) {}
virtual void Reshape(const vector<Blob<Dtype>*>& bottom,
- vector<Blob<Dtype>*>* top);
+ const vector<Blob<Dtype>*>& top);
virtual inline LayerParameter_LayerType type() const {
return LayerParameter_LayerType_SOFTMAX;
protected:
virtual void Forward_cpu(const vector<Blob<Dtype>*>& bottom,
- vector<Blob<Dtype>*>* top);
+ const vector<Blob<Dtype>*>& top);
virtual void Forward_gpu(const vector<Blob<Dtype>*>& bottom,
- vector<Blob<Dtype>*>* top);
+ const vector<Blob<Dtype>*>& top);
virtual void Backward_cpu(const vector<Blob<Dtype>*>& top,
- const vector<bool>& propagate_down, vector<Blob<Dtype>*>* bottom);
+ const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom);
virtual void Backward_gpu(const vector<Blob<Dtype>*>& top,
- const vector<bool>& propagate_down, vector<Blob<Dtype>*>* bottom);
+ const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom);
/// sum_multiplier is used to carry out sum using BLAS
Blob<Dtype> sum_multiplier_;
explicit CuDNNSoftmaxLayer(const LayerParameter& param)
: SoftmaxLayer<Dtype>(param) {}
virtual void LayerSetUp(const vector<Blob<Dtype>*>& bottom,
- vector<Blob<Dtype>*>* top);
+ const vector<Blob<Dtype>*>& top);
virtual void Reshape(const vector<Blob<Dtype>*>& bottom,
- vector<Blob<Dtype>*>* top);
+ const vector<Blob<Dtype>*>& top);
virtual ~CuDNNSoftmaxLayer();
protected:
virtual void Forward_gpu(const vector<Blob<Dtype>*>& bottom,
- vector<Blob<Dtype>*>* top);
+ const vector<Blob<Dtype>*>& top);
virtual void Backward_gpu(const vector<Blob<Dtype>*>& top,
- const vector<bool>& propagate_down, vector<Blob<Dtype>*>* bottom);
+ const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom);
cudnnHandle_t handle_;
cudnnTensor4dDescriptor_t bottom_desc_;
explicit SplitLayer(const LayerParameter& param)
: Layer<Dtype>(param) {}
virtual void Reshape(const vector<Blob<Dtype>*>& bottom,
- vector<Blob<Dtype>*>* top);
+ const vector<Blob<Dtype>*>& top);
virtual inline LayerParameter_LayerType type() const {
return LayerParameter_LayerType_SPLIT;
protected:
virtual void Forward_cpu(const vector<Blob<Dtype>*>& bottom,
- vector<Blob<Dtype>*>* top);
+ const vector<Blob<Dtype>*>& top);
virtual void Forward_gpu(const vector<Blob<Dtype>*>& bottom,
- vector<Blob<Dtype>*>* top);
+ const vector<Blob<Dtype>*>& top);
virtual void Backward_cpu(const vector<Blob<Dtype>*>& top,
- const vector<bool>& propagate_down, vector<Blob<Dtype>*>* bottom);
+ const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom);
virtual void Backward_gpu(const vector<Blob<Dtype>*>& top,
- const vector<bool>& propagate_down, vector<Blob<Dtype>*>* bottom);
+ const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom);
int count_;
};
explicit SliceLayer(const LayerParameter& param)
: Layer<Dtype>(param) {}
virtual void LayerSetUp(const vector<Blob<Dtype>*>& bottom,
- vector<Blob<Dtype>*>* top);
+ const vector<Blob<Dtype>*>& top);
virtual void Reshape(const vector<Blob<Dtype>*>& bottom,
- vector<Blob<Dtype>*>* top);
+ const vector<Blob<Dtype>*>& top);
virtual inline LayerParameter_LayerType type() const {
return LayerParameter_LayerType_SLICE;
protected:
virtual void Forward_cpu(const vector<Blob<Dtype>*>& bottom,
- vector<Blob<Dtype>*>* top);
+ const vector<Blob<Dtype>*>& top);
virtual void Forward_gpu(const vector<Blob<Dtype>*>& bottom,
- vector<Blob<Dtype>*>* top);
+ const vector<Blob<Dtype>*>& top);
virtual void Backward_cpu(const vector<Blob<Dtype>*>& top,
- const vector<bool>& propagate_down, vector<Blob<Dtype>*>* bottom);
+ const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom);
virtual void Backward_gpu(const vector<Blob<Dtype>*>& top,
- const vector<bool>& propagate_down, vector<Blob<Dtype>*>* bottom);
+ const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom);
Blob<Dtype> col_bob_;
int count_;
// DataLayerSetUp to do special data layer setup for individual layer types.
// This method may not be overridden except by the BasePrefetchingDataLayer.
virtual void LayerSetUp(const vector<Blob<Dtype>*>& bottom,
- vector<Blob<Dtype>*>* top);
+ const vector<Blob<Dtype>*>& top);
virtual void DataLayerSetUp(const vector<Blob<Dtype>*>& bottom,
- vector<Blob<Dtype>*>* top) {}
+ const vector<Blob<Dtype>*>& top) {}
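// Illustrative sketch (hypothetical MyDataLayer; not part of this patch):
// concrete data layers hook in here instead of overriding LayerSetUp, e.g.
//
//   template <typename Dtype>
//   void MyDataLayer<Dtype>::DataLayerSetUp(const vector<Blob<Dtype>*>& bottom,
//       const vector<Blob<Dtype>*>& top) {
//     // Shape the data top from the source; the fields below are assumed.
//     top[0]->Reshape(batch_size_, channels_, height_, width_);
//   }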
// Data layers have no bottoms, so reshaping is trivial.
virtual void Reshape(const vector<Blob<Dtype>*>& bottom,
- vector<Blob<Dtype>*>* top) {}
+ const vector<Blob<Dtype>*>& top) {}
virtual void Backward_cpu(const vector<Blob<Dtype>*>& top,
- const vector<bool>& propagate_down, vector<Blob<Dtype>*>* bottom) {}
+ const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {}
virtual void Backward_gpu(const vector<Blob<Dtype>*>& top,
- const vector<bool>& propagate_down, vector<Blob<Dtype>*>* bottom) {}
+ const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {}
int datum_channels() const { return datum_channels_; }
int datum_height() const { return datum_height_; }
// DataLayerSetUp to do special data layer setup for individual layer types.
// This method may not be overridden.
void LayerSetUp(const vector<Blob<Dtype>*>& bottom,
- vector<Blob<Dtype>*>* top);
+ const vector<Blob<Dtype>*>& top);
virtual void Forward_cpu(const vector<Blob<Dtype>*>& bottom,
- vector<Blob<Dtype>*>* top);
+ const vector<Blob<Dtype>*>& top);
virtual void Forward_gpu(const vector<Blob<Dtype>*>& bottom,
- vector<Blob<Dtype>*>* top);
+ const vector<Blob<Dtype>*>& top);
virtual void CreatePrefetchThread();
virtual void JoinPrefetchThread();
: BasePrefetchingDataLayer<Dtype>(param) {}
virtual ~DataLayer();
virtual void DataLayerSetUp(const vector<Blob<Dtype>*>& bottom,
- vector<Blob<Dtype>*>* top);
+ const vector<Blob<Dtype>*>& top);
virtual inline LayerParameter_LayerType type() const {
return LayerParameter_LayerType_DATA;
explicit DummyDataLayer(const LayerParameter& param)
: Layer<Dtype>(param) {}
virtual void LayerSetUp(const vector<Blob<Dtype>*>& bottom,
- vector<Blob<Dtype>*>* top);
+ const vector<Blob<Dtype>*>& top);
// Data layers have no bottoms, so reshaping is trivial.
virtual void Reshape(const vector<Blob<Dtype>*>& bottom,
- vector<Blob<Dtype>*>* top) {}
+ const vector<Blob<Dtype>*>& top) {}
virtual inline LayerParameter_LayerType type() const {
return LayerParameter_LayerType_DUMMY_DATA;
protected:
virtual void Forward_cpu(const vector<Blob<Dtype>*>& bottom,
- vector<Blob<Dtype>*>* top);
+ const vector<Blob<Dtype>*>& top);
virtual void Backward_cpu(const vector<Blob<Dtype>*>& top,
- const vector<bool>& propagate_down, vector<Blob<Dtype>*>* bottom) {}
+ const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {}
virtual void Backward_gpu(const vector<Blob<Dtype>*>& top,
- const vector<bool>& propagate_down, vector<Blob<Dtype>*>* bottom) {}
+ const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {}
vector<shared_ptr<Filler<Dtype> > > fillers_;
vector<bool> refill_;
: Layer<Dtype>(param) {}
virtual ~HDF5DataLayer();
virtual void LayerSetUp(const vector<Blob<Dtype>*>& bottom,
- vector<Blob<Dtype>*>* top);
+ const vector<Blob<Dtype>*>& top);
// Data layers have no bottoms, so reshaping is trivial.
virtual void Reshape(const vector<Blob<Dtype>*>& bottom,
- vector<Blob<Dtype>*>* top) {}
+ const vector<Blob<Dtype>*>& top) {}
virtual inline LayerParameter_LayerType type() const {
return LayerParameter_LayerType_HDF5_DATA;
protected:
virtual void Forward_cpu(const vector<Blob<Dtype>*>& bottom,
- vector<Blob<Dtype>*>* top);
+ const vector<Blob<Dtype>*>& top);
virtual void Forward_gpu(const vector<Blob<Dtype>*>& bottom,
- vector<Blob<Dtype>*>* top);
+ const vector<Blob<Dtype>*>& top);
virtual void Backward_cpu(const vector<Blob<Dtype>*>& top,
- const vector<bool>& propagate_down, vector<Blob<Dtype>*>* bottom) {}
+ const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {}
virtual void Backward_gpu(const vector<Blob<Dtype>*>& top,
- const vector<bool>& propagate_down, vector<Blob<Dtype>*>* bottom) {}
+ const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {}
virtual void LoadHDF5FileData(const char* filename);
std::vector<std::string> hdf_filenames_;
explicit HDF5OutputLayer(const LayerParameter& param);
virtual ~HDF5OutputLayer();
virtual void LayerSetUp(const vector<Blob<Dtype>*>& bottom,
- vector<Blob<Dtype>*>* top) {}
+ const vector<Blob<Dtype>*>& top) {}
// Data layers have no bottoms, so reshaping is trivial.
virtual void Reshape(const vector<Blob<Dtype>*>& bottom,
- vector<Blob<Dtype>*>* top) {}
+ const vector<Blob<Dtype>*>& top) {}
virtual inline LayerParameter_LayerType type() const {
return LayerParameter_LayerType_HDF5_OUTPUT;
protected:
virtual void Forward_cpu(const vector<Blob<Dtype>*>& bottom,
- vector<Blob<Dtype>*>* top);
+ const vector<Blob<Dtype>*>& top);
virtual void Forward_gpu(const vector<Blob<Dtype>*>& bottom,
- vector<Blob<Dtype>*>* top);
+ const vector<Blob<Dtype>*>& top);
virtual void Backward_cpu(const vector<Blob<Dtype>*>& top,
- const vector<bool>& propagate_down, vector<Blob<Dtype>*>* bottom);
+ const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom);
virtual void Backward_gpu(const vector<Blob<Dtype>*>& top,
- const vector<bool>& propagate_down, vector<Blob<Dtype>*>* bottom);
+ const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom);
virtual void SaveBlobs();
std::string file_name_;
: BasePrefetchingDataLayer<Dtype>(param) {}
virtual ~ImageDataLayer();
virtual void DataLayerSetUp(const vector<Blob<Dtype>*>& bottom,
- vector<Blob<Dtype>*>* top);
+ const vector<Blob<Dtype>*>& top);
virtual inline LayerParameter_LayerType type() const {
return LayerParameter_LayerType_IMAGE_DATA;
explicit MemoryDataLayer(const LayerParameter& param)
: BaseDataLayer<Dtype>(param), has_new_data_(false) {}
virtual void DataLayerSetUp(const vector<Blob<Dtype>*>& bottom,
- vector<Blob<Dtype>*>* top);
+ const vector<Blob<Dtype>*>& top);
virtual inline LayerParameter_LayerType type() const {
return LayerParameter_LayerType_MEMORY_DATA;
protected:
virtual void Forward_cpu(const vector<Blob<Dtype>*>& bottom,
- vector<Blob<Dtype>*>* top);
+ const vector<Blob<Dtype>*>& top);
int batch_size_;
Dtype* data_;
: BasePrefetchingDataLayer<Dtype>(param) {}
virtual ~WindowDataLayer();
virtual void DataLayerSetUp(const vector<Blob<Dtype>*>& bottom,
- vector<Blob<Dtype>*>* top);
+ const vector<Blob<Dtype>*>& top);
virtual inline LayerParameter_LayerType type() const {
return LayerParameter_LayerType_WINDOW_DATA;
* Sets up the loss weight multiplier blobs for any non-zero loss weights.
* This method may not be overridden.
*/
- void SetUp(const vector<Blob<Dtype>*>& bottom, vector<Blob<Dtype>*>* top) {
- CheckBlobCounts(bottom, *top);
+ void SetUp(const vector<Blob<Dtype>*>& bottom,
+ const vector<Blob<Dtype>*>& top) {
+ CheckBlobCounts(bottom, top);
LayerSetUp(bottom, top);
Reshape(bottom, top);
SetLossWeights(top);
* adjust the top blob sizes.
*/
virtual void LayerSetUp(const vector<Blob<Dtype>*>& bottom,
- vector<Blob<Dtype>*>* top) {}
+ const vector<Blob<Dtype>*>& top) {}
/**
* @brief Adjust the shapes of top blobs and internal buffers to accommodate
* the bottom blobs.
*/
virtual void Reshape(const vector<Blob<Dtype>*>& bottom,
- vector<Blob<Dtype>*>* top) = 0;
+ const vector<Blob<Dtype>*>& top) = 0;
/**
* @brief Given the bottom blobs, compute the top blobs and the loss.
* Your layer should implement Forward_cpu and (optionally) Forward_gpu.
*/
inline Dtype Forward(const vector<Blob<Dtype>*>& bottom,
- vector<Blob<Dtype>*>* top);
+ const vector<Blob<Dtype>*>& top);
/**
* @brief Given the top blob error gradients, compute the bottom blob error
*/
inline void Backward(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down,
- vector<Blob<Dtype>*>* bottom);
+ const vector<Blob<Dtype>*>& bottom);
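// Illustrative caller-side sketch (hypothetical blobs, not taken from this
// patch): with the const-reference interface the top vector is passed
// directly rather than by pointer.
//
//   vector<Blob<float>*> bottom_vec, top_vec;
//   bottom_vec.push_back(&bottom_blob);
//   top_vec.push_back(&top_blob);
//   vector<bool> propagate_down(bottom_vec.size(), true);
//   layer.SetUp(bottom_vec, top_vec);    // was layer.SetUp(bottom_vec, &top_vec)
//   float loss = layer.Forward(bottom_vec, top_vec);
//   layer.Backward(top_vec, propagate_down, bottom_vec);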
/**
* @brief Returns the vector of learnable parameter blobs.
/** @brief Using the CPU device, compute the layer output. */
virtual void Forward_cpu(const vector<Blob<Dtype>*>& bottom,
- vector<Blob<Dtype>*>* top) = 0;
+ const vector<Blob<Dtype>*>& top) = 0;
/**
* @brief Using the GPU device, compute the layer output.
* Fall back to Forward_cpu() if unavailable.
*/
virtual void Forward_gpu(const vector<Blob<Dtype>*>& bottom,
- vector<Blob<Dtype>*>* top) {
+ const vector<Blob<Dtype>*>& top) {
// LOG(WARNING) << "Using CPU code as backup.";
return Forward_cpu(bottom, top);
}
*/
virtual void Backward_cpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down,
- vector<Blob<Dtype>*>* bottom) = 0;
+ const vector<Blob<Dtype>*>& bottom) = 0;
/**
* @brief Using the GPU device, compute the gradients for any parameters and
* for the bottom blobs if propagate_down is true.
*/
virtual void Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down,
- vector<Blob<Dtype>*>* bottom) {
+ const vector<Blob<Dtype>*>& bottom) {
// LOG(WARNING) << "Using CPU code as backup.";
Backward_cpu(top, propagate_down, bottom);
}
* Called by SetUp to initialize the weights associated with any top blobs in
* the loss function. Store non-zero loss weights in the diff blob.
*/
- inline void SetLossWeights(vector<Blob<Dtype>*>* top) {
+ inline void SetLossWeights(const vector<Blob<Dtype>*>& top) {
const int num_loss_weights = layer_param_.loss_weight_size();
if (num_loss_weights) {
- CHECK_EQ(top->size(), num_loss_weights) << "loss_weight must be "
+ CHECK_EQ(top.size(), num_loss_weights) << "loss_weight must be "
"unspecified or specified once per top blob.";
- for (int top_id = 0; top_id < top->size(); ++top_id) {
+ for (int top_id = 0; top_id < top.size(); ++top_id) {
const Dtype loss_weight = layer_param_.loss_weight(top_id);
if (loss_weight == Dtype(0)) { continue; }
this->set_loss(top_id, loss_weight);
- const int count = (*top)[top_id]->count();
- Dtype* loss_multiplier = (*top)[top_id]->mutable_cpu_diff();
+ const int count = top[top_id]->count();
+ Dtype* loss_multiplier = top[top_id]->mutable_cpu_diff();
caffe_set(count, loss_weight, loss_multiplier);
}
}
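// Worked sketch (assumed values, not part of this patch): with
// "loss_weight: 2" on a top blob of count 3 holding data {1, 4, 5},
// SetLossWeights() fills that blob's diff with 2, so Forward() below reports
// loss = caffe_cpu_dot(3, data, diff) = 2*1 + 2*4 + 2*5 = 20.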
// functions.
template <typename Dtype>
inline Dtype Layer<Dtype>::Forward(const vector<Blob<Dtype>*>& bottom,
- vector<Blob<Dtype>*>* top) {
+ const vector<Blob<Dtype>*>& top) {
Dtype loss = 0;
switch (Caffe::mode()) {
case Caffe::CPU:
Forward_cpu(bottom, top);
- for (int top_id = 0; top_id < top->size(); ++top_id) {
+ for (int top_id = 0; top_id < top.size(); ++top_id) {
if (!this->loss(top_id)) { continue; }
- const int count = (*top)[top_id]->count();
- const Dtype* data = (*top)[top_id]->cpu_data();
- const Dtype* loss_weights = (*top)[top_id]->cpu_diff();
+ const int count = top[top_id]->count();
+ const Dtype* data = top[top_id]->cpu_data();
+ const Dtype* loss_weights = top[top_id]->cpu_diff();
loss += caffe_cpu_dot(count, data, loss_weights);
}
break;
case Caffe::GPU:
Forward_gpu(bottom, top);
#ifndef CPU_ONLY
- for (int top_id = 0; top_id < top->size(); ++top_id) {
+ for (int top_id = 0; top_id < top.size(); ++top_id) {
if (!this->loss(top_id)) { continue; }
- const int count = (*top)[top_id]->count();
- const Dtype* data = (*top)[top_id]->gpu_data();
- const Dtype* loss_weights = (*top)[top_id]->gpu_diff();
+ const int count = top[top_id]->count();
+ const Dtype* data = top[top_id]->gpu_data();
+ const Dtype* loss_weights = top[top_id]->gpu_diff();
Dtype blob_loss = 0;
caffe_gpu_dot(count, data, loss_weights, &blob_loss);
loss += blob_loss;
template <typename Dtype>
inline void Layer<Dtype>::Backward(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down,
- vector<Blob<Dtype>*>* bottom) {
+ const vector<Blob<Dtype>*>& bottom) {
switch (Caffe::mode()) {
case Caffe::CPU:
Backward_cpu(top, propagate_down, bottom);
explicit AccuracyLayer(const LayerParameter& param)
: Layer<Dtype>(param) {}
virtual void LayerSetUp(const vector<Blob<Dtype>*>& bottom,
- vector<Blob<Dtype>*>* top);
+ const vector<Blob<Dtype>*>& top);
virtual void Reshape(const vector<Blob<Dtype>*>& bottom,
- vector<Blob<Dtype>*>* top);
+ const vector<Blob<Dtype>*>& top);
virtual inline LayerParameter_LayerType type() const {
return LayerParameter_LayerType_ACCURACY;
* @f$
*/
virtual void Forward_cpu(const vector<Blob<Dtype>*>& bottom,
- vector<Blob<Dtype>*>* top);
+ const vector<Blob<Dtype>*>& top);
/// @brief Not implemented -- AccuracyLayer cannot be used as a loss.
virtual void Backward_cpu(const vector<Blob<Dtype>*>& top,
- const vector<bool>& propagate_down, vector<Blob<Dtype>*>* bottom) {
+ const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
for (int i = 0; i < propagate_down.size(); ++i) {
if (propagate_down[i]) { NOT_IMPLEMENTED; }
}
explicit LossLayer(const LayerParameter& param)
: Layer<Dtype>(param) {}
virtual void LayerSetUp(
- const vector<Blob<Dtype>*>& bottom, vector<Blob<Dtype>*>* top);
+ const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top);
virtual void Reshape(
- const vector<Blob<Dtype>*>& bottom, vector<Blob<Dtype>*>* top);
+ const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top);
virtual inline int ExactNumBottomBlobs() const { return 2; }
explicit ContrastiveLossLayer(const LayerParameter& param)
: LossLayer<Dtype>(param), diff_() {}
virtual void LayerSetUp(const vector<Blob<Dtype>*>& bottom,
- vector<Blob<Dtype>*>* top);
+ const vector<Blob<Dtype>*>& top);
virtual inline int ExactNumBottomBlobs() const { return 3; }
virtual inline LayerParameter_LayerType type() const {
protected:
/// @copydoc ContrastiveLossLayer
virtual void Forward_cpu(const vector<Blob<Dtype>*>& bottom,
- vector<Blob<Dtype>*>* top);
+ const vector<Blob<Dtype>*>& top);
virtual void Forward_gpu(const vector<Blob<Dtype>*>& bottom,
- vector<Blob<Dtype>*>* top);
+ const vector<Blob<Dtype>*>& top);
/**
* @brief Computes the Contrastive error gradient w.r.t. the inputs.
* propagate_down[1]
*/
virtual void Backward_cpu(const vector<Blob<Dtype>*>& top,
- const vector<bool>& propagate_down, vector<Blob<Dtype>*>* bottom);
+ const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom);
virtual void Backward_gpu(const vector<Blob<Dtype>*>& top,
- const vector<bool>& propagate_down, vector<Blob<Dtype>*>* bottom);
+ const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom);
Blob<Dtype> diff_; // cached for backward pass
Blob<Dtype> dist_sq_; // cached for backward pass
explicit EuclideanLossLayer(const LayerParameter& param)
: LossLayer<Dtype>(param), diff_() {}
virtual void Reshape(const vector<Blob<Dtype>*>& bottom,
- vector<Blob<Dtype>*>* top);
+ const vector<Blob<Dtype>*>& top);
virtual inline LayerParameter_LayerType type() const {
return LayerParameter_LayerType_EUCLIDEAN_LOSS;
protected:
/// @copydoc EuclideanLossLayer
virtual void Forward_cpu(const vector<Blob<Dtype>*>& bottom,
- vector<Blob<Dtype>*>* top);
+ const vector<Blob<Dtype>*>& top);
virtual void Forward_gpu(const vector<Blob<Dtype>*>& bottom,
- vector<Blob<Dtype>*>* top);
+ const vector<Blob<Dtype>*>& top);
/**
* @brief Computes the Euclidean error gradient w.r.t. the inputs.
* @f$ if propagate_down[1]
*/
virtual void Backward_cpu(const vector<Blob<Dtype>*>& top,
- const vector<bool>& propagate_down, vector<Blob<Dtype>*>* bottom);
+ const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom);
virtual void Backward_gpu(const vector<Blob<Dtype>*>& top,
- const vector<bool>& propagate_down, vector<Blob<Dtype>*>* bottom);
+ const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom);
Blob<Dtype> diff_;
};
protected:
/// @copydoc HingeLossLayer
virtual void Forward_cpu(const vector<Blob<Dtype>*>& bottom,
- vector<Blob<Dtype>*>* top);
+ const vector<Blob<Dtype>*>& top);
/**
* @brief Computes the hinge loss error gradient w.r.t. the predictions.
* the labels -- ignored as we can't compute their error gradients
*/
virtual void Backward_cpu(const vector<Blob<Dtype>*>& top,
- const vector<bool>& propagate_down, vector<Blob<Dtype>*>* bottom);
+ const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom);
};
/**
explicit InfogainLossLayer(const LayerParameter& param)
: LossLayer<Dtype>(param), infogain_() {}
virtual void LayerSetUp(const vector<Blob<Dtype>*>& bottom,
- vector<Blob<Dtype>*>* top);
+ const vector<Blob<Dtype>*>& top);
virtual void Reshape(const vector<Blob<Dtype>*>& bottom,
- vector<Blob<Dtype>*>* top);
+ const vector<Blob<Dtype>*>& top);
// InfogainLossLayer takes 2-3 bottom Blobs; if there are 3 the third should
// be the infogain matrix. (Otherwise the infogain matrix is loaded from a
protected:
/// @copydoc InfogainLossLayer
virtual void Forward_cpu(const vector<Blob<Dtype>*>& bottom,
- vector<Blob<Dtype>*>* top);
+ const vector<Blob<Dtype>*>& top);
/**
* @brief Computes the infogain loss error gradient w.r.t. the predictions.
* gradient computation is not implemented.
*/
virtual void Backward_cpu(const vector<Blob<Dtype>*>& top,
- const vector<bool>& propagate_down, vector<Blob<Dtype>*>* bottom);
+ const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom);
Blob<Dtype> infogain_;
};
explicit MultinomialLogisticLossLayer(const LayerParameter& param)
: LossLayer<Dtype>(param) {}
virtual void Reshape(const vector<Blob<Dtype>*>& bottom,
- vector<Blob<Dtype>*>* top);
+ const vector<Blob<Dtype>*>& top);
virtual inline LayerParameter_LayerType type() const {
return LayerParameter_LayerType_MULTINOMIAL_LOGISTIC_LOSS;
protected:
/// @copydoc MultinomialLogisticLossLayer
virtual void Forward_cpu(const vector<Blob<Dtype>*>& bottom,
- vector<Blob<Dtype>*>* top);
+ const vector<Blob<Dtype>*>& top);
/**
* @brief Computes the multinomial logistic loss error gradient w.r.t. the
* the labels -- ignored as we can't compute their error gradients
*/
virtual void Backward_cpu(const vector<Blob<Dtype>*>& top,
- const vector<bool>& propagate_down, vector<Blob<Dtype>*>* bottom);
+ const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom);
};
/**
sigmoid_layer_(new SigmoidLayer<Dtype>(param)),
sigmoid_output_(new Blob<Dtype>()) {}
virtual void LayerSetUp(const vector<Blob<Dtype>*>& bottom,
- vector<Blob<Dtype>*>* top);
+ const vector<Blob<Dtype>*>& top);
virtual void Reshape(const vector<Blob<Dtype>*>& bottom,
- vector<Blob<Dtype>*>* top);
+ const vector<Blob<Dtype>*>& top);
virtual inline LayerParameter_LayerType type() const {
return LayerParameter_LayerType_SIGMOID_CROSS_ENTROPY_LOSS;
protected:
/// @copydoc SigmoidCrossEntropyLossLayer
virtual void Forward_cpu(const vector<Blob<Dtype>*>& bottom,
- vector<Blob<Dtype>*>* top);
+ const vector<Blob<Dtype>*>& top);
virtual void Forward_gpu(const vector<Blob<Dtype>*>& bottom,
- vector<Blob<Dtype>*>* top);
+ const vector<Blob<Dtype>*>& top);
/**
* @brief Computes the sigmoid cross-entropy loss error gradient w.r.t. the
* the labels -- ignored as we can't compute their error gradients
*/
virtual void Backward_cpu(const vector<Blob<Dtype>*>& top,
- const vector<bool>& propagate_down, vector<Blob<Dtype>*>* bottom);
+ const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom);
virtual void Backward_gpu(const vector<Blob<Dtype>*>& top,
- const vector<bool>& propagate_down, vector<Blob<Dtype>*>* bottom);
+ const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom);
/// The internal SigmoidLayer used to map predictions to probabilities.
shared_ptr<SigmoidLayer<Dtype> > sigmoid_layer_;
: LossLayer<Dtype>(param),
softmax_layer_(new SoftmaxLayer<Dtype>(param)) {}
virtual void LayerSetUp(const vector<Blob<Dtype>*>& bottom,
- vector<Blob<Dtype>*>* top);
+ const vector<Blob<Dtype>*>& top);
virtual void Reshape(const vector<Blob<Dtype>*>& bottom,
- vector<Blob<Dtype>*>* top);
+ const vector<Blob<Dtype>*>& top);
virtual inline LayerParameter_LayerType type() const {
return LayerParameter_LayerType_SOFTMAX_LOSS;
protected:
/// @copydoc SoftmaxWithLossLayer
virtual void Forward_cpu(const vector<Blob<Dtype>*>& bottom,
- vector<Blob<Dtype>*>* top);
+ const vector<Blob<Dtype>*>& top);
virtual void Forward_gpu(const vector<Blob<Dtype>*>& bottom,
- vector<Blob<Dtype>*>* top);
+ const vector<Blob<Dtype>*>& top);
/**
* @brief Computes the softmax loss error gradient w.r.t. the predictions.
* the labels -- ignored as we can't compute their error gradients
*/
virtual void Backward_cpu(const vector<Blob<Dtype>*>& top,
- const vector<bool>& propagate_down, vector<Blob<Dtype>*>* bottom);
+ const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom);
virtual void Backward_gpu(const vector<Blob<Dtype>*>& top,
- const vector<bool>& propagate_down, vector<Blob<Dtype>*>* bottom);
+ const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom);
/// The internal SoftmaxLayer used to map predictions to a distribution.
shared_ptr<SoftmaxLayer<Dtype> > softmax_layer_;
explicit NeuronLayer(const LayerParameter& param)
: Layer<Dtype>(param) {}
virtual void Reshape(const vector<Blob<Dtype>*>& bottom,
- vector<Blob<Dtype>*>* top);
+ const vector<Blob<Dtype>*>& top);
virtual inline LayerParameter_LayerType type() const {
return LayerParameter_LayerType_NONE;
explicit AbsValLayer(const LayerParameter& param)
: NeuronLayer<Dtype>(param) {}
virtual void LayerSetUp(const vector<Blob<Dtype>*>& bottom,
- vector<Blob<Dtype>*>* top);
+ const vector<Blob<Dtype>*>& top);
virtual inline LayerParameter_LayerType type() const {
return LayerParameter_LayerType_ABSVAL;
protected:
/// @copydoc AbsValLayer
virtual void Forward_cpu(const vector<Blob<Dtype>*>& bottom,
- vector<Blob<Dtype>*>* top);
+ const vector<Blob<Dtype>*>& top);
virtual void Forward_gpu(const vector<Blob<Dtype>*>& bottom,
- vector<Blob<Dtype>*>* top);
+ const vector<Blob<Dtype>*>& top);
/**
* @brief Computes the error gradient w.r.t. the absolute value inputs.
* @f$ if propagate_down[0]
*/
virtual void Backward_cpu(const vector<Blob<Dtype>*>& top,
- const vector<bool>& propagate_down, vector<Blob<Dtype>*>* bottom);
+ const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom);
virtual void Backward_gpu(const vector<Blob<Dtype>*>& top,
- const vector<bool>& propagate_down, vector<Blob<Dtype>*>* bottom);
+ const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom);
};
/**
protected:
/// @copydoc BNLLLayer
virtual void Forward_cpu(const vector<Blob<Dtype>*>& bottom,
- vector<Blob<Dtype>*>* top);
+ const vector<Blob<Dtype>*>& top);
virtual void Forward_gpu(const vector<Blob<Dtype>*>& bottom,
- vector<Blob<Dtype>*>* top);
+ const vector<Blob<Dtype>*>& top);
/**
* @brief Computes the error gradient w.r.t. the BNLL inputs.
* @f$ if propagate_down[0]
*/
virtual void Backward_cpu(const vector<Blob<Dtype>*>& top,
- const vector<bool>& propagate_down, vector<Blob<Dtype>*>* bottom);
+ const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom);
virtual void Backward_gpu(const vector<Blob<Dtype>*>& top,
- const vector<bool>& propagate_down, vector<Blob<Dtype>*>* bottom);
+ const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom);
};
/**
explicit DropoutLayer(const LayerParameter& param)
: NeuronLayer<Dtype>(param) {}
virtual void LayerSetUp(const vector<Blob<Dtype>*>& bottom,
- vector<Blob<Dtype>*>* top);
+ const vector<Blob<Dtype>*>& top);
virtual void Reshape(const vector<Blob<Dtype>*>& bottom,
- vector<Blob<Dtype>*>* top);
+ const vector<Blob<Dtype>*>& top);
virtual inline LayerParameter_LayerType type() const {
return LayerParameter_LayerType_DROPOUT;
* @f$ y_{\mbox{test}} = \mathbb{E}[y_{\mbox{train}}] = x @f$.
*/
virtual void Forward_cpu(const vector<Blob<Dtype>*>& bottom,
- vector<Blob<Dtype>*>* top);
+ const vector<Blob<Dtype>*>& top);
virtual void Forward_gpu(const vector<Blob<Dtype>*>& bottom,
- vector<Blob<Dtype>*>* top);
+ const vector<Blob<Dtype>*>& top);
virtual void Backward_cpu(const vector<Blob<Dtype>*>& top,
- const vector<bool>& propagate_down, vector<Blob<Dtype>*>* bottom);
+ const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom);
virtual void Backward_gpu(const vector<Blob<Dtype>*>& top,
- const vector<bool>& propagate_down, vector<Blob<Dtype>*>* bottom);
+ const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom);
/// when divided by UINT_MAX, the randomly generated values @f$u\sim U(0,1)@f$
Blob<unsigned int> rand_vec_;
explicit PowerLayer(const LayerParameter& param)
: NeuronLayer<Dtype>(param) {}
virtual void LayerSetUp(const vector<Blob<Dtype>*>& bottom,
- vector<Blob<Dtype>*>* top);
+ const vector<Blob<Dtype>*>& top);
virtual inline LayerParameter_LayerType type() const {
return LayerParameter_LayerType_POWER;
* @f$
*/
virtual void Forward_cpu(const vector<Blob<Dtype>*>& bottom,
- vector<Blob<Dtype>*>* top);
+ const vector<Blob<Dtype>*>& top);
virtual void Forward_gpu(const vector<Blob<Dtype>*>& bottom,
- vector<Blob<Dtype>*>* top);
+ const vector<Blob<Dtype>*>& top);
/**
* @brief Computes the error gradient w.r.t. the power inputs.
* @f$ if propagate_down[0]
*/
virtual void Backward_cpu(const vector<Blob<Dtype>*>& top,
- const vector<bool>& propagate_down, vector<Blob<Dtype>*>* bottom);
+ const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom);
virtual void Backward_gpu(const vector<Blob<Dtype>*>& top,
- const vector<bool>& propagate_down, vector<Blob<Dtype>*>* bottom);
+ const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom);
/// @brief @f$ \gamma @f$ from layer_param_.power_param()
Dtype power_;
* the computed outputs are @f$ y = \max(0, x) + \nu \min(0, x) @f$.
*/
virtual void Forward_cpu(const vector<Blob<Dtype>*>& bottom,
- vector<Blob<Dtype>*>* top);
+ const vector<Blob<Dtype>*>& top);
virtual void Forward_gpu(const vector<Blob<Dtype>*>& bottom,
- vector<Blob<Dtype>*>* top);
+ const vector<Blob<Dtype>*>& top);
/**
* @brief Computes the error gradient w.r.t. the ReLU inputs.
* @f$.
*/
virtual void Backward_cpu(const vector<Blob<Dtype>*>& top,
- const vector<bool>& propagate_down, vector<Blob<Dtype>*>* bottom);
+ const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom);
virtual void Backward_gpu(const vector<Blob<Dtype>*>& top,
- const vector<bool>& propagate_down, vector<Blob<Dtype>*>* bottom);
+ const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom);
};
#ifdef USE_CUDNN
explicit CuDNNReLULayer(const LayerParameter& param)
: ReLULayer<Dtype>(param) {}
virtual void LayerSetUp(const vector<Blob<Dtype>*>& bottom,
- vector<Blob<Dtype>*>* top);
+ const vector<Blob<Dtype>*>& top);
virtual void Reshape(const vector<Blob<Dtype>*>& bottom,
- vector<Blob<Dtype>*>* top);
+ const vector<Blob<Dtype>*>& top);
virtual ~CuDNNReLULayer();
protected:
virtual void Forward_gpu(const vector<Blob<Dtype>*>& bottom,
- vector<Blob<Dtype>*>* top);
+ const vector<Blob<Dtype>*>& top);
virtual void Backward_gpu(const vector<Blob<Dtype>*>& top,
- const vector<bool>& propagate_down, vector<Blob<Dtype>*>* bottom);
+ const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom);
cudnnHandle_t handle_;
cudnnTensor4dDescriptor_t bottom_desc_;
* @f$
*/
virtual void Forward_cpu(const vector<Blob<Dtype>*>& bottom,
- vector<Blob<Dtype>*>* top);
+ const vector<Blob<Dtype>*>& top);
virtual void Forward_gpu(const vector<Blob<Dtype>*>& bottom,
- vector<Blob<Dtype>*>* top);
+ const vector<Blob<Dtype>*>& top);
/**
* @brief Computes the error gradient w.r.t. the sigmoid inputs.
* @f$ if propagate_down[0]
*/
virtual void Backward_cpu(const vector<Blob<Dtype>*>& top,
- const vector<bool>& propagate_down, vector<Blob<Dtype>*>* bottom);
+ const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom);
virtual void Backward_gpu(const vector<Blob<Dtype>*>& top,
- const vector<bool>& propagate_down, vector<Blob<Dtype>*>* bottom);
+ const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom);
};
#ifdef USE_CUDNN
explicit CuDNNSigmoidLayer(const LayerParameter& param)
: SigmoidLayer<Dtype>(param) {}
virtual void LayerSetUp(const vector<Blob<Dtype>*>& bottom,
- vector<Blob<Dtype>*>* top);
+ const vector<Blob<Dtype>*>& top);
virtual void Reshape(const vector<Blob<Dtype>*>& bottom,
- vector<Blob<Dtype>*>* top);
+ const vector<Blob<Dtype>*>& top);
virtual ~CuDNNSigmoidLayer();
protected:
virtual void Forward_gpu(const vector<Blob<Dtype>*>& bottom,
- vector<Blob<Dtype>*>* top);
+ const vector<Blob<Dtype>*>& top);
virtual void Backward_gpu(const vector<Blob<Dtype>*>& top,
- const vector<bool>& propagate_down, vector<Blob<Dtype>*>* bottom);
+ const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom);
cudnnHandle_t handle_;
cudnnTensor4dDescriptor_t bottom_desc_;
* @f$
*/
virtual void Forward_cpu(const vector<Blob<Dtype>*>& bottom,
- vector<Blob<Dtype>*>* top);
+ const vector<Blob<Dtype>*>& top);
virtual void Forward_gpu(const vector<Blob<Dtype>*>& bottom,
- vector<Blob<Dtype>*>* top);
+ const vector<Blob<Dtype>*>& top);
/**
* @brief Computes the error gradient w.r.t. the sigmoid inputs.
* @f$ if propagate_down[0]
*/
virtual void Backward_cpu(const vector<Blob<Dtype>*>& top,
- const vector<bool>& propagate_down, vector<Blob<Dtype>*>* bottom);
+ const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom);
virtual void Backward_gpu(const vector<Blob<Dtype>*>& top,
- const vector<bool>& propagate_down, vector<Blob<Dtype>*>* bottom);
+ const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom);
};
#ifdef USE_CUDNN
explicit CuDNNTanHLayer(const LayerParameter& param)
: TanHLayer<Dtype>(param) {}
virtual void LayerSetUp(const vector<Blob<Dtype>*>& bottom,
- vector<Blob<Dtype>*>* top);
+ const vector<Blob<Dtype>*>& top);
virtual void Reshape(const vector<Blob<Dtype>*>& bottom,
- vector<Blob<Dtype>*>* top);
+ const vector<Blob<Dtype>*>& top);
virtual ~CuDNNTanHLayer();
protected:
virtual void Forward_gpu(const vector<Blob<Dtype>*>& bottom,
- vector<Blob<Dtype>*>* top);
+ const vector<Blob<Dtype>*>& top);
virtual void Backward_gpu(const vector<Blob<Dtype>*>& top,
- const vector<bool>& propagate_down, vector<Blob<Dtype>*>* bottom);
+ const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom);
cudnnHandle_t handle_;
cudnnTensor4dDescriptor_t bottom_desc_;
explicit ThresholdLayer(const LayerParameter& param)
: NeuronLayer<Dtype>(param) {}
virtual void LayerSetUp(const vector<Blob<Dtype>*>& bottom,
- vector<Blob<Dtype>*>* top);
+ const vector<Blob<Dtype>*>& top);
virtual inline LayerParameter_LayerType type() const {
return LayerParameter_LayerType_THRESHOLD;
* @f$
*/
virtual void Forward_cpu(const vector<Blob<Dtype>*>& bottom,
- vector<Blob<Dtype>*>* top);
+ const vector<Blob<Dtype>*>& top);
virtual void Forward_gpu(const vector<Blob<Dtype>*>& bottom,
- vector<Blob<Dtype>*>* top);
+ const vector<Blob<Dtype>*>& top);
/// @brief Not implemented (non-differentiable function)
virtual void Backward_cpu(const vector<Blob<Dtype>*>& top,
- const vector<bool>& propagate_down, vector<Blob<Dtype>*>* bottom) {
+ const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
NOT_IMPLEMENTED;
}
// layers.
// Note that after the gradient check, we do not guarantee that the data
// stored in the layer parameters and the blobs are unchanged.
- void CheckGradient(Layer<Dtype>* layer, vector<Blob<Dtype>*>* bottom,
- vector<Blob<Dtype>*>* top, int check_bottom = -1) {
- layer->SetUp(*bottom, top);
+ void CheckGradient(Layer<Dtype>* layer, const vector<Blob<Dtype>*>& bottom,
+ const vector<Blob<Dtype>*>& top, int check_bottom = -1) {
+ layer->SetUp(bottom, top);
CheckGradientSingle(layer, bottom, top, check_bottom, -1, -1);
}
void CheckGradientExhaustive(Layer<Dtype>* layer,
- vector<Blob<Dtype>*>* bottom, vector<Blob<Dtype>*>* top,
+ const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top,
int check_bottom = -1);
// CheckGradientEltwise can be used to test layers that perform element-wise
// computation only (e.g., neuron layers) -- where (d y_i) / (d x_j) = 0 when
// i != j.
void CheckGradientEltwise(Layer<Dtype>* layer,
- vector<Blob<Dtype>*>* bottom, vector<Blob<Dtype>*>* top);
+ const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top);
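// Illustrative usage sketch (fixture names assumed from a typical unit test;
// not part of this patch):
//
//   ReLULayer<Dtype> layer(layer_param);
//   GradientChecker<Dtype> checker(1e-2, 1e-3, 1701, 0., 0.01);
//   checker.CheckGradientEltwise(&layer, this->blob_bottom_vec_,
//       this->blob_top_vec_);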
- void CheckGradientSingle(Layer<Dtype>* layer, vector<Blob<Dtype>*>* bottom,
- vector<Blob<Dtype>*>* top, int check_bottom, int top_id,
- int top_data_id, bool element_wise = false);
+ void CheckGradientSingle(Layer<Dtype>* layer,
+ const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top,
+ int check_bottom, int top_id, int top_data_id, bool element_wise = false);
// Checks the gradient of a network. This network should not have any data
// layers or loss layers, since the function does not explicitly deal with
const vector<Blob<Dtype>*>& input);
protected:
- Dtype GetObjAndGradient(const Layer<Dtype>& layer, vector<Blob<Dtype>*>* top,
- int top_id = -1, int top_data_id = -1);
+ Dtype GetObjAndGradient(const Layer<Dtype>& layer,
+ const vector<Blob<Dtype>*>& top, int top_id = -1, int top_data_id = -1);
Dtype stepsize_;
Dtype threshold_;
unsigned int seed_;
template <typename Dtype>
void GradientChecker<Dtype>::CheckGradientSingle(Layer<Dtype>* layer,
- vector<Blob<Dtype>*>* bottom, vector<Blob<Dtype>*>* top,
+ const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top,
int check_bottom, int top_id, int top_data_id, bool element_wise) {
if (element_wise) {
CHECK_EQ(0, layer->blobs().size());
CHECK_LE(0, top_id);
CHECK_LE(0, top_data_id);
- const int top_count = (*top)[top_id]->count();
- for (int blob_id = 0; blob_id < bottom->size(); ++blob_id) {
- CHECK_EQ(top_count, (*bottom)[blob_id]->count());
+ const int top_count = top[top_id]->count();
+ for (int blob_id = 0; blob_id < bottom.size(); ++blob_id) {
+ CHECK_EQ(top_count, bottom[blob_id]->count());
}
}
// First, figure out what blobs we need to check against.
vector<Blob<Dtype>*> blobs_to_check;
- vector<bool> propagate_down(bottom->size(), check_bottom < 0);
+ vector<bool> propagate_down(bottom.size(), check_bottom < 0);
for (int i = 0; i < layer->blobs().size(); ++i) {
blobs_to_check.push_back(layer->blobs()[i].get());
}
if (check_bottom < 0) {
- for (int i = 0; i < bottom->size(); ++i) {
- blobs_to_check.push_back((*bottom)[i]);
+ for (int i = 0; i < bottom.size(); ++i) {
+ blobs_to_check.push_back(bottom[i]);
}
} else {
- CHECK_LT(check_bottom, bottom->size());
- blobs_to_check.push_back((*bottom)[check_bottom]);
+ CHECK_LT(check_bottom, bottom.size());
+ blobs_to_check.push_back(bottom[check_bottom]);
propagate_down[check_bottom] = true;
}
// Compute the gradient analytically using Backward
Caffe::set_random_seed(seed_);
// Ignore the loss from the layer (it's just the weighted sum of the losses
// from the top blobs, whose gradients we may want to test individually).
- layer->Forward(*bottom, top);
+ layer->Forward(bottom, top);
// Get additional loss from the objective
GetObjAndGradient(*layer, top, top_id, top_data_id);
- layer->Backward(*top, propagate_down, bottom);
+ layer->Backward(top, propagate_down, bottom);
// Store computed gradients for all checked blobs
vector<shared_ptr<Blob<Dtype> > >
computed_gradient_blobs(blobs_to_check.size());
// << current_blob->count() << " parameters.";
for (int feat_id = 0; feat_id < current_blob->count(); ++feat_id) {
// For an element-wise layer, we only need to do finite differencing to
- // compute the derivative of (*top)[top_id][top_data_id] w.r.t.
- // (*bottom)[blob_id][i] only for i == top_data_id. For any other
+ // compute the derivative of top[top_id][top_data_id] w.r.t.
+ // bottom[blob_id][i] only for i == top_data_id. For any other
// i != top_data_id, we know the derivative is 0 by definition, and simply
// check that that's true.
Dtype estimated_gradient = 0;
// Compute loss with stepsize_ added to input.
current_blob->mutable_cpu_data()[feat_id] += stepsize_;
Caffe::set_random_seed(seed_);
- layer->Forward(*bottom, top);
+ layer->Forward(bottom, top);
positive_objective =
GetObjAndGradient(*layer, top, top_id, top_data_id);
// Compute loss with stepsize_ subtracted from input.
current_blob->mutable_cpu_data()[feat_id] -= stepsize_ * 2;
Caffe::set_random_seed(seed_);
- layer->Forward(*bottom, top);
+ layer->Forward(bottom, top);
negative_objective =
GetObjAndGradient(*layer, top, top_id, top_data_id);
// Recover original input value.
template <typename Dtype>
void GradientChecker<Dtype>::CheckGradientExhaustive(Layer<Dtype>* layer,
- vector<Blob<Dtype>*>* bottom, vector<Blob<Dtype>*>* top, int check_bottom) {
- layer->SetUp(*bottom, top);
- CHECK_GT(top->size(), 0) << "Exhaustive mode requires at least one top blob.";
+ const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top,
+ int check_bottom) {
+ layer->SetUp(bottom, top);
+ CHECK_GT(top.size(), 0) << "Exhaustive mode requires at least one top blob.";
// LOG(ERROR) << "Exhaustive Mode.";
- for (int i = 0; i < top->size(); ++i) {
+ for (int i = 0; i < top.size(); ++i) {
// LOG(ERROR) << "Exhaustive: blob " << i << " size " << top[i]->count();
- for (int j = 0; j < (*top)[i]->count(); ++j) {
+ for (int j = 0; j < top[i]->count(); ++j) {
// LOG(ERROR) << "Exhaustive: blob " << i << " data " << j;
CheckGradientSingle(layer, bottom, top, check_bottom, i, j);
}
template <typename Dtype>
void GradientChecker<Dtype>::CheckGradientEltwise(Layer<Dtype>* layer,
- vector<Blob<Dtype>*>* bottom, vector<Blob<Dtype>*>* top) {
- layer->SetUp(*bottom, top);
- CHECK_GT(top->size(), 0) << "Eltwise mode requires at least one top blob.";
+ const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) {
+ layer->SetUp(bottom, top);
+ CHECK_GT(top.size(), 0) << "Eltwise mode requires at least one top blob.";
const int check_bottom = -1;
const bool element_wise = true;
- for (int i = 0; i < top->size(); ++i) {
- for (int j = 0; j < (*top)[i]->count(); ++j) {
+ for (int i = 0; i < top.size(); ++i) {
+ for (int j = 0; j < top[i]->count(); ++j) {
CheckGradientSingle(layer, bottom, top, check_bottom, i, j, element_wise);
}
}
template <typename Dtype>
Dtype GradientChecker<Dtype>::GetObjAndGradient(const Layer<Dtype>& layer,
- vector<Blob<Dtype>*>* top, int top_id, int top_data_id) {
+ const vector<Blob<Dtype>*>& top, int top_id, int top_data_id) {
Dtype loss = 0;
if (top_id < 0) {
// the loss will be half of the sum of squares of all outputs
- for (int i = 0; i < top->size(); ++i) {
- Blob<Dtype>* top_blob = (*top)[i];
+ for (int i = 0; i < top.size(); ++i) {
+ Blob<Dtype>* top_blob = top[i];
const Dtype* top_blob_data = top_blob->cpu_data();
Dtype* top_blob_diff = top_blob->mutable_cpu_diff();
int count = top_blob->count();
loss /= 2.;
} else {
// the loss will be the top_data_id-th element in the top_id-th blob.
- for (int i = 0; i < top->size(); ++i) {
- Blob<Dtype>* top_blob = (*top)[i];
+ for (int i = 0; i < top.size(); ++i) {
+ Blob<Dtype>* top_blob = top[i];
Dtype* top_blob_diff = top_blob->mutable_cpu_diff();
caffe_set(top_blob->count(), Dtype(0), top_blob_diff);
}
const Dtype loss_weight = 2;
- loss = (*top)[top_id]->cpu_data()[top_data_id] * loss_weight;
- (*top)[top_id]->mutable_cpu_diff()[top_data_id] = loss_weight;
+ loss = top[top_id]->cpu_data()[top_data_id] * loss_weight;
+ top[top_id]->mutable_cpu_diff()[top_data_id] = loss_weight;
}
return loss;
}
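// Worked example of the objective above (assumed values): for one top blob
// with data {1, 2, 3} and top_id < 0, the loss is
//   L = (1^2 + 2^2 + 3^2) / 2 = 7,  with dL/dy_k = y_k,
// while for top_id >= 0 and top_data_id = 1, L = 2 * y_1 = 4 and only that
// element's diff is set (to the loss_weight of 2).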
#define STUB_GPU(classname) \
template <typename Dtype> \
void classname<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom, \
- vector<Blob<Dtype>*>* top) { NO_GPU; } \
+ const vector<Blob<Dtype>*>& top) { NO_GPU; } \
template <typename Dtype> \
void classname<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top, \
const vector<bool>& propagate_down, \
- vector<Blob<Dtype>*>* bottom) { NO_GPU; } \
+ const vector<Blob<Dtype>*>& bottom) { NO_GPU; } \
#define STUB_GPU_FORWARD(classname, funcname) \
template <typename Dtype> \
void classname<Dtype>::funcname##_##gpu(const vector<Blob<Dtype>*>& bottom, \
- vector<Blob<Dtype>*>* top) { NO_GPU; } \
+ const vector<Blob<Dtype>*>& top) { NO_GPU; } \
#define STUB_GPU_BACKWARD(classname, funcname) \
template <typename Dtype> \
void classname<Dtype>::funcname##_##gpu(const vector<Blob<Dtype>*>& top, \
const vector<bool>& propagate_down, \
- vector<Blob<Dtype>*>* bottom) { NO_GPU; } \
+ const vector<Blob<Dtype>*>& bottom) { NO_GPU; } \
#else // Normal GPU + CPU Caffe.
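// Illustrative usage sketch (not part of this patch): a CPU_ONLY build stubs
// the GPU paths at the end of a layer's .cpp, e.g.
//
//   #ifdef CPU_ONLY
//   STUB_GPU(SoftmaxLayer);
//   #endif
//
// which expands to Forward_gpu/Backward_gpu definitions whose bodies are just
// NO_GPU, now taking const vector<Blob<Dtype>*>& like the CPU variants.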
explicit ConvolutionLayer(const LayerParameter& param)
: Layer<Dtype>(param) {}
virtual void LayerSetUp(const vector<Blob<Dtype>*>& bottom,
- vector<Blob<Dtype>*>* top);
+ const vector<Blob<Dtype>*>& top);
virtual void Reshape(const vector<Blob<Dtype>*>& bottom,
- vector<Blob<Dtype>*>* top);
+ const vector<Blob<Dtype>*>& top);
virtual inline LayerParameter_LayerType type() const {
return LayerParameter_LayerType_CONVOLUTION;
protected:
virtual void Forward_cpu(const vector<Blob<Dtype>*>& bottom,
- vector<Blob<Dtype>*>* top);
+ const vector<Blob<Dtype>*>& top);
virtual void Forward_gpu(const vector<Blob<Dtype>*>& bottom,
- vector<Blob<Dtype>*>* top);
+ const vector<Blob<Dtype>*>& top);
virtual void Backward_cpu(const vector<Blob<Dtype>*>& top,
- const vector<bool>& propagate_down, vector<Blob<Dtype>*>* bottom);
+ const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom);
virtual void Backward_gpu(const vector<Blob<Dtype>*>& top,
- const vector<bool>& propagate_down, vector<Blob<Dtype>*>* bottom);
+ const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom);
int kernel_h_, kernel_w_;
int stride_h_, stride_w_;
explicit CuDNNConvolutionLayer(const LayerParameter& param)
: ConvolutionLayer<Dtype>(param) {}
virtual void LayerSetUp(const vector<Blob<Dtype>*>& bottom,
- vector<Blob<Dtype>*>* top);
+ const vector<Blob<Dtype>*>& top);
virtual void Reshape(const vector<Blob<Dtype>*>& bottom,
- vector<Blob<Dtype>*>* top);
+ const vector<Blob<Dtype>*>& top);
virtual ~CuDNNConvolutionLayer();
protected:
virtual void Forward_gpu(const vector<Blob<Dtype>*>& bottom,
- vector<Blob<Dtype>*>* top);
+ const vector<Blob<Dtype>*>& top);
virtual void Backward_gpu(const vector<Blob<Dtype>*>& top,
- const vector<bool>& propagate_down, vector<Blob<Dtype>*>* bottom);
+ const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom);
cudnnHandle_t* handle_;
cudaStream_t* stream_;
explicit Im2colLayer(const LayerParameter& param)
: Layer<Dtype>(param) {}
virtual void LayerSetUp(const vector<Blob<Dtype>*>& bottom,
- vector<Blob<Dtype>*>* top);
+ const vector<Blob<Dtype>*>& top);
virtual void Reshape(const vector<Blob<Dtype>*>& bottom,
- vector<Blob<Dtype>*>* top);
+ const vector<Blob<Dtype>*>& top);
virtual inline LayerParameter_LayerType type() const {
return LayerParameter_LayerType_IM2COL;
protected:
virtual void Forward_cpu(const vector<Blob<Dtype>*>& bottom,
- vector<Blob<Dtype>*>* top);
+ const vector<Blob<Dtype>*>& top);
virtual void Forward_gpu(const vector<Blob<Dtype>*>& bottom,
- vector<Blob<Dtype>*>* top);
+ const vector<Blob<Dtype>*>& top);
virtual void Backward_cpu(const vector<Blob<Dtype>*>& top,
- const vector<bool>& propagate_down, vector<Blob<Dtype>*>* bottom);
+ const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom);
virtual void Backward_gpu(const vector<Blob<Dtype>*>& top,
- const vector<bool>& propagate_down, vector<Blob<Dtype>*>* bottom);
+ const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom);
int kernel_h_, kernel_w_;
int stride_h_, stride_w_;
explicit LRNLayer(const LayerParameter& param)
: Layer<Dtype>(param) {}
virtual void LayerSetUp(const vector<Blob<Dtype>*>& bottom,
- vector<Blob<Dtype>*>* top);
+ const vector<Blob<Dtype>*>& top);
virtual void Reshape(const vector<Blob<Dtype>*>& bottom,
- vector<Blob<Dtype>*>* top);
+ const vector<Blob<Dtype>*>& top);
virtual inline LayerParameter_LayerType type() const {
return LayerParameter_LayerType_LRN;
protected:
virtual void Forward_cpu(const vector<Blob<Dtype>*>& bottom,
- vector<Blob<Dtype>*>* top);
+ const vector<Blob<Dtype>*>& top);
virtual void Forward_gpu(const vector<Blob<Dtype>*>& bottom,
- vector<Blob<Dtype>*>* top);
+ const vector<Blob<Dtype>*>& top);
virtual void Backward_cpu(const vector<Blob<Dtype>*>& top,
- const vector<bool>& propagate_down, vector<Blob<Dtype>*>* bottom);
+ const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom);
virtual void Backward_gpu(const vector<Blob<Dtype>*>& top,
- const vector<bool>& propagate_down, vector<Blob<Dtype>*>* bottom);
+ const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom);
virtual void CrossChannelForward_cpu(const vector<Blob<Dtype>*>& bottom,
- vector<Blob<Dtype>*>* top);
+ const vector<Blob<Dtype>*>& top);
virtual void CrossChannelForward_gpu(const vector<Blob<Dtype>*>& bottom,
- vector<Blob<Dtype>*>* top);
+ const vector<Blob<Dtype>*>& top);
virtual void WithinChannelForward(const vector<Blob<Dtype>*>& bottom,
- vector<Blob<Dtype>*>* top);
+ const vector<Blob<Dtype>*>& top);
virtual void CrossChannelBackward_cpu(const vector<Blob<Dtype>*>& top,
- const vector<bool>& propagate_down, vector<Blob<Dtype>*>* bottom);
+ const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom);
virtual void CrossChannelBackward_gpu(const vector<Blob<Dtype>*>& top,
- const vector<bool>& propagate_down, vector<Blob<Dtype>*>* bottom);
+ const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom);
virtual void WithinChannelBackward(const vector<Blob<Dtype>*>& top,
- const vector<bool>& propagate_down, vector<Blob<Dtype>*>* bottom);
+ const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom);
int size_;
int pre_pad_;
explicit PoolingLayer(const LayerParameter& param)
: Layer<Dtype>(param) {}
virtual void LayerSetUp(const vector<Blob<Dtype>*>& bottom,
- vector<Blob<Dtype>*>* top);
+ const vector<Blob<Dtype>*>& top);
virtual void Reshape(const vector<Blob<Dtype>*>& bottom,
- vector<Blob<Dtype>*>* top);
+ const vector<Blob<Dtype>*>& top);
virtual inline LayerParameter_LayerType type() const {
return LayerParameter_LayerType_POOLING;
protected:
virtual void Forward_cpu(const vector<Blob<Dtype>*>& bottom,
- vector<Blob<Dtype>*>* top);
+ const vector<Blob<Dtype>*>& top);
virtual void Forward_gpu(const vector<Blob<Dtype>*>& bottom,
- vector<Blob<Dtype>*>* top);
+ const vector<Blob<Dtype>*>& top);
virtual void Backward_cpu(const vector<Blob<Dtype>*>& top,
- const vector<bool>& propagate_down, vector<Blob<Dtype>*>* bottom);
+ const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom);
virtual void Backward_gpu(const vector<Blob<Dtype>*>& top,
- const vector<bool>& propagate_down, vector<Blob<Dtype>*>* bottom);
+ const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom);
int kernel_h_, kernel_w_;
int stride_h_, stride_w_;
explicit CuDNNPoolingLayer(const LayerParameter& param)
: PoolingLayer<Dtype>(param) {}
virtual void LayerSetUp(const vector<Blob<Dtype>*>& bottom,
- vector<Blob<Dtype>*>* top);
+ const vector<Blob<Dtype>*>& top);
virtual void Reshape(const vector<Blob<Dtype>*>& bottom,
- vector<Blob<Dtype>*>* top);
+ const vector<Blob<Dtype>*>& top);
virtual ~CuDNNPoolingLayer();
protected:
virtual void Forward_gpu(const vector<Blob<Dtype>*>& bottom,
- vector<Blob<Dtype>*>* top);
+ const vector<Blob<Dtype>*>& top);
virtual void Backward_gpu(const vector<Blob<Dtype>*>& top,
- const vector<bool>& propagate_down, vector<Blob<Dtype>*>* bottom);
+ const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom);
cudnnHandle_t handle_;
cudnnTensor4dDescriptor_t bottom_desc_, top_desc_;
template <typename Dtype>
void AbsValLayer<Dtype>::LayerSetUp(const vector<Blob<Dtype>*>& bottom,
- vector<Blob<Dtype>*>* top) {
+ const vector<Blob<Dtype>*>& top) {
NeuronLayer<Dtype>::LayerSetUp(bottom, top);
- CHECK_NE((*top)[0], bottom[0]) << this->type_name() << " Layer does not "
+ CHECK_NE(top[0], bottom[0]) << this->type_name() << " Layer does not "
"allow in-place computation.";
}
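For orientation, a minimal sketch of the convention this patch applies throughout, using a hypothetical IdentityLayer that is not part of the diff: the blob-pointer vectors are now passed by const reference, so (*top)[i] becomes top[i], while the Blob objects themselves remain writable through their mutable accessors.
template <typename Dtype>
void IdentityLayer<Dtype>::Forward_cpu(const vector<Blob<Dtype>*>& bottom,
    const vector<Blob<Dtype>*>& top) {
  // The vector of pointers is const; the pointed-to blobs are not.
  const Dtype* in = bottom[0]->cpu_data();
  Dtype* out = top[0]->mutable_cpu_data();
  caffe_copy(bottom[0]->count(), in, out);
}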
template <typename Dtype>
void AbsValLayer<Dtype>::Forward_cpu(
- const vector<Blob<Dtype>*>& bottom, vector<Blob<Dtype>*>* top) {
- const int count = (*top)[0]->count();
- Dtype* top_data = (*top)[0]->mutable_cpu_data();
+ const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) {
+ const int count = top[0]->count();
+ Dtype* top_data = top[0]->mutable_cpu_data();
caffe_abs(count, bottom[0]->cpu_data(), top_data);
}
template <typename Dtype>
void AbsValLayer<Dtype>::Backward_cpu(const vector<Blob<Dtype>*>& top,
- const vector<bool>& propagate_down, vector<Blob<Dtype>*>* bottom) {
+ const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
const int count = top[0]->count();
const Dtype* top_data = top[0]->cpu_data();
const Dtype* top_diff = top[0]->cpu_diff();
if (propagate_down[0]) {
- const Dtype* bottom_data = (*bottom)[0]->cpu_data();
- Dtype* bottom_diff = (*bottom)[0]->mutable_cpu_diff();
+ const Dtype* bottom_data = bottom[0]->cpu_data();
+ Dtype* bottom_diff = bottom[0]->mutable_cpu_diff();
caffe_div(count, top_data, bottom_data, bottom_diff);
caffe_mul(count, bottom_diff, top_diff, bottom_diff);
}
template <typename Dtype>
void AbsValLayer<Dtype>::Forward_gpu(
- const vector<Blob<Dtype>*>& bottom, vector<Blob<Dtype>*>* top) {
- const int count = (*top)[0]->count();
- Dtype* top_data = (*top)[0]->mutable_gpu_data();
+ const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) {
+ const int count = top[0]->count();
+ Dtype* top_data = top[0]->mutable_gpu_data();
caffe_gpu_abs(count, bottom[0]->gpu_data(), top_data);
}
template <typename Dtype>
void AbsValLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
- const vector<bool>& propagate_down, vector<Blob<Dtype>*>* bottom) {
+ const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
const int count = top[0]->count();
const Dtype* top_data = top[0]->gpu_data();
const Dtype* top_diff = top[0]->gpu_diff();
if (propagate_down[0]) {
- const Dtype* bottom_data = (*bottom)[0]->gpu_data();
- Dtype* bottom_diff = (*bottom)[0]->mutable_gpu_diff();
+ const Dtype* bottom_data = bottom[0]->gpu_data();
+ Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
caffe_gpu_div(count, top_data, bottom_data, bottom_diff);
caffe_gpu_mul(count, bottom_diff, top_diff, bottom_diff);
}
template <typename Dtype>
void AccuracyLayer<Dtype>::LayerSetUp(
- const vector<Blob<Dtype>*>& bottom, vector<Blob<Dtype>*>* top) {
+ const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) {
top_k_ = this->layer_param_.accuracy_param().top_k();
}
template <typename Dtype>
void AccuracyLayer<Dtype>::Reshape(
- const vector<Blob<Dtype>*>& bottom, vector<Blob<Dtype>*>* top) {
+ const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) {
CHECK_EQ(bottom[0]->num(), bottom[1]->num())
<< "The data and label should have the same number.";
CHECK_LE(top_k_, bottom[0]->count() / bottom[0]->num())
CHECK_EQ(bottom[1]->channels(), 1);
CHECK_EQ(bottom[1]->height(), 1);
CHECK_EQ(bottom[1]->width(), 1);
- (*top)[0]->Reshape(1, 1, 1, 1);
+ top[0]->Reshape(1, 1, 1, 1);
}
template <typename Dtype>
void AccuracyLayer<Dtype>::Forward_cpu(const vector<Blob<Dtype>*>& bottom,
- vector<Blob<Dtype>*>* top) {
+ const vector<Blob<Dtype>*>& top) {
Dtype accuracy = 0;
const Dtype* bottom_data = bottom[0]->cpu_data();
const Dtype* bottom_label = bottom[1]->cpu_data();
}
// LOG(INFO) << "Accuracy: " << accuracy;
- (*top)[0]->mutable_cpu_data()[0] = accuracy / num;
+ top[0]->mutable_cpu_data()[0] = accuracy / num;
// Accuracy layer should not be used as a loss function.
}
template <typename Dtype>
void ArgMaxLayer<Dtype>::LayerSetUp(const vector<Blob<Dtype>*>& bottom,
- vector<Blob<Dtype>*>* top) {
+ const vector<Blob<Dtype>*>& top) {
out_max_val_ = this->layer_param_.argmax_param().out_max_val();
top_k_ = this->layer_param_.argmax_param().top_k();
CHECK_GE(top_k_, 1) << " top k must not be less than 1.";
template <typename Dtype>
void ArgMaxLayer<Dtype>::Reshape(const vector<Blob<Dtype>*>& bottom,
- vector<Blob<Dtype>*>* top) {
+ const vector<Blob<Dtype>*>& top) {
if (out_max_val_) {
// Produces max_ind and max_val
- (*top)[0]->Reshape(bottom[0]->num(), 2, top_k_, 1);
+ top[0]->Reshape(bottom[0]->num(), 2, top_k_, 1);
} else {
// Produces only max_ind
- (*top)[0]->Reshape(bottom[0]->num(), 1, top_k_, 1);
+ top[0]->Reshape(bottom[0]->num(), 1, top_k_, 1);
}
}
template <typename Dtype>
void ArgMaxLayer<Dtype>::Forward_cpu(const vector<Blob<Dtype>*>& bottom,
- vector<Blob<Dtype>*>* top) {
+ const vector<Blob<Dtype>*>& top) {
const Dtype* bottom_data = bottom[0]->cpu_data();
- Dtype* top_data = (*top)[0]->mutable_cpu_data();
+ Dtype* top_data = top[0]->mutable_cpu_data();
int num = bottom[0]->num();
int dim = bottom[0]->count() / bottom[0]->num();
for (int i = 0; i < num; ++i) {
bottom_data_vector.begin(), bottom_data_vector.begin() + top_k_,
bottom_data_vector.end(), std::greater<std::pair<Dtype, int> >());
for (int j = 0; j < top_k_; ++j) {
- top_data[(*top)[0]->offset(i, 0, j)] = bottom_data_vector[j].second;
+ top_data[top[0]->offset(i, 0, j)] = bottom_data_vector[j].second;
}
if (out_max_val_) {
for (int j = 0; j < top_k_; ++j) {
- top_data[(*top)[0]->offset(i, 1, j)] = bottom_data_vector[j].first;
+ top_data[top[0]->offset(i, 1, j)] = bottom_data_vector[j].first;
}
}
}
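The top-k selection above is built on std::partial_sort over (value, index) pairs. A self-contained sketch of that pattern, with a hypothetical helper name that is not part of the diff:
#include <algorithm>
#include <functional>
#include <utility>
#include <vector>
template <typename Dtype>
std::vector<int> TopKIndices(const Dtype* data, int dim, int top_k) {
  std::vector<std::pair<Dtype, int> > pairs(dim);
  for (int k = 0; k < dim; ++k) {
    pairs[k] = std::make_pair(data[k], k);
  }
  // Only the first top_k entries are sorted, largest values first.
  std::partial_sort(pairs.begin(), pairs.begin() + top_k, pairs.end(),
      std::greater<std::pair<Dtype, int> >());
  std::vector<int> indices(top_k);
  for (int k = 0; k < top_k; ++k) {
    indices[k] = pairs[k].second;
  }
  return indices;
}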
template <typename Dtype>
void BaseDataLayer<Dtype>::LayerSetUp(const vector<Blob<Dtype>*>& bottom,
- vector<Blob<Dtype>*>* top) {
- if (top->size() == 1) {
+ const vector<Blob<Dtype>*>& top) {
+ if (top.size() == 1) {
output_labels_ = false;
} else {
output_labels_ = true;
template <typename Dtype>
void BasePrefetchingDataLayer<Dtype>::LayerSetUp(
- const vector<Blob<Dtype>*>& bottom, vector<Blob<Dtype>*>* top) {
+ const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) {
BaseDataLayer<Dtype>::LayerSetUp(bottom, top);
// Now, start the prefetch thread. Before calling prefetch, we make two
// cpu_data calls so that the prefetch thread does not accidentally make
template <typename Dtype>
void BasePrefetchingDataLayer<Dtype>::Forward_cpu(
- const vector<Blob<Dtype>*>& bottom, vector<Blob<Dtype>*>* top) {
+ const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) {
// First, join the thread
JoinPrefetchThread();
// Copy the data
caffe_copy(prefetch_data_.count(), prefetch_data_.cpu_data(),
- (*top)[0]->mutable_cpu_data());
+ top[0]->mutable_cpu_data());
if (this->output_labels_) {
caffe_copy(prefetch_label_.count(), prefetch_label_.cpu_data(),
- (*top)[1]->mutable_cpu_data());
+ top[1]->mutable_cpu_data());
}
// Start a new prefetch thread
CreatePrefetchThread();
template <typename Dtype>
void BasePrefetchingDataLayer<Dtype>::Forward_gpu(
- const vector<Blob<Dtype>*>& bottom, vector<Blob<Dtype>*>* top) {
+ const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) {
// First, join the thread
JoinPrefetchThread();
// Copy the data
caffe_copy(prefetch_data_.count(), prefetch_data_.cpu_data(),
- (*top)[0]->mutable_gpu_data());
+ top[0]->mutable_gpu_data());
if (this->output_labels_) {
caffe_copy(prefetch_label_.count(), prefetch_label_.cpu_data(),
- (*top)[1]->mutable_gpu_data());
+ top[1]->mutable_gpu_data());
}
// Start a new prefetch thread
CreatePrefetchThread();
template <typename Dtype>
void BNLLLayer<Dtype>::Forward_cpu(const vector<Blob<Dtype>*>& bottom,
- vector<Blob<Dtype>*>* top) {
+ const vector<Blob<Dtype>*>& top) {
const Dtype* bottom_data = bottom[0]->cpu_data();
- Dtype* top_data = (*top)[0]->mutable_cpu_data();
+ Dtype* top_data = top[0]->mutable_cpu_data();
const int count = bottom[0]->count();
for (int i = 0; i < count; ++i) {
top_data[i] = bottom_data[i] > 0 ?
template <typename Dtype>
void BNLLLayer<Dtype>::Backward_cpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down,
- vector<Blob<Dtype>*>* bottom) {
+ const vector<Blob<Dtype>*>& bottom) {
if (propagate_down[0]) {
- const Dtype* bottom_data = (*bottom)[0]->cpu_data();
+ const Dtype* bottom_data = bottom[0]->cpu_data();
const Dtype* top_diff = top[0]->cpu_diff();
- Dtype* bottom_diff = (*bottom)[0]->mutable_cpu_diff();
- const int count = (*bottom)[0]->count();
+ Dtype* bottom_diff = bottom[0]->mutable_cpu_diff();
+ const int count = bottom[0]->count();
Dtype expval;
for (int i = 0; i < count; ++i) {
expval = exp(std::min(bottom_data[i], Dtype(kBNLL_THRESHOLD)));
template <typename Dtype>
void BNLLLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
- vector<Blob<Dtype>*>* top) {
+ const vector<Blob<Dtype>*>& top) {
const Dtype* bottom_data = bottom[0]->gpu_data();
- Dtype* top_data = (*top)[0]->mutable_gpu_data();
+ Dtype* top_data = top[0]->mutable_gpu_data();
const int count = bottom[0]->count();
// NOLINT_NEXT_LINE(whitespace/operators)
BNLLForward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>(
template <typename Dtype>
void BNLLLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down,
- vector<Blob<Dtype>*>* bottom) {
+ const vector<Blob<Dtype>*>& bottom) {
if (propagate_down[0]) {
- const Dtype* bottom_data = (*bottom)[0]->gpu_data();
+ const Dtype* bottom_data = bottom[0]->gpu_data();
const Dtype* top_diff = top[0]->gpu_diff();
- Dtype* bottom_diff = (*bottom)[0]->mutable_gpu_diff();
- const int count = (*bottom)[0]->count();
+ Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
+ const int count = bottom[0]->count();
// NOLINT_NEXT_LINE(whitespace/operators)
BNLLBackward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>(
count, top_diff, bottom_data, bottom_diff);
template <typename Dtype>
void ConcatLayer<Dtype>::LayerSetUp(const vector<Blob<Dtype>*>& bottom,
- vector<Blob<Dtype>*>* top) {
+ const vector<Blob<Dtype>*>& top) {
concat_dim_ = this->layer_param_.concat_param().concat_dim();
CHECK_GE(concat_dim_, 0) <<
"concat_dim should be >= 0";
template <typename Dtype>
void ConcatLayer<Dtype>::Reshape(const vector<Blob<Dtype>*>& bottom,
- vector<Blob<Dtype>*>* top) {
+ const vector<Blob<Dtype>*>& top) {
// Initialize with the first blob.
count_ = bottom[0]->count();
num_ = bottom[0]->num();
width_ += bottom[i]->width();
}
}
- (*top)[0]->Reshape(num_, channels_, height_, width_);
- CHECK_EQ(count_, (*top)[0]->count());
+ top[0]->Reshape(num_, channels_, height_, width_);
+ CHECK_EQ(count_, top[0]->count());
}
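As a concrete shape check for the Reshape above, with illustrative numbers: concatenating bottoms of shape 10x3x32x32 and 10x5x32x32 along concat_dim_ == 1 yields a top of shape 10x8x32x32, and the accumulated count_ (10*3*32*32 + 10*5*32*32 = 10*8*32*32 = 81920) is exactly what the CHECK_EQ against top[0]->count() verifies.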
template <typename Dtype>
void ConcatLayer<Dtype>::Forward_cpu(const vector<Blob<Dtype>*>& bottom,
- vector<Blob<Dtype>*>* top) {
- Dtype* top_data = (*top)[0]->mutable_cpu_data();
+ const vector<Blob<Dtype>*>& top) {
+ Dtype* top_data = top[0]->mutable_cpu_data();
if (concat_dim_== 0) {
int offset_num = 0;
for (int i = 0; i < bottom.size(); ++i) {
const Dtype* bottom_data = bottom[i]->cpu_data();
int num_elem = bottom[i]->count();
- caffe_copy(num_elem, bottom_data, top_data+(*top)[0]->offset(offset_num));
+ caffe_copy(num_elem, bottom_data, top_data+top[0]->offset(offset_num));
offset_num += bottom[i]->num();
}
} else if (concat_dim_ == 1) {
bottom[i]->channels()*bottom[i]->height()*bottom[i]->width();
for (int n = 0; n < num_; ++n) {
caffe_copy(num_elem, bottom_data+bottom[i]->offset(n),
- top_data+(*top)[0]->offset(n, offset_channel));
+ top_data+top[0]->offset(n, offset_channel));
}
offset_channel += bottom[i]->channels();
} // concat_dim_ is guaranteed to be 0 or 1 by LayerSetUp.
template <typename Dtype>
void ConcatLayer<Dtype>::Backward_cpu(const vector<Blob<Dtype>*>& top,
- const vector<bool>& propagate_down, vector<Blob<Dtype>*>* bottom) {
+ const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
const Dtype* top_diff = top[0]->cpu_diff();
if (concat_dim_ == 0) {
int offset_num = 0;
- for (int i = 0; i < bottom->size(); ++i) {
- Blob<Dtype>* blob = (*bottom)[i];
+ for (int i = 0; i < bottom.size(); ++i) {
+ Blob<Dtype>* blob = bottom[i];
if (propagate_down[i]) {
Dtype* bottom_diff = blob->mutable_cpu_diff();
caffe_copy(blob->count(), top_diff + top[0]->offset(offset_num),
}
} else if (concat_dim_ == 1) {
int offset_channel = 0;
- for (int i = 0; i < bottom->size(); ++i) {
- Blob<Dtype>* blob = (*bottom)[i];
+ for (int i = 0; i < bottom.size(); ++i) {
+ Blob<Dtype>* blob = bottom[i];
if (propagate_down[i]) {
Dtype* bottom_diff = blob->mutable_cpu_diff();
int num_elem = blob->channels()*blob->height()*blob->width();
template <typename Dtype>
void ConcatLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
- vector<Blob<Dtype>*>* top) {
- Dtype* top_data = (*top)[0]->mutable_gpu_data();
+ const vector<Blob<Dtype>*>& top) {
+ Dtype* top_data = top[0]->mutable_gpu_data();
if (concat_dim_ == 0) {
int offset_num = 0;
for (int i = 0; i < bottom.size(); ++i) {
const Dtype* bottom_data = bottom[i]->gpu_data();
caffe_copy(bottom[i]->count(), bottom_data,
- top_data + (*top)[0]->offset(offset_num));
+ top_data + top[0]->offset(offset_num));
offset_num += bottom[i]->num();
}
} else if (concat_dim_ == 1) {
bottom[i]->channels() * bottom[i]->height() * bottom[i]->width();
for (int n = 0; n < num_; ++n) {
caffe_copy(num_elem, bottom_data+bottom[i]->offset(n),
- top_data + (*top)[0]->offset(n, offset_channel));
+ top_data + top[0]->offset(n, offset_channel));
}
offset_channel += bottom[i]->channels();
}
template <typename Dtype>
void ConcatLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
- const vector<bool>& propagate_down, vector<Blob<Dtype>*>* bottom) {
+ const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
const Dtype* top_diff = top[0]->gpu_diff();
if (concat_dim_ == 0) {
int offset_num = 0;
- for (int i = 0; i < bottom->size(); ++i) {
- Blob<Dtype>* blob = (*bottom)[i];
+ for (int i = 0; i < bottom.size(); ++i) {
+ Blob<Dtype>* blob = bottom[i];
if (propagate_down[i]) {
Dtype* bottom_diff = blob->mutable_gpu_diff();
caffe_copy(blob->count(), top_diff + top[0]->offset(offset_num),
}
} else if (concat_dim_ == 1) {
int offset_channel = 0;
- for (int i = 0; i < bottom->size(); ++i) {
- Blob<Dtype>* blob = (*bottom)[i];
+ for (int i = 0; i < bottom.size(); ++i) {
+ Blob<Dtype>* blob = bottom[i];
if (propagate_down[i]) {
Dtype* bottom_diff = blob->mutable_gpu_diff();
int num_elem = blob->channels()*blob->height()*blob->width();
template <typename Dtype>
void ContrastiveLossLayer<Dtype>::LayerSetUp(
- const vector<Blob<Dtype>*>& bottom, vector<Blob<Dtype>*>* top) {
+ const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) {
LossLayer<Dtype>::LayerSetUp(bottom, top);
CHECK_EQ(bottom[0]->channels(), bottom[1]->channels());
CHECK_EQ(bottom[0]->height(), 1);
template <typename Dtype>
void ContrastiveLossLayer<Dtype>::Forward_cpu(
const vector<Blob<Dtype>*>& bottom,
- vector<Blob<Dtype>*>* top) {
+ const vector<Blob<Dtype>*>& top) {
int count = bottom[0]->count();
caffe_sub(
count,
}
}
loss = loss / static_cast<Dtype>(bottom[0]->num()) / Dtype(2);
- (*top)[0]->mutable_cpu_data()[0] = loss;
+ top[0]->mutable_cpu_data()[0] = loss;
}
template <typename Dtype>
void ContrastiveLossLayer<Dtype>::Backward_cpu(const vector<Blob<Dtype>*>& top,
- const vector<bool>& propagate_down, vector<Blob<Dtype>*>* bottom) {
+ const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
Dtype margin = this->layer_param_.contrastive_loss_param().margin();
for (int i = 0; i < 2; ++i) {
if (propagate_down[i]) {
const Dtype sign = (i == 0) ? 1 : -1;
const Dtype alpha = sign * top[0]->cpu_diff()[0] /
- static_cast<Dtype>((*bottom)[i]->num());
- int num = (*bottom)[i]->num();
- int channels = (*bottom)[i]->channels();
+ static_cast<Dtype>(bottom[i]->num());
+ int num = bottom[i]->num();
+ int channels = bottom[i]->channels();
for (int j = 0; j < num; ++j) {
- Dtype* bout = (*bottom)[i]->mutable_cpu_diff();
- if (static_cast<int>((*bottom)[2]->cpu_data()[j])) { // similar pairs
+ Dtype* bout = bottom[i]->mutable_cpu_diff();
+ if (static_cast<int>(bottom[2]->cpu_data()[j])) { // similar pairs
caffe_cpu_axpby(
channels,
alpha,
template <typename Dtype>
void ContrastiveLossLayer<Dtype>::Forward_gpu(
- const vector<Blob<Dtype>*>& bottom, vector<Blob<Dtype>*>* top) {
+ const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) {
const int count = bottom[0]->count();
caffe_gpu_sub(
count,
}
}
loss = loss / static_cast<Dtype>(bottom[0]->num()) / Dtype(2);
- (*top)[0]->mutable_cpu_data()[0] = loss;
+ top[0]->mutable_cpu_data()[0] = loss;
}
template <typename Dtype>
void ContrastiveLossLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
- const vector<bool>& propagate_down, vector<Blob<Dtype>*>* bottom) {
+ const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
for (int i = 0; i < 2; ++i) {
if (propagate_down[i]) {
- const int count = (*bottom)[0]->count();
- const int channels = (*bottom)[0]->channels();
+ const int count = bottom[0]->count();
+ const int channels = bottom[0]->channels();
Dtype margin = this->layer_param_.contrastive_loss_param().margin();
const Dtype sign = (i == 0) ? 1 : -1;
const Dtype alpha = sign * top[0]->cpu_diff()[0] /
- static_cast<Dtype>((*bottom)[0]->num());
+ static_cast<Dtype>(bottom[0]->num());
// NOLINT_NEXT_LINE(whitespace/operators)
CLLForward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>(
count, channels, margin, alpha,
- (*bottom)[2]->gpu_data(), // pair similarity 0 or 1
+ bottom[2]->gpu_data(), // pair similarity 0 or 1
diff_.gpu_data(), // the cached eltwise difference between a and b
dist_sq_.gpu_data(), // the cached square distance between a and b
- (*bottom)[i]->mutable_gpu_diff());
+ bottom[i]->mutable_gpu_diff());
CUDA_POST_KERNEL_CHECK;
}
}
template <typename Dtype>
void ConvolutionLayer<Dtype>::LayerSetUp(const vector<Blob<Dtype>*>& bottom,
- vector<Blob<Dtype>*>* top) {
+ const vector<Blob<Dtype>*>& top) {
// Configure the kernel size, padding, stride, and inputs.
ConvolutionParameter conv_param = this->layer_param_.convolution_param();
CHECK(!conv_param.has_kernel_size() !=
template <typename Dtype>
void ConvolutionLayer<Dtype>::Reshape(const vector<Blob<Dtype>*>& bottom,
- vector<Blob<Dtype>*>* top) {
+ const vector<Blob<Dtype>*>& top) {
num_ = bottom[0]->num();
height_ = bottom[0]->height();
width_ = bottom[0]->width();
height_out_ =
(height_ + 2 * pad_h_ - kernel_h_) / stride_h_ + 1;
width_out_ = (width_ + 2 * pad_w_ - kernel_w_) / stride_w_ + 1;
- for (int top_id = 0; top_id < top->size(); ++top_id) {
- (*top)[top_id]->Reshape(num_, num_output_, height_out_, width_out_);
+ for (int top_id = 0; top_id < top.size(); ++top_id) {
+ top[top_id]->Reshape(num_, num_output_, height_out_, width_out_);
}
// Prepare the matrix multiplication computation.
// Each input will be convolved as a single GEMM.
// overly large memory usage.
col_buffer_.Reshape(
1, channels_ * kernel_h_ * kernel_w_, height_out_, width_out_);
- for (int top_id = 0; top_id < top->size(); ++top_id) {
- (*top)[top_id]->Reshape(num_, num_output_, height_out_, width_out_);
+ for (int top_id = 0; top_id < top.size(); ++top_id) {
+ top[top_id]->Reshape(num_, num_output_, height_out_, width_out_);
}
// Set up the all ones "bias multiplier" for adding biases by BLAS
if (bias_term_) {
template <typename Dtype>
void ConvolutionLayer<Dtype>::Forward_cpu(const vector<Blob<Dtype>*>& bottom,
- vector<Blob<Dtype>*>* top) {
+ const vector<Blob<Dtype>*>& top) {
for (int i = 0; i < bottom.size(); ++i) {
const Dtype* bottom_data = bottom[i]->cpu_data();
- Dtype* top_data = (*top)[i]->mutable_cpu_data();
+ Dtype* top_data = top[i]->mutable_cpu_data();
Dtype* col_data = col_buffer_.mutable_cpu_data();
const Dtype* weight = this->blobs_[0]->cpu_data();
int weight_offset = M_ * K_; // number of filter parameters in a group
for (int g = 0; g < group_; ++g) {
caffe_cpu_gemm<Dtype>(CblasNoTrans, CblasNoTrans, M_, N_, K_,
(Dtype)1., weight + weight_offset * g, col_data + col_offset * g,
- (Dtype)0., top_data + (*top)[i]->offset(n) + top_offset * g);
+ (Dtype)0., top_data + top[i]->offset(n) + top_offset * g);
}
// Add bias.
if (bias_term_) {
caffe_cpu_gemm<Dtype>(CblasNoTrans, CblasNoTrans, num_output_,
N_, 1, (Dtype)1., this->blobs_[1]->cpu_data(),
bias_multiplier_.cpu_data(),
- (Dtype)1., top_data + (*top)[i]->offset(n));
+ (Dtype)1., top_data + top[i]->offset(n));
}
}
}
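A note on the GEMM bookkeeping above; M_, K_, and N_ are set up outside the hunks shown, so treat the exact expressions as a summary rather than part of the diff: per image and per group, the filter matrix is M_ x K_ and the im2col buffer col_data is K_ x N_, with roughly M_ = num_output_ / group_, K_ = channels_ * kernel_h_ * kernel_w_ / group_, and N_ = height_out_ * width_out_, so each caffe_cpu_gemm call writes one M_ x N_ output map starting at top_data + top[i]->offset(n) + top_offset * g.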
template <typename Dtype>
void ConvolutionLayer<Dtype>::Backward_cpu(const vector<Blob<Dtype>*>& top,
- const vector<bool>& propagate_down, vector<Blob<Dtype>*>* bottom) {
+ const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
const Dtype* weight = NULL;
Dtype* weight_diff = NULL;
if (this->param_propagate_down_[0]) {
}
Dtype* col_data = col_buffer_.mutable_cpu_data();
Dtype* col_diff = col_buffer_.mutable_cpu_diff();
- const Dtype* bottom_data = (*bottom)[i]->cpu_data();
- Dtype* bottom_diff = (*bottom)[i]->mutable_cpu_diff();
+ const Dtype* bottom_data = bottom[i]->cpu_data();
+ Dtype* bottom_diff = bottom[i]->mutable_cpu_diff();
for (int n = 0; n < num_; ++n) {
// Since we saved memory in the forward pass by not storing all col
// data, we will need to recompute them.
- im2col_cpu(bottom_data + (*bottom)[i]->offset(n), channels_, height_,
+ im2col_cpu(bottom_data + bottom[i]->offset(n), channels_, height_,
width_, kernel_h_, kernel_w_, pad_h_, pad_w_,
stride_h_, stride_w_, col_data);
// gradient w.r.t. weight. Note that we will accumulate diffs.
// col2im back to the data
col2im_cpu(col_diff, channels_, height_, width_,
kernel_h_, kernel_w_, pad_h_, pad_w_,
- stride_h_, stride_w_, bottom_diff + (*bottom)[i]->offset(n));
+ stride_h_, stride_w_, bottom_diff + bottom[i]->offset(n));
}
}
}
/// @brief refer to CPU forward -- the BLAS implementation is the same.
template <typename Dtype>
void ConvolutionLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
- vector<Blob<Dtype>*>* top) {
+ const vector<Blob<Dtype>*>& top) {
for (int i = 0; i < bottom.size(); ++i) {
const Dtype* bottom_data = bottom[i]->gpu_data();
- Dtype* top_data = (*top)[i]->mutable_gpu_data();
+ Dtype* top_data = top[i]->mutable_gpu_data();
Dtype* col_data = col_buffer_.mutable_gpu_data();
const Dtype* weight = this->blobs_[0]->gpu_data();
int weight_offset = M_ * K_;
for (int g = 0; g < group_; ++g) {
caffe_gpu_gemm<Dtype>(CblasNoTrans, CblasNoTrans, M_, N_, K_,
(Dtype)1., weight + weight_offset * g, col_data + col_offset * g,
- (Dtype)0., top_data + (*top)[i]->offset(n) + top_offset * g);
+ (Dtype)0., top_data + top[i]->offset(n) + top_offset * g);
}
// Add bias.
if (bias_term_) {
caffe_gpu_gemm<Dtype>(CblasNoTrans, CblasNoTrans, num_output_,
N_, 1, (Dtype)1., this->blobs_[1]->gpu_data(),
bias_multiplier_.gpu_data(),
- (Dtype)1., top_data + (*top)[i]->offset(n));
+ (Dtype)1., top_data + top[i]->offset(n));
}
}
}
/// @brief refer to CPU backward -- the BLAS implementation is the same.
template <typename Dtype>
void ConvolutionLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
- const vector<bool>& propagate_down, vector<Blob<Dtype>*>* bottom) {
+ const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
const Dtype* weight = NULL;
Dtype* weight_diff = NULL;
if (this->param_propagate_down_[0]) {
}
Dtype* col_data = col_buffer_.mutable_gpu_data();
Dtype* col_diff = col_buffer_.mutable_gpu_diff();
- const Dtype* bottom_data = (*bottom)[i]->gpu_data();
- Dtype* bottom_diff = (*bottom)[i]->mutable_gpu_diff();
+ const Dtype* bottom_data = bottom[i]->gpu_data();
+ Dtype* bottom_diff = bottom[i]->mutable_gpu_diff();
for (int n = 0; n < num_; ++n) {
// Since we saved memory in the forward pass by not storing all col
// data, we will need to recompute them.
- im2col_gpu(bottom_data + (*bottom)[i]->offset(n), channels_, height_,
+ im2col_gpu(bottom_data + bottom[i]->offset(n), channels_, height_,
width_, kernel_h_, kernel_w_, pad_h_, pad_w_,
stride_h_, stride_w_, col_data);
// gradient w.r.t. weight. Note that we will accumulate diffs.
// col2im back to the data
col2im_gpu(col_diff, channels_, height_, width_,
kernel_h_, kernel_w_, pad_h_, pad_w_, stride_h_, stride_w_,
- bottom_diff + (*bottom)[i]->offset(n));
+ bottom_diff + bottom[i]->offset(n));
}
}
}
template <typename Dtype>
void CuDNNConvolutionLayer<Dtype>::LayerSetUp(
- const vector<Blob<Dtype>*>& bottom, vector<Blob<Dtype>*>* top) {
+ const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) {
ConvolutionLayer<Dtype>::LayerSetUp(bottom, top);
// Initialize CUDA streams and cuDNN.
stream_ = new cudaStream_t[this->group_ * CUDNN_STREAMS_PER_GROUP];
template <typename Dtype>
void CuDNNConvolutionLayer<Dtype>::Reshape(
- const vector<Blob<Dtype>*>& bottom, vector<Blob<Dtype>*>* top) {
+ const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) {
ConvolutionLayer<Dtype>::Reshape(bottom, top);
bottom_offset_ = (this->channels_ / this->group_)
* this->height_ * this->width_;
template <typename Dtype>
void CuDNNConvolutionLayer<Dtype>::Forward_gpu(
- const vector<Blob<Dtype>*>& bottom, vector<Blob<Dtype>*>* top) {
+ const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) {
for (int i = 0; i < bottom.size(); ++i) {
const Dtype* bottom_data = bottom[i]->gpu_data();
- Dtype* top_data = (*top)[i]->mutable_gpu_data();
+ Dtype* top_data = top[i]->mutable_gpu_data();
const Dtype* weight = this->blobs_[0]->gpu_data();
// Forward through cuDNN in parallel over groups.
template <typename Dtype>
void CuDNNConvolutionLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
- const vector<bool>& propagate_down, vector<Blob<Dtype>*>* bottom) {
+ const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
const Dtype* weight = NULL;
Dtype* weight_diff = NULL;
if (this->param_propagate_down_[0]) {
// Gradient w.r.t. weights.
if (this->param_propagate_down_[0]) {
- const Dtype* bottom_data = (*bottom)[i]->gpu_data();
+ const Dtype* bottom_data = bottom[i]->gpu_data();
CUDNN_CHECK(cudnnConvolutionBackwardFilter(handle_[1*this->group_ + g],
bottom_descs_[i], bottom_data + bottom_offset_ * g,
top_descs_[i], top_diff + top_offset_ * g,
// Gradient w.r.t. bottom data.
if (propagate_down[i]) {
- Dtype* bottom_diff = (*bottom)[i]->mutable_gpu_diff();
+ Dtype* bottom_diff = bottom[i]->mutable_gpu_diff();
CUDNN_CHECK(cudnnConvolutionBackwardData(handle_[2*this->group_ + g],
filter_desc_, weight + weight_offset_ * g,
top_descs_[i], top_diff + top_offset_ * g,
template <typename Dtype>
void CuDNNPoolingLayer<Dtype>::LayerSetUp(const vector<Blob<Dtype>*>& bottom,
- vector<Blob<Dtype>*>* top) {
+ const vector<Blob<Dtype>*>& top) {
PoolingLayer<Dtype>::LayerSetUp(bottom, top);
CUDNN_CHECK(cudnnCreate(&handle_));
template <typename Dtype>
void CuDNNPoolingLayer<Dtype>::Reshape(const vector<Blob<Dtype>*>& bottom,
- vector<Blob<Dtype>*>* top) {
+ const vector<Blob<Dtype>*>& top) {
PoolingLayer<Dtype>::Reshape(bottom, top);
cudnn::setTensor4dDesc<Dtype>(&bottom_desc_, bottom[0]->num(),
this->channels_, this->height_, this->width_);
template <typename Dtype>
void CuDNNPoolingLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
- vector<Blob<Dtype>*>* top) {
+ const vector<Blob<Dtype>*>& top) {
// Fallback to Caffe for padded pooling, max top mask.
- if ((this->pad_h_ > 0 || this->pad_w_ > 0) || (*top).size() > 1) {
+ if ((this->pad_h_ > 0 || this->pad_w_ > 0) || top.size() > 1) {
LOG(WARNING) << "Falling back to standard Caffe for padded pooling.";
return PoolingLayer<Dtype>::Forward_gpu(bottom, top);
}
const Dtype* bottom_data = bottom[0]->gpu_data();
- Dtype* top_data = (*top)[0]->mutable_gpu_data();
+ Dtype* top_data = top[0]->mutable_gpu_data();
CUDNN_CHECK(cudnnPoolingForward(handle_, pooling_desc_,
bottom_desc_, bottom_data, top_desc_, top_data));
}
template <typename Dtype>
void CuDNNPoolingLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
- const vector<bool>& propagate_down, vector<Blob<Dtype>*>* bottom) {
+ const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
if (!propagate_down[0]) {
return;
}
const Dtype* top_diff = top[0]->gpu_diff();
const Dtype* top_data = top[0]->gpu_data();
- const Dtype* bottom_data = (*bottom)[0]->gpu_data();
- Dtype* bottom_diff = (*bottom)[0]->mutable_gpu_diff();
+ const Dtype* bottom_data = bottom[0]->gpu_data();
+ Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
CUDNN_CHECK(cudnnPoolingBackward(handle_, pooling_desc_,
top_desc_, top_data, top_desc_, top_diff,
bottom_desc_, bottom_data, bottom_desc_, bottom_diff));
template <typename Dtype>
void CuDNNReLULayer<Dtype>::LayerSetUp(const vector<Blob<Dtype>*>& bottom,
- vector<Blob<Dtype>*>* top) {
+ const vector<Blob<Dtype>*>& top) {
ReLULayer<Dtype>::LayerSetUp(bottom, top);
// initialize cuDNN
CUDNN_CHECK(cudnnCreate(&handle_));
template <typename Dtype>
void CuDNNReLULayer<Dtype>::Reshape(const vector<Blob<Dtype>*>& bottom,
- vector<Blob<Dtype>*>* top) {
+ const vector<Blob<Dtype>*>& top) {
ReLULayer<Dtype>::Reshape(bottom, top);
const int N = bottom[0]->num();
const int K = bottom[0]->channels();
template <typename Dtype>
void CuDNNReLULayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
- vector<Blob<Dtype>*>* top) {
+ const vector<Blob<Dtype>*>& top) {
// Fallback to standard Caffe for leaky ReLU.
if (ReLULayer<Dtype>::layer_param_.relu_param().negative_slope() != 0) {
return ReLULayer<Dtype>::Forward_gpu(bottom, top);
}
const Dtype* bottom_data = bottom[0]->gpu_data();
- Dtype* top_data = (*top)[0]->mutable_gpu_data();
+ Dtype* top_data = top[0]->mutable_gpu_data();
CUDNN_CHECK(cudnnActivationForward(this->handle_,
CUDNN_ACTIVATION_RELU,
this->bottom_desc_, bottom_data, this->top_desc_, top_data));
template <typename Dtype>
void CuDNNReLULayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down,
- vector<Blob<Dtype>*>* bottom) {
+ const vector<Blob<Dtype>*>& bottom) {
if (!propagate_down[0]) {
return;
}
const Dtype* top_data = top[0]->gpu_data();
const Dtype* top_diff = top[0]->gpu_diff();
- const Dtype* bottom_data = (*bottom)[0]->gpu_data();
- Dtype* bottom_diff = (*bottom)[0]->mutable_gpu_diff();
+ const Dtype* bottom_data = bottom[0]->gpu_data();
+ Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
CUDNN_CHECK(cudnnActivationBackward(this->handle_,
CUDNN_ACTIVATION_RELU,
this->top_desc_, top_data, this->top_desc_, top_diff,
template <typename Dtype>
void CuDNNSigmoidLayer<Dtype>::LayerSetUp(const vector<Blob<Dtype>*>& bottom,
- vector<Blob<Dtype>*>* top) {
+ const vector<Blob<Dtype>*>& top) {
SigmoidLayer<Dtype>::LayerSetUp(bottom, top);
// initialize cuDNN
CUDNN_CHECK(cudnnCreate(&handle_));
template <typename Dtype>
void CuDNNSigmoidLayer<Dtype>::Reshape(const vector<Blob<Dtype>*>& bottom,
- vector<Blob<Dtype>*>* top) {
+ const vector<Blob<Dtype>*>& top) {
SigmoidLayer<Dtype>::Reshape(bottom, top);
const int N = bottom[0]->num();
const int K = bottom[0]->channels();
template <typename Dtype>
void CuDNNSigmoidLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
- vector<Blob<Dtype>*>* top) {
+ const vector<Blob<Dtype>*>& top) {
const Dtype* bottom_data = bottom[0]->gpu_data();
- Dtype* top_data = (*top)[0]->mutable_gpu_data();
+ Dtype* top_data = top[0]->mutable_gpu_data();
CUDNN_CHECK(cudnnActivationForward(this->handle_,
CUDNN_ACTIVATION_SIGMOID,
this->bottom_desc_, bottom_data, this->top_desc_, top_data));
template <typename Dtype>
void CuDNNSigmoidLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down,
- vector<Blob<Dtype>*>* bottom) {
+ const vector<Blob<Dtype>*>& bottom) {
if (!propagate_down[0]) {
return;
}
const Dtype* top_data = top[0]->gpu_data();
const Dtype* top_diff = top[0]->gpu_diff();
- const Dtype* bottom_data = (*bottom)[0]->gpu_data();
- Dtype* bottom_diff = (*bottom)[0]->mutable_gpu_diff();
+ const Dtype* bottom_data = bottom[0]->gpu_data();
+ Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
CUDNN_CHECK(cudnnActivationBackward(this->handle_,
CUDNN_ACTIVATION_SIGMOID,
this->top_desc_, top_data, this->top_desc_, top_diff,
template <typename Dtype>
void CuDNNSoftmaxLayer<Dtype>::LayerSetUp(const vector<Blob<Dtype>*>& bottom,
- vector<Blob<Dtype>*>* top) {
+ const vector<Blob<Dtype>*>& top) {
SoftmaxLayer<Dtype>::LayerSetUp(bottom, top);
// Initialize CUDNN.
CUDNN_CHECK(cudnnCreate(&handle_));
template <typename Dtype>
void CuDNNSoftmaxLayer<Dtype>::Reshape(const vector<Blob<Dtype>*>& bottom,
- vector<Blob<Dtype>*>* top) {
+ const vector<Blob<Dtype>*>& top) {
SoftmaxLayer<Dtype>::Reshape(bottom, top);
int N = bottom[0]->num();
int K = bottom[0]->channels();
template <typename Dtype>
void CuDNNSoftmaxLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
- vector<Blob<Dtype>*>* top) {
+ const vector<Blob<Dtype>*>& top) {
const Dtype* bottom_data = bottom[0]->gpu_data();
- Dtype* top_data = (*top)[0]->mutable_gpu_data();
+ Dtype* top_data = top[0]->mutable_gpu_data();
CUDNN_CHECK(cudnnSoftmaxForward(handle_, CUDNN_SOFTMAX_ACCURATE,
CUDNN_SOFTMAX_MODE_CHANNEL,
bottom_desc_, bottom_data, top_desc_, top_data));
template <typename Dtype>
void CuDNNSoftmaxLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
- const vector<bool>& propagate_down, vector<Blob<Dtype>*>* bottom) {
+ const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
if (propagate_down[0]) {
const Dtype* top_data = top[0]->gpu_data();
const Dtype* top_diff = top[0]->gpu_diff();
- const Dtype* bottom_data = (*bottom)[0]->gpu_data();
- Dtype* bottom_diff = (*bottom)[0]->mutable_gpu_diff();
+ const Dtype* bottom_data = bottom[0]->gpu_data();
+ Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
CUDNN_CHECK(cudnnSoftmaxBackward(handle_, CUDNN_SOFTMAX_ACCURATE,
CUDNN_SOFTMAX_MODE_CHANNEL,
top_desc_, top_data, top_desc_, top_diff, bottom_desc_, bottom_diff));
template <typename Dtype>
void CuDNNTanHLayer<Dtype>::LayerSetUp(const vector<Blob<Dtype>*>& bottom,
- vector<Blob<Dtype>*>* top) {
+ const vector<Blob<Dtype>*>& top) {
TanHLayer<Dtype>::LayerSetUp(bottom, top);
// initialize cuDNN
CUDNN_CHECK(cudnnCreate(&handle_));
template <typename Dtype>
void CuDNNTanHLayer<Dtype>::Reshape(const vector<Blob<Dtype>*>& bottom,
- vector<Blob<Dtype>*>* top) {
+ const vector<Blob<Dtype>*>& top) {
TanHLayer<Dtype>::Reshape(bottom, top);
const int N = bottom[0]->num();
const int K = bottom[0]->channels();
template <typename Dtype>
void CuDNNTanHLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
- vector<Blob<Dtype>*>* top) {
+ const vector<Blob<Dtype>*>& top) {
const Dtype* bottom_data = bottom[0]->gpu_data();
- Dtype* top_data = (*top)[0]->mutable_gpu_data();
+ Dtype* top_data = top[0]->mutable_gpu_data();
CUDNN_CHECK(cudnnActivationForward(this->handle_,
CUDNN_ACTIVATION_TANH,
this->bottom_desc_, bottom_data, this->top_desc_, top_data));
template <typename Dtype>
void CuDNNTanHLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down,
- vector<Blob<Dtype>*>* bottom) {
+ const vector<Blob<Dtype>*>& bottom) {
if (!propagate_down[0]) {
return;
}
const Dtype* top_data = top[0]->gpu_data();
const Dtype* top_diff = top[0]->gpu_diff();
- const Dtype* bottom_data = (*bottom)[0]->gpu_data();
- Dtype* bottom_diff = (*bottom)[0]->mutable_gpu_diff();
+ const Dtype* bottom_data = bottom[0]->gpu_data();
+ Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
CUDNN_CHECK(cudnnActivationBackward(this->handle_,
CUDNN_ACTIVATION_TANH,
this->top_desc_, top_data, this->top_desc_, top_diff,
template <typename Dtype>
void DataLayer<Dtype>::DataLayerSetUp(const vector<Blob<Dtype>*>& bottom,
- vector<Blob<Dtype>*>* top) {
+ const vector<Blob<Dtype>*>& top) {
// Initialize DB
switch (this->layer_param_.data_param().backend()) {
case DataParameter_DB_LEVELDB:
// image
int crop_size = this->layer_param_.transform_param().crop_size();
if (crop_size > 0) {
- (*top)[0]->Reshape(this->layer_param_.data_param().batch_size(),
+ top[0]->Reshape(this->layer_param_.data_param().batch_size(),
datum.channels(), crop_size, crop_size);
this->prefetch_data_.Reshape(this->layer_param_.data_param().batch_size(),
datum.channels(), crop_size, crop_size);
} else {
- (*top)[0]->Reshape(
+ top[0]->Reshape(
this->layer_param_.data_param().batch_size(), datum.channels(),
datum.height(), datum.width());
this->prefetch_data_.Reshape(this->layer_param_.data_param().batch_size(),
datum.channels(), datum.height(), datum.width());
}
- LOG(INFO) << "output data size: " << (*top)[0]->num() << ","
- << (*top)[0]->channels() << "," << (*top)[0]->height() << ","
- << (*top)[0]->width();
+ LOG(INFO) << "output data size: " << top[0]->num() << ","
+ << top[0]->channels() << "," << top[0]->height() << ","
+ << top[0]->width();
// label
if (this->output_labels_) {
- (*top)[1]->Reshape(this->layer_param_.data_param().batch_size(), 1, 1, 1);
+ top[1]->Reshape(this->layer_param_.data_param().batch_size(), 1, 1, 1);
this->prefetch_label_.Reshape(this->layer_param_.data_param().batch_size(),
1, 1, 1);
}
template <typename Dtype>
void DropoutLayer<Dtype>::LayerSetUp(const vector<Blob<Dtype>*>& bottom,
- vector<Blob<Dtype>*>* top) {
+ const vector<Blob<Dtype>*>& top) {
NeuronLayer<Dtype>::LayerSetUp(bottom, top);
threshold_ = this->layer_param_.dropout_param().dropout_ratio();
DCHECK(threshold_ > 0.);
template <typename Dtype>
void DropoutLayer<Dtype>::Reshape(const vector<Blob<Dtype>*>& bottom,
- vector<Blob<Dtype>*>* top) {
+ const vector<Blob<Dtype>*>& top) {
NeuronLayer<Dtype>::Reshape(bottom, top);
// Set up the cache for random number generation
rand_vec_.Reshape(bottom[0]->num(), bottom[0]->channels(),
template <typename Dtype>
void DropoutLayer<Dtype>::Forward_cpu(const vector<Blob<Dtype>*>& bottom,
- vector<Blob<Dtype>*>* top) {
+ const vector<Blob<Dtype>*>& top) {
const Dtype* bottom_data = bottom[0]->cpu_data();
- Dtype* top_data = (*top)[0]->mutable_cpu_data();
+ Dtype* top_data = top[0]->mutable_cpu_data();
unsigned int* mask = rand_vec_.mutable_cpu_data();
const int count = bottom[0]->count();
if (Caffe::phase() == Caffe::TRAIN) {
template <typename Dtype>
void DropoutLayer<Dtype>::Backward_cpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down,
- vector<Blob<Dtype>*>* bottom) {
+ const vector<Blob<Dtype>*>& bottom) {
if (propagate_down[0]) {
const Dtype* top_diff = top[0]->cpu_diff();
- Dtype* bottom_diff = (*bottom)[0]->mutable_cpu_diff();
+ Dtype* bottom_diff = bottom[0]->mutable_cpu_diff();
if (Caffe::phase() == Caffe::TRAIN) {
const unsigned int* mask = rand_vec_.cpu_data();
- const int count = (*bottom)[0]->count();
+ const int count = bottom[0]->count();
for (int i = 0; i < count; ++i) {
bottom_diff[i] = top_diff[i] * mask[i] * scale_;
}
template <typename Dtype>
void DropoutLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
- vector<Blob<Dtype>*>* top) {
+ const vector<Blob<Dtype>*>& top) {
const Dtype* bottom_data = bottom[0]->gpu_data();
- Dtype* top_data = (*top)[0]->mutable_gpu_data();
+ Dtype* top_data = top[0]->mutable_gpu_data();
const int count = bottom[0]->count();
if (Caffe::phase() == Caffe::TRAIN) {
unsigned int* mask =
template <typename Dtype>
void DropoutLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down,
- vector<Blob<Dtype>*>* bottom) {
+ const vector<Blob<Dtype>*>& bottom) {
if (propagate_down[0]) {
const Dtype* top_diff = top[0]->gpu_diff();
- Dtype* bottom_diff = (*bottom)[0]->mutable_gpu_diff();
+ Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
if (Caffe::phase() == Caffe::TRAIN) {
const unsigned int* mask =
static_cast<const unsigned int*>(rand_vec_.gpu_data());
- const int count = (*bottom)[0]->count();
+ const int count = bottom[0]->count();
// NOLINT_NEXT_LINE(whitespace/operators)
DropoutBackward<Dtype><<<CAFFE_GET_BLOCKS(count),
CAFFE_CUDA_NUM_THREADS>>>(
template <typename Dtype>
void DummyDataLayer<Dtype>::LayerSetUp(const vector<Blob<Dtype>*>& bottom,
- vector<Blob<Dtype>*>* top) {
- const int num_top = top->size();
+ const vector<Blob<Dtype>*>& top) {
+ const int num_top = top.size();
const DummyDataParameter& param = this->layer_param_.dummy_data_param();
const int num_data_filler = param.data_filler_size();
CHECK(num_data_filler == 0 || num_data_filler == 1 ||
(param.height_size() == 1) ? param.height(0) : param.height(i);
const int width =
(param.width_size() == 1) ? param.width(0) : param.width(i);
- (*top)[i]->Reshape(num, channels, height, width);
+ top[i]->Reshape(num, channels, height, width);
}
// Run Forward once, with refill_ inverted, to fill the constant Blobs.
this->Forward(bottom, top);
template <typename Dtype>
void DummyDataLayer<Dtype>::Forward_cpu(const vector<Blob<Dtype>*>& bottom,
- vector<Blob<Dtype>*>* top) {
- for (int i = 0; i < top->size(); ++i) {
+ const vector<Blob<Dtype>*>& top) {
+ for (int i = 0; i < top.size(); ++i) {
const int filler_id = (fillers_.size() > 1) ? i : 0;
if (refill_[filler_id]) {
- fillers_[filler_id]->Fill((*top)[i]);
+ fillers_[filler_id]->Fill(top[i]);
}
}
}
template <typename Dtype>
void EltwiseLayer<Dtype>::LayerSetUp(const vector<Blob<Dtype>*>& bottom,
- vector<Blob<Dtype>*>* top) {
+ const vector<Blob<Dtype>*>& top) {
CHECK(this->layer_param().eltwise_param().coeff_size() == 0
|| this->layer_param().eltwise_param().coeff_size() == bottom.size()) <<
"Eltwise Layer takes one coefficient per bottom blob.";
template <typename Dtype>
void EltwiseLayer<Dtype>::Reshape(const vector<Blob<Dtype>*>& bottom,
- vector<Blob<Dtype>*>* top) {
+ const vector<Blob<Dtype>*>& top) {
const int num = bottom[0]->num();
const int channels = bottom[0]->channels();
const int height = bottom[0]->height();
CHECK_EQ(height, bottom[i]->height());
CHECK_EQ(width, bottom[i]->width());
}
- (*top)[0]->Reshape(num, channels, height, width);
+ top[0]->Reshape(num, channels, height, width);
// If max operation, we will initialize the vector index part.
if (this->layer_param_.eltwise_param().operation() ==
- EltwiseParameter_EltwiseOp_MAX && top->size() == 1) {
+ EltwiseParameter_EltwiseOp_MAX && top.size() == 1) {
max_idx_.Reshape(bottom[0]->num(), channels, height, width);
}
}
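The max_idx_ blob allocated here is bookkeeping for the MAX operation: it records, per output element, which bottom supplied the maximum so that the backward pass (outside the hunks shown) can route the gradient to that bottom only.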
template <typename Dtype>
void EltwiseLayer<Dtype>::Forward_cpu(
- const vector<Blob<Dtype>*>& bottom, vector<Blob<Dtype>*>* top) {
+ const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) {
int* mask = NULL;
const Dtype* bottom_data_a = NULL;
const Dtype* bottom_data_b = NULL;
- const int count = (*top)[0]->count();
- Dtype* top_data = (*top)[0]->mutable_cpu_data();
+ const int count = top[0]->count();
+ Dtype* top_data = top[0]->mutable_cpu_data();
switch (op_) {
case EltwiseParameter_EltwiseOp_PROD:
caffe_mul(count, bottom[0]->cpu_data(), bottom[1]->cpu_data(), top_data);
template <typename Dtype>
void EltwiseLayer<Dtype>::Backward_cpu(const vector<Blob<Dtype>*>& top,
- const vector<bool>& propagate_down, vector<Blob<Dtype>*>* bottom) {
+ const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
const int* mask = NULL;
const int count = top[0]->count();
const Dtype* top_data = top[0]->cpu_data();
const Dtype* top_diff = top[0]->cpu_diff();
- for (int i = 0; i < bottom->size(); ++i) {
+ for (int i = 0; i < bottom.size(); ++i) {
if (propagate_down[i]) {
- const Dtype* bottom_data = (*bottom)[i]->cpu_data();
- Dtype* bottom_diff = (*bottom)[i]->mutable_cpu_diff();
+ const Dtype* bottom_data = bottom[i]->cpu_data();
+ Dtype* bottom_diff = bottom[i]->mutable_cpu_diff();
switch (op_) {
case EltwiseParameter_EltwiseOp_PROD:
if (stable_prod_grad_) {
bool initialized = false;
- for (int j = 0; j < bottom->size(); ++j) {
+ for (int j = 0; j < bottom.size(); ++j) {
if (i == j) { continue; }
if (!initialized) {
- caffe_copy(count, (*bottom)[j]->cpu_data(), bottom_diff);
+ caffe_copy(count, bottom[j]->cpu_data(), bottom_diff);
initialized = true;
} else {
- caffe_mul(count, (*bottom)[j]->cpu_data(), bottom_diff,
+ caffe_mul(count, bottom[j]->cpu_data(), bottom_diff,
bottom_diff);
}
}
template <typename Dtype>
void EltwiseLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
- vector<Blob<Dtype>*>* top) {
+ const vector<Blob<Dtype>*>& top) {
int* mask = NULL;
- const int count = (*top)[0]->count();
- Dtype* top_data = (*top)[0]->mutable_gpu_data();
+ const int count = top[0]->count();
+ Dtype* top_data = top[0]->mutable_gpu_data();
switch (op_) {
case EltwiseParameter_EltwiseOp_PROD:
caffe_gpu_mul(count, bottom[0]->gpu_data(), bottom[1]->gpu_data(),
template <typename Dtype>
void EltwiseLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
- const vector<bool>& propagate_down, vector<Blob<Dtype>*>* bottom) {
+ const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
const int* mask = NULL;
const int count = top[0]->count();
const Dtype* top_data = top[0]->gpu_data();
const Dtype* top_diff = top[0]->gpu_diff();
- for (int i = 0; i < bottom->size(); ++i) {
+ for (int i = 0; i < bottom.size(); ++i) {
if (propagate_down[i]) {
- const Dtype* bottom_data = (*bottom)[i]->gpu_data();
- Dtype* bottom_diff = (*bottom)[i]->mutable_gpu_diff();
+ const Dtype* bottom_data = bottom[i]->gpu_data();
+ Dtype* bottom_diff = bottom[i]->mutable_gpu_diff();
switch (op_) {
case EltwiseParameter_EltwiseOp_PROD:
if (stable_prod_grad_) {
bool initialized = false;
- for (int j = 0; j < bottom->size(); ++j) {
+ for (int j = 0; j < bottom.size(); ++j) {
if (i == j) { continue; }
if (!initialized) {
- caffe_copy(count, (*bottom)[j]->gpu_data(), bottom_diff);
+ caffe_copy(count, bottom[j]->gpu_data(), bottom_diff);
initialized = true;
} else {
- caffe_gpu_mul(count, (*bottom)[j]->gpu_data(), bottom_diff,
+ caffe_gpu_mul(count, bottom[j]->gpu_data(), bottom_diff,
bottom_diff);
}
}
template <typename Dtype>
void EuclideanLossLayer<Dtype>::Reshape(
- const vector<Blob<Dtype>*>& bottom, vector<Blob<Dtype>*>* top) {
+ const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) {
LossLayer<Dtype>::Reshape(bottom, top);
CHECK_EQ(bottom[0]->channels(), bottom[1]->channels());
CHECK_EQ(bottom[0]->height(), bottom[1]->height());
template <typename Dtype>
void EuclideanLossLayer<Dtype>::Forward_cpu(const vector<Blob<Dtype>*>& bottom,
- vector<Blob<Dtype>*>* top) {
+ const vector<Blob<Dtype>*>& top) {
int count = bottom[0]->count();
caffe_sub(
count,
diff_.mutable_cpu_data());
Dtype dot = caffe_cpu_dot(count, diff_.cpu_data(), diff_.cpu_data());
Dtype loss = dot / bottom[0]->num() / Dtype(2);
- (*top)[0]->mutable_cpu_data()[0] = loss;
+ top[0]->mutable_cpu_data()[0] = loss;
}
template <typename Dtype>
void EuclideanLossLayer<Dtype>::Backward_cpu(const vector<Blob<Dtype>*>& top,
- const vector<bool>& propagate_down, vector<Blob<Dtype>*>* bottom) {
+ const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
for (int i = 0; i < 2; ++i) {
if (propagate_down[i]) {
const Dtype sign = (i == 0) ? 1 : -1;
- const Dtype alpha = sign * top[0]->cpu_diff()[0] / (*bottom)[i]->num();
+ const Dtype alpha = sign * top[0]->cpu_diff()[0] / bottom[i]->num();
caffe_cpu_axpby(
- (*bottom)[i]->count(), // count
+ bottom[i]->count(), // count
alpha, // alpha
diff_.cpu_data(), // a
Dtype(0), // beta
- (*bottom)[i]->mutable_cpu_diff()); // b
+ bottom[i]->mutable_cpu_diff()); // b
}
}
}
template <typename Dtype>
void EuclideanLossLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
- vector<Blob<Dtype>*>* top) {
+ const vector<Blob<Dtype>*>& top) {
int count = bottom[0]->count();
caffe_gpu_sub(
count,
Dtype dot;
caffe_gpu_dot(count, diff_.gpu_data(), diff_.gpu_data(), &dot);
Dtype loss = dot / bottom[0]->num() / Dtype(2);
- (*top)[0]->mutable_cpu_data()[0] = loss;
+ top[0]->mutable_cpu_data()[0] = loss;
}
template <typename Dtype>
void EuclideanLossLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
- const vector<bool>& propagate_down, vector<Blob<Dtype>*>* bottom) {
+ const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
for (int i = 0; i < 2; ++i) {
if (propagate_down[i]) {
const Dtype sign = (i == 0) ? 1 : -1;
- const Dtype alpha = sign * top[0]->cpu_diff()[0] / (*bottom)[i]->num();
+ const Dtype alpha = sign * top[0]->cpu_diff()[0] / bottom[i]->num();
caffe_gpu_axpby(
- (*bottom)[i]->count(), // count
+ bottom[i]->count(), // count
alpha, // alpha
diff_.gpu_data(), // a
Dtype(0), // beta
- (*bottom)[i]->mutable_gpu_diff()); // b
+ bottom[i]->mutable_gpu_diff()); // b
}
}
}
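For reference, the arithmetic both Backward passes implement, using the diff_ buffer cached during Forward: with loss L = (1 / (2N)) * sum((x0 - x1)^2) and diff_ = x0 - x1, the gradients are dL/dx0 = (x0 - x1) / N and dL/dx1 = -(x0 - x1) / N. The sign variable selects the +/- per bottom, and the axpby call scales diff_ by alpha = sign * top[0]->cpu_diff()[0] / N into the corresponding bottom diff.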
template <typename Dtype>
void FlattenLayer<Dtype>::Reshape(const vector<Blob<Dtype>*>& bottom,
- vector<Blob<Dtype>*>* top) {
+ const vector<Blob<Dtype>*>& top) {
int channels_out = bottom[0]->channels() * bottom[0]->height()
* bottom[0]->width();
- (*top)[0]->Reshape(bottom[0]->num(), channels_out, 1, 1);
+ top[0]->Reshape(bottom[0]->num(), channels_out, 1, 1);
count_ = bottom[0]->num() * channels_out;
CHECK_EQ(count_, bottom[0]->count());
- CHECK_EQ(count_, (*top)[0]->count());
+ CHECK_EQ(count_, top[0]->count());
}
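As a concrete example of the reshape above, with illustrative numbers: a bottom of shape 64x3x28x28 becomes a top of shape 64x2352x1x1, since 3*28*28 = 2352, and count_ = 64*2352 matches both bottom[0]->count() and top[0]->count().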
template <typename Dtype>
void FlattenLayer<Dtype>::Forward_cpu(const vector<Blob<Dtype>*>& bottom,
- vector<Blob<Dtype>*>* top) {
- (*top)[0]->ShareData(*bottom[0]);
+ const vector<Blob<Dtype>*>& top) {
+ top[0]->ShareData(*bottom[0]);
}
template <typename Dtype>
void FlattenLayer<Dtype>::Backward_cpu(const vector<Blob<Dtype>*>& top,
- const vector<bool>& propagate_down, vector<Blob<Dtype>*>* bottom) {
- (*bottom)[0]->ShareDiff(*top[0]);
+ const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
+ bottom[0]->ShareDiff(*top[0]);
}
#ifdef CPU_ONLY
template <typename Dtype>
void FlattenLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
- vector<Blob<Dtype>*>* top) {
- (*top)[0]->ShareData(*bottom[0]);
+ const vector<Blob<Dtype>*>& top) {
+ top[0]->ShareData(*bottom[0]);
}
template <typename Dtype>
void FlattenLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
- const vector<bool>& propagate_down, vector<Blob<Dtype>*>* bottom) {
- (*bottom)[0]->ShareDiff(*top[0]);
+ const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
+ bottom[0]->ShareDiff(*top[0]);
}
INSTANTIATE_CLASS(FlattenLayer);
template <typename Dtype>
void HDF5DataLayer<Dtype>::LayerSetUp(const vector<Blob<Dtype>*>& bottom,
- vector<Blob<Dtype>*>* top) {
+ const vector<Blob<Dtype>*>& top) {
// Read the source to parse the filenames.
const string& source = this->layer_param_.hdf5_data_param().source();
LOG(INFO) << "Loading filename from " << source;
// Reshape blobs.
const int batch_size = this->layer_param_.hdf5_data_param().batch_size();
- (*top)[0]->Reshape(batch_size, data_blob_.channels(),
+ top[0]->Reshape(batch_size, data_blob_.channels(),
data_blob_.width(), data_blob_.height());
- (*top)[1]->Reshape(batch_size, label_blob_.channels(),
+ top[1]->Reshape(batch_size, label_blob_.channels(),
label_blob_.width(), label_blob_.height());
- LOG(INFO) << "output data size: " << (*top)[0]->num() << ","
- << (*top)[0]->channels() << "," << (*top)[0]->height() << ","
- << (*top)[0]->width();
+ LOG(INFO) << "output data size: " << top[0]->num() << ","
+ << top[0]->channels() << "," << top[0]->height() << ","
+ << top[0]->width();
}
template <typename Dtype>
void HDF5DataLayer<Dtype>::Forward_cpu(const vector<Blob<Dtype>*>& bottom,
- vector<Blob<Dtype>*>* top) {
+ const vector<Blob<Dtype>*>& top) {
const int batch_size = this->layer_param_.hdf5_data_param().batch_size();
- const int data_count = (*top)[0]->count() / (*top)[0]->num();
- const int label_data_count = (*top)[1]->count() / (*top)[1]->num();
+ const int data_count = top[0]->count() / top[0]->num();
+ const int label_data_count = top[1]->count() / top[1]->num();
for (int i = 0; i < batch_size; ++i, ++current_row_) {
if (current_row_ == data_blob_.num()) {
current_row_ = 0;
}
caffe_copy(data_count, &data_blob_.cpu_data()[current_row_ * data_count],
- &(*top)[0]->mutable_cpu_data()[i * data_count]);
+ &top[0]->mutable_cpu_data()[i * data_count]);
caffe_copy(label_data_count,
&label_blob_.cpu_data()[current_row_ * label_data_count],
- &(*top)[1]->mutable_cpu_data()[i * label_data_count]);
+ &top[1]->mutable_cpu_data()[i * label_data_count]);
}
}
template <typename Dtype>
void HDF5DataLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
- vector<Blob<Dtype>*>* top) {
+ const vector<Blob<Dtype>*>& top) {
const int batch_size = this->layer_param_.hdf5_data_param().batch_size();
- const int data_count = (*top)[0]->count() / (*top)[0]->num();
- const int label_data_count = (*top)[1]->count() / (*top)[1]->num();
+ const int data_count = top[0]->count() / top[0]->num();
+ const int label_data_count = top[1]->count() / top[1]->num();
for (int i = 0; i < batch_size; ++i, ++current_row_) {
if (current_row_ == data_blob_.num()) {
}
caffe_copy(data_count,
&data_blob_.cpu_data()[current_row_ * data_count],
- &(*top)[0]->mutable_gpu_data()[i * data_count]);
+ &top[0]->mutable_gpu_data()[i * data_count]);
caffe_copy(label_data_count,
&label_blob_.cpu_data()[current_row_ * label_data_count],
- &(*top)[1]->mutable_gpu_data()[i * label_data_count]);
+ &top[1]->mutable_gpu_data()[i * label_data_count]);
}
}
template <typename Dtype>
void HDF5OutputLayer<Dtype>::Forward_cpu(const vector<Blob<Dtype>*>& bottom,
- vector<Blob<Dtype>*>* top) {
+ const vector<Blob<Dtype>*>& top) {
CHECK_GE(bottom.size(), 2);
CHECK_EQ(bottom[0]->num(), bottom[1]->num());
data_blob_.Reshape(bottom[0]->num(), bottom[0]->channels(),
template <typename Dtype>
void HDF5OutputLayer<Dtype>::Backward_cpu(const vector<Blob<Dtype>*>& top,
- const vector<bool>& propagate_down, vector<Blob<Dtype>*>* bottom) {
+ const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
return;
}
template <typename Dtype>
void HDF5OutputLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
- vector<Blob<Dtype>*>* top) {
+ const vector<Blob<Dtype>*>& top) {
CHECK_GE(bottom.size(), 2);
CHECK_EQ(bottom[0]->num(), bottom[1]->num());
data_blob_.Reshape(bottom[0]->num(), bottom[0]->channels(),
template <typename Dtype>
void HDF5OutputLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
- const vector<bool>& propagate_down, vector<Blob<Dtype>*>* bottom) {
+ const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
return;
}
template <typename Dtype>
void HingeLossLayer<Dtype>::Forward_cpu(const vector<Blob<Dtype>*>& bottom,
- vector<Blob<Dtype>*>* top) {
+ const vector<Blob<Dtype>*>& top) {
const Dtype* bottom_data = bottom[0]->cpu_data();
Dtype* bottom_diff = bottom[0]->mutable_cpu_diff();
const Dtype* label = bottom[1]->cpu_data();
Dtype(0), 1 + bottom_diff[i * dim + j]);
}
}
- Dtype* loss = (*top)[0]->mutable_cpu_data();
+ Dtype* loss = top[0]->mutable_cpu_data();
switch (this->layer_param_.hinge_loss_param().norm()) {
case HingeLossParameter_Norm_L1:
loss[0] = caffe_cpu_asum(count, bottom_diff) / num;
template <typename Dtype>
void HingeLossLayer<Dtype>::Backward_cpu(const vector<Blob<Dtype>*>& top,
- const vector<bool>& propagate_down, vector<Blob<Dtype>*>* bottom) {
+ const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
if (propagate_down[1]) {
LOG(FATAL) << this->type_name()
<< " Layer cannot backpropagate to label inputs.";
}
if (propagate_down[0]) {
- Dtype* bottom_diff = (*bottom)[0]->mutable_cpu_diff();
- const Dtype* label = (*bottom)[1]->cpu_data();
- int num = (*bottom)[0]->num();
- int count = (*bottom)[0]->count();
+ Dtype* bottom_diff = bottom[0]->mutable_cpu_diff();
+ const Dtype* label = bottom[1]->cpu_data();
+ int num = bottom[0]->num();
+ int count = bottom[0]->count();
int dim = count / num;
for (int i = 0; i < num; ++i) {
template <typename Dtype>
void Im2colLayer<Dtype>::LayerSetUp(const vector<Blob<Dtype>*>& bottom,
- vector<Blob<Dtype>*>* top) {
+ const vector<Blob<Dtype>*>& top) {
ConvolutionParameter conv_param = this->layer_param_.convolution_param();
CHECK(!conv_param.has_kernel_size() !=
!(conv_param.has_kernel_h() && conv_param.has_kernel_w()))
template <typename Dtype>
void Im2colLayer<Dtype>::Reshape(const vector<Blob<Dtype>*>& bottom,
- vector<Blob<Dtype>*>* top) {
+ const vector<Blob<Dtype>*>& top) {
channels_ = bottom[0]->channels();
height_ = bottom[0]->height();
width_ = bottom[0]->width();
- (*top)[0]->Reshape(
+ top[0]->Reshape(
bottom[0]->num(), channels_ * kernel_h_ * kernel_w_,
(height_ + 2 * pad_h_ - kernel_h_) / stride_h_ + 1,
(width_ + 2 * pad_w_ - kernel_w_) / stride_w_ + 1);
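// Worked example of the output size above (numbers are illustrative only):
// with height_ = 227, kernel_h_ = 11, pad_h_ = 0 and stride_h_ = 4 the output
// height is (227 + 2*0 - 11) / 4 + 1 = 55, and each output "channel" packs one
// kernel_h_ x kernel_w_ patch per input channel, hence the factor
// channels_ * kernel_h_ * kernel_w_.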
template <typename Dtype>
void Im2colLayer<Dtype>::Forward_cpu(const vector<Blob<Dtype>*>& bottom,
- vector<Blob<Dtype>*>* top) {
+ const vector<Blob<Dtype>*>& top) {
const Dtype* bottom_data = bottom[0]->cpu_data();
- Dtype* top_data = (*top)[0]->mutable_cpu_data();
+ Dtype* top_data = top[0]->mutable_cpu_data();
for (int n = 0; n < bottom[0]->num(); ++n) {
im2col_cpu(bottom_data + bottom[0]->offset(n), channels_, height_,
width_, kernel_h_, kernel_w_, pad_h_, pad_w_,
- stride_h_, stride_w_, top_data + (*top)[0]->offset(n));
+ stride_h_, stride_w_, top_data + top[0]->offset(n));
}
}
template <typename Dtype>
void Im2colLayer<Dtype>::Backward_cpu(const vector<Blob<Dtype>*>& top,
- const vector<bool>& propagate_down, vector<Blob<Dtype>*>* bottom) {
+ const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
const Dtype* top_diff = top[0]->cpu_diff();
- Dtype* bottom_diff = (*bottom)[0]->mutable_cpu_diff();
+ Dtype* bottom_diff = bottom[0]->mutable_cpu_diff();
for (int n = 0; n < top[0]->num(); ++n) {
col2im_cpu(top_diff + top[0]->offset(n), channels_, height_, width_,
kernel_h_, kernel_w_, pad_h_, pad_w_,
- stride_h_, stride_w_, bottom_diff + (*bottom)[0]->offset(n));
+ stride_h_, stride_w_, bottom_diff + bottom[0]->offset(n));
}
}
template <typename Dtype>
void Im2colLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
- vector<Blob<Dtype>*>* top) {
+ const vector<Blob<Dtype>*>& top) {
const Dtype* bottom_data = bottom[0]->gpu_data();
- Dtype* top_data = (*top)[0]->mutable_gpu_data();
+ Dtype* top_data = top[0]->mutable_gpu_data();
for (int n = 0; n < bottom[0]->num(); ++n) {
im2col_gpu(bottom_data + bottom[0]->offset(n), channels_, height_,
width_, kernel_h_, kernel_w_, pad_h_, pad_w_,
- stride_h_, stride_w_, top_data + (*top)[0]->offset(n));
+ stride_h_, stride_w_, top_data + top[0]->offset(n));
}
}
template <typename Dtype>
void Im2colLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
- const vector<bool>& propagate_down, vector<Blob<Dtype>*>* bottom) {
+ const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
const Dtype* top_diff = top[0]->gpu_diff();
- Dtype* bottom_diff = (*bottom)[0]->mutable_gpu_diff();
+ Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
for (int n = 0; n < top[0]->num(); ++n) {
col2im_gpu(top_diff + top[0]->offset(n), channels_, height_, width_,
kernel_h_, kernel_w_, pad_h_, pad_w_,
- stride_h_, stride_w_, bottom_diff + (*bottom)[0]->offset(n));
+ stride_h_, stride_w_, bottom_diff + bottom[0]->offset(n));
}
}
template <typename Dtype>
void ImageDataLayer<Dtype>::DataLayerSetUp(const vector<Blob<Dtype>*>& bottom,
- vector<Blob<Dtype>*>* top) {
+ const vector<Blob<Dtype>*>& top) {
const int new_height = this->layer_param_.image_data_param().new_height();
const int new_width = this->layer_param_.image_data_param().new_width();
CHECK((new_height == 0 && new_width == 0) ||
const int crop_size = this->layer_param_.transform_param().crop_size();
const int batch_size = this->layer_param_.image_data_param().batch_size();
if (crop_size > 0) {
- (*top)[0]->Reshape(batch_size, datum.channels(), crop_size, crop_size);
+ top[0]->Reshape(batch_size, datum.channels(), crop_size, crop_size);
this->prefetch_data_.Reshape(batch_size, datum.channels(), crop_size,
crop_size);
} else {
- (*top)[0]->Reshape(batch_size, datum.channels(), datum.height(),
+ top[0]->Reshape(batch_size, datum.channels(), datum.height(),
datum.width());
this->prefetch_data_.Reshape(batch_size, datum.channels(), datum.height(),
datum.width());
}
- LOG(INFO) << "output data size: " << (*top)[0]->num() << ","
- << (*top)[0]->channels() << "," << (*top)[0]->height() << ","
- << (*top)[0]->width();
+ LOG(INFO) << "output data size: " << top[0]->num() << ","
+ << top[0]->channels() << "," << top[0]->height() << ","
+ << top[0]->width();
// label
- (*top)[1]->Reshape(batch_size, 1, 1, 1);
+ top[1]->Reshape(batch_size, 1, 1, 1);
this->prefetch_label_.Reshape(batch_size, 1, 1, 1);
// datum size
this->datum_channels_ = datum.channels();
template <typename Dtype>
void InfogainLossLayer<Dtype>::LayerSetUp(
- const vector<Blob<Dtype>*>& bottom, vector<Blob<Dtype>*>* top) {
+ const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) {
LossLayer<Dtype>::LayerSetUp(bottom, top);
if (bottom.size() < 3) {
CHECK(this->layer_param_.infogain_loss_param().has_source())
template <typename Dtype>
void InfogainLossLayer<Dtype>::Reshape(
- const vector<Blob<Dtype>*>& bottom, vector<Blob<Dtype>*>* top) {
+ const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) {
LossLayer<Dtype>::Reshape(bottom, top);
Blob<Dtype>* infogain = NULL;
if (bottom.size() < 3) {
template <typename Dtype>
void InfogainLossLayer<Dtype>::Forward_cpu(const vector<Blob<Dtype>*>& bottom,
- vector<Blob<Dtype>*>* top) {
+ const vector<Blob<Dtype>*>& top) {
const Dtype* bottom_data = bottom[0]->cpu_data();
const Dtype* bottom_label = bottom[1]->cpu_data();
const Dtype* infogain_mat = NULL;
loss -= infogain_mat[label * dim + j] * log(prob);
}
}
- (*top)[0]->mutable_cpu_data()[0] = loss / num;
+ top[0]->mutable_cpu_data()[0] = loss / num;
}
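// The value computed above is -(1/num) * sum_n sum_j H[label_n][j] * log(p_nj),
// where H is the dim x dim infogain matrix; with H equal to the identity this
// reduces to the multinomial logistic loss implemented further below.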
template <typename Dtype>
void InfogainLossLayer<Dtype>::Backward_cpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down,
- vector<Blob<Dtype>*>* bottom) {
+ const vector<Blob<Dtype>*>& bottom) {
if (propagate_down[1]) {
LOG(FATAL) << this->type_name()
<< " Layer cannot backpropagate to label inputs.";
<< " Layer cannot backpropagate to infogain inputs.";
}
if (propagate_down[0]) {
- const Dtype* bottom_data = (*bottom)[0]->cpu_data();
- const Dtype* bottom_label = (*bottom)[1]->cpu_data();
+ const Dtype* bottom_data = bottom[0]->cpu_data();
+ const Dtype* bottom_label = bottom[1]->cpu_data();
const Dtype* infogain_mat = NULL;
- if (bottom->size() < 3) {
+ if (bottom.size() < 3) {
infogain_mat = infogain_.cpu_data();
} else {
- infogain_mat = (*bottom)[2]->cpu_data();
+ infogain_mat = bottom[2]->cpu_data();
}
- Dtype* bottom_diff = (*bottom)[0]->mutable_cpu_diff();
- int num = (*bottom)[0]->num();
- int dim = (*bottom)[0]->count() / (*bottom)[0]->num();
+ Dtype* bottom_diff = bottom[0]->mutable_cpu_diff();
+ int num = bottom[0]->num();
+ int dim = bottom[0]->count() / bottom[0]->num();
const Dtype scale = - top[0]->cpu_diff()[0] / num;
for (int i = 0; i < num; ++i) {
const int label = static_cast<int>(bottom_label[i]);
template <typename Dtype>
void InnerProductLayer<Dtype>::LayerSetUp(const vector<Blob<Dtype>*>& bottom,
- vector<Blob<Dtype>*>* top) {
+ const vector<Blob<Dtype>*>& top) {
const int num_output = this->layer_param_.inner_product_param().num_output();
bias_term_ = this->layer_param_.inner_product_param().bias_term();
N_ = num_output;
template <typename Dtype>
void InnerProductLayer<Dtype>::Reshape(const vector<Blob<Dtype>*>& bottom,
- vector<Blob<Dtype>*>* top) {
+ const vector<Blob<Dtype>*>& top) {
// Figure out the dimensions
M_ = bottom[0]->num();
CHECK_EQ(bottom[0]->count() / bottom[0]->num(), K_) << "Input size "
"incompatible with inner product parameters.";
- (*top)[0]->Reshape(bottom[0]->num(), N_, 1, 1);
+ top[0]->Reshape(bottom[0]->num(), N_, 1, 1);
// Set up the bias multiplier
if (bias_term_) {
bias_multiplier_.Reshape(1, 1, 1, M_);
template <typename Dtype>
void InnerProductLayer<Dtype>::Forward_cpu(const vector<Blob<Dtype>*>& bottom,
- vector<Blob<Dtype>*>* top) {
+ const vector<Blob<Dtype>*>& top) {
const Dtype* bottom_data = bottom[0]->cpu_data();
- Dtype* top_data = (*top)[0]->mutable_cpu_data();
+ Dtype* top_data = top[0]->mutable_cpu_data();
const Dtype* weight = this->blobs_[0]->cpu_data();
caffe_cpu_gemm<Dtype>(CblasNoTrans, CblasTrans, M_, N_, K_, (Dtype)1.,
bottom_data, weight, (Dtype)0., top_data);
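// Shape sketch for the GEMM above: bottom_data is M_ x K_ (batch x inputs) and
// the weight blob is stored as N_ x K_ (outputs x inputs), so CblasTrans yields
// top = bottom * weight^T, an M_ x N_ matrix of outputs; the bias term, when
// enabled, is added in a second GEMM against bias_multiplier_ (not shown here).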
template <typename Dtype>
void InnerProductLayer<Dtype>::Backward_cpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down,
- vector<Blob<Dtype>*>* bottom) {
+ const vector<Blob<Dtype>*>& bottom) {
if (this->param_propagate_down_[0]) {
const Dtype* top_diff = top[0]->cpu_diff();
- const Dtype* bottom_data = (*bottom)[0]->cpu_data();
+ const Dtype* bottom_data = bottom[0]->cpu_data();
// Gradient with respect to weight
caffe_cpu_gemm<Dtype>(CblasTrans, CblasNoTrans, N_, K_, M_, (Dtype)1.,
top_diff, bottom_data, (Dtype)0., this->blobs_[0]->mutable_cpu_diff());
// Gradient with respect to bottom data
caffe_cpu_gemm<Dtype>(CblasNoTrans, CblasNoTrans, M_, K_, N_, (Dtype)1.,
top_diff, this->blobs_[0]->cpu_data(), (Dtype)0.,
- (*bottom)[0]->mutable_cpu_diff());
+ bottom[0]->mutable_cpu_diff());
}
}
template <typename Dtype>
void InnerProductLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
- vector<Blob<Dtype>*>* top) {
+ const vector<Blob<Dtype>*>& top) {
const Dtype* bottom_data = bottom[0]->gpu_data();
- Dtype* top_data = (*top)[0]->mutable_gpu_data();
+ Dtype* top_data = top[0]->mutable_gpu_data();
const Dtype* weight = this->blobs_[0]->gpu_data();
caffe_gpu_gemm<Dtype>(CblasNoTrans, CblasTrans, M_, N_, K_, (Dtype)1.,
bottom_data, weight, (Dtype)0., top_data);
template <typename Dtype>
void InnerProductLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down,
- vector<Blob<Dtype>*>* bottom) {
+ const vector<Blob<Dtype>*>& bottom) {
if (this->param_propagate_down_[0]) {
const Dtype* top_diff = top[0]->gpu_diff();
- const Dtype* bottom_data = (*bottom)[0]->gpu_data();
+ const Dtype* bottom_data = bottom[0]->gpu_data();
// Gradient with respect to weight
caffe_gpu_gemm<Dtype>(CblasTrans, CblasNoTrans, N_, K_, M_, (Dtype)1.,
top_diff, bottom_data, (Dtype)0., this->blobs_[0]->mutable_gpu_diff());
// Gradient with respect to bottom data
caffe_gpu_gemm<Dtype>(CblasNoTrans, CblasNoTrans, M_, K_, N_, (Dtype)1.,
top_diff, this->blobs_[0]->gpu_data(), (Dtype)0.,
- (*bottom)[0]->mutable_gpu_diff());
+ bottom[0]->mutable_gpu_diff());
}
}
template <typename Dtype>
void LossLayer<Dtype>::LayerSetUp(
- const vector<Blob<Dtype>*>& bottom, vector<Blob<Dtype>*>* top) {
+ const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) {
// LossLayers have a non-zero (1) loss by default.
if (this->layer_param_.loss_weight_size() == 0) {
this->layer_param_.add_loss_weight(Dtype(1));
template <typename Dtype>
void LossLayer<Dtype>::Reshape(
- const vector<Blob<Dtype>*>& bottom, vector<Blob<Dtype>*>* top) {
+ const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) {
CHECK_EQ(bottom[0]->num(), bottom[1]->num())
<< "The data and label should have the same number.";
- (*top)[0]->Reshape(1, 1, 1, 1);
+ top[0]->Reshape(1, 1, 1, 1);
}
INSTANTIATE_CLASS(LossLayer);
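// Every loss layer thus produces a single scalar in top[0]; the Backward_cpu
// implementations in this file read top[0]->cpu_diff()[0] back as the loss
// weight (1 by default, per the LayerSetUp above) and fold it into the
// gradient scale.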
template <typename Dtype>
void LRNLayer<Dtype>::LayerSetUp(const vector<Blob<Dtype>*>& bottom,
- vector<Blob<Dtype>*>* top) {
+ const vector<Blob<Dtype>*>& top) {
size_ = this->layer_param_.lrn_param().local_size();
CHECK_EQ(size_ % 2, 1) << "LRN only supports odd values for local_size";
pre_pad_ = (size_ - 1) / 2;
split_top_vec_.push_back(&square_input_);
LayerParameter split_param;
split_layer_.reset(new SplitLayer<Dtype>(split_param));
- split_layer_->SetUp(bottom, &split_top_vec_);
+ split_layer_->SetUp(bottom, split_top_vec_);
// Set up square_layer_ to square the inputs.
square_bottom_vec_.clear();
square_top_vec_.clear();
LayerParameter square_param;
square_param.mutable_power_param()->set_power(Dtype(2));
square_layer_.reset(new PowerLayer<Dtype>(square_param));
- square_layer_->SetUp(square_bottom_vec_, &square_top_vec_);
+ square_layer_->SetUp(square_bottom_vec_, square_top_vec_);
// Set up pool_layer_ to sum over square neighborhoods of the input.
pool_top_vec_.clear();
pool_top_vec_.push_back(&pool_output_);
pool_param.mutable_pooling_param()->set_pad(pre_pad_);
pool_param.mutable_pooling_param()->set_kernel_size(size_);
pool_layer_.reset(new PoolingLayer<Dtype>(pool_param));
- pool_layer_->SetUp(square_top_vec_, &pool_top_vec_);
+ pool_layer_->SetUp(square_top_vec_, pool_top_vec_);
// Set up power_layer_ to compute (1 + alpha_/N^2 s)^-beta_, where s is
// the sum of a squared neighborhood (the output of pool_layer_).
power_top_vec_.clear();
power_param.mutable_power_param()->set_scale(alpha_);
power_param.mutable_power_param()->set_shift(Dtype(1));
power_layer_.reset(new PowerLayer<Dtype>(power_param));
- power_layer_->SetUp(pool_top_vec_, &power_top_vec_);
+ power_layer_->SetUp(pool_top_vec_, power_top_vec_);
// Set up a product_layer_ to compute outputs by multiplying inputs by the
// inverse denominator computed by the power layer.
product_bottom_vec_.clear();
template <typename Dtype>
void LRNLayer<Dtype>::Reshape(const vector<Blob<Dtype>*>& bottom,
- vector<Blob<Dtype>*>* top) {
+ const vector<Blob<Dtype>*>& top) {
num_ = bottom[0]->num();
channels_ = bottom[0]->channels();
height_ = bottom[0]->height();
width_ = bottom[0]->width();
switch (this->layer_param_.lrn_param().norm_region()) {
case LRNParameter_NormRegion_ACROSS_CHANNELS:
- (*top)[0]->Reshape(num_, channels_, height_, width_);
+ top[0]->Reshape(num_, channels_, height_, width_);
scale_.Reshape(num_, channels_, height_, width_);
break;
case LRNParameter_NormRegion_WITHIN_CHANNEL:
- split_layer_->Reshape(bottom, &split_top_vec_);
- square_layer_->Reshape(square_bottom_vec_, &square_top_vec_);
- pool_layer_->Reshape(square_top_vec_, &pool_top_vec_);
- power_layer_->Reshape(pool_top_vec_, &power_top_vec_);
+ split_layer_->Reshape(bottom, split_top_vec_);
+ square_layer_->Reshape(square_bottom_vec_, square_top_vec_);
+ pool_layer_->Reshape(square_top_vec_, pool_top_vec_);
+ power_layer_->Reshape(pool_top_vec_, power_top_vec_);
product_layer_->Reshape(product_bottom_vec_, top);
break;
}
template <typename Dtype>
void LRNLayer<Dtype>::Forward_cpu(const vector<Blob<Dtype>*>& bottom,
- vector<Blob<Dtype>*>* top) {
+ const vector<Blob<Dtype>*>& top) {
switch (this->layer_param_.lrn_param().norm_region()) {
case LRNParameter_NormRegion_ACROSS_CHANNELS:
CrossChannelForward_cpu(bottom, top);
template <typename Dtype>
void LRNLayer<Dtype>::CrossChannelForward_cpu(
- const vector<Blob<Dtype>*>& bottom, vector<Blob<Dtype>*>* top) {
+ const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) {
const Dtype* bottom_data = bottom[0]->cpu_data();
- Dtype* top_data = (*top)[0]->mutable_cpu_data();
+ Dtype* top_data = top[0]->mutable_cpu_data();
Dtype* scale_data = scale_.mutable_cpu_data();
// start with the constant value
for (int i = 0; i < scale_.count(); ++i) {
template <typename Dtype>
void LRNLayer<Dtype>::WithinChannelForward(
- const vector<Blob<Dtype>*>& bottom, vector<Blob<Dtype>*>* top) {
- split_layer_->Forward(bottom, &split_top_vec_);
- square_layer_->Forward(square_bottom_vec_, &square_top_vec_);
- pool_layer_->Forward(square_top_vec_, &pool_top_vec_);
- power_layer_->Forward(pool_top_vec_, &power_top_vec_);
+ const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) {
+ split_layer_->Forward(bottom, split_top_vec_);
+ square_layer_->Forward(square_bottom_vec_, square_top_vec_);
+ pool_layer_->Forward(square_top_vec_, pool_top_vec_);
+ power_layer_->Forward(pool_top_vec_, power_top_vec_);
product_layer_->Forward(product_bottom_vec_, top);
}
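// Data flow of the within-channel path above: the split produces two copies of
// the input; one copy is squared, pooled over the local window, and passed
// through the power layer to form the (1 + alpha * s)^(-beta) normalizer, which
// the product layer multiplies element-wise with the other copy to give top[0].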
template <typename Dtype>
void LRNLayer<Dtype>::Backward_cpu(const vector<Blob<Dtype>*>& top,
- const vector<bool>& propagate_down, vector<Blob<Dtype>*>* bottom) {
+ const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
switch (this->layer_param_.lrn_param().norm_region()) {
case LRNParameter_NormRegion_ACROSS_CHANNELS:
CrossChannelBackward_cpu(top, propagate_down, bottom);
template <typename Dtype>
void LRNLayer<Dtype>::CrossChannelBackward_cpu(
const vector<Blob<Dtype>*>& top, const vector<bool>& propagate_down,
- vector<Blob<Dtype>*>* bottom) {
+ const vector<Blob<Dtype>*>& bottom) {
const Dtype* top_diff = top[0]->cpu_diff();
const Dtype* top_data = top[0]->cpu_data();
- const Dtype* bottom_data = (*bottom)[0]->cpu_data();
+ const Dtype* bottom_data = bottom[0]->cpu_data();
const Dtype* scale_data = scale_.cpu_data();
- Dtype* bottom_diff = (*bottom)[0]->mutable_cpu_diff();
+ Dtype* bottom_diff = bottom[0]->mutable_cpu_diff();
Blob<Dtype> padded_ratio(1, channels_ + size_ - 1, height_, width_);
Blob<Dtype> accum_ratio(1, 1, height_, width_);
Dtype* padded_ratio_data = padded_ratio.mutable_cpu_data();
template <typename Dtype>
void LRNLayer<Dtype>::WithinChannelBackward(
const vector<Blob<Dtype>*>& top, const vector<bool>& propagate_down,
- vector<Blob<Dtype>*>* bottom) {
+ const vector<Blob<Dtype>*>& bottom) {
if (propagate_down[0]) {
vector<bool> product_propagate_down(2, true);
- product_layer_->Backward(top, product_propagate_down, &product_bottom_vec_);
- power_layer_->Backward(power_top_vec_, propagate_down, &pool_top_vec_);
- pool_layer_->Backward(pool_top_vec_, propagate_down, &square_top_vec_);
+ product_layer_->Backward(top, product_propagate_down, product_bottom_vec_);
+ power_layer_->Backward(power_top_vec_, propagate_down, pool_top_vec_);
+ pool_layer_->Backward(pool_top_vec_, propagate_down, square_top_vec_);
square_layer_->Backward(square_top_vec_, propagate_down,
- &square_bottom_vec_);
+ square_bottom_vec_);
split_layer_->Backward(split_top_vec_, propagate_down, bottom);
}
}
template <typename Dtype>
void LRNLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
- vector<Blob<Dtype>*>* top) {
+ const vector<Blob<Dtype>*>& top) {
switch (this->layer_param_.lrn_param().norm_region()) {
case LRNParameter_NormRegion_ACROSS_CHANNELS:
CrossChannelForward_gpu(bottom, top);
template <typename Dtype>
void LRNLayer<Dtype>::CrossChannelForward_gpu(
- const vector<Blob<Dtype>*>& bottom, vector<Blob<Dtype>*>* top) {
+ const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) {
// First, compute scale
const Dtype* bottom_data = bottom[0]->gpu_data();
- Dtype* top_data = (*top)[0]->mutable_gpu_data();
+ Dtype* top_data = top[0]->mutable_gpu_data();
Dtype* scale_data = scale_.mutable_gpu_data();
// We will launch one kernel for each pixel location, and have the kernel
// go through all the channels.
template <typename Dtype>
void LRNLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
- const vector<bool>& propagate_down, vector<Blob<Dtype>*>* bottom) {
+ const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
switch (this->layer_param_.lrn_param().norm_region()) {
case LRNParameter_NormRegion_ACROSS_CHANNELS:
CrossChannelBackward_gpu(top, propagate_down, bottom);
template <typename Dtype>
void LRNLayer<Dtype>::CrossChannelBackward_gpu(
const vector<Blob<Dtype>*>& top, const vector<bool>& propagate_down,
- vector<Blob<Dtype>*>* bottom) {
+ const vector<Blob<Dtype>*>& bottom) {
int n_threads = num_ * height_ * width_;
// NOLINT_NEXT_LINE(whitespace/operators)
LRNComputeDiff<<<CAFFE_GET_BLOCKS(n_threads), CAFFE_CUDA_NUM_THREADS>>>(
- n_threads, (*bottom)[0]->gpu_data(), top[0]->gpu_data(),
+ n_threads, bottom[0]->gpu_data(), top[0]->gpu_data(),
scale_.gpu_data(), top[0]->gpu_diff(), num_, channels_, height_, width_,
size_, -beta_, Dtype(2. * alpha_ * beta_ / size_),
- (*bottom)[0]->mutable_gpu_diff());
+ bottom[0]->mutable_gpu_diff());
}
template <typename Dtype>
void MemoryDataLayer<Dtype>::DataLayerSetUp(const vector<Blob<Dtype>*>& bottom,
- vector<Blob<Dtype>*>* top) {
+ const vector<Blob<Dtype>*>& top) {
batch_size_ = this->layer_param_.memory_data_param().batch_size();
this->datum_channels_ = this->layer_param_.memory_data_param().channels();
this->datum_height_ = this->layer_param_.memory_data_param().height();
CHECK_GT(batch_size_ * this->datum_size_, 0) <<
"batch_size, channels, height, and width must be specified and"
" positive in memory_data_param";
- (*top)[0]->Reshape(batch_size_, this->datum_channels_, this->datum_height_,
+ top[0]->Reshape(batch_size_, this->datum_channels_, this->datum_height_,
this->datum_width_);
- (*top)[1]->Reshape(batch_size_, 1, 1, 1);
+ top[1]->Reshape(batch_size_, 1, 1, 1);
added_data_.Reshape(batch_size_, this->datum_channels_, this->datum_height_,
this->datum_width_);
added_label_.Reshape(batch_size_, 1, 1, 1);
template <typename Dtype>
void MemoryDataLayer<Dtype>::Forward_cpu(const vector<Blob<Dtype>*>& bottom,
- vector<Blob<Dtype>*>* top) {
+ const vector<Blob<Dtype>*>& top) {
CHECK(data_) << "MemoryDataLayer needs to be initalized by calling Reset";
- (*top)[0]->set_cpu_data(data_ + pos_ * this->datum_size_);
- (*top)[1]->set_cpu_data(labels_ + pos_);
+ top[0]->set_cpu_data(data_ + pos_ * this->datum_size_);
+ top[1]->set_cpu_data(labels_ + pos_);
pos_ = (pos_ + batch_size_) % n_;
has_new_data_ = false;
}
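// Usage sketch (hypothetical names; check memory_data_layer.hpp for the exact
// Reset() signature):
//   layer.Reset(data_ptr, label_ptr, n);   // n should be a multiple of batch_size_
//   layer.Forward(bottom_vec, top_vec);    // top[0]/top[1] now wrap the next batch
// Note that set_cpu_data() above makes the top blobs alias the user-supplied
// memory directly rather than copying it.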
template <typename Dtype>
void MultinomialLogisticLossLayer<Dtype>::Reshape(
- const vector<Blob<Dtype>*>& bottom, vector<Blob<Dtype>*>* top) {
+ const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) {
LossLayer<Dtype>::Reshape(bottom, top);
CHECK_EQ(bottom[1]->channels(), 1);
CHECK_EQ(bottom[1]->height(), 1);
template <typename Dtype>
void MultinomialLogisticLossLayer<Dtype>::Forward_cpu(
- const vector<Blob<Dtype>*>& bottom, vector<Blob<Dtype>*>* top) {
+ const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) {
const Dtype* bottom_data = bottom[0]->cpu_data();
const Dtype* bottom_label = bottom[1]->cpu_data();
int num = bottom[0]->num();
bottom_data[i * dim + label], Dtype(kLOG_THRESHOLD));
loss -= log(prob);
}
- (*top)[0]->mutable_cpu_data()[0] = loss / num;
+ top[0]->mutable_cpu_data()[0] = loss / num;
}
template <typename Dtype>
void MultinomialLogisticLossLayer<Dtype>::Backward_cpu(
const vector<Blob<Dtype>*>& top, const vector<bool>& propagate_down,
- vector<Blob<Dtype>*>* bottom) {
+ const vector<Blob<Dtype>*>& bottom) {
if (propagate_down[1]) {
LOG(FATAL) << this->type_name()
<< " Layer cannot backpropagate to label inputs.";
}
if (propagate_down[0]) {
- const Dtype* bottom_data = (*bottom)[0]->cpu_data();
- const Dtype* bottom_label = (*bottom)[1]->cpu_data();
- Dtype* bottom_diff = (*bottom)[0]->mutable_cpu_diff();
- int num = (*bottom)[0]->num();
- int dim = (*bottom)[0]->count() / (*bottom)[0]->num();
- caffe_set((*bottom)[0]->count(), Dtype(0), bottom_diff);
+ const Dtype* bottom_data = bottom[0]->cpu_data();
+ const Dtype* bottom_label = bottom[1]->cpu_data();
+ Dtype* bottom_diff = bottom[0]->mutable_cpu_diff();
+ int num = bottom[0]->num();
+ int dim = bottom[0]->count() / bottom[0]->num();
+ caffe_set(bottom[0]->count(), Dtype(0), bottom_diff);
const Dtype scale = - top[0]->cpu_diff()[0] / num;
for (int i = 0; i < num; ++i) {
int label = static_cast<int>(bottom_label[i]);
template <typename Dtype>
void MVNLayer<Dtype>::Reshape(const vector<Blob<Dtype>*>& bottom,
- vector<Blob<Dtype>*>* top) {
- (*top)[0]->Reshape(bottom[0]->num(), bottom[0]->channels(),
+ const vector<Blob<Dtype>*>& top) {
+ top[0]->Reshape(bottom[0]->num(), bottom[0]->channels(),
bottom[0]->height(), bottom[0]->width());
mean_.Reshape(bottom[0]->num(), bottom[0]->channels(),
1, 1);
template <typename Dtype>
void MVNLayer<Dtype>::Forward_cpu(const vector<Blob<Dtype>*>& bottom,
- vector<Blob<Dtype>*>* top) {
+ const vector<Blob<Dtype>*>& top) {
const Dtype* bottom_data = bottom[0]->cpu_data();
- Dtype* top_data = (*top)[0]->mutable_cpu_data();
+ Dtype* top_data = top[0]->mutable_cpu_data();
int num;
if (this->layer_param_.mvn_param().across_channels())
num = bottom[0]->num();
template <typename Dtype>
void MVNLayer<Dtype>::Backward_cpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down,
- vector<Blob<Dtype>*>* bottom) {
+ const vector<Blob<Dtype>*>& bottom) {
const Dtype* top_diff = top[0]->cpu_diff();
const Dtype* top_data = top[0]->cpu_data();
- const Dtype* bottom_data = (*bottom)[0]->cpu_data();
- Dtype* bottom_diff = (*bottom)[0]->mutable_cpu_diff();
+ const Dtype* bottom_data = bottom[0]->cpu_data();
+ Dtype* bottom_diff = bottom[0]->mutable_cpu_diff();
int num;
if (this->layer_param_.mvn_param().across_channels())
- num = (*bottom)[0]->num();
+ num = bottom[0]->num();
else
- num = (*bottom)[0]->num() * (*bottom)[0]->channels();
+ num = bottom[0]->num() * bottom[0]->channels();
- int dim = (*bottom)[0]->count() / num;
+ int dim = bottom[0]->count() / num;
Dtype eps = 1e-10;
if (this->layer_param_.mvn_param().normalize_variance()) {
template <typename Dtype>
void MVNLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
- vector<Blob<Dtype>*>* top) {
+ const vector<Blob<Dtype>*>& top) {
const Dtype* bottom_data = bottom[0]->gpu_data();
- Dtype* top_data = (*top)[0]->mutable_gpu_data();
+ Dtype* top_data = top[0]->mutable_gpu_data();
int num;
if (this->layer_param_.mvn_param().across_channels())
num = bottom[0]->num();
template <typename Dtype>
void MVNLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down,
- vector<Blob<Dtype>*>* bottom) {
+ const vector<Blob<Dtype>*>& bottom) {
const Dtype* top_diff = top[0]->gpu_diff();
const Dtype* top_data = top[0]->gpu_data();
- const Dtype* bottom_data = (*bottom)[0]->gpu_data();
- Dtype* bottom_diff = (*bottom)[0]->mutable_gpu_diff();
+ const Dtype* bottom_data = bottom[0]->gpu_data();
+ Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
int num;
if (this->layer_param_.mvn_param().across_channels())
- num = (*bottom)[0]->num();
+ num = bottom[0]->num();
else
- num = (*bottom)[0]->num() * (*bottom)[0]->channels();
+ num = bottom[0]->num() * bottom[0]->channels();
- int dim = (*bottom)[0]->count() / num;
+ int dim = bottom[0]->count() / num;
Dtype eps = 1e-10;
template <typename Dtype>
void NeuronLayer<Dtype>::Reshape(const vector<Blob<Dtype>*>& bottom,
- vector<Blob<Dtype>*>* top) {
- (*top)[0]->ReshapeLike(*bottom[0]);
+ const vector<Blob<Dtype>*>& top) {
+ top[0]->ReshapeLike(*bottom[0]);
}
INSTANTIATE_CLASS(NeuronLayer);
template <typename Dtype>
void PoolingLayer<Dtype>::LayerSetUp(const vector<Blob<Dtype>*>& bottom,
- vector<Blob<Dtype>*>* top) {
+ const vector<Blob<Dtype>*>& top) {
PoolingParameter pool_param = this->layer_param_.pooling_param();
CHECK(!pool_param.has_kernel_size() !=
!(pool_param.has_kernel_h() && pool_param.has_kernel_w()))
template <typename Dtype>
void PoolingLayer<Dtype>::Reshape(const vector<Blob<Dtype>*>& bottom,
- vector<Blob<Dtype>*>* top) {
+ const vector<Blob<Dtype>*>& top) {
channels_ = bottom[0]->channels();
height_ = bottom[0]->height();
width_ = bottom[0]->width();
CHECK_LT((pooled_height_ - 1) * stride_h_, height_ + pad_h_);
CHECK_LT((pooled_width_ - 1) * stride_w_, width_ + pad_w_);
}
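// The CHECK_LT guards above ensure the last pooling window still starts inside
// the padded input; e.g. (illustrative values) with height_ = 4, pad_h_ = 0,
// stride_h_ = 2 and pooled_height_ = 2, the last window starts at row
// (2 - 1) * 2 = 2 < 4 + 0, so the check passes.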
- (*top)[0]->Reshape(bottom[0]->num(), channels_, pooled_height_,
+ top[0]->Reshape(bottom[0]->num(), channels_, pooled_height_,
pooled_width_);
- if (top->size() > 1) {
- (*top)[1]->ReshapeLike(*(*top)[0]);
+ if (top.size() > 1) {
+ top[1]->ReshapeLike(*top[0]);
}
// If max pooling, we will initialize the vector index part.
if (this->layer_param_.pooling_param().pool() ==
- PoolingParameter_PoolMethod_MAX && top->size() == 1) {
+ PoolingParameter_PoolMethod_MAX && top.size() == 1) {
max_idx_.Reshape(bottom[0]->num(), channels_, pooled_height_,
pooled_width_);
}
// case?
template <typename Dtype>
void PoolingLayer<Dtype>::Forward_cpu(const vector<Blob<Dtype>*>& bottom,
- vector<Blob<Dtype>*>* top) {
+ const vector<Blob<Dtype>*>& top) {
const Dtype* bottom_data = bottom[0]->cpu_data();
- Dtype* top_data = (*top)[0]->mutable_cpu_data();
- const int top_count = (*top)[0]->count();
+ Dtype* top_data = top[0]->mutable_cpu_data();
+ const int top_count = top[0]->count();
// We'll output the mask to top[1] if it's of size >1.
- const bool use_top_mask = top->size() > 1;
+ const bool use_top_mask = top.size() > 1;
int* mask = NULL; // suppress warnings about uninitialized variables
Dtype* top_mask = NULL;
// Different pooling methods. We explicitly do the switch outside the for
case PoolingParameter_PoolMethod_MAX:
// Initialize
if (use_top_mask) {
- top_mask = (*top)[1]->mutable_cpu_data();
+ top_mask = top[1]->mutable_cpu_data();
caffe_set(top_count, Dtype(-1), top_mask);
} else {
mask = max_idx_.mutable_cpu_data();
}
// compute offset
bottom_data += bottom[0]->offset(0, 1);
- top_data += (*top)[0]->offset(0, 1);
+ top_data += top[0]->offset(0, 1);
if (use_top_mask) {
- top_mask += (*top)[0]->offset(0, 1);
+ top_mask += top[0]->offset(0, 1);
} else {
- mask += (*top)[0]->offset(0, 1);
+ mask += top[0]->offset(0, 1);
}
}
}
}
// compute offset
bottom_data += bottom[0]->offset(0, 1);
- top_data += (*top)[0]->offset(0, 1);
+ top_data += top[0]->offset(0, 1);
}
}
break;
template <typename Dtype>
void PoolingLayer<Dtype>::Backward_cpu(const vector<Blob<Dtype>*>& top,
- const vector<bool>& propagate_down, vector<Blob<Dtype>*>* bottom) {
+ const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
if (!propagate_down[0]) {
return;
}
const Dtype* top_diff = top[0]->cpu_diff();
- Dtype* bottom_diff = (*bottom)[0]->mutable_cpu_diff();
+ Dtype* bottom_diff = bottom[0]->mutable_cpu_diff();
// Different pooling methods. We explicitly do the switch outside the for
// loop to save time, although this results in more code.
- caffe_set((*bottom)[0]->count(), Dtype(0), bottom_diff);
+ caffe_set(bottom[0]->count(), Dtype(0), bottom_diff);
// We'll output the mask to top[1] if it's of size >1.
const bool use_top_mask = top.size() > 1;
const int* mask = NULL; // suppress warnings about uninitialized variables
bottom_diff[bottom_index] += top_diff[index];
}
}
- bottom_diff += (*bottom)[0]->offset(0, 1);
+ bottom_diff += bottom[0]->offset(0, 1);
top_diff += top[0]->offset(0, 1);
if (use_top_mask) {
top_mask += top[0]->offset(0, 1);
}
}
// offset
- bottom_diff += (*bottom)[0]->offset(0, 1);
+ bottom_diff += bottom[0]->offset(0, 1);
top_diff += top[0]->offset(0, 1);
}
}
template <typename Dtype>
void PoolingLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
- vector<Blob<Dtype>*>* top) {
+ const vector<Blob<Dtype>*>& top) {
const Dtype* bottom_data = bottom[0]->gpu_data();
- Dtype* top_data = (*top)[0]->mutable_gpu_data();
- int count = (*top)[0]->count();
+ Dtype* top_data = top[0]->mutable_gpu_data();
+ int count = top[0]->count();
// We'll output the mask to top[1] if it's of size >1.
- const bool use_top_mask = top->size() > 1;
+ const bool use_top_mask = top.size() > 1;
int* mask = NULL;
Dtype* top_mask = NULL;
switch (this->layer_param_.pooling_param().pool()) {
case PoolingParameter_PoolMethod_MAX:
if (use_top_mask) {
- top_mask = (*top)[1]->mutable_gpu_data();
+ top_mask = top[1]->mutable_gpu_data();
} else {
mask = max_idx_.mutable_gpu_data();
}
template <typename Dtype>
void PoolingLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
- const vector<bool>& propagate_down, vector<Blob<Dtype>*>* bottom) {
+ const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
if (!propagate_down[0]) {
return;
}
const Dtype* top_diff = top[0]->gpu_diff();
- Dtype* bottom_diff = (*bottom)[0]->mutable_gpu_diff();
- const int count = (*bottom)[0]->count();
+ Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
+ const int count = bottom[0]->count();
caffe_gpu_set(count, Dtype(0.), bottom_diff);
// We'll output the mask to top[1] if it's of size >1.
const bool use_top_mask = top.size() > 1;
template <typename Dtype>
void PowerLayer<Dtype>::LayerSetUp(const vector<Blob<Dtype>*>& bottom,
- vector<Blob<Dtype>*>* top) {
+ const vector<Blob<Dtype>*>& top) {
NeuronLayer<Dtype>::LayerSetUp(bottom, top);
power_ = this->layer_param_.power_param().power();
scale_ = this->layer_param_.power_param().scale();
// Compute y = (shift + scale * x)^power
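// For example (illustrative values): with shift = 1, scale = 3 and power = 2
// the layer computes y = (1 + 3x)^2 with derivative dy/dx = 2 * 3 * (1 + 3x),
// which matches the diff_scale_ * y / (shift + scale * x) form used in
// Backward_cpu below.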
template <typename Dtype>
void PowerLayer<Dtype>::Forward_cpu(const vector<Blob<Dtype>*>& bottom,
- vector<Blob<Dtype>*>* top) {
- Dtype* top_data = (*top)[0]->mutable_cpu_data();
+ const vector<Blob<Dtype>*>& top) {
+ Dtype* top_data = top[0]->mutable_cpu_data();
const int count = bottom[0]->count();
// Special case where we can ignore the input: scale or power is 0.
if (diff_scale_ == Dtype(0)) {
template <typename Dtype>
void PowerLayer<Dtype>::Backward_cpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down,
- vector<Blob<Dtype>*>* bottom) {
+ const vector<Blob<Dtype>*>& bottom) {
if (propagate_down[0]) {
- Dtype* bottom_diff = (*bottom)[0]->mutable_cpu_diff();
- const int count = (*bottom)[0]->count();
+ Dtype* bottom_diff = bottom[0]->mutable_cpu_diff();
+ const int count = bottom[0]->count();
const Dtype* top_diff = top[0]->cpu_diff();
if (diff_scale_ == Dtype(0) || power_ == Dtype(1)) {
caffe_set(count, diff_scale_, bottom_diff);
} else {
- const Dtype* bottom_data = (*bottom)[0]->cpu_data();
+ const Dtype* bottom_data = bottom[0]->cpu_data();
// Compute dy/dx = scale * power * (shift + scale * x)^(power - 1)
// = diff_scale * y / (shift + scale * x)
if (power_ == Dtype(2)) {
template <typename Dtype>
void PowerLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
- vector<Blob<Dtype>*>* top) {
- Dtype* top_data = (*top)[0]->mutable_gpu_data();
+ const vector<Blob<Dtype>*>& top) {
+ Dtype* top_data = top[0]->mutable_gpu_data();
const int count = bottom[0]->count();
// Special case where we can ignore the input: scale or power is 0.
if (diff_scale_ == Dtype(0)) {
template <typename Dtype>
void PowerLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down,
- vector<Blob<Dtype>*>* bottom) {
+ const vector<Blob<Dtype>*>& bottom) {
if (propagate_down[0]) {
- Dtype* bottom_diff = (*bottom)[0]->mutable_gpu_diff();
- const int count = (*bottom)[0]->count();
+ Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
+ const int count = bottom[0]->count();
const Dtype* top_diff = top[0]->gpu_diff();
if (diff_scale_ == Dtype(0) || power_ == Dtype(1)) {
caffe_gpu_set(count, diff_scale_, bottom_diff);
} else {
- const Dtype* bottom_data = (*bottom)[0]->gpu_data();
+ const Dtype* bottom_data = bottom[0]->gpu_data();
// Compute dy/dx = scale * power * (shift + scale * x)^(power - 1)
// = diff_scale * y / (shift + scale * x)
if (power_ == Dtype(2)) {
template <typename Dtype>
void ReLULayer<Dtype>::Forward_cpu(const vector<Blob<Dtype>*>& bottom,
- vector<Blob<Dtype>*>* top) {
+ const vector<Blob<Dtype>*>& top) {
const Dtype* bottom_data = bottom[0]->cpu_data();
- Dtype* top_data = (*top)[0]->mutable_cpu_data();
+ Dtype* top_data = top[0]->mutable_cpu_data();
const int count = bottom[0]->count();
Dtype negative_slope = this->layer_param_.relu_param().negative_slope();
for (int i = 0; i < count; ++i) {
template <typename Dtype>
void ReLULayer<Dtype>::Backward_cpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down,
- vector<Blob<Dtype>*>* bottom) {
+ const vector<Blob<Dtype>*>& bottom) {
if (propagate_down[0]) {
- const Dtype* bottom_data = (*bottom)[0]->cpu_data();
+ const Dtype* bottom_data = bottom[0]->cpu_data();
const Dtype* top_diff = top[0]->cpu_diff();
- Dtype* bottom_diff = (*bottom)[0]->mutable_cpu_diff();
- const int count = (*bottom)[0]->count();
+ Dtype* bottom_diff = bottom[0]->mutable_cpu_diff();
+ const int count = bottom[0]->count();
Dtype negative_slope = this->layer_param_.relu_param().negative_slope();
for (int i = 0; i < count; ++i) {
bottom_diff[i] = top_diff[i] * ((bottom_data[i] > 0)
template <typename Dtype>
void ReLULayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
- vector<Blob<Dtype>*>* top) {
+ const vector<Blob<Dtype>*>& top) {
const Dtype* bottom_data = bottom[0]->gpu_data();
- Dtype* top_data = (*top)[0]->mutable_gpu_data();
+ Dtype* top_data = top[0]->mutable_gpu_data();
const int count = bottom[0]->count();
Dtype negative_slope = this->layer_param_.relu_param().negative_slope();
// NOLINT_NEXT_LINE(whitespace/operators)
template <typename Dtype>
void ReLULayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down,
- vector<Blob<Dtype>*>* bottom) {
+ const vector<Blob<Dtype>*>& bottom) {
if (propagate_down[0]) {
- const Dtype* bottom_data = (*bottom)[0]->gpu_data();
+ const Dtype* bottom_data = bottom[0]->gpu_data();
const Dtype* top_diff = top[0]->gpu_diff();
- Dtype* bottom_diff = (*bottom)[0]->mutable_gpu_diff();
- const int count = (*bottom)[0]->count();
+ Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
+ const int count = bottom[0]->count();
Dtype negative_slope = this->layer_param_.relu_param().negative_slope();
// NOLINT_NEXT_LINE(whitespace/operators)
ReLUBackward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>(
template <typename Dtype>
void SigmoidCrossEntropyLossLayer<Dtype>::LayerSetUp(
- const vector<Blob<Dtype>*>& bottom, vector<Blob<Dtype>*>* top) {
+ const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) {
LossLayer<Dtype>::LayerSetUp(bottom, top);
sigmoid_bottom_vec_.clear();
sigmoid_bottom_vec_.push_back(bottom[0]);
sigmoid_top_vec_.clear();
sigmoid_top_vec_.push_back(sigmoid_output_.get());
- sigmoid_layer_->SetUp(sigmoid_bottom_vec_, &sigmoid_top_vec_);
+ sigmoid_layer_->SetUp(sigmoid_bottom_vec_, sigmoid_top_vec_);
}
template <typename Dtype>
void SigmoidCrossEntropyLossLayer<Dtype>::Reshape(
- const vector<Blob<Dtype>*>& bottom, vector<Blob<Dtype>*>* top) {
+ const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) {
LossLayer<Dtype>::Reshape(bottom, top);
CHECK_EQ(bottom[0]->count(), bottom[1]->count()) <<
"SIGMOID_CROSS_ENTROPY_LOSS layer inputs must have the same count.";
- sigmoid_layer_->Reshape(sigmoid_bottom_vec_, &sigmoid_top_vec_);
+ sigmoid_layer_->Reshape(sigmoid_bottom_vec_, sigmoid_top_vec_);
}
template <typename Dtype>
void SigmoidCrossEntropyLossLayer<Dtype>::Forward_cpu(
- const vector<Blob<Dtype>*>& bottom, vector<Blob<Dtype>*>* top) {
+ const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) {
// The forward pass computes the sigmoid outputs.
sigmoid_bottom_vec_[0] = bottom[0];
- sigmoid_layer_->Forward(sigmoid_bottom_vec_, &sigmoid_top_vec_);
+ sigmoid_layer_->Forward(sigmoid_bottom_vec_, sigmoid_top_vec_);
// Compute the loss (negative log likelihood)
const int count = bottom[0]->count();
const int num = bottom[0]->num();
loss -= input_data[i] * (target[i] - (input_data[i] >= 0)) -
log(1 + exp(input_data[i] - 2 * input_data[i] * (input_data[i] >= 0)));
}
- (*top)[0]->mutable_cpu_data()[0] = loss / num;
+ top[0]->mutable_cpu_data()[0] = loss / num;
}
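// The per-element expression above is the numerically stable form of the
// cross-entropy -[t * log(sigmoid(x)) + (1 - t) * log(1 - sigmoid(x))]: for
// x >= 0 it evaluates to x - x*t + log(1 + exp(-x)), and for x < 0 to
// -x*t + log(1 + exp(x)), so the exp() argument is never positive and cannot
// overflow.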
template <typename Dtype>
void SigmoidCrossEntropyLossLayer<Dtype>::Backward_cpu(
const vector<Blob<Dtype>*>& top, const vector<bool>& propagate_down,
- vector<Blob<Dtype>*>* bottom) {
+ const vector<Blob<Dtype>*>& bottom) {
if (propagate_down[1]) {
LOG(FATAL) << this->type_name()
<< " Layer cannot backpropagate to label inputs.";
}
if (propagate_down[0]) {
// First, compute the diff
- const int count = (*bottom)[0]->count();
- const int num = (*bottom)[0]->num();
+ const int count = bottom[0]->count();
+ const int num = bottom[0]->num();
const Dtype* sigmoid_output_data = sigmoid_output_->cpu_data();
- const Dtype* target = (*bottom)[1]->cpu_data();
- Dtype* bottom_diff = (*bottom)[0]->mutable_cpu_diff();
+ const Dtype* target = bottom[1]->cpu_data();
+ Dtype* bottom_diff = bottom[0]->mutable_cpu_diff();
caffe_sub(count, sigmoid_output_data, target, bottom_diff);
// Scale down gradient
const Dtype loss_weight = top[0]->cpu_diff()[0];
template <typename Dtype>
void SigmoidCrossEntropyLossLayer<Dtype>::Forward_gpu(
- const vector<Blob<Dtype>*>& bottom, vector<Blob<Dtype>*>* top) {
+ const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) {
// The forward pass computes the sigmoid outputs.
sigmoid_bottom_vec_[0] = bottom[0];
- sigmoid_layer_->Forward(sigmoid_bottom_vec_, &sigmoid_top_vec_);
+ sigmoid_layer_->Forward(sigmoid_bottom_vec_, sigmoid_top_vec_);
// Compute the loss (negative log likelihood)
const int count = bottom[0]->count();
const int num = bottom[0]->num();
loss -= input_data[i] * (target[i] - (input_data[i] >= 0)) -
log(1 + exp(input_data[i] - 2 * input_data[i] * (input_data[i] >= 0)));
}
- (*top)[0]->mutable_cpu_data()[0] = loss / num;
+ top[0]->mutable_cpu_data()[0] = loss / num;
}
template <typename Dtype>
void SigmoidCrossEntropyLossLayer<Dtype>::Backward_gpu(
const vector<Blob<Dtype>*>& top, const vector<bool>& propagate_down,
- vector<Blob<Dtype>*>* bottom) {
+ const vector<Blob<Dtype>*>& bottom) {
if (propagate_down[1]) {
LOG(FATAL) << this->type_name()
<< " Layer cannot backpropagate to label inputs.";
}
if (propagate_down[0]) {
// First, compute the diff
- const int count = (*bottom)[0]->count();
- const int num = (*bottom)[0]->num();
+ const int count = bottom[0]->count();
+ const int num = bottom[0]->num();
const Dtype* sigmoid_output_data = sigmoid_output_->gpu_data();
- const Dtype* target = (*bottom)[1]->gpu_data();
- Dtype* bottom_diff = (*bottom)[0]->mutable_gpu_diff();
+ const Dtype* target = bottom[1]->gpu_data();
+ Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
caffe_copy(count, sigmoid_output_data, bottom_diff);
caffe_gpu_axpy(count, Dtype(-1), target, bottom_diff);
// Scale down gradient
template <typename Dtype>
void SigmoidLayer<Dtype>::Forward_cpu(const vector<Blob<Dtype>*>& bottom,
- vector<Blob<Dtype>*>* top) {
+ const vector<Blob<Dtype>*>& top) {
const Dtype* bottom_data = bottom[0]->cpu_data();
- Dtype* top_data = (*top)[0]->mutable_cpu_data();
+ Dtype* top_data = top[0]->mutable_cpu_data();
const int count = bottom[0]->count();
for (int i = 0; i < count; ++i) {
top_data[i] = sigmoid(bottom_data[i]);
template <typename Dtype>
void SigmoidLayer<Dtype>::Backward_cpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down,
- vector<Blob<Dtype>*>* bottom) {
+ const vector<Blob<Dtype>*>& bottom) {
if (propagate_down[0]) {
const Dtype* top_data = top[0]->cpu_data();
const Dtype* top_diff = top[0]->cpu_diff();
- Dtype* bottom_diff = (*bottom)[0]->mutable_cpu_diff();
- const int count = (*bottom)[0]->count();
+ Dtype* bottom_diff = bottom[0]->mutable_cpu_diff();
+ const int count = bottom[0]->count();
for (int i = 0; i < count; ++i) {
const Dtype sigmoid_x = top_data[i];
bottom_diff[i] = top_diff[i] * sigmoid_x * (1. - sigmoid_x);
template <typename Dtype>
void SigmoidLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
- vector<Blob<Dtype>*>* top) {
+ const vector<Blob<Dtype>*>& top) {
const Dtype* bottom_data = bottom[0]->gpu_data();
- Dtype* top_data = (*top)[0]->mutable_gpu_data();
+ Dtype* top_data = top[0]->mutable_gpu_data();
const int count = bottom[0]->count();
// NOLINT_NEXT_LINE(whitespace/operators)
SigmoidForward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>(
template <typename Dtype>
void SigmoidLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down,
- vector<Blob<Dtype>*>* bottom) {
+ const vector<Blob<Dtype>*>& bottom) {
if (propagate_down[0]) {
const Dtype* top_data = top[0]->gpu_data();
const Dtype* top_diff = top[0]->gpu_diff();
- Dtype* bottom_diff = (*bottom)[0]->mutable_gpu_diff();
- const int count = (*bottom)[0]->count();
+ Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
+ const int count = bottom[0]->count();
// NOLINT_NEXT_LINE(whitespace/operators)
SigmoidBackward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>(
count, top_diff, top_data, bottom_diff);
template <typename Dtype>
void SilenceLayer<Dtype>::Backward_cpu(const vector<Blob<Dtype>*>& top,
- const vector<bool>& propagate_down, vector<Blob<Dtype>*>* bottom) {
- for (int i = 0; i < bottom->size(); ++i) {
+ const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
+ for (int i = 0; i < bottom.size(); ++i) {
if (propagate_down[i]) {
- caffe_set((*bottom)[i]->count(), Dtype(0),
- (*bottom)[i]->mutable_cpu_data());
+ caffe_set(bottom[i]->count(), Dtype(0),
+ bottom[i]->mutable_cpu_data());
}
}
}
template <typename Dtype>
void SilenceLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
- vector<Blob<Dtype>*>* top) {
+ const vector<Blob<Dtype>*>& top) {
// Do nothing.
}
template <typename Dtype>
void SilenceLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
- const vector<bool>& propagate_down, vector<Blob<Dtype>*>* bottom) {
- for (int i = 0; i < bottom->size(); ++i) {
+ const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
+ for (int i = 0; i < bottom.size(); ++i) {
if (propagate_down[i]) {
- caffe_gpu_set((*bottom)[i]->count(), Dtype(0),
- (*bottom)[i]->mutable_gpu_data());
+ caffe_gpu_set(bottom[i]->count(), Dtype(0),
+ bottom[i]->mutable_gpu_data());
}
}
}
template <typename Dtype>
void SliceLayer<Dtype>::LayerSetUp(const vector<Blob<Dtype>*>& bottom,
- vector<Blob<Dtype>*>* top) {
+ const vector<Blob<Dtype>*>& top) {
const SliceParameter& slice_param = this->layer_param_.slice_param();
slice_dim_ = slice_param.slice_dim();
CHECK_GE(slice_dim_, 0);
template <typename Dtype>
void SliceLayer<Dtype>::Reshape(const vector<Blob<Dtype>*>& bottom,
- vector<Blob<Dtype>*>* top) {
+ const vector<Blob<Dtype>*>& top) {
count_ = 0;
num_ = bottom[0]->num();
channels_ = bottom[0]->channels();
height_ = bottom[0]->height();
width_ = bottom[0]->width();
if (slice_point_.size() != 0) {
- CHECK_EQ(slice_point_.size(), top->size() - 1);
+ CHECK_EQ(slice_point_.size(), top.size() - 1);
if (slice_dim_ == 0) {
- CHECK_LE(top->size(), num_);
+ CHECK_LE(top.size(), num_);
} else {
- CHECK_LE(top->size(), channels_);
+ CHECK_LE(top.size(), channels_);
}
int prev = 0;
vector<int> slices;
}
if (slice_dim_ == 0) {
slices.push_back(num_ - prev);
- for (int i = 0; i < top->size(); ++i) {
- (*top)[i]->Reshape(slices[i], channels_, height_, width_);
- count_ += (*top)[i]->count();
+ for (int i = 0; i < top.size(); ++i) {
+ top[i]->Reshape(slices[i], channels_, height_, width_);
+ count_ += top[i]->count();
}
} else {
slices.push_back(channels_ - prev);
- for (int i = 0; i < top->size(); ++i) {
- (*top)[i]->Reshape(num_, slices[i], height_, width_);
- count_ += (*top)[i]->count();
+ for (int i = 0; i < top.size(); ++i) {
+ top[i]->Reshape(num_, slices[i], height_, width_);
+ count_ += top[i]->count();
}
}
} else {
if (slice_dim_ == 0) {
- CHECK_EQ(num_ % top->size(), 0)
- << "Number of top blobs (" << top->size() << ") "
+ CHECK_EQ(num_ % top.size(), 0)
+ << "Number of top blobs (" << top.size() << ") "
<< "should evenly divide input num ( " << num_ << ")";
- num_ = num_ / top->size();
+ num_ = num_ / top.size();
} else {
- CHECK_EQ(channels_ % top->size(), 0)
- << "Number of top blobs (" << top->size() << ") "
+ CHECK_EQ(channels_ % top.size(), 0)
+ << "Number of top blobs (" << top.size() << ") "
<< "should evenly divide input channels ( " << channels_ << ")";
- channels_ = channels_ / top->size();
+ channels_ = channels_ / top.size();
}
- for (int i = 0; i < top->size(); ++i) {
- (*top)[i]->Reshape(num_, channels_, height_, width_);
- count_ += (*top)[i]->count();
+ for (int i = 0; i < top.size(); ++i) {
+ top[i]->Reshape(num_, channels_, height_, width_);
+ count_ += top[i]->count();
}
}
CHECK_EQ(count_, bottom[0]->count());
template <typename Dtype>
void SliceLayer<Dtype>::Forward_cpu(const vector<Blob<Dtype>*>& bottom,
- vector<Blob<Dtype>*>* top) {
+ const vector<Blob<Dtype>*>& top) {
const Dtype* bottom_data = bottom[0]->mutable_cpu_data();
if (slice_dim_ == 0) {
int offset_num = 0;
- for (int i = 0; i < top->size(); ++i) {
- Blob<Dtype>* blob = (*top)[i];
+ for (int i = 0; i < top.size(); ++i) {
+ Blob<Dtype>* blob = top[i];
Dtype* top_data = blob->mutable_cpu_data();
caffe_copy(blob->count(), bottom_data + bottom[0]->offset(offset_num),
top_data);
}
} else if (slice_dim_ == 1) {
int offset_channel = 0;
- for (int i = 0; i < top->size(); ++i) {
- Blob<Dtype>* blob = (*top)[i];
+ for (int i = 0; i < top.size(); ++i) {
+ Blob<Dtype>* blob = top[i];
Dtype* top_data = blob->mutable_cpu_data();
const int num_elem = blob->channels() * blob->height() * blob->width();
for (int n = 0; n < num_; ++n) {
template <typename Dtype>
void SliceLayer<Dtype>::Backward_cpu(const vector<Blob<Dtype>*>& top,
- const vector<bool>& propagate_down, vector<Blob<Dtype>*>* bottom) {
+ const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
if (!propagate_down[0]) { return; }
- Dtype* bottom_diff = (*bottom)[0]->mutable_cpu_diff();
+ Dtype* bottom_diff = bottom[0]->mutable_cpu_diff();
if (slice_dim_ == 0) {
int offset_num = 0;
for (int i = 0; i < top.size(); ++i) {
Blob<Dtype>* blob = top[i];
const Dtype* top_diff = blob->cpu_diff();
caffe_copy(blob->count(), top_diff,
- bottom_diff + (*bottom)[0]->offset(offset_num));
+ bottom_diff + bottom[0]->offset(offset_num));
offset_num += blob->num();
}
} else if (slice_dim_ == 1) {
const int num_elem = blob->channels() * blob->height() * blob->width();
for (int n = 0; n < num_; ++n) {
caffe_copy(num_elem, top_diff + blob->offset(n),
- bottom_diff + (*bottom)[0]->offset(n, offset_channel));
+ bottom_diff + bottom[0]->offset(n, offset_channel));
}
offset_channel += blob->channels();
}
template <typename Dtype>
void SliceLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
- vector<Blob<Dtype>*>* top) {
+ const vector<Blob<Dtype>*>& top) {
const Dtype* bottom_data = bottom[0]->mutable_gpu_data();
if (slice_dim_ == 0) {
int offset_num = 0;
- for (int i = 0; i < top->size(); ++i) {
- Blob<Dtype>* blob = (*top)[i];
+ for (int i = 0; i < top.size(); ++i) {
+ Blob<Dtype>* blob = top[i];
Dtype* top_data = blob->mutable_gpu_data();
caffe_copy(blob->count(), bottom_data + bottom[0]->offset(offset_num),
top_data);
}
} else if (slice_dim_ == 1) {
int offset_channel = 0;
- for (int i = 0; i < top->size(); ++i) {
- Blob<Dtype>* blob = (*top)[i];
+ for (int i = 0; i < top.size(); ++i) {
+ Blob<Dtype>* blob = top[i];
Dtype* top_data = blob->mutable_gpu_data();
const int num_elem = blob->channels() * blob->height() * blob->width();
for (int n = 0; n < num_; ++n) {
template <typename Dtype>
void SliceLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
- const vector<bool>& propagate_down, vector<Blob<Dtype>*>* bottom) {
+ const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
if (!propagate_down[0]) { return; }
- Dtype* bottom_diff = (*bottom)[0]->mutable_gpu_diff();
+ Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
if (slice_dim_ == 0) {
int offset_num = 0;
for (int i = 0; i < top.size(); ++i) {
Blob<Dtype>* blob = top[i];
const Dtype* top_diff = blob->gpu_diff();
caffe_copy(blob->count(), top_diff,
- bottom_diff + (*bottom)[0]->offset(offset_num));
+ bottom_diff + bottom[0]->offset(offset_num));
offset_num += blob->num();
}
} else if (slice_dim_ == 1) {
const int num_elem = blob->channels() * blob->height() * blob->width();
for (int n = 0; n < num_; ++n) {
caffe_copy(num_elem, top_diff + blob->offset(n),
- bottom_diff + (*bottom)[0]->offset(n, offset_channel));
+ bottom_diff + bottom[0]->offset(n, offset_channel));
}
offset_channel += blob->channels();
}
template <typename Dtype>
void SoftmaxLayer<Dtype>::Reshape(const vector<Blob<Dtype>*>& bottom,
- vector<Blob<Dtype>*>* top) {
- (*top)[0]->Reshape(bottom[0]->num(), bottom[0]->channels(),
+ const vector<Blob<Dtype>*>& top) {
+ top[0]->Reshape(bottom[0]->num(), bottom[0]->channels(),
bottom[0]->height(), bottom[0]->width());
sum_multiplier_.Reshape(1, bottom[0]->channels(), 1, 1);
Dtype* multiplier_data = sum_multiplier_.mutable_cpu_data();
template <typename Dtype>
void SoftmaxLayer<Dtype>::Forward_cpu(const vector<Blob<Dtype>*>& bottom,
- vector<Blob<Dtype>*>* top) {
+ const vector<Blob<Dtype>*>& top) {
const Dtype* bottom_data = bottom[0]->cpu_data();
- Dtype* top_data = (*top)[0]->mutable_cpu_data();
+ Dtype* top_data = top[0]->mutable_cpu_data();
Dtype* scale_data = scale_.mutable_cpu_data();
int num = bottom[0]->num();
int channels = bottom[0]->channels();
top_data + i * dim, sum_multiplier_.cpu_data(), 0., scale_data);
// division
for (int j = 0; j < channels; j++) {
- caffe_div(spatial_dim, top_data + (*top)[0]->offset(i, j), scale_data,
- top_data + (*top)[0]->offset(i, j));
+ caffe_div(spatial_dim, top_data + top[0]->offset(i, j), scale_data,
+ top_data + top[0]->offset(i, j));
}
}
}
template <typename Dtype>
void SoftmaxLayer<Dtype>::Backward_cpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down,
- vector<Blob<Dtype>*>* bottom) {
+ const vector<Blob<Dtype>*>& bottom) {
const Dtype* top_diff = top[0]->cpu_diff();
const Dtype* top_data = top[0]->cpu_data();
- Dtype* bottom_diff = (*bottom)[0]->mutable_cpu_diff();
+ Dtype* bottom_diff = bottom[0]->mutable_cpu_diff();
Dtype* scale_data = scale_.mutable_cpu_data();
int num = top[0]->num();
int channels = top[0]->channels();
template <typename Dtype>
void SoftmaxLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
- vector<Blob<Dtype>*>* top) {
+ const vector<Blob<Dtype>*>& top) {
const Dtype* bottom_data = bottom[0]->gpu_data();
- Dtype* top_data = (*top)[0]->mutable_gpu_data();
+ Dtype* top_data = top[0]->mutable_gpu_data();
Dtype* scale_data = scale_.mutable_gpu_data();
int num = bottom[0]->num();
int channels = bottom[0]->channels();
template <typename Dtype>
void SoftmaxLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
- const vector<bool>& propagate_down, vector<Blob<Dtype>*>* bottom) {
+ const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
const Dtype* top_diff = top[0]->gpu_diff();
const Dtype* top_data = top[0]->gpu_data();
- Dtype* bottom_diff = (*bottom)[0]->mutable_gpu_diff();
+ Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
Dtype* scale_data = scale_.mutable_gpu_data();
int num = top[0]->num();
int channels = top[0]->channels();
template <typename Dtype>
void SoftmaxWithLossLayer<Dtype>::LayerSetUp(
- const vector<Blob<Dtype>*>& bottom, vector<Blob<Dtype>*>* top) {
+ const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) {
LossLayer<Dtype>::LayerSetUp(bottom, top);
softmax_bottom_vec_.clear();
softmax_bottom_vec_.push_back(bottom[0]);
softmax_top_vec_.clear();
softmax_top_vec_.push_back(&prob_);
- softmax_layer_->SetUp(softmax_bottom_vec_, &softmax_top_vec_);
+ softmax_layer_->SetUp(softmax_bottom_vec_, softmax_top_vec_);
}
template <typename Dtype>
void SoftmaxWithLossLayer<Dtype>::Reshape(
- const vector<Blob<Dtype>*>& bottom, vector<Blob<Dtype>*>* top) {
+ const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) {
LossLayer<Dtype>::Reshape(bottom, top);
- softmax_layer_->Reshape(softmax_bottom_vec_, &softmax_top_vec_);
- if (top->size() >= 2) {
+ softmax_layer_->Reshape(softmax_bottom_vec_, softmax_top_vec_);
+ if (top.size() >= 2) {
// softmax output
- (*top)[1]->ReshapeLike(*bottom[0]);
+ top[1]->ReshapeLike(*bottom[0]);
}
}
template <typename Dtype>
void SoftmaxWithLossLayer<Dtype>::Forward_cpu(
- const vector<Blob<Dtype>*>& bottom, vector<Blob<Dtype>*>* top) {
+ const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) {
// The forward pass computes the softmax prob values.
- softmax_layer_->Forward(softmax_bottom_vec_, &softmax_top_vec_);
+ softmax_layer_->Forward(softmax_bottom_vec_, softmax_top_vec_);
const Dtype* prob_data = prob_.cpu_data();
const Dtype* label = bottom[1]->cpu_data();
int num = prob_.num();
Dtype(FLT_MIN)));
}
}
- (*top)[0]->mutable_cpu_data()[0] = loss / num / spatial_dim;
- if (top->size() == 2) {
- (*top)[1]->ShareData(prob_);
+ top[0]->mutable_cpu_data()[0] = loss / num / spatial_dim;
+ if (top.size() == 2) {
+ top[1]->ShareData(prob_);
}
}
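// The normalization above gives loss = -(1 / (num * spatial_dim)) * sum of
// log(max(p[label], FLT_MIN)) over all samples and spatial positions, i.e. the
// average negative log-likelihood per prediction; e.g. two predictions with
// probabilities 0.5 and 0.25 give (log 2 + log 4) / 2, roughly 1.04.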
template <typename Dtype>
void SoftmaxWithLossLayer<Dtype>::Backward_cpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down,
- vector<Blob<Dtype>*>* bottom) {
+ const vector<Blob<Dtype>*>& bottom) {
if (propagate_down[1]) {
LOG(FATAL) << this->type_name()
<< " Layer cannot backpropagate to label inputs.";
}
if (propagate_down[0]) {
- Dtype* bottom_diff = (*bottom)[0]->mutable_cpu_diff();
+ Dtype* bottom_diff = bottom[0]->mutable_cpu_diff();
const Dtype* prob_data = prob_.cpu_data();
caffe_copy(prob_.count(), prob_data, bottom_diff);
- const Dtype* label = (*bottom)[1]->cpu_data();
+ const Dtype* label = bottom[1]->cpu_data();
int num = prob_.num();
int dim = prob_.count() / num;
int spatial_dim = prob_.height() * prob_.width();
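The remainder of this backward pass (elided here) follows from the usual softmax-plus-cross-entropy gradient: having copied prob_data into bottom_diff above, it only remains to subtract 1 at each ground-truth label and normalize,
@f$ \partial L / \partial z_k = (p_k - \delta_{k,\ell}) / (N \cdot H \cdot W) @f$ for label @f$ \ell @f$,
up to the layer's loss weight, matching the loss / num / spatial_dim scaling used in Forward_cpu.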
template <typename Dtype>
void SoftmaxWithLossLayer<Dtype>::Forward_gpu(
- const vector<Blob<Dtype>*>& bottom, vector<Blob<Dtype>*>* top) {
+ const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) {
Forward_cpu(bottom, top);
}
template <typename Dtype>
void SoftmaxWithLossLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
- const vector<bool>& propagate_down, vector<Blob<Dtype>*>* bottom) {
+ const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
// TODO(Yangqing): implement the GPU version of softmax.
Backward_cpu(top, propagate_down, bottom);
}
template <typename Dtype>
void SplitLayer<Dtype>::Reshape(const vector<Blob<Dtype>*>& bottom,
- vector<Blob<Dtype>*>* top) {
+ const vector<Blob<Dtype>*>& top) {
count_ = bottom[0]->count();
- for (int i = 0; i < top->size(); ++i) {
+ for (int i = 0; i < top.size(); ++i) {
// Do not allow in-place computation in the SplitLayer. Instead, share data
// by reference in the forward pass, and keep separate diff allocations in
// the backward pass. (Technically, it should be possible to share the diff
// blob of the first split output with the input, but this seems to cause
// some strange effects in practice...)
- CHECK_NE((*top)[i], bottom[0]) << this->type_name() << " Layer does not "
+ CHECK_NE(top[i], bottom[0]) << this->type_name() << " Layer does not "
"allow in-place computation.";
- (*top)[i]->Reshape(bottom[0]->num(), bottom[0]->channels(),
+ top[i]->Reshape(bottom[0]->num(), bottom[0]->channels(),
bottom[0]->height(), bottom[0]->width());
- CHECK_EQ(count_, (*top)[i]->count());
+ CHECK_EQ(count_, top[i]->count());
}
}
template <typename Dtype>
void SplitLayer<Dtype>::Forward_cpu(const vector<Blob<Dtype>*>& bottom,
- vector<Blob<Dtype>*>* top) {
- for (int i = 0; i < top->size(); ++i) {
- (*top)[i]->ShareData(*bottom[0]);
+ const vector<Blob<Dtype>*>& top) {
+ for (int i = 0; i < top.size(); ++i) {
+ top[i]->ShareData(*bottom[0]);
}
}
template <typename Dtype>
void SplitLayer<Dtype>::Backward_cpu(const vector<Blob<Dtype>*>& top,
- const vector<bool>& propagate_down, vector<Blob<Dtype>*>* bottom) {
+ const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
if (!propagate_down[0]) { return; }
if (top.size() == 1) {
- caffe_copy(count_, top[0]->cpu_diff(), (*bottom)[0]->mutable_cpu_diff());
+ caffe_copy(count_, top[0]->cpu_diff(), bottom[0]->mutable_cpu_diff());
return;
}
caffe_add(count_, top[0]->cpu_diff(), top[1]->cpu_diff(),
- (*bottom)[0]->mutable_cpu_diff());
+ bottom[0]->mutable_cpu_diff());
// Add remaining top blob diffs.
for (int i = 2; i < top.size(); ++i) {
const Dtype* top_diff = top[i]->cpu_diff();
- Dtype* bottom_diff = (*bottom)[0]->mutable_cpu_diff();
+ Dtype* bottom_diff = bottom[0]->mutable_cpu_diff();
caffe_axpy(count_, Dtype(1.), top_diff, bottom_diff);
}
}
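In other words, the split backward pass simply accumulates the gradients of all outputs into its single input, @f$ \mathrm{bottom\_diff} = \sum_i \mathrm{top\_diff}_i @f$; the code special-cases one top (plain copy) and two tops (caffe_add) and folds any remaining tops in with caffe_axpy.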
template <typename Dtype>
void SplitLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
- vector<Blob<Dtype>*>* top) {
- for (int i = 0; i < top->size(); ++i) {
- (*top)[i]->ShareData(*bottom[0]);
+ const vector<Blob<Dtype>*>& top) {
+ for (int i = 0; i < top.size(); ++i) {
+ top[i]->ShareData(*bottom[0]);
}
}
template <typename Dtype>
void SplitLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
- const vector<bool>& propagate_down, vector<Blob<Dtype>*>* bottom) {
+ const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
if (!propagate_down[0]) { return; }
if (top.size() == 1) {
- caffe_copy(count_, top[0]->gpu_diff(), (*bottom)[0]->mutable_gpu_diff());
+ caffe_copy(count_, top[0]->gpu_diff(), bottom[0]->mutable_gpu_diff());
return;
}
caffe_gpu_add(count_, top[0]->gpu_diff(), top[1]->gpu_diff(),
- (*bottom)[0]->mutable_gpu_diff());
+ bottom[0]->mutable_gpu_diff());
// Add remaining top blob diffs.
for (int i = 2; i < top.size(); ++i) {
const Dtype* top_diff = top[i]->gpu_diff();
- Dtype* bottom_diff = (*bottom)[0]->mutable_gpu_diff();
+ Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
caffe_gpu_axpy(count_, Dtype(1.), top_diff, bottom_diff);
}
}
template <typename Dtype>
void TanHLayer<Dtype>::Forward_cpu(const vector<Blob<Dtype>*>& bottom,
- vector<Blob<Dtype>*>* top) {
+ const vector<Blob<Dtype>*>& top) {
const Dtype* bottom_data = bottom[0]->cpu_data();
- Dtype* top_data = (*top)[0]->mutable_cpu_data();
+ Dtype* top_data = top[0]->mutable_cpu_data();
Dtype exp2x;
const int count = bottom[0]->count();
for (int i = 0; i < count; ++i) {
template <typename Dtype>
void TanHLayer<Dtype>::Backward_cpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down,
- vector<Blob<Dtype>*>* bottom) {
+ const vector<Blob<Dtype>*>& bottom) {
if (propagate_down[0]) {
const Dtype* top_data = top[0]->cpu_data();
const Dtype* top_diff = top[0]->cpu_diff();
- Dtype* bottom_diff = (*bottom)[0]->mutable_cpu_diff();
- const int count = (*bottom)[0]->count();
+ Dtype* bottom_diff = bottom[0]->mutable_cpu_diff();
+ const int count = bottom[0]->count();
Dtype tanhx;
for (int i = 0; i < count; ++i) {
tanhx = top_data[i];
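Both the elided CPU loop body above and the TanHBackward CUDA kernel further below rely on the same derivative identity, reusing the cached forward output instead of recomputing the activation: since @f$ d\tanh(x)/dx = 1 - \tanh^2(x) @f$, the per-element update is presumably bottom_diff[i] = top_diff[i] * (1 - tanhx * tanhx).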
template <typename Dtype>
void TanHLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
- vector<Blob<Dtype>*>* top) {
+ const vector<Blob<Dtype>*>& top) {
const Dtype* bottom_data = bottom[0]->gpu_data();
- Dtype* top_data = (*top)[0]->mutable_gpu_data();
+ Dtype* top_data = top[0]->mutable_gpu_data();
const int count = bottom[0]->count();
// NOLINT_NEXT_LINE(whitespace/operators)
TanHForward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>(
template <typename Dtype>
void TanHLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down,
- vector<Blob<Dtype>*>* bottom) {
+ const vector<Blob<Dtype>*>& bottom) {
if (propagate_down[0]) {
const Dtype* top_data = top[0]->gpu_data();
const Dtype* top_diff = top[0]->gpu_diff();
- Dtype* bottom_diff = (*bottom)[0]->mutable_gpu_diff();
- const int count = (*bottom)[0]->count();
+ Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
+ const int count = bottom[0]->count();
// NOLINT_NEXT_LINE(whitespace/operators)
TanHBackward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>(
count, top_diff, top_data, bottom_diff);
template <typename Dtype>
void ThresholdLayer<Dtype>::LayerSetUp(const vector<Blob<Dtype>*>& bottom,
- vector<Blob<Dtype>*>* top) {
+ const vector<Blob<Dtype>*>& top) {
NeuronLayer<Dtype>::LayerSetUp(bottom, top);
threshold_ = this->layer_param_.threshold_param().threshold();
}
template <typename Dtype>
void ThresholdLayer<Dtype>::Forward_cpu(const vector<Blob<Dtype>*>& bottom,
- vector<Blob<Dtype>*>* top) {
+ const vector<Blob<Dtype>*>& top) {
const Dtype* bottom_data = bottom[0]->cpu_data();
- Dtype* top_data = (*top)[0]->mutable_cpu_data();
+ Dtype* top_data = top[0]->mutable_cpu_data();
const int count = bottom[0]->count();
for (int i = 0; i < count; ++i) {
top_data[i] = (bottom_data[i] > threshold_) ? Dtype(1) : Dtype(0);
template <typename Dtype>
void ThresholdLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
- vector<Blob<Dtype>*>* top) {
+ const vector<Blob<Dtype>*>& top) {
const Dtype* bottom_data = bottom[0]->gpu_data();
- Dtype* top_data = (*top)[0]->mutable_gpu_data();
+ Dtype* top_data = top[0]->mutable_gpu_data();
const int count = bottom[0]->count();
// NOLINT_NEXT_LINE(whitespace/operators)
ThresholdForward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>(
template <typename Dtype>
void WindowDataLayer<Dtype>::DataLayerSetUp(const vector<Blob<Dtype>*>& bottom,
- vector<Blob<Dtype>*>* top) {
+ const vector<Blob<Dtype>*>& top) {
// LayerSetUp runs through the window_file and creates two structures
// that hold windows: one for foreground (object) windows and one
// for background (non-object) windows. We use an overlap threshold
int crop_size = this->layer_param_.window_data_param().crop_size();
CHECK_GT(crop_size, 0);
const int batch_size = this->layer_param_.window_data_param().batch_size();
- (*top)[0]->Reshape(batch_size, channels, crop_size, crop_size);
+ top[0]->Reshape(batch_size, channels, crop_size, crop_size);
this->prefetch_data_.Reshape(batch_size, channels, crop_size, crop_size);
- LOG(INFO) << "output data size: " << (*top)[0]->num() << ","
- << (*top)[0]->channels() << "," << (*top)[0]->height() << ","
- << (*top)[0]->width();
+ LOG(INFO) << "output data size: " << top[0]->num() << ","
+ << top[0]->channels() << "," << top[0]->height() << ","
+ << top[0]->width();
// datum size
- this->datum_channels_ = (*top)[0]->channels();
- this->datum_height_ = (*top)[0]->height();
- this->datum_width_ = (*top)[0]->width();
+ this->datum_channels_ = top[0]->channels();
+ this->datum_height_ = top[0]->height();
+ this->datum_width_ = top[0]->width();
this->datum_size_ =
- (*top)[0]->channels() * (*top)[0]->height() * (*top)[0]->width();
+ top[0]->channels() * top[0]->height() * top[0]->width();
// label
- (*top)[1]->Reshape(batch_size, 1, 1, 1);
+ top[1]->Reshape(batch_size, 1, 1, 1);
this->prefetch_label_.Reshape(batch_size, 1, 1, 1);
}
}
// After this layer is connected, set it up.
LOG(INFO) << "Setting up " << layer_names_[layer_id];
- layers_[layer_id]->SetUp(bottom_vecs_[layer_id], &top_vecs_[layer_id]);
+ layers_[layer_id]->SetUp(bottom_vecs_[layer_id], top_vecs_[layer_id]);
for (int top_id = 0; top_id < top_vecs_[layer_id].size(); ++top_id) {
if (blob_loss_weights_.size() <= top_id_vecs_[layer_id][top_id]) {
blob_loss_weights_.resize(top_id_vecs_[layer_id][top_id] + 1, Dtype(0));
Dtype loss = 0;
for (int i = start; i <= end; ++i) {
// LOG(ERROR) << "Forwarding " << layer_names_[i];
- layers_[i]->Reshape(bottom_vecs_[i], &top_vecs_[i]);
- Dtype layer_loss = layers_[i]->Forward(bottom_vecs_[i], &top_vecs_[i]);
+ layers_[i]->Reshape(bottom_vecs_[i], top_vecs_[i]);
+ Dtype layer_loss = layers_[i]->Forward(bottom_vecs_[i], top_vecs_[i]);
loss += layer_loss;
if (debug_info_) { ForwardDebugInfo(i); }
}
for (int i = start; i >= end; --i) {
if (layer_need_backward_[i]) {
layers_[i]->Backward(
- top_vecs_[i], bottom_need_backward_[i], &bottom_vecs_[i]);
+ top_vecs_[i], bottom_need_backward_[i], bottom_vecs_[i]);
if (debug_info_) { BackwardDebugInfo(i); }
}
}
template <typename Dtype>
void Net<Dtype>::Reshape() {
for (int i = 0; i < layers_.size(); ++i) {
- layers_[i]->Reshape(bottom_vecs_[i], &top_vecs_[i]);
+ layers_[i]->Reshape(bottom_vecs_[i], top_vecs_[i]);
}
}
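The Net changes above follow the same mechanical pattern as every layer in this diff: top and bottom blob vectors are now passed by const reference instead of by pointer, so call sites drop the address-of operator and implementations drop the dereference. Taking the Reshape call above as the illustration:

  layers_[i]->Reshape(bottom_vecs_[i], &top_vecs_[i]);  // old: pointer to the top vector
  layers_[i]->Reshape(bottom_vecs_[i], top_vecs_[i]);   // new: const reference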
TYPED_TEST(AccuracyLayerTest, TestSetup) {
LayerParameter layer_param;
AccuracyLayer<TypeParam> layer(layer_param);
- layer.SetUp(this->blob_bottom_vec_, &(this->blob_top_vec_));
+ layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_);
EXPECT_EQ(this->blob_top_->num(), 1);
EXPECT_EQ(this->blob_top_->channels(), 1);
EXPECT_EQ(this->blob_top_->height(), 1);
layer_param.mutable_accuracy_param();
accuracy_param->set_top_k(5);
AccuracyLayer<TypeParam> layer(layer_param);
- layer.SetUp(this->blob_bottom_vec_, &(this->blob_top_vec_));
+ layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_);
EXPECT_EQ(this->blob_top_->num(), 1);
EXPECT_EQ(this->blob_top_->channels(), 1);
EXPECT_EQ(this->blob_top_->height(), 1);
LayerParameter layer_param;
Caffe::set_mode(Caffe::CPU);
AccuracyLayer<TypeParam> layer(layer_param);
- layer.SetUp(this->blob_bottom_vec_, &(this->blob_top_vec_));
- layer.Forward(this->blob_bottom_vec_, &(this->blob_top_vec_));
+ layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_);
+ layer.Forward(this->blob_bottom_vec_, this->blob_top_vec_);
TypeParam max_value;
int max_id;
AccuracyParameter* accuracy_param = layer_param.mutable_accuracy_param();
accuracy_param->set_top_k(this->top_k_);
AccuracyLayer<TypeParam> layer(layer_param);
- layer.SetUp(this->blob_bottom_vec_, &(this->blob_top_vec_));
- layer.Forward(this->blob_bottom_vec_, &(this->blob_top_vec_));
+ layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_);
+ layer.Forward(this->blob_bottom_vec_, this->blob_top_vec_);
TypeParam current_value;
int current_rank;
TYPED_TEST(ArgMaxLayerTest, TestSetup) {
LayerParameter layer_param;
ArgMaxLayer<TypeParam> layer(layer_param);
- layer.SetUp(this->blob_bottom_vec_, &(this->blob_top_vec_));
+ layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_);
EXPECT_EQ(this->blob_top_->num(), this->blob_bottom_->num());
EXPECT_EQ(this->blob_top_->channels(), 1);
}
ArgMaxParameter* argmax_param = layer_param.mutable_argmax_param();
argmax_param->set_out_max_val(true);
ArgMaxLayer<TypeParam> layer(layer_param);
- layer.SetUp(this->blob_bottom_vec_, &(this->blob_top_vec_));
+ layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_);
EXPECT_EQ(this->blob_top_->num(), this->blob_bottom_->num());
EXPECT_EQ(this->blob_top_->channels(), 2);
}
TYPED_TEST(ArgMaxLayerTest, TestCPU) {
LayerParameter layer_param;
ArgMaxLayer<TypeParam> layer(layer_param);
- layer.SetUp(this->blob_bottom_vec_, &(this->blob_top_vec_));
- layer.Forward(this->blob_bottom_vec_, &(this->blob_top_vec_));
+ layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_);
+ layer.Forward(this->blob_bottom_vec_, this->blob_top_vec_);
// Now, check values
const TypeParam* bottom_data = this->blob_bottom_->cpu_data();
const TypeParam* top_data = this->blob_top_->cpu_data();
ArgMaxParameter* argmax_param = layer_param.mutable_argmax_param();
argmax_param->set_out_max_val(true);
ArgMaxLayer<TypeParam> layer(layer_param);
- layer.SetUp(this->blob_bottom_vec_, &(this->blob_top_vec_));
- layer.Forward(this->blob_bottom_vec_, &(this->blob_top_vec_));
+ layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_);
+ layer.Forward(this->blob_bottom_vec_, this->blob_top_vec_);
// Now, check values
const TypeParam* bottom_data = this->blob_bottom_->cpu_data();
const TypeParam* top_data = this->blob_top_->cpu_data();
ArgMaxParameter* argmax_param = layer_param.mutable_argmax_param();
argmax_param->set_top_k(this->top_k_);
ArgMaxLayer<TypeParam> layer(layer_param);
- layer.SetUp(this->blob_bottom_vec_, &(this->blob_top_vec_));
- layer.Forward(this->blob_bottom_vec_, &(this->blob_top_vec_));
+ layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_);
+ layer.Forward(this->blob_bottom_vec_, this->blob_top_vec_);
// Now, check values
int max_ind;
TypeParam max_val;
argmax_param->set_out_max_val(true);
argmax_param->set_top_k(this->top_k_);
ArgMaxLayer<TypeParam> layer(layer_param);
- layer.SetUp(this->blob_bottom_vec_, &(this->blob_top_vec_));
- layer.Forward(this->blob_bottom_vec_, &(this->blob_top_vec_));
+ layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_);
+ layer.Forward(this->blob_bottom_vec_, this->blob_top_vec_);
// Now, check values
int max_ind;
TypeParam max_val;
LayerParameter layer_param;
layer_param.mutable_concat_param()->set_concat_dim(0);
ConcatLayer<Dtype> layer(layer_param);
- layer.SetUp(this->blob_bottom_vec_1, &(this->blob_top_vec_));
+ layer.SetUp(this->blob_bottom_vec_1, this->blob_top_vec_);
EXPECT_EQ(this->blob_top_->num(),
this->blob_bottom_0->num() + this->blob_bottom_2->num());
EXPECT_EQ(this->blob_top_->channels(), this->blob_bottom_0->channels());
typedef typename TypeParam::Dtype Dtype;
LayerParameter layer_param;
ConcatLayer<Dtype> layer(layer_param);
- layer.SetUp(this->blob_bottom_vec_0, &(this->blob_top_vec_));
+ layer.SetUp(this->blob_bottom_vec_0, this->blob_top_vec_);
EXPECT_EQ(this->blob_top_->num(), this->blob_bottom_0->num());
EXPECT_EQ(this->blob_top_->channels(),
this->blob_bottom_0->channels()+this->blob_bottom_1->channels());
typedef typename TypeParam::Dtype Dtype;
LayerParameter layer_param;
ConcatLayer<Dtype> layer(layer_param);
- layer.SetUp(this->blob_bottom_vec_0, &(this->blob_top_vec_));
- layer.Forward(this->blob_bottom_vec_0, &(this->blob_top_vec_));
+ layer.SetUp(this->blob_bottom_vec_0, this->blob_top_vec_);
+ layer.Forward(this->blob_bottom_vec_0, this->blob_top_vec_);
for (int n = 0; n < this->blob_top_->num(); ++n) {
for (int c = 0; c < this->blob_bottom_0->channels(); ++c) {
for (int h = 0; h < this->blob_top_->height(); ++h) {
LayerParameter layer_param;
ConcatLayer<Dtype> layer(layer_param);
GradientChecker<Dtype> checker(1e-2, 1e-2);
- checker.CheckGradient(&layer, &(this->blob_bottom_vec_0),
- &(this->blob_top_vec_));
+ checker.CheckGradient(&layer, this->blob_bottom_vec_0,
+ this->blob_top_vec_);
}
} // namespace caffe
typedef typename TypeParam::Dtype Dtype;
LayerParameter layer_param;
ContrastiveLossLayer<Dtype> layer(layer_param);
- layer.SetUp(this->blob_bottom_vec_, &this->blob_top_vec_);
- layer.Forward(this->blob_bottom_vec_, &this->blob_top_vec_);
+ layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_);
+ layer.Forward(this->blob_bottom_vec_, this->blob_top_vec_);
// manually compute to compare
const Dtype margin = layer_param.contrastive_loss_param().margin();
const int num = this->blob_bottom_data_i_->num();
typedef typename TypeParam::Dtype Dtype;
LayerParameter layer_param;
ContrastiveLossLayer<Dtype> layer(layer_param);
- layer.SetUp(this->blob_bottom_vec_, &this->blob_top_vec_);
+ layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_);
GradientChecker<Dtype> checker(1e-2, 1e-2, 1701);
// check the gradient for the first two bottom layers
- checker.CheckGradientExhaustive(&layer, &(this->blob_bottom_vec_),
- &(this->blob_top_vec_), 0);
- checker.CheckGradientExhaustive(&layer, &(this->blob_bottom_vec_),
- &(this->blob_top_vec_), 1);
+ checker.CheckGradientExhaustive(&layer, this->blob_bottom_vec_,
+ this->blob_top_vec_, 0);
+ checker.CheckGradientExhaustive(&layer, this->blob_bottom_vec_,
+ this->blob_top_vec_, 1);
}
} // namespace caffe
this->blob_top_vec_.push_back(this->blob_top_2_);
shared_ptr<Layer<Dtype> > layer(
new ConvolutionLayer<Dtype>(layer_param));
- layer->SetUp(this->blob_bottom_vec_, &(this->blob_top_vec_));
+ layer->SetUp(this->blob_bottom_vec_, this->blob_top_vec_);
EXPECT_EQ(this->blob_top_->num(), 2);
EXPECT_EQ(this->blob_top_->channels(), 4);
EXPECT_EQ(this->blob_top_->height(), 2);
convolution_param->set_num_output(3);
convolution_param->set_group(3);
layer.reset(new ConvolutionLayer<Dtype>(layer_param));
- layer->SetUp(this->blob_bottom_vec_, &(this->blob_top_vec_));
+ layer->SetUp(this->blob_bottom_vec_, this->blob_top_vec_);
EXPECT_EQ(this->blob_top_->num(), 2);
EXPECT_EQ(this->blob_top_->channels(), 3);
EXPECT_EQ(this->blob_top_->height(), 2);
convolution_param->mutable_bias_filler()->set_value(0.1);
shared_ptr<Layer<Dtype> > layer(
new ConvolutionLayer<Dtype>(layer_param));
- layer->SetUp(this->blob_bottom_vec_, &(this->blob_top_vec_));
- layer->Forward(this->blob_bottom_vec_, &(this->blob_top_vec_));
+ layer->SetUp(this->blob_bottom_vec_, this->blob_top_vec_);
+ layer->Forward(this->blob_bottom_vec_, this->blob_top_vec_);
// Check against reference convolution.
const Dtype* top_data;
const Dtype* ref_top_data;
convolution_param->mutable_bias_filler()->set_value(0.1);
shared_ptr<Layer<Dtype> > layer(
new ConvolutionLayer<Dtype>(layer_param));
- layer->SetUp(this->blob_bottom_vec_, &(this->blob_top_vec_));
- layer->Forward(this->blob_bottom_vec_, &(this->blob_top_vec_));
+ layer->SetUp(this->blob_bottom_vec_, this->blob_top_vec_);
+ layer->Forward(this->blob_bottom_vec_, this->blob_top_vec_);
// Check against reference convolution.
const Dtype* top_data;
const Dtype* ref_top_data;
weights[i + 7] = 0;
weights[i + 8] = 1;
}
- layer->SetUp(this->blob_bottom_vec_, &(this->blob_top_vec_));
- layer->Forward(this->blob_bottom_vec_, &(this->blob_top_vec_));
+ layer->SetUp(this->blob_bottom_vec_, this->blob_top_vec_);
+ layer->Forward(this->blob_bottom_vec_, this->blob_top_vec_);
// Compute Sobel G_x operator as separable 3 x 1 and 1 x 3 convolutions.
// (1) the [1 2 1] column filter
vector<Blob<Dtype>*> sep_blob_bottom_vec;
weights_1[i + 1] = 2;
weights_1[i + 2] = 1;
}
- layer->SetUp(sep_blob_bottom_vec, &(sep_blob_top_vec));
- layer->Forward(sep_blob_bottom_vec, &(sep_blob_top_vec));
+ layer->SetUp(sep_blob_bottom_vec, sep_blob_top_vec);
+ layer->Forward(sep_blob_bottom_vec, sep_blob_top_vec);
// (2) the [-1 0 1] row filter
blob_sep->CopyFrom(*this->blob_top_2_, false, true);
sep_blob_bottom_vec.clear();
weights_2[i + 1] = 0;
weights_2[i + 2] = 1;
}
- layer->SetUp(sep_blob_bottom_vec, &(sep_blob_top_vec));
- layer->Forward(sep_blob_bottom_vec, &(sep_blob_top_vec));
+ layer->SetUp(sep_blob_bottom_vec, sep_blob_top_vec);
+ layer->Forward(sep_blob_bottom_vec, sep_blob_top_vec);
// Test equivalence of full and separable filters.
const Dtype* top_data = this->blob_top_->cpu_data();
const Dtype* sep_top_data = this->blob_top_2_->cpu_data();
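The equivalence being tested rests on the rank-1 factorization of the Sobel @f$ G_x @f$ kernel: the full 3 x 3 filter filled in above is the outer product of the [1 2 1] column filter and the [-1 0 1] row filter,
@f$ G_x = [\,1\;2\;1\,]^{T} \, [\,-1\;0\;1\,] = \begin{bmatrix} -1 & 0 & 1 \\ -2 & 0 & 2 \\ -1 & 0 & 1 \end{bmatrix} @f$,
so running the two separable convolutions in sequence must reproduce the single full convolution up to floating-point error.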
convolution_param->mutable_bias_filler()->set_type("gaussian");
ConvolutionLayer<Dtype> layer(layer_param);
GradientChecker<Dtype> checker(1e-2, 1e-3);
- checker.CheckGradientExhaustive(&layer, &(this->blob_bottom_vec_),
- &(this->blob_top_vec_));
+ checker.CheckGradientExhaustive(&layer, this->blob_bottom_vec_,
+ this->blob_top_vec_);
}
TYPED_TEST(ConvolutionLayerTest, TestGradientGroup) {
convolution_param->mutable_bias_filler()->set_type("gaussian");
ConvolutionLayer<Dtype> layer(layer_param);
GradientChecker<Dtype> checker(1e-2, 1e-3);
- checker.CheckGradientExhaustive(&layer, &(this->blob_bottom_vec_),
- &(this->blob_top_vec_));
+ checker.CheckGradientExhaustive(&layer, this->blob_bottom_vec_,
+ this->blob_top_vec_);
}
#ifdef USE_CUDNN
this->blob_top_vec_.push_back(this->blob_top_2_);
shared_ptr<Layer<TypeParam> > layer(
new CuDNNConvolutionLayer<TypeParam>(layer_param));
- layer->SetUp(this->blob_bottom_vec_, &(this->blob_top_vec_));
+ layer->SetUp(this->blob_bottom_vec_, this->blob_top_vec_);
EXPECT_EQ(this->blob_top_->num(), 2);
EXPECT_EQ(this->blob_top_->channels(), 4);
EXPECT_EQ(this->blob_top_->height(), 2);
convolution_param->set_num_output(3);
convolution_param->set_group(3);
layer.reset(new CuDNNConvolutionLayer<TypeParam>(layer_param));
- layer->SetUp(this->blob_bottom_vec_, &(this->blob_top_vec_));
+ layer->SetUp(this->blob_bottom_vec_, this->blob_top_vec_);
EXPECT_EQ(this->blob_top_->num(), 2);
EXPECT_EQ(this->blob_top_->channels(), 3);
EXPECT_EQ(this->blob_top_->height(), 2);
convolution_param->mutable_bias_filler()->set_value(0.1);
shared_ptr<Layer<TypeParam> > layer(
new CuDNNConvolutionLayer<TypeParam>(layer_param));
- layer->SetUp(this->blob_bottom_vec_, &(this->blob_top_vec_));
- layer->Forward(this->blob_bottom_vec_, &(this->blob_top_vec_));
+ layer->SetUp(this->blob_bottom_vec_, this->blob_top_vec_);
+ layer->Forward(this->blob_bottom_vec_, this->blob_top_vec_);
// Check against reference convolution.
const TypeParam* top_data;
const TypeParam* ref_top_data;
convolution_param->mutable_bias_filler()->set_value(0.1);
shared_ptr<Layer<TypeParam> > layer(
new CuDNNConvolutionLayer<TypeParam>(layer_param));
- layer->SetUp(this->blob_bottom_vec_, &(this->blob_top_vec_));
- layer->Forward(this->blob_bottom_vec_, &(this->blob_top_vec_));
+ layer->SetUp(this->blob_bottom_vec_, this->blob_top_vec_);
+ layer->Forward(this->blob_bottom_vec_, this->blob_top_vec_);
// Check against reference convolution.
const TypeParam* top_data;
const TypeParam* ref_top_data;
weights[i + 7] = 0;
weights[i + 8] = 1;
}
- layer->SetUp(this->blob_bottom_vec_, &(this->blob_top_vec_));
- layer->Forward(this->blob_bottom_vec_, &(this->blob_top_vec_));
+ layer->SetUp(this->blob_bottom_vec_, this->blob_top_vec_);
+ layer->Forward(this->blob_bottom_vec_, this->blob_top_vec_);
// Compute Sobel G_x operator as separable 3 x 1 and 1 x 3 convolutions.
// (1) the [1 2 1] column filter
vector<Blob<TypeParam>*> sep_blob_bottom_vec;
weights_1[i + 1] = 2;
weights_1[i + 2] = 1;
}
- layer->SetUp(sep_blob_bottom_vec, &(sep_blob_top_vec));
- layer->Forward(sep_blob_bottom_vec, &(sep_blob_top_vec));
+ layer->SetUp(sep_blob_bottom_vec, sep_blob_top_vec);
+ layer->Forward(sep_blob_bottom_vec, sep_blob_top_vec);
// (2) the [-1 0 1] row filter
blob_sep->CopyFrom(*this->blob_top_2_, false, true);
sep_blob_bottom_vec.clear();
weights_2[i + 1] = 0;
weights_2[i + 2] = 1;
}
- layer->SetUp(sep_blob_bottom_vec, &(sep_blob_top_vec));
- layer->Forward(sep_blob_bottom_vec, &(sep_blob_top_vec));
+ layer->SetUp(sep_blob_bottom_vec, sep_blob_top_vec);
+ layer->Forward(sep_blob_bottom_vec, sep_blob_top_vec);
// Test equivalence of full and separable filters.
const TypeParam* top_data = this->blob_top_->cpu_data();
const TypeParam* sep_top_data = this->blob_top_2_->cpu_data();
convolution_param->mutable_bias_filler()->set_type("gaussian");
CuDNNConvolutionLayer<TypeParam> layer(layer_param);
GradientChecker<TypeParam> checker(1e-2, 1e-3);
- checker.CheckGradientExhaustive(&layer, &(this->blob_bottom_vec_),
- &(this->blob_top_vec_));
+ checker.CheckGradientExhaustive(&layer, this->blob_bottom_vec_,
+ this->blob_top_vec_);
}
TYPED_TEST(CuDNNConvolutionLayerTest, TestGradientGroupCuDNN) {
convolution_param->mutable_bias_filler()->set_type("gaussian");
CuDNNConvolutionLayer<TypeParam> layer(layer_param);
GradientChecker<TypeParam> checker(1e-2, 1e-3);
- checker.CheckGradientExhaustive(&layer, &(this->blob_bottom_vec_),
- &(this->blob_top_vec_));
+ checker.CheckGradientExhaustive(&layer, this->blob_bottom_vec_,
+ this->blob_top_vec_);
}
#endif
transform_param->set_scale(scale);
DataLayer<Dtype> layer(param);
- layer.SetUp(blob_bottom_vec_, &blob_top_vec_);
+ layer.SetUp(blob_bottom_vec_, blob_top_vec_);
EXPECT_EQ(blob_top_data_->num(), 5);
EXPECT_EQ(blob_top_data_->channels(), 2);
EXPECT_EQ(blob_top_data_->height(), 3);
EXPECT_EQ(blob_top_label_->width(), 1);
for (int iter = 0; iter < 100; ++iter) {
- layer.Forward(blob_bottom_vec_, &blob_top_vec_);
+ layer.Forward(blob_bottom_vec_, blob_top_vec_);
for (int i = 0; i < 5; ++i) {
EXPECT_EQ(i, blob_top_label_->cpu_data()[i]);
}
transform_param->set_crop_size(1);
DataLayer<Dtype> layer(param);
- layer.SetUp(blob_bottom_vec_, &blob_top_vec_);
+ layer.SetUp(blob_bottom_vec_, blob_top_vec_);
EXPECT_EQ(blob_top_data_->num(), 5);
EXPECT_EQ(blob_top_data_->channels(), 2);
EXPECT_EQ(blob_top_data_->height(), 1);
EXPECT_EQ(blob_top_label_->width(), 1);
for (int iter = 0; iter < 2; ++iter) {
- layer.Forward(blob_bottom_vec_, &blob_top_vec_);
+ layer.Forward(blob_bottom_vec_, blob_top_vec_);
for (int i = 0; i < 5; ++i) {
EXPECT_EQ(i, blob_top_label_->cpu_data()[i]);
}
vector<vector<Dtype> > crop_sequence;
{
DataLayer<Dtype> layer1(param);
- layer1.SetUp(blob_bottom_vec_, &blob_top_vec_);
+ layer1.SetUp(blob_bottom_vec_, blob_top_vec_);
for (int iter = 0; iter < 2; ++iter) {
- layer1.Forward(blob_bottom_vec_, &blob_top_vec_);
+ layer1.Forward(blob_bottom_vec_, blob_top_vec_);
for (int i = 0; i < 5; ++i) {
EXPECT_EQ(i, blob_top_label_->cpu_data()[i]);
}
// Check that the sequence is the same as the original.
Caffe::set_random_seed(seed_);
DataLayer<Dtype> layer2(param);
- layer2.SetUp(blob_bottom_vec_, &blob_top_vec_);
+ layer2.SetUp(blob_bottom_vec_, blob_top_vec_);
for (int iter = 0; iter < 2; ++iter) {
- layer2.Forward(blob_bottom_vec_, &blob_top_vec_);
+ layer2.Forward(blob_bottom_vec_, blob_top_vec_);
for (int i = 0; i < 5; ++i) {
EXPECT_EQ(i, blob_top_label_->cpu_data()[i]);
}
vector<vector<Dtype> > crop_sequence;
{
DataLayer<Dtype> layer1(param);
- layer1.SetUp(blob_bottom_vec_, &blob_top_vec_);
+ layer1.SetUp(blob_bottom_vec_, blob_top_vec_);
for (int iter = 0; iter < 2; ++iter) {
- layer1.Forward(blob_bottom_vec_, &blob_top_vec_);
+ layer1.Forward(blob_bottom_vec_, blob_top_vec_);
for (int i = 0; i < 5; ++i) {
EXPECT_EQ(i, blob_top_label_->cpu_data()[i]);
}
// srand with 1701. Check that the sequence differs from the original.
srand(seed_);
DataLayer<Dtype> layer2(param);
- layer2.SetUp(blob_bottom_vec_, &blob_top_vec_);
+ layer2.SetUp(blob_bottom_vec_, blob_top_vec_);
for (int iter = 0; iter < 2; ++iter) {
- layer2.Forward(blob_bottom_vec_, &blob_top_vec_);
+ layer2.Forward(blob_bottom_vec_, blob_top_vec_);
for (int i = 0; i < 5; ++i) {
EXPECT_EQ(i, blob_top_label_->cpu_data()[i]);
}
dummy_data_param->add_width(4);
this->blob_top_vec_.resize(1);
DummyDataLayer<TypeParam> layer(param);
- layer.SetUp(this->blob_bottom_vec_, &this->blob_top_vec_);
+ layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_);
EXPECT_EQ(this->blob_top_a_->num(), 5);
EXPECT_EQ(this->blob_top_a_->channels(), 3);
EXPECT_EQ(this->blob_top_a_->height(), 2);
EXPECT_EQ(0, this->blob_top_vec_[i]->cpu_data()[j]);
}
}
- layer.Forward(this->blob_bottom_vec_, &this->blob_top_vec_);
+ layer.Forward(this->blob_bottom_vec_, this->blob_top_vec_);
for (int i = 0; i < this->blob_top_vec_.size(); ++i) {
for (int j = 0; j < this->blob_top_vec_[i]->count(); ++j) {
EXPECT_EQ(0, this->blob_top_vec_[i]->cpu_data()[j]);
data_filler_param->set_value(7);
this->blob_top_vec_.resize(2);
DummyDataLayer<TypeParam> layer(param);
- layer.SetUp(this->blob_bottom_vec_, &this->blob_top_vec_);
+ layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_);
EXPECT_EQ(this->blob_top_a_->num(), 5);
EXPECT_EQ(this->blob_top_a_->channels(), 3);
EXPECT_EQ(this->blob_top_a_->height(), 2);
EXPECT_EQ(7, this->blob_top_vec_[i]->cpu_data()[j]);
}
}
- layer.Forward(this->blob_bottom_vec_, &this->blob_top_vec_);
+ layer.Forward(this->blob_bottom_vec_, this->blob_top_vec_);
for (int i = 0; i < this->blob_top_vec_.size(); ++i) {
for (int j = 0; j < this->blob_top_vec_[i]->count(); ++j) {
EXPECT_EQ(7, this->blob_top_vec_[i]->cpu_data()[j]);
FillerParameter* data_filler_param_c = dummy_data_param->add_data_filler();
data_filler_param_c->set_value(9);
DummyDataLayer<TypeParam> layer(param);
- layer.SetUp(this->blob_bottom_vec_, &this->blob_top_vec_);
+ layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_);
EXPECT_EQ(this->blob_top_a_->num(), 5);
EXPECT_EQ(this->blob_top_a_->channels(), 3);
EXPECT_EQ(this->blob_top_a_->height(), 2);
}
// Do a Forward pass to fill in Blob b with Gaussian data.
- layer.Forward(this->blob_bottom_vec_, &this->blob_top_vec_);
+ layer.Forward(this->blob_bottom_vec_, this->blob_top_vec_);
for (int i = 0; i < this->blob_top_a_->count(); ++i) {
EXPECT_EQ(7, this->blob_top_a_->cpu_data()[i]);
}
// Do another Forward pass to fill in Blob b with Gaussian data again,
// checking that we get different values.
- layer.Forward(this->blob_bottom_vec_, &this->blob_top_vec_);
+ layer.Forward(this->blob_bottom_vec_, this->blob_top_vec_);
for (int i = 0; i < this->blob_top_a_->count(); ++i) {
EXPECT_EQ(7, this->blob_top_a_->cpu_data()[i]);
}
eltwise_param->set_operation(EltwiseParameter_EltwiseOp_PROD);
shared_ptr<EltwiseLayer<Dtype> > layer(
new EltwiseLayer<Dtype>(layer_param));
- layer->SetUp(this->blob_bottom_vec_, &(this->blob_top_vec_));
+ layer->SetUp(this->blob_bottom_vec_, this->blob_top_vec_);
EXPECT_EQ(this->blob_top_->num(), 2);
EXPECT_EQ(this->blob_top_->channels(), 3);
EXPECT_EQ(this->blob_top_->height(), 4);
eltwise_param->set_operation(EltwiseParameter_EltwiseOp_PROD);
shared_ptr<EltwiseLayer<Dtype> > layer(
new EltwiseLayer<Dtype>(layer_param));
- layer->SetUp(this->blob_bottom_vec_, &(this->blob_top_vec_));
- layer->Forward(this->blob_bottom_vec_, &(this->blob_top_vec_));
+ layer->SetUp(this->blob_bottom_vec_, this->blob_top_vec_);
+ layer->Forward(this->blob_bottom_vec_, this->blob_top_vec_);
const Dtype* data = this->blob_top_->cpu_data();
const int count = this->blob_top_->count();
const Dtype* in_data_a = this->blob_bottom_a_->cpu_data();
eltwise_param->set_operation(EltwiseParameter_EltwiseOp_SUM);
shared_ptr<EltwiseLayer<Dtype> > layer(
new EltwiseLayer<Dtype>(layer_param));
- layer->SetUp(this->blob_bottom_vec_, &(this->blob_top_vec_));
- layer->Forward(this->blob_bottom_vec_, &(this->blob_top_vec_));
+ layer->SetUp(this->blob_bottom_vec_, this->blob_top_vec_);
+ layer->Forward(this->blob_bottom_vec_, this->blob_top_vec_);
const Dtype* data = this->blob_top_->cpu_data();
const int count = this->blob_top_->count();
const Dtype* in_data_a = this->blob_bottom_a_->cpu_data();
eltwise_param->add_coeff(2);
shared_ptr<EltwiseLayer<Dtype> > layer(
new EltwiseLayer<Dtype>(layer_param));
- layer->SetUp(this->blob_bottom_vec_, &(this->blob_top_vec_));
- layer->Forward(this->blob_bottom_vec_, &(this->blob_top_vec_));
+ layer->SetUp(this->blob_bottom_vec_, this->blob_top_vec_);
+ layer->Forward(this->blob_bottom_vec_, this->blob_top_vec_);
const Dtype* data = this->blob_top_->cpu_data();
const int count = this->blob_top_->count();
const Dtype* in_data_a = this->blob_bottom_a_->cpu_data();
eltwise_param->set_stable_prod_grad(true);
EltwiseLayer<Dtype> layer(layer_param);
GradientChecker<Dtype> checker(1e-2, 1e-3);
- checker.CheckGradientEltwise(&layer, &(this->blob_bottom_vec_),
- &(this->blob_top_vec_));
+ checker.CheckGradientEltwise(&layer, this->blob_bottom_vec_,
+ this->blob_top_vec_);
}
TYPED_TEST(EltwiseLayerTest, TestUnstableProdGradient) {
eltwise_param->set_stable_prod_grad(false);
EltwiseLayer<Dtype> layer(layer_param);
GradientChecker<Dtype> checker(1e-2, 1e-3);
- checker.CheckGradientEltwise(&layer, &(this->blob_bottom_vec_),
- &(this->blob_top_vec_));
+ checker.CheckGradientEltwise(&layer, this->blob_bottom_vec_,
+ this->blob_top_vec_);
}
TYPED_TEST(EltwiseLayerTest, TestSumGradient) {
eltwise_param->set_operation(EltwiseParameter_EltwiseOp_SUM);
EltwiseLayer<Dtype> layer(layer_param);
GradientChecker<Dtype> checker(1e-2, 1e-3);
- checker.CheckGradientEltwise(&layer, &(this->blob_bottom_vec_),
- &(this->blob_top_vec_));
+ checker.CheckGradientEltwise(&layer, this->blob_bottom_vec_,
+ this->blob_top_vec_);
}
TYPED_TEST(EltwiseLayerTest, TestSumCoeffGradient) {
eltwise_param->add_coeff(2);
EltwiseLayer<Dtype> layer(layer_param);
GradientChecker<Dtype> checker(1e-2, 1e-3);
- checker.CheckGradientEltwise(&layer, &(this->blob_bottom_vec_),
- &(this->blob_top_vec_));
+ checker.CheckGradientEltwise(&layer, this->blob_bottom_vec_,
+ this->blob_top_vec_);
}
TYPED_TEST(EltwiseLayerTest, TestMax) {
eltwise_param->set_operation(EltwiseParameter_EltwiseOp_MAX);
shared_ptr<EltwiseLayer<Dtype> > layer(
new EltwiseLayer<Dtype>(layer_param));
- layer->SetUp(this->blob_bottom_vec_, &(this->blob_top_vec_));
- layer->Forward(this->blob_bottom_vec_, &(this->blob_top_vec_));
+ layer->SetUp(this->blob_bottom_vec_, this->blob_top_vec_);
+ layer->Forward(this->blob_bottom_vec_, this->blob_top_vec_);
const Dtype* data = this->blob_top_->cpu_data();
const int count = this->blob_top_->count();
const Dtype* in_data_a = this->blob_bottom_a_->cpu_data();
eltwise_param->set_operation(EltwiseParameter_EltwiseOp_MAX);
EltwiseLayer<Dtype> layer(layer_param);
GradientChecker<Dtype> checker(1e-4, 1e-3);
- checker.CheckGradientEltwise(&layer, &(this->blob_bottom_vec_),
- &(this->blob_top_vec_));
+ checker.CheckGradientEltwise(&layer, this->blob_bottom_vec_,
+ this->blob_top_vec_);
}
} // namespace caffe
// equivalent to explicitly specifying a weight of 1.
LayerParameter layer_param;
EuclideanLossLayer<Dtype> layer_weight_1(layer_param);
- layer_weight_1.SetUp(this->blob_bottom_vec_, &this->blob_top_vec_);
+ layer_weight_1.SetUp(this->blob_bottom_vec_, this->blob_top_vec_);
const Dtype loss_weight_1 =
- layer_weight_1.Forward(this->blob_bottom_vec_, &this->blob_top_vec_);
+ layer_weight_1.Forward(this->blob_bottom_vec_, this->blob_top_vec_);
// Get the loss again with a different objective weight; check that it is
// scaled appropriately.
const Dtype kLossWeight = 3.7;
layer_param.add_loss_weight(kLossWeight);
EuclideanLossLayer<Dtype> layer_weight_2(layer_param);
- layer_weight_2.SetUp(this->blob_bottom_vec_, &this->blob_top_vec_);
+ layer_weight_2.SetUp(this->blob_bottom_vec_, this->blob_top_vec_);
const Dtype loss_weight_2 =
- layer_weight_2.Forward(this->blob_bottom_vec_, &this->blob_top_vec_);
+ layer_weight_2.Forward(this->blob_bottom_vec_, this->blob_top_vec_);
const Dtype kErrorMargin = 1e-5;
EXPECT_NEAR(loss_weight_1 * kLossWeight, loss_weight_2, kErrorMargin);
// Make sure the loss is non-trivial.
const Dtype kLossWeight = 3.7;
layer_param.add_loss_weight(kLossWeight);
EuclideanLossLayer<Dtype> layer(layer_param);
- layer.SetUp(this->blob_bottom_vec_, &this->blob_top_vec_);
+ layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_);
GradientChecker<Dtype> checker(1e-2, 1e-2, 1701);
- checker.CheckGradientExhaustive(&layer, &(this->blob_bottom_vec_),
- &(this->blob_top_vec_));
+ checker.CheckGradientExhaustive(&layer, this->blob_bottom_vec_,
+ this->blob_top_vec_);
}
} // namespace caffe
typedef typename TypeParam::Dtype Dtype;
LayerParameter layer_param;
FlattenLayer<Dtype> layer(layer_param);
- layer.SetUp(this->blob_bottom_vec_, &(this->blob_top_vec_));
+ layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_);
EXPECT_EQ(this->blob_top_->num(), 2);
EXPECT_EQ(this->blob_top_->channels(), 3 * 6 * 5);
EXPECT_EQ(this->blob_top_->height(), 1);
typedef typename TypeParam::Dtype Dtype;
LayerParameter layer_param;
FlattenLayer<Dtype> layer(layer_param);
- layer.SetUp(this->blob_bottom_vec_, &(this->blob_top_vec_));
- layer.Forward(this->blob_bottom_vec_, &(this->blob_top_vec_));
+ layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_);
+ layer.Forward(this->blob_bottom_vec_, this->blob_top_vec_);
for (int c = 0; c < 3 * 6 * 5; ++c) {
EXPECT_EQ(this->blob_top_->data_at(0, c, 0, 0),
this->blob_bottom_->data_at(0, c / (6 * 5), (c / 5) % 6, c % 5));
LayerParameter layer_param;
FlattenLayer<Dtype> layer(layer_param);
GradientChecker<Dtype> checker(1e-2, 1e-2);
- checker.CheckGradientEltwise(&layer, &(this->blob_bottom_vec_),
- &(this->blob_top_vec_));
+ checker.CheckGradientEltwise(&layer, this->blob_bottom_vec_,
+ this->blob_top_vec_);
}
{
HDF5OutputLayer<Dtype> layer(param);
EXPECT_EQ(layer.file_name(), this->output_file_name_);
- layer.SetUp(this->blob_bottom_vec_, &this->blob_top_vec_);
- layer.Forward(this->blob_bottom_vec_, &this->blob_top_vec_);
+ layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_);
+ layer.Forward(this->blob_bottom_vec_, this->blob_top_vec_);
}
file_id = H5Fopen(this->output_file_name_.c_str(), H5F_ACC_RDONLY,
H5P_DEFAULT);
// Test that the layer setup got the correct parameters.
HDF5DataLayer<Dtype> layer(param);
- layer.SetUp(this->blob_bottom_vec_, &this->blob_top_vec_);
+ layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_);
EXPECT_EQ(this->blob_top_data_->num(), batch_size);
EXPECT_EQ(this->blob_top_data_->channels(), num_cols);
EXPECT_EQ(this->blob_top_data_->height(), height);
EXPECT_EQ(this->blob_top_label_->height(), 1);
EXPECT_EQ(this->blob_top_label_->width(), 1);
- layer.SetUp(this->blob_bottom_vec_, &this->blob_top_vec_);
+ layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_);
// Go through the data 10 times (5 batches).
const int data_size = num_cols * height * width;
for (int iter = 0; iter < 10; ++iter) {
- layer.Forward(this->blob_bottom_vec_, &this->blob_top_vec_);
+ layer.Forward(this->blob_bottom_vec_, this->blob_top_vec_);
// On even iterations, we're reading the first half of the data.
// On odd iterations, we're reading the second half of the data.
LayerParameter layer_param;
HingeLossLayer<Dtype> layer(layer_param);
GradientChecker<Dtype> checker(1e-2, 2e-3, 1701, 1, 0.01);
- checker.CheckGradientExhaustive(&layer, &(this->blob_bottom_vec_),
- &(this->blob_top_vec_), 0);
+ checker.CheckGradientExhaustive(&layer, this->blob_bottom_vec_,
+ this->blob_top_vec_, 0);
}
TYPED_TEST(HingeLossLayerTest, TestGradientL2) {
hinge_loss_param->set_norm(HingeLossParameter_Norm_L2);
HingeLossLayer<Dtype> layer(layer_param);
GradientChecker<Dtype> checker(1e-2, 1e-2, 1701);
- checker.CheckGradientExhaustive(&layer, &(this->blob_bottom_vec_),
- &(this->blob_top_vec_), 0);
+ checker.CheckGradientExhaustive(&layer, this->blob_bottom_vec_,
+ this->blob_top_vec_, 0);
}
} // namespace caffe
convolution_param->set_kernel_size(3);
convolution_param->set_stride(2);
Im2colLayer<Dtype> layer(layer_param);
- layer.SetUp(this->blob_bottom_vec_, &(this->blob_top_vec_));
+ layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_);
EXPECT_EQ(this->blob_top_->num(), 2);
EXPECT_EQ(this->blob_top_->channels(), 27);
EXPECT_EQ(this->blob_top_->height(), 2);
convolution_param->set_kernel_size(3);
convolution_param->set_stride(2);
Im2colLayer<Dtype> layer(layer_param);
- layer.SetUp(this->blob_bottom_vec_, &(this->blob_top_vec_));
- layer.Forward(this->blob_bottom_vec_, &(this->blob_top_vec_));
+ layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_);
+ layer.Forward(this->blob_bottom_vec_, this->blob_top_vec_);
// We are lazy and will only check the top left block
for (int c = 0; c < 27; ++c) {
EXPECT_EQ(this->blob_bottom_->data_at(0, (c / 9), (c / 3) % 3, c % 3),
convolution_param->set_stride(2);
Im2colLayer<Dtype> layer(layer_param);
GradientChecker<Dtype> checker(1e-2, 1e-2);
- checker.CheckGradientExhaustive(&layer, &(this->blob_bottom_vec_),
- &(this->blob_top_vec_));
+ checker.CheckGradientExhaustive(&layer, this->blob_bottom_vec_,
+ this->blob_top_vec_);
}
convolution_param->set_kernel_w(3);
convolution_param->set_stride(2);
Im2colLayer<Dtype> layer(layer_param);
- layer.SetUp(this->blob_bottom_vec_, &(this->blob_top_vec_));
- layer.Forward(this->blob_bottom_vec_, &(this->blob_top_vec_));
+ layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_);
+ layer.Forward(this->blob_bottom_vec_, this->blob_top_vec_);
// We are lazy and will only check the top left block
for (int c = 0; c < 45; ++c) {
EXPECT_EQ(this->blob_top_->data_at(0, c, 0, 0),
convolution_param->set_stride(2);
Im2colLayer<Dtype> layer(layer_param);
GradientChecker<Dtype> checker(1e-2, 1e-2);
- checker.CheckGradientExhaustive(&layer, &(this->blob_bottom_vec_),
- &(this->blob_top_vec_));
+ checker.CheckGradientExhaustive(&layer, this->blob_bottom_vec_,
+ this->blob_top_vec_);
}
} // namespace caffe
image_data_param->set_source(this->filename_.c_str());
image_data_param->set_shuffle(false);
ImageDataLayer<Dtype> layer(param);
- layer.SetUp(this->blob_bottom_vec_, &this->blob_top_vec_);
+ layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_);
EXPECT_EQ(this->blob_top_data_->num(), 5);
EXPECT_EQ(this->blob_top_data_->channels(), 3);
EXPECT_EQ(this->blob_top_data_->height(), 360);
EXPECT_EQ(this->blob_top_label_->width(), 1);
// Go through the data twice
for (int iter = 0; iter < 2; ++iter) {
- layer.Forward(this->blob_bottom_vec_, &this->blob_top_vec_);
+ layer.Forward(this->blob_bottom_vec_, this->blob_top_vec_);
for (int i = 0; i < 5; ++i) {
EXPECT_EQ(i, this->blob_top_label_->cpu_data()[i]);
}
image_data_param->set_new_width(256);
image_data_param->set_shuffle(false);
ImageDataLayer<Dtype> layer(param);
- layer.SetUp(this->blob_bottom_vec_, &this->blob_top_vec_);
+ layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_);
EXPECT_EQ(this->blob_top_data_->num(), 5);
EXPECT_EQ(this->blob_top_data_->channels(), 3);
EXPECT_EQ(this->blob_top_data_->height(), 256);
EXPECT_EQ(this->blob_top_label_->width(), 1);
// Go through the data twice
for (int iter = 0; iter < 2; ++iter) {
- layer.Forward(this->blob_bottom_vec_, &this->blob_top_vec_);
+ layer.Forward(this->blob_bottom_vec_, this->blob_top_vec_);
for (int i = 0; i < 5; ++i) {
EXPECT_EQ(i, this->blob_top_label_->cpu_data()[i]);
}
image_data_param->set_source(this->filename_.c_str());
image_data_param->set_shuffle(true);
ImageDataLayer<Dtype> layer(param);
- layer.SetUp(this->blob_bottom_vec_, &this->blob_top_vec_);
+ layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_);
EXPECT_EQ(this->blob_top_data_->num(), 5);
EXPECT_EQ(this->blob_top_data_->channels(), 3);
EXPECT_EQ(this->blob_top_data_->height(), 360);
EXPECT_EQ(this->blob_top_label_->width(), 1);
// Go through the data twice
for (int iter = 0; iter < 2; ++iter) {
- layer.Forward(this->blob_bottom_vec_, &this->blob_top_vec_);
+ layer.Forward(this->blob_bottom_vec_, this->blob_top_vec_);
map<Dtype, int> values_to_indices;
int num_in_order = 0;
for (int i = 0; i < 5; ++i) {
LayerParameter layer_param;
InfogainLossLayer<Dtype> layer(layer_param);
GradientChecker<Dtype> checker(1e-4, 2e-2, 1701, 1, 0.01);
- checker.CheckGradientExhaustive(&layer, &(this->blob_bottom_vec_),
- &(this->blob_top_vec_), 0);
+ checker.CheckGradientExhaustive(&layer, this->blob_bottom_vec_,
+ this->blob_top_vec_, 0);
}
} // namespace caffe
inner_product_param->set_num_output(10);
shared_ptr<InnerProductLayer<Dtype> > layer(
new InnerProductLayer<Dtype>(layer_param));
- layer->SetUp(this->blob_bottom_vec_, &(this->blob_top_vec_));
+ layer->SetUp(this->blob_bottom_vec_, this->blob_top_vec_);
EXPECT_EQ(this->blob_top_->num(), 2);
EXPECT_EQ(this->blob_top_->height(), 1);
EXPECT_EQ(this->blob_top_->width(), 1);
inner_product_param->mutable_bias_filler()->set_max(2);
shared_ptr<InnerProductLayer<Dtype> > layer(
new InnerProductLayer<Dtype>(layer_param));
- layer->SetUp(this->blob_bottom_vec_, &(this->blob_top_vec_));
- layer->Forward(this->blob_bottom_vec_, &(this->blob_top_vec_));
+ layer->SetUp(this->blob_bottom_vec_, this->blob_top_vec_);
+ layer->Forward(this->blob_bottom_vec_, this->blob_top_vec_);
const Dtype* data = this->blob_top_->cpu_data();
const int count = this->blob_top_->count();
for (int i = 0; i < count; ++i) {
inner_product_param->mutable_bias_filler()->set_max(2);
InnerProductLayer<Dtype> layer(layer_param);
GradientChecker<Dtype> checker(1e-2, 1e-3);
- checker.CheckGradientExhaustive(&layer, &(this->blob_bottom_vec_),
- &(this->blob_top_vec_));
+ checker.CheckGradientExhaustive(&layer, this->blob_bottom_vec_,
+ this->blob_top_vec_);
} else {
LOG(ERROR) << "Skipping test due to old architecture.";
}
typedef typename TypeParam::Dtype Dtype;
LayerParameter layer_param;
LRNLayer<Dtype> layer(layer_param);
- layer.SetUp(this->blob_bottom_vec_, &(this->blob_top_vec_));
+ layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_);
EXPECT_EQ(this->blob_top_->num(), 2);
EXPECT_EQ(this->blob_top_->channels(), 7);
EXPECT_EQ(this->blob_top_->height(), 3);
typedef typename TypeParam::Dtype Dtype;
LayerParameter layer_param;
LRNLayer<Dtype> layer(layer_param);
- layer.SetUp(this->blob_bottom_vec_, &(this->blob_top_vec_));
- layer.Forward(this->blob_bottom_vec_, &(this->blob_top_vec_));
+ layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_);
+ layer.Forward(this->blob_bottom_vec_, this->blob_top_vec_);
Blob<Dtype> top_reference;
this->ReferenceLRNForward(*(this->blob_bottom_), layer_param,
&top_reference);
LayerParameter layer_param;
LRNLayer<Dtype> layer(layer_param);
GradientChecker<Dtype> checker(1e-2, 1e-2);
- layer.SetUp(this->blob_bottom_vec_, &(this->blob_top_vec_));
- layer.Forward(this->blob_bottom_vec_, &(this->blob_top_vec_));
+ layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_);
+ layer.Forward(this->blob_bottom_vec_, this->blob_top_vec_);
for (int i = 0; i < this->blob_top_->count(); ++i) {
this->blob_top_->mutable_cpu_diff()[i] = 1.;
}
vector<bool> propagate_down(this->blob_bottom_vec_.size(), true);
layer.Backward(this->blob_top_vec_, propagate_down,
- &(this->blob_bottom_vec_));
+ this->blob_bottom_vec_);
// for (int i = 0; i < this->blob_bottom_->count(); ++i) {
// std::cout << "CPU diff " << this->blob_bottom_->cpu_diff()[i]
// << std::endl;
// }
- checker.CheckGradientExhaustive(&layer, &(this->blob_bottom_vec_),
- &(this->blob_top_vec_));
+ checker.CheckGradientExhaustive(&layer, this->blob_bottom_vec_,
+ this->blob_top_vec_);
}
TYPED_TEST(LRNLayerTest, TestSetupWithinChannel) {
LRNParameter_NormRegion_WITHIN_CHANNEL);
layer_param.mutable_lrn_param()->set_local_size(3);
LRNLayer<Dtype> layer(layer_param);
- layer.SetUp(this->blob_bottom_vec_, &(this->blob_top_vec_));
+ layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_);
EXPECT_EQ(this->blob_top_->num(), 2);
EXPECT_EQ(this->blob_top_->channels(), 7);
EXPECT_EQ(this->blob_top_->height(), 3);
LRNParameter_NormRegion_WITHIN_CHANNEL);
layer_param.mutable_lrn_param()->set_local_size(3);
LRNLayer<Dtype> layer(layer_param);
- layer.SetUp(this->blob_bottom_vec_, &(this->blob_top_vec_));
- layer.Forward(this->blob_bottom_vec_, &(this->blob_top_vec_));
+ layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_);
+ layer.Forward(this->blob_bottom_vec_, this->blob_top_vec_);
Blob<Dtype> top_reference;
this->ReferenceLRNForward(*(this->blob_bottom_), layer_param,
&top_reference);
layer_param.mutable_lrn_param()->set_local_size(3);
LRNLayer<Dtype> layer(layer_param);
GradientChecker<Dtype> checker(1e-2, 1e-2);
- layer.SetUp(this->blob_bottom_vec_, &(this->blob_top_vec_));
- layer.Forward(this->blob_bottom_vec_, &(this->blob_top_vec_));
+ layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_);
+ layer.Forward(this->blob_bottom_vec_, this->blob_top_vec_);
for (int i = 0; i < this->blob_top_->count(); ++i) {
this->blob_top_->mutable_cpu_diff()[i] = 1.;
}
- checker.CheckGradientExhaustive(&layer, &(this->blob_bottom_vec_),
- &(this->blob_top_vec_));
+ checker.CheckGradientExhaustive(&layer, this->blob_bottom_vec_,
+ this->blob_top_vec_);
}
pooling_param->set_kernel_size(3);
pooling_param->set_stride(2);
PoolingLayer<Dtype> max_layer(layer_param);
- max_layer.SetUp(this->blob_bottom_vec_, &(this->blob_top_vec_));
+ max_layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_);
DropoutLayer<Dtype> dropout_layer(layer_param);
- dropout_layer.SetUp(this->blob_top_vec_, &(this->blob_top_vec_));
+ dropout_layer.SetUp(this->blob_top_vec_, this->blob_top_vec_);
EXPECT_EQ(this->blob_top_->num(), this->blob_bottom_->num());
EXPECT_EQ(this->blob_top_->channels(), this->blob_bottom_->channels());
EXPECT_EQ(this->blob_top_->height(), 3);
pooling_param->set_kernel_size(3);
pooling_param->set_stride(2);
PoolingLayer<Dtype> layer(layer_param);
- layer.SetUp(this->blob_bottom_vec_, &(this->blob_top_vec_));
- layer.Forward(this->blob_bottom_vec_, &(this->blob_top_vec_));
+ layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_);
+ layer.Forward(this->blob_bottom_vec_, this->blob_top_vec_);
const Dtype* top_data = this->blob_top_->cpu_data();
Dtype sum = 0.;
for (int i = 0; i < this->blob_top_->count(); ++i) {
EXPECT_EQ(sum, this->blob_top_->count());
// Dropout in-place
DropoutLayer<Dtype> dropout_layer(layer_param);
- dropout_layer.SetUp(this->blob_top_vec_, &(this->blob_top_vec_));
- dropout_layer.Forward(this->blob_top_vec_, &(this->blob_top_vec_));
+ dropout_layer.SetUp(this->blob_top_vec_, this->blob_top_vec_);
+ dropout_layer.Forward(this->blob_top_vec_, this->blob_top_vec_);
sum = 0.;
Dtype scale = 1. / (1. - layer_param.dropout_param().dropout_ratio());
top_data = this->blob_top_->cpu_data();
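The scale read above is the standard inverted-dropout correction: with dropout ratio @f$ p @f$, each surviving activation is multiplied by @f$ 1/(1-p) @f$ at training time so that the expected value of every unit matches the undropped input, which is presumably what the sum-based comparison in this test exploits.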
pooling_param->set_kernel_size(3);
pooling_param->set_stride(2);
PoolingLayer<Dtype> layer(layer_param);
- layer.SetUp(this->blob_bottom_vec_, &(this->blob_top_vec_));
- layer.Forward(this->blob_bottom_vec_, &(this->blob_top_vec_));
+ layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_);
+ layer.Forward(this->blob_bottom_vec_, this->blob_top_vec_);
for (int i = 0; i < this->blob_top_->count(); ++i) {
this->blob_top_->mutable_cpu_diff()[i] = 1.;
}
vector<bool> propagate_down(this->blob_bottom_vec_.size(), true);
layer.Backward(this->blob_top_vec_, propagate_down,
- &(this->blob_bottom_vec_));
+ this->blob_bottom_vec_);
const Dtype* bottom_diff = this->blob_bottom_->cpu_diff();
Dtype sum = 0.;
for (int i = 0; i < this->blob_bottom_->count(); ++i) {
EXPECT_EQ(sum, this->blob_top_->count());
// Dropout in-place
DropoutLayer<Dtype> dropout_layer(layer_param);
- dropout_layer.SetUp(this->blob_top_vec_, &(this->blob_top_vec_));
- dropout_layer.Forward(this->blob_top_vec_, &(this->blob_top_vec_));
+ dropout_layer.SetUp(this->blob_top_vec_, this->blob_top_vec_);
+ dropout_layer.Forward(this->blob_top_vec_, this->blob_top_vec_);
dropout_layer.Backward(this->blob_top_vec_, propagate_down,
- &(this->blob_top_vec_));
+ this->blob_top_vec_);
layer.Backward(this->blob_top_vec_, propagate_down,
- &(this->blob_bottom_vec_));
+ this->blob_bottom_vec_);
Dtype sum_with_dropout = 0.;
bottom_diff = this->blob_bottom_->cpu_diff();
for (int i = 0; i < this->blob_bottom_->count(); ++i) {
md_param->set_width(this->width_);
shared_ptr<Layer<Dtype> > layer(
new MemoryDataLayer<Dtype>(layer_param));
- layer->SetUp(this->blob_bottom_vec_, &(this->blob_top_vec_));
+ layer->SetUp(this->blob_bottom_vec_, this->blob_top_vec_);
EXPECT_EQ(this->data_blob_->num(), this->batch_size_);
EXPECT_EQ(this->data_blob_->channels(), this->channels_);
EXPECT_EQ(this->data_blob_->height(), this->height_);
md_param->set_width(this->width_);
shared_ptr<MemoryDataLayer<Dtype> > layer(
new MemoryDataLayer<Dtype>(layer_param));
- layer->DataLayerSetUp(this->blob_bottom_vec_, &(this->blob_top_vec_));
+ layer->DataLayerSetUp(this->blob_bottom_vec_, this->blob_top_vec_);
layer->Reset(this->data_->mutable_cpu_data(),
this->labels_->mutable_cpu_data(), this->data_->num());
for (int i = 0; i < this->batches_ * 6; ++i) {
int batch_num = i % this->batches_;
- layer->Forward(this->blob_bottom_vec_, &(this->blob_top_vec_));
+ layer->Forward(this->blob_bottom_vec_, this->blob_top_vec_);
for (int j = 0; j < this->data_blob_->count(); ++j) {
EXPECT_EQ(this->data_blob_->cpu_data()[j],
this->data_->cpu_data()[
memory_data_param->set_height(this->height_);
memory_data_param->set_width(this->width_);
MemoryDataLayer<Dtype> layer(param);
- layer.SetUp(this->blob_bottom_vec_, &this->blob_top_vec_);
+ layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_);
vector<Datum> datum_vector(this->batch_size_);
const size_t count = this->channels_ * this->height_ * this->width_;
int data_index;
// Go through the data 5 times
for (int iter = 0; iter < 5; ++iter) {
- layer.Forward(this->blob_bottom_vec_, &this->blob_top_vec_);
+ layer.Forward(this->blob_bottom_vec_, this->blob_top_vec_);
const Dtype* data = this->data_blob_->cpu_data();
size_t index = 0;
for (int i = 0; i < this->batch_size_; ++i) {
LayerParameter layer_param;
Caffe::set_mode(Caffe::CPU);
MultinomialLogisticLossLayer<TypeParam> layer(layer_param);
- layer.SetUp(this->blob_bottom_vec_, &this->blob_top_vec_);
+ layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_);
GradientChecker<TypeParam> checker(1e-2, 2*1e-2, 1701, 0, 0.05);
- checker.CheckGradientExhaustive(&layer, &(this->blob_bottom_vec_),
- &(this->blob_top_vec_), 0);
+ checker.CheckGradientExhaustive(&layer, this->blob_bottom_vec_,
+ this->blob_top_vec_, 0);
}
} // namespace caffe
typedef typename TypeParam::Dtype Dtype;
LayerParameter layer_param;
MVNLayer<Dtype> layer(layer_param);
- layer.SetUp(this->blob_bottom_vec_, &(this->blob_top_vec_));
- layer.Forward(this->blob_bottom_vec_, &(this->blob_top_vec_));
+ layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_);
+ layer.Forward(this->blob_bottom_vec_, this->blob_top_vec_);
// Test mean
int num = this->blob_bottom_->num();
int channels = this->blob_bottom_->channels();
LayerParameter layer_param;
layer_param.ParseFromString("mvn_param{normalize_variance: false}");
MVNLayer<Dtype> layer(layer_param);
- layer.SetUp(this->blob_bottom_vec_, &(this->blob_top_vec_));
- layer.Forward(this->blob_bottom_vec_, &(this->blob_top_vec_));
+ layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_);
+ layer.Forward(this->blob_bottom_vec_, this->blob_top_vec_);
// Test mean
int num = this->blob_bottom_->num();
int channels = this->blob_bottom_->channels();
LayerParameter layer_param;
layer_param.ParseFromString("mvn_param{across_channels: true}");
MVNLayer<Dtype> layer(layer_param);
- layer.SetUp(this->blob_bottom_vec_, &(this->blob_top_vec_));
- layer.Forward(this->blob_bottom_vec_, &(this->blob_top_vec_));
+ layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_);
+ layer.Forward(this->blob_bottom_vec_, this->blob_top_vec_);
// Test mean
int num = this->blob_bottom_->num();
int channels = this->blob_bottom_->channels();
LayerParameter layer_param;
MVNLayer<Dtype> layer(layer_param);
GradientChecker<Dtype> checker(1e-2, 1e-3);
- checker.CheckGradientExhaustive(&layer, &(this->blob_bottom_vec_),
- &(this->blob_top_vec_));
+ checker.CheckGradientExhaustive(&layer, this->blob_bottom_vec_,
+ this->blob_top_vec_);
}
TYPED_TEST(MVNLayerTest, TestGradientMeanOnly) {
layer_param.ParseFromString("mvn_param{normalize_variance: false}");
MVNLayer<Dtype> layer(layer_param);
GradientChecker<Dtype> checker(1e-2, 1e-3);
- checker.CheckGradientExhaustive(&layer, &(this->blob_bottom_vec_),
- &(this->blob_top_vec_));
+ checker.CheckGradientExhaustive(&layer, this->blob_bottom_vec_,
+ this->blob_top_vec_);
}
TYPED_TEST(MVNLayerTest, TestGradientAcrossChannels) {
layer_param.ParseFromString("mvn_param{across_channels: true}");
MVNLayer<Dtype> layer(layer_param);
GradientChecker<Dtype> checker(1e-2, 1e-3);
- checker.CheckGradientExhaustive(&layer, &(this->blob_bottom_vec_),
- &(this->blob_top_vec_));
+ checker.CheckGradientExhaustive(&layer, this->blob_bottom_vec_,
+ this->blob_top_vec_);
}
} // namespace caffe
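For readers unfamiliar with the layer under test above: MVN (mean-variance normalization) subtracts the per-instance mean and, unless normalize_variance is set to false as in one of the tests, also divides by the standard deviation, roughly @f$ y = (x - \mu) / \sigma @f$ (with a small epsilon guarding the division), while across_channels controls whether @f$ \mu @f$ and @f$ \sigma @f$ are computed per channel or over all channels of an instance.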
}
Caffe::set_phase(Caffe::TRAIN);
DropoutLayer<Dtype> layer(layer_param);
- layer.SetUp(this->blob_bottom_vec_, &(this->blob_top_vec_));
- layer.Forward(this->blob_bottom_vec_, &(this->blob_top_vec_));
+ layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_);
+ layer.Forward(this->blob_bottom_vec_, this->blob_top_vec_);
// Now, check values
const Dtype* bottom_data = this->blob_bottom_->cpu_data();
const Dtype* top_data = this->blob_top_->cpu_data();
typedef typename TypeParam::Dtype Dtype;
LayerParameter layer_param;
AbsValLayer<Dtype> layer(layer_param);
- layer.SetUp(this->blob_bottom_vec_, &(this->blob_top_vec_));
- layer.Forward(this->blob_bottom_vec_, &(this->blob_top_vec_));
+ layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_);
+ layer.Forward(this->blob_bottom_vec_, this->blob_top_vec_);
const Dtype* bottom_data = this->blob_bottom_->cpu_data();
const Dtype* top_data = this->blob_top_->cpu_data();
const int count = this->blob_bottom_->count();
LayerParameter layer_param;
AbsValLayer<Dtype> layer(layer_param);
GradientChecker<Dtype> checker(1e-2, 1e-3, 1701, 0., 0.01);
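// kink 0 with kink_range 0.01 tells the checker to skip finite differencing
// near x = 0, where |x| is not differentiable.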
- checker.CheckGradientEltwise(&layer, &(this->blob_bottom_vec_),
- &(this->blob_top_vec_));
+ checker.CheckGradientEltwise(&layer, this->blob_bottom_vec_,
+ this->blob_top_vec_);
}
TYPED_TEST(NeuronLayerTest, TestReLU) {
typedef typename TypeParam::Dtype Dtype;
LayerParameter layer_param;
ReLULayer<Dtype> layer(layer_param);
- layer.SetUp(this->blob_bottom_vec_, &(this->blob_top_vec_));
- layer.Forward(this->blob_bottom_vec_, &(this->blob_top_vec_));
+ layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_);
+ layer.Forward(this->blob_bottom_vec_, this->blob_top_vec_);
// Now, check values
const Dtype* bottom_data = this->blob_bottom_->cpu_data();
const Dtype* top_data = this->blob_top_->cpu_data();
LayerParameter layer_param;
ReLULayer<Dtype> layer(layer_param);
GradientChecker<Dtype> checker(1e-2, 1e-3, 1701, 0., 0.01);
- checker.CheckGradientEltwise(&layer, &(this->blob_bottom_vec_),
- &(this->blob_top_vec_));
+ checker.CheckGradientEltwise(&layer, this->blob_bottom_vec_,
+ this->blob_top_vec_);
}
TYPED_TEST(NeuronLayerTest, TestReLUWithNegativeSlope) {
LayerParameter layer_param;
layer_param.ParseFromString("relu_param{negative_slope:0.01}");
ReLULayer<Dtype> layer(layer_param);
- layer.SetUp(this->blob_bottom_vec_, &(this->blob_top_vec_));
- layer.Forward(this->blob_bottom_vec_, &(this->blob_top_vec_));
+ layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_);
+ layer.Forward(this->blob_bottom_vec_, this->blob_top_vec_);
// Now, check values
const Dtype* bottom_data = this->blob_bottom_->cpu_data();
const Dtype* top_data = this->blob_top_->cpu_data();
layer_param.ParseFromString("relu_param{negative_slope:0.01}");
ReLULayer<Dtype> layer(layer_param);
GradientChecker<Dtype> checker(1e-2, 1e-3, 1701, 0., 0.01);
- checker.CheckGradientEltwise(&layer, &(this->blob_bottom_vec_),
- &(this->blob_top_vec_));
+ checker.CheckGradientEltwise(&layer, this->blob_bottom_vec_,
+ this->blob_top_vec_);
}
TYPED_TEST(NeuronLayerTest, TestSigmoid) {
typedef typename TypeParam::Dtype Dtype;
LayerParameter layer_param;
SigmoidLayer<Dtype> layer(layer_param);
- layer.SetUp(this->blob_bottom_vec_, &(this->blob_top_vec_));
- layer.Forward(this->blob_bottom_vec_, &(this->blob_top_vec_));
+ layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_);
+ layer.Forward(this->blob_bottom_vec_, this->blob_top_vec_);
// Now, check values
const Dtype* bottom_data = this->blob_bottom_->cpu_data();
const Dtype* top_data = this->blob_top_->cpu_data();
LayerParameter layer_param;
SigmoidLayer<Dtype> layer(layer_param);
GradientChecker<Dtype> checker(1e-2, 1e-3, 1701, 0., 0.01);
- checker.CheckGradientEltwise(&layer, &(this->blob_bottom_vec_),
- &(this->blob_top_vec_));
+ checker.CheckGradientEltwise(&layer, this->blob_bottom_vec_,
+ this->blob_top_vec_);
}
TYPED_TEST(NeuronLayerTest, TestTanH) {
typedef typename TypeParam::Dtype Dtype;
LayerParameter layer_param;
TanHLayer<Dtype> layer(layer_param);
- layer.SetUp(this->blob_bottom_vec_, &(this->blob_top_vec_));
- layer.Forward(this->blob_bottom_vec_, &(this->blob_top_vec_));
+ layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_);
+ layer.Forward(this->blob_bottom_vec_, this->blob_top_vec_);
// Test exact values
for (int i = 0; i < this->blob_bottom_->num(); ++i) {
for (int j = 0; j < this->blob_bottom_->channels(); ++j) {
LayerParameter layer_param;
TanHLayer<Dtype> layer(layer_param);
GradientChecker<Dtype> checker(1e-2, 1e-3);
- checker.CheckGradientEltwise(&layer, &(this->blob_bottom_vec_),
- &(this->blob_top_vec_));
+ checker.CheckGradientEltwise(&layer, this->blob_bottom_vec_,
+ this->blob_top_vec_);
}
TYPED_TEST(NeuronLayerTest, TestDropoutHalf) {
LayerParameter layer_param;
Caffe::set_phase(Caffe::TEST);
DropoutLayer<Dtype> layer(layer_param);
- layer.SetUp(this->blob_bottom_vec_, &(this->blob_top_vec_));
- layer.Forward(this->blob_bottom_vec_, &(this->blob_top_vec_));
+ layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_);
+ layer.Forward(this->blob_bottom_vec_, this->blob_top_vec_);
// Now, check values
const Dtype* bottom_data = this->blob_bottom_->cpu_data();
const Dtype* top_data = this->blob_top_->cpu_data();
Caffe::set_phase(Caffe::TRAIN);
DropoutLayer<Dtype> layer(layer_param);
GradientChecker<Dtype> checker(1e-2, 1e-3);
- checker.CheckGradientEltwise(&layer, &(this->blob_bottom_vec_),
- &(this->blob_top_vec_));
+ checker.CheckGradientEltwise(&layer, this->blob_bottom_vec_,
+ this->blob_top_vec_);
}
TYPED_TEST(NeuronLayerTest, TestDropoutGradientTest) {
Caffe::set_phase(Caffe::TEST);
DropoutLayer<Dtype> layer(layer_param);
GradientChecker<Dtype> checker(1e-2, 1e-3);
- checker.CheckGradientEltwise(&layer, &(this->blob_bottom_vec_),
- &(this->blob_top_vec_));
+ checker.CheckGradientEltwise(&layer, this->blob_bottom_vec_,
+ this->blob_top_vec_);
}
TYPED_TEST(NeuronLayerTest, TestBNLL) {
typedef typename TypeParam::Dtype Dtype;
LayerParameter layer_param;
BNLLLayer<Dtype> layer(layer_param);
- layer.SetUp(this->blob_bottom_vec_, &(this->blob_top_vec_));
- layer.Forward(this->blob_bottom_vec_, &(this->blob_top_vec_));
+ layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_);
+ layer.Forward(this->blob_bottom_vec_, this->blob_top_vec_);
// Now, check values
const Dtype* bottom_data = this->blob_bottom_->cpu_data();
const Dtype* top_data = this->blob_top_->cpu_data();
LayerParameter layer_param;
BNLLLayer<Dtype> layer(layer_param);
GradientChecker<Dtype> checker(1e-2, 1e-3);
- checker.CheckGradientEltwise(&layer, &(this->blob_bottom_vec_),
- &(this->blob_top_vec_));
+ checker.CheckGradientEltwise(&layer, this->blob_bottom_vec_,
+ this->blob_top_vec_);
}
#ifdef USE_CUDNN
Caffe::set_mode(Caffe::GPU);
LayerParameter layer_param;
CuDNNReLULayer<TypeParam> layer(layer_param);
- layer.SetUp(this->blob_bottom_vec_, &(this->blob_top_vec_));
- layer.Forward(this->blob_bottom_vec_, &(this->blob_top_vec_));
+ layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_);
+ layer.Forward(this->blob_bottom_vec_, this->blob_top_vec_);
// Now, check values
const TypeParam* bottom_data = this->blob_bottom_->cpu_data();
const TypeParam* top_data = this->blob_top_->cpu_data();
LayerParameter layer_param;
CuDNNReLULayer<TypeParam> layer(layer_param);
GradientChecker<TypeParam> checker(1e-2, 1e-3, 1701, 0., 0.01);
- checker.CheckGradientEltwise(&layer, &(this->blob_bottom_vec_),
- &(this->blob_top_vec_));
+ checker.CheckGradientEltwise(&layer, this->blob_bottom_vec_,
+ this->blob_top_vec_);
}
TYPED_TEST(CuDNNNeuronLayerTest, TestReLUWithNegativeSlopeCuDNN) {
LayerParameter layer_param;
layer_param.ParseFromString("relu_param{negative_slope:0.01}");
CuDNNReLULayer<TypeParam> layer(layer_param);
- layer.SetUp(this->blob_bottom_vec_, &(this->blob_top_vec_));
- layer.Forward(this->blob_bottom_vec_, &(this->blob_top_vec_));
+ layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_);
+ layer.Forward(this->blob_bottom_vec_, this->blob_top_vec_);
// Now, check values
const TypeParam* bottom_data = this->blob_bottom_->cpu_data();
const TypeParam* top_data = this->blob_top_->cpu_data();
layer_param.ParseFromString("relu_param{negative_slope:0.01}");
CuDNNReLULayer<TypeParam> layer(layer_param);
GradientChecker<TypeParam> checker(1e-2, 1e-3, 1701, 0., 0.01);
- checker.CheckGradientEltwise(&layer, &(this->blob_bottom_vec_),
- &(this->blob_top_vec_));
+ checker.CheckGradientEltwise(&layer, this->blob_bottom_vec_,
+ this->blob_top_vec_);
}
TYPED_TEST(CuDNNNeuronLayerTest, TestSigmoidCuDNN) {
Caffe::set_mode(Caffe::GPU);
LayerParameter layer_param;
CuDNNSigmoidLayer<TypeParam> layer(layer_param);
- layer.SetUp(this->blob_bottom_vec_, &(this->blob_top_vec_));
- layer.Forward(this->blob_bottom_vec_, &(this->blob_top_vec_));
+ layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_);
+ layer.Forward(this->blob_bottom_vec_, this->blob_top_vec_);
// Now, check values
const TypeParam* bottom_data = this->blob_bottom_->cpu_data();
const TypeParam* top_data = this->blob_top_->cpu_data();
LayerParameter layer_param;
CuDNNSigmoidLayer<TypeParam> layer(layer_param);
GradientChecker<TypeParam> checker(1e-2, 1e-3, 1701, 0., 0.01);
- checker.CheckGradientEltwise(&layer, &(this->blob_bottom_vec_),
- &(this->blob_top_vec_));
+ checker.CheckGradientEltwise(&layer, this->blob_bottom_vec_,
+ this->blob_top_vec_);
}
TYPED_TEST(CuDNNNeuronLayerTest, TestTanHCuDNN) {
Caffe::set_mode(Caffe::GPU);
LayerParameter layer_param;
CuDNNTanHLayer<TypeParam> layer(layer_param);
- layer.SetUp(this->blob_bottom_vec_, &(this->blob_top_vec_));
- layer.Forward(this->blob_bottom_vec_, &(this->blob_top_vec_));
+ layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_);
+ layer.Forward(this->blob_bottom_vec_, this->blob_top_vec_);
// Test exact values
for (int i = 0; i < this->blob_bottom_->num(); ++i) {
for (int j = 0; j < this->blob_bottom_->channels(); ++j) {
LayerParameter layer_param;
CuDNNTanHLayer<TypeParam> layer(layer_param);
GradientChecker<TypeParam> checker(1e-2, 1e-3);
- checker.CheckGradientEltwise(&layer, &(this->blob_bottom_vec_),
- &(this->blob_top_vec_));
+ checker.CheckGradientEltwise(&layer, this->blob_bottom_vec_,
+ this->blob_top_vec_);
}
#endif
blob_bottom_->mutable_cpu_data()[i + 14] = 3;
}
PoolingLayer<Dtype> layer(layer_param);
- layer.SetUp(blob_bottom_vec_, &blob_top_vec_);
+ layer.SetUp(blob_bottom_vec_, blob_top_vec_);
EXPECT_EQ(blob_top_->num(), num);
EXPECT_EQ(blob_top_->channels(), channels);
EXPECT_EQ(blob_top_->height(), 2);
EXPECT_EQ(blob_top_mask_->height(), 2);
EXPECT_EQ(blob_top_mask_->width(), 4);
}
- layer.Forward(blob_bottom_vec_, &blob_top_vec_);
+ layer.Forward(blob_bottom_vec_, blob_top_vec_);
// Expected output: 2x 2 channels of:
// [9 5 5 8]
// [9 5 5 8]
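// A sketch of the value check that follows (the 8-values-per-channel
// stride is assumed from the 2 x 4 layout above):
for (int i = 0; i < 8 * num * channels; i += 8) {
  EXPECT_EQ(blob_top_->cpu_data()[i + 0], 9);
  EXPECT_EQ(blob_top_->cpu_data()[i + 1], 5);
  EXPECT_EQ(blob_top_->cpu_data()[i + 2], 5);
  EXPECT_EQ(blob_top_->cpu_data()[i + 3], 8);
  EXPECT_EQ(blob_top_->cpu_data()[i + 4], 9);
  EXPECT_EQ(blob_top_->cpu_data()[i + 5], 5);
  EXPECT_EQ(blob_top_->cpu_data()[i + 6], 5);
  EXPECT_EQ(blob_top_->cpu_data()[i + 7], 8);
}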
blob_bottom_->mutable_cpu_data()[i + 35] = 11;
}
PoolingLayer<Dtype> layer(layer_param);
- layer.SetUp(blob_bottom_vec_, &blob_top_vec_);
+ layer.SetUp(blob_bottom_vec_, blob_top_vec_);
EXPECT_EQ(blob_top_->num(), num);
EXPECT_EQ(blob_top_->channels(), channels);
EXPECT_EQ(blob_top_->height(), 4);
EXPECT_EQ(blob_top_mask_->height(), 4);
EXPECT_EQ(blob_top_mask_->width(), 5);
}
- layer.Forward(blob_bottom_vec_, &blob_top_vec_);
+ layer.Forward(blob_bottom_vec_, blob_top_vec_);
// Expected output: 2x 2 channels of:
// [35 32 26 27 27]
// [32 33 33 27 27]
blob_bottom_->mutable_cpu_data()[i + 35] = 11;
}
PoolingLayer<Dtype> layer(layer_param);
- layer.SetUp(blob_bottom_vec_, &blob_top_vec_);
+ layer.SetUp(blob_bottom_vec_, blob_top_vec_);
EXPECT_EQ(blob_top_->num(), num);
EXPECT_EQ(blob_top_->channels(), channels);
EXPECT_EQ(blob_top_->height(), 5);
EXPECT_EQ(blob_top_mask_->height(), 5);
EXPECT_EQ(blob_top_mask_->width(), 4);
}
- layer.Forward(blob_bottom_vec_, &blob_top_vec_);
+ layer.Forward(blob_bottom_vec_, blob_top_vec_);
// Expected output: 2x 2 channels of:
// [35 32 26 26]
// [32 32 27 27]
pooling_param->set_kernel_size(3);
pooling_param->set_stride(2);
PoolingLayer<Dtype> layer(layer_param);
- layer.SetUp(this->blob_bottom_vec_, &(this->blob_top_vec_));
+ layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_);
EXPECT_EQ(this->blob_top_->num(), this->blob_bottom_->num());
EXPECT_EQ(this->blob_top_->channels(), this->blob_bottom_->channels());
EXPECT_EQ(this->blob_top_->height(), 3);
pooling_param->set_pad(1);
pooling_param->set_pool(PoolingParameter_PoolMethod_AVE);
PoolingLayer<Dtype> layer(layer_param);
- layer.SetUp(this->blob_bottom_vec_, &(this->blob_top_vec_));
+ layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_);
EXPECT_EQ(this->blob_top_->num(), this->blob_bottom_->num());
EXPECT_EQ(this->blob_top_->channels(), this->blob_bottom_->channels());
EXPECT_EQ(this->blob_top_->height(), 4);
pooling_param->set_stride(2);
pooling_param->set_pool(PoolingParameter_PoolMethod_MAX);
PoolingLayer<TypeParam> layer(layer_param);
- layer.SetUp(this->blob_bottom_vec_, &(this->blob_top_vec_));
- layer.Forward(this->blob_bottom_vec_, &(this->blob_top_vec_));
+ layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_);
+ layer.Forward(this->blob_bottom_vec_, this->blob_top_vec_);
for (int i = 0; i < this->blob_bottom_->count(); ++i) {
cout << "bottom data " << i << " " << this->blob_bottom_->cpu_data()[i] << endl;
}
for (int i = 0; i < this->blob_top_->count(); ++i) {
this->blob_top_->mutable_cpu_diff()[i] = i;
}
- layer.Backward(this->blob_top_vec_, true, &(this->blob_bottom_vec_));
+ vector<bool> propagate_down(this->blob_bottom_vec_.size(), true);
+ layer.Backward(this->blob_top_vec_, propagate_down, this->blob_bottom_vec_);
for (int i = 0; i < this->blob_bottom_->count(); ++i) {
cout << "bottom diff " << i << " " << this->blob_bottom_->cpu_diff()[i] << endl;
}
pooling_param->set_pool(PoolingParameter_PoolMethod_MAX);
PoolingLayer<Dtype> layer(layer_param);
GradientChecker<Dtype> checker(1e-4, 1e-2);
- checker.CheckGradientExhaustive(&layer, &(this->blob_bottom_vec_),
- &(this->blob_top_vec_));
+ checker.CheckGradientExhaustive(&layer, this->blob_bottom_vec_,
+ this->blob_top_vec_);
}
}
}
this->blob_bottom_->mutable_cpu_data()[7] = 2;
this->blob_bottom_->mutable_cpu_data()[8] = 1;
PoolingLayer<Dtype> layer(layer_param);
- layer.SetUp(this->blob_bottom_vec_, &(this->blob_top_vec_));
+ layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_);
EXPECT_EQ(this->blob_top_->num(), 1);
EXPECT_EQ(this->blob_top_->channels(), 1);
EXPECT_EQ(this->blob_top_->height(), 3);
EXPECT_EQ(this->blob_top_->width(), 3);
- layer.Forward(this->blob_bottom_vec_, &(this->blob_top_vec_));
+ layer.Forward(this->blob_bottom_vec_, this->blob_top_vec_);
Dtype epsilon = 1e-8;
// Output:
// [ 1 4 4 ]
this->blob_top_vec_.push_back(this->blob_top_mask_);
PoolingLayer<Dtype> layer(layer_param);
GradientChecker<Dtype> checker(1e-4, 1e-2);
- checker.CheckGradientExhaustive(&layer, &(this->blob_bottom_vec_),
- &(this->blob_top_vec_));
+ checker.CheckGradientExhaustive(&layer, this->blob_bottom_vec_,
+ this->blob_top_vec_);
this->blob_top_vec_.pop_back();
}
}
ConstantFiller<Dtype> filler(filler_param);
filler.Fill(this->blob_bottom_);
PoolingLayer<Dtype> layer(layer_param);
- layer.SetUp(this->blob_bottom_vec_, &(this->blob_top_vec_));
+ layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_);
EXPECT_EQ(this->blob_top_->num(), 1);
EXPECT_EQ(this->blob_top_->channels(), 1);
EXPECT_EQ(this->blob_top_->height(), 3);
EXPECT_EQ(this->blob_top_->width(), 3);
- layer.Forward(this->blob_bottom_vec_, &(this->blob_top_vec_));
+ layer.Forward(this->blob_bottom_vec_, this->blob_top_vec_);
Dtype epsilon = 1e-5;
EXPECT_NEAR(this->blob_top_->cpu_data()[0], 8.0 / 9, epsilon);
EXPECT_NEAR(this->blob_top_->cpu_data()[1], 4.0 / 3, epsilon);
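// With a constant-filled 3 x 3 bottom (apparently value 2), a 3 x 3 kernel
// and pad 1, the corner window covers 4 input cells (sum 8) and the edge
// window 6 (sum 12), each divided by the full 9-cell kernel area, giving
// the 8/9 and 4/3 expected above.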
pooling_param->set_pool(PoolingParameter_PoolMethod_AVE);
PoolingLayer<Dtype> layer(layer_param);
GradientChecker<Dtype> checker(1e-2, 1e-2);
- checker.CheckGradientExhaustive(&layer, &(this->blob_bottom_vec_),
- &(this->blob_top_vec_));
+ checker.CheckGradientExhaustive(&layer, this->blob_bottom_vec_,
+ this->blob_top_vec_);
}
}
}
pooling_param->set_pool(PoolingParameter_PoolMethod_AVE);
PoolingLayer<Dtype> layer(layer_param);
GradientChecker<Dtype> checker(1e-2, 1e-2);
- checker.CheckGradientExhaustive(&layer, &(this->blob_bottom_vec_),
- &(this->blob_top_vec_));
+ checker.CheckGradientExhaustive(&layer, this->blob_bottom_vec_,
+ this->blob_top_vec_);
}
}
}
blob_bottom_->mutable_cpu_data()[i + 14] = 3;
}
CuDNNPoolingLayer<Dtype> layer(layer_param);
- layer.SetUp(blob_bottom_vec_, &blob_top_vec_);
+ layer.SetUp(blob_bottom_vec_, blob_top_vec_);
EXPECT_EQ(blob_top_->num(), num);
EXPECT_EQ(blob_top_->channels(), channels);
EXPECT_EQ(blob_top_->height(), 2);
EXPECT_EQ(blob_top_mask_->height(), 2);
EXPECT_EQ(blob_top_mask_->width(), 4);
}
- layer.Forward(blob_bottom_vec_, &blob_top_vec_);
+ layer.Forward(blob_bottom_vec_, blob_top_vec_);
// Expected output: 2x 2 channels of:
// [9 5 5 8]
// [9 5 5 8]
blob_bottom_->mutable_cpu_data()[i + 35] = 11;
}
CuDNNPoolingLayer<Dtype> layer(layer_param);
- layer.SetUp(blob_bottom_vec_, &blob_top_vec_);
+ layer.SetUp(blob_bottom_vec_, blob_top_vec_);
EXPECT_EQ(blob_top_->num(), num);
EXPECT_EQ(blob_top_->channels(), channels);
EXPECT_EQ(blob_top_->height(), 4);
EXPECT_EQ(blob_top_mask_->height(), 4);
EXPECT_EQ(blob_top_mask_->width(), 5);
}
- layer.Forward(blob_bottom_vec_, &blob_top_vec_);
+ layer.Forward(blob_bottom_vec_, blob_top_vec_);
// Expected output: 2x 2 channels of:
// [35 32 26 27 27]
// [32 33 33 27 27]
blob_bottom_->mutable_cpu_data()[i + 35] = 11;
}
CuDNNPoolingLayer<Dtype> layer(layer_param);
- layer.SetUp(blob_bottom_vec_, &blob_top_vec_);
+ layer.SetUp(blob_bottom_vec_, blob_top_vec_);
EXPECT_EQ(blob_top_->num(), num);
EXPECT_EQ(blob_top_->channels(), channels);
EXPECT_EQ(blob_top_->height(), 5);
EXPECT_EQ(blob_top_mask_->height(), 5);
EXPECT_EQ(blob_top_mask_->width(), 4);
}
- layer.Forward(blob_bottom_vec_, &blob_top_vec_);
+ layer.Forward(blob_bottom_vec_, blob_top_vec_);
// Expected output: 2x 2 channels of:
// [35 32 26 26]
// [32 32 27 27]
pooling_param->set_kernel_size(3);
pooling_param->set_stride(2);
CuDNNPoolingLayer<TypeParam> layer(layer_param);
- layer.SetUp(this->blob_bottom_vec_, &(this->blob_top_vec_));
+ layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_);
EXPECT_EQ(this->blob_top_->num(), this->blob_bottom_->num());
EXPECT_EQ(this->blob_top_->channels(), this->blob_bottom_->channels());
EXPECT_EQ(this->blob_top_->height(), 3);
pooling_param->set_pad(1);
pooling_param->set_pool(PoolingParameter_PoolMethod_AVE);
CuDNNPoolingLayer<TypeParam> layer(layer_param);
- layer.SetUp(this->blob_bottom_vec_, &(this->blob_top_vec_));
+ layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_);
EXPECT_EQ(this->blob_top_->num(), this->blob_bottom_->num());
EXPECT_EQ(this->blob_top_->channels(), this->blob_bottom_->channels());
EXPECT_EQ(this->blob_top_->height(), 4);
pooling_param->set_stride(2);
pooling_param->set_pool(PoolingParameter_PoolMethod_MAX);
CuDNNPoolingLayer<TypeParam> layer(layer_param);
- layer.SetUp(this->blob_bottom_vec_, &(this->blob_top_vec_));
- layer.Forward(this->blob_bottom_vec_, &(this->blob_top_vec_));
+ layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_);
+ layer.Forward(this->blob_bottom_vec_, this->blob_top_vec_);
for (int i = 0; i < this->blob_bottom_->count(); ++i) {
cout << "bottom data " << i << " " << this->blob_bottom_->cpu_data()[i] << endl;
}
for (int i = 0; i < this->blob_top_->count(); ++i) {
this->blob_top_->mutable_cpu_diff()[i] = i;
}
- layer.Backward(this->blob_top_vec_, true, &(this->blob_bottom_vec_));
+ vector<bool> propagate_down(this->blob_bottom_vec_.size(), true);
+ layer.Backward(this->blob_top_vec_, propagate_down, this->blob_bottom_vec_);
for (int i = 0; i < this->blob_bottom_->count(); ++i) {
cout << "bottom diff " << i << " " << this->blob_bottom_->cpu_diff()[i] << endl;
}
pooling_param->set_pool(PoolingParameter_PoolMethod_MAX);
CuDNNPoolingLayer<TypeParam> layer(layer_param);
GradientChecker<TypeParam> checker(1e-4, 1e-2);
- checker.CheckGradientExhaustive(&layer, &(this->blob_bottom_vec_),
- &(this->blob_top_vec_));
+ checker.CheckGradientExhaustive(&layer, this->blob_bottom_vec_,
+ this->blob_top_vec_);
}
}
}
this->blob_bottom_->mutable_cpu_data()[7] = 2;
this->blob_bottom_->mutable_cpu_data()[8] = 1;
CuDNNPoolingLayer<TypeParam> layer(layer_param);
- layer.SetUp(this->blob_bottom_vec_, &(this->blob_top_vec_));
+ layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_);
EXPECT_EQ(this->blob_top_->num(), 1);
EXPECT_EQ(this->blob_top_->channels(), 1);
EXPECT_EQ(this->blob_top_->height(), 3);
EXPECT_EQ(this->blob_top_->width(), 3);
- layer.Forward(this->blob_bottom_vec_, &(this->blob_top_vec_));
+ layer.Forward(this->blob_bottom_vec_, this->blob_top_vec_);
TypeParam epsilon = 1e-8;
// Output:
// [ 1 4 4 ]
this->blob_top_vec_.push_back(this->blob_top_mask_);
CuDNNPoolingLayer<TypeParam> layer(layer_param);
GradientChecker<TypeParam> checker(1e-4, 1e-2);
- checker.CheckGradientExhaustive(&layer, &(this->blob_bottom_vec_),
- &(this->blob_top_vec_));
+ checker.CheckGradientExhaustive(&layer, this->blob_bottom_vec_,
+ this->blob_top_vec_);
this->blob_top_vec_.pop_back();
}
}
ConstantFiller<TypeParam> filler(filler_param);
filler.Fill(this->blob_bottom_);
CuDNNPoolingLayer<TypeParam> layer(layer_param);
- layer.SetUp(this->blob_bottom_vec_, &(this->blob_top_vec_));
+ layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_);
EXPECT_EQ(this->blob_top_->num(), 1);
EXPECT_EQ(this->blob_top_->channels(), 1);
EXPECT_EQ(this->blob_top_->height(), 3);
EXPECT_EQ(this->blob_top_->width(), 3);
- layer.Forward(this->blob_bottom_vec_, &(this->blob_top_vec_));
+ layer.Forward(this->blob_bottom_vec_, this->blob_top_vec_);
TypeParam epsilon = 1e-5;
EXPECT_NEAR(this->blob_top_->cpu_data()[0], 8.0 / 9, epsilon);
EXPECT_NEAR(this->blob_top_->cpu_data()[1], 4.0 / 3, epsilon);
pooling_param->set_pool(PoolingParameter_PoolMethod_AVE);
CuDNNPoolingLayer<TypeParam> layer(layer_param);
GradientChecker<TypeParam> checker(1e-2, 1e-2);
- checker.CheckGradientExhaustive(&layer, &(this->blob_bottom_vec_),
- &(this->blob_top_vec_));
+ checker.CheckGradientExhaustive(&layer, this->blob_bottom_vec_,
+ this->blob_top_vec_);
}
}
}
pooling_param->set_pool(PoolingParameter_PoolMethod_AVE);
CuDNNPoolingLayer<TypeParam> layer(layer_param);
GradientChecker<TypeParam> checker(1e-2, 1e-2);
- checker.CheckGradientExhaustive(&layer, &(this->blob_bottom_vec_),
- &(this->blob_top_vec_));
+ checker.CheckGradientExhaustive(&layer, this->blob_bottom_vec_,
+ this->blob_top_vec_);
}
}
}
layer_param.mutable_power_param()->set_scale(scale);
layer_param.mutable_power_param()->set_shift(shift);
PowerLayer<Dtype> layer(layer_param);
- layer.SetUp(this->blob_bottom_vec_, &(this->blob_top_vec_));
- layer.Forward(this->blob_bottom_vec_, &(this->blob_top_vec_));
+ layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_);
+ layer.Forward(this->blob_bottom_vec_, this->blob_top_vec_);
// Now, check values
const Dtype* bottom_data = this->blob_bottom_->cpu_data();
const Dtype* top_data = this->blob_top_->cpu_data();
}
}
GradientChecker<Dtype> checker(1e-2, 1e-2, 1701, 0., 0.01);
- checker.CheckGradientEltwise(&layer, &(this->blob_bottom_vec_),
- &(this->blob_top_vec_));
+ checker.CheckGradientEltwise(&layer, this->blob_bottom_vec_,
+ this->blob_top_vec_);
}
Blob<Dtype>* const blob_bottom_;
// Fill the targets vector
targets_filler.Fill(this->blob_bottom_targets_);
SigmoidCrossEntropyLossLayer<Dtype> layer(layer_param);
- layer.SetUp(this->blob_bottom_vec_, &(this->blob_top_vec_));
+ layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_);
Dtype layer_loss =
- layer.Forward(this->blob_bottom_vec_, &(this->blob_top_vec_));
+ layer.Forward(this->blob_bottom_vec_, this->blob_top_vec_);
const int count = this->blob_bottom_data_->count();
const int num = this->blob_bottom_data_->num();
const Dtype* blob_bottom_data = this->blob_bottom_data_->cpu_data();
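// A reference computation (a sketch; the guard terms inside log() and the
// division by num are assumptions) that layer_loss can be compared against:
const Dtype* blob_bottom_targets = this->blob_bottom_targets_->cpu_data();
Dtype reference_loss = 0;
for (int i = 0; i < count; ++i) {
  const Dtype prediction = 1 / (1 + exp(-blob_bottom_data[i]));
  const Dtype target = blob_bottom_targets[i];
  // The added indicator keeps log() finite when its factor is zero anyway.
  reference_loss -= target * log(prediction + (target == Dtype(0)));
  reference_loss -= (1 - target) * log(1 - prediction + (target == Dtype(1)));
}
EXPECT_NEAR(layer_loss, reference_loss / num, 1e-2);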
const Dtype kLossWeight = 3.7;
layer_param.add_loss_weight(kLossWeight);
SigmoidCrossEntropyLossLayer<Dtype> layer(layer_param);
- layer.SetUp(this->blob_bottom_vec_, &this->blob_top_vec_);
+ layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_);
GradientChecker<Dtype> checker(1e-2, 1e-2, 1701);
- checker.CheckGradientExhaustive(&layer, &(this->blob_bottom_vec_),
- &(this->blob_top_vec_), 0);
+ checker.CheckGradientExhaustive(&layer, this->blob_bottom_vec_,
+ this->blob_top_vec_, 0);
}
LayerParameter layer_param;
layer_param.mutable_slice_param()->set_slice_dim(0);
SliceLayer<Dtype> layer(layer_param);
- layer.SetUp(this->blob_bottom_vec_, &(this->blob_top_vec_1_));
+ layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_1_);
EXPECT_EQ(this->blob_bottom_->num(), 3 * this->blob_top_0_->num());
EXPECT_EQ(this->blob_top_0_->num(), this->blob_top_1_->num());
EXPECT_EQ(this->blob_top_0_->num(), this->blob_top_2_->num());
LayerParameter layer_param;
layer_param.mutable_slice_param()->add_slice_point(3);
SliceLayer<Dtype> layer(layer_param);
- layer.SetUp(this->blob_bottom_vec_, &(this->blob_top_vec_0_));
+ layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_0_);
EXPECT_EQ(this->blob_top_0_->num(), this->blob_bottom_->num());
EXPECT_EQ(this->blob_top_0_->channels(), 3);
EXPECT_EQ(this->blob_top_1_->channels(), 9);
LayerParameter layer_param;
layer_param.mutable_slice_param()->set_slice_dim(0);
SliceLayer<Dtype> layer(layer_param);
- layer.SetUp(this->blob_bottom_vec_, &(this->blob_top_vec_0_));
+ layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_0_);
const int top_num = this->blob_bottom_->num() / 2;
ASSERT_EQ(top_num, this->blob_top_0_->num());
ASSERT_EQ(top_num, this->blob_top_1_->num());
- layer.Forward(this->blob_bottom_vec_, &(this->blob_top_vec_0_));
+ layer.Forward(this->blob_bottom_vec_, this->blob_top_vec_0_);
for (int n = 0; n < top_num; ++n) {
for (int c = 0; c < this->blob_top_0_->channels(); ++c) {
for (int h = 0; h < this->blob_bottom_->height(); ++h) {
layer_param.mutable_slice_param()->add_slice_point(kSlicePoint0);
layer_param.mutable_slice_param()->add_slice_point(kSlicePoint1);
SliceLayer<Dtype> layer(layer_param);
- layer.SetUp(this->blob_bottom_vec_, &(this->blob_top_vec_1_));
+ layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_1_);
ASSERT_EQ(kSlicePoint0, this->blob_top_0_->channels());
ASSERT_EQ(kSlicePoint1 - kSlicePoint0, this->blob_top_1_->channels());
ASSERT_EQ(this->blob_bottom_->channels() - kSlicePoint1,
this->blob_top_2_->channels());
- layer.Forward(this->blob_bottom_vec_, &(this->blob_top_vec_1_));
+ layer.Forward(this->blob_bottom_vec_, this->blob_top_vec_1_);
for (int n = 0; n < this->blob_bottom_->num(); ++n) {
for (int c = 0; c < this->blob_top_0_->channels(); ++c) {
for (int h = 0; h < this->blob_bottom_->height(); ++h) {
layer_param.mutable_slice_param()->set_slice_dim(0);
SliceLayer<Dtype> layer(layer_param);
GradientChecker<Dtype> checker(1e-2, 1e-3);
- checker.CheckGradientExhaustive(&layer, &(this->blob_bottom_vec_),
- &(this->blob_top_vec_0_));
+ checker.CheckGradientExhaustive(&layer, this->blob_bottom_vec_,
+ this->blob_top_vec_0_);
}
TYPED_TEST(SliceLayerTest, TestGradientAcrossChannels) {
layer_param.mutable_slice_param()->add_slice_point(kSlicePoint);
SliceLayer<Dtype> layer(layer_param);
GradientChecker<Dtype> checker(1e-2, 1e-3);
- checker.CheckGradientExhaustive(&layer, &(this->blob_bottom_vec_),
- &(this->blob_top_vec_0_));
+ checker.CheckGradientExhaustive(&layer, this->blob_bottom_vec_,
+ this->blob_top_vec_0_);
}
} // namespace caffe
typedef typename TypeParam::Dtype Dtype;
LayerParameter layer_param;
SoftmaxLayer<Dtype> layer(layer_param);
- layer.SetUp(this->blob_bottom_vec_, &(this->blob_top_vec_));
- layer.Forward(this->blob_bottom_vec_, &(this->blob_top_vec_));
+ layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_);
+ layer.Forward(this->blob_bottom_vec_, this->blob_top_vec_);
// Test sum
for (int i = 0; i < this->blob_bottom_->num(); ++i) {
for (int k = 0; k < this->blob_bottom_->height(); ++k) {
LayerParameter layer_param;
SoftmaxLayer<Dtype> layer(layer_param);
GradientChecker<Dtype> checker(1e-2, 1e-3);
- checker.CheckGradientExhaustive(&layer, &(this->blob_bottom_vec_),
- &(this->blob_top_vec_));
+ checker.CheckGradientExhaustive(&layer, this->blob_bottom_vec_,
+ this->blob_top_vec_);
}
#ifdef USE_CUDNN
Caffe::set_mode(Caffe::GPU);
LayerParameter layer_param;
CuDNNSoftmaxLayer<TypeParam> layer(layer_param);
- layer.SetUp(this->blob_bottom_vec_, &(this->blob_top_vec_));
- layer.Forward(this->blob_bottom_vec_, &(this->blob_top_vec_));
+ layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_);
+ layer.Forward(this->blob_bottom_vec_, this->blob_top_vec_);
// Test sum
for (int i = 0; i < this->blob_bottom_->num(); ++i) {
for (int k = 0; k < this->blob_bottom_->height(); ++k) {
LayerParameter layer_param;
CuDNNSoftmaxLayer<TypeParam> layer(layer_param);
GradientChecker<TypeParam> checker(1e-2, 1e-3);
- checker.CheckGradientExhaustive(&layer, &(this->blob_bottom_vec_),
- &(this->blob_top_vec_));
+ checker.CheckGradientExhaustive(&layer, this->blob_bottom_vec_,
+ this->blob_top_vec_);
}
#endif
layer_param.add_loss_weight(3);
SoftmaxWithLossLayer<Dtype> layer(layer_param);
GradientChecker<Dtype> checker(1e-2, 1e-2, 1701);
- checker.CheckGradientExhaustive(&layer, &(this->blob_bottom_vec_),
- &(this->blob_top_vec_), 0);
+ checker.CheckGradientExhaustive(&layer, this->blob_bottom_vec_,
+ this->blob_top_vec_, 0);
}
} // namespace caffe
typedef typename TypeParam::Dtype Dtype;
LayerParameter layer_param;
SplitLayer<Dtype> layer(layer_param);
- layer.SetUp(this->blob_bottom_vec_, &(this->blob_top_vec_));
+ layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_);
EXPECT_EQ(this->blob_top_a_->num(), 2);
EXPECT_EQ(this->blob_top_a_->channels(), 3);
EXPECT_EQ(this->blob_top_a_->height(), 6);
typedef typename TypeParam::Dtype Dtype;
LayerParameter layer_param;
SplitLayer<Dtype> layer(layer_param);
- layer.SetUp(this->blob_bottom_vec_, &(this->blob_top_vec_));
- layer.Forward(this->blob_bottom_vec_, &(this->blob_top_vec_));
+ layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_);
+ layer.Forward(this->blob_bottom_vec_, this->blob_top_vec_);
for (int i = 0; i < this->blob_bottom_->count(); ++i) {
Dtype bottom_value = this->blob_bottom_->cpu_data()[i];
EXPECT_EQ(bottom_value, this->blob_top_a_->cpu_data()[i]);
LayerParameter layer_param;
SplitLayer<Dtype> layer(layer_param);
GradientChecker<Dtype> checker(1e-2, 1e-2);
- checker.CheckGradientEltwise(&layer, &(this->blob_bottom_vec_),
- &(this->blob_top_vec_));
+ checker.CheckGradientEltwise(&layer, this->blob_bottom_vec_,
+ this->blob_top_vec_);
}
pooling_param->set_kernel_size(3);
pooling_param->set_stride(2);
PoolingLayer<TypeParam> layer(layer_param);
- layer.SetUp(this->blob_bottom_vec_, &(this->blob_top_vec_));
+ layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_);
EXPECT_EQ(this->blob_top_->num(), this->blob_bottom_->num());
EXPECT_EQ(this->blob_top_->channels(), this->blob_bottom_->channels());
EXPECT_EQ(this->blob_top_->height(), 3);
pooling_param->set_stride(2);
pooling_param->set_pool(PoolingParameter_PoolMethod_STOCHASTIC);
PoolingLayer<TypeParam> layer(layer_param);
- layer.SetUp(this->blob_bottom_vec_, &(this->blob_top_vec_));
- layer.Forward(this->blob_bottom_vec_, &(this->blob_top_vec_));
+ layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_);
+ layer.Forward(this->blob_bottom_vec_, this->blob_top_vec_);
// Check if the output is correct - it should do random sampling
const TypeParam* bottom_data = this->blob_bottom_->cpu_data();
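// In TRAIN mode each pooled value should therefore equal one of the bottom
// values inside its pooling window (sampled with probability proportional
// to that value), which is presumably what the elided check verifies.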
pooling_param->set_stride(2);
pooling_param->set_pool(PoolingParameter_PoolMethod_STOCHASTIC);
PoolingLayer<TypeParam> layer(layer_param);
- layer.SetUp(this->blob_bottom_vec_, &(this->blob_top_vec_));
- layer.Forward(this->blob_bottom_vec_, &(this->blob_top_vec_));
+ layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_);
+ layer.Forward(this->blob_bottom_vec_, this->blob_top_vec_);
// Check if the output is correct - it should do random sampling
const TypeParam* bottom_data = this->blob_bottom_->cpu_data();
GradientChecker<TypeParam> checker(1e-4, 1e-2);
// it is too expensive to call curand multiple times, so we don't do an
// exhaustive gradient check.
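// (CheckGradient runs a single finite-difference pass against the summed
// objective; CheckGradientExhaustive would repeat that pass once per top
// element, and every extra Forward re-invokes the random sampling.)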
- checker.CheckGradient(&layer, &(this->blob_bottom_vec_),
- &(this->blob_top_vec_));
+ checker.CheckGradient(&layer, this->blob_bottom_vec_,
+ this->blob_top_vec_);
}
typedef typename TypeParam::Dtype Dtype;
LayerParameter layer_param;
ThresholdLayer<Dtype> layer(layer_param);
- layer.SetUp(this->blob_bottom_vec_, &(this->blob_top_vec_));
+ layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_);
EXPECT_EQ(this->blob_top_->num(), this->blob_bottom_->num());
EXPECT_EQ(this->blob_top_->channels(), this->blob_bottom_->channels());
EXPECT_EQ(this->blob_top_->height(), this->blob_bottom_->height());
typedef typename TypeParam::Dtype Dtype;
LayerParameter layer_param;
ThresholdLayer<Dtype> layer(layer_param);
- layer.SetUp(this->blob_bottom_vec_, &(this->blob_top_vec_));
- layer.Forward(this->blob_bottom_vec_, &(this->blob_top_vec_));
+ layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_);
+ layer.Forward(this->blob_bottom_vec_, this->blob_top_vec_);
// Now, check values
const Dtype* bottom_data = this->blob_bottom_->cpu_data();
const Dtype* top_data = this->blob_top_->cpu_data();
ThresholdParameter* threshold_param = layer_param.mutable_threshold_param();
threshold_param->set_threshold(0.5);
ThresholdLayer<Dtype> layer(layer_param);
- layer.SetUp(this->blob_bottom_vec_, &(this->blob_top_vec_));
- layer.Forward(this->blob_bottom_vec_, &(this->blob_top_vec_));
+ layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_);
+ layer.Forward(this->blob_bottom_vec_, this->blob_top_vec_);
// Now, check values
const Dtype* bottom_data = this->blob_bottom_->cpu_data();
const Dtype* top_data = this->blob_top_->cpu_data();
for (int j = 0; j < FLAGS_iterations; ++j) {
// Although Reshape should be essentially free, we include it here
// so that we will notice Reshape performance bugs.
- layers[i]->Reshape(bottom_vecs[i], &top_vecs[i]);
- layers[i]->Forward(bottom_vecs[i], &top_vecs[i]);
+ layers[i]->Reshape(bottom_vecs[i], top_vecs[i]);
+ layers[i]->Forward(bottom_vecs[i], top_vecs[i]);
}
LOG(INFO) << layername << "\tforward: " << timer.MilliSeconds() <<
" milliseconds.";
timer.Start();
for (int j = 0; j < FLAGS_iterations; ++j) {
layers[i]->Backward(top_vecs[i], bottom_need_backward[i],
- &bottom_vecs[i]);
+ bottom_vecs[i]);
}
LOG(INFO) << layername << "\tbackward: "
<< timer.MilliSeconds() << " milliseconds.";