Merge pull request #2466 from ducha-aiki/mvn-less
Remove unnecessary variance computation from backward in MVN layer
jeffdonahue committed May 16, 2015
2 parents e8d93cb + b866d14 commit 352aef4
Showing 4 changed files with 7 additions and 43 deletions.
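
In short (as read from the diff below, not from the commit message itself): with normalize_variance on, the forward pass scales each row to zero mean and unit variance, roughly

  y = (x - EX) / (sqrt(var(X)) + eps),  where var(X) = E(X^2) - (EX)^2,

and leaves that denominator in the variance_ blob. The backward pass used to recompute the same quantity from bottom_data; this commit drops the recomputation, so backward reuses the variance_ values already filled in by forward. It also replaces the hard-coded 1e-10 epsilon with an eps field in MVNParameter (default 1e-9), read once in Reshape into the new eps_ member.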
1 change: 1 addition & 0 deletions include/caffe/common_layers.hpp
@@ -295,6 +295,7 @@ class MVNLayer : public Layer<Dtype> {
 
   /// sum_multiplier is used to carry out sum using BLAS
   Blob<Dtype> sum_multiplier_;
+  Dtype eps_;
 };
 
 /*
23 changes: 2 additions & 21 deletions src/caffe/layers/mvn_layer.cpp
@@ -22,6 +22,7 @@ void MVNLayer<Dtype>::Reshape(const vector<Blob<Dtype>*>& bottom,
       bottom[0]->height(), bottom[0]->width());
   Dtype* multiplier_data = sum_multiplier_.mutable_cpu_data();
   caffe_set(sum_multiplier_.count(), Dtype(1), multiplier_data);
+  eps_ = this->layer_param_.mvn_param().eps();
 }
 
 template <typename Dtype>
@@ -36,7 +37,6 @@ void MVNLayer<Dtype>::Forward_cpu(const vector<Blob<Dtype>*>& bottom,
     num = bottom[0]->num() * bottom[0]->channels();
 
   int dim = bottom[0]->count() / num;
-  Dtype eps = 1e-10;
 
   if (this->layer_param_.mvn_param().normalize_variance()) {
     // put the squares of bottom into temp_
@@ -66,7 +66,7 @@ void MVNLayer<Dtype>::Forward_cpu(const vector<Blob<Dtype>*>& bottom,
     caffe_powx(variance_.count(), variance_.cpu_data(), Dtype(0.5),
         variance_.mutable_cpu_data());
 
-    caffe_add_scalar(variance_.count(), eps, variance_.mutable_cpu_data());
+    caffe_add_scalar(variance_.count(), eps_, variance_.mutable_cpu_data());
 
     caffe_cpu_gemm<Dtype>(CblasNoTrans, CblasNoTrans, num, dim, 1, 1.,
         variance_.cpu_data(), sum_multiplier_.cpu_data(), 0.,
@@ -102,7 +102,6 @@ void MVNLayer<Dtype>::Backward_cpu(const vector<Blob<Dtype>*>& top,
     num = bottom[0]->num() * bottom[0]->channels();
 
   int dim = bottom[0]->count() / num;
-  Dtype eps = 1e-10;
 
   if (this->layer_param_.mvn_param().normalize_variance()) {
     caffe_mul(temp_.count(), top_data, top_diff, bottom_diff);
@@ -125,24 +124,6 @@ void MVNLayer<Dtype>::Backward_cpu(const vector<Blob<Dtype>*>& top,
     // put the squares of bottom into temp_
     caffe_powx(temp_.count(), bottom_data, Dtype(2),
         temp_.mutable_cpu_data());
-
-    // computes variance using var(X) = E(X^2) - (EX)^2
-    caffe_cpu_gemv<Dtype>(CblasNoTrans, num, dim, 1. / dim, bottom_data,
-        sum_multiplier_.cpu_data(), 0., mean_.mutable_cpu_data());  // EX
-    caffe_cpu_gemv<Dtype>(CblasNoTrans, num, dim, 1. / dim, temp_.cpu_data(),
-        sum_multiplier_.cpu_data(), 0.,
-        variance_.mutable_cpu_data());  // E(X^2)
-    caffe_powx(mean_.count(), mean_.cpu_data(), Dtype(2),
-        temp_.mutable_cpu_data());  // (EX)^2
-    caffe_sub(mean_.count(), variance_.cpu_data(), temp_.cpu_data(),
-        variance_.mutable_cpu_data());  // variance
-
-    // normalize variance
-    caffe_powx(variance_.count(), variance_.cpu_data(), Dtype(0.5),
-        variance_.mutable_cpu_data());
-
-    caffe_add_scalar(variance_.count(), eps, variance_.mutable_cpu_data());
-
     caffe_cpu_gemm<Dtype>(CblasNoTrans, CblasNoTrans, num, dim, 1, 1.,
         variance_.cpu_data(), sum_multiplier_.cpu_data(), 0.,
         temp_.mutable_cpu_data());
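For readers who want the arithmetic behind the BLAS calls spelled out, here is a minimal standalone sketch of per-row mean-variance normalization with an epsilon. It is illustrative only: the function and names below are invented for this note and are not the Caffe implementation, which operates on whole blobs through caffe_cpu_gemv, caffe_powx, caffe_add_scalar and friends.

#include <cmath>
#include <cstdio>
#include <vector>

// Normalize one row: y = (x - E[x]) / (sqrt(var(X)) + eps).
void mvn_row(const std::vector<float>& x, float eps, std::vector<float>* y) {
  const int dim = static_cast<int>(x.size());
  float ex = 0.f, ex2 = 0.f;
  for (int i = 0; i < dim; ++i) {
    ex += x[i];           // accumulate E[x]
    ex2 += x[i] * x[i];   // accumulate E[x^2]
  }
  ex /= dim;
  ex2 /= dim;
  const float var = ex2 - ex * ex;           // var(X) = E(X^2) - (EX)^2
  const float denom = std::sqrt(var) + eps;  // take sqrt, then add eps
  y->resize(dim);
  for (int i = 0; i < dim; ++i) {
    (*y)[i] = (x[i] - ex) / denom;
  }
}

int main() {
  const std::vector<float> x = {1.f, 2.f, 3.f, 4.f};
  std::vector<float> y;
  mvn_row(x, 1e-9f, &y);
  for (float v : y) std::printf("%g ", v);  // roughly -1.34 -0.45 0.45 1.34
  std::printf("\n");
  return 0;
}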
23 changes: 1 addition & 22 deletions src/caffe/layers/mvn_layer.cu
@@ -36,8 +36,6 @@ void MVNLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
     caffe_gpu_sub(mean_.count(), variance_.gpu_data(), temp_.gpu_data(),
         variance_.mutable_gpu_data());  // variance
 
-    Dtype eps = 1e-10;
-
     // do mean and variance normalization
     // subtract mean
     caffe_gpu_gemm<Dtype>(CblasNoTrans, CblasNoTrans, num, dim, 1, -1.,
@@ -50,7 +48,7 @@ void MVNLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
     caffe_gpu_powx(variance_.count(), variance_.gpu_data(), Dtype(0.5),
         variance_.mutable_gpu_data());
 
-    caffe_gpu_add_scalar(variance_.count(), eps, variance_.mutable_gpu_data());
+    caffe_gpu_add_scalar(variance_.count(), eps_, variance_.mutable_gpu_data());
 
     caffe_gpu_gemm<Dtype>(CblasNoTrans, CblasNoTrans, num, dim, 1, 1.,
         variance_.gpu_data(), sum_multiplier_.gpu_data(), 0.,
@@ -87,8 +85,6 @@ void MVNLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
 
   int dim = bottom[0]->count() / num;
 
-  Dtype eps = 1e-10;
-
   if (this->layer_param_.mvn_param().normalize_variance()) {
     caffe_gpu_mul(temp_.count(), top_data, top_diff, bottom_diff);
     caffe_gpu_gemv<Dtype>(CblasNoTrans, num, dim, 1., bottom_diff,
@@ -111,23 +107,6 @@ void MVNLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
     caffe_gpu_powx(temp_.count(), bottom_data, Dtype(2),
         temp_.mutable_gpu_data());
 
-    // computes variance using var(X) = E(X^2) - (EX)^2
-    caffe_gpu_gemv<Dtype>(CblasNoTrans, num, dim, 1. / dim, bottom_data,
-        sum_multiplier_.gpu_data(), 0., mean_.mutable_gpu_data());  // EX
-    caffe_gpu_gemv<Dtype>(CblasNoTrans, num, dim, 1. / dim, temp_.gpu_data(),
-        sum_multiplier_.gpu_data(), 0.,
-        variance_.mutable_gpu_data());  // E(X^2)
-    caffe_gpu_powx(mean_.count(), mean_.gpu_data(), Dtype(2),
-        temp_.mutable_gpu_data());  // (EX)^2
-    caffe_gpu_sub(mean_.count(), variance_.gpu_data(), temp_.gpu_data(),
-        variance_.mutable_gpu_data());  // variance
-
-    // normalize variance
-    caffe_gpu_powx(variance_.count(), variance_.gpu_data(), Dtype(0.5),
-        variance_.mutable_gpu_data());
-
-    caffe_gpu_add_scalar(variance_.count(), eps, variance_.mutable_gpu_data());
-
     caffe_gpu_gemm<Dtype>(CblasNoTrans, CblasNoTrans, num, dim, 1, 1.,
         variance_.gpu_data(), sum_multiplier_.gpu_data(), 0.,
         temp_.mutable_gpu_data());
3 changes: 3 additions & 0 deletions src/caffe/proto/caffe.proto
@@ -633,6 +633,9 @@ message MVNParameter {
 
   // This parameter can be set to true to perform DNN-like MVN
   optional bool across_channels = 2 [default = false];
+
+  // Epsilon for not dividing by zero while normalizing variance
+  optional float eps = 3 [default = 1e-9];
 }
 
 // Message that stores parameters used by PoolingLayer
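With the new field in place, the epsilon can be overridden from a network definition. A hypothetical prototxt sketch (layer and blob names are invented; eps is shown at its new default and may be omitted):

layer {
  name: "mvn1"
  type: "MVN"
  bottom: "conv1"
  top: "mvn1"
  mvn_param {
    normalize_variance: true
    across_channels: false
    eps: 1e-9
  }
}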
