Commit a52ee65

Cleanup batch norm layer, include global stats computation

cdoersch committed Oct 22, 2015
1 parent 2f05b03 commit a52ee65

Showing 7 changed files with 486 additions and 632 deletions.
4 changes: 2 additions & 2 deletions examples/cifar10/cifar10_full_sigmoid_train_test.prototxt
@@ -176,10 +176,10 @@ layer {
top: "ip1"
param {
lr_mult: 1
decay_mult: 250
decay_mult: 0
}
param {
lr_mult: 0.2
lr_mult: 2
decay_mult: 0
}
inner_product_param {
90 changes: 23 additions & 67 deletions examples/cifar10/cifar10_full_sigmoid_train_test_bn.prototxt
@@ -12,7 +12,7 @@ layer {
   }
   data_param {
     source: "examples/cifar10/cifar10_train_lmdb"
-    batch_size: 111
+    batch_size: 100
     backend: LMDB
   }
 }
@@ -41,21 +41,16 @@ layer {
   param {
     lr_mult: 1
   }
-  param {
-    lr_mult: 2
-  }
   convolution_param {
     num_output: 32
     pad: 2
     kernel_size: 5
     stride: 1
+    bias_term: false
     weight_filler {
       type: "gaussian"
       std: 0.0001
     }
-    bias_filler {
-      type: "constant"
-    }
   }
 }
 layer {
@@ -75,23 +70,14 @@ layer {
type: "BatchNorm"
bottom: "pool1"
top: "bn1"
bn_param {
scale_filler {
type: "constant"
value: 1
}
shift_filler {
type: "constant"
value: 0.001
}
param {
lr_mult: 0
}
param {
lr_mult: 1.00001
decay_mult: 0
lr_mult: 0
}
param {
lr_mult: 1.00001
decay_mult: 0
lr_mult: 0
}
}

@@ -110,50 +96,35 @@ layer {
   param {
     lr_mult: 1
   }
-  param {
-    lr_mult: 2
-  }
   convolution_param {
     num_output: 32
     pad: 2
     kernel_size: 5
     stride: 1
+    bias_term: false
     weight_filler {
       type: "gaussian"
       std: 0.01
     }
-    bias_filler {
-      type: "constant"
-    }
   }
 }

 layer {
   name: "bn2"
   type: "BatchNorm"
   bottom: "conv2"
   top: "bn2"
-  bn_param {
-    scale_filler {
-      type: "constant"
-      value: 1
-    }
-    shift_filler {
-      type: "constant"
-      value: 0.001
-    }
-  }
+  param {
+    lr_mult: 0
+  }
   param {
-    lr_mult: 1.00001
-    decay_mult: 0
+    lr_mult: 0
   }
   param {
-    lr_mult: 1.00001
-    decay_mult: 0
+    lr_mult: 0
   }
 }

 layer {
   name: "Sigmoid2"
   type: "Sigmoid"
@@ -176,53 +147,38 @@ layer {
type: "Convolution"
bottom: "pool2"
top: "conv3"
param {
lr_mult: 1
}
convolution_param {
num_output: 64
pad: 2
kernel_size: 5
stride: 1
bias_term: false
weight_filler {
type: "gaussian"
std: 0.01
}
bias_filler {
type: "constant"
}
}
param {
lr_mult: 1
}
param {
lr_mult: 1
}

}


layer {
name: "bn3"
type: "BatchNorm"
bottom: "conv3"
top: "bn3"
bn_param {
scale_filler {
type: "constant"
value: 1
}
shift_filler {
type: "constant"
value: 0.001
}
param {
lr_mult: 0
}
param {
lr_mult: 1.00001
decay_mult: 0
lr_mult: 0
}
param {
lr_mult: 1.00001
decay_mult: 0
lr_mult: 0
}
}

layer {
name: "Sigmoid3"
type: "Sigmoid"
@@ -248,10 +204,10 @@ layer {
top: "ip1"
param {
lr_mult: 1
decay_mult: 250
decay_mult: 1
}
param {
lr_mult: 0.2
lr_mult: 1
decay_mult: 0
}
inner_product_param {
Expand Down
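As a readability aid, assembled from the hunks above (not an additional hunk): each batch-norm block in this prototxt now reads

layer {
  name: "bn1"
  type: "BatchNorm"
  bottom: "pool1"
  top: "bn1"
  param {
    lr_mult: 0
  }
  param {
    lr_mult: 0
  }
  param {
    lr_mult: 0
  }
}

The three zeroed param blocks cover the layer's three parameter blobs (running mean, running variance, and a moving-average scaling factor), which hold accumulated statistics rather than learned weights; the header documentation below makes this lr_mult: 0 requirement explicit, and the use_global_stats toggle it mentions is driven by the layer's new proto options (changed in one of the files not shown here). The companion bias_term: false on each Convolution drops a bias that batch normalization's mean subtraction would cancel anyway.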
64 changes: 41 additions & 23 deletions include/caffe/common_layers.hpp
@@ -79,21 +79,46 @@ class ArgMaxLayer : public Layer<Dtype> {
 };

 /**
- * @brief Batch Normalization per-channel with scale & shift linear transform.
- *
- */
+ * @brief Normalizes the input to have 0-mean and/or unit (1) variance across
+ *        the batch.
+ *
+ * This layer computes Batch Normalization as described in [1]. For
+ * each channel in the data (i.e. axis 1), it subtracts the mean and divides
+ * by the variance, where both statistics are computed across both spatial
+ * dimensions and across the different examples in the batch.
+ *
+ * By default, during training the network computes global mean/variance
+ * statistics via a running average, which are then used at test time to
+ * allow deterministic outputs for each input. You can manually toggle
+ * whether the network is accumulating or using the statistics via the
+ * use_global_stats option. IMPORTANT: for this feature to work, you MUST
+ * set the learning rate to zero for all three parameter blobs, i.e.,
+ * param {lr_mult: 0} three times in the layer definition.
+ *
+ * Note that the original paper also included a per-channel learned bias and
+ * scaling factor. It is possible (though a bit cumbersome) to implement
+ * this in caffe using a single-channel DummyDataLayer filled with zeros,
+ * followed by a Convolution layer with output the same size as the current.
+ * This produces a channel-specific value that can be added or multiplied by
+ * the BatchNorm layer's output.
+ *
+ * [1] S. Ioffe and C. Szegedy, "Batch Normalization: Accelerating Deep
+ *     Network Training by Reducing Internal Covariate Shift." arXiv preprint
+ *     arXiv:1502.03167 (2015).
+ *
+ * TODO(dox): thorough documentation for Forward, Backward, and proto params.
+ */
 template <typename Dtype>
 class BatchNormLayer : public Layer<Dtype> {
  public:
   explicit BatchNormLayer(const LayerParameter& param)
       : Layer<Dtype>(param) {}
   virtual void LayerSetUp(const vector<Blob<Dtype>*>& bottom,
       const vector<Blob<Dtype>*>& top);
   virtual void Reshape(const vector<Blob<Dtype>*>& bottom,
       const vector<Blob<Dtype>*>& top);

-  virtual inline const char* type() const { return "BN"; }
+  virtual inline const char* type() const { return "BatchNorm"; }
   virtual inline int ExactNumBottomBlobs() const { return 1; }
   virtual inline int ExactNumTopBlobs() const { return 1; }
@@ -105,26 +130,19 @@ class BatchNormLayer : public Layer<Dtype> {
   virtual void Backward_cpu(const vector<Blob<Dtype>*>& top,
       const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom);
   virtual void Backward_gpu(const vector<Blob<Dtype>*>& top,
       const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom);

-  // spatial mean & variance
-  Blob<Dtype> spatial_mean_, spatial_variance_;
-  // batch mean & variance
-  Blob<Dtype> batch_mean_, batch_variance_;
-  // buffer blob
-  Blob<Dtype> buffer_blob_;
-
-  Blob<Dtype> x_norm_;
-  // x_sum_multiplier is used to carry out sum using BLAS
-  Blob<Dtype> spatial_sum_multiplier_, batch_sum_multiplier_;
-
-  // dimension
-  int N_;
-  int C_;
-  int H_;
-  int W_;
-  // eps
-  Dtype var_eps_;
+  Blob<Dtype> mean_, variance_, temp_, x_norm_;
+  bool use_global_stats_;
+  Dtype moving_average_fraction_;
+  int channels_;
+  Dtype eps_;
+
+  // extra temporary variables used to carry out sums/broadcasting via BLAS
+  Blob<Dtype> batch_sum_multiplier_;
+  Blob<Dtype> num_by_chans_;
+  Blob<Dtype> spatial_sum_multiplier_;
 };

 /**
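The per-channel learned bias workaround described in the new documentation can be sketched in prototxt. This is only an illustration of the idea, not part of this commit; the layer names, the 100x1x16x16 shape (matching pool1 of the CIFAR-10 net above), and the Eltwise combination step are assumptions.

# a single-channel blob of zeros: DummyData's default filler is constant 0
layer {
  name: "bias_seed"
  type: "DummyData"
  top: "bias_seed"
  dummy_data_param {
    shape { dim: 100 dim: 1 dim: 16 dim: 16 }
  }
}
# with an all-zero input, the convolution's output is exactly its learned
# bias, one value per output channel, broadcast over the spatial dimensions
layer {
  name: "bn1_bias"
  type: "Convolution"
  bottom: "bias_seed"
  top: "bn1_bias"
  convolution_param {
    num_output: 32   # match the BatchNorm output's channel count
    kernel_size: 1
  }
}
# add the channel-specific value to the BatchNorm output
layer {
  name: "bn1_biased"
  type: "Eltwise"
  bottom: "bn1"
  bottom: "bn1_bias"
  top: "bn1_biased"
  eltwise_param { operation: SUM }
}

A multiplicative per-channel scale would follow the same pattern with operation: PROD, seeding the convolution's bias_filler at 1 so training starts from an identity scaling.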