Commit 8a49f7f1 authored by peterzhang2029

add epsilon in bn

Parent 08bc08d6
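For context, epsilon is the small constant added to the mini-batch variance inside the batch normalization formula so the normalization never divides by a (near) zero standard deviation; this commit makes it configurable instead of hard-coded to 1e-5. A minimal NumPy sketch of the forward computation the constant enters (illustrative only, not code from this patch):

```python
import numpy as np

def batch_norm_forward(x, gamma, beta, eps=1e-5):
    """Per-feature batch norm: y = gamma * (x - mean) / sqrt(var + eps) + beta."""
    mean = x.mean(axis=0)
    var = x.var(axis=0)
    x_hat = (x - mean) / np.sqrt(var + eps)  # eps keeps the denominator away from zero
    return gamma * x_hat + beta

x = np.random.randn(8, 4).astype(np.float32)
y = batch_norm_forward(x, gamma=np.ones(4), beta=np.zeros(4))
```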
@@ -41,6 +41,7 @@ bool BatchNormBaseLayer::init(const LayerMap& layerMap,
     useGlobalStats_ = config_.use_global_stats();
   }
   movingAvgFraction_ = config_.moving_average_fraction();
+  EPS = config_.epsilon();
   weight_.reset(new Weight(1, channels_, parameters_[0]));
   movingMean_.reset(new Weight(1, channels_, parameters_[1]));
...
@@ -94,6 +94,8 @@ protected:
   bool useGlobalStats_;
   // use to compute moving mean and variance.
   real movingAvgFraction_;
+  // Epsilon value used in the batch normalization formula.
+  real EPS;
 };
 }  // namespace paddle
@@ -22,8 +22,6 @@ namespace paddle {
 REGISTER_LAYER(batch_norm, BatchNormalizationLayer);

-const real BatchNormalizationLayer::EPS = 1E-5;
-
 bool BatchNormalizationLayer::init(const LayerMap& layerMap,
                                    const ParameterMap& parameterMap) {
   /* Initialize the basic parent class */
...
@@ -39,9 +39,6 @@ public:
   void backward(const UpdateCallback& callback = nullptr) override;

 protected:
-  /// Epsilon value used in the batch normalization formula.
-  static const real EPS;
-
   /// Load pre-calculated mean and std.
   void setMeanAndStd();
...
@@ -21,7 +21,7 @@ namespace paddle {
 REGISTER_LAYER(cudnn_batch_norm, CudnnBatchNormLayer);

-const double CudnnBatchNormLayer::EPS = 1E-5;
+const double CudnnBatchNormLayer::MIN_EPS = 1E-5;

 bool CudnnBatchNormLayer::init(const LayerMap& layerMap,
                                const ParameterMap& parameterMap) {
@@ -60,6 +60,7 @@ void CudnnBatchNormLayer::forward(PassType passType) {
   real* beta = biases_->getW()->getData();
   real* movingMean = movingMean_->getW()->getData();
   real* movingVar = movingVar_->getW()->getData();
+  EPS_ = std::max(MIN_EPS, static_cast<double>(EPS));

   if (!useGlobalStats_) {
     REGISTER_TIMER_INFO("CudnnBatchFwTimer", getName().c_str());
@@ -75,7 +76,7 @@ void CudnnBatchNormLayer::forward(PassType passType) {
                                1.0 - movingAvgFraction_,
                                movingMean,
                                movingVar,
-                               EPS,
+                               EPS_,
                                savedMean,
                                savedInvVar);
   } else {
@@ -90,7 +91,7 @@ void CudnnBatchNormLayer::forward(PassType passType) {
                               beta,
                               movingMean,
                               movingVar,
-                              EPS);
+                              EPS_);
     } else {
       // There is a limitation in cudnn library.
       // When the batch size is larger than 1024 in cuDNN v5.1,
@@ -101,7 +102,7 @@ void CudnnBatchNormLayer::forward(PassType passType) {
                         beta,
                         movingMean,
                         movingVar,
-                        EPS,
+                        EPS_,
                         batchSize,
                         channels_,
                         imageH_ * imageD_,
@@ -127,6 +128,7 @@ void CudnnBatchNormLayer::backward(const UpdateCallback& callback) {
   real* gamma = weight_->getW()->getData();
   real* savedMean = savedMean_->getData();
   real* savedInvVar = savedInvVar_->getData();
+  EPS_ = std::max(MIN_EPS, static_cast<double>(EPS));

   auto create = [](MatrixPtr& m, size_t h, size_t w, real** p) {
     Matrix::resizeOrCreate(m, h, w, false, true);
@@ -157,7 +159,7 @@ void CudnnBatchNormLayer::backward(const UpdateCallback& callback) {
                          gamma,
                          gammaGrad,
                          betaGrad,
-                         EPS,
+                         EPS_,
                          savedMean,
                          savedInvVar);
...
@@ -47,11 +47,14 @@ public:
 protected:
   /**
-   * Epsilon value used in the batch normalization formula.
    * Minimum allowed value is CUDNN_BN_MIN_EPSILON defined in cudnn.h.
    * Same epsilon value should be used in forward and backward functions.
    */
-  static const double EPS;
+  static const double MIN_EPS;
+
+  /// Epsilon value used in the batch normalization formula.
+  /// If EPS_ is smaller than MIN_EPS, MIN_EPS will be used.
+  double EPS_;

   /// Input/output tensor descriptor desc
   hl_tensor_descriptor ioDesc_;
...
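Note on the cuDNN path: cuDNN rejects epsilon values below CUDNN_BN_MIN_EPSILON, so CudnnBatchNormLayer keeps the compile-time lower bound MIN_EPS (1e-5) and clamps the configured value before every forward and backward call. A minimal Python sketch of the same selection logic (the names `configured_eps` and `CUDNN_MIN_EPS` are illustrative, not part of the patch):

```python
# Mirrors EPS_ = std::max(MIN_EPS, static_cast<double>(EPS)) in CudnnBatchNormLayer.
CUDNN_MIN_EPS = 1e-5  # stands in for CUDNN_BN_MIN_EPSILON / MIN_EPS

def effective_epsilon(configured_eps: float) -> float:
    """Return the epsilon actually handed to cuDNN: never below the cuDNN minimum."""
    return max(CUDNN_MIN_EPS, configured_eps)

assert effective_epsilon(1e-7) == 1e-5  # too small -> clamped up to the minimum
assert effective_epsilon(1e-3) == 1e-3  # large enough -> used as configured
```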
@@ -21,8 +21,6 @@ namespace paddle {
 REGISTER_LAYER(mkldnn_batch_norm, MKLDNNBatchNormLayer);

-const real MKLDNNBatchNormLayer::EPS = 1E-5;
-
 bool MKLDNNBatchNormLayer::init(const LayerMap& layerMap,
                                 const ParameterMap& parameterMap) {
   if (!MKLDNNLayer::init(layerMap, parameterMap)) {
@@ -50,6 +48,8 @@ bool MKLDNNBatchNormLayer::init(const LayerMap& layerMap,
     useGlobalStats_ = config_.use_global_stats();
   }
   movingAvgFraction_ = config_.moving_average_fraction();
+  EPS = config_.epsilon();
+
   VLOG(MKLDNN_BASE) << "--- " << (useGlobalStats_ ? "use" : "do not use")
                     << " --- global stats";
   VLOG(MKLDNN_BASE) << "Moving average fraction: " << movingAvgFraction_;
...
@@ -32,7 +32,8 @@ protected:
   std::shared_ptr<bn_fwd::primitive_desc> fwdPD_;

   // Epsilon value used in the batch normalization formula.
-  static const real EPS;
+  real EPS;

   // weight and bias in paddle
   std::unique_ptr<Weight> weight_;
   std::unique_ptr<Weight> biases_;
...
@@ -540,6 +540,10 @@ message LayerConfig {
   // for switch order layer
   optional ReshapeConfig reshape_conf = 59;
+
+  // for batch normalization layer
+  // small constant added to the variance to avoid numerical problems.
+  optional double epsilon = 60 [ default = 0.00001 ];
 }

 message EvaluatorConfig {
...
@@ -2434,6 +2434,7 @@ class BatchNormLayer(LayerBase):
                  bias=True,
                  img3D=False,
                  use_global_stats=True,
+                 epsilon=1e-5,
                  moving_average_fraction=0.9,
                  batch_norm_type=None,
                  mean_var_names=None,
@@ -2482,6 +2483,8 @@ class BatchNormLayer(LayerBase):
             self.config.use_global_stats = use_global_stats
         if moving_average_fraction is not None:
             self.config.moving_average_fraction = moving_average_fraction
+        if epsilon is not None:
+            self.config.epsilon = epsilon

         input_layer = self.get_input_layer(0)
         image_conf = self.config.inputs[0].image_conf
...
@@ -3036,6 +3036,7 @@ def batch_norm_layer(input,
                      param_attr=None,
                      layer_attr=None,
                      batch_norm_type=None,
+                     epsilon=1e-5,
                      moving_average_fraction=0.9,
                      use_global_stats=None,
                      mean_var_names=None):
@@ -3106,6 +3107,8 @@ def batch_norm_layer(input,
                              will use the mean and variance of the current batch
                              of test data.
     :type use_global_stats: bool | None.
+    :param epsilon: Small constant added to the variance to avoid numerical problems.
+    :type epsilon: float.
     :param moving_average_fraction: Factor used in the moving average computation.
                                     :math:`runningMean = newMean*(1-factor) + runningMean*factor`
     :type moving_average_fraction: float.
@@ -3123,6 +3126,9 @@ def batch_norm_layer(input,
     assert (batch_norm_type is None) or (batch_norm_type == "batch_norm") or \
            (batch_norm_type == "mkldnn_batch_norm") or \
            (batch_norm_type == "cudnn_batch_norm")
+
+    assert epsilon >= 1e-5, "Parameter epsilon must be no less than 1e-5."
+
     l = Layer(
         name=name,
         img3D=img3D,
@@ -3132,6 +3138,7 @@ def batch_norm_layer(input,
         type=LayerType.BATCH_NORM_LAYER,
         batch_norm_type=batch_norm_type,
         bias=ParamAttr.to_bias(bias_attr),
+        epsilon=epsilon,
         moving_average_fraction=moving_average_fraction,
         use_global_stats=use_global_stats,
         mean_var_names=mean_var_names,
...
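With this change, epsilon becomes a user-facing argument of batch_norm_layer and is forwarded into the new LayerConfig.epsilon proto field. A minimal usage sketch against the v1 trainer-config API (the surrounding layers and the import path are illustrative, not part of this commit):

```python
# Illustrative network config; assumes PaddlePaddle's v1 trainer_config_helpers API.
from paddle.trainer_config_helpers import *

data = data_layer(name='image', size=32 * 32 * 3)
conv = img_conv_layer(input=data, filter_size=3, num_channels=3,
                      num_filters=16, act=LinearActivation())

# epsilon is written into LayerConfig.epsilon (proto field 60, default 1e-5);
# the assert added in this commit rejects values smaller than 1e-5.
norm = batch_norm_layer(input=conv, act=ReluActivation(), epsilon=1e-4)
```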