Commit 5502abb9 authored by peterzhang2029

refine docstrings

Parent 9580c450
@@ -21,8 +21,6 @@ namespace paddle {

 REGISTER_LAYER(cudnn_batch_norm, CudnnBatchNormLayer);

-const double CudnnBatchNormLayer::MIN_EPS = 1E-5;
-
 bool CudnnBatchNormLayer::init(const LayerMap& layerMap,
                                const ParameterMap& parameterMap) {
   /* Initialize the basic parent class */
@@ -61,14 +59,8 @@ void CudnnBatchNormLayer::forward(PassType passType) {
   real* movingMean = movingMean_->getW()->getData();
   real* movingVar = movingVar_->getW()->getData();

-  /**
-   * If epsilon_ equals 1e-5 and eps_ is assigned the value of
-   * static_cast<double>(epsilon_), a CUDNN_STATUS_BAD_PARAM error
-   * will occur because the eps_ value is less than
-   * CUDNN_BN_MIN_EPSILON.
-   * The following code ensures that eps_ meets the requirement.
-   */
-  eps_ = std::max(MIN_EPS, static_cast<double>(epsilon_));
+  // cuDNN does not allow an epsilon value less than CUDNN_BN_MIN_EPSILON.
+  eps_ = std::max(CUDNN_BN_MIN_EPSILON, static_cast<double>(epsilon_));

   if (!useGlobalStats_) {
     REGISTER_TIMER_INFO("CudnnBatchFwTimer", getName().c_str());
@@ -137,14 +129,8 @@ void CudnnBatchNormLayer::backward(const UpdateCallback& callback) {
   real* savedMean = savedMean_->getData();
   real* savedInvVar = savedInvVar_->getData();

-  /**
-   * If epsilon_ equals 1e-5 and eps_ is assigned the value of
-   * static_cast<double>(epsilon_), a CUDNN_STATUS_BAD_PARAM error
-   * will occur because the eps_ value is less than
-   * CUDNN_BN_MIN_EPSILON.
-   * The following code ensures that eps_ meets the requirement.
-   */
-  eps_ = std::max(MIN_EPS, static_cast<double>(epsilon_));
+  // cuDNN does not allow an epsilon value less than CUDNN_BN_MIN_EPSILON.
+  eps_ = std::max(CUDNN_BN_MIN_EPSILON, static_cast<double>(epsilon_));

   auto create = [](MatrixPtr& m, size_t h, size_t w, real** p) {
     Matrix::resizeOrCreate(m, h, w, false, true);
......
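For context, the clamping pattern introduced above can be exercised in isolation. The sketch below is illustrative only: CUDNN_BN_MIN_EPSILON comes from cudnn.h (1e-5 in the cuDNN versions this layer targets), while the helper name clampBatchNormEpsilon is hypothetical and not part of the Paddle sources.

#include <algorithm>

#include <cudnn.h>  // defines CUDNN_BN_MIN_EPSILON

// cuDNN's batch normalization routines reject an epsilon below
// CUDNN_BN_MIN_EPSILON with CUDNN_STATUS_BAD_PARAM, so a user-supplied
// value must be clamped before every forward and backward call.
// Hypothetical helper, for illustration only.
static double clampBatchNormEpsilon(double epsilon) {
  return std::max(CUDNN_BN_MIN_EPSILON, epsilon);
}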
@@ -14,6 +14,7 @@ limitations under the License. */

 #pragma once

+#include <cudnn.h>
 #include "BatchNormBaseLayer.h"
 #include "Layer.h"
 #include "paddle/utils/Stat.h"
@@ -46,9 +47,6 @@ public:
   void backward(const UpdateCallback& callback = nullptr) override;

 protected:
-  /// Minimum allowed value is CUDNN_BN_MIN_EPSILON defined in cudnn.h.
-  static const double MIN_EPS;
-
   /// Epsilon value used in the batch normalization formula.
   /// The same epsilon value should be used in the forward and backward passes.
   double eps_;
......
@@ -542,7 +542,7 @@ message LayerConfig {
   optional ReshapeConfig reshape_conf = 59;

   // for batch normalization layer
-  // small constant added to the variance to avoid numerical problems.
+  // The small constant added to the variance to improve numeric stability.
   optional double epsilon = 60 [ default = 0.00001 ];
 }
......
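For reference, the epsilon documented here sits under the square root of the standard batch normalization transform, where \mu and \sigma^2 are the batch mean and variance and \gamma, \beta are the learned scale and shift; a small positive epsilon keeps the denominator well away from zero when the batch variance is tiny:

    y = \gamma \cdot \frac{x - \mu}{\sqrt{\sigma^2 + \epsilon}} + \beta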
@@ -2483,8 +2483,9 @@ class BatchNormLayer(LayerBase):
         self.config.use_global_stats = use_global_stats
         if moving_average_fraction is not None:
             self.config.moving_average_fraction = moving_average_fraction
-        if epsilon is not None:
-            self.config.epsilon = epsilon
+
+        assert epsilon >= 1e-5, "epsilon must be no less than 1e-5."
+        self.config.epsilon = epsilon

         input_layer = self.get_input_layer(0)
         image_conf = self.config.inputs[0].image_conf
......
@@ -3107,7 +3107,7 @@ def batch_norm_layer(input,
                                  will use the mean and variance of the current batch
                                  of test data.
     :type use_global_stats: bool | None.
-    :param epsilon: Small constant added to the variance to avoid numerical problems.
+    :param epsilon: The small constant added to the variance to improve numeric stability.
     :type epsilon: float.
     :param moving_average_fraction: Factor used in the moving average computation.
                                     :math:`runningMean = newMean*(1-factor) + runningMean*factor`
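A worked step of the moving-average formula above, with assumed values chosen purely for illustration (factor = 0.9, current runningMean = 0.5, new batch mean = 1.0):

    runningMean = 1.0 \cdot (1 - 0.9) + 0.5 \cdot 0.9 = 0.10 + 0.45 = 0.55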
@@ -3127,8 +3127,6 @@ def batch_norm_layer(input,
             (batch_norm_type == "mkldnn_batch_norm") or \
             (batch_norm_type == "cudnn_batch_norm")

-    assert epsilon >= 1e-5, "epsilon must be no less than 1e-5."
-
     l = Layer(
         name=name,
         img3D=img3D,
......