From bf08e5d985a39f1bb4d9085c042cdc78de8fbecb Mon Sep 17 00:00:00 2001
From: dangqingqing
Date: Mon, 7 Aug 2017 19:18:40 +0800
Subject: [PATCH] modify code comments.

---
 paddle/cuda/include/hl_batch_norm.h           | 24 +++++++++----------
 paddle/gserver/layers/CudnnBatchNormLayer.cpp |  4 ++--
 2 files changed, 13 insertions(+), 15 deletions(-)

diff --git a/paddle/cuda/include/hl_batch_norm.h b/paddle/cuda/include/hl_batch_norm.h
index e1fea131634..afc5e0b2dea 100644
--- a/paddle/cuda/include/hl_batch_norm.h
+++ b/paddle/cuda/include/hl_batch_norm.h
@@ -20,20 +20,18 @@ limitations under the License. */
 /**
  * @brief batch norm inferece.
  *
- * @param[in]  input     input data.
- * @param[out] output    output data.
- * @param[in]  scale     batch normalization scale parameter (in original
- *                       paper scale is referred to as gamma).
- * @param[in]  bias      batch normalization bias parameter (in original
- *                       paper scale is referred to as beta).
+ * @param[in]  input         input data.
+ * @param[out] output        output data.
+ * @param[in]  scale         batch normalization scale parameter (in original
+ *                           paper scale is referred to as gamma).
+ * @param[in]  bias          batch normalization bias parameter (in original
+ *                           paper scale is referred to as beta).
  * @param[in]  estimatedMean
- * @param[in]  estimatedVar  It is suggested that resultRunningMean,
- *                           resultRunningVariance from the
- *                           cudnnBatchNormalizationForwardTraining call
- *                           accumulated during the training phase are passed
- *                           as inputs here.
- * @param[in]  epsilon   Epsilon value used in the batch
- *                       normalization formula.
+ * @param[in]  estimatedVar  The moving mean and variance
+ *                           accumulated during the training phase are passed
+ *                           as inputs here.
+ * @param[in]  epsilon       Epsilon value used in the batch
+ *                           normalization formula.
  */
 extern void hl_batch_norm_cuda_inference(const real* input,
                                          real* output,
diff --git a/paddle/gserver/layers/CudnnBatchNormLayer.cpp b/paddle/gserver/layers/CudnnBatchNormLayer.cpp
index d99b50385e6..cc2cc21cdfd 100644
--- a/paddle/gserver/layers/CudnnBatchNormLayer.cpp
+++ b/paddle/gserver/layers/CudnnBatchNormLayer.cpp
@@ -81,8 +81,8 @@ void CudnnBatchNormLayer::forward(PassType passType) {
   } else {
     // used movingMean and movingVar in testing
     if (batchSize > 1024) {
-      // when batchSize is larger than 1024, there is a bug
-      // in cudnn library.
+      // there is a bug in cudnn library when the batch size
+      // is larger than 1024.
       hl_batch_norm_cuda_inference(input,
                                    output,
                                    gamma,
--
GitLab
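
For reference, the inference-mode formula that scale, bias, estimatedMean, estimatedVar, and epsilon feed into is y = scale * (x - mean) / sqrt(var + epsilon) + bias, computed per channel with the moving statistics accumulated during training. Below is a minimal standalone C++ sketch of that computation; it is illustrative only, assumes NCHW layout, and does not reproduce the actual hl_batch_norm_cuda_inference signature (all names in it are hypothetical).

#include <cmath>
#include <cstddef>

// Hypothetical CPU reference of batch-norm inference over NCHW data.
// scale/bias/mean/var hold one value per channel; epsilon avoids
// division by zero when a channel's variance is very small.
void batch_norm_infer_reference(const float* input, float* output,
                                const float* scale, const float* bias,
                                const float* mean, const float* var,
                                double epsilon,
                                size_t batch, size_t channels,
                                size_t height, size_t width) {
  const size_t plane = height * width;
  for (size_t n = 0; n < batch; ++n) {
    for (size_t c = 0; c < channels; ++c) {
      // Precompute the per-channel normalization factor 1 / sqrt(var + eps).
      const float inv_std =
          1.0f / std::sqrt(static_cast<float>(var[c] + epsilon));
      const float* in = input + (n * channels + c) * plane;
      float* out = output + (n * channels + c) * plane;
      for (size_t i = 0; i < plane; ++i) {
        // y = gamma * (x - mean) / sqrt(var + eps) + beta
        out[i] = scale[c] * (in[i] - mean[c]) * inv_std + bias[c];
      }
    }
  }
}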