From f5912d0c7ee3f73183e9801fd0bbcfe48a5d22e3 Mon Sep 17 00:00:00 2001
From: Nyakku Shigure
Date: Mon, 31 Oct 2022 11:15:50 +0800
Subject: [PATCH] fix typos for `True` and `False` (#47477)

* fix typo `Fasle`/`Flase` -> `False`

* fix typo `Ture` -> `True`

---
 paddle/fluid/framework/naive_executor.h            |  2 +-
 paddle/fluid/memory/allocation/allocator_facade.cc |  2 +-
 paddle/fluid/operators/detection/yolo_box_op.cc    |  2 +-
 .../fluid/operators/fused/fused_dropout_helper.h   |  2 +-
 paddle/fluid/operators/select_op_helper.h          |  2 +-
 .../fluid/operators/tensorrt/tensorrt_engine_op.h  |  2 +-
 paddle/fluid/operators/unique_op.cc                |  2 +-
 python/paddle/fluid/contrib/sparsity/asp.py        |  2 +-
 .../dygraph/dygraph_to_static/convert_operators.py |  2 +-
 python/paddle/fluid/dygraph/nn.py                  |  2 +-
 python/paddle/fluid/layers/control_flow.py         |  6 +++---
 python/paddle/fluid/layers/nn.py                   |  2 +-
 python/paddle/nn/layer/norm.py                     | 14 +++++++-------
 python/paddle/sparse/nn/layer/norm.py              |  4 ++--
 python/paddle/vision/ops.py                        |  2 +-
 15 files changed, 24 insertions(+), 24 deletions(-)

diff --git a/paddle/fluid/framework/naive_executor.h b/paddle/fluid/framework/naive_executor.h
index 02b2249dea..8ca3f5997a 100644
--- a/paddle/fluid/framework/naive_executor.h
+++ b/paddle/fluid/framework/naive_executor.h
@@ -53,7 +53,7 @@ class NaiveExecutor {
                bool with_feed_fetch_ops);
 
   // Create variables before head.
-  // Create parameters if persistable is ture, or create the temporary variables
+  // Create parameters if persistable is true, or create the temporary variables
   // instead.
   void CreateVariables(const ProgramDesc& desc,
                        int block_id,
diff --git a/paddle/fluid/memory/allocation/allocator_facade.cc b/paddle/fluid/memory/allocation/allocator_facade.cc
index fb60d9110a..6bd0876787 100644
--- a/paddle/fluid/memory/allocation/allocator_facade.cc
+++ b/paddle/fluid/memory/allocation/allocator_facade.cc
@@ -238,7 +238,7 @@ class AllocatorFacadePrivate {
   // releate to non-default stream (i.e., the stream users pass in). The
   // default stream Allocator is built in the structure of
   // AllocatorFacadePrivate, while the non-default stream is build in a
-  // manner in GetAllocator function with 'create_if_not_found = ture'.
+  // manner in GetAllocator function with 'create_if_not_found = true'.
   // We make special treatment for the default stream for performance
   // reasons. Since most Alloc calls are for default stream in
   // application, treating it separately can avoid lots of overhead of
diff --git a/paddle/fluid/operators/detection/yolo_box_op.cc b/paddle/fluid/operators/detection/yolo_box_op.cc
index 257347f663..fbf4b55dfe 100644
--- a/paddle/fluid/operators/detection/yolo_box_op.cc
+++ b/paddle/fluid/operators/detection/yolo_box_op.cc
@@ -237,7 +237,7 @@ class YoloBoxOpMaker : public framework::OpProtoAndCheckerMaker {
       .. math::
 
          score_{conf} = \begin{case}
-            obj, \text{if } iou_aware == flase \\
+            obj, \text{if } iou_aware == false \\
             obj^{1 - iou_aware_factor} * iou^{iou_aware_factor}, \text{otherwise}
          \end{case}
 
diff --git a/paddle/fluid/operators/fused/fused_dropout_helper.h b/paddle/fluid/operators/fused/fused_dropout_helper.h
index 5d6dd1a5bb..3230854284 100644
--- a/paddle/fluid/operators/fused/fused_dropout_helper.h
+++ b/paddle/fluid/operators/fused/fused_dropout_helper.h
@@ -28,7 +28,7 @@ namespace operators {
  * Support two Dropouts in the use senarieo.
  * This warpper can be used in FFN op.
 * The DropoutParam will be used in the fused_dropout_act_bias,
- * fused_residual_dropout_bias(pre_layer_norm=ture) or
+ * fused_residual_dropout_bias(pre_layer_norm=true) or
 * fused_layernorm_residual_dropout_bias(pre_layer_norm=false).
 */
 struct DropoutParam {
diff --git a/paddle/fluid/operators/select_op_helper.h b/paddle/fluid/operators/select_op_helper.h
index 46ef90c1a9..ffab83e4e7 100644
--- a/paddle/fluid/operators/select_op_helper.h
+++ b/paddle/fluid/operators/select_op_helper.h
@@ -37,7 +37,7 @@ inline int GetBranchNumber(const phi::DenseTensor &mask) {
   if (platform::is_cpu_place(mask.place())) {
     return mask.data<int>()[0];
   }
-  // when platform::is_gpu_place(mask.place()) is ture
+  // when platform::is_gpu_place(mask.place()) is true
   std::unique_ptr<phi::DenseTensor> cpu_mask{new phi::DenseTensor()};
 #if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
   framework::TensorCopySync(mask, platform::CPUPlace(), cpu_mask.get());
diff --git a/paddle/fluid/operators/tensorrt/tensorrt_engine_op.h b/paddle/fluid/operators/tensorrt/tensorrt_engine_op.h
index eea337d93f..8096acc0a8 100644
--- a/paddle/fluid/operators/tensorrt/tensorrt_engine_op.h
+++ b/paddle/fluid/operators/tensorrt/tensorrt_engine_op.h
@@ -269,7 +269,7 @@ class TensorRTEngineOp : public framework::OperatorBase {
       if (param_names_.count(x)) continue;
       runtime_input_names_.emplace_back(x);
     }
-    // calibration_mode is ture represents we need to
+    // calibration_mode is true represents we need to
     // generate the calibration table data.
     calibration_mode_ =
         (enable_int8_ && calibration_data_.size() == 0 && use_calib_mode_);
diff --git a/paddle/fluid/operators/unique_op.cc b/paddle/fluid/operators/unique_op.cc
index 4d772e50e6..c99f60ca87 100644
--- a/paddle/fluid/operators/unique_op.cc
+++ b/paddle/fluid/operators/unique_op.cc
@@ -119,7 +119,7 @@ class UniqueOpMaker : public framework::OpProtoAndCheckerMaker {
   void Make() override {
     AddInput("X",
              "Input tensor. It should be a 1-D tensor when Attr(is_sorted)"
-             " is fasle or a N-D tensor when Attr(is_sorted) is true.");
+             " is false or a N-D tensor when Attr(is_sorted) is true.");
     AddAttr<int>("dtype", "data type for output index");
     AddOutput("Out", "A unique subsequence for input tensor.");
     AddOutput("Index",
diff --git a/python/paddle/fluid/contrib/sparsity/asp.py b/python/paddle/fluid/contrib/sparsity/asp.py
index d770bd36e3..b10f326132 100644
--- a/python/paddle/fluid/contrib/sparsity/asp.py
+++ b/python/paddle/fluid/contrib/sparsity/asp.py
@@ -322,7 +322,7 @@ def prune_model(model, n=2, m=4, mask_algo='mask_1d', with_mask=True):
         m (int, optional): m of `n:m` sparse pattern. Default is 4.
         mask_algo (string, optional): The function name to generate spase mask. Default is `mask_1d`.
                                       The vaild inputs should be one of 'mask_1d', 'mask_2d_greedy' and 'mask_2d_best'.
-        with_mask (bool, optional): To prune mask Variables related to parameters or not. Ture is purning also, False is not. Default is True.
+        with_mask (bool, optional): To prune mask Variables related to parameters or not. True is purning also, False is not. Default is True.
     Returns:
         dictionary: A dictionary with key: `parameter name` (string) and value: its corresponding mask Variable.
     Examples:
diff --git a/python/paddle/fluid/dygraph/dygraph_to_static/convert_operators.py b/python/paddle/fluid/dygraph/dygraph_to_static/convert_operators.py
index 0fa48c4260..abf9c48828 100644
--- a/python/paddle/fluid/dygraph/dygraph_to_static/convert_operators.py
+++ b/python/paddle/fluid/dygraph/dygraph_to_static/convert_operators.py
@@ -373,7 +373,7 @@ def _run_paddle_cond(
     pred, true_fn, false_fn, get_args, set_args, return_name_ids, push_pop_names
 ):
     """
-    Paddle cond API will evaluate both ture_fn and false_fn codes.
+    Paddle cond API will evaluate both true_fn and false_fn codes.
     """
     helper = GetterSetterHelper(
         get_args, set_args, return_name_ids, push_pop_names
diff --git a/python/paddle/fluid/dygraph/nn.py b/python/paddle/fluid/dygraph/nn.py
index 60202c2a6b..f93a031f7b 100644
--- a/python/paddle/fluid/dygraph/nn.py
+++ b/python/paddle/fluid/dygraph/nn.py
@@ -1268,7 +1268,7 @@ class InstanceNorm(layers.Layer):
         if param_attr == False or bias_attr == False:
             assert (
                 bias_attr == param_attr
-            ), "param_attr and bias_attr must be set to Fasle at the same time in InstanceNorm"
+            ), "param_attr and bias_attr must be set to False at the same time in InstanceNorm"
         self._epsilon = epsilon
         self._param_attr = param_attr
         self._bias_attr = bias_attr
diff --git a/python/paddle/fluid/layers/control_flow.py b/python/paddle/fluid/layers/control_flow.py
index 18b2ec4964..5b79e3b86f 100755
--- a/python/paddle/fluid/layers/control_flow.py
+++ b/python/paddle/fluid/layers/control_flow.py
@@ -207,7 +207,7 @@ def select_input_with_buildin_type(inputs, mask, name):
         inputs = [to_static_variable(false_var), to_static_variable(true_var)]
         warnings.warn(
             "Return results from different branches in cond are not same type: "
-            "false_var returned by fasle_fn is '{}' and true_var of true_fn is "
+            "false_var returned by false_fn is '{}' and true_var of true_fn is "
             "'{}'".format(type(false_var), type(true_var))
         )
     elif (
@@ -230,7 +230,7 @@ def select_input_with_buildin_type(inputs, mask, name):
     else:
         raise TypeError(
             "Unsupported return type of true_fn and false_fn in cond: false_var "
-            "returned by fasle_fn is '{}' and true_var of true_fn is '{}'".format(
+            "returned by false_fn is '{}' and true_var of true_fn is '{}'".format(
                 type(false_var), type(true_var)
             )
         )
@@ -2835,7 +2835,7 @@ def cond(pred, true_fn=None, false_fn=None, name=None, return_names=None):
                 "true_fn returns non-None while false_fn returns None"
             )
 
-    # Merge ture and false output if they are not None
+    # Merge true and false output if they are not None
     if return_names is None:
         is_dy2staic = False
         return_names = ["no name"] * len(_to_sequence_except_dict(true_output))
diff --git a/python/paddle/fluid/layers/nn.py b/python/paddle/fluid/layers/nn.py
index 4a5dbe4a10..525558cb77 100755
--- a/python/paddle/fluid/layers/nn.py
+++ b/python/paddle/fluid/layers/nn.py
@@ -3678,7 +3678,7 @@ def instance_norm(
     if param_attr is False:
         assert (
             bias_attr is False
-        ), "param_attr and bias_attr must be set to Fasle at the same time in instance_norm"
+        ), "param_attr and bias_attr must be set to False at the same time in instance_norm"
 
     helper = LayerHelper('instance_norm', **locals())
     dtype = helper.input_dtype()
diff --git a/python/paddle/nn/layer/norm.py b/python/paddle/nn/layer/norm.py
index 1fca251c57..1b5784fbed 100644
--- a/python/paddle/nn/layer/norm.py
+++ b/python/paddle/nn/layer/norm.py
@@ -74,7 +74,7 @@ class _InstanceNormBase(Layer):
         if weight_attr == False or bias_attr == False:
             assert (
                 weight_attr == bias_attr
-            ), "weight_attr and bias_attr must be set to Fasle at the same time in InstanceNorm"
+            ), "weight_attr and bias_attr must be set to False at the same time in InstanceNorm"
         self._epsilon = epsilon
         self._weight_attr = weight_attr
         self._bias_attr = bias_attr
@@ -779,11 +779,11 @@ class BatchNorm1D(_BatchNormBase):
         momentum(float, optional): The value used for the moving_mean and moving_var computation. Default: 0.9.
         weight_attr(ParamAttr|bool, optional): The parameter attribute for Parameter `scale`
             of batch_norm. If it is set to None or one attribute of ParamAttr, batch_norm
-            will create ParamAttr as weight_attr. If it is set to Fasle, the weight is not learnable.
+            will create ParamAttr as weight_attr. If it is set to False, the weight is not learnable.
             If the Initializer of the weight_attr is not set, the parameter is initialized with ones. Default: None.
         bias_attr(ParamAttr|bool, optional): The parameter attribute for the bias of batch_norm.
             If it is set to None or one attribute of ParamAttr, batch_norm
-            will create ParamAttr as bias_attr. If it is set to Fasle, the weight is not learnable.
+            will create ParamAttr as bias_attr. If it is set to False, the weight is not learnable.
             If the Initializer of the bias_attr is not set, the bias is initialized zero. Default: None.
         data_format(str, optional): Specify the input data format, may be "NC", "NCL" or "NLC". Default "NCL".
         use_global_stats(bool|None, optional): Whether to use global mean and variance. If set to False, use the statistics of one mini-batch, if set to True, use the global statistics, if set to None, use global statistics in the test phase and use the statistics of one mini-batch in the training phase. Default: None.
@@ -892,11 +892,11 @@ class BatchNorm2D(_BatchNormBase):
         momentum(float, optional): The value used for the moving_mean and moving_var computation. Default: 0.9.
         weight_attr(ParamAttr|bool, optional): The parameter attribute for Parameter `scale`
             of batch_norm. If it is set to None or one attribute of ParamAttr, batch_norm
-            will create ParamAttr as weight_attr. If it is set to Fasle, the weight is not learnable.
+            will create ParamAttr as weight_attr. If it is set to False, the weight is not learnable.
             If the Initializer of the weight_attr is not set, the parameter is initialized with ones. Default: None.
         bias_attr(ParamAttr|bool, optional): The parameter attribute for the bias of batch_norm.
             If it is set to None or one attribute of ParamAttr, batch_norm
-            will create ParamAttr as bias_attr. If it is set to Fasle, the weight is not learnable.
+            will create ParamAttr as bias_attr. If it is set to False, the weight is not learnable.
             If the Initializer of the bias_attr is not set, the bias is initialized zero. Default: None.
         data_format(str, optional): Specify the input data format, the data format can be "NCHW" or "NHWC". Default: NCHW.
         use_global_stats(bool|None, optional): Whether to use global mean and variance. If set to False, use the statistics of one mini-batch, if set to True, use the global statistics, if set to None, use global statistics in the test phase and use the statistics of one mini-batch in the training phase. Default: None.
@@ -978,11 +978,11 @@ class BatchNorm3D(_BatchNormBase):
         momentum(float, optional): The value used for the moving_mean and moving_var computation. Default: 0.9.
         weight_attr(ParamAttr|bool, optional): The parameter attribute for Parameter `scale`
             of batch_norm. If it is set to None or one attribute of ParamAttr, batch_norm
-            will create ParamAttr as weight_attr. If it is set to Fasle, the weight is not learnable.
+            will create ParamAttr as weight_attr. If it is set to False, the weight is not learnable.
             If the Initializer of the weight_attr is not set, the parameter is initialized with ones. Default: None.
         bias_attr(ParamAttr|bool, optional): The parameter attribute for the bias of batch_norm.
             If it is set to None or one attribute of ParamAttr, batch_norm
-            will create ParamAttr as bias_attr. If it is set to Fasle, the weight is not learnable.
+            will create ParamAttr as bias_attr. If it is set to False, the weight is not learnable.
             If the Initializer of the bias_attr is not set, the bias is initialized zero. Default: None.
         data_format(str, optional): Specify the input data format, the data format can be "NCDHW" or "NDHWC. Default: NCDHW.
         use_global_stats(bool|None, optional): Whether to use global mean and variance. If set to False, use the statistics of one mini-batch, if set to True, use the global statistics, if set to None, use global statistics in the test phase and use the statistics of one mini-batch in the training phase. Default: None.
diff --git a/python/paddle/sparse/nn/layer/norm.py b/python/paddle/sparse/nn/layer/norm.py
index 8bbad41ef3..936e43a18f 100644
--- a/python/paddle/sparse/nn/layer/norm.py
+++ b/python/paddle/sparse/nn/layer/norm.py
@@ -61,11 +61,11 @@ class BatchNorm(paddle.nn.BatchNorm1D):
         epsilon(float, optional): The small value added to the variance to prevent division by zero. Default: 1e-5.
         weight_attr(ParamAttr|bool, optional): The parameter attribute for Parameter `scale`
             of batch_norm. If it is set to None or one attribute of ParamAttr, batch_norm
-            will create ParamAttr as weight_attr. If it is set to Fasle, the weight is not learnable.
+            will create ParamAttr as weight_attr. If it is set to False, the weight is not learnable.
             If the Initializer of the weight_attr is not set, the parameter is initialized with Xavier. Default: None.
         bias_attr(ParamAttr|bool, optional): The parameter attribute for the bias of batch_norm.
             If it is set to None or one attribute of ParamAttr, batch_norm
-            will create ParamAttr as bias_attr. If it is set to Fasle, the weight is not learnable.
+            will create ParamAttr as bias_attr. If it is set to False, the weight is not learnable.
             If the Initializer of the bias_attr is not set, the bias is initialized zero. Default: None.
         data_format(str, optional): Specify the input data format, may be "NC", "NCL" or "NLC". Default "NCL".
         use_global_stats(bool|None, optional): Whether to use global mean and variance. If set to False, use the statistics of one mini-batch, if set to True, use the global statistics, if set to None, use global statistics in the test phase and use the statistics of one mini-batch in the training phase. Default: None.
diff --git a/python/paddle/vision/ops.py b/python/paddle/vision/ops.py
index bdb8bc4ee9..519ac1db4c 100755
--- a/python/paddle/vision/ops.py
+++ b/python/paddle/vision/ops.py
@@ -347,7 +347,7 @@ def yolo_box(
     .. math::
 
         score_{conf} = \begin{case}
-            obj, \text{if } iou_aware == flase \\
+            obj, \text{if } iou_aware == false \\
            obj^{1 - iou_aware_factor} * iou^{iou_aware_factor}, \text{otherwise}
        \end{case}
-- 
GitLab
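
The assertion message corrected in python/paddle/nn/layer/norm.py documents a real API constraint: `weight_attr` and `bias_attr` can only be set to `False` together. A minimal sketch of both sides of that constraint, assuming a recent `paddle` release (the layer choice, shapes, and variable names below are illustrative, not taken from the patch):

    import paddle

    # Valid: disable scale and shift together; the InstanceNorm layer then
    # holds no learnable parameters.
    norm = paddle.nn.InstanceNorm2D(
        num_features=8, weight_attr=False, bias_attr=False
    )
    y = norm(paddle.rand([4, 8, 16, 16]))

    # Invalid: disabling only one of the two trips the assertion whose
    # message this patch corrects ("weight_attr and bias_attr must be set
    # to False at the same time in InstanceNorm").
    try:
        paddle.nn.InstanceNorm2D(num_features=8, weight_attr=False)
    except AssertionError as err:
        print(err)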