diff --git a/paddle/fluid/framework/naive_executor.h b/paddle/fluid/framework/naive_executor.h
index 02b2249dea5690e780dadac456476f42ba9ae71b..8ca3f5997af46bb97b2b9df7ace25a305be58630 100644
--- a/paddle/fluid/framework/naive_executor.h
+++ b/paddle/fluid/framework/naive_executor.h
@@ -53,7 +53,7 @@ class NaiveExecutor {
                 bool with_feed_fetch_ops);

   // Create variables before head.
-  // Create parameters if persistable is ture, or create the temporary variables
+  // Create parameters if persistable is true, or create the temporary variables
   // instead.
   void CreateVariables(const ProgramDesc& desc,
                        int block_id,
diff --git a/paddle/fluid/memory/allocation/allocator_facade.cc b/paddle/fluid/memory/allocation/allocator_facade.cc
index fb60d9110af384f9d2240197117dbe4d82ffc465..6bd08767871cc3a48549b3c0feeeb6e2083d82fe 100644
--- a/paddle/fluid/memory/allocation/allocator_facade.cc
+++ b/paddle/fluid/memory/allocation/allocator_facade.cc
@@ -238,7 +238,7 @@ class AllocatorFacadePrivate {
   // relate to non-default stream (i.e., the stream users pass in). The
   // default stream Allocator is built in the structure of
   // AllocatorFacadePrivate, while the non-default stream is built in a
-  // manner in GetAllocator function with 'create_if_not_found = ture'.
+  // manner in GetAllocator function with 'create_if_not_found = true'.
   // We make special treatment for the default stream for performance
   // reasons. Since most Alloc calls are for default stream in
   // application, treating it separately can avoid lots of overhead of
diff --git a/paddle/fluid/operators/detection/yolo_box_op.cc b/paddle/fluid/operators/detection/yolo_box_op.cc
index 257347f663c682169ba39f2901017cf098df2ef7..fbf4b55dfe44e02226c2321a739298f1484a7f37 100644
--- a/paddle/fluid/operators/detection/yolo_box_op.cc
+++ b/paddle/fluid/operators/detection/yolo_box_op.cc
@@ -237,7 +237,7 @@ class YoloBoxOpMaker : public framework::OpProtoAndCheckerMaker {
       .. math::

          score_{conf} = \begin{cases}
-             obj, \text{if } iou_aware == flase \\
+             obj, \text{if } iou_aware == false \\
              obj^{1 - iou_aware_factor} * iou^{iou_aware_factor}, \text{otherwise}
          \end{cases}
diff --git a/paddle/fluid/operators/fused/fused_dropout_helper.h b/paddle/fluid/operators/fused/fused_dropout_helper.h
index 5d6dd1a5bbf81d8f114e4f8f783c8db2835ad0b8..3230854284062dc528664bf8752acc358e2c1f3c 100644
--- a/paddle/fluid/operators/fused/fused_dropout_helper.h
+++ b/paddle/fluid/operators/fused/fused_dropout_helper.h
@@ -28,7 +28,7 @@ namespace operators {
  * Support two Dropouts in the use scenario.
  * This wrapper can be used in FFN op.
  * The DropoutParam will be used in the fused_dropout_act_bias,
- * fused_residual_dropout_bias(pre_layer_norm=ture) or
+ * fused_residual_dropout_bias(pre_layer_norm=true) or
  * fused_layernorm_residual_dropout_bias(pre_layer_norm=false).
 */
struct DropoutParam {
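The DropoutParam comment above names the fused kernels it feeds. As a reading aid, here is a minimal unfused NumPy sketch of the two residual variants it routes between; the helper name, the inverted-dropout form, and the exact LayerNorm placement are assumptions for illustration, not the fused implementation:

```python
import numpy as np

def residual_dropout_bias_sketch(x, bias, residual, p, pre_layer_norm, rng, eps=1e-5):
    # Hypothetical unfused reference: dropout(x + bias) added to the residual branch.
    keep = (rng.random(x.shape) >= p) / (1.0 - p)  # inverted dropout mask (assumed)
    out = residual + (x + bias) * keep             # pre_layer_norm=True path
    if not pre_layer_norm:
        # pre_layer_norm=False path: LayerNorm over the residual sum (assumed placement).
        mean = out.mean(axis=-1, keepdims=True)
        var = out.var(axis=-1, keepdims=True)
        out = (out - mean) / np.sqrt(var + eps)
    return out

rng = np.random.default_rng(0)
x, b = rng.standard_normal((2, 8)), np.zeros(8)
res = rng.standard_normal((2, 8))
print(residual_dropout_bias_sketch(x, b, res, 0.1, pre_layer_norm=True, rng=rng).shape)
```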
diff --git a/paddle/fluid/operators/select_op_helper.h b/paddle/fluid/operators/select_op_helper.h
index 46ef90c1a9219e97d4c07eb941ebe30613a458e8..ffab83e4e74fa86e44c8f5bd919238e705fc6330 100644
--- a/paddle/fluid/operators/select_op_helper.h
+++ b/paddle/fluid/operators/select_op_helper.h
@@ -37,7 +37,7 @@ inline int GetBranchNumber(const phi::DenseTensor &mask) {
   if (platform::is_cpu_place(mask.place())) {
     return mask.data<int>()[0];
   }
-  // when platform::is_gpu_place(mask.place()) is ture
+  // when platform::is_gpu_place(mask.place()) is true
   std::unique_ptr<phi::DenseTensor> cpu_mask{new phi::DenseTensor()};
 #if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
   framework::TensorCopySync(mask, platform::CPUPlace(), cpu_mask.get());
diff --git a/paddle/fluid/operators/tensorrt/tensorrt_engine_op.h b/paddle/fluid/operators/tensorrt/tensorrt_engine_op.h
index eea337d93fb7eb0f5fe9b35daafe5be3b982b006..8096acc0a821c81a065e7eceb0ec6e4bf9d021ef 100644
--- a/paddle/fluid/operators/tensorrt/tensorrt_engine_op.h
+++ b/paddle/fluid/operators/tensorrt/tensorrt_engine_op.h
@@ -269,7 +269,7 @@ class TensorRTEngineOp : public framework::OperatorBase {
       if (param_names_.count(x)) continue;
       runtime_input_names_.emplace_back(x);
     }
-    // calibration_mode is ture represents we need to
+    // calibration_mode being true means we need to
     // generate the calibration table data.
     calibration_mode_ =
         (enable_int8_ && calibration_data_.size() == 0 && use_calib_mode_);
diff --git a/paddle/fluid/operators/unique_op.cc b/paddle/fluid/operators/unique_op.cc
index 4d772e50e65257496b89e60519a8711de918bea8..c99f60ca873b1cad1124cd3ebf776cc4cff94e93 100644
--- a/paddle/fluid/operators/unique_op.cc
+++ b/paddle/fluid/operators/unique_op.cc
@@ -119,7 +119,7 @@ class UniqueOpMaker : public framework::OpProtoAndCheckerMaker {
   void Make() override {
     AddInput("X",
              "Input tensor. It should be a 1-D tensor when Attr(is_sorted)"
-             " is fasle or a N-D tensor when Attr(is_sorted) is true.");
+             " is false or an N-D tensor when Attr(is_sorted) is true.");
     AddAttr<int>("dtype", "data type for output index");
     AddOutput("Out", "A unique subsequence for input tensor.");
     AddOutput("Index",
diff --git a/python/paddle/fluid/contrib/sparsity/asp.py b/python/paddle/fluid/contrib/sparsity/asp.py
index d770bd36e3980942cca37d25a825ca000afd257f..b10f3261324ecb63ba22caef4baf11a44a61a7f7 100644
--- a/python/paddle/fluid/contrib/sparsity/asp.py
+++ b/python/paddle/fluid/contrib/sparsity/asp.py
@@ -322,7 +322,7 @@ def prune_model(model, n=2, m=4, mask_algo='mask_1d', with_mask=True):
         m (int, optional): m of `n:m` sparse pattern. Default is 4.
         mask_algo (string, optional): The function name to generate sparse mask. Default is `mask_1d`.
                                       The valid inputs should be one of 'mask_1d', 'mask_2d_greedy' and 'mask_2d_best'.
-        with_mask (bool, optional): To prune mask Variables related to parameters or not. Ture is purning also, False is not. Default is True.
+        with_mask (bool, optional): Whether to also prune the mask Variables related to the parameters. True prunes them as well, False does not. Default is True.
     Returns:
         dictionary: A dictionary with key: `parameter name` (string) and value: its corresponding mask Variable.

     Examples:
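The prune_model docstring above documents `n:m` sparsity. For readers unfamiliar with the pattern, here is a hedged NumPy sketch of what a 1-D n:m magnitude mask means; the helper nm_mask_1d is hypothetical and conceptual only, not the library's `mask_1d` implementation:

```python
import numpy as np

def nm_mask_1d(weights, n=2, m=4):
    # In every contiguous group of m weights, keep the n largest magnitudes.
    groups = weights.reshape(-1, m)
    mask = np.zeros_like(groups)
    top = np.argsort(-np.abs(groups), axis=1)[:, :n]  # indices of the n largest |w|
    np.put_along_axis(mask, top, 1.0, axis=1)
    return mask.reshape(weights.shape)

w = np.random.randn(4, 8)
mask = nm_mask_1d(w, n=2, m=4)
print((mask.reshape(-1, 4).sum(axis=1) == 2).all())  # each group keeps exactly 2
```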
diff --git a/python/paddle/fluid/dygraph/dygraph_to_static/convert_operators.py b/python/paddle/fluid/dygraph/dygraph_to_static/convert_operators.py
index 0fa48c4260c46d1555aa8b6dc6532745ca1964ed..abf9c4882803986c72c3601df2d747648a28131a 100644
--- a/python/paddle/fluid/dygraph/dygraph_to_static/convert_operators.py
+++ b/python/paddle/fluid/dygraph/dygraph_to_static/convert_operators.py
@@ -373,7 +373,7 @@ def _run_paddle_cond(
     pred, true_fn, false_fn, get_args, set_args, return_name_ids, push_pop_names
 ):
     """
-    Paddle cond API will evaluate both ture_fn and false_fn codes.
+    Paddle cond API will evaluate both true_fn and false_fn codes.
     """
     helper = GetterSetterHelper(
         get_args, set_args, return_name_ids, push_pop_names
diff --git a/python/paddle/fluid/dygraph/nn.py b/python/paddle/fluid/dygraph/nn.py
index 60202c2a6b105bb60b6cbb80c8c2668c670b2aa9..f93a031f7bc13829bff8e3bcfcd265eb1f19b269 100644
--- a/python/paddle/fluid/dygraph/nn.py
+++ b/python/paddle/fluid/dygraph/nn.py
@@ -1268,7 +1268,7 @@ class InstanceNorm(layers.Layer):
         if param_attr == False or bias_attr == False:
             assert (
                 bias_attr == param_attr
-            ), "param_attr and bias_attr must be set to Fasle at the same time in InstanceNorm"
+            ), "param_attr and bias_attr must be set to False at the same time in InstanceNorm"
         self._epsilon = epsilon
         self._param_attr = param_attr
         self._bias_attr = bias_attr
diff --git a/python/paddle/fluid/layers/control_flow.py b/python/paddle/fluid/layers/control_flow.py
index 18b2ec496499d1e2decdd0fc0b03bd771a9c5085..5b79e3b86fadf142d3a490ad149ccd50f5bc7023 100755
--- a/python/paddle/fluid/layers/control_flow.py
+++ b/python/paddle/fluid/layers/control_flow.py
@@ -207,7 +207,7 @@ def select_input_with_buildin_type(inputs, mask, name):
         inputs = [to_static_variable(false_var), to_static_variable(true_var)]
         warnings.warn(
             "Return results from different branches in cond are not same type: "
-            "false_var returned by fasle_fn is '{}' and true_var of true_fn is "
+            "false_var returned by false_fn is '{}' and true_var of true_fn is "
             "'{}'".format(type(false_var), type(true_var))
         )
     elif (
@@ -230,7 +230,7 @@ def select_input_with_buildin_type(inputs, mask, name):
     else:
         raise TypeError(
             "Unsupported return type of true_fn and false_fn in cond: false_var "
-            "returned by fasle_fn is '{}' and true_var of true_fn is '{}'".format(
+            "returned by false_fn is '{}' and true_var of true_fn is '{}'".format(
                 type(false_var), type(true_var)
             )
         )
@@ -2835,7 +2835,7 @@ def cond(pred, true_fn=None, false_fn=None, name=None, return_names=None):
             "true_fn returns non-None while false_fn returns None"
         )

-    # Merge ture and false output if they are not None
+    # Merge true and false output if they are not None
     if return_names is None:
         is_dy2staic = False
         return_names = ["no name"] * len(_to_sequence_except_dict(true_output))
diff --git a/python/paddle/fluid/layers/nn.py b/python/paddle/fluid/layers/nn.py
index 4a5dbe4a106c29cc7aa55fbfcc85778a58d47cf3..525558cb77b79335333ddc2972782b4f901e69cc 100755
--- a/python/paddle/fluid/layers/nn.py
+++ b/python/paddle/fluid/layers/nn.py
@@ -3678,7 +3678,7 @@ def instance_norm(
     if param_attr is False:
         assert (
             bias_attr is False
-        ), "param_attr and bias_attr must be set to Fasle at the same time in instance_norm"
+        ), "param_attr and bias_attr must be set to False at the same time in instance_norm"
     helper = LayerHelper('instance_norm', **locals())
     dtype = helper.input_dtype()
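_run_paddle_cond's docstring and the select_input warnings above reflect the same contract: in static-graph mode both branch functions are traced to build two sub-graphs, and their outputs must be type-compatible so that one can be selected at run time. A pure-Python sketch of that contract (illustrative only, not the Paddle implementation):

```python
def cond_sketch(pred_value, true_fn, false_fn):
    # Both branches are evaluated up front, as when building two sub-graphs.
    true_out, false_out = true_fn(), false_fn()
    if type(true_out) is not type(false_out):
        raise TypeError(
            "Unsupported return type of true_fn and false_fn in cond: "
            f"false_var is '{type(false_out)}' and true_var is '{type(true_out)}'"
        )
    return true_out if pred_value else false_out  # select_input by the predicate

print(cond_sketch(True, lambda: [1, 2], lambda: [3, 4]))  # [1, 2]
```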
diff --git a/python/paddle/nn/layer/norm.py b/python/paddle/nn/layer/norm.py
index 1fca251c57007a410a25d12b996c7bdcd37b27c9..1b5784fbedff1709e91bc739c96e82968cdab1f4 100644
--- a/python/paddle/nn/layer/norm.py
+++ b/python/paddle/nn/layer/norm.py
@@ -74,7 +74,7 @@ class _InstanceNormBase(Layer):
         if weight_attr == False or bias_attr == False:
             assert (
                 weight_attr == bias_attr
-            ), "weight_attr and bias_attr must be set to Fasle at the same time in InstanceNorm"
+            ), "weight_attr and bias_attr must be set to False at the same time in InstanceNorm"
         self._epsilon = epsilon
         self._weight_attr = weight_attr
         self._bias_attr = bias_attr
@@ -779,11 +779,11 @@ class BatchNorm1D(_BatchNormBase):
         momentum(float, optional): The value used for the moving_mean and moving_var computation. Default: 0.9.
         weight_attr(ParamAttr|bool, optional): The parameter attribute for Parameter `scale` of batch_norm. If it is set to None or one attribute of ParamAttr, batch_norm
-            will create ParamAttr as weight_attr. If it is set to Fasle, the weight is not learnable.
+            will create ParamAttr as weight_attr. If it is set to False, the weight is not learnable.
             If the Initializer of the weight_attr is not set, the parameter is initialized with ones. Default: None.
         bias_attr(ParamAttr|bool, optional): The parameter attribute for the bias of batch_norm. If it is set to None or one attribute of ParamAttr, batch_norm
-            will create ParamAttr as bias_attr. If it is set to Fasle, the weight is not learnable.
+            will create ParamAttr as bias_attr. If it is set to False, the weight is not learnable.
             If the Initializer of the bias_attr is not set, the bias is initialized zero. Default: None.
         data_format(str, optional): Specify the input data format, may be "NC", "NCL" or "NLC". Default "NCL".
         use_global_stats(bool|None, optional): Whether to use global mean and variance. If set to False, use the statistics of one mini-batch, if set to True, use the global statistics, if set to None, use global statistics in the test phase and use the statistics of one mini-batch in the training phase. Default: None.
@@ -892,11 +892,11 @@ class BatchNorm2D(_BatchNormBase):
         momentum(float, optional): The value used for the moving_mean and moving_var computation. Default: 0.9.
         weight_attr(ParamAttr|bool, optional): The parameter attribute for Parameter `scale` of batch_norm. If it is set to None or one attribute of ParamAttr, batch_norm
-            will create ParamAttr as weight_attr. If it is set to Fasle, the weight is not learnable.
+            will create ParamAttr as weight_attr. If it is set to False, the weight is not learnable.
             If the Initializer of the weight_attr is not set, the parameter is initialized with ones. Default: None.
         bias_attr(ParamAttr|bool, optional): The parameter attribute for the bias of batch_norm. If it is set to None or one attribute of ParamAttr, batch_norm
-            will create ParamAttr as bias_attr. If it is set to Fasle, the weight is not learnable.
+            will create ParamAttr as bias_attr. If it is set to False, the weight is not learnable.
             If the Initializer of the bias_attr is not set, the bias is initialized zero. Default: None.
         data_format(str, optional): Specify the input data format, the data format can be "NCHW" or "NHWC". Default: NCHW.
         use_global_stats(bool|None, optional): Whether to use global mean and variance. If set to False, use the statistics of one mini-batch, if set to True, use the global statistics, if set to None, use global statistics in the test phase and use the statistics of one mini-batch in the training phase. Default: None.
@@ -978,11 +978,11 @@ class BatchNorm3D(_BatchNormBase):
         momentum(float, optional): The value used for the moving_mean and moving_var computation. Default: 0.9.
         weight_attr(ParamAttr|bool, optional): The parameter attribute for Parameter `scale` of batch_norm. If it is set to None or one attribute of ParamAttr, batch_norm
-            will create ParamAttr as weight_attr. If it is set to Fasle, the weight is not learnable.
+            will create ParamAttr as weight_attr. If it is set to False, the weight is not learnable.
             If the Initializer of the weight_attr is not set, the parameter is initialized with ones. Default: None.
         bias_attr(ParamAttr|bool, optional): The parameter attribute for the bias of batch_norm. If it is set to None or one attribute of ParamAttr, batch_norm
-            will create ParamAttr as bias_attr. If it is set to Fasle, the weight is not learnable.
+            will create ParamAttr as bias_attr. If it is set to False, the weight is not learnable.
             If the Initializer of the bias_attr is not set, the bias is initialized zero. Default: None.
         data_format(str, optional): Specify the input data format, the data format can be "NCDHW" or "NDHWC". Default: NCDHW.
         use_global_stats(bool|None, optional): Whether to use global mean and variance. If set to False, use the statistics of one mini-batch, if set to True, use the global statistics, if set to None, use global statistics in the test phase and use the statistics of one mini-batch in the training phase. Default: None.
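For the weight_attr/bias_attr semantics repeated in these docstrings, a short usage sketch (assumes a working paddle install; False makes scale and shift non-learnable, and InstanceNorm requires both to be False together, per the assertion above):

```python
import paddle

x = paddle.randn([8, 16, 32])  # (N, C, L)
bn = paddle.nn.BatchNorm1D(16)  # learnable scale and shift
bn_frozen = paddle.nn.BatchNorm1D(16, weight_attr=False, bias_attr=False)
inorm = paddle.nn.InstanceNorm1D(16, weight_attr=False, bias_attr=False)
print(bn(x).shape, bn_frozen(x).shape, inorm(x).shape)  # all [8, 16, 32]
```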
diff --git a/python/paddle/sparse/nn/layer/norm.py b/python/paddle/sparse/nn/layer/norm.py
index 8bbad41ef38c179f7f21bb9feca28712027306fd..936e43a18faf909bdfd3d82eb716178913d7217a 100644
--- a/python/paddle/sparse/nn/layer/norm.py
+++ b/python/paddle/sparse/nn/layer/norm.py
@@ -61,11 +61,11 @@ class BatchNorm(paddle.nn.BatchNorm1D):
         epsilon(float, optional): The small value added to the variance to prevent division by zero. Default: 1e-5.
         weight_attr(ParamAttr|bool, optional): The parameter attribute for Parameter `scale` of batch_norm. If it is set to None or one attribute of ParamAttr, batch_norm
-            will create ParamAttr as weight_attr. If it is set to Fasle, the weight is not learnable.
+            will create ParamAttr as weight_attr. If it is set to False, the weight is not learnable.
             If the Initializer of the weight_attr is not set, the parameter is initialized with Xavier. Default: None.
         bias_attr(ParamAttr|bool, optional): The parameter attribute for the bias of batch_norm. If it is set to None or one attribute of ParamAttr, batch_norm
-            will create ParamAttr as bias_attr. If it is set to Fasle, the weight is not learnable.
+            will create ParamAttr as bias_attr. If it is set to False, the weight is not learnable.
             If the Initializer of the bias_attr is not set, the bias is initialized zero. Default: None.
         data_format(str, optional): Specify the input data format, may be "NC", "NCL" or "NLC". Default "NCL".
         use_global_stats(bool|None, optional): Whether to use global mean and variance. If set to False, use the statistics of one mini-batch, if set to True, use the global statistics, if set to None, use global statistics in the test phase and use the statistics of one mini-batch in the training phase. Default: None.
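The use_global_stats wording repeated in these docstrings boils down to which statistics normalize the input. A minimal NumPy sketch of that choice (hypothetical helper, not the Paddle kernel):

```python
import numpy as np

def batch_norm_sketch(x, running_mean, running_var, training,
                      use_global_stats=None, momentum=0.9, eps=1e-5):
    # None: batch statistics in training, global (running) statistics in eval.
    use_global = (not training) if use_global_stats is None else use_global_stats
    if use_global:
        mean, var = running_mean, running_var
    else:
        mean, var = x.mean(axis=0), x.var(axis=0)
        running_mean[:] = momentum * running_mean + (1 - momentum) * mean
        running_var[:] = momentum * running_var + (1 - momentum) * var
    return (x - mean) / np.sqrt(var + eps)

x = np.random.randn(32, 16)
rm, rv = np.zeros(16), np.ones(16)
print(abs(batch_norm_sketch(x, rm, rv, training=True).mean(axis=0)).max())  # ~0
```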
diff --git a/python/paddle/vision/ops.py b/python/paddle/vision/ops.py
index bdb8bc4ee983a2e8eb83437530a0c3c7ca382dad..519ac1db4c681e5de8eeecc6a5985a0f4bddce98 100755
--- a/python/paddle/vision/ops.py
+++ b/python/paddle/vision/ops.py
@@ -347,7 +347,7 @@ def yolo_box(
     .. math::

         score_{conf} = \begin{cases}
-            obj, \text{if } iou_aware == flase \\
+            obj, \text{if } iou_aware == false \\
             obj^{1 - iou_aware_factor} * iou^{iou_aware_factor}, \text{otherwise}
         \end{cases}
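The corrected formula, in runnable form (a sketch of the scoring rule only, not the yolo_box kernel):

```python
def conf_score(obj, iou, iou_aware, iou_aware_factor):
    # score_conf = obj                              if iou_aware is false
    #            = obj**(1 - factor) * iou**factor  otherwise
    if not iou_aware:
        return obj
    return obj ** (1.0 - iou_aware_factor) * iou ** iou_aware_factor

print(conf_score(obj=0.9, iou=0.8, iou_aware=True, iou_aware_factor=0.5))
```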