Unverified commit f5912d0c, authored by Nyakku Shigure, committed by GitHub

fix typos for `True` and `False` (#47477)

* fix typo `Fasle`/`Flase` -> `False`

* fix typo `Ture` -> `True`
Parent c8fc3379
@@ -53,7 +53,7 @@ class NaiveExecutor {
 bool with_feed_fetch_ops);
 // Create variables before head.
-// Create parameters if persistable is ture, or create the temporary variables
+// Create parameters if persistable is true, or create the temporary variables
 // instead.
 void CreateVariables(const ProgramDesc& desc,
 int block_id,
@@ -238,7 +238,7 @@ class AllocatorFacadePrivate {
 // releate to non-default stream (i.e., the stream users pass in). The
 // default stream Allocator is built in the structure of
 // AllocatorFacadePrivate, while the non-default stream is build in a
-// manner in GetAllocator function with 'create_if_not_found = ture'.
+// manner in GetAllocator function with 'create_if_not_found = true'.
 // We make special treatment for the default stream for performance
 // reasons. Since most Alloc calls are for default stream in
 // application, treating it separately can avoid lots of overhead of
@@ -237,7 +237,7 @@ class YoloBoxOpMaker : public framework::OpProtoAndCheckerMaker {
 .. math::
 score_{conf} = \begin{case}
-obj, \text{if } iou_aware == flase \\
+obj, \text{if } iou_aware == false \\
 obj^{1 - iou_aware_factor} * iou^{iou_aware_factor}, \text{otherwise}
 \end{case}
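For readers parsing the docstring above: rendered in standard LaTeX (with `cases` spelled out, where the source writes `case`), the confidence-score formula it documents is:

```latex
score_{conf} =
  \begin{cases}
    obj, & \text{if } iou\_aware = \text{false} \\
    obj^{\,1 - iou\_aware\_factor} \cdot iou^{\,iou\_aware\_factor}, & \text{otherwise}
  \end{cases}
```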
@@ -28,7 +28,7 @@ namespace operators {
 * Support two Dropouts in the use senarieo.
 * This warpper can be used in FFN op.
 * The DropoutParam will be used in the fused_dropout_act_bias,
-* fused_residual_dropout_bias(pre_layer_norm=ture) or
+* fused_residual_dropout_bias(pre_layer_norm=true) or
 * fused_layernorm_residual_dropout_bias(pre_layer_norm=false).
 */
 struct DropoutParam {
@@ -37,7 +37,7 @@ inline int GetBranchNumber(const phi::DenseTensor &mask) {
 if (platform::is_cpu_place(mask.place())) {
 return mask.data<int>()[0];
 }
-// when platform::is_gpu_place(mask.place()) is ture
+// when platform::is_gpu_place(mask.place()) is true
 std::unique_ptr<phi::DenseTensor> cpu_mask{new phi::DenseTensor()};
 #if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
 framework::TensorCopySync(mask, platform::CPUPlace(), cpu_mask.get());
@@ -269,7 +269,7 @@ class TensorRTEngineOp : public framework::OperatorBase {
 if (param_names_.count(x)) continue;
 runtime_input_names_.emplace_back(x);
 }
-// calibration_mode is ture represents we need to
+// calibration_mode is true represents we need to
 // generate the calibration table data.
 calibration_mode_ =
 (enable_int8_ && calibration_data_.size() == 0 && use_calib_mode_);
@@ -119,7 +119,7 @@ class UniqueOpMaker : public framework::OpProtoAndCheckerMaker {
 void Make() override {
 AddInput("X",
 "Input tensor. It should be a 1-D tensor when Attr(is_sorted)"
-" is fasle or a N-D tensor when Attr(is_sorted) is true.");
+" is false or a N-D tensor when Attr(is_sorted) is true.");
 AddAttr<int>("dtype", "data type for output index");
 AddOutput("Out", "A unique subsequence for input tensor.");
 AddOutput("Index",
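As an aside, a minimal sketch of how this operator surfaces in the Python API (assuming `paddle.unique`, which takes the sorted path documented above; not part of this commit):

```python
import paddle

x = paddle.to_tensor([2, 3, 3, 1, 5, 3])
# N-D input is allowed on the sorted path; a 1-D tensor is enough here.
out, index = paddle.unique(x, return_index=True)
print(out.numpy())    # [1 2 3 5]
print(index.numpy())  # index of the first occurrence of each unique value
```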
@@ -322,7 +322,7 @@ def prune_model(model, n=2, m=4, mask_algo='mask_1d', with_mask=True):
 m (int, optional): m of `n:m` sparse pattern. Default is 4.
 mask_algo (string, optional): The function name to generate spase mask. Default is `mask_1d`.
 The vaild inputs should be one of 'mask_1d', 'mask_2d_greedy' and 'mask_2d_best'.
-with_mask (bool, optional): To prune mask Variables related to parameters or not. Ture is purning also, False is not. Default is True.
+with_mask (bool, optional): To prune mask Variables related to parameters or not. True is purning also, False is not. Default is True.
 Returns:
 dictionary: A dictionary with key: `parameter name` (string) and value: its corresponding mask Variable.
 Examples:
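A minimal usage sketch for the docstring above (assuming the `paddle.incubate.asp` module that hosts `prune_model` and its companion `decorate`; not part of this commit):

```python
import paddle
from paddle.incubate import asp

layer = paddle.nn.Linear(128, 32)
optimizer = paddle.optimizer.SGD(
    learning_rate=0.01, parameters=layer.parameters()
)
# decorate() keeps pruned weights masked during optimization steps;
# prune_model() then applies the n:m (here 2:4) sparse pattern and
# returns the name -> mask Variable dictionary described above.
optimizer = asp.decorate(optimizer)
masks = asp.prune_model(layer, n=2, m=4, mask_algo='mask_1d', with_mask=True)
```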
@@ -373,7 +373,7 @@ def _run_paddle_cond(
 pred, true_fn, false_fn, get_args, set_args, return_name_ids, push_pop_names
 ):
 """
-Paddle cond API will evaluate both ture_fn and false_fn codes.
+Paddle cond API will evaluate both true_fn and false_fn codes.
 """
 helper = GetterSetterHelper(
 get_args, set_args, return_name_ids, push_pop_names
@@ -1268,7 +1268,7 @@ class InstanceNorm(layers.Layer):
 if param_attr == False or bias_attr == False:
 assert (
 bias_attr == param_attr
-), "param_attr and bias_attr must be set to Fasle at the same time in InstanceNorm"
+), "param_attr and bias_attr must be set to False at the same time in InstanceNorm"
 self._epsilon = epsilon
 self._param_attr = param_attr
 self._bias_attr = bias_attr
@@ -207,7 +207,7 @@ def select_input_with_buildin_type(inputs, mask, name):
 inputs = [to_static_variable(false_var), to_static_variable(true_var)]
 warnings.warn(
 "Return results from different branches in cond are not same type: "
-"false_var returned by fasle_fn is '{}' and true_var of true_fn is "
+"false_var returned by false_fn is '{}' and true_var of true_fn is "
 "'{}'".format(type(false_var), type(true_var))
 )
 elif (
@@ -230,7 +230,7 @@ def select_input_with_buildin_type(inputs, mask, name):
 else:
 raise TypeError(
 "Unsupported return type of true_fn and false_fn in cond: false_var "
-"returned by fasle_fn is '{}' and true_var of true_fn is '{}'".format(
+"returned by false_fn is '{}' and true_var of true_fn is '{}'".format(
 type(false_var), type(true_var)
 )
 )
@@ -2835,7 +2835,7 @@ def cond(pred, true_fn=None, false_fn=None, name=None, return_names=None):
 "true_fn returns non-None while false_fn returns None"
 )
-# Merge ture and false output if they are not None
+# Merge true and false output if they are not None
 if return_names is None:
 is_dy2staic = False
 return_names = ["no name"] * len(_to_sequence_except_dict(true_output))
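For context, a minimal sketch of the `cond` API whose internals the three hunks above touch (assuming `paddle.static.nn.cond`; both branch callables must return outputs of the same structure and type, which is exactly what the warning and TypeError paths above enforce):

```python
import paddle

def true_fn():
    return paddle.full(shape=[1], dtype='int32', fill_value=1)

def false_fn():
    return paddle.full(shape=[1], dtype='int32', fill_value=3)

x = paddle.to_tensor([0.1])
y = paddle.to_tensor([0.23])
# Both branches return the same structure/type, so the outputs can be
# merged by select_input without hitting the error paths above.
out = paddle.static.nn.cond(paddle.less_than(x, y), true_fn, false_fn)
```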
@@ -3678,7 +3678,7 @@ def instance_norm(
 if param_attr is False:
 assert (
 bias_attr is False
-), "param_attr and bias_attr must be set to Fasle at the same time in instance_norm"
+), "param_attr and bias_attr must be set to False at the same time in instance_norm"
 helper = LayerHelper('instance_norm', **locals())
 dtype = helper.input_dtype()
@@ -74,7 +74,7 @@ class _InstanceNormBase(Layer):
 if weight_attr == False or bias_attr == False:
 assert (
 weight_attr == bias_attr
-), "weight_attr and bias_attr must be set to Fasle at the same time in InstanceNorm"
+), "weight_attr and bias_attr must be set to False at the same time in InstanceNorm"
 self._epsilon = epsilon
 self._weight_attr = weight_attr
 self._bias_attr = bias_attr
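To illustrate the assertion in the hunk above, a sketch assuming `paddle.nn.InstanceNorm2D`, one of the subclasses of `_InstanceNormBase`:

```python
import paddle

x = paddle.rand((2, 3, 8, 8))
# weight_attr and bias_attr must both be False (or neither); setting
# only one of them to False trips the assertion above.
inorm = paddle.nn.InstanceNorm2D(3, weight_attr=False, bias_attr=False)
y = inorm(x)
print(y.shape)  # [2, 3, 8, 8]
```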
@@ -779,11 +779,11 @@ class BatchNorm1D(_BatchNormBase):
 momentum(float, optional): The value used for the moving_mean and moving_var computation. Default: 0.9.
 weight_attr(ParamAttr|bool, optional): The parameter attribute for Parameter `scale`
 of batch_norm. If it is set to None or one attribute of ParamAttr, batch_norm
-will create ParamAttr as weight_attr. If it is set to Fasle, the weight is not learnable.
+will create ParamAttr as weight_attr. If it is set to False, the weight is not learnable.
 If the Initializer of the weight_attr is not set, the parameter is initialized with ones. Default: None.
 bias_attr(ParamAttr|bool, optional): The parameter attribute for the bias of batch_norm.
 If it is set to None or one attribute of ParamAttr, batch_norm
-will create ParamAttr as bias_attr. If it is set to Fasle, the weight is not learnable.
+will create ParamAttr as bias_attr. If it is set to False, the weight is not learnable.
 If the Initializer of the bias_attr is not set, the bias is initialized zero. Default: None.
 data_format(str, optional): Specify the input data format, may be "NC", "NCL" or "NLC". Default "NCL".
 use_global_stats(bool|None, optional): Whether to use global mean and variance. If set to False, use the statistics of one mini-batch, if set to True, use the global statistics, if set to None, use global statistics in the test phase and use the statistics of one mini-batch in the training phase. Default: None.
@@ -892,11 +892,11 @@ class BatchNorm2D(_BatchNormBase):
 momentum(float, optional): The value used for the moving_mean and moving_var computation. Default: 0.9.
 weight_attr(ParamAttr|bool, optional): The parameter attribute for Parameter `scale`
 of batch_norm. If it is set to None or one attribute of ParamAttr, batch_norm
-will create ParamAttr as weight_attr. If it is set to Fasle, the weight is not learnable.
+will create ParamAttr as weight_attr. If it is set to False, the weight is not learnable.
 If the Initializer of the weight_attr is not set, the parameter is initialized with ones. Default: None.
 bias_attr(ParamAttr|bool, optional): The parameter attribute for the bias of batch_norm.
 If it is set to None or one attribute of ParamAttr, batch_norm
-will create ParamAttr as bias_attr. If it is set to Fasle, the weight is not learnable.
+will create ParamAttr as bias_attr. If it is set to False, the weight is not learnable.
 If the Initializer of the bias_attr is not set, the bias is initialized zero. Default: None.
 data_format(str, optional): Specify the input data format, the data format can be "NCHW" or "NHWC". Default: NCHW.
 use_global_stats(bool|None, optional): Whether to use global mean and variance. If set to False, use the statistics of one mini-batch, if set to True, use the global statistics, if set to None, use global statistics in the test phase and use the statistics of one mini-batch in the training phase. Default: None.
@@ -978,11 +978,11 @@ class BatchNorm3D(_BatchNormBase):
 momentum(float, optional): The value used for the moving_mean and moving_var computation. Default: 0.9.
 weight_attr(ParamAttr|bool, optional): The parameter attribute for Parameter `scale`
 of batch_norm. If it is set to None or one attribute of ParamAttr, batch_norm
-will create ParamAttr as weight_attr. If it is set to Fasle, the weight is not learnable.
+will create ParamAttr as weight_attr. If it is set to False, the weight is not learnable.
 If the Initializer of the weight_attr is not set, the parameter is initialized with ones. Default: None.
 bias_attr(ParamAttr|bool, optional): The parameter attribute for the bias of batch_norm.
 If it is set to None or one attribute of ParamAttr, batch_norm
-will create ParamAttr as bias_attr. If it is set to Fasle, the weight is not learnable.
+will create ParamAttr as bias_attr. If it is set to False, the weight is not learnable.
 If the Initializer of the bias_attr is not set, the bias is initialized zero. Default: None.
 data_format(str, optional): Specify the input data format, the data format can be "NCDHW" or "NDHWC. Default: NCDHW.
 use_global_stats(bool|None, optional): Whether to use global mean and variance. If set to False, use the statistics of one mini-batch, if set to True, use the global statistics, if set to None, use global statistics in the test phase and use the statistics of one mini-batch in the training phase. Default: None.
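A minimal sketch of the `weight_attr=False` behavior the three docstrings above describe (assuming `paddle.nn.BatchNorm1D`; not part of this commit):

```python
import paddle

x = paddle.rand((4, 8, 16))  # "NCL" layout: batch, channels, length
# With weight_attr=False and bias_attr=False the scale and bias are
# created as non-learnable parameters, per the docstrings above.
bn = paddle.nn.BatchNorm1D(8, weight_attr=False, bias_attr=False)
y = bn(x)
print(y.shape)  # [4, 8, 16]
```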
@@ -61,11 +61,11 @@ class BatchNorm(paddle.nn.BatchNorm1D):
 epsilon(float, optional): The small value added to the variance to prevent division by zero. Default: 1e-5.
 weight_attr(ParamAttr|bool, optional): The parameter attribute for Parameter `scale`
 of batch_norm. If it is set to None or one attribute of ParamAttr, batch_norm
-will create ParamAttr as weight_attr. If it is set to Fasle, the weight is not learnable.
+will create ParamAttr as weight_attr. If it is set to False, the weight is not learnable.
 If the Initializer of the weight_attr is not set, the parameter is initialized with Xavier. Default: None.
 bias_attr(ParamAttr|bool, optional): The parameter attribute for the bias of batch_norm.
 If it is set to None or one attribute of ParamAttr, batch_norm
-will create ParamAttr as bias_attr. If it is set to Fasle, the weight is not learnable.
+will create ParamAttr as bias_attr. If it is set to False, the weight is not learnable.
 If the Initializer of the bias_attr is not set, the bias is initialized zero. Default: None.
 data_format(str, optional): Specify the input data format, may be "NC", "NCL" or "NLC". Default "NCL".
 use_global_stats(bool|None, optional): Whether to use global mean and variance. If set to False, use the statistics of one mini-batch, if set to True, use the global statistics, if set to None, use global statistics in the test phase and use the statistics of one mini-batch in the training phase. Default: None.
@@ -347,7 +347,7 @@ def yolo_box(
 .. math::
 score_{conf} = \begin{case}
-obj, \text{if } iou_aware == flase \\
+obj, \text{if } iou_aware == false \\
 obj^{1 - iou_aware_factor} * iou^{iou_aware_factor}, \text{otherwise}
 \end{case}
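Finally, a usage sketch for the `iou_aware` flag that both `yolo_box` hunks document (assuming `paddle.vision.ops.yolo_box`; with `iou_aware=False` the confidence score is simply `obj`, per the formula above):

```python
import paddle

# 3 anchors, 80 classes: channels = 3 * (5 + 80) when iou_aware=False.
x = paddle.rand((1, 255, 13, 13))
img_size = paddle.full((1, 2), 608, dtype='int32')
boxes, scores = paddle.vision.ops.yolo_box(
    x,
    img_size=img_size,
    anchors=[10, 13, 16, 30, 33, 23],
    class_num=80,
    conf_thresh=0.01,
    downsample_ratio=32,
    iou_aware=False,
)
```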