From 42e56f65fa5809b3818e0630c488674e9d121f4b Mon Sep 17 00:00:00 2001 From: HongyuJia Date: Tue, 20 Sep 2022 19:14:00 +0800 Subject: [PATCH] [PolishComments] Polish some code comments (#46032) (#46261) * polish code comments * polish data_device_transform.cc --- paddle/fluid/framework/attribute_checker.h | 5 ++--- paddle/fluid/framework/custom_operator.cc | 2 +- paddle/fluid/framework/data_device_transform.cc | 3 +-- paddle/fluid/framework/operator.h | 2 +- paddle/fluid/imperative/infer_shape_context.h | 1 + paddle/fluid/operators/fill_any_like_op.cc | 2 +- paddle/fluid/pybind/cuda_streams_py.cc | 4 ++-- paddle/phi/api/lib/data_transform.cc | 8 ++++---- paddle/phi/kernels/cpu/put_along_axis_kernel.cc | 2 +- paddle/phi/kernels/gpu/put_along_axis_kernel.cu | 2 +- python/paddle/distributed/fleet/dataset/dataset.py | 6 +++--- python/paddle/fluid/contrib/sparsity/asp.py | 2 +- python/paddle/fluid/layers/nn.py | 2 +- .../fluid/tests/custom_kernel/test_custom_kernel_load.py | 2 +- python/paddle/fluid/tests/unittests/collective/README.md | 8 ++++---- python/paddle/incubate/sparse/nn/layer/norm.py | 2 +- python/paddle/nn/functional/norm.py | 6 +++--- python/paddle/nn/layer/norm.py | 4 ++-- python/paddle/tensor/linalg.py | 4 ++-- 19 files changed, 33 insertions(+), 34 deletions(-) diff --git a/paddle/fluid/framework/attribute_checker.h b/paddle/fluid/framework/attribute_checker.h index fbafe9c73a..24f3f0be96 100644 --- a/paddle/fluid/framework/attribute_checker.h +++ b/paddle/fluid/framework/attribute_checker.h @@ -342,13 +342,12 @@ class OpAttrChecker { AttributeMap default_attrs_; // in order to improve the efficiency of dynamic graph mode, - // we divede the attribute into explicit type and implicit type. + // we divide the attribute into explicit type and implicit type. // for explicit attribute, we mean the attribute added in the customized // op makers, usually it's defined in the overloaded Make method. 
// for implicit attribute, we mean the attribute added outside of the Make // method like "op_role", "op_role_var", and they are useless in dynamic - // graph - // mode + // graph mode size_t explicit_checker_num_; }; diff --git a/paddle/fluid/framework/custom_operator.cc b/paddle/fluid/framework/custom_operator.cc index 8c8d702e28..d3e0ed4293 100644 --- a/paddle/fluid/framework/custom_operator.cc +++ b/paddle/fluid/framework/custom_operator.cc @@ -801,7 +801,7 @@ void RegisterOperatorWithMetaInfo(const std::vector& op_meta_infos, // Infer Dtype if (infer_dtype_func == nullptr) { - // use defalut InferDtype + // use default InferDtype info.infer_var_type_ = [op_inputs, op_outputs](InferVarTypeContext* ctx) { PADDLE_ENFORCE_EQ( op_inputs.size(), diff --git a/paddle/fluid/framework/data_device_transform.cc b/paddle/fluid/framework/data_device_transform.cc index 36e558c1d5..e65ecff60e 100644 --- a/paddle/fluid/framework/data_device_transform.cc +++ b/paddle/fluid/framework/data_device_transform.cc @@ -51,8 +51,7 @@ void TransDataDevice(const Tensor &in, // the elements of learning rate are one and it's CPU side. // One solution is to use a CUDA kernel to complete the copy operation when // the transforming is from CPU to GPU and the number of elements is little. - // But the embarrassment is that this solution this solution makes training - // slower. + // But the embarrassment is that this solution makes training slower. TensorCopySync(in, dst_place, out); } diff --git a/paddle/fluid/framework/operator.h b/paddle/fluid/framework/operator.h index c82976d197..0faaee4843 100644 --- a/paddle/fluid/framework/operator.h +++ b/paddle/fluid/framework/operator.h @@ -682,7 +682,7 @@ class OperatorWithKernel : public OperatorBase { * Transfer data from scope to a transferred scope. If there is no data need * to be transferred, it returns nullptr. * - * * transfered_inplace_vars is a output vector. + * transfered_inplace_vars is a output vector. 
*/ Scope* PrepareData(const Scope& scope, const OpKernelType& expected_kernel_key, diff --git a/paddle/fluid/imperative/infer_shape_context.h b/paddle/fluid/imperative/infer_shape_context.h index b7345cf397..5702bcfca7 100644 --- a/paddle/fluid/imperative/infer_shape_context.h +++ b/paddle/fluid/imperative/infer_shape_context.h @@ -169,6 +169,7 @@ class DygraphInferShapeContext : public framework::InferShapeContext { return vec_res; } + std::string GetInputNameByIdx(size_t idx) const override { auto& op_proto = paddle::framework::OpInfoMap::Instance().Get(op_type_).proto_; diff --git a/paddle/fluid/operators/fill_any_like_op.cc b/paddle/fluid/operators/fill_any_like_op.cc index 528ea076a3..eb66cc88b3 100644 --- a/paddle/fluid/operators/fill_any_like_op.cc +++ b/paddle/fluid/operators/fill_any_like_op.cc @@ -58,7 +58,7 @@ class FillAnyLikeOpMaker : public framework::OpProtoAndCheckerMaker { AddOutput("Out", "The variable will be filled up with specified value."); AddAttr("value", "The filled value").SetDefault(0.0); AddAttr("dtype", - "Output tensor data type. defalut value is -1," + "Output tensor data type. default value is -1," "according to the input dtype.") .SetDefault(-1); AddComment(R"DOC( diff --git a/paddle/fluid/pybind/cuda_streams_py.cc b/paddle/fluid/pybind/cuda_streams_py.cc index 66cd20340c..8fa1df2a53 100644 --- a/paddle/fluid/pybind/cuda_streams_py.cc +++ b/paddle/fluid/pybind/cuda_streams_py.cc @@ -321,8 +321,8 @@ void BindCudaStream(py::module *m_ptr) { Parameters: enable_timing(bool, optional): Whether the event will measure time. Default: False. blocking(bool, optional): Whether the wait() func will be blocking. Default: False; - interprocess(bool, optional): Whether the event can be shared between processes. Defalut: False. - + interprocess(bool, optional): Whether the event can be shared between processes. Default: False. + Examples: .. 
code-block:: python diff --git a/paddle/phi/api/lib/data_transform.cc b/paddle/phi/api/lib/data_transform.cc index 72e65ae528..04ac701ae0 100644 --- a/paddle/phi/api/lib/data_transform.cc +++ b/paddle/phi/api/lib/data_transform.cc @@ -81,7 +81,7 @@ inline phi::DenseTensor TransDataLayout(const phi::DenseTensor& tensor, } template -phi::DenseTensor CastDateType(const Context& dev_ctx, +phi::DenseTensor CastDataType(const Context& dev_ctx, const phi::DenseTensor& tensor, DataType dtype) { switch (tensor.dtype()) { @@ -111,7 +111,7 @@ phi::DenseTensor CastDateType(const Context& dev_ctx, } #if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP) -phi::DenseTensor CastDateType(const phi::GPUContext& dev_ctx, +phi::DenseTensor CastDataType(const phi::GPUContext& dev_ctx, const phi::DenseTensor& tensor, DataType dtype) { switch (tensor.dtype()) { @@ -151,11 +151,11 @@ inline phi::DenseTensor TransDataType(const phi::DenseTensor& tensor, if (platform::is_cpu_place(tensor.place())) { auto* dev_ctx = static_cast(pool.Get(tensor.place())); - return CastDateType(*dev_ctx, tensor, dtype); + return CastDataType(*dev_ctx, tensor, dtype); #if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP) } else if (platform::is_gpu_place(tensor.place())) { auto* dev_ctx = static_cast(pool.Get(tensor.place())); - return CastDateType(*dev_ctx, tensor, dtype); + return CastDataType(*dev_ctx, tensor, dtype); #endif } else { PADDLE_THROW(phi::errors::Unimplemented( diff --git a/paddle/phi/kernels/cpu/put_along_axis_kernel.cc b/paddle/phi/kernels/cpu/put_along_axis_kernel.cc index a297843b0c..573065cbc6 100644 --- a/paddle/phi/kernels/cpu/put_along_axis_kernel.cc +++ b/paddle/phi/kernels/cpu/put_along_axis_kernel.cc @@ -67,7 +67,7 @@ void PutAlongAxisKernel(const Context& dev_ctx, PADDLE_THROW(errors::InvalidArgument( "can not support reduce: '%s' for scatter kernel, only " "support reduce op: 'add', 'assign', 'mul' and 'multiply', the " - "defalut reduce " + "default reduce " "op is 'assign' 
", reduce)); return; diff --git a/paddle/phi/kernels/gpu/put_along_axis_kernel.cu b/paddle/phi/kernels/gpu/put_along_axis_kernel.cu index b4fde608b1..648c0fa627 100644 --- a/paddle/phi/kernels/gpu/put_along_axis_kernel.cu +++ b/paddle/phi/kernels/gpu/put_along_axis_kernel.cu @@ -68,7 +68,7 @@ void PutAlongAxisKernel(const Context& dev_ctx, PADDLE_THROW(errors::InvalidArgument( "can not support reduce: '%s' for scatter kernel, only " "support reduce op: 'add', 'assign', 'mul' and 'multiply', the " - "defalut reduce op is 'assign' ", + "default reduce op is 'assign' ", reduce)); return; } diff --git a/python/paddle/distributed/fleet/dataset/dataset.py b/python/paddle/distributed/fleet/dataset/dataset.py index 3c6da4bd95..56bc6eb268 100755 --- a/python/paddle/distributed/fleet/dataset/dataset.py +++ b/python/paddle/distributed/fleet/dataset/dataset.py @@ -54,7 +54,7 @@ class DatasetBase(object): thread_num(int): thread num, it is the num of readers. default is 1. use_var(list): list of variables. Variables which you will use. default is []. pipe_command(str): pipe command of current dataset. A pipe command is a UNIX pipeline command that can be used only. default is "cat" - input_type(int): the input type of generated input. 0 is for one sample, 1 is for one batch. defalut is 0. + input_type(int): the input type of generated input. 0 is for one sample, 1 is for one batch. default is 0. fs_name(str): fs name. default is "". fs_ugi(str): fs ugi. default is "". download_cmd(str): customized download command. default is "cat" @@ -441,7 +441,7 @@ class InMemoryDataset(DatasetBase): batch_size(int): batch size. It will be effective during training. default is 1. thread_num(int): thread num, it is the num of readers. default is 1. use_var(list): list of variables. Variables which you will use. default is []. - input_type(int): the input type of generated input. 0 is for one sample, 1 is for one batch. defalut is 0. + input_type(int): the input type of generated input. 
0 is for one sample, 1 is for one batch. default is 0. fs_name(str): fs name. default is "". fs_ugi(str): fs ugi. default is "". pipe_command(str): pipe command of current dataset. A pipe command is a UNIX pipeline command that can be used only. default is "cat" @@ -522,7 +522,7 @@ class InMemoryDataset(DatasetBase): batch_size(int): batch size. It will be effective during training. default is 1. thread_num(int): thread num, it is the num of readers. default is 1. use_var(list): list of variables. Variables which you will use. default is []. - input_type(int): the input type of generated input. 0 is for one sample, 1 is for one batch. defalut is 0. + input_type(int): the input type of generated input. 0 is for one sample, 1 is for one batch. default is 0. fs_name(str): fs name. default is "". fs_ugi(str): fs ugi. default is "". pipe_command(str): pipe command of current dataset. A pipe command is a UNIX pipeline command that can be used only. default is "cat" diff --git a/python/paddle/fluid/contrib/sparsity/asp.py b/python/paddle/fluid/contrib/sparsity/asp.py index 40c96e0ce3..692591d770 100644 --- a/python/paddle/fluid/contrib/sparsity/asp.py +++ b/python/paddle/fluid/contrib/sparsity/asp.py @@ -316,7 +316,7 @@ def prune_model(model, n=2, m=4, mask_algo='mask_1d', with_mask=True): m (int, optional): m of `n:m` sparse pattern. Default is 4. mask_algo (string, optional): The function name to generate spase mask. Default is `mask_1d`. The vaild inputs should be one of 'mask_1d', 'mask_2d_greedy' and 'mask_2d_best'. - with_mask (bool, optional): To prune mask Variables related to parameters or not. Ture is purning also, False is not. Defalut is True. + with_mask (bool, optional): To prune mask Variables related to parameters or not. True is pruning also, False is not. Default is True. Returns: dictionary: A dictionary with key: `parameter name` (string) and value: its corresponding mask Variable.
Examples: diff --git a/python/paddle/fluid/layers/nn.py b/python/paddle/fluid/layers/nn.py index b4330f1c4a..b39284242e 100755 --- a/python/paddle/fluid/layers/nn.py +++ b/python/paddle/fluid/layers/nn.py @@ -14912,7 +14912,7 @@ def unique_with_counts(x, dtype='int32'): Args: x(Variable): A 1-D input tensor with input shape of :math:`[N]` , the input data type is float32, float64, int32, int64. - dtype(np.dtype|core.VarDesc.VarType|str): The type of count and index tensor, it could be int32, int64. Defalut value is int32. + dtype(np.dtype|core.VarDesc.VarType|str): The type of count and index tensor, it could be int32, int64. Default value is int32. Returns: tuple, the variable type in tuple is Tensor, the output :attr:`out` data type is the same as input :attr:`x`, \ diff --git a/python/paddle/fluid/tests/custom_kernel/test_custom_kernel_load.py b/python/paddle/fluid/tests/custom_kernel/test_custom_kernel_load.py index 4ca05909fb..ff7ff3e04a 100644 --- a/python/paddle/fluid/tests/custom_kernel/test_custom_kernel_load.py +++ b/python/paddle/fluid/tests/custom_kernel/test_custom_kernel_load.py @@ -48,7 +48,7 @@ class TestCustomKernelLoad(unittest.TestCase): paddle_lib_path = lib_dir self.default_path = os.path.sep.join( [paddle_lib_path, '..', '..', 'paddle-plugins']) - # copy so to defalut path + # copy so to default path cmd = 'mkdir -p {} && cp ./*.so {}'.format(self.default_path, self.default_path) os.system(cmd) # wait diff --git a/python/paddle/fluid/tests/unittests/collective/README.md b/python/paddle/fluid/tests/unittests/collective/README.md index 2370ce07e0..e4d3c90c30 100644 --- a/python/paddle/fluid/tests/unittests/collective/README.md +++ b/python/paddle/fluid/tests/unittests/collective/README.md @@ -8,11 +8,11 @@ * `name`: the test's name * `os`: The supported operator system, ignoring case. If the test run in multiple operator systems, use ";" to split systems, for example, `apple;linux` means the test runs on both Apple and Linux. 
The supported values are `linux`,`win32` and `apple`. If the value is empty, this means the test runs on all opertaor systems. * `arch`: the device's architecture. similar to `os`, multiple valuse ars splited by ";" and ignoring case. The supported architectures are `gpu`, `xpu`, `ASCEND`, `ASCEND_CL` and `rocm`. -* `timeout`: timeout of a unittest, whose unit is second. Blank means defalut. -* `run_type`: run_type of a unittest. Supported values are `NIGHTLY`, `EXCLUSIVE`, `CINN`, `DIST`, `GPUPS`, `INFER`, `EXCLUSIVE:NIGHTLY`, `DIST:NIGHTLY`,which are case-insensitive. +* `timeout`: timeout of a unittest, whose unit is second. Blank means default. +* `run_type`: run_type of a unittest. Supported values are `NIGHTLY`, `EXCLUSIVE`, `CINN`, `DIST`, `GPUPS`, `INFER`, `EXCLUSIVE:NIGHTLY`, `DIST:NIGHTLY`,which are case-insensitive. * `launcher`: the test launcher.Supported values are test_runner.py, dist_test.sh and custom scripts' name. Blank means test_runner.py. -* `num_port`: the number of port used in a distributed unit test. Blank means automatically distributed port. -* `run_serial`: whether in serial mode. the value can be 1 or 0.Default (empty) is 0. Blank means defalut. +* `num_port`: the number of port used in a distributed unit test. Blank means automatically distributed port. +* `run_serial`: whether in serial mode. the value can be 1 or 0.Default (empty) is 0. Blank means default. * `ENVS`: required environments. multiple envirenmonts are splited by ";". * `conditions`: extra required conditions for some tests. The value is a list of boolean expression in cmake programmer, splited with ";". For example, the value can be `WITH_DGC;NOT WITH_NCCL` or `WITH_NCCL;${NCCL_VERSION} VERSION_GREATER_EQUAL 2212`,The relationship between these expressions is a conjunction. 
diff --git a/python/paddle/incubate/sparse/nn/layer/norm.py b/python/paddle/incubate/sparse/nn/layer/norm.py index 776967ac04..c89e9a6b90 100644 --- a/python/paddle/incubate/sparse/nn/layer/norm.py +++ b/python/paddle/incubate/sparse/nn/layer/norm.py @@ -78,7 +78,7 @@ class BatchNorm(paddle.nn.BatchNorm1D): If it is set to None or one attribute of ParamAttr, batch_norm will create ParamAttr as bias_attr. If it is set to Fasle, the weight is not learnable. If the Initializer of the bias_attr is not set, the bias is initialized zero. Default: None. - data_format(str, optional): Specify the input data format, may be "NC", "NCL" or "NLC". Defalut "NCL". + data_format(str, optional): Specify the input data format, may be "NC", "NCL" or "NLC". Default "NCL". use_global_stats(bool|None, optional): Whether to use global mean and variance. If set to False, use the statistics of one mini-batch, if set to True, use the global statistics, if set to None, use global statistics in the test phase and use the statistics of one mini-batch in the training phase. Default: None. name(str, optional): Name for the BatchNorm, default is None. For more information, please refer to :ref:`api_guide_Name`.. diff --git a/python/paddle/nn/functional/norm.py b/python/paddle/nn/functional/norm.py index 03ba72fdda..1f5d743630 100644 --- a/python/paddle/nn/functional/norm.py +++ b/python/paddle/nn/functional/norm.py @@ -140,8 +140,8 @@ def batch_norm(x, bias(Tensor): The bias tensor of batch_norm can not be None. epsilon(float, optional): The small value added to the variance to prevent division by zero. Default: 1e-5. momentum(float, optional): The value used for the moving_mean and moving_var computation. Default: 0.9. - training(bool, optional): True means train mode which compute by batch data and track global mean and var during train period. False means inference mode which compute by global mean and var which calculated by train period. Defalut False. 
- data_format(str, optional): Specify the input data format, may be "NC", "NCL", "NCHW", "NCDHW", "NLC", "NHWC" or "NDHWC". Defalut "NCHW". + training(bool, optional): True means train mode which compute by batch data and track global mean and var during train period. False means inference mode which compute by global mean and var which calculated by train period. Default False. + data_format(str, optional): Specify the input data format, may be "NC", "NCL", "NCHW", "NCDHW", "NLC", "NHWC" or "NDHWC". Default "NCHW". use_global_stats(bool|None, optional): Whether to use global mean and variance. If set to False, use the statistics of one mini-batch, if set to True, use the global statistics, if set to None, use global statistics in the test phase and use the statistics of one mini-batch in the training phase. Default: None. name(str, optional): Name for the BatchNorm, default is None. For more information, please refer to :ref:`api_guide_Name`.. @@ -392,7 +392,7 @@ def instance_norm(x, eps(float, optional): A value added to the denominator for numerical stability. Default is 1e-5. momentum(float, optional): The value used for the moving_mean and moving_var computation. Default: 0.9. use_input_stats(bool): Default True. - data_format(str, optional): Specify the input data format, may be "NC", "NCL", "NCHW" or "NCDHW". Defalut "NCHW". + data_format(str, optional): Specify the input data format, may be "NC", "NCL", "NCHW" or "NCDHW". Default "NCHW". name(str, optional): Name for the InstanceNorm, default is None. For more information, please refer to :ref:`api_guide_Name`.. Returns: diff --git a/python/paddle/nn/layer/norm.py b/python/paddle/nn/layer/norm.py index 93d6b21c13..0a259b5812 100644 --- a/python/paddle/nn/layer/norm.py +++ b/python/paddle/nn/layer/norm.py @@ -144,7 +144,7 @@ class InstanceNorm1D(_InstanceNormBase): will create ParamAttr as bias_attr, the name of bias can be set in ParamAttr. 
If the Initializer of the bias_attr is not set, the bias is initialized zero. If it is set to False, will not create bias_attr. Default: None. - data_format(str, optional): Specify the input data format, may be "NC", "NCL". Defalut "NCL". + data_format(str, optional): Specify the input data format, may be "NC", "NCL". Default "NCL". name(str, optional): Name for the InstanceNorm, default is None. For more information, please refer to :ref:`api_guide_Name`.. @@ -743,7 +743,7 @@ class BatchNorm1D(_BatchNormBase): If it is set to None or one attribute of ParamAttr, batch_norm will create ParamAttr as bias_attr. If it is set to Fasle, the weight is not learnable. If the Initializer of the bias_attr is not set, the bias is initialized zero. Default: None. - data_format(str, optional): Specify the input data format, may be "NC", "NCL" or "NLC". Defalut "NCL". + data_format(str, optional): Specify the input data format, may be "NC", "NCL" or "NLC". Default "NCL". use_global_stats(bool|None, optional): Whether to use global mean and variance. If set to False, use the statistics of one mini-batch, if set to True, use the global statistics, if set to None, use global statistics in the test phase and use the statistics of one mini-batch in the training phase. Default: None. name(str, optional): Name for the BatchNorm, default is None. For more information, please refer to :ref:`api_guide_Name`.. diff --git a/python/paddle/tensor/linalg.py b/python/paddle/tensor/linalg.py index 700c6c340d..8765c7a504 100644 --- a/python/paddle/tensor/linalg.py +++ b/python/paddle/tensor/linalg.py @@ -276,7 +276,7 @@ def norm(x, p='fro', axis=None, keepdim=False, name=None): or list(int)/tuple(int) with only one element, the vector norm is computed over the axis. If `axis < 0`, the dimension to norm operation is rank(input) + axis. If axis is a list(int)/tuple(int) with two elements, the matrix norm is computed over the axis. - Defalut value is `None`. + Default value is `None`. 
keepdim (bool, optional): Whether to reserve the reduced dimension in the output Tensor. The result tensor will have fewer dimension than the :attr:`input` unless :attr:`keepdim` is true, default @@ -2589,7 +2589,7 @@ def pinv(x, rcond=1e-15, hermitian=False, name=None): True. rcond(Tensor, optional): the tolerance value to determine - when is a singular value zero. Defalut:1e-15. + when is a singular value zero. Default:1e-15. hermitian(bool, optional): indicates whether x is Hermitian if complex or symmetric if real. Default: False. -- GitLab