diff --git a/paddle/fluid/framework/attribute_checker.h b/paddle/fluid/framework/attribute_checker.h
index fbafe9c73a9cc636a72f2c66685122a0ef53e5c8..24f3f0be96b6cb2dd004c74bb0ed4a1f34d28ed3 100644
--- a/paddle/fluid/framework/attribute_checker.h
+++ b/paddle/fluid/framework/attribute_checker.h
@@ -342,13 +342,12 @@ class OpAttrChecker {
   AttributeMap default_attrs_;
 
   // in order to improve the efficiency of dynamic graph mode,
-  // we divede the attribute into explicit type and implicit type.
+  // we divide the attribute into explicit type and implicit type.
   // for explicit attribute, we mean the attribute added in the customized
   // op makers, usually it's defined in the overloaded Make method.
   // for implicit attribute, we mean the attribute added outside of the Make
   // method like "op_role", "op_role_var", and they are useless in dynamic
-  // graph
-  // mode
+  // graph mode
   size_t explicit_checker_num_;
 };
 
diff --git a/paddle/fluid/framework/custom_operator.cc b/paddle/fluid/framework/custom_operator.cc
index 8c8d702e28f42a5bd2689af0159f54ac35c3ac16..d3e0ed42935cfbf5cf3f3da10a50b0a11035a17d 100644
--- a/paddle/fluid/framework/custom_operator.cc
+++ b/paddle/fluid/framework/custom_operator.cc
@@ -801,7 +801,7 @@ void RegisterOperatorWithMetaInfo(const std::vector<OpMetaInfo>& op_meta_infos,
 
   // Infer Dtype
   if (infer_dtype_func == nullptr) {
-    // use defalut InferDtype
+    // use default InferDtype
     info.infer_var_type_ = [op_inputs, op_outputs](InferVarTypeContext* ctx) {
       PADDLE_ENFORCE_EQ(
           op_inputs.size(),
diff --git a/paddle/fluid/framework/data_device_transform.cc b/paddle/fluid/framework/data_device_transform.cc
index 36e558c1d504d8385af784153db70f0b84cf1a8a..e65ecff60edd76e77c1f881e4cbbcb79bc24b0a2 100644
--- a/paddle/fluid/framework/data_device_transform.cc
+++ b/paddle/fluid/framework/data_device_transform.cc
@@ -51,8 +51,7 @@ void TransDataDevice(const Tensor &in,
   // the elements of learning rate are one and it's CPU side.
   // One solution is to use a CUDA kernel to complete the copy operation when
   // the transforming is from CPU to GPU and the number of elements is little.
-  // But the embarrassment is that this solution this solution makes training
-  // slower.
+  // But the embarrassment is that this solution makes training slower.
   TensorCopySync(in, dst_place, out);
 }
 
diff --git a/paddle/fluid/framework/operator.h b/paddle/fluid/framework/operator.h
index c82976d197c27904bd6d195ac4caf86be5bdb5f3..0faaee48439234e87d6367cf46732822b55dbb1e 100644
--- a/paddle/fluid/framework/operator.h
+++ b/paddle/fluid/framework/operator.h
@@ -682,7 +682,7 @@ class OperatorWithKernel : public OperatorBase {
    * Transfer data from scope to a transferred scope. If there is no data need
    * to be transferred, it returns nullptr.
    *
-   * * transfered_inplace_vars is a output vector.
+   * transfered_inplace_vars is an output vector.
    */
   Scope* PrepareData(const Scope& scope,
                      const OpKernelType& expected_kernel_key,
diff --git a/paddle/fluid/imperative/infer_shape_context.h b/paddle/fluid/imperative/infer_shape_context.h
index b7345cf397356f99b9ef03dcfe5bb9741bfe9fc6..5702bcfca73296106b1d0a2cb24fd052150bd647 100644
--- a/paddle/fluid/imperative/infer_shape_context.h
+++ b/paddle/fluid/imperative/infer_shape_context.h
@@ -169,6 +169,7 @@ class DygraphInferShapeContext : public framework::InferShapeContext {
     return vec_res;
   }
 
+
   std::string GetInputNameByIdx(size_t idx) const override {
     auto& op_proto =
         paddle::framework::OpInfoMap::Instance().Get(op_type_).proto_;
diff --git a/paddle/fluid/operators/fill_any_like_op.cc b/paddle/fluid/operators/fill_any_like_op.cc
index 528ea076a322be63fde9eda7f871bb4a2fb7dcdb..eb66cc88b3145cecf245879b7ed9788fbec23b68 100644
--- a/paddle/fluid/operators/fill_any_like_op.cc
+++ b/paddle/fluid/operators/fill_any_like_op.cc
@@ -58,7 +58,7 @@ class FillAnyLikeOpMaker : public framework::OpProtoAndCheckerMaker {
     AddOutput("Out", "The variable will be filled up with specified value.");
     AddAttr<float>("value", "The filled value").SetDefault(0.0);
     AddAttr<int>("dtype",
-                 "Output tensor data type. defalut value is -1,"
+                 "Output tensor data type. default value is -1,"
                  "according to the input dtype.")
         .SetDefault(-1);
     AddComment(R"DOC(
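
Note: the `dtype` attribute corrected above backs `paddle.full_like`, where the `-1` default means "reuse the input's dtype". A minimal sketch of that behavior (tensor shapes and values are illustrative only):

```python
import paddle

x = paddle.ones([2, 3], dtype='float32')
y = paddle.full_like(x, 0.5)               # dtype omitted: keeps float32 (the -1 default)
z = paddle.full_like(x, 1, dtype='int64')  # dtype given: overrides the input dtype
```
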
diff --git a/paddle/fluid/pybind/cuda_streams_py.cc b/paddle/fluid/pybind/cuda_streams_py.cc
index 66cd20340ca857b35c65190ccf08aef4df7d2249..8fa1df2a53d3a30257d15caab9bac4e979edc838 100644
--- a/paddle/fluid/pybind/cuda_streams_py.cc
+++ b/paddle/fluid/pybind/cuda_streams_py.cc
@@ -321,8 +321,8 @@ void BindCudaStream(py::module *m_ptr) {
       Parameters:
         enable_timing(bool, optional): Whether the event will measure time. Default: False.
         blocking(bool, optional): Whether the wait() func will be blocking. Default: False;
-        interprocess(bool, optional): Whether the event can be shared between processes. Defalut: False.
-
+        interprocess(bool, optional): Whether the event can be shared between processes. Default: False.
+
       Examples:
         .. code-block:: python
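
Note: the docstring corrected above belongs to `paddle.device.cuda.Event`. A minimal sketch of the three constructor flags in use, assuming a CUDA build of Paddle:

```python
import paddle

paddle.set_device('gpu')
# all three flags default to False, as the corrected docstring states
event = paddle.device.cuda.Event(enable_timing=False, blocking=False,
                                 interprocess=False)
event.record()        # records the event on the current stream
print(event.query())  # True once all preceding stream work has finished
```
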
diff --git a/paddle/phi/api/lib/data_transform.cc b/paddle/phi/api/lib/data_transform.cc
index 72e65ae5286ee562097d6a075fb73125c15220ac..04ac701ae0f5979d6607fd5250303e5f0f61fa75 100644
--- a/paddle/phi/api/lib/data_transform.cc
+++ b/paddle/phi/api/lib/data_transform.cc
@@ -81,7 +81,7 @@ inline phi::DenseTensor TransDataLayout(const phi::DenseTensor& tensor,
 }
 
 template <typename Context>
-phi::DenseTensor CastDateType(const Context& dev_ctx,
+phi::DenseTensor CastDataType(const Context& dev_ctx,
                               const phi::DenseTensor& tensor,
                               DataType dtype) {
   switch (tensor.dtype()) {
@@ -111,7 +111,7 @@ phi::DenseTensor CastDateType(const Context& dev_ctx,
 }
 
 #if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
-phi::DenseTensor CastDateType(const phi::GPUContext& dev_ctx,
+phi::DenseTensor CastDataType(const phi::GPUContext& dev_ctx,
                               const phi::DenseTensor& tensor,
                               DataType dtype) {
   switch (tensor.dtype()) {
@@ -151,11 +151,11 @@ inline phi::DenseTensor TransDataType(const phi::DenseTensor& tensor,
 
   if (platform::is_cpu_place(tensor.place())) {
     auto* dev_ctx = static_cast<phi::CPUContext*>(pool.Get(tensor.place()));
-    return CastDateType(*dev_ctx, tensor, dtype);
+    return CastDataType(*dev_ctx, tensor, dtype);
 #if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
   } else if (platform::is_gpu_place(tensor.place())) {
     auto* dev_ctx = static_cast<phi::GPUContext*>(pool.Get(tensor.place()));
-    return CastDateType(*dev_ctx, tensor, dtype);
+    return CastDataType(*dev_ctx, tensor, dtype);
 #endif
   } else {
     PADDLE_THROW(phi::errors::Unimplemented(
diff --git a/paddle/phi/kernels/cpu/put_along_axis_kernel.cc b/paddle/phi/kernels/cpu/put_along_axis_kernel.cc
index a297843b0c7cddd5d07f5cba71ccc279ebb7784f..573065cbc661573c4414993f059261bb94ce27a8 100644
--- a/paddle/phi/kernels/cpu/put_along_axis_kernel.cc
+++ b/paddle/phi/kernels/cpu/put_along_axis_kernel.cc
@@ -67,7 +67,7 @@ void PutAlongAxisKernel(const Context& dev_ctx,
     PADDLE_THROW(errors::InvalidArgument(
         "can not support reduce: '%s' for scatter kernel, only "
         "support reduce op: 'add', 'assign', 'mul' and 'multiply', the "
-        "defalut reduce "
+        "default reduce "
         "op is 'assign' ",
         reduce));
     return;
diff --git a/paddle/phi/kernels/gpu/put_along_axis_kernel.cu b/paddle/phi/kernels/gpu/put_along_axis_kernel.cu
index b4fde608b1e7883ffc37cfaaff22aac108549790..648c0fa627b25349745c9945c73e9b9b78becd9b 100644
--- a/paddle/phi/kernels/gpu/put_along_axis_kernel.cu
+++ b/paddle/phi/kernels/gpu/put_along_axis_kernel.cu
@@ -68,7 +68,7 @@ void PutAlongAxisKernel(const Context& dev_ctx,
     PADDLE_THROW(errors::InvalidArgument(
         "can not support reduce: '%s' for scatter kernel, only "
         "support reduce op: 'add', 'assign', 'mul' and 'multiply', the "
-        "defalut reduce op is 'assign' ",
+        "default reduce op is 'assign' ",
         reduce));
     return;
   }
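
Note: the corrected error message above is surfaced through `paddle.put_along_axis`, whose `reduce` argument defaults to `'assign'`. A minimal sketch (values are illustrative):

```python
import paddle

x = paddle.to_tensor([[10.0, 30.0, 20.0], [60.0, 40.0, 50.0]])
index = paddle.to_tensor([[0, 0, 0]], dtype='int64')
# scatter 99.0 into row 0, accumulating with 'add'
out = paddle.put_along_axis(x, index, 99.0, axis=0, reduce='add')
# any reduce value other than 'add', 'assign', 'mul'/'multiply' raises
# the InvalidArgument error fixed in the hunks above
```
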
diff --git a/python/paddle/distributed/fleet/dataset/dataset.py b/python/paddle/distributed/fleet/dataset/dataset.py
index 3c6da4bd957cf98e74a8a00c7bdc2ff5fd3e02ba..56bc6eb268a779204bccee94fec85ef1d87c54a2 100755
--- a/python/paddle/distributed/fleet/dataset/dataset.py
+++ b/python/paddle/distributed/fleet/dataset/dataset.py
@@ -54,7 +54,7 @@ class DatasetBase(object):
             thread_num(int): thread num, it is the num of readers. default is 1.
             use_var(list): list of variables. Variables which you will use. default is [].
             pipe_command(str): pipe command of current dataset. A pipe command is a UNIX pipeline command that can be used only. default is "cat"
-            input_type(int): the input type of generated input. 0 is for one sample, 1 is for one batch. defalut is 0.
+            input_type(int): the input type of generated input. 0 is for one sample, 1 is for one batch. default is 0.
             fs_name(str): fs name. default is "".
             fs_ugi(str): fs ugi. default is "".
             download_cmd(str): customized download command. default is "cat"
@@ -441,7 +441,7 @@ class InMemoryDataset(DatasetBase):
             batch_size(int): batch size. It will be effective during training. default is 1.
             thread_num(int): thread num, it is the num of readers. default is 1.
             use_var(list): list of variables. Variables which you will use. default is [].
-            input_type(int): the input type of generated input. 0 is for one sample, 1 is for one batch. defalut is 0.
+            input_type(int): the input type of generated input. 0 is for one sample, 1 is for one batch. default is 0.
             fs_name(str): fs name. default is "".
             fs_ugi(str): fs ugi. default is "".
             pipe_command(str): pipe command of current dataset. A pipe command is a UNIX pipeline command that can be used only. default is "cat"
@@ -522,7 +522,7 @@ class InMemoryDataset(DatasetBase):
             batch_size(int): batch size. It will be effective during training. default is 1.
             thread_num(int): thread num, it is the num of readers. default is 1.
             use_var(list): list of variables. Variables which you will use. default is [].
-            input_type(int): the input type of generated input. 0 is for one sample, 1 is for one batch. defalut is 0.
+            input_type(int): the input type of generated input. 0 is for one sample, 1 is for one batch. default is 0.
             fs_name(str): fs name. default is "".
             fs_ugi(str): fs ugi. default is "".
             pipe_command(str): pipe command of current dataset. A pipe command is a UNIX pipeline command that can be used only. default is "cat"
diff --git a/python/paddle/fluid/contrib/sparsity/asp.py b/python/paddle/fluid/contrib/sparsity/asp.py
index 40c96e0ce3403dd840e1f9e277f67b1629115559..692591d770f5dc74a168326a8a8a97f119501494 100644
--- a/python/paddle/fluid/contrib/sparsity/asp.py
+++ b/python/paddle/fluid/contrib/sparsity/asp.py
@@ -316,7 +316,7 @@ def prune_model(model, n=2, m=4, mask_algo='mask_1d', with_mask=True):
         m (int, optional): m of `n:m` sparse pattern. Default is 4.
         mask_algo (string, optional): The function name to generate spase mask. Default is `mask_1d`.
                                       The vaild inputs should be one of 'mask_1d', 'mask_2d_greedy' and 'mask_2d_best'.
-        with_mask (bool, optional): To prune mask Variables related to parameters or not. Ture is purning also, False is not. Defalut is True.
+        with_mask (bool, optional): To prune mask Variables related to parameters or not. True is pruning also, False is not. Default is True.
     Returns:
         dictionary: A dictionary with key: `parameter name` (string) and value: its corresponding mask Variable.
     Examples:
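
Note: `prune_model` above is the ASP (automatic sparsity) entry point whose signature the hunk header shows. A condensed, hedged sketch of the documented static-graph flow (network shapes and variable names are illustrative only):

```python
import paddle
from paddle.static import sparsity

paddle.enable_static()
main_program = paddle.static.Program()
startup_program = paddle.static.Program()
with paddle.static.program_guard(main_program, startup_program):
    x = paddle.static.data(name='x', shape=[None, 128], dtype='float32')
    label = paddle.static.data(name='label', shape=[None, 10], dtype='float32')
    prediction = paddle.static.nn.fc(x=x, size=10)
    loss = paddle.mean(paddle.nn.functional.square_error_cost(prediction, label))
    # decorate() registers the ASP mask Variables that with_mask=True prunes
    optimizer = sparsity.decorate(paddle.optimizer.SGD(learning_rate=0.1))
    optimizer.minimize(loss, startup_program)

exe = paddle.static.Executor(paddle.CPUPlace())
exe.run(startup_program)
# prune supported weights to a 2:4 sparse pattern
sparsity.prune_model(main_program, n=2, m=4, mask_algo='mask_1d')
```
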
diff --git a/python/paddle/fluid/layers/nn.py b/python/paddle/fluid/layers/nn.py
index b4330f1c4a78bf8d4296cde2162edb91d1a4f3fe..b39284242ec088db9bd973baaa92d06690545ee5 100755
--- a/python/paddle/fluid/layers/nn.py
+++ b/python/paddle/fluid/layers/nn.py
@@ -14912,7 +14912,7 @@ def unique_with_counts(x, dtype='int32'):
     Args:
         x(Variable): A 1-D input tensor with input shape of :math:`[N]` , the input data type is float32, float64, int32, int64.
-        dtype(np.dtype|core.VarDesc.VarType|str): The type of count and index tensor, it could be int32, int64. Defalut value is int32.
+        dtype(np.dtype|core.VarDesc.VarType|str): The type of count and index tensor, it could be int32, int64. Default value is int32.
 
     Returns:
         tuple, the variable type in tuple is Tensor, the output :attr:`out` data type is the same as input :attr:`x`, \
diff --git a/python/paddle/fluid/tests/custom_kernel/test_custom_kernel_load.py b/python/paddle/fluid/tests/custom_kernel/test_custom_kernel_load.py
index 4ca05909fb17ad9d99e863126dc76c0ab3f11075..ff7ff3e04a88e569af7b97266d41381554b912d3 100644
--- a/python/paddle/fluid/tests/custom_kernel/test_custom_kernel_load.py
+++ b/python/paddle/fluid/tests/custom_kernel/test_custom_kernel_load.py
@@ -48,7 +48,7 @@ class TestCustomKernelLoad(unittest.TestCase):
         paddle_lib_path = lib_dir
         self.default_path = os.path.sep.join(
             [paddle_lib_path, '..', '..', 'paddle-plugins'])
-        # copy so to defalut path
+        # copy so to default path
         cmd = 'mkdir -p {} && cp ./*.so {}'.format(self.default_path,
                                                    self.default_path)
         os.system(cmd)  # wait
diff --git a/python/paddle/fluid/tests/unittests/collective/README.md b/python/paddle/fluid/tests/unittests/collective/README.md
index 2370ce07e05b4a77c0822e3e986e094030cc04b2..e4d3c90c309dd1a348d0629b0ec86bd58181b60a 100644
--- a/python/paddle/fluid/tests/unittests/collective/README.md
+++ b/python/paddle/fluid/tests/unittests/collective/README.md
@@ -8,11 +8,11 @@
 
 * `name`: the test's name
 * `os`: The supported operator system, ignoring case. If the test run in multiple operator systems, use ";" to split systems, for example, `apple;linux` means the test runs on both Apple and Linux. The supported values are `linux`,`win32` and `apple`. If the value is empty, this means the test runs on all opertaor systems.
 * `arch`: the device's architecture. similar to `os`, multiple valuse ars splited by ";" and ignoring case. The supported architectures are `gpu`, `xpu`, `ASCEND`, `ASCEND_CL` and `rocm`.
-* `timeout`: timeout of a unittest, whose unit is second. Blank means defalut.
-* `run_type`: run_type of a unittest. Supported values are `NIGHTLY`, `EXCLUSIVE`, `CINN`, `DIST`, `GPUPS`, `INFER`, `EXCLUSIVE:NIGHTLY`, `DIST:NIGHTLY`,which are case-insensitive.
+* `timeout`: timeout of a unittest, whose unit is second. Blank means default.
+* `run_type`: run_type of a unittest. Supported values are `NIGHTLY`, `EXCLUSIVE`, `CINN`, `DIST`, `GPUPS`, `INFER`, `EXCLUSIVE:NIGHTLY`, `DIST:NIGHTLY`, which are case-insensitive.
 * `launcher`: the test launcher.Supported values are test_runner.py, dist_test.sh and custom scripts' name. Blank means test_runner.py.
-* `num_port`: the number of port used in a distributed unit test. Blank means automatically distributed port.
-* `run_serial`: whether in serial mode. the value can be 1 or 0.Default (empty) is 0. Blank means defalut.
+* `num_port`: the number of port used in a distributed unit test. Blank means automatically distributed port.
+* `run_serial`: whether in serial mode. The value can be 1 or 0. Default (empty) is 0. Blank means default.
 * `ENVS`: required environments. multiple envirenmonts are splited by ";".
 * `conditions`: extra required conditions for some tests. The value is a list of boolean expression in cmake programmer, splited with ";". For example, the value can be `WITH_DGC;NOT WITH_NCCL` or `WITH_NCCL;${NCCL_VERSION} VERSION_GREATER_EQUAL 2212`,The relationship between these expressions is a conjunction.
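
Note: putting the corrected column descriptions together, a hypothetical row in the tests list (every name and value here is invented for illustration) would read `test_collective_allreduce_api,linux,gpu;rocm,300,DIST,test_runner.py,,1,http_proxy=;https_proxy=,WITH_NCCL` — ten comma-separated fields in the order listed above, with blank fields falling back to the defaults just described.
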
diff --git a/python/paddle/incubate/sparse/nn/layer/norm.py b/python/paddle/incubate/sparse/nn/layer/norm.py
index 776967ac04dc00549e0b54bc91d2b6047f6caaa4..c89e9a6b906d128c021fc509443fb21283c28ccb 100644
--- a/python/paddle/incubate/sparse/nn/layer/norm.py
+++ b/python/paddle/incubate/sparse/nn/layer/norm.py
@@ -78,7 +78,7 @@ class BatchNorm(paddle.nn.BatchNorm1D):
             If it is set to None or one attribute of ParamAttr, batch_norm
             will create ParamAttr as bias_attr. If it is set to Fasle, the weight is not learnable.
             If the Initializer of the bias_attr is not set, the bias is initialized zero. Default: None.
-        data_format(str, optional): Specify the input data format, may be "NC", "NCL" or "NLC". Defalut "NCL".
+        data_format(str, optional): Specify the input data format, may be "NC", "NCL" or "NLC". Default "NCL".
         use_global_stats(bool|None, optional): Whether to use global mean and variance. If set to False, use the statistics of one mini-batch, if set to True, use the global statistics, if set to None, use global statistics in the test phase and use the statistics of one mini-batch in the training phase. Default: None.
         name(str, optional): Name for the BatchNorm, default is None. For more information, please refer to :ref:`api_guide_Name`..
 
diff --git a/python/paddle/nn/functional/norm.py b/python/paddle/nn/functional/norm.py
index 03ba72fdda344e1d768e1987a68afb32ece1a42c..1f5d743630283d3b3ed4cf39571e2d9c0c148b6e 100644
--- a/python/paddle/nn/functional/norm.py
+++ b/python/paddle/nn/functional/norm.py
@@ -140,8 +140,8 @@ def batch_norm(x,
         bias(Tensor): The bias tensor of batch_norm can not be None.
         epsilon(float, optional): The small value added to the variance to prevent division by zero. Default: 1e-5.
         momentum(float, optional): The value used for the moving_mean and moving_var computation. Default: 0.9.
-        training(bool, optional): True means train mode which compute by batch data and track global mean and var during train period. False means inference mode which compute by global mean and var which calculated by train period. Defalut False.
-        data_format(str, optional): Specify the input data format, may be "NC", "NCL", "NCHW", "NCDHW", "NLC", "NHWC" or "NDHWC". Defalut "NCHW".
+        training(bool, optional): True means train mode which compute by batch data and track global mean and var during train period. False means inference mode which compute by global mean and var which calculated by train period. Default False.
+        data_format(str, optional): Specify the input data format, may be "NC", "NCL", "NCHW", "NCDHW", "NLC", "NHWC" or "NDHWC". Default "NCHW".
         use_global_stats(bool|None, optional): Whether to use global mean and variance. If set to False, use the statistics of one mini-batch, if set to True, use the global statistics, if set to None, use global statistics in the test phase and use the statistics of one mini-batch in the training phase. Default: None.
         name(str, optional): Name for the BatchNorm, default is None. For more information, please refer to :ref:`api_guide_Name`..
@@ -392,7 +392,7 @@ def instance_norm(x,
         eps(float, optional): A value added to the denominator for numerical stability. Default is 1e-5.
         momentum(float, optional): The value used for the moving_mean and moving_var computation. Default: 0.9.
         use_input_stats(bool): Default True.
-        data_format(str, optional): Specify the input data format, may be "NC", "NCL", "NCHW" or "NCDHW". Defalut "NCHW".
+        data_format(str, optional): Specify the input data format, may be "NC", "NCL", "NCHW" or "NCDHW". Default "NCHW".
         name(str, optional): Name for the InstanceNorm, default is None. For more information, please refer to :ref:`api_guide_Name`..
 
     Returns:
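
Note: the corrected `training` and `data_format` defaults above can be seen together in a minimal dygraph sketch (shapes and statistics are illustrative only):

```python
import paddle

x = paddle.rand([2, 4, 8])          # 3-D input in the "NCL" layout
running_mean = paddle.zeros([4])
running_var = paddle.ones([4])
weight = paddle.ones([4])
bias = paddle.zeros([4])
# training defaults to False: normalize with the global (moving) statistics
out = paddle.nn.functional.batch_norm(x, running_mean, running_var, weight,
                                      bias, training=False, momentum=0.9,
                                      epsilon=1e-5, data_format='NCL')
```
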
diff --git a/python/paddle/nn/layer/norm.py b/python/paddle/nn/layer/norm.py
index 93d6b21c13f5481bb9853886cd4312d6e098c265..0a259b581256aff4b7d6fdfab1714b7c3f00b2dd 100644
--- a/python/paddle/nn/layer/norm.py
+++ b/python/paddle/nn/layer/norm.py
@@ -144,7 +144,7 @@ class InstanceNorm1D(_InstanceNormBase):
             will create ParamAttr as bias_attr, the name of bias can be set in ParamAttr.
             If the Initializer of the bias_attr is not set, the bias is initialized zero.
             If it is set to False, will not create bias_attr. Default: None.
-        data_format(str, optional): Specify the input data format, may be "NC", "NCL". Defalut "NCL".
+        data_format(str, optional): Specify the input data format, may be "NC", "NCL". Default "NCL".
         name(str, optional): Name for the InstanceNorm, default is None. For more information, please refer to :ref:`api_guide_Name`..
 
@@ -743,7 +743,7 @@ class BatchNorm1D(_BatchNormBase):
             If it is set to None or one attribute of ParamAttr, batch_norm
             will create ParamAttr as bias_attr. If it is set to Fasle, the weight is not learnable.
             If the Initializer of the bias_attr is not set, the bias is initialized zero. Default: None.
-        data_format(str, optional): Specify the input data format, may be "NC", "NCL" or "NLC". Defalut "NCL".
+        data_format(str, optional): Specify the input data format, may be "NC", "NCL" or "NLC". Default "NCL".
         use_global_stats(bool|None, optional): Whether to use global mean and variance. If set to False, use the statistics of one mini-batch, if set to True, use the global statistics, if set to None, use global statistics in the test phase and use the statistics of one mini-batch in the training phase. Default: None.
         name(str, optional): Name for the BatchNorm, default is None. For more information, please refer to :ref:`api_guide_Name`..
diff --git a/python/paddle/tensor/linalg.py b/python/paddle/tensor/linalg.py
index 700c6c340dc3c57ab15e00c77fcf3e4ca7308430..8765c7a50496d3a13dd203f14653a75904ab1a11 100644
--- a/python/paddle/tensor/linalg.py
+++ b/python/paddle/tensor/linalg.py
@@ -276,7 +276,7 @@ def norm(x, p='fro', axis=None, keepdim=False, name=None):
             or list(int)/tuple(int) with only one element, the vector norm is computed over the axis.
             If `axis < 0`, the dimension to norm operation is rank(input) + axis.
             If axis is a list(int)/tuple(int) with two elements, the matrix norm is computed over the axis.
-            Defalut value is `None`.
+            Default value is `None`.
         keepdim (bool, optional): Whether to reserve the reduced dimension in the
             output Tensor. The result tensor will have fewer dimension
             than the :attr:`input` unless :attr:`keepdim` is true, default
@@ -2589,7 +2589,7 @@ def pinv(x, rcond=1e-15, hermitian=False, name=None):
             True.
 
         rcond(Tensor, optional): the tolerance value to determine
-            when is a singular value zero. Defalut:1e-15.
+            when a singular value is zero. Default: 1e-15.
         hermitian(bool, optional): indicates whether x is Hermitian
             if complex or symmetric if real. Default: False.
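
Note: the `axis` semantics documented above decide whether `paddle.linalg.norm` computes a vector or a matrix norm; a brief sketch of both, plus `pinv` with its `rcond` cutoff (input values are illustrative):

```python
import paddle

x = paddle.arange(12, dtype='float32').reshape([3, 4])
fro = paddle.linalg.norm(x)                 # axis=None, p='fro': Frobenius norm
rows = paddle.linalg.norm(x, p=2, axis=-1)  # vector 2-norm over the last axis
# singular values below rcond * (largest singular value) are treated as zero
x_pinv = paddle.linalg.pinv(x, rcond=1e-15)
```
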