From 92121d17dbd14233469157f5014d41a94ccaf462 Mon Sep 17 00:00:00 2001
From: co63oc
Date: Thu, 18 May 2023 10:37:36 +0800
Subject: [PATCH] Fix typos, test=document_fix (#53916)

---
 python/paddle/amp/debugging.py                               | 4 ++--
 python/paddle/device/__init__.py                             | 4 ++--
 python/paddle/distributed/fleet/base/distributed_strategy.py | 2 +-
 python/paddle/distribution/beta.py                           | 2 +-
 python/paddle/distribution/kl.py                             | 2 +-
 python/paddle/distribution/transform.py                      | 2 +-
 python/paddle/fluid/dygraph/learning_rate_scheduler.py       | 2 +-
 python/paddle/fluid/multiprocess_utils.py                    | 2 +-
 python/paddle/fluid/tests/unittests/cc_imp_py_test.cc        | 2 +-
 python/paddle/io/multiprocess_utils.py                       | 2 +-
 python/paddle/optimizer/lr.py                                | 2 +-
 11 files changed, 13 insertions(+), 13 deletions(-)

diff --git a/python/paddle/amp/debugging.py b/python/paddle/amp/debugging.py
index 3672ecd6b35..3c99e003490 100644
--- a/python/paddle/amp/debugging.py
+++ b/python/paddle/amp/debugging.py
@@ -315,7 +315,7 @@ def enable_operator_stats_collection():
     """
     Enable to collect the number of operators for different data types.
     The statistical data are categorized according to four data types, namely
-    float32, float16, bfloat16 and others. This funciton is used in pair with
+    float32, float16, bfloat16 and others. This function is used in pair with
     the corresponding disable function.
 
     Examples:
@@ -351,7 +351,7 @@ def enable_operator_stats_collection():
 def disable_operator_stats_collection():
     """
     Disable the collection the number of operators for different data types.
-    This funciton is used in pair with the corresponding enable function.
+    This function is used in pair with the corresponding enable function.
     The statistical data are categorized according to four data types, namely
     float32, float16, bfloat16 and others, and will be printed after the
     function call.
diff --git a/python/paddle/device/__init__.py b/python/paddle/device/__init__.py
index 5fce6b1442f..c19e6e08b4d 100644
--- a/python/paddle/device/__init__.py
+++ b/python/paddle/device/__init__.py
@@ -135,7 +135,7 @@ def XPUPlace(dev_id):
 
 def get_cudnn_version():
     """
-    This funciton return the version of cudnn. the retuen value is int which represents the
+    This function return the version of cudnn. the retuen value is int which represents the
     cudnn version. For example, if it return 7600, it represents the version of cudnn is 7.6.
 
     Returns:
@@ -270,7 +270,7 @@ def set_device(device):
 
 def get_device():
     """
-    This funciton can get the current global device of the program is running.
+    This function can get the current global device of the program is running.
     It's a string which is like 'cpu', 'gpu:x', 'xpu:x' and 'npu:x'. if the
     global device is not set, it will return a string which is 'gpu:x' when cuda is
     avaliable or it will return a string which is 'cpu' when cuda is not avaliable.
diff --git a/python/paddle/distributed/fleet/base/distributed_strategy.py b/python/paddle/distributed/fleet/base/distributed_strategy.py
index b7519891024..87d9f77018b 100755
--- a/python/paddle/distributed/fleet/base/distributed_strategy.py
+++ b/python/paddle/distributed/fleet/base/distributed_strategy.py
@@ -2388,7 +2388,7 @@ class DistributedStrategy:
         """
         The workspace limit size in MB unit for choosing cuDNN convolution algorithms.
-        The inner funciton of cuDNN obtain the fastest suited algorithm that fits within this memory limit.
+        The inner function of cuDNN obtain the fastest suited algorithm that fits within this memory limit.
         Usually, large workspace size may lead to choose faster algorithms,
         but significant increasing memory workspace. Users need to trade-off
         between memory and speed.
         Default Value: 4000
diff --git a/python/paddle/distribution/beta.py b/python/paddle/distribution/beta.py
index ebf373bf6b1..64da77b978f 100644
--- a/python/paddle/distribution/beta.py
+++ b/python/paddle/distribution/beta.py
@@ -120,7 +120,7 @@ class Beta(exponential_family.ExponentialFamily):
         return paddle.exp(self.log_prob(value))
 
     def log_prob(self, value):
-        """Log probability density funciton evaluated at value
+        """Log probability density function evaluated at value
 
         Args:
             value (Tensor): Value to be evaluated
diff --git a/python/paddle/distribution/kl.py b/python/paddle/distribution/kl.py
index a47eefc7893..40be41e06c5 100644
--- a/python/paddle/distribution/kl.py
+++ b/python/paddle/distribution/kl.py
@@ -73,7 +73,7 @@ def register_kl(cls_p, cls_q):
     functions registered by ``register_kl``, according to multi-dispatch pattern.
     If an implemention function is found, it will return the result, otherwise,
     it will raise ``NotImplementError`` exception. Users can register
-    implemention funciton by the decorator.
+    implemention function by the decorator.
 
     Args:
         cls_p (Distribution): The Distribution type of Instance p. Subclass derived from ``Distribution``.
diff --git a/python/paddle/distribution/transform.py b/python/paddle/distribution/transform.py
index f1ee702c15b..53316911559 100644
--- a/python/paddle/distribution/transform.py
+++ b/python/paddle/distribution/transform.py
@@ -66,7 +66,7 @@ class Transform:
     Suppose :math:`X` is a K-dimensional random variable with probability
     density function :math:`p_X(x)`. A new random variable :math:`Y = f(X)` may
-    be defined by transforming :math:`X` with a suitably well-behaved funciton
+    be defined by transforming :math:`X` with a suitably well-behaved function
     :math:`f`. It suffices for what follows to note that if `f` is one-to-one
     and its inverse :math:`f^{-1}` have a well-defined Jacobian, then the
     density of :math:`Y` is
diff --git a/python/paddle/fluid/dygraph/learning_rate_scheduler.py b/python/paddle/fluid/dygraph/learning_rate_scheduler.py
index 9951f5a7c40..2da10b2e52d 100644
--- a/python/paddle/fluid/dygraph/learning_rate_scheduler.py
+++ b/python/paddle/fluid/dygraph/learning_rate_scheduler.py
@@ -1234,7 +1234,7 @@ class LambdaDecay(_LearningRateEpochDecay):
     :api_attr: imperative
 
     Sets the learning rate of ``optimizer`` to the initial lr times a multiplicative factor, and this multiplicative
-    factor is computed by function ``lr_lambda`` . ``lr_lambda`` is funciton which receives ``epoch`` .
+    factor is computed by function ``lr_lambda`` . ``lr_lambda`` is function which receives ``epoch`` .
 
     The algorithm can be described as the code below.
diff --git a/python/paddle/fluid/multiprocess_utils.py b/python/paddle/fluid/multiprocess_utils.py
index bc7d58d1284..b763446930f 100644
--- a/python/paddle/fluid/multiprocess_utils.py
+++ b/python/paddle/fluid/multiprocess_utils.py
@@ -44,7 +44,7 @@ def _clear_multiprocess_queue_set():
 def _cleanup():
     # NOTE: inter-process Queue shared memory objects clear function
     _clear_multiprocess_queue_set()
-    # NOTE: main process memory map files clear funciton
+    # NOTE: main process memory map files clear function
     core._cleanup_mmap_fds()
 
 
diff --git a/python/paddle/fluid/tests/unittests/cc_imp_py_test.cc b/python/paddle/fluid/tests/unittests/cc_imp_py_test.cc
index a0b9ec5f9f6..e36c8527369 100644
--- a/python/paddle/fluid/tests/unittests/cc_imp_py_test.cc
+++ b/python/paddle/fluid/tests/unittests/cc_imp_py_test.cc
@@ -25,7 +25,7 @@ TEST(CC, IMPORT_PY) {
   ASSERT_FALSE(PyRun_SimpleString("import paddle"));
   ASSERT_FALSE(PyRun_SimpleString("print(paddle.to_tensor(1))"));
 
-  // 2. C/C++ Run Python funciton
+  // 2. C/C++ Run Python function
   PyRun_SimpleString("import sys");
   PyRun_SimpleString("import os");
   PyRun_SimpleString("sys.path.append(os.getcwd())");
diff --git a/python/paddle/io/multiprocess_utils.py b/python/paddle/io/multiprocess_utils.py
index 5792983ceb4..51b0c2b8182 100644
--- a/python/paddle/io/multiprocess_utils.py
+++ b/python/paddle/io/multiprocess_utils.py
@@ -45,7 +45,7 @@ def _clear_multiprocess_queue_set():
 def _cleanup():
     # NOTE: inter-process Queue shared memory objects clear function
     _clear_multiprocess_queue_set()
-    # NOTE: main process memory map files clear funciton
+    # NOTE: main process memory map files clear function
     core._cleanup_mmap_fds()
 
 
diff --git a/python/paddle/optimizer/lr.py b/python/paddle/optimizer/lr.py
index ad639a06c7a..d28abf5d631 100644
--- a/python/paddle/optimizer/lr.py
+++ b/python/paddle/optimizer/lr.py
@@ -1138,7 +1138,7 @@ class StepDecay(LRScheduler):
 
 class LambdaDecay(LRScheduler):
     """
-    Sets the learning rate of ``optimizer`` by function ``lr_lambda`` . ``lr_lambda`` is funciton which receives ``epoch`` .
+    Sets the learning rate of ``optimizer`` by function ``lr_lambda`` . ``lr_lambda`` is function which receives ``epoch`` .
 
     The algorithm can be described as the code below.
--
GitLab
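
The two ``LambdaDecay`` hunks above document the same rule: the scheduled rate is the base ``learning_rate`` times ``lr_lambda(epoch)``. As a reading aid only (not part of the patch), here is a minimal sketch against the public paddle.optimizer.lr.LambdaDecay API; it assumes an installed Paddle, and the numeric values are illustrative, not taken from the diff:

    import paddle

    # LambdaDecay: scheduled_lr = learning_rate * lr_lambda(epoch)
    scheduler = paddle.optimizer.lr.LambdaDecay(
        learning_rate=0.5,                    # base learning rate (illustrative)
        lr_lambda=lambda epoch: 0.95**epoch,  # multiplicative factor per epoch
    )

    for epoch in range(3):
        print(scheduler.get_lr())  # 0.5, 0.475, 0.45125
        scheduler.step()           # advance the schedule by one epoch

The imperative paddle.fluid.dygraph.LambdaDecay variant patched above follows the same ``learning_rate * lr_lambda(epoch)`` rule.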