diff --git a/python/paddle/amp/debugging.py b/python/paddle/amp/debugging.py
index 3672ecd6b353dd044ca309bb12175726512727a2..3c99e003490fa80ded4c792f03934bb72336b6f2 100644
--- a/python/paddle/amp/debugging.py
+++ b/python/paddle/amp/debugging.py
@@ -315,7 +315,7 @@ def enable_operator_stats_collection():
     """
     Enable to collect the number of operators for different data types.
     The statistical data are categorized according to four data types, namely
-    float32, float16, bfloat16 and others. This funciton is used in pair with
+    float32, float16, bfloat16 and others. This function is used in pair with
     the corresponding disable function.
 
     Examples:
@@ -351,7 +351,7 @@ def enable_operator_stats_collection():
 def disable_operator_stats_collection():
     """
     Disable the collection the number of operators for different data types.
-    This funciton is used in pair with the corresponding enable function.
+    This function is used in pair with the corresponding enable function.
     The statistical data are categorized according to four data types, namely
     float32, float16, bfloat16 and others, and will be printed after the
     function call.
diff --git a/python/paddle/device/__init__.py b/python/paddle/device/__init__.py
index 5fce6b1442f9536028cfae90cd091a690ddaea4a..c19e6e08b4d2d0272c8ece0b9e293347a2ef455d 100644
--- a/python/paddle/device/__init__.py
+++ b/python/paddle/device/__init__.py
@@ -135,7 +135,7 @@ def XPUPlace(dev_id):
 
 def get_cudnn_version():
     """
-    This funciton return the version of cudnn. the retuen value is int which represents the
+    This function return the version of cudnn. the retuen value is int which represents the
     cudnn version. For example, if it return 7600, it represents the version of cudnn is 7.6.
 
     Returns:
@@ -270,7 +270,7 @@ def set_device(device):
 def get_device():
     """
-    This funciton can get the current global device of the program is running.
+    This function can get the current global device of the program is running.
     It's a string which is like 'cpu', 'gpu:x', 'xpu:x' and 'npu:x'.
     if the global device is not set, it will return a string which is 'gpu:x'
     when cuda is avaliable or it will return a string which is 'cpu' when cuda
     is not avaliable.
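For context, the hunks above document a paired enable/disable API and two device queries. A minimal usage sketch, assuming a standard Paddle install; the printed device string, cuDNN version, and operator counts all depend on the build and hardware:

import paddle

# Device queries documented in python/paddle/device/__init__.py.
print(paddle.device.get_device())         # e.g. 'cpu' or 'gpu:0'
print(paddle.device.get_cudnn_version())  # e.g. 7600 for cuDNN 7.6; None if unavailable

# The paired collection functions from python/paddle/amp/debugging.py:
# enable starts counting, disable stops counting and prints the per-dtype
# table (float32, float16, bfloat16, others).
paddle.amp.debugging.enable_operator_stats_collection()
x = paddle.rand([2, 2])
y = paddle.matmul(x, x)  # operators executed here are counted by dtype
paddle.amp.debugging.disable_operator_stats_collection()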
diff --git a/python/paddle/distributed/fleet/base/distributed_strategy.py b/python/paddle/distributed/fleet/base/distributed_strategy.py
index b7519891024834e7f64a650e7fb9aa885c22f053..87d9f77018b70cdf6d05b6f397b67103b462f16d 100755
--- a/python/paddle/distributed/fleet/base/distributed_strategy.py
+++ b/python/paddle/distributed/fleet/base/distributed_strategy.py
@@ -2388,7 +2388,7 @@ class DistributedStrategy:
         """
         The workspace limit size in MB unit for choosing cuDNN convolution algorithms.
-        The inner funciton of cuDNN obtain the fastest suited algorithm that fits within this memory limit.
+        The inner function of cuDNN obtain the fastest suited algorithm that fits within this memory limit.
         Usually, large workspace size may lead to choose faster algorithms, but significant increasing memory workspace.
         Users need to trade-off between memory and speed.
         Default Value: 4000
 
diff --git a/python/paddle/distribution/beta.py b/python/paddle/distribution/beta.py
index ebf373bf6b1153a8285fcbc0fdc1384b010b6d52..64da77b978f1ecfd54792b3f07a82494f02c3fae 100644
--- a/python/paddle/distribution/beta.py
+++ b/python/paddle/distribution/beta.py
@@ -120,7 +120,7 @@ class Beta(exponential_family.ExponentialFamily):
         return paddle.exp(self.log_prob(value))
 
     def log_prob(self, value):
-        """Log probability density funciton evaluated at value
+        """Log probability density function evaluated at value
 
         Args:
             value (Tensor): Value to be evaluated
diff --git a/python/paddle/distribution/kl.py b/python/paddle/distribution/kl.py
index a47eefc7893e14a8e96dc794e5bece3cedbecff6..40be41e06c5f6c731c0e0d72e85e1741c3af5684 100644
--- a/python/paddle/distribution/kl.py
+++ b/python/paddle/distribution/kl.py
@@ -73,7 +73,7 @@ def register_kl(cls_p, cls_q):
     functions registered by ``register_kl``, according to multi-dispatch pattern.
     If an implemention function is found, it will return the result, otherwise,
     it will raise ``NotImplementError`` exception. Users can register
-    implemention funciton by the decorator.
+    implemention function by the decorator.
 
     Args:
         cls_p (Distribution): The Distribution type of Instance p. Subclass derived from ``Distribution``.
diff --git a/python/paddle/distribution/transform.py b/python/paddle/distribution/transform.py
index f1ee702c15b66e2d7ee2e4ac70dac07f7fc13a91..53316911559e8707f5334245fa988c01925c25db 100644
--- a/python/paddle/distribution/transform.py
+++ b/python/paddle/distribution/transform.py
@@ -66,7 +66,7 @@ class Transform:
 
     Suppose :math:`X` is a K-dimensional random variable with probability
     density function :math:`p_X(x)`. A new random variable :math:`Y = f(X)` may
-    be defined by transforming :math:`X` with a suitably well-behaved funciton
+    be defined by transforming :math:`X` with a suitably well-behaved function
     :math:`f`. It suffices for what follows to note that if `f` is one-to-one
     and its inverse :math:`f^{-1}` have a well-defined Jacobian, then the
     density of :math:`Y` is
diff --git a/python/paddle/fluid/dygraph/learning_rate_scheduler.py b/python/paddle/fluid/dygraph/learning_rate_scheduler.py
index 9951f5a7c40f4a7785a22998cb6b431a4e4c8dc4..2da10b2e52dce33d6714a7cbd0ed59155c5c5360 100644
--- a/python/paddle/fluid/dygraph/learning_rate_scheduler.py
+++ b/python/paddle/fluid/dygraph/learning_rate_scheduler.py
@@ -1234,7 +1234,7 @@ class LambdaDecay(_LearningRateEpochDecay):
     :api_attr: imperative
 
     Sets the learning rate of ``optimizer`` to the initial lr times a multiplicative factor, and this multiplicative
-    factor is computed by function ``lr_lambda`` . ``lr_lambda`` is funciton which receives ``epoch`` .
+    factor is computed by function ``lr_lambda`` . ``lr_lambda`` is function which receives ``epoch`` .
 
     The algorithm can be described as the code below.
 
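The distribution hunks above touch ``Beta.log_prob`` and the ``register_kl`` multi-dispatch decorator. A minimal sketch of both, assuming ``paddle.distribution`` as documented; ``kl_fun`` is a hypothetical name and its body is elided:

import paddle
from paddle.distribution import Beta, kl_divergence, register_kl

# Log probability density function evaluated at a value in (0, 1).
beta = Beta(alpha=paddle.to_tensor(0.5), beta=paddle.to_tensor(0.5))
print(beta.log_prob(paddle.to_tensor(0.2)))

# register_kl keys implementations by the (type(p), type(q)) pair;
# kl_divergence(p, q) then dispatches to the registered function.
@register_kl(Beta, Beta)
def kl_fun(p, q):  # hypothetical implementation name
    ...  # must return a Tensor holding KL(p || q)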
diff --git a/python/paddle/fluid/multiprocess_utils.py b/python/paddle/fluid/multiprocess_utils.py
index bc7d58d1284f1ffc7cb93bf16920f7404b935707..b763446930fdb1ad86d0e7aec1f6fd09282de251 100644
--- a/python/paddle/fluid/multiprocess_utils.py
+++ b/python/paddle/fluid/multiprocess_utils.py
@@ -44,7 +44,7 @@ def _clear_multiprocess_queue_set():
 def _cleanup():
     # NOTE: inter-process Queue shared memory objects clear function
     _clear_multiprocess_queue_set()
-    # NOTE: main process memory map files clear funciton
+    # NOTE: main process memory map files clear function
     core._cleanup_mmap_fds()
 
 
diff --git a/python/paddle/fluid/tests/unittests/cc_imp_py_test.cc b/python/paddle/fluid/tests/unittests/cc_imp_py_test.cc
index a0b9ec5f9f6d465d767f7071eaaf121e7d17f955..e36c8527369d1602729d62ea54b581542c4259dd 100644
--- a/python/paddle/fluid/tests/unittests/cc_imp_py_test.cc
+++ b/python/paddle/fluid/tests/unittests/cc_imp_py_test.cc
@@ -25,7 +25,7 @@ TEST(CC, IMPORT_PY) {
   ASSERT_FALSE(PyRun_SimpleString("import paddle"));
   ASSERT_FALSE(PyRun_SimpleString("print(paddle.to_tensor(1))"));
 
-  // 2. C/C++ Run Python funciton
+  // 2. C/C++ Run Python function
   PyRun_SimpleString("import sys");
   PyRun_SimpleString("import os");
   PyRun_SimpleString("sys.path.append(os.getcwd())");
diff --git a/python/paddle/io/multiprocess_utils.py b/python/paddle/io/multiprocess_utils.py
index 5792983ceb475003a185f1439d253e791d9b3f93..51b0c2b81821427a8fa879017097c50563c19c42 100644
--- a/python/paddle/io/multiprocess_utils.py
+++ b/python/paddle/io/multiprocess_utils.py
@@ -45,7 +45,7 @@ def _clear_multiprocess_queue_set():
 def _cleanup():
     # NOTE: inter-process Queue shared memory objects clear function
     _clear_multiprocess_queue_set()
-    # NOTE: main process memory map files clear funciton
+    # NOTE: main process memory map files clear function
     core._cleanup_mmap_fds()
 
 
diff --git a/python/paddle/optimizer/lr.py b/python/paddle/optimizer/lr.py
index ad639a06c7a0f92741ad9def0ec7170c312eec88..d28abf5d631c9be63f7e8dec1b0e10fd30a1b1a9 100644
--- a/python/paddle/optimizer/lr.py
+++ b/python/paddle/optimizer/lr.py
@@ -1138,7 +1138,7 @@ class StepDecay(LRScheduler):
 
 class LambdaDecay(LRScheduler):
     """
-    Sets the learning rate of ``optimizer`` by function ``lr_lambda`` . ``lr_lambda`` is funciton which receives ``epoch`` .
+    Sets the learning rate of ``optimizer`` by function ``lr_lambda`` . ``lr_lambda`` is function which receives ``epoch`` .
 
     The algorithm can be described as the code below.
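Both ``LambdaDecay`` hunks describe the same contract: ``lr_lambda`` receives the epoch number and returns the multiplicative factor applied to the initial learning rate. A minimal sketch against the public ``paddle.optimizer.lr`` API; the 0.95 decay factor and layer shapes are arbitrary choices for illustration:

import paddle

linear = paddle.nn.Linear(10, 10)
# lr at epoch e is 0.5 * 0.95**e.
scheduler = paddle.optimizer.lr.LambdaDecay(learning_rate=0.5, lr_lambda=lambda epoch: 0.95**epoch)
sgd = paddle.optimizer.SGD(learning_rate=scheduler, parameters=linear.parameters())

for epoch in range(3):
    loss = linear(paddle.uniform([4, 10])).mean()
    loss.backward()
    sgd.step()
    sgd.clear_grad()
    scheduler.step()  # advance to the next epoch's multiplicative factor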