From 20a66bbf550fe60b9a362fc2a9fee4438e04d252 Mon Sep 17 00:00:00 2001
From: co63oc
Date: Thu, 20 Apr 2023 10:59:42 +0800
Subject: [PATCH] Fix typos, test=document_fix (#53099)

---
 paddle/phi/kernels/digamma_kernel.h |  2 +-
 python/paddle/fluid/__init__.py     |  4 ++--
 python/paddle/fluid/backward.py     | 18 ++++++++++--------
 python/paddle/fluid/compiler.py     |  2 +-
 python/paddle/fluid/core.py         |  4 ++--
 5 files changed, 16 insertions(+), 14 deletions(-)

diff --git a/paddle/phi/kernels/digamma_kernel.h b/paddle/phi/kernels/digamma_kernel.h
index b45b7070d2d..f2103d24168 100644
--- a/paddle/phi/kernels/digamma_kernel.h
+++ b/paddle/phi/kernels/digamma_kernel.h
@@ -19,7 +19,7 @@
 namespace phi {
 
 /**
- * @brief This kernrel is used to perform elementwise digamma for x.
+ * @brief This kernel is used to perform elementwise digamma for x.
  * $$out = \Psi(x) = \frac{ \Gamma^{'}(x) }{ \Gamma(x) }$$
  * @param ctx device context
  * @param x the input tensor of digamma
diff --git a/python/paddle/fluid/__init__.py b/python/paddle/fluid/__init__.py
index 5df4a15efc7..96a15cb0ec9 100644
--- a/python/paddle/fluid/__init__.py
+++ b/python/paddle/fluid/__init__.py
@@ -17,7 +17,7 @@ import sys
 import atexit
 
 # The legacy core need to be removed before "import core",
-# in case of users installing paddlepadde without -U option
+# in case of users installing paddlepaddle without -U option
 core_suffix = 'so'
 if os.name == 'nt':
     core_suffix = 'pyd'
@@ -219,7 +219,7 @@ monkey_patch_varbase()
 atexit.register(core.clear_executor_cache)
 
 # NOTE(Aganlengzi): clean up KernelFactory in advance manually.
-# NOTE(wangran16): clean up DeviceManger in advance manually.
+# NOTE(wangran16): clean up DeviceManager in advance manually.
 # Keep clear_kernel_factory running before clear_device_manager
 atexit.register(core.clear_device_manager)
 atexit.register(core.clear_kernel_factory)
diff --git a/python/paddle/fluid/backward.py b/python/paddle/fluid/backward.py
index 46f225e0d09..ef66532e6b7 100755
--- a/python/paddle/fluid/backward.py
+++ b/python/paddle/fluid/backward.py
@@ -313,7 +313,7 @@ def _find_loss_op_(loss):
             loss.op = op
             break
     if loss.op is None:
-        raise ValueError("loss.op is None. Should not happend")
+        raise ValueError("loss.op is None. Should not happen")
 
 
 def _rename_arg_(op_descs, old_name, new_name, begin_idx=None, end_idx=None):
@@ -1361,7 +1361,7 @@
             sub_block = program.block(op._block_attr_id("sub_block"))
             grad_sub_block = program._create_block()
             grad_sub_block._set_forward_block_idx(sub_block.idx)
-            # see follwing comments for why set None here.
+            # see following comments for why set None here.
             pre_input_grad_names_set = copy.copy(input_grad_names_set)
             input_grad_names_set = None
             sub_block_path = op_path_dict[op._block_attr_id("sub_block")]
@@ -1383,7 +1383,7 @@
             grad_sub_block_list.append(grad_sub_block.desc)
         # In primitive mode, raw phi GradOp will be split into multiple small
         # primitive operators, and the split rules are defined in c++ level,
-        # see detials: paddle/fluid/prim/api/manual/backward/composite_backward_api.h
+        # see details: paddle/fluid/prim/api/manual/backward/composite_backward_api.h
         # It means that the output's shape and dtype of previous operators which
         # maybe used as the input of next operators must be known. Therefore,
         # we infer shape and dtype in a sandbox block(named composite_block) for
@@ -1391,7 +1391,7 @@
         # For example:
         #     forward:
         #         z = multiply(x, y) //maybe broadcast in kernel
-        #     bcckward:
+        #     backward:
         #         x_grad_unreduce = z_grad * y // maybe unreduce
         #         reduced_axes = get_reduced_axes(x_grad.shape, x.shape) // need known shape
         #         x_grad = reduce_sum(x_grad_unreduce)
@@ -1515,7 +1515,7 @@
             grad_op_descs.extend(grad_op_desc)
             grad_to_var.update(op_grad_to_var)
 
-            # record mapping bewteen grad var name and var name (Only for auto parallel)
+            # record mapping between grad var name and var name (Only for auto parallel)
             grad_var_to_var = None
             if distop_context is not None:
                 grad_var_to_var = distop_context.grad_var_to_var[
@@ -1548,7 +1548,9 @@
             op_desc for op_desc in grad_op_descs if op_desc not in not_need_ops
         ]
     else:
-        logging.debug("Runing backward composite and disable find_not_need_ops")
+        logging.debug(
+            "Running backward composite and disable find_not_need_ops"
+        )
 
     # append op_desc in grad_op_descs to target_block
     op_role_attr_name = core.op_proto_and_checker_maker.kOpRoleAttrName()
@@ -1716,7 +1718,7 @@ def _append_backward_vars_(block, start_op_idx, grad_to_var, grad_info_map):
 
 def infershape_for_composite(block, grad_op_desc):
     # NOTE: why pruning the operator with empty output here ?
-    # Some backward operator will output emtpy var, which will cause infer
+    # Some backward operator will output empty var, which will cause infer
     # shape error, such assign with input's stop_gradient=True
     if len(grad_op_desc.output_arg_names()) == 0:
         return
@@ -1748,7 +1750,7 @@ def infershape_for_composite(block, grad_op_desc):
             for name, args in grad_op_desc.outputs().items()
         },
         # NOTE Runtime attr will be ignore as the c++ GetRuntimeAttr
-        # interface cann't be exported to python. Please note the WARNNING
+        # interface cann't be exported to python. Please note the WARNING
         # message logged in RuntimeAttrs of composite_grad_desc_maker.h
         attrs=grad_op_desc.get_attr_map(),
     )
diff --git a/python/paddle/fluid/compiler.py b/python/paddle/fluid/compiler.py
index 0a17b4a9151..505b7ad566d 100644
--- a/python/paddle/fluid/compiler.py
+++ b/python/paddle/fluid/compiler.py
@@ -245,7 +245,7 @@ class CompiledProgram:
             )
             self._exec_strategy.num_threads = 1
 
-        # TODO(wuyi): trainer endpoings should be passed in through
+        # TODO(wuyi): trainer endpoints should be passed in through
         # build_strategy, not program.xxx.
         # TODO(gongwb): let user to set them once.
         if (
diff --git a/python/paddle/fluid/core.py b/python/paddle/fluid/core.py
index a07e12f199c..1d587c44912 100644
--- a/python/paddle/fluid/core.py
+++ b/python/paddle/fluid/core.py
@@ -271,7 +271,7 @@ try:
         if avx_supported() and not libpaddle.is_compiled_with_avx():
             sys.stderr.write(
                 "Hint: Your machine support AVX, but the installed paddlepaddle doesn't have avx core. "
-                "Hence, no-avx core with worse preformance will be imported.\nIf you like, you could "
+                "Hence, no-avx core with worse performance will be imported.\nIf you like, you could "
                 "reinstall paddlepaddle by 'python -m pip install --force-reinstall paddlepaddle-gpu[==version]' "
                 "to get better performance.\n"
             )
@@ -450,7 +450,7 @@ def _is_all_prim_enabled():
     return _is_fwd_prim_enabled() and _is_bwd_prim_enabled()
 
 
-# Alert!!! This method is only for test coveraget, user should never use it directly, this may cause serious system errors.
+# Alert!!! This method is only for test coverage, user should never use it directly, this may cause serious system errors.
 def _test_use_sync(value):
     __sync_stat_with_flag(value)
-- 
GitLab
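
A note on the digamma hunk, not part of the commit: the docstring it fixes documents
Psi(x) = Gamma'(x) / Gamma(x), and the kernel's behaviour can be sanity-checked from
Python through the public paddle.digamma API. A minimal sketch, assuming PaddlePaddle
and SciPy are installed (SciPy is only used here as an independent reference):

import numpy as np
import paddle
from scipy.special import digamma as scipy_digamma

# Psi(x) = Gamma'(x) / Gamma(x); compare the Paddle kernel against SciPy.
x_np = np.array([0.5, 1.0, 2.5, 4.0], dtype=np.float32)
out = paddle.digamma(paddle.to_tensor(x_np))

# Both should agree to float32 precision, e.g. Psi(1) = -(Euler-Mascheroni constant).
np.testing.assert_allclose(out.numpy(), scipy_digamma(x_np), rtol=1e-5)
print(out.numpy())  # approx [-1.9635, -0.5772, 0.7032, 1.2561]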
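The __init__.py hunk touches the comment block about cleanup order. That order works
because Python's atexit runs handlers last-in, first-out: registering
clear_device_manager before clear_kernel_factory makes the kernel factory clear first,
exactly as the "Keep clear_kernel_factory running before clear_device_manager" comment
requires. A self-contained sketch of the same pattern, with stub handlers standing in
for Paddle's real ones:

import atexit

def clear_device_manager():
    print("DeviceManager cleared (runs second)")

def clear_kernel_factory():
    print("KernelFactory cleared (runs first)")

# atexit is LIFO: the handler registered last fires first at interpreter
# shutdown, so this registration order mirrors fluid/__init__.py and keeps
# clear_kernel_factory ahead of clear_device_manager.
atexit.register(clear_device_manager)
atexit.register(clear_kernel_factory)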
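The long backward.py comment fixed above ("bcckward" -> "backward") sketches why the
composite grad decomposition needs known shapes: when multiply broadcasts x, the raw
gradient has z's shape and must be reduce-summed back to x's shape. A NumPy
illustration of that unreduce step; get_reduced_axes is only named in the comment, so
the helper below is a hypothetical implementation written just for this sketch:

import numpy as np

def get_reduced_axes(grad_shape, x_shape):
    # Axes where x was broadcast (size 1) have to be summed away again.
    pad = (1,) * (len(grad_shape) - len(x_shape)) + tuple(x_shape)
    return tuple(
        i for i, (g, x) in enumerate(zip(grad_shape, pad)) if x == 1 and g != 1
    )

x = np.random.rand(1, 3).astype(np.float32)  # broadcast along axis 0
y = np.random.rand(4, 3).astype(np.float32)
z_grad = np.ones((4, 3), dtype=np.float32)   # incoming grad of z = multiply(x, y)

x_grad_unreduce = z_grad * y                 # shape (4, 3), not x's (1, 3)
axes = get_reduced_axes(x_grad_unreduce.shape, x.shape)
x_grad = x_grad_unreduce.sum(axis=axes, keepdims=True)
assert x_grad.shape == x.shape               # reduced back to (1, 3)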