Unverified commit 20a66bbf, authored by co63oc, committed by GitHub

Fix typos, test=document_fix (#53099)

Parent cea6b6de
@@ -19,7 +19,7 @@
namespace phi {
/**
- * @brief This kernrel is used to perform elementwise digamma for x.
+ * @brief This kernel is used to perform elementwise digamma for x.
 * $$out = \Psi(x) = \frac{ \Gamma^{'}(x) }{ \Gamma(x) }$$
 * @param ctx device context
 * @param x the input tensor of digamma
......
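For reference, the digamma identity in the docstring above can be checked numerically. A minimal sketch, assuming the public paddle.digamma API is backed by this kernel and that scipy is available for a reference value (both are assumptions, not part of this diff):

```python
# Hedged sketch: cross-check the elementwise digamma described in the
# docstring, out = psi(x) = gamma'(x) / gamma(x).
# Assumes `paddle` and `scipy` are installed; `paddle.digamma` is assumed
# to be the Python entry point for this kernel.
import numpy as np
import paddle
from scipy.special import digamma as scipy_digamma

x_np = np.array([0.5, 1.0, 2.5, 10.0], dtype="float32")
out = paddle.digamma(paddle.to_tensor(x_np))

# psi(1) equals minus the Euler-Mascheroni constant, roughly -0.5772.
print(out.numpy())
print(scipy_digamma(x_np))
```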
@@ -17,7 +17,7 @@ import sys
import atexit
# The legacy core need to be removed before "import core",
-# in case of users installing paddlepadde without -U option
+# in case of users installing paddlepaddle without -U option
core_suffix = 'so'
if os.name == 'nt':
    core_suffix = 'pyd'
@@ -219,7 +219,7 @@ monkey_patch_varbase()
atexit.register(core.clear_executor_cache)
# NOTE(Aganlengzi): clean up KernelFactory in advance manually.
-# NOTE(wangran16): clean up DeviceManger in advance manually.
+# NOTE(wangran16): clean up DeviceManager in advance manually.
# Keep clear_kernel_factory running before clear_device_manager
atexit.register(core.clear_device_manager)
atexit.register(core.clear_kernel_factory)
......
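The registration order in the hunk above relies on atexit running handlers in last-in, first-out order: because clear_device_manager is registered before clear_kernel_factory, the kernel factory is cleared first at interpreter exit, matching the comment. A minimal sketch of that LIFO rule, using toy stand-ins rather than the real core hooks:

```python
# Toy stand-ins for core.clear_device_manager / core.clear_kernel_factory,
# showing atexit's last-in, first-out execution order.
import atexit

def clear_device_manager():
    print("clear_device_manager (runs second)")

def clear_kernel_factory():
    print("clear_kernel_factory (runs first)")

# Registered in this order, atexit calls them in reverse at interpreter exit:
# clear_kernel_factory first, then clear_device_manager.
atexit.register(clear_device_manager)
atexit.register(clear_kernel_factory)
```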
@@ -313,7 +313,7 @@ def _find_loss_op_(loss):
            loss.op = op
            break
    if loss.op is None:
-        raise ValueError("loss.op is None. Should not happend")
+        raise ValueError("loss.op is None. Should not happen")
def _rename_arg_(op_descs, old_name, new_name, begin_idx=None, end_idx=None):
@@ -1361,7 +1361,7 @@ def _append_backward_ops_(
            sub_block = program.block(op._block_attr_id("sub_block"))
            grad_sub_block = program._create_block()
            grad_sub_block._set_forward_block_idx(sub_block.idx)
-            # see follwing comments for why set None here.
+            # see following comments for why set None here.
            pre_input_grad_names_set = copy.copy(input_grad_names_set)
            input_grad_names_set = None
            sub_block_path = op_path_dict[op._block_attr_id("sub_block")]
@@ -1383,7 +1383,7 @@ def _append_backward_ops_(
            grad_sub_block_list.append(grad_sub_block.desc)
        # In primitive mode, raw phi GradOp will be split into multiple small
        # primitive operators, and the split rules are defined in c++ level,
-        # see detials: paddle/fluid/prim/api/manual/backward/composite_backward_api.h
+        # see details: paddle/fluid/prim/api/manual/backward/composite_backward_api.h
        # It means that the output's shape and dtype of previous operators which
        # maybe used as the input of next operators must be known. Therefore,
        # we infer shape and dtype in a sandbox block(named composite_block) for
@@ -1391,7 +1391,7 @@ def _append_backward_ops_(
        # For example:
        # forward:
        # z = multiply(x, y) //maybe broadcast in kernel
-        # bcckward:
+        # backward:
        # x_grad_unreduce = z_grad * y // maybe unreduce
        # reduced_axes = get_reduced_axes(x_grad.shape, x.shape) // need known shape
        # x_grad = reduce_sum(x_grad_unreduce)
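The multiply example in the comment above can be made concrete with a short NumPy sketch; get_reduced_axes below is a hypothetical helper written only for this illustration, not the composite API. When x of shape (3,) is broadcast against y of shape (2, 3), the unreduced gradient has shape (2, 3) and must be summed over axis 0 to recover x's shape:

```python
# Illustrative sketch of the broadcast/reduce pattern in the comment above.
# `get_reduced_axes` here is a hypothetical helper for this sketch only.
import numpy as np

def get_reduced_axes(grad_shape, input_shape):
    # Axes where the forward pass added a dimension or broadcast size 1.
    padded = (1,) * (len(grad_shape) - len(input_shape)) + tuple(input_shape)
    return tuple(
        i for i, (g, p) in enumerate(zip(grad_shape, padded)) if p == 1 and g != 1
    )

x = np.ones((3,), dtype=np.float32)       # broadcast to (2, 3) in the forward pass
y = np.arange(6, dtype=np.float32).reshape(2, 3)
z_grad = np.ones_like(y)                  # upstream gradient of z = x * y

x_grad_unreduce = z_grad * y              # shape (2, 3), not x's shape yet
axes = get_reduced_axes(x_grad_unreduce.shape, x.shape)
x_grad = x_grad_unreduce.sum(axis=axes)   # reduced back to shape (3,)
print(axes, x_grad.shape)                 # -> (0,) (3,)
```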
@@ -1515,7 +1515,7 @@ def _append_backward_ops_(
        grad_op_descs.extend(grad_op_desc)
        grad_to_var.update(op_grad_to_var)
-        # record mapping bewteen grad var name and var name (Only for auto parallel)
+        # record mapping between grad var name and var name (Only for auto parallel)
        grad_var_to_var = None
        if distop_context is not None:
            grad_var_to_var = distop_context.grad_var_to_var[
@@ -1548,7 +1548,9 @@ def _append_backward_ops_(
            op_desc for op_desc in grad_op_descs if op_desc not in not_need_ops
        ]
    else:
-        logging.debug("Runing backward composite and disable find_not_need_ops")
+        logging.debug(
+            "Running backward composite and disable find_not_need_ops"
+        )
    # append op_desc in grad_op_descs to target_block
    op_role_attr_name = core.op_proto_and_checker_maker.kOpRoleAttrName()
@@ -1716,7 +1718,7 @@ def _append_backward_vars_(block, start_op_idx, grad_to_var, grad_info_map):
def infershape_for_composite(block, grad_op_desc):
    # NOTE: why pruning the operator with empty output here ?
-    # Some backward operator will output emtpy var, which will cause infer
+    # Some backward operator will output empty var, which will cause infer
    # shape error, such assign with input's stop_gradient=True
    if len(grad_op_desc.output_arg_names()) == 0:
        return
@@ -1748,7 +1750,7 @@ def infershape_for_composite(block, grad_op_desc):
            for name, args in grad_op_desc.outputs().items()
        },
        # NOTE Runtime attr will be ignore as the c++ GetRuntimeAttr
-        # interface cann't be exported to python. Please note the WARNNING
+        # interface cann't be exported to python. Please note the WARNING
        # message logged in RuntimeAttrs of composite_grad_desc_maker.h
        attrs=grad_op_desc.get_attr_map(),
    )
......
@@ -245,7 +245,7 @@ class CompiledProgram:
            )
            self._exec_strategy.num_threads = 1
-        # TODO(wuyi): trainer endpoings should be passed in through
+        # TODO(wuyi): trainer endpoints should be passed in through
        # build_strategy, not program.xxx.
        # TODO(gongwb): let user to set them once.
        if (
......
@@ -271,7 +271,7 @@ try:
    if avx_supported() and not libpaddle.is_compiled_with_avx():
        sys.stderr.write(
            "Hint: Your machine support AVX, but the installed paddlepaddle doesn't have avx core. "
-            "Hence, no-avx core with worse preformance will be imported.\nIf you like, you could "
+            "Hence, no-avx core with worse performance will be imported.\nIf you like, you could "
            "reinstall paddlepaddle by 'python -m pip install --force-reinstall paddlepaddle-gpu[==version]' "
            "to get better performance.\n"
        )
@@ -450,7 +450,7 @@ def _is_all_prim_enabled():
    return _is_fwd_prim_enabled() and _is_bwd_prim_enabled()
-# Alert!!! This method is only for test coveraget, user should never use it directly, this may cause serious system errors.
+# Alert!!! This method is only for test coverage, user should never use it directly, this may cause serious system errors.
def _test_use_sync(value):
    __sync_stat_with_flag(value)
......