PaddlePaddle / Paddle
Commit 20a66bbf (unverified)
Authored Apr 20, 2023 by co63oc · Committed via GitHub on Apr 20, 2023

Fix typos, test=document_fix (#53099)
Parent: cea6b6de
Showing 5 changed files with 16 additions and 14 deletions (+16 −14)
paddle/phi/kernels/digamma_kernel.h    +1 −1
python/paddle/fluid/__init__.py        +2 −2
python/paddle/fluid/backward.py        +10 −8
python/paddle/fluid/compiler.py        +1 −1
python/paddle/fluid/core.py            +2 −2
paddle/phi/kernels/digamma_kernel.h
@@ -19,7 +19,7 @@
 namespace phi {
 
 /**
- * @brief This kernrel is used to perform elementwise digamma for x.
+ * @brief This kernel is used to perform elementwise digamma for x.
  * $$out = \Psi(x) = \frac{ \Gamma^{'}(x) }{ \Gamma(x) }$$
  * @param ctx device context
  * @param x the input tensor of digamma
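The docstring's formula defines digamma as Ψ(x) = Γ'(x)/Γ(x), i.e. the derivative of log Γ(x); in the Python API this kernel surfaces as paddle.digamma. A quick numerical sanity check of that identity, sketched with SciPy as a reference rather than the phi kernel itself:

# Sketch: verify digamma(x) = d/dx log(Gamma(x)) numerically.
# Uses scipy.special as a reference, not Paddle's phi kernel.
import numpy as np
from scipy.special import digamma, gammaln

x = np.array([0.5, 1.0, 2.5, 10.0])
eps = 1e-6

# Central finite difference of log(Gamma(x)).
approx = (gammaln(x + eps) - gammaln(x - eps)) / (2 * eps)

print(np.allclose(digamma(x), approx, atol=1e-4))  # True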
python/paddle/fluid/__init__.py
@@ -17,7 +17,7 @@ import sys
 import atexit
 
 # The legacy core need to be removed before "import core",
-# in case of users installing paddlepadde without -U option
+# in case of users installing paddlepaddle without -U option
 core_suffix = 'so'
 if os.name == 'nt':
     core_suffix = 'pyd'
@@ -219,7 +219,7 @@ monkey_patch_varbase()
 atexit.register(core.clear_executor_cache)
 
 # NOTE(Aganlengzi): clean up KernelFactory in advance manually.
-# NOTE(wangran16): clean up DeviceManger in advance manually.
+# NOTE(wangran16): clean up DeviceManager in advance manually.
 # Keep clear_kernel_factory running before clear_device_manager
 atexit.register(core.clear_device_manager)
 atexit.register(core.clear_kernel_factory)
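The registration order in this hunk is deliberate: atexit runs handlers in last-in, first-out order, so registering clear_kernel_factory after clear_device_manager is what makes it run first, as the comment requires. A standalone sketch with stand-in functions (not Paddle's real cleanup hooks):

# Sketch of atexit's LIFO ordering; the function bodies are placeholders.
import atexit


def clear_device_manager():
    print("device manager cleared")   # runs second


def clear_kernel_factory():
    print("kernel factory cleared")   # runs first


atexit.register(clear_device_manager)
atexit.register(clear_kernel_factory)
# At interpreter exit this prints:
#   kernel factory cleared
#   device manager cleared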
python/paddle/fluid/backward.py
@@ -313,7 +313,7 @@ def _find_loss_op_(loss):
             loss.op = op
             break
     if loss.op is None:
-        raise ValueError("loss.op is None. Should not happend")
+        raise ValueError("loss.op is None. Should not happen")
 
 
 def _rename_arg_(op_descs, old_name, new_name, begin_idx=None, end_idx=None):
@@ -1361,7 +1361,7 @@ def _append_backward_ops_(
             sub_block = program.block(op._block_attr_id("sub_block"))
             grad_sub_block = program._create_block()
             grad_sub_block._set_forward_block_idx(sub_block.idx)
-            # see follwing comments for why set None here.
+            # see following comments for why set None here.
             pre_input_grad_names_set = copy.copy(input_grad_names_set)
             input_grad_names_set = None
             sub_block_path = op_path_dict[op._block_attr_id("sub_block")]
@@ -1383,7 +1383,7 @@ def _append_backward_ops_(
             grad_sub_block_list.append(grad_sub_block.desc)
         # In primitive mode, raw phi GradOp will be split into multiple small
         # primitive operators, and the split rules are defined in c++ level,
-        # see detials: paddle/fluid/prim/api/manual/backward/composite_backward_api.h
+        # see details: paddle/fluid/prim/api/manual/backward/composite_backward_api.h
         # It means that the output's shape and dtype of previous operators which
         # maybe used as the input of next operators must be known. Therefore,
         # we infer shape and dtype in a sandbox block(named composite_block) for
@@ -1391,7 +1391,7 @@ def _append_backward_ops_(
         # For example:
         #   forward:
         #       z = multiply(x, y) //maybe broadcast in kernel
-        #   bcckward:
+        #   backward:
         #       x_grad_unreduce = z_grad * y // maybe unreduce
         #       reduced_axes = get_reduced_axes(x_grad.shape, x.shape) // need known shape
         #       x_grad = reduce_sum(x_grad_unreduce)
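The comment's example is the standard broadcast/reduce pattern: if x was broadcast in the forward multiply, the raw gradient z_grad * y comes out in the broadcast shape and must be summed over the broadcast axes to recover x.shape, which is why those shapes must be known in advance. A NumPy sketch of that bookkeeping (get_reduced_axes is a hypothetical helper mirroring the comment, not a Paddle API):

# NumPy sketch of the broadcast-aware backward pass described above.
# get_reduced_axes is a stand-in for the helper named in the comment.
import numpy as np


def get_reduced_axes(grad_shape, x_shape):
    # Assumes equal rank (as in z = x * y above): return the axes where
    # x has size 1 but the incoming gradient does not.
    return tuple(
        i for i, n in enumerate(x_shape) if n == 1 and grad_shape[i] != 1
    )


x = np.random.rand(1, 3)   # broadcast along axis 0 in the forward pass
y = np.random.rand(4, 3)
z_grad = np.ones((4, 3))   # upstream gradient for z = x * y

x_grad_unreduce = z_grad * y                        # shape (4, 3), not x.shape
axes = get_reduced_axes(x_grad_unreduce.shape, x.shape)
x_grad = x_grad_unreduce.sum(axis=axes, keepdims=True)

print(x_grad.shape == x.shape)  # True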
@@ -1515,7 +1515,7 @@ def _append_backward_ops_(
             grad_op_descs.extend(grad_op_desc)
             grad_to_var.update(op_grad_to_var)
 
-        # record mapping bewteen grad var name and var name (Only for auto parallel)
+        # record mapping between grad var name and var name (Only for auto parallel)
         grad_var_to_var = None
         if distop_context is not None:
             grad_var_to_var = distop_context.grad_var_to_var[
@@ -1548,7 +1548,9 @@ def _append_backward_ops_(
             op_desc
             for op_desc in grad_op_descs
             if op_desc not in not_need_ops
         ]
     else:
-        logging.debug("Runing backward composite and disable find_not_need_ops")
+        logging.debug(
+            "Running backward composite and disable find_not_need_ops"
+        )
 
     # append op_desc in grad_op_descs to target_block
     op_role_attr_name = core.op_proto_and_checker_maker.kOpRoleAttrName()
@@ -1716,7 +1718,7 @@ def _append_backward_vars_(block, start_op_idx, grad_to_var, grad_info_map):
 def infershape_for_composite(block, grad_op_desc):
     # NOTE: why pruning the operator with empty output here ?
-    # Some backward operator will output emtpy var, which will cause infer
+    # Some backward operator will output empty var, which will cause infer
     # shape error, such assign with input's stop_gradient=True
     if len(grad_op_desc.output_arg_names()) == 0:
         return
@@ -1748,7 +1750,7 @@ def infershape_for_composite(block, grad_op_desc):
             for name, args in grad_op_desc.outputs().items()
         },
         # NOTE Runtime attr will be ignore as the c++ GetRuntimeAttr
-        # interface cann't be exported to python. Please note the WARNNING
+        # interface cann't be exported to python. Please note the WARNING
         # message logged in RuntimeAttrs of composite_grad_desc_maker.h
         attrs=grad_op_desc.get_attr_map(),
     )
python/paddle/fluid/compiler.py
@@ -245,7 +245,7 @@ class CompiledProgram:
             )
             self._exec_strategy.num_threads = 1
-        # TODO(wuyi): trainer endpoings should be passed in through
+        # TODO(wuyi): trainer endpoints should be passed in through
         # build_strategy, not program.xxx.
         # TODO(gongwb): let user to set them once.
         if (
python/paddle/fluid/core.py
@@ -271,7 +271,7 @@ try:
         if avx_supported() and not libpaddle.is_compiled_with_avx():
             sys.stderr.write(
                 "Hint: Your machine support AVX, but the installed paddlepaddle doesn't have avx core. "
-                "Hence, no-avx core with worse preformance will be imported.\nIf you like, you could "
+                "Hence, no-avx core with worse performance will be imported.\nIf you like, you could "
                 "reinstall paddlepaddle by 'python -m pip install --force-reinstall paddlepaddle-gpu[==version]' "
                 "to get better performance.\n"
             )
@@ -450,7 +450,7 @@ def _is_all_prim_enabled():
     return _is_fwd_prim_enabled() and _is_bwd_prim_enabled()
 
 
-# Alert!!! This method is only for test coveraget, user should never use it directly, this may cause serious system errors.
+# Alert!!! This method is only for test coverage, user should never use it directly, this may cause serious system errors.
 def _test_use_sync(value):
     __sync_stat_with_flag(value)