PaddlePaddle / Paddle

Commit 68497e7b (unverified)
Authored on Mar 24, 2021 by Chen Weihang
Committed by GitHub on Mar 24, 2021
change trainable to stop_gradient in optimizer (#31823)
Parent: 270699e6
Showing 3 changed files with 12 additions and 11 deletions (+12 -11):

python/paddle/optimizer/adam.py       +1  -1
python/paddle/optimizer/adamax.py     +1  -1
python/paddle/optimizer/optimizer.py  +10 -9
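For context: in Paddle's 2.x dygraph API, a parameter's stop_gradient flag is the logical inverse of the legacy trainable flag, which is why every rewritten check below flips its polarity. A minimal sketch of freezing one parameter under that API (the layer, shapes, and data are illustrative assumptions, not taken from this commit):

import paddle

# Illustrative model; the layer and shapes are assumptions for this sketch.
linear = paddle.nn.Linear(4, 2)

# Freeze the bias: stop_gradient=True plays the role of the legacy
# trainable=False, so the optimizer checks changed in this commit skip it.
linear.bias.stop_gradient = True

opt = paddle.optimizer.Adam(parameters=linear.parameters())
loss = linear(paddle.randn([3, 4])).mean()
loss.backward()
opt.step()        # updates linear.weight only; linear.bias is skipped
opt.clear_grad()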
python/paddle/optimizer/adam.py
@@ -351,7 +351,7 @@ class Adam(Optimizer):
         """
         params_grads = []
         for param in self._parameter_list:
-            if not param.trainable:
+            if param.stop_gradient:
                 continue
             if param._grad_ivar() is not None:
                 grad_var = param._grad_ivar()
python/paddle/optimizer/adamax.py
@@ -184,7 +184,7 @@ class Adamax(Optimizer):
         """
         assert isinstance(block, framework.Block)
         for param, grad in parameters_and_grads:
-            if grad is None or param.trainable is False:
+            if grad is None or param.stop_gradient is True:
                 continue
             with param.block.program._optimized_guard(
                 [param, grad]), name_scope('adamax'):
python/paddle/optimizer/optimizer.py
@@ -542,7 +542,7 @@ class Optimizer(object):
     def _update_param_device_map(self, parameters_and_grads, target_block):
         for param_and_grad in parameters_and_grads:
-            if param_and_grad[0].trainable is True:
+            if param_and_grad[0].stop_gradient is False:
                 param_name = param_and_grad[0].name
                 ops = target_block.ops
                 device_attr_name = core.op_proto_and_checker_maker.kOpDeviceAttrName(

@@ -598,14 +598,14 @@ class Optimizer(object):
         self._update_param_device_map(parameters_and_grads, target_block)
         self._create_accumulators(
             target_block,
-            [p[0] for p in parameters_and_grads if p[0].trainable])
+            [p[0] for p in parameters_and_grads if not p[0].stop_gradient])
         self._create_global_learning_rate()

         if framework.in_dygraph_mode():
             for param_and_grad in parameters_and_grads:
                 if param_and_grad[1] is None:
                     continue
-                if param_and_grad[0].trainable is True:
+                if param_and_grad[0].stop_gradient is False:
                     self._append_optimize_op(target_block, param_and_grad)
         else:
             for param_and_grad in parameters_and_grads:

@@ -613,7 +613,7 @@ class Optimizer(object):
                     continue
                 with param_and_grad[0].block.program._optimized_guard(
                         param_and_grad), name_scope("optimizer"):
-                    if param_and_grad[0].trainable is True:
+                    if param_and_grad[0].stop_gradient is False:
                         device = self._get_device_for_param(param_and_grad[0]
                                                             .name)
                         with device_guard(device):

@@ -689,7 +689,7 @@ class Optimizer(object):
         params_grads = []
         for param in parameter_list:
-            if not param.trainable:
+            if param.stop_gradient:
                 continue
             if param._grad_ivar() is not None:
                 # create gradient tensor

@@ -789,8 +789,9 @@ class Optimizer(object):
     def _get_no_grad_set(self, loss, no_grad_set=None):
         no_grad_set = _get_no_grad_set_name(no_grad_set)
         parameters = loss.block.program.global_block().all_parameters()
-        param_no_trainable = set(
-            [param.name for param in parameters if param.trainable is False])
+        param_no_trainable = set([
+            param.name for param in parameters if param.stop_gradient is True
+        ])
         # If the parameter is no trainable, it should not have a gradient.
         no_grad_set.update(param_no_trainable)

@@ -825,7 +826,7 @@ class Optimizer(object):
         """
         for p in self._parameter_list:
-            if p.trainable:
+            if not p.stop_gradient:
                 p.clear_gradient()

     @imperative_base.no_grad

@@ -920,7 +921,7 @@ class Optimizer(object):
         """
         params_grads = []
         for param in self._parameter_list:
-            if not param.trainable:
+            if param.stop_gradient:
                 continue
             if param._grad_ivar() is not None:
                 grad_var = param._grad_ivar()
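Every hunk applies the same inversion, so the commit is behavior-preserving. A toy check of the old-versus-new conditions (illustrative only, not Paddle source):

# For each possible flag value, the rewritten condition matches the old one
# under the identity stop_gradient == not trainable.
for trainable in (True, False):
    stop_gradient = not trainable
    assert (not trainable) == stop_gradient                  # adam.py, optimizer.py
    assert (trainable is False) == (stop_gradient is True)   # adamax.py
    assert (trainable is True) == (stop_gradient is False)   # optimizer.py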