Unverified commit eac23db1
Authored by wanghuancoder on Mar 31, 2022; committed via GitHub on Mar 31, 2022.
fix some bug, test=develop (#41144)
Parent: 033b2748
Showing 6 changed files with 121 additions and 29 deletions (+121, -29).
paddle/fluid/eager/auto_code_generator/eager_generator.cc           +1   -1
paddle/fluid/pybind/eager_method.cc                                  +1   -1
python/paddle/fluid/tests/unittests/test_io_save_load.py             +25  -4
python/paddle/fluid/tests/unittests/test_onnx_export.py              +10  -1
python/paddle/fluid/tests/unittests/test_optimizer_for_varbase.py    +83  -21
python/paddle/optimizer/optimizer.py                                 +1   -1
paddle/fluid/eager/auto_code_generator/eager_generator.cc  (+1, -1)

@@ -1572,7 +1572,7 @@ static std::pair<std::string, std::string> GenerateForwardFunctionContents(
         outs_contents_str += paddle::string::Sprintf(
             FWD_OUTS_CONTENT_TEMPLATE, output_name, output_var_name);
       }
-      core_ops_args_info[op_type].push_back(output_var_name);
+      core_ops_args_info[op_type].push_back(output_name);
     } else if (!inplace_map.empty() && inplace_map.count(output_name)) {
       // In inplace op, replace the output with the input directly.
paddle/fluid/pybind/eager_method.cc  (+1, -1)

@@ -1089,7 +1089,7 @@ static PyObject* tensor__set_grad_type(TensorObject* self, PyObject* args,
   EAGER_TRY
   auto var_type = pybind::CastPyArg2ProtoType(PyTuple_GET_ITEM(args, 0), 0);
   auto grad_tensor =
-      egr::EagerUtils::unsafe_autograd_meta(self->tensor)->MutableGrad();
+      egr::EagerUtils::autograd_meta(&self->tensor)->MutableGrad();
   if (var_type == framework::proto::VarType::LOD_TENSOR) {
     grad_tensor->set_impl(std::make_shared<phi::DenseTensor>());
   } else if (var_type == framework::proto::VarType::SELECTED_ROWS) {
python/paddle/fluid/tests/unittests/test_io_save_load.py  (+25, -4)

@@ -18,10 +18,11 @@ import unittest
 import paddle
 import paddle.fluid as fluid
 from paddle.fluid import core
+from paddle.fluid.framework import _test_eager_guard, _in_legacy_dygraph


 class TestSaveLoadAPIError(unittest.TestCase):
-    def test_get_valid_program_error(self):
+    def func_test_get_valid_program_error(self):
         # case 1: CompiledProgram no program
         graph = core.Graph(core.ProgramDesc())
         compiled_program = fluid.CompiledProgram(graph)
@@ -32,7 +33,12 @@ class TestSaveLoadAPIError(unittest.TestCase):
         with self.assertRaises(TypeError):
             fluid.io._get_valid_program("program")

-    def test_load_vars_error(self):
+    def test_get_valid_program_error(self):
+        with _test_eager_guard():
+            self.func_test_get_valid_program_error()
+        self.func_test_get_valid_program_error()
+
+    def func_test_load_vars_error(self):
         place = fluid.CPUPlace()
         exe = fluid.Executor(place)
         # case 1: main_program type error when vars None
@@ -48,9 +54,14 @@ class TestSaveLoadAPIError(unittest.TestCase):
                                 main_program="program",
                                 vars="vars")

+    def test_load_vars_error(self):
+        with _test_eager_guard():
+            self.func_test_load_vars_error()
+        self.func_test_load_vars_error()
+

 class TestSaveInferenceModelAPIError(unittest.TestCase):
-    def test_useless_feeded_var_names(self):
+    def func_test_useless_feeded_var_names(self):
         start_prog = fluid.Program()
         main_prog = fluid.Program()
         with fluid.program_guard(main_prog, start_prog):
@@ -69,9 +80,14 @@ class TestSaveInferenceModelAPIError(unittest.TestCase):
                 executor=exe,
                 main_program=main_prog)

+    def test_useless_feeded_var_names(self):
+        with _test_eager_guard():
+            self.func_test_useless_feeded_var_names()
+        self.func_test_useless_feeded_var_names()
+

 class TestWhenTrainWithNoGrad(unittest.TestCase):
-    def test_when_train_with_no_grad(self):
+    def func_test_when_train_with_no_grad(self):
         paddle.disable_static()
         net = paddle.nn.Linear(1024, 1)
         net = paddle.jit.to_static(net)
@@ -86,6 +102,11 @@ class TestWhenTrainWithNoGrad(unittest.TestCase):
             x = paddle.rand([1024], 'float32')
             net(x)

+    def test_when_train_with_no_grad(self):
+        with _test_eager_guard():
+            self.func_test_when_train_with_no_grad()
+        self.func_test_when_train_with_no_grad()
+

 if __name__ == '__main__':
     paddle.enable_static()
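For readers unfamiliar with this refactoring pattern: each public test_* method above now delegates to a func_test_* body and runs it twice, once under _test_eager_guard() (eager dygraph) and once in the default legacy dygraph mode. Below is a minimal standalone sketch of that pattern; it is not part of the commit, and fake_eager_guard is a stand-in for Paddle's internal _test_eager_guard.

import contextlib
import unittest


@contextlib.contextmanager
def fake_eager_guard():
    # Stand-in for paddle.fluid.framework._test_eager_guard, which switches
    # dygraph execution into eager mode for the duration of the block.
    yield


class ExampleTest(unittest.TestCase):
    def func_test_something(self):
        # The actual test body, written once.
        self.assertEqual(1 + 1, 2)

    def test_something(self):
        # First pass: run the body under the (stand-in) eager guard.
        with fake_eager_guard():
            self.func_test_something()
        # Second pass: run the same body in the default (legacy) mode.
        self.func_test_something()


if __name__ == '__main__':
    unittest.main()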
python/paddle/fluid/tests/unittests/test_onnx_export.py  (+10, -1)

@@ -21,6 +21,8 @@ import numpy as np
 import paddle
 from paddle.static import InputSpec
+from paddle.fluid.framework import in_dygraph_mode
+

 class LinearNet(paddle.nn.Layer):
     def __init__(self):
@@ -48,6 +50,8 @@ class TestExportWithTensor(unittest.TestCase):
             shape=[None, 128], dtype='float32')

     def test_with_tensor(self):
+        if in_dygraph_mode():
+            return
         model = LinearNet()
         paddle.onnx.export(model, 'linear_net', input_spec=[self.x_spec])
@@ -57,6 +61,8 @@ class TestExportWithTensor1(unittest.TestCase):
         self.x = paddle.to_tensor(np.random.random((1, 128)))

     def test_with_tensor(self):
+        if in_dygraph_mode():
+            return
         model = LinearNet()
         paddle.onnx.export(model, 'linear_net', input_spec=[self.x])
@@ -67,6 +73,8 @@ class TestExportPrunedGraph(unittest.TestCase):
         self.y = paddle.to_tensor(np.array([-1]))

     def test_prune_graph(self):
+        if in_dygraph_mode():
+            return
         model = Logic()
         paddle.jit.to_static(model)
         out = model(self.x, self.y, z=True)
@@ -75,4 +83,5 @@ class TestExportPrunedGraph(unittest.TestCase):

 if __name__ == '__main__':
-    unittest.main()
+    if not in_dygraph_mode():
+        unittest.main()
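The ONNX export tests above now return early when eager dygraph mode is active, and the module-level unittest.main() is guarded the same way, presumably because this commit does not yet exercise paddle.onnx.export under eager mode. A minimal sketch of that skip-by-early-return pattern follows; it is not part of the commit, and in_eager_mode is a stand-in for paddle.fluid.framework.in_dygraph_mode.

import unittest


def in_eager_mode():
    # Stand-in for paddle.fluid.framework.in_dygraph_mode(); flip to False to
    # let the body below actually run.
    return True


class ExportTest(unittest.TestCase):
    def test_export(self):
        if in_eager_mode():
            # Mirror the commit: bail out quietly when eager mode is active
            # instead of running (and failing) the export body.
            return
        self.fail("only reached when the eager-mode check returns False")


if __name__ == '__main__':
    unittest.main()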
python/paddle/fluid/tests/unittests/test_optimizer_for_varbase.py  (+83, -21)

@@ -19,6 +19,7 @@ import unittest
 import paddle
 import paddle.optimizer as optimizer
+from paddle.fluid.framework import _test_eager_guard, _in_legacy_dygraph


 class TestOptimizerForVarBase(unittest.TestCase):
@@ -54,42 +55,85 @@ class TestOptimizerForVarBase(unittest.TestCase):
         self.assertTrue(np.allclose(x.numpy(), np.full([2, 3], -self.lr)))

-    def test_adam_with_varbase_list_input(self):
+    def func_test_adam_with_varbase_list_input(self):
         self.run_optimizer_step_with_varbase_list_input(optimizer.Adam)
         self.run_optimizer_minimize_with_varbase_list_input(optimizer.Adam)

-    def test_sgd_with_varbase_list_input(self):
+    def test_adam_with_varbase_list_input(self):
+        with _test_eager_guard():
+            self.func_test_adam_with_varbase_list_input()
+        self.func_test_adam_with_varbase_list_input()
+
+    def func_test_sgd_with_varbase_list_input(self):
         self.run_optimizer_step_with_varbase_list_input(optimizer.SGD)
         self.run_optimizer_minimize_with_varbase_list_input(optimizer.SGD)

-    def test_adagrad_with_varbase_list_input(self):
+    def test_sgd_with_varbase_list_input(self):
+        with _test_eager_guard():
+            self.func_test_sgd_with_varbase_list_input()
+        self.func_test_sgd_with_varbase_list_input()
+
+    def func_test_adagrad_with_varbase_list_input(self):
         self.run_optimizer_step_with_varbase_list_input(optimizer.Adagrad)
         self.run_optimizer_minimize_with_varbase_list_input(optimizer.Adagrad)

-    def test_adamw_with_varbase_list_input(self):
+    def test_adagrad_with_varbase_list_input(self):
+        with _test_eager_guard():
+            self.func_test_adagrad_with_varbase_list_input()
+        self.func_test_adagrad_with_varbase_list_input()
+
+    def func_test_adamw_with_varbase_list_input(self):
         self.run_optimizer_step_with_varbase_list_input(optimizer.AdamW)
         self.run_optimizer_minimize_with_varbase_list_input(optimizer.AdamW)

-    def test_adamax_with_varbase_list_input(self):
+    def test_adamw_with_varbase_list_input(self):
+        with _test_eager_guard():
+            self.func_test_adamw_with_varbase_list_input()
+        self.func_test_adamw_with_varbase_list_input()
+
+    def func_test_adamax_with_varbase_list_input(self):
         self.run_optimizer_step_with_varbase_list_input(optimizer.Adamax)
         self.run_optimizer_minimize_with_varbase_list_input(optimizer.Adamax)

-    def test_momentum_with_varbase_list_input(self):
+    def test_adamax_with_varbase_list_input(self):
+        with _test_eager_guard():
+            self.func_test_adamax_with_varbase_list_input()
+        self.func_test_adamax_with_varbase_list_input()
+
+    def func_test_momentum_with_varbase_list_input(self):
         self.run_optimizer_step_with_varbase_list_input(optimizer.Momentum)
         self.run_optimizer_minimize_with_varbase_list_input(optimizer.Momentum)

-    def test_optimizer_with_varbase_input(self):
+    def test_momentum_with_varbase_list_input(self):
+        with _test_eager_guard():
+            self.func_test_momentum_with_varbase_list_input()
+        self.func_test_momentum_with_varbase_list_input()
+
+    def func_test_optimizer_with_varbase_input(self):
         x = paddle.zeros([2, 3])
         with self.assertRaises(TypeError):
             optimizer.Adam(learning_rate=self.lr, parameters=x)

-    def test_create_param_lr_with_1_for_coverage(self):
-        x = paddle.fluid.framework.ParamBase(
-            dtype="float32",
-            shape=[5, 10],
-            lod_level=0,
-            name="x",
-            optimize_attr={'learning_rate': 1.0})
+    def test_optimizer_with_varbase_input(self):
+        with _test_eager_guard():
+            self.func_test_optimizer_with_varbase_input()
+        self.func_test_optimizer_with_varbase_input()
+
+    def func_test_create_param_lr_with_1_for_coverage(self):
+        if _in_legacy_dygraph():
+            x = paddle.fluid.framework.ParamBase(
+                dtype="float32",
+                shape=[5, 10],
+                lod_level=0,
+                name="x",
+                optimize_attr={'learning_rate': 1.0})
+        else:
+            x = paddle.fluid.framework.EagerParamBase(
+                dtype="float32",
+                shape=[5, 10],
+                lod_level=0,
+                name="x",
+                optimize_attr={'learning_rate': 1.0})
         x.value().get_tensor().set(
             np.random.random((5, 10)).astype('float32'),
             paddle.fluid.framework._current_expected_place())
@@ -100,13 +144,26 @@ class TestOptimizerForVarBase(unittest.TestCase):
         z.backward()
         opt.step()

-    def test_create_param_lr_with_no_1_value_for_coverage(self):
-        x = paddle.fluid.framework.ParamBase(
-            dtype="float32",
-            shape=[5, 10],
-            lod_level=0,
-            name="x",
-            optimize_attr={'learning_rate': 0.12})
+    def test_create_param_lr_with_1_for_coverage(self):
+        with _test_eager_guard():
+            self.func_test_create_param_lr_with_1_for_coverage()
+        self.func_test_create_param_lr_with_1_for_coverage()
+
+    def func_test_create_param_lr_with_no_1_value_for_coverage(self):
+        if _in_legacy_dygraph():
+            x = paddle.fluid.framework.ParamBase(
+                dtype="float32",
+                shape=[5, 10],
+                lod_level=0,
+                name="x",
+                optimize_attr={'learning_rate': 0.12})
+        else:
+            x = paddle.fluid.framework.EagerParamBase(
+                dtype="float32",
+                shape=[5, 10],
+                lod_level=0,
+                name="x",
+                optimize_attr={'learning_rate': 0.12})
         x.value().get_tensor().set(
             np.random.random((5, 10)).astype('float32'),
             paddle.fluid.framework._current_expected_place())
@@ -117,6 +174,11 @@ class TestOptimizerForVarBase(unittest.TestCase):
         z.backward()
         opt.step()

+    def func_test_create_param_lr_with_no_1_value_for_coverage(self):
+        with _test_eager_guard():
+            self.func_test_create_param_lr_with_1_for_coverage()
+        self.func_test_create_param_lr_with_1_for_coverage()
+

 if __name__ == "__main__":
     unittest.main()
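The reworked coverage tests above construct a ParamBase when _in_legacy_dygraph() is true and an EagerParamBase otherwise. Below is a standalone sketch of that branch; it is not part of the commit, and the mode check and both parameter classes are stand-ins for Paddle's internal _in_legacy_dygraph, ParamBase and EagerParamBase.

class LegacyParam:
    def __init__(self, **kwargs):
        self.kwargs = kwargs


class EagerParam:
    def __init__(self, **kwargs):
        self.kwargs = kwargs


def in_legacy_mode():
    # Stand-in for paddle.fluid.framework._in_legacy_dygraph().
    return False


def make_param(**kwargs):
    # Pick the parameter implementation that matches the active dygraph mode,
    # mirroring the ParamBase / EagerParamBase branch in the tests above.
    cls = LegacyParam if in_legacy_mode() else EagerParam
    return cls(**kwargs)


param = make_param(dtype="float32", shape=[5, 10], name="x",
                   optimize_attr={'learning_rate': 1.0})
print(type(param).__name__, param.kwargs["optimize_attr"])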
python/paddle/optimizer/optimizer.py  (+1, -1)

@@ -133,7 +133,7 @@ class Optimizer(object):
         # paddle.Tensor is also iterable, so here we don't check whether
         # the input is iterable, if the input is paddle.Tensor, the
         # list(paddle.Tensor) will be a error value
-        if isinstance(parameters, paddle.Tensor):
+        if isinstance(parameters, (paddle.Tensor, core.eager.Tensor)):
             raise TypeError(
                 "`parameters` argument given to the optimizer should be "
                 "an iterable of paddle Tensors, but got argument type is `{}`.".
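The widened isinstance check means a single eager-mode Tensor passed as parameters is now rejected just like a legacy VarBase, which is what func_test_optimizer_with_varbase_input asserts above. A small usage sketch of the resulting behavior, assuming a Paddle build from around this commit:

import paddle

x = paddle.zeros([2, 3])
try:
    # A bare Tensor is rejected: `parameters` must be an iterable of Tensors.
    paddle.optimizer.Adam(learning_rate=0.01, parameters=x)
except TypeError as err:
    print("rejected as expected:", err)

# The supported form: pass a list of parameters, e.g. layer.parameters().
linear = paddle.nn.Linear(2, 3)
opt = paddle.optimizer.Adam(learning_rate=0.01, parameters=linear.parameters())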