BaiXuePrincess / Paddle (forked from PaddlePaddle / Paddle; in sync with the upstream project)
Commit 29d9dbe3 (unverified)
Author: 姜永久 (authored Dec 12, 2022)
Committer: GitHub (committed Dec 13, 2022)

rm unittests eager guard tests part17 number2pool1d (#48840)

Parent: f2c59b88
Showing 10 changed files with 35 additions and 177 deletions.
python/paddle/fluid/tests/unittests/test_number_count_op.py   +1  -7
python/paddle/fluid/tests/unittests/test_one_hot_v2_op.py   +4  -5
python/paddle/fluid/tests/unittests/test_onnx_export.py   +3  -20
python/paddle/fluid/tests/unittests/test_optimizer.py   +0  -6
python/paddle/fluid/tests/unittests/test_optimizer_for_varbase.py   +10  -55
python/paddle/fluid/tests/unittests/test_outer.py   +2  -13
python/paddle/fluid/tests/unittests/test_paddle_imperative_double_grad.py   +6  -36
python/paddle/fluid/tests/unittests/test_parameter.py   +3  -18
python/paddle/fluid/tests/unittests/test_poisson_op.py   +6  -8
python/paddle/fluid/tests/unittests/test_pool1d_api.py   +0  -9
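Every diff below applies the same cleanup: the temporary _test_eager_guard scaffolding, which ran each check twice (once under the eager guard and once without it), is deleted from these unit tests, and the matching imports are dropped. As a rough illustration of the recurring shape of the change, here is a minimal sketch; TestExampleBefore, TestExampleAfter, and func_check are hypothetical names, and the _test_eager_guard import only resolves on Paddle versions that still ship it:

import unittest

from paddle.fluid.framework import _test_eager_guard  # the import removed by this commit


class TestExampleBefore(unittest.TestCase):
    # Old layout: the real assertions live in a func_* helper, and a
    # test_* wrapper runs them twice, once under the eager guard and
    # once without it.
    def func_check(self):
        self.assertTrue(True)  # stand-in for the real assertions

    def test_check(self):
        with _test_eager_guard():
            self.func_check()
        self.func_check()


class TestExampleAfter(unittest.TestCase):
    # New layout: the helper is renamed to test_* so unittest still
    # discovers it, and the guard wrapper is deleted.
    def test_check(self):
        self.assertTrue(True)  # stand-in for the real assertions


if __name__ == '__main__':
    unittest.main()

The per-file diffs that follow apply this same transformation to the concrete test cases, together with the removal of the now-unused imports.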
python/paddle/fluid/tests/unittests/test_number_count_op.py
@@ -20,7 +20,6 @@ import op_test
 import paddle
 import paddle.fluid.core as core
 from paddle.distributed.models.moe import utils
-from paddle.fluid.framework import _test_eager_guard


 def count(x, upper_num):
@@ -68,17 +67,12 @@ class TestNumberCountAPI(unittest.TestCase):
         res = exe.run(feed={'x': self.x}, fetch_list=[out])
         assert np.allclose(res, self.out)

-    def func_api_dygraph(self):
+    def test_api_dygraph(self):
         paddle.disable_static()
         x = paddle.to_tensor(self.x)
         out = utils._number_count(x, self.upper_num)
         assert np.allclose(out.numpy(), self.out)

-    def test_api_dygraph(self):
-        with _test_eager_guard():
-            self.func_api_dygraph()
-        self.func_api_dygraph()
-

 if __name__ == '__main__':
     paddle.enable_static()
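The rename from func_api_dygraph to test_api_dygraph is what keeps the check alive: unittest's default loader only collects methods whose names start with the test prefix, so deleting the wrapper without renaming the helper would silently drop the case. A small self-contained illustration (toy class, no Paddle required):

import unittest


class _Demo(unittest.TestCase):
    def func_api_dygraph(self):  # not collected: wrong prefix
        pass

    def test_api_dygraph(self):  # collected by the default loader
        pass


# Prints ['test_api_dygraph']; the func_* helper is ignored.
print(unittest.defaultTestLoader.getTestCaseNames(_Demo))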
python/paddle/fluid/tests/unittests/test_one_hot_v2_op.py
@@ -20,7 +20,7 @@ from op_test import OpTest
 import paddle
 import paddle.fluid as fluid
 import paddle.fluid.core as core
-from paddle.fluid.framework import Program, _test_eager_guard, program_guard
+from paddle.fluid.framework import Program, program_guard


 class TestOneHotOp(OpTest):
@@ -182,10 +182,9 @@ class TestOneHotOpApi(unittest.TestCase):
            one_hot_label = paddle.nn.functional.one_hot(
                fluid.dygraph.to_variable(label), depth
            )
-            with _test_eager_guard():
-                one_hot_label = paddle.nn.functional.one_hot(
-                    paddle.to_tensor(label), depth
-                )
+            one_hot_label = paddle.nn.functional.one_hot(
+                paddle.to_tensor(label), depth
+            )

     def _run(self, depth):
         label = fluid.layers.data(name="label", shape=[1], dtype="int64")
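test_one_hot_v2_op.py above, and test_poisson_op.py further down, show the second flavor of the cleanup: there is no method rename, the inline "with _test_eager_guard():" line is simply deleted and its body is dedented in place. A minimal sketch of that shape follows; the fake guard, check_before, check_after, and the numpy stand-ins are all hypothetical, used only so the snippet runs without Paddle:

from contextlib import contextmanager

import numpy as np


@contextmanager
def _fake_eager_guard():
    # Stand-in for paddle.fluid.framework._test_eager_guard.
    yield


def check_before(x):
    # Old shape: the statements are repeated inside the guard block.
    y = np.maximum(x, 0)
    assert (y >= 0).all()
    with _fake_eager_guard():
        y = np.maximum(x, 0)
        assert (y >= 0).all()


def check_after(x):
    # New shape: only the "with" line is gone; its body is kept,
    # dedented to the enclosing level.
    y = np.maximum(x, 0)
    assert (y >= 0).all()
    y = np.maximum(x, 0)
    assert (y >= 0).all()


check_before(np.array([-1.0, 2.0]))
check_after(np.array([-1.0, 2.0]))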
python/paddle/fluid/tests/unittests/test_onnx_export.py
@@ -17,7 +17,6 @@ import unittest
 import numpy as np

 import paddle
-from paddle.fluid.framework import _test_eager_guard


 class LinearNet(paddle.nn.Layer):
@@ -41,33 +40,23 @@ class Logic(paddle.nn.Layer):
 class TestExportWithTensor(unittest.TestCase):
-    def func_with_tensor(self):
+    def test_with_tensor(self):
         self.x_spec = paddle.static.InputSpec(
             shape=[None, 128], dtype='float32'
         )
         model = LinearNet()
         paddle.onnx.export(model, 'linear_net', input_spec=[self.x_spec])

-    def test_with_tensor(self):
-        with _test_eager_guard():
-            self.func_with_tensor()
-        self.func_with_tensor()
-

 class TestExportWithTensor1(unittest.TestCase):
-    def func_with_tensor(self):
+    def test_with_tensor(self):
         self.x = paddle.to_tensor(np.random.random((1, 128)))
         model = LinearNet()
         paddle.onnx.export(model, 'linear_net', input_spec=[self.x])

-    def test_with_tensor(self):
-        with _test_eager_guard():
-            self.func_with_tensor()
-        self.func_with_tensor()
-

 class TestExportPrunedGraph(unittest.TestCase):
-    def func_prune_graph(self):
+    def test_prune_graph(self):
         model = Logic()
         self.x = paddle.to_tensor(np.array([1]))
         self.y = paddle.to_tensor(np.array([-1]))
@@ -77,12 +66,6 @@ class TestExportPrunedGraph(unittest.TestCase):
             model, 'pruned', input_spec=[self.x], output_spec=[out]
         )

-    def test_prune_graph(self):
-        # test eager
-        with _test_eager_guard():
-            self.func_prune_graph()
-        self.func_prune_graph()
-

 if __name__ == '__main__':
     unittest.main()
python/paddle/fluid/tests/unittests/test_optimizer.py
@@ -27,7 +27,6 @@ import paddle.fluid.optimizer as optimizer
 from paddle.fluid.backward import append_backward
 from paddle.fluid.framework import (
     Program,
-    _test_eager_guard,
     convert_np_dtype_to_dtype_,
     program_guard,
 )
@@ -1387,11 +1386,6 @@ class TestOptimizerDtype(unittest.TestCase):
     def test_float32(self):
         self.check_with_dtype('float32')

-    def test_api_eager_dygraph(self):
-        with _test_eager_guard():
-            self.test_float64()
-            self.test_float32()
-

 class TestMasterWeightSaveForFP16(unittest.TestCase):
     '''
python/paddle/fluid/tests/unittests/test_optimizer_for_varbase.py
@@ -18,7 +18,7 @@ import numpy as np
 import paddle
 import paddle.optimizer as optimizer
-from paddle.fluid.framework import _in_legacy_dygraph, _test_eager_guard
+from paddle.fluid.framework import _in_legacy_dygraph


 class TestOptimizerForVarBase(unittest.TestCase):
@@ -59,71 +59,36 @@ class TestOptimizerForVarBase(unittest.TestCase):
             x.numpy(), np.full([2, 3], -self.lr), rtol=1e-05
         )

-    def func_test_adam_with_varbase_list_input(self):
+    def test_adam_with_varbase_list_input(self):
         self.run_optimizer_step_with_varbase_list_input(optimizer.Adam)
         self.run_optimizer_minimize_with_varbase_list_input(optimizer.Adam)

-    def test_adam_with_varbase_list_input(self):
-        with _test_eager_guard():
-            self.func_test_adam_with_varbase_list_input()
-        self.func_test_adam_with_varbase_list_input()
-
-    def func_test_sgd_with_varbase_list_input(self):
+    def test_sgd_with_varbase_list_input(self):
         self.run_optimizer_step_with_varbase_list_input(optimizer.SGD)
         self.run_optimizer_minimize_with_varbase_list_input(optimizer.SGD)

-    def test_sgd_with_varbase_list_input(self):
-        with _test_eager_guard():
-            self.func_test_sgd_with_varbase_list_input()
-        self.func_test_sgd_with_varbase_list_input()
-
-    def func_test_adagrad_with_varbase_list_input(self):
+    def test_adagrad_with_varbase_list_input(self):
         self.run_optimizer_step_with_varbase_list_input(optimizer.Adagrad)
         self.run_optimizer_minimize_with_varbase_list_input(optimizer.Adagrad)

-    def test_adagrad_with_varbase_list_input(self):
-        with _test_eager_guard():
-            self.func_test_adagrad_with_varbase_list_input()
-        self.func_test_adagrad_with_varbase_list_input()
-
-    def func_test_adamw_with_varbase_list_input(self):
+    def test_adamw_with_varbase_list_input(self):
         self.run_optimizer_step_with_varbase_list_input(optimizer.AdamW)
         self.run_optimizer_minimize_with_varbase_list_input(optimizer.AdamW)

-    def test_adamw_with_varbase_list_input(self):
-        with _test_eager_guard():
-            self.func_test_adamw_with_varbase_list_input()
-        self.func_test_adamw_with_varbase_list_input()
-
-    def func_test_adamax_with_varbase_list_input(self):
+    def test_adamax_with_varbase_list_input(self):
         self.run_optimizer_step_with_varbase_list_input(optimizer.Adamax)
         self.run_optimizer_minimize_with_varbase_list_input(optimizer.Adamax)

-    def test_adamax_with_varbase_list_input(self):
-        with _test_eager_guard():
-            self.func_test_adamax_with_varbase_list_input()
-        self.func_test_adamax_with_varbase_list_input()
-
-    def func_test_momentum_with_varbase_list_input(self):
+    def test_momentum_with_varbase_list_input(self):
         self.run_optimizer_step_with_varbase_list_input(optimizer.Momentum)
         self.run_optimizer_minimize_with_varbase_list_input(optimizer.Momentum)

-    def test_momentum_with_varbase_list_input(self):
-        with _test_eager_guard():
-            self.func_test_momentum_with_varbase_list_input()
-        self.func_test_momentum_with_varbase_list_input()
-
-    def func_test_optimizer_with_varbase_input(self):
+    def test_optimizer_with_varbase_input(self):
         x = paddle.zeros([2, 3])
         with self.assertRaises(TypeError):
             optimizer.Adam(learning_rate=self.lr, parameters=x)

-    def test_optimizer_with_varbase_input(self):
-        with _test_eager_guard():
-            self.func_test_optimizer_with_varbase_input()
-        self.func_test_optimizer_with_varbase_input()
-
-    def func_test_create_param_lr_with_1_for_coverage(self):
+    def test_create_param_lr_with_1_for_coverage(self):
         if _in_legacy_dygraph():
             x = paddle.fluid.framework.ParamBase(
                 dtype="float32",
@@ -151,12 +116,7 @@ class TestOptimizerForVarBase(unittest.TestCase):
         z.backward()
         opt.step()

-    def test_create_param_lr_with_1_for_coverage(self):
-        with _test_eager_guard():
-            self.func_test_create_param_lr_with_1_for_coverage()
-        self.func_test_create_param_lr_with_1_for_coverage()
-
-    def func_test_create_param_lr_with_no_1_value_for_coverage(self):
+    def test_create_param_lr_with_no_1_value_for_coverage(self):
         if _in_legacy_dygraph():
             x = paddle.fluid.framework.ParamBase(
                 dtype="float32",
@@ -184,11 +144,6 @@ class TestOptimizerForVarBase(unittest.TestCase):
         z.backward()
         opt.step()

-    def test_create_param_lr_with_no_1_value_for_coverage(self):
-        with _test_eager_guard():
-            self.func_test_create_param_lr_with_1_for_coverage()
-        self.func_test_create_param_lr_with_1_for_coverage()
-

 if __name__ == "__main__":
     unittest.main()
python/paddle/fluid/tests/unittests/test_outer.py
@@ -17,7 +17,6 @@ import unittest
 import numpy as np

 import paddle
-from paddle.fluid.framework import _test_eager_guard
 from paddle.static import Program, program_guard

@@ -54,7 +53,7 @@ class TestMultiplyApi(unittest.TestCase):
         res = paddle.outer(x, y)
         return res.numpy()

-    def func_test_multiply(self):
+    def test_multiply(self):
         np.random.seed(7)

         # test static computation graph: 3-d array
@@ -113,14 +112,9 @@ class TestMultiplyApi(unittest.TestCase):
         res = self._run_dynamic_graph_case(x_data, y_data)
         np.testing.assert_allclose(res, np.outer(x_data, y_data), rtol=1e-05)

-    def test_multiply(self):
-        with _test_eager_guard():
-            self.func_test_multiply()
-        self.func_test_multiply()
-

 class TestMultiplyError(unittest.TestCase):
-    def func_test_errors(self):
+    def test_errors(self):
         # test static computation graph: dtype can not be int8
         paddle.enable_static()
         with program_guard(Program(), Program()):
@@ -161,11 +155,6 @@ class TestMultiplyError(unittest.TestCase):
         y_data = np.random.randn(200).astype(np.float32)
         self.assertRaises(ValueError, paddle.outer, x_data, y_data)

-    def test_errors(self):
-        with _test_eager_guard():
-            self.func_test_errors()
-        self.func_test_errors()
-

 if __name__ == '__main__':
     unittest.main()
python/paddle/fluid/tests/unittests/test_paddle_imperative_double_grad.py
@@ -19,7 +19,7 @@ import numpy as np
 import paddle
 import paddle.fluid as fluid
-from paddle.fluid.framework import _in_legacy_dygraph, _test_eager_guard
+from paddle.fluid.framework import _in_legacy_dygraph
 from paddle.fluid.wrapped_decorator import wrap_decorator

@@ -68,7 +68,7 @@ class TestDygraphDoubleGrad(TestCase):
         )

     @dygraph_guard
-    def func_exception(self):
+    def test_exception(self):
         with self.assertRaises(AssertionError):
             self.grad(None, None)

@@ -101,13 +101,8 @@ class TestDygraphDoubleGrad(TestCase):
         with self.assertRaises(AssertionError):
             self.grad([random_var(shape)], [random_var(shape)], no_grad_vars=1)

-    def test_exception(self):
-        with _test_eager_guard():
-            self.func_exception()
-        self.func_exception()
-
     @dygraph_guard
-    def func_simple_example(self):
+    def test_simple_example(self):
         x = random_var(self.shape)
         x.stop_gradient = False
         y = x + 1
@@ -141,13 +136,8 @@ class TestDygraphDoubleGrad(TestCase):
             grad_with_none_and_not_none.stop_gradient, create_graph
         )

-    def test_simple_example(self):
-        with _test_eager_guard():
-            self.func_simple_example()
-        self.func_simple_example()
-
     @dygraph_guard
-    def func_none_one_initial_gradient(self):
+    def test_none_one_initial_gradient(self):
         numel = 1
         for s in self.shape:
             numel *= s
@@ -223,11 +213,6 @@ class TestDygraphDoubleGrad(TestCase):
             grad_z.numpy(), original_random_grad_z
         )

-    def test_none_one_initial_gradient(self):
-        with _test_eager_guard():
-            self.func_none_one_initial_gradient()
-        self.func_none_one_initial_gradient()
-
     @dygraph_guard
     def func_example_with_gradient_accumulation_and_create_graph(self):
         x = random_var(self.shape)
@@ -269,13 +254,8 @@ class TestDygraphDoubleGrad(TestCase):
             x_grad_actual, x_grad_expected, rtol=1e-05
         )

-    def test_example_with_gradient_accumulation_and_create_graph(self):
-        with _test_eager_guard():
-            self.func_example_with_gradient_accumulation_and_create_graph()
-        self.func_example_with_gradient_accumulation_and_create_graph()
-
     @dygraph_guard
-    def func_example_with_gradient_accumulation_and_no_grad_vars(self):
+    def test_example_with_gradient_accumulation_and_no_grad_vars(self):
         x = random_var(self.shape)
         x_np = x.numpy()
         numel = x_np.size
@@ -321,13 +301,8 @@ class TestDygraphDoubleGrad(TestCase):
             x_grad_actual, x_grad_expected, rtol=1e-05
         )

-    def test_example_with_gradient_accumulation_and_no_grad_vars(self):
-        with _test_eager_guard():
-            self.func_example_with_gradient_accumulation_and_no_grad_vars()
-        self.func_example_with_gradient_accumulation_and_no_grad_vars()
-
     @dygraph_guard
-    def func_example_with_gradient_accumulation_and_not_create_graph(self):
+    def test_example_with_gradient_accumulation_and_not_create_graph(self):
         x = random_var(self.shape)
         x_np = x.numpy()
         numel = x_np.size
@@ -363,11 +338,6 @@ class TestDygraphDoubleGrad(TestCase):
             x_grad_actual, x_grad_expected, rtol=1e-05
         )

-    def test_example_with_gradient_accumulation_and_not_create_graph(self):
-        with _test_eager_guard():
-            self.func_example_with_gradient_accumulation_and_not_create_graph()
-        self.func_example_with_gradient_accumulation_and_not_create_graph()
-

 class TestDygraphDoubleGradSortGradient(TestDygraphDoubleGrad):
     def setUp(self):
python/paddle/fluid/tests/unittests/test_parameter.py
@@ -22,12 +22,7 @@ import paddle.fluid.core as core
 import paddle.fluid.io as io
 from paddle.fluid.dygraph import guard
 from paddle.fluid.executor import Executor
-from paddle.fluid.framework import (
-    ParamBase,
-    Variable,
-    _test_eager_guard,
-    default_main_program,
-)
+from paddle.fluid.framework import ParamBase, Variable, default_main_program
 from paddle.fluid.initializer import ConstantInitializer

 paddle.enable_static()
@@ -59,7 +54,7 @@ class ParameterChecks(unittest.TestCase):
         zero_dim_param = b.create_parameter(name='x', shape=[], dtype='float32')
         self.assertEqual(zero_dim_param.shape, ())

-    def func_parambase(self):
+    def test_parambase(self):
         with guard():
             linear = paddle.nn.Linear(10, 10)
             param = linear.weight
@@ -85,11 +80,6 @@ class ParameterChecks(unittest.TestCase):
             zero_dim_param = ParamBase(shape=[], dtype='float32')
             self.assertEqual(zero_dim_param.shape, [])

-    def test_parambase(self):
-        with _test_eager_guard():
-            self.func_parambase()
-        self.func_parambase()
-
     def func_exception(self):
         b = main_program.global_block()
         with self.assertRaises(ValueError):
@@ -109,7 +99,7 @@ class ParameterChecks(unittest.TestCase):
                 name='test', shape=[-1], dtype='float32', initializer=None
             )

-    def func_parambase_to_vector(self):
+    def test_parambase_to_vector(self):
         with guard():
             initializer = paddle.ParamAttr(
                 initializer=paddle.nn.initializer.Constant(3.0)
@@ -135,11 +125,6 @@ class ParameterChecks(unittest.TestCase):
         self.assertTrue(linear2.weight.is_leaf, True)
         self.assertTrue(linear2.bias.is_leaf, True)

-    def test_parambase_to_vector(self):
-        with _test_eager_guard():
-            self.func_parambase_to_vector()
-        self.func_parambase_to_vector()
-

 if __name__ == '__main__':
     unittest.main()
python/paddle/fluid/tests/unittests/test_poisson_op.py
@@ -19,7 +19,6 @@ import numpy as np
 from op_test import OpTest

 import paddle
-from paddle.fluid.framework import _test_eager_guard

 paddle.enable_static()
 paddle.seed(100)
@@ -103,13 +102,12 @@ class TestPoissonAPI(unittest.TestCase):
         y = paddle.poisson(x)
         self.assertTrue(np.min(y.numpy()) >= 0)

-        with _test_eager_guard():
-            x = paddle.randn([10, 10], dtype='float32')
-            x.stop_gradient = False
-            y = paddle.poisson(x)
-            y.backward()
-            self.assertTrue(np.min(y.numpy()) >= 0)
-            np.testing.assert_array_equal(np.zeros_like(x), x.gradient())
+        x = paddle.randn([10, 10], dtype='float32')
+        x.stop_gradient = False
+        y = paddle.poisson(x)
+        y.backward()
+        self.assertTrue(np.min(y.numpy()) >= 0)
+        np.testing.assert_array_equal(np.zeros_like(x), x.gradient())

     def test_fixed_random_number(self):
         # Test GPU Fixed random number, which is generated by 'curandStatePhilox4_32_10_t'
python/paddle/fluid/tests/unittests/test_pool1d_api.py
@@ -20,7 +20,6 @@ import paddle
 import paddle.fluid as fluid
 import paddle.fluid.core as core
 import paddle.nn.functional as F
-from paddle.fluid.framework import _test_eager_guard


 def adaptive_start_index(index, input_size, output_size):
@@ -274,10 +273,6 @@ class TestPool1D_API(unittest.TestCase):
             self.check_avg_dygraph_padding_same(place)
             self.check_max_dygraph_return_index_results(place)

-    def test_dygraph_api(self):
-        with _test_eager_guard():
-            self.test_pool1d()
-

 class TestPool2DError_API(unittest.TestCase):
     def test_error_api(self):
@@ -422,10 +417,6 @@ class TestPool2DError_API(unittest.TestCase):
         self.assertRaises(ValueError, run_stride_out_of_range)

-    def test_dygraph_api(self):
-        with _test_eager_guard():
-            self.test_error_api()
-

 if __name__ == '__main__':
     unittest.main()