Unverified · Commit 5c9feef1 authored by 姜永久, committed by GitHub

rm unittest eager guard part1 (#48797)

* rm unittest eager guard part1

* review

* reset spawn runner base

* reset op_test

* rm test_type_core
Parent 7fc25f22
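The change applied throughout this diff is the same everywhere: the legacy `_test_eager_guard()` wrapper, which ran a test body once inside the guard and then once more outside it, is dropped and the body is executed directly, since eager mode has become the default dygraph mode. Below is a minimal, hypothetical sketch of that before/after shape; the `add_one` helper and `TestAddOne` class are illustrative only and do not appear in the patch.

import unittest

import numpy as np
import paddle


def add_one(x):
    # Plain dygraph computation; it runs under eager mode by default,
    # so no _test_eager_guard() wrapper is needed.
    return paddle.to_tensor(x) + 1


class TestAddOne(unittest.TestCase):
    # Old pattern (removed by this PR):
    #     def test_add_one(self):
    #         with _test_eager_guard():   # first pass: under the guard
    #             self.func_add_one()
    #         self.func_add_one()         # second pass: without the guard
    # New pattern: run the body once, directly.
    def test_add_one(self):
        out = add_one(np.array([1, 2], dtype='float32'))
        np.testing.assert_allclose(out.numpy(), [2.0, 3.0])


if __name__ == '__main__':
    unittest.main()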
@@ -21,7 +21,6 @@ os.environ[str("GLOG_vmodule")] = str("nan_inf_utils_detail=10")
 import paddle
 import paddle.nn as nn
-from paddle.fluid.framework import _test_eager_guard
 
 np.random.seed(0)
@@ -114,6 +113,4 @@ def run_check():
 if __name__ == '__main__':
-    with _test_eager_guard():
-        run_check()
     run_check()
@@ -18,7 +18,6 @@ import unittest
 import numpy as np
 
 import paddle
-from paddle.fluid.framework import _test_eager_guard
 
 
 class TestDygraphFleetAPI(unittest.TestCase):
@@ -50,6 +49,4 @@ class TestDygraphFleetAPI(unittest.TestCase):
 if __name__ == "__main__":
-    with _test_eager_guard():
-        pass
     unittest.main()
@@ -22,7 +22,6 @@ import paddle
 import paddle.fluid as fluid
 import paddle.fluid.core as core
 from paddle.fluid.backward import _append_grad_suffix_, _as_list
-from paddle.fluid.framework import _test_eager_guard
 
 
 def _product(t):
@@ -769,10 +768,7 @@ def double_grad_check_for_dygraph(
     x_init = _as_list(x_init)
 
     paddle.disable_static()
-    with _test_eager_guard():
-        eager_double_grad = get_eager_double_grad(
-            func, x_init, y_grads_init, place
-        )
+    eager_double_grad = get_eager_double_grad(func, x_init, y_grads_init, place)
     paddle.enable_static()
 
     static_double_grad = get_static_double_grad(
@@ -935,10 +931,7 @@ def triple_grad_check_for_dygraph(
     x_init = _as_list(x_init)
 
     paddle.disable_static()
-    with _test_eager_guard():
-        eager_triple_grad = get_eager_triple_grad(
-            func, x_init, y_grads_init, place
-        )
+    eager_triple_grad = get_eager_triple_grad(func, x_init, y_grads_init, place)
    paddle.enable_static()
 
     static_triple_grad = get_static_triple_grad(
@@ -19,7 +19,6 @@ import numpy as np
 import paddle
 import paddle.distributed as dist
 import paddle.fluid as fluid
-from paddle.fluid.framework import _test_eager_guard
 from paddle.nn import Linear
 
 paddle.seed(1024)
@@ -69,58 +68,57 @@ class SimpleNet(fluid.Layer):
 class TestDistTraning(unittest.TestCase):
     def test_multiple_gpus(self):
         self.trainer_id = dist.get_rank()
-        with _test_eager_guard():
-            self.pg = dist.init_parallel_env()
+        self.pg = dist.init_parallel_env()
 
-            model_a = SimpleNet(self.trainer_id)
-            model_b = SimpleNet(self.trainer_id)
+        model_a = SimpleNet(self.trainer_id)
+        model_b = SimpleNet(self.trainer_id)
 
-            state_dict = model_a.state_dict()
-            model_b.set_state_dict(state_dict)
+        state_dict = model_a.state_dict()
+        model_b.set_state_dict(state_dict)
 
-            model_a = paddle.DataParallel(
-                model_a, find_unused_parameters=True, group=self.pg
-            )
-            model_b = paddle.DataParallel(
-                model_b, find_unused_parameters=True, group=self.pg
-            )
+        model_a = paddle.DataParallel(
+            model_a, find_unused_parameters=True, group=self.pg
+        )
+        model_b = paddle.DataParallel(
+            model_b, find_unused_parameters=True, group=self.pg
+        )
 
-            ones_input = paddle.ones(shape=(batch, in_dim))
-            ones_input.stop_gradient = True
+        ones_input = paddle.ones(shape=(batch, in_dim))
+        ones_input.stop_gradient = True
 
-            w1_grad_sum = np.zeros((in_dim, out_dim), dtype='float32')
-            w2_grad_sum = np.zeros((in_dim, out_dim), dtype='float32')
+        w1_grad_sum = np.zeros((in_dim, out_dim), dtype='float32')
+        w2_grad_sum = np.zeros((in_dim, out_dim), dtype='float32')
 
-            for step_id in range(5):
-                random_input = paddle.rand(shape=(batch, in_dim))
-                random_input.stop_gradient = True
+        for step_id in range(5):
+            random_input = paddle.rand(shape=(batch, in_dim))
+            random_input.stop_gradient = True
 
-                if step_id % 2 == 0:
-                    out_a = model_a(random_input)
-                    out_b = model_b(random_input)
-                else:
-                    out_a = model_a(ones_input)
-                    out_b = model_b(ones_input)
+            if step_id % 2 == 0:
+                out_a = model_a(random_input)
+                out_b = model_b(random_input)
+            else:
+                out_a = model_a(ones_input)
+                out_b = model_b(ones_input)
 
-                out_a.sum().backward()
-                out_b.sum().backward()
+            out_a.sum().backward()
+            out_b.sum().backward()
 
-                self.check_gradient(model_a.parameters())
-                self.check_gradient(model_b.parameters())
+            self.check_gradient(model_a.parameters())
+            self.check_gradient(model_b.parameters())
 
-                # test acc gradient
-                w1_grad_sum = self.check_acc(
-                    model_a._layers.w1.grad,
-                    w1_grad_sum,
-                    model_b._layers.w1.grad,
-                )
-                w2_grad_sum = self.check_acc(
-                    model_a._layers.w2.grad,
-                    w2_grad_sum,
-                    model_b._layers.w2.grad,
-                )
+            # test acc gradient
+            w1_grad_sum = self.check_acc(
+                model_a._layers.w1.grad,
+                w1_grad_sum,
+                model_b._layers.w1.grad,
+            )
+            w2_grad_sum = self.check_acc(
+                model_a._layers.w2.grad,
+                w2_grad_sum,
+                model_b._layers.w2.grad,
+            )
 
-                model_a.clear_gradients()
+            model_a.clear_gradients()
 
     def check_acc(self, grad, grad_sum, acc_grad):
         if grad is not None:
@@ -17,12 +17,10 @@ import unittest
 import numpy as np
 
 import paddle
-import paddle.fluid.core as core
-from paddle.fluid.framework import _test_eager_guard
 
 
 class TensorTypeTest(unittest.TestCase):
-    def func_type_totensor(self):
+    def test_type_totensor(self):
         paddle.disable_static()
         inx = np.array([1, 2])
         tensorx = paddle.to_tensor(inx)
@@ -30,12 +28,7 @@ class TensorTypeTest(unittest.TestCase):
         expectx = "<class 'paddle.Tensor'>"
         self.assertEqual((typex_str == expectx), True)
 
-    def test_type_totensor(self):
-        with _test_eager_guard():
-            self.func_type_totensor()
-        self.func_type_totensor()
-
-    def func_type_Tensor(self):
+    def test_type_Tensor(self):
         paddle.disable_static()
         inx = np.array([1, 2])
         tensorx = paddle.Tensor(inx)
@@ -49,29 +42,6 @@ class TensorTypeTest(unittest.TestCase):
         expectx = "<class 'paddle.Tensor'>"
         self.assertEqual((typex_str == expectx), True)
 
-    def test_type_Tensor(self):
-        with _test_eager_guard():
-            self.func_type_Tensor()
-        self.func_type_Tensor()
-
-    def func_type_core(self):
-        paddle.disable_static()
-        inx = np.array([1, 2])
-        tensorx = core.VarBase(inx)
-        typex_str = str(type(tensorx))
-        expectx = "<class 'paddle.Tensor'>"
-        self.assertEqual((typex_str == expectx), True)
-
-        tensorx = paddle.framework.VarBase(inx)
-        typex_str = str(type(tensorx))
-        expectx = "<class 'paddle.Tensor'>"
-        self.assertEqual((typex_str == expectx), True)
-
-    def test_type_core(self):
-        with _test_eager_guard():
-            pass
-        self.func_type_core()
 
 
 if __name__ == '__main__':
     unittest.main()