Unverified commit 3900d562, authored by 姜永久, committed by GitHub

rm unittests eager guard test part15 layers2maxout (#48837)

Parent 53ce406a
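This PR applies one mechanical pattern across the touched test files: the `_test_eager_guard` import is dropped, test bodies that previously ran twice (once inside `with _test_eager_guard():` and once without) keep only the single plain dygraph run, and `func_*` helpers that existed only to be called under the guard are renamed back to `test_*` methods. A minimal before/after sketch of that pattern follows; the `TestExample` class is illustrative only (not taken from the diff) and assumes eager mode is already the default dygraph mode, which is the premise of this cleanup series.

import unittest

import numpy as np
import paddle


class TestExample(unittest.TestCase):
    # Before this series: the body ran twice, once under the eager guard.
    #
    # def func_relu(self):
    #     t = np.ones([3, 3], dtype='float32')
    #     dy_ret = paddle.nn.functional.relu(paddle.to_tensor(t))
    #     np.testing.assert_allclose(dy_ret.numpy(), t, rtol=1e-05)
    #
    # def test_relu(self):
    #     with _test_eager_guard():
    #         self.func_relu()
    #     self.func_relu()

    # After: eager mode is the default dygraph mode, so a single run is enough.
    def test_relu(self):
        t = np.ones([3, 3], dtype='float32')
        dy_ret = paddle.nn.functional.relu(paddle.to_tensor(t))
        np.testing.assert_allclose(dy_ret.numpy(), t, rtol=1e-05)


if __name__ == '__main__':
    unittest.main()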
@@ -27,12 +27,7 @@ import paddle.fluid.nets as nets
import paddle.nn.functional as F
from paddle.fluid import core
from paddle.fluid.dygraph import base, to_variable
from paddle.fluid.framework import (
    Program,
    _test_eager_guard,
    default_main_program,
    program_guard,
)
from paddle.fluid.framework import Program, default_main_program, program_guard
from paddle.tensor import random
@@ -102,14 +97,6 @@ class TestLayer(LayerTest):
return ret
with self.dynamic_graph():
with _test_eager_guard():
inp = np.ones([3, 3], dtype='float32')
x = base.to_variable(inp)
custom = CustomLayer(input_size=3, linear1_size=2)
ret = custom(x, do_linear2=False)
np.testing.assert_array_equal(ret.numpy().shape, [3, 2])
ret = custom(x, do_linear2=True)
np.testing.assert_array_equal(ret.numpy().shape, [3, 1])
inp = np.ones([3, 3], dtype='float32')
x = base.to_variable(inp)
custom = CustomLayer(input_size=3, linear1_size=2)
@@ -134,14 +121,6 @@ class TestLayer(LayerTest):
feed={'data': inp}, fetch_list=[ret, ret2]
)
with self.dynamic_graph():
with _test_eager_guard():
t = base.to_variable(inp)
dropout = paddle.nn.Dropout(p=0.35)
dy_eager_ret = dropout(t)
dy_eager_ret2 = paddle.nn.functional.dropout(t, p=0.35)
dy_eager_ret_value = dy_eager_ret.numpy()
dy_eager_ret2_value = dy_eager_ret2.numpy()
t = base.to_variable(inp)
dropout = paddle.nn.Dropout(p=0.35)
dy_ret = dropout(t)
@@ -149,9 +128,6 @@ class TestLayer(LayerTest):
dy_ret_value = dy_ret.numpy()
dy_ret2_value = dy_ret2.numpy()
np.testing.assert_array_equal(dy_eager_ret_value, dy_eager_ret2_value)
np.testing.assert_array_equal(static_ret, dy_eager_ret_value)
np.testing.assert_array_equal(static_ret, static_ret2)
np.testing.assert_array_equal(dy_ret_value, dy_ret2_value)
np.testing.assert_array_equal(static_ret, dy_ret_value)
@@ -173,16 +149,6 @@ class TestLayer(LayerTest):
feed={'data': inp}, fetch_list=[ret]
)[0]
with self.dynamic_graph():
with _test_eager_guard():
t = base.to_variable(inp)
linear = paddle.nn.Linear(
32,
4,
bias_attr=fluid.initializer.ConstantInitializer(value=1),
)
dy_eager_ret = linear(t)
dy_eager_ret_value = dy_eager_ret.numpy()
t = base.to_variable(inp)
linear = paddle.nn.Linear(
32, 4, bias_attr=fluid.initializer.ConstantInitializer(value=1)
@@ -190,7 +156,6 @@ class TestLayer(LayerTest):
dy_ret = linear(t)
dy_ret_value = dy_ret.numpy()
np.testing.assert_array_equal(static_ret, dy_eager_ret_value)
np.testing.assert_array_equal(static_ret, dy_ret_value)
with self.static_graph():
@@ -275,18 +240,11 @@ class TestLayer(LayerTest):
feed={'data': inp}, fetch_list=[ret]
)[0]
with self.dynamic_graph():
with _test_eager_guard():
t = base.to_variable(inp)
flatten = paddle.nn.Flatten()
dy_eager_ret = flatten(t)
dy_eager_ret_value = dy_eager_ret.numpy()
t = base.to_variable(inp)
flatten = paddle.nn.Flatten()
dy_ret = flatten(t)
dy_ret_value = dy_ret.numpy()
np.testing.assert_array_equal(static_ret, dy_eager_ret_value)
np.testing.assert_array_equal(static_ret, dy_ret_value)
with self.static_graph():
@@ -328,18 +286,11 @@ class TestLayer(LayerTest):
)[0]
with self.dynamic_graph():
with _test_eager_guard():
t = np.ones([3, 3, 5, 5], dtype='float32')
my_syncbn = paddle.nn.SyncBatchNorm(3)
dy_eager_ret = my_syncbn(base.to_variable(t))
dy_eager_ret_value = dy_eager_ret.numpy()
t = np.ones([3, 3, 5, 5], dtype='float32')
my_syncbn = paddle.nn.SyncBatchNorm(3)
dy_ret = my_syncbn(base.to_variable(t))
dy_ret_value = dy_ret.numpy()
np.testing.assert_array_equal(static_ret, dy_ret_value)
np.testing.assert_array_equal(static_ret, dy_eager_ret_value)
def test_relu(self):
with self.static_graph():
@@ -350,17 +301,11 @@ class TestLayer(LayerTest):
)[0]
with self.dynamic_graph():
with _test_eager_guard():
t = np.ones([3, 3], dtype='float32')
dy_eager_ret = F.relu(base.to_variable(t))
dy_eager_ret_value = dy_eager_ret.numpy()
t = np.ones([3, 3], dtype='float32')
dy_ret = F.relu(base.to_variable(t))
dy_ret_value = dy_ret.numpy()
np.testing.assert_allclose(static_ret, dy_ret_value, rtol=1e-05)
np.testing.assert_allclose(static_ret, dy_eager_ret_value, rtol=1e-05)
def test_matmul(self):
with self.static_graph():
@@ -376,21 +321,12 @@ class TestLayer(LayerTest):
)[0]
with self.dynamic_graph():
with _test_eager_guard():
t = np.ones([3, 3], dtype='float32')
t2 = np.ones([3, 3], dtype='float32')
dy_eager_ret = paddle.matmul(
base.to_variable(t), base.to_variable(t2)
)
dy_eager_ret_value = dy_eager_ret.numpy()
t = np.ones([3, 3], dtype='float32')
t2 = np.ones([3, 3], dtype='float32')
dy_ret = paddle.matmul(base.to_variable(t), base.to_variable(t2))
dy_ret_value = dy_ret.numpy()
np.testing.assert_allclose(static_ret, dy_ret_value, rtol=1e-05)
np.testing.assert_allclose(static_ret, dy_eager_ret_value, rtol=1e-05)
def test_elementwise_math(self):
n = np.ones([3, 3], dtype='float32')
@@ -420,14 +356,6 @@ class TestLayer(LayerTest):
)[0]
with self.dynamic_graph():
with _test_eager_guard():
ret = paddle.add(to_variable(n), to_variable(n2))
ret = paddle.pow(ret, to_variable(n3))
ret = paddle.divide(ret, to_variable(n4))
ret = paddle.subtract(ret, to_variable(n5))
dy_eager_ret = paddle.multiply(ret, to_variable(n6))
dy_eager_ret_value = dy_eager_ret.numpy()
ret = paddle.add(to_variable(n), to_variable(n2))
ret = paddle.pow(ret, to_variable(n3))
ret = paddle.divide(ret, to_variable(n4))
@@ -436,19 +364,12 @@ class TestLayer(LayerTest):
dy_ret_value = dy_ret.numpy()
np.testing.assert_allclose(static_ret, dy_ret_value, rtol=1e-05)
np.testing.assert_allclose(static_ret, dy_eager_ret_value, rtol=1e-05)
def test_elementwise_minmax(self):
n = np.ones([3, 3], dtype='float32')
n2 = np.ones([3, 3], dtype='float32') * 2
with self.dynamic_graph():
with _test_eager_guard():
min_eager_ret = paddle.minimum(to_variable(n), to_variable(n2))
max_eager_ret = paddle.maximum(to_variable(n), to_variable(n2))
min_eager_ret_value = min_eager_ret.numpy()
max_eager_ret_value = max_eager_ret.numpy()
min_ret = paddle.minimum(to_variable(n), to_variable(n2))
max_ret = paddle.maximum(to_variable(n), to_variable(n2))
min_ret_value = min_ret.numpy()
@@ -456,8 +377,6 @@ class TestLayer(LayerTest):
np.testing.assert_allclose(n, min_ret_value, rtol=1e-05)
np.testing.assert_allclose(n2, max_ret_value, rtol=1e-05)
np.testing.assert_allclose(n, min_eager_ret_value, rtol=1e-05)
np.testing.assert_allclose(n2, max_eager_ret_value, rtol=1e-05)
def test_conv2d_transpose(self):
inp_np = np.arange(0, 24).reshape([2, 3, 2, 2]).astype('float32')
@@ -487,17 +406,6 @@ class TestLayer(LayerTest):
feed={'pixel': inp_np}, fetch_list=[out]
)[0]
with self.dynamic_graph():
with _test_eager_guard():
conv2d_transpose = paddle.nn.Conv2DTranspose(
3,
10,
27,
bias_attr=fluid.initializer.ConstantInitializer(value=1),
)
dy_eager_rlt = conv2d_transpose(base.to_variable(inp_np))
dy_eager_rlt = paddle.nn.functional.sigmoid(dy_eager_rlt)
dy_eager_rlt_value = dy_eager_rlt.numpy()
conv2d_transpose = paddle.nn.Conv2DTranspose(
3,
10,
@@ -509,53 +417,8 @@ class TestLayer(LayerTest):
dy_rlt_value = dy_rlt.numpy()
np.testing.assert_allclose(static_rlt2, static_rlt, rtol=1e-05)
np.testing.assert_allclose(dy_rlt_value, static_rlt2, rtol=1e-05)
np.testing.assert_allclose(dy_eager_rlt_value, static_rlt2, rtol=1e-05)
with self.dynamic_graph():
with _test_eager_guard():
images = np.ones([2, 3, 5, 5], dtype='float32')
custom_weight = np.random.randn(3, 3, 2, 2).astype("float32")
weight_attr = fluid.ParamAttr(
initializer=fluid.initializer.NumpyArrayInitializer(
custom_weight
)
)
conv2d1 = paddle.nn.Conv2DTranspose(3, 3, [2, 2])
conv2d2 = paddle.nn.Conv2DTranspose(
3,
3,
[2, 2],
weight_attr=weight_attr,
)
dy_ret1 = conv2d1(base.to_variable(images))
dy_ret2 = conv2d2(base.to_variable(images))
self.assertFalse(
np.array_equal(dy_ret1.numpy(), dy_ret2.numpy())
)
conv2d1_weight_np = conv2d1.weight.numpy()
conv2d1_bias = conv2d1.bias
self.assertFalse(
np.array_equal(conv2d1_weight_np, conv2d2.weight.numpy())
)
conv2d2.weight.set_value(conv2d1_weight_np)
np.testing.assert_array_equal(
conv2d1_weight_np, conv2d2.weight.numpy()
)
conv2d2.bias.set_value(conv2d1_bias)
dy_ret1 = conv2d1(base.to_variable(images))
dy_ret2 = conv2d2(base.to_variable(images))
np.testing.assert_array_equal(dy_ret1.numpy(), dy_ret2.numpy())
conv2d2.weight = conv2d1.weight
conv2d2.bias = conv2d1.bias
np.testing.assert_array_equal(
conv2d1.weight.numpy(), conv2d2.weight.numpy()
)
np.testing.assert_array_equal(
conv2d1.bias.numpy(), conv2d2.bias.numpy()
)
images = np.ones([2, 3, 5, 5], dtype='float32')
custom_weight = np.random.randn(3, 3, 2, 2).astype("float32")
weight_attr = fluid.ParamAttr(
@@ -660,19 +523,6 @@ class TestLayer(LayerTest):
feed={'x': inp_np_x, 'y': inp_np_y}, fetch_list=[out]
)[0]
with self.dynamic_graph():
with _test_eager_guard():
btp = paddle.nn.Bilinear(
3,
3,
6,
bias_attr=fluid.initializer.ConstantInitializer(value=1),
)
dy_eager_rlt = btp(
base.to_variable(inp_np_x), base.to_variable(inp_np_y)
)
dy_eager_rlt = paddle.nn.functional.sigmoid(dy_eager_rlt)
dy_eager_rlt_value = dy_eager_rlt.numpy()
btp = paddle.nn.Bilinear(
3,
3,
@@ -684,14 +534,6 @@ class TestLayer(LayerTest):
dy_rlt_value = dy_rlt.numpy()
with self.dynamic_graph():
with _test_eager_guard():
btp2 = paddle.nn.Bilinear(3, 3, 6)
dy_eager_rlt2 = btp2(
base.to_variable(inp_np_x), base.to_variable(inp_np_y)
)
dy_eager_rlt2 = paddle.nn.functional.sigmoid(dy_eager_rlt2)
dy_eager_rlt2_value = dy_eager_rlt2.numpy()
btp2 = paddle.nn.Bilinear(3, 3, 6)
dy_rlt2 = btp2(
base.to_variable(inp_np_x), base.to_variable(inp_np_y)
@@ -715,51 +557,10 @@ class TestLayer(LayerTest):
)[0]
np.testing.assert_array_equal(dy_rlt2_value, static_rlt3)
np.testing.assert_array_equal(dy_eager_rlt2_value, static_rlt3)
np.testing.assert_array_equal(static_rlt2, static_rlt)
np.testing.assert_array_equal(dy_rlt_value, static_rlt)
np.testing.assert_array_equal(dy_eager_rlt_value, static_rlt)
with self.dynamic_graph():
with _test_eager_guard():
custom_weight = np.random.randn(6, 3, 3).astype("float32")
weight_attr = fluid.ParamAttr(
initializer=fluid.initializer.NumpyArrayInitializer(
custom_weight
)
)
btp1 = paddle.nn.Bilinear(3, 3, 6)
btp2 = paddle.nn.Bilinear(3, 3, 6, weight_attr=weight_attr)
dy_rlt1 = btp1(
base.to_variable(inp_np_x), base.to_variable(inp_np_y)
)
dy_rlt1 = paddle.nn.functional.sigmoid(dy_rlt1)
dy_rlt2 = btp2(
base.to_variable(inp_np_x), base.to_variable(inp_np_y)
)
dy_rlt2 = paddle.nn.functional.sigmoid(dy_rlt2)
self.assertFalse(
np.array_equal(dy_rlt1.numpy(), dy_rlt2.numpy())
)
btp2.weight.set_value(btp1.weight.numpy())
btp2.bias.set_value(btp1.bias)
dy_rlt1 = btp1(
base.to_variable(inp_np_x), base.to_variable(inp_np_y)
)
dy_rlt2 = btp2(
base.to_variable(inp_np_x), base.to_variable(inp_np_y)
)
np.testing.assert_array_equal(dy_rlt1.numpy(), dy_rlt2.numpy())
btp2.weight = btp1.weight
btp2.bias = btp1.bias
np.testing.assert_array_equal(
btp1.weight.numpy(), btp2.weight.numpy()
)
np.testing.assert_array_equal(
btp1.bias.numpy(), btp2.bias.numpy()
)
custom_weight = np.random.randn(6, 3, 3).astype("float32")
weight_attr = fluid.ParamAttr(
initializer=fluid.initializer.NumpyArrayInitializer(
@@ -818,15 +619,6 @@ class TestLayer(LayerTest):
feed={'word': inp_word}, fetch_list=[emb_rlt]
)[0]
with self.dynamic_graph():
with _test_eager_guard():
emb2 = paddle.nn.Embedding(
dict_size,
32,
weight_attr='eager_emb.w',
sparse=False,
)
dy_eager_rlt = emb2(base.to_variable(inp_word))
dy_eager_rlt_value = dy_eager_rlt.numpy()
emb2 = paddle.nn.Embedding(
dict_size, 32, weight_attr='emb.w', sparse=False
@@ -836,41 +628,8 @@ class TestLayer(LayerTest):
self.assertTrue(np.allclose(static_rlt2, static_rlt))
self.assertTrue(np.allclose(dy_rlt_value, static_rlt))
self.assertTrue(np.allclose(dy_eager_rlt_value, static_rlt))
with self.dynamic_graph():
with _test_eager_guard():
custom_weight = np.random.randn(dict_size, 32).astype("float32")
weight_attr = fluid.ParamAttr(
initializer=fluid.initializer.NumpyArrayInitializer(
custom_weight
)
)
emb1 = paddle.nn.Embedding(dict_size, 32, sparse=False)
emb2 = paddle.nn.Embedding(
dict_size,
32,
weight_attr=weight_attr,
sparse=False,
)
rep1 = emb1(base.to_variable(inp_word))
rep2 = emb2(base.to_variable(inp_word))
self.assertFalse(
np.array_equal(emb1.weight.numpy(), custom_weight)
)
np.testing.assert_array_equal(
emb2.weight.numpy(), custom_weight
)
self.assertFalse(np.array_equal(rep1.numpy(), rep2.numpy()))
emb2.weight.set_value(emb1.weight.numpy())
rep2 = emb2(base.to_variable(inp_word))
np.testing.assert_array_equal(rep1.numpy(), rep2.numpy())
emb2.weight = emb1.weight
np.testing.assert_array_equal(
emb1.weight.numpy(), emb2.weight.numpy()
)
custom_weight = np.random.randn(dict_size, 32).astype("float32")
weight_attr = fluid.ParamAttr(
initializer=fluid.initializer.NumpyArrayInitializer(
@@ -897,18 +656,6 @@ class TestLayer(LayerTest):
def test_one_hot(self):
with self.dynamic_graph():
with _test_eager_guard():
label = fluid.dygraph.to_variable(
np.array([[1], [1], [3], [0]])
)
one_hot_label1 = fluid.layers.one_hot(input=label, depth=4)
one_hot_label2 = fluid.layers.one_hot(
input=label, depth=fluid.dygraph.to_variable(np.array([4]))
)
np.testing.assert_array_equal(
one_hot_label1.numpy(), one_hot_label2.numpy()
)
label = fluid.dygraph.to_variable(np.array([[1], [1], [3], [0]]))
one_hot_label1 = fluid.layers.one_hot(input=label, depth=4)
one_hot_label2 = fluid.layers.one_hot(
@@ -920,17 +667,6 @@ class TestLayer(LayerTest):
def test_split(self):
with self.dynamic_graph():
with _test_eager_guard():
input = fluid.dygraph.to_variable(np.random.random((3, 8, 5)))
x0, x1 = paddle.split(input, num_or_sections=2, axis=1)
x00, x11 = paddle.split(
input,
num_or_sections=2,
axis=fluid.dygraph.to_variable(np.array([1])),
)
np.testing.assert_array_equal(x0.numpy(), x00.numpy())
np.testing.assert_array_equal(x1.numpy(), x11.numpy())
input = fluid.dygraph.to_variable(np.random.random((3, 8, 5)))
x0, x1 = paddle.split(input, num_or_sections=2, axis=1)
x00, x11 = paddle.split(
@@ -943,19 +679,6 @@ class TestLayer(LayerTest):
def test_topk(self):
with self.dynamic_graph():
with _test_eager_guard():
input = fluid.dygraph.to_variable(np.random.random((13, 11)))
top5_values1, top5_indices1 = paddle.topk(input, k=5)
top5_values2, top5_indices2 = paddle.topk(
input, k=fluid.dygraph.to_variable(np.array([5]))
)
np.testing.assert_array_equal(
top5_values1.numpy(), top5_values2.numpy()
)
np.testing.assert_array_equal(
top5_indices1.numpy(), top5_indices2.numpy()
)
input = fluid.dygraph.to_variable(np.random.random((13, 11)))
top5_values1, top5_indices1 = paddle.topk(input, k=5)
top5_values2, top5_indices2 = paddle.topk(
@@ -995,14 +718,6 @@ class TestLayer(LayerTest):
)[0]
with self.dynamic_graph():
with _test_eager_guard():
images = np.ones([2, 3, 6, 6, 6], dtype='float32')
conv3d = paddle.nn.Conv3D(
in_channels=3, out_channels=3, kernel_size=2
)
dy_eager_ret = conv3d(base.to_variable(images))
dy_eager_rlt_value = dy_eager_ret.numpy()
images = np.ones([2, 3, 6, 6, 6], dtype='float32')
conv3d = paddle.nn.Conv3D(
in_channels=3, out_channels=3, kernel_size=2
@@ -1011,56 +726,9 @@ class TestLayer(LayerTest):
dy_rlt_value = dy_ret.numpy()
np.testing.assert_allclose(static_ret, dy_rlt_value, rtol=1e-05)
np.testing.assert_allclose(static_ret, dy_eager_rlt_value, rtol=1e-05)
np.testing.assert_allclose(static_ret, static_ret2, rtol=1e-05)
with self.dynamic_graph():
with _test_eager_guard():
images = np.ones([2, 3, 6, 6, 6], dtype='float32')
custom_weight = np.random.randn(3, 3, 2, 2, 2).astype("float32")
weight_attr = fluid.ParamAttr(
initializer=fluid.initializer.NumpyArrayInitializer(
custom_weight
)
)
conv3d1 = paddle.nn.Conv3D(
in_channels=3, out_channels=3, kernel_size=2
)
conv3d2 = paddle.nn.Conv3D(
in_channels=3,
out_channels=3,
kernel_size=2,
weight_attr=weight_attr,
)
dy_ret1 = conv3d1(base.to_variable(images))
dy_ret2 = conv3d2(base.to_variable(images))
self.assertFalse(
np.array_equal(dy_ret1.numpy(), dy_ret2.numpy())
)
conv3d1_weight_np = conv3d1.weight.numpy()
conv3d1_bias = conv3d1.bias
self.assertFalse(
np.array_equal(conv3d1_weight_np, conv3d2.weight.numpy())
)
conv3d2.weight.set_value(conv3d1_weight_np)
np.testing.assert_array_equal(
conv3d1_weight_np, conv3d2.weight.numpy()
)
conv3d1.bias.set_value(conv3d1_bias)
dy_ret1 = conv3d1(base.to_variable(images))
dy_ret2 = conv3d2(base.to_variable(images))
np.testing.assert_array_equal(dy_ret1.numpy(), dy_ret2.numpy())
conv3d2.weight = conv3d1.weight
conv3d2.bias = conv3d1.bias
np.testing.assert_array_equal(
conv3d1.weight.numpy(), conv3d2.weight.numpy()
)
np.testing.assert_array_equal(
conv3d1.bias.numpy(), conv3d2.bias.numpy()
)
images = np.ones([2, 3, 6, 6, 6], dtype='float32')
custom_weight = np.random.randn(3, 3, 2, 2, 2).astype("float32")
weight_attr = fluid.ParamAttr(
@@ -1104,7 +772,7 @@ class TestLayer(LayerTest):
conv3d1.bias.numpy(), conv3d2.bias.numpy()
)
def func_group_norm(self):
def test_group_norm(self):
if core.is_compiled_with_cuda():
place = core.CUDAPlace(0)
else:
@@ -1176,11 +844,6 @@ class TestLayer(LayerTest):
np.testing.assert_allclose(static_ret, dy_rlt_value, rtol=1e-05)
np.testing.assert_allclose(static_ret, static_ret2, rtol=1e-05)
def test_group_norm(self):
with _test_eager_guard():
self.func_group_norm()
self.func_group_norm()
def test_instance_norm(self):
if core.is_compiled_with_cuda():
place = core.CUDAPlace(0)
@@ -1211,29 +874,17 @@ class TestLayer(LayerTest):
)[0]
with self.dynamic_graph():
with _test_eager_guard():
instanceNorm = paddle.nn.InstanceNorm2D(num_features=shape[1])
dy_eager_ret = instanceNorm(base.to_variable(input))
dy_eager_rlt_value = dy_eager_ret.numpy()
instanceNorm = paddle.nn.InstanceNorm2D(num_features=shape[1])
dy_ret = instanceNorm(base.to_variable(input))
dy_rlt_value = dy_ret.numpy()
with self.dynamic_graph():
with _test_eager_guard():
instanceNorm = paddle.nn.InstanceNorm2D(num_features=shape[1])
dy_eager_ret = instanceNorm(base.to_variable(input))
dy_eager_rlt_value2 = dy_eager_ret.numpy()
instanceNorm = paddle.nn.InstanceNorm2D(num_features=shape[1])
dy_ret = instanceNorm(base.to_variable(input))
dy_rlt_value2 = dy_ret.numpy()
np.testing.assert_allclose(static_ret, dy_rlt_value, rtol=1e-05)
np.testing.assert_allclose(static_ret, dy_rlt_value2, rtol=1e-05)
np.testing.assert_allclose(static_ret, dy_eager_rlt_value, rtol=1e-05)
np.testing.assert_allclose(static_ret, dy_eager_rlt_value2, rtol=1e-05)
np.testing.assert_allclose(static_ret, static_ret2, rtol=1e-05)
with self.static_graph():
@@ -1302,19 +953,11 @@ class TestLayer(LayerTest):
)[0]
with self.dynamic_graph():
with _test_eager_guard():
spectralNorm = paddle.nn.SpectralNorm(
shape, axis=1, power_iters=2
)
dy_eager_ret = spectralNorm(base.to_variable(input))
dy_eager_rlt_value = dy_eager_ret.numpy()
spectralNorm = paddle.nn.SpectralNorm(shape, axis=1, power_iters=2)
dy_ret = spectralNorm(base.to_variable(input))
dy_rlt_value = dy_ret.numpy()
np.testing.assert_allclose(static_ret, dy_rlt_value, rtol=1e-05)
np.testing.assert_allclose(static_ret, dy_eager_rlt_value, rtol=1e-05)
np.testing.assert_allclose(static_ret, static_ret2, rtol=1e-05)
def test_conv3d_transpose(self):
@@ -1340,15 +983,6 @@ class TestLayer(LayerTest):
feed={'pixel': input_array}, fetch_list=[out]
)[0]
with self.dynamic_graph():
with _test_eager_guard():
conv3d_transpose = paddle.nn.Conv3DTranspose(
in_channels=3,
out_channels=12,
kernel_size=12,
)
dy_eager_rlt = conv3d_transpose(base.to_variable(input_array))
dy_eager_rlt_value = dy_eager_rlt.numpy()
conv3d_transpose = paddle.nn.Conv3DTranspose(
in_channels=3, out_channels=12, kernel_size=12
)
@@ -1356,59 +990,8 @@ class TestLayer(LayerTest):
dy_rlt_value = dy_rlt.numpy()
np.testing.assert_allclose(static_rlt2, static_rlt, rtol=1e-05)
np.testing.assert_allclose(dy_rlt_value, static_rlt, rtol=1e-05)
np.testing.assert_allclose(dy_eager_rlt_value, static_rlt, rtol=1e-05)
with self.dynamic_graph():
with _test_eager_guard():
images = np.ones([2, 3, 6, 6, 6], dtype='float32')
custom_weight = np.random.randn(3, 3, 2, 2, 2).astype("float32")
weight_attr = fluid.ParamAttr(
initializer=fluid.initializer.NumpyArrayInitializer(
custom_weight
)
)
conv3d1 = paddle.nn.Conv3DTranspose(
in_channels=3,
out_channels=3,
kernel_size=2,
bias_attr='eager_conv3d1_b',
)
conv3d2 = paddle.nn.Conv3DTranspose(
in_channels=3,
out_channels=3,
kernel_size=2,
weight_attr=weight_attr,
bias_attr='eager_conv3d2_b',
)
dy_ret1 = conv3d1(base.to_variable(images))
dy_ret2 = conv3d2(base.to_variable(images))
self.assertFalse(
np.array_equal(dy_ret1.numpy(), dy_ret2.numpy())
)
conv3d1_weight_np = conv3d1.weight.numpy()
conv3d1_bias = conv3d1.bias
self.assertFalse(
np.array_equal(conv3d1_weight_np, conv3d2.weight.numpy())
)
conv3d2.weight.set_value(conv3d1_weight_np)
np.testing.assert_array_equal(
conv3d1_weight_np, conv3d2.weight.numpy()
)
conv3d1.bias.set_value(conv3d1_bias)
dy_ret1 = conv3d1(base.to_variable(images))
dy_ret2 = conv3d2(base.to_variable(images))
np.testing.assert_array_equal(dy_ret1.numpy(), dy_ret2.numpy())
conv3d2.weight = conv3d1.weight
conv3d2.bias = conv3d1.bias
np.testing.assert_array_equal(
conv3d1.weight.numpy(), conv3d2.weight.numpy()
)
np.testing.assert_array_equal(
conv3d1.bias.numpy(), conv3d2.bias.numpy()
)
images = np.ones([2, 3, 6, 6, 6], dtype='float32')
custom_weight = np.random.randn(3, 3, 2, 2, 2).astype("float32")
weight_attr = fluid.ParamAttr(
@@ -1456,7 +1039,7 @@ class TestLayer(LayerTest):
conv3d1.bias.numpy(), conv3d2.bias.numpy()
)
def func_while_loop(self):
def test_while_loop(self):
with self.static_graph():
i = layers.fill_constant(shape=[1], dtype='int64', value=0)
ten = layers.fill_constant(shape=[1], dtype='int64', value=10)
@@ -1491,11 +1074,6 @@ class TestLayer(LayerTest):
np.testing.assert_array_equal(static_ret[0], dy_ret[0].numpy())
def test_while_loop(self):
with _test_eager_guard():
self.func_while_loop()
self.func_while_loop()
def test_compare(self):
value_a = np.arange(3)
value_b = np.arange(3)
@@ -1508,14 +1086,6 @@ class TestLayer(LayerTest):
feed={"a": value_a, "b": value_b}, fetch_list=[cond]
)[0]
with self.dynamic_graph():
with _test_eager_guard():
da = base.to_variable(value_a)
db = base.to_variable(value_b)
dcond = paddle.less_than(x=da, y=db)
for i in range(len(static_ret)):
self.assertTrue(dcond.numpy()[i] == static_ret[i])
da = base.to_variable(value_a)
db = base.to_variable(value_b)
dcond = paddle.less_than(x=da, y=db)
@@ -1532,14 +1102,6 @@ class TestLayer(LayerTest):
feed={"a1": value_a, "b1": value_b}, fetch_list=[cond1]
)[0]
with self.dynamic_graph():
with _test_eager_guard():
da1 = base.to_variable(value_a)
db1 = base.to_variable(value_b)
dcond1 = paddle.less_equal(x=da1, y=db1)
for i in range(len(static_ret1)):
self.assertTrue(dcond1.numpy()[i] == static_ret1[i])
da1 = base.to_variable(value_a)
db1 = base.to_variable(value_b)
dcond1 = paddle.less_equal(x=da1, y=db1)
@@ -1556,14 +1118,6 @@ class TestLayer(LayerTest):
feed={"a2": value_a, "b2": value_b}, fetch_list=[cond2]
)[0]
with self.dynamic_graph():
with _test_eager_guard():
da2 = base.to_variable(value_a)
db2 = base.to_variable(value_b)
dcond2 = paddle.greater_than(x=da2, y=db2)
for i in range(len(static_ret2)):
self.assertTrue(dcond2.numpy()[i] == static_ret2[i])
da2 = base.to_variable(value_a)
db2 = base.to_variable(value_b)
dcond2 = paddle.greater_than(x=da2, y=db2)
@@ -1580,14 +1134,6 @@ class TestLayer(LayerTest):
feed={"a3": value_a, "b3": value_b}, fetch_list=[cond3]
)[0]
with self.dynamic_graph():
with _test_eager_guard():
da3 = base.to_variable(value_a)
db3 = base.to_variable(value_b)
dcond3 = paddle.greater_equal(x=da3, y=db3)
for i in range(len(static_ret3)):
self.assertTrue(dcond3.numpy()[i] == static_ret3[i])
da3 = base.to_variable(value_a)
db3 = base.to_variable(value_b)
dcond3 = paddle.greater_equal(x=da3, y=db3)
@@ -1604,14 +1150,6 @@ class TestLayer(LayerTest):
feed={"a4": value_a, "b4": value_b}, fetch_list=[cond4]
)[0]
with self.dynamic_graph():
with _test_eager_guard():
da4 = base.to_variable(value_a)
db4 = base.to_variable(value_b)
dcond4 = paddle.equal(x=da4, y=db4)
for i in range(len(static_ret4)):
self.assertTrue(dcond4.numpy()[i] == static_ret4[i])
da4 = base.to_variable(value_a)
db4 = base.to_variable(value_b)
dcond4 = paddle.equal(x=da4, y=db4)
@@ -1628,14 +1166,6 @@ class TestLayer(LayerTest):
feed={"a5": value_a, "b5": value_b}, fetch_list=[cond5]
)[0]
with self.dynamic_graph():
with _test_eager_guard():
da5 = base.to_variable(value_a)
db5 = base.to_variable(value_b)
dcond5 = paddle.equal(x=da5, y=db5)
for i in range(len(static_ret5)):
self.assertTrue(dcond5.numpy()[i] == static_ret5[i])
da5 = base.to_variable(value_a)
db5 = base.to_variable(value_b)
dcond5 = paddle.equal(x=da5, y=db5)
@@ -1672,31 +1202,6 @@ class TestLayer(LayerTest):
static_res = ret[0]
with self.dynamic_graph():
with _test_eager_guard():
a = fluid.dygraph.to_variable(np.array([0.1]).astype('float32'))
b = fluid.dygraph.to_variable(
np.array([0.23]).astype('float32')
)
out = paddle.static.nn.cond(
a < b,
lambda: less_than_branch(a, b),
lambda: greater_equal_branch(a, b),
)
out2 = paddle.static.nn.cond(
a >= b,
lambda: greater_equal_branch(a, b),
lambda: less_than_branch(a, b),
)
eager_dynamic_res = out.numpy()
eager_dynamic_res2 = out2.numpy()
np.testing.assert_array_equal(
eager_dynamic_res, eager_dynamic_res2
)
with self.assertRaises(TypeError):
paddle.static.nn.cond(a < b, 'str', 'str')
with self.assertRaises(TypeError):
paddle.static.nn.cond(a >= b, 'str', 'str')
a = fluid.dygraph.to_variable(np.array([0.1]).astype('float32'))
b = fluid.dygraph.to_variable(np.array([0.23]).astype('float32'))
out = paddle.static.nn.cond(
@@ -1718,7 +1223,6 @@ class TestLayer(LayerTest):
paddle.static.nn.cond(a >= b, 'str', 'str')
np.testing.assert_array_equal(static_res, dynamic_res)
np.testing.assert_array_equal(static_res, eager_dynamic_res)
def test_case(self):
def fn_1():
@@ -1755,24 +1259,6 @@ class TestLayer(LayerTest):
static_res1, static_res2 = exe.run(fetch_list=[out_1, out_2])
with self.dynamic_graph():
with _test_eager_guard():
x = layers.fill_constant(shape=[1], dtype='float32', value=0.3)
y = layers.fill_constant(shape=[1], dtype='float32', value=0.1)
z = layers.fill_constant(shape=[1], dtype='float32', value=0.2)
pred_1 = paddle.less_than(z, x) # true: 0.2 < 0.3
pred_2 = paddle.less_than(x, y) # false: 0.3 < 0.1
pred_3 = paddle.equal(x, y) # false: 0.3 == 0.1
out_1 = paddle.static.nn.case(
pred_fn_pairs=[(pred_1, fn_1), (pred_2, fn_2)], default=fn_3
)
out_2 = paddle.static.nn.case(
pred_fn_pairs=[(pred_2, fn_2), (pred_3, fn_3)]
)
eager_dynamic_res1 = out_1.numpy()
eager_dynamic_res2 = out_2.numpy()
x = layers.fill_constant(shape=[1], dtype='float32', value=0.3)
y = layers.fill_constant(shape=[1], dtype='float32', value=0.1)
z = layers.fill_constant(shape=[1], dtype='float32', value=0.2)
@@ -1792,8 +1278,6 @@ class TestLayer(LayerTest):
np.testing.assert_array_equal(static_res1, dynamic_res1)
np.testing.assert_array_equal(static_res2, dynamic_res2)
np.testing.assert_array_equal(static_res1, eager_dynamic_res1)
np.testing.assert_array_equal(static_res2, eager_dynamic_res2)
def test_switch_case(self):
def fn_1():
@@ -1835,33 +1319,6 @@ class TestLayer(LayerTest):
)
with self.dynamic_graph():
with _test_eager_guard():
index_1 = layers.fill_constant(
shape=[1], dtype='int32', value=1
)
index_2 = layers.fill_constant(
shape=[1], dtype='int32', value=2
)
out_1 = paddle.static.nn.switch_case(
branch_index=index_1,
branch_fns={1: fn_1, 2: fn_2},
default=fn_3,
)
out_2 = paddle.static.nn.switch_case(
branch_index=index_2,
branch_fns=[(1, fn_1), (2, fn_2)],
default=fn_3,
)
out_3 = paddle.static.nn.switch_case(
branch_index=index_2,
branch_fns=[(0, fn_1), (4, fn_2), (7, fn_3)],
)
eager_dynamic_res1 = out_1.numpy()
eager_dynamic_res2 = out_2.numpy()
eager_dynamic_res3 = out_3.numpy()
index_1 = layers.fill_constant(shape=[1], dtype='int32', value=1)
index_2 = layers.fill_constant(shape=[1], dtype='int32', value=2)
@@ -1887,9 +1344,6 @@ class TestLayer(LayerTest):
np.testing.assert_array_equal(static_res1, dynamic_res1)
np.testing.assert_array_equal(static_res2, dynamic_res2)
np.testing.assert_array_equal(static_res3, dynamic_res3)
np.testing.assert_array_equal(static_res1, eager_dynamic_res1)
np.testing.assert_array_equal(static_res2, eager_dynamic_res2)
np.testing.assert_array_equal(static_res3, eager_dynamic_res3)
def test_crop_tensor(self):
with self.static_graph():
@@ -1972,7 +1426,7 @@ class TestBook(LayerTest):
)
self.all_close_compare = set({"make_spectral_norm"})
def func_all_layers(self):
def test_all_layers(self):
attrs = (getattr(self, name) for name in dir(self))
methods = filter(inspect.ismethod, attrs)
for method in methods:
@@ -2028,11 +1482,6 @@ class TestBook(LayerTest):
),
)
def test_all_layers(self):
with _test_eager_guard():
self.func_all_layers()
self.func_all_layers()
def _get_np_data(self, shape, dtype, append_batch_size=True):
np.random.seed(self.seed)
if append_batch_size:
......
@@ -19,7 +19,6 @@ import numpy as np
import paddle
from paddle.distributed.models.moe import utils
from paddle.fluid import core
from paddle.fluid.framework import _test_eager_guard
def limit_by_capacity(expert_count, _capacity, n_worker):
@@ -88,7 +87,7 @@ class TestLimitByCapacityInt64API(unittest.TestCase):
assert all_close(self.out, res[0], self.n_worker)
def func_dygraph_api(self):
def test_dygraph_api(self):
paddle.disable_static(self.place)
capacity = paddle.to_tensor(self.capacity)
expert_count_tensor = paddle.to_tensor(self.expert_count)
@@ -97,11 +96,6 @@ class TestLimitByCapacityInt64API(unittest.TestCase):
)
assert all_close(self.out, out.numpy(), self.n_worker)
def test_dygraph_api(self):
with _test_eager_guard():
self.func_dygraph_api()
self.func_dygraph_api()
@unittest.skipIf(
not core.is_compiled_with_cuda(), "core is not compiled with CUDA"
......
@@ -18,7 +18,6 @@ import numpy as np
import paddle
import paddle.static as static
from paddle.fluid.framework import _test_eager_guard
p_list_n_n = ("fro", "nuc", 1, -1, np.inf, -np.inf) p_list_n_n = ("fro", "nuc", 1, -1, np.inf, -np.inf)
p_list_m_n = (None, 2, -2) p_list_m_n = (None, 2, -2)
...@@ -92,21 +91,16 @@ class API_TestStaticCond(unittest.TestCase): ...@@ -92,21 +91,16 @@ class API_TestStaticCond(unittest.TestCase):
class API_TestDygraphCond(unittest.TestCase): class API_TestDygraphCond(unittest.TestCase):
def func_out(self): def test_out(self):
paddle.disable_static() paddle.disable_static()
# test calling results of 'cond' in dynamic mode # test calling results of 'cond' in dynamic mode
x_list_n_n, x_list_m_n = gen_input() x_list_n_n, x_list_m_n = gen_input()
test_dygraph_assert_true(self, x_list_n_n, p_list_n_n + p_list_m_n) test_dygraph_assert_true(self, x_list_n_n, p_list_n_n + p_list_m_n)
test_dygraph_assert_true(self, x_list_m_n, p_list_m_n) test_dygraph_assert_true(self, x_list_m_n, p_list_m_n)
def test_out(self):
with _test_eager_guard():
self.func_out()
self.func_out()
class TestCondAPIError(unittest.TestCase):
def func_dygraph_api_error(self):
def test_dygraph_api_error(self):
paddle.disable_static()
# test raising errors when 'cond' is called in dygraph mode
p_list_error = ('fro_', '_nuc', -0.7, 0, 1.5, 3)
@@ -121,11 +115,6 @@ class TestCondAPIError(unittest.TestCase):
x_tensor = paddle.to_tensor(x)
self.assertRaises(ValueError, paddle.linalg.cond, x_tensor, p)
def test_dygraph_api_error(self):
with _test_eager_guard():
self.func_dygraph_api_error()
self.func_dygraph_api_error()
def test_static_api_error(self):
paddle.enable_static()
# test raising errors when 'cond' is called in static mode
@@ -162,18 +151,13 @@ class TestCondAPIError(unittest.TestCase):
class TestCondEmptyTensorInput(unittest.TestCase):
def func_dygraph_empty_tensor_input(self):
def test_dygraph_empty_tensor_input(self):
paddle.disable_static()
# test calling results of 'cond' when input is an empty tensor in dynamic mode
x_list_n_n, x_list_m_n = gen_empty_input()
test_dygraph_assert_true(self, x_list_n_n, p_list_n_n + p_list_m_n)
test_dygraph_assert_true(self, x_list_m_n, p_list_m_n)
def test_dygraph_empty_tensor_input(self):
with _test_eager_guard():
self.func_dygraph_empty_tensor_input()
self.func_dygraph_empty_tensor_input()
if __name__ == "__main__": if __name__ == "__main__":
paddle.enable_static() paddle.enable_static()
......
@@ -20,7 +20,6 @@ from op_test import OpTest
import paddle
import paddle.fluid as fluid
from paddle.fluid import Program, core, program_guard
from paddle.fluid.framework import _test_eager_guard
class TestLinspaceOpCommonCase(OpTest):
@@ -128,11 +127,6 @@ class TestLinspaceAPI(unittest.TestCase):
self.assertEqual((out2.numpy() == np_out2).all(), True)
self.assertEqual((out3.numpy() == np_out3).all(), True)
def test_api_eager_dygraph(self):
with _test_eager_guard():
self.test_variable_input2()
self.test_imperative()
class TestLinspaceOpError(unittest.TestCase):
def test_errors(self):
......
@@ -17,7 +17,6 @@ import unittest
import numpy as np
import paddle
from paddle.fluid.framework import _test_eager_guard
from paddle.framework import _non_static_mode
from paddle.static import Executor, Program, program_guard
@@ -106,15 +105,14 @@ def run_eager(x_np, y_np, op_str, use_gpu=False, binary_op=True):
if use_gpu and paddle.is_compiled_with_cuda():
place = paddle.CUDAPlace(0)
paddle.disable_static(place)
with _test_eager_guard():
    op = getattr(paddle, op_str)
    x = paddle.to_tensor(x_np, dtype=x_np.dtype)
    if not binary_op:
        dygraph_result = op(x)
    else:
        y = paddle.to_tensor(y_np, dtype=y_np.dtype)
        dygraph_result = op(x, y)
    return dygraph_result
op = getattr(paddle, op_str)
x = paddle.to_tensor(x_np, dtype=x_np.dtype)
if not binary_op:
    dygraph_result = op(x)
else:
    y = paddle.to_tensor(y_np, dtype=y_np.dtype)
    dygraph_result = op(x, y)
return dygraph_result
def np_data_generator(np_shape, dtype, *args, **kwargs):
......
@@ -18,7 +18,6 @@ import numpy as np
from op_test import OpTest
import paddle
from paddle.fluid.framework import _test_eager_guard
np.random.seed(10)
@@ -117,11 +116,6 @@ class TestLogitAPI(unittest.TestCase):
x = paddle.fluid.data(name='X2', shape=[100], dtype='float32')
self.assertRaises(TypeError, paddle.logit, x, dtype='int32')
def test_api_eager_dygraph(self):
with _test_eager_guard():
self.test_check_api()
self.test_errors()
if __name__ == "__main__": if __name__ == "__main__":
unittest.main() unittest.main()
...@@ -19,7 +19,6 @@ import numpy as np ...@@ -19,7 +19,6 @@ import numpy as np
import paddle import paddle
import paddle.fluid as fluid import paddle.fluid as fluid
import paddle.nn as nn import paddle.nn as nn
from paddle.fluid.framework import _test_eager_guard
LOOKAHEAD_K = 5
LOOKAHEAD_ALPHA = 0.2
@@ -71,7 +70,7 @@ class TestLookAhead(unittest.TestCase):
)
fast_param = latest_b - SGD_LR * b_grad
def func_test_look_ahead_dygraph(self):
def test_look_ahead_dygraph(self):
BATCH_SIZE = 16
BATCH_NUM = 4
EPOCH_NUM = 4
@@ -152,11 +151,6 @@ class TestLookAhead(unittest.TestCase):
train(layer, loader, loss_fn, lookahead)
def test_look_ahead_dygraph(self):
with _test_eager_guard():
self.func_test_look_ahead_dygraph()
self.func_test_look_ahead_dygraph()
if __name__ == "__main__": if __name__ == "__main__":
unittest.main() unittest.main()
...@@ -20,7 +20,6 @@ from op_test import OpTest, convert_float_to_uint16, get_numeric_gradient ...@@ -20,7 +20,6 @@ from op_test import OpTest, convert_float_to_uint16, get_numeric_gradient
import paddle import paddle
import paddle.fluid as fluid import paddle.fluid as fluid
import paddle.fluid.core as core import paddle.fluid.core as core
from paddle.fluid.framework import _test_eager_guard
from paddle.fluid.tests.unittests.testsuite import create_op
@@ -559,11 +558,6 @@ class TestMatMulV2API(unittest.TestCase):
{'FLAGS_gemm_use_half_precision_compute_type': False}
)
def test_api_eager_dygraph(self):
with _test_eager_guard():
self.test_dygraph()
self.test_dygraph_fp16()
class TestComplexMatMulOp(OpTest):
def setUp(self):
@@ -732,10 +726,6 @@ class TestMatmulop(unittest.TestCase):
paddle.enable_static()
def func_dygraph_matmul(self): # noqa: F811
with _test_eager_guard():
self.func_dygraph_matmul()
if __name__ == "__main__": if __name__ == "__main__":
paddle.enable_static() paddle.enable_static()
......
@@ -20,7 +20,6 @@ from test_sum_op import TestReduceOPTensorAxisBase
import paddle
import paddle.fluid.core as core
from paddle.fluid.framework import _test_eager_guard
class ApiMaxTest(unittest.TestCase):
@@ -83,10 +82,6 @@ class ApiMaxTest(unittest.TestCase):
z_expected = np.array(np.max(np_x, axis=0))
self.assertEqual((np_z == z_expected).all(), True)
def test_eager_api(self):
with _test_eager_guard():
self.test_imperative_api()
def test_big_dimension(self):
paddle.disable_static()
x = paddle.rand(shape=[2, 2, 2, 2, 2, 2, 2])
......
@@ -20,7 +20,6 @@ from op_test import OpTest
import paddle
import paddle.fluid.core as core
import paddle.nn.functional as F
from paddle.fluid.framework import _test_eager_guard
paddle.enable_static()
np.random.seed(1)
@@ -108,7 +107,7 @@ class TestMaxoutAPI(unittest.TestCase):
for r in res:
np.testing.assert_allclose(out_ref, r, rtol=1e-05)
def func_test_dygraph_api(self):
def test_dygraph_api(self):
paddle.disable_static(self.place)
x = paddle.to_tensor(self.x_np)
out1 = F.maxout(x, self.groups, self.axis)
@@ -136,11 +135,6 @@ class TestMaxoutAPI(unittest.TestCase):
x_float32 = paddle.fluid.data(name='x_float32', shape=[2, 4, 6, 8])
self.assertRaises(ValueError, F.maxout, x_float32, 2, 2)
def test_dygraph_api(self):
with _test_eager_guard():
self.func_test_dygraph_api()
self.func_test_dygraph_api()
if __name__ == '__main__':
unittest.main()