Unverified commit a7014f09, authored by 姜永久, committed by GitHub

rm unittests eager guard tests part6 eager_run2expand_v2 (#48817)

Parent 08524758
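All of the hunks below apply the same mechanical cleanup: the legacy `_test_eager_guard` context manager is removed from imports and test bodies, the guarded code is dedented, and the old `func_dygraph_*` helpers that the removed `test_*` wrappers used to call (once under the guard and once without) become plain `test_*` methods. As orientation for reviewers, here is a minimal sketch of the before/after pattern; the class name and values are hypothetical and do not correspond to any file touched by this commit:

```python
import unittest

import numpy as np
import paddle


class TestDygraphAddPattern(unittest.TestCase):
    # Hypothetical example (not a file in this PR) showing the post-cleanup
    # shape of a dygraph test. Previously the body lived in func_dygraph_add()
    # and a test_dygraph_add() wrapper ran it once inside
    # `with _test_eager_guard():` and once without; after the cleanup the body
    # is simply the test method itself.
    def test_dygraph_add(self):
        paddle.disable_static()  # run in dygraph (eager) mode
        a = paddle.to_tensor(np.ones([2, 3], dtype=np.float32))
        b = paddle.to_tensor(np.ones([2, 3], dtype=np.float32))
        out = paddle.add(a, b)
        np.testing.assert_array_equal(
            out.numpy(), np.full([2, 3], 2.0, dtype=np.float32)
        )
        paddle.enable_static()


if __name__ == '__main__':
    unittest.main()
```

The same transformation repeats in every hunk that follows; only indentation and method names differ per file.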
@@ -24,11 +24,7 @@ from paddle.fluid.executor import (
     _is_dy2st_enable_standalone_executor,
     _is_enable_standalone_executor,
 )
-from paddle.fluid.framework import (
-    Variable,
-    _in_legacy_dygraph,
-    _test_eager_guard,
-)
+from paddle.fluid.framework import Variable, _in_legacy_dygraph
 from paddle.fluid.layers.utils import _hash_with_id
@@ -124,68 +120,57 @@ class TestRunProgram(unittest.TestCase):
         paddle.disable_static('cpu')
         # step 2: call run_program in eager mode
-        with _test_eager_guard():
-            x_t = paddle.ones([2, 4])
-            x_t.name = "x"
-            x_t.stop_gradient = False
-            y_t = paddle.ones([4, 2])
-            y_t.name = "y"
-            y_t.stop_gradient = False
-
-            fake_var = paddle.zeros([1])
-            fake_var.name = 'Fake_var'
-
-            out_t = _create_out(out)
-
-            scope = core.Scope()
-            attrs = [
-                'global_block',
-                program.desc.block(0),
-                'start_op_index',
-                0,
-                'end_op_index',
-                main_program.desc.block(0).op_size(),
-                'is_test',
-                False,
-                'program_id',
-                _hash_with_id(program),
-            ]
-
-            use_interpretorcore = (
-                _is_enable_standalone_executor()
-                and _is_dy2st_enable_standalone_executor()
-            )
-            attrs.extend(('use_interpretorcore', use_interpretorcore))
-            if use_interpretorcore:
-                attrs.extend(
-                    (
-                        'forward_global_block',
-                        forward_program.desc.block(0),
-                        'backward_global_block',
-                        backward_program.desc.block(0),
-                    )
-                )
-
-            _legacy_C_ops.run_program(
-                [x_t, y_t],
-                [fake_var],
-                [out_t],
-                [scope],
-                [fake_var],
-                None,
-                *attrs
-            )
-            loss = paddle.mean(out_t)
-            loss.backward()
-
-            np.testing.assert_array_equal(np.ones([2, 2]) * 4, out_t.numpy())
-            np.testing.assert_array_equal(
-                np.ones([2, 4]) * 0.5, x_t.grad.numpy()
-            )
-            np.testing.assert_array_equal(
-                np.ones([4, 2]) * 0.5, y_t.grad.numpy()
-            )
+        x_t = paddle.ones([2, 4])
+        x_t.name = "x"
+        x_t.stop_gradient = False
+        y_t = paddle.ones([4, 2])
+        y_t.name = "y"
+        y_t.stop_gradient = False
+
+        fake_var = paddle.zeros([1])
+        fake_var.name = 'Fake_var'
+
+        out_t = _create_out(out)
+
+        scope = core.Scope()
+        attrs = [
+            'global_block',
+            program.desc.block(0),
+            'start_op_index',
+            0,
+            'end_op_index',
+            main_program.desc.block(0).op_size(),
+            'is_test',
+            False,
+            'program_id',
+            _hash_with_id(program),
+        ]
+
+        use_interpretorcore = (
+            _is_enable_standalone_executor()
+            and _is_dy2st_enable_standalone_executor()
+        )
+        attrs.extend(('use_interpretorcore', use_interpretorcore))
+        if use_interpretorcore:
+            attrs.extend(
+                (
+                    'forward_global_block',
+                    forward_program.desc.block(0),
+                    'backward_global_block',
+                    backward_program.desc.block(0),
+                )
+            )
+
+        _legacy_C_ops.run_program(
+            [x_t, y_t], [fake_var], [out_t], [scope], [fake_var], None, *attrs
+        )
+        loss = paddle.mean(out_t)
+        loss.backward()
+
+        np.testing.assert_array_equal(np.ones([2, 2]) * 4, out_t.numpy())
+        np.testing.assert_array_equal(np.ones([2, 4]) * 0.5, x_t.grad.numpy())
+        np.testing.assert_array_equal(np.ones([4, 2]) * 0.5, y_t.grad.numpy())


 if __name__ == '__main__':
...
@@ -17,29 +17,27 @@ import unittest
 import numpy as np
 import paddle
-from paddle.fluid.framework import _test_eager_guard


 class TestEagerTraceOp(unittest.TestCase):
     def test_branches(self):
-        with _test_eager_guard():
-            data = np.random.random([1, 1]).astype(np.float32)
-            x = paddle.to_tensor(data)
-
-            paddle.fluid.framework._dygraph_tracer().trace_op(
-                'broadcast_tensors',
-                {'X': [x, x], 'Out': [x, x]},
-                {'Out': [x, x]},
-                {},
-            )
-            paddle.fluid.framework._dygraph_tracer().trace_op(
-                'scale', {'X': x}, {'Out': x}, {'scale': 0.5}
-            )
-
-            scale = paddle.to_tensor(np.random.random([1]).astype(np.float32))
-            paddle.fluid.framework._dygraph_tracer().trace_op(
-                'instance_norm', {'Scale': [scale], 'X': [x]}, {'Y': [x]}, {}
-            )
+        data = np.random.random([1, 1]).astype(np.float32)
+        x = paddle.to_tensor(data)
+
+        paddle.fluid.framework._dygraph_tracer().trace_op(
+            'broadcast_tensors',
+            {'X': [x, x], 'Out': [x, x]},
+            {'Out': [x, x]},
+            {},
+        )
+        paddle.fluid.framework._dygraph_tracer().trace_op(
+            'scale', {'X': x}, {'Out': x}, {'scale': 0.5}
+        )
+
+        scale = paddle.to_tensor(np.random.random([1]).astype(np.float32))
+        paddle.fluid.framework._dygraph_tracer().trace_op(
+            'instance_norm', {'Scale': [scale], 'X': [x]}, {'Y': [x]}, {}
+        )


 if __name__ == "__main__":
...
@@ -17,55 +17,50 @@ import unittest
 import numpy as np
 import paddle
-from paddle.fluid.framework import _test_eager_guard


 class EagerOpAPIGenerateTestCase(unittest.TestCase):
     def test_elementwise_add(self):
-        with _test_eager_guard():
-            paddle.set_device("cpu")
-            np_x = np.ones([4, 16, 16, 32]).astype('float32')
-            np_y = np.ones([4, 16, 16, 32]).astype('float32')
-            x = paddle.to_tensor(np_x)
-            y = paddle.to_tensor(np_y)
-            out = paddle.add(x, y)
-            out_arr = out.numpy()
-
-            out_arr_expected = np.add(np_x, np_y)
-            np.testing.assert_array_equal(out_arr, out_arr_expected)
+        paddle.set_device("cpu")
+        np_x = np.ones([4, 16, 16, 32]).astype('float32')
+        np_y = np.ones([4, 16, 16, 32]).astype('float32')
+        x = paddle.to_tensor(np_x)
+        y = paddle.to_tensor(np_y)
+        out = paddle.add(x, y)
+        out_arr = out.numpy()
+
+        out_arr_expected = np.add(np_x, np_y)
+        np.testing.assert_array_equal(out_arr, out_arr_expected)

     def test_sum(self):
-        with _test_eager_guard():
-            x_data = np.array(
-                [[0.2, 0.3, 0.5, 0.9], [0.1, 0.2, 0.6, 0.7]]
-            ).astype('float32')
-            x = paddle.to_tensor(x_data, 'float32')
-            out = paddle.sum(x, axis=0)
-            out_arr = out.numpy()
-            out_arr_expected = np.sum(x_data, axis=0)
-            np.testing.assert_array_equal(out_arr, out_arr_expected)
+        x_data = np.array([[0.2, 0.3, 0.5, 0.9], [0.1, 0.2, 0.6, 0.7]]).astype(
+            'float32'
+        )
+        x = paddle.to_tensor(x_data, 'float32')
+        out = paddle.sum(x, axis=0)
+        out_arr = out.numpy()
+        out_arr_expected = np.sum(x_data, axis=0)
+        np.testing.assert_array_equal(out_arr, out_arr_expected)

     def test_mm(self):
-        with _test_eager_guard():
-            np_input = np.random.random([16, 32]).astype('float32')
-            np_mat2 = np.random.random([32, 32]).astype('float32')
-            input = paddle.to_tensor(np_input)
-            mat2 = paddle.to_tensor(np_mat2)
-            out = paddle.mm(input, mat2)
-            out_arr = out.numpy()
-            out_arr_expected = np.matmul(np_input, np_mat2)
-            np.testing.assert_allclose(out_arr, out_arr_expected, rtol=1e-05)
+        np_input = np.random.random([16, 32]).astype('float32')
+        np_mat2 = np.random.random([32, 32]).astype('float32')
+        input = paddle.to_tensor(np_input)
+        mat2 = paddle.to_tensor(np_mat2)
+        out = paddle.mm(input, mat2)
+        out_arr = out.numpy()
+        out_arr_expected = np.matmul(np_input, np_mat2)
+        np.testing.assert_allclose(out_arr, out_arr_expected, rtol=1e-05)

     def test_sigmoid(self):
-        with _test_eager_guard():
-            np_x = np.array([-0.4, -0.2, 0.1, 0.3]).astype('float32')
-            x = paddle.to_tensor(np_x)
-            out = paddle.nn.functional.sigmoid(x)
-            out_arr = out.numpy()
-            out_arr_expected = np.array(
-                [0.40131234, 0.450166, 0.52497919, 0.57444252]
-            ).astype('float32')
-            np.testing.assert_allclose(out_arr, out_arr_expected, rtol=1e-05)
+        np_x = np.array([-0.4, -0.2, 0.1, 0.3]).astype('float32')
+        x = paddle.to_tensor(np_x)
+        out = paddle.nn.functional.sigmoid(x)
+        out_arr = out.numpy()
+        out_arr_expected = np.array(
+            [0.40131234, 0.450166, 0.52497919, 0.57444252]
+        ).astype('float32')
+        np.testing.assert_allclose(out_arr, out_arr_expected, rtol=1e-05)


 if __name__ == "__main__":
...
@@ -17,7 +17,6 @@ import unittest
 import numpy as np
 import paddle.fluid.core as core
-from paddle.fluid.framework import _test_eager_guard


 class EagerStringTensorTestCase(unittest.TestCase):
@@ -32,77 +31,69 @@ class EagerStringTensorTestCase(unittest.TestCase):
         )  # From IMDB

     def test_constructor_with_args(self):
-        with _test_eager_guard():
-            ST1 = core.eager.StringTensor()  # constructor 1
-            self.assertEqual(ST1.name, "generated_string_tensor_0")
-            self.assertEqual(ST1.shape, [])
-            self.assertEqual(ST1.numpy(), '')
-
-            shape = [2, 3]
-            ST2 = core.eager.StringTensor(shape, "ST2")  # constructor 2
-            self.assertEqual(ST2.name, "ST2")
-            self.assertEqual(ST2.shape, shape)
-            np.testing.assert_array_equal(
-                ST2.numpy(), np.empty(shape, dtype=np.unicode_)
-            )
-
-            ST3 = core.eager.StringTensor(self.str_arr, "ST3")  # constructor 3
-            self.assertEqual(ST3.name, "ST3")
-            self.assertEqual(ST3.shape, list(self.str_arr.shape))
-            np.testing.assert_array_equal(ST3.numpy(), self.str_arr)
-
-            ST4 = core.eager.StringTensor(self.str_arr)  # constructor 4
-            self.assertEqual(ST4.name, "generated_string_tensor_1")
-            self.assertEqual(ST4.shape, list(self.str_arr.shape))
-            np.testing.assert_array_equal(ST4.numpy(), self.str_arr)
-
-            ST5 = core.eager.StringTensor(ST4)  # constructor 5
-            self.assertEqual(ST5.name, "generated_string_tensor_2")
-            self.assertEqual(ST5.shape, list(self.str_arr.shape))
-            np.testing.assert_array_equal(ST5.numpy(), self.str_arr)
-
-            ST6 = core.eager.StringTensor(ST5, "ST6")  # constructor 6
-            self.assertEqual(ST6.name, "ST6")
-            self.assertEqual(ST6.shape, list(self.str_arr.shape))
-            np.testing.assert_array_equal(ST6.numpy(), self.str_arr)
-
-            for st in [ST1, ST2, ST3, ST4, ST5, ST6]:
-                # All StringTensors are on cpu place so far.
-                self.assertTrue(st.place._equals(core.CPUPlace()))
+        ST1 = core.eager.StringTensor()  # constructor 1
+        self.assertEqual(ST1.name, "generated_string_tensor_0")
+        self.assertEqual(ST1.shape, [])
+        self.assertEqual(ST1.numpy(), '')
+
+        shape = [2, 3]
+        ST2 = core.eager.StringTensor(shape, "ST2")  # constructor 2
+        self.assertEqual(ST2.name, "ST2")
+        self.assertEqual(ST2.shape, shape)
+        np.testing.assert_array_equal(
+            ST2.numpy(), np.empty(shape, dtype=np.unicode_)
+        )
+
+        ST3 = core.eager.StringTensor(self.str_arr, "ST3")  # constructor 3
+        self.assertEqual(ST3.name, "ST3")
+        self.assertEqual(ST3.shape, list(self.str_arr.shape))
+        np.testing.assert_array_equal(ST3.numpy(), self.str_arr)
+
+        ST4 = core.eager.StringTensor(self.str_arr)  # constructor 4
+        self.assertEqual(ST4.name, "generated_string_tensor_1")
+        self.assertEqual(ST4.shape, list(self.str_arr.shape))
+        np.testing.assert_array_equal(ST4.numpy(), self.str_arr)
+
+        ST5 = core.eager.StringTensor(ST4)  # constructor 5
+        self.assertEqual(ST5.name, "generated_string_tensor_2")
+        self.assertEqual(ST5.shape, list(self.str_arr.shape))
+        np.testing.assert_array_equal(ST5.numpy(), self.str_arr)
+
+        ST6 = core.eager.StringTensor(ST5, "ST6")  # constructor 6
+        self.assertEqual(ST6.name, "ST6")
+        self.assertEqual(ST6.shape, list(self.str_arr.shape))
+        np.testing.assert_array_equal(ST6.numpy(), self.str_arr)
+
+        for st in [ST1, ST2, ST3, ST4, ST5, ST6]:
+            # All StringTensors are on cpu place so far.
+            self.assertTrue(st.place._equals(core.CPUPlace()))

     def test_constructor_with_kwargs(self):
-        with _test_eager_guard():
-            shape = [2, 3]
-            ST1 = core.eager.StringTensor(
-                dims=shape, name="ST1"
-            )  # constructor 2
-            self.assertEqual(ST1.name, "ST1")
-            self.assertEqual(ST1.shape, shape)
-            np.testing.assert_array_equal(
-                ST1.numpy(), np.empty(shape, dtype=np.unicode_)
-            )
-
-            ST2 = core.eager.StringTensor(
-                self.str_arr, name="ST2"
-            )  # constructor 3
-            self.assertEqual(ST2.name, "ST2")
-            self.assertEqual(ST2.shape, list(self.str_arr.shape))
-            np.testing.assert_array_equal(ST2.numpy(), self.str_arr)
-
-            ST3 = core.eager.StringTensor(ST2, name="ST3")  # constructor 6
-            self.assertEqual(ST3.name, "ST3")
-            self.assertEqual(ST3.shape, list(self.str_arr.shape))
-            np.testing.assert_array_equal(ST3.numpy(), self.str_arr)
-
-            ST4 = core.eager.StringTensor(
-                value=ST2, name="ST4"
-            )  # constructor 6
-            self.assertEqual(ST4.name, "ST4")
-            self.assertEqual(ST4.shape, list(self.str_arr.shape))
-            np.testing.assert_array_equal(ST4.numpy(), self.str_arr)
-
-            for st in [ST1, ST2, ST3, ST4]:
-                # All StringTensors are on cpu place so far.
-                self.assertTrue(st.place._equals(core.CPUPlace()))
+        shape = [2, 3]
+        ST1 = core.eager.StringTensor(dims=shape, name="ST1")  # constructor 2
+        self.assertEqual(ST1.name, "ST1")
+        self.assertEqual(ST1.shape, shape)
+        np.testing.assert_array_equal(
+            ST1.numpy(), np.empty(shape, dtype=np.unicode_)
+        )
+
+        ST2 = core.eager.StringTensor(self.str_arr, name="ST2")  # constructor 3
+        self.assertEqual(ST2.name, "ST2")
+        self.assertEqual(ST2.shape, list(self.str_arr.shape))
+        np.testing.assert_array_equal(ST2.numpy(), self.str_arr)
+
+        ST3 = core.eager.StringTensor(ST2, name="ST3")  # constructor 6
+        self.assertEqual(ST3.name, "ST3")
+        self.assertEqual(ST3.shape, list(self.str_arr.shape))
+        np.testing.assert_array_equal(ST3.numpy(), self.str_arr)
+
+        ST4 = core.eager.StringTensor(value=ST2, name="ST4")  # constructor 6
+        self.assertEqual(ST4.name, "ST4")
+        self.assertEqual(ST4.shape, list(self.str_arr.shape))
+        np.testing.assert_array_equal(ST4.numpy(), self.str_arr)
+
+        for st in [ST1, ST2, ST3, ST4]:
+            # All StringTensors are on cpu place so far.
+            self.assertTrue(st.place._equals(core.CPUPlace()))


 if __name__ == "__main__":
...
@@ -19,7 +19,6 @@ import numpy as np
 import paddle
 import paddle.fluid as fluid
 import paddle.fluid.core as core
-from paddle.fluid.framework import _test_eager_guard
 from paddle.fluid.tests.unittests.op_test import (
     OpTest,
     convert_float_to_uint16,
@@ -684,7 +683,7 @@ class TestBoolAddFloatElementwiseAddop(unittest.TestCase):
         self.assertTrue(c.dtype == core.VarDesc.VarType.FP32)
         paddle.enable_static()

-    def func_dygraph_add(self):
+    def test_dygraph_add(self):
         paddle.disable_static()
         a = 1.5
         b = paddle.full([2], True, dtype='bool')
@@ -715,14 +714,9 @@ class TestBoolAddFloatElementwiseAddop(unittest.TestCase):
         paddle.enable_static()

-    def test_dygraph_add(self):
-        with _test_eager_guard():
-            self.func_dygraph_add()
-        self.func_dygraph_add()
-

 class TestElementwiseAddop1(unittest.TestCase):
-    def func_dygraph_add(self):
+    def test_dygraph_add(self):
         paddle.disable_static()
         np_a = np.random.random((2, 3, 4)).astype(np.float32)
@@ -742,10 +736,6 @@ class TestElementwiseAddop1(unittest.TestCase):
         paddle.enable_static()

-    def test_dygraph_add(self):
-        with _test_eager_guard():
-            self.func_dygraph_add()
-

 if __name__ == '__main__':
     paddle.enable_static()
...
@@ -20,7 +20,6 @@ from op_test import OpTest, convert_float_to_uint16, skip_check_grad_ci
 import paddle
 from paddle import fluid
 from paddle.fluid import core
-from paddle.fluid.framework import _test_eager_guard


 class ElementwiseDivOp(OpTest):
@@ -440,7 +439,7 @@ class TestRealComplexElementwiseDivOp(TestComplexElementwiseDivOp):
 class TestElementwiseDivop(unittest.TestCase):
-    def func_dygraph_div(self):
+    def test_dygraph_div(self):
         paddle.disable_static()
         np_a = np.random.random((2, 3, 4)).astype(np.float32)
@@ -462,10 +461,6 @@ class TestElementwiseDivop(unittest.TestCase):
         paddle.enable_static()

-    def test_dygraph_div(self):
-        with _test_eager_guard():
-            self.func_dygraph_div()
-

 if __name__ == '__main__':
     paddle.enable_static()
...
@@ -18,7 +18,6 @@ import numpy as np
 import paddle
 import paddle.fluid.core as core
-from paddle.fluid.framework import _test_eager_guard
 from paddle.fluid.tests.unittests.op_test import (
     OpTest,
     convert_float_to_uint16,
@@ -367,7 +366,7 @@ class TestRealComplexElementwiseMulOp(TestComplexElementwiseMulOp):
 class TestElementwiseMulop(unittest.TestCase):
-    def func_dygraph_mul(self):
+    def test_dygraph_mul(self):
         paddle.disable_static()
         np_a = np.random.random((2, 3, 4)).astype(np.float32)
@@ -387,10 +386,6 @@ class TestElementwiseMulop(unittest.TestCase):
         paddle.enable_static()

-    def test_dygraph_mul(self):
-        with _test_eager_guard():
-            self.func_dygraph_mul()
-

 if __name__ == '__main__':
     paddle.enable_static()
...
@@ -19,7 +19,6 @@ from op_test import OpTest, convert_float_to_uint16, skip_check_grad_ci
 import paddle
 import paddle.fluid as fluid
-from paddle.fluid.framework import _test_eager_guard


 class TestElementwiseOp(OpTest):
@@ -414,7 +413,7 @@ class TestSubtractInplaceBroadcastError3(TestSubtractInplaceBroadcastError):
 class TestFloatElementwiseSubop(unittest.TestCase):
-    def func_dygraph_sub(self):
+    def test_dygraph_sub(self):
         paddle.disable_static()
         np_a = np.random.random((2, 3, 4)).astype(np.float64)
@@ -446,14 +445,9 @@ class TestFloatElementwiseSubop(unittest.TestCase):
         paddle.enable_static()

-    def test_dygraph_sub(self):
-        with _test_eager_guard():
-            self.func_dygraph_sub()
-        self.func_dygraph_sub()
-

 class TestFloatElementwiseSubop1(unittest.TestCase):
-    def func_dygraph_sub(self):
+    def test_dygraph_sub(self):
         paddle.disable_static()
         np_a = np.random.random((2, 3, 4)).astype(np.float32)
@@ -477,10 +471,6 @@ class TestFloatElementwiseSubop1(unittest.TestCase):
         paddle.enable_static()

-    def test_dygraph_sub(self):
-        with _test_eager_guard():
-            self.func_dygraph_sub()
-

 if __name__ == '__main__':
     paddle.enable_static()
...
@@ -23,7 +23,6 @@ import paddle
 import paddle.fluid as fluid
 import paddle.fluid.layers as layers
 from paddle.fluid import Program, core, program_guard
-from paddle.fluid.framework import _test_eager_guard


 # Situation 1: shape is a list(without tensor)
@@ -256,26 +255,12 @@ class TestExpandInferShape(unittest.TestCase):
 class TestExpandV2DygraphAPI(unittest.TestCase):
     def test_expand_times_is_tensor(self):
         with paddle.fluid.dygraph.guard():
-            with _test_eager_guard():
-                paddle.seed(1)
-                a = paddle.rand([2, 5])
-                egr_expand_1 = paddle.expand(a, shape=[2, 5])
-                np_array = np.array([2, 5])
-                egr_expand_2 = paddle.expand(a, shape=np_array)
-
             paddle.seed(1)
             a = paddle.rand([2, 5])
             expand_1 = paddle.expand(a, shape=[2, 5])
             np_array = np.array([2, 5])
             expand_2 = paddle.expand(a, shape=np_array)
-            np.testing.assert_array_equal(
-                egr_expand_1.numpy(), egr_expand_2.numpy()
-            )
             np.testing.assert_array_equal(expand_1.numpy(), expand_2.numpy())
-            np.testing.assert_array_equal(
-                expand_1.numpy(), egr_expand_1.numpy()
-            )


 class TestExpandDoubleGradCheck(unittest.TestCase):
...