Unverified commit 6d0fa6f2, authored by W wanghuancoder, committed by GitHub

Del old dygraph optest7 (#51999)

* delete old dygraph op test
Parent 789aac8a
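For context, a minimal sketch (not part of the diff) of the migration pattern this commit applies across the op tests: tests switch from `op_test.OpTest` to `eager_op_test.OpTest`, the `check_eager` argument is dropped from `check_output`/`check_grad` (eager checking is now the default, with `check_dygraph=False` used to opt out), and a `python_api` wrapper is set where the op lacks a direct Python entry point. `TestAssignSketch` below is hypothetical and only illustrates the before/after shape of the change:

    # Hypothetical sketch of the pattern applied throughout this diff.
    import numpy as np
    import paddle
    from eager_op_test import OpTest  # was: from op_test import OpTest

    class TestAssignSketch(OpTest):
        def setUp(self):
            self.op_type = "assign"
            self.python_api = paddle.assign  # dygraph entry point for the op
            x = np.random.random((3, 4)).astype("float32")
            self.inputs = {'X': x}
            self.outputs = {'Out': x}  # assign copies its input

        def test_check_output(self):
            self.check_output()  # was: self.check_output(check_eager=True)

        def test_check_grad(self):
            self.check_grad(['X'], 'Out')  # was: ..., check_eager=True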
@@ -28,6 +28,8 @@
 // need to manually specify them in this map.
 std::map<std::string, std::set<std::string>> op_ins_map = {
     {"fc", {"Input", "W", "Bias"}},
+    {"precision_recall",
+     {"MaxProbs", "Indices", "Labels", "Weights", "StatesInfo"}},
     {"layer_norm", {"X", "Scale", "Bias"}},
     {"conv2d_fusion_cutlass", {"Input", "Filter", "Bias", "ResidualData"}},
     {"conv2d_fusion", {"Input", "Filter", "Bias", "ResidualData"}},
@@ -279,6 +281,7 @@ std::map<std::string, std::set<std::string>> op_ins_map = {
 // functions. While, for very few OPs, the dispensable outputs are used, we
 // need to manually specify them in this map.
 std::map<std::string, std::set<std::string>> op_outs_map = {
+    {"rank_attention", {"InputHelp", "Out", "InsRank"}},
     {"fake_quantize_dequantize_moving_average_abs_max",
      {"Out", "OutScale", "OutAccum", "OutState"}},
     {"batch_norm",
...
@@ -163,7 +163,7 @@ class TestFFTC2COp(OpTest):
         )

     def test_check_output(self):
-        self.check_output(check_eager=True)
+        self.check_output()

     def test_check_grad(self):
         self.check_grad(
@@ -171,7 +171,6 @@ class TestFFTC2COp(OpTest):
             "Out",
             user_defined_grads=[self.x_grad],
             user_defined_grad_outputs=[self.out_grad],
-            check_eager=True,
         )
@@ -260,7 +259,7 @@ class TestFFTC2ROp(OpTest):
         )

     def test_check_output(self):
-        self.check_output(check_eager=True)
+        self.check_output()

     def test_check_grad(self):
         self.check_grad(
@@ -268,7 +267,6 @@ class TestFFTC2ROp(OpTest):
             "Out",
             user_defined_grads=[self.x_grad],
             user_defined_grad_outputs=[self.out_grad],
-            check_eager=True,
         )
@@ -345,7 +343,7 @@ class TestFFTR2COp(OpTest):
         )

     def test_check_output(self):
-        self.check_output(check_eager=True)
+        self.check_output()

     def test_check_grad(self):
         self.check_grad(
@@ -353,5 +351,4 @@ class TestFFTR2COp(OpTest):
             "Out",
             user_defined_grads=[self.x_grad],
             user_defined_grad_outputs=[self.out_grad],
-            check_eager=True,
         )
@@ -169,6 +169,8 @@ class OpTestUtils:
         for idx, arg_name in enumerate(api_params):
             if arg_name in api_ignore_param_list:
                 results.append(get_default(idx, api_defaults))
+                if idx_of_op_proto_arguments < len(input_arguments):
+                    idx_of_op_proto_arguments += 1
             else:
                 if idx_of_op_proto_arguments < len(input_arguments):
                     tmp = input_arguments[idx_of_op_proto_arguments]
...
@@ -15,12 +15,12 @@
 import unittest

 import numpy as np
-from eager_op_test import OpTest
+from eager_op_test import OpTest, paddle_static_guard

 import paddle
 from paddle import fluid
 from paddle.fluid import Program, core, program_guard
-from paddle.fluid.tests.unittests.op_test import convert_float_to_uint16
+from paddle.fluid.tests.unittests.eager_op_test import convert_float_to_uint16


 def accuracy_wrapper(infer, indices, label):
@@ -109,31 +109,30 @@ class TestAccuracyOpBf16(OpTest):
 class TestAccuracyOpError(unittest.TestCase):
     def test_type_errors(self):
-        with program_guard(Program(), Program()):
-            # The input type of accuracy_op must be Variable.
-            x1 = fluid.create_lod_tensor(
-                np.array([[-1]]), [[1]], fluid.CPUPlace()
-            )
-            label = paddle.static.data(
-                name='label', shape=[-1, 1], dtype="int32"
-            )
-            self.assertRaises(TypeError, paddle.static.accuracy, x1, label)
-            self.assertRaises(TypeError, paddle.metric.accuracy, x1, label)
-            # The input dtype of accuracy_op must be float32 or float64.
-            x2 = paddle.static.data(name='x2', shape=[-1, 4], dtype="int32")
-            self.assertRaises(TypeError, paddle.static.accuracy, x2, label)
-            self.assertRaises(TypeError, paddle.metric.accuracy, x2, label)
-            x3 = paddle.static.data(
-                name='input', shape=[-1, 2], dtype="float16"
-            )
-            paddle.static.accuracy(input=x3, label=label)
-            paddle.metric.accuracy(input=x3, label=label)
+        with paddle_static_guard():
+            with program_guard(Program(), Program()):
+                # The input type of accuracy_op must be Variable.
+                x1 = fluid.create_lod_tensor(
+                    np.array([[-1]]), [[1]], fluid.CPUPlace()
+                )
+                label = paddle.static.data(
+                    name='label', shape=[-1, 1], dtype="int32"
+                )
+                self.assertRaises(TypeError, paddle.static.accuracy, x1, label)
+                self.assertRaises(TypeError, paddle.metric.accuracy, x1, label)
+                # The input dtype of accuracy_op must be float32 or float64.
+                x2 = paddle.static.data(name='x2', shape=[-1, 4], dtype="int32")
+                self.assertRaises(TypeError, paddle.static.accuracy, x2, label)
+                self.assertRaises(TypeError, paddle.metric.accuracy, x2, label)
+                x3 = paddle.static.data(
+                    name='input', shape=[-1, 2], dtype="float16"
+                )
+                paddle.static.accuracy(input=x3, label=label)
+                paddle.metric.accuracy(input=x3, label=label)

     def test_value_errors(self):
         with program_guard(Program(), Program()):
-            paddle.disable_static()
             # The input rank of accuracy_op must be 2.
             with self.assertRaises(ValueError):
                 x3 = paddle.to_tensor([0.1], dtype='float32')
@@ -142,37 +141,37 @@ class TestAccuracyOpError(unittest.TestCase):
                 )
                 paddle.metric.accuracy(x3, label3)
-        paddle.enable_static()


 class TestAccuracyAPI1(unittest.TestCase):
     def setUp(self):
-        self.predictions = paddle.static.data(
-            shape=[2, 5], name="predictions", dtype="float32"
-        )
-        self.label = paddle.static.data(
-            shape=[2, 1], name="labels", dtype="int64"
-        )
-        self.result = paddle.static.accuracy(
-            input=self.predictions, label=self.label, k=1
-        )
-        self.input_predictions = np.array(
-            [[0.2, 0.1, 0.4, 0.1, 0.1], [0.2, 0.3, 0.1, 0.15, 0.25]],
-            dtype="float32",
-        )
-        self.input_labels = np.array([[2], [0]], dtype="int64")
-        self.expect_value = np.array([0.5], dtype='float32')
+        with paddle_static_guard():
+            self.predictions = paddle.static.data(
+                shape=[2, 5], name="predictions", dtype="float32"
+            )
+            self.label = paddle.static.data(
+                shape=[2, 1], name="labels", dtype="int64"
+            )
+            self.result = paddle.static.accuracy(
+                input=self.predictions, label=self.label, k=1
+            )
+            self.input_predictions = np.array(
+                [[0.2, 0.1, 0.4, 0.1, 0.1], [0.2, 0.3, 0.1, 0.15, 0.25]],
+                dtype="float32",
+            )
+            self.input_labels = np.array([[2], [0]], dtype="int64")
+            self.expect_value = np.array([0.5], dtype='float32')

     def test_api(self):
-        exe = paddle.static.Executor()
-        (result,) = exe.run(
-            feed={
-                "predictions": self.input_predictions,
-                'labels': self.input_labels,
-            },
-            fetch_list=[self.result.name],
-        )
-        self.assertEqual((result == self.expect_value).all(), True)
+        with paddle_static_guard():
+            exe = paddle.static.Executor()
+            (result,) = exe.run(
+                feed={
+                    "predictions": self.input_predictions,
+                    'labels': self.input_labels,
+                },
+                fetch_list=[self.result.name],
+            )
+            self.assertEqual((result == self.expect_value).all(), True)


 class TestAccuracyAPI2(unittest.TestCase):
...
@@ -14,9 +14,9 @@
 import unittest

+import eager_op_test
 import gradient_checker
 import numpy as np
-import op_test
 from decorator_helper import prog_scope

 import paddle
@@ -25,7 +25,7 @@ from paddle.fluid import Program, core, program_guard
 from paddle.fluid.backward import append_backward


-class TestAssignOp(op_test.OpTest):
+class TestAssignOp(eager_op_test.OpTest):
     def setUp(self):
         self.python_api = paddle.assign
         self.public_python_api = paddle.assign
@@ -38,16 +38,16 @@ class TestAssignOp(op_test.OpTest):
     def test_forward(self):
         paddle.enable_static()
-        self.check_output(check_eager=True)
+        self.check_output()
         paddle.disable_static()

     def test_backward(self):
         paddle.enable_static()
-        self.check_grad(['X'], 'Out', check_eager=True, check_prim=True)
+        self.check_grad(['X'], 'Out', check_prim=True)
         paddle.disable_static()


-class TestAssignFP16Op(op_test.OpTest):
+class TestAssignFP16Op(eager_op_test.OpTest):
     def setUp(self):
         self.python_api = paddle.assign
         self.public_python_api = paddle.assign
@@ -60,12 +60,12 @@ class TestAssignFP16Op(op_test.OpTest):
     def test_forward(self):
         paddle.enable_static()
-        self.check_output(check_eager=True)
+        self.check_output()
         paddle.disable_static()

     def test_backward(self):
         paddle.enable_static()
-        self.check_grad(['X'], 'Out', check_eager=True, check_prim=True)
+        self.check_grad(['X'], 'Out', check_prim=True)
         paddle.disable_static()
...
@@ -14,8 +14,8 @@
 import unittest

+import eager_op_test
 import numpy as np
-import op_test

 import paddle
 from paddle.distributed.models.moe import utils
@@ -72,7 +72,7 @@ def get_redefined_allclose(cum_count):
 @unittest.skipIf(
     not core.is_compiled_with_cuda(), "core is not compiled with CUDA"
 )
-class TestAssignPosOpInt64(op_test.OpTest):
+class TestAssignPosOpInt64(eager_op_test.OpTest):
     def setUp(self):
         x = np.random.randint(0, 16, size=(100, 2)).astype("int64")
         y = count(x, 16)
...
@@ -14,19 +14,27 @@
 import unittest

+import eager_op_test
 import numpy as np
-import op_test

 import paddle
 from paddle import fluid
 from paddle.fluid import framework

-paddle.enable_static()

+def assign_value_wrapper(
+    shape=[], dtype=fluid.core.VarDesc.VarType.FP32, values=0.0
+):
+    tensor = paddle.Tensor()
+    return paddle._C_ops.assign_value_(
+        tensor, shape, dtype, values, framework._current_expected_place()
+    )

-class TestAssignValueOp(op_test.OpTest):
+
+class TestAssignValueOp(eager_op_test.OpTest):
     def setUp(self):
         self.op_type = "assign_value"
+        self.python_api = assign_value_wrapper
         self.inputs = {}
         self.attrs = {}
         self.init_data()
@@ -66,29 +74,31 @@ class TestAssignValueOp4(TestAssignValueOp):
 class TestAssignApi(unittest.TestCase):
     def setUp(self):
-        self.init_dtype()
-        self.value = (-100 + 200 * np.random.random(size=(2, 5))).astype(
-            self.dtype
-        )
-        self.place = (
-            fluid.CUDAPlace(0)
-            if fluid.is_compiled_with_cuda()
-            else fluid.CPUPlace()
-        )
+        with eager_op_test.paddle_static_guard():
+            self.init_dtype()
+            self.value = (-100 + 200 * np.random.random(size=(2, 5))).astype(
+                self.dtype
+            )
+            self.place = (
+                fluid.CUDAPlace(0)
+                if fluid.is_compiled_with_cuda()
+                else fluid.CPUPlace()
+            )

     def init_dtype(self):
         self.dtype = "float32"

     def test_assign(self):
-        main_program = fluid.Program()
-        with fluid.program_guard(main_program):
-            x = paddle.tensor.create_tensor(dtype=self.dtype)
-            paddle.assign(self.value, output=x)
-        exe = fluid.Executor(self.place)
-        [fetched_x] = exe.run(main_program, feed={}, fetch_list=[x])
-        np.testing.assert_array_equal(fetched_x, self.value)
-        self.assertEqual(fetched_x.dtype, self.value.dtype)
+        with eager_op_test.paddle_static_guard():
+            main_program = fluid.Program()
+            with fluid.program_guard(main_program):
+                x = paddle.tensor.create_tensor(dtype=self.dtype)
+                paddle.assign(self.value, output=x)
+            exe = fluid.Executor(self.place)
+            [fetched_x] = exe.run(main_program, feed={}, fetch_list=[x])
+            np.testing.assert_array_equal(fetched_x, self.value)
+            self.assertEqual(fetched_x.dtype, self.value.dtype)


 class TestAssignApi2(TestAssignApi):
@@ -103,15 +113,16 @@ class TestAssignApi3(TestAssignApi):
 class TestAssignApi4(TestAssignApi):
     def setUp(self):
-        self.init_dtype()
-        self.value = np.random.choice(a=[False, True], size=(2, 5)).astype(
-            np.bool_
-        )
-        self.place = (
-            fluid.CUDAPlace(0)
-            if fluid.is_compiled_with_cuda()
-            else fluid.CPUPlace()
-        )
+        with eager_op_test.paddle_static_guard():
+            self.init_dtype()
+            self.value = np.random.choice(a=[False, True], size=(2, 5)).astype(
+                np.bool_
+            )
+            self.place = (
+                fluid.CUDAPlace(0)
+                if fluid.is_compiled_with_cuda()
+                else fluid.CPUPlace()
+            )

     def init_dtype(self):
         self.dtype = "bool"
...
@@ -14,9 +14,9 @@
 import unittest

+import eager_op_test
 import numpy
 import numpy as np
-import op_test

 import paddle
 from paddle import fluid
@@ -24,7 +24,7 @@ from paddle.fluid import Program, core, program_guard


 def create_test_class(op_type, typename, callback):
-    class Cls(op_test.OpTest):
+    class Cls(eager_op_test.OpTest):
         def setUp(self):
             a = numpy.random.random(size=(10, 7)).astype(typename)
             b = numpy.random.random(size=(10, 7)).astype(typename)
@@ -444,7 +444,7 @@ create_paddle_case('not_equal', lambda _a, _b: _a != _b)

 # add bf16 tests
 def create_bf16_case(op_type, callback):
-    class TestCompareOpBF16Op(op_test.OpTest):
+    class TestCompareOpBF16Op(eager_op_test.OpTest):
         def setUp(self):
             self.op_type = op_type
             self.dtype = np.uint16
@@ -454,8 +454,8 @@ def create_bf16_case(op_type, callback):
             y = np.random.uniform(0, 1, [5, 5]).astype(np.float32)
             real_result = callback(x, y)
             self.inputs = {
-                'X': op_test.convert_float_to_uint16(x),
-                'Y': op_test.convert_float_to_uint16(y),
+                'X': eager_op_test.convert_float_to_uint16(x),
+                'Y': eager_op_test.convert_float_to_uint16(y),
             }
             self.outputs = {'Out': real_result}
...
@@ -14,14 +14,14 @@
 import unittest

+import eager_op_test
 import numpy as np
-import op_test

 import paddle


 def create_test_not_equal_class(op_type, typename, callback):
-    class Cls(op_test.OpTest):
+    class Cls(eager_op_test.OpTest):
         def setUp(self):
             x = np.random.random(size=(10, 7)).astype(typename)
             y = np.random.random(size=(10, 7)).astype(typename)
@@ -40,7 +40,7 @@ def create_test_not_equal_class(op_type, typename, callback):


 def create_test_not_shape_equal_class(op_type, typename, callback):
-    class Cls(op_test.OpTest):
+    class Cls(eager_op_test.OpTest):
         def setUp(self):
             x = np.random.random(size=(10, 7)).astype(typename)
             y = np.random.random(size=(10)).astype(typename)
@@ -59,7 +59,7 @@ def create_test_not_shape_equal_class(op_type, typename, callback):


 def create_test_equal_class(op_type, typename, callback):
-    class Cls(op_test.OpTest):
+    class Cls(eager_op_test.OpTest):
         def setUp(self):
             x = y = np.random.random(size=(10, 7)).astype(typename)
             z = callback(x, y)
@@ -77,7 +77,7 @@ def create_test_equal_class(op_type, typename, callback):


 def create_test_dim1_class(op_type, typename, callback):
-    class Cls(op_test.OpTest):
+    class Cls(eager_op_test.OpTest):
         def setUp(self):
             x = y = np.random.random(size=(1)).astype(typename)
             x = np.array([True, False, True]).astype(typename)
...
@@ -15,7 +15,7 @@
 import unittest

 import numpy as np
-from op_test import OpTest
+from eager_op_test import OpTest

 import paddle
 from paddle import static
@@ -67,7 +67,7 @@ class TestComplexOp(OpTest):
         self.outputs = {'Out': out_ref}

     def test_check_output(self):
-        self.check_output(check_eager=True)
+        self.check_output()

     def test_check_grad(self):
         dout = self.out_grad
@@ -79,7 +79,6 @@ class TestComplexOp(OpTest):
             'Out',
             user_defined_grads=[dx, dy],
             user_defined_grad_outputs=[dout],
-            check_eager=True,
         )

     def test_check_grad_ignore_x(self):
@@ -95,7 +94,6 @@ class TestComplexOp(OpTest):
             no_grad_set=set('X'),
             user_defined_grads=[dy],
             user_defined_grad_outputs=[dout],
-            check_eager=True,
         )

     def test_check_grad_ignore_y(self):
@@ -109,7 +107,6 @@ class TestComplexOp(OpTest):
             no_grad_set=set('Y'),
             user_defined_grads=[dx],
             user_defined_grad_outputs=[dout],
-            check_eager=True,
        )
...
@@ -17,7 +17,7 @@ import unittest
 import gradient_checker
 import numpy as np
 from decorator_helper import prog_scope
-from op_test import OpTest, convert_float_to_uint16, skip_check_grad_ci
+from eager_op_test import OpTest, convert_float_to_uint16, skip_check_grad_ci

 import paddle
 from paddle import fluid
...
@@ -15,7 +15,7 @@
 import unittest

 import numpy as np
-from op_test import OpTest
+from eager_op_test import OpTest
 from test_conv2d_op import conv2d_forward_naive

 from paddle.fluid import core
@@ -60,7 +60,9 @@ def create_test_cudnn_channel_last_class(parent):
             print(self.attrs)
             if self.has_cuda():
                 place = core.CUDAPlace(0)
-                self.check_output_with_place(place, atol=1e-5)
+                self.check_output_with_place(
+                    place, atol=1e-5, check_dygraph=False
+                )

     cls_name = "{0}_{1}".format(parent.__name__, "CudnnChannelLast")
     TestCudnnChannelLastCase.__name__ = cls_name
@@ -161,7 +163,7 @@ class TestConv2DFusionOp(OpTest):
     def test_check_output(self):
         if self.has_cuda():
             place = core.CUDAPlace(0)
-            self.check_output_with_place(place, atol=1e-5)
+            self.check_output_with_place(place, atol=1e-5, check_dygraph=False)

     def init_test_case(self):
         self.pad = [0, 0]
...
@@ -19,7 +19,7 @@ import numpy as np

 import paddle
 from paddle import fluid
 from paddle.fluid import Program, core, program_guard
-from paddle.fluid.tests.unittests.op_test import (
+from paddle.fluid.tests.unittests.eager_op_test import (
     OpTest,
     convert_float_to_uint16,
     get_numeric_gradient,
@@ -391,9 +391,36 @@ def create_test_cudnn_padding_VALID_class(parent):
     globals()[cls_name] = TestCUDNNPaddingVALIDCase


+def conv2d_wrapper(
+    x,
+    weight,
+    stride=1,
+    padding=0,
+    padding_algorithm="EXPLICIT",
+    dilation=1,
+    groups=1,
+    data_format="NCDHW",
+):
+    if data_format == "AnyLayout":
+        data_format = "NCDHW"
+    if padding_algorithm is None:
+        padding_algorithm = "EXPLICIT"
+    return paddle._C_ops.conv2d(
+        x,
+        weight,
+        stride,
+        padding,
+        padding_algorithm,
+        dilation,
+        groups,
+        data_format,
+    )


 class TestConv2DOp(OpTest):
     def setUp(self):
         self.op_type = "conv2d"
+        self.python_api = conv2d_wrapper
         self.use_cudnn = False
         self.exhaustive_search = False
         self.use_cuda = False
@@ -732,6 +759,7 @@ class TestConv2DOpError(unittest.TestCase):
 class TestConv2DOp_v2(OpTest):
     def setUp(self):
         self.op_type = "conv2d"
+        self.python_api = conv2d_wrapper
         self.use_cudnn = False
         self.exhaustive_search = False
         self.use_cuda = False
...
@@ -36,6 +36,32 @@ from paddle.fluid.tests.unittests.testsuite import create_op

 # ----------------TestDepthwiseConv -----


+def depthwise_conv2d_wrapper(
+    x,
+    weight,
+    stride=1,
+    padding=0,
+    padding_algorithm="EXPLICIT",
+    groups=1,
+    dilation=1,
+    data_format="NCDHW",
+):
+    if data_format == "AnyLayout":
+        data_format = "NCDHW"
+    if padding_algorithm is None:
+        padding_algorithm = "EXPLICIT"
+    return paddle._C_ops.depthwise_conv2d(
+        x,
+        weight,
+        stride,
+        padding,
+        padding_algorithm,
+        groups,
+        dilation,
+        data_format,
+    )


 class TestDepthwiseConv(TestConv2DOp):
     def init_test_case(self):
         self.use_cuda = True
@@ -47,6 +73,7 @@ class TestDepthwiseConv(TestConv2DOp):
         f_c = self.input_size[1] // self.groups
         self.filter_size = [12, f_c, 3, 3]
         self.op_type = "depthwise_conv2d"
+        self.python_api = depthwise_conv2d_wrapper


 class TestDepthwiseConv2(TestConv2DOp):
@@ -60,6 +87,7 @@ class TestDepthwiseConv2(TestConv2DOp):
         f_c = self.input_size[1] // self.groups
         self.filter_size = [12, f_c, 3, 3]
         self.op_type = "depthwise_conv2d"
+        self.python_api = depthwise_conv2d_wrapper


 class TestDepthwiseConv3(TestConv2DOp):
@@ -73,6 +101,7 @@ class TestDepthwiseConv3(TestConv2DOp):
         f_c = self.input_size[1] // self.groups
         self.filter_size = [24, f_c, 3, 3]
         self.op_type = "depthwise_conv2d"
+        self.python_api = depthwise_conv2d_wrapper


 class TestDepthwiseConvWithDilation(TestConv2DOp):
@@ -87,6 +116,7 @@ class TestDepthwiseConvWithDilation(TestConv2DOp):
         f_c = self.input_size[1] // self.groups
         self.filter_size = [24, f_c, 3, 3]
         self.op_type = "depthwise_conv2d"
+        self.python_api = depthwise_conv2d_wrapper


 class TestDepthwiseConvWithDilation2(TestConv2DOp):
@@ -101,6 +131,7 @@ class TestDepthwiseConvWithDilation2(TestConv2DOp):
         f_c = self.input_size[1] // self.groups
         self.filter_size = [24, f_c, 3, 3]
         self.op_type = "depthwise_conv2d"
+        self.python_api = depthwise_conv2d_wrapper


 class TestDepthwiseConvandFuse(TestConv2DOp):
@@ -115,6 +146,7 @@ class TestDepthwiseConvandFuse(TestConv2DOp):
         f_c = self.input_size[1] // self.groups
         self.filter_size = [12, f_c, 3, 3]
         self.op_type = "depthwise_conv2d"
+        self.python_api = depthwise_conv2d_wrapper


 class TestDepthwiseConv2andFuse(TestConv2DOp):
@@ -129,6 +161,7 @@ class TestDepthwiseConv2andFuse(TestConv2DOp):
         f_c = self.input_size[1] // self.groups
         self.filter_size = [12, f_c, 3, 3]
         self.op_type = "depthwise_conv2d"
+        self.python_api = depthwise_conv2d_wrapper


 class TestDepthwiseConv3andFuse(TestConv2DOp):
@@ -143,6 +176,7 @@ class TestDepthwiseConv3andFuse(TestConv2DOp):
         f_c = self.input_size[1] // self.groups
         self.filter_size = [24, f_c, 3, 3]
         self.op_type = "depthwise_conv2d"
+        self.python_api = depthwise_conv2d_wrapper


 class TestDepthwiseConvWithDilationandFuse(TestConv2DOp):
@@ -158,6 +192,7 @@ class TestDepthwiseConvWithDilationandFuse(TestConv2DOp):
         f_c = self.input_size[1] // self.groups
         self.filter_size = [24, f_c, 3, 3]
         self.op_type = "depthwise_conv2d"
+        self.python_api = depthwise_conv2d_wrapper


 class TestDepthwiseConvWithDilation2andFuse(TestConv2DOp):
@@ -173,6 +208,7 @@ class TestDepthwiseConvWithDilation2andFuse(TestConv2DOp):
         f_c = self.input_size[1] // self.groups
         self.filter_size = [24, f_c, 3, 3]
         self.op_type = "depthwise_conv2d"
+        self.python_api = depthwise_conv2d_wrapper


 class TestDepthwiseConv_AsyPadding(TestConv2DOp_v2):
@@ -185,6 +221,7 @@ class TestDepthwiseConv_AsyPadding(TestConv2DOp_v2):
         f_c = self.input_size[1] // self.groups
         self.filter_size = [12, f_c, 3, 3]
         self.op_type = "depthwise_conv2d"
+        self.python_api = depthwise_conv2d_wrapper

     def init_paddings(self):
         self.pad = [1, 1, 0, 1]
@@ -201,6 +238,7 @@ class TestDepthwiseConv2_AsyPadding(TestConv2DOp_v2):
         f_c = self.input_size[1] // self.groups
         self.filter_size = [12, f_c, 3, 3]
         self.op_type = "depthwise_conv2d"
+        self.python_api = depthwise_conv2d_wrapper

     def init_paddings(self):
         self.pad = [0, 1, 0, 2]
@@ -217,6 +255,7 @@ class TestDepthwiseConv3_AsyPadding(TestConv2DOp_v2):
         f_c = self.input_size[1] // self.groups
         self.filter_size = [24, f_c, 3, 3]
         self.op_type = "depthwise_conv2d"
+        self.python_api = depthwise_conv2d_wrapper

     def init_paddings(self):
         self.pad = [1, 1, 0, 0]
@@ -235,6 +274,7 @@ class TestDepthwiseConvWithDilation_AsyPadding(TestConv2DOp_v2):
         f_c = self.input_size[1] // self.groups
         self.filter_size = [24, f_c, 3, 3]
         self.op_type = "depthwise_conv2d"
+        self.python_api = depthwise_conv2d_wrapper

     def init_paddings(self):
         self.pad = [1, 1, 2, 1]
@@ -253,6 +293,7 @@ class TestDepthwiseConvWithDilation2_AsyPadding(TestConv2DOp_v2):
         f_c = self.input_size[1] // self.groups
         self.filter_size = [24, f_c, 3, 3]
         self.op_type = "depthwise_conv2d"
+        self.python_api = depthwise_conv2d_wrapper

     def init_paddings(self):
         self.pad = [0, 1, 1, 0]
@@ -271,6 +312,7 @@ class TestDepthwiseConvandFuse_AsyPadding(TestConv2DOp_v2):
         f_c = self.input_size[1] // self.groups
         self.filter_size = [12, f_c, 3, 3]
         self.op_type = "depthwise_conv2d"
+        self.python_api = depthwise_conv2d_wrapper

     def init_paddings(self):
         self.pad = [2, 1, 2, 3]
@@ -289,6 +331,7 @@ class TestDepthwiseConv2andFuse_AsyPadding(TestConv2DOp_v2):
         f_c = self.input_size[1] // self.groups
         self.filter_size = [12, f_c, 3, 3]
         self.op_type = "depthwise_conv2d"
+        self.python_api = depthwise_conv2d_wrapper

     def init_paddings(self):
         self.pad = [1, 1, 1, 2]
@@ -307,6 +350,7 @@ class TestDepthwiseConv3andFuse_AsyPadding(TestConv2DOp_v2):
         f_c = self.input_size[1] // self.groups
         self.filter_size = [24, f_c, 3, 3]
         self.op_type = "depthwise_conv2d"
+        self.python_api = depthwise_conv2d_wrapper

     def init_paddings(self):
         self.pad = [1, 2, 0, 2]
@@ -326,6 +370,7 @@ class TestDepthwiseConvWithDilationandFuse_AsyPadding(TestConv2DOp_v2):
         f_c = self.input_size[1] // self.groups
         self.filter_size = [24, f_c, 3, 3]
         self.op_type = "depthwise_conv2d"
+        self.python_api = depthwise_conv2d_wrapper

     def init_paddings(self):
         self.pad = [2, 1, 1, 0]
@@ -345,6 +390,7 @@ class TestDepthwiseConvWithDilation2andFuse_AsyPadding(TestConv2DOp_v2):
         f_c = self.input_size[1] // self.groups
         self.filter_size = [24, f_c, 3, 3]
         self.op_type = "depthwise_conv2d"
+        self.python_api = depthwise_conv2d_wrapper

     def init_paddings(self):
         self.pad = [1, 3, 1, 3]
...
@@ -17,7 +17,7 @@ import tempfile
 import unittest

 import numpy as np
-from op_test import OpTest, convert_float_to_uint16
+from eager_op_test import OpTest, convert_float_to_uint16

 import paddle
 import paddle.inference as paddle_infer
@@ -112,11 +112,15 @@ class TestCumsumOp(unittest.TestCase):
         self.assertTrue('out' in y.name)


+def cumsum_wrapper(x, axis=-1, flatten=False, exclusive=False, reverse=False):
+    return paddle._C_ops.cumsum(x, axis, flatten, exclusive, reverse)


 class TestSumOp1(OpTest):
     def setUp(self):
         self.op_type = "cumsum"
         self.prim_op_type = "prim"
-        self.python_api = paddle.cumsum
+        self.python_api = cumsum_wrapper
         self.public_python_api = paddle.cumsum
         self.set_enable_cinn()
         self.init_dtype()
@@ -215,7 +219,7 @@ class TestSumOpExclusive1(OpTest):
     def setUp(self):
         self.op_type = "cumsum"
         self.prim_op_type = "prim"
-        self.python_api = paddle.cumsum
+        self.python_api = cumsum_wrapper
         self.public_python_api = paddle.cumsum
         self.set_enable_cinn()
         self.init_dtype()
@@ -307,7 +311,7 @@ class TestSumOpExclusiveFP16(OpTest):
     def setUp(self):
         self.op_type = "cumsum"
         self.prim_op_type = "prim"
-        self.python_api = paddle.cumsum
+        self.python_api = cumsum_wrapper
         self.public_python_api = paddle.cumsum
         self.init_dtype()
         self.enable_cinn = False
@@ -341,7 +345,7 @@ class TestSumOpReverseExclusive(OpTest):
     def setUp(self):
         self.op_type = "cumsum"
         self.prim_op_type = "prim"
-        self.python_api = paddle.cumsum
+        self.python_api = cumsum_wrapper
         self.public_python_api = paddle.cumsum
         self.set_enable_cinn()
         self.init_dtype()
...
@@ -15,7 +15,7 @@
 import unittest

 import numpy as np
-from op_test import OpTest, convert_float_to_uint16, skip_check_grad_ci
+from eager_op_test import OpTest, convert_float_to_uint16, skip_check_grad_ci

 import paddle
 from paddle.fluid import core
@@ -44,22 +44,20 @@ class TestElementwiseOp(OpTest):
     def test_check_output(self):
         if hasattr(self, 'attrs'):
-            self.check_output(check_eager=False)
+            self.check_output(check_dygraph=False)
         else:
-            self.check_output(check_eager=True)
+            self.check_output()

     def test_check_grad_normal(self):
         if hasattr(self, 'attrs'):
             if self.attrs['axis'] == -1:
                 self.check_grad(
-                    ['X', 'Y'], 'Out', check_eager=False, check_prim=True
+                    ['X', 'Y'], 'Out', check_dygraph=False, check_prim=True
                 )
             else:
-                self.check_grad(['X', 'Y'], 'Out', check_eager=False)
+                self.check_grad(['X', 'Y'], 'Out', check_dygraph=False)
         else:
-            self.check_grad(
-                ['X', 'Y'], 'Out', check_eager=True, check_prim=True
-            )
+            self.check_grad(['X', 'Y'], 'Out', check_prim=True)

     def test_check_grad_ingore_x(self):
         if hasattr(self, 'attrs') and self.attrs['axis'] != -1:
@@ -68,6 +66,7 @@ class TestElementwiseOp(OpTest):
                 'Out',
                 max_relative_error=0.005,
                 no_grad_set=set("X"),
+                check_dygraph=False,
             )
         else:
             self.check_grad(
@@ -85,6 +84,7 @@ class TestElementwiseOp(OpTest):
                 'Out',
                 max_relative_error=0.005,
                 no_grad_set=set('Y'),
+                check_dygraph=False,
             )
         else:
             self.check_grad(
@@ -178,16 +178,16 @@ class TestElementwiseBF16Op(OpTest):
     def test_check_output(self):
         if hasattr(self, 'attrs'):
-            self.check_output(check_eager=False)
+            self.check_output(check_dygraph=False)
         else:
-            self.check_output(check_eager=True)
+            self.check_output()

     def test_check_grad_normal(self):
         if hasattr(self, 'attrs'):
             # check_prim=False, bfloat16 is not supported in `less_equal`
-            self.check_grad(['X', 'Y'], 'Out', check_eager=False)
+            self.check_grad(['X', 'Y'], 'Out', check_dygraph=False)
         else:
-            self.check_grad(['X', 'Y'], 'Out', check_eager=True)
+            self.check_grad(['X', 'Y'], 'Out')

     def test_check_grad_ingore_x(self):
         self.check_grad(['Y'], 'Out', no_grad_set=set("X"))
@@ -204,12 +204,10 @@ class TestElementwiseMaxBF16Op_ZeroDim1(TestElementwiseBF16Op):
     def test_check_grad_normal(self):
         if hasattr(self, 'attrs'):
             self.check_grad(
-                ['X', 'Y'], 'Out', numeric_grad_delta=0.05, check_eager=False
+                ['X', 'Y'], 'Out', numeric_grad_delta=0.05, check_dygraph=False
             )
         else:
-            self.check_grad(
-                ['X', 'Y'], 'Out', numeric_grad_delta=0.05, check_eager=True
-            )
+            self.check_grad(['X', 'Y'], 'Out', numeric_grad_delta=0.05)

     def test_check_grad_ingore_x(self):
         self.check_grad(
...
@@ -15,7 +15,7 @@
 import unittest

 import numpy as np
-from op_test import OpTest, convert_float_to_uint16, skip_check_grad_ci
+from eager_op_test import OpTest, convert_float_to_uint16, skip_check_grad_ci

 import paddle
 from paddle import fluid
@@ -41,19 +41,17 @@ class TestElementwisePowOp(OpTest):
     def test_check_output(self):
         if hasattr(self, 'attrs'):
-            self.check_output(check_eager=False)
+            self.check_output(check_dygraph=False)
         else:
-            self.check_output(check_eager=True)
+            self.check_output()

     def test_check_grad_normal(self):
         if hasattr(self, 'attrs'):
             self.check_grad(
-                ['X', 'Y'], 'Out', check_eager=False, check_prim=True
+                ['X', 'Y'], 'Out', check_prim=True, check_dygraph=False
             )
         else:
-            self.check_grad(
-                ['X', 'Y'], 'Out', check_eager=True, check_prim=True
-            )
+            self.check_grad(['X', 'Y'], 'Out', check_prim=True)


 class TestElementwisePowOp_ZeroDim1(TestElementwisePowOp):
@@ -190,9 +188,9 @@ class TestElementwisePowOp_broadcast_1(TestElementwisePowOp):
     def test_check_grad_normal(self):
         if hasattr(self, 'attrs'):
-            self.check_grad(['X', 'Y'], 'Out', check_eager=False)
+            self.check_grad(['X', 'Y'], 'Out', check_dygraph=False)
         else:
-            self.check_grad(['X', 'Y'], 'Out', check_eager=True)
+            self.check_grad(['X', 'Y'], 'Out')


 class TestElementwisePowOp_broadcast_2(TestElementwisePowOp):
@@ -213,9 +211,9 @@ class TestElementwisePowOp_broadcast_2(TestElementwisePowOp):
     def test_check_grad_normal(self):
         if hasattr(self, 'attrs'):
-            self.check_grad(['X', 'Y'], 'Out', check_eager=False)
+            self.check_grad(['X', 'Y'], 'Out', check_dygraph=False)
         else:
-            self.check_grad(['X', 'Y'], 'Out', check_eager=True)
+            self.check_grad(['X', 'Y'], 'Out')


 class TestElementwisePowOp_broadcast_3(TestElementwisePowOp):
@@ -236,9 +234,9 @@ class TestElementwisePowOp_broadcast_3(TestElementwisePowOp):
     def test_check_grad_normal(self):
         if hasattr(self, 'attrs'):
-            self.check_grad(['X', 'Y'], 'Out', check_eager=False)
+            self.check_grad(['X', 'Y'], 'Out', check_dygraph=False)
         else:
-            self.check_grad(['X', 'Y'], 'Out', check_eager=True)
+            self.check_grad(['X', 'Y'], 'Out')


 class TestElementwisePowOp_broadcast_4(TestElementwisePowOp):
@@ -265,9 +263,9 @@ class TestElementwisePowOpInt(OpTest):
     def test_check_output(self):
         if hasattr(self, 'attrs'):
-            self.check_output(check_eager=False)
+            self.check_output(check_dygraph=False)
         else:
-            self.check_output(check_eager=True)
+            self.check_output()


 class TestElementwisePowGradOpInt(unittest.TestCase):
@@ -321,9 +319,9 @@ class TestElementwisePowOpFP16(OpTest):
     def test_check_output(self):
         if hasattr(self, 'attrs'):
-            self.check_output(check_eager=False)
+            self.check_output(check_dygraph=False)
         else:
-            self.check_output(check_eager=True)
+            self.check_output()

     def test_check_grad(self):
         self.check_grad(
@@ -332,7 +330,6 @@ class TestElementwisePowOpFP16(OpTest):
             user_defined_grads=pow_grad(
                 self.inputs['X'], self.inputs['Y'], 1 / self.inputs['X'].size
             ),
-            check_eager=True,
             check_prim=True,
         )
@@ -354,12 +351,12 @@ class TestElementwisePowBF16Op(OpTest):
     def test_check_output(self):
         if hasattr(self, 'attrs'):
-            self.check_output(check_eager=False)
+            self.check_output()
         else:
-            self.check_output(check_eager=True)
+            self.check_output()

     def test_check_grad(self):
-        self.check_grad(['X', 'Y'], 'Out', check_eager=True)
+        self.check_grad(['X', 'Y'], 'Out')


 if __name__ == '__main__':
...
@@ -15,12 +15,10 @@
 import unittest

 import numpy as np
-from op_test import OpTest
+from eager_op_test import OpTest

 import paddle

-paddle.seed(100)


 class TestExponentialOp1(OpTest):
     def setUp(self):
@@ -50,7 +48,7 @@ class TestExponentialOp1(OpTest):
         hist2 = hist2.astype("float32")
         hist2 = hist2 / float(data_np.size)
-        np.testing.assert_allclose(hist1, hist2, rtol=0.02)
+        np.testing.assert_allclose(hist1, hist2, rtol=0.03)

     def test_check_grad_normal(self):
         self.check_grad(
@@ -61,6 +59,7 @@ class TestExponentialOp1(OpTest):
             user_defined_grad_outputs=[
                 np.random.rand(1024, 1024).astype(self.dtype)
             ],
+            check_dygraph=False,  # inplace can not call paddle.grad
         )
...
@@ -17,7 +17,7 @@ import math
 import unittest

 import numpy as np
-from op_test import OpTest
+from eager_op_test import OpTest


 def round_c_single_element(val):
@@ -65,7 +65,7 @@ class TestFakeQuantizeAbsMaxOp(OpTest):
         self.inputs = {'X': input_data}
         self.outputs = {'Out': output_data, 'OutScale': scale}
         self.dtype = dtype
-        self.check_output()
+        self.check_output(check_dygraph=False)

     def test_fake_quantize_abs_max(self):
         self._fake_quantize_abs_max(np.float32, (124, 240), np.random.random)
@@ -126,7 +126,7 @@ class TestFakeChannelWiseQuantizeAbsMaxOp(OpTest):
         self.outputs = {'Out': output_data, 'OutScale': scale}
         self.dtype = dtype
         self.attrs['quant_axis'] = quant_axis
-        self.check_output()
+        self.check_output(check_dygraph=False)

     def test_fake_channel_wise_quantize_abs_max(self):
         dtype_options = [np.float32, np.float16]
@@ -200,7 +200,7 @@ class TestFakeQuantizeRangeAbsMaxOp(OpTest):
         }
         self.dtype = dtype
         self.attrs['is_test'] = is_test
-        self.check_output()
+        self.check_output(check_dygraph=False)

     def test_fake_quantize_range_abs_max(self):
         dtype_options = [np.float16, np.float32]
@@ -248,7 +248,7 @@ class TestMovingAverageAbsMaxScaleOp(OpTest):
             'OutScale': out_scale,
         }
         self.dtype = dtype
-        self.check_output()
+        self.check_output(check_dygraph=False)

     def test_moving_average_abs_max(self):
         self._moving_average_abs_max_scale(
@@ -313,7 +313,7 @@ class TestFakeQuantizeMovingAverageAbsMaxOp(OpTest):
             'OutScale': out_scale,
         }
         self.dtype = dtype
-        self.check_output()
+        self.check_output(check_dygraph=False)
         if with_gradient:
             gradient = [
                 np.ones(input_data.shape) / np.product(input_data.shape)
@@ -369,7 +369,7 @@ class TestFakeQuantizeDequantizeAbsMaxOp(OpTest):
             'OutScale': np.array(scale).astype(dtype),
         }
         self.dtype = dtype
-        self.check_output()
+        self.check_output(check_dygraph=False)
         gradient = [np.ones(input_data.shape) / np.product(input_data.shape)]
         self.check_grad(['X'], 'Out', user_defined_grads=gradient)
@@ -426,7 +426,7 @@ class TestChannelWiseFakeQuantizeDequantizeAbsMaxOp(OpTest):
         self.outputs = {'Out': output_data, 'OutScale': scale}
         self.dtype = dtype
         self.attrs['quant_axis'] = quant_axis
-        self.check_output()
+        self.check_output(check_dygraph=False)
         gradient = [np.ones(input_data.shape) / np.product(input_data.shape)]
         self.check_grad(['X'], 'Out', user_defined_grads=gradient)
@@ -504,7 +504,7 @@ class TestChannelWiseQuantizeOp(OpTest):
         self.outputs = {'Y': yq}

     def test_check_output(self):
-        self.check_output()
+        self.check_output(check_dygraph=False)


 class TestChannelWiseQuantizeOp1(TestChannelWiseQuantizeOp):
@@ -540,7 +540,7 @@ class TestChannelWiseQuantizeOpTrain(OpTest):
         self.outputs = {'Y': yq, 'OutScale': scale}

     def test_check_output(self):
-        self.check_output()
+        self.check_output(check_dygraph=False)


 class TestquantizeOp(OpTest):
@@ -566,7 +566,7 @@ class TestquantizeOp(OpTest):
         self.outputs = {'Y': yq}

     def test_check_output(self):
-        self.check_output()
+        self.check_output(check_dygraph=False)


 class TestquantizeOpTrain(TestquantizeOp):
@@ -618,7 +618,7 @@ class TestquantizeOpTrain(TestquantizeOp):
         }

     def test_check_output(self):
-        self.check_output()
+        self.check_output(check_dygraph=False)


 if __name__ == '__main__':
...
@@ -15,7 +15,7 @@
 import unittest

 import numpy as np
-from op_test import OpTest, convert_float_to_uint16
+from eager_op_test import OpTest, convert_float_to_uint16

 import paddle
 from paddle import fluid
@@ -49,10 +49,10 @@ class TestGatherNdOpWithEmptyIndex(OpTest):
         self.dtype = np.float64

     def test_check_output(self):
-        self.check_output(check_eager=False)
+        self.check_output()

     def test_check_grad(self):
-        self.check_grad(['X'], 'Out', check_eager=False, check_prim=True)
+        self.check_grad(['X'], 'Out', check_prim=True)


 class TestGatherNdOpWithEmptyIndexFP16(TestGatherNdOpWithEmptyIndex):
@@ -71,13 +71,11 @@ class TestGatherNdOpWithEmptyIndexBF16(TestGatherNdOpWithEmptyIndex):
     def test_check_output(self):
         place = core.CUDAPlace(0)
-        self.check_output_with_place(place, check_eager=False)
+        self.check_output_with_place(place)

     def test_check_grad(self):
         place = core.CUDAPlace(0)
-        self.check_grad_with_place(
-            place, ['X'], 'Out', check_eager=False, check_prim=True
-        )
+        self.check_grad_with_place(place, ['X'], 'Out', check_prim=True)


 class TestGatherNdOpWithIndex1(OpTest):
@@ -106,10 +104,10 @@ class TestGatherNdOpWithIndex1(OpTest):
         self.dtype = np.float64

     def test_check_output(self):
-        self.check_output(check_eager=False)
+        self.check_output()

     def test_check_grad(self):
-        self.check_grad(['X'], 'Out', check_eager=False, check_prim=True)
+        self.check_grad(['X'], 'Out', check_prim=True)


 class TestGatherNdOpWithIndex1FP16(TestGatherNdOpWithIndex1):
@@ -128,13 +126,11 @@ class TestGatherNdOpWithIndex1BF16(TestGatherNdOpWithIndex1):
     def test_check_output(self):
         place = core.CUDAPlace(0)
-        self.check_output_with_place(place, check_eager=False)
+        self.check_output_with_place(place)

     def test_check_grad(self):
         place = core.CUDAPlace(0)
-        self.check_grad_with_place(
-            place, ['X'], 'Out', check_eager=False, check_prim=True
-        )
+        self.check_grad_with_place(place, ['X'], 'Out', check_prim=True)


 class TestGatherNdOpWithLowIndex(OpTest):
@@ -168,10 +164,10 @@ class TestGatherNdOpWithLowIndex(OpTest):
         self.dtype = np.float64

     def test_check_output(self):
-        self.check_output(check_eager=False)
+        self.check_output()

     def test_check_grad(self):
-        self.check_grad(['X'], 'Out', check_eager=False, check_prim=True)
+        self.check_grad(['X'], 'Out', check_prim=True)


 class TestGatherNdOpWithLowIndexFP16(TestGatherNdOpWithLowIndex):
@@ -190,13 +186,11 @@ class TestGatherNdOpWithLowIndexBF16(TestGatherNdOpWithLowIndex):
     def test_check_output(self):
         place = core.CUDAPlace(0)
-        self.check_output_with_place(place, check_eager=False)
+        self.check_output_with_place(place)

     def test_check_grad(self):
         place = core.CUDAPlace(0)
-        self.check_grad_with_place(
-            place, ['X'], 'Out', check_eager=False, check_prim=True
-        )
+        self.check_grad_with_place(place, ['X'], 'Out', check_prim=True)


 class TestGatherNdOpIndex1(OpTest):
@@ -228,10 +222,10 @@ class TestGatherNdOpIndex1(OpTest):
         self.dtype = np.float64

     def test_check_output(self):
-        self.check_output(check_eager=False)
+        self.check_output()

     def test_check_grad(self):
-        self.check_grad(['X'], 'Out', check_eager=False, check_prim=True)
+        self.check_grad(['X'], 'Out', check_prim=True)


 class TestGatherNdOpIndex1FP16(TestGatherNdOpIndex1):
@@ -250,13 +244,11 @@ class TestGatherNdOpIndex1BF16(TestGatherNdOpIndex1):
     def test_check_output(self):
         place = core.CUDAPlace(0)
-        self.check_output_with_place(place, check_eager=False)
+        self.check_output_with_place(place)

     def test_check_grad(self):
         place = core.CUDAPlace(0)
-        self.check_grad_with_place(
-            place, ['X'], 'Out', check_eager=False, check_prim=True
-        )
+        self.check_grad_with_place(place, ['X'], 'Out', check_prim=True)


 class TestGatherNdOpWithSameIndexAsX(OpTest):
@@ -287,10 +279,10 @@ class TestGatherNdOpWithSameIndexAsX(OpTest):
         self.dtype = np.float64

     def test_check_output(self):
-        self.check_output(check_eager=False)
+        self.check_output()

     def test_check_grad(self):
-        self.check_grad(['X'], 'Out', check_eager=False, check_prim=True)
+        self.check_grad(['X'], 'Out', check_prim=True)


 class TestGatherNdOpWithSameIndexAsXFP16(TestGatherNdOpWithSameIndexAsX):
@@ -309,13 +301,11 @@ class TestGatherNdOpWithSameIndexAsXBF16(TestGatherNdOpWithSameIndexAsX):
     def test_check_output(self):
place = core.CUDAPlace(0) place = core.CUDAPlace(0)
self.check_output_with_place(place, check_eager=False) self.check_output_with_place(place)
def test_check_grad(self): def test_check_grad(self):
place = core.CUDAPlace(0) place = core.CUDAPlace(0)
self.check_grad_with_place( self.check_grad_with_place(place, ['X'], 'Out', check_prim=True)
place, ['X'], 'Out', check_eager=False, check_prim=True
)
class TestGatherNdOpWithHighRankSame(OpTest): class TestGatherNdOpWithHighRankSame(OpTest):
...@@ -347,10 +337,10 @@ class TestGatherNdOpWithHighRankSame(OpTest): ...@@ -347,10 +337,10 @@ class TestGatherNdOpWithHighRankSame(OpTest):
self.dtype = np.float64 self.dtype = np.float64
def test_check_output(self): def test_check_output(self):
self.check_output(check_eager=False) self.check_output()
def test_check_grad(self): def test_check_grad(self):
self.check_grad(['X'], 'Out', check_eager=False, check_prim=True) self.check_grad(['X'], 'Out', check_prim=True)
class TestGatherNdOpWithHighRankSameFP16(TestGatherNdOpWithHighRankSame): class TestGatherNdOpWithHighRankSameFP16(TestGatherNdOpWithHighRankSame):
...@@ -369,13 +359,11 @@ class TestGatherNdOpWithHighRankSameBF16(TestGatherNdOpWithHighRankSame): ...@@ -369,13 +359,11 @@ class TestGatherNdOpWithHighRankSameBF16(TestGatherNdOpWithHighRankSame):
def test_check_output(self): def test_check_output(self):
place = core.CUDAPlace(0) place = core.CUDAPlace(0)
self.check_output_with_place(place, check_eager=False) self.check_output_with_place(place)
def test_check_grad(self): def test_check_grad(self):
place = core.CUDAPlace(0) place = core.CUDAPlace(0)
self.check_grad_with_place( self.check_grad_with_place(place, ['X'], 'Out', check_prim=True)
place, ['X'], 'Out', check_eager=False, check_prim=True
)
class TestGatherNdOpWithHighRankDiff(OpTest): class TestGatherNdOpWithHighRankDiff(OpTest):
...@@ -408,10 +396,10 @@ class TestGatherNdOpWithHighRankDiff(OpTest): ...@@ -408,10 +396,10 @@ class TestGatherNdOpWithHighRankDiff(OpTest):
self.dtype = np.float64 self.dtype = np.float64
def test_check_output(self): def test_check_output(self):
self.check_output(check_eager=False) self.check_output()
def test_check_grad(self): def test_check_grad(self):
self.check_grad(['X'], 'Out', check_eager=False, check_prim=True) self.check_grad(['X'], 'Out', check_prim=True)
class TestGatherNdOpWithHighRankDiffFP16(TestGatherNdOpWithHighRankDiff): class TestGatherNdOpWithHighRankDiffFP16(TestGatherNdOpWithHighRankDiff):
...@@ -430,13 +418,11 @@ class TestGatherNdOpWithHighRankDiffBF16(TestGatherNdOpWithHighRankDiff): ...@@ -430,13 +418,11 @@ class TestGatherNdOpWithHighRankDiffBF16(TestGatherNdOpWithHighRankDiff):
def test_check_output(self): def test_check_output(self):
place = core.CUDAPlace(0) place = core.CUDAPlace(0)
self.check_output_with_place(place, check_eager=False) self.check_output_with_place(place)
def test_check_grad(self): def test_check_grad(self):
place = core.CUDAPlace(0) place = core.CUDAPlace(0)
self.check_grad_with_place( self.check_grad_with_place(place, ['X'], 'Out', check_prim=True)
place, ['X'], 'Out', check_eager=False, check_prim=True
)
# Test Python API # Test Python API
......
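The gather_nd hunks above are a pure flag migration: eager_op_test runs the dygraph/eager comparison by default, so every check_eager=False disappears while check_prim (the composite-operator check) stays explicit. A hedged, self-contained sketch of a migrated test with small illustrative shapes; prim_op_type is an assumption about what check_prim requires:

    import numpy as np
    import paddle
    from eager_op_test import OpTest

    class TestGatherNdSketch(OpTest):
        def setUp(self):
            self.op_type = "gather_nd"
            self.prim_op_type = "prim"          # assumed prerequisite of check_prim
            self.python_api = paddle.gather_nd  # public API maps 1:1 to the op
            x = np.random.random((5, 20)).astype("float64")
            index = np.array([[1], [3]]).astype("int64")
            self.inputs = {'X': x, 'Index': index}
            self.outputs = {'Out': x[index.flatten()]}  # rows 1 and 3

        def test_check_output(self):
            self.check_output()  # the eager check is now implicit

        def test_check_grad(self):
            self.check_grad(['X'], 'Out', check_prim=True)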
...@@ -19,7 +19,11 @@ import numpy as np ...@@ -19,7 +19,11 @@ import numpy as np
import paddle import paddle
from paddle import fluid from paddle import fluid
from paddle.fluid import core from paddle.fluid import core
from paddle.fluid.tests.unittests.op_test import OpTest, convert_uint16_to_float from paddle.fluid.tests.unittests.eager_op_test import (
OpTest,
convert_uint16_to_float,
paddle_static_guard,
)
from paddle.tensor import random from paddle.tensor import random
...@@ -209,87 +213,86 @@ class TestGaussianRandomOp1_ShapeTensor(TestGaussianRandomOp): ...@@ -209,87 +213,86 @@ class TestGaussianRandomOp1_ShapeTensor(TestGaussianRandomOp):
# Test python API # Test python API
class TestGaussianRandomAPI(unittest.TestCase): class TestGaussianRandomAPI(unittest.TestCase):
def test_api(self): def test_api(self):
positive_2_int32 = paddle.tensor.fill_constant([1], "int32", 2000) with paddle_static_guard():
positive_2_int32 = paddle.tensor.fill_constant([1], "int32", 2000)
positive_2_int64 = paddle.tensor.fill_constant([1], "int64", 500) positive_2_int64 = paddle.tensor.fill_constant([1], "int64", 500)
shape_tensor_int32 = paddle.static.data( shape_tensor_int32 = paddle.static.data(
name="shape_tensor_int32", shape=[2], dtype="int32" name="shape_tensor_int32", shape=[2], dtype="int32"
) )
shape_tensor_int64 = paddle.static.data( shape_tensor_int64 = paddle.static.data(
name="shape_tensor_int64", shape=[2], dtype="int64" name="shape_tensor_int64", shape=[2], dtype="int64"
) )
out_1 = random.gaussian( out_1 = random.gaussian(
shape=[2000, 500], dtype="float32", mean=0.0, std=1.0, seed=10 shape=[2000, 500], dtype="float32", mean=0.0, std=1.0, seed=10
) )
out_2 = random.gaussian( out_2 = random.gaussian(
shape=[2000, positive_2_int32], shape=[2000, positive_2_int32],
dtype="float32", dtype="float32",
mean=0.0, mean=0.0,
std=1.0, std=1.0,
seed=10, seed=10,
) )
out_3 = random.gaussian( out_3 = random.gaussian(
shape=[2000, positive_2_int64], shape=[2000, positive_2_int64],
dtype="float32", dtype="float32",
mean=0.0, mean=0.0,
std=1.0, std=1.0,
seed=10, seed=10,
) )
out_4 = random.gaussian( out_4 = random.gaussian(
shape=shape_tensor_int32, shape=shape_tensor_int32,
dtype="float32", dtype="float32",
mean=0.0, mean=0.0,
std=1.0, std=1.0,
seed=10, seed=10,
) )
out_5 = random.gaussian( out_5 = random.gaussian(
shape=shape_tensor_int64, shape=shape_tensor_int64,
dtype="float32", dtype="float32",
mean=0.0, mean=0.0,
std=1.0, std=1.0,
seed=10, seed=10,
) )
out_6 = random.gaussian( out_6 = random.gaussian(
shape=shape_tensor_int64, shape=shape_tensor_int64,
dtype=np.float32, dtype=np.float32,
mean=0.0, mean=0.0,
std=1.0, std=1.0,
seed=10, seed=10,
) )
exe = fluid.Executor(place=fluid.CPUPlace()) exe = fluid.Executor(place=fluid.CPUPlace())
res_1, res_2, res_3, res_4, res_5, res_6 = exe.run( res_1, res_2, res_3, res_4, res_5, res_6 = exe.run(
fluid.default_main_program(), fluid.default_main_program(),
feed={ feed={
"shape_tensor_int32": np.array([2000, 500]).astype("int32"), "shape_tensor_int32": np.array([2000, 500]).astype("int32"),
"shape_tensor_int64": np.array([2000, 500]).astype("int64"), "shape_tensor_int64": np.array([2000, 500]).astype("int64"),
}, },
fetch_list=[out_1, out_2, out_3, out_4, out_5, out_6], fetch_list=[out_1, out_2, out_3, out_4, out_5, out_6],
) )
self.assertAlmostEqual(np.mean(res_1), 0.0, delta=0.1) self.assertAlmostEqual(np.mean(res_1), 0.0, delta=0.1)
self.assertAlmostEqual(np.std(res_1), 1.0, delta=0.1) self.assertAlmostEqual(np.std(res_1), 1.0, delta=0.1)
self.assertAlmostEqual(np.mean(res_2), 0.0, delta=0.1) self.assertAlmostEqual(np.mean(res_2), 0.0, delta=0.1)
self.assertAlmostEqual(np.std(res_2), 1.0, delta=0.1) self.assertAlmostEqual(np.std(res_2), 1.0, delta=0.1)
self.assertAlmostEqual(np.mean(res_3), 0.0, delta=0.1) self.assertAlmostEqual(np.mean(res_3), 0.0, delta=0.1)
self.assertAlmostEqual(np.std(res_3), 1.0, delta=0.1) self.assertAlmostEqual(np.std(res_3), 1.0, delta=0.1)
self.assertAlmostEqual(np.mean(res_4), 0.0, delta=0.1) self.assertAlmostEqual(np.mean(res_4), 0.0, delta=0.1)
self.assertAlmostEqual(np.std(res_4), 1.0, delta=0.1) self.assertAlmostEqual(np.std(res_4), 1.0, delta=0.1)
self.assertAlmostEqual(np.mean(res_5), 0.0, delta=0.1) self.assertAlmostEqual(np.mean(res_5), 0.0, delta=0.1)
self.assertAlmostEqual(np.std(res_5), 1.0, delta=0.1) self.assertAlmostEqual(np.std(res_5), 1.0, delta=0.1)
self.assertAlmostEqual(np.mean(res_6), 0.0, delta=0.1) self.assertAlmostEqual(np.mean(res_6), 0.0, delta=0.1)
self.assertAlmostEqual(np.std(res_6), 1.0, delta=0.1) self.assertAlmostEqual(np.std(res_6), 1.0, delta=0.1)
def test_default_dtype(self): def test_default_dtype(self):
paddle.disable_static()
def test_default_fp16(): def test_default_fp16():
paddle.framework.set_default_dtype('float16') paddle.framework.set_default_dtype('float16')
out = paddle.tensor.random.gaussian([2, 3]) out = paddle.tensor.random.gaussian([2, 3])
...@@ -311,13 +314,9 @@ class TestGaussianRandomAPI(unittest.TestCase): ...@@ -311,13 +314,9 @@ class TestGaussianRandomAPI(unittest.TestCase):
test_default_fp64() test_default_fp64()
test_default_fp32() test_default_fp32()
paddle.enable_static()
class TestStandardNormalDtype(unittest.TestCase): class TestStandardNormalDtype(unittest.TestCase):
def test_default_dtype(self): def test_default_dtype(self):
paddle.disable_static()
def test_default_fp16(): def test_default_fp16():
paddle.framework.set_default_dtype('float16') paddle.framework.set_default_dtype('float16')
out = paddle.tensor.random.standard_normal([2, 3]) out = paddle.tensor.random.standard_normal([2, 3])
...@@ -339,8 +338,6 @@ class TestStandardNormalDtype(unittest.TestCase): ...@@ -339,8 +338,6 @@ class TestStandardNormalDtype(unittest.TestCase):
test_default_fp64() test_default_fp64()
test_default_fp32() test_default_fp32()
paddle.enable_static()
class TestRandomValue(unittest.TestCase): class TestRandomValue(unittest.TestCase):
def test_fixed_random_number(self): def test_fixed_random_number(self):
...@@ -402,7 +399,6 @@ class TestRandomValue(unittest.TestCase): ...@@ -402,7 +399,6 @@ class TestRandomValue(unittest.TestCase):
_check_random_value( _check_random_value(
core.VarDesc.VarType.FP32, expect, expect_mean, expect_std core.VarDesc.VarType.FP32, expect, expect_mean, expect_std
) )
paddle.enable_static()
if __name__ == "__main__": if __name__ == "__main__":
......
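Two cleanups run through the gaussian_random file: the stray paddle.disable_static()/paddle.enable_static() toggles are deleted (the migrated suite runs in dygraph by default), and the static-graph API test wraps its whole body in paddle_static_guard(). The diff uses the guard as a context manager, so the sketch below assumes it enters static mode on entry and restores the previous mode on exit:

    import paddle
    from eager_op_test import paddle_static_guard

    def run_static_gaussian():
        with paddle_static_guard():
            # inside the guard: build and run a static program
            out = paddle.tensor.random.gaussian(shape=[4, 6], dtype="float32")
            exe = paddle.static.Executor(paddle.CPUPlace())
            (res,) = exe.run(paddle.static.default_main_program(),
                             fetch_list=[out])
        # here the process is back in its previous (dygraph) mode
        return res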
...@@ -15,7 +15,12 @@ ...@@ -15,7 +15,12 @@
import unittest import unittest
import numpy as np import numpy as np
from op_test import OpTest, convert_float_to_uint16, skip_check_grad_ci from eager_op_test import (
OpTest,
convert_float_to_uint16,
paddle_static_guard,
skip_check_grad_ci,
)
from testsuite import create_op from testsuite import create_op
import paddle import paddle
...@@ -42,28 +47,41 @@ def group_norm_naive(x, scale, bias, epsilon, groups, data_layout): ...@@ -42,28 +47,41 @@ def group_norm_naive(x, scale, bias, epsilon, groups, data_layout):
class TestGroupNormOpError(unittest.TestCase): class TestGroupNormOpError(unittest.TestCase):
def test_errors(self): def test_errors(self):
with fluid.program_guard(fluid.Program(), fluid.Program()): with paddle_static_guard():
with fluid.program_guard(fluid.Program(), fluid.Program()):
def test_x_type(): def test_x_type():
input = np.random.random(2, 100, 3, 5).astype('float32') input = np.random.random(2, 100, 3, 5).astype('float32')
groups = 2 groups = 2
paddle.static.nn.group_norm(input, groups) paddle.static.nn.group_norm(input, groups)
self.assertRaises(TypeError, test_x_type) self.assertRaises(TypeError, test_x_type)
def test_x_dtype():
x2 = paddle.static.data(
name='x2', shape=[-1, 2, 100, 3, 5], dtype='int32'
)
groups = 2
paddle.static.nn.group_norm(x2, groups)
self.assertRaises(TypeError, test_x_dtype)
def test_x_dtype():
x2 = paddle.static.data(
name='x2', shape=[-1, 2, 100, 3, 5], dtype='int32'
)
groups = 2
paddle.static.nn.group_norm(x2, groups)
self.assertRaises(TypeError, test_x_dtype) def group_norm_wrapper(
input, weight, bias, epsilon=1e-5, num_groups=0, data_format="NCHW"
):
if data_format == "AnyLayout":
data_format = "NCDHW"
return paddle._C_ops.group_norm(
input, weight, bias, epsilon, num_groups, data_format
)
class TestGroupNormOp(OpTest): class TestGroupNormOp(OpTest):
def setUp(self): def setUp(self):
self.op_type = "group_norm" self.op_type = "group_norm"
self.python_api = group_norm_wrapper
self.python_out_sig = ["Y"]
self.data_format = "NCHW" self.data_format = "NCHW"
self.dtype = np.float64 self.dtype = np.float64
self.shape = (2, 100, 3, 5) self.shape = (2, 100, 3, 5)
...@@ -201,6 +219,8 @@ class TestGroupNormFP16OP(TestGroupNormOp): ...@@ -201,6 +219,8 @@ class TestGroupNormFP16OP(TestGroupNormOp):
class TestGroupNormBF16Op(OpTest): class TestGroupNormBF16Op(OpTest):
def setUp(self): def setUp(self):
self.op_type = "group_norm" self.op_type = "group_norm"
self.python_api = group_norm_wrapper
self.python_out_sig = ["Y"]
self.data_format = "NCHW" self.data_format = "NCHW"
self.dtype = np.uint16 self.dtype = np.uint16
self.shape = (2, 100, 3, 5) self.shape = (2, 100, 3, 5)
...@@ -361,58 +381,68 @@ class TestGroupNormOpLargeData_With_NHWC(TestGroupNormOp): ...@@ -361,58 +381,68 @@ class TestGroupNormOpLargeData_With_NHWC(TestGroupNormOp):
class TestGroupNormAPI_With_NHWC(unittest.TestCase): class TestGroupNormAPI_With_NHWC(unittest.TestCase):
paddle.enable_static()
def test_case1(self): def test_case1(self):
data1 = paddle.static.data( with paddle_static_guard():
name='data1', shape=[None, 3, 3, 4], dtype='float64' data1 = paddle.static.data(
) name='data1', shape=[None, 3, 3, 4], dtype='float64'
out1 = paddle.static.nn.group_norm( )
input=data1, groups=2, data_layout="NHWC" out1 = paddle.static.nn.group_norm(
) input=data1, groups=2, data_layout="NHWC"
data2 = paddle.static.data( )
name='data2', shape=[None, 4, 3, 3], dtype='float64' data2 = paddle.static.data(
) name='data2', shape=[None, 4, 3, 3], dtype='float64'
out2 = paddle.static.nn.group_norm( )
input=data2, groups=2, data_layout="NCHW" out2 = paddle.static.nn.group_norm(
) input=data2, groups=2, data_layout="NCHW"
)
data1_np = np.random.random((2, 3, 3, 4)).astype("float64")
data2_np = np.random.random((2, 4, 3, 3)).astype("float64")
scale = np.array([1]).astype("float64")
bias = np.array([0]).astype("float64")
place = core.CPUPlace() data1_np = np.random.random((2, 3, 3, 4)).astype("float64")
exe = fluid.Executor(place) data2_np = np.random.random((2, 4, 3, 3)).astype("float64")
results = exe.run( scale = np.array([1]).astype("float64")
fluid.default_main_program(), bias = np.array([0]).astype("float64")
feed={"data1": data1_np, "data2": data2_np},
fetch_list=[out1, out2], place = core.CPUPlace()
return_numpy=True, exe = fluid.Executor(place)
) results = exe.run(
expect_res1 = group_norm_naive( fluid.default_main_program(),
data1_np, scale, bias, epsilon=1e-5, groups=2, data_layout="NHWC" feed={"data1": data1_np, "data2": data2_np},
) fetch_list=[out1, out2],
expect_res2 = group_norm_naive( return_numpy=True,
data2_np, scale, bias, epsilon=1e-5, groups=2, data_layout="NCHW" )
) expect_res1 = group_norm_naive(
np.testing.assert_allclose(results[0], expect_res1[0], rtol=1e-05) data1_np,
np.testing.assert_allclose(results[1], expect_res2[0], rtol=1e-05) scale,
bias,
epsilon=1e-5,
groups=2,
data_layout="NHWC",
)
expect_res2 = group_norm_naive(
data2_np,
scale,
bias,
epsilon=1e-5,
groups=2,
data_layout="NCHW",
)
np.testing.assert_allclose(results[0], expect_res1[0], rtol=1e-05)
np.testing.assert_allclose(results[1], expect_res2[0], rtol=1e-05)
class TestGroupNormException(unittest.TestCase): class TestGroupNormException(unittest.TestCase):
# data_layout is not NHWC or NCHW # data_layout is not NHWC or NCHW
def test_exception(self): def test_exception(self):
data = paddle.static.data( with paddle_static_guard():
name='data', shape=[None, 3, 3, 4], dtype="float64" data = paddle.static.data(
) name='data', shape=[None, 3, 3, 4], dtype="float64"
def attr_data_format():
out = paddle.static.nn.group_norm(
input=data, groups=2, data_layout="NDHW"
) )
self.assertRaises(ValueError, attr_data_format) def attr_data_format():
out = paddle.static.nn.group_norm(
input=data, groups=2, data_layout="NDHW"
)
self.assertRaises(ValueError, attr_data_format)
class TestGroupNormEager(unittest.TestCase): class TestGroupNormEager(unittest.TestCase):
......
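group_norm gains an eager wrapper plus python_out_sig = ["Y"], which tells the checker that only the Y output is compared (the op also declares Mean and Variance); the "AnyLayout" remap mirrors the legacy attribute default, which the new kernel entry presumably does not accept. A hedged dygraph smoke test of the wrapper; the assumption, taken from the op's output list, is that the kernel returns the (Y, Mean, Variance) tuple:

    import numpy as np
    import paddle

    x = paddle.to_tensor(np.random.random((2, 6, 3, 5)).astype("float64"))
    scale = paddle.ones([6], dtype="float64")
    bias = paddle.zeros([6], dtype="float64")
    # assumed return: the (Y, Mean, Variance) tuple of the group_norm op
    outs = group_norm_wrapper(x, scale, bias, epsilon=1e-5, num_groups=2)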
...@@ -16,7 +16,7 @@ import functools ...@@ -16,7 +16,7 @@ import functools
import unittest import unittest
import numpy as np import numpy as np
from op_test import OpTest from eager_op_test import OpTest
from paddle.fluid.tests.unittests.test_lstm_op import ACTIVATION from paddle.fluid.tests.unittests.test_lstm_op import ACTIVATION
......
...@@ -17,7 +17,7 @@ import sys ...@@ -17,7 +17,7 @@ import sys
import unittest import unittest
import numpy as np import numpy as np
from op_test import OpTest from eager_op_test import OpTest
import paddle import paddle
from paddle.fluid import core from paddle.fluid import core
...@@ -31,6 +31,38 @@ np.set_printoptions(threshold=np.inf) ...@@ -31,6 +31,38 @@ np.set_printoptions(threshold=np.inf)
paddle.enable_static() paddle.enable_static()
def rnn_wrapper(
Input,
PreState,
WeightList=None,
SequenceLength=None,
dropout_prob=0.0,
is_bidirec=False,
input_size=10,
hidden_size=100,
num_layers=1,
mode="LSTM",
seed=0,
is_test=False,
):
dropout_state_in = paddle.Tensor()
return paddle._C_ops.rnn(
Input,
[PreState],
WeightList,
SequenceLength,
dropout_state_in,
dropout_prob,
is_bidirec,
input_size,
hidden_size,
num_layers,
mode,
seed,
is_test,
)
class TestGRUOp(OpTest): class TestGRUOp(OpTest):
def get_weight_names(self): def get_weight_names(self):
weight_names = [] weight_names = []
...@@ -44,6 +76,10 @@ class TestGRUOp(OpTest): ...@@ -44,6 +76,10 @@ class TestGRUOp(OpTest):
def setUp(self): def setUp(self):
self.op_type = "rnn" self.op_type = "rnn"
self.python_api = rnn_wrapper
self.python_out_sig = ["Out", "DropoutState", "State"]
self.python_out_sig_sub_name = {"State": ["last_hidden"]}
self.dtype = "float32" if core.is_compiled_with_rocm() else "float64" self.dtype = "float32" if core.is_compiled_with_rocm() else "float64"
self.sequence_length = ( self.sequence_length = (
None None
......
...@@ -15,7 +15,7 @@ ...@@ -15,7 +15,7 @@
import unittest import unittest
import numpy as np import numpy as np
from op_test import OpTest from eager_op_test import OpTest
class TestHashOp(OpTest): class TestHashOp(OpTest):
......
...@@ -15,7 +15,7 @@ ...@@ -15,7 +15,7 @@
import unittest import unittest
import numpy as np import numpy as np
from op_test import OpTest, convert_float_to_uint16 from eager_op_test import OpTest, convert_float_to_uint16
import paddle import paddle
from paddle import fluid from paddle import fluid
...@@ -60,10 +60,10 @@ class TestIndexSelectOp(OpTest): ...@@ -60,10 +60,10 @@ class TestIndexSelectOp(OpTest):
self.index_size = 100 self.index_size = 100
def test_check_output(self): def test_check_output(self):
self.check_output(check_eager=True, check_prim=True) self.check_output(check_prim=True)
def test_check_grad_normal(self): def test_check_grad_normal(self):
self.check_grad(['X'], 'Out', check_eager=True, check_prim=True) self.check_grad(['X'], 'Out', check_prim=True)
class TestIndexSelectOpCase2(TestIndexSelectOp): class TestIndexSelectOpCase2(TestIndexSelectOp):
...@@ -132,10 +132,10 @@ class TestIndexSelectBF16Op(OpTest): ...@@ -132,10 +132,10 @@ class TestIndexSelectBF16Op(OpTest):
self.index_size = 100 self.index_size = 100
def test_check_output(self): def test_check_output(self):
self.check_output(check_eager=True) self.check_output()
def test_check_grad_normal(self): def test_check_grad_normal(self):
self.check_grad(['X'], 'Out', check_eager=True) self.check_grad(['X'], 'Out')
class TestIndexSelectAPI(unittest.TestCase): class TestIndexSelectAPI(unittest.TestCase):
......
...@@ -15,7 +15,7 @@ ...@@ -15,7 +15,7 @@
import unittest import unittest
import numpy as np import numpy as np
from op_test import OpTest from eager_op_test import OpTest
import paddle import paddle
...@@ -44,10 +44,10 @@ class TestLabelSmoothOp(OpTest): ...@@ -44,10 +44,10 @@ class TestLabelSmoothOp(OpTest):
self.dtype = np.float64 self.dtype = np.float64
def test_check_output(self): def test_check_output(self):
self.check_output(check_eager=True) self.check_output()
def test_check_grad(self): def test_check_grad(self):
self.check_grad(["X"], "Out", check_eager=True) self.check_grad(["X"], "Out")
class TestLabelSmoothFP16OP(TestLabelSmoothOp): class TestLabelSmoothFP16OP(TestLabelSmoothOp):
......
...@@ -19,7 +19,10 @@ import numpy as np ...@@ -19,7 +19,10 @@ import numpy as np
import paddle import paddle
import paddle.nn.functional as F import paddle.nn.functional as F
from paddle.fluid import core from paddle.fluid import core
from paddle.fluid.tests.unittests.op_test import OpTest, convert_float_to_uint16 from paddle.fluid.tests.unittests.eager_op_test import (
OpTest,
convert_float_to_uint16,
)
np.random.seed(10) np.random.seed(10)
......
...@@ -17,7 +17,7 @@ import unittest ...@@ -17,7 +17,7 @@ import unittest
from typing import Optional from typing import Optional
import numpy as np import numpy as np
from op_test import OpTest from eager_op_test import OpTest
import paddle import paddle
from paddle import fluid from paddle import fluid
...@@ -214,10 +214,17 @@ class TestLogcumsumexp(unittest.TestCase): ...@@ -214,10 +214,17 @@ class TestLogcumsumexp(unittest.TestCase):
out = exe.run(feed={'X': data_np}, fetch_list=[y.name]) out = exe.run(feed={'X': data_np}, fetch_list=[y.name])
def logcumsumexp_wrapper(
x, axis=-1, flatten=False, exclusive=False, reverse=False
):
return paddle._C_ops.logcumsumexp(x, axis, flatten, exclusive, reverse)
class BaseTestCases: class BaseTestCases:
class BaseOpTest(OpTest): class BaseOpTest(OpTest):
def setUp(self): def setUp(self):
self.op_type = "logcumsumexp" self.op_type = "logcumsumexp"
self.python_api = logcumsumexp_wrapper
input, attrs = self.input_and_attrs() input, attrs = self.input_and_attrs()
self.inputs = {'X': input} self.inputs = {'X': input}
self.attrs = attrs self.attrs = attrs
......
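logcumsumexp_wrapper just forwards the op attributes positionally to the eager kernel so the migrated tests can set python_api. A quick hedged dygraph check with illustrative values; paddle.logcumsumexp is the public equivalent:

    import numpy as np
    import paddle

    x = paddle.to_tensor(np.array([[1.0, 2.0], [3.0, 4.0]]))
    y = logcumsumexp_wrapper(x, axis=-1)
    z = paddle.logcumsumexp(x, axis=-1)       # should agree with the wrapper
    print(np.allclose(y.numpy(), z.numpy()))  # True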
...@@ -19,7 +19,7 @@ from eager_op_test import OpTest ...@@ -19,7 +19,7 @@ from eager_op_test import OpTest
import paddle import paddle
from paddle.fluid import core from paddle.fluid import core
from paddle.fluid.tests.unittests.op_test import convert_float_to_uint16 from paddle.fluid.tests.unittests.eager_op_test import convert_float_to_uint16
np.random.seed(10) np.random.seed(10)
......
...@@ -20,7 +20,7 @@ import paddle ...@@ -20,7 +20,7 @@ import paddle
from paddle import enable_static, fluid from paddle import enable_static, fluid
from paddle.fluid import core from paddle.fluid import core
from paddle.fluid.op import Operator from paddle.fluid.op import Operator
from paddle.fluid.tests.unittests.op_test import ( from paddle.fluid.tests.unittests.eager_op_test import (
OpTest, OpTest,
convert_float_to_uint16, convert_float_to_uint16,
convert_uint16_to_float, convert_uint16_to_float,
......
...@@ -15,7 +15,12 @@ ...@@ -15,7 +15,12 @@
import unittest import unittest
import numpy as np import numpy as np
from op_test import OpTest, check_out_dtype, skip_check_grad_ci from eager_op_test import (
OpTest,
check_out_dtype,
paddle_static_guard,
skip_check_grad_ci,
)
import paddle import paddle
import paddle.nn.functional as F import paddle.nn.functional as F
...@@ -157,38 +162,43 @@ class TestLookupTableWithTensorIdsWIsSelectedRows( ...@@ -157,38 +162,43 @@ class TestLookupTableWithTensorIdsWIsSelectedRows(
class TestEmbedOpError(unittest.TestCase): class TestEmbedOpError(unittest.TestCase):
def test_errors(self): def test_errors(self):
with program_guard(Program(), Program()): with paddle_static_guard():
input_data = np.random.randint(0, 10, (4, 1)).astype("int64") with program_guard(Program(), Program()):
input_data = np.random.randint(0, 10, (4, 1)).astype("int64")
def test_Variable():
# the input type must be Variable def test_Variable():
fluid.layers.embedding(input=input_data, size=(10, 64)) # the input type must be Variable
fluid.layers.embedding(input=input_data, size=(10, 64))
self.assertRaises(TypeError, test_Variable)
self.assertRaises(TypeError, test_Variable)
def test_input_dtype():
# the input dtype must be int64 def test_input_dtype():
input = paddle.static.data( # the input dtype must be int64
name='x', shape=[4, 1], dtype='float32' input = paddle.static.data(
) name='x', shape=[4, 1], dtype='float32'
fluid.layers.embedding(input=input, size=(10, 64)) )
fluid.layers.embedding(input=input, size=(10, 64))
self.assertRaises(TypeError, test_input_dtype)
self.assertRaises(TypeError, test_input_dtype)
def test_param_dtype():
# dtype must be float32 or float64 def test_param_dtype():
input2 = paddle.static.data( # dtype must be float32 or float64
name='x2', shape=[4, 1], dtype='int64' input2 = paddle.static.data(
name='x2', shape=[4, 1], dtype='int64'
)
fluid.layers.embedding(
input=input2, size=(10, 64), dtype='int64'
)
self.assertRaises(TypeError, test_param_dtype)
input3 = paddle.static.data(
name='x3', shape=[4, 1], dtype='int64'
) )
fluid.layers.embedding( fluid.layers.embedding(
input=input2, size=(10, 64), dtype='int64' input=input3, size=(10, 64), dtype='float16'
) )
self.assertRaises(TypeError, test_param_dtype)
input3 = paddle.static.data(name='x3', shape=[4, 1], dtype='int64')
fluid.layers.embedding(input=input3, size=(10, 64), dtype='float16')
class TestLookupTableOpInt8(OpTest): class TestLookupTableOpInt8(OpTest):
def setUp(self): def setUp(self):
......
...@@ -19,7 +19,7 @@ import numpy as np ...@@ -19,7 +19,7 @@ import numpy as np
import paddle import paddle
from paddle import fluid from paddle import fluid
from paddle.fluid import core from paddle.fluid import core
from paddle.fluid.tests.unittests.op_test import convert_uint16_to_float from paddle.fluid.tests.unittests.eager_op_test import convert_uint16_to_float
from paddle.fluid.tests.unittests.test_lookup_table_bf16_op import ( from paddle.fluid.tests.unittests.test_lookup_table_bf16_op import (
TestLookupTableBF16Op, TestLookupTableBF16Op,
TestLookupTableBF16OpIds4D, TestLookupTableBF16OpIds4D,
...@@ -32,6 +32,7 @@ from paddle.fluid.tests.unittests.test_lookup_table_bf16_op import ( ...@@ -32,6 +32,7 @@ from paddle.fluid.tests.unittests.test_lookup_table_bf16_op import (
class TestLookupTableV2BF16Op(TestLookupTableBF16Op): class TestLookupTableV2BF16Op(TestLookupTableBF16Op):
def init_test(self): def init_test(self):
self.op_type = "lookup_table_v2" self.op_type = "lookup_table_v2"
self.python_api = paddle.nn.functional.embedding
self.ids_shape = 4 self.ids_shape = 4
self.mkldnn_data_type = "bfloat16" self.mkldnn_data_type = "bfloat16"
...@@ -39,6 +40,7 @@ class TestLookupTableV2BF16Op(TestLookupTableBF16Op): ...@@ -39,6 +40,7 @@ class TestLookupTableV2BF16Op(TestLookupTableBF16Op):
class TestLookupTableV2BF16OpIds4D(TestLookupTableBF16OpIds4D): class TestLookupTableV2BF16OpIds4D(TestLookupTableBF16OpIds4D):
def init_test(self): def init_test(self):
self.op_type = "lookup_table_v2" self.op_type = "lookup_table_v2"
self.python_api = paddle.nn.functional.embedding
self.ids_shape = (2, 4, 5) self.ids_shape = (2, 4, 5)
self.mkldnn_data_type = "bfloat16" self.mkldnn_data_type = "bfloat16"
...@@ -48,6 +50,7 @@ class TestLookupTableV2BF16OpWIsSelectedRows( ...@@ -48,6 +50,7 @@ class TestLookupTableV2BF16OpWIsSelectedRows(
): ):
def init_test(self): def init_test(self):
self.op_type = "lookup_table_v2" self.op_type = "lookup_table_v2"
self.python_api = paddle.nn.functional.embedding
self.ids_shape = 10 self.ids_shape = 10
...@@ -56,6 +59,7 @@ class TestLookupTableV2BF16OpWIsSelectedRows4DIds( ...@@ -56,6 +59,7 @@ class TestLookupTableV2BF16OpWIsSelectedRows4DIds(
): ):
def init_test(self): def init_test(self):
self.op_type = "lookup_table_v2" self.op_type = "lookup_table_v2"
self.python_api = paddle.nn.functional.embedding
self.ids_shape = (3, 4, 5) self.ids_shape = (3, 4, 5)
...@@ -88,6 +92,7 @@ class TestEmbeddingLayerBF16ConstantInitializer(unittest.TestCase): ...@@ -88,6 +92,7 @@ class TestEmbeddingLayerBF16ConstantInitializer(unittest.TestCase):
def setUp(self): def setUp(self):
self.op_type = "lookup_table_v2" self.op_type = "lookup_table_v2"
self.python_api = paddle.nn.functional.embedding
self.ids_shape = [4] self.ids_shape = [4]
self.w_shape = [10, 64] self.w_shape = [10, 64]
self.ids = np.random.randint(low=0, high=9, size=self.ids_shape).astype( self.ids = np.random.randint(low=0, high=9, size=self.ids_shape).astype(
......
...@@ -17,7 +17,7 @@ import random ...@@ -17,7 +17,7 @@ import random
import unittest import unittest
import numpy as np import numpy as np
from op_test import OpTest from eager_op_test import OpTest
import paddle import paddle
from paddle.fluid import core from paddle.fluid import core
...@@ -524,7 +524,7 @@ class TestCUDNNLstmOp(OpTest): ...@@ -524,7 +524,7 @@ class TestCUDNNLstmOp(OpTest):
else: else:
paddle.enable_static() paddle.enable_static()
self.check_output_with_place( self.check_output_with_place(
place, no_check_set=['Reserve', 'StateOut'] place, no_check_set=['Reserve', 'StateOut'], check_dygraph=False
) )
paddle.disable_static() paddle.disable_static()
...@@ -536,6 +536,7 @@ class TestCUDNNLstmOp(OpTest): ...@@ -536,6 +536,7 @@ class TestCUDNNLstmOp(OpTest):
place, place,
{'Input', var_name, 'InitH', 'InitC'}, {'Input', var_name, 'InitH', 'InitC'},
['Out', 'LastH', 'LastC'], ['Out', 'LastH', 'LastC'],
check_dygraph=False,
) )
......
...@@ -15,7 +15,7 @@ ...@@ -15,7 +15,7 @@
import unittest import unittest
import numpy as np import numpy as np
from op_test import OpTest from eager_op_test import OpTest
def generate_compatible_shapes_mul_head(dim_X, dim_Y, transpose_X, transpose_Y): def generate_compatible_shapes_mul_head(dim_X, dim_Y, transpose_X, transpose_Y):
......
...@@ -19,7 +19,7 @@ import numpy as np ...@@ -19,7 +19,7 @@ import numpy as np
import paddle import paddle
from paddle import fluid from paddle import fluid
from paddle.fluid import core from paddle.fluid import core
from paddle.fluid.tests.unittests.op_test import OpTest from paddle.fluid.tests.unittests.eager_op_test import OpTest
paddle.enable_static() paddle.enable_static()
SEED = 2049 SEED = 2049
......
...@@ -16,7 +16,7 @@ import unittest ...@@ -16,7 +16,7 @@ import unittest
import numpy import numpy
import numpy as np import numpy as np
from op_test import OpTest from eager_op_test import OpTest
import paddle import paddle
from paddle import fluid from paddle import fluid
...@@ -54,9 +54,38 @@ def calculate_momentum_by_numpy( ...@@ -54,9 +54,38 @@ def calculate_momentum_by_numpy(
return param_out, velocity_out return param_out, velocity_out
def momentum_wrapper(
param,
grad,
velocity,
learning_rate=1.0,
master_param=None,
mu=0.0,
use_nesterov=False,
regularization_method="",
regularization_coeff=0.0,
multi_precision=False,
rescale_grad=1.0,
):
return paddle._C_ops.momentum_(
param,
grad,
velocity,
learning_rate,
master_param,
mu,
use_nesterov,
regularization_method,
regularization_coeff,
multi_precision,
rescale_grad,
)
class TestMomentumOp1(OpTest): class TestMomentumOp1(OpTest):
def setUp(self): def setUp(self):
self.op_type = "momentum" self.op_type = "momentum"
self.python_api = momentum_wrapper
self.dtype = np.float32 self.dtype = np.float32
self.init_dtype() self.init_dtype()
...@@ -107,6 +136,7 @@ class TestMomentumOp2(OpTest): ...@@ -107,6 +136,7 @@ class TestMomentumOp2(OpTest):
def setUp(self): def setUp(self):
self.op_type = "momentum" self.op_type = "momentum"
self.python_api = momentum_wrapper
param = np.random.random((123, 321)).astype("float32") param = np.random.random((123, 321)).astype("float32")
grad = np.random.random((123, 321)).astype("float32") grad = np.random.random((123, 321)).astype("float32")
...@@ -221,7 +251,7 @@ class TestLarsMomentumOpWithMP(OpTest): ...@@ -221,7 +251,7 @@ class TestLarsMomentumOpWithMP(OpTest):
if core.is_compiled_with_cuda(): if core.is_compiled_with_cuda():
place = fluid.CUDAPlace(0) place = fluid.CUDAPlace(0)
if core.is_float16_supported(place): if core.is_float16_supported(place):
self.check_output_with_place(place) self.check_output_with_place(place, check_dygraph=False)
def config(self): def config(self):
self.params_num = 1 self.params_num = 1
...@@ -561,6 +591,7 @@ class TestMomentumV2(unittest.TestCase): ...@@ -561,6 +591,7 @@ class TestMomentumV2(unittest.TestCase):
class TestMomentumOpWithDecay(OpTest): class TestMomentumOpWithDecay(OpTest):
def setUp(self): def setUp(self):
self.op_type = "momentum" self.op_type = "momentum"
self.python_api = momentum_wrapper
self.dtype = np.float32 self.dtype = np.float32
self.use_nesterov = True self.use_nesterov = True
self.regularization_method = 'l2_decay' self.regularization_method = 'l2_decay'
......
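momentum_wrapper targets paddle._C_ops.momentum_; the trailing underscore marks the in-place variant, so the updated parameter and velocity come back through the input tensors rather than as fresh outputs. A hedged dygraph sketch with illustrative values:

    import paddle

    param = paddle.ones([3], dtype="float32")
    grad = paddle.full([3], 0.1, dtype="float32")
    velocity = paddle.zeros([3], dtype="float32")
    lr = paddle.to_tensor([0.01], dtype="float32")
    momentum_wrapper(param, grad, velocity, learning_rate=lr, mu=0.9)
    # param and velocity now hold the updated values (in-place semantics)
    print(param.numpy(), velocity.numpy())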
...@@ -15,7 +15,7 @@ ...@@ -15,7 +15,7 @@
import unittest import unittest
import numpy as np import numpy as np
from op_test import OpTest from eager_op_test import OpTest, paddle_static_guard
import paddle import paddle
from paddle import fluid from paddle import fluid
...@@ -186,156 +186,173 @@ class TestNCECase1SelectedRows(unittest.TestCase): ...@@ -186,156 +186,173 @@ class TestNCECase1SelectedRows(unittest.TestCase):
custom_dist, custom_dist,
is_sparse, is_sparse,
): ):
input = paddle.static.data( with paddle_static_guard():
name="input", shape=[-1, 10], dtype="float32" input = paddle.static.data(
) name="input", shape=[-1, 10], dtype="float32"
label = paddle.static.data(name="label", shape=[-1, 1], dtype="int64")
w_param = (
fluid.default_main_program()
.global_block()
.create_parameter(
shape=[num_total_classes, 10],
dtype='float32',
name='nce_w',
initializer=paddle.nn.initializer.Constant(),
) )
) label = paddle.static.data(
b_param = ( name="label", shape=[-1, 1], dtype="int64"
fluid.default_main_program()
.global_block()
.create_parameter(
shape=[num_total_classes, 1],
dtype='float32',
name='nce_b',
initializer=paddle.nn.initializer.Constant(),
) )
)
cost = paddle.static.nn.nce( w_param = (
input=input, fluid.default_main_program()
label=label, .global_block()
num_total_classes=num_total_classes, .create_parameter(
sampler=sampler, shape=[num_total_classes, 10],
custom_dist=custom_dist, dtype='float32',
sample_weight=None, name='nce_w',
param_attr='nce_w', initializer=paddle.nn.initializer.Constant(),
bias_attr='nce_b', )
seed=1, )
num_neg_samples=num_neg_samples, b_param = (
is_sparse=is_sparse, fluid.default_main_program()
) .global_block()
avg_cost = paddle.mean(cost) .create_parameter(
# optimizer shape=[num_total_classes, 1],
optimizer = self.get_optimizer() dtype='float32',
optimizer.minimize(avg_cost) name='nce_b',
initializer=paddle.nn.initializer.Constant(),
)
)
return [avg_cost, [input, label]] cost = paddle.static.nn.nce(
input=input,
label=label,
num_total_classes=num_total_classes,
sampler=sampler,
custom_dist=custom_dist,
sample_weight=None,
param_attr='nce_w',
bias_attr='nce_b',
seed=1,
num_neg_samples=num_neg_samples,
is_sparse=is_sparse,
)
avg_cost = paddle.mean(cost)
# optimizer
optimizer = self.get_optimizer()
optimizer.minimize(avg_cost)
return [avg_cost, [input, label]]
def test_input_is_selected_rows(self): def test_input_is_selected_rows(self):
place = self.get_place() with paddle_static_guard():
exe = fluid.Executor(place) place = self.get_place()
exe = fluid.Executor(place)
data = self.get_train_data(self.batch_size)
nid_freq_arr = np.random.dirichlet(np.ones(20) * 1000).astype('float32')
rets = []
# for dense
dense_scope = fluid.core.Scope()
dense_startup_program = fluid.framework.Program()
dense_train_program = fluid.framework.Program()
with fluid.scope_guard(dense_scope):
with fluid.program_guard(
dense_train_program, dense_startup_program
):
cost, feeds = self.train_network(
20, 5, "custom_dist", nid_freq_arr.tolist(), False
)
feeder = fluid.DataFeeder(feed_list=feeds, place=place)
exe.run(dense_startup_program)
loss_val = exe.run(
dense_train_program,
feed=feeder.feed(data),
fetch_list=[cost.name],
)
rets.append(np.mean(loss_val))
# for sparse
sparse_scope = fluid.core.Scope()
sparse_startup_program = fluid.framework.Program()
sparse_train_program = fluid.framework.Program()
with fluid.scope_guard(sparse_scope):
with fluid.program_guard(
sparse_train_program, sparse_startup_program
):
cost, feeds = self.train_network(
20, 5, "custom_dist", nid_freq_arr.tolist(), True
)
feeder = fluid.DataFeeder(feed_list=feeds, place=place)
exe.run(sparse_startup_program)
loss_val = exe.run(
sparse_train_program,
feed=feeder.feed(data),
fetch_list=[cost.name],
)
rets.append(np.mean(loss_val))
self.assertEqual(rets[0], rets[1]) data = self.get_train_data(self.batch_size)
nid_freq_arr = np.random.dirichlet(np.ones(20) * 1000).astype(
'float32'
)
rets = []
# for dense
dense_scope = fluid.core.Scope()
dense_startup_program = fluid.framework.Program()
dense_train_program = fluid.framework.Program()
with fluid.scope_guard(dense_scope):
with fluid.program_guard(
dense_train_program, dense_startup_program
):
cost, feeds = self.train_network(
20, 5, "custom_dist", nid_freq_arr.tolist(), False
)
feeder = fluid.DataFeeder(feed_list=feeds, place=place)
paddle.enable_static()
exe.run(dense_startup_program)
loss_val = exe.run(
dense_train_program,
feed=feeder.feed(data),
fetch_list=[cost.name],
)
rets.append(np.mean(loss_val))
# for sparse
sparse_scope = fluid.core.Scope()
sparse_startup_program = fluid.framework.Program()
sparse_train_program = fluid.framework.Program()
with fluid.scope_guard(sparse_scope):
with fluid.program_guard(
sparse_train_program, sparse_startup_program
):
cost, feeds = self.train_network(
20, 5, "custom_dist", nid_freq_arr.tolist(), True
)
feeder = fluid.DataFeeder(feed_list=feeds, place=place)
paddle.enable_static()
exe.run(sparse_startup_program)
loss_val = exe.run(
sparse_train_program,
feed=feeder.feed(data),
fetch_list=[cost.name],
)
rets.append(np.mean(loss_val))
self.assertEqual(rets[0], rets[1])
class TestNCE_OpError(unittest.TestCase): class TestNCE_OpError(unittest.TestCase):
def test_errors(self): def test_errors(self):
with program_guard(Program(), Program()): with paddle_static_guard():
input1 = fluid.create_lod_tensor( with program_guard(Program(), Program()):
np.array([0.0, 3.0, 2.0, 4.0]), [[1, 1, 2]], fluid.CPUPlace() input1 = fluid.create_lod_tensor(
) np.array([0.0, 3.0, 2.0, 4.0]),
label1 = paddle.static.data( [[1, 1, 2]],
name='label1', shape=[-1, 4], dtype="int64" fluid.CPUPlace(),
) )
# the input(input) of nce layer must be Variable. label1 = paddle.static.data(
self.assertRaises( name='label1', shape=[-1, 4], dtype="int64"
TypeError, paddle.static.nn.nce, input1, label1, 5 )
) # the input(input) of nce layer must be Variable.
self.assertRaises(
TypeError, paddle.static.nn.nce, input1, label1, 5
)
input2 = paddle.static.data( input2 = paddle.static.data(
name='input2', shape=[-1, 4], dtype="float32" name='input2', shape=[-1, 4], dtype="float32"
) )
label2 = fluid.create_lod_tensor( label2 = fluid.create_lod_tensor(
np.array([0.0, 3.0, 2.0, 4.0]), [[1, 1, 2]], fluid.CPUPlace() np.array([0.0, 3.0, 2.0, 4.0]),
) [[1, 1, 2]],
# the input(label) of nce layer must be Variable. fluid.CPUPlace(),
self.assertRaises( )
TypeError, paddle.static.nn.nce, input2, label2, 5 # the input(label) of nce layer must be Variable.
) self.assertRaises(
TypeError, paddle.static.nn.nce, input2, label2, 5
)
input3 = paddle.static.data( input3 = paddle.static.data(
name='input3', shape=[-1, 4], dtype="float16" name='input3', shape=[-1, 4], dtype="float16"
) )
label3 = paddle.static.data( label3 = paddle.static.data(
name='label3', shape=[-1, 1], dtype="int64" name='label3', shape=[-1, 1], dtype="int64"
) )
# the data type of input(input) must be float32 or float64. # the data type of input(input) must be float32 or float64.
self.assertRaises( self.assertRaises(
TypeError, paddle.static.nn.nce, input3, label3, 5 TypeError, paddle.static.nn.nce, input3, label3, 5
) )
input4 = paddle.static.data( input4 = paddle.static.data(
name='input4', shape=[-1, 4], dtype="float32" name='input4', shape=[-1, 4], dtype="float32"
) )
label4 = paddle.static.data( label4 = paddle.static.data(
name='label4', shape=[-1, 1], dtype="int32" name='label4', shape=[-1, 1], dtype="int32"
) )
# the data type of input(label) must be int64. # the data type of input(label) must be int64.
self.assertRaises( self.assertRaises(
TypeError, paddle.static.nn.nce, input4, label4, 5 TypeError, paddle.static.nn.nce, input4, label4, 5
) )
input5 = paddle.static.data(name='x', shape=[1], dtype='float32') input5 = paddle.static.data(
label5 = paddle.static.data(name='label', shape=[1], dtype='int64') name='x', shape=[1], dtype='float32'
)
label5 = paddle.static.data(
name='label', shape=[1], dtype='int64'
)
self.assertRaises( self.assertRaises(
ValueError, paddle.static.nn.nce, input5, label5, 1 ValueError, paddle.static.nn.nce, input5, label5, 1
) )
if __name__ == '__main__': if __name__ == '__main__':
......
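The nce changes are a wholesale re-indent: every test body now sits under paddle_static_guard(), since paddle.static.nn.nce is a static-graph-only API and the migrated suite defaults to dygraph. A hedged, self-contained reduction of the error-test pattern above (the illegal call is built in a closure and the raise asserted inside the guard):

    import unittest

    import numpy as np
    import paddle
    from eager_op_test import paddle_static_guard

    class TestNceErrorSketch(unittest.TestCase):
        def test_input_must_be_variable(self):
            with paddle_static_guard():
                with paddle.static.program_guard(paddle.static.Program()):
                    def bad_call():
                        label = paddle.static.data(
                            name='l', shape=[-1, 1], dtype='int64')
                        # ndarray input must be rejected with TypeError
                        paddle.static.nn.nce(
                            np.zeros((4, 10), dtype='float32'), label, 5)
                    self.assertRaises(TypeError, bad_call)

    if __name__ == '__main__':
        unittest.main()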
...@@ -201,7 +201,7 @@ class TestExpandV2DoubleGradCheck(unittest.TestCase): ...@@ -201,7 +201,7 @@ class TestExpandV2DoubleGradCheck(unittest.TestCase):
class TestSqueezeDoubleGradCheck(unittest.TestCase): class TestSqueezeDoubleGradCheck(unittest.TestCase):
def squeeze_warpper(self, x): def squeeze_wrapper(self, x):
axes = [0, 2] axes = [0, 2]
return paddle.squeeze(x[0], axes) return paddle.squeeze(x[0], axes)
...@@ -221,7 +221,7 @@ class TestSqueezeDoubleGradCheck(unittest.TestCase): ...@@ -221,7 +221,7 @@ class TestSqueezeDoubleGradCheck(unittest.TestCase):
[x], out, x_init=x_arr, place=place, eps=eps [x], out, x_init=x_arr, place=place, eps=eps
) )
gradient_checker.double_grad_check_for_dygraph( gradient_checker.double_grad_check_for_dygraph(
self.squeeze_warpper, [x], out, x_init=x_arr, place=place self.squeeze_wrapper, [x], out, x_init=x_arr, place=place
) )
def test_grad(self): def test_grad(self):
......
...@@ -15,8 +15,7 @@ ...@@ -15,8 +15,7 @@
import unittest import unittest
import numpy as np import numpy as np
from eager_op_test import OpTest, skip_check_grad_ci from eager_op_test import OpTest, convert_float_to_uint16, skip_check_grad_ci
from op_test import convert_float_to_uint16
import paddle import paddle
from paddle import fluid from paddle import fluid
......
...@@ -14,8 +14,8 @@ ...@@ -14,8 +14,8 @@
import unittest import unittest
import eager_op_test
import numpy as np import numpy as np
import op_test
import paddle import paddle
from paddle.distributed.models.moe import utils from paddle.distributed.models.moe import utils
...@@ -33,7 +33,7 @@ def count(x, upper_num): ...@@ -33,7 +33,7 @@ def count(x, upper_num):
@unittest.skipIf( @unittest.skipIf(
not core.is_compiled_with_cuda(), "core is not compiled with CUDA" not core.is_compiled_with_cuda(), "core is not compiled with CUDA"
) )
class TestNumberCountOpInt64(op_test.OpTest): class TestNumberCountOpInt64(eager_op_test.OpTest):
def setUp(self): def setUp(self):
upper_num = 16 upper_num = 16
self.op_type = "number_count" self.op_type = "number_count"
......
...@@ -15,7 +15,7 @@ ...@@ -15,7 +15,7 @@
import unittest import unittest
import numpy as np import numpy as np
from op_test import OpTest from eager_op_test import OpTest
import paddle import paddle
import paddle.nn.functional as F import paddle.nn.functional as F
...@@ -68,6 +68,12 @@ def pixel_unshuffle_np(x, down_factor, data_format="NCHW"): ...@@ -68,6 +68,12 @@ def pixel_unshuffle_np(x, down_factor, data_format="NCHW"):
return npresult return npresult
def pixel_unshuffle_wrapper(x, downscale_factor, data_format):
return paddle._legacy_C_ops.pixel_unshuffle(
x, "downscale_factor", downscale_factor, "data_format", data_format
)
class TestPixelUnshuffleOp(OpTest): class TestPixelUnshuffleOp(OpTest):
'''TestPixelUnshuffleOp''' '''TestPixelUnshuffleOp'''
...@@ -75,6 +81,7 @@ class TestPixelUnshuffleOp(OpTest): ...@@ -75,6 +81,7 @@ class TestPixelUnshuffleOp(OpTest):
'''setUp''' '''setUp'''
self.op_type = "pixel_unshuffle" self.op_type = "pixel_unshuffle"
self.python_api = pixel_unshuffle_wrapper
self.init_data_format() self.init_data_format()
n, c, h, w = 2, 1, 12, 12 n, c, h, w = 2, 1, 12, 12
......
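pixel_unshuffle is the odd one out: its wrapper goes through paddle._legacy_C_ops, whose calling convention interleaves attribute names with their values instead of taking keywords; the wrapper hides that from the test. A hedged shape check with illustrative sizes:

    import numpy as np
    import paddle

    x = paddle.to_tensor(np.random.random((2, 1, 12, 12)).astype("float64"))
    y = pixel_unshuffle_wrapper(x, downscale_factor=3, data_format="NCHW")
    print(y.shape)  # expect [2, 9, 4, 4]: channels *= 3*3, spatial dims /= 3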
...@@ -16,8 +16,9 @@ import unittest ...@@ -16,8 +16,9 @@ import unittest
import numpy as np import numpy as np
import paddle
from paddle.fluid import core from paddle.fluid import core
from paddle.fluid.tests.unittests.op_test import OpTest from paddle.fluid.tests.unittests.eager_op_test import OpTest
def adaptive_start_index(index, input_size, output_size): def adaptive_start_index(index, input_size, output_size):
...@@ -285,6 +286,67 @@ def pool2D_forward_naive( ...@@ -285,6 +286,67 @@ def pool2D_forward_naive(
return out return out
def pool2d_wrapper_not_use_cudnn(
X,
ksize=[],
strides=[],
paddings=[],
ceil_mode=False,
exclusive=True,
data_format="NCDHW",
pooling_type="max",
global_pooling=False,
adaptive=False,
padding_algorithm="EXPLICIT",
):
tmp = X._use_gpudnn(False)
if data_format == "AnyLayout":
data_format = "NCDHW"
return paddle._C_ops.pool2d(
tmp,
ksize,
strides,
paddings,
ceil_mode,
exclusive,
data_format,
pooling_type,
global_pooling,
adaptive,
padding_algorithm,
)
def pool2d_wrapper_use_cudnn(
X,
ksize=[],
strides=[],
paddings=[],
ceil_mode=False,
exclusive=True,
data_format="NCDHW",
pooling_type="max",
global_pooling=False,
adaptive=False,
padding_algorithm="EXPLICIT",
):
if data_format == "AnyLayout":
data_format = "NCDHW"
return paddle._C_ops.pool2d(
X,
ksize,
strides,
paddings,
ceil_mode,
exclusive,
data_format,
pooling_type,
global_pooling,
adaptive,
padding_algorithm,
)
class TestPool2D_Op_Mixin: class TestPool2D_Op_Mixin:
def setUp(self): def setUp(self):
self.op_type = "pool2d" self.op_type = "pool2d"
...@@ -337,6 +399,11 @@ class TestPool2D_Op_Mixin: ...@@ -337,6 +399,11 @@ class TestPool2D_Op_Mixin:
self.outputs = {'Out': output} self.outputs = {'Out': output}
if self.use_cudnn:
self.python_api = pool2d_wrapper_use_cudnn
else:
self.python_api = pool2d_wrapper_not_use_cudnn
def has_cudnn(self): def has_cudnn(self):
return core.is_compiled_with_cuda() and self.use_cudnn return core.is_compiled_with_cuda() and self.use_cudnn
......
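The two pool2d wrappers differ only in X._use_gpudnn(False), which (per its use here) pins the tensor to the plain kernel so both halves of the old use_cudnn attribute stay covered; setUp then picks a wrapper from self.use_cudnn. A hedged pass through the non-cuDNN path with small CPU-safe shapes; availability of _use_gpudnn is assumed from the diff:

    import numpy as np
    import paddle

    x = paddle.to_tensor(np.random.random((1, 3, 8, 8)).astype("float32"))
    y = pool2d_wrapper_not_use_cudnn(
        x, ksize=[2, 2], strides=[2, 2], paddings=[0, 0],
        data_format="NCHW", pooling_type="avg")
    print(y.shape)  # [1, 3, 4, 4]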
...@@ -15,7 +15,7 @@ ...@@ -15,7 +15,7 @@
import unittest import unittest
import numpy as np import numpy as np
from op_test import OpTest from eager_op_test import OpTest
import paddle import paddle
from paddle.fluid import core from paddle.fluid import core
...@@ -271,6 +271,67 @@ def avg_pool3D_forward_naive( ...@@ -271,6 +271,67 @@ def avg_pool3D_forward_naive(
return out return out
def pool3d_wrapper_not_use_cudnn(
X,
ksize=[],
strides=[],
paddings=[],
ceil_mode=False,
exclusive=True,
data_format="NCDHW",
pooling_type="max",
global_pooling=False,
adaptive=False,
padding_algorithm="EXPLICIT",
):
tmp = X._use_gpudnn(False)
if data_format == "AnyLayout":
data_format = "NCDHW"
return paddle._C_ops.pool3d(
tmp,
ksize,
strides,
paddings,
ceil_mode,
exclusive,
data_format,
pooling_type,
global_pooling,
adaptive,
padding_algorithm,
)
def pool3d_wrapper_use_cudnn(
X,
ksize=[],
strides=[],
paddings=[],
ceil_mode=False,
exclusive=True,
data_format="NCDHW",
pooling_type="max",
global_pooling=False,
adaptive=False,
padding_algorithm="EXPLICIT",
):
if data_format == "AnyLayout":
data_format = "NCDHW"
return paddle._C_ops.pool3d(
X,
ksize,
strides,
paddings,
ceil_mode,
exclusive,
data_format,
pooling_type,
global_pooling,
adaptive,
padding_algorithm,
)
class TestPool3D_Op(OpTest): class TestPool3D_Op(OpTest):
def setUp(self): def setUp(self):
self.op_type = "pool3d" self.op_type = "pool3d"
...@@ -322,6 +383,11 @@ class TestPool3D_Op(OpTest): ...@@ -322,6 +383,11 @@ class TestPool3D_Op(OpTest):
self.outputs = {'Out': output} self.outputs = {'Out': output}
if self.use_cudnn:
self.python_api = pool3d_wrapper_use_cudnn
else:
self.python_api = pool3d_wrapper_not_use_cudnn
def has_cudnn(self): def has_cudnn(self):
return core.is_compiled_with_cuda() and self.use_cudnn return core.is_compiled_with_cuda() and self.use_cudnn
......
...@@ -15,7 +15,9 @@ ...@@ -15,7 +15,9 @@
import unittest import unittest
import numpy as np import numpy as np
from op_test import OpTest from eager_op_test import OpTest
import paddle
def adaptive_start_index(index, input_size, output_size): def adaptive_start_index(index, input_size, output_size):
...@@ -129,6 +131,19 @@ def max_pool2D_forward_naive( ...@@ -129,6 +131,19 @@ def max_pool2D_forward_naive(
return out, mask return out, mask
def max_pool3d_with_index_wrapper(
x,
kernel_size=[],
strides=[],
paddings=[],
global_pooling=False,
adaptive=False,
):
return paddle._C_ops.max_pool3d_with_index(
x, kernel_size, strides, paddings, global_pooling, adaptive
)
class TestMaxPoolWithIndex_Op(OpTest): class TestMaxPoolWithIndex_Op(OpTest):
def setUp(self): def setUp(self):
self.init_test_case() self.init_test_case()
...@@ -167,6 +182,7 @@ class TestMaxPoolWithIndex_Op(OpTest): ...@@ -167,6 +182,7 @@ class TestMaxPoolWithIndex_Op(OpTest):
def init_test_case(self): def init_test_case(self):
self.op_type = "max_pool3d_with_index" self.op_type = "max_pool3d_with_index"
self.python_api = max_pool3d_with_index_wrapper
self.pool_forward_naive = max_pool3D_forward_naive self.pool_forward_naive = max_pool3D_forward_naive
self.shape = [2, 3, 7, 7, 7] self.shape = [2, 3, 7, 7, 7]
self.ksize = [3, 3, 3] self.ksize = [3, 3, 3]
...@@ -188,6 +204,7 @@ class TestCase1(TestMaxPoolWithIndex_Op): ...@@ -188,6 +204,7 @@ class TestCase1(TestMaxPoolWithIndex_Op):
class TestCase2(TestMaxPoolWithIndex_Op): class TestCase2(TestMaxPoolWithIndex_Op):
def init_test_case(self): def init_test_case(self):
self.op_type = "max_pool3d_with_index" self.op_type = "max_pool3d_with_index"
self.python_api = max_pool3d_with_index_wrapper
self.pool_forward_naive = max_pool3D_forward_naive self.pool_forward_naive = max_pool3D_forward_naive
self.shape = [2, 3, 7, 7, 7] self.shape = [2, 3, 7, 7, 7]
self.ksize = [3, 3, 3] self.ksize = [3, 3, 3]
...@@ -204,9 +221,25 @@ class TestCase3(TestCase2): ...@@ -204,9 +221,25 @@ class TestCase3(TestCase2):
# ----------------max_pool2d_with_index---------------- # ----------------max_pool2d_with_index----------------
def max_pool2d_with_index_wrapper(
x,
kernel_size=[],
strides=[],
paddings=[],
global_pooling=False,
adaptive=False,
):
return paddle._C_ops.max_pool2d_with_index(
x, kernel_size, strides, paddings, global_pooling, adaptive
)
class TestCase4(TestMaxPoolWithIndex_Op): class TestCase4(TestMaxPoolWithIndex_Op):
def init_test_case(self): def init_test_case(self):
self.op_type = "max_pool2d_with_index" self.op_type = "max_pool2d_with_index"
self.python_api = max_pool2d_with_index_wrapper
self.pool_forward_naive = max_pool2D_forward_naive self.pool_forward_naive = max_pool2D_forward_naive
self.shape = [2, 3, 7, 7] self.shape = [2, 3, 7, 7]
self.ksize = [3, 3] self.ksize = [3, 3]
...@@ -225,6 +258,7 @@ class TestCase5(TestCase4): ...@@ -225,6 +258,7 @@ class TestCase5(TestCase4):
class TestCase6(TestMaxPoolWithIndex_Op): class TestCase6(TestMaxPoolWithIndex_Op):
def init_test_case(self): def init_test_case(self):
self.op_type = "max_pool2d_with_index" self.op_type = "max_pool2d_with_index"
self.python_api = max_pool2d_with_index_wrapper
self.pool_forward_naive = max_pool2D_forward_naive self.pool_forward_naive = max_pool2D_forward_naive
self.shape = [2, 3, 7, 7] self.shape = [2, 3, 7, 7]
self.ksize = [3, 3] self.ksize = [3, 3]
......
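The with-index pools declare two outputs, Out and Mask, and the wrappers return the kernel's result directly so OpTest can compare both. Assuming the kernel yields the (Out, Mask) pair in that order, a hedged shape check:

    import numpy as np
    import paddle

    x = paddle.to_tensor(np.random.random((2, 3, 7, 7)).astype("float64"))
    out, mask = max_pool2d_with_index_wrapper(
        x, kernel_size=[3, 3], strides=[2, 2], paddings=[0, 0])
    print(out.shape, mask.shape)  # both [2, 3, 3, 3]; mask holds argmax indices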
...@@ -15,7 +15,7 @@ ...@@ -15,7 +15,7 @@
import unittest import unittest
import numpy as np import numpy as np
from op_test import OpTest from eager_op_test import OpTest
def calc_precision(tp_count, fp_count): def calc_precision(tp_count, fp_count):
......
...@@ -15,7 +15,7 @@ ...@@ -15,7 +15,7 @@
import unittest import unittest
import numpy as np import numpy as np
from op_test import OpTest, skip_check_grad_ci from eager_op_test import OpTest, skip_check_grad_ci
import paddle import paddle
import paddle.nn.functional as F import paddle.nn.functional as F
...@@ -162,16 +162,14 @@ class TestNNPReluAPI(unittest.TestCase): ...@@ -162,16 +162,14 @@ class TestNNPReluAPI(unittest.TestCase):
paddle.enable_static() paddle.enable_static()
def prelu_api_wrapper(x, weight, data_format="NCHW"): def prelu_api_wrapper(x, alpha, data_format="NCHW", mode="all"):
weight = weight.reshape([-1]) return paddle._C_ops.prelu(x, alpha, data_format, mode)
return paddle.nn.functional.prelu(x, weight, data_format, name=None)
class PReluTest(OpTest): class PReluTest(OpTest):
def setUp(self): def setUp(self):
self.init_dtype() self.init_dtype()
self.init_input_shape() self.init_input_shape()
self.eager_mode = True
self.init_attr() self.init_attr()
self.op_type = "prelu" self.op_type = "prelu"
self.python_api = prelu_api_wrapper self.python_api = prelu_api_wrapper
...@@ -192,8 +190,6 @@ class PReluTest(OpTest): ...@@ -192,8 +190,6 @@ class PReluTest(OpTest):
alpha_np = np.random.uniform(-1, -0.5, [1, 1, 1, self.x_shape[-1]]) alpha_np = np.random.uniform(-1, -0.5, [1, 1, 1, self.x_shape[-1]])
else: else:
alpha_np = np.random.uniform(-1, -0.5, [1] + self.x_shape[1:]) alpha_np = np.random.uniform(-1, -0.5, [1] + self.x_shape[1:])
# eager check don't support mode = 'all'
self.eager_mode = False
alpha_np = alpha_np.astype(self.dtype) alpha_np = alpha_np.astype(self.dtype)
self.inputs = {'X': x_np, 'Alpha': alpha_np} self.inputs = {'X': x_np, 'Alpha': alpha_np}
...@@ -226,10 +222,10 @@ class PReluTest(OpTest): ...@@ -226,10 +222,10 @@ class PReluTest(OpTest):
self.attrs = {'mode': "channel", "data_format": "NCHW"} self.attrs = {'mode': "channel", "data_format": "NCHW"}
def test_check_output(self): def test_check_output(self):
self.check_output(check_eager=self.eager_mode) self.check_output()
def test_check_grad(self): def test_check_grad(self):
self.check_grad(['X', 'Alpha'], 'Out', check_eager=self.eager_mode) self.check_grad(['X', 'Alpha'], 'Out')
@skip_check_grad_ci( @skip_check_grad_ci(
...@@ -392,9 +388,7 @@ def create_test_fp16_class( ...@@ -392,9 +388,7 @@ def create_test_fp16_class(
if core.is_compiled_with_cuda(): if core.is_compiled_with_cuda():
place = core.CUDAPlace(0) place = core.CUDAPlace(0)
if core.is_float16_supported(place): if core.is_float16_supported(place):
self.check_output_with_place( self.check_output_with_place(place, atol=atol)
place, atol=atol, check_eager=self.eager_mode
)
def test_check_grad(self): def test_check_grad(self):
place = core.CUDAPlace(0) place = core.CUDAPlace(0)
...@@ -404,7 +398,6 @@ def create_test_fp16_class( ...@@ -404,7 +398,6 @@ def create_test_fp16_class(
['X', 'Alpha'], ['X', 'Alpha'],
'Out', 'Out',
max_relative_error=max_relative_error, max_relative_error=max_relative_error,
check_eager=self.eager_mode,
) )
cls_name = "{0}_{1}".format(parent.__name__, "Fp16Op") cls_name = "{0}_{1}".format(parent.__name__, "Fp16Op")
......
...@@ -16,7 +16,7 @@ import random ...@@ -16,7 +16,7 @@ import random
import unittest import unittest
import numpy as np import numpy as np
from op_test import OpTest from eager_op_test import OpTest
from paddle.fluid import core from paddle.fluid import core
......
...@@ -15,7 +15,7 @@ ...@@ -15,7 +15,7 @@
import unittest import unittest
import numpy as np import numpy as np
from op_test import OpTest from eager_op_test import OpTest
class TestRankLossOp(OpTest): class TestRankLossOp(OpTest):
......
...@@ -15,7 +15,7 @@ ...@@ -15,7 +15,7 @@
import unittest import unittest
import numpy as np import numpy as np
from op_test import OpTest, convert_float_to_uint16, skip_check_grad_ci from eager_op_test import OpTest, convert_float_to_uint16, skip_check_grad_ci
import paddle import paddle
from paddle import fluid from paddle import fluid
...@@ -35,10 +35,10 @@ class TestSumOp(OpTest): ...@@ -35,10 +35,10 @@ class TestSumOp(OpTest):
self.enable_cinn = True self.enable_cinn = True
def test_check_output(self): def test_check_output(self):
self.check_output(check_eager=True) self.check_output()
def test_check_grad(self): def test_check_grad(self):
self.check_grad(['X'], 'Out', check_eager=True, check_prim=True) self.check_grad(['X'], 'Out', check_prim=True)
class TestSumOpFp32(OpTest): class TestSumOpFp32(OpTest):
...@@ -58,7 +58,7 @@ class TestSumOpFp32(OpTest): ...@@ -58,7 +58,7 @@ class TestSumOpFp32(OpTest):
self.enable_cinn = True self.enable_cinn = True
def test_check_output(self): def test_check_output(self):
self.check_output(check_eager=True) self.check_output()
def calc_gradient(self): def calc_gradient(self):
x = self.inputs["X"] x = self.inputs["X"]
...@@ -70,7 +70,6 @@ class TestSumOpFp32(OpTest): ...@@ -70,7 +70,6 @@ class TestSumOpFp32(OpTest):
['X'], ['X'],
'Out', 'Out',
user_defined_grads=self.gradient, user_defined_grads=self.gradient,
check_eager=True,
check_prim=True, check_prim=True,
) )
...@@ -89,10 +88,10 @@ class TestSumOp_ZeroDim(OpTest): ...@@ -89,10 +88,10 @@ class TestSumOp_ZeroDim(OpTest):
self.enable_cinn = False self.enable_cinn = False
def test_check_output(self): def test_check_output(self):
self.check_output(check_eager=True) self.check_output()
def test_check_grad(self): def test_check_grad(self):
self.check_grad(['X'], 'Out', check_eager=True) self.check_grad(['X'], 'Out')
@unittest.skipIf( @unittest.skipIf(
...@@ -118,7 +117,7 @@ class TestSumOp_bf16(OpTest): ...@@ -118,7 +117,7 @@ class TestSumOp_bf16(OpTest):
def test_check_output(self): def test_check_output(self):
place = core.CUDAPlace(0) place = core.CUDAPlace(0)
self.check_output_with_place(place, check_eager=True, atol=0.1) self.check_output_with_place(place, atol=0.1)
def test_check_grad(self): def test_check_grad(self):
place = core.CUDAPlace(0) place = core.CUDAPlace(0)
...@@ -127,7 +126,6 @@ class TestSumOp_bf16(OpTest): ...@@ -127,7 +126,6 @@ class TestSumOp_bf16(OpTest):
['X'], ['X'],
'Out', 'Out',
user_defined_grads=self.gradient, user_defined_grads=self.gradient,
check_eager=True,
check_prim=True, check_prim=True,
) )
...@@ -156,7 +154,7 @@ class TestSumOp_fp16_withInt(OpTest): ...@@ -156,7 +154,7 @@ class TestSumOp_fp16_withInt(OpTest):
self.enable_cinn = True self.enable_cinn = True
def test_check_output(self): def test_check_output(self):
self.check_output(check_eager=True) self.check_output()
def calc_gradient(self): def calc_gradient(self):
x = self.inputs["X"] x = self.inputs["X"]
...@@ -168,7 +166,6 @@ class TestSumOp_fp16_withInt(OpTest): ...@@ -168,7 +166,6 @@ class TestSumOp_fp16_withInt(OpTest):
['X'], ['X'],
'Out', 'Out',
user_defined_grads=self.gradient, user_defined_grads=self.gradient,
check_eager=True,
check_prim=True, check_prim=True,
) )
...@@ -188,10 +185,10 @@ class TestSumOp5D(OpTest): ...@@ -188,10 +185,10 @@ class TestSumOp5D(OpTest):
self.enable_cinn = True self.enable_cinn = True
def test_check_output(self): def test_check_output(self):
self.check_output(check_eager=True) self.check_output()
def test_check_grad(self): def test_check_grad(self):
self.check_grad(['X'], 'Out', check_eager=True, check_prim=True) self.check_grad(['X'], 'Out', check_prim=True)
class TestSumOp6D(OpTest): class TestSumOp6D(OpTest):
...@@ -207,10 +204,10 @@ class TestSumOp6D(OpTest): ...@@ -207,10 +204,10 @@ class TestSumOp6D(OpTest):
self.outputs = {'Out': self.inputs['X'].sum(axis=0)} self.outputs = {'Out': self.inputs['X'].sum(axis=0)}
def test_check_output(self): def test_check_output(self):
self.check_output(check_eager=True) self.check_output()
def test_check_grad(self): def test_check_grad(self):
self.check_grad(['X'], 'Out', check_eager=True, check_prim=True) self.check_grad(['X'], 'Out', check_prim=True)
class TestSumOp8D(OpTest): class TestSumOp8D(OpTest):
...@@ -227,7 +224,7 @@ class TestSumOp8D(OpTest): ...@@ -227,7 +224,7 @@ class TestSumOp8D(OpTest):
self.check_output() self.check_output()
def test_check_grad(self): def test_check_grad(self):
self.check_grad(['X'], 'Out', check_eager=True) self.check_grad(['X'], 'Out')
@skip_check_grad_ci( @skip_check_grad_ci(
...@@ -249,14 +246,13 @@ class TestMaxOp(OpTest): ...@@ -249,14 +246,13 @@ class TestMaxOp(OpTest):
} }
def test_check_output(self): def test_check_output(self):
self.check_output(check_eager=True) self.check_output()
def test_check_grad(self): def test_check_grad(self):
# only the composite op supports gradient check of reduce_max # only the composite op supports gradient check of reduce_max
self.check_grad( self.check_grad(
['X'], ['X'],
'Out', 'Out',
check_eager=True,
check_prim=True, check_prim=True,
only_check_prim=True, only_check_prim=True,
) )
...@@ -266,7 +262,7 @@ class TestMaxOp(OpTest): ...@@ -266,7 +262,7 @@ class TestMaxOp(OpTest):
self.inputs = {'X': np.random.random((5, 6, 10)).astype("float16")} self.inputs = {'X': np.random.random((5, 6, 10)).astype("float16")}
place = core.CUDAPlace(0) place = core.CUDAPlace(0)
with self.assertRaises(RuntimeError) as cm: with self.assertRaises(RuntimeError) as cm:
self.check_output_with_place(place, check_eager=True) self.check_output_with_place(place)
error_msg = str(cm.exception).split("\n")[-2].strip().split(".")[0] error_msg = str(cm.exception).split("\n")[-2].strip().split(".")[0]
self.assertEqual( self.assertEqual(
error_msg, error_msg,
...@@ -290,14 +286,13 @@ class TestMaxOp_ZeroDim(OpTest): ...@@ -290,14 +286,13 @@ class TestMaxOp_ZeroDim(OpTest):
} }
def test_check_output(self): def test_check_output(self):
self.check_output(check_eager=True) self.check_output()
def test_check_grad(self): def test_check_grad(self):
# only the composite op supports gradient check of reduce_max # only the composite op supports gradient check of reduce_max
self.check_grad( self.check_grad(
['X'], ['X'],
'Out', 'Out',
check_eager=True,
check_prim=True, check_prim=True,
only_check_prim=True, only_check_prim=True,
) )
...@@ -320,14 +315,13 @@ class TestMaxOp_FP32(OpTest): ...@@ -320,14 +315,13 @@ class TestMaxOp_FP32(OpTest):
} }
def test_check_output(self): def test_check_output(self):
self.check_output(check_eager=True) self.check_output()
def test_check_grad(self): def test_check_grad(self):
# only the composite op supports gradient check of reduce_max # only the composite op supports gradient check of reduce_max
self.check_grad( self.check_grad(
['X'], ['X'],
'Out', 'Out',
check_eager=True,
check_prim=True, check_prim=True,
only_check_prim=True, only_check_prim=True,
) )
...@@ -350,7 +344,7 @@ class TestMinOp(OpTest): ...@@ -350,7 +344,7 @@ class TestMinOp(OpTest):
} }
def test_check_output(self): def test_check_output(self):
self.check_output(check_eager=True) self.check_output()
class TestMinOp_ZeroDim(OpTest): class TestMinOp_ZeroDim(OpTest):
...@@ -366,7 +360,7 @@ class TestMinOp_ZeroDim(OpTest): ...@@ -366,7 +360,7 @@ class TestMinOp_ZeroDim(OpTest):
} }
def test_check_output(self): def test_check_output(self):
self.check_output(check_eager=True) self.check_output()
class TestMin6DOp(OpTest): class TestMin6DOp(OpTest):
...@@ -384,7 +378,7 @@ class TestMin6DOp(OpTest): ...@@ -384,7 +378,7 @@ class TestMin6DOp(OpTest):
} }
def test_check_output(self): def test_check_output(self):
self.check_output(check_eager=True) self.check_output()
class TestMin8DOp(OpTest): class TestMin8DOp(OpTest):
...@@ -402,7 +396,7 @@ class TestMin8DOp(OpTest): ...@@ -402,7 +396,7 @@ class TestMin8DOp(OpTest):
} }
def test_check_output(self): def test_check_output(self):
self.check_output(check_eager=True) self.check_output()
def raw_reduce_prod(x, dim=[0], keep_dim=False): def raw_reduce_prod(x, dim=[0], keep_dim=False):
...@@ -423,10 +417,10 @@ class TestProdOp(OpTest): ...@@ -423,10 +417,10 @@ class TestProdOp(OpTest):
) )
def test_check_output(self): def test_check_output(self):
self.check_output(check_eager=True) self.check_output()
def test_check_grad(self): def test_check_grad(self):
self.check_grad(['X'], 'Out', check_eager=True) self.check_grad(['X'], 'Out')
class TestProdOp_ZeroDim(OpTest): class TestProdOp_ZeroDim(OpTest):
...@@ -438,10 +432,10 @@ class TestProdOp_ZeroDim(OpTest): ...@@ -438,10 +432,10 @@ class TestProdOp_ZeroDim(OpTest):
self.attrs = {'dim': [], 'reduce_all': True} self.attrs = {'dim': [], 'reduce_all': True}
def test_check_output(self): def test_check_output(self):
self.check_output(check_eager=True) self.check_output()
def test_check_grad(self): def test_check_grad(self):
self.check_grad(['X'], 'Out', check_eager=True) self.check_grad(['X'], 'Out')
class TestProd6DOp(OpTest): class TestProd6DOp(OpTest):
...@@ -463,10 +457,10 @@ class TestProd6DOp(OpTest): ...@@ -463,10 +457,10 @@ class TestProd6DOp(OpTest):
) )
def test_check_output(self): def test_check_output(self):
self.check_output(check_eager=True) self.check_output()
def test_check_grad(self): def test_check_grad(self):
self.check_grad(['X'], 'Out', check_eager=True) self.check_grad(['X'], 'Out')
class TestProd8DOp(OpTest): class TestProd8DOp(OpTest):
...@@ -490,10 +484,10 @@ class TestProd8DOp(OpTest): ...@@ -490,10 +484,10 @@ class TestProd8DOp(OpTest):
) )
def test_check_output(self): def test_check_output(self):
self.check_output(check_eager=True) self.check_output()
def test_check_grad(self): def test_check_grad(self):
self.check_grad(['X'], 'Out', check_eager=True) self.check_grad(['X'], 'Out')
class TestAllOp(OpTest): class TestAllOp(OpTest):
...@@ -505,7 +499,7 @@ class TestAllOp(OpTest): ...@@ -505,7 +499,7 @@ class TestAllOp(OpTest):
self.attrs = {'reduce_all': True} self.attrs = {'reduce_all': True}
def test_check_output(self): def test_check_output(self):
self.check_output(check_eager=True) self.check_output()
class TestAllOp_ZeroDim(OpTest): class TestAllOp_ZeroDim(OpTest):
...@@ -517,7 +511,7 @@ class TestAllOp_ZeroDim(OpTest): ...@@ -517,7 +511,7 @@ class TestAllOp_ZeroDim(OpTest):
self.attrs = {'dim': [], 'reduce_all': True} self.attrs = {'dim': [], 'reduce_all': True}
def test_check_output(self): def test_check_output(self):
self.check_output(check_eager=True) self.check_output()
class TestAll8DOp(OpTest): class TestAll8DOp(OpTest):
...@@ -533,7 +527,7 @@ class TestAll8DOp(OpTest): ...@@ -533,7 +527,7 @@ class TestAll8DOp(OpTest):
self.outputs = {'Out': self.inputs['X'].all(axis=self.attrs['dim'])} self.outputs = {'Out': self.inputs['X'].all(axis=self.attrs['dim'])}
def test_check_output(self): def test_check_output(self):
self.check_output(check_eager=True) self.check_output()
class TestAllOpWithDim(OpTest): class TestAllOpWithDim(OpTest):
...@@ -545,7 +539,7 @@ class TestAllOpWithDim(OpTest): ...@@ -545,7 +539,7 @@ class TestAllOpWithDim(OpTest):
self.outputs = {'Out': self.inputs['X'].all(axis=self.attrs['dim'])} self.outputs = {'Out': self.inputs['X'].all(axis=self.attrs['dim'])}
def test_check_output(self): def test_check_output(self):
self.check_output(check_eager=True) self.check_output()
class TestAll8DOpWithDim(OpTest): class TestAll8DOpWithDim(OpTest):
...@@ -561,7 +555,7 @@ class TestAll8DOpWithDim(OpTest): ...@@ -561,7 +555,7 @@ class TestAll8DOpWithDim(OpTest):
self.outputs = {'Out': self.inputs['X'].all(axis=self.attrs['dim'])} self.outputs = {'Out': self.inputs['X'].all(axis=self.attrs['dim'])}
def test_check_output(self): def test_check_output(self):
self.check_output(check_eager=True) self.check_output()
class TestAllOpWithKeepDim(OpTest): class TestAllOpWithKeepDim(OpTest):
...@@ -575,7 +569,7 @@ class TestAllOpWithKeepDim(OpTest): ...@@ -575,7 +569,7 @@ class TestAllOpWithKeepDim(OpTest):
} }
def test_check_output(self): def test_check_output(self):
self.check_output(check_eager=True) self.check_output()
class TestAll8DOpWithKeepDim(OpTest): class TestAll8DOpWithKeepDim(OpTest):
...@@ -595,7 +589,7 @@ class TestAll8DOpWithKeepDim(OpTest): ...@@ -595,7 +589,7 @@ class TestAll8DOpWithKeepDim(OpTest):
} }
def test_check_output(self): def test_check_output(self):
self.check_output(check_eager=True) self.check_output()
class TestAllOpError(unittest.TestCase): class TestAllOpError(unittest.TestCase):
...@@ -620,7 +614,7 @@ class TestAnyOp(OpTest): ...@@ -620,7 +614,7 @@ class TestAnyOp(OpTest):
self.attrs = {'reduce_all': True} self.attrs = {'reduce_all': True}
def test_check_output(self): def test_check_output(self):
self.check_output(check_eager=True) self.check_output()
class TestAnyOp_ZeroDim(OpTest): class TestAnyOp_ZeroDim(OpTest):
...@@ -632,7 +626,7 @@ class TestAnyOp_ZeroDim(OpTest): ...@@ -632,7 +626,7 @@ class TestAnyOp_ZeroDim(OpTest):
self.attrs = {'dim': [], 'reduce_all': True} self.attrs = {'dim': [], 'reduce_all': True}
def test_check_output(self): def test_check_output(self):
self.check_output(check_eager=True) self.check_output()
class TestAny8DOp(OpTest): class TestAny8DOp(OpTest):
...@@ -648,7 +642,7 @@ class TestAny8DOp(OpTest): ...@@ -648,7 +642,7 @@ class TestAny8DOp(OpTest):
self.outputs = {'Out': self.inputs['X'].any(axis=self.attrs['dim'])} self.outputs = {'Out': self.inputs['X'].any(axis=self.attrs['dim'])}
def test_check_output(self): def test_check_output(self):
self.check_output(check_eager=True) self.check_output()
class TestAnyOpWithDim(OpTest): class TestAnyOpWithDim(OpTest):
...@@ -660,7 +654,7 @@ class TestAnyOpWithDim(OpTest): ...@@ -660,7 +654,7 @@ class TestAnyOpWithDim(OpTest):
self.outputs = {'Out': self.inputs['X'].any(axis=1)} self.outputs = {'Out': self.inputs['X'].any(axis=1)}
def test_check_output(self): def test_check_output(self):
self.check_output(check_eager=True) self.check_output()
class TestAny8DOpWithDim(OpTest): class TestAny8DOpWithDim(OpTest):
...@@ -676,7 +670,7 @@ class TestAny8DOpWithDim(OpTest): ...@@ -676,7 +670,7 @@ class TestAny8DOpWithDim(OpTest):
self.outputs = {'Out': self.inputs['X'].any(axis=self.attrs['dim'])} self.outputs = {'Out': self.inputs['X'].any(axis=self.attrs['dim'])}
def test_check_output(self): def test_check_output(self):
self.check_output(check_eager=True) self.check_output()
class TestAnyOpWithKeepDim(OpTest): class TestAnyOpWithKeepDim(OpTest):
...@@ -692,7 +686,7 @@ class TestAnyOpWithKeepDim(OpTest): ...@@ -692,7 +686,7 @@ class TestAnyOpWithKeepDim(OpTest):
} }
def test_check_output(self): def test_check_output(self):
self.check_output(check_eager=True) self.check_output()
class TestAny8DOpWithKeepDim(OpTest): class TestAny8DOpWithKeepDim(OpTest):
...@@ -712,7 +706,7 @@ class TestAny8DOpWithKeepDim(OpTest): ...@@ -712,7 +706,7 @@ class TestAny8DOpWithKeepDim(OpTest):
} }
def test_check_output(self): def test_check_output(self):
self.check_output(check_eager=True) self.check_output()
class TestAnyOpError(unittest.TestCase): class TestAnyOpError(unittest.TestCase):
...@@ -821,9 +815,14 @@ class Test3DReduce3(Test1DReduce): ...@@ -821,9 +815,14 @@ class Test3DReduce3(Test1DReduce):
} }
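# A second reduce_sum wrapper: it forwards axis/dtype/keepdim directly to
# paddle._C_ops.sum, matching the 'dim'/'keep_dim' attributes the test cases
# below set (inferred from usage).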
def reduce_sum_wrapper2(x, axis=[0], dtype=None, keepdim=False):
return paddle._C_ops.sum(x, axis, dtype, keepdim)
class Test8DReduce0(Test1DReduce): class Test8DReduce0(Test1DReduce):
def setUp(self): def setUp(self):
self.op_type = "reduce_sum" self.op_type = "reduce_sum"
self.python_api = reduce_sum_wrapper2
self.attrs = {'dim': (4, 2, 3)} self.attrs = {'dim': (4, 2, 3)}
self.inputs = { self.inputs = {
'X': np.random.random((2, 5, 3, 2, 2, 3, 4, 2)).astype("float64") 'X': np.random.random((2, 5, 3, 2, 2, 3, 4, 2)).astype("float64")
...@@ -854,9 +853,26 @@ class TestKeepDimReduce(Test1DReduce): ...@@ -854,9 +853,26 @@ class TestKeepDimReduce(Test1DReduce):
} }
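# The *ForEager cases below mirror existing reduce cases but route through
# reduce_sum_wrapper2, so keep_dim is also exercised under the eager harness
# (inferred from the class names).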
class TestKeepDimReduceForEager(Test1DReduce):
def setUp(self):
self.op_type = "reduce_sum"
self.python_api = reduce_sum_wrapper2
self.inputs = {'X': np.random.random((5, 6, 10)).astype("float64")}
self.attrs = {'dim': [1], 'keep_dim': True}
self.outputs = {
'Out': self.inputs['X'].sum(
axis=tuple(self.attrs['dim']), keepdims=self.attrs['keep_dim']
)
}
def test_check_grad(self):
self.check_grad(['X'], 'Out')
class TestKeepDim8DReduce(Test1DReduce): class TestKeepDim8DReduce(Test1DReduce):
def setUp(self): def setUp(self):
self.op_type = "reduce_sum" self.op_type = "reduce_sum"
self.python_api = reduce_sum_wrapper2
self.inputs = { self.inputs = {
'X': np.random.random((2, 5, 3, 2, 2, 3, 4, 2)).astype("float64") 'X': np.random.random((2, 5, 3, 2, 2, 3, 4, 2)).astype("float64")
} }
...@@ -893,14 +909,13 @@ class TestReduceMaxOpMultiAxises(OpTest): ...@@ -893,14 +909,13 @@ class TestReduceMaxOpMultiAxises(OpTest):
} }
def test_check_output(self): def test_check_output(self):
self.check_output(check_eager=True) self.check_output()
def test_check_grad(self): def test_check_grad(self):
# only the composite op supports gradient check of reduce_max # only the composite op supports gradient check of reduce_max
self.check_grad( self.check_grad(
['X'], ['X'],
'Out', 'Out',
check_eager=True,
check_prim=True, check_prim=True,
only_check_prim=True, only_check_prim=True,
) )
...@@ -923,7 +938,7 @@ class TestReduceMinOpMultiAxises(OpTest): ...@@ -923,7 +938,7 @@ class TestReduceMinOpMultiAxises(OpTest):
} }
def test_check_output(self): def test_check_output(self):
self.check_output(check_eager=True) self.check_output()
class TestKeepDimReduceSumMultiAxises(OpTest): class TestKeepDimReduceSumMultiAxises(OpTest):
...@@ -947,6 +962,25 @@ class TestKeepDimReduceSumMultiAxises(OpTest): ...@@ -947,6 +962,25 @@ class TestKeepDimReduceSumMultiAxises(OpTest):
self.check_grad(['X'], 'Out', check_prim=True) self.check_grad(['X'], 'Out', check_prim=True)
class TestKeepDimReduceSumMultiAxisesForEager(OpTest):
def setUp(self):
self.op_type = "reduce_sum"
self.python_api = reduce_sum_wrapper2
self.inputs = {'X': np.random.random((5, 6, 10)).astype("float64")}
self.attrs = {'dim': [-2, -1], 'keep_dim': True}
self.outputs = {
'Out': self.inputs['X'].sum(
axis=tuple(self.attrs['dim']), keepdims=True
)
}
def test_check_output(self):
self.check_output()
def test_check_grad(self):
self.check_grad(['X'], 'Out')
class TestReduceSumWithDimOne(OpTest): class TestReduceSumWithDimOne(OpTest):
def setUp(self): def setUp(self):
self.op_type = "reduce_sum" self.op_type = "reduce_sum"
...@@ -969,6 +1003,26 @@ class TestReduceSumWithDimOne(OpTest): ...@@ -969,6 +1003,26 @@ class TestReduceSumWithDimOne(OpTest):
self.check_grad(['X'], 'Out', check_prim=True) self.check_grad(['X'], 'Out', check_prim=True)
class TestReduceSumWithDimOneForEager(OpTest):
def setUp(self):
self.op_type = "reduce_sum"
self.python_api = reduce_sum_wrapper2
self.inputs = {'X': np.random.random((100, 1, 1)).astype("float64")}
self.attrs = {'dim': [1, 2], 'keep_dim': True}
self.outputs = {
'Out': self.inputs['X'].sum(
axis=tuple(self.attrs['dim']), keepdims=True
)
}
self.enable_cinn = True
def test_check_output(self):
self.check_output()
def test_check_grad(self):
self.check_grad(['X'], 'Out')
class TestReduceSumWithNumelOne(OpTest): class TestReduceSumWithNumelOne(OpTest):
def setUp(self): def setUp(self):
self.op_type = "reduce_sum" self.op_type = "reduce_sum"
......
...@@ -31,7 +31,7 @@ np.set_printoptions(threshold=np.inf) ...@@ -31,7 +31,7 @@ np.set_printoptions(threshold=np.inf)
paddle.enable_static() paddle.enable_static()
def rnn_warpper( def rnn_wrapper(
Input, Input,
PreState, PreState,
WeightList=None, WeightList=None,
...@@ -76,7 +76,7 @@ class TestRNNOp(OpTest): ...@@ -76,7 +76,7 @@ class TestRNNOp(OpTest):
def setUp(self): def setUp(self):
self.op_type = "rnn" self.op_type = "rnn"
self.python_api = rnn_warpper self.python_api = rnn_wrapper
self.python_out_sig = ["Out", "DropoutState", "State"] self.python_out_sig = ["Out", "DropoutState", "State"]
self.python_out_sig_sub_name = {"State": ["last_hidden", "last_cell"]} self.python_out_sig_sub_name = {"State": ["last_hidden", "last_cell"]}
self.dtype = np.float32 if core.is_compiled_with_rocm() else np.float64 self.dtype = np.float32 if core.is_compiled_with_rocm() else np.float64
......
...@@ -106,10 +106,10 @@ class TestRollBF16OP(TestRollOp): ...@@ -106,10 +106,10 @@ class TestRollBF16OP(TestRollOp):
self.place = core.CUDAPlace(0) self.place = core.CUDAPlace(0)
def test_check_output(self): def test_check_output(self):
self.check_output_with_place(self.place, check_eager=True) self.check_output_with_place(self.place)
def test_check_grad_normal(self): def test_check_grad_normal(self):
self.check_grad_with_place(self.place, ['X'], 'Out', check_eager=True) self.check_grad_with_place(self.place, ['X'], 'Out')
@unittest.skipIf( @unittest.skipIf(
...@@ -126,10 +126,10 @@ class TestRollBF16OpCase2(TestRollOp): ...@@ -126,10 +126,10 @@ class TestRollBF16OpCase2(TestRollOp):
self.place = core.CUDAPlace(0) self.place = core.CUDAPlace(0)
def test_check_output(self): def test_check_output(self):
self.check_output_with_place(self.place, check_eager=True) self.check_output_with_place(self.place)
def test_check_grad_normal(self): def test_check_grad_normal(self):
self.check_grad_with_place(self.place, ['X'], 'Out', check_eager=True) self.check_grad_with_place(self.place, ['X'], 'Out')
@unittest.skipIf( @unittest.skipIf(
...@@ -146,10 +146,10 @@ class TestRollBF16OpCase3(TestRollOp): ...@@ -146,10 +146,10 @@ class TestRollBF16OpCase3(TestRollOp):
self.place = core.CUDAPlace(0) self.place = core.CUDAPlace(0)
def test_check_output(self): def test_check_output(self):
self.check_output_with_place(self.place, check_eager=True) self.check_output_with_place(self.place)
def test_check_grad_normal(self): def test_check_grad_normal(self):
self.check_grad_with_place(self.place, ['X'], 'Out', check_eager=True) self.check_grad_with_place(self.place, ['X'], 'Out')
class TestRollAPI(unittest.TestCase): class TestRollAPI(unittest.TestCase):
......
...@@ -17,7 +17,7 @@ import unittest ...@@ -17,7 +17,7 @@ import unittest
import gradient_checker import gradient_checker
import numpy as np import numpy as np
from decorator_helper import prog_scope from decorator_helper import prog_scope
from op_test import OpTest, convert_float_to_uint16 from eager_op_test import OpTest, convert_float_to_uint16
import paddle import paddle
from paddle import fluid from paddle import fluid
...@@ -42,10 +42,10 @@ class TestScaleOp(OpTest): ...@@ -42,10 +42,10 @@ class TestScaleOp(OpTest):
pass pass
def test_check_output(self): def test_check_output(self):
self.check_output(check_eager=True) self.check_output()
def test_check_grad(self): def test_check_grad(self):
self.check_grad(['X'], 'Out', check_eager=True) self.check_grad(['X'], 'Out')
class TestScaleOpScaleVariable(OpTest): class TestScaleOpScaleVariable(OpTest):
...@@ -66,10 +66,10 @@ class TestScaleOpScaleVariable(OpTest): ...@@ -66,10 +66,10 @@ class TestScaleOpScaleVariable(OpTest):
pass pass
def test_check_output(self): def test_check_output(self):
self.check_output(check_eager=True) self.check_output()
def test_check_grad(self): def test_check_grad(self):
self.check_grad(['X'], 'Out', check_eager=True) self.check_grad(['X'], 'Out')
class TestScaleOpSelectedRows(unittest.TestCase): class TestScaleOpSelectedRows(unittest.TestCase):
...@@ -148,10 +148,10 @@ class TestScaleFp16Op(TestScaleOp): ...@@ -148,10 +148,10 @@ class TestScaleFp16Op(TestScaleOp):
self.dtype = np.float16 self.dtype = np.float16
def test_check_output(self): def test_check_output(self):
self.check_output(check_eager=True) self.check_output()
def test_check_grad(self): def test_check_grad(self):
self.check_grad(["X"], "Out", check_eager=True) self.check_grad(["X"], "Out")
class TestScaleBF16Op(OpTest): class TestScaleBF16Op(OpTest):
...@@ -166,14 +166,13 @@ class TestScaleBF16Op(OpTest): ...@@ -166,14 +166,13 @@ class TestScaleBF16Op(OpTest):
self.outputs = {'Out': convert_float_to_uint16(out)} self.outputs = {'Out': convert_float_to_uint16(out)}
def test_check_output(self): def test_check_output(self):
self.check_output(check_eager=True) self.check_output()
def test_check_grad(self): def test_check_grad(self):
self.check_grad( self.check_grad(
['X'], ['X'],
'Out', 'Out',
numeric_grad_delta=0.8, numeric_grad_delta=0.8,
check_eager=True,
) )
......
...@@ -15,7 +15,7 @@ ...@@ -15,7 +15,7 @@
import unittest import unittest
import numpy as np import numpy as np
from op_test import OpTest from eager_op_test import OpTest
import paddle import paddle
from paddle import fluid from paddle import fluid
...@@ -25,9 +25,19 @@ from paddle.fluid.op import Operator ...@@ -25,9 +25,19 @@ from paddle.fluid.op import Operator
paddle.enable_static() paddle.enable_static()
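# sgd_ is the in-place variant and returns None, which is why the test also
# sets python_out_sig = ['Out'] to tell the harness what to fetch (inferred).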
def sgd_wrapper(
param, learning_rate, grad, master_param=None, multi_precision=False
):
paddle._C_ops.sgd_(
param, learning_rate, grad, master_param, multi_precision
)
class TestSGDOp(OpTest): class TestSGDOp(OpTest):
def setUp(self): def setUp(self):
self.op_type = "sgd" self.op_type = "sgd"
self.python_api = sgd_wrapper
self.python_out_sig = ['Out']
self.conf() self.conf()
w = np.random.random((self.h, self.w)).astype("float32") w = np.random.random((self.h, self.w)).astype("float32")
g = np.random.random((self.h, self.w)).astype("float32") g = np.random.random((self.h, self.w)).astype("float32")
......
...@@ -21,7 +21,7 @@ import paddle ...@@ -21,7 +21,7 @@ import paddle
from paddle import fluid from paddle import fluid
from paddle.fluid import core from paddle.fluid import core
from paddle.fluid.op import Operator from paddle.fluid.op import Operator
from paddle.fluid.tests.unittests.op_test import ( from paddle.fluid.tests.unittests.eager_op_test import (
OpTest, OpTest,
OpTestTool, OpTestTool,
convert_float_to_uint16, convert_float_to_uint16,
......
...@@ -31,7 +31,7 @@ np.set_printoptions(threshold=np.inf) ...@@ -31,7 +31,7 @@ np.set_printoptions(threshold=np.inf)
paddle.enable_static() paddle.enable_static()
def rnn_warpper( def rnn_wrapper(
Input, Input,
PreState, PreState,
WeightList=None, WeightList=None,
...@@ -76,7 +76,7 @@ class TestSimpleRNNOp(OpTest): ...@@ -76,7 +76,7 @@ class TestSimpleRNNOp(OpTest):
def setUp(self): def setUp(self):
self.op_type = "rnn" self.op_type = "rnn"
self.python_api = rnn_warpper self.python_api = rnn_wrapper
self.python_out_sig = ["Out", "DropoutState", "State"] self.python_out_sig = ["Out", "DropoutState", "State"]
self.python_out_sig_sub_name = {"State": ["last_hidden"]} self.python_out_sig_sub_name = {"State": ["last_hidden"]}
......
...@@ -17,7 +17,7 @@ import unittest ...@@ -17,7 +17,7 @@ import unittest
import gradient_checker import gradient_checker
import numpy as np import numpy as np
from decorator_helper import prog_scope from decorator_helper import prog_scope
from op_test import OpTest, convert_float_to_uint16 from eager_op_test import OpTest, convert_float_to_uint16, paddle_static_guard
import paddle import paddle
from paddle import fluid from paddle import fluid
...@@ -27,6 +27,19 @@ from paddle.tensor.manipulation import tensor_array_to_tensor ...@@ -27,6 +27,19 @@ from paddle.tensor.manipulation import tensor_array_to_tensor
paddle.enable_static() paddle.enable_static()
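# paddle.slice takes plain starts/ends, while these tests feed the op-style
# StartsTensor/EndsTensor inputs plus infer_flags/decrease_axis attributes;
# the wrapper below adapts those arguments to paddle._C_ops.slice (inferred
# from the python_api reassignments in this file).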
def slice_wrapper(
Input,
axes=[],
StartsTensor=None,
EndsTensor=None,
infer_flags=[],
decrease_axis=[],
):
return paddle._C_ops.slice(
Input, axes, StartsTensor, EndsTensor, infer_flags, decrease_axis
)
# Situation 1: starts(list, no tensor), ends(list, no tensor) # Situation 1: starts(list, no tensor), ends(list, no tensor)
# 1.1 without attr(decrease) # 1.1 without attr(decrease)
class TestSliceOp(OpTest): class TestSliceOp(OpTest):
...@@ -148,73 +161,12 @@ class TestSliceOp_decs_dim(OpTest): ...@@ -148,73 +161,12 @@ class TestSliceOp_decs_dim(OpTest):
) )
class TestSliceOp_decs_dim_2(TestSliceOp_decs_dim):
def config(self):
self.enable_cinn = True
self.input = np.random.random([3, 4, 5, 6]).astype("float64")
self.starts = [1, 0, 2]
self.ends = [2, 1, 4]
self.axes = [0, 1, 2]
self.decrease_axis = [0, 1]
self.infer_flags = [1, 1, 1]
self.out = self.input[1, 0, 2:4, :]
class TestSliceOp_decs_dim_3(TestSliceOp_decs_dim):
def config(self):
self.enable_cinn = True
self.input = np.random.random([3, 4, 5, 6]).astype("float64")
self.starts = [-1, 0, 2]
self.ends = [1000000, 1, 4]
self.axes = [0, 1, 2]
self.decrease_axis = [0, 1]
self.infer_flags = [1, 1, 1]
self.out = self.input[-1, 0, 2:4, :]
class TestSliceOp_decs_dim_4(TestSliceOp_decs_dim):
def config(self):
self.enable_cinn = True
self.input = np.random.random([3, 4, 5, 7]).astype("float64")
self.starts = [0, 1, 2, 3]
self.ends = [1, 2, 3, 4]
self.axes = [0, 1, 2, 3]
self.decrease_axis = [0, 1, 2, 3]
self.infer_flags = [1, 1, 1]
self.out = self.input[0, 1, 2, 3:4]
class TestSliceOp_decs_dim_5(TestSliceOp_decs_dim):
def config(self):
self.enable_cinn = True
self.input = np.random.random([3, 4, 5, 6]).astype("float64")
self.starts = [-1]
self.ends = [1000000]
self.axes = [3]
self.decrease_axis = [3]
self.infer_flags = [1, 1, 1]
self.out = self.input[:, :, :, -1]
# test_6 with test_2 with test_3
class TestSliceOp_decs_dim_6(TestSliceOp_decs_dim):
def config(self):
self.enable_cinn = True
self.input = np.random.random([3, 4, 5, 6]).astype("float64")
self.starts = [0, 1, 2, 3]
self.ends = [1, 2, 3, 4]
self.axes = [0, 1, 2, 3]
self.decrease_axis = [0, 1, 2, 3]
self.infer_flags = [1, 1, 1]
self.out = self.input[0, 1, 2, 3:4]
# Situation 2: starts(list, have tensor), ends(list, no tensor) # Situation 2: starts(list, have tensor), ends(list, no tensor)
# without attr(decrease) # without attr(decrease)
class TestSliceOp_starts_ListTensor(OpTest): class TestSliceOp_starts_ListTensor(OpTest):
def setUp(self): def setUp(self):
self.op_type = "slice" self.op_type = "slice"
self.python_api = paddle.slice self.python_api = slice_wrapper
self.config() self.config()
starts_tensor = [] starts_tensor = []
...@@ -254,7 +206,7 @@ class TestSliceOp_starts_ListTensor(OpTest): ...@@ -254,7 +206,7 @@ class TestSliceOp_starts_ListTensor(OpTest):
class TestSliceOp_decs_dim_starts_ListTensor(OpTest): class TestSliceOp_decs_dim_starts_ListTensor(OpTest):
def setUp(self): def setUp(self):
self.op_type = "slice" self.op_type = "slice"
self.python_api = paddle.slice self.python_api = slice_wrapper
self.config() self.config()
starts_tensor = [] starts_tensor = []
...@@ -312,7 +264,7 @@ class TestSliceOp_decs_dim_5_starts_ListTensor( ...@@ -312,7 +264,7 @@ class TestSliceOp_decs_dim_5_starts_ListTensor(
class TestSliceOp_decs_dim_starts_OneTensor(OpTest): class TestSliceOp_decs_dim_starts_OneTensor(OpTest):
def setUp(self): def setUp(self):
self.op_type = "slice" self.op_type = "slice"
self.python_api = paddle.slice self.python_api = slice_wrapper
self.config() self.config()
self.inputs = { self.inputs = {
'Input': self.input, 'Input': self.input,
...@@ -348,7 +300,7 @@ class TestSliceOp_decs_dim_starts_OneTensor(OpTest): ...@@ -348,7 +300,7 @@ class TestSliceOp_decs_dim_starts_OneTensor(OpTest):
class TestSliceOp_starts_OneTensor_ends_OneTensor(OpTest): class TestSliceOp_starts_OneTensor_ends_OneTensor(OpTest):
def setUp(self): def setUp(self):
self.op_type = "slice" self.op_type = "slice"
self.python_api = paddle.slice self.python_api = slice_wrapper
self.config() self.config()
self.inputs = { self.inputs = {
...@@ -384,7 +336,7 @@ class TestSliceOp_starts_OneTensor_ends_OneTensor(OpTest): ...@@ -384,7 +336,7 @@ class TestSliceOp_starts_OneTensor_ends_OneTensor(OpTest):
class TestSliceOp_decs_dim_starts_and_ends_OneTensor(OpTest): class TestSliceOp_decs_dim_starts_and_ends_OneTensor(OpTest):
def setUp(self): def setUp(self):
self.op_type = "slice" self.op_type = "slice"
self.python_api = paddle.slice self.python_api = slice_wrapper
self.config() self.config()
self.inputs = { self.inputs = {
'Input': self.input, 'Input': self.input,
...@@ -421,7 +373,7 @@ class TestSliceOp_decs_dim_starts_and_ends_OneTensor(OpTest): ...@@ -421,7 +373,7 @@ class TestSliceOp_decs_dim_starts_and_ends_OneTensor(OpTest):
class TestSliceOp_starts_OneTensor_ends_ListTensor(OpTest): class TestSliceOp_starts_OneTensor_ends_ListTensor(OpTest):
def setUp(self): def setUp(self):
self.op_type = "slice" self.op_type = "slice"
self.python_api = paddle.slice self.python_api = slice_wrapper
self.config() self.config()
ends_tensor = [] ends_tensor = []
...@@ -587,57 +539,64 @@ class TestBF16(OpTest): ...@@ -587,57 +539,64 @@ class TestBF16(OpTest):
# Test python API # Test python API
class TestSliceAPI(unittest.TestCase): class TestSliceAPI(unittest.TestCase):
def test_1(self): def test_1(self):
input = np.random.random([3, 4, 5, 6]).astype("float64") with paddle_static_guard():
minus_1 = paddle.tensor.fill_constant([1], "int32", -1) input = np.random.random([3, 4, 5, 6]).astype("float64")
minus_3 = paddle.tensor.fill_constant([1], "int64", -3) minus_1 = paddle.tensor.fill_constant([1], "int32", -1)
starts = paddle.static.data( minus_3 = paddle.tensor.fill_constant([1], "int64", -3)
name='starts', shape=[1, 3], dtype="float32" starts = paddle.static.data(
) name='starts', shape=[1, 3], dtype="float32"
starts.desc.set_need_check_feed(False) )
ends = paddle.static.data(name='ends', shape=[3], dtype="float32") starts.desc.set_need_check_feed(False)
ends.desc.set_need_check_feed(False) ends = paddle.static.data(name='ends', shape=[3], dtype="float32")
x = paddle.static.data( ends.desc.set_need_check_feed(False)
name="x", x = paddle.static.data(
shape=[3, 4, 5, 6], name="x",
dtype="float64", shape=[3, 4, 5, 6],
) dtype="float64",
)
# value_int64 is greater than 2147483647, which is the max of int32 # value_int64 is greater than 2147483647, which is the max of int32
value_int64 = paddle.tensor.fill_constant([1], "int64", 2147483648) value_int64 = paddle.tensor.fill_constant([1], "int64", 2147483648)
out_1 = paddle.slice( out_1 = paddle.slice(
x, axes=[0, 1, 2], starts=[-3, 0, 2], ends=[value_int64, 100, -1] x,
) axes=[0, 1, 2],
out_2 = paddle.slice( starts=[-3, 0, 2],
x, axes=[0, 1, 3], starts=[minus_3, 0, 2], ends=[3, 100, -1] ends=[value_int64, 100, -1],
) )
out_3 = paddle.slice( out_2 = paddle.slice(
x, axes=[0, 1, 3], starts=[minus_3, 0, 2], ends=[3, 100, minus_1] x, axes=[0, 1, 3], starts=[minus_3, 0, 2], ends=[3, 100, -1]
) )
out_4 = paddle.slice(x, axes=[0, 1, 2], starts=starts, ends=ends) out_3 = paddle.slice(
x,
out_5 = x[-3:3, 0:100, 2:-1] axes=[0, 1, 3],
out_6 = x[minus_3:3, 0:100, :, 2:-1] starts=[minus_3, 0, 2],
out_7 = x[minus_1, 0:100, :, 2:minus_1] ends=[3, 100, minus_1],
)
exe = fluid.Executor(place=fluid.CPUPlace()) out_4 = paddle.slice(x, axes=[0, 1, 2], starts=starts, ends=ends)
res_1, res_2, res_3, res_4, res_5, res_6, res_7 = exe.run(
fluid.default_main_program(), out_5 = x[-3:3, 0:100, 2:-1]
feed={ out_6 = x[minus_3:3, 0:100, :, 2:-1]
"x": input, out_7 = x[minus_1, 0:100, :, 2:minus_1]
'starts': np.array([-3, 0, 2]).astype("int32"),
'ends': np.array([3, 100, -1]).astype("int32"), exe = fluid.Executor(place=fluid.CPUPlace())
}, res_1, res_2, res_3, res_4, res_5, res_6, res_7 = exe.run(
fetch_list=[out_1, out_2, out_3, out_4, out_5, out_6, out_7], fluid.default_main_program(),
) feed={
"x": input,
'starts': np.array([-3, 0, 2]).astype("int32"),
'ends': np.array([3, 100, -1]).astype("int32"),
},
fetch_list=[out_1, out_2, out_3, out_4, out_5, out_6, out_7],
)
assert np.array_equal(res_1, input[-3:3, 0:100, 2:-1, :]) assert np.array_equal(res_1, input[-3:3, 0:100, 2:-1, :])
assert np.array_equal(res_2, input[-3:3, 0:100, :, 2:-1]) assert np.array_equal(res_2, input[-3:3, 0:100, :, 2:-1])
assert np.array_equal(res_3, input[-3:3, 0:100, :, 2:-1]) assert np.array_equal(res_3, input[-3:3, 0:100, :, 2:-1])
assert np.array_equal(res_4, input[-3:3, 0:100, 2:-1, :]) assert np.array_equal(res_4, input[-3:3, 0:100, 2:-1, :])
assert np.array_equal(res_5, input[-3:3, 0:100, 2:-1, :]) assert np.array_equal(res_5, input[-3:3, 0:100, 2:-1, :])
assert np.array_equal(res_6, input[-3:3, 0:100, :, 2:-1]) assert np.array_equal(res_6, input[-3:3, 0:100, :, 2:-1])
assert np.array_equal(res_7, input[-1, 0:100, :, 2:-1]) assert np.array_equal(res_7, input[-1, 0:100, :, 2:-1])
class TestSliceApiWithTensor(unittest.TestCase): class TestSliceApiWithTensor(unittest.TestCase):
...@@ -718,60 +677,61 @@ class TestSliceApiWithLoDTensorArray(unittest.TestCase): ...@@ -718,60 +677,61 @@ class TestSliceApiWithLoDTensorArray(unittest.TestCase):
self.exe = fluid.Executor(self.place) self.exe = fluid.Executor(self.place)
def set_program_and_run(self, main_program, case_num): def set_program_and_run(self, main_program, case_num):
with fluid.program_guard(main_program): with paddle_static_guard():
x = [ with fluid.program_guard(main_program):
paddle.static.data( x = [
name='x0', shape=self.shape, dtype="float32" paddle.static.data(
), name='x0', shape=self.shape, dtype="float32"
paddle.static.data( ),
name='x1', shape=self.shape, dtype="float32" paddle.static.data(
), name='x1', shape=self.shape, dtype="float32"
paddle.static.data( ),
name='x2', shape=self.shape, dtype="float32" paddle.static.data(
), name='x2', shape=self.shape, dtype="float32"
] ),
]
for each_x in x:
each_x.stop_gradient = False for each_x in x:
each_x.stop_gradient = False
arr = paddle.tensor.create_array(dtype="float32")
for i in range(3): arr = paddle.tensor.create_array(dtype="float32")
idx = paddle.tensor.array_length(arr) for i in range(3):
arr = paddle.tensor.array_write(x=x[i], i=idx, array=arr) idx = paddle.tensor.array_length(arr)
arr = paddle.tensor.array_write(x=x[i], i=idx, array=arr)
if case_num == 1:
self.sliced_arr = output = arr[0] if case_num == 1:
self.sliced_arr = output = arr[0]
elif case_num == 2:
end = ( elif case_num == 2:
paddle.tensor.array_length(arr) - 1 end = (
) # dtype of end is int64 paddle.tensor.array_length(arr) - 1
self.sliced_arr = slice_arr = arr[self.start : end] ) # dtype of end is int64
output, _ = tensor_array_to_tensor( self.sliced_arr = slice_arr = arr[self.start : end]
slice_arr, axis=self.axis, use_stack=True output, _ = tensor_array_to_tensor(
) slice_arr, axis=self.axis, use_stack=True
elif case_num == 3: )
value_int64 = paddle.tensor.fill_constant( elif case_num == 3:
[1], "int64", 2147483648 value_int64 = paddle.tensor.fill_constant(
) [1], "int64", 2147483648
self.sliced_arr = slice_arr = arr[self.start : value_int64] )
output, _ = tensor_array_to_tensor( self.sliced_arr = slice_arr = arr[self.start : value_int64]
slice_arr, axis=self.axis, use_stack=True output, _ = tensor_array_to_tensor(
slice_arr, axis=self.axis, use_stack=True
)
loss = paddle.sum(output)
fluid.backward.append_backward(loss)
g_vars = list(
map(
main_program.global_block().var,
[each_x.name + "@GRAD" for each_x in x],
)
) )
self.out, self.g_x0, self.g_x1, self.g_x2 = self.exe.run(
loss = paddle.sum(output) main_program,
fluid.backward.append_backward(loss) feed={'x0': self.data, 'x1': self.data, 'x2': self.data},
g_vars = list( fetch_list=[output] + g_vars,
map(
main_program.global_block().var,
[each_x.name + "@GRAD" for each_x in x],
) )
)
self.out, self.g_x0, self.g_x1, self.g_x2 = self.exe.run(
main_program,
feed={'x0': self.data, 'x1': self.data, 'x2': self.data},
fetch_list=[output] + g_vars,
)
def test_case_1(self): def test_case_1(self):
main_program = fluid.Program() main_program = fluid.Program()
...@@ -785,35 +745,37 @@ class TestSliceApiWithLoDTensorArray(unittest.TestCase): ...@@ -785,35 +745,37 @@ class TestSliceApiWithLoDTensorArray(unittest.TestCase):
np.testing.assert_array_equal(self.g_x2, np.zeros_like(self.data)) np.testing.assert_array_equal(self.g_x2, np.zeros_like(self.data))
def test_case_2(self): def test_case_2(self):
main_program = fluid.Program() with paddle_static_guard():
self.set_program_and_run(main_program, 2) main_program = fluid.Program()
self.set_program_and_run(main_program, 2)
self.assertTrue( self.assertTrue(
self.sliced_arr.type == core.VarDesc.VarType.LOD_TENSOR_ARRAY self.sliced_arr.type == core.VarDesc.VarType.LOD_TENSOR_ARRAY
) )
self.assertEqual(self.sliced_arr.shape, self.shape) self.assertEqual(self.sliced_arr.shape, self.shape)
np.testing.assert_array_equal( np.testing.assert_array_equal(
self.out, np.stack([self.data, self.data], axis=self.axis) self.out, np.stack([self.data, self.data], axis=self.axis)
) )
np.testing.assert_array_equal(self.g_x0, np.ones_like(self.data)) np.testing.assert_array_equal(self.g_x0, np.ones_like(self.data))
np.testing.assert_array_equal(self.g_x1, np.ones_like(self.data)) np.testing.assert_array_equal(self.g_x1, np.ones_like(self.data))
np.testing.assert_array_equal(self.g_x2, np.zeros_like(self.data)) np.testing.assert_array_equal(self.g_x2, np.zeros_like(self.data))
def test_case_3(self): def test_case_3(self):
main_program = fluid.Program() with paddle_static_guard():
self.set_program_and_run(main_program, 3) main_program = fluid.Program()
self.set_program_and_run(main_program, 3)
self.assertTrue( self.assertTrue(
self.sliced_arr.type == core.VarDesc.VarType.LOD_TENSOR_ARRAY self.sliced_arr.type == core.VarDesc.VarType.LOD_TENSOR_ARRAY
) )
self.assertEqual(self.sliced_arr.shape, self.shape) self.assertEqual(self.sliced_arr.shape, self.shape)
np.testing.assert_array_equal( np.testing.assert_array_equal(
self.out, self.out,
np.stack([self.data, self.data, self.data], axis=self.axis), np.stack([self.data, self.data, self.data], axis=self.axis),
) )
np.testing.assert_array_equal(self.g_x0, np.ones_like(self.data)) np.testing.assert_array_equal(self.g_x0, np.ones_like(self.data))
np.testing.assert_array_equal(self.g_x1, np.ones_like(self.data)) np.testing.assert_array_equal(self.g_x1, np.ones_like(self.data))
np.testing.assert_array_equal(self.g_x2, np.ones_like(self.data)) np.testing.assert_array_equal(self.g_x2, np.ones_like(self.data))
class TestImperativeVarBaseGetItem(unittest.TestCase): class TestImperativeVarBaseGetItem(unittest.TestCase):
...@@ -964,12 +926,12 @@ class TestSliceDoubleGradCheck(unittest.TestCase): ...@@ -964,12 +926,12 @@ class TestSliceDoubleGradCheck(unittest.TestCase):
) )
def test_grad(self): def test_grad(self):
paddle.enable_static() with paddle_static_guard():
places = [fluid.CPUPlace()] places = [fluid.CPUPlace()]
if core.is_compiled_with_cuda(): if core.is_compiled_with_cuda():
places.append(fluid.CUDAPlace(0)) places.append(fluid.CUDAPlace(0))
for p in places: for p in places:
self.func(p) self.func(p)
class TestSliceTripleGradCheck(unittest.TestCase): class TestSliceTripleGradCheck(unittest.TestCase):
...@@ -999,12 +961,12 @@ class TestSliceTripleGradCheck(unittest.TestCase): ...@@ -999,12 +961,12 @@ class TestSliceTripleGradCheck(unittest.TestCase):
) )
def test_grad(self): def test_grad(self):
paddle.enable_static() with paddle_static_guard():
places = [fluid.CPUPlace()] places = [fluid.CPUPlace()]
if core.is_compiled_with_cuda(): if core.is_compiled_with_cuda():
places.append(fluid.CUDAPlace(0)) places.append(fluid.CUDAPlace(0))
for p in places: for p in places:
self.func(p) self.func(p)
if __name__ == '__main__': if __name__ == '__main__':
......
...@@ -15,7 +15,7 @@ ...@@ -15,7 +15,7 @@
import unittest import unittest
import numpy as np import numpy as np
from op_test import OpTest from eager_op_test import OpTest
import paddle import paddle
from paddle import fluid, incubate from paddle import fluid, incubate
...@@ -42,6 +42,7 @@ def _get_softmax(x, mask, fp16=True): ...@@ -42,6 +42,7 @@ def _get_softmax(x, mask, fp16=True):
class TestSoftmaxMaskFuseOp(OpTest): class TestSoftmaxMaskFuseOp(OpTest):
def setUp(self): def setUp(self):
self.op_type = "fused_softmax_mask" self.op_type = "fused_softmax_mask"
self.python_api = paddle.incubate.softmax_mask_fuse
x = np.random.random((1, 1, 8, 32)) x = np.random.random((1, 1, 8, 32))
mask = np.random.randint(0, 2, (1, 1, 8, 32)) mask = np.random.randint(0, 2, (1, 1, 8, 32))
mask_input = np.where(mask == 1, -10000.0, mask) mask_input = np.where(mask == 1, -10000.0, mask)
...@@ -68,6 +69,7 @@ class TestSoftmaxMaskFuseOp(OpTest): ...@@ -68,6 +69,7 @@ class TestSoftmaxMaskFuseOp(OpTest):
class TestSoftmaxMaskFuseOp0(OpTest): class TestSoftmaxMaskFuseOp0(OpTest):
def setUp(self): def setUp(self):
self.op_type = "fused_softmax_mask" self.op_type = "fused_softmax_mask"
self.python_api = paddle.incubate.softmax_mask_fuse
x = np.random.random((1, 1, 8, 32)).astype("float16") x = np.random.random((1, 1, 8, 32)).astype("float16")
mask = np.random.randint(0, 2, (1, 1, 8, 32)).astype("float16") mask = np.random.randint(0, 2, (1, 1, 8, 32)).astype("float16")
mask_input = np.where(mask == 1, -10000.0, mask) mask_input = np.where(mask == 1, -10000.0, mask)
......
...@@ -15,8 +15,8 @@ ...@@ -15,8 +15,8 @@
import unittest import unittest
import numpy as np import numpy as np
from eager_op_test import OpTest
from numpy.lib.stride_tricks import as_strided from numpy.lib.stride_tricks import as_strided
from op_test import OpTest
import paddle import paddle
...@@ -80,12 +80,12 @@ class TestStftOp(OpTest): ...@@ -80,12 +80,12 @@ class TestStftOp(OpTest):
def test_check_output(self): def test_check_output(self):
paddle.enable_static() paddle.enable_static()
self.check_output(check_eager=True) self.check_output()
paddle.disable_static() paddle.disable_static()
def test_check_grad_normal(self): def test_check_grad_normal(self):
paddle.enable_static() paddle.enable_static()
self.check_grad(['X'], 'Out', check_eager=True) self.check_grad(['X'], 'Out', check_dygraph=False)
paddle.disable_static() paddle.disable_static()
......
...@@ -15,7 +15,7 @@ ...@@ -15,7 +15,7 @@
import unittest import unittest
import numpy as np import numpy as np
from op_test import OpTest from eager_op_test import OpTest
import paddle import paddle
from paddle import fluid from paddle import fluid
...@@ -96,10 +96,10 @@ class TestStrideSliceOp(OpTest): ...@@ -96,10 +96,10 @@ class TestStrideSliceOp(OpTest):
} }
def test_check_output(self): def test_check_output(self):
self.check_output(check_eager=True) self.check_output()
def test_check_grad(self): def test_check_grad(self):
self.check_grad({'Input'}, 'Out', check_eager=True) self.check_grad({'Input'}, 'Out')
def initTestCase(self): def initTestCase(self):
self.input = np.random.rand(100) self.input = np.random.rand(100)
...@@ -318,6 +318,7 @@ class TestStrideSliceOpBool6D(TestStrideSliceOpBool): ...@@ -318,6 +318,7 @@ class TestStrideSliceOpBool6D(TestStrideSliceOpBool):
class TestStridedSliceOp_starts_ListTensor(OpTest): class TestStridedSliceOp_starts_ListTensor(OpTest):
def setUp(self): def setUp(self):
self.op_type = "strided_slice" self.op_type = "strided_slice"
self.python_api = paddle.strided_slice
self.config() self.config()
starts_tensor = [] starts_tensor = []
...@@ -359,6 +360,7 @@ class TestStridedSliceOp_starts_ListTensor(OpTest): ...@@ -359,6 +360,7 @@ class TestStridedSliceOp_starts_ListTensor(OpTest):
class TestStridedSliceOp_ends_ListTensor(OpTest): class TestStridedSliceOp_ends_ListTensor(OpTest):
def setUp(self): def setUp(self):
self.op_type = "strided_slice" self.op_type = "strided_slice"
self.python_api = paddle.strided_slice
self.config() self.config()
ends_tensor = [] ends_tensor = []
...@@ -400,6 +402,7 @@ class TestStridedSliceOp_ends_ListTensor(OpTest): ...@@ -400,6 +402,7 @@ class TestStridedSliceOp_ends_ListTensor(OpTest):
class TestStridedSliceOp_starts_Tensor(OpTest): class TestStridedSliceOp_starts_Tensor(OpTest):
def setUp(self): def setUp(self):
self.op_type = "strided_slice" self.op_type = "strided_slice"
self.python_api = paddle.strided_slice
self.config() self.config()
self.inputs = { self.inputs = {
'Input': self.input, 'Input': self.input,
...@@ -435,6 +438,7 @@ class TestStridedSliceOp_starts_Tensor(OpTest): ...@@ -435,6 +438,7 @@ class TestStridedSliceOp_starts_Tensor(OpTest):
class TestStridedSliceOp_ends_Tensor(OpTest): class TestStridedSliceOp_ends_Tensor(OpTest):
def setUp(self): def setUp(self):
self.op_type = "strided_slice" self.op_type = "strided_slice"
self.python_api = paddle.strided_slice
self.config() self.config()
self.inputs = { self.inputs = {
'Input': self.input, 'Input': self.input,
...@@ -476,6 +480,7 @@ class TestStridedSliceOp_listTensor_Tensor(OpTest): ...@@ -476,6 +480,7 @@ class TestStridedSliceOp_listTensor_Tensor(OpTest):
("x" + str(index), np.ones((1)).astype('int32') * ele) ("x" + str(index), np.ones((1)).astype('int32') * ele)
) )
self.op_type = "strided_slice" self.op_type = "strided_slice"
self.python_api = paddle.strided_slice
self.inputs = { self.inputs = {
'Input': self.input, 'Input': self.input,
...@@ -512,6 +517,7 @@ class TestStridedSliceOp_listTensor_Tensor(OpTest): ...@@ -512,6 +517,7 @@ class TestStridedSliceOp_listTensor_Tensor(OpTest):
class TestStridedSliceOp_strides_Tensor(OpTest): class TestStridedSliceOp_strides_Tensor(OpTest):
def setUp(self): def setUp(self):
self.op_type = "strided_slice" self.op_type = "strided_slice"
self.python_api = paddle.strided_slice
self.config() self.config()
self.inputs = { self.inputs = {
'Input': self.input, 'Input': self.input,
...@@ -551,7 +557,7 @@ class TestStridedSliceAPI(unittest.TestCase): ...@@ -551,7 +557,7 @@ class TestStridedSliceAPI(unittest.TestCase):
minus_1 = paddle.tensor.fill_constant([1], "int32", -1) minus_1 = paddle.tensor.fill_constant([1], "int32", -1)
minus_3 = paddle.tensor.fill_constant([1], "int32", -3) minus_3 = paddle.tensor.fill_constant([1], "int32", -3)
starts = paddle.static.data(name='starts', shape=[3], dtype='int32') starts = paddle.static.data(name='starts', shape=[3], dtype='int32')
ends = paddle.static.data(name='ends', shape=[3], dtype='int32') ends = paddle.static.data(name='ends', shape=[3], dtype='int64')
strides = paddle.static.data(name='strides', shape=[3], dtype='int32') strides = paddle.static.data(name='strides', shape=[3], dtype='int32')
x = paddle.static.data( x = paddle.static.data(
...@@ -971,6 +977,7 @@ class TestStridedSliceTensorArray(unittest.TestCase): ...@@ -971,6 +977,7 @@ class TestStridedSliceTensorArray(unittest.TestCase):
class TestStridedSliceFloat16(unittest.TestCase): class TestStridedSliceFloat16(unittest.TestCase):
def init_test_case(self): def init_test_case(self):
self.op_type = 'strided_slice' self.op_type = 'strided_slice'
self.python_api = paddle.strided_slice
self.input_shape = [3, 3, 3, 6, 7, 8] self.input_shape = [3, 3, 3, 6, 7, 8]
self.axes = [0, 1, 2, 3, 4, 5] self.axes = [0, 1, 2, 3, 4, 5]
self.starts = [1, 0, 0, 0, 1, 2] self.starts = [1, 0, 0, 0, 1, 2]
......
...@@ -24,7 +24,7 @@ import unittest ...@@ -24,7 +24,7 @@ import unittest
import numpy as np import numpy as np
from decorator_helper import prog_scope from decorator_helper import prog_scope
from op_test import OpTest, _set_use_system_allocator from eager_op_test import OpTest, _set_use_system_allocator
import paddle import paddle
from paddle import fluid, nn from paddle import fluid, nn
......
...@@ -41,12 +41,16 @@ def temporal_shift(x, seg_num, shift_ratio, data_format): ...@@ -41,12 +41,16 @@ def temporal_shift(x, seg_num, shift_ratio, data_format):
return out return out
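# paddle.nn.functional.temporal_shift places a 'name' argument before
# data_format, so a thin wrapper matching the op's attribute order is used
# as python_api instead (inferred).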
def wrapper_temporal_shift(x, seg_num, shift_ratio=0.25, data_format="NCHW"):
return paddle._C_ops.temporal_shift(x, seg_num, shift_ratio, data_format)
class TestTemporalShift(OpTest): class TestTemporalShift(OpTest):
def setUp(self): def setUp(self):
self.initTestCase() self.initTestCase()
self.init_dtype() self.init_dtype()
self.op_type = 'temporal_shift' self.op_type = 'temporal_shift'
self.python_api = paddle.nn.functional.temporal_shift self.python_api = wrapper_temporal_shift
x = np.random.random(self.x_shape).astype(self.dtype) x = np.random.random(self.x_shape).astype(self.dtype)
self.attrs = { self.attrs = {
...@@ -198,7 +202,7 @@ class TestTemporalShiftBF16(OpTest): ...@@ -198,7 +202,7 @@ class TestTemporalShiftBF16(OpTest):
def setUp(self): def setUp(self):
self.initTestCase() self.initTestCase()
self.op_type = 'temporal_shift' self.op_type = 'temporal_shift'
self.python_api = paddle.nn.functional.temporal_shift self.python_api = wrapper_temporal_shift
x = np.random.random(self.x_shape).astype(np.float32) x = np.random.random(self.x_shape).astype(np.float32)
......
...@@ -15,7 +15,11 @@ ...@@ -15,7 +15,11 @@
import unittest import unittest
import numpy as np import numpy as np
from op_test import OpTest, convert_float_to_uint16, convert_uint16_to_float from eager_op_test import (
OpTest,
convert_float_to_uint16,
convert_uint16_to_float,
)
import paddle import paddle
from paddle.fluid import core from paddle.fluid import core
...@@ -33,7 +37,7 @@ class TestTransferDtypeOpFp32ToFp64(OpTest): ...@@ -33,7 +37,7 @@ class TestTransferDtypeOpFp32ToFp64(OpTest):
self.op_type = 'transfer_dtype' self.op_type = 'transfer_dtype'
def test_check_output(self): def test_check_output(self):
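# transfer_dtype is an internal static-graph op with no dygraph binding, so
# these checks pass check_dygraph=False (assumption; the PR does not state
# the reason).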
self.check_output() self.check_output(check_dygraph=False)
class TestTransferDtypeOpFp16ToFp32(OpTest): class TestTransferDtypeOpFp16ToFp32(OpTest):
...@@ -48,7 +52,7 @@ class TestTransferDtypeOpFp16ToFp32(OpTest): ...@@ -48,7 +52,7 @@ class TestTransferDtypeOpFp16ToFp32(OpTest):
self.op_type = 'transfer_dtype' self.op_type = 'transfer_dtype'
def test_check_output(self): def test_check_output(self):
self.check_output(atol=1e-3) self.check_output(atol=1e-3, check_dygraph=False)
class TestTransferDtypeOpFp32ToFp16(OpTest): class TestTransferDtypeOpFp32ToFp16(OpTest):
...@@ -63,7 +67,7 @@ class TestTransferDtypeOpFp32ToFp16(OpTest): ...@@ -63,7 +67,7 @@ class TestTransferDtypeOpFp32ToFp16(OpTest):
self.op_type = 'transfer_dtype' self.op_type = 'transfer_dtype'
def test_check_output(self): def test_check_output(self):
self.check_output(atol=1e-3) self.check_output(atol=1e-3, check_dygraph=False)
class TestTransferDtypeOpBf16ToFp32(OpTest): class TestTransferDtypeOpBf16ToFp32(OpTest):
...@@ -78,7 +82,7 @@ class TestTransferDtypeOpBf16ToFp32(OpTest): ...@@ -78,7 +82,7 @@ class TestTransferDtypeOpBf16ToFp32(OpTest):
self.op_type = 'transfer_dtype' self.op_type = 'transfer_dtype'
def test_check_output(self): def test_check_output(self):
self.check_output() self.check_output(check_dygraph=False)
class TestTransferDtypeFp32ToBf16(OpTest): class TestTransferDtypeFp32ToBf16(OpTest):
...@@ -93,7 +97,7 @@ class TestTransferDtypeFp32ToBf16(OpTest): ...@@ -93,7 +97,7 @@ class TestTransferDtypeFp32ToBf16(OpTest):
self.op_type = 'transfer_dtype' self.op_type = 'transfer_dtype'
def test_check_output(self): def test_check_output(self):
self.check_output() self.check_output(check_dygraph=False)
if __name__ == '__main__': if __name__ == '__main__':
......
...@@ -15,7 +15,7 @@ ...@@ -15,7 +15,7 @@
import unittest import unittest
import numpy as np import numpy as np
from op_test import OpTest from eager_op_test import OpTest, paddle_static_guard
import paddle import paddle
from paddle.fluid import core from paddle.fluid import core
...@@ -27,9 +27,9 @@ class TestUniqueOp(OpTest): ...@@ -27,9 +27,9 @@ class TestUniqueOp(OpTest):
self.init_config() self.init_config()
def test_check_output(self): def test_check_output(self):
paddle.enable_static() self.check_output(
self.check_output() check_dygraph=False
paddle.disable_static() ) # unique returns sorted data in dygraph
def init_config(self): def init_config(self):
self.inputs = { self.inputs = {
...@@ -73,19 +73,20 @@ class TestRandom(TestUniqueOp): ...@@ -73,19 +73,20 @@ class TestRandom(TestUniqueOp):
class TestUniqueRaiseError(unittest.TestCase): class TestUniqueRaiseError(unittest.TestCase):
def test_errors(self): def test_errors(self):
paddle.enable_static() with paddle_static_guard():
def test_type(): def test_type():
paddle.unique([10]) paddle.unique([10])
self.assertRaises(TypeError, test_type) self.assertRaises(TypeError, test_type)
def test_dtype(): def test_dtype():
data = paddle.static.data(shape=[10], dtype="float16", name="input") data = paddle.static.data(
paddle.unique(data) shape=[10], dtype="float16", name="input"
)
paddle.unique(data)
self.assertRaises(TypeError, test_dtype) self.assertRaises(TypeError, test_dtype)
paddle.disable_static()
@unittest.skipIf( @unittest.skipIf(
...@@ -104,10 +105,10 @@ class TestOneGPU(TestUniqueOp): ...@@ -104,10 +105,10 @@ class TestOneGPU(TestUniqueOp):
def test_check_output(self): def test_check_output(self):
if core.is_compiled_with_cuda(): if core.is_compiled_with_cuda():
paddle.enable_static()
place = core.CUDAPlace(0) place = core.CUDAPlace(0)
self.check_output_with_place(place, atol=1e-5) self.check_output_with_place(
paddle.disable_static() place, atol=1e-5, check_dygraph=False
) # unique return sorted data in dygraph
@unittest.skipIf( @unittest.skipIf(
...@@ -131,10 +132,10 @@ class TestRandomGPU(TestUniqueOp): ...@@ -131,10 +132,10 @@ class TestRandomGPU(TestUniqueOp):
def test_check_output(self): def test_check_output(self):
if core.is_compiled_with_cuda(): if core.is_compiled_with_cuda():
paddle.enable_static()
place = core.CUDAPlace(0) place = core.CUDAPlace(0)
self.check_output_with_place(place, atol=1e-5) self.check_output_with_place(
paddle.disable_static() place, atol=1e-5, check_dygraph=False
) # unique return sorted data in dygraph
class TestSortedUniqueOp(TestUniqueOp): class TestSortedUniqueOp(TestUniqueOp):
...@@ -243,6 +244,7 @@ class TestUniqueOpAxis1(TestUniqueOp): ...@@ -243,6 +244,7 @@ class TestUniqueOpAxis1(TestUniqueOp):
class TestUniqueAPI(unittest.TestCase): class TestUniqueAPI(unittest.TestCase):
def test_dygraph_api_out(self): def test_dygraph_api_out(self):
paddle.disable_static()
x_data = x_data = np.random.randint(0, 10, (120)) x_data = x_data = np.random.randint(0, 10, (120))
x = paddle.to_tensor(x_data) x = paddle.to_tensor(x_data)
out = paddle.unique(x) out = paddle.unique(x)
...@@ -250,6 +252,7 @@ class TestUniqueAPI(unittest.TestCase): ...@@ -250,6 +252,7 @@ class TestUniqueAPI(unittest.TestCase):
self.assertTrue((out.numpy() == expected_out).all(), True) self.assertTrue((out.numpy() == expected_out).all(), True)
def test_dygraph_api_attr(self): def test_dygraph_api_attr(self):
paddle.disable_static()
x_data = np.random.random((3, 5, 5)).astype("float32") x_data = np.random.random((3, 5, 5)).astype("float32")
x = paddle.to_tensor(x_data) x = paddle.to_tensor(x_data)
out, index, inverse, counts = paddle.unique( out, index, inverse, counts = paddle.unique(
...@@ -272,6 +275,7 @@ class TestUniqueAPI(unittest.TestCase): ...@@ -272,6 +275,7 @@ class TestUniqueAPI(unittest.TestCase):
self.assertTrue((counts.numpy() == np_counts).all(), True) self.assertTrue((counts.numpy() == np_counts).all(), True)
def test_dygraph_attr_dtype(self): def test_dygraph_attr_dtype(self):
paddle.disable_static()
x_data = x_data = np.random.randint(0, 10, (120)) x_data = x_data = np.random.randint(0, 10, (120))
x = paddle.to_tensor(x_data) x = paddle.to_tensor(x_data)
out, indices, inverse, counts = paddle.unique( out, indices, inverse, counts = paddle.unique(
...@@ -290,69 +294,62 @@ class TestUniqueAPI(unittest.TestCase): ...@@ -290,69 +294,62 @@ class TestUniqueAPI(unittest.TestCase):
self.assertTrue((counts.numpy() == np_counts).all(), True) self.assertTrue((counts.numpy() == np_counts).all(), True)
def test_static_graph(self): def test_static_graph(self):
paddle.enable_static() with paddle_static_guard():
with paddle.static.program_guard( with paddle.static.program_guard(
paddle.static.Program(), paddle.static.Program() paddle.static.Program(), paddle.static.Program()
): ):
x = paddle.static.data(name='x', shape=[3, 2], dtype='float64') x = paddle.static.data(name='x', shape=[3, 2], dtype='float64')
unique, inverse, counts = paddle.unique( unique, inverse, counts = paddle.unique(
x, return_inverse=True, return_counts=True, axis=0 x, return_inverse=True, return_counts=True, axis=0
) )
place = paddle.CPUPlace() place = paddle.CPUPlace()
exe = paddle.static.Executor(place) exe = paddle.static.Executor(place)
x_np = np.array([[1, 2], [3, 4], [1, 2]]).astype('float64') x_np = np.array([[1, 2], [3, 4], [1, 2]]).astype('float64')
result = exe.run( result = exe.run(
feed={"x": x_np}, fetch_list=[unique, inverse, counts] feed={"x": x_np}, fetch_list=[unique, inverse, counts]
) )
np_unique, np_inverse, np_counts = np.unique(
x_np, return_inverse=True, return_counts=True, axis=0
)
np.testing.assert_allclose(result[0], np_unique, rtol=1e-05)
np.testing.assert_allclose(result[1], np_inverse, rtol=1e-05)
np.testing.assert_allclose(result[2], np_counts, rtol=1e-05)
paddle.disable_static()
class TestUniqueError(unittest.TestCase): class TestUniqueError(unittest.TestCase):
def test_input_dtype(self): def test_input_dtype(self):
def test_x_dtype(): def test_x_dtype():
with paddle.static.program_guard( with paddle_static_guard():
paddle.static.Program(), paddle.static.Program() with paddle.static.program_guard(
): paddle.static.Program(), paddle.static.Program()
x = paddle.static.data( ):
name='x', shape=[10, 10], dtype='float16' x = paddle.static.data(
) name='x', shape=[10, 10], dtype='float16'
result = paddle.unique(x) )
result = paddle.unique(x)
self.assertRaises(TypeError, test_x_dtype) self.assertRaises(TypeError, test_x_dtype)
def test_attr(self): def test_attr(self):
paddle.enable_static() with paddle_static_guard():
x = paddle.static.data(name='x', shape=[10, 10], dtype='float64') x = paddle.static.data(name='x', shape=[10, 10], dtype='float64')
def test_return_index(): def test_return_index():
result = paddle.unique(x, return_index=0) result = paddle.unique(x, return_index=0)
self.assertRaises(TypeError, test_return_index) self.assertRaises(TypeError, test_return_index)
def test_return_inverse(): def test_return_inverse():
result = paddle.unique(x, return_inverse='s') result = paddle.unique(x, return_inverse='s')
self.assertRaises(TypeError, test_return_inverse) self.assertRaises(TypeError, test_return_inverse)
def test_return_counts(): def test_return_counts():
result = paddle.unique(x, return_counts=3) result = paddle.unique(x, return_counts=3)
self.assertRaises(TypeError, test_return_counts) self.assertRaises(TypeError, test_return_counts)
def test_axis(): def test_axis():
result = paddle.unique(x, axis='12') result = paddle.unique(x, axis='12')
def test_dtype(): def test_dtype():
result = paddle.unique(x, dtype='float64') result = paddle.unique(x, dtype='float64')
self.assertRaises(TypeError, test_axis) self.assertRaises(TypeError, test_axis)
paddle.disable_static()
if __name__ == "__main__": if __name__ == "__main__":
......
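Note on paddle_static_guard, imported from eager_op_test above: it replaces the paired paddle.enable_static() / paddle.disable_static() calls. As a context manager it restores dygraph mode even when the guarded block raises, which the old pairing did not when an assertion failed mid-test. A hedged sketch of what such a guard can look like; the real helper lives in eager_op_test and may differ in detail:

import contextlib
import paddle

@contextlib.contextmanager
def static_guard():  # illustrative stand-in for paddle_static_guard
    paddle.enable_static()
    try:
        yield
    finally:
        # runs even if the body raises, so dygraph mode is always restored
        paddle.disable_static()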
@@ -15,7 +15,7 @@
 import unittest
 import numpy as np
-from op_test import OpTest
+from eager_op_test import OpTest, paddle_static_guard
 import paddle
 from paddle.fluid import core
@@ -81,16 +81,15 @@ class TestRandom(TestUniqueWithCountsOp):
 class TestUniqueWithCountsRaiseError(unittest.TestCase):
     def test_errors(self):
-        def test_type():
-            paddle.unique([10])
-        self.assertRaises(TypeError, test_type)
-        def test_dtype():
-            data = paddle.static.data(shape=[10], dtype="float16", name="input")
-            paddle.unique(data)
-        self.assertRaises(TypeError, test_dtype)
+        with paddle_static_guard():
+            def test_dtype():
+                data = paddle.static.data(
+                    shape=[10], dtype="float16", name="input"
+                )
+                paddle.unique(data)
+            self.assertRaises(TypeError, test_dtype)
 @unittest.skipIf(
...
@@ -16,15 +16,13 @@ import sys
 import unittest
 import numpy as np
-from op_test import OpTest
+from eager_op_test import OpTest
 from test_softmax_op import stable_softmax
 import paddle
 import paddle.nn.functional as F
 from paddle.fluid import Program, core, program_guard
-paddle.enable_static()
 CUDA_BLOCK_SIZE = 32
@@ -206,6 +204,19 @@ class CTCForward:
         return self.loss
+def warpctc_wrapper(
+    Logits,
+    Label,
+    LogitsLength=None,
+    LabelLength=None,
+    blank=0,
+    norm_by_times=False,
+):
+    return paddle._C_ops.warpctc(
+        Logits, Label, LogitsLength, LabelLength, blank, norm_by_times
+    )
 class TestWarpCTCOp(OpTest):
     def config(self):
         self.batch_size = 4
@@ -217,6 +228,8 @@ class TestWarpCTCOp(OpTest):
     def setUp(self):
         self.op_type = "warpctc"
+        self.python_api = warpctc_wrapper
+        self.python_out_sig = ["Loss"]
         self.config()
         logits = np.random.uniform(
@@ -304,6 +317,7 @@ class TestWarpCTCOpWithPadding(OpTest):
     def setUp(self):
         self.op_type = "warpctc"
+        self.python_api = warpctc_wrapper
         self.python_out_sig = ["Loss"]
         self.config()
@@ -380,7 +394,7 @@ class TestWarpCTCOpWithPadding(OpTest):
         }
     def test_check_output(self):
-        self.check_output(check_eager=False)
+        self.check_output()
     def test_check_grad(self):
         self.outputs['WarpCTCGrad'] = self.gradient
@@ -425,6 +439,7 @@ class TestWarpCTCOpFp64(OpTest):
     def setUp(self):
         self.op_type = "warpctc"
+        self.python_api = warpctc_wrapper
         self.python_out_sig = ["Loss"]
         self.config()
@@ -501,11 +516,11 @@ class TestWarpCTCOpFp64(OpTest):
         }
     def test_check_output(self):
-        self.check_output(check_eager=False)
+        self.check_output()
     def test_check_grad(self):
         self.outputs['WarpCTCGrad'] = self.gradient
-        self.check_grad(["Logits"], "Loss", check_eager=False)
+        self.check_grad(["Logits"], "Loss")
 class TestWarpCTCOpError(unittest.TestCase):
...
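Note on warpctc_wrapper above: the eager test harness compares the compiled kernel against a Python callable (self.python_api), and warpctc's legacy input names (Logits, Label, LogitsLength, LabelLength) have no one-to-one public API, so the test defines a thin wrapper that forwards them to paddle._C_ops.warpctc in the op's argument order. python_out_sig = ["Loss"] tells the harness to compare only the Loss output and ignore the auxiliary WarpCTCGrad. The same pattern appears for yolo_box below. The general shape of the pattern, with hypothetical names throughout; only the structure mirrors this diff:

import paddle
from eager_op_test import OpTest

def my_op_wrapper(X, alpha=1.0):  # hypothetical wrapper
    # forwards old-style op inputs/attrs to the eager C++ entry point;
    # my_op is a placeholder, not a real paddle._C_ops function
    return paddle._C_ops.my_op(X, alpha)

class TestMyOp(OpTest):  # hypothetical test class
    def setUp(self):
        self.op_type = "my_op"
        self.python_api = my_op_wrapper   # called by the harness in eager mode
        self.python_out_sig = ["Out"]     # compare only this output by name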
@@ -15,7 +15,7 @@
 import unittest
 import numpy as np
-from op_test import OpTest, convert_float_to_uint16
+from eager_op_test import OpTest, convert_float_to_uint16
 import paddle
 from paddle import fluid
@@ -32,10 +32,10 @@ class TestWhereOp(OpTest):
         self.outputs = {'Out': np.where(self.cond, self.x, self.y)}
     def test_check_output(self):
-        self.check_output(check_eager=False)
+        self.check_output()
     def test_check_grad(self):
-        self.check_grad(['X', 'Y'], 'Out', check_eager=False)
+        self.check_grad(['X', 'Y'], 'Out')
     def init_config(self):
         self.x = np.random.uniform((-3), 5, 100).astype('float64')
@@ -80,12 +80,12 @@ class TestWhereBF16OP(OpTest):
     def test_check_output(self):
         place = core.CUDAPlace(0)
-        self.check_output_with_place(place, check_eager=False)
+        self.check_output_with_place(place)
     def test_check_grad(self):
         place = core.CUDAPlace(0)
         self.check_grad_with_place(
-            place, ['X', 'Y'], 'Out', check_eager=False, numeric_grad_delta=0.05
+            place, ['X', 'Y'], 'Out', numeric_grad_delta=0.05
         )
     def init_config(self):
...
@@ -93,11 +93,37 @@ def YoloBox(x, img_size, attrs):
     return (pred_box, pred_score.reshape((n, (-1), class_num)))
+def yolo_box_wrapper(
+    X,
+    ImgSize,
+    anchors=[],
+    class_num=0,
+    conf_thresh=0.01,
+    downsample_ratio=32,
+    clip_bbox=True,
+    scale_x_y=1.0,
+    iou_aware=False,
+    iou_aware_factor=0.5,
+):
+    return paddle._C_ops.yolo_box(
+        X,
+        ImgSize,
+        anchors,
+        class_num,
+        conf_thresh,
+        downsample_ratio,
+        clip_bbox,
+        scale_x_y,
+        iou_aware,
+        iou_aware_factor,
+    )
 class TestYoloBoxOp(OpTest):
     def setUp(self):
         self.initTestCase()
         self.op_type = 'yolo_box'
-        self.python_api = paddle.vision.ops.yolo_box
+        self.python_api = yolo_box_wrapper
         x = np.random.random(self.x_shape).astype('float32')
         img_size = np.random.randint(10, 20, self.imgsize_shape).astype('int32')
         self.attrs = {
...
@@ -3867,7 +3867,10 @@ def strided_slice(x, axes, starts, ends, strides, name=None):
     def check_list_elements_dtype(list_input, input_name):
         if isinstance(list_input, Variable):
             check_dtype(
-                list_input.dtype, input_name, ['int32'], 'strided_slice'
+                list_input.dtype,
+                input_name,
+                ['int32', 'int64'],
+                'strided_slice',
             )
         else:
             for i, var in enumerate(list_input):
...
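Note on the last hunk: it widens the accepted dtype for strided_slice index tensors from int32 only to int32 or int64 in the static-graph dtype check. A minimal sketch of what this enables, in static mode since that is where check_dtype runs; assumed usage, not taken from this diff:

import paddle

paddle.enable_static()
with paddle.static.program_guard(paddle.static.Program()):
    x = paddle.static.data(name='x', shape=[3, 4], dtype='float32')
    # int64 index tensors now pass the widened check_dtype
    starts = paddle.full([1], 0, dtype='int64')
    ends = paddle.full([1], 2, dtype='int64')
    strides = paddle.full([1], 1, dtype='int64')
    out = paddle.strided_slice(
        x, axes=[0], starts=starts, ends=ends, strides=strides
    )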