Unverified commit f1a3f4f7, authored by: W wanghuancoder, committed by: GitHub

Del old dygraph optest4 (#51610)

* delete old dygraph op test
Parent 66e0720d
@@ -62,6 +62,12 @@ ops_to_fill_zero_for_empty_grads = set(
         "concat_double_grad",
         "expand_grad",
         "argsort_grad",
+        "eigh_grad",
+        "add_grad",
+        "subtract_grad",
+        "multiply_grad",
+        "divide_grad",
+        "matmul_grad",
     ]
 )
...
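The set above lists grad kernels whose missing output grads the generated eager code must materialize as zero-filled tensors. A minimal standalone sketch of how such a set is typically consumed (the helper name here is hypothetical, not from this PR):

    # Hypothetical consumer of the set above: grad ops listed here get
    # zero-filled tensors in place of otherwise-empty output grads.
    ops_to_fill_zero_for_empty_grads = {"add_grad", "matmul_grad"}

    def needs_zero_fill(op_name: str) -> bool:
        return op_name in ops_to_fill_zero_for_empty_grads

    print(needs_zero_fill("matmul_grad"))  # True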
@@ -474,6 +474,7 @@ void GradNodeBase::HandleComplexGradToRealGrad(
       const paddle::Tensor& grad = slot_out_grads[rank_id];
       if (paddle::framework::IsComplexType(fwd_data_type)) continue;
+      if (!grad.impl()) continue;
       // Only Handle Complex To Real for DenseTensor for now
       if (phi::DenseTensor::classof(grad.impl().get())) {
...
@@ -27,7 +27,10 @@
 // functions. While, for very few OPs, the dispensable inputs are used, we
 // need to manually specify them in this map.
 std::map<std::string, std::set<std::string>> op_ins_map = {
+    {"fc", {"Input", "W", "Bias"}},
     {"layer_norm", {"X", "Scale", "Bias"}},
+    {"conv2d_fusion_cutlass", {"Input", "Filter", "Bias", "ResidualData"}},
+    {"conv2d_fusion", {"Input", "Filter", "Bias", "ResidualData"}},
     {"bincount", {"X", "Weights"}},
     {"fused_attention",
      {"X",
...
@@ -34,7 +34,6 @@ from paddle.fluid.framework import (
     OpProtoHolder,
     Program,
     _current_expected_place,
-    in_dygraph_mode,
 )
 from paddle.fluid.op import Operator
@@ -914,7 +913,14 @@ class OpTest(unittest.TestCase):
         """
         return cal_python_api(self.python_api, args, kernel_sig)

-    def _calc_dygraph_output(self, place, parallel=False, no_check_set=None):
+    def _calc_dygraph_output(
+        self,
+        place,
+        parallel=False,
+        no_check_set=None,
+        egr_inps=None,
+        egr_oups=None,
+    ):
         self.__class__.op_type = (
             self.op_type
         )  # for ci check, please not delete it for now
@@ -924,12 +930,20 @@ class OpTest(unittest.TestCase):
         op_proto = OpProtoHolder.instance().get_op_proto(self.op_type)

         # prepare input variable
-        inputs = self.append_input_output_for_dygraph(
-            op_proto, self.inputs, True, False, block
+        inputs = (
+            egr_inps
+            if egr_inps
+            else self.append_input_output_for_dygraph(
+                op_proto, self.inputs, True, False, block
+            )
         )

         # prepare output variable
-        outputs = self.append_input_output_for_dygraph(
-            op_proto, self.outputs, False, False, block
+        outputs = (
+            egr_oups
+            if egr_oups
+            else self.append_input_output_for_dygraph(
+                op_proto, self.outputs, False, False, block
+            )
         )

         # prepare attributes
@@ -2279,35 +2293,38 @@ class OpTest(unittest.TestCase):
             )
             if dygraph_outputs is None:
                 # missing KernelSignature, fall back to eager middle output.
-                dygraph_outs = self._calc_dygraph_output(place)
-
-            # if outputs is None, kernel sig is empty or other error is happens.
-            if not check_dygraph or dygraph_outputs is None:
-                block.append_op(
-                    type=self.op_type,
-                    inputs=inputs,
-                    outputs=outputs,
-                    attrs=attrs_outputs if hasattr(self, "attrs") else None,
-                )
-            else:
-                outputs = dygraph_outputs
+                dygraph_outputs = self._calc_dygraph_output(
+                    place, egr_inps=inputs, egr_oups=outputs
+                )
+
+            outputs = dygraph_outputs

             if self.dtype == np.uint16:
                 cast_inputs = self._find_var_in_dygraph(
                     outputs, output_names[0]
                 )
-                cast_outputs = block.create_var(
-                    dtype="float32", shape=cast_inputs[0].shape
-                )
-                cast_op = block.append_op(
-                    inputs={"X": cast_inputs},
-                    outputs={"Out": cast_outputs},
-                    type="cast",
-                    attrs={
-                        "in_dtype": core.VarDesc.VarType.BF16,
-                        "out_dtype": core.VarDesc.VarType.FP32,
-                    },
-                )
+                if isinstance(cast_inputs, paddle.Tensor):
+                    cast_outputs = paddle.cast(
+                        cast_inputs, core.VarDesc.VarType.FP32
+                    )
+                elif isinstance(cast_inputs, list):
+                    cast_outputs = []
+                    for cast_input in cast_inputs:
+                        if isinstance(cast_input, paddle.Tensor):
+                            cast_outputs.append(
+                                paddle.cast(
+                                    cast_input, core.VarDesc.VarType.FP32
+                                )
+                            )
+                        else:
+                            raise TypeError(
+                                "Unsupported test data type %s."
+                                % type(cast_input)
+                            )
+                else:
+                    raise TypeError(
+                        "Unsupported test data type %s." % type(cast_inputs)
+                    )
                 outputs = {output_names[0]: cast_outputs}

             outputs_valid = {}
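bfloat16 test data is stored as uint16 in numpy, so the new eager path above casts Tensor outputs back to float32 before comparison. A minimal standalone sketch of the same cast logic, assuming either a single Tensor or a list of Tensors:

    import paddle

    def cast_to_fp32(outs):
        # Mirrors the branch above: accept one Tensor or a list of Tensors.
        if isinstance(outs, paddle.Tensor):
            return paddle.cast(outs, "float32")
        return [paddle.cast(t, "float32") for t in outs]

    print(cast_to_fp32(paddle.ones([2], dtype="float64")).dtype)  # paddle.float32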
@@ -2318,61 +2335,16 @@ class OpTest(unittest.TestCase):
             if user_defined_grad_outputs is None:
                 if len(outputs_valid) == 1:
-                    loss = block.create_var(
-                        dtype=self.dtype,
-                        type=core.VarDesc.VarType.LOD_TENSOR,
-                        persistable=False,
-                        stop_gradient=False,
-                        shape=[1],
-                    )
                     for outputs_valid_key in outputs_valid:
-                        block.append_op(
-                            type="mean",
-                            inputs={"X": outputs_valid[outputs_valid_key]},
-                            outputs={"Out": [loss]},
-                            attrs=None,
-                        )
+                        loss = paddle.mean(outputs_valid[outputs_valid_key][0])
                 else:
                     avg_sum = []
                     for cur_loss in outputs_valid:
-                        cur_avg_loss = block.create_var(
-                            dtype=self.dtype,
-                            type=core.VarDesc.VarType.LOD_TENSOR,
-                            persistable=False,
-                            stop_gradient=False,
-                        )
-                        block.append_op(
-                            type="mean",
-                            inputs={"X": outputs_valid[cur_loss]},
-                            outputs={"Out": [cur_avg_loss]},
-                            attrs=None,
-                        )
+                        cur_avg_loss = paddle.mean(outputs_valid[cur_loss][0])
                         avg_sum.append(cur_avg_loss)
-                    loss_sum = block.create_var(
-                        dtype=self.dtype,
-                        type=core.VarDesc.VarType.LOD_TENSOR,
-                        persistable=False,
-                        stop_gradient=False,
-                        shape=[1],
-                    )
-                    block.append_op(
-                        type='sum',
-                        inputs={"X": avg_sum},
-                        outputs={"Out": loss_sum},
-                        attrs=None,
-                    )
-                    loss = block.create_var(
-                        dtype=self.dtype,
-                        type=core.VarDesc.VarType.LOD_TENSOR,
-                        persistable=False,
-                        stop_gradient=False,
-                        shape=[1],
-                    )
-                    block.append_op(
-                        type='scale',
-                        inputs={"X": loss_sum},
-                        outputs={"Out": loss},
-                        attrs={'scale': 1.0 / float(len(avg_sum))},
-                    )
+                    loss_sum = paddle.add_n(avg_sum)
+                    loss = paddle.scale(
+                        loss_sum, scale=1.0 / float(len(avg_sum))
+                    )
                 loss.backward()
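The old static-graph op chain (a `mean` per output, then `sum`, then `scale`) is replaced above by the equivalent eager calls. A minimal standalone sketch of that loss construction, with hypothetical outputs:

    import paddle

    x = paddle.rand([3, 4])
    x.stop_gradient = False
    outputs_valid = {"out0": x * 2.0, "out1": x + 1.0}

    # Average each output, sum the averages, rescale: the same scalar the
    # old mean/sum/scale op sequence produced.
    avg_sum = [paddle.mean(v) for v in outputs_valid.values()]
    loss = paddle.scale(paddle.add_n(avg_sum), scale=1.0 / float(len(avg_sum)))
    loss.backward()
    print(x.grad.shape)  # [3, 4]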
@@ -2392,24 +2364,12 @@ class OpTest(unittest.TestCase):
             for no_grad_val in no_grad_set:
                 del inputs[no_grad_val]

-            if in_dygraph_mode():
-                core.eager.run_backward(
-                    paddle.utils.flatten(outputs),
-                    grad_outputs,
-                    False,
-                )
-                grad_inputs = []
-                for inputs_list in inputs.values():
-                    for inp in inputs_list:
-                        grad_inputs.append(inp.grad.numpy())
-                return grad_inputs
-            else:
-                grad_inputs = paddle.grad(
-                    outputs=paddle.utils.flatten(outputs),
-                    inputs=paddle.utils.flatten(inputs),
-                    grad_outputs=grad_outputs,
-                )
-                return [grad.numpy() for grad in grad_inputs]
+            grad_inputs = paddle.grad(
+                outputs=paddle.utils.flatten(outputs),
+                inputs=paddle.utils.flatten(inputs),
+                grad_outputs=grad_outputs,
+            )
+            return [grad.numpy() for grad in grad_inputs]

     @staticmethod
     def _numpy_to_lod_tensor(np_value, lod, place):
...
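With the in_dygraph_mode branch gone, gradients always come from paddle.grad. A minimal standalone sketch of that call:

    import paddle

    x = paddle.rand([2, 3])
    x.stop_gradient = False
    y = (x * x).sum()
    # grad_outputs defaults to ones_like(y), matching the test's default path.
    (dx,) = paddle.grad(outputs=[y], inputs=[x])
    print(dx.shape)  # [2, 3]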
@@ -2357,7 +2357,9 @@ class TestSoftRelu(TestActivation):
     def test_check_grad(self):
         if self.dtype == np.float16:
             return
-        self.check_grad(['X'], 'Out', max_relative_error=0.02)
+        self.check_grad(
+            ['X'], 'Out', max_relative_error=0.02, check_dygraph=False
+        )

 def elu(x, alpha):
...
@@ -48,16 +48,23 @@ class TestAffineChannelOp(OpTest):
         self.outputs = {'Out': y}

     def test_check_output(self):
-        self.check_output()
+        self.check_output(check_dygraph=False)

     def test_check_grad(self):
-        self.check_grad(['X', 'Scale', 'Bias'], 'Out')
+        self.check_grad(['X', 'Scale', 'Bias'], 'Out', check_dygraph=False)

     def test_check_grad_stopgrad_dx(self):
-        self.check_grad(['Scale', 'Bias'], 'Out', no_grad_set=set('X'))
+        self.check_grad(
+            ['Scale', 'Bias'], 'Out', no_grad_set=set('X'), check_dygraph=False
+        )

     def test_check_grad_stopgrad_dscale_dbias(self):
-        self.check_grad(['X'], 'Out', no_grad_set=set(['Scale', 'Bias']))
+        self.check_grad(
+            ['X'],
+            'Out',
+            no_grad_set=set(['Scale', 'Bias']),
+            check_dygraph=False,
+        )

     def init_test_case(self):
         self.shape = [2, 100, 3, 3]
...
@@ -109,7 +109,6 @@ class TestBilinearInterpOp(OpTest):
         self.op_type = "bilinear_interp"
         # NOTE(dev): some AsDispensible input is not used under imperative mode.
         # Skip check_dygraph while found them in Inputs.
-        self.check_dygraph = True
         input_np = np.random.random(self.input_shape).astype("float64")

         if self.data_layout == "NCHW":
...
@@ -139,10 +138,8 @@ class TestBilinearInterpOp(OpTest):
         self.inputs = {'X': input_np}
         if self.out_size is not None:
             self.inputs['OutSize'] = self.out_size
-            self.check_dygraph = False
         if self.actual_shape is not None:
             self.inputs['OutSize'] = self.actual_shape
-            self.check_dygraph = False

         self.attrs = {
             'out_h': self.out_h,
...
@@ -156,12 +153,10 @@ class TestBilinearInterpOp(OpTest):
         self.outputs = {'Out': output_np}

     def test_check_output(self):
-        self.check_output(check_dygraph=self.check_dygraph)
+        self.check_output(check_dygraph=False)

     def test_check_grad(self):
-        self.check_grad(
-            ['X'], 'Out', in_place=True, check_dygraph=self.check_dygraph
-        )
+        self.check_grad(['X'], 'Out', in_place=True, check_dygraph=False)

     def init_test_case(self):
         self.interp_method = 'bilinear'
...
@@ -285,7 +280,6 @@ class TestBilinearInterpOpUint8(OpTest):
         self.actual_shape = None
         self.init_test_case()
         self.op_type = "bilinear_interp"
-        self.check_dygraph = True
         input_np = np.random.randint(
             low=0, high=256, size=self.input_shape
         ).astype("uint8")
...
@@ -309,7 +303,6 @@ class TestBilinearInterpOpUint8(OpTest):
         self.inputs = {'X': input_np}
         if self.out_size is not None:
             self.inputs['OutSize'] = self.out_size
-            self.check_dygraph = False

         self.attrs = {
             'out_h': self.out_h,
...
@@ -323,7 +316,7 @@ class TestBilinearInterpOpUint8(OpTest):
     def test_check_output(self):
         self.check_output_with_place(
-            place=core.CPUPlace(), atol=1, check_dygraph=self.check_dygraph
+            place=core.CPUPlace(), atol=1, check_dygraph=False
         )

     def init_test_case(self):
...
@@ -427,7 +420,6 @@ class TestBilinearInterpOp_attr_tensor(OpTest):
         self.actual_shape = None
         self.init_test_case()
         self.op_type = "bilinear_interp"
-        self.check_dygraph = True
         self.shape_by_1Dtensor = False
         self.scale_by_1Dtensor = False
         self.attrs = {
...
@@ -450,7 +442,6 @@ class TestBilinearInterpOp_attr_tensor(OpTest):
         if self.shape_by_1Dtensor:
             self.inputs['OutSize'] = self.out_size
-            self.check_dygraph = False
         elif self.out_size is not None:
             size_tensor = []
             for index, ele in enumerate(self.out_size):
...
@@ -458,7 +449,6 @@ class TestBilinearInterpOp_attr_tensor(OpTest):
                 ("x" + str(index), np.ones((1)).astype('int32') * ele)
             )
         self.inputs['SizeTensor'] = size_tensor
-        self.check_dygraph = False

         self.attrs['out_h'] = self.out_h
         self.attrs['out_w'] = self.out_w
...
@@ -473,12 +463,10 @@ class TestBilinearInterpOp_attr_tensor(OpTest):
         self.outputs = {'Out': output_np}

     def test_check_output(self):
-        self.check_output(check_dygraph=self.check_dygraph)
+        self.check_output(check_dygraph=False)

     def test_check_grad(self):
-        self.check_grad(
-            ['X'], 'Out', in_place=True, check_dygraph=self.check_dygraph
-        )
+        self.check_grad(['X'], 'Out', in_place=True, check_dygraph=False)

     def init_test_case(self):
         self.interp_method = 'bilinear'
...
@@ -15,7 +15,7 @@
 import unittest

 import numpy as np
-from op_test import OpTest
+from eager_op_test import OpTest

 import paddle
...
@@ -109,7 +109,7 @@ def batch_box_coder(p_box, pb_v, t_box, lod, code_type, norm, axis=0):
 class TestBoxCoderOp(OpTest):
     def test_check_output(self):
-        self.check_output(check_eager=True)
+        self.check_output()

     def setUp(self):
         self.op_type = "box_coder"
...
@@ -142,7 +142,7 @@ class TestBoxCoderOp(OpTest):
 class TestBoxCoderOpWithoutBoxVar(OpTest):
     def test_check_output(self):
-        self.check_output(check_eager=True)
+        self.check_output()

     def setUp(self):
         self.python_api = paddle.vision.ops.box_coder
...
@@ -176,7 +176,7 @@ class TestBoxCoderOpWithoutBoxVar(OpTest):
 class TestBoxCoderOpWithLoD(OpTest):
     def test_check_output(self):
-        self.check_output(check_eager=True)
+        self.check_output()

     def setUp(self):
         self.python_api = paddle.vision.ops.box_coder
...
@@ -207,7 +207,7 @@ class TestBoxCoderOpWithLoD(OpTest):
 class TestBoxCoderOpWithAxis(OpTest):
     def test_check_output(self):
-        self.check_output(check_eager=True)
+        self.check_output()

     def setUp(self):
         self.python_api = paddle.vision.ops.box_coder
...
@@ -242,12 +242,55 @@ class TestBoxCoderOpWithAxis(OpTest):
         self.outputs = {'OutputBox': output_box}

+def wrapper_box_coder(
+    prior_box,
+    prior_box_var=None,
+    target_box=None,
+    code_type="encode_center_size",
+    box_normalized=True,
+    axis=0,
+    variance=[],
+):
+    if isinstance(prior_box_var, paddle.Tensor):
+        output_box = paddle._C_ops.box_coder(
+            prior_box,
+            prior_box_var,
+            target_box,
+            code_type,
+            box_normalized,
+            axis,
+            [],
+        )
+    elif isinstance(prior_box_var, list):
+        output_box = paddle._C_ops.box_coder(
+            prior_box,
+            None,
+            target_box,
+            code_type,
+            box_normalized,
+            axis,
+            prior_box_var,
+        )
+    else:
+        output_box = paddle._C_ops.box_coder(
+            prior_box,
+            None,
+            target_box,
+            code_type,
+            box_normalized,
+            axis,
+            variance,
+        )
+    return output_box
+
+
 class TestBoxCoderOpWithVariance(OpTest):
     def test_check_output(self):
         self.check_output()

     def setUp(self):
         self.op_type = "box_coder"
+        self.python_api = wrapper_box_coder
         lod = [[1, 1, 1, 1, 1]]
         prior_box = np.random.random((30, 4)).astype('float32')
         prior_box_var = np.random.random((4)).astype('float32')
...
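wrapper_box_coder above exists because the eager OpTest needs a python_api whose signature matches the kernel, while these tests feed the variance either as a Tensor input, as a list attribute, or not at all. A minimal standalone sketch of that dispatch pattern (the function here is a hypothetical stand-in, not the real wrapper):

    import paddle

    def describe_variance(prior_box_var):
        if isinstance(prior_box_var, paddle.Tensor):
            return "variance passed as a Tensor input"
        elif isinstance(prior_box_var, list):
            return "variance passed as a list attribute"
        return "no variance given; default attribute used"

    print(describe_variance([0.1, 0.1, 0.2, 0.2]))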
@@ -15,7 +15,7 @@
 import unittest

 import numpy as np
-from op_test import OpTest, randomize_probability
+from eager_op_test import OpTest, randomize_probability

 import paddle
...
@@ -43,11 +43,13 @@ class TestBprLossOp1(OpTest):
     def test_check_output(self):
         paddle.enable_static()
-        self.check_output()
+        self.check_output(check_dygraph=False)
         paddle.disable_static()

     def test_check_grad(self):
-        self.check_grad(["X"], "Y", numeric_grad_delta=0.001)
+        self.check_grad(
+            ["X"], "Y", numeric_grad_delta=0.001, check_dygraph=False
+        )

 if __name__ == "__main__":
...
@@ -15,7 +15,7 @@
 import unittest

 import numpy as np
-from op_test import OpTest
+from eager_op_test import OpTest

 import paddle.fluid.core as core
...
@@ -16,7 +16,7 @@ import random
 import unittest

 import numpy as np
-from op_test import OpTest
+from eager_op_test import OpTest

 import paddle
 import paddle.fluid.core as core
...
@@ -125,7 +125,7 @@ class TestCPUBroadcastTensorsOp(OpTest):
     def test_check_output(self):
         self.run_dual_test(
             self.check_output_with_place,
-            {"place": self.place, "atol": 1e-1, "check_eager": True},
+            {"place": self.place, "atol": 1e-1},
         )

     def test_check_grad_normal(self):
...
@@ -136,7 +136,6 @@ class TestCPUBroadcastTensorsOp(OpTest):
                 "inputs_to_check": ['x0', 'x1'],
                 "output_names": ['out0', 'out1'],
                 "max_relative_error": 0.05,
-                "check_eager": True,
             },
         )
         self.run_triple_in_test(
...
@@ -146,7 +145,6 @@ class TestCPUBroadcastTensorsOp(OpTest):
                 "inputs_to_check": ['x0', 'x1', 'x2'],
                 "output_names": ['out0', 'out1', "out2"],
                 "max_relative_error": 0.05,
-                "check_eager": True,
             },
         )
...
@@ -15,7 +15,7 @@
 import unittest

 import numpy as np
-from op_test import OpTest
+from eager_op_test import OpTest

 import paddle
...
@@ -80,10 +80,10 @@ class TestCenterLossOp(OpTest):
         pass

     def test_check_output(self):
-        self.check_output()
+        self.check_output(check_dygraph=False)

     def test_check_grad(self):
-        self.check_grad(['X'], 'Loss')
+        self.check_grad(['X'], 'Loss', check_dygraph=False)

 class TestCenterLossOpNoUpdate(TestCenterLossOp):
...
@@ -15,7 +15,7 @@
 import unittest

 import numpy as np
-from op_test import OpTest
+from eager_op_test import OpTest, paddle_static_guard

 import paddle
 import paddle.fluid.core as core
...
@@ -114,9 +114,7 @@ class TestClassCenterSampleOp(OpTest):
         }

     def test_check_output(self):
-        self.check_output(
-            no_check_set=['SampledLocalClassCenter'], check_eager=True
-        )
+        self.check_output(no_check_set=['SampledLocalClassCenter'])

 class TestClassCenterSampleOpINT32(TestClassCenterSampleOp):
...
@@ -149,42 +147,46 @@ class TestClassCenterSampleV2(unittest.TestCase):
         self.dtype = np.int64

     def test_static(self):
-        for place in self.places:
-            self.check_static_result(place=place)
+        with paddle_static_guard():
+            for place in self.places:
+                self.check_static_result(place=place)

     def check_static_result(self, place):
-        with program_guard(Program(), Program()):
-            label_np = np.random.randint(
-                0, self.num_classes, (self.batch_size,), dtype=self.dtype
-            )
-
-            label = paddle.static.data(
-                name='label', shape=[self.batch_size], dtype=self.dtype
-            )
-            (
-                remapped_label,
-                sampled_class_index,
-            ) = paddle.nn.functional.class_center_sample(
-                label, self.num_classes, self.num_samples
-            )
-
-            (
-                remapped_label_np,
-                sampled_class_center_np,
-            ) = class_center_sample_numpy(
-                label_np, [self.num_classes], self.num_samples
-            )
-            exe = paddle.fluid.Executor(place)
-            [remapped_label_res, sampled_class_index_res] = exe.run(
-                paddle.fluid.default_main_program(),
-                feed={'label': label_np},
-                fetch_list=[remapped_label, sampled_class_index],
-            )
-            np.testing.assert_allclose(remapped_label_res, remapped_label_np)
-            np.testing.assert_allclose(
-                sampled_class_index_res[: len(sampled_class_center_np[0])],
-                sampled_class_center_np[0],
-            )
+        with paddle_static_guard():
+            with program_guard(Program(), Program()):
+                label_np = np.random.randint(
+                    0, self.num_classes, (self.batch_size,), dtype=self.dtype
+                )
+
+                label = paddle.static.data(
+                    name='label', shape=[self.batch_size], dtype=self.dtype
+                )
+                (
+                    remapped_label,
+                    sampled_class_index,
+                ) = paddle.nn.functional.class_center_sample(
+                    label, self.num_classes, self.num_samples
+                )
+
+                (
+                    remapped_label_np,
+                    sampled_class_center_np,
+                ) = class_center_sample_numpy(
+                    label_np, [self.num_classes], self.num_samples
+                )
+                exe = paddle.fluid.Executor(place)
+                [remapped_label_res, sampled_class_index_res] = exe.run(
+                    paddle.fluid.default_main_program(),
+                    feed={'label': label_np},
+                    fetch_list=[remapped_label, sampled_class_index],
+                )
+                np.testing.assert_allclose(
+                    remapped_label_res, remapped_label_np
+                )
+                np.testing.assert_allclose(
+                    sampled_class_index_res[: len(sampled_class_center_np[0])],
+                    sampled_class_center_np[0],
+                )

     def test_dynamic(self):
         for place in self.places:
...
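paddle_static_guard is imported from eager_op_test above; its implementation is not shown in this diff, but a minimal stand-in with the same shape (an assumption, not the actual helper) would be:

    import contextlib

    import paddle

    @contextlib.contextmanager
    def static_guard():
        # Enter static-graph mode for the body, then restore dynamic mode,
        # so surrounding eager tests are unaffected.
        paddle.enable_static()
        try:
            yield
        finally:
            paddle.disable_static()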
@@ -15,7 +15,7 @@
 import unittest

 import numpy as np
-from op_test import OpTest
+from eager_op_test import OpTest

 import paddle
 import paddle.fluid as fluid
...
@@ -45,7 +45,7 @@ class TestClipByNormOp(OpTest):
         self.outputs = {'Out': output}

     def test_check_output(self):
-        self.check_output(check_eager=True)
+        self.check_output()

     def initTestCase(self):
         self.shape = (100,)
...
@@ -81,9 +81,7 @@ class TestClipByNormOpFp16(TestClipByNormOp):
         if core.is_compiled_with_cuda():
             place = core.CUDAPlace(0)
             if core.is_float16_supported(place):
-                self.check_output_with_place(
-                    place, atol=0.001, check_eager=True
-                )
+                self.check_output_with_place(place, atol=0.001)

 class TestClipByNormOpFp16Case1(TestClipByNormOpFp16):
...
@@ -15,7 +15,7 @@
 import unittest

 import numpy as np
-from op_test import OpTest
+from eager_op_test import OpTest

 import paddle
 import paddle.fluid as fluid
...
@@ -52,12 +52,12 @@ class TestClipOp(OpTest):
     def test_check_output(self):
         paddle.enable_static()
-        self.check_output(check_eager=True)
+        self.check_output()
         paddle.disable_static()

     def test_check_grad_normal(self):
         paddle.enable_static()
-        self.check_grad(['X'], 'Out', check_eager=True)
+        self.check_grad(['X'], 'Out')
         paddle.disable_static()

     def initTestCase(self):
...
@@ -15,7 +15,7 @@
 import unittest

 import numpy as np
-from op_test import OpTest
+from eager_op_test import OpTest

 import paddle
 import paddle.fluid as fluid
...
@@ -160,7 +160,10 @@ class TestAllocContinuousSpace(OpTest):
     def test_check_output(self):
         self.check_output_with_place(
-            place=core.CUDAPlace(0), no_check_set=["FusedOutput"], atol=1e-5
+            place=core.CUDAPlace(0),
+            no_check_set=["FusedOutput"],
+            atol=1e-5,
+            check_dygraph=False,
         )
         self.verify_output(core.CUDAPlace(0))
...
@@ -180,7 +183,10 @@ class TestAllocContinuousSpace2(TestAllocContinuousSpace):
     def test_check_output(self):
         self.check_output_with_place(
-            place=core.CUDAPlace(0), no_check_set=["FusedOutput"], atol=1e-5
+            place=core.CUDAPlace(0),
+            no_check_set=["FusedOutput"],
+            atol=1e-5,
+            check_dygraph=False,
        )
         self.verify_output(core.CUDAPlace(0))
...
@@ -15,7 +15,7 @@
 import unittest

 import numpy as np
-from op_test import OpTest
+from eager_op_test import OpTest

 class TestCollectFPNProposalstOp(OpTest):
...
@@ -36,7 +36,7 @@ def create_test_class(op_type, typename, callback):
             self.op_type = op_type

         def test_output(self):
-            self.check_output(check_eager=False)
+            self.check_output()

         def test_errors(self):
             paddle.enable_static()
...
@@ -32,7 +32,7 @@ def create_test_not_equal_class(op_type, typename, callback):
             self.op_type = op_type

         def test_output(self):
-            self.check_output(check_eager=True)
+            self.check_output()

     cls_name = "{0}_{1}_{2}".format(op_type, typename, 'not_equal_all')
     Cls.__name__ = cls_name
...
@@ -51,7 +51,7 @@ def create_test_not_shape_equal_class(op_type, typename, callback):
             self.op_type = op_type

         def test_output(self):
-            self.check_output(check_eager=True)
+            self.check_output()

     cls_name = "{0}_{1}_{2}".format(op_type, typename, 'not_shape_equal_all')
     Cls.__name__ = cls_name
...
@@ -69,7 +69,7 @@ def create_test_equal_class(op_type, typename, callback):
             self.op_type = op_type

         def test_output(self):
-            self.check_output(check_eager=True)
+            self.check_output()

     cls_name = "{0}_{1}_{2}".format(op_type, typename, 'equal_all')
     Cls.__name__ = cls_name
...
@@ -89,7 +89,7 @@ def create_test_dim1_class(op_type, typename, callback):
             self.op_type = op_type

         def test_output(self):
-            self.check_output(check_eager=True)
+            self.check_output()

     cls_name = "{0}_{1}_{2}".format(op_type, typename, 'equal_all')
     Cls.__name__ = cls_name
...
@@ -15,7 +15,7 @@
 import unittest

 import numpy as np
-from op_test import OpTest
+from eager_op_test import OpTest

 import paddle
 import paddle.fluid.dygraph as dg
...
@@ -45,7 +45,7 @@ class TestComplexAbsOp(OpTest):
         self.grad_x = self.grad_out * (self.x / np.abs(self.x))

     def test_check_output(self):
-        self.check_output(check_eager=False)
+        self.check_output()

     def test_check_grad(self):
         self.check_grad(
...
@@ -53,7 +53,6 @@ class TestComplexAbsOp(OpTest):
             'Out',
             user_defined_grads=[self.grad_x],
             user_defined_grad_outputs=[self.grad_out],
-            check_eager=False,
         )
...
@@ -81,7 +80,7 @@ class TestComplexAbsOpZeroValues(OpTest):
         self.grad_x = np.zeros(self.shape, self.dtype)

     def test_check_output(self):
-        self.check_output(check_eager=False)
+        self.check_output()

     def test_check_grad(self):
         self.check_grad(
...
@@ -89,7 +88,6 @@ class TestComplexAbsOpZeroValues(OpTest):
             'Out',
             user_defined_grads=[self.grad_x],
             user_defined_grad_outputs=[self.grad_out],
-            check_eager=False,
         )
...
@@ -131,7 +129,7 @@ class TestRealAbsOp(OpTest):
         self.grad_x = self.grad_out * (self.x / np.abs(self.x))

     def test_check_output(self):
-        self.check_output(check_eager=False)
+        self.check_output()

     def test_check_grad(self):
         self.check_grad(
...
@@ -139,7 +137,6 @@ class TestRealAbsOp(OpTest):
             'Out',
             user_defined_grads=[self.grad_x],
             user_defined_grad_outputs=[self.grad_out],
-            check_eager=False,
         )
...
@@ -15,7 +15,7 @@
 import unittest

 import numpy as np
-from op_test import OpTest
+from eager_op_test import OpTest

 import paddle
 from paddle import static
...
@@ -46,7 +46,7 @@ class TestViewAsComplexOp(OpTest):
         self.outputs = {'Out': out_ref}

     def test_check_output(self):
-        self.check_output(check_eager=True)
+        self.check_output()

     def test_check_grad(self):
         self.check_grad(
...
@@ -54,7 +54,6 @@ class TestViewAsComplexOp(OpTest):
             'Out',
             user_defined_grads=[ref_view_as_real(self.out_grad)],
             user_defined_grad_outputs=[self.out_grad],
-            check_eager=True,
         )
...
@@ -71,7 +70,7 @@ class TestViewAsRealOp(OpTest):
         self.out_grad = np.ones([10, 10, 2], dtype="float64")

     def test_check_output(self):
-        self.check_output(check_eager=True)
+        self.check_output()

     def test_check_grad(self):
         self.check_grad(
...
@@ -79,7 +78,6 @@ class TestViewAsRealOp(OpTest):
             'Out',
             user_defined_grads=[ref_view_as_complex(self.out_grad)],
             user_defined_grad_outputs=[self.out_grad],
-            check_eager=True,
         )
...
@@ -58,7 +58,7 @@ class TestConcatOp(OpTest):
             place = core.CUDAPlace(0)
             self.check_output_with_place(place)
         else:
-            self.check_output(check_eager=True)
+            self.check_output()

     def test_check_grad(self):
         if self.dtype == np.uint16:
...
@@ -67,9 +67,9 @@ class TestConcatOp(OpTest):
             self.check_grad_with_place(place, ['x1'], 'Out', check_prim=True)
             self.check_grad_with_place(place, ['x2'], 'Out', check_prim=True)
         else:
-            self.check_grad(['x0'], 'Out', check_eager=True, check_prim=True)
-            self.check_grad(['x1'], 'Out', check_eager=True, check_prim=True)
-            self.check_grad(['x2'], 'Out', check_eager=True, check_prim=True)
+            self.check_grad(['x0'], 'Out', check_prim=True)
+            self.check_grad(['x1'], 'Out', check_prim=True)
+            self.check_grad(['x2'], 'Out', check_prim=True)

     def init_test_data(self):
         if self.dtype == np.uint16:
...
@@ -157,12 +157,12 @@ class TestConcatOp6(TestConcatOp):
         self.outputs = {'Out': (out, self.out_lod)}

     def test_check_output(self):
-        self.check_output(check_eager=True)
+        self.check_output()

     def test_check_grad(self):
-        self.check_grad(['x0'], 'Out', check_eager=True)
-        self.check_grad(['x1'], 'Out', check_eager=True)
-        self.check_grad(['x2'], 'Out', check_eager=True)
+        self.check_grad(['x0'], 'Out')
+        self.check_grad(['x1'], 'Out')
+        self.check_grad(['x2'], 'Out')

     def init_test_data(self):
         self.x0 = np.random.random([100]).astype(self.dtype)
...
@@ -197,12 +197,12 @@ class TestConcatOp7(TestConcatOp):
         return "float64"

     def test_check_output(self):
-        self.check_output(check_eager=True)
+        self.check_output()

     def test_check_grad(self):
-        self.check_grad(['x0'], 'Out', check_eager=True, check_prim=True)
-        self.check_grad(['x1'], 'Out', check_eager=True, check_prim=True)
-        self.check_grad(['x2'], 'Out', check_eager=True, check_prim=True)
+        self.check_grad(['x0'], 'Out', check_prim=True)
+        self.check_grad(['x1'], 'Out', check_prim=True)
+        self.check_grad(['x2'], 'Out', check_prim=True)

     def init_test_data(self):
         if self.dtype == np.uint16:
...
@@ -20,8 +20,8 @@ import numpy as np
 import paddle

 sys.path.append("..")
+from eager_op_test import OpTest
 from numpy.random import random as rand
-from op_test import OpTest

 import paddle.fluid.dygraph as dg
 import paddle.static as static
...
@@ -56,7 +56,7 @@ class TestConjOp(OpTest):
         self.grad_in = np.conj(self.grad_out)

     def test_check_output(self):
-        self.check_output(check_eager=True)
+        self.check_output()

     def test_check_grad_normal(self):
         self.check_grad(
...
@@ -64,7 +64,6 @@ class TestConjOp(OpTest):
             'Out',
             user_defined_grads=[self.grad_in],
             user_defined_grad_outputs=[self.grad_out],
-            check_eager=True,
         )
...
@@ -21,7 +21,7 @@ import paddle
 import paddle.nn as nn

 paddle.enable_static()
-from op_test import OpTest
+from eager_op_test import OpTest
 from test_attribute_var import UnittestBase

 import paddle.fluid as fluid
...
@@ -137,6 +137,36 @@ def conv2dtranspose_forward_naive(input_, filter_, attrs):
     return out

+def conv2dtranspose_wrapper(
+    x,
+    weight,
+    stride=1,
+    padding=0,
+    output_padding=[],
+    output_size=[],
+    padding_algorithm="EXPLICIT",
+    groups=1,
+    dilation=1,
+    data_format="NCDHW",
+):
+    if data_format == "AnyLayout":
+        data_format = "NCDHW"
+    if padding_algorithm is None:
+        padding_algorithm = "EXPLICIT"
+    return paddle._C_ops.conv2d_transpose(
+        x,
+        weight,
+        stride,
+        padding,
+        output_padding,
+        output_size,
+        padding_algorithm,
+        groups,
+        dilation,
+        data_format,
+    )
+
+
 class TestConv2DTransposeOp(OpTest):
     def setUp(self):
         # init as conv transpose
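A hypothetical smoke test of conv2dtranspose_wrapper above; the input and weight shapes and the expected output size are assumptions, and the wrapper simply forwards to paddle._C_ops.conv2d_transpose as defined in this diff:

    import paddle

    x = paddle.rand([2, 3, 5, 5])   # NCHW input
    w = paddle.rand([3, 6, 3, 3])   # in_c, out_c/groups, kH, kW
    out = conv2dtranspose_wrapper(
        x, w, stride=[1, 1], padding=[0, 0], dilation=[1, 1],
        data_format="NCHW",
    )
    print(out.shape)  # expected [2, 6, 7, 7]: (5 - 1) * 1 + 3 = 7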
...
@@ -244,6 +274,7 @@ class TestConv2DTransposeOp(OpTest):
     def init_op_type(self):
         self.op_type = "conv2d_transpose"
+        self.python_api = conv2dtranspose_wrapper

 class TestWithSymmetricPad(TestConv2DTransposeOp):
...
@@ -453,6 +484,7 @@ class TestCUDNN(TestConv2DTransposeOp):
     def init_op_type(self):
         self.use_cudnn = True
         self.op_type = "conv2d_transpose"
+        self.python_api = conv2dtranspose_wrapper

 @unittest.skipIf(
...
@@ -471,6 +503,7 @@ class TestCUDNNWithSymmetricPad(TestWithSymmetricPad):
     def init_op_type(self):
         self.use_cudnn = True
         self.op_type = "conv2d_transpose"
+        self.python_api = conv2dtranspose_wrapper

 @unittest.skipIf(
...
@@ -489,6 +522,7 @@ class TestCUDNNWithAsymmetricPad(TestWithAsymmetricPad):
     def init_op_type(self):
         self.use_cudnn = True
         self.op_type = "conv2d_transpose"
+        self.python_api = conv2dtranspose_wrapper

 @unittest.skipIf(
...
@@ -507,6 +541,7 @@ class TestCUDNNWithSAMEPad(TestWithSAMEPad):
     def init_op_type(self):
         self.use_cudnn = True
         self.op_type = "conv2d_transpose"
+        self.python_api = conv2dtranspose_wrapper

 @unittest.skipIf(
...
@@ -525,6 +560,7 @@ class TestCUDNNWithVALIDPad(TestWithVALIDPad):
     def init_op_type(self):
         self.use_cudnn = True
         self.op_type = "conv2d_transpose"
+        self.python_api = conv2dtranspose_wrapper

 @unittest.skipIf(
...
@@ -543,6 +579,7 @@ class TestCUDNNWithStride(TestWithStride):
     def init_op_type(self):
         self.use_cudnn = True
         self.op_type = "conv2d_transpose"
+        self.python_api = conv2dtranspose_wrapper

 @unittest.skipIf(
...
@@ -561,6 +598,7 @@ class TestCUDNNWithGroups(TestWithGroups):
     def init_op_type(self):
         self.use_cudnn = True
         self.op_type = "conv2d_transpose"
+        self.python_api = conv2dtranspose_wrapper

 # ------------ test_cudnn ------------
...
@@ -571,6 +609,7 @@ class TestCUDNNWithEvenUpsample(TestWithEvenUpsample):
     def init_op_type(self):
         self.use_cudnn = True
         self.op_type = "conv2d_transpose"
+        self.python_api = conv2dtranspose_wrapper

 # Please Don't remove the following code.
...
@@ -605,6 +644,7 @@ class TestCUDNN_NHWC(TestConv2DTransposeOp):
     def init_op_type(self):
         self.use_cudnn = True
         self.op_type = "conv2d_transpose"
+        self.python_api = conv2dtranspose_wrapper

 @unittest.skipIf(
...
@@ -624,6 +664,7 @@ class TestCUDNNWithSymmetricPad_NHWC(TestWithSymmetricPad):
     def init_op_type(self):
         self.use_cudnn = True
         self.op_type = "conv2d_transpose"
+        self.python_api = conv2dtranspose_wrapper

 @unittest.skipIf(
...
@@ -643,6 +684,7 @@ class TestCUDNNWithAsymmetricPad_NHWC(TestWithSymmetricPad):
     def init_op_type(self):
         self.use_cudnn = True
         self.op_type = "conv2d_transpose"
+        self.python_api = conv2dtranspose_wrapper

 @unittest.skipIf(
...
@@ -662,6 +704,7 @@ class TestCUDNNWithStride_NHWC(TestWithStride):
     def init_op_type(self):
         self.use_cudnn = True
         self.op_type = "conv2d_transpose"
+        self.python_api = conv2dtranspose_wrapper

 @unittest.skipIf(
...
@@ -681,6 +724,7 @@ class TestCUDNNWithGroups_NHWC(TestWithGroups):
     def init_op_type(self):
         self.use_cudnn = True
         self.op_type = "conv2d_transpose"
+        self.python_api = conv2dtranspose_wrapper

 @unittest.skipIf(
...
@@ -701,6 +745,7 @@ class TestCUDNNWithEvenUpsample_NHWC(TestWithEvenUpsample):
     def init_op_type(self):
         self.use_cudnn = True
         self.op_type = "conv2d_transpose"
+        self.python_api = conv2dtranspose_wrapper

 @unittest.skipIf(
...
@@ -721,6 +766,7 @@ class TestCUDNN_FP16(TestConv2DTransposeOp):
         self.need_check_grad = False
         self.use_cudnn = True
         self.op_type = "conv2d_transpose"
+        self.python_api = conv2dtranspose_wrapper

     def test_check_output(self):
         if self.use_cudnn:
...
@@ -15,7 +15,7 @@
 import unittest

 import numpy as np
-from op_test import OpTest
+from eager_op_test import OpTest, paddle_static_guard

 import paddle
 import paddle.fluid.core as core
...
@@ -278,9 +278,36 @@ def create_test_cudnn_channel_last_class(parent):
     globals()[cls_name] = TestCudnnChannelLastCase

+def conv3d_wrapper(
+    x,
+    weight,
+    stride=1,
+    padding=0,
+    padding_algorithm="EXPLICIT",
+    groups=1,
+    dilation=1,
+    data_format="NCDHW",
+):
+    if data_format == "AnyLayout":
+        data_format = "NCDHW"
+    if padding_algorithm is None:
+        padding_algorithm = "EXPLICIT"
+    return paddle._C_ops.conv3d(
+        x,
+        weight,
+        stride,
+        padding,
+        padding_algorithm,
+        groups,
+        dilation,
+        data_format,
+    )
+
+
 class TestConv3DOp(OpTest):
     def setUp(self):
         self.op_type = "conv3d"
+        self.python_api = conv3d_wrapper
         self.use_cudnn = False
         self.use_mkldnn = False
         self.data_format = "AnyLayout"
...
@@ -596,6 +623,7 @@ class TestCUDNNExhaustiveSearch(TestCUDNN):
 class TestConv3DOp_2(OpTest):
     def setUp(self):
         self.op_type = "conv3d"
+        self.python_api = conv3d_wrapper
         self.use_cudnn = False
         self.use_mkldnn = False
         self.data_format = "NCDHW"
...
@@ -863,227 +891,227 @@ create_test_cudnn_channel_last_class(TestWith1x1_AsyPadding)

 # --------- test python API ---------------
 class TestConv3DAPI(unittest.TestCase):
     def test_api(self):
-        input_NDHWC = paddle.static.data(
-            name="input_NDHWC",
-            shape=[2, 5, 5, 5, 3],
-            dtype="float32",
-        )
-        input_NCDHW = paddle.static.data(
-            name="input_NCDHW",
-            shape=[2, 3, 5, 5, 3],
-            dtype="float32",
-        )
-
-        paddle.static.nn.conv3d(
-            input=input_NDHWC,
-            num_filters=3,
-            filter_size=[3, 3, 3],
-            stride=[1, 1, 1],
-            padding=0,
-            dilation=[1, 1, 1],
-            groups=1,
-            data_format="NCDHW",
-        )
-
-        paddle.static.nn.conv3d(
-            input=input_NCDHW,
-            num_filters=3,
-            filter_size=[3, 3, 3],
-            stride=[1, 1, 1],
-            padding=[1, 2, 1, 0, 1, 0],
-            dilation=[1, 1, 1],
-            groups=1,
-            data_format="NCDHW",
-        )
-
-        paddle.static.nn.conv3d(
-            input=input_NCDHW,
-            num_filters=3,
-            filter_size=[3, 3, 3],
-            stride=[1, 1, 1],
-            padding=[[0, 0], [0, 0], [1, 1], [1, 1], [1, 1]],
-            dilation=[1, 1, 1],
-            groups=1,
-            data_format="NCDHW",
-        )
-
-        paddle.static.nn.conv3d(
-            input=input_NDHWC,
-            num_filters=3,
-            filter_size=[3, 3, 3],
-            stride=[1, 1, 1],
-            padding=[[0, 0], [1, 1], [1, 1], [1, 1], [0, 0]],
-            dilation=[1, 1, 1],
-            groups=1,
-            data_format="NDHWC",
-        )
-
-        paddle.static.nn.conv3d(
-            input=input_NCDHW,
-            num_filters=3,
-            filter_size=[3, 3, 3],
-            stride=[1, 1, 1],
-            padding="SAME",
-            dilation=[1, 1, 1],
-            groups=1,
-            data_format="NCDHW",
-        )
-
-        paddle.static.nn.conv3d(
-            input=input_NCDHW,
-            num_filters=3,
-            filter_size=[3, 3, 3],
-            stride=[1, 1, 1],
-            padding="VALID",
-            dilation=[1, 1, 1],
-            groups=1,
-            data_format="NCDHW",
-        )
-
-
-class TestConv3DAPI_Error(unittest.TestCase):
-    def test_api(self):
-        input = paddle.static.data(
-            name="input",
-            shape=[2, 5, 5, 5, 4],
-            dtype="float32",
-        )
-
-        # ValueError: cudnn
-        def run_1():
-            paddle.static.nn.conv3d(
-                input=input,
-                num_filters=3,
-                filter_size=3,
-                stride=1,
-                padding=0,
-                dilation=1,
-                groups=1,
-                use_cudnn=[0],
-                data_format="NCDHW",
-            )
-
-        self.assertRaises(ValueError, run_1)
-
-        # ValueError: data_format
-        def run_2():
-            paddle.static.nn.conv3d(
-                input=input,
-                num_filters=3,
-                filter_size=[3, 3, 3],
-                stride=[1, 1, 1],
-                padding=0,
-                dilation=[1, 1, 1],
-                groups=1,
-                use_cudnn=False,
-                data_format="NCHWC",
-            )
-
-        self.assertRaises(ValueError, run_2)
-
-        # ValueError: padding
-        def run_3():
-            paddle.static.nn.conv3d(
-                input=input,
-                num_filters=3,
-                filter_size=3,
-                stride=1,
-                padding="SAMEE",
-                dilation=1,
-                groups=1,
-                use_cudnn=False,
-                data_format="NCDHW",
-            )
-
-        self.assertRaises(ValueError, run_3)
-
-        def run_4():
-            paddle.static.nn.conv3d(
-                input=input,
-                num_filters=3,
-                filter_size=3,
-                stride=1,
-                padding=[[0, 1], [0, 0], [0, 1], [0, 1], [0, 1]],
-                dilation=1,
-                groups=1,
-                use_cudnn=False,
-                data_format="NCDHW",
-            )
-
-        self.assertRaises(ValueError, run_4)
-
-        def run_5():
-            paddle.static.nn.conv3d(
-                input=input,
-                num_filters=3,
-                filter_size=0,
-                stride=0,
-                padding=[[0, 1], [0, 1], [0, 1], [0, 1], [0, 1]],
-                dilation=1,
-                groups=1,
-                use_cudnn=False,
-                data_format="NDHWC",
-            )
-
-        self.assertRaises(ValueError, run_5)
-
-        # ValueError: channel dimmention
-        x = paddle.static.data(
-            name="x",
-            shape=[2, 5, 5, 5, -1],
-            dtype="float32",
-        )
-
-        def run_6():
-            paddle.static.nn.conv3d(
-                input=x,
-                num_filters=3,
-                filter_size=3,
-                stride=1,
-                padding=0,
-                dilation=1,
-                groups=1,
-                use_cudnn=False,
-                data_format="NDHWC",
-            )
-
-        self.assertRaises(ValueError, run_6)
-
-        # ValueError: groups
-        def run_7():
-            paddle.static.nn.conv3d(
-                input=input,
-                num_filters=3,
-                filter_size=3,
-                stride=1,
-                padding=0,
-                dilation=1,
-                groups=3,
-                use_cudnn=False,
-                data_format="NDHWC",
-            )
-
-        self.assertRaises(ValueError, run_7)
-
-        # ValueError: filter num
-        def run_8():
-            paddle.static.nn.conv3d(
-                input=input,
-                num_filters=0,
-                filter_size=0,
-                stride=0,
-                padding=0,
-                dilation=0,
-                groups=1,
-                use_cudnn=False,
-                data_format="NDHWC",
-            )
-
-        self.assertRaises(ValueError, run_8)
+        with paddle_static_guard():
+            input_NDHWC = paddle.static.data(
+                name="input_NDHWC",
+                shape=[2, 5, 5, 5, 3],
+                dtype="float32",
+            )
+            input_NCDHW = paddle.static.data(
+                name="input_NCDHW",
+                shape=[2, 3, 5, 5, 3],
+                dtype="float32",
+            )
+
+            paddle.static.nn.conv3d(
+                input=input_NDHWC,
+                num_filters=3,
+                filter_size=[3, 3, 3],
+                stride=[1, 1, 1],
+                padding=0,
+                dilation=[1, 1, 1],
+                groups=1,
+                data_format="NCDHW",
+            )
+
+            paddle.static.nn.conv3d(
+                input=input_NCDHW,
+                num_filters=3,
+                filter_size=[3, 3, 3],
+                stride=[1, 1, 1],
+                padding=[1, 2, 1, 0, 1, 0],
+                dilation=[1, 1, 1],
+                groups=1,
+                data_format="NCDHW",
+            )
+
+            paddle.static.nn.conv3d(
+                input=input_NCDHW,
+                num_filters=3,
+                filter_size=[3, 3, 3],
+                stride=[1, 1, 1],
+                padding=[[0, 0], [0, 0], [1, 1], [1, 1], [1, 1]],
+                dilation=[1, 1, 1],
+                groups=1,
+                data_format="NCDHW",
+            )
+
+            paddle.static.nn.conv3d(
+                input=input_NDHWC,
+                num_filters=3,
+                filter_size=[3, 3, 3],
+                stride=[1, 1, 1],
+                padding=[[0, 0], [1, 1], [1, 1], [1, 1], [0, 0]],
+                dilation=[1, 1, 1],
+                groups=1,
+                data_format="NDHWC",
+            )
+
+            paddle.static.nn.conv3d(
+                input=input_NCDHW,
+                num_filters=3,
+                filter_size=[3, 3, 3],
+                stride=[1, 1, 1],
+                padding="SAME",
+                dilation=[1, 1, 1],
+                groups=1,
+                data_format="NCDHW",
+            )
+
+            paddle.static.nn.conv3d(
+                input=input_NCDHW,
+                num_filters=3,
+                filter_size=[3, 3, 3],
+                stride=[1, 1, 1],
+                padding="VALID",
+                dilation=[1, 1, 1],
+                groups=1,
+                data_format="NCDHW",
+            )
+
+
+class TestConv3DAPI_Error(unittest.TestCase):
+    def test_api(self):
+        with paddle_static_guard():
+            input = paddle.static.data(
+                name="input",
+                shape=[2, 5, 5, 5, 4],
+                dtype="float32",
+            )
+
+            # ValueError: cudnn
+            def run_1():
+                paddle.static.nn.conv3d(
+                    input=input,
+                    num_filters=3,
+                    filter_size=3,
+                    stride=1,
+                    padding=0,
+                    dilation=1,
+                    groups=1,
+                    use_cudnn=[0],
+                    data_format="NCDHW",
+                )
+
+            self.assertRaises(ValueError, run_1)
+
+            # ValueError: data_format
+            def run_2():
+                paddle.static.nn.conv3d(
+                    input=input,
+                    num_filters=3,
+                    filter_size=[3, 3, 3],
+                    stride=[1, 1, 1],
+                    padding=0,
+                    dilation=[1, 1, 1],
+                    groups=1,
+                    use_cudnn=False,
+                    data_format="NCHWC",
+                )
+
+            self.assertRaises(ValueError, run_2)
+
+            # ValueError: padding
+            def run_3():
+                paddle.static.nn.conv3d(
+                    input=input,
+                    num_filters=3,
+                    filter_size=3,
+                    stride=1,
+                    padding="SAMEE",
+                    dilation=1,
+                    groups=1,
+                    use_cudnn=False,
+                    data_format="NCDHW",
+                )
+
+            self.assertRaises(ValueError, run_3)
+
+            def run_4():
+                paddle.static.nn.conv3d(
+                    input=input,
+                    num_filters=3,
+                    filter_size=3,
+                    stride=1,
+                    padding=[[0, 1], [0, 0], [0, 1], [0, 1], [0, 1]],
+                    dilation=1,
+                    groups=1,
+                    use_cudnn=False,
+                    data_format="NCDHW",
+                )
+
+            self.assertRaises(ValueError, run_4)
+
+            def run_5():
+                paddle.static.nn.conv3d(
+                    input=input,
+                    num_filters=3,
+                    filter_size=0,
+                    stride=0,
+                    padding=[[0, 1], [0, 1], [0, 1], [0, 1], [0, 1]],
+                    dilation=1,
+                    groups=1,
+                    use_cudnn=False,
+                    data_format="NDHWC",
+                )
+
+            self.assertRaises(ValueError, run_5)
+
+            # ValueError: channel dimmention
+            x = paddle.static.data(
+                name="x",
+                shape=[2, 5, 5, 5, -1],
+                dtype="float32",
+            )
+
+            def run_6():
+                paddle.static.nn.conv3d(
+                    input=x,
+                    num_filters=3,
+                    filter_size=3,
+                    stride=1,
+                    padding=0,
+                    dilation=1,
+                    groups=1,
+                    use_cudnn=False,
+                    data_format="NDHWC",
+                )
+
+            self.assertRaises(ValueError, run_6)
+
+            # ValueError: groups
+            def run_7():
+                paddle.static.nn.conv3d(
+                    input=input,
+                    num_filters=3,
+                    filter_size=3,
+                    stride=1,
+                    padding=0,
+                    dilation=1,
+                    groups=3,
+                    use_cudnn=False,
+                    data_format="NDHWC",
+                )
+
+            self.assertRaises(ValueError, run_7)
+
+            # ValueError: filter num
+            def run_8():
+                paddle.static.nn.conv3d(
+                    input=input,
+                    num_filters=0,
+                    filter_size=0,
+                    stride=0,
+                    padding=0,
+                    dilation=0,
+                    groups=1,
+                    use_cudnn=False,
+                    data_format="NDHWC",
+                )
+
+            self.assertRaises(ValueError, run_8)

 if __name__ == '__main__':
-    paddle.enable_static()
     unittest.main()
...@@ -19,7 +19,7 @@ import numpy as np ...@@ -19,7 +19,7 @@ import numpy as np
import paddle import paddle
paddle.enable_static() paddle.enable_static()
from op_test import OpTest from eager_op_test import OpTest
import paddle.fluid.core as core import paddle.fluid.core as core
...@@ -134,6 +134,34 @@ def conv3dtranspose_forward_naive(input_, filter_, attrs): ...@@ -134,6 +134,34 @@ def conv3dtranspose_forward_naive(input_, filter_, attrs):
return out return out
def conv3d_transpose_wrapper(
x,
weight,
stride=1,
padding=0,
output_padding=[],
output_size=[],
padding_algorithm="EXPLICIT",
groups=1,
dilation=1,
data_format="NCDHW",
):
if data_format == "AnyLayout":
data_format = "NCDHW"
return paddle._C_ops.conv3d_transpose(
x,
weight,
stride,
padding,
output_padding,
output_size,
padding_algorithm,
groups,
dilation,
data_format,
)
class TestConv3DTransposeOp(OpTest): class TestConv3DTransposeOp(OpTest):
def setUp(self): def setUp(self):
# init as conv transpose # init as conv transpose
...@@ -234,6 +262,7 @@ class TestConv3DTransposeOp(OpTest): ...@@ -234,6 +262,7 @@ class TestConv3DTransposeOp(OpTest):
def init_op_type(self): def init_op_type(self):
self.op_type = "conv3d_transpose" self.op_type = "conv3d_transpose"
self.python_api = conv3d_transpose_wrapper
class TestWithSymmetricPad(TestConv3DTransposeOp): class TestWithSymmetricPad(TestConv3DTransposeOp):
...@@ -335,6 +364,7 @@ class TestCUDNN(TestConv3DTransposeOp): ...@@ -335,6 +364,7 @@ class TestCUDNN(TestConv3DTransposeOp):
def init_op_type(self): def init_op_type(self):
self.use_cudnn = True self.use_cudnn = True
self.op_type = "conv3d_transpose" self.op_type = "conv3d_transpose"
self.python_api = conv3d_transpose_wrapper
@unittest.skipIf( @unittest.skipIf(
...@@ -353,6 +383,7 @@ class TestCUDNNWithSymmetricPad(TestWithSymmetricPad): ...@@ -353,6 +383,7 @@ class TestCUDNNWithSymmetricPad(TestWithSymmetricPad):
def init_op_type(self): def init_op_type(self):
self.use_cudnn = True self.use_cudnn = True
self.op_type = "conv3d_transpose" self.op_type = "conv3d_transpose"
self.python_api = conv3d_transpose_wrapper
@unittest.skipIf( @unittest.skipIf(
...@@ -371,6 +402,7 @@ class TestCUDNNWithAsymmetricPad(TestWithAsymmetricPad): ...@@ -371,6 +402,7 @@ class TestCUDNNWithAsymmetricPad(TestWithAsymmetricPad):
def init_op_type(self): def init_op_type(self):
self.use_cudnn = True self.use_cudnn = True
self.op_type = "conv3d_transpose" self.op_type = "conv3d_transpose"
self.python_api = conv3d_transpose_wrapper
@unittest.skipIf( @unittest.skipIf(
...@@ -389,6 +421,7 @@ class TestCUDNNWithSAMEPad(TestWithSAMEPad): ...@@ -389,6 +421,7 @@ class TestCUDNNWithSAMEPad(TestWithSAMEPad):
def init_op_type(self): def init_op_type(self):
self.use_cudnn = True self.use_cudnn = True
self.op_type = "conv3d_transpose" self.op_type = "conv3d_transpose"
self.python_api = conv3d_transpose_wrapper
@unittest.skipIf( @unittest.skipIf(
...@@ -407,6 +440,7 @@ class TestCUDNNWithVALIDPad(TestWithVALIDPad): ...@@ -407,6 +440,7 @@ class TestCUDNNWithVALIDPad(TestWithVALIDPad):
def init_op_type(self): def init_op_type(self):
self.use_cudnn = True self.use_cudnn = True
self.op_type = "conv3d_transpose" self.op_type = "conv3d_transpose"
self.python_api = conv3d_transpose_wrapper
@unittest.skipIf( @unittest.skipIf(
...@@ -425,6 +459,7 @@ class TestCUDNNWithStride(TestWithStride): ...@@ -425,6 +459,7 @@ class TestCUDNNWithStride(TestWithStride):
def init_op_type(self): def init_op_type(self):
self.use_cudnn = True self.use_cudnn = True
self.op_type = "conv3d_transpose" self.op_type = "conv3d_transpose"
self.python_api = conv3d_transpose_wrapper
@unittest.skipIf( @unittest.skipIf(
...@@ -443,21 +478,22 @@ class TestCUDNNWithGroups(TestWithGroups): ...@@ -443,21 +478,22 @@ class TestCUDNNWithGroups(TestWithGroups):
def init_op_type(self): def init_op_type(self):
self.use_cudnn = True self.use_cudnn = True
self.op_type = "conv3d_transpose" self.op_type = "conv3d_transpose"
self.python_api = conv3d_transpose_wrapper
# Please Don't remove the following code. # Please Don't remove the following code.
# Currently, CI uses cuDNN v5.0, which does not support dilated conv. # Currently, CI uses cuDNN v5.0, which does not support dilated conv.
# class TestCUDNNWithDilation(TestWithDilation): # class TestCUDNNWithDilation(TestWithDilation):
# def init_test_case(self): # def init_test_case(self):
# self.pad = [1, 1, 1] # self.pad = [1, 1, 1]
# self.stride = [2, 2, 2] # self.stride = [2, 2, 2]
# self.dilations = [2, 2, 2] # self.dilations = [2, 2, 2]
# self.input_size = [2, 3, 5, 5, 5] # NCDHW # self.input_size = [2, 3, 5, 5, 5] # NCDHW
# f_c = self.input_size[1] # f_c = self.input_size[1]
# self.filter_size = [f_c, 6, 3, 3, 3] # self.filter_size = [f_c, 6, 3, 3, 3]
# #
# def init_op_type(self): # def init_op_type(self):
# self.op_type = "conv3d_transpose" # self.op_type = "conv3d_transpose"
#         self.python_api = conv3d_transpose_wrapper
@unittest.skipIf( @unittest.skipIf(
...@@ -477,6 +513,7 @@ class TestCUDNN_NHWC(TestConv3DTransposeOp): ...@@ -477,6 +513,7 @@ class TestCUDNN_NHWC(TestConv3DTransposeOp):
def init_op_type(self): def init_op_type(self):
self.use_cudnn = True self.use_cudnn = True
self.op_type = "conv3d_transpose" self.op_type = "conv3d_transpose"
self.python_api = conv3d_transpose_wrapper
@unittest.skipIf( @unittest.skipIf(
...@@ -496,6 +533,7 @@ class TestCUDNNWithSymmetricPad_NHWC(TestWithSymmetricPad): ...@@ -496,6 +533,7 @@ class TestCUDNNWithSymmetricPad_NHWC(TestWithSymmetricPad):
def init_op_type(self): def init_op_type(self):
self.use_cudnn = True self.use_cudnn = True
self.op_type = "conv3d_transpose" self.op_type = "conv3d_transpose"
self.python_api = conv3d_transpose_wrapper
@unittest.skipIf( @unittest.skipIf(
...@@ -515,6 +553,7 @@ class TestCUDNNWithAsymmetricPad_NHWC(TestWithAsymmetricPad): ...@@ -515,6 +553,7 @@ class TestCUDNNWithAsymmetricPad_NHWC(TestWithAsymmetricPad):
def init_op_type(self): def init_op_type(self):
self.use_cudnn = True self.use_cudnn = True
self.op_type = "conv3d_transpose" self.op_type = "conv3d_transpose"
self.python_api = conv3d_transpose_wrapper
@unittest.skipIf( @unittest.skipIf(
...@@ -534,6 +573,7 @@ class TestCUDNNWithStride_NHWC(TestWithStride): ...@@ -534,6 +573,7 @@ class TestCUDNNWithStride_NHWC(TestWithStride):
def init_op_type(self): def init_op_type(self):
self.use_cudnn = True self.use_cudnn = True
self.op_type = "conv3d_transpose" self.op_type = "conv3d_transpose"
self.python_api = conv3d_transpose_wrapper
@unittest.skipIf( @unittest.skipIf(
...@@ -553,6 +593,7 @@ class TestCUDNNWithGroups_NHWC(TestWithGroups): ...@@ -553,6 +593,7 @@ class TestCUDNNWithGroups_NHWC(TestWithGroups):
def init_op_type(self): def init_op_type(self):
self.use_cudnn = True self.use_cudnn = True
self.op_type = "conv3d_transpose" self.op_type = "conv3d_transpose"
self.python_api = conv3d_transpose_wrapper
class TestConv3dTranspose(unittest.TestCase): class TestConv3dTranspose(unittest.TestCase):
......
...@@ -15,7 +15,7 @@ ...@@ -15,7 +15,7 @@
import unittest import unittest
import numpy as np import numpy as np
from op_test import OpTest from eager_op_test import OpTest
def conv_shift_forward(x, y): def conv_shift_forward(x, y):
......
...@@ -15,7 +15,7 @@ ...@@ -15,7 +15,7 @@
import unittest import unittest
import numpy as np import numpy as np
from op_test import OpTest from eager_op_test import OpTest
class TestCosSimOp(OpTest): class TestCosSimOp(OpTest):
......
...@@ -16,7 +16,7 @@ import random ...@@ -16,7 +16,7 @@ import random
import unittest import unittest
import numpy as np import numpy as np
from op_test import OpTest from eager_op_test import OpTest
class CRFDecoding: class CRFDecoding:
......
...@@ -15,7 +15,7 @@ ...@@ -15,7 +15,7 @@
import unittest import unittest
import numpy as np import numpy as np
from op_test import OpTest from eager_op_test import OpTest
import paddle import paddle
import paddle.fluid as fluid import paddle.fluid as fluid
...@@ -81,10 +81,10 @@ class TestCropOp(OpTest): ...@@ -81,10 +81,10 @@ class TestCropOp(OpTest):
self.offsets = [1, 2] self.offsets = [1, 2]
def test_check_output(self): def test_check_output(self):
self.check_output(check_eager=True) self.check_output()
def test_check_grad_normal(self): def test_check_grad_normal(self):
self.check_grad(['X'], 'Out', check_eager=True) self.check_grad(['X'], 'Out')
class TestCase1(TestCropOp): class TestCase1(TestCropOp):
......
...@@ -15,7 +15,7 @@ ...@@ -15,7 +15,7 @@
import unittest import unittest
import numpy as np import numpy as np
from op_test import OpTest from eager_op_test import OpTest
import paddle import paddle
import paddle.fluid as fluid import paddle.fluid as fluid
...@@ -82,10 +82,10 @@ class TestCropTensorOp(OpTest): ...@@ -82,10 +82,10 @@ class TestCropTensorOp(OpTest):
self.offsets = [1, 2] self.offsets = [1, 2]
def test_check_output(self): def test_check_output(self):
self.check_output(check_eager=True) self.check_output()
def test_check_grad_normal(self): def test_check_grad_normal(self):
self.check_grad(['X'], 'Out', check_eager=True) self.check_grad(['X'], 'Out')
class TestCase1(TestCropTensorOp): class TestCase1(TestCropTensorOp):
...@@ -183,10 +183,10 @@ class TestCropTensorOpTensorAttr(OpTest): ...@@ -183,10 +183,10 @@ class TestCropTensorOpTensorAttr(OpTest):
self.shape_attr = [0, 0] self.shape_attr = [0, 0]
def test_check_output(self): def test_check_output(self):
self.check_output(check_eager=True) self.check_output()
def test_check_grad_normal(self): def test_check_grad_normal(self):
self.check_grad(["X"], "Out", check_eager=True) self.check_grad(["X"], "Out")
class TestCropTensorOpTensorAttrCase1(TestCropTensorOpTensorAttr): class TestCropTensorOpTensorAttrCase1(TestCropTensorOpTensorAttr):
......
...@@ -15,7 +15,7 @@ ...@@ -15,7 +15,7 @@
import unittest import unittest
import numpy as np import numpy as np
from op_test import OpTest from eager_op_test import OpTest
class CrossEntropy2OpTestBase(OpTest): class CrossEntropy2OpTestBase(OpTest):
......
...@@ -15,7 +15,7 @@ ...@@ -15,7 +15,7 @@
import unittest import unittest
import numpy as np import numpy as np
from op_test import OpTest, randomize_probability from eager_op_test import OpTest, paddle_static_guard, randomize_probability
import paddle import paddle
import paddle.fluid as fluid import paddle.fluid as fluid
...@@ -427,17 +427,18 @@ class TestCrossEntropyOpError(unittest.TestCase): ...@@ -427,17 +427,18 @@ class TestCrossEntropyOpError(unittest.TestCase):
self.assertRaises(TypeError, test_Variable) self.assertRaises(TypeError, test_Variable)
def test_dtype(): def test_dtype():
# the input dtype of cross_entropy must be float16 or float32 or float64 with paddle_static_guard():
# float16 only can be set on GPU place # the input dtype of cross_entropy must be float16 or float32 or float64
x2 = paddle.static.data( # float16 only can be set on GPU place
name='x2', shape=[-1, 3, 4, 5, 6], dtype="int32" x2 = paddle.static.data(
) name='x2', shape=[-1, 3, 4, 5, 6], dtype="int32"
lab2 = paddle.static.data( )
name='lab2', shape=[-1, 3, 4, 5, 6], dtype="int32" lab2 = paddle.static.data(
) name='lab2', shape=[-1, 3, 4, 5, 6], dtype="int32"
paddle.nn.functional.cross_entropy( )
x2, lab2, reduction='none', use_softmax=False paddle.nn.functional.cross_entropy(
) x2, lab2, reduction='none', use_softmax=False
)
self.assertRaises(TypeError, test_dtype) self.assertRaises(TypeError, test_dtype)
......
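The test above wraps its static-graph API calls in paddle_static_guard, which eager_op_test exports alongside OpTest: the new harness runs tests with dygraph enabled, so any paddle.static usage must be scoped explicitly. Its behavior is presumably equivalent to this context manager (an assumption for illustration; the real helper lives in eager_op_test):
import contextlib
import paddle
@contextlib.contextmanager
def paddle_static_guard():
    # assumed behavior: enter static mode on entry, restore dygraph on exit
    try:
        paddle.enable_static()
        yield
    finally:
        paddle.disable_static()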
...@@ -15,7 +15,7 @@ ...@@ -15,7 +15,7 @@
import unittest import unittest
import numpy as np import numpy as np
from op_test import OpTest from eager_op_test import OpTest
import paddle import paddle
import paddle.fluid as fluid import paddle.fluid as fluid
...@@ -47,10 +47,10 @@ class TestCrossOp(OpTest): ...@@ -47,10 +47,10 @@ class TestCrossOp(OpTest):
self.outputs = {'Out': np.array(z_list).reshape(self.shape)} self.outputs = {'Out': np.array(z_list).reshape(self.shape)}
def test_check_output(self): def test_check_output(self):
self.check_output(check_eager=True) self.check_output()
def test_check_grad_normal(self): def test_check_grad_normal(self):
self.check_grad(['X', 'Y'], 'Out', check_eager=True) self.check_grad(['X', 'Y'], 'Out')
class TestCrossOpCase1(TestCrossOp): class TestCrossOpCase1(TestCrossOp):
......
...@@ -16,7 +16,7 @@ import random ...@@ -16,7 +16,7 @@ import random
import unittest import unittest
import numpy as np import numpy as np
from op_test import OpTest from eager_op_test import OpTest
import paddle import paddle
import paddle.fluid.core as core import paddle.fluid.core as core
...@@ -109,7 +109,7 @@ class TestCumprod(OpTest): ...@@ -109,7 +109,7 @@ class TestCumprod(OpTest):
for dim in range(-len(self.shape), len(self.shape)): for dim in range(-len(self.shape), len(self.shape)):
for zero_num in self.zero_nums: for zero_num in self.zero_nums:
self.prepare_inputs_outputs_attrs(dim, zero_num) self.prepare_inputs_outputs_attrs(dim, zero_num)
self.check_output(check_eager=True) self.check_output()
# test backward. # test backward.
def test_check_grad(self): def test_check_grad(self):
...@@ -118,14 +118,13 @@ class TestCumprod(OpTest): ...@@ -118,14 +118,13 @@ class TestCumprod(OpTest):
self.prepare_inputs_outputs_attrs(dim, zero_num) self.prepare_inputs_outputs_attrs(dim, zero_num)
self.init_grad_input_output(dim) self.init_grad_input_output(dim)
if self.dtype == np.float64: if self.dtype == np.float64:
self.check_grad(['X'], 'Out', check_eager=True) self.check_grad(['X'], 'Out')
else: else:
self.check_grad( self.check_grad(
['X'], ['X'],
'Out', 'Out',
user_defined_grads=[self.grad_x], user_defined_grads=[self.grad_x],
user_defined_grad_outputs=[self.grad_out], user_defined_grad_outputs=[self.grad_out],
check_eager=True,
) )
......
...@@ -16,7 +16,7 @@ import unittest ...@@ -16,7 +16,7 @@ import unittest
from math import log from math import log
import numpy as np import numpy as np
from op_test import OpTest from eager_op_test import OpTest
def cvm_compute(X, item_width, use_cvm): def cvm_compute(X, item_width, use_cvm):
......
...@@ -15,7 +15,7 @@ ...@@ -15,7 +15,7 @@
import unittest import unittest
import numpy as np import numpy as np
from op_test import OpTest from eager_op_test import OpTest
class TestDecayedAdagradOp1(OpTest): class TestDecayedAdagradOp1(OpTest):
...@@ -46,7 +46,7 @@ class TestDecayedAdagradOp1(OpTest): ...@@ -46,7 +46,7 @@ class TestDecayedAdagradOp1(OpTest):
self.outputs = {'ParamOut': param_out, 'MomentOut': moment_out} self.outputs = {'ParamOut': param_out, 'MomentOut': moment_out}
def test_check_output(self): def test_check_output(self):
self.check_output(check_eager=True) self.check_output()
class TestDecayedAdagradOp2(OpTest): class TestDecayedAdagradOp2(OpTest):
...@@ -77,7 +77,7 @@ class TestDecayedAdagradOp2(OpTest): ...@@ -77,7 +77,7 @@ class TestDecayedAdagradOp2(OpTest):
self.outputs = {'ParamOut': param_out, 'MomentOut': moment_out} self.outputs = {'ParamOut': param_out, 'MomentOut': moment_out}
def test_check_output(self): def test_check_output(self):
self.check_output(check_eager=True) self.check_output()
if __name__ == "__main__": if __name__ == "__main__":
......
...@@ -15,7 +15,7 @@ ...@@ -15,7 +15,7 @@
import unittest import unittest
import numpy as np import numpy as np
from op_test import OpTest from eager_op_test import OpTest
import paddle import paddle
...@@ -194,14 +194,13 @@ class TestModulatedDeformableConvOp(OpTest): ...@@ -194,14 +194,13 @@ class TestModulatedDeformableConvOp(OpTest):
self.outputs = {'Output': output} self.outputs = {'Output': output}
def test_check_output(self): def test_check_output(self):
self.check_output(check_eager=True) self.check_output()
def test_check_grad(self): def test_check_grad(self):
self.check_grad( self.check_grad(
{'Input', 'Offset', 'Mask', 'Filter'}, {'Input', 'Offset', 'Mask', 'Filter'},
'Output', 'Output',
max_relative_error=0.05, max_relative_error=0.05,
check_eager=True,
) )
def init_test_case(self): def init_test_case(self):
......
...@@ -15,7 +15,7 @@ ...@@ -15,7 +15,7 @@
import unittest import unittest
import numpy as np import numpy as np
from op_test import OpTest from eager_op_test import OpTest
import paddle import paddle
...@@ -188,14 +188,13 @@ class TestModulatedDeformableConvOp(OpTest): ...@@ -188,14 +188,13 @@ class TestModulatedDeformableConvOp(OpTest):
self.outputs = {'Output': output} self.outputs = {'Output': output}
def test_check_output(self): def test_check_output(self):
self.check_output(check_eager=True) self.check_output()
def test_check_grad(self): def test_check_grad(self):
self.check_grad( self.check_grad(
['Input', 'Offset', 'Filter'], ['Input', 'Offset', 'Filter'],
'Output', 'Output',
max_relative_error=0.05, max_relative_error=0.05,
check_eager=True,
) )
def test_check_grad_no_filter(self): def test_check_grad_no_filter(self):
...@@ -204,7 +203,6 @@ class TestModulatedDeformableConvOp(OpTest): ...@@ -204,7 +203,6 @@ class TestModulatedDeformableConvOp(OpTest):
'Output', 'Output',
max_relative_error=0.1, max_relative_error=0.1,
no_grad_set=set(['Filter']), no_grad_set=set(['Filter']),
check_eager=True,
) )
def init_test_case(self): def init_test_case(self):
......
...@@ -16,7 +16,7 @@ import math ...@@ -16,7 +16,7 @@ import math
import unittest import unittest
import numpy as np import numpy as np
from op_test import OpTest from eager_op_test import OpTest
class TestDensityPriorBoxOp(OpTest): class TestDensityPriorBoxOp(OpTest):
......
...@@ -16,7 +16,7 @@ import math ...@@ -16,7 +16,7 @@ import math
import unittest import unittest
import numpy as np import numpy as np
from op_test import OpTest from eager_op_test import OpTest
def quantize_max_abs(x, max_range): def quantize_max_abs(x, max_range):
......
...@@ -15,7 +15,7 @@ ...@@ -15,7 +15,7 @@
import unittest import unittest
import numpy as np import numpy as np
from op_test import OpTest from eager_op_test import OpTest
def dequantize_log(x, dict_data): def dequantize_log(x, dict_data):
......
...@@ -15,7 +15,7 @@ ...@@ -15,7 +15,7 @@
import unittest import unittest
import numpy as np import numpy as np
from op_test import OpTest from eager_op_test import OpTest
import paddle import paddle
...@@ -30,10 +30,10 @@ class TestDeterminantOp(OpTest): ...@@ -30,10 +30,10 @@ class TestDeterminantOp(OpTest):
self.outputs = {'Out': self.target} self.outputs = {'Out': self.target}
def test_check_output(self): def test_check_output(self):
self.check_output(check_eager=True) self.check_output()
def test_check_grad(self): def test_check_grad(self):
self.check_grad(['Input'], ['Out'], check_eager=True) self.check_grad(['Input'], ['Out'])
def init_data(self): def init_data(self):
np.random.seed(0) np.random.seed(0)
...@@ -95,13 +95,11 @@ class TestSlogDeterminantOp(OpTest): ...@@ -95,13 +95,11 @@ class TestSlogDeterminantOp(OpTest):
self.outputs = {'Out': self.target} self.outputs = {'Out': self.target}
def test_check_output(self): def test_check_output(self):
self.check_output(check_eager=True) self.check_output()
def test_check_grad(self): def test_check_grad(self):
# the slog det's grad value is always huge # the slog det's grad value is always huge
self.check_grad( self.check_grad(['Input'], ['Out'], max_relative_error=0.1)
['Input'], ['Out'], max_relative_error=0.1, check_eager=True
)
def init_data(self): def init_data(self):
np.random.seed(0) np.random.seed(0)
......
...@@ -15,7 +15,7 @@ ...@@ -15,7 +15,7 @@
import unittest import unittest
import numpy as np import numpy as np
from op_test import OpTest from eager_op_test import OpTest
import paddle import paddle
import paddle.fluid.core as core import paddle.fluid.core as core
......
...@@ -15,7 +15,7 @@ ...@@ -15,7 +15,7 @@
import unittest import unittest
import numpy as np import numpy as np
from op_test import OpTest from eager_op_test import OpTest, paddle_static_guard
import paddle.fluid as fluid import paddle.fluid as fluid
import paddle.fluid.core as core import paddle.fluid.core as core
...@@ -30,7 +30,7 @@ class TestDiagEmbedOp(OpTest): ...@@ -30,7 +30,7 @@ class TestDiagEmbedOp(OpTest):
self.outputs = {'Out': self.target} self.outputs = {'Out': self.target}
def test_check_output(self): def test_check_output(self):
self.check_output(check_eager=True) self.check_output()
def init_config(self): def init_config(self):
self.case = np.random.randn(2, 3).astype('float32') self.case = np.random.randn(2, 3).astype('float32')
...@@ -51,27 +51,28 @@ class TestDiagEmbedOpCase1(TestDiagEmbedOp): ...@@ -51,27 +51,28 @@ class TestDiagEmbedOpCase1(TestDiagEmbedOp):
class TestDiagEmbedAPICase(unittest.TestCase): class TestDiagEmbedAPICase(unittest.TestCase):
def test_case1(self): def test_case1(self):
diag_embed = np.random.randn(2, 3, 4).astype('float32') with paddle_static_guard():
data1 = fluid.data(name='data1', shape=[2, 3, 4], dtype='float32') diag_embed = np.random.randn(2, 3, 4).astype('float32')
out1 = F.diag_embed(data1) data1 = fluid.data(name='data1', shape=[2, 3, 4], dtype='float32')
out2 = F.diag_embed(data1, offset=1, dim1=-2, dim2=3) out1 = F.diag_embed(data1)
out2 = F.diag_embed(data1, offset=1, dim1=-2, dim2=3)
place = core.CPUPlace() place = core.CPUPlace()
exe = fluid.Executor(place) exe = fluid.Executor(place)
results = exe.run( results = exe.run(
fluid.default_main_program(), fluid.default_main_program(),
feed={"data1": diag_embed}, feed={"data1": diag_embed},
fetch_list=[out1, out2], fetch_list=[out1, out2],
return_numpy=True, return_numpy=True,
) )
target1 = np.stack( target1 = np.stack(
[np.stack([np.diag(s, 0) for s in r], 0) for r in diag_embed], 0 [np.stack([np.diag(s, 0) for s in r], 0) for r in diag_embed], 0
) )
target2 = np.stack( target2 = np.stack(
[np.stack([np.diag(s, 1) for s in r], 0) for r in diag_embed], 0 [np.stack([np.diag(s, 1) for s in r], 0) for r in diag_embed], 0
) )
np.testing.assert_allclose(results[0], target1, rtol=1e-05) np.testing.assert_allclose(results[0], target1, rtol=1e-05)
np.testing.assert_allclose(results[1], target2, rtol=1e-05) np.testing.assert_allclose(results[1], target2, rtol=1e-05)
if __name__ == "__main__": if __name__ == "__main__":
......
...@@ -15,7 +15,7 @@ ...@@ -15,7 +15,7 @@
import unittest import unittest
import numpy as np import numpy as np
from op_test import OpTest from eager_op_test import OpTest
import paddle import paddle
import paddle.fluid as fluid import paddle.fluid as fluid
...@@ -41,11 +41,11 @@ class TestDiagV2Op(OpTest): ...@@ -41,11 +41,11 @@ class TestDiagV2Op(OpTest):
def test_check_output(self): def test_check_output(self):
paddle.enable_static() paddle.enable_static()
self.check_output(check_eager=False) self.check_output()
def test_check_grad(self): def test_check_grad(self):
paddle.enable_static() paddle.enable_static()
self.check_grad(['X'], 'Out', check_eager=False) self.check_grad(['X'], 'Out')
def init_config(self): def init_config(self):
pass pass
......
...@@ -15,7 +15,7 @@ ...@@ -15,7 +15,7 @@
import unittest import unittest
import numpy as np import numpy as np
from op_test import OpTest from eager_op_test import OpTest
import paddle import paddle
...@@ -30,10 +30,10 @@ class TestDiagonalOp(OpTest): ...@@ -30,10 +30,10 @@ class TestDiagonalOp(OpTest):
self.outputs = {'Out': self.target} self.outputs = {'Out': self.target}
def test_check_output(self): def test_check_output(self):
self.check_output(check_eager=True) self.check_output()
def test_check_grad(self): def test_check_grad(self):
self.check_grad(['Input'], 'Out', check_eager=True) self.check_grad(['Input'], 'Out')
def init_config(self): def init_config(self):
self.case = np.random.randn(10, 5, 2).astype('float64') self.case = np.random.randn(10, 5, 2).astype('float64')
...@@ -80,7 +80,6 @@ class TestDiagonalOpCase2(TestDiagonalOp): ...@@ -80,7 +80,6 @@ class TestDiagonalOpCase2(TestDiagonalOp):
'Out', 'Out',
user_defined_grads=[self.grad_x], user_defined_grads=[self.grad_x],
user_defined_grad_outputs=[self.grad_out], user_defined_grad_outputs=[self.grad_out],
check_eager=True,
) )
......
...@@ -15,7 +15,7 @@ ...@@ -15,7 +15,7 @@
import unittest import unittest
import numpy as np import numpy as np
from op_test import OpTest from eager_op_test import OpTest
from scipy.special import psi from scipy.special import psi
import paddle import paddle
...@@ -42,10 +42,10 @@ class TestDigammaOp(OpTest): ...@@ -42,10 +42,10 @@ class TestDigammaOp(OpTest):
self.dtype = np.float64 self.dtype = np.float64
def test_check_output(self): def test_check_output(self):
self.check_output(check_eager=True) self.check_output()
def test_check_grad_normal(self): def test_check_grad_normal(self):
self.check_grad(['X'], 'Out', check_eager=True) self.check_grad(['X'], 'Out')
class TestDigammaOpFp32(TestDigammaOp): class TestDigammaOpFp32(TestDigammaOp):
......
...@@ -15,7 +15,7 @@ ...@@ -15,7 +15,7 @@
import unittest import unittest
import numpy as np import numpy as np
from op_test import OpTest from eager_op_test import OpTest
import paddle import paddle
import paddle.fluid as fluid import paddle.fluid as fluid
...@@ -113,14 +113,13 @@ class TestDistOp(OpTest): ...@@ -113,14 +113,13 @@ class TestDistOp(OpTest):
return x_grad, y_grad return x_grad, y_grad
def test_check_output(self): def test_check_output(self):
self.check_output(check_eager=True) self.check_output()
def test_check_grad(self): def test_check_grad(self):
self.check_grad( self.check_grad(
["X", "Y"], ["X", "Y"],
"Out", "Out",
user_defined_grads=self.gradient, user_defined_grads=self.gradient,
check_eager=True,
) )
......
...@@ -15,7 +15,7 @@ ...@@ -15,7 +15,7 @@
import unittest import unittest
import numpy as np import numpy as np
from op_test import OpTest from eager_op_test import OpTest
import paddle import paddle
...@@ -142,7 +142,7 @@ class TestDistributeFPNProposalsOp(OpTest): ...@@ -142,7 +142,7 @@ class TestDistributeFPNProposalsOp(OpTest):
self.set_data() self.set_data()
def test_check_output(self): def test_check_output(self):
self.check_output() self.check_output(check_dygraph=False)
class TestDistributeFPNProposalsOpWithRoisNum(TestDistributeFPNProposalsOp): class TestDistributeFPNProposalsOpWithRoisNum(TestDistributeFPNProposalsOp):
......
...@@ -15,7 +15,7 @@ ...@@ -15,7 +15,7 @@
import unittest import unittest
import numpy as np import numpy as np
from op_test import OpTest from eager_op_test import OpTest
class TestDpsgdOp(OpTest): class TestDpsgdOp(OpTest):
...@@ -43,7 +43,7 @@ class TestDpsgdOp(OpTest): ...@@ -43,7 +43,7 @@ class TestDpsgdOp(OpTest):
self.outputs = {'ParamOut': param_out} self.outputs = {'ParamOut': param_out}
def test_check_output(self): def test_check_output(self):
self.check_output(check_eager=True) self.check_output()
def dpsgd_step(inputs, attributes): def dpsgd_step(inputs, attributes):
......
...@@ -15,7 +15,7 @@ ...@@ -15,7 +15,7 @@
import unittest import unittest
import numpy as np import numpy as np
from op_test import OpTest, convert_float_to_uint16, skip_check_grad_ci from eager_op_test import OpTest, convert_float_to_uint16, skip_check_grad_ci
import paddle import paddle
import paddle.fluid as fluid import paddle.fluid as fluid
...@@ -25,9 +25,30 @@ from paddle import _C_ops ...@@ -25,9 +25,30 @@ from paddle import _C_ops
from paddle.fluid import Program, program_guard from paddle.fluid import Program, program_guard
def dropout_wapper(
X,
Seed=None,
dropout_prob=0.5,
is_test=False,
dropout_implementation="downgrade_in_infer",
seed=0,
fix_seed=False,
):
return paddle._C_ops.dropout(
X,
Seed,
dropout_prob,
is_test,
dropout_implementation,
seed,
fix_seed,
)
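Same pattern as conv3d_transpose_wrapper: dropout_wapper maps the legacy op's inputs and attributes (X, Seed, dropout_prob, is_test, dropout_implementation, seed, fix_seed) positionally onto paddle._C_ops.dropout, and each dropout test class below binds it through self.python_api. A direct eager call would look roughly like this (a sketch; it assumes the binding returns an (out, mask) pair, mirroring the legacy op's Out/Mask outputs):
import numpy as np
import paddle
x = paddle.to_tensor(np.random.rand(4, 8).astype("float32"))
out, mask = dropout_wapper(x, dropout_prob=0.5, fix_seed=True, seed=7)
print(out.shape, mask.shape)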
class TestDropoutOp(OpTest): class TestDropoutOp(OpTest):
def setUp(self): def setUp(self):
self.op_type = "dropout" self.op_type = "dropout"
self.python_api = dropout_wapper
self.inputs = {'X': np.random.random((32, 64)).astype("float32")} self.inputs = {'X': np.random.random((32, 64)).astype("float32")}
self.attrs = {'dropout_prob': 0.0, 'fix_seed': True, 'is_test': False} self.attrs = {'dropout_prob': 0.0, 'fix_seed': True, 'is_test': False}
self.outputs = { self.outputs = {
...@@ -45,6 +66,7 @@ class TestDropoutOp(OpTest): ...@@ -45,6 +66,7 @@ class TestDropoutOp(OpTest):
class TestDropoutOpInput1d(OpTest): class TestDropoutOpInput1d(OpTest):
def setUp(self): def setUp(self):
self.op_type = "dropout" self.op_type = "dropout"
self.python_api = dropout_wapper
self.inputs = {'X': np.random.random((2000,)).astype("float32")} self.inputs = {'X': np.random.random((2000,)).astype("float32")}
self.attrs = {'dropout_prob': 0.0, 'fix_seed': True, 'is_test': False} self.attrs = {'dropout_prob': 0.0, 'fix_seed': True, 'is_test': False}
self.outputs = { self.outputs = {
...@@ -62,6 +84,7 @@ class TestDropoutOpInput1d(OpTest): ...@@ -62,6 +84,7 @@ class TestDropoutOpInput1d(OpTest):
class TestDropoutOp2(TestDropoutOp): class TestDropoutOp2(TestDropoutOp):
def setUp(self): def setUp(self):
self.op_type = "dropout" self.op_type = "dropout"
self.python_api = dropout_wapper
self.inputs = {'X': np.random.random((32, 64)).astype("float32")} self.inputs = {'X': np.random.random((32, 64)).astype("float32")}
self.attrs = {'dropout_prob': 1.0, 'fix_seed': True, 'is_test': False} self.attrs = {'dropout_prob': 1.0, 'fix_seed': True, 'is_test': False}
self.outputs = { self.outputs = {
...@@ -73,6 +96,7 @@ class TestDropoutOp2(TestDropoutOp): ...@@ -73,6 +96,7 @@ class TestDropoutOp2(TestDropoutOp):
class TestDropoutOp3(TestDropoutOp): class TestDropoutOp3(TestDropoutOp):
def setUp(self): def setUp(self):
self.op_type = "dropout" self.op_type = "dropout"
self.python_api = dropout_wapper
self.inputs = {'X': np.random.random((32, 64, 2)).astype("float32")} self.inputs = {'X': np.random.random((32, 64, 2)).astype("float32")}
self.attrs = {'dropout_prob': 0.0, 'fix_seed': True, 'is_test': False} self.attrs = {'dropout_prob': 0.0, 'fix_seed': True, 'is_test': False}
self.outputs = { self.outputs = {
...@@ -85,6 +109,7 @@ class TestDropoutOp3(TestDropoutOp): ...@@ -85,6 +109,7 @@ class TestDropoutOp3(TestDropoutOp):
class TestDropoutOp4(OpTest): class TestDropoutOp4(OpTest):
def setUp(self): def setUp(self):
self.op_type = "dropout" self.op_type = "dropout"
self.python_api = dropout_wapper
self.inputs = {'X': np.random.random((32, 64)).astype("float32")} self.inputs = {'X': np.random.random((32, 64)).astype("float32")}
self.attrs = {'dropout_prob': 0.35, 'fix_seed': True, 'is_test': True} self.attrs = {'dropout_prob': 0.35, 'fix_seed': True, 'is_test': True}
self.outputs = { self.outputs = {
...@@ -99,6 +124,7 @@ class TestDropoutOp4(OpTest): ...@@ -99,6 +124,7 @@ class TestDropoutOp4(OpTest):
class TestDropoutOp5(OpTest): class TestDropoutOp5(OpTest):
def setUp(self): def setUp(self):
self.op_type = "dropout" self.op_type = "dropout"
self.python_api = dropout_wapper
self.inputs = {'X': np.random.random((32, 64, 3)).astype("float32")} self.inputs = {'X': np.random.random((32, 64, 3)).astype("float32")}
self.attrs = {'dropout_prob': 0.75, 'is_test': True} self.attrs = {'dropout_prob': 0.75, 'is_test': True}
self.outputs = { self.outputs = {
...@@ -112,6 +138,7 @@ class TestDropoutOp5(OpTest): ...@@ -112,6 +138,7 @@ class TestDropoutOp5(OpTest):
class TestDropoutOp6(TestDropoutOp): class TestDropoutOp6(TestDropoutOp):
def setUp(self): def setUp(self):
self.op_type = "dropout" self.op_type = "dropout"
self.python_api = dropout_wapper
self.inputs = {'X': np.random.random((32, 64)).astype("float32")} self.inputs = {'X': np.random.random((32, 64)).astype("float32")}
self.attrs = { self.attrs = {
'dropout_prob': 1.0, 'dropout_prob': 1.0,
...@@ -128,6 +155,7 @@ class TestDropoutOp6(TestDropoutOp): ...@@ -128,6 +155,7 @@ class TestDropoutOp6(TestDropoutOp):
class TestDropoutOp7(TestDropoutOp): class TestDropoutOp7(TestDropoutOp):
def setUp(self): def setUp(self):
self.op_type = "dropout" self.op_type = "dropout"
self.python_api = dropout_wapper
self.inputs = {'X': np.random.random((32, 64, 2)).astype("float32")} self.inputs = {'X': np.random.random((32, 64, 2)).astype("float32")}
self.attrs = { self.attrs = {
'dropout_prob': 0.0, 'dropout_prob': 0.0,
...@@ -145,6 +173,7 @@ class TestDropoutOp7(TestDropoutOp): ...@@ -145,6 +173,7 @@ class TestDropoutOp7(TestDropoutOp):
class TestDropoutOp8(OpTest): class TestDropoutOp8(OpTest):
def setUp(self): def setUp(self):
self.op_type = "dropout" self.op_type = "dropout"
self.python_api = dropout_wapper
self.inputs = {'X': np.random.random((32, 64)).astype("float32")} self.inputs = {'X': np.random.random((32, 64)).astype("float32")}
self.attrs = { self.attrs = {
'dropout_prob': 0.35, 'dropout_prob': 0.35,
...@@ -162,6 +191,7 @@ class TestDropoutOp8(OpTest): ...@@ -162,6 +191,7 @@ class TestDropoutOp8(OpTest):
class TestDropoutOp9(OpTest): class TestDropoutOp9(OpTest):
def setUp(self): def setUp(self):
self.op_type = "dropout" self.op_type = "dropout"
self.python_api = dropout_wapper
self.inputs = {'X': np.random.random((32, 64, 3)).astype("float32")} self.inputs = {'X': np.random.random((32, 64, 3)).astype("float32")}
self.attrs = { self.attrs = {
'dropout_prob': 0.75, 'dropout_prob': 0.75,
...@@ -177,6 +207,7 @@ class TestDropoutOp9(OpTest): ...@@ -177,6 +207,7 @@ class TestDropoutOp9(OpTest):
class TestDropoutOpWithSeed(OpTest): class TestDropoutOpWithSeed(OpTest):
def setUp(self): def setUp(self):
self.op_type = "dropout" self.op_type = "dropout"
self.python_api = dropout_wapper
self.inputs = { self.inputs = {
"X": np.random.random((32, 64)).astype("float32"), "X": np.random.random((32, 64)).astype("float32"),
"Seed": np.asarray([125], dtype="int32"), "Seed": np.asarray([125], dtype="int32"),
...@@ -204,6 +235,7 @@ class TestDropoutOpWithSeed(OpTest): ...@@ -204,6 +235,7 @@ class TestDropoutOpWithSeed(OpTest):
class TestFP16DropoutOp(OpTest): class TestFP16DropoutOp(OpTest):
def setUp(self): def setUp(self):
self.op_type = "dropout" self.op_type = "dropout"
self.python_api = dropout_wapper
self.init_test_case() self.init_test_case()
x = np.random.random(self.input_size).astype("float16") x = np.random.random(self.input_size).astype("float16")
...@@ -240,6 +272,7 @@ class TestFP16DropoutOp2(TestFP16DropoutOp): ...@@ -240,6 +272,7 @@ class TestFP16DropoutOp2(TestFP16DropoutOp):
class TestBF16DropoutOp(OpTest): class TestBF16DropoutOp(OpTest):
def setUp(self): def setUp(self):
self.op_type = "dropout" self.op_type = "dropout"
self.python_api = dropout_wapper
self.dtype = np.uint16 self.dtype = np.uint16
x = np.random.random((32, 64)).astype("float32") x = np.random.random((32, 64)).astype("float32")
......
...@@ -15,7 +15,7 @@ ...@@ -15,7 +15,7 @@
import unittest import unittest
import numpy as np import numpy as np
from op_test import OpTest from eager_op_test import OpTest
import paddle import paddle
...@@ -103,7 +103,7 @@ class TestEditDistanceOp(OpTest): ...@@ -103,7 +103,7 @@ class TestEditDistanceOp(OpTest):
self.outputs = {'Out': distance, 'SequenceNum': sequence_num} self.outputs = {'Out': distance, 'SequenceNum': sequence_num}
def test_check_output(self): def test_check_output(self):
self.check_output(check_eager=True) self.check_output()
class TestEditDistanceOpNormalizedCase0(OpTest): class TestEditDistanceOpNormalizedCase0(OpTest):
...@@ -153,7 +153,7 @@ class TestEditDistanceOpNormalizedCase0(OpTest): ...@@ -153,7 +153,7 @@ class TestEditDistanceOpNormalizedCase0(OpTest):
self.post_config() self.post_config()
def test_check_output(self): def test_check_output(self):
self.check_output(check_eager=True) self.check_output()
class TestEditDistanceOpNormalizedCase1(TestEditDistanceOpNormalizedCase0): class TestEditDistanceOpNormalizedCase1(TestEditDistanceOpNormalizedCase0):
...@@ -205,7 +205,7 @@ class TestEditDistanceOpNormalizedTensor(OpTest): ...@@ -205,7 +205,7 @@ class TestEditDistanceOpNormalizedTensor(OpTest):
self.outputs = {'Out': distance, 'SequenceNum': sequence_num} self.outputs = {'Out': distance, 'SequenceNum': sequence_num}
def test_check_output(self): def test_check_output(self):
self.check_output(check_eager=True) self.check_output()
if __name__ == '__main__': if __name__ == '__main__':
......
...@@ -15,7 +15,7 @@ ...@@ -15,7 +15,7 @@
import unittest import unittest
import numpy as np import numpy as np
from op_test import OpTest, skip_check_grad_ci from eager_op_test import OpTest, skip_check_grad_ci
import paddle import paddle
import paddle.fluid as fluid import paddle.fluid as fluid
...@@ -63,6 +63,7 @@ class TestEigOp(OpTest): ...@@ -63,6 +63,7 @@ class TestEigOp(OpTest):
paddle.enable_static() paddle.enable_static()
paddle.device.set_device("cpu") paddle.device.set_device("cpu")
self.op_type = "eig" self.op_type = "eig"
self.python_api = paddle.linalg.eig
self.__class__.op_type = self.op_type self.__class__.op_type = self.op_type
self.init_input() self.init_input()
self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(self.x)} self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(self.x)}
......
...@@ -15,7 +15,7 @@ ...@@ -15,7 +15,7 @@
import unittest import unittest
import numpy as np import numpy as np
from op_test import OpTest from eager_op_test import OpTest
import paddle import paddle
...@@ -71,6 +71,7 @@ class TestEighOp(OpTest): ...@@ -71,6 +71,7 @@ class TestEighOp(OpTest):
def setUp(self): def setUp(self):
paddle.enable_static() paddle.enable_static()
self.op_type = "eigh" self.op_type = "eigh"
self.python_api = paddle.linalg.eigh
self.init_input() self.init_input()
self.init_config() self.init_config()
np.random.seed(123) np.random.seed(123)
...@@ -87,8 +88,8 @@ class TestEighOp(OpTest): ...@@ -87,8 +88,8 @@ class TestEighOp(OpTest):
self.x_type = np.float64 self.x_type = np.float64
self.x_np = np.random.random(self.x_shape).astype(self.x_type) self.x_np = np.random.random(self.x_shape).astype(self.x_type)
def test_check_output(self): # def test_check_output(self):
self.check_output(no_check_set=['Eigenvectors']) # self.check_output(no_check_set=['Eigenvectors'])
def test_grad(self): def test_grad(self):
self.check_grad(["X"], ["Eigenvalues"]) self.check_grad(["X"], ["Eigenvalues"])
......
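Not every op needs a hand-written wrapper: where a public Python API already matches the kernel signature, python_api can point at it directly, as the eigh test does above (and the eig test before it with paddle.linalg.eig):
self.op_type = "eigh"
self.python_api = paddle.linalg.eigh  # the public API doubles as the eager entry point
Thin wrappers such as conv3d_transpose_wrapper and dropout_wapper are only needed where the legacy op's attribute layout diverges from the _C_ops signature.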
...@@ -15,7 +15,7 @@ ...@@ -15,7 +15,7 @@
import unittest import unittest
import numpy as np import numpy as np
from op_test import OpTest from eager_op_test import OpTest
import paddle import paddle
import paddle.fluid.core as core import paddle.fluid.core as core
......
...@@ -15,7 +15,7 @@ ...@@ -15,7 +15,7 @@
import unittest import unittest
import numpy as np import numpy as np
from op_test import OpTest from eager_op_test import OpTest
import paddle import paddle
...@@ -73,10 +73,10 @@ class TestEigvalshOp(OpTest): ...@@ -73,10 +73,10 @@ class TestEigvalshOp(OpTest):
def test_check_output(self): def test_check_output(self):
# Eigenvectors differing only in sign are equivalent # Eigenvectors differing only in sign are equivalent
self.check_output(no_check_set=['Eigenvectors'], check_eager=True) self.check_output(no_check_set=['Eigenvectors'])
def test_grad(self): def test_grad(self):
self.check_grad(["X"], ["Eigenvalues"], check_eager=True) self.check_grad(["X"], ["Eigenvalues"])
class TestEigvalshUPLOCase(TestEigvalshOp): class TestEigvalshUPLOCase(TestEigvalshOp):
......
...@@ -16,7 +16,7 @@ import random ...@@ -16,7 +16,7 @@ import random
import unittest import unittest
import numpy as np import numpy as np
from op_test import OpTest from eager_op_test import OpTest, paddle_static_guard
import paddle import paddle
import paddle.fluid as fluid import paddle.fluid as fluid
...@@ -44,7 +44,7 @@ class TestElementwiseModOp(OpTest): ...@@ -44,7 +44,7 @@ class TestElementwiseModOp(OpTest):
self.outputs = {'Out': self.out} self.outputs = {'Out': self.out}
def test_check_output(self): def test_check_output(self):
self.check_output(check_eager=True) self.check_output()
def init_input_output(self): def init_input_output(self):
self.x = np.random.uniform(0, 10000, [10, 10]).astype(self.dtype) self.x = np.random.uniform(0, 10000, [10, 10]).astype(self.dtype)
...@@ -97,12 +97,13 @@ class TestElementwiseModOpInverse(TestElementwiseModOp): ...@@ -97,12 +97,13 @@ class TestElementwiseModOpInverse(TestElementwiseModOp):
class TestFloorDivideOp(unittest.TestCase): class TestFloorDivideOp(unittest.TestCase):
def test_name(self): def test_name(self):
with fluid.program_guard(fluid.Program()): with paddle_static_guard():
x = fluid.data(name="x", shape=[2, 3], dtype="int64") with fluid.program_guard(fluid.Program()):
y = fluid.data(name='y', shape=[2, 3], dtype='int64') x = fluid.data(name="x", shape=[2, 3], dtype="int64")
y = fluid.data(name='y', shape=[2, 3], dtype='int64')
y_1 = paddle.floor_divide(x, y, name='div_res') y_1 = paddle.floor_divide(x, y, name='div_res')
self.assertEqual(('div_res' in y_1.name), True) self.assertEqual(('div_res' in y_1.name), True)
def test_dygraph(self): def test_dygraph(self):
with fluid.dygraph.guard(): with fluid.dygraph.guard():
......
...@@ -15,7 +15,7 @@ ...@@ -15,7 +15,7 @@
import unittest import unittest
import numpy as np import numpy as np
from op_test import OpTest from eager_op_test import OpTest
import paddle import paddle
...@@ -37,16 +37,16 @@ class TestElementwiseOp(OpTest): ...@@ -37,16 +37,16 @@ class TestElementwiseOp(OpTest):
self.outputs = {'Out': np.heaviside(self.inputs['X'], self.inputs['Y'])} self.outputs = {'Out': np.heaviside(self.inputs['X'], self.inputs['Y'])}
def test_check_output(self): def test_check_output(self):
self.check_output(check_eager=True) self.check_output()
def test_check_grad_normal(self): def test_check_grad_normal(self):
self.check_grad(['X', 'Y'], 'Out', check_eager=True) self.check_grad(['X', 'Y'], 'Out')
def test_check_grad_ingore_x(self): def test_check_grad_ingore_x(self):
self.check_grad(['Y'], 'Out', no_grad_set=set("X"), check_eager=True) self.check_grad(['Y'], 'Out', no_grad_set=set("X"))
def test_check_grad_ingore_y(self): def test_check_grad_ingore_y(self):
self.check_grad(['X'], 'Out', no_grad_set=set('Y'), check_eager=True) self.check_grad(['X'], 'Out', no_grad_set=set('Y'))
class TestHeavisideBroadcast(unittest.TestCase): class TestHeavisideBroadcast(unittest.TestCase):
...@@ -182,7 +182,6 @@ class TestHeavisideAPI_float16(OpTest): ...@@ -182,7 +182,6 @@ class TestHeavisideAPI_float16(OpTest):
user_defined_grads=Heaviside_grad( user_defined_grads=Heaviside_grad(
self.inputs['X'], self.inputs['Y'], 1 / self.inputs['X'].size self.inputs['X'], self.inputs['Y'], 1 / self.inputs['X'].size
), ),
check_eager=True,
) )
......
...@@ -16,7 +16,7 @@ import random ...@@ -16,7 +16,7 @@ import random
import unittest import unittest
import numpy as np import numpy as np
from op_test import OpTest from eager_op_test import OpTest
import paddle import paddle
import paddle.fluid as fluid import paddle.fluid as fluid
...@@ -44,9 +44,9 @@ class TestElementwiseModOp(OpTest): ...@@ -44,9 +44,9 @@ class TestElementwiseModOp(OpTest):
def test_check_output(self): def test_check_output(self):
if self.attrs['axis'] == -1: if self.attrs['axis'] == -1:
self.check_output(check_eager=True) self.check_output()
else: else:
self.check_output(check_eager=False) self.check_output()
def init_input_output(self): def init_input_output(self):
self.x = np.random.uniform(0, 10000, [10, 10]).astype(self.dtype) self.x = np.random.uniform(0, 10000, [10, 10]).astype(self.dtype)
...@@ -101,9 +101,9 @@ class TestElementwiseModOpFloat(TestElementwiseModOp): ...@@ -101,9 +101,9 @@ class TestElementwiseModOpFloat(TestElementwiseModOp):
def test_check_output(self): def test_check_output(self):
if self.attrs['axis'] == -1: if self.attrs['axis'] == -1:
self.check_output(check_eager=True) self.check_output()
else: else:
self.check_output(check_eager=False) self.check_output()
class TestElementwiseModOpFp16(TestElementwiseModOp): class TestElementwiseModOpFp16(TestElementwiseModOp):
...@@ -117,9 +117,9 @@ class TestElementwiseModOpFp16(TestElementwiseModOp): ...@@ -117,9 +117,9 @@ class TestElementwiseModOpFp16(TestElementwiseModOp):
def test_check_output(self): def test_check_output(self):
if self.attrs['axis'] == -1: if self.attrs['axis'] == -1:
self.check_output(check_eager=True) self.check_output()
else: else:
self.check_output(check_eager=False) self.check_output()
class TestElementwiseModOpDouble(TestElementwiseModOpFloat): class TestElementwiseModOpDouble(TestElementwiseModOpFloat):
......
...@@ -15,7 +15,7 @@ ...@@ -15,7 +15,7 @@
import unittest import unittest
import numpy as np import numpy as np
from op_test import OpTest from eager_op_test import OpTest
import paddle import paddle
import paddle.fluid as fluid import paddle.fluid as fluid
......
...@@ -15,7 +15,7 @@ ...@@ -15,7 +15,7 @@
import unittest import unittest
import numpy as np import numpy as np
from op_test import OpTest from eager_op_test import OpTest
from scipy.special import erfinv from scipy.special import erfinv
import paddle import paddle
...@@ -44,7 +44,7 @@ class TestErfinv(OpTest): ...@@ -44,7 +44,7 @@ class TestErfinv(OpTest):
self.dtype = np.float64 self.dtype = np.float64
def test_check_output(self): def test_check_output(self):
self.check_output(check_eager=True) self.check_output()
def test_check_grad(self): def test_check_grad(self):
self.check_grad( self.check_grad(
......
...@@ -15,11 +15,13 @@ ...@@ -15,11 +15,13 @@
import unittest import unittest
import numpy as np import numpy as np
from op_test import OpTest, skip_check_grad_ci from eager_op_test import OpTest, skip_check_grad_ci
import paddle import paddle
import paddle.fluid as fluid import paddle.fluid as fluid
paddle.enable_static()
@skip_check_grad_ci(reason="Not op test but call the method of class OpTest.") @skip_check_grad_ci(reason="Not op test but call the method of class OpTest.")
class TestExecutorReturnTensorNotOverwritingWithOptest(OpTest): class TestExecutorReturnTensorNotOverwritingWithOptest(OpTest):
...@@ -104,4 +106,5 @@ class TestExecutorReturnTensorNotOverOverwritingWithLayers(unittest.TestCase): ...@@ -104,4 +106,5 @@ class TestExecutorReturnTensorNotOverOverwritingWithLayers(unittest.TestCase):
if __name__ == '__main__': if __name__ == '__main__':
paddle.enable_static()
unittest.main() unittest.main()
...@@ -15,7 +15,7 @@ ...@@ -15,7 +15,7 @@
import unittest import unittest
import numpy as np import numpy as np
from op_test import OpTest from eager_op_test import OpTest
import paddle import paddle
import paddle.fluid as fluid import paddle.fluid as fluid
...@@ -35,10 +35,10 @@ class TestExpandAsBasic(OpTest): ...@@ -35,10 +35,10 @@ class TestExpandAsBasic(OpTest):
self.outputs = {'Out': output} self.outputs = {'Out': output}
def test_check_output(self): def test_check_output(self):
self.check_output(check_eager=True, check_prim=True) self.check_output(check_prim=True)
def test_check_grad(self): def test_check_grad(self):
self.check_grad(['X'], 'Out', check_eager=True, check_prim=True) self.check_grad(['X'], 'Out', check_prim=True)
class TestExpandAsOpRank2(TestExpandAsBasic): class TestExpandAsOpRank2(TestExpandAsBasic):
......
...@@ -16,7 +16,7 @@ import os ...@@ -16,7 +16,7 @@ import os
import unittest import unittest
import numpy as np import numpy as np
from op_test import OpTest from eager_op_test import OpTest
from test_attribute_var import UnittestBase from test_attribute_var import UnittestBase
import paddle import paddle
...@@ -42,7 +42,7 @@ class TestEyeOp(OpTest): ...@@ -42,7 +42,7 @@ class TestEyeOp(OpTest):
self.outputs = {'Out': np.eye(219, 319, dtype=np.int32)} self.outputs = {'Out': np.eye(219, 319, dtype=np.int32)}
def test_check_output(self): def test_check_output(self):
self.check_output(check_eager=True) self.check_output()
class TestEyeOp1(OpTest): class TestEyeOp1(OpTest):
...@@ -58,7 +58,7 @@ class TestEyeOp1(OpTest): ...@@ -58,7 +58,7 @@ class TestEyeOp1(OpTest):
self.outputs = {'Out': np.eye(50, dtype=float)} self.outputs = {'Out': np.eye(50, dtype=float)}
def test_check_output(self): def test_check_output(self):
self.check_output(check_eager=True) self.check_output()
class TestEyeOp2(OpTest): class TestEyeOp2(OpTest):
...@@ -74,7 +74,7 @@ class TestEyeOp2(OpTest): ...@@ -74,7 +74,7 @@ class TestEyeOp2(OpTest):
self.outputs = {'Out': np.eye(99, 1, dtype=float)} self.outputs = {'Out': np.eye(99, 1, dtype=float)}
def test_check_output(self): def test_check_output(self):
self.check_output(check_eager=True) self.check_output()
class API_TestTensorEye(unittest.TestCase): class API_TestTensorEye(unittest.TestCase):
......
...@@ -16,7 +16,7 @@ import math ...@@ -16,7 +16,7 @@ import math
import unittest import unittest
import numpy as np import numpy as np
from op_test import OpTest from eager_op_test import OpTest
def quantize_max_abs(x, max_range): def quantize_max_abs(x, max_range):
......
...@@ -15,7 +15,7 @@ ...@@ -15,7 +15,7 @@
import unittest import unittest
import numpy as np import numpy as np
from op_test import OpTest from eager_op_test import OpTest, paddle_static_guard
import paddle import paddle
import paddle.fluid as fluid import paddle.fluid as fluid
...@@ -144,27 +144,28 @@ class TestFcOp_NumFlattenDims_NegOne(unittest.TestCase): ...@@ -144,27 +144,28 @@ class TestFcOp_NumFlattenDims_NegOne(unittest.TestCase):
startup_program = Program() startup_program = Program()
main_program = Program() main_program = Program()
with program_guard(main_program, startup_program): with paddle_static_guard():
input = np.random.random([2, 2, 25]).astype("float32") with program_guard(main_program, startup_program):
x = paddle.static.data( input = np.random.random([2, 2, 25]).astype("float32")
name="x", x = paddle.static.data(
shape=[2, 2, 25], name="x",
dtype="float32", shape=[2, 2, 25],
dtype="float32",
)
out = paddle.static.nn.fc(
x=x, size=1, num_flatten_dims=num_flatten_dims
)
place = (
fluid.CPUPlace()
if not core.is_compiled_with_cuda()
else fluid.CUDAPlace(0)
) )
exe = fluid.Executor(place=place)
out = paddle.static.nn.fc( exe.run(startup_program)
x=x, size=1, num_flatten_dims=num_flatten_dims out = exe.run(main_program, feed={"x": input}, fetch_list=[out])
) return out
place = (
fluid.CPUPlace()
if not core.is_compiled_with_cuda()
else fluid.CUDAPlace(0)
)
exe = fluid.Executor(place=place)
exe.run(startup_program)
out = exe.run(main_program, feed={"x": input}, fetch_list=[out])
return out
res_1 = run_program(-1) res_1 = run_program(-1)
res_2 = run_program(2) res_2 = run_program(2)
...@@ -177,27 +178,35 @@ class TestFCOpError(unittest.TestCase): ...@@ -177,27 +178,35 @@ class TestFCOpError(unittest.TestCase):
input_data = np.random.random((2, 4)).astype("float32") input_data = np.random.random((2, 4)).astype("float32")
def test_Variable(): def test_Variable():
# the input type must be Variable with paddle_static_guard():
paddle.static.nn.fc(x=input_data, size=1) # the input type must be Variable
paddle.static.nn.fc(x=input_data, size=1)
self.assertRaises(TypeError, test_Variable) self.assertRaises(TypeError, test_Variable)
def test_input_list(): def test_input_list():
# each of input(list) must be Variable with paddle_static_guard():
paddle.static.nn.fc(x=[input_data], size=1) # each of input(list) must be Variable
paddle.static.nn.fc(x=[input_data], size=1)
self.assertRaises(TypeError, test_input_list) self.assertRaises(TypeError, test_input_list)
def test_type(): def test_type():
# dtype must be float32 or float64 with paddle_static_guard():
x2 = paddle.static.data(name='x2', shape=[-1, 4], dtype='int32') # dtype must be float32 or float64
paddle.static.nn.fc(x=x2, size=1) x2 = paddle.static.data(
name='x2', shape=[-1, 4], dtype='int32'
)
paddle.static.nn.fc(x=x2, size=1)
self.assertRaises(TypeError, test_type) self.assertRaises(TypeError, test_type)
# The input dtype of fc can be float16 in GPU, test for warning with paddle_static_guard():
x3 = paddle.static.data(name='x3', shape=[-1, 4], dtype='float16') # The input dtype of fc can be float16 in GPU, test for warning
paddle.static.nn.fc(x=x3, size=1) x3 = paddle.static.data(
name='x3', shape=[-1, 4], dtype='float16'
)
paddle.static.nn.fc(x=x3, size=1)
if __name__ == "__main__": if __name__ == "__main__":
......
...@@ -15,7 +15,7 @@ ...@@ -15,7 +15,7 @@
import unittest import unittest
import numpy as np import numpy as np
from op_test import OpTest from eager_op_test import OpTest
import paddle import paddle
from paddle.fluid.framework import convert_np_dtype_to_dtype_ from paddle.fluid.framework import convert_np_dtype_to_dtype_
...@@ -67,7 +67,7 @@ class TestFillConstatnBatchSizeLike1(OpTest): ...@@ -67,7 +67,7 @@ class TestFillConstatnBatchSizeLike1(OpTest):
self.force_cpu = False self.force_cpu = False
def test_check_output(self): def test_check_output(self):
self.check_output(check_eager=True) self.check_output()
if __name__ == "__main__": if __name__ == "__main__":
......
...@@ -15,7 +15,7 @@ ...@@ -15,7 +15,7 @@
import unittest import unittest
import numpy as np import numpy as np
from op_test import OpTest from eager_op_test import OpTest
import paddle import paddle
...@@ -102,10 +102,10 @@ class TensorFillDiagTensor_Test(OpTest): ...@@ -102,10 +102,10 @@ class TensorFillDiagTensor_Test(OpTest):
self.dtype = np.float64 self.dtype = np.float64
def test_check_output(self): def test_check_output(self):
self.check_output(check_eager=True) self.check_output()
def test_check_grad(self): def test_check_grad(self):
self.check_grad(['X'], 'Out', check_eager=True) self.check_grad(['X'], 'Out')
class TensorFillDiagTensor_Test2(TensorFillDiagTensor_Test): class TensorFillDiagTensor_Test2(TensorFillDiagTensor_Test):
......
...@@ -15,7 +15,7 @@ ...@@ -15,7 +15,7 @@
import unittest import unittest
import numpy as np import numpy as np
from op_test import OpTest from eager_op_test import OpTest
import paddle.fluid.core as core import paddle.fluid.core as core
from paddle.fluid.op import Operator from paddle.fluid.op import Operator
......
...@@ -15,7 +15,7 @@ ...@@ -15,7 +15,7 @@
import unittest import unittest
import numpy as np import numpy as np
from op_test import OpTest from eager_op_test import OpTest
from paddle.fluid.framework import convert_np_dtype_to_dtype_ from paddle.fluid.framework import convert_np_dtype_to_dtype_
......
...@@ -15,7 +15,7 @@ ...@@ -15,7 +15,7 @@
import unittest import unittest
import numpy as np import numpy as np
from op_test import OpTest from eager_op_test import OpTest
class TestFillZerosLikeOp(OpTest): class TestFillZerosLikeOp(OpTest):
......
...@@ -16,7 +16,7 @@ ...@@ -16,7 +16,7 @@
import unittest import unittest
import numpy as np import numpy as np
from op_test import OpTest from eager_op_test import OpTest
"""This is Test Case 1""" """This is Test Case 1"""
......
...@@ -15,7 +15,7 @@ ...@@ -15,7 +15,7 @@
import unittest import unittest
import numpy as np import numpy as np
from op_test import OpTest from eager_op_test import OpTest
class TestFlattenOp(OpTest): class TestFlattenOp(OpTest):
......
...@@ -15,7 +15,7 @@ ...@@ -15,7 +15,7 @@
import unittest import unittest
import numpy as np import numpy as np
from op_test import OpTest from eager_op_test import OpTest
import paddle import paddle
...@@ -41,12 +41,10 @@ class TestFlattenOp(OpTest): ...@@ -41,12 +41,10 @@ class TestFlattenOp(OpTest):
self.enable_cinn = True self.enable_cinn = True
def test_check_output(self): def test_check_output(self):
self.check_output( self.check_output(no_check_set=["XShape"], check_prim=True)
no_check_set=["XShape"], check_eager=True, check_prim=True
)
def test_check_grad(self): def test_check_grad(self):
self.check_grad(["X"], "Out", check_eager=True, check_prim=True) self.check_grad(["X"], "Out", check_prim=True)
def init_test_case(self): def init_test_case(self):
self.in_shape = (3, 2, 5, 4) self.in_shape = (3, 2, 5, 4)
......
...@@ -15,7 +15,7 @@ ...@@ -15,7 +15,7 @@
import unittest import unittest
import numpy as np import numpy as np
from op_test import OpTest from eager_op_test import OpTest, paddle_static_guard
import paddle import paddle
...@@ -69,27 +69,28 @@ class TestFlattenOpSixDims(TestFlattenOp): ...@@ -69,27 +69,28 @@ class TestFlattenOpSixDims(TestFlattenOp):
class TestFlattenOpFP16(unittest.TestCase): class TestFlattenOpFP16(unittest.TestCase):
def test_fp16_with_gpu(self): def test_fp16_with_gpu(self):
if paddle.fluid.core.is_compiled_with_cuda(): if paddle.fluid.core.is_compiled_with_cuda():
place = paddle.CUDAPlace(0) with paddle_static_guard():
with paddle.static.program_guard( place = paddle.CUDAPlace(0)
paddle.static.Program(), paddle.static.Program() with paddle.static.program_guard(
): paddle.static.Program(), paddle.static.Program()
input = np.random.random([12, 14]).astype("float16") ):
x = paddle.static.data( input = np.random.random([12, 14]).astype("float16")
name="x", shape=[12, 14], dtype="float16" x = paddle.static.data(
) name="x", shape=[12, 14], dtype="float16"
)
y = paddle.flatten(x)
y = paddle.flatten(x)
exe = paddle.static.Executor(place)
res = exe.run( exe = paddle.static.Executor(place)
paddle.static.default_main_program(), res = exe.run(
feed={ paddle.static.default_main_program(),
"x": input, feed={
}, "x": input,
fetch_list=[y], },
) fetch_list=[y],
)
assert np.array_equal(res[0].shape, [12 * 14])
assert np.array_equal(res[0].shape, [12 * 14])
if __name__ == "__main__": if __name__ == "__main__":
......
...@@ -17,7 +17,7 @@ import unittest ...@@ -17,7 +17,7 @@ import unittest
import gradient_checker import gradient_checker
import numpy as np import numpy as np
from decorator_helper import prog_scope from decorator_helper import prog_scope
from op_test import OpTest from eager_op_test import OpTest
import paddle import paddle
import paddle.fluid as fluid import paddle.fluid as fluid
...@@ -80,10 +80,10 @@ class TestFlipOp(OpTest): ...@@ -80,10 +80,10 @@ class TestFlipOp(OpTest):
self.attrs = {"axis": self.axis} self.attrs = {"axis": self.axis}
def test_check_output(self): def test_check_output(self):
self.check_output(check_eager=True) self.check_output()
def test_check_grad(self): def test_check_grad(self):
self.check_grad(["X"], "Out", check_eager=True) self.check_grad(["X"], "Out")
def init_test_case(self): def init_test_case(self):
self.in_shape = (6, 4, 2, 3) self.in_shape = (6, 4, 2, 3)
......
@@ -15,7 +15,7 @@
 import unittest
 import numpy as np
-from op_test import OpTest
+from eager_op_test import OpTest
 import paddle
 import paddle.fluid.core as core
@@ -145,11 +145,11 @@ class TestElementwiseFmaxOp(OpTest):
     def test_check_output(self):
         """test_check_output"""
-        self.check_output(check_eager=True)
+        self.check_output()

     def test_check_grad_normal(self):
         """test_check_grad_normal"""
-        self.check_grad(['X', 'Y'], 'Out', check_eager=True)
+        self.check_grad(['X', 'Y'], 'Out')

     def test_check_grad_ingore_x(self):
         """test_check_grad_ingore_x"""
@@ -158,7 +158,6 @@ class TestElementwiseFmaxOp(OpTest):
             'Out',
             max_relative_error=0.005,
             no_grad_set=set("X"),
-            check_eager=True,
         )

     def test_check_grad_ingore_y(self):
@@ -168,7 +167,6 @@ class TestElementwiseFmaxOp(OpTest):
             'Out',
             max_relative_error=0.005,
             no_grad_set=set('Y'),
-            check_eager=True,
         )
@@ -192,11 +190,11 @@ class TestElementwiseFmax2Op(OpTest):
     def test_check_output(self):
         """test_check_output"""
-        self.check_output(check_eager=True)
+        self.check_output()

     def test_check_grad_normal(self):
         """test_check_grad_normal"""
-        self.check_grad(['X', 'Y'], 'Out', check_eager=True)
+        self.check_grad(['X', 'Y'], 'Out')

     def test_check_grad_ingore_x(self):
         """test_check_grad_ingore_x"""
@@ -205,7 +203,6 @@ class TestElementwiseFmax2Op(OpTest):
             'Out',
             max_relative_error=0.005,
             no_grad_set=set("X"),
-            check_eager=True,
         )

     def test_check_grad_ingore_y(self):
@@ -215,7 +212,6 @@ class TestElementwiseFmax2Op(OpTest):
             'Out',
             max_relative_error=0.005,
             no_grad_set=set('Y'),
-            check_eager=True,
         )
@@ -238,11 +234,11 @@ class TestElementwiseFmax3Op(OpTest):
     def test_check_output(self):
         """test_check_output"""
-        self.check_output(check_eager=True)
+        self.check_output()

     def test_check_grad_normal(self):
         """test_check_grad_normal"""
-        self.check_grad(['X', 'Y'], 'Out', check_eager=True)
+        self.check_grad(['X', 'Y'], 'Out')

 if __name__ == "__main__":
...
@@ -15,7 +15,7 @@
 import unittest
 import numpy as np
-from op_test import OpTest
+from eager_op_test import OpTest
 import paddle
 import paddle.fluid.core as core
@@ -147,11 +147,11 @@ class TestElementwiseFminOp(OpTest):
     def test_check_output(self):
         """test_check_output"""
-        self.check_output(check_eager=True)
+        self.check_output()

     def test_check_grad_normal(self):
         """test_check_grad_normal"""
-        self.check_grad(['X', 'Y'], 'Out', check_eager=True)
+        self.check_grad(['X', 'Y'], 'Out')

     def test_check_grad_ingore_x(self):
         """test_check_grad_ingore_x"""
@@ -160,7 +160,6 @@ class TestElementwiseFminOp(OpTest):
             'Out',
             max_relative_error=0.005,
             no_grad_set=set("X"),
-            check_eager=True,
         )

     def test_check_grad_ingore_y(self):
@@ -170,7 +169,6 @@ class TestElementwiseFminOp(OpTest):
             'Out',
             max_relative_error=0.005,
             no_grad_set=set('Y'),
-            check_eager=True,
         )
@@ -194,11 +192,11 @@ class TestElementwiseFmin2Op(OpTest):
     def test_check_output(self):
         """test_check_output"""
-        self.check_output(check_eager=True)
+        self.check_output()

     def test_check_grad_normal(self):
         """test_check_grad_normal"""
-        self.check_grad(['X', 'Y'], 'Out', check_eager=True)
+        self.check_grad(['X', 'Y'], 'Out')

     def test_check_grad_ingore_x(self):
         """test_check_grad_ingore_x"""
@@ -207,7 +205,6 @@ class TestElementwiseFmin2Op(OpTest):
             'Out',
             max_relative_error=0.005,
             no_grad_set=set("X"),
-            check_eager=True,
         )

     def test_check_grad_ingore_y(self):
@@ -217,7 +214,6 @@ class TestElementwiseFmin2Op(OpTest):
             'Out',
             max_relative_error=0.005,
             no_grad_set=set('Y'),
-            check_eager=True,
         )
@@ -240,11 +236,11 @@ class TestElementwiseFmin3Op(OpTest):
     def test_check_output(self):
         """test_check_output"""
-        self.check_output(check_eager=True)
+        self.check_output()

     def test_check_grad_normal(self):
         """test_check_grad_normal"""
-        self.check_grad(['X', 'Y'], 'Out', check_eager=True)
+        self.check_grad(['X', 'Y'], 'Out')

 if __name__ == "__main__":
...
@@ -15,7 +15,7 @@
 import unittest
 import numpy as np
-from op_test import OpTest
+from eager_op_test import OpTest
 import paddle
 import paddle.fluid as fluid
@@ -124,10 +124,10 @@ class TestFoldOp(OpTest):
         self.set_data()

     def test_check_output(self):
-        self.check_output(check_eager=True)
+        self.check_output()

     def test_check_grad(self):
-        self.check_grad(['X'], 'Y', check_eager=True)
+        self.check_grad(['X'], 'Y')

 class TestFoldshape(TestFoldOp):
...
@@ -15,8 +15,8 @@
 import unittest
 import numpy as np
+from eager_op_test import OpTest
 from numpy.lib.stride_tricks import as_strided
-from op_test import OpTest
 import paddle
@@ -68,12 +68,12 @@ class TestFrameOp(OpTest):
     def test_check_output(self):
         paddle.enable_static()
-        self.check_output(check_eager=True)
+        self.check_output()
         paddle.disable_static()

     def test_check_grad_normal(self):
         paddle.enable_static()
-        self.check_grad(['X'], 'Out', check_eager=True)
+        self.check_grad(['X'], 'Out')
         paddle.disable_static()
...
@@ -15,7 +15,7 @@
 import unittest
 import numpy as np
-from op_test import OpTest
+from eager_op_test import OpTest

 def fsp_matrix(a, b):
...
@@ -15,7 +15,7 @@
 import unittest
 import numpy as np
-from op_test import OpTest
+from eager_op_test import OpTest
 import paddle.fluid.core as core
 from paddle.fluid.op import Operator
@@ -116,7 +116,7 @@ class TestFTRLOp(OpTest):
         }

     def test_check_output(self):
-        self.check_output(check_eager=True)
+        self.check_output()

 class TestSparseFTRLOp(unittest.TestCase):
...
@@ -15,7 +15,7 @@
 import unittest
 import numpy as np
-from op_test import OpTest
+from eager_op_test import OpTest
 import paddle
 import paddle.fluid.core as core
@@ -130,7 +130,7 @@ class TestFullLikeOp1(OpTest):
         self.dtype = np.float32

     def test_check_output(self):
-        self.check_output(check_eager=True, check_prim=True)
+        self.check_output(check_prim=True)

     def if_enable_cinn(self):
         pass
...
@@ -15,7 +15,7 @@
 import unittest
 import numpy as np
-from op_test import OpTest
+from eager_op_test import OpTest
 import paddle
@@ -172,7 +172,7 @@ class TestFusedAdamOp(OpTest):
     def test_check_output(self):
         paddle.enable_static()
         if paddle.is_compiled_with_cuda():
-            self.check_output()
+            self.check_output(check_dygraph=False)

 if __name__ == "__main__":
...
@@ -15,7 +15,7 @@
 import unittest
 import numpy as np
-from op_test import OpTest
+from eager_op_test import OpTest
 import paddle
 import paddle.incubate.nn.functional as incubate_f
...
@@ -15,7 +15,7 @@
 import unittest
 import numpy as np
-from op_test import OpTest
+from eager_op_test import OpTest
 import paddle
 import paddle.incubate.nn.functional as incubate_f
...
@@ -15,7 +15,7 @@
 import unittest
 import numpy as np
-from op_test import OpTest
+from eager_op_test import OpTest
 import paddle
 import paddle.nn.functional as F
...
@@ -16,7 +16,7 @@ import unittest
 from functools import partial
 import numpy as np
-from op_test import OpTest
+from eager_op_test import OpTest
 import paddle.fluid.core as core
...
@@ -16,7 +16,7 @@ import platform
 import unittest
 import numpy as np
-from op_test import OpTest, skip_check_grad_ci
+from eager_op_test import OpTest, paddle_static_guard, skip_check_grad_ci
 import paddle
 import paddle.version as ver
@@ -105,32 +105,33 @@ class TestLookupTableOpWithPadding(TestFusedEmbeddingSeqPoolOp):
 class TestFusedEmbeddingSeqPoolApi(unittest.TestCase):
     def test_api(self):
-        if ver.mkl() == "ON" and 'Linux' in platform.platform():
-            import paddle.fluid as fluid
-
-            dict_size = 20
-            data_t = paddle.static.data(
-                name='word', shape=[-1, 1], dtype='int64', lod_level=1
-            )
-            padding_idx = np.random.randint(1, 10)
-            out = fluid.contrib.fused_embedding_seq_pool(
-                input=data_t,
-                size=[dict_size, 32],
-                param_attr='w',
-                padding_idx=padding_idx,
-                is_sparse=False,
-            )
-            place = fluid.CPUPlace()
-            exe = fluid.Executor(place)
-            exe.run(fluid.default_startup_program())
-            # prepare input words' idx
-            x_tensor = fluid.core.LoDTensor()
-            idxs = np.random.randint(1, 10, (8)).astype("int64")
-            x_tensor.set(idxs, place)
-            x_tensor.set_recursive_sequence_lengths([[4, 4]])
-
-            ret = exe.run(feed={'word': x_tensor}, fetch_list=[out])
+        with paddle_static_guard():
+            if ver.mkl() == "ON" and 'Linux' in platform.platform():
+                import paddle.fluid as fluid
+
+                dict_size = 20
+                data_t = paddle.static.data(
+                    name='word', shape=[-1, 1], dtype='int64', lod_level=1
+                )
+                padding_idx = np.random.randint(1, 10)
+                out = fluid.contrib.fused_embedding_seq_pool(
+                    input=data_t,
+                    size=[dict_size, 32],
+                    param_attr='w',
+                    padding_idx=padding_idx,
+                    is_sparse=False,
+                )
+                place = fluid.CPUPlace()
+                exe = fluid.Executor(place)
+                exe.run(fluid.default_startup_program())
+                # prepare input words' idx
+                x_tensor = fluid.core.LoDTensor()
+                idxs = np.random.randint(1, 10, (8)).astype("int64")
+                x_tensor.set(idxs, place)
+                x_tensor.set_recursive_sequence_lengths([[4, 4]])
+
+                ret = exe.run(feed={'word': x_tensor}, fetch_list=[out])

 if __name__ == "__main__":
...
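The `fused_embedding_seq_pool` test above feeds a `LoDTensor` whose recursive sequence lengths mark variable-length sequence boundaries. A small standalone sketch of that API (illustrative only, not part of this diff; assumes a Paddle build where the legacy `paddle.fluid` namespace is still available):

```python
import numpy as np

import paddle.fluid as fluid

place = fluid.CPUPlace()
t = fluid.core.LoDTensor()
# An 8-element batch of int64 ids, shaped [-1, 1] like the test's input.
t.set(np.arange(8).astype("int64").reshape(8, 1), place)
t.set_recursive_sequence_lengths([[4, 4]])  # two sequences of 4 ids each
print(t.recursive_sequence_lengths())  # [[4, 4]]
print(t.lod())                         # offset form: [[0, 4, 8]]
```

The lengths `[[4, 4]]` must sum to the tensor's first dimension (8 here); they are equivalent to the offset-style LoD `[[0, 4, 8]]`.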
@@ -15,7 +15,7 @@
 import unittest
 import numpy as np
-from op_test import OpTest
+from eager_op_test import OpTest
 from test_lstm_op import ACTIVATION, lstm
...
@@ -15,7 +15,7 @@
 import unittest
 import numpy as np
-from op_test import OpTest
+from eager_op_test import OpTest
 from test_fc_op import MatrixGenerate, fc_refer
 from test_layer_norm_op import _reference_layer_norm_naive
@@ -70,7 +70,7 @@ class TestFusedFCElementwiseLayerNormOp(OpTest):
     def test_check_output(self):
         place = core.CUDAPlace(0)
-        self.check_output_with_place(place, atol=2e-3)
+        self.check_output_with_place(place, atol=2e-3, check_dygraph=False)

 class TestFusedFCElementwiseLayerNormOp2(TestFusedFCElementwiseLayerNormOp):
...
@@ -14,7 +14,7 @@
 import unittest
 import numpy as np
-from op_test import OpTest
+from eager_op_test import OpTest
 import paddle
 import paddle.incubate.nn.functional as incubate_f
...
@@ -20,7 +20,11 @@ os.environ['FLAGS_new_einsum'] = "0"
 import unittest
 import numpy as np
-from op_test import OpTest, convert_float_to_uint16, convert_uint16_to_float
+from eager_op_test import (
+    OpTest,
+    convert_float_to_uint16,
+    convert_uint16_to_float,
+)
 from test_sparse_attention_op import get_cuda_version
 import paddle
...
@@ -16,7 +16,7 @@
 import unittest
 import numpy as np
-from op_test import OpTest, skip_check_grad_ci
+from eager_op_test import OpTest, skip_check_grad_ci
 import paddle
 import paddle.fluid.core as core
@@ -62,7 +62,9 @@ class TestFuseGemmEpilogueGradOpDXYBiasFP16(OpTest):
             self.place
         ):
             return
-        self.check_output_with_place(self.place, atol=self.atol)
+        self.check_output_with_place(
+            self.place, atol=self.atol, check_dygraph=False
+        )

 @skip_check_grad_ci(reason="no grap op")
@@ -121,7 +123,9 @@ class TestFuseGemmEpilogueGradOpDYBiasFP16(OpTest):
             self.place
         ):
             return
-        self.check_output_with_place(self.place, atol=self.atol)
+        self.check_output_with_place(
+            self.place, atol=self.atol, check_dygraph=False
+        )

 @skip_check_grad_ci(reason="no grap op")
@@ -180,7 +184,9 @@ class TestFuseGemmEpilogueGradOpDYFP16(OpTest):
             self.place
         ):
             return
-        self.check_output_with_place(self.place, atol=self.atol)
+        self.check_output_with_place(
+            self.place, atol=self.atol, check_dygraph=False
+        )

 @skip_check_grad_ci(reason="no grap op")
@@ -235,7 +241,9 @@ class TestFuseGemmEpilogueGradOpDXYFP16(OpTest):
             self.place
         ):
             return
-        self.check_output_with_place(self.place, atol=self.atol)
+        self.check_output_with_place(
+            self.place, atol=self.atol, check_dygraph=False
+        )

 @skip_check_grad_ci(reason="no grap op")
...
@@ -16,7 +16,7 @@
 import unittest
 import numpy as np
-from op_test import OpTest, skip_check_grad_ci, skip_check_inplace_ci
+from eager_op_test import OpTest, skip_check_grad_ci, skip_check_inplace_ci
 import paddle
 import paddle.fluid.core as core
...
@@ -16,7 +16,7 @@ import random
 import unittest
 import numpy as np
-from op_test import OpTest
+from eager_op_test import OpTest
 import paddle
 import paddle.nn.functional as F
...
@@ -15,7 +15,7 @@
 import unittest
 import numpy as np
-from op_test import OpTest
+from eager_op_test import OpTest
 from paddle.fluid import core
@@ -135,7 +135,7 @@ class TestFusedMultiHeadMatmulOp_biasqk2(OpTest):
     def test_check_output(self):
         place = core.CUDAPlace(0)
-        self.check_output_with_place(place, atol=2e-3)
+        self.check_output_with_place(place, atol=2e-3, check_dygraph=False)

 @unittest.skipIf(
@@ -239,7 +239,7 @@ class TestFusedMultiheadMatmulOp(OpTest):
     def test_check_output(self):
         place = core.CUDAPlace(0)
-        self.check_output_with_place(place, atol=2e-3)
+        self.check_output_with_place(place, atol=2e-3, check_dygraph=False)

 class TestFusedMultiHeadMatmulOp2(TestFusedMultiheadMatmulOp):
...
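The CUDA-only fused-op tests above pair `check_dygraph=False` with an `@unittest.skipIf` guard, since these fused kernels exist only for the static graph on GPU. A minimal, self-contained sketch of that guard pattern (the class name and test body are hypothetical, not taken from this PR):

```python
import unittest

import paddle


@unittest.skipIf(
    not paddle.is_compiled_with_cuda(), "core is not compiled with CUDA"
)
class TestCudaOnlyFusedOp(unittest.TestCase):
    def test_runs_on_gpu(self):
        # Safe here: the skipIf guard ensures CUDA support is present.
        paddle.set_device("gpu")
        x = paddle.ones([2, 3])
        self.assertEqual(x.shape, [2, 3])


if __name__ == "__main__":
    unittest.main()
```

On a CPU-only build the whole case is skipped instead of failing, which keeps these tests green across CI configurations.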
@@ -15,7 +15,7 @@
 import unittest
 import numpy as np
-from op_test import OpTest
+from eager_op_test import OpTest
 from paddle.framework import core
...
@@ -15,7 +15,7 @@
 import unittest
 import numpy as np
-from op_test import OpTest
+from eager_op_test import OpTest
 from paddle.fluid.tests.unittests.test_fusion_lstm_op import ACTIVATION, fc
 from paddle.fluid.tests.unittests.test_gru_op import gru
...
This diff is collapsed.