Unverified commit 61e60e68, authored by Aurelius84, committed by GitHub

[Eager]Fix 17 unittest and open check_eager=True (#41270)

* [Eager]Enhance eager_trace_op logic to support Optimizer Op

* fix AsDispensable

* [Eager]Fix 17 unittest and open check_eager=True

* remove print

* fix unittests

* fix op_test

* fix coverage CI failed

* fix ci
Parent: fd1ecfc5
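The change is mechanical across the 17 tests: pass check_eager through OpTest's check helpers. A minimal sketch of that pattern, assuming Paddle's OpTest harness (python/paddle/fluid/tests/unittests/op_test.py) is importable; the "scale" op and the test body are illustrative, not part of this diff, while check_eager is the real flag this commit turns on:

# Minimal sketch of the opt-in pattern this commit applies; assumes the
# OpTest harness from Paddle's unittest directory is on sys.path.
import unittest

import numpy as np
import paddle
from op_test import OpTest


class TestScaleOpEager(OpTest):
    def setUp(self):
        self.op_type = "scale"
        self.inputs = {'X': np.random.random((4, 8)).astype("float64")}
        self.attrs = {'scale': 2.0}
        self.outputs = {'Out': self.inputs['X'] * self.attrs['scale']}

    def test_check_output(self):
        # Runs the static-graph check and, with check_eager=True, the eager one.
        self.check_output(check_eager=True)

    def test_check_grad(self):
        self.check_grad(['X'], 'Out', check_eager=True)


if __name__ == "__main__":
    paddle.enable_static()
    unittest.main()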
@@ -64,8 +64,9 @@ void GradTensorHolder::CopyValueFromTensor(
   } else {
     // Create new tensor->impl and fill it with 1.0
     if (t.defined()) {
-      // Fill 1.0
-      buffer_[slot_id][rank] = paddle::experimental::ones_like(t, t.dtype());
+      // Fill 1.0; use full to support complex, which ones_like doesn't.
+      buffer_[slot_id][rank] =
+          paddle::experimental::full(t.shape(), 1, t.dtype(), t.inner_place());
     }
   }
 }
...
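The hunk above replaces ones_like with full when seeding the gradient buffer, since the full kernel covers complex dtypes. A hedged eager-mode illustration of the same idea from Python, assuming paddle.full accepts complex dtypes in your build:

# Hedged illustration: building a "ones" gradient buffer for a complex
# tensor, as the new full(...) path does in C++.
import paddle

t = paddle.to_tensor([1 + 2j, 3 - 4j])         # a complex tensor
ones = paddle.full(t.shape, 1, dtype=t.dtype)  # fills (1+0j) per element
print(ones.dtype, ones.numpy())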
@@ -52,6 +52,7 @@ std::map<std::string, std::set<std::string>> op_ins_map = {
     {"fake_quantize_dequantize_moving_average_abs_max",
      {"X", "InScale", "InAccum", "InState"}},
     {"nll_loss", {"X", "Label", "Weight"}},
+    {"smooth_l1_loss", {"X", "Y", "InsideWeight", "OutsideWeight"}},
     {"bilinear_tensor_product", {"X", "Y", "Weight", "Bias"}},
     {"gather", {"X", "Index", "Axis"}},
     {"repeat_interleave", {"X", "RepeatsTensor"}},
...
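op_ins_map drives code generation for the Python-C op bindings; registering smooth_l1_loss here lets the eager tracer bind its dispensable InsideWeight/OutsideWeight inputs. A conceptual Python mirror of how such a map is consumed (not Paddle source; signature_for is hypothetical):

# Conceptual sketch only: op_ins_map maps an op type to the named inputs
# its generated binding accepts; dispensable ones may be passed as None.
op_ins_map = {
    "nll_loss": ["X", "Label", "Weight"],
    "smooth_l1_loss": ["X", "Y", "InsideWeight", "OutsideWeight"],
}


def signature_for(op_type):
    # e.g. "smooth_l1_loss(X, Y, InsideWeight, OutsideWeight)"
    return "%s(%s)" % (op_type, ", ".join(op_ins_map[op_type]))


print(signature_for("smooth_l1_loss"))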
@@ -106,14 +106,35 @@ _global_flags_ = core.globals()
 # to make sure in most case, we find new dygraph mode first with only one if statement.
 
 
+def _update_monkey_methods(is_eager):
+    """
+    Update monkey methods of VarBase or eager.Tensor while
+    switching eager mode and legacy mode.
+    """
+    from paddle import _C_ops
+    from .dygraph.varbase_patch_methods import monkey_patch_varbase
+    from .dygraph import monkey_patch_math_varbase
+
+    assert isinstance(is_eager, bool)
+    if is_eager:
+        _C_ops.switch_to_eager_ops()
+    else:
+        _C_ops.switch_to_core_ops()
+
+    monkey_patch_varbase()
+    monkey_patch_math_varbase()
+
+
 def _enable_legacy_dygraph():
     global _in_eager_mode_
     _in_eager_mode_ = False
+    _update_monkey_methods(is_eager=False)
 
 
 def _disable_legacy_dygraph():
     global _in_eager_mode_
     _in_eager_mode_ = True
+    _update_monkey_methods(is_eager=True)
 
 
 def _in_eager_without_dygraph_check():
...
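The helper re-patches tensor methods whenever the mode flips, so arithmetic on VarBase or eager.Tensor dispatches to the matching op set. A minimal usage sketch of these internal, unstable helpers:

# Minimal sketch: toggling between legacy and eager dygraph re-runs the
# monkey patching above. These are private framework helpers, not public API.
from paddle.fluid.framework import (_enable_legacy_dygraph,
                                    _disable_legacy_dygraph,
                                    _in_eager_without_dygraph_check)

_enable_legacy_dygraph()
assert not _in_eager_without_dygraph_check()

_disable_legacy_dygraph()
assert _in_eager_without_dygraph_check()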
@@ -1937,6 +1937,9 @@ class OpTest(unittest.TestCase):
                                 "Gradient Check On %s" % str(place))
 
         if check_dygraph:
+            # ensure switch into legacy dygraph
+            g_enable_legacy_dygraph()
+
             dygraph_grad = self._get_dygraph_grad(
                 inputs_to_check, place, output_names, user_defined_grad_outputs,
                 no_grad_set, False)
@@ -1950,6 +1953,8 @@ class OpTest(unittest.TestCase):
             self._assert_is_close(numeric_grads, dygraph_grad, inputs_to_check,
                                   max_relative_error,
                                   "Gradient Check On %s" % str(place))
+            # ensure switch back to eager dygraph
+            g_disable_legacy_dygraph()
 
         if check_eager:
             with fluid.dygraph.base.guard(place):
@@ -2087,7 +2092,6 @@ class OpTest(unittest.TestCase):
                 inputs={"X": loss_sum},
                 outputs={"Out": loss},
                 attrs={'scale': 1.0 / float(len(avg_sum))})
-
             loss.backward()
 
             fetch_list_grad = []
...
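op_test.py toggles the mode imperatively around the dygraph gradient check; a try/finally wrapper (an assumption, not the actual op_test.py code) would restore eager mode even if the check raises:

# Sketch under the assumption above; run_in_legacy_dygraph is hypothetical.
from paddle.fluid.framework import (_enable_legacy_dygraph,
                                    _disable_legacy_dygraph)


def run_in_legacy_dygraph(check_fn):
    _enable_legacy_dygraph()       # switch into legacy dygraph
    try:
        return check_fn()
    finally:
        _disable_legacy_dygraph()  # always switch back to eager mode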
@@ -127,6 +127,9 @@ class TestBicubicInterpOp(OpTest):
         self.data_layout = 'NCHW'
         self.init_test_case()
         self.op_type = "bicubic_interp"
+        # NOTE(dev): some AsDispensable inputs are not used under imperative
+        # mode. Skip check_eager when they are found in Inputs.
+        self.check_eager = True
         input_np = np.random.random(self.input_shape).astype("float64")
 
         if self.data_layout == "NCHW":
@@ -149,8 +152,10 @@ class TestBicubicInterpOp(OpTest):
         self.inputs = {'X': input_np}
         if self.out_size is not None:
             self.inputs['OutSize'] = self.out_size
+            self.check_eager = False
         if self.actual_shape is not None:
             self.inputs['OutSize'] = self.actual_shape
+            self.check_eager = False
 
         self.attrs = {
             'out_h': self.out_h,
@@ -163,10 +168,11 @@ class TestBicubicInterpOp(OpTest):
         self.outputs = {'Out': output_np}
 
     def test_check_output(self):
-        self.check_output()
+        self.check_output(check_eager=self.check_eager)
 
     def test_check_grad(self):
-        self.check_grad(['X'], 'Out', in_place=True)
+        self.check_grad(
+            ['X'], 'Out', in_place=True, check_eager=self.check_eager)
 
     def init_test_case(self):
         self.interp_method = 'bicubic'
@@ -442,4 +448,5 @@ class TestBicubicOpError(unittest.TestCase):
 
 
 if __name__ == "__main__":
+    paddle.enable_static()
     unittest.main()
@@ -16,7 +16,7 @@ from __future__ import print_function
 
 import unittest
 import numpy as np
-from op_test import OpTest
+from op_test import OpTest, _in_eager_without_dygraph_check
 import paddle.fluid.core as core
 import paddle.fluid as fluid
 import paddle
@@ -135,6 +135,10 @@ class TestBicubicInterpOp(OpTest):
         self.data_layout = 'NCHW'
         self.init_test_case()
         self.op_type = "bicubic_interp_v2"
+        # NOTE(dev): some AsDispensable inputs are not used under imperative
+        # mode. Skip check_eager when they are found in Inputs.
+        # TODO(dev): add self.python_api
+        self.check_eager = False
         input_np = np.random.random(self.input_shape).astype("float64")
         scale_h = 0
         scale_w = 0
@@ -166,8 +170,10 @@ class TestBicubicInterpOp(OpTest):
         self.inputs = {'X': input_np}
         if self.out_size is not None:
             self.inputs['OutSize'] = self.out_size
+            self.check_eager = False
         if self.actual_shape is not None:
             self.inputs['OutSize'] = self.actual_shape
+            self.check_eager = False
 
         self.attrs = {
             'out_h': self.out_h,
@@ -186,10 +192,11 @@ class TestBicubicInterpOp(OpTest):
         self.outputs = {'Out': output_np}
 
     def test_check_output(self):
-        self.check_output()
+        self.check_output(check_eager=self.check_eager)
 
     def test_check_grad(self):
-        self.check_grad(['X'], 'Out', in_place=True)
+        self.check_grad(
+            ['X'], 'Out', in_place=True, check_eager=self.check_eager)
 
     def init_test_case(self):
         self.interp_method = 'bicubic'
@@ -543,4 +550,5 @@ class TestBicubicOpError(unittest.TestCase):
 
 
 if __name__ == "__main__":
+    paddle.enable_static()
     unittest.main()
@@ -102,6 +102,9 @@ class TestBilinearInterpOp(OpTest):
         self.data_layout = 'NCHW'
         self.init_test_case()
         self.op_type = "bilinear_interp"
+        # NOTE(dev): some AsDispensable inputs are not used under imperative
+        # mode. Skip check_eager when they are found in Inputs.
+        self.check_eager = True
         input_np = np.random.random(self.input_shape).astype("float64")
 
         if self.data_layout == "NCHW":
@@ -124,8 +127,10 @@ class TestBilinearInterpOp(OpTest):
         self.inputs = {'X': input_np}
         if self.out_size is not None:
             self.inputs['OutSize'] = self.out_size
+            self.check_eager = False
         if self.actual_shape is not None:
             self.inputs['OutSize'] = self.actual_shape
+            self.check_eager = False
 
         self.attrs = {
             'out_h': self.out_h,
@@ -139,10 +144,11 @@ class TestBilinearInterpOp(OpTest):
         self.outputs = {'Out': output_np}
 
     def test_check_output(self):
-        self.check_output()
+        self.check_output(check_eager=self.check_eager)
 
     def test_check_grad(self):
-        self.check_grad(['X'], 'Out', in_place=True)
+        self.check_grad(
+            ['X'], 'Out', in_place=True, check_eager=self.check_eager)
 
     def init_test_case(self):
         self.interp_method = 'bilinear'
@@ -266,6 +272,7 @@ class TestBilinearInterpOpUint8(OpTest):
         self.actual_shape = None
         self.init_test_case()
         self.op_type = "bilinear_interp"
+        self.check_eager = True
         input_np = np.random.randint(
             low=0, high=256, size=self.input_shape).astype("uint8")
 
@@ -282,6 +289,7 @@ class TestBilinearInterpOpUint8(OpTest):
         self.inputs = {'X': input_np}
         if self.out_size is not None:
             self.inputs['OutSize'] = self.out_size
+            self.check_eager = False
 
         self.attrs = {
             'out_h': self.out_h,
@@ -294,7 +302,8 @@ class TestBilinearInterpOpUint8(OpTest):
         self.outputs = {'Out': output_np}
 
     def test_check_output(self):
-        self.check_output_with_place(place=core.CPUPlace(), atol=1)
+        self.check_output_with_place(
+            place=core.CPUPlace(), atol=1, check_eager=self.check_eager)
 
     def init_test_case(self):
         self.interp_method = 'bilinear'
@@ -397,6 +406,7 @@ class TestBilinearInterpOp_attr_tensor(OpTest):
         self.actual_shape = None
         self.init_test_case()
         self.op_type = "bilinear_interp"
+        self.check_eager = True
         self.shape_by_1Dtensor = False
         self.scale_by_1Dtensor = False
         self.attrs = {
@@ -419,12 +429,14 @@ class TestBilinearInterpOp_attr_tensor(OpTest):
 
         if self.shape_by_1Dtensor:
             self.inputs['OutSize'] = self.out_size
+            self.check_eager = False
         elif self.out_size is not None:
             size_tensor = []
             for index, ele in enumerate(self.out_size):
                 size_tensor.append(("x" + str(index), np.ones(
                     (1)).astype('int32') * ele))
             self.inputs['SizeTensor'] = size_tensor
+            self.check_eager = False
 
         self.attrs['out_h'] = self.out_h
         self.attrs['out_w'] = self.out_w
@@ -433,10 +445,11 @@ class TestBilinearInterpOp_attr_tensor(OpTest):
         self.outputs = {'Out': output_np}
 
     def test_check_output(self):
-        self.check_output()
+        self.check_output(check_eager=self.check_eager)
 
     def test_check_grad(self):
-        self.check_grad(['X'], 'Out', in_place=True)
+        self.check_grad(
+            ['X'], 'Out', in_place=True, check_eager=self.check_eager)
 
     def init_test_case(self):
         self.interp_method = 'bilinear'
...
@@ -71,10 +71,10 @@ class TestCropOp(OpTest):
         self.offsets = [1, 2]
 
     def test_check_output(self):
-        self.check_output()
+        self.check_output(check_eager=True)
 
     def test_check_grad_normal(self):
-        self.check_grad(['X'], 'Out')
+        self.check_grad(['X'], 'Out', check_eager=True)
 
 
 class TestCase1(TestCropOp):
@@ -125,4 +125,6 @@ class TestCase6(TestCropOp):
 
 
 if __name__ == '__main__':
+    import paddle
+    paddle.enable_static()
     unittest.main()
@@ -77,10 +77,10 @@ class TestCropTensorOp(OpTest):
         self.offsets = [1, 2]
 
     def test_check_output(self):
-        self.check_output()
+        self.check_output(check_eager=True)
 
     def test_check_grad_normal(self):
-        self.check_grad(['X'], 'Out')
+        self.check_grad(['X'], 'Out', check_eager=True)
 
 
 class TestCase1(TestCropTensorOp):
@@ -175,10 +175,10 @@ class TestCropTensorOpTensorAttr(OpTest):
         self.shape_attr = [0, 0]
 
     def test_check_output(self):
-        self.check_output()
+        self.check_output(check_eager=True)
 
     def test_check_grad_normal(self):
-        self.check_grad(["X"], "Out")
+        self.check_grad(["X"], "Out", check_eager=True)
 
 
 class TestCropTensorOpTensorAttrCase1(TestCropTensorOpTensorAttr):
@@ -262,4 +262,6 @@ class TestCropTensorException(unittest.TestCase):
 
 
 if __name__ == '__main__':
+    import paddle
+    paddle.enable_static()
     unittest.main()
@@ -48,7 +48,7 @@ class TestDecayedAdagradOp1(OpTest):
         self.outputs = {'ParamOut': param_out, 'MomentOut': moment_out}
 
     def test_check_output(self):
-        self.check_output()
+        self.check_output(check_eager=True)
 
 
 class TestDecayedAdagradOp2(OpTest):
@@ -80,8 +80,10 @@ class TestDecayedAdagradOp2(OpTest):
         self.outputs = {'ParamOut': param_out, 'MomentOut': moment_out}
 
     def test_check_output(self):
-        self.check_output()
+        self.check_output(check_eager=True)
 
 
 if __name__ == "__main__":
+    import paddle
+    paddle.enable_static()
     unittest.main()
@@ -45,7 +45,7 @@ class TestDpsgdOp(OpTest):
         self.outputs = {'ParamOut': param_out}
 
     def test_check_output(self):
-        self.check_output()
+        self.check_output(check_eager=True)
 
 
 def dpsgd_step(inputs, attributes):
@@ -70,4 +70,6 @@ def dpsgd_step(inputs, attributes):
 
 
 if __name__ == "__main__":
+    import paddle
+    paddle.enable_static()
     unittest.main()
@@ -101,7 +101,7 @@ class TestFTRLOp(OpTest):
         }
 
     def test_check_output(self):
-        self.check_output()
+        self.check_output(check_eager=True)
 
 
 class TestSparseFTRLOp(unittest.TestCase):
@@ -201,4 +201,6 @@ class TestSparseFTRLOp2(TestSparseFTRLOp):
 
 
 if __name__ == "__main__":
+    import paddle
+    paddle.enable_static()
    unittest.main()
@@ -15,6 +15,7 @@
 from __future__ import print_function
 from __future__ import division
+
 
 import unittest
 import numpy as np
 from op_test import OpTest
@@ -113,6 +114,11 @@ class TestCase1(TestMeanIOUOp):
         self.in_correct_num = 2
         self.in_mean_iou_num = 2
 
+    # NOTE(dev): Skip check_dygraph because the Python API doesn't expose
+    # the in_wrong_num/in_correct_num/in_mean_iou_num arguments.
+    def test_check_output(self):
+        self.check_output(check_dygraph=False, check_eager=False)
+
 
 class TestMeanIOUOpError(unittest.TestCase):
     def test_errors(self):
@@ -130,5 +136,4 @@ class TestMeanIOUOpError(unittest.TestCase):
 
 
 if __name__ == '__main__':
-    paddle.enable_static()
     unittest.main()
@@ -79,6 +79,7 @@ class TestNearestInterpOp(OpTest):
         self.data_layout = 'NCHW'
         self.init_test_case()
         self.op_type = "nearest_interp"
+        self.check_eager = True
         input_np = np.random.random(self.input_shape).astype("float64")
 
         if self.data_layout == "NCHW":
@@ -101,8 +102,10 @@ class TestNearestInterpOp(OpTest):
         self.inputs = {'X': input_np}
         if self.out_size is not None:
             self.inputs['OutSize'] = self.out_size
+            self.check_eager = False
         if self.actual_shape is not None:
             self.inputs['OutSize'] = self.actual_shape
+            self.check_eager = False
         self.attrs = {
             'out_h': self.out_h,
             'out_w': self.out_w,
@@ -114,10 +117,11 @@ class TestNearestInterpOp(OpTest):
         self.outputs = {'Out': output_np}
 
     def test_check_output(self):
-        self.check_output()
+        self.check_output(check_eager=self.check_eager)
 
     def test_check_grad(self):
-        self.check_grad(['X'], 'Out', in_place=True)
+        self.check_grad(
+            ['X'], 'Out', in_place=True, check_eager=self.check_eager)
 
     def init_test_case(self):
         self.interp_method = 'nearest'
@@ -231,6 +235,7 @@ class TestNearestInterpOpUint8(OpTest):
         self.actual_shape = None
         self.init_test_case()
         self.op_type = "nearest_interp"
+        self.check_eager = True
         input_np = np.random.randint(
             low=0, high=256, size=self.input_shape).astype("uint8")
 
@@ -247,6 +252,7 @@ class TestNearestInterpOpUint8(OpTest):
         self.inputs = {'X': input_np}
         if self.out_size is not None:
             self.inputs['OutSize'] = self.out_size
+            self.check_eager = False
         self.attrs = {
             'out_h': self.out_h,
             'out_w': self.out_w,
@@ -257,7 +263,8 @@ class TestNearestInterpOpUint8(OpTest):
         self.outputs = {'Out': output_np}
 
     def test_check_output(self):
-        self.check_output_with_place(place=core.CPUPlace(), atol=1)
+        self.check_output_with_place(
+            place=core.CPUPlace(), atol=1, check_eager=self.check_eager)
 
     def init_test_case(self):
         self.interp_method = 'nearest'
@@ -339,6 +346,9 @@ class TestNearestInterpOp_attr_tensor(OpTest):
             'interp_method': self.interp_method,
             'align_corners': self.align_corners,
         }
+        # NOTE(dev): some AsDispensable inputs are not used under imperative
+        # mode. Skip check_eager when they are found in Inputs.
+        self.check_eager = True
 
         input_np = np.random.random(self.input_shape).astype("float64")
         self.inputs = {'X': input_np}
@@ -355,12 +365,14 @@ class TestNearestInterpOp_attr_tensor(OpTest):
 
         if self.shape_by_1Dtensor:
             self.inputs['OutSize'] = self.out_size
+            self.check_eager = False
         elif self.out_size is not None:
             size_tensor = []
             for index, ele in enumerate(self.out_size):
                 size_tensor.append(("x" + str(index), np.ones(
                     (1)).astype('int32') * ele))
             self.inputs['SizeTensor'] = size_tensor
+            self.check_eager = False
 
         self.attrs['out_h'] = self.out_h
         self.attrs['out_w'] = self.out_w
@@ -370,10 +382,11 @@ class TestNearestInterpOp_attr_tensor(OpTest):
         self.outputs = {'Out': output_np}
 
     def test_check_output(self):
-        self.check_output()
+        self.check_output(check_eager=self.check_eager)
 
     def test_check_grad(self):
-        self.check_grad(['X'], 'Out', in_place=True)
+        self.check_grad(
+            ['X'], 'Out', in_place=True, check_eager=self.check_eager)
 
     def init_test_case(self):
         self.interp_method = 'nearest'
@@ -495,4 +508,6 @@ class TestNearestInterpException(unittest.TestCase):
 
 
 if __name__ == "__main__":
+    import paddle
+    paddle.enable_static()
     unittest.main()
@@ -80,14 +80,14 @@ class TestPRROIPoolOp(OpTest):
         self.set_data()
 
     def test_check_output(self):
-        self.check_output()
+        self.check_output(check_eager=True)
 
     def test_backward(self):
         places = [fluid.CPUPlace()]
         if fluid.core.is_compiled_with_cuda():
             places.append(fluid.CUDAPlace(0))
         for place in places:
-            self.check_grad_with_place(place, ['X'], 'Out')
+            self.check_grad_with_place(place, ['X'], 'Out', check_eager=True)
 
     def run_net(self, place):
         with program_guard(Program(), Program()):
@@ -197,14 +197,14 @@ class TestPRROIPoolOpTensorRoIs(OpTest):
         self.set_data()
 
     def test_check_output(self):
-        self.check_output()
+        self.check_output(check_eager=True)
 
     def test_backward(self):
         places = [fluid.CPUPlace()]
         if fluid.core.is_compiled_with_cuda():
             places.append(fluid.CUDAPlace(0))
         for place in places:
-            self.check_grad_with_place(place, ['X'], 'Out')
+            self.check_grad_with_place(place, ['X'], 'Out', check_eager=True)
 
     def run_net(self, place):
         with program_guard(Program(), Program()):
@@ -280,4 +280,6 @@ class TestPRROIPoolOpTensorRoIs(OpTest):
 
 
 if __name__ == '__main__':
+    import paddle
+    paddle.enable_static()
     unittest.main()
@@ -48,18 +48,27 @@ class TestSmoothL1LossOp1(OpTest):
         }
 
     def test_check_output(self):
-        self.check_output()
+        self.check_output(check_eager=True)
 
     def test_check_grad_normal(self):
-        self.check_grad(['X', 'Y'], 'Out', max_relative_error=0.02)
+        self.check_grad(
+            ['X', 'Y'], 'Out', max_relative_error=0.02, check_eager=True)
 
     def test_check_grad_ingore_x(self):
         self.check_grad(
-            ['Y'], 'Out', max_relative_error=0.03, no_grad_set=set("X"))
+            ['Y'],
+            'Out',
+            max_relative_error=0.03,
+            no_grad_set=set("X"),
+            check_eager=True)
 
     def test_check_grad_ingore_y(self):
         self.check_grad(
-            ['X'], 'Out', max_relative_error=0.03, no_grad_set=set('Y'))
+            ['X'],
+            'Out',
+            max_relative_error=0.03,
+            no_grad_set=set('Y'),
+            check_eager=True)
 
 
 class TestSmoothL1LossOp2(OpTest):
@@ -86,24 +95,27 @@ class TestSmoothL1LossOp2(OpTest):
         }
 
     def test_check_output(self):
-        self.check_output()
+        self.check_output(check_eager=True)
 
     def test_check_grad_normal(self):
-        self.check_grad(['X', 'Y'], 'Out', max_relative_error=0.03)
+        self.check_grad(
+            ['X', 'Y'], 'Out', max_relative_error=0.03, check_eager=True)
 
     def test_check_grad_ingore_x(self):
         self.check_grad(
             ['Y'],
             'Out',
             max_relative_error=0.03,
-            no_grad_set=set(['X', 'InsideWeight', 'OutsideWeight']))
+            no_grad_set=set(['X', 'InsideWeight', 'OutsideWeight']),
+            check_eager=True)
 
     def test_check_grad_ingore_y(self):
         self.check_grad(
             ['X'],
             'Out',
             max_relative_error=0.03,
-            no_grad_set=set(['Y', 'InsideWeight', 'OutsideWeight']))
+            no_grad_set=set(['Y', 'InsideWeight', 'OutsideWeight']),
+            check_eager=True)
 
 
 class TestSmoothL1LossOpError(unittest.TestCase):
...
@@ -163,7 +163,8 @@ class TestSparseMomentumOp(OpTest):
         pass
 
     def test_check_output(self):
-        self.check_output(atol=5e-3 if self.multi_precision else 1e-5)
+        self.check_output(
+            atol=5e-3 if self.multi_precision else 1e-5, check_eager=True)
 
 
 class TestSparseMomentumOpDtype1(TestSparseMomentumOp):
@@ -240,3 +241,7 @@ class TestSparseMomentumOpMultiPrecision3(TestSparseMomentumOp):
 
     def init_use_nesterov(self):
         self.use_nesterov = False
+
+
+if __name__ == "__main__":
+    unittest.main()
@@ -77,12 +77,12 @@ class TestStftOp(OpTest):
 
     def test_check_output(self):
         paddle.enable_static()
-        self.check_output()
+        self.check_output(check_eager=True)
         paddle.disable_static()
 
     def test_check_grad_normal(self):
         paddle.enable_static()
-        self.check_grad(['X'], 'Out')
+        self.check_grad(['X'], 'Out', check_eager=True)
         paddle.disable_static()
...
@@ -131,6 +131,9 @@ class TestTrilinearInterpOp(OpTest):
         self.data_layout = 'NCDHW'
         self.init_test_case()
         self.op_type = "trilinear_interp"
+        # NOTE(dev): some AsDispensable inputs are not used under imperative
+        # mode. Skip check_eager when they are found in Inputs.
+        self.check_eager = True
         input_np = np.random.random(self.input_shape).astype("float32")
 
         if self.data_layout == "NCDHW":
@@ -157,8 +160,10 @@ class TestTrilinearInterpOp(OpTest):
         self.inputs = {'X': input_np}
         if self.out_size is not None:
             self.inputs['OutSize'] = self.out_size
+            self.check_eager = False
         if self.actual_shape is not None:
             self.inputs['OutSize'] = self.actual_shape
+            self.check_eager = False
         # c++ end treat NCDHW the same way as NCHW
         if self.data_layout == 'NCDHW':
             data_layout = 'NCHW'
@@ -177,10 +182,11 @@ class TestTrilinearInterpOp(OpTest):
         self.outputs = {'Out': output_np}
 
     def test_check_output(self):
-        self.check_output()
+        self.check_output(check_eager=self.check_eager)
 
     def test_check_grad(self):
-        self.check_grad(['X'], 'Out', in_place=True)
+        self.check_grad(
+            ['X'], 'Out', in_place=True, check_eager=self.check_eager)
 
     def init_test_case(self):
         self.interp_method = 'trilinear'
@@ -326,6 +332,7 @@ class TestTrilinearInterpOpUint8(OpTest):
         self.actual_shape = None
         self.init_test_case()
         self.op_type = "trilinear_interp"
+        self.check_eager = True
         input_np = np.random.randint(
             low=0, high=256, size=self.input_shape).astype("uint8")
 
@@ -344,6 +351,7 @@ class TestTrilinearInterpOpUint8(OpTest):
         self.inputs = {'X': input_np}
         if self.out_size is not None:
             self.inputs['OutSize'] = self.out_size
+            self.check_eager = False
 
         self.attrs = {
             'out_d': self.out_d,
@@ -357,7 +365,8 @@ class TestTrilinearInterpOpUint8(OpTest):
         self.outputs = {'Out': output_np}
 
     def test_check_output(self):
-        self.check_output_with_place(place=core.CPUPlace(), atol=1)
+        self.check_output_with_place(
+            place=core.CPUPlace(), atol=1, check_eager=self.check_eager)
 
     def init_test_case(self):
         self.interp_method = 'trilinear'
@@ -467,6 +476,7 @@ class TestTrilinearInterpOp_attr_tensor(OpTest):
         self.actual_shape = None
         self.init_test_case()
         self.op_type = "trilinear_interp"
+        self.check_eager = True
         self.shape_by_1Dtensor = False
         self.scale_by_1Dtensor = False
         self.attrs = {
@@ -492,12 +502,14 @@ class TestTrilinearInterpOp_attr_tensor(OpTest):
 
         if self.shape_by_1Dtensor:
             self.inputs['OutSize'] = self.out_size
+            self.check_eager = False
         elif self.out_size is not None:
             size_tensor = []
             for index, ele in enumerate(self.out_size):
                 size_tensor.append(("x" + str(index), np.ones(
                     (1)).astype('int32') * ele))
             self.inputs['SizeTensor'] = size_tensor
+            self.check_eager = False
 
         self.attrs['out_d'] = self.out_d
         self.attrs['out_h'] = self.out_h
@@ -508,10 +520,11 @@ class TestTrilinearInterpOp_attr_tensor(OpTest):
         self.outputs = {'Out': output_np}
 
     def test_check_output(self):
-        self.check_output()
+        self.check_output(check_eager=self.check_eager)
 
     def test_check_grad(self):
-        self.check_grad(['X'], 'Out', in_place=True)
+        self.check_grad(
+            ['X'], 'Out', in_place=True, check_eager=self.check_eager)
 
     def init_test_case(self):
         self.interp_method = 'trilinear'
...
@@ -145,6 +145,10 @@ class TestTrilinearInterpOp(OpTest):
         self.data_layout = 'NCDHW'
         self.init_test_case()
         self.op_type = "trilinear_interp_v2"
+        # NOTE(dev): some AsDispensable inputs are not used under imperative
+        # mode. Skip check_eager when they are found in Inputs.
+        # TODO(dev): add self.python_api
+        self.check_eager = False
         input_np = np.random.random(self.input_shape).astype("float32")
 
         scale_w = 0
@@ -183,8 +187,10 @@ class TestTrilinearInterpOp(OpTest):
         self.inputs = {'X': input_np}
         if self.out_size is not None:
             self.inputs['OutSize'] = self.out_size
+            self.check_eager = False
         if self.actual_shape is not None:
             self.inputs['OutSize'] = self.actual_shape
+            self.check_eager = False
         # c++ end treat NCDHW the same way as NCHW
         if self.data_layout == 'NCDHW':
             data_layout = 'NCHW'
@@ -208,10 +214,11 @@ class TestTrilinearInterpOp(OpTest):
         self.outputs = {'Out': output_np}
 
     def test_check_output(self):
-        self.check_output()
+        self.check_output(check_eager=self.check_eager)
 
     def test_check_grad(self):
-        self.check_grad(['X'], 'Out', in_place=True)
+        self.check_grad(
+            ['X'], 'Out', in_place=True, check_eager=self.check_eager)
 
     def init_test_case(self):
         self.interp_method = 'trilinear'
@@ -357,6 +364,8 @@ class TestTrilinearInterpOpUint8(OpTest):
         self.actual_shape = None
         self.init_test_case()
         self.op_type = "trilinear_interp_v2"
+        # TODO(dev): add self.python_api
+        self.check_eager = False
         input_np = np.random.randint(
             low=0, high=256, size=self.input_shape).astype("uint8")
 
@@ -383,6 +392,7 @@ class TestTrilinearInterpOpUint8(OpTest):
         self.inputs = {'X': input_np}
         if self.out_size is not None:
             self.inputs['OutSize'] = self.out_size
+            self.check_eager = False
 
         self.attrs = {
             'out_d': self.out_d,
@@ -401,7 +411,8 @@ class TestTrilinearInterpOpUint8(OpTest):
         self.outputs = {'Out': output_np}
 
     def test_check_output(self):
-        self.check_output_with_place(place=core.CPUPlace(), atol=1)
+        self.check_output_with_place(
+            place=core.CPUPlace(), atol=1, check_eager=self.check_eager)
 
     def init_test_case(self):
         self.interp_method = 'trilinear'
@@ -511,6 +522,8 @@ class TestTrilinearInterpOp_attr_tensor(OpTest):
         self.actual_shape = None
         self.init_test_case()
         self.op_type = "trilinear_interp_v2"
+        # TODO(dev): add self.python_api
+        self.check_eager = False
         self.shape_by_1Dtensor = False
         self.scale_by_1Dtensor = False
         self.attrs = {
@@ -543,12 +556,14 @@ class TestTrilinearInterpOp_attr_tensor(OpTest):
 
         if self.shape_by_1Dtensor:
             self.inputs['OutSize'] = self.out_size
+            self.check_eager = False
         elif self.out_size is not None:
             size_tensor = []
             for index, ele in enumerate(self.out_size):
                 size_tensor.append(("x" + str(index), np.ones(
                     (1)).astype('int32') * ele))
             self.inputs['SizeTensor'] = size_tensor
+            self.check_eager = False
 
         self.attrs['out_d'] = self.out_d
         self.attrs['out_h'] = self.out_h
@@ -565,10 +580,11 @@ class TestTrilinearInterpOp_attr_tensor(OpTest):
         self.outputs = {'Out': output_np}
 
     def test_check_output(self):
-        self.check_output()
+        self.check_output(check_eager=self.check_eager)
 
    def test_check_grad(self):
-        self.check_grad(['X'], 'Out', in_place=True)
+        self.check_grad(
+            ['X'], 'Out', in_place=True, check_eager=self.check_eager)
 
     def init_test_case(self):
         self.interp_method = 'trilinear'
...