Unverified · Commit 67a105f9 authored by wanghuancoder, committed by GitHub

delete old dygraph mkldnn op test (#51953)

* delete old dygraph mkldnn op test
Parent 866c2877
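The diff below repeats one mechanical pattern across the MKLDNN op tests: the test utilities are now imported from `eager_op_test` instead of the removed `op_test` module, and output/gradient checks pass `check_dygraph=False`, since these oneDNN tests no longer exercise the old dygraph path. A minimal sketch of the pattern, using a hypothetical `TestReluOneDNNOpExample` class that is not part of this commit:

import unittest

import numpy as np

from paddle.fluid.tests.unittests.eager_op_test import OpTest


# Hypothetical example illustrating the migration applied throughout this commit.
class TestReluOneDNNOpExample(OpTest):
    def setUp(self):
        self.op_type = "relu"
        x = np.random.uniform(-1, 1, [2, 4, 3, 5]).astype(np.float32)
        # Keep inputs away from 0 so the numeric gradient of relu is stable.
        x[np.abs(x) < 0.005] = 0.02
        self.inputs = {'X': x}
        self.outputs = {'Out': np.maximum(x, 0)}
        self.attrs = {'use_mkldnn': True}

    def test_check_output(self):
        # The old dygraph check is skipped for the oneDNN-specific tests.
        self.check_output(check_dygraph=False)

    def test_check_grad(self):
        self.check_grad(['X'], 'Out', check_dygraph=False)


if __name__ == '__main__':
    unittest.main()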
......@@ -19,7 +19,7 @@ import numpy as np
from scipy.special import erf
from paddle.fluid import core
from paddle.fluid.tests.unittests.op_test import (
from paddle.fluid.tests.unittests.eager_op_test import (
OpTestTool,
convert_float_to_uint16,
)
......
......@@ -21,7 +21,10 @@ from scipy.special import expit
import paddle
import paddle.nn.functional as F
from paddle.fluid import core
from paddle.fluid.tests.unittests.op_test import OpTest, convert_float_to_uint16
from paddle.fluid.tests.unittests.eager_op_test import (
OpTest,
convert_float_to_uint16,
)
from paddle.fluid.tests.unittests.test_activation_op import (
TestAbs,
TestAbs_ZeroDim,
......@@ -220,7 +223,6 @@ class TestMKLDNNSwishDim2(TestSwish):
super().setUp()
self.attrs["use_mkldnn"] = True
self.check_eager = False
def init_dtype(self):
self.dtype = np.float32
......
......@@ -18,7 +18,7 @@ import numpy as np
from mkldnn_op_test import check_if_mkldnn_batchnorm_primitives_exist_in_bwd
from paddle.fluid import core
from paddle.fluid.tests.unittests.op_test import _set_use_system_allocator
from paddle.fluid.tests.unittests.eager_op_test import _set_use_system_allocator
from paddle.fluid.tests.unittests.test_batch_norm_op import (
TestBatchNormOpInference,
TestBatchNormOpTraining,
......
......@@ -17,7 +17,10 @@ import unittest
import numpy as np
from paddle.fluid.tests.unittests.op_test import OpTest, skip_check_grad_ci
from paddle.fluid.tests.unittests.eager_op_test import (
OpTest,
skip_check_grad_ci,
)
def bilinear_interp_mkldnn_np(
......
......@@ -17,7 +17,7 @@ import unittest
import numpy as np
from paddle.fluid.tests.unittests.op_test import (
from paddle.fluid.tests.unittests.eager_op_test import (
OpTest,
convert_float_to_uint16,
skip_check_grad_ci,
......
......@@ -18,7 +18,10 @@ import numpy as np
import paddle
from paddle.fluid import core
from paddle.fluid.tests.unittests.op_test import OpTest, convert_float_to_uint16
from paddle.fluid.tests.unittests.eager_op_test import (
OpTest,
convert_float_to_uint16,
)
@unittest.skipIf(
......
......@@ -18,7 +18,7 @@ import numpy as np
import paddle
from paddle.fluid import core
from paddle.fluid.tests.unittests.op_test import (
from paddle.fluid.tests.unittests.eager_op_test import (
OpTest,
OpTestTool,
convert_float_to_uint16,
......@@ -66,10 +66,10 @@ class TestClipOneDNNOp(OpTest):
self.attrs = {'min': 7.2, 'max': 9.6, 'use_mkldnn': True}
def test_check_output(self):
self.check_output()
self.check_output(check_dygraph=False)
def test_check_grad(self):
self.check_grad(['X'], 'Out')
self.check_grad(['X'], 'Out', check_dygraph=False)
class TestClipOneDNNOp_ZeroDim(TestClipOneDNNOp):
......@@ -118,7 +118,7 @@ def create_bf16_test_class(parent):
self.dx[j][i] = self.dout[j][i]
def test_check_output(self):
self.check_output_with_place(core.CPUPlace())
self.check_output_with_place(core.CPUPlace(), check_dygraph=False)
def test_check_grad(self):
self.calculate_grads()
......@@ -128,6 +128,7 @@ def create_bf16_test_class(parent):
"Out",
user_defined_grads=[self.dx],
user_defined_grad_outputs=[convert_float_to_uint16(self.dout)],
check_dygraph=False,
)
cls_name = "{0}_{1}".format(parent.__name__, "BF16")
......
......@@ -18,7 +18,10 @@ import numpy as np
from paddle import enable_static
from paddle.fluid import core
from paddle.fluid.tests.unittests.op_test import OpTest, convert_float_to_uint16
from paddle.fluid.tests.unittests.eager_op_test import (
OpTest,
convert_float_to_uint16,
)
@unittest.skipIf(
......
......@@ -16,7 +16,7 @@ import unittest
import numpy as np
from paddle.fluid.tests.unittests.op_test import OpTest
from paddle.fluid.tests.unittests.eager_op_test import OpTest
class TestConcatOp(OpTest):
......
......@@ -18,7 +18,7 @@ import numpy as np
from paddle import enable_static
from paddle.fluid import core
from paddle.fluid.tests.unittests.op_test import OpTest
from paddle.fluid.tests.unittests.eager_op_test import OpTest
class TestConcatAxis0OneDNNOp(OpTest):
......@@ -47,12 +47,12 @@ class TestConcatAxis0OneDNNOp(OpTest):
self.dtype = np.float32
def test_check_output(self):
self.check_output_with_place(core.CPUPlace())
self.check_output_with_place(core.CPUPlace(), check_dygraph=False)
def test_check_grad(self):
self.check_grad(['x0'], 'Out')
self.check_grad(['x1'], 'Out')
self.check_grad(['x2'], 'Out')
self.check_grad(['x0'], 'Out', check_dygraph=False)
self.check_grad(['x1'], 'Out', check_dygraph=False)
self.check_grad(['x2'], 'Out', check_dygraph=False)
def init_test_data(self):
self.x0 = np.random.random(self.x0_shape).astype(np.float32)
......
......@@ -17,7 +17,7 @@ import unittest
import numpy as np
from paddle.fluid import core
from paddle.fluid.tests.unittests.op_test import (
from paddle.fluid.tests.unittests.eager_op_test import (
OpTest,
OpTestTool,
convert_float_to_uint16,
......
......@@ -18,7 +18,7 @@ import unittest
import numpy as np
from paddle.fluid import core
from paddle.fluid.tests.unittests.op_test import OpTest
from paddle.fluid.tests.unittests.eager_op_test import OpTest
from paddle.fluid.tests.unittests.test_conv2d_op import (
TestConv2DOp,
conv2d_forward_naive,
......
......@@ -16,7 +16,10 @@ import unittest
import numpy as np
from paddle.fluid.tests.unittests.op_test import OpTest, skip_check_grad_ci
from paddle.fluid.tests.unittests.eager_op_test import (
OpTest,
skip_check_grad_ci,
)
from paddle.fluid.tests.unittests.test_conv2d_op import (
TestConv2DOp,
TestConv2DOp_v2,
......
......@@ -18,7 +18,10 @@ import numpy as np
from paddle import enable_static
from paddle.fluid import core
from paddle.fluid.tests.unittests.op_test import OpTest, convert_float_to_uint16
from paddle.fluid.tests.unittests.eager_op_test import (
OpTest,
convert_float_to_uint16,
)
from paddle.fluid.tests.unittests.test_conv2d_transpose_op import (
conv2dtranspose_forward_naive,
)
......
......@@ -17,7 +17,7 @@ import unittest
import numpy as np
from paddle import enable_static
from paddle.fluid.tests.unittests.op_test import OpTest
from paddle.fluid.tests.unittests.eager_op_test import OpTest
from paddle.fluid.tests.unittests.test_conv2d_transpose_op import (
TestConv2DTransposeOp,
)
......
......@@ -17,7 +17,10 @@ import unittest
import numpy as np
import paddle
from paddle.fluid.tests.unittests.op_test import OpTest, convert_float_to_uint16
from paddle.fluid.tests.unittests.eager_op_test import (
OpTest,
convert_float_to_uint16,
)
class TestDeQuantizeOp(OpTest):
......
......@@ -18,7 +18,10 @@ import numpy as np
from paddle import enable_static
from paddle.fluid import core
from paddle.fluid.tests.unittests.op_test import OpTest, convert_float_to_uint16
from paddle.fluid.tests.unittests.eager_op_test import (
OpTest,
convert_float_to_uint16,
)
@unittest.skipIf(
......
......@@ -17,7 +17,7 @@ import unittest
import numpy as np
from paddle import enable_static
from paddle.fluid.tests.unittests.op_test import skip_check_grad_ci
from paddle.fluid.tests.unittests.eager_op_test import skip_check_grad_ci
from paddle.fluid.tests.unittests.test_elementwise_add_op import (
TestElementwiseAddOp,
)
......
......@@ -19,7 +19,7 @@ import numpy as np
from paddle import enable_static
from paddle.fluid import core
from paddle.fluid.framework import _current_expected_place
from paddle.fluid.tests.unittests.op_test import (
from paddle.fluid.tests.unittests.eager_op_test import (
OpTest,
OpTestTool,
convert_float_to_uint16,
......
......@@ -18,7 +18,10 @@ import numpy as np
from paddle import enable_static
from paddle.fluid import core
from paddle.fluid.tests.unittests.op_test import OpTest, convert_float_to_uint16
from paddle.fluid.tests.unittests.eager_op_test import (
OpTest,
convert_float_to_uint16,
)
@unittest.skipIf(
......
......@@ -17,7 +17,7 @@ import unittest
import numpy as np
from paddle import enable_static
from paddle.fluid.tests.unittests.op_test import skip_check_grad_ci
from paddle.fluid.tests.unittests.eager_op_test import skip_check_grad_ci
from paddle.fluid.tests.unittests.test_elementwise_mul_op import (
ElementwiseMulOp,
)
......
......@@ -19,7 +19,7 @@ import numpy as np
from paddle import enable_static
from paddle.fluid import core
from paddle.fluid.framework import _current_expected_place
from paddle.fluid.tests.unittests.op_test import (
from paddle.fluid.tests.unittests.eager_op_test import (
OpTest,
OpTestTool,
convert_float_to_uint16,
......
......@@ -18,7 +18,7 @@ import numpy as np
import paddle
from paddle.fluid import core
from paddle.fluid.tests.unittests.op_test import (
from paddle.fluid.tests.unittests.eager_op_test import (
OpTest,
OpTestTool,
convert_float_to_uint16,
......
......@@ -18,7 +18,10 @@ import numpy as np
from paddle import enable_static
from paddle.fluid import core
from paddle.fluid.tests.unittests.op_test import OpTest, convert_float_to_uint16
from paddle.fluid.tests.unittests.eager_op_test import (
OpTest,
convert_float_to_uint16,
)
def fully_connected_naive(input, weights, bias_data):
......
......@@ -16,7 +16,7 @@ import unittest
import numpy as np
from paddle.fluid.tests.unittests.op_test import OpTest, OpTestTool
from paddle.fluid.tests.unittests.eager_op_test import OpTest, OpTestTool
@OpTestTool.skip_if_not_cpu()
......
......@@ -16,7 +16,7 @@ import unittest
import numpy as np
from paddle.fluid.tests.unittests.op_test import OpTest
from paddle.fluid.tests.unittests.eager_op_test import OpTest
def fully_connected_naive(input, weights, bias_data):
......
......@@ -17,7 +17,7 @@ import unittest
import numpy as np
import paddle
from paddle.fluid.tests.unittests.op_test import OpTest, OpTestTool
from paddle.fluid.tests.unittests.eager_op_test import OpTest, OpTestTool
@OpTestTool.skip_if_not_cpu_bf16()
......
......@@ -18,7 +18,7 @@ import numpy as np
import paddle
from paddle.fluid import core
from paddle.fluid.tests.unittests.op_test import (
from paddle.fluid.tests.unittests.eager_op_test import (
OpTest,
OpTestTool,
convert_float_to_uint16,
......
......@@ -17,7 +17,10 @@ import unittest
import numpy as np
from paddle.fluid import core
from paddle.fluid.tests.unittests.op_test import OpTest, convert_float_to_uint16
from paddle.fluid.tests.unittests.eager_op_test import (
OpTest,
convert_float_to_uint16,
)
from paddle.fluid.tests.unittests.test_fusion_gru_op import fusion_gru
from paddle.fluid.tests.unittests.test_fusion_lstm_op import ACTIVATION
......
......@@ -16,7 +16,7 @@ import unittest
import numpy as np
from paddle.fluid.tests.unittests.op_test import OpTest
from paddle.fluid.tests.unittests.eager_op_test import OpTest
from paddle.fluid.tests.unittests.test_fusion_gru_op import fusion_gru
from paddle.fluid.tests.unittests.test_fusion_lstm_op import ACTIVATION
......
......@@ -17,7 +17,10 @@ import unittest
import numpy as np
from paddle.fluid import core
from paddle.fluid.tests.unittests.op_test import OpTest, convert_float_to_uint16
from paddle.fluid.tests.unittests.eager_op_test import (
OpTest,
convert_float_to_uint16,
)
from paddle.fluid.tests.unittests.test_fusion_lstm_op import (
ACTIVATION,
fusion_lstm,
......
......@@ -16,7 +16,7 @@ import unittest
import numpy as np
from paddle.fluid.tests.unittests.op_test import OpTest
from paddle.fluid.tests.unittests.eager_op_test import OpTest
from paddle.fluid.tests.unittests.test_fusion_lstm_op import (
ACTIVATION,
fusion_lstm,
......
......@@ -21,14 +21,14 @@ import numpy as np
from paddle import enable_static, fluid
from paddle.fluid import core
from paddle.fluid.tests.unittests.eager_op_test import (
_set_use_system_allocator,
convert_float_to_uint16,
)
from paddle.fluid.tests.unittests.mkldnn.test_layer_norm_mkldnn_op import (
TestLayerNormMKLDNNOp,
_reference_layer_norm_naive,
)
from paddle.fluid.tests.unittests.op_test import (
_set_use_system_allocator,
convert_float_to_uint16,
)
np.random.random(123)
......
......@@ -21,7 +21,7 @@ import numpy as np
from paddle import enable_static, fluid
from paddle.fluid import core
from paddle.fluid.tests.unittests.op_test import (
from paddle.fluid.tests.unittests.eager_op_test import (
OpTestTool,
_set_use_system_allocator,
)
......
......@@ -18,7 +18,7 @@ import numpy as np
import paddle
from paddle.fluid import core
from paddle.fluid.tests.unittests.op_test import (
from paddle.fluid.tests.unittests.eager_op_test import (
OpTest,
OpTestTool,
convert_float_to_uint16,
......@@ -57,7 +57,7 @@ class TestLogSoftmaxOneDNNOp(OpTest):
self.axis = -1
def test_check_output(self):
self.check_output_with_place(core.CPUPlace())
self.check_output_with_place(core.CPUPlace(), check_dygraph=False)
class TestLogSoftmax0DOneDNNOp(TestLogSoftmaxOneDNNOp):
......
......@@ -18,7 +18,10 @@ import numpy as np
from paddle import enable_static
from paddle.fluid import core
from paddle.fluid.tests.unittests.op_test import OpTest, convert_float_to_uint16
from paddle.fluid.tests.unittests.eager_op_test import (
OpTest,
convert_float_to_uint16,
)
@unittest.skipIf(
......
......@@ -17,7 +17,10 @@ import unittest
import numpy as np
from paddle.fluid.tests.unittests.op_test import OpTest, skip_check_grad_ci
from paddle.fluid.tests.unittests.eager_op_test import (
OpTest,
skip_check_grad_ci,
)
class TestDnnlMatMulOp(OpTest):
......
......@@ -18,6 +18,11 @@ import numpy as np
import paddle
from paddle.fluid import core
from paddle.fluid.tests.unittests.eager_op_test import (
OpTest,
OpTestTool,
convert_float_to_uint16,
)
from paddle.fluid.tests.unittests.mkldnn.test_matmul_mkldnn_op import (
TestMatMulOpTransposeReshapeBasicFloat,
TestMatMulOpTransposeReshapeEmptyFloat,
......@@ -31,11 +36,6 @@ from paddle.fluid.tests.unittests.mkldnn.test_matmul_mkldnn_op import (
TestReshapeTransposeMatMulOp4DXYFloat,
TestReshapeTransposeMatMulOp4DYFloat,
)
from paddle.fluid.tests.unittests.op_test import (
OpTest,
OpTestTool,
convert_float_to_uint16,
)
def reference_matmul(X, Y, transpose_x=False, transpose_y=False):
......
......@@ -18,7 +18,10 @@ import numpy as np
import paddle
from paddle.fluid import core
from paddle.fluid.tests.unittests.op_test import OpTest, skip_check_grad_ci
from paddle.fluid.tests.unittests.eager_op_test import (
OpTest,
skip_check_grad_ci,
)
'''
test case for s8 * s8
......
......@@ -18,7 +18,7 @@ import numpy as np
import paddle
from paddle.fluid import core
from paddle.fluid.tests.unittests.op_test import (
from paddle.fluid.tests.unittests.eager_op_test import (
OpTest,
OpTestTool,
convert_float_to_uint16,
......
......@@ -16,7 +16,7 @@ import unittest
import numpy as np
from paddle.fluid.tests.unittests.op_test import OpTest
from paddle.fluid.tests.unittests.eager_op_test import OpTest
from paddle.fluid.tests.unittests.test_fusion_gru_op import (
ACTIVATION,
fusion_gru,
......
......@@ -16,7 +16,10 @@ import unittest
import numpy as np
from paddle.fluid.tests.unittests.op_test import OpTest, skip_check_grad_ci
from paddle.fluid.tests.unittests.eager_op_test import (
OpTest,
skip_check_grad_ci,
)
def nearest_neighbor_interp_mkldnn_np(
......
......@@ -16,7 +16,7 @@ import unittest
import numpy as np
from paddle.fluid.tests.unittests.op_test import (
from paddle.fluid.tests.unittests.eager_op_test import (
OpTest,
OpTestTool,
convert_float_to_uint16,
......
......@@ -18,14 +18,14 @@ import numpy as np
from paddle import enable_static
from paddle.fluid import core
from paddle.fluid.tests.unittests.npu.test_pool2d_op_npu import (
pool2d_backward_navie as pool2d_backward_naive,
)
from paddle.fluid.tests.unittests.op_test import (
from paddle.fluid.tests.unittests.eager_op_test import (
OpTest,
OpTestTool,
convert_float_to_uint16,
)
from paddle.fluid.tests.unittests.npu.test_pool2d_op_npu import (
pool2d_backward_navie as pool2d_backward_naive,
)
from paddle.fluid.tests.unittests.test_pool2d_op import (
TestPool2D_Op_Mixin,
max_pool2D_forward_naive,
......
......@@ -17,7 +17,7 @@ import unittest
import numpy as np
from paddle.fluid import core
from paddle.fluid.tests.unittests.op_test import OpTest
from paddle.fluid.tests.unittests.eager_op_test import OpTest
from paddle.fluid.tests.unittests.test_pool2d_op import (
TestPool2D_Op,
max_pool2D_forward_naive,
......
......@@ -18,7 +18,7 @@ import numpy as np
import paddle
from paddle.fluid import core
from paddle.fluid.tests.unittests.op_test import (
from paddle.fluid.tests.unittests.eager_op_test import (
OpTest,
OpTestTool,
convert_float_to_uint16,
......@@ -69,10 +69,10 @@ class TestPReluModeChannelOneDNNOp(OpTest):
self.outputs = {'Out': ref_prelu(self.x, self.alpha, self.mode)}
def test_check_output(self):
self.check_output()
self.check_output(check_dygraph=False)
def test_check_grad(self):
self.check_grad(['X', 'Alpha'], 'Out')
self.check_grad(['X', 'Alpha'], 'Out', check_dygraph=False)
class TestPReluModeAllOneDNNOp(TestPReluModeChannelOneDNNOp):
......@@ -83,7 +83,7 @@ class TestPReluModeAllOneDNNOp(TestPReluModeChannelOneDNNOp):
# Skip 'Alpha' input check because in mode = 'all' it has to be a single
# 1D value so checking if it has at least 100 values will cause an error
def test_check_grad(self):
self.check_grad(['X'], 'Out')
self.check_grad(['X'], 'Out', check_dygraph=False)
class TestPReluModeElementOneDNNOp(TestPReluModeChannelOneDNNOp):
......@@ -158,7 +158,7 @@ def create_bf16_test_class(parent):
self.dout = dout
def test_check_output(self):
self.check_output_with_place(core.CPUPlace())
self.check_output_with_place(core.CPUPlace(), check_dygraph=False)
def test_check_grad(self):
self.calculate_grads()
......@@ -168,6 +168,7 @@ def create_bf16_test_class(parent):
"Out",
user_defined_grads=[self.dx, self.dalpha],
user_defined_grad_outputs=[convert_float_to_uint16(self.dout)],
check_dygraph=False,
)
cls_name = "{0}_{1}".format(parent.__name__, "BF16")
......
......@@ -17,7 +17,7 @@ import unittest
import numpy as np
import paddle
from paddle.fluid.tests.unittests.op_test import OpTest
from paddle.fluid.tests.unittests.eager_op_test import OpTest
class TestQuantizeOp(OpTest):
......
......@@ -18,7 +18,7 @@ import numpy as np
import paddle
from paddle.fluid import core
from paddle.fluid.tests.unittests.op_test import (
from paddle.fluid.tests.unittests.eager_op_test import (
OpTest,
OpTestTool,
convert_float_to_uint16,
......
......@@ -17,7 +17,7 @@ import unittest
import numpy as np
import paddle
from paddle.fluid.tests.unittests.op_test import (
from paddle.fluid.tests.unittests.eager_op_test import (
OpTest,
OpTestTool,
skip_check_grad_ci,
......@@ -33,12 +33,12 @@ class TestReduceSumDefaultOneDNNOp(OpTest):
self.attrs = {'use_mkldnn': self.use_mkldnn}
def test_check_output(self):
self.check_output()
self.check_output(check_dygraph=False)
class TestReduceDefaultWithGradOneDNNOp(TestReduceSumDefaultOneDNNOp):
def test_check_grad(self):
self.check_grad(['X'], 'Out')
self.check_grad(['X'], 'Out', check_dygraph=False)
class TestReduceSum4DOneDNNOp(TestReduceDefaultWithGradOneDNNOp):
......
......@@ -19,7 +19,7 @@ from mkldnn_op_test import format_reorder
from paddle import fluid
from paddle.fluid import core
from paddle.fluid.tests.unittests.op_test import OpTest
from paddle.fluid.tests.unittests.eager_op_test import OpTest
class TestReQuantizeOp(OpTest):
......
......@@ -18,7 +18,10 @@ import numpy as np
from paddle import enable_static
from paddle.fluid import core
from paddle.fluid.tests.unittests.op_test import OpTest, convert_float_to_uint16
from paddle.fluid.tests.unittests.eager_op_test import (
OpTest,
convert_float_to_uint16,
)
@unittest.skipIf(
......@@ -55,7 +58,9 @@ class TestReshapeBf16Op(OpTest):
self.input_data = convert_float_to_uint16(self.input_data_fp32)
def test_check_output(self):
self.check_output_with_place(core.CPUPlace(), no_check_set=['XShape'])
self.check_output_with_place(
core.CPUPlace(), no_check_set=['XShape'], check_dygraph=False
)
def test_check_grad(self):
self.check_grad_with_place(
......
......@@ -18,7 +18,7 @@ import numpy as np
import paddle
from paddle.fluid import core
from paddle.fluid.tests.unittests.op_test import (
from paddle.fluid.tests.unittests.eager_op_test import (
OpTest,
OpTestTool,
convert_float_to_uint16,
......@@ -59,10 +59,10 @@ class TestReshape2OneDNNOp(OpTest):
pass
def test_check_output(self):
self.check_output(no_check_set=['XShape'])
self.check_output(no_check_set=['XShape'], check_dygraph=False)
def test_check_grad(self):
self.check_grad(["X"], "Out")
self.check_grad(["X"], "Out", check_dygraph=False)
class TestReshape2OneDNNOpDimInfer1(TestReshape2OneDNNOp):
......@@ -154,7 +154,7 @@ class TestReshapeOneDNNOp(TestReshape2OneDNNOp):
self.outputs = {"Out": self.inputs["X"].reshape(self.infered_shape)}
def test_check_output(self):
self.check_output()
self.check_output(check_dygraph=False)
class TestReshapeOneDNNOpDimInfer1(TestReshapeOneDNNOp):
......@@ -173,7 +173,7 @@ class TestReshapeOneDNNOp_attr_OnlyShape(TestReshape2OneDNNOp_attr_OnlyShape):
self.outputs = {"Out": self.inputs["X"].reshape(self.infered_shape)}
def test_check_output(self):
self.check_output()
self.check_output(check_dygraph=False)
class TestReshapeOneDNNOpDimInfer1_attr_OnlyShape(
......@@ -202,7 +202,7 @@ def create_reshape_bf16_test_classes(parent):
def test_check_output(self):
self.check_output_with_place(
core.CPUPlace(), no_check_set=["XShape"]
core.CPUPlace(), no_check_set=["XShape"], check_dygraph=False
)
def test_check_grad(self):
......@@ -213,6 +213,7 @@ def create_reshape_bf16_test_classes(parent):
"Out",
user_defined_grads=[self.dx],
user_defined_grad_outputs=[self.dout],
check_dygraph=False,
)
cls_name = "{0}_{1}".format(parent.__name__, "Reshape2_BF16")
......@@ -228,7 +229,7 @@ def create_reshape_bf16_test_classes(parent):
self.outputs = {"Out": self.x.reshape(self.new_shape)}
def test_check_output(self):
self.check_output_with_place(core.CPUPlace())
self.check_output_with_place(core.CPUPlace(), check_dygraph=False)
def test_check_grad(self):
self.calculate_grads()
......@@ -238,6 +239,7 @@ def create_reshape_bf16_test_classes(parent):
"Out",
user_defined_grads=[self.dx],
user_defined_grad_outputs=[convert_float_to_uint16(self.dout)],
check_dygraph=False,
)
cls_name = "{0}_{1}".format(parent.__name__, "Reshape_BF16")
......
......@@ -18,7 +18,10 @@ import numpy as np
import paddle
from paddle.fluid import core
from paddle.fluid.tests.unittests.op_test import OpTest, convert_float_to_uint16
from paddle.fluid.tests.unittests.eager_op_test import (
OpTest,
convert_float_to_uint16,
)
@unittest.skipIf(
......
......@@ -17,7 +17,7 @@ import unittest
import numpy as np
import paddle
from paddle.fluid.tests.unittests.op_test import OpTest
from paddle.fluid.tests.unittests.eager_op_test import OpTest
class TestScaleOp(OpTest):
......@@ -38,7 +38,7 @@ class TestScaleOp(OpTest):
self.check_output(check_dygraph=False)
def test_check_grad(self):
self.check_grad(['X'], 'Out')
self.check_grad(['X'], 'Out', check_dygraph=False)
class TestScaleOp_ZeroDim(TestScaleOp):
......@@ -65,7 +65,7 @@ class TestScaleOpBiasNotAfterScale(OpTest):
self.check_output(check_dygraph=False)
def test_check_grad(self):
self.check_grad(['X'], 'Out')
self.check_grad(['X'], 'Out', check_dygraph=False)
class TestScaleOpScaleTensor(OpTest):
......@@ -80,10 +80,10 @@ class TestScaleOpScaleTensor(OpTest):
self.outputs = {'Out': self.inputs['X'] * self.scale}
def test_check_output(self):
self.check_output()
self.check_output(check_dygraph=False)
def test_check_grad(self):
self.check_grad(['X'], 'Out')
self.check_grad(['X'], 'Out', check_dygraph=False)
class TestScaleOpScaleTensorNotBiasAfterScale(OpTest):
......@@ -101,10 +101,10 @@ class TestScaleOpScaleTensorNotBiasAfterScale(OpTest):
}
def test_check_output(self):
self.check_output()
self.check_output(check_dygraph=False)
def test_check_grad(self):
self.check_grad(['X'], 'Out')
self.check_grad(['X'], 'Out', check_dygraph=False)
if __name__ == "__main__":
......
......@@ -18,7 +18,7 @@ import numpy as np
import paddle
from paddle.fluid import core
from paddle.fluid.tests.unittests.op_test import OpTest, OpTestTool
from paddle.fluid.tests.unittests.eager_op_test import OpTest, OpTestTool
@OpTestTool.skip_if_not_cpu_bf16()
......
......@@ -18,7 +18,7 @@ import numpy as np
import paddle
from paddle.fluid import core
from paddle.fluid.tests.unittests.op_test import OpTest, OpTestTool
from paddle.fluid.tests.unittests.eager_op_test import OpTest, OpTestTool
@OpTestTool.skip_if_not_cpu_bf16()
......
......@@ -18,7 +18,7 @@ import numpy as np
import paddle
from paddle.fluid import core
from paddle.fluid.tests.unittests.op_test import (
from paddle.fluid.tests.unittests.eager_op_test import (
OpTest,
OpTestTool,
convert_float_to_uint16,
......
......@@ -18,7 +18,7 @@ import numpy as np
from paddle import enable_static
from paddle.fluid import core
from paddle.fluid.tests.unittests.op_test import convert_float_to_uint16
from paddle.fluid.tests.unittests.eager_op_test import convert_float_to_uint16
from paddle.fluid.tests.unittests.test_softmax_op import (
TestSoftmaxOp,
TestSoftmaxOp2,
......
......@@ -18,7 +18,7 @@ import numpy as np
from mkldnn_op_test import check_if_mkldnn_primitives_exist_in_bwd
from paddle.fluid import core
from paddle.fluid.tests.unittests.op_test import OpTest
from paddle.fluid.tests.unittests.eager_op_test import OpTest
from paddle.fluid.tests.unittests.test_softmax_op import (
TestSoftmaxOp,
TestSoftmaxOp2,
......
......@@ -17,7 +17,7 @@ import unittest
import numpy as np
import paddle
from paddle.fluid.tests.unittests.op_test import (
from paddle.fluid.tests.unittests.eager_op_test import (
OpTest,
OpTestTool,
convert_float_to_uint16,
......
......@@ -18,7 +18,7 @@ import numpy as np
import paddle
from paddle.fluid import core
from paddle.fluid.tests.unittests.op_test import OpTest
from paddle.fluid.tests.unittests.eager_op_test import OpTest
@unittest.skipIf(
......
......@@ -17,7 +17,7 @@ import unittest
import numpy as np
import paddle
from paddle.fluid.tests.unittests.op_test import OpTest
from paddle.fluid.tests.unittests.eager_op_test import OpTest
class TestSplitSectionsOneDNNOp(OpTest):
......@@ -68,10 +68,10 @@ class TestSplitSectionsOneDNNOp(OpTest):
}
def test_check_output(self):
self.check_output()
self.check_output(check_dygraph=False)
def test_check_grad(self):
self.check_grad(['X'], ['out0', 'out1', 'out2'])
self.check_grad(['X'], ['out0', 'out1', 'out2'], check_dygraph=False)
# test with attr(num)
......@@ -86,7 +86,9 @@ class TestSplitNumOneDNNOp(TestSplitSectionsOneDNNOp):
self.out = np.split(self.x, indices_or_sections, self.axis)
def test_check_grad(self):
self.check_grad(['X'], ['out0', 'out1', 'out2', 'out3'])
self.check_grad(
['X'], ['out0', 'out1', 'out2', 'out3'], check_dygraph=False
)
class TestSplitNumAxisTensorOneDNNOp(TestSplitSectionsOneDNNOp):
......
......@@ -18,7 +18,7 @@ import numpy as np
import paddle
from paddle.fluid import core
from paddle.fluid.tests.unittests.op_test import (
from paddle.fluid.tests.unittests.eager_op_test import (
OpTest,
OpTestTool,
convert_float_to_uint16,
......
......@@ -18,7 +18,7 @@ import numpy as np
import paddle
from paddle.fluid import core
from paddle.fluid.tests.unittests.op_test import OpTest, OpTestTool
from paddle.fluid.tests.unittests.eager_op_test import OpTest, OpTestTool
@OpTestTool.skip_if_not_cpu()
......
......@@ -18,7 +18,7 @@ import numpy as np
from paddle import enable_static
from paddle.fluid import core
from paddle.fluid.tests.unittests.op_test import convert_float_to_uint16
from paddle.fluid.tests.unittests.eager_op_test import convert_float_to_uint16
from paddle.fluid.tests.unittests.test_sum_op import TestSumOp
......
......@@ -18,7 +18,10 @@ import numpy as np
from paddle import enable_static
from paddle.fluid import core
from paddle.fluid.tests.unittests.op_test import OpTest, convert_float_to_uint16
from paddle.fluid.tests.unittests.eager_op_test import (
OpTest,
convert_float_to_uint16,
)
@unittest.skipIf(
......
......@@ -18,7 +18,7 @@ import numpy as np
from mkldnn_op_test import format_reorder
from paddle.fluid import core
from paddle.fluid.tests.unittests.op_test import OpTest
from paddle.fluid.tests.unittests.eager_op_test import OpTest
class TestTransposeOp(OpTest):
......
......@@ -16,7 +16,7 @@ import unittest
import numpy as np
from paddle.fluid.tests.unittests.op_test import OpTest
from paddle.fluid.tests.unittests.eager_op_test import OpTest
class TestTransposeMKLDNN(OpTest):
......