diff --git a/python/paddle/fluid/tests/unittests/op_test.py b/python/paddle/fluid/tests/unittests/op_test.py
index 690c4cf0ad6b2c741689e419223cfa6b6e1e5cf3..c195a28e452fbe073a9afb5d650f538176f688fd 100644
--- a/python/paddle/fluid/tests/unittests/op_test.py
+++ b/python/paddle/fluid/tests/unittests/op_test.py
@@ -362,7 +362,9 @@ class OpTest(unittest.TestCase):
             else:
                 return []
         places = [fluid.CPUPlace()]
-        if core.is_compiled_with_cuda() and core.op_support_gpu(self.op_type):
+        cpu_only = getattr(self, '_cpu_only', False)
+        if core.is_compiled_with_cuda() and core.op_support_gpu(
+                self.op_type) and not cpu_only:
             places.append(core.CUDAPlace(0))
         return places
 
diff --git a/python/paddle/fluid/tests/unittests/test_elementwise_mul_mkldnn_op.py b/python/paddle/fluid/tests/unittests/test_elementwise_mul_mkldnn_op.py
index 56e2ca849af671d7e64c0ef3c66666a82a14ac96..536e9a1c58ec4a8b1b5a7c1d3a5fe737b38d24ab 100644
--- a/python/paddle/fluid/tests/unittests/test_elementwise_mul_mkldnn_op.py
+++ b/python/paddle/fluid/tests/unittests/test_elementwise_mul_mkldnn_op.py
@@ -34,6 +34,7 @@ class TestElementwiseMulMKLDNNOp_BroadcastNCHW16c(ElementwiseMulOp):
         super(TestElementwiseMulMKLDNNOp_BroadcastNCHW16c, self).setUp()
         self.attrs["x_data_format"] = "nchw16c"
         self.attrs["y_data_format"] = "nc"
+        self._cpu_only = True
 
     def init_kernel_type(self):
         self.use_mkldnn = True
@@ -66,6 +67,7 @@ class TestElementwiseMulMKLDNNOp_BroadcastNCHW8c(ElementwiseMulOp):
         super(TestElementwiseMulMKLDNNOp_BroadcastNCHW8c, self).setUp()
         self.attrs["x_data_format"] = "nchw8c"
         self.attrs["y_data_format"] = "nc"
+        self._cpu_only = True
 
     def init_kernel_type(self):
         self.use_mkldnn = True
@@ -119,6 +121,7 @@ class TestElementwiseMulMKLDNNOp_FallbackNCHW16C(ElementwiseMulOp):
         super(TestElementwiseMulMKLDNNOp_FallbackNCHW16C, self).setUp()
         self.attrs["x_data_format"] = "nchw16c"
         self.attrs["y_data_format"] = "nchw16c"
+        self._cpu_only = True
 
     def init_kernel_type(self):
         self.use_mkldnn = True
@@ -149,6 +152,7 @@ class TestElementwiseMulMKLDNNOp_FallbackNoReorders(ElementwiseMulOp):
         super(TestElementwiseMulMKLDNNOp_FallbackNoReorders, self).setUp()
         self.attrs["x_data_format"] = "nchw16c"
         self.attrs["y_data_format"] = "nchw16c"
+        self._cpu_only = True
 
     def init_kernel_type(self):
         self.use_mkldnn = True
@@ -178,6 +182,7 @@ class TestElementwiseMulMKLDNNOp_FallbackWithReorder1(ElementwiseMulOp):
         super(TestElementwiseMulMKLDNNOp_FallbackWithReorder1, self).setUp()
         self.attrs["x_data_format"] = "nchw"
         self.attrs["y_data_format"] = "nchw16c"
+        self._cpu_only = True
 
     def init_kernel_type(self):
         self.use_mkldnn = True
@@ -207,6 +212,7 @@ class TestElementwiseMulMKLDNNOp_FallbackWithReorder2(ElementwiseMulOp):
         super(TestElementwiseMulMKLDNNOp_FallbackWithReorder2, self).setUp()
         self.attrs["x_data_format"] = "nchw16c"
         self.attrs["y_data_format"] = "nchw"
+        self._cpu_only = True
 
     def init_kernel_type(self):
         self.use_mkldnn = True
@@ -235,6 +241,7 @@ class TestElementwiseMulMKLDNNOp_FallbackNoReorders2(ElementwiseMulOp):
         super(TestElementwiseMulMKLDNNOp_FallbackNoReorders2, self).setUp()
         self.attrs["x_data_format"] = "nc"
         self.attrs["y_data_format"] = "nc"
+        self._cpu_only = True
 
     def init_kernel_type(self):
         self.use_mkldnn = True