提交 23df084b 编写于 作者: G guomingz 提交者: Tao Luo

resolve #16987 (#16994)

Rename the testcuda function to has_cuda; this eliminates the unnecessary testing.
test=develop
上级 1202d3fc
...@@ -85,7 +85,7 @@ class TestConv2dOp(OpTest): ...@@ -85,7 +85,7 @@ class TestConv2dOp(OpTest):
} }
input = np.random.random(self.input_size).astype(self.dtype) input = np.random.random(self.input_size).astype(self.dtype)
if not self.testcuda(): if not self.has_cuda():
self.fuse_relu_before_depthwise_conv = False self.fuse_relu_before_depthwise_conv = False
if self.fuse_relu_before_depthwise_conv: if self.fuse_relu_before_depthwise_conv:
input = input - 0.5 input = input - 0.5
...@@ -117,25 +117,25 @@ class TestConv2dOp(OpTest): ...@@ -117,25 +117,25 @@ class TestConv2dOp(OpTest):
} }
self.outputs = {'Output': output} self.outputs = {'Output': output}
def testcuda(self): def has_cuda(self):
return core.is_compiled_with_cuda() and (self.use_cudnn or return core.is_compiled_with_cuda() and (self.use_cudnn or
self.use_cuda) self.use_cuda)
def test_check_output(self): def test_check_output(self):
place = core.CUDAPlace(0) if self.testcuda() else core.CPUPlace() place = core.CUDAPlace(0) if self.has_cuda() else core.CPUPlace()
self.check_output_with_place(place, atol=1e-5) self.check_output_with_place(place, atol=1e-5)
def test_check_grad(self): def test_check_grad(self):
if self.dtype == np.float16: if self.dtype == np.float16:
return return
place = core.CUDAPlace(0) if self.testcuda() else core.CPUPlace() place = core.CUDAPlace(0) if self.has_cuda() else core.CPUPlace()
self.check_grad_with_place( self.check_grad_with_place(
place, {'Input', 'Filter'}, 'Output', max_relative_error=0.02) place, {'Input', 'Filter'}, 'Output', max_relative_error=0.02)
def test_check_grad_no_filter(self): def test_check_grad_no_filter(self):
if self.dtype == np.float16: if self.dtype == np.float16:
return return
place = core.CUDAPlace(0) if self.testcuda() else core.CPUPlace() place = core.CUDAPlace(0) if self.has_cuda() else core.CPUPlace()
self.check_grad_with_place( self.check_grad_with_place(
place, ['Input'], place, ['Input'],
'Output', 'Output',
...@@ -145,7 +145,7 @@ class TestConv2dOp(OpTest): ...@@ -145,7 +145,7 @@ class TestConv2dOp(OpTest):
def test_check_grad_no_input(self): def test_check_grad_no_input(self):
if self.dtype == np.float16: if self.dtype == np.float16:
return return
place = core.CUDAPlace(0) if self.testcuda() else core.CPUPlace() place = core.CUDAPlace(0) if self.has_cuda() else core.CPUPlace()
self.check_grad_with_place( self.check_grad_with_place(
place, ['Filter'], place, ['Filter'],
'Output', 'Output',
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册