提交 9466e956 编写于 作者: T Tao Luo

reduce unittest time by renaming testcuda to has_cuda

test=develop
上级 a770ce06
...@@ -90,11 +90,11 @@ class TestConv2dFusionOp(OpTest): ...@@ -90,11 +90,11 @@ class TestConv2dFusionOp(OpTest):
self.set_outputs() self.set_outputs()
def testcuda(self): def has_cuda(self):
return core.is_compiled_with_cuda() return core.is_compiled_with_cuda()
def test_check_output(self): def test_check_output(self):
if self.testcuda(): if self.has_cuda():
place = core.CUDAPlace(0) place = core.CUDAPlace(0)
self.check_output_with_place(place, atol=1e-5) self.check_output_with_place(place, atol=1e-5)
else: else:
......
...@@ -108,24 +108,24 @@ class TestConv3dOp(OpTest): ...@@ -108,24 +108,24 @@ class TestConv3dOp(OpTest):
} }
self.outputs = {'Output': output} self.outputs = {'Output': output}
def testcudnn(self): def has_cudnn(self):
return core.is_compiled_with_cuda() and self.use_cudnn return core.is_compiled_with_cuda() and self.use_cudnn
def test_check_output(self): def test_check_output(self):
place = core.CUDAPlace(0) if self.testcudnn() else core.CPUPlace() place = core.CUDAPlace(0) if self.has_cudnn() else core.CPUPlace()
self.check_output_with_place(place, atol=1e-5) self.check_output_with_place(place, atol=1e-5)
def test_check_grad(self): def test_check_grad(self):
if self.dtype == np.float16: if self.dtype == np.float16:
return return
place = core.CUDAPlace(0) if self.testcudnn() else core.CPUPlace() place = core.CUDAPlace(0) if self.has_cudnn() else core.CPUPlace()
self.check_grad_with_place( self.check_grad_with_place(
place, {'Input', 'Filter'}, 'Output', max_relative_error=0.03) place, {'Input', 'Filter'}, 'Output', max_relative_error=0.03)
def test_check_grad_no_filter(self): def test_check_grad_no_filter(self):
if self.dtype == np.float16: if self.dtype == np.float16:
return return
place = core.CUDAPlace(0) if self.testcudnn() else core.CPUPlace() place = core.CUDAPlace(0) if self.has_cudnn() else core.CPUPlace()
self.check_grad_with_place( self.check_grad_with_place(
place, ['Input'], place, ['Input'],
'Output', 'Output',
...@@ -135,7 +135,7 @@ class TestConv3dOp(OpTest): ...@@ -135,7 +135,7 @@ class TestConv3dOp(OpTest):
def test_check_grad_no_input(self): def test_check_grad_no_input(self):
if self.dtype == np.float16: if self.dtype == np.float16:
return return
place = core.CUDAPlace(0) if self.testcudnn() else core.CPUPlace() place = core.CUDAPlace(0) if self.has_cudnn() else core.CPUPlace()
self.check_grad_with_place( self.check_grad_with_place(
place, ['Input'], place, ['Input'],
'Output', 'Output',
......
...@@ -171,7 +171,7 @@ class TestCUDNNLstmOp(OpTest): ...@@ -171,7 +171,7 @@ class TestCUDNNLstmOp(OpTest):
} }
def test_output_with_place(self): def test_output_with_place(self):
if self.testcuda(): if self.has_cuda():
place = core.CUDAPlace(0) place = core.CUDAPlace(0)
self.check_output_with_place(place, atol=1e-5) self.check_output_with_place(place, atol=1e-5)
...@@ -184,7 +184,7 @@ class TestCUDNNLstmOp(OpTest): ...@@ -184,7 +184,7 @@ class TestCUDNNLstmOp(OpTest):
['Out', 'last_h', 'last_c'], ['Out', 'last_h', 'last_c'],
max_relative_error=0.02) max_relative_error=0.02)
def testcuda(self): def has_cuda(self):
return core.is_compiled_with_cuda() return core.is_compiled_with_cuda()
......
...@@ -148,11 +148,11 @@ class TestPool2D_Op(OpTest): ...@@ -148,11 +148,11 @@ class TestPool2D_Op(OpTest):
self.outputs = {'Out': output} self.outputs = {'Out': output}
def testcudnn(self): def has_cudnn(self):
return core.is_compiled_with_cuda() and self.use_cudnn return core.is_compiled_with_cuda() and self.use_cudnn
def test_check_output(self): def test_check_output(self):
if self.testcudnn(): if self.has_cudnn():
place = core.CUDAPlace(0) place = core.CUDAPlace(0)
self.check_output_with_place(place, atol=1e-5) self.check_output_with_place(place, atol=1e-5)
else: else:
...@@ -161,7 +161,7 @@ class TestPool2D_Op(OpTest): ...@@ -161,7 +161,7 @@ class TestPool2D_Op(OpTest):
def test_check_grad(self): def test_check_grad(self):
if self.dtype == np.float16: if self.dtype == np.float16:
return return
if self.testcudnn() and self.pool_type != "max": if self.has_cudnn() and self.pool_type != "max":
place = core.CUDAPlace(0) place = core.CUDAPlace(0)
self.check_grad_with_place( self.check_grad_with_place(
place, set(['X']), 'Out', max_relative_error=0.07) place, set(['X']), 'Out', max_relative_error=0.07)
......
...@@ -172,11 +172,11 @@ class TestPool3d_Op(OpTest): ...@@ -172,11 +172,11 @@ class TestPool3d_Op(OpTest):
self.outputs = {'Out': output} self.outputs = {'Out': output}
def testcudnn(self): def has_cudnn(self):
return core.is_compiled_with_cuda() and self.use_cudnn return core.is_compiled_with_cuda() and self.use_cudnn
def test_check_output(self): def test_check_output(self):
if self.testcudnn(): if self.has_cudnn():
place = core.CUDAPlace(0) place = core.CUDAPlace(0)
self.check_output_with_place(place, atol=1e-5) self.check_output_with_place(place, atol=1e-5)
else: else:
...@@ -185,7 +185,7 @@ class TestPool3d_Op(OpTest): ...@@ -185,7 +185,7 @@ class TestPool3d_Op(OpTest):
def test_check_grad(self): def test_check_grad(self):
if self.dtype == np.float16: if self.dtype == np.float16:
return return
if self.testcudnn() and self.pool_type != "max": if self.has_cudnn() and self.pool_type != "max":
place = core.CUDAPlace(0) place = core.CUDAPlace(0)
self.check_grad_with_place( self.check_grad_with_place(
place, set(['X']), 'Out', max_relative_error=0.07) place, set(['X']), 'Out', max_relative_error=0.07)
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册