Unverified commit 0a17e98e authored by Leo Chen, committed by GitHub

Use tempfile to save model in InferencePassTest (#54038)

Parent 6133ca4e
@@ -12,7 +12,9 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
+import os
 import random
+import tempfile
 import unittest
 import numpy as np
@@ -41,7 +43,10 @@ class InferencePassTest(unittest.TestCase):
         self.dynamic_shape_params = None
         self.enable_lite = False
         self.lite_parameters = None
-        self.path = "./inference_pass/" + self.__class__.__name__ + "/"
+        self.temp_dir = tempfile.TemporaryDirectory()
+        self.path = os.path.join(
+            self.temp_dir.name, 'inference_pass', self.__class__.__name__
+        )
         np.random.seed(1)
         random.seed(1)
...
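For context: tempfile.TemporaryDirectory() allocates a fresh directory under the system temp root and removes it again when cleanup() runs (or when the object is finalized), so test artifacts no longer accumulate in the working directory as they did with the fixed "./inference_pass/" path. Below is a minimal self-contained sketch of the idiom; the class name and the explicit tearDown are illustrative, since this excerpt of the patch does not show whether an explicit tearDown was added:

    import os
    import tempfile
    import unittest

    class TempDirDemoTest(unittest.TestCase):
        def setUp(self):
            # Fresh scratch directory per test, e.g. /tmp/tmpab12cd34 on Linux.
            self.temp_dir = tempfile.TemporaryDirectory()
            self.path = os.path.join(
                self.temp_dir.name, 'inference_pass', type(self).__name__
            )

        def tearDown(self):
            # Deletes the directory and every model file saved under it.
            self.temp_dir.cleanup()

        def test_path_lives_under_system_tempdir(self):
            os.makedirs(self.path, exist_ok=True)
            self.assertTrue(self.path.startswith(tempfile.gettempdir()))

    if __name__ == '__main__':
        unittest.main()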
@@ -53,8 +53,9 @@ class TensorRTSubgraphPassActivationTest(InferencePassTest):
     def test_check_output(self):
         if core.is_compiled_with_cuda():
             use_gpu = True
-            if os.path.exists(self.path + "_opt_cache"):
-                shutil.rmtree(self.path + "_opt_cache")
+            opt_path = os.path.join(self.path, '_opt_cache')
+            if os.path.exists(opt_path):
+                shutil.rmtree(opt_path)
             if (
                 self.trt_parameters.precision
                 == AnalysisConfig.Precision.Float32
...
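The remaining hunks all repeat one refactor: the concatenation self.path + "_opt_cache" becomes os.path.join(self.path, '_opt_cache'). This is arguably more than cosmetic: the old self.path ended with "/", so plain concatenation happened to yield a child path, whereas the new path built with os.path.join carries no trailing separator, and concatenation would instead produce a sibling directory named <TestName>_opt_cache. Using os.path.join keeps the cache inside the per-test directory. A short illustration with hypothetical paths:

    import os

    old_path = './inference_pass/DemoTest/'                 # old fixed path, trailing slash
    new_path = '/tmp/tmpab12cd34/inference_pass/DemoTest'   # new tempfile-based path, no trailing slash

    print(old_path + '_opt_cache')               # ./inference_pass/DemoTest/_opt_cache (child, thanks to the slash)
    print(new_path + '_opt_cache')               # /tmp/tmpab12cd34/inference_pass/DemoTest_opt_cache (sibling)
    print(os.path.join(new_path, '_opt_cache'))  # /tmp/tmpab12cd34/inference_pass/DemoTest/_opt_cache (child)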
@@ -53,8 +53,9 @@ class TensorRTSubgraphPassElementwiseBroadcastTest(InferencePassTest):
         return paddle.tensor.math.add(x=data1, y=data2)

     def test_check_output(self):
-        if os.path.exists(self.path + "_opt_cache"):
-            shutil.rmtree(self.path + "_opt_cache")
+        opt_path = os.path.join(self.path, '_opt_cache')
+        if os.path.exists(opt_path):
+            shutil.rmtree(opt_path)
         if core.is_compiled_with_cuda():
             use_gpu = True
             self.check_output_with_option(use_gpu)
...
@@ -55,8 +55,9 @@ class TRTInstanceNormTest(InferencePassTest):
         self.fetch_list = [out]

     def check_output(self, remove_cache=False):
-        if remove_cache and os.path.exists(self.path + "_opt_cache"):
-            shutil.rmtree(self.path + "_opt_cache")
+        opt_path = os.path.join(self.path, '_opt_cache')
+        if remove_cache and os.path.exists(opt_path):
+            shutil.rmtree(opt_path)
         if core.is_compiled_with_cuda():
             use_gpu = True
             atol = 1e-5
...
@@ -84,8 +84,9 @@ class TensorRTPool3dTest(InferencePassTest):
         self.fetch_list = [pool_out]

     def check_output(self):
-        if os.path.exists(self.path + "_opt_cache"):
-            shutil.rmtree(self.path + "_opt_cache")
+        opt_path = os.path.join(self.path, '_opt_cache')
+        if os.path.exists(opt_path):
+            shutil.rmtree(opt_path)
         if core.is_compiled_with_cuda():
             use_gpu = True
             if self.precision == AnalysisConfig.Precision.Float32:
@@ -200,8 +201,9 @@ class TensorRTAdaptiveAvgPool3DTest(InferencePassTest):
         self.fetch_list = [pool_out]

     def check_output(self):
-        if os.path.exists(self.path + "_opt_cache"):
-            shutil.rmtree(self.path + "_opt_cache")
+        opt_path = os.path.join(self.path, '_opt_cache')
+        if os.path.exists(opt_path):
+            shutil.rmtree(opt_path)
         if core.is_compiled_with_cuda():
             use_gpu = True
             self.check_output_with_option(use_gpu)
@@ -300,8 +302,9 @@ class TensorRTAdaptiveMaxPool3DTest(InferencePassTest):
         self.fetch_list = [pool_out]

     def check_output(self):
-        if os.path.exists(self.path + "_opt_cache"):
-            shutil.rmtree(self.path + "_opt_cache")
+        opt_path = os.path.join(self.path, '_opt_cache')
+        if os.path.exists(opt_path):
+            shutil.rmtree(opt_path)
         if core.is_compiled_with_cuda():
             use_gpu = True
             self.check_output_with_option(use_gpu)
...
@@ -86,8 +86,9 @@ class TensorRTPoolTest(InferencePassTest):
         self.fetch_list = [out]

     def check_output(self):
-        if os.path.exists(self.path + "_opt_cache"):
-            shutil.rmtree(self.path + "_opt_cache")
+        opt_path = os.path.join(self.path, '_opt_cache')
+        if os.path.exists(opt_path):
+            shutil.rmtree(opt_path)
         if core.is_compiled_with_cuda():
             use_gpu = True
             if self.precision == AnalysisConfig.Precision.Float32:
...
@@ -60,8 +60,9 @@ class SkipLayernormFusePassTest0(InferencePassTest):
         return paddle.add(data1, data2)

     def test_check_output(self):
-        if os.path.exists(self.path + "_opt_cache"):
-            shutil.rmtree(self.path + "_opt_cache")
+        opt_path = os.path.join(self.path, '_opt_cache')
+        if os.path.exists(opt_path):
+            shutil.rmtree(opt_path)
         if core.is_compiled_with_cuda():
             use_gpu = True
             self.check_output_with_option(use_gpu, atol=0.01, rtol=0.00001)
@@ -107,8 +108,9 @@ class SkipLayernormFusePassTest1(InferencePassTest):
         return paddle.add(data1, data2)

     def test_check_output(self):
-        if os.path.exists(self.path + "_opt_cache"):
-            shutil.rmtree(self.path + "_opt_cache")
+        opt_path = os.path.join(self.path, '_opt_cache')
+        if os.path.exists(opt_path):
+            shutil.rmtree(opt_path)
         if core.is_compiled_with_cuda():
             use_gpu = True
             self.check_output_with_option(use_gpu, atol=0.01, rtol=0.00001)
@@ -154,8 +156,9 @@ class SkipLayernormFusePassTest2(InferencePassTest):
         return paddle.add(data1, data2)

     def test_check_output(self):
-        if os.path.exists(self.path + "_opt_cache"):
-            shutil.rmtree(self.path + "_opt_cache")
+        opt_path = os.path.join(self.path, '_opt_cache')
+        if os.path.exists(opt_path):
+            shutil.rmtree(opt_path)
         if core.is_compiled_with_cuda():
             use_gpu = True
             self.check_output_with_option(use_gpu, atol=0.1, rtol=0.00001)
@@ -201,8 +204,9 @@ class SkipLayernormFusePassTest3(InferencePassTest):
         return paddle.add(data1, data2)

     def test_check_output(self):
-        if os.path.exists(self.path + "_opt_cache"):
-            shutil.rmtree(self.path + "_opt_cache")
+        opt_path = os.path.join(self.path, '_opt_cache')
+        if os.path.exists(opt_path):
+            shutil.rmtree(opt_path)
         if core.is_compiled_with_cuda():
             use_gpu = True
             self.check_output_with_option(use_gpu, atol=0.1, rtol=0.00001)
...
@@ -128,8 +128,9 @@ class TensorRTSubgraphPassSplitSerializeTest(InferencePassTest):
     def test_check_output(self):
         if paddle.is_compiled_with_cuda():
             use_gpu = True
-            if os.path.exists(self.path + "_opt_cache"):
-                shutil.rmtree(self.path + "_opt_cache")
+            opt_path = os.path.join(self.path, '_opt_cache')
+            if os.path.exists(opt_path):
+                shutil.rmtree(opt_path)
             self.check_output_with_option(use_gpu)
             self.assertTrue(
                 PassVersionChecker.IsCompatible('tensorrt_subgraph_pass')
@@ -164,8 +165,9 @@ class TensorRTSubgraphPassDynamicSplitFp16SerializeTest(InferencePassTest):
     def test_check_output(self):
         if paddle.is_compiled_with_cuda():
             use_gpu = True
-            if os.path.exists(self.path + "_opt_cache"):
-                shutil.rmtree(self.path + "_opt_cache")
+            opt_path = os.path.join(self.path, '_opt_cache')
+            if os.path.exists(opt_path):
+                shutil.rmtree(opt_path)
             self.check_output_with_option(use_gpu, 1e-3)
             self.assertTrue(
                 PassVersionChecker.IsCompatible('tensorrt_subgraph_pass')
@@ -313,8 +315,9 @@ class TensorRTSubgraphPassLayerNormDynamicTest(InferencePassTest):
         self.serialize = True

     def test_check_output(self):
-        if os.path.exists(self.path + "_opt_cache"):
-            shutil.rmtree(self.path + "_opt_cache")
+        opt_path = os.path.join(self.path, '_opt_cache')
+        if os.path.exists(opt_path):
+            shutil.rmtree(opt_path)
         if paddle.is_compiled_with_cuda():
             use_gpu = True
             self.check_output_with_option(use_gpu)
@@ -332,8 +335,9 @@ class TensorRTSubgraphPassLayerNormDynamicFP16Test(
         self.serialize = True

     def test_check_output(self):
-        if os.path.exists(self.path + "_opt_cache"):
-            shutil.rmtree(self.path + "_opt_cache")
+        opt_path = os.path.join(self.path, '_opt_cache')
+        if os.path.exists(opt_path):
+            shutil.rmtree(opt_path)
         if paddle.is_compiled_with_cuda():
             use_gpu = True
             self.check_output_with_option(use_gpu, atol=0.01, rtol=0.01)
@@ -406,8 +410,9 @@ class TensorRTSubgraphPassElementwiseSerializeTest(
         )

     def test_check_output(self):
-        if os.path.exists(self.path + "_opt_cache"):
-            shutil.rmtree(self.path + "_opt_cache")
+        opt_path = os.path.join(self.path, '_opt_cache')
+        if os.path.exists(opt_path):
+            shutil.rmtree(opt_path)
         super().test_check_output()
@@ -444,8 +449,9 @@ class TensorRTSubgraphPassElementwiseBroadcastDynamicTest(InferencePassTest):
         return paddle.add(x=data1, y=data2)

     def test_check_output(self):
-        if os.path.exists(self.path + "_opt_cache"):
-            shutil.rmtree(self.path + "_opt_cache")
+        opt_path = os.path.join(self.path, '_opt_cache')
+        if os.path.exists(opt_path):
+            shutil.rmtree(opt_path)
         if paddle.is_compiled_with_cuda():
             use_gpu = True
             self.check_output_with_option(use_gpu)
...