Unverified commit 0a17e98e, authored by Leo Chen, committed by GitHub

Use tempfile to save model in InferencePassTest (#54038)

Parent 6133ca4e
@@ -12,7 +12,9 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
+import os
 import random
+import tempfile
 import unittest

 import numpy as np
@@ -41,7 +43,10 @@ class InferencePassTest(unittest.TestCase):
         self.dynamic_shape_params = None
         self.enable_lite = False
         self.lite_parameters = None
-        self.path = "./inference_pass/" + self.__class__.__name__ + "/"
+        self.temp_dir = tempfile.TemporaryDirectory()
+        self.path = os.path.join(
+            self.temp_dir.name, 'inference_pass', self.__class__.__name__
+        )
         np.random.seed(1)
         random.seed(1)
......
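For readers unfamiliar with the API, here is a minimal standalone sketch of the tempfile.TemporaryDirectory lifecycle that the new setUp relies on; the paths and the MyPassTest name are illustrative, not taken from the test suite:

import os
import tempfile

# Create a unique directory under the system temp location.
temp_dir = tempfile.TemporaryDirectory()
model_path = os.path.join(temp_dir.name, 'inference_pass', 'MyPassTest')
os.makedirs(model_path)

print(os.path.exists(model_path))  # True while temp_dir is alive

# cleanup() removes the whole tree; it is also invoked automatically
# when the TemporaryDirectory object is finalized.
temp_dir.cleanup()
print(os.path.exists(model_path))  # False

Because deletion happens on cleanup or finalization, the tests should no longer leave an ./inference_pass/ directory behind in the working tree, which is the point of this commit.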
@@ -53,8 +53,9 @@ class TensorRTSubgraphPassActivationTest(InferencePassTest):
     def test_check_output(self):
         if core.is_compiled_with_cuda():
             use_gpu = True
-            if os.path.exists(self.path + "_opt_cache"):
-                shutil.rmtree(self.path + "_opt_cache")
+            opt_path = os.path.join(self.path, '_opt_cache')
+            if os.path.exists(opt_path):
+                shutil.rmtree(opt_path)
             if (
                 self.trt_parameters.precision
                 == AnalysisConfig.Precision.Float32
......
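The same rewrite repeats across the TensorRT tests below. One detail worth noting, shown here with illustrative paths: the old concatenation only produced the intended child path because the old self.path ended with '/', whereas os.path.join inserts the separator itself:

import os

base = '/tmp/inference_pass/MyPassTest'  # new-style path, no trailing slash

# Plain concatenation would silently name a sibling, not a child:
print(base + '_opt_cache')               # /tmp/inference_pass/MyPassTest_opt_cache
# os.path.join adds the separator where one is missing:
print(os.path.join(base, '_opt_cache'))  # /tmp/inference_pass/MyPassTest/_opt_cache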
@@ -53,8 +53,9 @@ class TensorRTSubgraphPassElementwiseBroadcastTest(InferencePassTest):
         return paddle.tensor.math.add(x=data1, y=data2)

     def test_check_output(self):
-        if os.path.exists(self.path + "_opt_cache"):
-            shutil.rmtree(self.path + "_opt_cache")
+        opt_path = os.path.join(self.path, '_opt_cache')
+        if os.path.exists(opt_path):
+            shutil.rmtree(opt_path)
         if core.is_compiled_with_cuda():
             use_gpu = True
             self.check_output_with_option(use_gpu)
......
@@ -55,8 +55,9 @@ class TRTInstanceNormTest(InferencePassTest):
         self.fetch_list = [out]

     def check_output(self, remove_cache=False):
-        if remove_cache and os.path.exists(self.path + "_opt_cache"):
-            shutil.rmtree(self.path + "_opt_cache")
+        opt_path = os.path.join(self.path, '_opt_cache')
+        if remove_cache and os.path.exists(opt_path):
+            shutil.rmtree(opt_path)
         if core.is_compiled_with_cuda():
             use_gpu = True
             atol = 1e-5
......
@@ -84,8 +84,9 @@ class TensorRTPool3dTest(InferencePassTest):
         self.fetch_list = [pool_out]

     def check_output(self):
-        if os.path.exists(self.path + "_opt_cache"):
-            shutil.rmtree(self.path + "_opt_cache")
+        opt_path = os.path.join(self.path, '_opt_cache')
+        if os.path.exists(opt_path):
+            shutil.rmtree(opt_path)
         if core.is_compiled_with_cuda():
             use_gpu = True
             if self.precision == AnalysisConfig.Precision.Float32:
@@ -200,8 +201,9 @@ class TensorRTAdaptiveAvgPool3DTest(InferencePassTest):
         self.fetch_list = [pool_out]

     def check_output(self):
-        if os.path.exists(self.path + "_opt_cache"):
-            shutil.rmtree(self.path + "_opt_cache")
+        opt_path = os.path.join(self.path, '_opt_cache')
+        if os.path.exists(opt_path):
+            shutil.rmtree(opt_path)
         if core.is_compiled_with_cuda():
             use_gpu = True
             self.check_output_with_option(use_gpu)
@@ -300,8 +302,9 @@ class TensorRTAdaptiveMaxPool3DTest(InferencePassTest):
         self.fetch_list = [pool_out]

     def check_output(self):
-        if os.path.exists(self.path + "_opt_cache"):
-            shutil.rmtree(self.path + "_opt_cache")
+        opt_path = os.path.join(self.path, '_opt_cache')
+        if os.path.exists(opt_path):
+            shutil.rmtree(opt_path)
         if core.is_compiled_with_cuda():
             use_gpu = True
             self.check_output_with_option(use_gpu)
......
@@ -86,8 +86,9 @@ class TensorRTPoolTest(InferencePassTest):
         self.fetch_list = [out]

     def check_output(self):
-        if os.path.exists(self.path + "_opt_cache"):
-            shutil.rmtree(self.path + "_opt_cache")
+        opt_path = os.path.join(self.path, '_opt_cache')
+        if os.path.exists(opt_path):
+            shutil.rmtree(opt_path)
         if core.is_compiled_with_cuda():
             use_gpu = True
             if self.precision == AnalysisConfig.Precision.Float32:
......
@@ -60,8 +60,9 @@ class SkipLayernormFusePassTest0(InferencePassTest):
         return paddle.add(data1, data2)

     def test_check_output(self):
-        if os.path.exists(self.path + "_opt_cache"):
-            shutil.rmtree(self.path + "_opt_cache")
+        opt_path = os.path.join(self.path, '_opt_cache')
+        if os.path.exists(opt_path):
+            shutil.rmtree(opt_path)
         if core.is_compiled_with_cuda():
             use_gpu = True
             self.check_output_with_option(use_gpu, atol=0.01, rtol=0.00001)
@@ -107,8 +108,9 @@ class SkipLayernormFusePassTest1(InferencePassTest):
         return paddle.add(data1, data2)

     def test_check_output(self):
-        if os.path.exists(self.path + "_opt_cache"):
-            shutil.rmtree(self.path + "_opt_cache")
+        opt_path = os.path.join(self.path, '_opt_cache')
+        if os.path.exists(opt_path):
+            shutil.rmtree(opt_path)
         if core.is_compiled_with_cuda():
             use_gpu = True
             self.check_output_with_option(use_gpu, atol=0.01, rtol=0.00001)
@@ -154,8 +156,9 @@ class SkipLayernormFusePassTest2(InferencePassTest):
         return paddle.add(data1, data2)

     def test_check_output(self):
-        if os.path.exists(self.path + "_opt_cache"):
-            shutil.rmtree(self.path + "_opt_cache")
+        opt_path = os.path.join(self.path, '_opt_cache')
+        if os.path.exists(opt_path):
+            shutil.rmtree(opt_path)
         if core.is_compiled_with_cuda():
             use_gpu = True
             self.check_output_with_option(use_gpu, atol=0.1, rtol=0.00001)
@@ -201,8 +204,9 @@ class SkipLayernormFusePassTest3(InferencePassTest):
         return paddle.add(data1, data2)

     def test_check_output(self):
-        if os.path.exists(self.path + "_opt_cache"):
-            shutil.rmtree(self.path + "_opt_cache")
+        opt_path = os.path.join(self.path, '_opt_cache')
+        if os.path.exists(opt_path):
+            shutil.rmtree(opt_path)
         if core.is_compiled_with_cuda():
             use_gpu = True
             self.check_output_with_option(use_gpu, atol=0.1, rtol=0.00001)
......
@@ -128,8 +128,9 @@ class TensorRTSubgraphPassSplitSerializeTest(InferencePassTest):
     def test_check_output(self):
         if paddle.is_compiled_with_cuda():
             use_gpu = True
-            if os.path.exists(self.path + "_opt_cache"):
-                shutil.rmtree(self.path + "_opt_cache")
+            opt_path = os.path.join(self.path, '_opt_cache')
+            if os.path.exists(opt_path):
+                shutil.rmtree(opt_path)
             self.check_output_with_option(use_gpu)
             self.assertTrue(
                 PassVersionChecker.IsCompatible('tensorrt_subgraph_pass')
@@ -164,8 +165,9 @@ class TensorRTSubgraphPassDynamicSplitFp16SerializeTest(InferencePassTest):
     def test_check_output(self):
         if paddle.is_compiled_with_cuda():
             use_gpu = True
-            if os.path.exists(self.path + "_opt_cache"):
-                shutil.rmtree(self.path + "_opt_cache")
+            opt_path = os.path.join(self.path, '_opt_cache')
+            if os.path.exists(opt_path):
+                shutil.rmtree(opt_path)
             self.check_output_with_option(use_gpu, 1e-3)
             self.assertTrue(
                 PassVersionChecker.IsCompatible('tensorrt_subgraph_pass')
@@ -313,8 +315,9 @@ class TensorRTSubgraphPassLayerNormDynamicTest(InferencePassTest):
         self.serialize = True

     def test_check_output(self):
-        if os.path.exists(self.path + "_opt_cache"):
-            shutil.rmtree(self.path + "_opt_cache")
+        opt_path = os.path.join(self.path, '_opt_cache')
+        if os.path.exists(opt_path):
+            shutil.rmtree(opt_path)
         if paddle.is_compiled_with_cuda():
             use_gpu = True
             self.check_output_with_option(use_gpu)
@@ -332,8 +335,9 @@ class TensorRTSubgraphPassLayerNormDynamicFP16Test(
         self.serialize = True

     def test_check_output(self):
-        if os.path.exists(self.path + "_opt_cache"):
-            shutil.rmtree(self.path + "_opt_cache")
+        opt_path = os.path.join(self.path, '_opt_cache')
+        if os.path.exists(opt_path):
+            shutil.rmtree(opt_path)
         if paddle.is_compiled_with_cuda():
             use_gpu = True
             self.check_output_with_option(use_gpu, atol=0.01, rtol=0.01)
@@ -406,8 +410,9 @@ class TensorRTSubgraphPassElementwiseSerializeTest(
         )

     def test_check_output(self):
-        if os.path.exists(self.path + "_opt_cache"):
-            shutil.rmtree(self.path + "_opt_cache")
+        opt_path = os.path.join(self.path, '_opt_cache')
+        if os.path.exists(opt_path):
+            shutil.rmtree(opt_path)
         super().test_check_output()
@@ -444,8 +449,9 @@ class TensorRTSubgraphPassElementwiseBroadcastDynamicTest(InferencePassTest):
         return paddle.add(x=data1, y=data2)

     def test_check_output(self):
-        if os.path.exists(self.path + "_opt_cache"):
-            shutil.rmtree(self.path + "_opt_cache")
+        opt_path = os.path.join(self.path, '_opt_cache')
+        if os.path.exists(opt_path):
+            shutil.rmtree(opt_path)
         if paddle.is_compiled_with_cuda():
             use_gpu = True
             self.check_output_with_option(use_gpu)
......
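An equivalent pattern, sketched here as an aside and not part of this commit: tests that prefer explicit scoping can use TemporaryDirectory as a context manager, which ties deletion of the cache directory to the end of the with block.

import os
import tempfile

# Sketch only: everything created under tmp is deleted at block exit.
with tempfile.TemporaryDirectory() as tmp:
    cache = os.path.join(tmp, '_opt_cache')
    os.makedirs(cache)
    assert os.path.exists(cache)
assert not os.path.exists(cache)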