Unverified commit e90dfaf7 authored by S shiyutang, committed by GitHub

Cherry-pick PR#43237 from develop (#43685)

* merge_release_and_dev

* merge_release_dev

* update

* Use tempfile to place the temporary files (#43237)

* tempfile_fix

* update

* fix_CI

* update_word2vec.inference.model

* remove_change_in_word2vec_book

* fix_word2vec_book

* rm_affine

* update
Parent f4c42389
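The pattern this PR applies is the same in every file: create a tempfile.TemporaryDirectory(), build all output paths with os.path.join against its .name, and call cleanup() when the test is done, so no artifacts are left in the working directory. Below is a minimal sketch of the fixture-style idiom; the test and model names are illustrative only, not taken from the diff:

import os
import tempfile
import unittest


class TestSaveLoadPattern(unittest.TestCase):
    def setUp(self):
        # Private scratch directory; everything the test writes goes here.
        self.temp_dir = tempfile.TemporaryDirectory()

    def tearDown(self):
        # Removes the directory and its contents, even when the test failed.
        self.temp_dir.cleanup()

    def test_save_and_load(self):
        save_dirname = os.path.join(self.temp_dir.name,
                                    "example.inference.model")
        # ... train, save under save_dirname, then reload and infer ...
        self.assertTrue(save_dirname.startswith(self.temp_dir.name))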
@@ -24,6 +24,7 @@ import unittest
import os
import copy
import numpy as np
import tempfile
from paddle.static.amp import decorate
paddle.enable_static()
@@ -268,18 +269,26 @@ def infer(use_cuda, save_dirname=None):
clip_extra=True)
def main(net_type, use_cuda, is_local=True):
if use_cuda and not fluid.core.is_compiled_with_cuda():
return
class TestImageClassification(unittest.TestCase):
# Directory for saving the trained model
save_dirname = "image_classification_" + net_type + ".inference.model"
def setUp(self):
self.temp_dir = tempfile.TemporaryDirectory()
train(net_type, use_cuda, save_dirname, is_local)
#infer(use_cuda, save_dirname)
def tearDown(self):
self.temp_dir.cleanup()
def main(self, net_type, use_cuda, is_local=True):
if use_cuda and not fluid.core.is_compiled_with_cuda():
return
# Directory for saving the trained model
save_dirname = os.path.join(
self.temp_dir.name,
"image_classification_" + net_type + ".inference.model")
train(net_type, use_cuda, save_dirname, is_local)
#infer(use_cuda, save_dirname)
class TestImageClassification(unittest.TestCase):
def test_amp_lists(self):
white_list = copy.copy(
fluid.contrib.mixed_precision.fp16_lists.white_list)
@@ -408,11 +417,11 @@ class TestImageClassification(unittest.TestCase):
def test_vgg_cuda(self):
with self.scope_prog_guard():
main('vgg', use_cuda=True)
self.main('vgg', use_cuda=True)
def test_resnet_cuda(self):
with self.scope_prog_guard():
main('resnet', use_cuda=True)
self.main('resnet', use_cuda=True)
@contextlib.contextmanager
def scope_prog_guard(self):
......
@@ -25,6 +25,7 @@ import math
import sys
import os
import struct
import tempfile
paddle.enable_static()
@@ -192,11 +193,13 @@ def main(use_cuda, is_local=True, use_bf16=False, pure_bf16=False):
if use_bf16 and not fluid.core.is_compiled_with_mkldnn():
return
temp_dir = tempfile.TemporaryDirectory()
# Directory for saving the trained model
save_dirname = "fit_a_line.inference.model"
save_dirname = os.path.join(temp_dir.name, "fit_a_line.inference.model")
train(use_cuda, save_dirname, is_local, use_bf16, pure_bf16)
infer(use_cuda, save_dirname, use_bf16)
temp_dir.cleanup()
class TestFitALineBase(unittest.TestCase):
......
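One caveat of the function-local form used in main() above: temp_dir.cleanup() is never reached if train or infer raises, so a failed run can leave the directory behind until the OS reclaims it. A context-manager variant would guarantee removal; this is a sketch of the alternative, not what the PR does, and it assumes the train and infer helpers from the surrounding file:

import os
import tempfile

def main(use_cuda, is_local=True, use_bf16=False, pure_bf16=False):
    with tempfile.TemporaryDirectory() as temp_dir_name:
        # The directory is deleted when the block exits, even on exceptions.
        save_dirname = os.path.join(temp_dir_name, "fit_a_line.inference.model")
        train(use_cuda, save_dirname, is_local, use_bf16, pure_bf16)
        infer(use_cuda, save_dirname, use_bf16)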
@@ -22,6 +22,7 @@ import sys
import numpy
import unittest
import os
import tempfile
import numpy as np
paddle.enable_static()
@@ -240,10 +241,13 @@ def main(net_type, use_cuda, is_local=True):
return
# Directory for saving the trained model
save_dirname = "image_classification_" + net_type + ".inference.model"
temp_dir = tempfile.TemporaryDirectory()
save_dirname = os.path.join(
temp_dir.name, "image_classification_" + net_type + ".inference.model")
train(net_type, use_cuda, save_dirname, is_local)
infer(use_cuda, save_dirname)
temp_dir.cleanup()
class TestImageClassification(unittest.TestCase):
......
@@ -20,6 +20,7 @@ import numpy as np
import os
import time
import unittest
import tempfile
import paddle
import paddle.dataset.conll05 as conll05
@@ -350,12 +351,16 @@ def main(use_cuda, is_local=True):
if use_cuda and not fluid.core.is_compiled_with_cuda():
return
temp_dir = tempfile.TemporaryDirectory()
# Directory for saving the trained model
save_dirname = "label_semantic_roles.inference.model"
save_dirname = os.path.join(temp_dir.name,
"label_semantic_roles.inference.model")
train(use_cuda, save_dirname, is_local)
infer(use_cuda, save_dirname)
temp_dir.cleanup()
class TestLabelSemanticRoles(unittest.TestCase):
def test_cuda(self):
......
@@ -23,6 +23,7 @@ import paddle.fluid as fluid
import paddle.fluid.framework as framework
import paddle.fluid.layers as layers
import paddle.fluid.nets as nets
import tempfile
from paddle.fluid.executor import Executor
from paddle.fluid.optimizer import SGDOptimizer
@@ -320,10 +321,13 @@ def main(use_cuda):
return
# Directory for saving the inference model
save_dirname = "recommender_system.inference.model"
temp_dir = tempfile.TemporaryDirectory()
save_dirname = os.path.join(temp_dir.name,
"recommender_system.inference.model")
train(use_cuda, save_dirname)
infer(use_cuda, save_dirname)
temp_dir.cleanup()
if __name__ == '__main__':
......
@@ -23,7 +23,9 @@ import paddle.fluid.layers as layers
import contextlib
import math
import sys
import os
import unittest
import tempfile
from paddle.fluid.executor import Executor
import paddle
@@ -257,10 +259,13 @@ def main(use_cuda):
return
# Directory for saving the trained model
save_dirname = "rnn_encoder_decoder.inference.model"
temp_dir = tempfile.TemporaryDirectory()
save_dirname = os.path.join(temp_dir.name,
"rnn_encoder_decoder.inference.model")
train(use_cuda, save_dirname)
infer(use_cuda, save_dirname)
temp_dir.cleanup()
class TestRnnEncoderDecoder(unittest.TestCase):
......
@@ -22,6 +22,7 @@ import os
import numpy as np
import math
import sys
import tempfile
paddle.enable_static()
@@ -238,7 +239,7 @@ def infer(target, save_dirname=None):
infer_inputs = [to_infer_tensor(t) for t in infer_inputs]
infer_config = fluid.core.NativeConfig()
infer_config.model_dir = 'word2vec.inference.model'
infer_config.model_dir = save_dirname
if target == "cuda":
infer_config.use_gpu = True
infer_config.device = 0
@@ -264,8 +265,9 @@ def main(target, is_sparse, is_parallel, use_bf16, pure_bf16):
if use_bf16 and not fluid.core.is_compiled_with_mkldnn():
return
temp_dir = tempfile.TemporaryDirectory()
if not is_parallel:
save_dirname = "word2vec.inference.model"
save_dirname = os.path.join(temp_dir.name, "word2vec.inference.model")
else:
save_dirname = None
@@ -282,6 +284,7 @@ def main(target, is_sparse, is_parallel, use_bf16, pure_bf16):
use_bf16=use_bf16,
pure_bf16=pure_bf16)
infer(target, save_dirname)
temp_dir.cleanup()
FULL_TEST = os.getenv('FULL_TEST',
......
@@ -16,8 +16,10 @@ import paddle
paddle.set_default_dtype("float64")
from paddle.fluid.layers import sequence_mask
import os
import numpy as np
import unittest
import tempfile
from convert import convert_params_for_net
from rnn_numpy import SimpleRNN, LSTM, GRU
@@ -318,9 +320,11 @@ def predict_test_util(place, mode, stop_gradient=True):
rnn.train()
rnn = paddle.jit.to_static(
rnn, [paddle.static.InputSpec(
shape=[None, None, 16], dtype=x.dtype)])
paddle.jit.save(rnn, "./inference/%s_infer" % mode)
rnn, [paddle.static.InputSpec(shape=[None, None, 16], dtype=x.dtype)])
temp_dir = tempfile.TemporaryDirectory()
save_dirname = os.path.join(temp_dir.name, "./inference/%s_infer" % mode)
paddle.jit.save(rnn, save_dirname)
paddle.enable_static()
@@ -328,8 +332,7 @@ def predict_test_util(place, mode, stop_gradient=True):
with paddle.static.scope_guard(new_scope):
exe = paddle.static.Executor(place)
[inference_program, feed_target_names,
fetch_targets] = paddle.static.load_inference_model(
"./inference/%s_infer" % mode, exe)
fetch_targets] = paddle.static.load_inference_model(save_dirname, exe)
results = exe.run(inference_program,
feed={feed_target_names[0]: x.numpy()},
fetch_list=fetch_targets)
@@ -337,6 +340,8 @@ def predict_test_util(place, mode, stop_gradient=True):
y.numpy(), results[0]) # eval results equal predict results
paddle.disable_static()
temp_dir.cleanup()
def load_tests(loader, tests, pattern):
suite = unittest.TestSuite()
......
@@ -25,6 +25,7 @@ import numpy as np
import os
import shutil
import unittest
import tempfile
class TestDataset(unittest.TestCase):
@@ -165,8 +166,12 @@ class TestDataset(unittest.TestCase):
"""
Testcase for InMemoryDataset from create to run.
"""
filename1 = "afs:test_in_memory_dataset_run_a.txt"
filename2 = "afs:test_in_memory_dataset_run_b.txt"
temp_dir = tempfile.TemporaryDirectory()
filename1 = os.path.join(temp_dir.name,
"afs:test_in_memory_dataset_run_a.txt")
filename2 = os.path.join(temp_dir.name,
"afs:test_in_memory_dataset_run_b.txt")
with open(filename1, "w") as f:
data = "1 1 2 3 3 4 5 5 5 5 1 1\n"
data += "1 2 2 3 4 4 6 6 6 6 1 2\n"
@@ -216,19 +221,24 @@ class TestDataset(unittest.TestCase):
except Exception as e:
self.assertTrue(False)
os.remove(filename1)
os.remove(filename2)
temp_dir.cleanup()
def test_in_memory_dataset_run(self):
"""
Testcase for InMemoryDataset from create to run.
"""
with open("test_in_memory_dataset_run_a.txt", "w") as f:
temp_dir = tempfile.TemporaryDirectory()
filename1 = os.path.join(temp_dir.name,
"test_in_memory_dataset_run_a.txt")
filename2 = os.path.join(temp_dir.name,
"test_in_memory_dataset_run_b.txt")
with open(filename1, "w") as f:
data = "1 1 2 3 3 4 5 5 5 5 1 1\n"
data += "1 2 2 3 4 4 6 6 6 6 1 2\n"
data += "1 3 2 3 5 4 7 7 7 7 1 3\n"
f.write(data)
with open("test_in_memory_dataset_run_b.txt", "w") as f:
with open(filename2, "w") as f:
data = "1 4 2 3 3 4 5 5 5 5 1 4\n"
data += "1 5 2 3 4 4 6 6 6 6 1 5\n"
data += "1 6 2 3 5 4 7 7 7 7 1 6\n"
@@ -246,10 +256,7 @@ class TestDataset(unittest.TestCase):
dataset.init(
batch_size=32, thread_num=3, pipe_command="cat", use_var=slots_vars)
dataset._init_distributed_settings(fea_eval=True, candidate_size=1)
dataset.set_filelist([
"test_in_memory_dataset_run_a.txt",
"test_in_memory_dataset_run_b.txt"
])
dataset.set_filelist([filename1, filename2])
dataset.load_into_memory()
dataset.slots_shuffle(["slot1"])
dataset.local_shuffle()
@@ -272,14 +279,19 @@ class TestDataset(unittest.TestCase):
except Exception as e:
self.assertTrue(False)
os.remove("./test_in_memory_dataset_run_a.txt")
os.remove("./test_in_memory_dataset_run_b.txt")
temp_dir.cleanup()
def test_in_memory_dataset_masterpatch(self):
"""
Testcase for InMemoryDataset from create to run.
"""
with open("test_in_memory_dataset_masterpatch_a.txt", "w") as f:
temp_dir = tempfile.TemporaryDirectory()
filename1 = os.path.join(temp_dir.name,
"test_in_memory_dataset_masterpatch_a.txt")
filename2 = os.path.join(temp_dir.name,
"test_in_memory_dataset_masterpatch_b.txt")
with open(filename1, "w") as f:
data = "1 id1 1 1 2 3 3 4 5 5 5 5 1 1\n"
data += "1 id1 1 2 2 3 4 4 6 6 6 6 1 2\n"
data += "1 id2 1 1 1 1 1 0 1 0\n"
@@ -290,7 +302,7 @@ class TestDataset(unittest.TestCase):
data += "1 id5 1 1 1 1 1 0 1 0\n"
data += "1 id5 1 1 1 1 1 0 1 0\n"
f.write(data)
with open("test_in_memory_dataset_masterpatch_b.txt", "w") as f:
with open(filename2, "w") as f:
data = "1 id6 1 4 2 3 3 4 5 5 5 5 1 4\n"
data += "1 id6 1 1 2 3 4 4 6 6 6 6 1 5\n"
data += "1 id6 1 6 2 3 5 4 7 7 7 7 1 6\n"
@@ -337,14 +349,19 @@ class TestDataset(unittest.TestCase):
dataset.update_settings(merge_size=2)
dataset.dataset.merge_by_lineid()
os.remove("./test_in_memory_dataset_masterpatch_a.txt")
os.remove("./test_in_memory_dataset_masterpatch_b.txt")
temp_dir.cleanup()
def test_in_memory_dataset_masterpatch1(self):
"""
Testcase for InMemoryDataset from create to run.
"""
with open("test_in_memory_dataset_masterpatch1_a.txt", "w") as f:
temp_dir = tempfile.TemporaryDirectory()
filename1 = os.path.join(temp_dir.name,
"test_in_memory_dataset_masterpatch1_a.txt")
filename2 = os.path.join(temp_dir.name,
"test_in_memory_dataset_masterpatch1_b.txt")
with open(filename1, "w") as f:
data = "1 id1 1 1 2 3 3 4 5 5 5 5 1 1\n"
data += "1 id1 1 2 2 3 4 4 6 6 6 6 1 2\n"
data += "1 id2 1 1 1 1 1 0 1 0\n"
@@ -355,7 +372,7 @@ class TestDataset(unittest.TestCase):
data += "1 id5 1 1 1 1 1 0 1 0\n"
data += "1 id5 1 1 1 1 1 0 1 0\n"
f.write(data)
with open("test_in_memory_dataset_masterpatch1_b.txt", "w") as f:
with open(filename2, "w") as f:
data = "1 id6 1 4 2 3 3 4 5 5 5 5 1 4\n"
data += "1 id6 1 1 2 3 4 4 6 6 6 6 1 5\n"
data += "1 id6 1 6 2 3 5 4 7 7 7 7 1 6\n"
@@ -401,8 +418,7 @@ class TestDataset(unittest.TestCase):
dataset._set_merge_by_lineid(2)
dataset.dataset.merge_by_lineid()
os.remove("./test_in_memory_dataset_masterpatch1_a.txt")
os.remove("./test_in_memory_dataset_masterpatch1_b.txt")
temp_dir.cleanup()
def test_in_memory_dataset_run_2(self):
"""
@@ -410,12 +426,18 @@ class TestDataset(unittest.TestCase):
Use CUDAPlace
Use float type id
"""
with open("test_in_memory_dataset_run_a.txt", "w") as f:
temp_dir = tempfile.TemporaryDirectory()
filename1 = os.path.join(temp_dir.name,
"test_in_memory_dataset_run_a.txt")
filename2 = os.path.join(temp_dir.name,
"test_in_memory_dataset_run_b.txt")
with open(filename1, "w") as f:
data = "1 1 2 3 3 4 5 5 5 5 1 1\n"
data += "1 2 2 3 4 4 6 6 6 6 1 2\n"
data += "1 3 2 3 5 4 7 7 7 7 1 3\n"
f.write(data)
with open("test_in_memory_dataset_run_b.txt", "w") as f:
with open(filename2, "w") as f:
data = "1 4 2 3 3 4 5 5 5 5 1 4\n"
data += "1 5 2 3 4 4 6 6 6 6 1 5\n"
data += "1 6 2 3 5 4 7 7 7 7 1 6\n"
@@ -430,12 +452,11 @@ class TestDataset(unittest.TestCase):
slots_vars.append(var)
dataset = paddle.distributed.InMemoryDataset()
dataset.init(
batch_size=32, thread_num=3, pipe_command="cat", use_var=slots_vars)
dataset.set_filelist([
"test_in_memory_dataset_run_a.txt",
"test_in_memory_dataset_run_b.txt"
])
dataset.init(batch_size=32,
thread_num=3,
pipe_command="cat",
use_var=slots_vars)
dataset.set_filelist([filename1, filename2])
dataset.load_into_memory()
dataset.local_shuffle()
@@ -509,19 +530,22 @@ class TestDataset(unittest.TestCase):
fleet_ptr.set_client2client_config(1, 1, 1)
fleet_ptr.get_cache_threshold(0)
os.remove("./test_in_memory_dataset_run_a.txt")
os.remove("./test_in_memory_dataset_run_b.txt")
temp_dir.cleanup()
def test_queue_dataset_run(self):
"""
Testcase for QueueDataset from create to run.
"""
with open("test_queue_dataset_run_a.txt", "w") as f:
temp_dir = tempfile.TemporaryDirectory()
filename1 = os.path.join(temp_dir.name, "test_queue_dataset_run_a.txt")
filename2 = os.path.join(temp_dir.name, "test_queue_dataset_run_b.txt")
with open(filename1, "w") as f:
data = "1 1 2 3 3 4 5 5 5 5 1 1\n"
data += "1 2 2 3 4 4 6 6 6 6 1 2\n"
data += "1 3 2 3 5 4 7 7 7 7 1 3\n"
f.write(data)
with open("test_queue_dataset_run_b.txt", "w") as f:
with open(filename2, "w") as f:
data = "1 4 2 3 3 4 5 5 5 5 1 4\n"
data += "1 5 2 3 4 4 6 6 6 6 1 5\n"
data += "1 6 2 3 5 4 7 7 7 7 1 6\n"
@@ -536,10 +560,11 @@ class TestDataset(unittest.TestCase):
slots_vars.append(var)
dataset = paddle.distributed.QueueDataset()
dataset.init(
batch_size=32, thread_num=3, pipe_command="cat", use_var=slots_vars)
dataset.set_filelist(
["test_queue_dataset_run_a.txt", "test_queue_dataset_run_b.txt"])
dataset.init(batch_size=32,
thread_num=3,
pipe_command="cat",
use_var=slots_vars)
dataset.set_filelist([filename1, filename2])
exe = fluid.Executor(fluid.CPUPlace())
exe.run(fluid.default_startup_program())
@@ -569,10 +594,7 @@ class TestDataset(unittest.TestCase):
except Exception as e:
self.assertTrue(False)
if os.path.exists("./test_queue_dataset_run_a.txt"):
os.remove("./test_queue_dataset_run_a.txt")
if os.path.exists("./test_queue_dataset_run_b.txt"):
os.remove("./test_queue_dataset_run_b.txt")
temp_dir.cleanup()
def test_queue_dataset_run_2(self):
"""
@@ -580,12 +602,16 @@ class TestDataset(unittest.TestCase):
Use CUDAPlace
Use float type id
"""
with open("test_queue_dataset_run_a.txt", "w") as f:
temp_dir = tempfile.TemporaryDirectory()
filename1 = os.path.join(temp_dir.name, "test_queue_dataset_run_a.txt")
filename2 = os.path.join(temp_dir.name, "test_queue_dataset_run_b.txt")
with open(filename1, "w") as f:
data = "1 1 2 3 3 4 5 5 5 5 1 1\n"
data += "1 2 2 3 4 4 6 6 6 6 1 2\n"
data += "1 3 2 3 5 4 7 7 7 7 1 3\n"
f.write(data)
with open("test_queue_dataset_run_b.txt", "w") as f:
with open(filename2, "w") as f:
data = "1 4 2 3 3 4 5 5 5 5 1 4\n"
data += "1 5 2 3 4 4 6 6 6 6 1 5\n"
data += "1 6 2 3 5 4 7 7 7 7 1 6\n"
@@ -600,10 +626,11 @@ class TestDataset(unittest.TestCase):
slots_vars.append(var)
dataset = paddle.distributed.QueueDataset()
dataset.init(
batch_size=32, thread_num=3, pipe_command="cat", use_var=slots_vars)
dataset.set_filelist(
["test_queue_dataset_run_a.txt", "test_queue_dataset_run_b.txt"])
dataset.init(batch_size=32,
thread_num=3,
pipe_command="cat",
use_var=slots_vars)
dataset.set_filelist([filename1, filename2])
exe = fluid.Executor(fluid.CPUPlace() if not core.is_compiled_with_cuda(
) else fluid.CUDAPlace(0))
@@ -623,10 +650,7 @@ class TestDataset(unittest.TestCase):
except Exception as e:
self.assertTrue(False)
if os.path.exists("./test_queue_dataset_run_a.txt"):
os.remove("./test_queue_dataset_run_a.txt")
if os.path.exists("./test_queue_dataset_run_b.txt"):
os.remove("./test_queue_dataset_run_b.txt")
temp_dir.cleanup()
def test_queue_dataset_run_3(self):
"""
@@ -634,13 +658,17 @@ class TestDataset(unittest.TestCase):
Use CUDAPlace
Use float type id
"""
with open("test_queue_dataset_run_a.txt", "w") as f:
temp_dir = tempfile.TemporaryDirectory()
filename1 = os.path.join(temp_dir.name, "test_queue_dataset_run_a.txt")
filename2 = os.path.join(temp_dir.name, "test_queue_dataset_run_b.txt")
with open(filename1, "w") as f:
data = "2 1 2 2 5 4 2 2 7 2 1 3\n"
data += "2 6 2 2 1 4 2 2 4 2 2 3\n"
data += "2 5 2 2 9 9 2 2 7 2 1 3\n"
data += "2 7 2 2 1 9 2 3 7 2 5 3\n"
f.write(data)
with open("test_queue_dataset_run_b.txt", "w") as f:
with open(filename2, "w") as f:
data = "2 1 2 2 5 4 2 2 7 2 1 3\n"
data += "2 6 2 2 1 4 2 2 4 2 2 3\n"
data += "2 5 2 2 9 9 2 2 7 2 1 3\n"
@@ -655,14 +683,12 @@ class TestDataset(unittest.TestCase):
slots_vars.append(var)
dataset = paddle.distributed.InMemoryDataset()
dataset.init(
batch_size=1,
thread_num=2,
input_type=1,
pipe_command="cat",
use_var=slots_vars)
dataset.set_filelist(
["test_queue_dataset_run_a.txt", "test_queue_dataset_run_b.txt"])
dataset.init(batch_size=1,
thread_num=2,
input_type=1,
pipe_command="cat",
use_var=slots_vars)
dataset.set_filelist([filename1, filename2])
dataset.load_into_memory()
exe = fluid.Executor(fluid.CPUPlace() if not core.is_compiled_with_cuda(
@@ -683,10 +709,7 @@ class TestDataset(unittest.TestCase):
except Exception as e:
self.assertTrue(False)
if os.path.exists("./test_queue_dataset_run_a.txt"):
os.remove("./test_queue_dataset_run_a.txt")
if os.path.exists("./test_queue_dataset_run_b.txt"):
os.remove("./test_queue_dataset_run_b.txt")
temp_dir.cleanup()
class TestDatasetWithDataLoader(TestDataset):
@@ -746,12 +769,18 @@ class TestDatasetWithFetchHandler(unittest.TestCase):
"""
Test Dataset With Fetch Handler. TestCases.
"""
with open("test_queue_dataset_run_a.txt", "w") as f:
self.temp_dir = tempfile.TemporaryDirectory()
self.filename1 = os.path.join(self.temp_dir.name,
"test_queue_dataset_run_a.txt")
self.filename2 = os.path.join(self.temp_dir.name,
"test_queue_dataset_run_b.txt")
with open(self.filename1, "w") as f:
data = "1 1 2 3 3 4 5 5 5 5 1 1\n"
data += "1 2 2 3 4 4 6 6 6 6 1 2\n"
data += "1 3 2 3 5 4 7 7 7 7 1 3\n"
f.write(data)
with open("test_queue_dataset_run_b.txt", "w") as f:
with open(self.filename2, "w") as f:
data = "1 4 2 3 3 4 5 5 5 5 1 4\n"
data += "1 5 2 3 4 4 6 6 6 6 1 5\n"
data += "1 6 2 3 5 4 7 7 7 7 1 6\n"
@@ -762,15 +791,14 @@ class TestDatasetWithFetchHandler(unittest.TestCase):
"""
Test Dataset With Fetch Handler. TestCases.
"""
os.remove("./test_queue_dataset_run_a.txt")
os.remove("./test_queue_dataset_run_b.txt")
self.temp_dir.cleanup()
def test_dataset_none(self):
"""
Test Dataset With Fetch Handler. TestCases.
"""
slots_vars, out = self.net()
files = ["test_queue_dataset_run_a.txt", "test_queue_dataset_run_b.txt"]
files = [self.filename1, self.filename2]
dataset = self.get_dataset(slots_vars, files)
exe = fluid.Executor(fluid.CPUPlace())
@@ -792,7 +820,7 @@ class TestDatasetWithFetchHandler(unittest.TestCase):
Test Dataset With Fetch Handler. TestCases.
"""
slots_vars, out = self.net()
files = ["test_queue_dataset_run_a.txt", "test_queue_dataset_run_b.txt"]
files = [self.filename1, self.filename2]
dataset = self.get_dataset(slots_vars, files)
exe = fluid.Executor(fluid.CPUPlace())
@@ -810,7 +838,7 @@ class TestDatasetWithFetchHandler(unittest.TestCase):
Test Dataset With Fetch Handler. TestCases.
"""
slots_vars, out = self.net()
files = ["test_queue_dataset_run_a.txt", "test_queue_dataset_run_b.txt"]
files = [self.filename1, self.filename2]
dataset = self.get_dataset(slots_vars, files)
exe = fluid.Executor(fluid.CPUPlace())
@@ -846,15 +874,20 @@ class TestDataset2(unittest.TestCase):
"""
Testcase for InMemoryDataset from create to run.
"""
temp_dir = tempfile.TemporaryDirectory()
filename1 = os.path.join(temp_dir.name,
"test_in_memory_dataset2_run_a.txt")
filename2 = os.path.join(temp_dir.name,
"test_in_memory_dataset2_run_b.txt")
self.skipTest("parameter server will add pslib UT later")
with open("test_in_memory_dataset2_run_a.txt", "w") as f:
with open(filename1, "w") as f:
data = "1 1 2 3 3 4 5 5 5 5 1 1\n"
data += "1 2 2 3 4 4 6 6 6 6 1 2\n"
data += "1 3 2 3 5 4 7 7 7 7 1 3\n"
f.write(data)
with open("test_in_memory_dataset2_run_b.txt", "w") as f:
with open(filename2, "w") as f:
data = "1 4 2 3 3 4 5 5 5 5 1 4\n"
data += "1 5 2 3 4 4 6 6 6 6 1 5\n"
data += "1 6 2 3 5 4 7 7 7 7 1 6\n"
@@ -893,32 +926,33 @@ class TestDataset2(unittest.TestCase):
exe.run(startup_program)
dataset = paddle.distributed.InMemoryDataset()
dataset.init(
batch_size=32,
thread_num=3,
pipe_command="cat",
use_var=slots_vars)
dataset.set_filelist([
"test_in_memory_dataset2_run_a.txt",
"test_in_memory_dataset2_run_b.txt"
])
dataset.init(batch_size=32,
thread_num=3,
pipe_command="cat",
use_var=slots_vars)
dataset.set_filelist([filename1, filename2])
dataset.load_into_memory()
fleet._opt_info = None
fleet._fleet_ptr = None
os.remove("./test_in_memory_dataset2_run_a.txt")
os.remove("./test_in_memory_dataset2_run_b.txt")
temp_dir.cleanup()
def test_dataset_fleet2(self):
"""
Testcase for InMemoryDataset from create to run.
"""
with open("test_in_memory_dataset2_run2_a.txt", "w") as f:
temp_dir = tempfile.TemporaryDirectory()
filename1 = os.path.join(temp_dir.name,
"test_in_memory_dataset2_run2_a.txt")
filename2 = os.path.join(temp_dir.name,
"test_in_memory_dataset2_run2_b.txt")
with open(filename1, "w") as f:
data = "1 1 2 3 3 4 5 5 5 5 1 1\n"
data += "1 2 2 3 4 4 6 6 6 6 1 2\n"
data += "1 3 2 3 5 4 7 7 7 7 1 3\n"
f.write(data)
with open("test_in_memory_dataset2_run2_b.txt", "w") as f:
with open(filename2, "w") as f:
data = "1 4 2 3 3 4 5 5 5 5 1 4\n"
data += "1 5 2 3 4 4 6 6 6 6 1 5\n"
data += "1 6 2 3 5 4 7 7 7 7 1 6\n"
@@ -963,15 +997,11 @@ class TestDataset2(unittest.TestCase):
print("warning: no mpi4py")
exe.run(startup_program)
dataset = paddle.distributed.InMemoryDataset()
dataset.init(
batch_size=32,
thread_num=3,
pipe_command="cat",
use_var=slots_vars)
dataset.set_filelist([
"test_in_memory_dataset2_run2_a.txt",
"test_in_memory_dataset2_run2_b.txt"
])
dataset.init(batch_size=32,
thread_num=3,
pipe_command="cat",
use_var=slots_vars)
dataset.set_filelist([filename1, filename2])
dataset.load_into_memory()
try:
dataset.global_shuffle(fleet)
@@ -1030,19 +1060,24 @@ class TestDataset2(unittest.TestCase):
except:
print("warning: catch expected error")
os.remove("./test_in_memory_dataset2_run2_a.txt")
os.remove("./test_in_memory_dataset2_run2_b.txt")
temp_dir.cleanup()
def test_bosps_dataset_fleet2(self):
"""
Testcase for InMemoryDataset from create to run.
"""
with open("test_in_memory_dataset2_run2_a.txt", "w") as f:
temp_dir = tempfile.TemporaryDirectory()
filename1 = os.path.join(temp_dir.name,
"test_in_memory_dataset2_run2_a.txt")
filename2 = os.path.join(temp_dir.name,
"test_in_memory_dataset2_run2_b.txt")
with open(filename1, "w") as f:
data = "1 1 2 3 3 4 5 5 5 5 1 1\n"
data += "1 2 2 3 4 4 6 6 6 6 1 2\n"
data += "1 3 2 3 5 4 7 7 7 7 1 3\n"
f.write(data)
with open("test_in_memory_dataset2_run2_b.txt", "w") as f:
with open(filename2, "w") as f:
data = "1 4 2 3 3 4 5 5 5 5 1 4\n"
data += "1 5 2 3 4 4 6 6 6 6 1 5\n"
data += "1 6 2 3 5 4 7 7 7 7 1 6\n"
@@ -1087,15 +1122,11 @@ class TestDataset2(unittest.TestCase):
print("warning: no mpi4py")
exe.run(startup_program)
dataset = paddle.distributed.fleet.BoxPSDataset()
dataset.init(
batch_size=32,
thread_num=3,
pipe_command="cat",
use_var=slots_vars)
dataset.set_filelist([
"test_in_memory_dataset2_run2_a.txt",
"test_in_memory_dataset2_run2_b.txt"
])
dataset.init(batch_size=32,
thread_num=3,
pipe_command="cat",
use_var=slots_vars)
dataset.set_filelist([filename1, filename2])
dataset.load_into_memory()
try:
dataset.global_shuffle(fleet)
@@ -1146,6 +1177,7 @@ class TestDataset2(unittest.TestCase):
#dataset.get_pv_data_size()
dataset.get_memory_data_size()
dataset.get_shuffle_data_size()
temp_dir.cleanup()
if __name__ == '__main__':
......
@@ -18,6 +18,7 @@ import numpy as np
import six
import os
import unittest
import tempfile
from simple_nets import simple_fc_net_with_inputs
BATCH_SIZE = 32
@@ -27,8 +28,6 @@ EPOCH_NUM = 4
IMAGE_SHAPE = [2, 3]
LABEL_SHAPE = [1]
ALL_WRITTEN_FILES = set()
def get_place_string(p):
if isinstance(p, (fluid.CPUPlace or fluid.CUDAPlace)):
@@ -42,13 +41,7 @@ def get_place_string(p):
return 'CUDAPlace()'
def remove_all_written_files():
for filename in ALL_WRITTEN_FILES:
os.remove(filename)
def write_reader_data_to_file(filename, reader):
ALL_WRITTEN_FILES.add(filename)
with open(filename, 'w') as fid:
for instance_list in reader():
for i, instance in enumerate(instance_list):
@@ -77,10 +70,10 @@ class DatasetLoaderTestBase(unittest.TestCase):
def setUp(self):
self.dataset_name = "QueueDataset"
self.drop_last = False
self.temp_dir = tempfile.TemporaryDirectory()
def tearDown(self):
return
remove_all_written_files()
self.temp_dir.cleanup()
def build_network(self):
main_prog = fluid.Program()
@@ -123,7 +116,8 @@ class DatasetLoaderTestBase(unittest.TestCase):
random_delta_batch_size = np.zeros(shape=[file_num])
for i in six.moves.range(file_num):
filename = 'dataset_test_{}.txt'.format(i)
filename = os.path.join(self.temp_dir.name,
'dataset_test_{}.txt'.format(i))
filelist.append(filename)
write_reader_data_to_file(
filename,
@@ -207,18 +201,21 @@ class QueueDatasetTestWithoutDropLast(DatasetLoaderTestBase):
def setUp(self):
self.dataset_name = "QueueDataset"
self.drop_last = True
self.temp_dir = tempfile.TemporaryDirectory()
class InMemoryDatasetTestWithoutDropLast(DatasetLoaderTestBase):
def setUp(self):
self.dataset_name = "InMemoryDataset"
self.drop_last = False
self.temp_dir = tempfile.TemporaryDirectory()
class InMemoryDatasetTestWithDropLast(DatasetLoaderTestBase):
def setUp(self):
self.dataset_name = "InMemoryDataset"
self.drop_last = True
self.temp_dir = tempfile.TemporaryDirectory()
if __name__ == '__main__':
......
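Each subclass setUp above repeats the TemporaryDirectory line because the overrides replace DatasetLoaderTestBase.setUp rather than extend it. An equivalent, less repetitive form, shown here only as a sketch and not part of this PR, delegates to super():

class InMemoryDatasetTestWithDropLast(DatasetLoaderTestBase):
    def setUp(self):
        super().setUp()  # creates self.temp_dir once, in the base class
        self.dataset_name = "InMemoryDataset"
        self.drop_last = True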
@@ -12,10 +12,12 @@
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import unittest
import numpy as np
import paddle
from test_nms_op import nms
import tempfile
def _find(condition):
@@ -78,6 +80,11 @@ class TestOpsNMS(unittest.TestCase):
self.devices = ['cpu']
if paddle.is_compiled_with_cuda():
self.devices.append('gpu')
self.temp_dir = tempfile.TemporaryDirectory()
self.path = os.path.join(self.temp_dir.name, './net')
def tearDown(self):
self.temp_dir.cleanup()
def test_nms(self):
for device in self.devices:
@@ -165,7 +172,6 @@ class TestOpsNMS(unittest.TestCase):
categories, 10)
return out
path = "./net"
boxes = np.random.rand(64, 4).astype('float32')
boxes[:, 2] = boxes[:, 0] + boxes[:, 2]
boxes[:, 3] = boxes[:, 1] + boxes[:, 3]
@@ -173,12 +179,14 @@ class TestOpsNMS(unittest.TestCase):
origin = fun(paddle.to_tensor(boxes))
paddle.jit.save(
fun,
path,
self.path,
input_spec=[
paddle.static.InputSpec(
shape=[None, 4], dtype='float32', name='x')
], )
load_func = paddle.jit.load(path)
paddle.static.InputSpec(shape=[None, 4],
dtype='float32',
name='x')
],
)
load_func = paddle.jit.load(self.path)
res = load_func(paddle.to_tensor(boxes))
self.assertTrue(
np.array_equal(origin, res),
......
@@ -19,7 +19,7 @@ import os
os.environ['FLAGS_cudnn_deterministic'] = '1'
import unittest
import tempfile
import numpy as np
import paddle
@@ -98,7 +98,9 @@ class TestHapiWithAmp(unittest.TestCase):
batch_size=64,
num_iters=2,
log_freq=1)
model.save('./lenet_amp')
temp_dir = tempfile.TemporaryDirectory()
lenet_amp_path = os.path.join(temp_dir.name, './lenet_amp')
model.save(lenet_amp_path)
with paddle.fluid.unique_name.guard():
paddle.seed(2021)
@@ -116,7 +118,8 @@ class TestHapiWithAmp(unittest.TestCase):
model._scaler.state_dict()['incr_count']))
# equal after load
new_model.load('./lenet_amp')
new_model.load(lenet_amp_path)
temp_dir.cleanup()
self.assertEqual(new_model._scaler.state_dict()['incr_count'],
model._scaler.state_dict()['incr_count'])
self.assertEqual(new_model._scaler.state_dict()['decr_count'],
......
@@ -16,6 +16,7 @@ import os
import cv2
import shutil
import unittest
import tempfile
import numpy as np
import paddle
@@ -25,23 +26,25 @@ from paddle.vision.ops import read_file, decode_jpeg
class TestReadFile(unittest.TestCase):
def setUp(self):
fake_img = (np.random.random((400, 300, 3)) * 255).astype('uint8')
cv2.imwrite('fake.jpg', fake_img)
self.temp_dir = tempfile.TemporaryDirectory()
self.img_path = os.path.join(self.temp_dir.name, 'fake.jpg')
cv2.imwrite(self.img_path, fake_img)
def tearDown(self):
os.remove('fake.jpg')
self.temp_dir.cleanup()
def read_file_decode_jpeg(self):
if not paddle.is_compiled_with_cuda():
return
img_bytes = read_file('fake.jpg')
img_bytes = read_file(self.img_path)
img = decode_jpeg(img_bytes, mode='gray')
img = decode_jpeg(img_bytes, mode='rgb')
img = decode_jpeg(img_bytes)
img_cv2 = cv2.imread('fake.jpg')
img_cv2 = cv2.imread(self.img_path)
if paddle.in_dynamic_mode():
np.testing.assert_equal(img.shape, img_cv2.transpose(2, 0, 1).shape)
else:
......
@@ -641,7 +641,8 @@ class TestFunctional(unittest.TestCase):
fake_img = Image.fromarray((np.random.random((32, 32, 3)) * 255).astype(
'uint8'))
path = 'temp.jpg'
temp_dir = tempfile.TemporaryDirectory()
path = os.path.join(temp_dir.name, 'temp.jpg')
fake_img.save(path)
set_image_backend('pil')
@@ -654,7 +655,7 @@ class TestFunctional(unittest.TestCase):
np_img = image_load(path)
os.remove(path)
temp_dir.cleanup()
def test_rotate(self):
np_img = (np.random.rand(28, 28, 3) * 255).astype('uint8')
......