Unverified commit 892f94bc, authored by kangguangli, committed by GitHub

remove parallel_executor related unit tests (#51632)

* remove parallel_executor related unit tests

* fix CI
Parent 53c73c77
@@ -1245,10 +1245,6 @@ if(WITH_CINN AND WITH_TESTING)
)
endif()
# ExecutionStrategy is deprecated in standalone executor
set_tests_properties(test_parallel_executor_drop_scope
PROPERTIES ENVIRONMENT "FLAGS_USE_STANDALONE_EXECUTOR=0")
set_tests_properties(
test_cuda_graph_static_mode
PROPERTIES ENVIRONMENT "FLAGS_CUDA_GRAPH_USE_STANDALONE_EXECUTOR=1")
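For local reproduction of tests like the one whose property is removed in this hunk, the same flag the CMake rule used to set can be exported before Paddle is imported. A minimal sketch, assuming the FLAGS_* variable is read from the process environment when Paddle initializes (only the flag name comes from the hunk above; everything else is illustrative):

```python
# Hypothetical local-repro helper, not part of this change.
import os

# Same environment the removed set_tests_properties() rule applied:
# stay on the legacy (non-standalone) executor path.
os.environ["FLAGS_USE_STANDALONE_EXECUTOR"] = "0"  # assumption: read at import time

import paddle  # imported after setting the flag on purpose

paddle.enable_static()
```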
# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import unittest

import numpy

import paddle
import paddle.fluid as fluid


class TestParallelExecutorDropExeScope(unittest.TestCase):
    def check_drop_scope(self, use_cuda=True):
        place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace()
        if not use_cuda:
            os.environ['CPU_NUM'] = str(2)

        train_program = fluid.Program()
        startup_program = fluid.Program()
        with fluid.program_guard(train_program, startup_program):
            data = paddle.static.data(name='X', shape=[-1, 1], dtype='float32')
            hidden = paddle.static.nn.fc(x=data, size=10)
            loss = paddle.mean(hidden)
            test_program = fluid.default_main_program().clone(for_test=True)
            fluid.optimizer.SGD(learning_rate=0.01).minimize(loss)

        exe = fluid.Executor(place)
        exe.run(startup_program)

        exec_strateg = fluid.ExecutionStrategy()
        exec_strateg.num_iteration_per_drop_scope = 10

        train_exe = fluid.ParallelExecutor(
            use_cuda=use_cuda,
            main_program=train_program,
            loss_name=loss.name,
            exec_strategy=exec_strateg,
        )
        test_exe = fluid.ParallelExecutor(
            use_cuda=use_cuda,
            main_program=test_program,
            share_vars_from=train_exe,
            exec_strategy=exec_strateg,
        )

        x = numpy.random.random(size=(10, 1)).astype('float32')
        train_exe.run(feed={"X": x}, fetch_list=[loss.name])
        test_exe.run(feed={"X": x}, fetch_list=[loss.name])

        assert not train_exe._need_create_local_exe_scopes()
        assert not test_exe._need_create_local_exe_scopes()

        # drop the local execution scope immediately
        train_exe.drop_local_exe_scopes()
        test_exe.drop_local_exe_scopes()

        assert train_exe._need_create_local_exe_scopes()
        assert test_exe._need_create_local_exe_scopes()

    def test_drop_scope(self):
        self.check_drop_scope(use_cuda=False)
        if fluid.core.is_compiled_with_cuda():
            self.check_drop_scope(use_cuda=True)


if __name__ == '__main__':
    unittest.main()
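The deleted test above exercised ExecutionStrategy.num_iteration_per_drop_scope and ParallelExecutor's local scopes, which the CMake comment marks as deprecated under the standalone executor. For comparison, here is a minimal sketch, not part of the diff and built only from APIs already used in the test, of the same toy network run through the plain fluid.Executor, which needs no explicit scope management:

```python
import numpy

import paddle
import paddle.fluid as fluid

paddle.enable_static()

# Build the same one-layer network as the removed test.
train_program = fluid.Program()
startup_program = fluid.Program()
with fluid.program_guard(train_program, startup_program):
    data = paddle.static.data(name='X', shape=[-1, 1], dtype='float32')
    hidden = paddle.static.nn.fc(x=data, size=10)
    loss = paddle.mean(hidden)
    fluid.optimizer.SGD(learning_rate=0.01).minimize(loss)

place = fluid.CUDAPlace(0) if fluid.core.is_compiled_with_cuda() else fluid.CPUPlace()
exe = fluid.Executor(place)
exe.run(startup_program)

# One training step; the executor manages its scopes internally.
x = numpy.random.random(size=(10, 1)).astype('float32')
(loss_val,) = exe.run(train_program, feed={"X": x}, fetch_list=[loss.name])
```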
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest

from simple_nets import init_data, simple_fc_net

import paddle.fluid as fluid


class TestMNIST(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        cls.save_dirname = "./"
        cls.model_filename = (
            "test_parallel_executor_run_load_infer_program_model"
        )
        cls.params_filename = (
            "test_parallel_executor_run_load_infer_program_parameter"
        )
        cls.place = fluid.CPUPlace()
        cls.exe = fluid.Executor(cls.place)
        img, label = init_data()
        cls.batch_data = []
        for img, label in zip(img, label):
            cls.batch_data.append([img, label])

    def test_simple_fc(self):
        exe_loss = self.run_with_executor()

        [
            inference_program,
            feed_target_names,
            fetch_targets,
        ] = fluid.io.load_inference_model(
            self.save_dirname,
            self.exe,
            self.model_filename,
            self.params_filename,
        )

        train_exe = fluid.ParallelExecutor(
            use_cuda=False, main_program=inference_program
        )
        feed_vars = [
            inference_program.global_block().var(var_name)
            for var_name in ["image", "label"]
        ]
        feeder = fluid.DataFeeder(place=self.place, feed_list=feed_vars)

        pe_loss = train_exe.run(
            feed=feeder.feed(self.batch_data),
            fetch_list=[fetch_targets[0].name],
        )
        assert exe_loss == pe_loss

    def run_with_executor(self):
        main = fluid.Program()
        startup = fluid.Program()
        with fluid.program_guard(main, startup):
            loss = simple_fc_net()

        feed_vars = [
            main.global_block().var(var_name) for var_name in ["image", "label"]
        ]
        feeder = fluid.DataFeeder(place=self.place, feed_list=feed_vars)
        self.exe.run(startup)

        loss_data = self.exe.run(
            main, feed=feeder.feed(self.batch_data), fetch_list=[loss.name]
        )

        fluid.io.save_inference_model(
            self.save_dirname,
            ["image", "label"],
            [loss],
            self.exe,
            model_filename=self.model_filename,
            params_filename=self.params_filename,
            main_program=main,
        )
        return loss_data


if __name__ == '__main__':
    unittest.main()
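This second deleted test compared Executor and ParallelExecutor losses on a saved inference program. A minimal sketch of loading and running that saved model with the plain Executor alone; the directory and filenames mirror the test's own save_dirname/model_filename/params_filename attributes, and the batch construction is assumed to come from the same simple_nets helper:

```python
import paddle
import paddle.fluid as fluid
from simple_nets import init_data

paddle.enable_static()

place = fluid.CPUPlace()
exe = fluid.Executor(place)

# Load the program saved by run_with_executor() above.
[inference_program, feed_target_names, fetch_targets] = fluid.io.load_inference_model(
    "./",
    exe,
    "test_parallel_executor_run_load_infer_program_model",
    "test_parallel_executor_run_load_infer_program_parameter",
)

# Feed one batch through the plain Executor and fetch the saved loss target.
img, label = init_data()
batch_data = list(zip(img, label))
feed_vars = [
    inference_program.global_block().var(name) for name in ["image", "label"]
]
feeder = fluid.DataFeeder(place=place, feed_list=feed_vars)

loss_val = exe.run(
    inference_program,
    feed=feeder.feed(batch_data),
    fetch_list=[fetch_targets[0].name],
)
```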
@@ -407,7 +407,6 @@ HIGH_PARALLEL_JOB_NEW = [
'place_test',
'test_fleet_launch_cloud',
'test_conv2d_bf16_mkldnn_op',
'test_parallel_executor_run_load_infer_program',
'scatter_test',
'graph_to_program_pass_test',
'test_lod_tensor_array_ops',
@@ -1409,7 +1408,6 @@ FOURTH_HIGH_PARALLEL_JOB_NEW = [
'test_instance_norm_op',
'test_lambv2_op',
'test_yolo_box_op',
'test_parallel_executor_drop_scope',
'test_generator_dataloader',
'test_conv2d_transpose_op_depthwise_conv',
'test_imperative_save_load_v2',
@@ -2329,7 +2327,6 @@ TETRAD_PARALLEL_JOB = [
'jit_kernel_test',
'test_conv_activation_mkldnn_fuse_pass',
'test_trt_conv3d_op',
'test_parallel_executor_drop_scope',
'test_tensorrt_engine',
'test_load_state_dict_from_old_format',
'test_fuse_elewise_add_act_pass',
@@ -3110,7 +3107,6 @@ TWO_PARALLEL_JOB = [
'test_initializer',
'test_egr_ds_grad_node_info',
'test_save_inference_model_conditional_op',
'test_parallel_executor_run_load_infer_program',
'test_hapi_hub_model',
'test_get_inputs_outputs_in_block',
'test_get_device_properties',
@@ -368,8 +368,6 @@ STATIC_MODE_TESTING_LIST = [
'test_pad_constant_like',
'test_pad_op',
'test_pairwise_distance',
'test_parallel_executor_drop_scope',
'test_parallel_executor_run_load_infer_program',
'test_parameter',
'test_partial_concat_op',
'test_partial_eager_deletion_transformer',