diff --git a/paddle/fluid/framework/CMakeLists.txt b/paddle/fluid/framework/CMakeLists.txt
index f2361c5ceaae6b9ac02f51951cc219fe9740bae8..b236eef3cee36a61bc18ad2eff1713ab39cbcc78 100644
--- a/paddle/fluid/framework/CMakeLists.txt
+++ b/paddle/fluid/framework/CMakeLists.txt
@@ -171,9 +171,9 @@ if(WITH_DISTRIBUTE)
     set_source_files_properties(executor.cc PROPERTIES COMPILE_FLAGS ${DISTRIBUTE_COMPILE_FLAGS})
 else()
   if(NOT WIN32)
-    cc_library(executor SRCS executor.cc DEPS op_registry device_context scope framework_proto glog lod_rank_table feed_fetch_method graph_to_program_pass ngraph_operator variable_helper)
+    cc_library(executor SRCS executor.cc DEPS op_registry device_context scope framework_proto glog lod_rank_table feed_fetch_method graph_to_program_pass ngraph_operator variable_helper garbage_collector)
   else(NOT WIN32)
-    cc_library(executor SRCS executor.cc DEPS op_registry device_context scope framework_proto glog lod_rank_table feed_fetch_method graph_to_program_pass variable_helper)
+    cc_library(executor SRCS executor.cc DEPS op_registry device_context scope framework_proto glog lod_rank_table feed_fetch_method graph_to_program_pass variable_helper garbage_collector)
   endif(NOT WIN32)
   cc_test(test_naive_executor SRCS naive_executor_test.cc DEPS naive_executor elementwise_add_op)
 endif()
diff --git a/python/paddle/fluid/tests/unittests/test_eager_deletion_dynamic_rnn_base.py b/python/paddle/fluid/tests/unittests/test_eager_deletion_dynamic_rnn_base.py
new file mode 100644
index 0000000000000000000000000000000000000000..e91cfe0b45ab7e4e56fccf8d49eb381fbbd199d1
--- /dev/null
+++ b/python/paddle/fluid/tests/unittests/test_eager_deletion_dynamic_rnn_base.py
@@ -0,0 +1,86 @@
+# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import os
+os.environ['FLAGS_eager_delete_tensor_gb'] = '0.0'
+os.environ['CPU_NUM'] = '2'
+
+import six
+import unittest
+
+import paddle
+import paddle.fluid.core as core
+import paddle.fluid as fluid
+
+
+def train(network, use_cuda, use_parallel_executor, batch_size=32, pass_num=2):
+    if use_cuda and not core.is_compiled_with_cuda():
+        print('Skip use_cuda=True because Paddle is not compiled with cuda')
+        return
+
+    word_dict = paddle.dataset.imdb.word_dict()
+    train_reader = paddle.batch(
+        paddle.dataset.imdb.train(word_dict), batch_size=batch_size)
+
+    data = fluid.layers.data(
+        name="words", shape=[1], dtype="int64", lod_level=1)
+
+    label = fluid.layers.data(name="label", shape=[1], dtype="int64")
+
+    cost = network(data, label, len(word_dict))
+    optimizer = fluid.optimizer.Adagrad(learning_rate=0.2)
+    optimizer.minimize(cost)
+
+    place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace()
+    feeder = fluid.DataFeeder(feed_list=[data, label], place=place)
+    reader = feeder.decorate_reader(
+        train_reader, multi_devices=use_parallel_executor)
+
+    exe = fluid.Executor(place)
+    exe.run(fluid.default_startup_program())
+
+    if use_parallel_executor:
+        train_exe = fluid.ParallelExecutor(
+            use_cuda=use_cuda, loss_name=cost.name)
+        fetch_list = [cost.name]
+    else:
+        train_exe = exe
+        fetch_list = [cost]
+
+    for pass_id in six.moves.xrange(pass_num):
+        batch_id = 0
+        for data in reader():
+            train_exe.run(feed=data,
+                          fetch_list=fetch_list if batch_id % 4 == 0 else [])
+            batch_id += 1
+            if batch_id > 16:
+                break
+
+
+class TestBase(unittest.TestCase):
+    def setUp(self):
+        self.net = None
+
+    def test_network(self):
+        if self.net is None:
+            return
+
+        for use_cuda in [True, False]:
+            for use_parallel_executor in [False, True]:
+                print('network: {}, use_cuda: {}, use_parallel_executor: {}'.
+                      format(self.net.__name__, use_cuda,
+                             use_parallel_executor))
+                with fluid.program_guard(fluid.Program(), fluid.Program()):
+                    with fluid.scope_guard(core.Scope()):
+                        train(self.net, use_cuda, use_parallel_executor)
diff --git a/python/paddle/fluid/tests/unittests/test_eager_deletion_gru_net.py b/python/paddle/fluid/tests/unittests/test_eager_deletion_gru_net.py
index 1ec174544cb982dbb8a634647a839beec5eb6cdf..5ed3d9fdf3bf765f1b9ef8ba1ef2a5795f1874c7 100644
--- a/python/paddle/fluid/tests/unittests/test_eager_deletion_gru_net.py
+++ b/python/paddle/fluid/tests/unittests/test_eager_deletion_gru_net.py
@@ -13,7 +13,7 @@
 # limitations under the License.
 
 import unittest
-from test_eager_deletion_lstm_net import TestBase
+from test_eager_deletion_dynamic_rnn_base import TestBase
 
 import paddle.fluid as fluid
 
diff --git a/python/paddle/fluid/tests/unittests/test_eager_deletion_lstm_net.py b/python/paddle/fluid/tests/unittests/test_eager_deletion_lstm_net.py
index 431765bff2dc05bb33c203cf88ba012f187f8588..8462c06aa56e0469fd06c7dc4b2ed514f7eb51ba 100644
--- a/python/paddle/fluid/tests/unittests/test_eager_deletion_lstm_net.py
+++ b/python/paddle/fluid/tests/unittests/test_eager_deletion_lstm_net.py
@@ -12,60 +12,9 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-import os
-os.environ['FLAGS_eager_delete_tensor_gb'] = '0.0'
-os.environ['CPU_NUM'] = '2'
-
-import six
-import unittest
-
-import paddle
-import paddle.fluid.core as core
+from test_eager_deletion_dynamic_rnn_base import TestBase
 import paddle.fluid as fluid
-
-
-def train(network, use_cuda, use_parallel_executor, batch_size=32, pass_num=2):
-    if use_cuda and not core.is_compiled_with_cuda():
-        print('Skip use_cuda=True because Paddle is not compiled with cuda')
-        return
-
-    word_dict = paddle.dataset.imdb.word_dict()
-    train_reader = paddle.batch(
-        paddle.dataset.imdb.train(word_dict), batch_size=batch_size)
-
-    data = fluid.layers.data(
-        name="words", shape=[1], dtype="int64", lod_level=1)
-
-    label = fluid.layers.data(name="label", shape=[1], dtype="int64")
-
-    cost = network(data, label, len(word_dict))
-    optimizer = fluid.optimizer.Adagrad(learning_rate=0.2)
-    optimizer.minimize(cost)
-
-    place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace()
-    feeder = fluid.DataFeeder(feed_list=[data, label], place=place)
-    reader = feeder.decorate_reader(
-        train_reader, multi_devices=use_parallel_executor)
-
-    exe = fluid.Executor(place)
-    exe.run(fluid.default_startup_program())
-
-    if use_parallel_executor:
-        train_exe = fluid.ParallelExecutor(
-            use_cuda=use_cuda, loss_name=cost.name)
-        fetch_list = [cost.name]
-    else:
-        train_exe = exe
-        fetch_list = [cost]
-
-    for pass_id in six.moves.xrange(pass_num):
-        batch_id = 0
-        for data in reader():
-            train_exe.run(feed=data,
-                          fetch_list=fetch_list if batch_id % 4 == 0 else [])
-            batch_id += 1
-            if batch_id > 16:
-                break
+import unittest
 
 
 def lstm_net(data,
@@ -92,20 +41,10 @@ def lstm_net(data,
     return avg_cost
 
 
-class TestBase(unittest.TestCase):
+class LSTMTest(TestBase):
     def setUp(self):
         self.net = lstm_net
 
-    def test_network(self):
-        for use_cuda in [True, False]:
-            for use_parallel_executor in [False, True]:
-                print('network: {}, use_cuda: {}, use_parallel_executor: {}'.
-                      format(self.net.__name__, use_cuda,
-                             use_parallel_executor))
-                with fluid.program_guard(fluid.Program(), fluid.Program()):
-                    with fluid.scope_guard(core.Scope()):
-                        train(self.net, use_cuda, use_parallel_executor)
-
 
 if __name__ == "__main__":
     unittest.main()
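
Note: after this refactor, adding an eager-deletion regression test for another dynamic RNN network only requires defining the network function and subclassing TestBase, as the gru_net and lstm_net files now do. A minimal sketch of such a new test module follows; the file name, network function, and layer choices here are hypothetical illustrations, not part of this patch:

    # test_eager_deletion_my_rnn_net.py -- hypothetical example, not in this patch
    import unittest

    from test_eager_deletion_dynamic_rnn_base import TestBase
    import paddle.fluid as fluid


    def my_rnn_net(data, label, dict_dim, emb_dim=128, hid_dim=128):
        # Hypothetical network: embedding -> dynamic GRU -> max pooling -> softmax.
        # The signature must match how TestBase's train() calls it:
        # network(data, label, len(word_dict)) -> cost.
        emb = fluid.layers.embedding(input=data, size=[dict_dim, emb_dim])
        # dynamic_gru expects its input width to be 3 * hidden size.
        fc0 = fluid.layers.fc(input=emb, size=hid_dim * 3)
        gru = fluid.layers.dynamic_gru(input=fc0, size=hid_dim)
        pool = fluid.layers.sequence_pool(input=gru, pool_type='max')
        prediction = fluid.layers.fc(input=pool, size=2, act='softmax')
        cost = fluid.layers.cross_entropy(input=prediction, label=label)
        return fluid.layers.mean(x=cost)


    class MyRNNTest(TestBase):
        def setUp(self):
            # TestBase.test_network trains this network with and without
            # ParallelExecutor, on CPU and (if available) CUDA, with eager
            # deletion enabled via FLAGS_eager_delete_tensor_gb=0.0.
            self.net = my_rnn_net


    if __name__ == "__main__":
        unittest.main()

Keeping TestBase.setUp's default of self.net = None lets the base module itself run as a no-op test file, so only subclasses that assign a network actually exercise the training loop.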