From 4b8d65ab79df075dc1c9b27cf75b027cc4894fb5 Mon Sep 17 00:00:00 2001
From: chengduo
Date: Thu, 28 Jun 2018 10:39:52 +0800
Subject: [PATCH] Add return_numpy for PE (#11792)

---
 python/paddle/fluid/executor.py                                  | 2 ++
 python/paddle/fluid/parallel_executor.py                         | 7 ++++++-
 .../fluid/tests/unittests/parallel_executor_test_base.py         | 3 ---
 .../fluid/tests/unittests/test_parallel_executor_crf.py          | 5 ++---
 .../tests/unittests/test_parallel_executor_fetch_feed.py         | 6 ++++--
 .../unittests/test_parallel_executor_test_while_train.py         | 3 +--
 6 files changed, 15 insertions(+), 11 deletions(-)

diff --git a/python/paddle/fluid/executor.py b/python/paddle/fluid/executor.py
index dc2756746..145f1423e 100644
--- a/python/paddle/fluid/executor.py
+++ b/python/paddle/fluid/executor.py
@@ -78,6 +78,8 @@ def as_numpy(tensor):
     Returns:
         numpy.ndarray
     """
+    if isinstance(tensor, core.LoDTensorArray):
+        return [as_numpy(t) for t in tensor]
     if isinstance(tensor, list):
         return [as_numpy(t) for t in tensor]
     assert isinstance(tensor, core.LoDTensor)
diff --git a/python/paddle/fluid/parallel_executor.py b/python/paddle/fluid/parallel_executor.py
index 25cc1355d..6baf64819 100644
--- a/python/paddle/fluid/parallel_executor.py
+++ b/python/paddle/fluid/parallel_executor.py
@@ -160,7 +160,7 @@ class ParallelExecutor(object):
             build_strategy, num_trainers, trainer_id)
         self.scope = scope
 
-    def run(self, fetch_list, feed=None, feed_dict=None):
+    def run(self, fetch_list, feed=None, feed_dict=None, return_numpy=True):
         """
         Run a parallel executor with fetch_list.
 
@@ -196,6 +196,8 @@ class ParallelExecutor(object):
                 to each device. Default None.
             feed_dict: Alias for feed parameter, for backward compatibility.
                 This parameter has been deprecated. Default None.
+            return_numpy(bool): Whether converts the fetched tensor to numpy.
+                Default: True.
 
         Returns:
             List: The fetched result list.
@@ -270,6 +272,9 @@ class ParallelExecutor(object):
         if self.is_dist:
             self.bcast_params()
 
+        if return_numpy:
+            return executor.as_numpy(arr)
+
         return [arr[i] for i in range(len(arr))]
 
     def bcast_params(self):
diff --git a/python/paddle/fluid/tests/unittests/parallel_executor_test_base.py b/python/paddle/fluid/tests/unittests/parallel_executor_test_base.py
index 829c5a1a5..21f2037ad 100644
--- a/python/paddle/fluid/tests/unittests/parallel_executor_test_base.py
+++ b/python/paddle/fluid/tests/unittests/parallel_executor_test_base.py
@@ -81,7 +81,6 @@ class TestParallelExecutorBase(unittest.TestCase):
         begin = time.time()
         first_loss, = run_executor(
             exe=exe, feed=feed_dict, fetch_list=[loss.name])
-        first_loss = np.array(first_loss)
 
         for i in xrange(iter):
             run_executor(exe=exe, feed=feed_dict, fetch_list=[])
@@ -94,8 +93,6 @@ class TestParallelExecutorBase(unittest.TestCase):
             print "%.4f Instance per second" % (
                 (batch_size * iter + 2) / (end - begin))
 
-        last_loss = np.array(last_loss)
-
         print first_loss, last_loss
         # self.assertGreater(first_loss[0], last_loss[0])
         return first_loss, last_loss
diff --git a/python/paddle/fluid/tests/unittests/test_parallel_executor_crf.py b/python/paddle/fluid/tests/unittests/test_parallel_executor_crf.py
index 1ea7a6a56..63fb58c69 100644
--- a/python/paddle/fluid/tests/unittests/test_parallel_executor_crf.py
+++ b/python/paddle/fluid/tests/unittests/test_parallel_executor_crf.py
@@ -169,9 +169,8 @@ class TestCRFModel(unittest.TestCase):
             data = train_data()
             for i in xrange(10):
                 cur_batch = next(data)
-                print map(np.array,
-                          pe.run(feed=feeder.feed(cur_batch),
-                                 fetch_list=[avg_cost.name]))[0]
+                print pe.run(feed=feeder.feed(cur_batch),
+                             fetch_list=[avg_cost.name])[0]
 
     @unittest.skip(reason="CI hangs")
     def test_update_sparse_parameter_all_reduce(self):
diff --git a/python/paddle/fluid/tests/unittests/test_parallel_executor_fetch_feed.py b/python/paddle/fluid/tests/unittests/test_parallel_executor_fetch_feed.py
index 79702475c..1f5d2f167 100644
--- a/python/paddle/fluid/tests/unittests/test_parallel_executor_fetch_feed.py
+++ b/python/paddle/fluid/tests/unittests/test_parallel_executor_fetch_feed.py
@@ -75,7 +75,9 @@ class TestFetchOp(unittest.TestCase):
             fetch_list.append(k)
 
         for data in train_inputs:
-            ret = pe.run(fetch_list, feed=feeder.feed(data))
+            ret = pe.run(fetch_list,
+                         feed=feeder.feed(data),
+                         return_numpy=True)
             for i in range(len(fetch_list)):
                 assert not math.isnan(np.sum(ret[i])) and \
                        not math.isinf(np.sum(ret[i]))
@@ -128,7 +130,7 @@ class TestFeedParallel(unittest.TestCase):
             use_cuda=use_cuda, loss_name=loss.name, main_program=main)
 
         for batch_id, data in enumerate(reader()):
-            loss_np = np.array(pe.run(feed=data, fetch_list=[loss.name])[0])
+            loss_np = pe.run(feed=data, fetch_list=[loss.name])[0]
             print batch_id, loss_np
             if batch_id == 2:
                 break
diff --git a/python/paddle/fluid/tests/unittests/test_parallel_executor_test_while_train.py b/python/paddle/fluid/tests/unittests/test_parallel_executor_test_while_train.py
index 31ba8c1d6..252793944 100644
--- a/python/paddle/fluid/tests/unittests/test_parallel_executor_test_while_train.py
+++ b/python/paddle/fluid/tests/unittests/test_parallel_executor_test_while_train.py
@@ -70,10 +70,9 @@ class ParallelExecutorTestingDuringTraining(unittest.TestCase):
 
             for i in xrange(5):
                 test_loss, = test_exe.run([loss.name], feed=feed_dict)
-                test_loss = np.array(test_loss)
 
                 train_loss, = train_exe.run([loss.name], feed=feed_dict)
-                train_loss = np.array(train_loss)
+
                 self.assertTrue(
                     np.allclose(
                         train_loss, test_loss, atol=1e-8),
-- 
GitLab
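
Note (not part of the patch): a minimal usage sketch of the new return_numpy flag on ParallelExecutor.run. The tiny network, the CPU_NUM setting, and the random feed data below are illustrative assumptions, not taken from this patch.

    import os
    import numpy as np
    import paddle.fluid as fluid

    # Assumption: run on two CPU "devices", the way the unit tests configure it.
    os.environ['CPU_NUM'] = '2'

    # Hypothetical network: a one-layer regressor, just to have a loss to fetch.
    x = fluid.layers.data(name='x', shape=[4], dtype='float32')
    y = fluid.layers.data(name='y', shape=[1], dtype='float32')
    pred = fluid.layers.fc(input=x, size=1)
    loss = fluid.layers.mean(fluid.layers.square_error_cost(input=pred, label=y))

    fluid.Executor(fluid.CPUPlace()).run(fluid.default_startup_program())
    pe = fluid.ParallelExecutor(use_cuda=False, loss_name=loss.name)

    feed = {'x': np.random.random((32, 4)).astype('float32'),
            'y': np.random.random((32, 1)).astype('float32')}

    # Default after this patch: fetched results come back as numpy arrays,
    # which is why the np.array(...) wrappers could be dropped from the tests.
    loss_np, = pe.run([loss.name], feed=feed)
    print(np.mean(loss_np))

    # return_numpy=False keeps the previous behaviour and returns raw LoDTensors.
    loss_tensor, = pe.run([loss.name], feed=feed, return_numpy=False)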