From 2f56d4b3d4a048b98a19872a6f69f1b8c7cf29a0 Mon Sep 17 00:00:00 2001
From: Yang Yang
Date: Tue, 19 Dec 2017 10:54:45 +0000
Subject: [PATCH] forward pass compile time

---
 paddle/operators/parallel_do_op.cc            |  6 ++--
 python/paddle/v2/fluid/framework.py           |  3 +-
 python/paddle/v2/fluid/layers/control_flow.py |  3 +-
 .../paddle/v2/fluid/tests/test_parallel_op.py | 33 +++++++++++++++++++
 4 files changed, 41 insertions(+), 4 deletions(-)
 create mode 100644 python/paddle/v2/fluid/tests/test_parallel_op.py

diff --git a/paddle/operators/parallel_do_op.cc b/paddle/operators/parallel_do_op.cc
index 4c026c2239e..bde59c7e7ad 100644
--- a/paddle/operators/parallel_do_op.cc
+++ b/paddle/operators/parallel_do_op.cc
@@ -23,9 +23,11 @@ namespace operators {
 constexpr char kInputs[] = "inputs";
 constexpr char kParameters[] = "parameters";
 constexpr char kPlaces[] = "places";
-constexpr char kParallelBlock[] = "sub_block";
+
 constexpr char kOutputs[] = "outputs";
-constexpr char kParallelScopes[] = "sub_scopes";
+constexpr char kParallelScopes[] = "parallel_scopes";
+
+constexpr char kParallelBlock[] = "sub_block";
 // #define GRAD_SUFFIX "@GRAD"
 // constexpr char kInputGrads[] = "inputs" GRAD_SUFFIX;
 // constexpr char kOutputGrads[] = "outputs" GRAD_SUFFIX;
diff --git a/python/paddle/v2/fluid/framework.py b/python/paddle/v2/fluid/framework.py
index bf0cd275b62..14e0734331a 100644
--- a/python/paddle/v2/fluid/framework.py
+++ b/python/paddle/v2/fluid/framework.py
@@ -424,7 +424,8 @@ class Operator(object):
         self.desc.check_attrs()
         no_kernel_op_set = {
             'feed', 'fetch', 'save', 'load', 'recurrent',
-            'rnn_memory_helper_grad', 'conditional_block', 'while'
+            'rnn_memory_helper_grad', 'conditional_block', 'while',
+            'parallel_do'
         }
         if type not in no_kernel_op_set:
             self.desc.infer_var_type(self.block.desc)
diff --git a/python/paddle/v2/fluid/layers/control_flow.py b/python/paddle/v2/fluid/layers/control_flow.py
index 4791d749700..09ab9726d1d 100644
--- a/python/paddle/v2/fluid/layers/control_flow.py
+++ b/python/paddle/v2/fluid/layers/control_flow.py
@@ -103,6 +103,7 @@ class ParallelDo(object):
 
     def read_input(self, var):
         self.inputs.append(var)
+        return var
 
     def write_output(self, var):
         self.outputs.append(var)
@@ -149,7 +150,7 @@
                 'places': self.places
             },
             outputs={'outputs': self.outputs,
-                     'step_scopes': [step_scope]},
+                     'parallel_scopes': [step_scope]},
             attrs={'sub_block': current_block})
 
 
diff --git a/python/paddle/v2/fluid/tests/test_parallel_op.py b/python/paddle/v2/fluid/tests/test_parallel_op.py
new file mode 100644
index 00000000000..1e643032849
--- /dev/null
+++ b/python/paddle/v2/fluid/tests/test_parallel_op.py
@@ -0,0 +1,33 @@
+import unittest
+
+import paddle.v2.fluid.layers as layers
+import paddle.v2.fluid as fluid
+from paddle.v2.fluid.framework import Program
+from paddle.v2.fluid.executor import Executor
+from paddle.v2.fluid.backward import append_backward_ops
+import numpy as np
+import paddle.v2.fluid.core as core
+
+
+class ParallelOpTest(unittest.TestCase):
+    def setUp(self):
+        x = layers.data(
+            shape=[2, 3, 4], dtype='float32', name='x', append_batch_size=False)
+
+        places = fluid.default_main_program().global_block().create_var()
+        pd = layers.ParallelDo(places=places)
+
+        with pd.do():
+            data = pd.read_input(x)
+            hidden = layers.fc(input=data, size=7)
+            pd.write_output(hidden)
+        data = pd()
+        print data
+        print fluid.default_main_program()
+
+    def test_forward(self):
+        pass
+
+
+if __name__ == '__main__':
+    unittest.main()
--
GitLab
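
For reviewers, a minimal sketch of how the ParallelDo construct exercised by
the new test is meant to be driven end to end. The Executor step at the bottom
is an assumption: this patch only covers forward-pass compile time, so actually
running the program depends on the op's runtime landing later; core.CPUPlace(),
feed, and fetch_list are the standard fluid API of this era rather than
anything added here, and the random feed data is purely illustrative.

    import numpy as np

    import paddle.v2.fluid as fluid
    import paddle.v2.fluid.core as core
    import paddle.v2.fluid.layers as layers
    from paddle.v2.fluid.executor import Executor

    # Build a program that mirrors ParallelOpTest.setUp above.
    x = layers.data(
        shape=[2, 3, 4], dtype='float32', name='x', append_batch_size=False)

    # 'places' tells parallel_do which devices to split the input across.
    places = fluid.default_main_program().global_block().create_var()
    pd = layers.ParallelDo(places=places)

    with pd.do():
        data = pd.read_input(x)    # read_input now returns the variable
        hidden = layers.fc(input=data, size=7)
        pd.write_output(hidden)
    out = pd()  # assuming pd() returns the merged output, as the test suggests

    # Hypothetical until the op's runtime is implemented:
    exe = Executor(core.CPUPlace())
    result = exe.run(fluid.default_main_program(),
                     feed={'x': np.random.random((2, 3, 4)).astype('float32')},
                     fetch_list=[out])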