diff --git a/paddle/fluid/pybind/op_function_generator.cc b/paddle/fluid/pybind/op_function_generator.cc
index bf3c77843219c75f9cf4a75f340eaa71f972991d..bab4ac36353b863e7f3686aea3568ac5c9a61654 100644
--- a/paddle/fluid/pybind/op_function_generator.cc
+++ b/paddle/fluid/pybind/op_function_generator.cc
@@ -65,6 +65,7 @@ std::map<std::string, std::set<std::string>> op_ins_map = {
     {"box_coder", {"PriorBox", "PriorBoxVar", "TargetBox"}},
     {"momentum", {"Param", "Grad", "Velocity", "LearningRate"}},
     {"rnn", {"Input", "PreState", "WeightList", "SequenceLength"}},
+    {"run_program", {"X", "Params"}},
 };
 
 // NOTE(zhiqiu): Like op_ins_map.
@@ -98,6 +99,7 @@ std::map<std::string, std::set<std::string>> op_outs_map = {
     {"rnn", {"DropoutState", "Reserve", "Out", "State"}},
     {"lamb",
      {"ParamOut", "Moment1Out", "Moment2Out", "Beta1PowOut", "Beta2PowOut"}},
+    {"run_program", {"DOut"}},
 };
 
 // NOTE(zhiqiu): Commonly, the outputs in auto-generated OP function are
@@ -148,6 +150,7 @@ std::map<std::string, std::set<std::string>> op_passing_outs_map = {
     {"lamb",
      {"ParamOut", "Moment1Out", "Moment2Out", "Beta1PowOut", "Beta2PowOut"}},
     {"rnn", {"DropoutState"}},
+    {"run_program", {"Out", "DOut", "OutScope"}},
 };
 
 // NOTE(pangyoki): Tensor View Strategy.
diff --git a/python/paddle/fluid/dygraph/dygraph_to_static/partial_program.py b/python/paddle/fluid/dygraph/dygraph_to_static/partial_program.py
index 719b06c659f9f17d619318254007089b586fd36f..f2c9180986975596017d94c033bdb2b9391fb3a3 100644
--- a/python/paddle/fluid/dygraph/dygraph_to_static/partial_program.py
+++ b/python/paddle/fluid/dygraph/dygraph_to_static/partial_program.py
@@ -221,23 +221,15 @@ class PartialProgramLayer(layers.Layer):
 
     def forward(self, inputs):
         in_vars, out_vars, tmp_scope_vec = self._prepare(inputs)
-        framework._dygraph_tracer().trace_op(
-            type='run_program',
-            inputs={
-                'X': valid_vars(in_vars),
-                'Params': valid_vars(self._params)
-            },
-            outputs={
-                'Out': valid_vars(out_vars),
-                'OutScope': tmp_scope_vec,
-                'DOut': valid_vars(self._double_grads)
-            },
-            attrs={
-                'global_block': self.program.desc.block(0),
-                'start_op_index': 0,
-                'end_op_index': self._infer_program.desc.block(0).op_size(),
-                'is_test': not self.training
-            })
+
+        attrs = ('global_block', self.program.desc.block(0), 'start_op_index',
+                 0, 'end_op_index', self._infer_program.desc.block(0).op_size(),
+                 'is_test', not self.training)
+        core.ops.run_program(
+            valid_vars(in_vars),
+            valid_vars(self._params),
+            valid_vars(out_vars), tmp_scope_vec,
+            valid_vars(self._double_grads), *attrs)
         restored_nest_out = self._restore_out(out_vars)
         return self._remove_no_value(restored_nest_out)
 
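
For reference, the new code path passes attributes to core.ops.run_program as a flat positional tail of alternating names and values, instead of the attrs dict that the removed trace_op call took. The snippet below is a standalone illustration only, not part of the patch and not Paddle's actual binding code; flat_attrs_to_dict is a hypothetical helper introduced just to show how the flat tuple corresponds to the old dict form.

    # Illustration only: 'flat_attrs_to_dict' is a hypothetical helper, not a
    # Paddle API. It shows how the flat ('name', value, 'name', value, ...)
    # tuple passed to core.ops.run_program above maps back to the attrs dict
    # that the removed trace_op call used.
    def flat_attrs_to_dict(*flat_attrs):
        if len(flat_attrs) % 2 != 0:
            raise ValueError("expected alternating attribute names and values")
        # Pair every attribute name with the value that follows it.
        return {flat_attrs[i]: flat_attrs[i + 1]
                for i in range(0, len(flat_attrs), 2)}

    # Example with plain stand-in values (the real call passes a block desc
    # and an op count rather than these placeholders):
    attrs = ('global_block', 'block_0', 'start_op_index', 0,
             'end_op_index', 42, 'is_test', True)
    print(flat_attrs_to_dict(*attrs))
    # {'global_block': 'block_0', 'start_op_index': 0, 'end_op_index': 42, 'is_test': True}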