diff --git a/paddle/framework/grad_op_builder.cc b/paddle/framework/grad_op_builder.cc
index 1833a5463aac7620472e1508f8ef75a894fc02c6..f9b1a37c998a9a247408c782aa3dc25c290b43e3 100644
--- a/paddle/framework/grad_op_builder.cc
+++ b/paddle/framework/grad_op_builder.cc
@@ -23,7 +23,7 @@ static void TransOpArg(const OperatorBase* src_op,
                        OperatorBase::VarNameMap* vars,
                        const OpArgType& src_type, bool is_grad) {
   const auto& src_inout =
-      src_type == OpArgType::IN ? src_op->inputs_ : src_op->outputs_;
+      src_type == OpArgType::IN ? src_op->Inputs() : src_op->Outputs();
   auto& dst_inout = *vars;
 
   const OpProto& proto = OpProtos().at(src_op->Type());
@@ -39,13 +39,12 @@ static void TransOpArg(const OperatorBase* src_op,
       dst_inout[dst_name].emplace_back(s);
     }
   }
-  return dst_inout;
 }
 
 OperatorBase* BuildGradOp(const OperatorBase* op) {
-  auto gop_type_it = OpRegistry::grad_ops().find(op->type_);
+  auto gop_type_it = OpRegistry::grad_ops().find(op->Type());
   PADDLE_ENFORCE(gop_type_it != OpRegistry::grad_ops().end(),
-                 "Operator %s do not register gradient type", op->type_);
+                 "Operator %s do not register gradient type", op->Type());
   auto& grad_op_type = gop_type_it->second;
   OperatorBase::VarNameMap inputs;
   OperatorBase::VarNameMap outputs;
@@ -56,9 +55,9 @@ OperatorBase* BuildGradOp(const OperatorBase* op) {
   auto gop_it = OpRegistry::op_creators().find(grad_op_type);
   PADDLE_ENFORCE(gop_it != OpRegistry::op_creators().end(),
                  "Operator %s 's Gradient %s's creator cannot be found",
-                 op->type_, grad_op_type);
+                 op->Type(), grad_op_type);
 
-  return gop_it->second(grad_op_type, inputs, outputs, op->attrs_);
+  return gop_it->second(grad_op_type, inputs, outputs, op->Attrs());
 }
 
 }  // namespace framework
diff --git a/paddle/framework/grad_op_builder_test.cc b/paddle/framework/grad_op_builder_test.cc
index ebaf84545fce0d281d8821861264cddc8854893d..ff1473d32755241cc635d69b2b416be641d42243 100644
--- a/paddle/framework/grad_op_builder_test.cc
+++ b/paddle/framework/grad_op_builder_test.cc
@@ -52,8 +52,8 @@ TEST(GradOpBuilder, AddTwo) {
       "add_two", {{"X", {"x"}}, {"Y", {"y"}}}, {{"Out", {"out"}}}, {}));
   std::shared_ptr<f::OperatorBase> grad_add_op =
       f::OpRegistry::CreateGradOp(*add_op);
-  EXPECT_EQ(grad_add_op->inputs_.size(), 4UL);
-  EXPECT_EQ(grad_add_op->outputs_.size(), 2UL);
+  EXPECT_EQ(grad_add_op->Inputs().size(), 4UL);
+  EXPECT_EQ(grad_add_op->Outputs().size(), 2UL);
   EXPECT_EQ(grad_add_op->Input("X"), "x");
   EXPECT_EQ(grad_add_op->Input("Y"), "y");
   EXPECT_EQ(grad_add_op->Input("Out"), "out");
@@ -76,7 +76,7 @@ TEST(GradOpBuilder, MutiInOut) {
   std::shared_ptr<f::OperatorBase> grad_test_op =
       f::OpRegistry::CreateGradOp(*test_op);
 
-  ASSERT_EQ(grad_test_op->inputs_.size(), 3UL + 2UL + 2UL);
+  ASSERT_EQ(grad_test_op->Inputs().size(), 3UL + 2UL + 2UL);
   EXPECT_EQ(grad_test_op->Input("In1"), "in1");
   EXPECT_EQ(grad_test_op->Inputs("In2_mult"),
             std::vector<std::string>({"in2_1", "in2_2", "in2_3"}));
@@ -90,7 +90,7 @@ TEST(GradOpBuilder, MutiInOut) {
             std::vector<std::string>(
                 {f::GradVarName("out2_1"), f::GradVarName("out2_2")}));
 
-  ASSERT_EQ(grad_test_op->outputs_.size(), 3UL);
+  ASSERT_EQ(grad_test_op->Outputs().size(), 3UL);
   EXPECT_EQ(grad_test_op->Output(f::GradVarName("In1")), f::GradVarName("in1"));
   EXPECT_EQ(grad_test_op->Outputs(f::GradVarName("In2_mult")),
             std::vector<std::string>({f::GradVarName("in2_1"),
@@ -109,7 +109,7 @@ TEST(GradOpBuilder, IOIgnoredInGradient) {
       f::OpRegistry::CreateGradOp(*test_op);
 
   // 'In2' and 'Out2' are ignored in gradient calculating
-  ASSERT_EQ(grad_test_op->inputs_.size(), 2UL + 1UL + 2UL);
+  ASSERT_EQ(grad_test_op->Inputs().size(), 2UL + 1UL + 2UL);
   EXPECT_EQ(grad_test_op->Input("In1"), "in1");
   EXPECT_EQ(grad_test_op->Inputs("In3_mult"),
             std::vector<std::string>({"in3_1", "in3_2"}));
@@ -121,7 +121,7 @@ TEST(GradOpBuilder, IOIgnoredInGradient) {
   EXPECT_EQ(grad_test_op->Input(f::GradVarName("Out2")),
             f::GradVarName("out2"));
 
-  ASSERT_EQ(grad_test_op->outputs_.size(), 3UL);
+  ASSERT_EQ(grad_test_op->Outputs().size(), 3UL);
   EXPECT_EQ(grad_test_op->Output(f::GradVarName("In1")), f::GradVarName("in1"));
   EXPECT_EQ(grad_test_op->Outputs(f::GradVarName("In2_mult")),
             std::vector<std::string>(
diff --git a/paddle/framework/pybind.cc b/paddle/framework/pybind.cc
index 07b42c83717652bdf0120b3004f39ac7f7a98d06..e599b5daa0f34a4c4219458169f7aeb240d19477 100644
--- a/paddle/framework/pybind.cc
+++ b/paddle/framework/pybind.cc
@@ -53,15 +53,15 @@ void ExposeOperator(ClassType &m) {
       .def("run", &ClassType::type::Run)
       .def("type",
           [](const typename ClassType::type &op) -> std::string {
-            return op.type_;
+            return op.Type();
           })
      .def("outputs",
           [](const typename ClassType::type &op)
              -> std::map<std::string, std::vector<std::string>> {
-               return op.outputs_;
+               return op.Outputs();
             })
      .def("inputs",
-          [](const typename ClassType::type &op) { return op.inputs_; })
+          [](const typename ClassType::type &op) { return op.Inputs(); })
      .def("__str__", &ClassType::type::DebugString)
      .def("no_intermediate_outputs",
           [](const typename ClassType::type &op) {
@@ -229,7 +229,7 @@ All parameter, weight, gradient are variables in Paddle.
   net.def_static("create",
                  []() -> std::shared_ptr<operators::NetOp> {
                    auto retv = std::make_shared<operators::NetOp>();
-                   retv->type_ = "plain_net";
+                   retv->SetType("plain_net");
                    return retv;
                  })
       .def("add_op", &operators::NetOp::AddOp)
diff --git a/paddle/operators/net_op_test.cc b/paddle/operators/net_op_test.cc
index f7aa56262ef71c24bf668950f6e9914e5f96ff70..0acde5a90d4213c1bac2845e646d309af92f5969 100644
--- a/paddle/operators/net_op_test.cc
+++ b/paddle/operators/net_op_test.cc
@@ -56,8 +56,8 @@ TEST(OpKernel, all) {
   net->CompleteAddOp();
 
   AssertSameVectorWithoutOrder({"x", "w1", "b1", "w2", "b2"},
-                               net->inputs_.at(NetOp::kAll));
-  AssertSameVectorWithoutOrder({"y", "z"}, net->outputs_.at(NetOp::kAll));
+                               net->Inputs(NetOp::kAll));
+  AssertSameVectorWithoutOrder({"y", "z"}, net->Outputs(NetOp::kAll));
 
   auto final_outs = net->OutputVars(false);
 
diff --git a/paddle/operators/recurrent_op.cc b/paddle/operators/recurrent_op.cc
index 5ddee75581824996fd312f8ddf13007759fd9a67..d81cc89ae3b01dc0f1d4bd38b9f75e8a61dbff26 100644
--- a/paddle/operators/recurrent_op.cc
+++ b/paddle/operators/recurrent_op.cc
@@ -82,14 +82,14 @@ void RecurrentAlgorithm::CreateScopes(const Scope& scope) const {
   PADDLE_ENFORCE(net_var != nullptr, "no stepnet called %s in scope",
                  arg_->step_net);
   auto net_op = net_var->GetMutable<NetOp>();
-  PADDLE_ENFORCE(!net_op->outputs_.empty(), "net_op has no outputs");
+  PADDLE_ENFORCE(!net_op->Outputs().empty(), "net_op has no outputs");
 
   if (seq_len_ > step_scopes->size()) {
     for (size_t i = step_scopes->size(); i < seq_len_; ++i) {
       auto& step_scope = scope.NewScope();
 
       // create step net's temp inputs
-      for (auto& input : net_op->inputs_) {
+      for (auto& input : net_op->Inputs()) {
         // the weight are located in parent scope
         for (auto& var_name : input.second) {
           if (!step_scope.FindVar(var_name)) {
@@ -98,7 +98,7 @@ void RecurrentAlgorithm::CreateScopes(const Scope& scope) const {
         }
       }
       // create stepnet's outputs
-      for (const auto& output : net_op->outputs_) {
+      for (const auto& output : net_op->Outputs()) {
         for (auto& var_name : output.second) {
           step_scope.NewVar(var_name);
         }
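
Note on the API used above: the patch consistently replaces direct access to the public members type_, inputs_, outputs_ and attrs_ with the accessors Type(), Inputs(), Outputs(), Attrs() and the setter SetType(). The following is a minimal, self-contained sketch of that accessor pattern; the class MiniOp and its simplified AttributeMap are hypothetical stand-ins for illustration and are not the real declarations in paddle/framework/operator.h.

// Hypothetical sketch of the accessor-style interface the diff migrates to.
// MiniOp is a stand-in, not the real paddle::framework::OperatorBase.
#include <iostream>
#include <map>
#include <string>
#include <vector>

class MiniOp {
 public:
  using VarNameMap = std::map<std::string, std::vector<std::string>>;
  using AttributeMap = std::map<std::string, int>;  // simplified attribute type

  // Read-only accessors replace direct reads of type_/inputs_/outputs_/attrs_.
  const std::string& Type() const { return type_; }
  const VarNameMap& Inputs() const { return inputs_; }
  const VarNameMap& Outputs() const { return outputs_; }
  const AttributeMap& Attrs() const { return attrs_; }

  // Mutation goes through explicit setters, as in retv->SetType("plain_net").
  void SetType(const std::string& type) { type_ = type; }
  void AddInput(const std::string& param, const std::string& var) {
    inputs_[param].push_back(var);
  }
  void AddOutput(const std::string& param, const std::string& var) {
    outputs_[param].push_back(var);
  }

 private:
  // Members are private, so every caller must go through the accessors.
  std::string type_;
  VarNameMap inputs_;
  VarNameMap outputs_;
  AttributeMap attrs_;
};

int main() {
  MiniOp op;
  op.SetType("plain_net");
  op.AddInput("X", "x");
  op.AddOutput("Out", "out");
  // Same call style as op->Type() / op->Inputs() in the patch above.
  std::cout << op.Type() << " has " << op.Inputs().size()
            << " input slot(s) and " << op.Outputs().size()
            << " output slot(s)\n";
  return 0;
}

Keeping the members private behind const accessors lets later refactors change the underlying storage without touching call sites such as the tests, pybind bindings, and recurrent_op code patched above.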