diff --git a/paddle/framework/backward.cc b/paddle/framework/backward.cc index 9193a1593efadad03fceeeac9dfce98ebcacbfa5..3d81dadfc4a861eb4aad494c6bff7b8a3ed2ca45 100644 --- a/paddle/framework/backward.cc +++ b/paddle/framework/backward.cc @@ -203,7 +203,6 @@ static std::unique_ptr<OperatorBase> BackwardRecursive( } } else { std::unique_ptr<OperatorBase> grad_op(CreateGradOp(forwardOp)); - PADDLE_ENFORCE(grad_op != nullptr); ForEachVarName(grad_op->Inputs(), [&no_grad_names, &net, &grad_op]( const std::string& grad_input) { diff --git a/paddle/framework/backward_test.cc b/paddle/framework/backward_test.cc index 830d0427fa1368f4c11a9e3164caafa7e3891ff8..a9b71cd8099cb74e6e68ab8e70adf4cb5d4b9625 100644 --- a/paddle/framework/backward_test.cc +++ b/paddle/framework/backward_test.cc @@ -171,17 +171,6 @@ REGISTER_OP_WITHOUT_GRADIENT(fc, f::FcOp, f::FcOpMaker); REGISTER_OP(many_output_op, f::NOP, f::ManyOutputOpMaker, many_output_op_grad, f::NOP); -// TEST(Backward, simple_op_grad) { -// auto fwd = f::OpRegistry::CreateOp( -// "rowwise_add", {{"X", {"x"}}, {"b", {"b"}}}, {{"Out", {"out"}}}, {}); -// ASSERT_NE(fwd, nullptr); -// auto gop = f::OpRegistry::CreateGradOp(*fwd); -// ASSERT_EQ(1UL, gop->Inputs().size()); -// ASSERT_EQ("rowwise_add_grad", gop->Type()); -// ASSERT_EQ(f::GradVarName("x"), gop->Output(f::GradVarName("X"))); -// ASSERT_EQ(f::GradVarName("b"), gop->Output(f::GradVarName("b"))); -//} - TEST(Backward, simple_op_not_need_grad) { auto fwd = f::OpRegistry::CreateOp( "rowwise_add", {{"X", {"x"}}, {"b", {"b"}}}, {{"Out", {"out"}}}, {}); @@ -390,7 +379,6 @@ TEST(Backward, linear_net_intermediate_variable_has_no_grad) { + 1UL /* external output number*/ + 1UL /* number of gradient of external output*/ + 2U /* internal variable number*/); - EXPECT_EQ(grad_fc.Outputs(all).size(), 2UL /* input number of mul*/ + 2UL /* input number of rowwise_add diff --git a/paddle/framework/op_info.h b/paddle/framework/op_info.h index 
968f587b46a3ed9528819fdd31bd3ef8059c67a3..231f212fa35091cab752dc3d9bb43b1f98909426 100644 --- a/paddle/framework/op_info.h +++ b/paddle/framework/op_info.h @@ -23,8 +23,6 @@ #include "paddle/framework/type_defs.h" #include "paddle/platform/macros.h" -#include "glog/logging.h" - namespace paddle { namespace framework { diff --git a/paddle/framework/op_registry.cc b/paddle/framework/op_registry.cc index ac6aa8d28eca9fe5d16d9ea8a8dbe1a011643203..4dc83ec8fea12d3ad027fca11a736590d9a4da02 100644 --- a/paddle/framework/op_registry.cc +++ b/paddle/framework/op_registry.cc @@ -55,7 +55,6 @@ std::unique_ptr<OperatorBase> OpRegistry::CreateOp(const OpDesc& op_desc) { } std::unique_ptr<OperatorBase> OpRegistry::CreateOp(OpDescBind* op_desc) { - op_desc->Sync(); return CreateOp(op_desc->Type(), op_desc->Inputs(), op_desc->Outputs(), op_desc->GetAttrMap()); } diff --git a/paddle/operators/mean_op.cc b/paddle/operators/mean_op.cc index 339c089e87b71efc9af466d4ea5fadb1ac68c8c2..2332c9546b037c94a5a6d30319abda8e23c2b3bb 100644 --- a/paddle/operators/mean_op.cc +++ b/paddle/operators/mean_op.cc @@ -71,7 +71,6 @@ class MeanGradMaker : public framework::SingleGradOpDescMaker { } // namespace paddle namespace ops = paddle::operators; - REGISTER_OPERATOR(mean, ops::MeanOp, ops::MeanOpMaker, ops::MeanGradMaker); REGISTER_OPERATOR(mean_grad, ops::MeanGradOp); REGISTER_OP_CPU_KERNEL(mean, diff --git a/paddle/operators/softmax_with_cross_entropy_op.cc b/paddle/operators/softmax_with_cross_entropy_op.cc index 70fe429f59254515a9e5fd648f98ebe958db35c1..42c1ba6fdf1351c43ef78efaaf05c54acb54ce94 100644 --- a/paddle/operators/softmax_with_cross_entropy_op.cc +++ b/paddle/operators/softmax_with_cross_entropy_op.cc @@ -16,11 +16,6 @@ #include #include -#define DBG_LINE() \ - do { \ - std::cerr << "Run at " << __LINE__ << std::endl; \ - } while (false) - namespace paddle { namespace operators {