diff --git a/paddle/fluid/framework/ir/graph_helper.cc b/paddle/fluid/framework/ir/graph_helper.cc
index 28a37f331c100695f0ffec7288db84f4493d68a0..12ce99c8788625e2aae6e07abdea565bb2c2ebb9 100644
--- a/paddle/fluid/framework/ir/graph_helper.cc
+++ b/paddle/fluid/framework/ir/graph_helper.cc
@@ -31,10 +31,10 @@ namespace paddle {
 namespace framework {
 namespace ir {
 namespace {
-void SortHelper(
-    const std::map<ir::Node *, std::unordered_set<ir::Node *>> &adj_list,
-    ir::Node *node, std::unordered_set<ir::Node *> *visited,
-    std::vector<ir::Node *> *ret) {
+void SortHelper(const std::map<ir::Node *, std::set<ir::Node *, ir::NodeComp>,
+                               ir::NodeComp> &adj_list,
+                ir::Node *node, std::unordered_set<ir::Node *> *visited,
+                std::vector<ir::Node *> *ret) {
   visited->insert(node);
 
   for (auto adj : adj_list.at(node)) {
@@ -50,7 +50,8 @@ void SortHelper(
 
 bool HasCircleHelper(
     ir::Node *node,
-    const std::map<ir::Node *, std::unordered_set<ir::Node *>> &adj_list,
+    const std::map<ir::Node *, std::set<ir::Node *, ir::NodeComp>, ir::NodeComp>
+        &adj_list,
     std::unordered_set<ir::Node *> *visited,
     std::unordered_set<ir::Node *> *in_trace,
     std::vector<std::vector<ir::Node *>> *circles) {
@@ -84,7 +85,8 @@ bool HasCircleHelper(
 }
 
 bool HasCircleInternal(
-    const std::map<ir::Node *, std::unordered_set<ir::Node *>> &adj_list,
+    const std::map<ir::Node *, std::set<ir::Node *, ir::NodeComp>, ir::NodeComp>
+        &adj_list,
     std::vector<std::vector<ir::Node *>> *circles) {
   std::unordered_set<ir::Node *> visited;
   std::unordered_set<ir::Node *> in_trace;
@@ -107,8 +109,8 @@ bool FindCircleSubGraph(const Graph &graph,
 }
 
 std::vector<ir::Node *> TopologySortOperations(const Graph &graph) {
-  std::map<ir::Node *, std::unordered_set<ir::Node *>> adj_list =
-      BuildOperationAdjList(graph);
+  std::map<ir::Node *, std::set<ir::Node *, ir::NodeComp>, ir::NodeComp>
+      adj_list = BuildOperationAdjList(graph);
   PADDLE_ENFORCE(!HasCircleInternal(adj_list, nullptr));
   std::unordered_set<ir::Node *> visited;
   std::vector<ir::Node *> ret;
@@ -117,34 +119,30 @@ std::vector<ir::Node *> TopologySortOperations(const Graph &graph) {
       SortHelper(adj_list, adj.first, &visited, &ret);
     }
   }
+
   return ret;
 }
 
 // Build operator inlink edge table.
-std::map<ir::Node *, std::unordered_set<ir::Node *>> BuildOperationAdjList(
-    const Graph &graph) {
-  std::map<ir::Node *, std::unordered_set<ir::Node *>> adj_list;
+std::map<ir::Node *, std::set<ir::Node *, ir::NodeComp>, ir::NodeComp>
+BuildOperationAdjList(const Graph &graph) {
+  std::map<ir::Node *, std::set<ir::Node *, ir::NodeComp>, ir::NodeComp>
+      adj_list;
 
   for (auto &n : graph.Nodes()) {
     if (!n->IsOp()) continue;
     if (adj_list.find(n) == adj_list.end()) {
-      adj_list[n] = std::unordered_set<ir::Node *>();
+      adj_list[n] = std::set<ir::Node *, ir::NodeComp>();
     }
-    std::vector<ir::Node *> nodes;
     for (auto &var : n->inputs) {
       for (auto &adj_n : var->inputs) {
         PADDLE_ENFORCE(adj_n->NodeType() == ir::Node::Type::kOperation);
         VLOG(4) << "adj " << adj_n->Name() << reinterpret_cast<void *>(adj_n)
                 << " -> " << n->Name() << reinterpret_cast<void *>(n)
                 << " via " << var->Name() << reinterpret_cast<void *>(var);
-        nodes.push_back(adj_n);
+        adj_list[n].insert(adj_n);
      }
    }
-    std::sort(nodes.begin(), nodes.end(), [](ir::Node *node1, ir::Node *node2) {
-      return node1->id() > node2->id();
-    });
-    adj_list[n].insert(std::make_move_iterator(nodes.begin()),
-                       std::make_move_iterator(nodes.end()));
  }
  return adj_list;
 }
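The graph_helper change replaces pointer-keyed unordered containers with containers ordered by node id, so TopologySortOperations no longer depends on heap addresses and produces the same order on every run. A minimal standalone sketch of the technique (toy Node type and hand-built graph, not Paddle's ir::Node):

    #include <iostream>
    #include <map>
    #include <set>
    #include <vector>

    // Toy stand-ins for ir::Node / ir::NodeComp, for illustration only.
    struct Node {
      int id;
      std::vector<Node *> inputs;  // operator nodes feeding this one
    };
    struct NodeComp {
      bool operator()(Node *const &a, Node *const &b) const {
        return a->id < b->id;
      }
    };
    using AdjList = std::map<Node *, std::set<Node *, NodeComp>, NodeComp>;

    // DFS post-order emission yields a topological order; because both the
    // map and the sets iterate in id order, the result is run-to-run stable.
    void SortHelper(const AdjList &adj, Node *n,
                    std::set<Node *, NodeComp> *visited,
                    std::vector<Node *> *ret) {
      visited->insert(n);
      for (Node *in : adj.at(n)) {
        if (!visited->count(in)) SortHelper(adj, in, visited, ret);
      }
      ret->push_back(n);
    }

    int main() {
      Node a{0, {}}, b{1, {&a}}, c{2, {&a, &b}};
      AdjList adj;
      for (Node *n : {&c, &b, &a}) {
        auto &edges = adj[n];
        for (Node *in : n->inputs) edges.insert(in);
      }
      std::set<Node *, NodeComp> visited;
      std::vector<Node *> order;
      for (auto &kv : adj) {
        if (!visited.count(kv.first)) SortHelper(adj, kv.first, &visited, &order);
      }
      for (Node *n : order) std::cout << n->id << " ";  // prints: 0 1 2
      std::cout << "\n";
    }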
diff --git a/paddle/fluid/framework/ir/graph_helper.h b/paddle/fluid/framework/ir/graph_helper.h
index 214de9ec7d85aee6021b18866295777e317aa79d..849a9c3be6904f3f9c3669d8fc9d750154863031 100644
--- a/paddle/fluid/framework/ir/graph_helper.h
+++ b/paddle/fluid/framework/ir/graph_helper.h
@@ -16,6 +16,7 @@ limitations under the License. */
 
 #include <map>
 #include <memory>
+#include <set>
 #include <string>
 
 #include "paddle/fluid/framework/ir/graph.h"
@@ -25,6 +26,13 @@ namespace paddle {
 namespace framework {
 namespace ir {
 
+// Compare nodes via node id.
+struct NodeComp {
+  bool operator()(ir::Node *const &node1, ir::Node *const &node2) const {
+    return node1->id() < node2->id();
+  }
+};
+
 // Test if the graph contains circle.
 bool HasCircle(const Graph &graph);
@@ -57,8 +65,8 @@ std::vector<ir::Node *> TopologyVarientSort(const Graph &graph,
                                             SortKind sort_kind);
 
 void CleanIndividualNodes(Graph *graph);
 
 // Build an adjacency list of operations for the `graph`.
-std::map<ir::Node *, std::unordered_set<ir::Node *>> BuildOperationAdjList(
-    const Graph &graph);
+std::map<ir::Node *, std::set<ir::Node *, ir::NodeComp>, ir::NodeComp>
+BuildOperationAdjList(const Graph &graph);
 
 template <typename T>
 std::vector<T *> FilterByNodeWrapper(const Graph &graph) {
diff --git a/paddle/fluid/framework/op_desc.cc b/paddle/fluid/framework/op_desc.cc
index e6f5b15af8cd440a9304235acfe62787c5f1b134..1ea93b7638a85e67bcc85a0c0e130d636938d6c5 100644
--- a/paddle/fluid/framework/op_desc.cc
+++ b/paddle/fluid/framework/op_desc.cc
@@ -241,6 +241,7 @@ OpDesc::OpDesc(const std::string &type, const VariableNameMap &inputs,
   outputs_ = outputs;
   attrs_ = attrs;
   need_update_ = true;
+  block_ = nullptr;
 }
 
 OpDesc::OpDesc(const OpDesc &other, BlockDesc *block) {
diff --git a/paddle/fluid/inference/api/analysis_predictor.cc b/paddle/fluid/inference/api/analysis_predictor.cc
index 6942604b0723f8665f0e8b058d48a5356a1a01f4..0155609a029664da2c3d4c90a152ec556927c32d 100644
--- a/paddle/fluid/inference/api/analysis_predictor.cc
+++ b/paddle/fluid/inference/api/analysis_predictor.cc
@@ -259,6 +259,9 @@ bool AnalysisPredictor::SetFeed(const std::vector<PaddleTensor> &inputs,
       return false;
     }
 
+    PADDLE_ENFORCE_NOT_NULL(input_ptr);
+    PADDLE_ENFORCE_NOT_NULL(inputs[i].data.data());
+
     if (platform::is_cpu_place(place_)) {
       // TODO(panyx0718): Init LoDTensor from existing memcpy to save a copy.
       std::memcpy(static_cast<void *>(input_ptr), inputs[i].data.data(),
diff --git a/paddle/fluid/inference/api/api.cc b/paddle/fluid/inference/api/api.cc
index 7d57b6ec74468dbdb0519f85140629a0ac01c18d..fc2d7b48c2a1f89232dcb96d1899667230e2ddda 100644
--- a/paddle/fluid/inference/api/api.cc
+++ b/paddle/fluid/inference/api/api.cc
@@ -54,6 +54,7 @@ PaddleBuf &PaddleBuf::operator=(const PaddleBuf &other) {
     memory_owned_ = other.memory_owned_;
   } else {
     Resize(other.length());
+    PADDLE_ENFORCE(!(other.length() > 0 && other.data() == nullptr));
     memcpy(data_, other.data(), other.length());
     length_ = other.length();
     memory_owned_ = true;
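The inference-API changes above share one pattern: validate a pointer before handing it to memcpy. A minimal sketch of the PaddleBuf-style guard in a copy-assignment operator (toy Buf class; plain assert stands in for PADDLE_ENFORCE):

    #include <cassert>
    #include <cstdlib>
    #include <cstring>

    // Toy buffer mirroring the PaddleBuf copy-assignment pattern (hypothetical).
    class Buf {
     public:
      Buf() = default;
      explicit Buf(std::size_t n) : length_(n) { Resize(n); }
      Buf(const Buf &) = delete;  // keep the sketch to assignment only
      Buf &operator=(const Buf &other) {
        if (this == &other) return *this;
        Resize(other.length_);
        // Guard added in api.cc: a non-empty source must carry a valid pointer.
        assert(!(other.length_ > 0 && other.data_ == nullptr));
        if (other.length_ > 0) std::memcpy(data_, other.data_, other.length_);
        length_ = other.length_;
        return *this;
      }
      ~Buf() { std::free(data_); }

     private:
      void Resize(std::size_t n) {
        data_ = std::realloc(data_, n);
        assert(n == 0 || data_ != nullptr);  // fail fast on allocation failure
      }
      void *data_ = nullptr;
      std::size_t length_ = 0;
    };

    int main() {
      Buf src(16), dst;
      dst = src;  // guard passes: 16 bytes with a valid source pointer
    }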
diff --git a/paddle/fluid/inference/api/api_impl.cc b/paddle/fluid/inference/api/api_impl.cc
index 54f40563c3662af24e794422be4d3262d86c76a7..56996c5cff88f5b4a9094291a09996f8b8d70a23 100644
--- a/paddle/fluid/inference/api/api_impl.cc
+++ b/paddle/fluid/inference/api/api_impl.cc
@@ -169,6 +169,7 @@ std::unique_ptr<PaddlePredictor> NativePaddlePredictor::Clone() {
   std::unique_ptr<PaddlePredictor> cls(new NativePaddlePredictor(config_));
   // Hot fix the bug that result diff in multi-thread.
   // TODO(Superjomn) re-implement a real clone here.
+  PADDLE_ENFORCE_NOT_NULL(dynamic_cast<NativePaddlePredictor *>(cls.get()));
   if (!dynamic_cast<NativePaddlePredictor *>(cls.get())->Init(nullptr)) {
     LOG(ERROR) << "fail to call Init";
     return nullptr;
@@ -210,6 +211,8 @@ bool NativePaddlePredictor::SetFeed(const std::vector<PaddleTensor> &inputs,
       return false;
     }
 
+    PADDLE_ENFORCE_NOT_NULL(input_ptr);
+    PADDLE_ENFORCE_NOT_NULL(inputs[i].data.data());
     if (platform::is_cpu_place(place_)) {
       // TODO(panyx0718): Init LoDTensor from existing memcpy to save a copy.
       std::memcpy(static_cast<void *>(input_ptr), inputs[i].data.data(),
@@ -316,6 +319,8 @@ std::unique_ptr<PaddlePredictor> CreatePaddlePredictor<
   }
 
   std::unique_ptr<PaddlePredictor> predictor(new NativePaddlePredictor(config));
+  PADDLE_ENFORCE_NOT_NULL(
+      dynamic_cast<NativePaddlePredictor *>(predictor.get()));
   if (!dynamic_cast<NativePaddlePredictor *>(predictor.get())->Init(nullptr)) {
     return nullptr;
   }
diff --git a/paddle/fluid/inference/tests/api/analyzer_seq_conv1_tester.cc b/paddle/fluid/inference/tests/api/analyzer_seq_conv1_tester.cc
index 9f23b9f037bcaeb758312d011067ae29c82e73cd..5ee848c3cfa2117b2adeab5e563c5d07ce1d76ca 100644
--- a/paddle/fluid/inference/tests/api/analyzer_seq_conv1_tester.cc
+++ b/paddle/fluid/inference/tests/api/analyzer_seq_conv1_tester.cc
@@ -47,6 +47,7 @@ struct DataRecord {
       num_lines++;
       std::vector<std::string> data;
       split(line, '\t', &data);
+      PADDLE_ENFORCE(data.size() >= 4);
       // load title1 data
       std::vector<int64_t> title1_data;
       split_to_int64(data[0], ' ', &title1_data);
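The tester fix guards the fixed-column accesses data[0]..data[3] against malformed input lines. A small self-contained sketch of the same defensive parsing (hypothetical split helper modeled on the testers'; assert stands in for PADDLE_ENFORCE):

    #include <cassert>
    #include <sstream>
    #include <string>
    #include <vector>

    // Hypothetical helper modeled on the testers' split(): break a line on a
    // delimiter into fields.
    void split(const std::string &line, char delim,
               std::vector<std::string> *out) {
      std::stringstream ss(line);
      for (std::string field; std::getline(ss, field, delim);) {
        out->push_back(field);
      }
    }

    int main() {
      std::vector<std::string> data;
      split("title1\ttitle2\ttitle3\tsearch", '\t', &data);
      // Guard before indexing fixed columns, as the seq_conv1 tester now does;
      // a malformed line would otherwise read past the end of `data`.
      assert(data.size() >= 4);
      const std::string &title1 = data[0];
      const std::string &search = data[3];
      (void)title1;
      (void)search;
    }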
diff --git a/paddle/fluid/inference/tests/api/analyzer_transformer_tester.cc b/paddle/fluid/inference/tests/api/analyzer_transformer_tester.cc
index bd4f1b61973fb0de06dcc288e329c94756d5ed47..a23297f29cf65d891f530850ffd184aa58e10886 100644
--- a/paddle/fluid/inference/tests/api/analyzer_transformer_tester.cc
+++ b/paddle/fluid/inference/tests/api/analyzer_transformer_tester.cc
@@ -214,28 +214,23 @@ TEST(Analyzer_Transformer, fuse_statis) {
 }
 
 // Compare result of NativeConfig and AnalysisConfig
-// void compare(bool use_mkldnn = false) {
-//   AnalysisConfig cfg;
-//   SetConfig(&cfg);
-//   if (use_mkldnn) {
-//     cfg.EnableMKLDNN();
-//   }
-//
-//   std::vector<std::vector<PaddleTensor>> input_slots_all;
-//   SetInput(&input_slots_all);
-//   CompareNativeAndAnalysis(
-//       reinterpret_cast<const PaddlePredictor::Config *>(&cfg),
-//       input_slots_all);
-// }
-
-// TODO(yihuaxu):
-// Disable compare and compare_mkldnn temporary, see
-// https://github.com/paddlePaddle/Paddle/issues/16316 for details.
-// TEST(Analyzer_Transformer, compare) { compare(); }
-// #ifdef PADDLE_WITH_MKLDNN
-// TEST(Analyzer_Transformer, compare_mkldnn) { compare(true /* use_mkldnn */);
-// }
-// #endif
+void compare(bool use_mkldnn = false) {
+  AnalysisConfig cfg;
+  SetConfig(&cfg);
+  if (use_mkldnn) {
+    cfg.EnableMKLDNN();
+  }
+
+  std::vector<std::vector<PaddleTensor>> input_slots_all;
+  SetInput(&input_slots_all);
+  CompareNativeAndAnalysis(
+      reinterpret_cast<const PaddlePredictor::Config *>(&cfg), input_slots_all);
+}
+
+TEST(Analyzer_Transformer, compare) { compare(); }
+#ifdef PADDLE_WITH_MKLDNN
+TEST(Analyzer_Transformer, compare_mkldnn) { compare(true /* use_mkldnn */); }
+#endif
 
 }  // namespace inference
 }  // namespace paddle
diff --git a/paddle/fluid/op_use_default_grad_op_maker.spec b/paddle/fluid/op_use_default_grad_op_maker.spec
index 21a25ce7d5e2bad172cf50cee6138ef4b44b07c1..63eaa676a43fc784dce2437ca15bc85e2295dbb7 100644
--- a/paddle/fluid/op_use_default_grad_op_maker.spec
+++ b/paddle/fluid/op_use_default_grad_op_maker.spec
@@ -29,6 +29,8 @@ pool3d
 prelu
 quantize
 rank_loss
+reduce_all
+reduce_any
 reduce_max
 reduce_mean
 reduce_min
diff --git a/paddle/fluid/operators/detection/gpc.cc b/paddle/fluid/operators/detection/gpc.cc
index 7c0823c0487d39eece5be08322e7d182b931ba3c..f46aaf7d0a7b2d48f18ba6cccb555bbb691ad353 100644
--- a/paddle/fluid/operators/detection/gpc.cc
+++ b/paddle/fluid/operators/detection/gpc.cc
@@ -24,6 +24,7 @@
 **/
 
 #include "paddle/fluid/operators/detection/gpc.h"
+#include "paddle/fluid/platform/enforce.h"
 
 namespace gpc {
 
@@ -689,6 +690,7 @@ static bbox *create_contour_bboxes(gpc_polygon *p) {
   gpc_malloc<bbox>(box, p->num_contours * sizeof(bbox),
                    const_cast<char *>("Bounding box creation"));
+  PADDLE_ENFORCE_NOT_NULL(box);
 
   /* Construct contour bounding boxes */
   for (c = 0; c < p->num_contours; c++) {
@@ -852,6 +854,7 @@ void gpc_add_contour(gpc_polygon *p, gpc_vertex_list *new_contour, int hole) {
   /* Create an extended hole array */
   gpc_malloc<int>(extended_hole, (p->num_contours + 1) * sizeof(int),
                   const_cast<char *>("contour hole addition"));
+  PADDLE_ENFORCE_NOT_NULL(extended_hole);
 
   /* Create an extended contour array */
   gpc_malloc<gpc_vertex_list>(extended_contour,
@@ -969,6 +972,7 @@ void gpc_polygon_clip(gpc_op op, gpc_polygon *subj, gpc_polygon *clip,
   /* Build scanbeam table from scanbeam tree */
   gpc_malloc<double>(sbt, sbt_entries * sizeof(double),
                      const_cast<char *>("sbt creation"));
+  PADDLE_ENFORCE_NOT_NULL(sbt);
   build_sbt(&scanbeam, sbt, sbtree);
   scanbeam = 0;
   free_sbtree(&sbtree);
@@ -1604,6 +1608,7 @@ void gpc_tristrip_clip(gpc_op op, gpc_polygon *subj, gpc_polygon *clip,
   /* Build scanbeam table from scanbeam tree */
   gpc_malloc<double>(sbt, sbt_entries * sizeof(double),
                      const_cast<char *>("sbt creation"));
+  PADDLE_ENFORCE_NOT_NULL(sbt);
   build_sbt(&scanbeam, sbt, sbtree);
   scanbeam = 0;
   free_sbtree(&sbtree);
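gpc_malloc forwards to malloc, which returns a null pointer on failure; the added checks fail fast before the buffer's first use instead of dereferencing null deep inside the clipping code. A sketch of the pattern (hypothetical checked_malloc helper; assert stands in for PADDLE_ENFORCE_NOT_NULL):

    #include <cassert>
    #include <cstdlib>

    // Allocate, then verify the pointer before its first use (hypothetical
    // helper; the real gpc_malloc is a template taking an error string).
    template <typename T>
    T *checked_malloc(std::size_t bytes) {
      T *p = static_cast<T *>(std::malloc(bytes));
      assert(p != nullptr);  // PADDLE_ENFORCE_NOT_NULL(p) in the real code
      return p;
    }

    int main() {
      double *sbt = checked_malloc<double>(16 * sizeof(double));
      sbt[0] = 0.0;  // safe: validated before use
      std::free(sbt);
    }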
diff --git a/paddle/fluid/operators/squared_l2_distance_op.h b/paddle/fluid/operators/squared_l2_distance_op.h
index e0133d33e6a840d2d06832393a064df978cb9cbc..12a8f05b5a603417ead8ebd250ff7951f928f4a1 100644
--- a/paddle/fluid/operators/squared_l2_distance_op.h
+++ b/paddle/fluid/operators/squared_l2_distance_op.h
@@ -77,6 +77,9 @@ class SquaredL2DistanceGradKernel : public framework::OpKernel<T> {
     auto* x_g = context.Output<Tensor>(framework::GradVarName("X"));
     auto* y_g = context.Output<Tensor>(framework::GradVarName("Y"));
 
+    PADDLE_ENFORCE_NOT_NULL(x_g);
+    PADDLE_ENFORCE_NOT_NULL(y_g);
+
     auto sub_result = EigenMatrix<T>::From(*in0);
     auto out_grad = EigenMatrix<T>::From(*in1);
 
@@ -92,31 +95,28 @@ class SquaredL2DistanceGradKernel : public framework::OpKernel<T> {
     // propagate back to input
     auto& eigen_place =
         *context.template device_context<DeviceContext>().eigen_device();
-    if (x_g) {
-      x_g->mutable_data<T>(context.GetPlace());
-      // eigen matrix
-      auto x_grad =
-          EigenMatrix<T>::From(*x_g, framework::make_ddim({x_dims[0], cols}));
-      // dimensions are same with subResult
-      x_grad.device(eigen_place) = grad_mat;
-    }
-    if (y_g) {
-      y_g->mutable_data<T>(context.GetPlace());
-
-      PADDLE_ENFORCE_GE(sub_result.dimensions()[0], y_dims[0],
-                        "First dimension of gradient must be greater or "
-                        "equal than first dimension of target.");
-
-      if (sub_result.dimensions()[0] == y_dims[0]) {
-        auto y_grad =
-            EigenMatrix<T>::From(*y_g, framework::make_ddim({y_dims[0], cols}));
-        y_grad.device(eigen_place) = -1 * grad_mat;
-      } else {
-        auto col_sum_res = -1 * (grad_mat.sum(Eigen::array<int, 1>({{0}})));
-        auto y_grad = EigenVector<T>::Flatten(*y_g);
-        y_grad.device(eigen_place) = col_sum_res;
-      }
+    x_g->mutable_data<T>(context.GetPlace());
+    // eigen matrix
+    auto x_grad =
+        EigenMatrix<T>::From(*x_g, framework::make_ddim({x_dims[0], cols}));
+    // dimensions are same with subResult
+    x_grad.device(eigen_place) = grad_mat;
+
+    y_g->mutable_data<T>(context.GetPlace());
+
+    PADDLE_ENFORCE_GE(sub_result.dimensions()[0], y_dims[0],
+                      "First dimension of gradient must be greater or "
+                      "equal than first dimension of target.");
+
+    if (sub_result.dimensions()[0] == y_dims[0]) {
+      auto y_grad =
+          EigenMatrix<T>::From(*y_g, framework::make_ddim({y_dims[0], cols}));
+      y_grad.device(eigen_place) = -1 * grad_mat;
+    } else {
+      auto col_sum_res = -1 * (grad_mat.sum(Eigen::array<int, 1>({{0}})));
+      auto y_grad = EigenVector<T>::Flatten(*y_g);
+      y_grad.device(eigen_place) = col_sum_res;
     }
   }
 };
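With the null checks hoisted to the top of Compute, the gradient kernel uses x_g and y_g unconditionally instead of silently skipping work when an output pointer is missing. An illustrative stand-in for what PADDLE_ENFORCE_NOT_NULL provides (simplified; the real macro also records file/line and formats a richer message):

    #include <sstream>
    #include <stdexcept>

    // Simplified stand-in for PADDLE_ENFORCE_NOT_NULL, for illustration only.
    #define ENFORCE_NOT_NULL(ptr)                           \
      do {                                                  \
        if ((ptr) == nullptr) {                             \
          std::ostringstream oss;                           \
          oss << #ptr << " should not be null";             \
          throw std::runtime_error(oss.str());              \
        }                                                   \
      } while (0)

    // With the checks hoisted to the top, the body may use both pointers
    // unconditionally, as the rewritten gradient kernel now does.
    void Compute(float *x_g, float *y_g) {
      ENFORCE_NOT_NULL(x_g);
      ENFORCE_NOT_NULL(y_g);
      *x_g = 1.0f;   // stand-in for the Eigen gradient assignments
      *y_g = -1.0f;
    }

    int main() {
      float xg = 0.f, yg = 0.f;
      Compute(&xg, &yg);  // throws instead of crashing if a pointer were null
    }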