From 15fac5e7faab9ede5e40f84b63f80920a934436a Mon Sep 17 00:00:00 2001
From: liuyuhui
Date: Thu, 7 Jan 2021 10:41:43 +0800
Subject: [PATCH] fix assign_op_xpu concat_op_xpu warining (#30120)

---
 .../fuse_all_reduce_op_pass.cc          |  2 +-
 .../multi_devices_graph_pass.cc         |  2 +-
 paddle/fluid/operators/concat_op_xpu.cc | 25 +++++++++++++------
 3 files changed, 20 insertions(+), 9 deletions(-)

diff --git a/paddle/fluid/framework/ir/multi_devices_graph_pass/fuse_all_reduce_op_pass.cc b/paddle/fluid/framework/ir/multi_devices_graph_pass/fuse_all_reduce_op_pass.cc
index dfd275d9bc..0525c56f3f 100644
--- a/paddle/fluid/framework/ir/multi_devices_graph_pass/fuse_all_reduce_op_pass.cc
+++ b/paddle/fluid/framework/ir/multi_devices_graph_pass/fuse_all_reduce_op_pass.cc
@@ -276,7 +276,7 @@ class FuseAllReduceOpPass : public ir::Pass {
                                 ir::Node::Type::kOperation),
         local_scopes, places, num_of_all_reduce, multi_nccl_ctxs);
 #elif defined(PADDLE_WITH_XPU_BKCL)
-    auto *op_handle = new details::FusedAllReduceOpHandle(
+    op_handle = new details::FusedAllReduceOpHandle(
         result->CreateEmptyNode("fused_all_reduce",
                                 ir::Node::Type::kOperation),
         local_scopes, places, num_of_all_reduce, multi_bkcl_ctxs);
diff --git a/paddle/fluid/framework/ir/multi_devices_graph_pass/multi_devices_graph_pass.cc b/paddle/fluid/framework/ir/multi_devices_graph_pass/multi_devices_graph_pass.cc
index c23d357b17..0c03531aa8 100644
--- a/paddle/fluid/framework/ir/multi_devices_graph_pass/multi_devices_graph_pass.cc
+++ b/paddle/fluid/framework/ir/multi_devices_graph_pass/multi_devices_graph_pass.cc
@@ -522,7 +522,7 @@ void MultiDevSSAGraphBuilderBase::CreateAllReduceOp(ir::Graph *result,
             scopes, places, grad_merge_cond_name, multi_nccl_ctxs_));
 #elif defined(PADDLE_WITH_XPU_BKCL)
       result->Get<GraphOps>(kGraphOps).emplace_back(
-          new datails::GradMergeAllReduceOpHandle(
+          new details::GradMergeAllReduceOpHandle(
              result->CreateEmptyNode("allreduce", ir::Node::Type::kOperation),
              scopes, places, grad_merge_cond_name, multi_bkcl_ctxs_));
 #else
diff --git a/paddle/fluid/operators/concat_op_xpu.cc b/paddle/fluid/operators/concat_op_xpu.cc
index 0558f09a17..4ebe92801e 100644
--- a/paddle/fluid/operators/concat_op_xpu.cc
+++ b/paddle/fluid/operators/concat_op_xpu.cc
@@ -36,11 +36,16 @@ class ConcatXPUKernel : public framework::OpKernel<T> {
                           "XPU donot surpport AxisTensor for now"));
     axis = ComputeAxis(static_cast<int64_t>(axis),
                        static_cast<int64_t>(ins[0]->dims().size()));
-    PADDLE_ENFORCE_GE(
-        axis, 0, platform::errors::InvalidArgument("concat: axis shoud >= 0!"));
+    PADDLE_ENFORCE_GE(axis, 0, platform::errors::InvalidArgument(
+                                   "concat: axis should be larger than or "
+                                   "equal to 0, but received axis is %d.",
+                                   axis));
     PADDLE_ENFORCE_LT(axis, ins[0]->dims().size(),
                       platform::errors::InvalidArgument(
-                          "concat: axis shoud < ins[0]->dims()!"));
+                          "concat: axis should be less than ins[0]->dims()!"
+                          "But received axis is %d, while ins[0]->dims()"
+                          "size is %d.",
+                          axis, ins[0]->dims().size()));
 
     auto place = ctx.GetPlace();
     out->mutable_data<T>(place);
@@ -151,10 +156,16 @@ class ConcatGradXPUKernel : public framework::OpKernel<T> {
       }
     }
     PADDLE_ENFORCE_GE(axis, 0, platform::errors::InvalidArgument(
-                                   "concat_grad: axis shoud >= 0!"));
-    PADDLE_ENFORCE_LT(axis, out_grad->dims().size(),
-                      platform::errors::InvalidArgument(
-                          "concat_grad: axis shoud < ins[0]->dims()!"));
+                                   "concat_grad: axis should be larger than or "
+                                   "equal to 0, but received axis is %d.",
+                                   axis));
+    PADDLE_ENFORCE_LT(
+        axis, out_grad->dims().size(),
+        platform::errors::InvalidArgument(
+            "concat_grad: axis should be less than ins[0]->dims()!"
+            "But received axis is %d, while ins[0]->dims()"
+            "size is %d.",
+            axis, out_grad->dims().size()));
 
     auto input_dims = ins[0]->dims();
     std::vector<int> split_list(n);
--
GitLab