diff --git a/paddle/fluid/framework/ir/multi_devices_graph_pass/fuse_all_reduce_op_pass.cc b/paddle/fluid/framework/ir/multi_devices_graph_pass/fuse_all_reduce_op_pass.cc
index dfd275d9bc5b026ba05ec57fb49b73b6de02bfd0..0525c56f3f2de4459d86f07dc4ba60e6225d46a2 100644
--- a/paddle/fluid/framework/ir/multi_devices_graph_pass/fuse_all_reduce_op_pass.cc
+++ b/paddle/fluid/framework/ir/multi_devices_graph_pass/fuse_all_reduce_op_pass.cc
@@ -276,7 +276,7 @@ class FuseAllReduceOpPass : public ir::Pass {
                                 ir::Node::Type::kOperation),
         local_scopes, places, num_of_all_reduce, multi_nccl_ctxs);
 #elif defined(PADDLE_WITH_XPU_BKCL)
-    auto *op_handle = new details::FusedAllReduceOpHandle(
+    op_handle = new details::FusedAllReduceOpHandle(
         result->CreateEmptyNode("fused_all_reduce",
                                 ir::Node::Type::kOperation),
         local_scopes, places, num_of_all_reduce, multi_bkcl_ctxs);
diff --git a/paddle/fluid/framework/ir/multi_devices_graph_pass/multi_devices_graph_pass.cc b/paddle/fluid/framework/ir/multi_devices_graph_pass/multi_devices_graph_pass.cc
index c23d357b17ef162e2cab515273fded187c12ccfb..0c03531aa889e13828048620c61836a376dbf7c4 100644
--- a/paddle/fluid/framework/ir/multi_devices_graph_pass/multi_devices_graph_pass.cc
+++ b/paddle/fluid/framework/ir/multi_devices_graph_pass/multi_devices_graph_pass.cc
@@ -522,7 +522,7 @@ void MultiDevSSAGraphBuilderBase::CreateAllReduceOp(ir::Graph *result,
             scopes, places, grad_merge_cond_name, multi_nccl_ctxs_));
 #elif defined(PADDLE_WITH_XPU_BKCL)
     result->Get<GraphOps>(kGraphOps).emplace_back(
-        new datails::GradMergeAllReduceOpHandle(
+        new details::GradMergeAllReduceOpHandle(
             result->CreateEmptyNode("allreduce", ir::Node::Type::kOperation),
             scopes, places, grad_merge_cond_name, multi_bkcl_ctxs_));
 #else
diff --git a/paddle/fluid/operators/concat_op_xpu.cc b/paddle/fluid/operators/concat_op_xpu.cc
index 0558f09a174bf768c77e4a92e30d8abe4fe20021..4ebe92801e623aef0ca4e90927e2b2d0fce4d9e7 100644
--- a/paddle/fluid/operators/concat_op_xpu.cc
+++ b/paddle/fluid/operators/concat_op_xpu.cc
@@ -36,11 +36,16 @@ class ConcatXPUKernel : public framework::OpKernel<T> {
                           "XPU donot surpport AxisTensor for now"));
     axis = ComputeAxis(static_cast<int64_t>(axis),
                        static_cast<int64_t>(ins[0]->dims().size()));
-    PADDLE_ENFORCE_GE(
-        axis, 0, platform::errors::InvalidArgument("concat: axis shoud >= 0!"));
+    PADDLE_ENFORCE_GE(axis, 0, platform::errors::InvalidArgument(
+                                   "concat: axis should be larger than or "
+                                   "equal to 0, but received axis is %d.",
+                                   axis));
     PADDLE_ENFORCE_LT(axis, ins[0]->dims().size(),
                       platform::errors::InvalidArgument(
-                          "concat: axis shoud < ins[0]->dims()!"));
+                          "concat: axis should be less than ins[0]->dims()!"
+                          "But received axis is %d, while ins[0]->dims()"
+                          "size is %d.",
+                          axis, ins[0]->dims().size()));
     auto place = ctx.GetPlace();
     out->mutable_data<T>(place);
 
@@ -151,10 +156,16 @@ class ConcatGradXPUKernel : public framework::OpKernel<T> {
       }
     }
     PADDLE_ENFORCE_GE(axis, 0, platform::errors::InvalidArgument(
-                                   "concat_grad: axis shoud >= 0!"));
-    PADDLE_ENFORCE_LT(axis, out_grad->dims().size(),
-                      platform::errors::InvalidArgument(
-                          "concat_grad: axis shoud < ins[0]->dims()!"));
+                                   "concat_grad: axis should be larger than or "
+                                   "equal to 0, but received axis is %d.",
+                                   axis));
+    PADDLE_ENFORCE_LT(
+        axis, out_grad->dims().size(),
+        platform::errors::InvalidArgument(
+            "concat_grad: axis should be less than ins[0]->dims()!"
+            "But received axis is %d, while ins[0]->dims()"
+            "size is %d.",
+            axis, out_grad->dims().size()));
     auto input_dims = ins[0]->dims();
     std::vector<int> split_list(n);
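
For reference, the concat changes above all follow the same PADDLE_ENFORCE_GE / PADDLE_ENFORCE_LT pattern: the two compared operands come first, and a platform::errors::InvalidArgument(...) payload carries a printf-style message that reports the received values. The sketch below is illustrative only and not part of the patch; the free-standing helper CheckConcatAxis is a hypothetical name used for demonstration.

// Illustrative sketch, not part of the patch: validate a concat axis against
// the rank of the first input, reporting the offending values on failure.
#include "paddle/fluid/platform/enforce.h"

namespace paddle {

void CheckConcatAxis(int64_t axis, int64_t rank) {
  // Fails when axis < 0, embedding the received axis in the error message.
  PADDLE_ENFORCE_GE(axis, 0,
                    platform::errors::InvalidArgument(
                        "concat: axis should be larger than or equal to 0, "
                        "but received axis is %d.",
                        axis));
  // Fails when axis >= rank, reporting both the axis and the rank.
  PADDLE_ENFORCE_LT(axis, rank,
                    platform::errors::InvalidArgument(
                        "concat: axis should be less than the rank of the "
                        "first input, but received axis is %d and the rank "
                        "is %d.",
                        axis, rank));
}

}  // namespace paddle

When either comparison fails, the macro throws with the formatted message, so the caller sees both the violated constraint and the actual values rather than a bare assertion text.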