diff --git a/paddle/fluid/operators/fused/fusion_transpose_flatten_concat_op.cc b/paddle/fluid/operators/fused/fusion_transpose_flatten_concat_op.cc index 3127c0259a59e0b59ce5e3e7453d829162341fe9..bd376b1e7aaefbf890e174cc86899b990a9fed26 100644 --- a/paddle/fluid/operators/fused/fusion_transpose_flatten_concat_op.cc +++ b/paddle/fluid/operators/fused/fusion_transpose_flatten_concat_op.cc @@ -27,14 +27,20 @@ class TransposeFlattenConcatFusionOp : public framework::OperatorWithKernel { using framework::OperatorWithKernel::OperatorWithKernel; void InferShape(framework::InferShapeContext *ctx) const override { - PADDLE_ENFORCE_GE(ctx->Inputs("X").size(), 1UL, - "Inputs(X) of ConcatOp should be empty."); - PADDLE_ENFORCE(ctx->HasOutput("Out"), - "Output(Out) of ConcatOp should not be null."); + PADDLE_ENFORCE_GE( + ctx->Inputs("X").size(), 1UL, + platform::errors::InvalidArgument( + "Inputs(X) of TransposeFlattenConcat op should not be empty.")); + PADDLE_ENFORCE_EQ( + ctx->HasOutput("Out"), true, + platform::errors::InvalidArgument( + "Output(Out) of TransposeFlattenConcat op should not be null.")); auto ins = ctx->GetInputsDim("X"); const size_t n = ins.size(); - PADDLE_ENFORCE_GT(n, 0, "Input tensors count should > 0."); + PADDLE_ENFORCE_GT(n, 0, + platform::errors::InvalidArgument( + "Input tensors count should be greater than 0.")); std::vector trans_axis = ctx->Attrs().Get>("trans_axis"); @@ -44,9 +50,10 @@ class TransposeFlattenConcatFusionOp : public framework::OperatorWithKernel { size_t x_rank = ins[0].size(); size_t trans_axis_size = trans_axis.size(); PADDLE_ENFORCE_EQ(x_rank, trans_axis_size, - "The input tensor's rank(%d) " - "should be equal to the permutation axis's size(%d)", - x_rank, trans_axis_size); + platform::errors::InvalidArgument( + "The input tensor's rank(%d) " + "should be equal to the permutation axis's size(%d)", + x_rank, trans_axis_size)); auto dims0 = GetFlattenShape(flatten_axis, GetPermuteShape(trans_axis, ins[0])); @@ -59,9 +66,10 @@ 
class TransposeFlattenConcatFusionOp : public framework::OperatorWithKernel { out_dims[concat_axis] += dimsi[j]; } else { PADDLE_ENFORCE_EQ(out_dims[j], dimsi[j], - "After flatting, the %d-th dim should be save " - "except the specify axis.", - j); + platform::errors::InvalidArgument( + "After flattening, the %d-th dim should be the same " + "except the specified axis.", + j)); } } } diff --git a/paddle/fluid/operators/fused/fusion_transpose_flatten_concat_op.cu.cc b/paddle/fluid/operators/fused/fusion_transpose_flatten_concat_op.cu.cc index 17cb4556d45ef3adee2adc0d2f19ea048e096982..b61ef8e566b77607e6db9b51b270836338e06160 100644 --- a/paddle/fluid/operators/fused/fusion_transpose_flatten_concat_op.cu.cc +++ b/paddle/fluid/operators/fused/fusion_transpose_flatten_concat_op.cu.cc @@ -46,9 +46,13 @@ class TransposeFlattenConcatFusionKernel : public framework::OpKernel { cudnnTensorDescriptor_t in_desc; cudnnTensorDescriptor_t out_desc; PADDLE_ENFORCE_CUDA_SUCCESS( - platform::dynload::cudnnCreateTensorDescriptor(&in_desc)); + platform::dynload::cudnnCreateTensorDescriptor(&in_desc), + platform::errors::External("Create cudnn tensor descriptor failed in " + "transpose_flatten_concat_fusion op.")); PADDLE_ENFORCE_CUDA_SUCCESS( - platform::dynload::cudnnCreateTensorDescriptor(&out_desc)); + platform::dynload::cudnnCreateTensorDescriptor(&out_desc), + platform::errors::External("Create cudnn tensor descriptor failed in " + "transpose_flatten_concat_fusion op.")); cudnnDataType_t cudnn_dtype = CudnnDataType::type; auto& dev_ctx = ctx.template device_context(); @@ -87,15 +91,24 @@ class TransposeFlattenConcatFusionKernel : public framework::OpKernel { dims_y[i] = 1; } - PADDLE_ENFORCE_CUDA_SUCCESS(platform::dynload::cudnnSetTensorNdDescriptor( - in_desc, cudnn_dtype, max_dim, dims_y.data(), stride_x.data())); - PADDLE_ENFORCE_CUDA_SUCCESS(platform::dynload::cudnnSetTensorNdDescriptor( - out_desc, cudnn_dtype, max_dim, dims_y.data(), stride_y.data())); - - 
PADDLE_ENFORCE_CUDA_SUCCESS(platform::dynload::cudnnTransformTensor( - handle, CudnnDataType::kOne(), in_desc, - static_cast(ins[k]->data()), - CudnnDataType::kZero(), out_desc, static_cast(odata))); + PADDLE_ENFORCE_CUDA_SUCCESS( + platform::dynload::cudnnSetTensorNdDescriptor( + in_desc, cudnn_dtype, max_dim, dims_y.data(), stride_x.data()), + platform::errors::External("Set cudnn tensorNd descriptor failed " + "in transpose_flatten_concat op.")); + PADDLE_ENFORCE_CUDA_SUCCESS( + platform::dynload::cudnnSetTensorNdDescriptor( + out_desc, cudnn_dtype, max_dim, dims_y.data(), stride_y.data()), + platform::errors::External("Set cudnn tensorNd descriptor failed " + "in transpose_flatten_concat op.")); + + PADDLE_ENFORCE_CUDA_SUCCESS( + platform::dynload::cudnnTransformTensor( + handle, CudnnDataType::kOne(), in_desc, + static_cast(ins[k]->data()), + CudnnDataType::kZero(), out_desc, static_cast(odata)), + platform::errors::External("Cudnn transform tensor failed in " + "transpose_flatten_concat op.")); if (concat_axis == 0) { odata += osize; } else { @@ -104,9 +117,13 @@ class TransposeFlattenConcatFusionKernel : public framework::OpKernel { } } PADDLE_ENFORCE_CUDA_SUCCESS( - platform::dynload::cudnnDestroyTensorDescriptor(in_desc)); + platform::dynload::cudnnDestroyTensorDescriptor(in_desc), + platform::errors::External( + "Destroy cudnn descriptor failed in transpose_flatten_concat op.")); PADDLE_ENFORCE_CUDA_SUCCESS( - platform::dynload::cudnnDestroyTensorDescriptor(out_desc)); + platform::dynload::cudnnDestroyTensorDescriptor(out_desc), + platform::errors::External( + "Destroy cudnn descriptor failed in transpose_flatten_concat op.")); } };