diff --git a/paddle/fluid/operators/conv_cudnn_op.cu b/paddle/fluid/operators/conv_cudnn_op.cu
index c49a3ee1c20ed32bd8d0504a28e4d7bb5f9917e3..5ea10f29ea9247f08b6413d478ac6eb3342ae69f 100644
--- a/paddle/fluid/operators/conv_cudnn_op.cu
+++ b/paddle/fluid/operators/conv_cudnn_op.cu
@@ -65,7 +65,8 @@ class CUDNNConvOpKernel : public framework::OpKernel<T> {
     int groups = ctx.Attr<int>("groups");
 
     bool exhaustive_search =
-        FLAGS_cudnn_exhaustive_search || ctx.Attr<bool>("exhaustive_search");
+        FLAGS_cudnn_exhaustive_search || (ctx.HasAttr("exhaustive_search") &&
+                                          ctx.Attr<bool>("exhaustive_search"));
     bool deterministic = FLAGS_cudnn_deterministic;
     auto exhaustive_deterministic = exhaustive_search && deterministic;
     PADDLE_ENFORCE_EQ(exhaustive_deterministic, false,
@@ -386,7 +387,8 @@ class CUDNNConvGradOpKernel : public framework::OpKernel<T> {
     int groups = ctx.Attr<int>("groups");
 
     bool exhaustive_search =
-        FLAGS_cudnn_exhaustive_search || ctx.Attr<bool>("exhaustive_search");
+        FLAGS_cudnn_exhaustive_search || (ctx.HasAttr("exhaustive_search") &&
+                                          ctx.Attr<bool>("exhaustive_search"));
     bool deterministic = FLAGS_cudnn_deterministic;
     auto exhaustive_deterministic = exhaustive_search && deterministic;
     PADDLE_ENFORCE_EQ(exhaustive_deterministic, false,
@@ -437,7 +439,7 @@ class CUDNNConvGradOpKernel : public framework::OpKernel<T> {
           ctx, input_grad, &transformed_input_grad_channel);
       // NOTE(zhiqiu): If inplace_addto strategy is enabled, we need to copy
       // the data of input_grad to transformed_input_grad_channel.
-      if (ctx.Attr<bool>("use_addto")) {
+      if (ctx.HasAttr("use_addto") && ctx.Attr<bool>("use_addto")) {
         TransToChannelFirst<platform::CUDADeviceContext, T>(
             ctx, input_grad, &transformed_input_grad_channel);
       }
@@ -703,15 +705,17 @@ class CUDNNConvGradOpKernel : public framework::OpKernel<T> {
     // MIOPEN ONLY support beta to be 0.0f
     ScalingParamType<T> beta = 0.0f;
 #else
-    ScalingParamType<T> beta = ctx.Attr<bool>("use_addto") ? 1.0f : 0.0f;
+    ScalingParamType<T> beta =
+        (ctx.HasAttr("use_addto") && ctx.Attr<bool>("use_addto")) ? 1.0f : 0.0f;
 #endif
-    VLOG(4) << "Conv_grad: use_addto = " << ctx.Attr<bool>("use_addto");
+    VLOG(4) << "Conv_grad: use_addto = "
+            << (ctx.HasAttr("use_addto") && ctx.Attr<bool>("use_addto"));
 
     if (input_grad) {
       // When beta is 0, it is unnecessary to reset input_grad.
       // When beta is 1, the output cannot be reset since addt strategy used.
 #ifdef PADDLE_WITH_HIP
-      if (ctx.Attr<bool>("use_addto")) {
+      if (ctx.HasAttr("use_addto") && ctx.Attr<bool>("use_addto")) {
         Tensor temp_tensor(transformed_input_grad.type());
         temp_tensor.Resize(transformed_input_grad.dims());
         T* temp_tensor_data = temp_tensor.mutable_data<T>(ctx.GetPlace());
@@ -878,7 +882,8 @@ class CUDNNConvDoubleGradOpKernel : public framework::OpKernel<T> {
     int groups = ctx.Attr<int>("groups");
 
     bool exhaustive_search =
-        FLAGS_cudnn_exhaustive_search || ctx.Attr<bool>("exhaustive_search");
+        FLAGS_cudnn_exhaustive_search || (ctx.HasAttr("exhaustive_search") &&
+                                          ctx.Attr<bool>("exhaustive_search"));
     bool deterministic = FLAGS_cudnn_deterministic;
     auto exhaustive_deterministic = exhaustive_search && deterministic;
     PADDLE_ENFORCE_EQ(exhaustive_deterministic, false,
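
Every hunk above applies the same guarded-read pattern: an optional attribute is read through ctx.Attr<bool>(...) only after ctx.HasAttr(...) confirms it exists, so the expression falls back to false (or beta = 0.0f) when the attribute is absent instead of failing. The standalone sketch below illustrates that pattern with a hypothetical AttrContext class; it is NOT Paddle's framework::ExecutionContext, only a minimal, self-contained approximation of the HasAttr()/Attr() shape used in the patch.

#include <iostream>
#include <map>
#include <string>

// Hypothetical stand-in for an op execution context. Attributes may be
// missing entirely, so reads must be guarded.
class AttrContext {
 public:
  void SetAttr(const std::string& name, bool value) { attrs_[name] = value; }

  bool HasAttr(const std::string& name) const {
    return attrs_.count(name) > 0;
  }

  // std::map::at throws when the key is missing, which is why callers in
  // the patch check HasAttr() before calling Attr().
  bool Attr(const std::string& name) const { return attrs_.at(name); }

 private:
  std::map<std::string, bool> attrs_;
};

int main() {
  const bool FLAGS_cudnn_exhaustive_search = false;  // stand-in for the gflag

  AttrContext ctx;  // "exhaustive_search" is intentionally never set

  // Guarded read mirroring the diff: evaluates to false rather than
  // throwing when the attribute is absent.
  const bool exhaustive_search =
      FLAGS_cudnn_exhaustive_search ||
      (ctx.HasAttr("exhaustive_search") && ctx.Attr("exhaustive_search"));

  std::cout << "exhaustive_search = " << std::boolalpha << exhaustive_search
            << std::endl;
  return 0;
}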