diff --git a/paddle/operators/pad_op.cc b/paddle/operators/pad_op.cc
index 99f605c651d7f48916f8735a1c4948c0d71fa613..7e78b6ec133981494a65b5e16316ae8fdbd61a60 100644
--- a/paddle/operators/pad_op.cc
+++ b/paddle/operators/pad_op.cc
@@ -96,8 +96,9 @@ class PadOpGrad : public framework::OperatorWithKernel {
                             "Input(Out@GRAD) should not be null");
     auto x_dims = ctx.Input<Tensor>("X")->dims();
     auto *x_grad = ctx.Output<Tensor>(framework::GradVarName("X"));
-
-    x_grad->Resize(x_dims);
+    if (x_grad != nullptr) {
+      x_grad->Resize(x_dims);
+    }
   }
 };
 
diff --git a/paddle/operators/pad_op.h b/paddle/operators/pad_op.h
index ca8832f26a6492bf265b07e1581a886777e6301e..2cc3b945ae5b2e2e93d8531c7f99e4c215d1d806 100644
--- a/paddle/operators/pad_op.h
+++ b/paddle/operators/pad_op.h
@@ -87,12 +87,13 @@ void PadGradFunction(const framework::ExecutionContext& context) {
   }
   auto* d_out = context.Input<Tensor>(framework::GradVarName("Out"));
   auto* d_x = context.Output<Tensor>(framework::GradVarName("X"));
-  d_x->mutable_data<T>(context.GetPlace());
-
-  auto d_x_tensor = EigenTensor<T, D>::From(*d_x);
-  auto d_out_tensor = EigenTensor<T, D>::From(*d_out);
-  auto place = context.GetEigenDevice<Place>();
-  d_x_tensor.device(place) = d_out_tensor.pad(paddings, 0);
+  if (d_x != nullptr) {
+    d_x->mutable_data<T>(context.GetPlace());
+    auto d_x_tensor = EigenTensor<T, D>::From(*d_x);
+    auto d_out_tensor = EigenTensor<T, D>::From(*d_out);
+    auto place = context.GetEigenDevice<Place>();
+    d_x_tensor.device(place) = d_out_tensor.pad(paddings, 0);
+  }
 }
 
 template <typename Place, typename T>
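
Reviewer note: the core of this patch is treating `Output(GradVarName("X"))` as optional. When no consumer of the graph needs `X`'s gradient, the framework hands the kernel a null pointer, and the old code dereferenced it unconditionally in both `InferShape` and the grad kernel. Below is a minimal standalone sketch of the pattern being adopted, not Paddle's actual API; `Tensor` and `GetOutputGrad` here are hypothetical stand-ins for illustration only.

```cpp
// Minimal sketch of the optional-gradient pattern (not Paddle's actual API;
// Tensor and GetOutputGrad are hypothetical stand-ins for illustration).
#include <iostream>

struct Tensor {
  void Resize(int n) { size = n; }
  int size = 0;
};

// Stand-in for ctx.Output(GradVarName("X")): the framework returns nullptr
// when nothing downstream needs X's gradient.
Tensor* GetOutputGrad(bool x_needs_grad, Tensor* storage) {
  return x_needs_grad ? storage : nullptr;
}

int main() {
  Tensor storage;
  for (bool needs_grad : {true, false}) {
    Tensor* x_grad = GetOutputGrad(needs_grad, &storage);
    if (x_grad != nullptr) {  // the guard this patch adds
      x_grad->Resize(6);
      std::cout << "resized x_grad to " << x_grad->size << "\n";
    } else {
      std::cout << "x_grad not requested; computation skipped\n";
    }
  }
  return 0;
}
```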
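
Reviewer note: for context on the computation that is now guarded, the gradient of zero-padding is simply cropping `d_out` back to `X`'s shape; the kernel expresses this crop via `pad` using the paddings array filled earlier in `PadGradFunction` (above the hunk shown). A self-contained Eigen sketch of the same math, written with an explicit `slice` for clarity and assuming only Eigen's unsupported Tensor module:

```cpp
// Standalone illustration (not Paddle code): the backward of zero-padding
// crops the output gradient back to the input's extents.
#include <unsupported/Eigen/CXX11/Tensor>
#include <iostream>
#include <utility>

int main() {
  // Forward: pad a 2x3 tensor by one element on every side -> 4x5.
  Eigen::Tensor<float, 2> x(2, 3);
  x.setConstant(1.0f);
  Eigen::array<std::pair<int, int>, 2> pads;
  pads[0] = std::make_pair(1, 1);  // rows: 1 before, 1 after
  pads[1] = std::make_pair(1, 1);  // cols: 1 before, 1 after
  Eigen::Tensor<float, 2> out = x.pad(pads, 0.0f);
  std::cout << "padded shape: " << out.dimension(0) << "x"
            << out.dimension(1) << "\n";

  // Backward: d_x is the interior slice of d_out -- start at the "before"
  // offsets and take the original extents. The padded border received only
  // the constant fill value, so it contributes nothing to x and is dropped.
  Eigen::Tensor<float, 2> d_out(4, 5);
  d_out.setConstant(2.0f);
  Eigen::array<Eigen::Index, 2> offsets = {1, 1};
  Eigen::array<Eigen::Index, 2> extents = {2, 3};
  Eigen::Tensor<float, 2> d_x = d_out.slice(offsets, extents);

  std::cout << d_x << "\n";  // 2x3 tensor, every entry 2
  return 0;
}
```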