diff --git a/paddle/fluid/operators/clip_by_norm_op.h b/paddle/fluid/operators/clip_by_norm_op.h
index 83461159134f3a77613eec70f735a2e1402131ce..7144524a4c62d29df255afb197c6f5e7c39c28a7 100644
--- a/paddle/fluid/operators/clip_by_norm_op.h
+++ b/paddle/fluid/operators/clip_by_norm_op.h
@@ -33,12 +33,14 @@ class ClipByNormKernel : public framework::OpKernel<T> {
   void Compute(const framework::ExecutionContext& context) const override {
     auto max_norm = context.Attr<T>("max_norm");
     auto in_var = context.InputVar("X");
-    auto* output = context.Output<Tensor>("Out");
-    output->mutable_data<T>(context.GetPlace());
 
+    Tensor* output = nullptr;
     const Tensor* input = nullptr;
     if (in_var->IsType<framework::LoDTensor>()) {
       input = context.Input<Tensor>("X");
+
+      output = context.Output<Tensor>("Out");
+      output->mutable_data<T>(context.GetPlace());
     } else if (in_var->IsType<SelectedRows>()) {
       auto* x = context.Input<SelectedRows>("X");
 
@@ -50,6 +52,13 @@ class ClipByNormKernel : public framework::OpKernel<T> {
       merge_func(context.template device_context<DeviceContext>(), *x,
                  merged_input);
       input = &(merged_input->value());
+
+      // the output is SelectedRows too: copy rows, then size and allocate its value
+      auto* output_selected_rows = context.Output<SelectedRows>("Out");
+      output_selected_rows->set_rows(merged_input->rows());
+      output = output_selected_rows->mutable_value();
+      output->Resize(merged_input->value().dims());
+      output->mutable_data<T>(context.GetPlace());
     } else {
       PADDLE_THROW("Unexpected branch, input variable type is %s",
                    in_var->Type().name());
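
Note: the hunks above only choose `input`/`output` for the two variable types; the rest of `Compute` (unchanged by this patch) scales the tensor so its L2 norm does not exceed `max_norm`. A minimal standalone sketch of that clipping math follows; the `ClipByNorm` function and names here are illustrative, not the Paddle kernel or its API.

// Standalone sketch of clip-by-norm (illustrative, not the Paddle kernel):
//   out = x * max_norm / max(||x||_2, max_norm)
#include <cmath>
#include <cstdio>
#include <vector>

std::vector<float> ClipByNorm(const std::vector<float>& x, float max_norm) {
  float sum_sq = 0.0f;
  for (float v : x) sum_sq += v * v;
  const float norm = std::sqrt(sum_sq);
  // Leave the input untouched when its norm is already within the bound.
  const float scale = (norm > max_norm) ? max_norm / norm : 1.0f;
  std::vector<float> out(x.size());
  for (size_t i = 0; i < x.size(); ++i) out[i] = x[i] * scale;
  return out;
}

int main() {
  // ||{3, 4}||_2 = 5, so with max_norm = 1 the result is {0.6, 0.8}.
  for (float v : ClipByNorm({3.0f, 4.0f}, 1.0f)) std::printf("%g ", v);
  std::printf("\n");
  return 0;
}

In the SelectedRows branch, `merge_func` (a `math::scatter::MergeAdd` functor) first sums duplicate row ids, so the clipping operates on deduplicated values and the output's rows are taken from the merged input; the sketch above covers only the dense math.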