diff --git a/paddle/fluid/operators/optimizers/adam_op.h b/paddle/fluid/operators/optimizers/adam_op.h
index dda4ffb9087c4a993851661e854004caefb31154..61b9384f8422cb531a94096875434ffe36ecdbce 100644
--- a/paddle/fluid/operators/optimizers/adam_op.h
+++ b/paddle/fluid/operators/optimizers/adam_op.h
@@ -431,17 +431,19 @@ class AdamOpKernel : public framework::OpKernel<T> {
       } else {
         // merge duplicated rows if any.
         // The rows of grad_merge have been sorted inside MergeAdd functor
+        framework::SelectedRows* grad_merge_var;
         scatter::MergeAdd<DeviceContext, T> merge_func;
         if (platform::is_cpu_place(ctx.GetPlace())) {
-          grad_merge_ptr = &cpu_grad_merge;
+          grad_merge_var = &cpu_grad_merge;
         } else {
           // FIXME(qiao): GPU also need to fix this
-          auto* grad_merge_var = const_cast<framework::Scope&>(ctx.scope())
-                                     .Var()
-                                     ->GetMutable<framework::SelectedRows>();
+          grad_merge_var = const_cast<framework::Scope&>(ctx.scope())
+                               .Var()
+                               ->GetMutable<framework::SelectedRows>();
         }
         merge_func(ctx.template device_context<DeviceContext>(), grad,
-                   grad_merge_ptr, true);
+                   grad_merge_var, true);
+        grad_merge_ptr = grad_merge_var;
       }
       auto& grad_merge = *grad_merge_ptr;
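
Note on the change, with a sketch: as far as this hunk shows, the pre-patch GPU
branch created a block-local grad_merge_var inside the scope but never routed it
to grad_merge_ptr, so merge_func and the later *grad_merge_ptr dereference could
run against an unset pointer. The patch hoists a single mutable
framework::SelectedRows* above the branch, assigns it on both the CPU and GPU
paths, passes it to merge_func, and only then publishes it through
grad_merge_ptr. A minimal standalone C++ sketch of that fixed control flow
follows; it is not Paddle code, and the names Rows, merge_rows, and run_merge
are hypothetical stand-ins.

    // Standalone sketch of the post-patch control flow; all names here are
    // hypothetical, not PaddlePaddle APIs.
    #include <cassert>
    #include <vector>

    struct Rows {
      std::vector<int> data;
    };

    // Stand-in for scatter::MergeAdd: writes the merged result into *out.
    void merge_rows(const Rows& in, Rows* out) { out->data = in.data; }

    const Rows* run_merge(const Rows& grad, Rows* cpu_buffer, Rows* gpu_buffer,
                          bool on_cpu) {
      // One mutable pointer, declared above the branch and assigned on every
      // path (mirrors the added `framework::SelectedRows* grad_merge_var;`).
      Rows* merge_target = nullptr;
      if (on_cpu) {
        merge_target = cpu_buffer;
      } else {
        // Pre-patch, the pointer produced on this path never reached the
        // caller; now it is assigned before the merge runs.
        merge_target = gpu_buffer;
      }
      merge_rows(grad, merge_target);
      return merge_target;  // mirrors `grad_merge_ptr = grad_merge_var;`
    }

    int main() {
      Rows grad{{1, 2, 3}}, cpu_buf, gpu_buf;
      const Rows* merged = run_merge(grad, &cpu_buf, &gpu_buf, /*on_cpu=*/false);
      assert(merged == &gpu_buf && merged->data.size() == 3);
      return 0;
    }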