diff --git a/mindspore/nn/optim/optimizer.py b/mindspore/nn/optim/optimizer.py
index c2a419c56589c09c9b8eee160f37947f6b018265..bfbde78ffff1c62a17edf14f2e1c5da19bad78dc 100755
--- a/mindspore/nn/optim/optimizer.py
+++ b/mindspore/nn/optim/optimizer.py
@@ -109,7 +109,7 @@ class Optimizer(Cell):
             tuple[Tensor], The gradients after weight decay.
         """
         if self.weight_decay > 0:
-            params = self.params
+            params = self.parameters
             gradients = self.hyper_map(F.partial(apply_decay, self.weight_decay), self.decay_flags, params, gradients)
 
         return gradients
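
The fix renames a stale attribute lookup: the `Optimizer` base class stores its parameter tuple as `self.parameters`, not `self.params`, so the old line would fail with an `AttributeError` as soon as `weight_decay > 0` and the decay branch was actually taken.

For illustration, here is a minimal pure-Python sketch of what the `hyper_map(F.partial(apply_decay, ...))` call computes elementwise. The `apply_decay` below is a hypothetical stand-in for MindSpore's multitype function graph of the same name, and plain `map` stands in for `hyper_map`; the scalar values are placeholders for tensors:

```python
from functools import partial

def apply_decay(weight_decay, decay_flag, param, gradient):
    # Hypothetical stand-in for MindSpore's apply_decay: add the L2 term
    # (weight_decay * param) to the gradient when this parameter's decay
    # flag is set, otherwise pass the gradient through unchanged.
    return (gradient + weight_decay * param) if decay_flag else gradient

# hyper_map broadcasts the partially-bound function across the tuples of
# flags, parameters, and gradients; plain map() mimics that behaviour here.
decay_flags = (True, False)   # e.g. decay weights, skip biases
params = (1.0, 2.0)           # scalar stand-ins for parameter tensors
gradients = (0.1, 0.2)        # scalar stand-ins for gradient tensors

decayed = tuple(map(partial(apply_decay, 0.01), decay_flags, params, gradients))
print(decayed)  # ~ (0.11, 0.2): decay is added only where the flag is True
```

Binding `self.weight_decay` once via `F.partial` and carrying a per-parameter `decay_flags` tuple is what lets the optimizer exempt selected parameters (typically biases and normalization weights) from decay in a single elementwise pass.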