diff --git a/python/paddle/optimizer/adadelta.py b/python/paddle/optimizer/adadelta.py
index ff0f0a13feddca1728d0345f007cac882cad5c2a..5ece3282678c7ff455ec876390fb1286d71ee99d 100644
--- a/python/paddle/optimizer/adadelta.py
+++ b/python/paddle/optimizer/adadelta.py
@@ -145,8 +145,11 @@ class Adadelta(Optimizer):
             parameters = parameters.get('params')
 
         for p in parameters:
+            if p.name in self._already_create_accumulater:
+                continue
             self._add_accumulator(self._avg_squared_grad_acc_str, p)
             self._add_accumulator(self._avg_squared_update_acc_str, p)
+            self._already_create_accumulater.add(p.name)
 
     def _append_optimize_op(self, block, param_and_grad):
         if isinstance(param_and_grad, dict):
diff --git a/python/paddle/optimizer/adagrad.py b/python/paddle/optimizer/adagrad.py
index 6bea5773270bb5828c03cdb8b361f0f95178d36c..ff65c86dbebc69615916f4527b1a8022da870eb2 100644
--- a/python/paddle/optimizer/adagrad.py
+++ b/python/paddle/optimizer/adagrad.py
@@ -139,11 +139,14 @@ class Adagrad(Optimizer):
             parameters = self._update_param_group(parameters)
 
         for p in parameters:
+            if p.name in self._already_create_accumulater:
+                continue
             self._add_accumulator(
                 self._moment_acc_str,
                 p,
                 fill_value=self.initial_accumulator_value,
             )
+            self._already_create_accumulater.add(p.name)
 
     def _append_optimize_op(self, block, param_and_grad):
         assert isinstance(block, framework.Block)
diff --git a/python/paddle/optimizer/adam.py b/python/paddle/optimizer/adam.py
index 9c827496e8b2e350c3109c27baecfd21408b5cb0..0e33cac29c7c716afd2c084d671ea8d0f4686616 100644
--- a/python/paddle/optimizer/adam.py
+++ b/python/paddle/optimizer/adam.py
@@ -317,9 +317,12 @@ class Adam(Optimizer):
 
         # Create accumulator tensors for first and second moments
         for p in parameters:
+            if p.name in self._already_create_accumulater:
+                continue
             if self._multi_precision and self._is_dtype_fp16_or_bf16(p.dtype):
                 master_p = self._create_master_weight(p)
                 self._add_moments_pows(master_p)
+                self._already_create_accumulater.add(p.name)
                 continue
             if (
                 self._is_dtype_fp16_or_bf16(p.dtype)
@@ -330,6 +333,7 @@
                     "Consider using multi_precision=True option of the Adam optimizer."
                 )
             self._add_moments_pows(p)
+            self._already_create_accumulater.add(p.name)
 
     def _append_optimize_op(self, block, param_and_grad):
         assert isinstance(block, framework.Block)
diff --git a/python/paddle/optimizer/adamax.py b/python/paddle/optimizer/adamax.py
index c460ab6be032dd4ad52ef76ac517ae32f589a73c..5409a05787dd1febb3c4e4712047cd072d88995e 100644
--- a/python/paddle/optimizer/adamax.py
+++ b/python/paddle/optimizer/adamax.py
@@ -176,6 +176,8 @@ class Adamax(Optimizer):
 
         # Create accumulator tensors for first moment and infinity norm
         for p in parameters:
+            if p.name in self._already_create_accumulater:
+                continue
             self._add_accumulator(self._moment_acc_str, p)
             self._add_accumulator(self._inf_norm_acc_str, p)
             self._add_accumulator(
@@ -184,6 +186,7 @@
                 fill_value=self._beta1,
                 shape=[1],
             )
+            self._already_create_accumulater.add(p.name)
 
     def _append_optimize_op(self, block, param_and_grad):
         assert isinstance(block, framework.Block)
diff --git a/python/paddle/optimizer/adamw.py b/python/paddle/optimizer/adamw.py
index 5a75e6d243696a4407910ef650a854f98e51aba8..ddacd33527e85f90da868f87af7f93c24f416ced 100644
--- a/python/paddle/optimizer/adamw.py
+++ b/python/paddle/optimizer/adamw.py
@@ -281,6 +281,7 @@ class AdamW(Optimizer):
         self._use_multi_tensor = None
         self.regularization = None
         self._auxiliary_vars = {}
+        self._already_create_accumulater = set()
 
     def _set_auxiliary_var(self, key, val):
         self._auxiliary_vars[key] = val
@@ -422,9 +423,12 @@
 
         # Create accumulator tensors for first and second moments
         for p in parameters:
+            if p.name in self._already_create_accumulater:
+                continue
             if self._multi_precision and self._is_dtype_fp16_or_bf16(p.dtype):
                 master_p = self._create_master_weight(p)
                 self._add_moments_pows(master_p)
+                self._already_create_accumulater.add(p.name)
                 continue
             if (
                 self._is_dtype_fp16_or_bf16(p.dtype)
@@ -435,6 +439,7 @@
                     "Consider using multi_precision=True option of the Adam optimizer."
                 )
             self._add_moments_pows(p)
+            self._already_create_accumulater.add(p.name)
 
     def _append_optimize_op(self, block, param_and_grad):
         assert isinstance(block, framework.Block)
diff --git a/python/paddle/optimizer/lamb.py b/python/paddle/optimizer/lamb.py
index 57904cd44a86cccfade4a6950a86b6eccd8c4af5..a577e7c0771e6aa83359be479fe6133f141a13eb 100644
--- a/python/paddle/optimizer/lamb.py
+++ b/python/paddle/optimizer/lamb.py
@@ -190,11 +190,15 @@ class Lamb(Optimizer):
 
         # Create accumulator tensors for first and second moments
         for p in parameters:
+            if p.name in self._already_create_accumulater:
+                continue
             if self._multi_precision and p.dtype == core.VarDesc.VarType.FP16:
                 master_p = self._create_master_weight(p)
                 self._add_moments_pows(master_p)
+                self._already_create_accumulater.add(p.name)
             else:
                 self._add_moments_pows(p)
+                self._already_create_accumulater.add(p.name)
 
     def _get_accumulator(self, name, param):
         """Utility function to fetch an accumulator for a parameter
diff --git a/python/paddle/optimizer/momentum.py b/python/paddle/optimizer/momentum.py
index bff9c1209e70806f19b24f7cb1a6fc922c4e541c..07839bbe3eff6b4108b42b61e24e573a5d897b23 100644
--- a/python/paddle/optimizer/momentum.py
+++ b/python/paddle/optimizer/momentum.py
@@ -270,9 +270,12 @@ class Momentum(Optimizer):
             parameters = self._update_param_group(parameters)
 
         for p in parameters:
+            if p.name in self._already_create_accumulater:
+                continue
             if self._multi_precision and p.dtype == core.VarDesc.VarType.FP16:
                 master_p = self._create_master_weight(p)
                 self._add_accumulator(self._velocity_acc_str, master_p)
+                self._already_create_accumulater.add(p.name)
                 continue
             if (
                 p.dtype == core.VarDesc.VarType.FP16
@@ -283,6 +286,7 @@
                     "Consider using multi_precision=True option of the Momentum optimizer."
                 )
             self._add_accumulator(self._velocity_acc_str, p)
+            self._already_create_accumulater.add(p.name)
 
     def _create_regularization_of_grad(self, param, grad, regularization=None):
         """Create and add backward regularization Operators
diff --git a/python/paddle/optimizer/optimizer.py b/python/paddle/optimizer/optimizer.py
index d9e1cd456042c409cb3694470f16133318cda81d..ef177c52a28c3187130bedb2057743cad644ee07 100644
--- a/python/paddle/optimizer/optimizer.py
+++ b/python/paddle/optimizer/optimizer.py
@@ -275,6 +275,7 @@ class Optimizer:
 
         self._param_dict = self._create_multi_tensor_dict()
         self._auxiliary_vars = {}
+        self._already_create_accumulater = set()
 
     def _set_auxiliary_var(self, key, val):
         self._auxiliary_vars[key] = val
diff --git a/python/paddle/optimizer/rmsprop.py b/python/paddle/optimizer/rmsprop.py
index 855082eae5f8f874f1ca29680f6b83214ddc3b46..ae342d4c0211407237b8322afb9c92c6a94a2d66 100644
--- a/python/paddle/optimizer/rmsprop.py
+++ b/python/paddle/optimizer/rmsprop.py
@@ -199,9 +199,12 @@ class RMSProp(Optimizer):
             parameters = parameters.get('params')
 
         for p in parameters:
+            if p.name in self._already_create_accumulater:
+                continue
             self._add_accumulator(self._momentum_acc_str, p)
             self._add_accumulator(self._mean_square_acc_str, p)
             self._add_accumulator(self._mean_grad_acc_str, p)
+            self._already_create_accumulater.add(p.name)
 
     def _append_optimize_op(self, block, param_and_grad):
         if not isinstance(block, framework.Block):
diff --git a/python/paddle/optimizer/sgd.py b/python/paddle/optimizer/sgd.py
index c188cd15a8c3af0826b79255e4f5abc73f52b8ba..d65857f1dd4e264e870cee8cf5d803d7a23887b1 100644
--- a/python/paddle/optimizer/sgd.py
+++ b/python/paddle/optimizer/sgd.py
@@ -129,8 +129,11 @@ class SGD(Optimizer):
 
         # Create accumulator tensors for first and second moments
        for p in parameters:
+            if p.name in self._already_create_accumulater:
+                continue
             if self._multi_precision and p.dtype == core.VarDesc.VarType.FP16:
                 master_p = self._create_master_weight(p)
+                self._already_create_accumulater.add(p.name)
                 continue
             if (
                 p.dtype == core.VarDesc.VarType.FP16
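Every `_create_accumulators` override in the diff applies the same guard: skip a parameter whose accumulators were already created, and record its name once they exist. The sketch below illustrates that pattern in isolation; `ToyOptimizer`, `Param`, and this `_add_accumulator` signature are illustrative stand-ins, not Paddle's API, and it assumes `_create_accumulators` may be called more than once for overlapping parameter lists (e.g. once per parameter group or per `minimize` call).

```python
from dataclasses import dataclass


@dataclass
class Param:
    name: str


class ToyOptimizer:
    """Illustrative stand-in, not the Paddle Optimizer class."""

    def __init__(self):
        # (accumulator name, parameter name) -> state; a real optimizer holds tensors
        self._accumulators = {}
        # mirrors the set added by the diff: parameter names whose accumulators exist
        self._already_create_accumulater = set()

    def _add_accumulator(self, acc_name, p, fill_value=0.0):
        # Re-creating an accumulator would reset optimizer state (or raise,
        # depending on the framework), so it must happen only once per parameter.
        self._accumulators[(acc_name, p.name)] = fill_value

    def _create_accumulators(self, parameters):
        for p in parameters:
            if p.name in self._already_create_accumulater:
                continue  # accumulators for this parameter already exist
            self._add_accumulator("moment", p)
            self._add_accumulator("inf_norm", p)
            self._already_create_accumulater.add(p.name)


opt = ToyOptimizer()
w = Param("w")
opt._create_accumulators([w])
opt._accumulators[("moment", w.name)] = 123.0  # pretend training updated this state
opt._create_accumulators([w])  # second call is a no-op thanks to the guard
assert opt._accumulators[("moment", w.name)] == 123.0
```

Keying the set on `p.name` rather than on the parameter object mirrors the diff, where the guard likewise records and checks parameter names.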