diff --git a/python/paddle/distributed/fleet/meta_optimizers/ascend/ascend_optimizer.py b/python/paddle/distributed/fleet/meta_optimizers/ascend/ascend_optimizer.py
index b7d22882c82b5894211ac8ec30a7908d4afd3bb3..6db42eb47be90214d5bdc286b8127c010936ff0d 100644
--- a/python/paddle/distributed/fleet/meta_optimizers/ascend/ascend_optimizer.py
+++ b/python/paddle/distributed/fleet/meta_optimizers/ascend/ascend_optimizer.py
@@ -236,10 +236,6 @@ class AscendOptimizer(Optimizer):
                 ret_list.append(var)
         return ret_list
 
-    def _set_auxiliary_var(self, key, val):
-        super()._set_auxiliary_var(key, val)
-        self.inner_opt._set_auxiliary_var(key, val)
-
     def minimize(
         self,
         loss,
diff --git a/python/paddle/distributed/fleet/meta_optimizers/meta_optimizer_base.py b/python/paddle/distributed/fleet/meta_optimizers/meta_optimizer_base.py
index 9a7660ebd7dc1fd85ac8386bdaf17f95710d0f98..87085a322c30370f7e67868e07423d09980d7de8 100644
--- a/python/paddle/distributed/fleet/meta_optimizers/meta_optimizer_base.py
+++ b/python/paddle/distributed/fleet/meta_optimizers/meta_optimizer_base.py
@@ -25,10 +25,6 @@ class MetaOptimizerBase(Optimizer):
         self.meta_optimizers_white_list = []
         self.meta_optimizers_black_list = []
 
-    def _set_auxiliary_var(self, key, val):
-        super()._set_auxiliary_var(key, val)
-        self.inner_opt._set_auxiliary_var(key, val)
-
     def _set_basic_info(
         self, loss, role_maker, user_defined_optimizer, user_defined_strategy
     ):
diff --git a/python/paddle/distributed/fleet/meta_parallel/sharding/group_sharded_optimizer_stage2.py b/python/paddle/distributed/fleet/meta_parallel/sharding/group_sharded_optimizer_stage2.py
index 639bdf79ac9aa094ca634a20ad3eca00d1ec0eb4..00ec12a523f919a19c50c2ef357ae892a248469a 100644
--- a/python/paddle/distributed/fleet/meta_parallel/sharding/group_sharded_optimizer_stage2.py
+++ b/python/paddle/distributed/fleet/meta_parallel/sharding/group_sharded_optimizer_stage2.py
@@ -203,10 +203,6 @@ class GroupShardedOptimizerStage2(Optimizer):
         # Update optimizer parameters and adjust parameter storage and use according to rank.
         self._update_opt_status()
 
-    def _set_auxiliary_var(self, key, val):
-        super()._set_auxiliary_var(key, val)
-        self._optim._set_auxiliary_var(key, val)
-
     @paddle.autograd.no_grad()
     def _sync_params_and_buffers(self):
         """
diff --git a/python/paddle/incubate/optimizer/lookahead.py b/python/paddle/incubate/optimizer/lookahead.py
index bfa08c40556beca1111d250818a395956faa168e..b1ad5f3ecb0b5cdee8b263ea121a8fa63ece31fc 100644
--- a/python/paddle/incubate/optimizer/lookahead.py
+++ b/python/paddle/incubate/optimizer/lookahead.py
@@ -144,10 +144,6 @@ class LookAhead(Optimizer):
         self._global_step_var = None
         self._k_var = None
 
-    def _set_auxiliary_var(self, key, val):
-        super()._set_auxiliary_var(key, val)
-        self.inner_optimizer._set_auxiliary_var(key, val)
-
     @framework.dygraph_only
     @imperative_base.no_grad
     def step(self):
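
Note: all four deletions remove the same delegation override, in which a wrapper optimizer records an auxiliary variable on itself and then forwards it to the optimizer it wraps (`inner_opt`, `_optim`, or `inner_optimizer`, depending on the class). The sketch below is a minimal, hypothetical model of that pattern, not Paddle's actual classes; the `_auxiliary_vars` dict, the `_get_auxiliary_var` helper, and the `"found_inf"` key are assumptions used only for illustration.

```python
# Minimal sketch of the delegation pattern this diff removes.
# Hypothetical stand-ins for Paddle's classes; names such as
# _auxiliary_vars, _get_auxiliary_var, and "found_inf" are assumptions.


class Optimizer:
    def __init__(self):
        self._auxiliary_vars = {}  # assumed per-instance storage

    def _set_auxiliary_var(self, key, val):
        self._auxiliary_vars[key] = val

    def _get_auxiliary_var(self, key):
        return self._auxiliary_vars.get(key)


class WrapperOptimizer(Optimizer):
    """Models AscendOptimizer / MetaOptimizerBase / LookAhead, etc."""

    def __init__(self, inner_opt):
        super().__init__()
        self.inner_opt = inner_opt

    # The override being deleted: record the value locally, then forward
    # it so the wrapped optimizer observes the same auxiliary variable.
    def _set_auxiliary_var(self, key, val):
        super()._set_auxiliary_var(key, val)
        self.inner_opt._set_auxiliary_var(key, val)


inner = Optimizer()
outer = WrapperOptimizer(inner)
outer._set_auxiliary_var("found_inf", True)
# Both layers see the value only because of the forwarding override.
assert outer._get_auxiliary_var("found_inf") is True
assert inner._get_auxiliary_var("found_inf") is True
```

With per-instance storage as modeled here, deleting the override would leave the inner optimizer unaware of the variable, so these removals presumably accompany a base-class change (not shown in this diff) that makes auxiliary variables visible to inner optimizers without explicit forwarding.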