diff --git a/paddlehub/finetune/strategy.py b/paddlehub/finetune/strategy.py
index 4166a018cc560ad85885accd5e4d4e45e687b3e0..a6dc7afca72247cb57da9a187b899c652e6d3db8 100644
--- a/paddlehub/finetune/strategy.py
+++ b/paddlehub/finetune/strategy.py
@@ -44,12 +44,12 @@ class DefaultStrategy(object):
         self._optimizer_name = optimizer_name
 
     def execute(self, loss):
-        if self.optimizer.lower() == "adam":
-            self.optimizer = fluid.optimizer.Adam(
-                learning_rate=self.learning_rate)
-        elif self.optimizer.lower() == "sgd":
+        if self._optimizer_name.lower() == "sgd":
             self.optimizer = fluid.optimizer.SGD(
                 learning_rate=self.learning_rate)
+        else:
+            self.optimizer = fluid.optimizer.Adam(
+                learning_rate=self.learning_rate)
 
         if self.optimizer is not None:
             self.optimizer.minimize(loss)
@@ -129,12 +129,12 @@ class DefaultFinetuneStrategy(DefaultStrategy):
         self.regularization_coeff = regularization_coeff
 
     def execute(self, loss):
-        if self._optimizer_name.lower() == "adam":
-            self.optimizer = fluid.optimizer.Adam(
-                learning_rate=self.learning_rate)
-        elif self._optimizer_name.lower() == "sgd":
+        if self._optimizer_name.lower() == "sgd":
             self.optimizer = fluid.optimizer.SGD(
                 learning_rate=self.learning_rate)
+        else:
+            self.optimizer = fluid.optimizer.Adam(
+                learning_rate=self.learning_rate)
 
         # get pretrained parameters
         program = loss.block.program
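
For reference, a minimal sketch of how the corrected dispatch can be exercised. The toy fluid program below and the `DefaultStrategy(learning_rate=..., optimizer_name=...)` constructor call are assumptions inferred from the patch context, not part of the patch itself:

    import paddle.fluid as fluid
    from paddlehub.finetune.strategy import DefaultStrategy

    main_prog, startup_prog = fluid.Program(), fluid.Program()
    with fluid.program_guard(main_prog, startup_prog):
        # Toy regression network, only to produce a loss variable.
        x = fluid.layers.data(name="x", shape=[13], dtype="float32")
        y = fluid.layers.data(name="y", shape=[1], dtype="float32")
        pred = fluid.layers.fc(input=x, size=1)
        loss = fluid.layers.mean(
            fluid.layers.square_error_cost(input=pred, label=y))

        # "sgd" selects fluid.optimizer.SGD; any other name now falls through
        # to Adam instead of calling .lower() on the not-yet-created
        # self.optimizer attribute, which previously raised AttributeError.
        strategy = DefaultStrategy(learning_rate=1e-3, optimizer_name="sgd")
        strategy.execute(loss)  # builds the optimizer and calls minimize(loss)

Note the design choice: with Adam moved into the `else` branch, `self.optimizer` is always assigned, so Adam becomes the default for any unrecognized `optimizer_name` and the `if self.optimizer is not None` guard can no longer fail.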