diff --git a/paddlehub/finetune/config.py b/paddlehub/finetune/config.py
index 16a5d3927fdeac8bb2a9bac31c6308b91b9f6e3e..6e79609acd67218299212fdad06c484ed5a7fb96 100644
--- a/paddlehub/finetune/config.py
+++ b/paddlehub/finetune/config.py
@@ -33,7 +33,7 @@ class RunConfig(object):
                  use_cuda=False,
                  checkpoint_dir=None,
                  num_epoch=10,
-                 batch_size=None,
+                 batch_size=8,
                  enable_memory_optim=True,
                  strategy=None):
         """ Construct finetune Config """
diff --git a/paddlehub/finetune/strategy.py b/paddlehub/finetune/strategy.py
index a6dc7afca72247cb57da9a187b899c652e6d3db8..4015d16fda3150d973b8b1ead12fe572814fa6b9 100644
--- a/paddlehub/finetune/strategy.py
+++ b/paddlehub/finetune/strategy.py
@@ -47,6 +47,27 @@ class DefaultStrategy(object):
         if self._optimizer_name.lower() == "sgd":
             self.optimizer = fluid.optimizer.SGD(
                 learning_rate=self.learning_rate)
+        elif self._optimizer_name.lower() == "adagrad":
+            self.optimizer = fluid.optimizer.Adagrad(
+                learning_rate=self.learning_rate)
+        elif self._optimizer_name.lower() == "adamax":
+            self.optimizer = fluid.optimizer.Adamax(
+                learning_rate=self.learning_rate)
+        elif self._optimizer_name.lower() == "decayedadagrad":
+            self.optimizer = fluid.optimizer.DecayedAdagrad(
+                learning_rate=self.learning_rate)
+        elif self._optimizer_name.lower() == "ftrl":
+            self.optimizer = fluid.optimizer.Ftrl(
+                learning_rate=self.learning_rate)
+        elif self._optimizer_name.lower() == "larsmomentum":
+            self.optimizer = fluid.optimizer.LarsMomentum(
+                learning_rate=self.learning_rate, momentum=0.9)  # momentum is required; 0.9 is an assumed default
+        elif self._optimizer_name.lower() == "momentum":
+            self.optimizer = fluid.optimizer.Momentum(
+                learning_rate=self.learning_rate, momentum=0.9)  # momentum is required; 0.9 is an assumed default
+        elif self._optimizer_name.lower() == "rmsprop":
+            self.optimizer = fluid.optimizer.RMSPropOptimizer(
+                learning_rate=self.learning_rate)
         else:
             self.optimizer = fluid.optimizer.Adam(
                 learning_rate=self.learning_rate)
@@ -132,6 +153,27 @@ class DefaultFinetuneStrategy(DefaultStrategy):
         if self._optimizer_name.lower() == "sgd":
             self.optimizer = fluid.optimizer.SGD(
                 learning_rate=self.learning_rate)
+        elif self._optimizer_name.lower() == "adagrad":
+            self.optimizer = fluid.optimizer.Adagrad(
+                learning_rate=self.learning_rate)
+        elif self._optimizer_name.lower() == "adamax":
+            self.optimizer = fluid.optimizer.Adamax(
+                learning_rate=self.learning_rate)
+        elif self._optimizer_name.lower() == "decayedadagrad":
+            self.optimizer = fluid.optimizer.DecayedAdagrad(
+                learning_rate=self.learning_rate)
+        elif self._optimizer_name.lower() == "ftrl":
+            self.optimizer = fluid.optimizer.Ftrl(
+                learning_rate=self.learning_rate)
+        elif self._optimizer_name.lower() == "larsmomentum":
+            self.optimizer = fluid.optimizer.LarsMomentum(
+                learning_rate=self.learning_rate, momentum=0.9)  # momentum is required; 0.9 is an assumed default
+        elif self._optimizer_name.lower() == "momentum":
+            self.optimizer = fluid.optimizer.Momentum(
+                learning_rate=self.learning_rate, momentum=0.9)  # momentum is required; 0.9 is an assumed default
+        elif self._optimizer_name.lower() == "rmsprop":
+            self.optimizer = fluid.optimizer.RMSPropOptimizer(
+                learning_rate=self.learning_rate)
         else:
             self.optimizer = fluid.optimizer.Adam(
                 learning_rate=self.learning_rate)
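
Below is a minimal usage sketch, not part of the patch, showing how the new optimizer choices could be exercised together with the changed batch_size default. The keyword names for DefaultFinetuneStrategy (learning_rate, optimizer_name) are assumptions inferred from the self.learning_rate and self._optimizer_name attributes used in the diff; the RunConfig arguments are the ones visible above.

# Illustrative only: DefaultFinetuneStrategy keyword names are assumed from the
# attributes referenced in the patch (learning_rate, _optimizer_name).
from paddlehub.finetune.config import RunConfig
from paddlehub.finetune.strategy import DefaultFinetuneStrategy

# Optimizer names are matched case-insensitively via _optimizer_name.lower(),
# so "RMSProp" and "rmsprop" both select fluid.optimizer.RMSPropOptimizer.
strategy = DefaultFinetuneStrategy(
    learning_rate=1e-4,
    optimizer_name="rmsprop")

config = RunConfig(
    use_cuda=False,
    num_epoch=3,
    batch_size=8,      # 8 is now also the default when batch_size is omitted
    strategy=strategy)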