diff --git a/paddle_hub/finetune/config.py b/paddle_hub/finetune/config.py
index 900e30033c51f741ad2c7c2ee8679fe89806231f..ec70c26efe3446b62a0560614622271e4db29cc5 100644
--- a/paddle_hub/finetune/config.py
+++ b/paddle_hub/finetune/config.py
@@ -23,16 +23,11 @@ class FinetuneConfig(object):
                  eval_interval=100,
                  save_ckpt_interval=None,
                  use_cuda=False,
-                 learning_rate=1e-4,
                  checkpoint_dir=None,
                  num_epoch=10,
                  batch_size=None,
-                 max_seq_len=128,
-                 weight_decay=None,
-                 warmup_proportion=0.0,
                  enable_memory_optim=True,
-                 strategy=None,
-                 optimizer="adam"):
+                 strategy=None):
         """ Construct finetune Config """
         self._log_interval = log_interval
         self._eval_interval = eval_interval
@@ -82,18 +77,6 @@ class FinetuneConfig(object):
     def batch_size(self):
         return self._batch_size
 
-    @property
-    def max_seq_len(self):
-        return self._max_seq_len
-
-    @property
-    def weight_decay(self):
-        return self._weight_decay
-
-    @property
-    def warmup_proportion(self):
-        return self._warmup_proportion
-
     @property
     def strategy(self):
         return self._strategy
@@ -101,7 +84,3 @@ class FinetuneConfig(object):
     @property
     def enable_memory_optim(self):
         return self._enable_memory_optim
-
-    @property
-    def optimizer(self):
-        return self._optimizer
diff --git a/paddle_hub/finetune/strategy.py b/paddle_hub/finetune/strategy.py
index 445c0f5ed3aabe0838fb24edbca4c7ee36ee7fa7..a9ceffe3407984715fa5cf4b74fc62a49403b18e 100644
--- a/paddle_hub/finetune/strategy.py
+++ b/paddle_hub/finetune/strategy.py
@@ -37,6 +37,10 @@ class DefaultStrategy(object):
         else:
             raise ValueError("DefaultStrategy's optimizer is None")
 
+    # TODO complete __str__()
+    def __str__(self):
+        return "DefaultStrategy"
+
 
 class BERTFinetuneStrategy(DefaultStrategy):
     def __init__(self,
@@ -88,3 +92,7 @@ class BERTFinetuneStrategy(DefaultStrategy):
             os.environ.get('CPU_NUM', multiprocessing.cpu_count()))
         return dev_count
+
+    # TODO complete __str__()
+    def __str__(self):
+        return "BERTFinetuneStrategy"
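
The net effect of the config.py hunks is that optimizer-related hyperparameters (learning_rate, optimizer, max_seq_len, weight_decay, warmup_proportion) no longer live on FinetuneConfig; they are expected to travel with the strategy object instead. Below is a minimal usage sketch of the intended call pattern after this change. The BERTFinetuneStrategy keyword names are assumptions inferred from the fields removed from FinetuneConfig; its full __init__ signature is truncated in this diff.

# Hypothetical usage sketch; the BERTFinetuneStrategy keywords are
# assumptions, not confirmed by the (truncated) __init__ shown above.
from paddle_hub.finetune.config import FinetuneConfig
from paddle_hub.finetune.strategy import BERTFinetuneStrategy

strategy = BERTFinetuneStrategy(
    learning_rate=1e-4,      # was FinetuneConfig(learning_rate=...)
    weight_decay=0.01,       # was FinetuneConfig(weight_decay=...)
    warmup_proportion=0.1)   # was FinetuneConfig(warmup_proportion=...)

config = FinetuneConfig(
    num_epoch=10,
    batch_size=32,
    checkpoint_dir="bert_ckpt",
    strategy=strategy)       # the strategy now owns the optimizer settings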
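
Both new __str__() methods are placeholders flagged with TODOs. One hedged way to complete them is to echo the hyperparameters the strategy carries, so log lines identify the exact configuration; the private attribute names below are illustrative assumptions, not fields confirmed by this diff.

# Hypothetical completion of the TODO __str__() methods; adapt the
# attribute names (self._learning_rate, etc.) to the real fields.
class BERTFinetuneStrategy(DefaultStrategy):
    ...
    def __str__(self):
        return ("BERTFinetuneStrategy(learning_rate={}, "
                "warmup_proportion={}, weight_decay={})".format(
                    self._learning_rate, self._warmup_proportion,
                    self._weight_decay))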