# Force FP16 evaluation on: training with AMP O2 implies FP16 eval.
msg = "PaddlePaddle only support FP16 evaluation when training with AMP O2 now. "
logger.warning(msg)
self.config["AMP"]["use_fp16_test"] = True
self.amp_eval = True

# TODO(gaotingquan): to compatible with different versions of Paddle
paddle_version = paddle.__version__[:3]
# paddle version < 2.3.0 and not develop
if paddle_version not in ["2.3", "0.0"]:
    if self.mode == "train":
        # Decorate model/optimizer for AMP; keep saved weights in FP32.
        self.model, self.optimizer = paddle.amp.decorate(
            models=self.model,
            optimizers=self.optimizer,
            level=self.amp_level,
            save_dtype='float32')
    elif self.amp_eval:
        if self.amp_level == "O2":
            msg = "The PaddlePaddle that installed not support FP16 evaluation in AMP O2. Please use PaddlePaddle version >= 2.3.0. Use FP32 evaluation instead and please notice the Eval Dataset output_fp16 should be 'False'."
            # NOTE(review): the next line immediately overwrites `msg` with an
            # unrelated warning that references `std_gpu_num`/`world_size`,
            # neither of which is defined in this fragment — looks like
            # misplaced residue from another part of the file; confirm intent.
            msg = f"The training strategy provided by PaddleClas is based on {std_gpu_num} gpus. But the number of gpu is {world_size} in current training. Please modify the stategy (learning rate, batch size and so on) if use this config to train."
# NOTE(review): this span is a byte-for-byte duplicate of the fragment that
# precedes it in the file — likely a paste error; confirm whether one copy
# should be removed.
# Force FP16 evaluation on: training with AMP O2 implies FP16 eval.
msg = "PaddlePaddle only support FP16 evaluation when training with AMP O2 now. "
logger.warning(msg)
self.config["AMP"]["use_fp16_test"] = True
self.amp_eval = True

# TODO(gaotingquan): to compatible with different versions of Paddle
paddle_version = paddle.__version__[:3]
# paddle version < 2.3.0 and not develop
if paddle_version not in ["2.3", "0.0"]:
    if self.mode == "train":
        # Decorate model/optimizer for AMP; keep saved weights in FP32.
        self.model, self.optimizer = paddle.amp.decorate(
            models=self.model,
            optimizers=self.optimizer,
            level=self.amp_level,
            save_dtype='float32')
    elif self.amp_eval:
        if self.amp_level == "O2":
            msg = "The PaddlePaddle that installed not support FP16 evaluation in AMP O2. Please use PaddlePaddle version >= 2.3.0. Use FP32 evaluation instead and please notice the Eval Dataset output_fp16 should be 'False'."
            # NOTE(review): the next line immediately overwrites `msg` with an
            # unrelated warning that references `std_gpu_num`/`world_size`,
            # neither of which is defined in this fragment — looks like
            # misplaced residue from another part of the file; confirm intent.
            msg = f"The training strategy provided by PaddleClas is based on {std_gpu_num} gpus. But the number of gpu is {world_size} in current training. Please modify the stategy (learning rate, batch size and so on) if use this config to train."