diff --git a/ppcls/configs/ImageNet/ResNet/ResNet50_amp_O2_ultra.yaml b/ppcls/configs/ImageNet/ResNet/ResNet50_amp_O2_ultra.yaml
index 6a4425b4048ce5c2881ca5bc55e4902b5f50396b..db718840ba8d9197e9ef5a29a938b2ed14ac782c 100644
--- a/ppcls/configs/ImageNet/ResNet/ResNet50_amp_O2_ultra.yaml
+++ b/ppcls/configs/ImageNet/ResNet/ResNet50_amp_O2_ultra.yaml
@@ -24,6 +24,8 @@ AMP:
   use_dynamic_loss_scaling: True
   # O2: pure fp16
   level: O2
+  # only FP16 evaluation is supported when AMP O2 is enabled
+  use_fp16_test: True
 
 # model architecture
 Arch:
diff --git a/ppcls/configs/ImageNet/SENet/SE_ResNeXt101_32x4d_amp_O2_ultra.yaml b/ppcls/configs/ImageNet/SENet/SE_ResNeXt101_32x4d_amp_O2_ultra.yaml
index da005d32911fdb942aa600b39937b660642f27f7..349de4413d927c3b73aabc2f08fce71bcf837191 100644
--- a/ppcls/configs/ImageNet/SENet/SE_ResNeXt101_32x4d_amp_O2_ultra.yaml
+++ b/ppcls/configs/ImageNet/SENet/SE_ResNeXt101_32x4d_amp_O2_ultra.yaml
@@ -38,6 +38,8 @@ AMP:
   use_dynamic_loss_scaling: True
   # O2: pure fp16
   level: O2
+  # only FP16 evaluation is supported when AMP O2 is enabled
+  use_fp16_test: True
 
 Optimizer:
   name: Momentum
diff --git a/ppcls/engine/evaluation/classification.py b/ppcls/engine/evaluation/classification.py
index e9836fcbbbf7275363c0e66019739c09b509b57e..3be00d52de0d00d181fc23940c9af5a67591558a 100644
--- a/ppcls/engine/evaluation/classification.py
+++ b/ppcls/engine/evaluation/classification.py
@@ -53,7 +53,7 @@ def classification_eval(engine, epoch_id=0):
             ]
         time_info["reader_cost"].update(time.time() - tic)
         batch_size = batch[0].shape[0]
-        batch[0] = paddle.to_tensor(batch[0]).astype("float32")
+        batch[0] = paddle.to_tensor(batch[0])
         if not engine.config["Global"].get("use_multilabel", False):
             batch[1] = batch[1].reshape([-1, 1]).astype("int64")
 
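
For context on what the patch changes: the two YAML configs enable `use_fp16_test` alongside AMP O2 (pure fp16), and the evaluation code stops hard-casting the input batch back to float32, so the batch keeps the dtype the pipeline produced. The sketch below is not the PaddleClas code path; it only illustrates how an eval loop could honor such a flag. The helper name `prepare_eval_batch` and the plain-dict AMP config are illustrative assumptions.

```python
# Minimal sketch, assuming a dict-style AMP config; not the PaddleClas implementation.
import numpy as np
import paddle


def prepare_eval_batch(batch_data, amp_config):
    """Convert a numpy batch to a paddle tensor without forcing float32."""
    x = paddle.to_tensor(batch_data)
    # Under O2 the model runs in pure fp16, so cast the eval input to fp16 as well
    # instead of forcing it back to fp32 (what the removed astype("float32") did).
    if amp_config.get("level") == "O2" and amp_config.get("use_fp16_test", False):
        x = x.astype("float16")
    return x


# Example usage with a dummy batch and an AMP config mirroring the YAML above.
amp_cfg = {"level": "O2", "use_fp16_test": True}
dummy = np.random.rand(4, 3, 224, 224).astype("float32")
x = prepare_eval_batch(dummy, amp_cfg)
print(x.dtype)  # paddle.float16 when the flag is set, otherwise the input dtype
```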