diff --git a/mindinsight/wizard/conf/templates/network/alexnet/scripts/run_eval.sh-tpl b/mindinsight/wizard/conf/templates/network/alexnet/scripts/run_eval.sh-tpl
index 898c5a436c663194340a726f811951969ad67688..cc2507e6b6331277b4fc55e30083b15b0b638867 100644
--- a/mindinsight/wizard/conf/templates/network/alexnet/scripts/run_eval.sh-tpl
+++ b/mindinsight/wizard/conf/templates/network/alexnet/scripts/run_eval.sh-tpl
@@ -61,5 +61,5 @@ cp -r ../src ./eval
 cd ./eval || exit
 env > env.log
 echo "start evaluation for device $DEVICE_ID"
-python eval.py --dataset_path=$PATH1 --checkpoint_path=$PATH2 &> log &
+python eval.py --dataset_path=$PATH1 --checkpoint_path=$PATH2 --dataset_sink_mode=False &> log &
 cd ..
diff --git a/mindinsight/wizard/conf/templates/network/resnet50/scripts/run_eval.sh-tpl b/mindinsight/wizard/conf/templates/network/resnet50/scripts/run_eval.sh-tpl
index 898c5a436c663194340a726f811951969ad67688..cc2507e6b6331277b4fc55e30083b15b0b638867 100644
--- a/mindinsight/wizard/conf/templates/network/resnet50/scripts/run_eval.sh-tpl
+++ b/mindinsight/wizard/conf/templates/network/resnet50/scripts/run_eval.sh-tpl
@@ -61,5 +61,5 @@ cp -r ../src ./eval
 cd ./eval || exit
 env > env.log
 echo "start evaluation for device $DEVICE_ID"
-python eval.py --dataset_path=$PATH1 --checkpoint_path=$PATH2 &> log &
+python eval.py --dataset_path=$PATH1 --checkpoint_path=$PATH2 --dataset_sink_mode=False &> log &
 cd ..
diff --git a/mindinsight/wizard/conf/templates/network/resnet50/train.py-tpl b/mindinsight/wizard/conf/templates/network/resnet50/train.py-tpl
index 770b59699aad1cec32f4922abc9bf5e757f02eba..218e6cc6cedf70b00853500a1ac757e740f14723 100644
--- a/mindinsight/wizard/conf/templates/network/resnet50/train.py-tpl
+++ b/mindinsight/wizard/conf/templates/network/resnet50/train.py-tpl
@@ -124,7 +124,7 @@ if __name__ == '__main__':
 {% if dataset=='ImageNet' %}
     if not cfg.use_label_smooth:
         cfg.label_smooth_factor = 0.0
-    loss = CrossEntLambropy(smooth_factor=cfg.label_smooth_factor, num_classes=cfg.num_classes)
+    loss = CrossEntropy(smooth_factor=cfg.label_smooth_factor, num_classes=cfg.num_classes)
 {% else %}
 {% if loss=='SoftmaxCrossEntropyWithLogits' %}
     loss = nn.SoftmaxCrossEntropyWithLogits(sparse=True, reduction='mean')
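
Note on the new --dataset_sink_mode=False flag: the change only has an effect if the generated eval.py template accepts that option and parses it into a real boolean. That template is not part of this diff, so the snippet below is a minimal sketch of the kind of argument handling the flag assumes; the argument names and defaults are assumptions, not the template's actual code. The detail worth remembering is that argparse with type=bool would turn the string "False" into True (any non-empty string is truthy), so an explicit literal parser is used instead.

    # Hypothetical sketch of the argparse handling that --dataset_sink_mode=False assumes.
    # Names and defaults are illustrative, not copied from the eval.py template.
    import argparse
    import ast

    parser = argparse.ArgumentParser(description='Evaluation')
    parser.add_argument('--dataset_path', type=str, required=True, help='Path to the evaluation dataset')
    parser.add_argument('--checkpoint_path', type=str, required=True, help='Path to the checkpoint file')
    # type=bool would map the string "False" to True; ast.literal_eval yields a real bool.
    parser.add_argument('--dataset_sink_mode', type=ast.literal_eval, default=True,
                        help='Whether to use dataset sink mode (True/False)')
    args = parser.parse_args()

    # args.dataset_sink_mode is now a genuine bool and could be forwarded as
    # model.eval(..., dataset_sink_mode=args.dataset_sink_mode)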