diff --git a/model_zoo/official/cv/ssd/scripts/run_distribute_train.sh b/model_zoo/official/cv/ssd/scripts/run_distribute_train.sh index 7175d22988387b02f0b164838b8a08329014d30b..73c0613ded0434b043a6743111f83190313d9db1 100644 --- a/model_zoo/official/cv/ssd/scripts/run_distribute_train.sh +++ b/model_zoo/official/cv/ssd/scripts/run_distribute_train.sh @@ -31,7 +31,7 @@ fi # Before start distribute train, first create mindrecord files. BASE_PATH=$(cd "`dirname $0`" || exit; pwd) cd $BASE_PATH/../ || exit -python train.py --only_create_dataset=1 +python train.py --only_create_dataset=True echo "After running the scipt, the network runs in the background. The log will be generated in LOGx/log.txt" @@ -57,7 +57,7 @@ do if [ $# == 5 ] then python train.py \ - --distribute=1 \ + --distribute=True \ --lr=$LR \ --dataset=$DATASET \ --device_num=$RANK_SIZE \ @@ -68,7 +68,7 @@ do if [ $# == 7 ] then python train.py \ - --distribute=1 \ + --distribute=True \ --lr=$LR \ --dataset=$DATASET \ --device_num=$RANK_SIZE \ diff --git a/model_zoo/official/cv/ssd/train.py b/model_zoo/official/cv/ssd/train.py index dad529860117f7d3cd14f6e732b345e8aeb38913..d92fa47ae554d9bd33d6032c02247676da7a11e6 100644 --- a/model_zoo/official/cv/ssd/train.py +++ b/model_zoo/official/cv/ssd/train.py @@ -17,6 +17,7 @@ import os import argparse +import ast import mindspore.nn as nn from mindspore import context, Tensor from mindspore.communication.management import init @@ -32,9 +33,10 @@ from src.init_params import init_net_param, filter_checkpoint_parameter def main(): parser = argparse.ArgumentParser(description="SSD training") - parser.add_argument("--only_create_dataset", type=bool, default=False, help="If set it true, only create " - "Mindrecord, default is False.") - parser.add_argument("--distribute", type=bool, default=False, help="Run distribute, default is False.") + parser.add_argument("--only_create_dataset", type=ast.literal_eval, default=False, + help="If set it true, only create Mindrecord, default is False.") + parser.add_argument("--distribute", type=ast.literal_eval, default=False, + help="Run distribute, default is False.") parser.add_argument("--device_id", type=int, default=0, help="Device id, default is 0.") parser.add_argument("--device_num", type=int, default=1, help="Use device nums, default is 1.") parser.add_argument("--lr", type=float, default=0.05, help="Learning rate, default is 0.05.") @@ -46,7 +48,8 @@ def main(): parser.add_argument("--pre_trained_epoch_size", type=int, default=0, help="Pretrained epoch size.") parser.add_argument("--save_checkpoint_epochs", type=int, default=10, help="Save checkpoint epochs, default is 10.") parser.add_argument("--loss_scale", type=int, default=1024, help="Loss scale, default is 1024.") - parser.add_argument("--filter_weight", type=bool, default=False, help="Filter weight parameters, default is False.") + parser.add_argument("--filter_weight", type=ast.literal_eval, default=False, + help="Filter weight parameters, default is False.") args_opt = parser.parse_args() context.set_context(mode=context.GRAPH_MODE, device_target="Ascend", device_id=args_opt.device_id) diff --git a/model_zoo/official/cv/yolov3_darknet53_quant/README.md b/model_zoo/official/cv/yolov3_darknet53_quant/README.md index 8a2b93c081668b3e4ebd411897ca60788c3c3994..60e274fa97a3ebb8ae809501e08a74dd22401356 100644 --- a/model_zoo/official/cv/yolov3_darknet53_quant/README.md +++ b/model_zoo/official/cv/yolov3_darknet53_quant/README.md @@ -1,35 +1,120 @@ -# YOLOV3-DarkNet53-Quant Example +# 
Contents

- [YOLOv3-DarkNet53-Quant Description](#yolov3-darknet53-quant-description)
- [Model Architecture](#model-architecture)
- [Dataset](#dataset)
- [Environment Requirements](#environment-requirements)
- [Quick Start](#quick-start)
- [Script Description](#script-description)
    - [Script and Sample Code](#script-and-sample-code)
    - [Script Parameters](#script-parameters)
    - [Training Process](#training-process)
        - [Training](#training)
        - [Distributed Training](#distributed-training)
    - [Evaluation Process](#evaluation-process)
        - [Evaluation](#evaluation)
- [Model Description](#model-description)
    - [Performance](#performance)
        - [Evaluation Performance](#evaluation-performance)
        - [Inference Performance](#inference-performance)
- [Description of Random Situation](#description-of-random-situation)
- [ModelZoo Homepage](#modelzoo-homepage)

-This is an example of training YOLOV3-DarkNet53-Quant with COCO2014 dataset in MindSpore.
-## Requirements

# [YOLOv3-DarkNet53-Quant Description](#contents)

-- Install [MindSpore](https://www.mindspore.cn/install/en).

You only look once (YOLO) is a state-of-the-art, real-time object detection system. YOLOv3 is extremely fast and accurate.

-- Download the dataset COCO2014.

Prior detection systems repurpose classifiers or localizers to perform detection. They apply the model to an image at multiple locations and scales; high-scoring regions of the image are considered detections.
YOLOv3 uses a totally different approach. It applies a single neural network to the full image. This network divides the image into regions and predicts bounding boxes and probabilities for each region. These bounding boxes are weighted by the predicted probabilities.

-> Unzip the COCO2014 dataset to any path you want, the folder should include train and eval dataset as follows:

YOLOv3 uses a few tricks to improve training and increase performance, including multi-scale predictions, a better backbone classifier, and more. The full details are in the paper!

To reduce the weight size and improve low-bit computing performance, int8 quantization is used.

[Paper](https://pjreddie.com/media/files/papers/YOLOv3.pdf): YOLOv3: An Incremental Improvement. Joseph Redmon, Ali Farhadi, University of Washington


# [Model Architecture](#contents)

YOLOv3 uses DarkNet53 to perform feature extraction. DarkNet53 is a hybrid of the network used in YOLOv2 (Darknet-19) and residual networks: it uses successive 3 × 3 and 1 × 1 convolutional layers, has some shortcut connections, and is significantly larger, with 53 convolutional layers.


# [Dataset](#contents)

Dataset used: [COCO2014](https://cocodataset.org/#download)

- Dataset size: 19G, 123,287 images, 80 object categories
    - Train: 13G, 82,783 images
    - Val: 6G, 40,504 images
    - Annotations: 241M, train/val annotations
- Data format: zip files
    - Note: Data will be processed in yolo_dataset.py. Unzip the files before using them; the expected folder layout is shown below.
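
For reference, the unzipped data is expected to follow the layout that the earlier version of this README showed, with the train, val, and annotation sets side by side:

```
.
└─dataset
  ├─train2014
  ├─val2014
  └─annotations
```
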
# [Environment Requirements](#contents)

- Hardware (Ascend)
    - Prepare the hardware environment with an Ascend processor. If you want to try Ascend, please send the [application form](https://obs-9be7.obs.cn-east-2.myhuaweicloud.com/file/other/Ascend%20Model%20Zoo%E4%BD%93%E9%AA%8C%E8%B5%84%E6%BA%90%E7%94%B3%E8%AF%B7%E8%A1%A8.docx) to ascend@huawei.com. Once approved, you can get access to the resources.
- Framework
    - [MindSpore](https://www.mindspore.cn/install/en)
- For more information, please check the resources below:
    - [MindSpore tutorials](https://www.mindspore.cn/tutorial/zh-CN/master/index.html)
    - [MindSpore API](https://www.mindspore.cn/api/zh-CN/master/index.html)


# [Quick Start](#contents)

After installing MindSpore via the official website, you can start training and evaluation on Ascend as follows:

```
-.
-└─dataset
-  ├─train2014
-  ├─val2014
-  └─annotations
# The yolov3_darknet53_noquant.ckpt used below is obtained by training YOLOv3-DarkNet53 as described in the paper.
# The resume_yolov3 parameter is required.
# The training_shape parameter defines the image shape for the network; the default is "",
# which means 10 different shapes are used as input shapes. A fixed shape can also be set.
# run training example (1p) by python command
python train.py \
    --data_dir=./dataset/coco2014 \
    --resume_yolov3=yolov3_darknet53_noquant.ckpt \
    --is_distributed=0 \
    --per_batch_size=16 \
    --lr=0.012 \
    --T_max=135 \
    --max_epoch=135 \
    --warmup_epochs=5 \
    --lr_scheduler=cosine_annealing > log.txt 2>&1 &

# standalone training example (1p) by shell script
sh run_standalone_train.sh dataset/coco2014 yolov3_darknet53_noquant.ckpt

# distributed training example (8p) by shell script
sh run_distribute_train.sh dataset/coco2014 yolov3_darknet53_noquant.ckpt rank_table_8p.json

# run evaluation by python command
python eval.py \
    --data_dir=./dataset/coco2014 \
    --pretrained=yolov3_quant.ckpt \
    --testing_shape=416 > log.txt 2>&1 &

# run evaluation by shell script
sh run_eval.sh dataset/coco2014/ checkpoint/yolov3_quant.ckpt 0
```

-## Structure
-```shell

# [Script Description](#contents)

## [Script and Sample Code](#contents)

```
.
└─yolov3_darknet53_quant
  ├─README.md
  ├─scripts
-    ├─run_standalone_train.sh          # launch standalone training(1p)
-    ├─run_distribute_train.sh          # launch distributed training(8p)
-    └─run_eval.sh                      # launch evaluating
    ├─run_standalone_train.sh           # launch standalone training(1p) in ascend
    ├─run_distribute_train.sh           # launch distributed training(8p) in ascend
    └─run_eval.sh                       # launch evaluating in ascend
  ├─src
    ├─__init__.py                       # python init file
    ├─config.py                         # parameter configuration
@@ -47,35 +132,79 @@ This is an example of training YOLOV3-DarkNet53-Quant with COCO2014 dataset in M
  └─train.py                            # train net
```

-## Running the example
-### Train
-
-#### Usage

## [Script Parameters](#contents)

```
-# distributed training
-sh run_distribute_train.sh [DATASET_PATH] [RESUME_YOLOV3] [RANK_TABLE_FILE]
-
-# standalone training
-sh run_standalone_train.sh [DATASET_PATH] [RESUME_YOLOV3]
Major parameters in train.py are as follows.

optional arguments:
  -h, --help            show this help message and exit
  --data_dir DATA_DIR   Train dataset directory. Default: "".
  --per_batch_size PER_BATCH_SIZE
                        Batch size per device. Default: 16.
  --resume_yolov3 RESUME_YOLOV3
                        The ckpt file of YOLOv3, which is used to fine-tune.
                        Default: ""
  --lr_scheduler LR_SCHEDULER
                        Learning rate scheduler, options: exponential,
                        cosine_annealing. Default: exponential
  --lr LR               Learning rate. Default: 0.012
  --lr_epochs LR_EPOCHS
                        Epochs at which lr changes, separated by ",".
                        Default: 92,105
  --lr_gamma LR_GAMMA   Decrease lr by a factor of exponential lr_scheduler.
                        Default: 0.1
  --eta_min ETA_MIN     Eta_min in cosine_annealing scheduler. Default: 0
  --T_max T_MAX         T-max in cosine_annealing scheduler. Default: 135
  --max_epoch MAX_EPOCH
                        Max epoch num to train the model. Default: 135
  --warmup_epochs WARMUP_EPOCHS
                        Warmup epochs. Default: 0
  --weight_decay WEIGHT_DECAY
                        Weight decay factor. Default: 0.0005
  --momentum MOMENTUM   Momentum. Default: 0.9
  --loss_scale LOSS_SCALE
                        Static loss scale. Default: 1024
  --label_smooth LABEL_SMOOTH
                        Whether to use label smoothing in CE. Default: 0
  --label_smooth_factor LABEL_SMOOTH_FACTOR
                        Smooth strength of original one-hot. Default: 0.1
  --log_interval LOG_INTERVAL
                        Logging interval steps. Default: 100
  --ckpt_path CKPT_PATH
                        Checkpoint save location. Default: "outputs/"
  --ckpt_interval CKPT_INTERVAL
                        Save checkpoint interval. Default: None
  --is_save_on_master IS_SAVE_ON_MASTER
                        Save ckpt on master or all ranks, 1 for master, 0 for
                        all ranks. Default: 1
  --is_distributed IS_DISTRIBUTED
                        Distribute train or not, 1 for yes, 0 for no. Default: 0
  --rank RANK           Local rank of distributed training. Default: 0
  --group_size GROUP_SIZE
                        World size of devices. Default: 1
  --need_profiler NEED_PROFILER
                        Whether to use the profiler, 1 for yes, 0 for no. Default: 0
  --training_shape TRAINING_SHAPE
                        Fix training shape. Default: ""
  --resize_rate RESIZE_RATE
                        Resize rate for multi-scale training. Default: None
```
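
To make the scheduler parameters above concrete, the sketch below shows how `--lr`, `--warmup_epochs`, `--T_max` and `--eta_min` interact when `--lr_scheduler=cosine_annealing` is selected. It is only an illustration of the schedule's shape under assumed values (the helper name and `steps_per_epoch` are made up here); the schedules actually used by train.py come from `src/lr_scheduler.py` (`warmup_cosine_annealing_lr` and related functions), not this code.

```python
import math

def warmup_cosine_lr(base_lr, steps_per_epoch, warmup_epochs, t_max, max_epoch, eta_min=0.0):
    """Illustrative warmup + cosine-annealing schedule, one lr value per step."""
    warmup_steps = int(warmup_epochs * steps_per_epoch)
    lr_each_step = []
    for step in range(int(max_epoch * steps_per_epoch)):
        if warmup_steps and step < warmup_steps:
            # linear warmup from 0 up to base_lr
            lr = base_lr * (step + 1) / warmup_steps
        else:
            # cosine decay from base_lr down to eta_min over T_max epochs
            epoch = step // steps_per_epoch
            lr = eta_min + (base_lr - eta_min) * (1.0 + math.cos(math.pi * epoch / t_max)) / 2.0
        lr_each_step.append(lr)
    return lr_each_step

# Roughly the Quick Start settings: lr=0.012, warmup_epochs=5, T_max=135, max_epoch=135.
schedule = warmup_cosine_lr(0.012, steps_per_epoch=100, warmup_epochs=5, t_max=135, max_epoch=135)
print(schedule[0], schedule[len(schedule) // 2], schedule[-1])
```
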
-#### Launch
-```bash
-# distributed training example(8p)
-sh run_distribute_train.sh dataset/coco2014 yolov3_darknet_noquant_ckpt/0-320_102400.ckpt rank_table_8p.json
-# standalone training example(1p)
-sh run_standalone_train.sh dataset/coco2014 yolov3_darknet_noquant_ckpt/0-320_102400.ckpt
-```

## [Training Process](#contents)

-> About rank_table.json, You can generate it by using the [hccl json configuration file](https://gitee.com/mindspore/mindspore/tree/master/model_zoo/utils/hccl_tools).

### Training on Ascend

For standalone training, run `run_standalone_train.sh` with the dataset path and the non-quantized checkpoint, as shown in [Quick Start](#quick-start).

-#### Result

### Distributed Training

-Training result will be stored in the scripts path, whose folder name begins with "train" or "train_parallel". You can find checkpoint file together with result like the followings in log.txt.

```
sh run_distribute_train.sh dataset/coco2014 yolov3_darknet53_noquant.ckpt rank_table_8p.json
```

The rank_table_8p.json file can be generated with the [hccl json configuration file](https://gitee.com/mindspore/mindspore/tree/master/model_zoo/utils/hccl_tools). The above shell script runs distributed training in the background. You can view the results through the file `train_parallel[X]/log.txt`. The loss values will be shown as follows:

```
# distribute training result(8p)
@@ -99,34 +228,28 @@ epoch[134], iter[86200], loss:36.641916, 137.91 imgs/sec, lr:1.6245529650404933e
epoch[134], iter[86300], loss:32.819769, 138.17 imgs/sec, lr:1.6245529650404933e-06
epoch[134], iter[86400], loss:35.603033, 142.23 imgs/sec, lr:1.6245529650404933e-06
epoch[134], iter[86500], loss:34.303755, 145.18 imgs/sec, lr:1.6245529650404933e-06
-...
```

-### Infer
-#### Usage
-
-```
-# infer
-sh run_eval.sh [DATASET_PATH] [CHECKPOINT_PATH] [DEVICE_ID]
-```

## [Evaluation Process](#contents)

-#### Launch

### Evaluation on Ascend

-```bash
-# infer with checkpoint
-sh run_eval.sh dataset/coco2014/ checkpoint/0-131.ckpt 0

Before running the command below, please check the checkpoint path used for evaluation.
```
python eval.py \
    --data_dir=./dataset/coco2014 \
    --pretrained=0-130_83330.ckpt \
    --testing_shape=416 > log.txt 2>&1 &
OR
sh run_eval.sh dataset/coco2014/ checkpoint/0-130_83330.ckpt 0
```

-> checkpoint can be produced in training process.
-
-
-#### Result
-
-Inference result will be stored in the scripts path, whose folder name is "eval". Under this, you can find result like the followings in log.txt.

The above python command will run in the background. 
You can view the results through the file "log.txt". The mAP of the test dataset will be as follows: ``` +# log.txt =============coco eval reulst========= Average Precision (AP) @[ IoU=0.50:0.95 | area= all | maxDets=100 ] = 0.310 Average Precision (AP) @[ IoU=0.50 | area= all | maxDets=100 ] = 0.531 @@ -141,3 +264,51 @@ Average Recall (AR) @[ IoU=0.50:0.95 | area= small | maxDets=100 ] = 0.232 Average Recall (AR) @[ IoU=0.50:0.95 | area=medium | maxDets=100 ] = 0.450 Average Recall (AR) @[ IoU=0.50:0.95 | area= large | maxDets=100 ] = 0.558 ``` + + +# [Model Description](#contents) +## [Performance](#contents) + +### Evaluation Performance + +| Parameters | Ascend | +| -------------------------- | ---------------------------------------------------------------------------------------------- | +| Model Version | YOLOv3_Darknet53_Quant | +| Resource | Ascend 910; CPU 2.60GHz, 192cores; Memory, 755G | +| uploaded Date | 06/31/2020 (month/day/year) | +| MindSpore Version | 0.6.0-alpha | +| Dataset | COCO2014 | +| Training Parameters | epoch=135, batch_size=16, lr=0.012, momentum=0.9 | +| Optimizer | Momentum | +| Loss Function | Sigmoid Cross Entropy with logits | +| outputs | boxes and label | +| Loss | 34 | +| Speed | 1pc: 135 ms/step; | +| Total time | 8pc: 24.5 hours | +| Parameters (M) | 62.1 | +| Checkpoint for Fine tuning | 474M (.ckpt file) | +| Scripts | https://gitee.com/mindspore/mindspore/tree/master/model_zoo/official/cv/yolov3_darknet53_quant | + + +### Inference Performance + +| Parameters | Ascend | +| ------------------- | --------------------------- | +| Model Version | YOLOv3_Darknet53_Quant | +| Resource | Ascend 910 | +| Uploaded Date | 06/31/2020 (month/day/year) | +| MindSpore Version | 0.6.0-alpha | +| Dataset | COCO2014, 40,504 images | +| batch_size | 1 | +| outputs | mAP | +| Accuracy | 8pcs: 31.0% | +| Model for inference | 474M (.ckpt file) | + + +# [Description of Random Situation](#contents) + +There are random seeds in distributed_sampler.py, transforms.py, yolo_dataset.py files. + + +# [ModelZoo Homepage](#contents) + Please check the official [homepage](https://gitee.com/mindspore/mindspore/tree/master/model_zoo). diff --git a/model_zoo/official/cv/yolov3_darknet53_quant/eval.py b/model_zoo/official/cv/yolov3_darknet53_quant/eval.py index 24260f6ee9bde2e8d083c65493048d5da18150a8..879cfe64991100980428d3e6a933c7681b475da5 100644 --- a/model_zoo/official/cv/yolov3_darknet53_quant/eval.py +++ b/model_zoo/official/cv/yolov3_darknet53_quant/eval.py @@ -210,20 +210,22 @@ def parse_args(): parser = argparse.ArgumentParser('mindspore coco testing') # dataset related - parser.add_argument('--data_dir', type=str, default='', help='train data dir') - parser.add_argument('--per_batch_size', default=1, type=int, help='batch size for per gpu') + parser.add_argument('--data_dir', type=str, default="", help='Train data dir. 
Default: ""') + parser.add_argument('--per_batch_size', default=1, type=int, help='Batch size for per device, Default: 1') # network related - parser.add_argument('--pretrained', default='', type=str, help='model_path, local pretrained model to load') + parser.add_argument('--pretrained', default="", type=str,\ + help='The model path, local pretrained model to load, Default: ""') # logging related - parser.add_argument('--log_path', type=str, default='outputs/', help='checkpoint save location') + parser.add_argument('--log_path', type=str, default="outputs/", help='Log save location, Default: "outputs/"') # detect_related - parser.add_argument('--nms_thresh', type=float, default=0.5, help='threshold for NMS') - parser.add_argument('--annFile', type=str, default='', help='path to annotation') - parser.add_argument('--testing_shape', type=str, default='', help='shape for test ') - parser.add_argument('--ignore_threshold', type=float, default=0.001, help='threshold to throw low quality boxes') + parser.add_argument('--nms_thresh', type=float, default=0.5, help='Threshold for NMS. Default: 0.5') + parser.add_argument('--annFile', type=str, default="", help='The path to annotation. Default: ""') + parser.add_argument('--testing_shape', type=str, default="", help='Shape for test. Default: ""') + parser.add_argument('--ignore_threshold', type=float, default=0.001,\ + help='Threshold to throw low quality boxes, Default: 0.001') args, _ = parser.parse_known_args() diff --git a/model_zoo/official/cv/yolov3_darknet53_quant/train.py b/model_zoo/official/cv/yolov3_darknet53_quant/train.py index 75d1eb09021a07eeb9f38a50610b09f8a08b9bc6..4a1fa9dc47695610d5c59d01e978b7b38c369cae 100644 --- a/model_zoo/official/cv/yolov3_darknet53_quant/train.py +++ b/model_zoo/official/cv/yolov3_darknet53_quant/train.py @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================ -"""YoloV3 train.""" +"""YoloV3-Darknet53-Quant train.""" import os import time @@ -32,7 +32,7 @@ from mindspore.train.quant import quant from src.yolo import YOLOV3DarkNet53, YoloWithLossCell, TrainingWrapper from src.logger import get_logger -from src.util import AverageMeter, load_backbone, get_param_groups +from src.util import AverageMeter, get_param_groups from src.lr_scheduler import warmup_step_lr, warmup_cosine_annealing_lr, \ warmup_cosine_annealing_lr_V2, warmup_cosine_annealing_lr_sample from src.yolo_dataset import create_yolo_dataset @@ -52,53 +52,60 @@ def parse_args(): parser = argparse.ArgumentParser('mindspore coco training') # dataset related - parser.add_argument('--data_dir', type=str, default='', help='train data dir') - parser.add_argument('--per_batch_size', default=32, type=int, help='batch size for per gpu') + parser.add_argument('--data_dir', type=str, default='', help='Train data dir. Default: ""') + parser.add_argument('--per_batch_size', default=16, type=int, help='Batch size for per device. Default: 16') # network related - parser.add_argument('--pretrained_backbone', default='', type=str, help='model_path, local pretrained backbone' - ' model to load') - parser.add_argument('--resume_yolov3', default='', type=str, help='path of pretrained yolov3') + parser.add_argument('--resume_yolov3', default='', type=str,\ + help='The ckpt file of yolov3-darknet53, which used to yolov3-darknet53 quant. 
Default: ""') # optimizer and lr related - parser.add_argument('--lr_scheduler', default='exponential', type=str, - help='lr-scheduler, option type: exponential, cosine_annealing') - parser.add_argument('--lr', default=0.001, type=float, help='learning rate of the training') - parser.add_argument('--lr_epochs', type=str, default='220,250', help='epoch of lr changing') - parser.add_argument('--lr_gamma', type=float, default=0.1, - help='decrease lr by a factor of exponential lr_scheduler') - parser.add_argument('--eta_min', type=float, default=0., help='eta_min in cosine_annealing scheduler') - parser.add_argument('--T_max', type=int, default=320, help='T-max in cosine_annealing scheduler') - parser.add_argument('--max_epoch', type=int, default=320, help='max epoch num to train the model') - parser.add_argument('--warmup_epochs', default=0, type=float, help='warmup epoch') - parser.add_argument('--weight_decay', type=float, default=0.0005, help='weight decay') - parser.add_argument('--momentum', type=float, default=0.9, help='momentum') + parser.add_argument('--lr_scheduler', default='exponential', type=str,\ + help='Learning rate scheduler, option type: exponential, ' + 'cosine_annealing. Default: exponential') + parser.add_argument('--lr', default=0.012, type=float, help='Learning rate of the training') + parser.add_argument('--lr_epochs', type=str, default='92,105',\ + help='Epoch of lr changing. Default: 92,105') + parser.add_argument('--lr_gamma', type=float, default=0.1,\ + help='Decrease lr by a factor of exponential lr_scheduler. Default: 0.1') + parser.add_argument('--eta_min', type=float, default=0.,\ + help='Eta_min in cosine_annealing scheduler. Default: 0.') + parser.add_argument('--T_max', type=int, default=135,\ + help='T-max in cosine_annealing scheduler. Default: 135') + parser.add_argument('--max_epoch', type=int, default=135,\ + help='Max epoch num to train the model. Default: 135') + parser.add_argument('--warmup_epochs', type=float, default=0, help='Warmup epochs. Default: 0') + parser.add_argument('--weight_decay', type=float, default=0.0005, help='Weight decay. Default: 0.0005') + parser.add_argument('--momentum', type=float, default=0.9, help='Momentum. Default: 0.9') # loss related - parser.add_argument('--loss_scale', type=int, default=1024, help='static loss scale') - parser.add_argument('--label_smooth', type=int, default=0, help='whether to use label smooth in CE') - parser.add_argument('--label_smooth_factor', type=float, default=0.1, help='smooth strength of original one-hot') + parser.add_argument('--loss_scale', type=int, default=1024, help='Static loss scale. Default: 1024') + parser.add_argument('--label_smooth', type=int, default=0, help='Whether to use label smooth in CE. Default: 0') + parser.add_argument('--label_smooth_factor', type=float, default=0.1,\ + help='Smooth strength of original one-hot. Default: 0.1') # logging related - parser.add_argument('--log_interval', type=int, default=100, help='logging interval') - parser.add_argument('--ckpt_path', type=str, default='outputs/', help='checkpoint save location') - parser.add_argument('--ckpt_interval', type=int, default=None, help='ckpt_interval') - parser.add_argument('--is_save_on_master', type=int, default=1, help='save ckpt on master or all rank') + parser.add_argument('--log_interval', type=int, default=100, help='Logging interval steps. Default: 100') + parser.add_argument('--ckpt_path', type=str, default='outputs/',\ + help='Checkpoint save location. 
Default: "outputs/"') + parser.add_argument('--ckpt_interval', type=int, default=None, help='Save checkpoint interval. Default: None') + parser.add_argument('--is_save_on_master', type=int, default=1,\ + help='Save ckpt on master or all rank, 1 for master, 0 for all ranks. Default: 1') # distributed related - parser.add_argument('--is_distributed', type=int, default=1, help='if multi device') - parser.add_argument('--rank', type=int, default=0, help='local rank of distributed') - parser.add_argument('--group_size', type=int, default=1, help='world size of distributed') - - # roma obs - parser.add_argument('--train_url', type=str, default="", help='train url') + parser.add_argument('--is_distributed', type=int, default=0,\ + help='Distribute train or not, 1 for yes, 0 for no. Default: 0') + parser.add_argument('--rank', type=int, default=0, help='Local rank of distributed, Default: 0') + parser.add_argument('--group_size', type=int, default=1, help='World size of device, Default: 1') # profiler init - parser.add_argument('--need_profiler', type=int, default=0, help='whether use profiler') + parser.add_argument('--need_profiler', type=int, default=0,\ + help='Whether use profiler, 1 for yes, 0 for no, Default: 0') # reset default config - parser.add_argument('--training_shape', type=str, default="", help='fix training shape') - parser.add_argument('--resize_rate', type=int, default=None, help='resize rate for multi-scale training') + parser.add_argument('--training_shape', type=str, default="", help='Fix training shape. Default: ""') + parser.add_argument('--resize_rate', type=int, default=None,\ + help='Resize rate for multi-scale training. Default: None') args, _ = parser.parse_known_args() if args.lr_scheduler == 'cosine_annealing' and args.max_epoch > args.T_max: @@ -141,7 +148,7 @@ def train(): args.logger.save_args(args) if args.need_profiler: - from mindinsight.profiler.profiling import Profiler + from mindspore.profiler.profiling import Profiler profiler = Profiler(output_path=args.outputs_dir, is_detail=True, is_show_op_path=True) loss_meter = AverageMeter('loss') @@ -159,12 +166,6 @@ def train(): # default is kaiming-normal default_recurisive_init(network) - if args.pretrained_backbone: - network = load_backbone(network, args.pretrained_backbone, args) - args.logger.info('load pre-trained backbone {} into network'.format(args.pretrained_backbone)) - else: - args.logger.info('Not load pre-trained backbone, please be careful') - if args.resume_yolov3: param_dict = load_checkpoint(args.resume_yolov3) param_dict_new = {} diff --git a/model_zoo/official/cv/yolov3_resnet18/README.md b/model_zoo/official/cv/yolov3_resnet18/README.md index cf03883226c1edc1302a320f9852593e7efb0e3f..cc7539cb5f09286e129caaecd18a9331a21fe0a6 100644 --- a/model_zoo/official/cv/yolov3_resnet18/README.md +++ b/model_zoo/official/cv/yolov3_resnet18/README.md @@ -118,9 +118,9 @@ After installing MindSpore via the official website, you can start training and ``` Major parameters in train.py and config.py as follows: - evice_num: Use device nums, default is 1. + device_num: Use device nums, default is 1. lr: Learning rate, default is 0.001. - epoch_size: Epoch size, default is 10. + epoch_size: Epoch size, default is 50. batch_size: Batch size, default is 32. pre_trained: Pretrained Checkpoint file path. pre_trained_epoch_size: Pretrained epoch size. 
diff --git a/model_zoo/official/cv/yolov3_resnet18/eval.py b/model_zoo/official/cv/yolov3_resnet18/eval.py index 65dc408a1502092b1a66b4d22baa00725491c25b..17823af0c6cd931b437575650bb4b273b33d9d6e 100644 --- a/model_zoo/official/cv/yolov3_resnet18/eval.py +++ b/model_zoo/official/cv/yolov3_resnet18/eval.py @@ -13,7 +13,7 @@ # limitations under the License. # ============================================================================ -"""Evaluation for yolo_v3""" +"""Evaluation for yolov3-resnet18""" import os import argparse import time diff --git a/model_zoo/official/cv/yolov3_resnet18/scripts/run_distribute_train.sh b/model_zoo/official/cv/yolov3_resnet18/scripts/run_distribute_train.sh index 10643e7a6a6c0eeb41871c32ef94e33fcf4d4241..0e7c48effbb3df459bb057ad7863d2330c2ec0d0 100644 --- a/model_zoo/official/cv/yolov3_resnet18/scripts/run_distribute_train.sh +++ b/model_zoo/official/cv/yolov3_resnet18/scripts/run_distribute_train.sh @@ -40,7 +40,7 @@ BASE_PATH=$(cd "`dirname $0`" || exit; pwd) cd $BASE_PATH/../ || exit # Before start distribute train, first create mindrecord files. -python train.py --only_create_dataset=1 --mindrecord_dir=$MINDRECORD_DIR --image_dir=$IMAGE_DIR \ +python train.py --only_create_dataset=True --mindrecord_dir=$MINDRECORD_DIR --image_dir=$IMAGE_DIR \ --anno_path=$ANNO_PATH if [ $? -ne 0 ] then @@ -72,7 +72,7 @@ do if [ $# == 6 ] then taskset -c $cmdopt python train.py \ - --distribute=1 \ + --distribute=True \ --lr=0.005 \ --device_num=$RANK_SIZE \ --device_id=$DEVICE_ID \ @@ -85,7 +85,7 @@ do if [ $# == 8 ] then taskset -c $cmdopt python train.py \ - --distribute=1 \ + --distribute=True \ --lr=0.005 \ --device_num=$RANK_SIZE \ --device_id=$DEVICE_ID \ diff --git a/model_zoo/official/cv/yolov3_resnet18/train.py b/model_zoo/official/cv/yolov3_resnet18/train.py index e0d309be9cd23b467e3205874012c2538582409d..08d8c1b210786f1259f59f4666ddac0dee2fdb95 100644 --- a/model_zoo/official/cv/yolov3_resnet18/train.py +++ b/model_zoo/official/cv/yolov3_resnet18/train.py @@ -24,6 +24,7 @@ Note if mindrecord_dir isn't empty, it will use mindrecord_dir rather than image import os import argparse +import ast import numpy as np import mindspore.nn as nn from mindspore import context, Tensor @@ -61,21 +62,21 @@ def init_net_param(network, init_value='ones'): def main(): parser = argparse.ArgumentParser(description="YOLOv3 train") - parser.add_argument("--only_create_dataset", type=bool, default=False, help="If set it true, only create " - "Mindrecord, default is false.") - parser.add_argument("--distribute", type=bool, default=False, help="Run distribute, default is false.") + parser.add_argument("--only_create_dataset", type=ast.literal_eval, default=False, + help="If set it true, only create Mindrecord, default is False.") + parser.add_argument("--distribute", type=ast.literal_eval, default=False, help="Run distribute, default is False.") parser.add_argument("--device_id", type=int, default=0, help="Device id, default is 0.") parser.add_argument("--device_num", type=int, default=1, help="Use device nums, default is 1.") parser.add_argument("--lr", type=float, default=0.001, help="Learning rate, default is 0.001.") parser.add_argument("--mode", type=str, default="sink", help="Run sink mode or not, default is sink") - parser.add_argument("--epoch_size", type=int, default=10, help="Epoch size, default is 10") + parser.add_argument("--epoch_size", type=int, default=50, help="Epoch size, default is 50") parser.add_argument("--batch_size", type=int, default=32, help="Batch size, 
default is 32.")
     parser.add_argument("--pre_trained", type=str, default=None, help="Pretrained checkpoint file path")
     parser.add_argument("--pre_trained_epoch_size", type=int, default=0, help="Pretrained epoch size")
     parser.add_argument("--save_checkpoint_epochs", type=int, default=5, help="Save checkpoint epochs, default is 5.")
     parser.add_argument("--loss_scale", type=int, default=1024, help="Loss scale, default is 1024.")
     parser.add_argument("--mindrecord_dir", type=str, default="./Mindrecord_train",
-                        help="Mindrecord directory. If the mindrecord_dir is empty, it wil generate mindrecord file by"
+                        help="Mindrecord directory. If the mindrecord_dir is empty, it will generate mindrecord file by "
                              "image_dir and anno_path. Note if mindrecord_dir isn't empty, it will use mindrecord_dir "
                              "rather than image_dir and anno_path. Default is ./Mindrecord_train")
     parser.add_argument("--image_dir", type=str, default="", help="Dataset directory, "