diff --git a/model_zoo/official/cv/alexnet/train.py b/model_zoo/official/cv/alexnet/train.py index f3856161b20418c34e99446fde6f73a87cdcfaa3..db44e5450f25750e8e9ff525cadd4467b66d5302 100644 --- a/model_zoo/official/cv/alexnet/train.py +++ b/model_zoo/official/cv/alexnet/train.py @@ -30,7 +30,9 @@ from mindspore import Tensor from mindspore.train import Model from mindspore.nn.metrics import Accuracy from mindspore.train.callback import ModelCheckpoint, CheckpointConfig, LossMonitor, TimeMonitor +from mindspore.common import set_seed +set_seed(1) if __name__ == "__main__": parser = argparse.ArgumentParser(description='MindSpore AlexNet Example') diff --git a/model_zoo/official/cv/deeplabv3/train.py b/model_zoo/official/cv/deeplabv3/train.py index 0269d5f54093432a91ea11771c251fa81abc88e9..1e501105c0202aeee6c033853234dabba2ed4583 100644 --- a/model_zoo/official/cv/deeplabv3/train.py +++ b/model_zoo/official/cv/deeplabv3/train.py @@ -21,11 +21,14 @@ from mindspore import Model from mindspore.context import ParallelMode from mindspore.train.serialization import load_checkpoint, load_param_into_net from mindspore.train.callback import Callback, CheckpointConfig, ModelCheckpoint, TimeMonitor +from mindspore.common import set_seed from src.md_dataset import create_dataset from src.losses import OhemLoss from src.deeplabv3 import deeplabv3_resnet50 from src.config import config +set_seed(1) + parser = argparse.ArgumentParser(description="Deeplabv3 training") parser.add_argument("--distribute", type=str, default="false", help="Run distribute, default is false.") parser.add_argument('--data_url', required=True, default=None, help='Train data url') diff --git a/model_zoo/official/cv/faster_rcnn/README.md b/model_zoo/official/cv/faster_rcnn/README.md index dfe8c74624d87f6d1210919fe7664363bd1ecf2b..c73c855c352cc71459f9e5bda53c7711d22c8d5b 100644 --- a/model_zoo/official/cv/faster_rcnn/README.md +++ b/model_zoo/official/cv/faster_rcnn/README.md @@ -87,7 +87,9 @@ Dataset used: 
[COCO2017]() After installing MindSpore via the official website, you can start training and evaluation as follows: -Note: 1.the first run will generate the mindeocrd file, which will take a long time. 2. pretrained model is a resnet50 checkpoint that trained over ImageNet2012. 3. VALIDATION_JSON_FILE is label file. CHECKPOINT_PATH is a checkpoint file after trained. +Note: 1.the first run will generate the mindrecord file, which will take a long time. + 2.pretrained model is a resnet50 checkpoint that trained over ImageNet2012. + 3.VALIDATION_JSON_FILE is label file. CHECKPOINT_PATH is a checkpoint file after trained. ``` # standalone training @@ -106,7 +108,7 @@ sh run_eval_ascend.sh [VALIDATION_JSON_FILE] [CHECKPOINT_PATH] ```shell . -└─FasterRcnn +└─faster_rcnn ├─README.md // descriptions about fasterrcnn ├─scripts ├─run_standalone_train_ascend.sh // shell script for standalone on ascend @@ -148,6 +150,7 @@ sh run_distribute_train_ascend.sh [RANK_TABLE_FILE] [PRETRAINED_MODEL] > Rank_table.json which is specified by RANK_TABLE_FILE is needed when you are running a distribute task. You can generate it by using the [hccl_tools](https://gitee.com/mindspore/mindspore/tree/master/model_zoo/utils/hccl_tools). > As for PRETRAINED_MODEL,it should be a ResNet50 checkpoint that trained over ImageNet2012. Ready-made pretrained_models are not available now. Stay tuned. +> The original dataset path needs to be in the config.py, you can select "coco_root" or "image_dir". ### Result @@ -205,10 +208,10 @@ Eval result will be stored in the example path, whose folder name is "eval". 
Und | -------------------------- | ----------------------------------------------------------- | | Model Version | V1 | | Resource | Ascend 910 ;CPU 2.60GHz,56cores;Memory,314G | -| uploaded Date | 06/01/2020 (month/day/year) | -| MindSpore Version | 0.3.0-alpha | +| uploaded Date | 08/31/2020 (month/day/year) | +| MindSpore Version | 0.7.0-beta | | Dataset | COCO2017 | -| Training Parameters | epoch=12, batch_size = 2 | +| Training Parameters | epoch=12, batch_size=2 | | Optimizer | SGD | | Loss Function | Softmax Cross Entropy ,Sigmoid Cross Entropy,SmoothL1Loss | | Speed | 1pc: 190 ms/step; 8pcs: 200 ms/step | @@ -223,12 +226,12 @@ Eval result will be stored in the example path, whose folder name is "eval". Und | ------------------- | --------------------------- | | Model Version | V1 | | Resource | Ascend 910 | -| Uploaded Date | 06/01/2020 (month/day/year) | -| MindSpore Version | 0.3.0-alpha | +| Uploaded Date | 08/31/2020 (month/day/year) | +| MindSpore Version | 0.7.0-beta | | Dataset | COCO2017 | | batch_size | 2 | | outputs | mAP | -| Accuracy | IoU=0.50: 58.6% | +| Accuracy | IoU=0.50: 57.6% | | Model for inference | 250M (.ckpt file) | # [ModelZoo Homepage](#contents) diff --git a/model_zoo/official/cv/faster_rcnn/eval.py b/model_zoo/official/cv/faster_rcnn/eval.py index 20497350461e0399d5be008e1f752c1fdf89e630..d8d960e0c6ead55229dba2820ac55df4275319c1 100644 --- a/model_zoo/official/cv/faster_rcnn/eval.py +++ b/model_zoo/official/cv/faster_rcnn/eval.py @@ -17,21 +17,18 @@ import os import argparse import time -import random import numpy as np from pycocotools.coco import COCO from mindspore import context, Tensor from mindspore.train.serialization import load_checkpoint, load_param_into_net -import mindspore.dataset.engine as de +from mindspore.common import set_seed from src.FasterRcnn.faster_rcnn_r50 import Faster_Rcnn_Resnet50 from src.config import config from src.dataset import data_to_mindrecord_byte_image, create_fasterrcnn_dataset from src.util 
import coco_eval, bbox2result_1image, results2json -random.seed(1) -np.random.seed(1) -de.config.set_seed(1) +set_seed(1) parser = argparse.ArgumentParser(description="FasterRcnn evaluation") parser.add_argument("--dataset", type=str, default="coco", help="Dataset, default is coco.") diff --git a/model_zoo/official/cv/faster_rcnn/train.py b/model_zoo/official/cv/faster_rcnn/train.py index a6d09874f9f6c0fa5b2606105011bd8def1edf56..2add1913c5a576fb656bd94a74bbd9fa640650b9 100644 --- a/model_zoo/official/cv/faster_rcnn/train.py +++ b/model_zoo/official/cv/faster_rcnn/train.py @@ -19,8 +19,6 @@ import os import time import argparse import ast -import random -import numpy as np import mindspore.common.dtype as mstype from mindspore import context, Tensor @@ -30,7 +28,7 @@ from mindspore.train import Model from mindspore.context import ParallelMode from mindspore.train.serialization import load_checkpoint, load_param_into_net from mindspore.nn import SGD -import mindspore.dataset.engine as de +from mindspore.common import set_seed from src.FasterRcnn.faster_rcnn_r50 import Faster_Rcnn_Resnet50 from src.network_define import LossCallBack, WithLossCell, TrainOneStepCell, LossNet @@ -38,9 +36,7 @@ from src.config import config from src.dataset import data_to_mindrecord_byte_image, create_fasterrcnn_dataset from src.lr_schedule import dynamic_lr -random.seed(1) -np.random.seed(1) -de.config.set_seed(1) +set_seed(1) parser = argparse.ArgumentParser(description="FasterRcnn training") parser.add_argument("--run_distribute", type=ast.literal_eval, default=False, help="Run distribute, default: false.") @@ -78,18 +74,24 @@ if __name__ == '__main__': os.makedirs(mindrecord_dir) if args_opt.dataset == "coco": if os.path.isdir(config.coco_root): + if not os.path.exists(config.coco_root): + print("Please make sure config:coco_root is valid.") + raise ValueError(config.coco_root) print("Create Mindrecord. 
It may take some time.") data_to_mindrecord_byte_image("coco", True, prefix) print("Create Mindrecord Done, at {}".format(mindrecord_dir)) else: print("coco_root not exits.") else: - if os.path.isdir(config.IMAGE_DIR) and os.path.exists(config.ANNO_PATH): + if os.path.isdir(config.image_dir) and os.path.exists(config.anno_path): + if not os.path.exists(config.image_dir): + print("Please make sure config:image_dir is valid.") + raise ValueError(config.image_dir) print("Create Mindrecord. It may take some time.") data_to_mindrecord_byte_image("other", True, prefix) print("Create Mindrecord Done, at {}".format(mindrecord_dir)) else: - print("IMAGE_DIR or ANNO_PATH not exits.") + print("image_dir or anno_path not exits.") while not os.path.exists(mindrecord_file + ".db"): time.sleep(5) diff --git a/model_zoo/official/cv/googlenet/eval.py b/model_zoo/official/cv/googlenet/eval.py index 4118a7294fd02dee1d644c35d76a0bb8d00008e0..21c902c466827fffcab271a787e4351d8f94afe8 100644 --- a/model_zoo/official/cv/googlenet/eval.py +++ b/model_zoo/official/cv/googlenet/eval.py @@ -23,11 +23,14 @@ from mindspore import context from mindspore.nn.optim.momentum import Momentum from mindspore.train.model import Model from mindspore.train.serialization import load_checkpoint, load_param_into_net +from mindspore.common import set_seed from src.config import cifar_cfg as cfg from src.dataset import create_dataset from src.googlenet import GoogleNet +set_seed(1) + parser = argparse.ArgumentParser(description='googlenet') parser.add_argument('--checkpoint_path', type=str, default=None, help='Checkpoint file path') args_opt = parser.parse_args() diff --git a/model_zoo/official/cv/googlenet/train.py b/model_zoo/official/cv/googlenet/train.py index 9eaf3130ed45117ec79eafc3588e87b26d391146..3e0fae74118d6b5cb32e11138454f7dc37f27438 100644 --- a/model_zoo/official/cv/googlenet/train.py +++ b/model_zoo/official/cv/googlenet/train.py @@ -18,7 +18,6 @@ python train.py """ import argparse import os 
-import random import numpy as np @@ -31,13 +30,13 @@ from mindspore.train.callback import ModelCheckpoint, CheckpointConfig, LossMoni from mindspore.train.model import Model from mindspore.context import ParallelMode from mindspore.train.serialization import load_checkpoint, load_param_into_net +from mindspore.common import set_seed from src.config import cifar_cfg as cfg from src.dataset import create_dataset from src.googlenet import GoogleNet -random.seed(1) -np.random.seed(1) +set_seed(1) def lr_steps(global_step, lr_max=None, total_epochs=None, steps_per_epoch=None): """Set learning rate.""" diff --git a/model_zoo/official/cv/inceptionv3/train.py b/model_zoo/official/cv/inceptionv3/train.py index 13514f0508ca3c3a574de1b5ae271e6c0a6f0f98..b3af5beefbd98ae02933633221c434026fdba728 100644 --- a/model_zoo/official/cv/inceptionv3/train.py +++ b/model_zoo/official/cv/inceptionv3/train.py @@ -15,8 +15,6 @@ """train_imagenet.""" import argparse import os -import random -import numpy as np import mindspore.nn as nn from mindspore import Tensor @@ -27,9 +25,9 @@ from mindspore.nn.optim.rmsprop import RMSProp from mindspore.train.callback import ModelCheckpoint, CheckpointConfig, LossMonitor, TimeMonitor from mindspore.train.model import Model from mindspore.train.serialization import load_checkpoint, load_param_into_net -from mindspore import dataset as de from mindspore.train.loss_scale_manager import FixedLossScaleManager from mindspore.common.initializer import XavierUniform, initializer +from mindspore.common import set_seed from src.config import config_gpu, config_ascend from src.dataset import create_dataset @@ -37,9 +35,7 @@ from src.inception_v3 import InceptionV3 from src.lr_generator import get_lr from src.loss import CrossEntropy -random.seed(1) -np.random.seed(1) -de.config.set_seed(1) +set_seed(1) if __name__ == '__main__': @@ -94,7 +90,6 @@ if __name__ == '__main__': if args_opt.platform == "Ascend": for param in net.trainable_params(): if 'beta' not in 
param.name and 'gamma' not in param.name and 'bias' not in param.name: - np.random.seed(seed=1) param.set_parameter_data(initializer(XavierUniform(), param.data.shape, param.data.dtype)) group_params = [{'params': decayed_params, 'weight_decay': cfg.weight_decay}, {'params': no_decayed_params}, diff --git a/model_zoo/official/cv/lenet/train.py b/model_zoo/official/cv/lenet/train.py index 7230245d85cae4954eb355123099b770132c53b4..4dbcaedae58ade2aca1381719be038e63abc2b12 100644 --- a/model_zoo/official/cv/lenet/train.py +++ b/model_zoo/official/cv/lenet/train.py @@ -29,7 +29,9 @@ from mindspore import context from mindspore.train.callback import ModelCheckpoint, CheckpointConfig, LossMonitor, TimeMonitor from mindspore.train import Model from mindspore.nn.metrics import Accuracy +from mindspore.common import set_seed +set_seed(1) if __name__ == "__main__": parser = argparse.ArgumentParser(description='MindSpore Lenet Example') diff --git a/model_zoo/official/cv/lenet_quant/train_quant.py b/model_zoo/official/cv/lenet_quant/train_quant.py index 9092cc69d9c6806fc6b19163b9552b806540a98d..e24ca5dec45a6478a000d0f6952a9b79a79edcc6 100644 --- a/model_zoo/official/cv/lenet_quant/train_quant.py +++ b/model_zoo/official/cv/lenet_quant/train_quant.py @@ -28,11 +28,14 @@ from mindspore.train import Model from mindspore.nn.metrics import Accuracy from mindspore.train.quant import quant from mindspore.train.quant.quant_utils import load_nonquant_param_into_quant_net +from mindspore.common import set_seed from src.dataset import create_dataset from src.config import mnist_cfg as cfg from src.lenet_fusion import LeNet5 as LeNet5Fusion from src.loss_monitor import LossMonitor +set_seed(1) + parser = argparse.ArgumentParser(description='MindSpore MNIST Example') parser.add_argument('--device_target', type=str, default="Ascend", choices=['Ascend', 'GPU'], diff --git a/model_zoo/official/cv/maskrcnn/eval.py b/model_zoo/official/cv/maskrcnn/eval.py index 
d4179b2d116aa5dff6422f77ee6794169cc9bb91..c2d00cf2b482ce858e727754d0411c85caebe638 100644 --- a/model_zoo/official/cv/maskrcnn/eval.py +++ b/model_zoo/official/cv/maskrcnn/eval.py @@ -17,21 +17,18 @@ import os import argparse import time -import random import numpy as np from pycocotools.coco import COCO from mindspore import context, Tensor from mindspore.train.serialization import load_checkpoint, load_param_into_net -import mindspore.dataset.engine as de +from mindspore.common import set_seed from src.maskrcnn.mask_rcnn_r50 import Mask_Rcnn_Resnet50 from src.config import config from src.dataset import data_to_mindrecord_byte_image, create_maskrcnn_dataset from src.util import coco_eval, bbox2result_1image, results2json, get_seg_masks -random.seed(1) -np.random.seed(1) -de.config.set_seed(1) +set_seed(1) parser = argparse.ArgumentParser(description="MaskRcnn evaluation") parser.add_argument("--dataset", type=str, default="coco", help="Dataset, default is coco.") diff --git a/model_zoo/official/cv/maskrcnn/train.py b/model_zoo/official/cv/maskrcnn/train.py index e781648a3799b568dccde0d1517f85cc9cf99339..c9a40303633ca284ae081b91a53167cdf6520b6b 100644 --- a/model_zoo/official/cv/maskrcnn/train.py +++ b/model_zoo/official/cv/maskrcnn/train.py @@ -17,9 +17,7 @@ import os import argparse -import random import ast -import numpy as np import mindspore.common.dtype as mstype from mindspore import context, Tensor @@ -29,7 +27,7 @@ from mindspore.train import Model from mindspore.context import ParallelMode from mindspore.train.serialization import load_checkpoint, load_param_into_net from mindspore.nn import SGD -import mindspore.dataset.engine as de +from mindspore.common import set_seed from src.maskrcnn.mask_rcnn_r50 import Mask_Rcnn_Resnet50 from src.network_define import LossCallBack, WithLossCell, TrainOneStepCell, LossNet @@ -37,9 +35,7 @@ from src.config import config from src.dataset import data_to_mindrecord_byte_image, create_maskrcnn_dataset from 
src.lr_schedule import dynamic_lr -random.seed(1) -np.random.seed(1) -de.config.set_seed(1) +set_seed(1) parser = argparse.ArgumentParser(description="MaskRcnn training") parser.add_argument("--only_create_dataset", type=ast.literal_eval, default=False, help="If set it true, only create " diff --git a/model_zoo/official/cv/mobilenetv2/src/utils.py b/model_zoo/official/cv/mobilenetv2/src/utils.py index 9f54ab5d2d43502de523552fa5beddb251935549..d0a09afb8b722ae6838fe1261df22d2f86a6a47e 100644 --- a/model_zoo/official/cv/mobilenetv2/src/utils.py +++ b/model_zoo/official/cv/mobilenetv2/src/utils.py @@ -13,16 +13,12 @@ # limitations under the License. # ============================================================================ -import random -import numpy as np - from mindspore import context from mindspore import nn from mindspore.common import dtype as mstype from mindspore.train.model import ParallelMode from mindspore.train.callback import ModelCheckpoint, CheckpointConfig from mindspore.communication.management import get_rank, init -from mindspore.dataset import engine as de from src.models import Monitor @@ -84,10 +80,3 @@ def config_ckpoint(config, lr, step_size): ckpt_cb = ModelCheckpoint(prefix="mobilenetV2", directory=ckpt_save_dir, config=config_ck) cb += [ckpt_cb] return cb - - - -def set_random_seed(seed=1): - random.seed(seed) - np.random.seed(seed) - de.config.set_seed(seed) diff --git a/model_zoo/official/cv/mobilenetv2/train.py b/model_zoo/official/cv/mobilenetv2/train.py index 0381f8cb7b0489381cf074519af1bb41698c5275..f817b2b7c5c2a27123177e03d27b4038b7cab176 100644 --- a/model_zoo/official/cv/mobilenetv2/train.py +++ b/model_zoo/official/cv/mobilenetv2/train.py @@ -27,16 +27,17 @@ from mindspore.common import dtype as mstype from mindspore.train.model import Model from mindspore.train.loss_scale_manager import FixedLossScaleManager from mindspore.train.serialization import _exec_save_checkpoint +from mindspore.common import set_seed from src.dataset 
import create_dataset, extract_features from src.lr_generator import get_lr from src.config import set_config from src.args import train_parse_args -from src.utils import set_random_seed, context_device_init, switch_precision, config_ckpoint +from src.utils import context_device_init, switch_precision, config_ckpoint from src.models import CrossEntropyWithLabelSmooth, define_net -set_random_seed(1) +set_seed(1) if __name__ == '__main__': args_opt = train_parse_args() diff --git a/model_zoo/official/cv/mobilenetv2_quant/train.py b/model_zoo/official/cv/mobilenetv2_quant/train.py index a2d3ff63125914524361064ed7323df93406d95d..40d9b83ae23e868aa4de18a79b5d53fd1c54c3fe 100644 --- a/model_zoo/official/cv/mobilenetv2_quant/train.py +++ b/model_zoo/official/cv/mobilenetv2_quant/train.py @@ -16,8 +16,6 @@ import os import argparse -import random -import numpy as np from mindspore import context from mindspore import Tensor @@ -30,7 +28,7 @@ from mindspore.train.serialization import load_checkpoint from mindspore.communication.management import init, get_group_size, get_rank from mindspore.train.quant import quant from mindspore.train.quant.quant_utils import load_nonquant_param_into_quant_net -import mindspore.dataset.engine as de +from mindspore.common import set_seed from src.dataset import create_dataset from src.lr_generator import get_lr @@ -38,9 +36,7 @@ from src.utils import Monitor, CrossEntropyWithLabelSmooth from src.config import config_ascend_quant, config_gpu_quant from src.mobilenetV2 import mobilenetV2 -random.seed(1) -np.random.seed(1) -de.config.set_seed(1) +set_seed(1) parser = argparse.ArgumentParser(description='Image classification') parser.add_argument('--dataset_path', type=str, default=None, help='Dataset path') diff --git a/model_zoo/official/cv/mobilenetv3/train.py b/model_zoo/official/cv/mobilenetv3/train.py index d961d104601b8edec7715bae78f7fbf52c17244f..9f05fe4fde82ae1c0b59a33e65abc587d57c6607 100644 --- 
a/model_zoo/official/cv/mobilenetv3/train.py +++ b/model_zoo/official/cv/mobilenetv3/train.py @@ -16,7 +16,6 @@ import time import argparse -import random import numpy as np from mindspore import context @@ -33,7 +32,7 @@ from mindspore.context import ParallelMode from mindspore.train.callback import ModelCheckpoint, CheckpointConfig, Callback from mindspore.train.loss_scale_manager import FixedLossScaleManager from mindspore.train.serialization import load_checkpoint, load_param_into_net -import mindspore.dataset.engine as de +from mindspore.common import set_seed from mindspore.communication.management import init, get_group_size, get_rank from src.dataset import create_dataset @@ -41,9 +40,7 @@ from src.lr_generator import get_lr from src.config import config_gpu from src.mobilenetV3 import mobilenet_v3_large -random.seed(1) -np.random.seed(1) -de.config.set_seed(1) +set_seed(1) parser = argparse.ArgumentParser(description='Image classification') parser.add_argument('--dataset_path', type=str, default=None, help='Dataset path') diff --git a/model_zoo/official/cv/nasnet/train.py b/model_zoo/official/cv/nasnet/train.py index 02e9b9f2954e51738892244486c6995d016046ab..290dc892e4d3c7c0028ca3e8eff300bf50c50155 100755 --- a/model_zoo/official/cv/nasnet/train.py +++ b/model_zoo/official/cv/nasnet/train.py @@ -15,8 +15,6 @@ """train imagenet.""" import argparse import os -import random -import numpy as np from mindspore import Tensor from mindspore import context @@ -26,7 +24,7 @@ from mindspore.nn.optim.rmsprop import RMSProp from mindspore.train.callback import ModelCheckpoint, CheckpointConfig, LossMonitor, TimeMonitor from mindspore.train.model import Model from mindspore.train.serialization import load_checkpoint, load_param_into_net -from mindspore import dataset as de +from mindspore.common import set_seed from src.config import nasnet_a_mobile_config_gpu as cfg from src.dataset import create_dataset @@ -34,9 +32,7 @@ from src.nasnet_a_mobile import 
NASNetAMobileWithLoss, NASNetAMobileTrainOneStep from src.lr_generator import get_lr -random.seed(cfg.random_seed) -np.random.seed(cfg.random_seed) -de.config.set_seed(cfg.random_seed) +set_seed(cfg.random_seed) if __name__ == '__main__': diff --git a/model_zoo/official/cv/resnet/eval.py b/model_zoo/official/cv/resnet/eval.py index 570a26ee5cba1a95cfdcf72aeb1123b4a95faf27..3928ea34b3502d0dc2e0282cb2b875798602eb19 100755 --- a/model_zoo/official/cv/resnet/eval.py +++ b/model_zoo/official/cv/resnet/eval.py @@ -14,11 +14,9 @@ # ============================================================================ """train resnet.""" import os -import random import argparse -import numpy as np from mindspore import context -from mindspore import dataset as de +from mindspore.common import set_seed from mindspore.nn.loss import SoftmaxCrossEntropyWithLogits from mindspore.train.model import Model from mindspore.train.serialization import load_checkpoint, load_param_into_net @@ -33,9 +31,7 @@ parser.add_argument('--dataset_path', type=str, default=None, help='Dataset path parser.add_argument('--device_target', type=str, default='Ascend', help='Device target') args_opt = parser.parse_args() -random.seed(1) -np.random.seed(1) -de.config.set_seed(1) +set_seed(1) if args_opt.net == "resnet50": from src.resnet import resnet50 as resnet diff --git a/model_zoo/official/cv/resnet/train.py b/model_zoo/official/cv/resnet/train.py index 1c7f4d4dca29c47055dcd03ef5bb651bb2c81e71..6d65cce48e812d18ac87202cf5e333d4f12e0f1b 100755 --- a/model_zoo/official/cv/resnet/train.py +++ b/model_zoo/official/cv/resnet/train.py @@ -14,13 +14,10 @@ # ============================================================================ """train resnet.""" import os -import random import argparse import ast -import numpy as np from mindspore import context from mindspore import Tensor -from mindspore import dataset as de from mindspore.parallel._auto_parallel_context import auto_parallel_context from 
mindspore.nn.optim.momentum import Momentum from mindspore.train.model import Model @@ -30,6 +27,7 @@ from mindspore.nn.loss import SoftmaxCrossEntropyWithLogits from mindspore.train.loss_scale_manager import FixedLossScaleManager from mindspore.train.serialization import load_checkpoint, load_param_into_net from mindspore.communication.management import init, get_rank, get_group_size +from mindspore.common import set_seed import mindspore.nn as nn import mindspore.common.initializer as weight_init from src.lr_generator import get_lr, warmup_cosine_annealing_lr @@ -47,9 +45,7 @@ parser.add_argument('--pre_trained', type=str, default=None, help='Pretrained ch parser.add_argument('--parameter_server', type=ast.literal_eval, default=False, help='Run parameter server train') args_opt = parser.parse_args() -random.seed(1) -np.random.seed(1) -de.config.set_seed(1) +set_seed(1) if args_opt.net == "resnet50": from src.resnet import resnet50 as resnet diff --git a/model_zoo/official/cv/resnet50_quant/train.py b/model_zoo/official/cv/resnet50_quant/train.py index 1ef8db2e005f65bddab94ca7de6ae2c182b642ae..927fe98c77752960395e5e2314cf0a1f6f1fd02d 100755 --- a/model_zoo/official/cv/resnet50_quant/train.py +++ b/model_zoo/official/cv/resnet50_quant/train.py @@ -31,6 +31,7 @@ from mindspore.train.quant.quant_utils import load_nonquant_param_into_quant_net from mindspore.communication.management import init import mindspore.nn as nn import mindspore.common.initializer as weight_init +from mindspore.common import set_seed #from models.resnet_quant import resnet50_quant #auto construct quantative network of resnet50 from models.resnet_quant_manual import resnet50_quant #manually construct quantative network of resnet50 @@ -39,6 +40,8 @@ from src.lr_generator import get_lr from src.config import config_quant from src.crossentropy import CrossEntropy +set_seed(1) + parser = argparse.ArgumentParser(description='Image classification') parser.add_argument('--run_distribute', type=bool, 
default=False, help='Run distribute') parser.add_argument('--device_num', type=int, default=1, help='Device num.') diff --git a/model_zoo/official/cv/resnet_thor/eval.py b/model_zoo/official/cv/resnet_thor/eval.py index 6d857d06dbabcf5ccbea9580efe4356cad70304b..1c5a4eae81bbf1e42ceee26c41da46f05060d6a5 100644 --- a/model_zoo/official/cv/resnet_thor/eval.py +++ b/model_zoo/official/cv/resnet_thor/eval.py @@ -14,11 +14,9 @@ # ============================================================================ """train resnet.""" import os -import random import argparse -import numpy as np from mindspore import context -from mindspore import dataset as de +from mindspore.common import set_seed from mindspore.train.model import Model from mindspore.train.serialization import load_checkpoint, load_param_into_net from src.crossentropy import CrossEntropy @@ -32,9 +30,7 @@ parser.add_argument('--dataset_path', type=str, default=None, help='Dataset path parser.add_argument('--device_target', type=str, default='Ascend', help='Device target') args_opt = parser.parse_args() -random.seed(1) -np.random.seed(1) -de.config.set_seed(1) +set_seed(1) if __name__ == '__main__': target = args_opt.device_target diff --git a/model_zoo/official/cv/resnet_thor/train.py b/model_zoo/official/cv/resnet_thor/train.py index 03d545eca14f4458d09ea18886e2b681b482950a..1b6510860751cc838e292d916440816e9a0f094b 100644 --- a/model_zoo/official/cv/resnet_thor/train.py +++ b/model_zoo/official/cv/resnet_thor/train.py @@ -14,13 +14,12 @@ # ============================================================================ """train resnet.""" import os -import random import argparse import numpy as np from mindspore import context from mindspore import Tensor -from mindspore import dataset as de +from mindspore.common import set_seed from mindspore.parallel._auto_parallel_context import auto_parallel_context from mindspore.context import ParallelMode from mindspore.train.callback import ModelCheckpoint, 
CheckpointConfig, TimeMonitor, LossMonitor @@ -46,9 +45,7 @@ else: from src.thor import THOR_GPU as THOR from src.config import config_gpu as config -random.seed(1) -np.random.seed(1) -de.config.set_seed(1) +set_seed(1) def get_model_lr(global_step, lr_init, decay, total_epochs, steps_per_epoch, decay_epochs=100): diff --git a/model_zoo/official/cv/resnext50/src/utils/var_init.py b/model_zoo/official/cv/resnext50/src/utils/var_init.py index 185072d4410675e8f9157f0d84deae1d716c9320..53fde61d53368fd8c340f7ec47844cb72323b631 100644 --- a/model_zoo/official/cv/resnext50/src/utils/var_init.py +++ b/model_zoo/official/cv/resnext50/src/utils/var_init.py @@ -151,7 +151,6 @@ class KaimingUniform(KaimingInit): def _initialize(self, arr): fan = _select_fan(arr, self.mode) bound = math.sqrt(3.0) * self.gain / math.sqrt(fan) - np.random.seed(0) data = np.random.uniform(-bound, bound, arr.shape) _assignment(arr, data) @@ -179,7 +178,6 @@ class KaimingNormal(KaimingInit): def _initialize(self, arr): fan = _select_fan(arr, self.mode) std = self.gain / math.sqrt(fan) - np.random.seed(0) data = np.random.normal(0, std, arr.shape) _assignment(arr, data) @@ -195,7 +193,6 @@ def default_recurisive_init(custom_cell): if cell.bias is not None: fan_in, _ = _calculate_in_and_out(cell.weight) bound = 1 / math.sqrt(fan_in) - np.random.seed(0) cell.bias.default_input = init.initializer(init.Uniform(bound), cell.bias.shape, cell.bias.dtype) @@ -206,7 +203,6 @@ def default_recurisive_init(custom_cell): if cell.bias is not None: fan_in, _ = _calculate_in_and_out(cell.weight) bound = 1 / math.sqrt(fan_in) - np.random.seed(0) cell.bias.default_input = init.initializer(init.Uniform(bound), cell.bias.shape, cell.bias.dtype) diff --git a/model_zoo/official/cv/resnext50/train.py b/model_zoo/official/cv/resnext50/train.py index 7d068e5b7c2ab099b72bb73213404e0f31fc852d..8e7d4b0220ec963330573ba1ac1f8455ead9ccc2 100644 --- a/model_zoo/official/cv/resnext50/train.py +++ 
b/model_zoo/official/cv/resnext50/train.py @@ -28,6 +28,7 @@ from mindspore.train.callback import CheckpointConfig, Callback from mindspore.train.serialization import load_checkpoint, load_param_into_net from mindspore.train.model import Model from mindspore.train.loss_scale_manager import DynamicLossScaleManager, FixedLossScaleManager +from mindspore.common import set_seed from src.dataset import classification_dataset from src.crossentropy import CrossEntropy @@ -38,6 +39,7 @@ from src.utils.optimizers__init__ import get_param_groups from src.image_classification import get_network from src.config import config +set_seed(1) class BuildTrainNetwork(nn.Cell): """build training network""" diff --git a/model_zoo/official/cv/shufflenetv2/train.py b/model_zoo/official/cv/shufflenetv2/train.py index 0d6560bcb0a1af90f4948e2046ba3a17807d38ab..ed70f9186e425b79f753d14ab608e87cf2b6b803 100644 --- a/model_zoo/official/cv/shufflenetv2/train.py +++ b/model_zoo/official/cv/shufflenetv2/train.py @@ -16,14 +16,11 @@ import argparse import ast import os -import random -import numpy as np from network import ShuffleNetV2 import mindspore.nn as nn from mindspore import context -from mindspore import dataset as de from mindspore.context import ParallelMode from mindspore import Tensor from mindspore.communication.management import init, get_rank, get_group_size @@ -31,14 +28,13 @@ from mindspore.nn.optim.momentum import Momentum from mindspore.train.callback import ModelCheckpoint, CheckpointConfig, LossMonitor, TimeMonitor from mindspore.train.model import Model from mindspore.train.serialization import load_checkpoint, load_param_into_net +from mindspore.common import set_seed from src.config import config_gpu as cfg from src.dataset import create_dataset from src.lr_generator import get_lr_basic -random.seed(cfg.random_seed) -np.random.seed(cfg.random_seed) -de.config.set_seed(cfg.random_seed) +set_seed(cfg.random_seed) if __name__ == '__main__': diff --git 
a/model_zoo/official/cv/ssd/src/init_params.py b/model_zoo/official/cv/ssd/src/init_params.py index e144bc7f084a81e0bdb71cd4a598468f0808c5cb..30ceb92697a4dba12f18c8007c2423a2d1806a38 100644 --- a/model_zoo/official/cv/ssd/src/init_params.py +++ b/model_zoo/official/cv/ssd/src/init_params.py @@ -14,7 +14,6 @@ # ============================================================================ """Parameters utils""" -import numpy as np from mindspore.common.initializer import initializer, TruncatedNormal def init_net_param(network, initialize_mode='TruncatedNormal'): @@ -22,7 +21,6 @@ def init_net_param(network, initialize_mode='TruncatedNormal'): params = network.trainable_params() for p in params: if 'beta' not in p.name and 'gamma' not in p.name and 'bias' not in p.name: - np.random.seed(seed=1) if initialize_mode == 'TruncatedNormal': p.set_parameter_data(initializer(TruncatedNormal(), p.data.shape, p.data.dtype)) else: diff --git a/model_zoo/official/cv/ssd/train.py b/model_zoo/official/cv/ssd/train.py index eddace4b4811754747cdc2d73ed7d9907a11a1ed..34b1e3ca2b677f12583b3cbaadf62758effdde91 100644 --- a/model_zoo/official/cv/ssd/train.py +++ b/model_zoo/official/cv/ssd/train.py @@ -25,12 +25,14 @@ from mindspore.train.callback import CheckpointConfig, ModelCheckpoint, LossMoni from mindspore.train import Model from mindspore.context import ParallelMode from mindspore.train.serialization import load_checkpoint, load_param_into_net +from mindspore.common import set_seed from src.ssd import SSD300, SSDWithLossCell, TrainingWrapper, ssd_mobilenet_v2 from src.config import config from src.dataset import create_ssd_dataset, data_to_mindrecord_byte_image, voc_data_to_mindrecord from src.lr_schedule import get_lr from src.init_params import init_net_param, filter_checkpoint_parameter +set_seed(1) def main(): parser = argparse.ArgumentParser(description="SSD training") diff --git a/model_zoo/official/cv/vgg16/src/utils/var_init.py 
b/model_zoo/official/cv/vgg16/src/utils/var_init.py index 185072d4410675e8f9157f0d84deae1d716c9320..53fde61d53368fd8c340f7ec47844cb72323b631 100644 --- a/model_zoo/official/cv/vgg16/src/utils/var_init.py +++ b/model_zoo/official/cv/vgg16/src/utils/var_init.py @@ -151,7 +151,6 @@ class KaimingUniform(KaimingInit): def _initialize(self, arr): fan = _select_fan(arr, self.mode) bound = math.sqrt(3.0) * self.gain / math.sqrt(fan) - np.random.seed(0) data = np.random.uniform(-bound, bound, arr.shape) _assignment(arr, data) @@ -179,7 +178,6 @@ class KaimingNormal(KaimingInit): def _initialize(self, arr): fan = _select_fan(arr, self.mode) std = self.gain / math.sqrt(fan) - np.random.seed(0) data = np.random.normal(0, std, arr.shape) _assignment(arr, data) @@ -195,7 +193,6 @@ def default_recurisive_init(custom_cell): if cell.bias is not None: fan_in, _ = _calculate_in_and_out(cell.weight) bound = 1 / math.sqrt(fan_in) - np.random.seed(0) cell.bias.default_input = init.initializer(init.Uniform(bound), cell.bias.shape, cell.bias.dtype) @@ -206,7 +203,6 @@ def default_recurisive_init(custom_cell): if cell.bias is not None: fan_in, _ = _calculate_in_and_out(cell.weight) bound = 1 / math.sqrt(fan_in) - np.random.seed(0) cell.bias.default_input = init.initializer(init.Uniform(bound), cell.bias.shape, cell.bias.dtype) diff --git a/model_zoo/official/cv/vgg16/train.py b/model_zoo/official/cv/vgg16/train.py index 8690fa79c6fa71c3888241d4cf44543f07e48bd5..832664eb0c1cf581d102c883574810b7986c74cf 100644 --- a/model_zoo/official/cv/vgg16/train.py +++ b/model_zoo/official/cv/vgg16/train.py @@ -19,9 +19,6 @@ python train.py --data_path=$DATA_HOME --device_id=$DEVICE_ID import argparse import datetime import os -import random - -import numpy as np import mindspore.nn as nn from mindspore import Tensor @@ -33,6 +30,7 @@ from mindspore.train.model import Model from mindspore.context import ParallelMode from mindspore.train.serialization import load_param_into_net, load_checkpoint from 
mindspore.train.loss_scale_manager import FixedLossScaleManager +from mindspore.common import set_seed from src.dataset import vgg_create_dataset from src.dataset import classification_dataset @@ -45,8 +43,7 @@ from src.utils.util import get_param_groups from src.vgg import vgg16 -random.seed(1) -np.random.seed(1) +set_seed(1) def parse_args(cloud_args=None): diff --git a/model_zoo/official/cv/warpctc/eval.py b/model_zoo/official/cv/warpctc/eval.py index bf8e4e9552d4c6471fc222a35250ccea4b2eff90..58a9d4ae40b9905b839d1b9f3d30feb4d0a08f33 100755 --- a/model_zoo/official/cv/warpctc/eval.py +++ b/model_zoo/official/cv/warpctc/eval.py @@ -15,11 +15,9 @@ """Warpctc evaluation""" import os import math as m -import random import argparse -import numpy as np from mindspore import context -from mindspore import dataset as de +from mindspore.common import set_seed from mindspore.train.model import Model from mindspore.train.serialization import load_checkpoint, load_param_into_net @@ -29,9 +27,7 @@ from src.dataset import create_dataset from src.warpctc import StackedRNN, StackedRNNForGPU from src.metric import WarpCTCAccuracy -random.seed(1) -np.random.seed(1) -de.config.set_seed(1) +set_seed(1) parser = argparse.ArgumentParser(description="Warpctc training") parser.add_argument("--dataset_path", type=str, default=None, help="Dataset, default is None.") diff --git a/model_zoo/official/cv/warpctc/train.py b/model_zoo/official/cv/warpctc/train.py index 0b1b37c6059e450dd162b5ff1666e0757075648b..db3775a04b71a8975893f54aed88b2b5c6415a8f 100755 --- a/model_zoo/official/cv/warpctc/train.py +++ b/model_zoo/official/cv/warpctc/train.py @@ -15,12 +15,10 @@ """Warpctc training""" import os import math as m -import random import argparse -import numpy as np import mindspore.nn as nn from mindspore import context -from mindspore import dataset as de +from mindspore.common import set_seed from mindspore.train.model import Model from mindspore.context import ParallelMode from 
mindspore.nn.wrap import WithLossCell @@ -34,9 +32,7 @@ from src.warpctc import StackedRNN, StackedRNNForGPU from src.warpctc_for_train import TrainOneStepCellWithGradClip from src.lr_schedule import get_lr -random.seed(1) -np.random.seed(1) -de.config.set_seed(1) +set_seed(1) parser = argparse.ArgumentParser(description="Warpctc training") parser.add_argument("--run_distribute", action='store_true', help="Run distribute, default is false.") diff --git a/model_zoo/official/cv/yolov3_darknet53/src/initializer.py b/model_zoo/official/cv/yolov3_darknet53/src/initializer.py index c66cc74acf30418bb0544ba981af7963f6b82187..347fc9010a8ca52119458660b3a2fdcd1e85d173 100644 --- a/model_zoo/official/cv/yolov3_darknet53/src/initializer.py +++ b/model_zoo/official/cv/yolov3_darknet53/src/initializer.py @@ -21,9 +21,6 @@ from mindspore.common.initializer import Initializer as MeInitializer import mindspore.nn as nn -np.random.seed(5) - - def calculate_gain(nonlinearity, param=None): r"""Return the recommended gain value for the given nonlinearity function. 
The values are as follows: diff --git a/model_zoo/official/cv/yolov3_darknet53/train.py b/model_zoo/official/cv/yolov3_darknet53/train.py index c95beb05cd2093057b05fe0c1267174142645670..a17b32d66c5d8589ab3d5a42e9beb2521830b6d8 100644 --- a/model_zoo/official/cv/yolov3_darknet53/train.py +++ b/model_zoo/official/cv/yolov3_darknet53/train.py @@ -30,6 +30,7 @@ import mindspore as ms from mindspore.train.serialization import load_checkpoint, load_param_into_net from mindspore import amp from mindspore.train.loss_scale_manager import FixedLossScaleManager +from mindspore.common import set_seed from src.yolo import YOLOV3DarkNet53, YoloWithLossCell, TrainingWrapper from src.logger import get_logger @@ -41,6 +42,7 @@ from src.initializer import default_recurisive_init from src.config import ConfigYOLOV3DarkNet53 from src.util import keep_loss_fp32 +set_seed(1) class BuildTrainNetwork(nn.Cell): def __init__(self, network, criterion): diff --git a/model_zoo/official/cv/yolov3_darknet53_quant/src/initializer.py b/model_zoo/official/cv/yolov3_darknet53_quant/src/initializer.py index f3c03a8ad14b08c5608e974664782266c43892da..6f8134961a95b34b13537759f72b4e973ebb13c4 100644 --- a/model_zoo/official/cv/yolov3_darknet53_quant/src/initializer.py +++ b/model_zoo/official/cv/yolov3_darknet53_quant/src/initializer.py @@ -21,9 +21,6 @@ import mindspore.nn as nn from mindspore import Tensor -np.random.seed(5) - - def calculate_gain(nonlinearity, param=None): r"""Return the recommended gain value for the given nonlinearity function. 
The values are as follows: diff --git a/model_zoo/official/cv/yolov3_darknet53_quant/train.py b/model_zoo/official/cv/yolov3_darknet53_quant/train.py index 65277b431476ce6bff3b7a56bbabd17fa5e743d1..975ea993d6cd3890ca93b91f5294f7dc4bd00d2e 100644 --- a/model_zoo/official/cv/yolov3_darknet53_quant/train.py +++ b/model_zoo/official/cv/yolov3_darknet53_quant/train.py @@ -29,6 +29,7 @@ from mindspore.train.callback import _InternalCallbackParam, CheckpointConfig import mindspore as ms from mindspore.train.serialization import load_checkpoint, load_param_into_net from mindspore.train.quant import quant +from mindspore.common import set_seed from src.yolo import YOLOV3DarkNet53, YoloWithLossCell, TrainingWrapper from src.logger import get_logger @@ -41,6 +42,7 @@ from src.config import ConfigYOLOV3DarkNet53 from src.transforms import batch_preprocess_true_box, batch_preprocess_true_box_single from src.util import ShapeRecord +set_seed(1) devid = int(os.getenv('DEVICE_ID')) context.set_context(mode=context.GRAPH_MODE, enable_auto_mixed_precision=True, diff --git a/model_zoo/official/cv/yolov3_resnet18/train.py b/model_zoo/official/cv/yolov3_resnet18/train.py index 3005a14e254e5c92ed53f04a5e6f292e2aca98c4..8830df1af336919a61fe26b3d02ed357ddeb5d4d 100644 --- a/model_zoo/official/cv/yolov3_resnet18/train.py +++ b/model_zoo/official/cv/yolov3_resnet18/train.py @@ -34,11 +34,13 @@ from mindspore.train import Model from mindspore.context import ParallelMode from mindspore.train.serialization import load_checkpoint, load_param_into_net from mindspore.common.initializer import initializer +from mindspore.common import set_seed from src.yolov3 import yolov3_resnet18, YoloWithLossCell, TrainingWrapper from src.dataset import create_yolo_dataset, data_to_mindrecord_byte_image from src.config import ConfigYOLOV3ResNet18 +set_seed(1) def get_lr(learning_rate, start_step, global_step, decay_step, decay_rate, steps=False): """Set learning rate.""" @@ -54,7 +56,7 @@ def 
get_lr(learning_rate, start_step, global_step, decay_step, decay_rate, steps def init_net_param(network, init_value='ones'): - """Init:wq the parameters in network.""" + """Init the parameters in network.""" params = network.trainable_params() for p in params: if isinstance(p.data, Tensor) and 'beta' not in p.name and 'gamma' not in p.name and 'bias' not in p.name: diff --git a/model_zoo/official/gnn/gat/train.py b/model_zoo/official/gnn/gat/train.py index 94ac6f069f4eb63614ba49bc9777faa233680252..6a980722237615115c7c7b78b69bf7553e0432f1 100644 --- a/model_zoo/official/gnn/gat/train.py +++ b/model_zoo/official/gnn/gat/train.py @@ -19,12 +19,14 @@ import os import numpy as np import mindspore.context as context from mindspore.train.serialization import save_checkpoint, load_checkpoint +from mindspore.common import set_seed from src.config import GatConfig from src.dataset import load_and_process from src.gat import GAT from src.utils import LossAccuracyWrapper, TrainGAT +set_seed(1) def train(): """Train GAT model.""" diff --git a/model_zoo/official/gnn/gcn/train.py b/model_zoo/official/gnn/gcn/train.py index 16360a89684397bd5f6a9c30dc29f3c1cce6a04f..f7b0c600987d88f2483a0340b4023ac60119ddde 100644 --- a/model_zoo/official/gnn/gcn/train.py +++ b/model_zoo/official/gnn/gcn/train.py @@ -26,6 +26,7 @@ from matplotlib import pyplot as plt from matplotlib import animation from sklearn import manifold from mindspore import context +from mindspore.common import set_seed from src.gcn import GCN from src.metrics import LossAccuracyWrapper, TrainNetWrapper @@ -55,7 +56,7 @@ def train(): parser.add_argument('--save_TSNE', type=ast.literal_eval, default=False, help='Whether to save t-SNE graph') args_opt = parser.parse_args() - np.random.seed(args_opt.seed) + set_seed(args_opt.seed) context.set_context(mode=context.GRAPH_MODE, device_target="Ascend", save_graphs=False) config = ConfigGCN() diff --git a/model_zoo/official/nlp/bert/run_pretrain.py 
b/model_zoo/official/nlp/bert/run_pretrain.py index 2bb1e6f3106589ff93eb45939cbb96e782957764..b69ee52463f4a962f2145304db7f7fbaad66f62e 100644 --- a/model_zoo/official/nlp/bert/run_pretrain.py +++ b/model_zoo/official/nlp/bert/run_pretrain.py @@ -19,7 +19,6 @@ python run_pretrain.py import os import argparse -import numpy import mindspore.communication.management as D import mindspore.common.dtype as mstype from mindspore import context @@ -30,6 +29,7 @@ from mindspore.train.callback import ModelCheckpoint, CheckpointConfig, TimeMoni from mindspore.train.serialization import load_checkpoint, load_param_into_net from mindspore.nn.optim import Lamb, Momentum, AdamWeightDecay from mindspore import log as logger +from mindspore.common import set_seed from src import BertNetworkWithLoss, BertTrainOneStepCell, BertTrainOneStepWithLossScaleCell, \ BertTrainAccumulateStepsWithLossScaleCell from src.dataset import create_bert_dataset @@ -196,5 +196,5 @@ def run_pretrain(): if __name__ == '__main__': - numpy.random.seed(0) + set_seed(0) run_pretrain() diff --git a/model_zoo/official/nlp/bert_thor/run_pretrain.py b/model_zoo/official/nlp/bert_thor/run_pretrain.py index 3fea82ee5bd95b2e979c2a81e743440bdcc72aa6..e9e04b67a88f1c7b00d587bd799441c17cd7e2ef 100644 --- a/model_zoo/official/nlp/bert_thor/run_pretrain.py +++ b/model_zoo/official/nlp/bert_thor/run_pretrain.py @@ -19,7 +19,6 @@ python run_pretrain.py import argparse import os -import numpy from src import BertNetworkWithLoss, BertTrainOneStepCell, BertTrainOneStepWithLossScaleCell from src.bert_net_config import bert_net_cfg from src.config import cfg @@ -36,6 +35,7 @@ from mindspore.nn.wrap.loss_scale import DynamicLossScaleUpdateCell from mindspore.train.callback import ModelCheckpoint, CheckpointConfig, TimeMonitor from mindspore.context import ParallelMode from mindspore.train.serialization import load_checkpoint, load_param_into_net +from mindspore.common import set_seed _current_dir = 
os.path.dirname(os.path.realpath(__file__)) @@ -197,5 +197,5 @@ def run_pretrain(): if __name__ == '__main__': - numpy.random.seed(0) + set_seed(0) run_pretrain() diff --git a/model_zoo/official/nlp/mass/train.py b/model_zoo/official/nlp/mass/train.py index d1f148c718dd8f6c63baf0d60f20f39e19f1b6bd..408ece34cbc79b3968cfbe94e614e8c26db9c25f 100644 --- a/model_zoo/official/nlp/mass/train.py +++ b/model_zoo/official/nlp/mass/train.py @@ -30,6 +30,7 @@ from mindspore import context, Parameter from mindspore.context import ParallelMode from mindspore.communication import management as MultiAscend from mindspore.train.serialization import load_checkpoint +from mindspore.common import set_seed from config import TransformerConfig from src.dataset import load_dataset @@ -337,7 +338,7 @@ if __name__ == '__main__': _check_args(args.config) _config = get_config(args.config) - np.random.seed(_config.random_seed) + set_seed(_config.random_seed) context.set_context(save_graphs=_config.save_graphs) if _rank_size is not None and int(_rank_size) > 1: diff --git a/model_zoo/official/nlp/tinybert/run_general_distill.py b/model_zoo/official/nlp/tinybert/run_general_distill.py index 1b64ed4dfbb7940a85e2a6a651ea620b0cfe61a1..7257c5883e37d1346eaf9bdbb7c1bf231e80542c 100644 --- a/model_zoo/official/nlp/tinybert/run_general_distill.py +++ b/model_zoo/official/nlp/tinybert/run_general_distill.py @@ -18,7 +18,6 @@ import os import argparse import datetime -import numpy import mindspore.communication.management as D import mindspore.common.dtype as mstype from mindspore import context @@ -28,6 +27,7 @@ from mindspore.context import ParallelMode from mindspore.nn.optim import AdamWeightDecay from mindspore.nn.wrap.loss_scale import DynamicLossScaleUpdateCell from mindspore import log as logger +from mindspore.common import set_seed from src.dataset import create_tinybert_dataset, DataType from src.utils import LossCallBack, ModelSaveCkpt, BertLearningRate from src.gd_config import common_cfg, 
bert_teacher_net_cfg, bert_student_net_cfg @@ -154,5 +154,5 @@ def run_general_distill(): sink_size=args_opt.data_sink_steps) if __name__ == '__main__': - numpy.random.seed(0) + set_seed(0) run_general_distill() diff --git a/model_zoo/official/nlp/transformer/train.py b/model_zoo/official/nlp/transformer/train.py index 36edcb08540314334fda74c6861e375c45fa268a..45207e19ee05b508ab64ded3140789ad015a00cf 100644 --- a/model_zoo/official/nlp/transformer/train.py +++ b/model_zoo/official/nlp/transformer/train.py @@ -16,8 +16,6 @@ import time import argparse -import random -import numpy as np import mindspore.common.dtype as mstype from mindspore.common.tensor import Tensor @@ -27,10 +25,10 @@ from mindspore.train.loss_scale_manager import DynamicLossScaleManager from mindspore.train.callback import CheckpointConfig, ModelCheckpoint from mindspore.train.callback import Callback, TimeMonitor from mindspore.train.serialization import load_checkpoint, load_param_into_net -import mindspore.dataset.engine as de import mindspore.communication.management as D from mindspore.context import ParallelMode from mindspore import context +from mindspore.common import set_seed from src.transformer_for_train import TransformerTrainOneStepCell, TransformerNetworkWithLoss, \ TransformerTrainOneStepWithLossScaleCell @@ -38,10 +36,7 @@ from src.config import cfg, transformer_net_cfg from src.dataset import create_transformer_dataset from src.lr_schedule import create_dynamic_lr -random_seed = 1 -random.seed(random_seed) -np.random.seed(random_seed) -de.config.set_seed(random_seed) +set_seed(1) def get_ms_timestamp(): t = time.time() diff --git a/model_zoo/official/recommend/deepfm/train.py b/model_zoo/official/recommend/deepfm/train.py index 9c202a1d3ab1192478445593421318f9ad811a7c..db660737ea1642ce803cc14e6f80e8f74cb8b3e1 100644 --- a/model_zoo/official/recommend/deepfm/train.py +++ b/model_zoo/official/recommend/deepfm/train.py @@ -16,15 +16,13 @@ import os import sys import argparse 
-import random -import numpy as np from mindspore import context from mindspore.context import ParallelMode from mindspore.communication.management import init, get_rank, get_group_size from mindspore.train.model import Model from mindspore.train.callback import ModelCheckpoint, CheckpointConfig, TimeMonitor -import mindspore.dataset.engine as de +from mindspore.common import set_seed from src.deepfm import ModelBuilder, AUCMetric from src.config import DataConfig, ModelConfig, TrainConfig @@ -46,9 +44,7 @@ args_opt, _ = parser.parse_known_args() args_opt.do_eval = args_opt.do_eval == 'True' rank_size = int(os.environ.get("RANK_SIZE", 1)) -random.seed(1) -np.random.seed(1) -de.config.set_seed(1) +set_seed(1) if __name__ == '__main__': data_config = DataConfig() diff --git a/model_zoo/official/recommend/wide_and_deep/train_and_eval_distribute.py b/model_zoo/official/recommend/wide_and_deep/train_and_eval_distribute.py index 8f6b96f67090a43ecbdb83259760f041bbaa9124..a460ec42ff267543ac8b4311e21ce96dd5d04977 100644 --- a/model_zoo/official/recommend/wide_and_deep/train_and_eval_distribute.py +++ b/model_zoo/official/recommend/wide_and_deep/train_and_eval_distribute.py @@ -17,11 +17,11 @@ import os import sys -import numpy as np from mindspore import Model, context from mindspore.train.callback import ModelCheckpoint, CheckpointConfig, TimeMonitor from mindspore.context import ParallelMode from mindspore.communication.management import get_rank, get_group_size, init +from mindspore.common import set_seed from src.wide_and_deep import PredictWithSigmoid, TrainStepWrap, NetWithLossClass, WideDeepModel from src.callbacks import LossCallBack, EvalCallBack @@ -69,7 +69,7 @@ def train_and_eval(config): """ test_train_eval """ - np.random.seed(1000) + set_seed(1000) data_path = config.data_path batch_size = config.batch_size epochs = config.epochs diff --git a/model_zoo/official/recommend/wide_and_deep/train_and_eval_parameter_server.py 
b/model_zoo/official/recommend/wide_and_deep/train_and_eval_parameter_server.py index 193b1b054d22e52b6b76bbfc2f98dbc571e0d3e2..b4f4e3c59e62f72d478212c9f80de3d43fe80e7d 100644 --- a/model_zoo/official/recommend/wide_and_deep/train_and_eval_parameter_server.py +++ b/model_zoo/official/recommend/wide_and_deep/train_and_eval_parameter_server.py @@ -17,11 +17,11 @@ import os import sys -import numpy as np from mindspore import Model, context from mindspore.train.callback import ModelCheckpoint, CheckpointConfig, TimeMonitor from mindspore.context import ParallelMode from mindspore.communication.management import get_rank, get_group_size, init +from mindspore.common import set_seed from src.wide_and_deep import PredictWithSigmoid, TrainStepWrap, NetWithLossClass, WideDeepModel from src.callbacks import LossCallBack, EvalCallBack @@ -70,7 +70,7 @@ def train_and_eval(config): """ test_train_eval """ - np.random.seed(1000) + set_seed(1000) data_path = config.data_path batch_size = config.batch_size epochs = config.epochs diff --git a/model_zoo/official/recommend/wide_and_deep_multitable/train_and_eval_distribute.py b/model_zoo/official/recommend/wide_and_deep_multitable/train_and_eval_distribute.py index 99fe089fe64d443f1ec8f3d46b9a06d96082dc07..4383a93071568d097bbf2abdaa66b64b735e72b1 100644 --- a/model_zoo/official/recommend/wide_and_deep_multitable/train_and_eval_distribute.py +++ b/model_zoo/official/recommend/wide_and_deep_multitable/train_and_eval_distribute.py @@ -16,12 +16,12 @@ import os import sys -import numpy as np from mindspore import Model, context from mindspore.train.callback import ModelCheckpoint, CheckpointConfig from mindspore.train.callback import TimeMonitor from mindspore.context import ParallelMode from mindspore.communication.management import get_rank, get_group_size, init +from mindspore.common import set_seed from src.wide_and_deep import PredictWithSigmoid, TrainStepWrap, NetWithLossClass, WideDeepModel from src.callbacks import LossCallBack, 
EvalCallBack @@ -69,7 +69,7 @@ def train_and_eval(config): """ train_and_eval """ - np.random.seed(1000) + set_seed(1000) data_path = config.data_path epochs = config.epochs print("epochs is {}".format(epochs))