# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""train resnet."""
import os
import ast
import random
import argparse
import numpy as np
from mindspore import context
from mindspore import Tensor
from mindspore import dataset as de
from mindspore.parallel._auto_parallel_context import auto_parallel_context
from mindspore.train.model import Model
from mindspore.context import ParallelMode
from mindspore.train.callback import ModelCheckpoint, CheckpointConfig, LossMonitor, TimeMonitor
from mindspore.train.loss_scale_manager import FixedLossScaleManager
from mindspore.train.serialization import load_checkpoint, load_param_into_net
from mindspore.communication.management import init, get_rank, get_group_size
import mindspore.nn as nn
import mindspore.common.initializer as weight_init
from src.lr_generator import get_lr, warmup_cosine_annealing_lr
from src.CrossEntropySmooth import CrossEntropySmooth
from src.resnet50 import resnet50
from src.config import cfg
from src.dataset import create_dataset

parser = argparse.ArgumentParser(description='Image classification')
# type=ast.literal_eval rather than type=bool: bool('False') is True because any
# non-empty string is truthy, so --run_distribute False would silently enable
# distributed training.
parser.add_argument('--run_distribute', type=ast.literal_eval, default=False, help='Run distribute')
parser.add_argument('--device_num', type=int, default=1, help='Device num.')
parser.add_argument('--dataset_path', type=str, default=None, help='Dataset path')
parser.add_argument('--device_target', type=str, default='Ascend', choices=['Ascend', 'GPU'],
                    help='Device target: "Ascend", "GPU"')
parser.add_argument('--pre_trained', type=str, default=None, help='Pretrained checkpoint path')
parser.add_argument('--dataset_sink_mode', type=str, default='True', choices=['True', 'False'],
                    help='Whether to enable dataset sink mode: True or False')
args_opt = parser.parse_args()

# fix all random seeds for reproducibility
random.seed(1)
np.random.seed(1)
de.config.set_seed(1)

if __name__ == '__main__':
    target = args_opt.device_target
    ckpt_save_dir = cfg.save_checkpoint_path
    dataset_sink_mode = args_opt.dataset_sink_mode == 'True'

    # init context
    context.set_context(mode=context.GRAPH_MODE, device_target=target, save_graphs=False)
    if args_opt.run_distribute:
        if target == "Ascend":
            device_id = int(os.getenv('DEVICE_ID'))
            context.set_context(device_id=device_id, enable_auto_mixed_precision=True)
            context.set_auto_parallel_context(device_num=args_opt.device_num,
                                              parallel_mode=ParallelMode.DATA_PARALLEL, mirror_mean=True)
            auto_parallel_context().set_all_reduce_fusion_split_indices([107, 160])
            init()
        # GPU target
        else:
            init("nccl")
            context.set_auto_parallel_context(device_num=get_group_size(),
                                              parallel_mode=ParallelMode.DATA_PARALLEL, mirror_mean=True)
            ckpt_save_dir = cfg.save_checkpoint_path + "ckpt_" + str(get_rank()) + "/"
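    # Launch-environment notes (assumptions inferred from how the script reads
    # its inputs, not enforced here): an Ascend distributed run expects each
    # process to be started with DEVICE_ID exported (plus a rank table for
    # collective communication), while a GPU run is typically launched under
    # mpirun so that init("nccl") can discover rank and group size. The
    # all-reduce fusion split indices [107, 160] are specific to this ResNet-50
    # layout and would need re-tuning for a different network.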
    # create dataset
    dataset = create_dataset(data_path=args_opt.dataset_path, do_train=True, batch_size=cfg.batch_size,
                             target=target)
    step_size = dataset.get_dataset_size()

    # define net
    net = resnet50(class_num=cfg.num_classes)

    # init weight: load a pretrained checkpoint if one is given, otherwise
    # initialize conv layers with Xavier uniform and dense layers with a
    # truncated normal distribution
    if args_opt.pre_trained:
        param_dict = load_checkpoint(args_opt.pre_trained)
        load_param_into_net(net, param_dict)
    else:
        for _, cell in net.cells_and_names():
            if isinstance(cell, nn.Conv2d):
                cell.weight.default_input = weight_init.initializer(weight_init.XavierUniform(),
                                                                    cell.weight.default_input.shape,
                                                                    cell.weight.default_input.dtype).to_tensor()
            if isinstance(cell, nn.Dense):
                cell.weight.default_input = weight_init.initializer(weight_init.TruncatedNormal(),
                                                                    cell.weight.default_input.shape,
                                                                    cell.weight.default_input.dtype).to_tensor()

    # init lr
    {% if dataset=='Cifar10' %}
    lr = get_lr(lr_init=cfg.lr_init, lr_end=cfg.lr_end, lr_max=cfg.lr_max, warmup_epochs=cfg.warmup_epochs,
                total_epochs=cfg.epoch_size, steps_per_epoch=step_size, lr_decay_mode='poly')
    {% else %}
    lr = get_lr(lr_init=cfg.lr_init, lr_end=0.0, lr_max=cfg.lr_max, warmup_epochs=cfg.warmup_epochs,
                total_epochs=cfg.epoch_size, steps_per_epoch=step_size, lr_decay_mode='cosine')
    {% endif %}
    lr = Tensor(lr)

    # define opt
    {% if optimizer=='Momentum' %}
    opt = nn.Momentum(filter(lambda x: x.requires_grad, net.get_parameters()), learning_rate=lr,
                      momentum=cfg.momentum, weight_decay=cfg.weight_decay, loss_scale=cfg.loss_scale)
    {% else %}
    opt = nn.{{optimizer}}(filter(lambda x: x.requires_grad, net.get_parameters()), learning_rate=cfg.lr)
    {% endif %}

    # define loss; the loss choice is identical for both targets, so it is
    # defined once here rather than duplicated per target
    {% if dataset=='ImageNet' %}
    {% if loss=='SoftmaxCrossEntropyWithLogits' %}
    if not cfg.use_label_smooth:
        cfg.label_smooth_factor = 0.0
    loss = CrossEntropySmooth(sparse=True, reduction='mean', smooth_factor=cfg.label_smooth_factor,
                              num_classes=cfg.num_classes)
    {% elif loss=='SoftmaxCrossEntropyExpand' %}
    loss = nn.SoftmaxCrossEntropyExpand(sparse=True)
    {% endif %}
    {% else %}
    {% if loss=='SoftmaxCrossEntropyWithLogits' %}
    loss = nn.SoftmaxCrossEntropyWithLogits(sparse=True, reduction='mean')
    {% elif loss=='SoftmaxCrossEntropyExpand' %}
    loss = nn.SoftmaxCrossEntropyExpand(sparse=True)
    {% endif %}
    {% endif %}
    loss_scale = FixedLossScaleManager(cfg.loss_scale, drop_overflow_update=False)

    # define model; the two targets differ only in whether batchnorm is kept in fp32
    if target == "Ascend":
        model = Model(net, loss_fn=loss, optimizer=opt, loss_scale_manager=loss_scale, metrics={'acc'},
                      amp_level="O2", keep_batchnorm_fp32=False)
    else:
        # GPU target
        model = Model(net, loss_fn=loss, optimizer=opt, loss_scale_manager=loss_scale, metrics={'acc'},
                      amp_level="O2", keep_batchnorm_fp32=True)

    # define callbacks
    time_cb = TimeMonitor(data_size=step_size)
    loss_cb = LossMonitor()
    cb = [time_cb, loss_cb]
    if cfg.save_checkpoint:
        cfg_ck = CheckpointConfig(save_checkpoint_steps=cfg.save_checkpoint_epochs * step_size,
                                  keep_checkpoint_max=cfg.keep_checkpoint_max)
        ckpt_cb = ModelCheckpoint(prefix="resnet", directory=ckpt_save_dir, config=cfg_ck)
        cb += [ckpt_cb]

    # train model
    model.train(cfg.epoch_size, dataset, callbacks=cb, dataset_sink_mode=dataset_sink_mode)
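# Example invocations (illustrative only: the paths and device counts below are
# placeholders, and real runs usually go through the repo's launch scripts,
# e.g. a run_distribute_train.sh wrapper):
#
#   single-device Ascend training:
#     python train.py --dataset_path=/path/to/dataset
#
#   8-device distributed Ascend training (the launcher exports DEVICE_ID and
#   supplies a rank table per process):
#     python train.py --run_distribute=True --device_num=8 --dataset_path=/path/to/dataset
#
#   GPU training under mpirun/NCCL:
#     mpirun -n 8 python train.py --run_distribute=True --device_target=GPU \
#         --dataset_path=/path/to/dataset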