diff --git a/example/yolov3_coco2017/train.py b/example/yolov3_coco2017/train.py
index 3ac3816f4a180a9fce9c699a819b1c0947f7ee33..0a32a6d30dcb56c1be885123b394bea8a4b9b540 100644
--- a/example/yolov3_coco2017/train.py
+++ b/example/yolov3_coco2017/train.py
@@ -67,7 +67,7 @@ if __name__ == '__main__':
     parser.add_argument("--distribute", type=bool, default=False, help="Run distribute, default is false.")
     parser.add_argument("--device_id", type=int, default=0, help="Device id, default is 0.")
     parser.add_argument("--device_num", type=int, default=1, help="Use device nums, default is 1.")
-    parser.add_argument("--mode", type=str, default="graph", help="Run graph mode or feed mode, default is graph")
+    parser.add_argument("--mode", type=str, default="sink", help="Run sink mode or non-sink mode, default is sink")
     parser.add_argument("--epoch_size", type=int, default=10, help="Epoch size, default is 10")
     parser.add_argument("--batch_size", type=int, default=32, help="Batch size, default is 32.")
     parser.add_argument("--checkpoint_path", type=str, default="", help="Checkpoint file path")
@@ -150,8 +150,8 @@ if __name__ == '__main__':
     model = Model(net)
     dataset_sink_mode = False
-    if args_opt.mode == "graph":
-        print("In graph mode, one epoch return a loss.")
+    if args_opt.mode == "sink":
+        print("In sink mode, one epoch returns a loss.")
         dataset_sink_mode = True
     print("Start train YOLOv3, the first epoch will be slower because of the graph compilation.")
     model.train(args_opt.epoch_size, dataset, callbacks=callback, dataset_sink_mode=dataset_sink_mode)
diff --git a/mindspore/ccsrc/pipeline/pipeline_ge.cc b/mindspore/ccsrc/pipeline/pipeline_ge.cc
index 2f6893559110d8aaabc0c129fbad330d82eea4a1..60960a2eb77b69ae0aea74bb7e3a7fbc1cb9cb99 100644
--- a/mindspore/ccsrc/pipeline/pipeline_ge.cc
+++ b/mindspore/ccsrc/pipeline/pipeline_ge.cc
@@ -116,7 +116,7 @@ bool InitExecDatasetGe(const std::string& queue_name, int64_t size, int64_t batc
     return transform::TransformUtil::ConvertDataType(i->type_id());
   });
 
-  ConfigManager::GetInstance().set_dataset_mode(DatasetMode::DS_GRAPH_MODE);
+  ConfigManager::GetInstance().set_dataset_mode(DatasetMode::DS_SINK_MODE);
   ConfigManager::GetInstance().set_iter_num(size);
   ConfigManager::GetInstance().set_dataset_phase(phase);
 
@@ -453,8 +453,8 @@ void ProcessGeArg(const std::map& info, const py::
   }
 
   // process the first args of tensor
-  // only in Dataset Feed Mode, fp_bp graph need input tensors
-  if (ConfigManager::GetInstance().dataset_mode() == DS_FEED_MODE) {
+  // only in dataset non-sink mode does the fp_bp graph need input tensors
+  if (ConfigManager::GetInstance().dataset_mode() == DS_NORMAL_MODE) {
     for (std::size_t i = 0; i < size; i++) {
       ValuePtr converted = nullptr;
       bool succ = parse::ConvertData(args[i], &converted);
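For reviewers trying out the renamed flag, here is a minimal sketch of how `--mode` now maps onto `dataset_sink_mode`, condensed from the train.py hunks above; `model`, `dataset`, and `callback` are placeholders from the real script, not defined here:

```python
# Condensed from example/yolov3_coco2017/train.py after the rename:
# "sink" enables dataset sinking (one loss per epoch); any other value,
# e.g. "normal", keeps per-step host-side execution.
import argparse

parser = argparse.ArgumentParser(description="YOLOv3 train (sketch)")
parser.add_argument("--mode", type=str, default="sink",
                    help="Run sink mode or non-sink mode, default is sink")
args_opt = parser.parse_args()

dataset_sink_mode = args_opt.mode == "sink"
if dataset_sink_mode:
    print("In sink mode, one epoch returns a loss.")
# model.train(args_opt.epoch_size, dataset, callbacks=callback,
#             dataset_sink_mode=dataset_sink_mode)  # as in the real script
```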
diff --git a/mindspore/ccsrc/transform/convert.cc b/mindspore/ccsrc/transform/convert.cc
index c400d1c5733e102be684e3751c7b027d108cac46..250e83432a66bbc8557248c9914b75b43be8678a 100755
--- a/mindspore/ccsrc/transform/convert.cc
+++ b/mindspore/ccsrc/transform/convert.cc
@@ -440,10 +440,10 @@ void DfGraphConvertor::InitLoopVar(std::vector *init_input) {
     int64_t value = 0;
     auto const_iter_num = std::make_shared("const/npu_runconfig/iterations_per_loop");
-    if (ConfigManager::GetInstance().dataset_mode() == DS_GRAPH_MODE) {
+    if (ConfigManager::GetInstance().dataset_mode() == DS_SINK_MODE) {
       value = ConfigManager::GetInstance().iter_num();
     } else {
-      MS_LOG(INFO) << "Run with feed mode, the iterator number will always be 1";
+      MS_LOG(INFO) << "Run with non-sink mode, the iterator number will always be 1";
       value = 1;
       ConfigManager::GetInstance().set_iter_num(value);
     }
@@ -574,7 +574,7 @@ void DfGraphConvertor::SetupParamInitSubGraph(const TensorOrderMap &tensors, std
 void DfGraphConvertor::MakeDatasetHandler(const std::string &name, const size_t &input_idx, const AnfNodePtr &it) {
   MS_LOG(INFO) << "The " << name << " is the " << input_idx << "(st/nd/th) input";
-  if (ConfigManager::GetInstance().dataset_mode() == DS_GRAPH_MODE) {
+  if (ConfigManager::GetInstance().dataset_mode() == DS_SINK_MODE) {
     auto getnext_idx = static_cast(input_idx);
     DatasetGraphParam param = ConfigManager::GetInstance().dataset_param();
     if (!param.input_indexes().empty() && input_idx <= param.input_indexes().size()) {
@@ -866,7 +866,7 @@ DfGraphConvertor &DfGraphConvertor::ConvertAllNode() {
   }
 
   // Create dataset iterator and iterator_getnext node
-  if (ConfigManager::GetInstance().dataset_mode() == DS_GRAPH_MODE) {
+  if (ConfigManager::GetInstance().dataset_mode() == DS_SINK_MODE) {
     DatasetGraphParam param = ConfigManager::GetInstance().dataset_param();
     MS_LOG(INFO) << "Dataset param is " << param.ToString() << ".";
     // GetNext
@@ -975,7 +975,7 @@ void DfGraphConvertor::TraceOutputFromParameter(const AnfNodePtr &anf_out) {
 }
 
 void SetupDatasetIterGetNextNode(const OperatorPtr &op) {
-  if (ConfigManager::GetInstance().dataset_mode() == DS_GRAPH_MODE) {
+  if (ConfigManager::GetInstance().dataset_mode() == DS_SINK_MODE) {
     DatasetGraphParam param = ConfigManager::GetInstance().dataset_param();
     size_t output_num = param.ge_types().size();
     MS_LOG(INFO) << "Set iterator_getnext op's output num = " << output_num << ".";
@@ -1034,7 +1034,7 @@ DfGraphConvertor &DfGraphConvertor::BuildGraph() {
   // set graph input according to the order from anf graph
   std::vector inputs;
-  if (ConfigManager::GetInstance().dataset_mode() == DS_GRAPH_MODE) {
+  if (ConfigManager::GetInstance().dataset_mode() == DS_SINK_MODE) {
     inputs.push_back(*dataset_iter_getnext_);
   } else {
     auto params = anf_graph_->parameters();
diff --git a/mindspore/ccsrc/utils/config_manager.cc b/mindspore/ccsrc/utils/config_manager.cc
index ac8a965878ddce5c55223f6caaa6095326771831..6d66b37436cc49f59e9d997dda04b64ed3d5b49c 100644
--- a/mindspore/ccsrc/utils/config_manager.cc
+++ b/mindspore/ccsrc/utils/config_manager.cc
@@ -28,7 +28,7 @@ ConfigManager& ConfigManager::GetInstance() noexcept {
 }
 
 void ConfigManager::SetDatasetModeConfig(const std::string& mode) {
-  static const std::map mode_map = {{"feed", DS_FEED_MODE}, {"graph", DS_GRAPH_MODE}};
+  static const std::map mode_map = {{"normal", DS_NORMAL_MODE}, {"sink", DS_SINK_MODE}};
   if (mode_map.find(mode) == mode_map.end()) {
     MS_LOG(ERROR) << "Invalid dataset mode:" << mode;
     return;
@@ -38,7 +38,7 @@ void ConfigManager::ResetConfig() noexcept {
   parallel_strategy_ = ONE_DEVICE;
-  dataset_mode_ = DS_FEED_MODE;
+  dataset_mode_ = DS_NORMAL_MODE;
   dataset_param_ = DatasetGraphParam("", 0, 0, {}, {}, {});
   iter_num_ = 1;
 }
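The renamed contract in ConfigManager is easiest to see side by side. The following Python sketch mirrors `SetDatasetModeConfig` and the `iterations_per_loop` choice from `InitLoopVar`; the names mirror the C++ identifiers, and this is an illustration of the behavior, not a MindSpore API:

```python
# Python mirror of the renamed C++ logic (illustration only).
DS_NORMAL_MODE, DS_SINK_MODE = 0, 1  # mirrors enum DatasetMode
MODE_MAP = {"normal": DS_NORMAL_MODE, "sink": DS_SINK_MODE}

def set_dataset_mode_config(mode, current=DS_NORMAL_MODE):
    """Map a mode string to the enum; invalid strings are rejected."""
    if mode not in MODE_MAP:
        print("Invalid dataset mode:", mode)  # C++ side uses MS_LOG(ERROR)
        return current
    return MODE_MAP[mode]

def iterations_per_loop(dataset_mode, iter_num):
    """Sink mode loops iter_num steps on device; non-sink always runs 1."""
    return iter_num if dataset_mode == DS_SINK_MODE else 1

assert iterations_per_loop(set_dataset_mode_config("sink"), 300) == 300
assert iterations_per_loop(set_dataset_mode_config("normal"), 300) == 1
```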
diff --git a/mindspore/ccsrc/utils/config_manager.h b/mindspore/ccsrc/utils/config_manager.h
index 31137f6243972e0f2c25ee87f6d0ab5198f7821c..db7d7d0c14de4234c754af5ef3f4b680b38ed87a 100644
--- a/mindspore/ccsrc/utils/config_manager.h
+++ b/mindspore/ccsrc/utils/config_manager.h
@@ -33,7 +33,7 @@ enum ParallelStrategy {
   DISTRIBUTION,
 };
 
-enum DatasetMode { DS_FEED_MODE = 0, DS_GRAPH_MODE };
+enum DatasetMode { DS_NORMAL_MODE = 0, DS_SINK_MODE };
 
 class DatasetGraphParam {
  public:
@@ -106,7 +106,7 @@ class ConfigManager {
   ~ConfigManager() = default;
 
   ParallelStrategy parallel_strategy_{ONE_DEVICE};
-  DatasetMode dataset_mode_{DS_FEED_MODE};
+  DatasetMode dataset_mode_{DS_NORMAL_MODE};
   DatasetGraphParam dataset_param_{"", 0, 0, {}, {}, {}};
   int64_t iter_num_{1};
   std::string dataset_phase_{""};
diff --git a/mindspore/common/api.py b/mindspore/common/api.py
index 9ee95ef77235e12a731ee6c5de0b6d2c75e552fe..5af17bbd78e146f6e77783e098f7001cc9159e90 100644
--- a/mindspore/common/api.py
+++ b/mindspore/common/api.py
@@ -381,9 +381,9 @@ class _Executor:
         if enable_ge:
             # decide whether to sink based on whether the inputs is virtual or not
             if args_list and isinstance(args_list[0], Tensor) and args_list[0].virtual_flag:
-                _set_dataset_mode_config('graph')
+                _set_dataset_mode_config('sink')
             else:
-                _set_dataset_mode_config('feed')
+                _set_dataset_mode_config('normal')
 
             self._build_data_graph(obj, params, phase)
diff --git a/mindspore/nn/wrap/loss_scale.py b/mindspore/nn/wrap/loss_scale.py
index 1ce3179273caa4f85ecc4dc627f8f77de234667d..5cdb34cf112c4791c2ce8400cedc2ff3a0467115 100644
--- a/mindspore/nn/wrap/loss_scale.py
+++ b/mindspore/nn/wrap/loss_scale.py
@@ -43,7 +43,7 @@ class DynamicLossScaleUpdateCell(Cell):
     In every training step, the loss scaling value will be updated by loss scaling value/`scale_factor`
     when there is overflow. And it will be increased by loss scaling value * `scale_factor` if there is no
     overflow for a continuous `scale_window` steps. This cell is used for Graph mode training in which all
-    logic will be executed on device side(Another training mode is feed mode in which some logic will be
+    logic will be executed on the device side (another training mode is non-sink mode, in which some logic will be
     executed on host).
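The renamed strings are chosen in `_Executor` based on whether the first input is a virtual tensor. Below is a minimal standalone paraphrase of the api.py decision above; `choose_dataset_mode` is a hypothetical helper for illustration, with `virtual_flag` read defensively:

```python
# Paraphrase of the api.py decision (illustration only): virtual inputs mean
# the data comes from the on-device dataset queue, so sink mode is selected;
# real host tensors fall back to normal (non-sink) mode.
def choose_dataset_mode(args_list):
    first = args_list[0] if args_list else None
    if first is not None and getattr(first, "virtual_flag", False):
        return "sink"    # passed to _set_dataset_mode_config('sink')
    return "normal"      # passed to _set_dataset_mode_config('normal')

print(choose_dataset_mode([]))  # -> "normal"
```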
diff --git a/tests/ut/python/utils/test_callback.py b/tests/ut/python/utils/test_callback.py
index 60e4c6527a00b9ce74ed6497b7cba3a85ccd9de3..c6fea04231ad6e1eda3777c968d6a76445227a8e 100644
--- a/tests/ut/python/utils/test_callback.py
+++ b/tests/ut/python/utils/test_callback.py
@@ -24,11 +24,12 @@ from mindspore import context
 from mindspore.common.tensor import Tensor
 from mindspore.nn.optim import Momentum
 from mindspore.nn import TrainOneStepCell, WithLossCell
-from mindspore.train.callback import ModelCheckpoint, _check_file_name_prefix, RunContext,_checkpoint_cb_for_save_op,\
-    LossMonitor, _InternalCallbackParam, _chg_ckpt_file_name_if_same_exist,\
-    _build_callbacks, CheckpointConfig, _set_cur_net
+from mindspore.train.callback import ModelCheckpoint, _check_file_name_prefix, RunContext, _checkpoint_cb_for_save_op, \
+    LossMonitor, _InternalCallbackParam, _chg_ckpt_file_name_if_same_exist, \
+    _build_callbacks, CheckpointConfig, _set_cur_net
 from mindspore.common.api import ms_function
 
+
 class Net(nn.Cell):
     """Net definition."""
 
@@ -52,6 +53,7 @@ class Net(nn.Cell):
 
 class LossNet(nn.Cell):
     """ LossNet definition """
+
     def __init__(self):
         super(LossNet, self).__init__()
         self.conv = nn.Conv2d(3, 64, 3, has_bias=False, weight_init='normal', pad_mode='valid')
@@ -110,8 +112,8 @@ def test_save_checkpoint():
     os.remove('./test_files/test_ckpt-model.pkl')
 
 
-def test_loss_monitor_graph_model():
-    """Test lossmonitor Graph model."""
+def test_loss_monitor_sink_model():
+    """Test LossMonitor with dataset sink mode."""
     cb_params = _InternalCallbackParam()
     cb_params.cur_epoch_num = 4
     cb_params.cur_step_num = 2
@@ -129,8 +131,8 @@
     callbacklist.end(run_context)
 
 
-def test_Loss_Monitor_feed_feed_model():
-    """Test Loss Monitor feed feed mode."""
+def test_loss_monitor_normal_model():
+    """Test LossMonitor with non-sink (normal) mode."""
     cb_params = _InternalCallbackParam()
     run_context = RunContext(cb_params)
     loss_cb = LossMonitor(1)
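Assuming a MindSpore checkout with this commit applied, the non-sink test pattern can be exercised standalone. This is a hypothetical sketch that mirrors the renamed test above; the `net_outputs` attribute as the loss source and the scalar value are assumptions, not part of this diff:

```python
# Hypothetical standalone run of the renamed non-sink test pattern;
# imports match those used in test_callback.py above.
import numpy as np
from mindspore.common.tensor import Tensor
from mindspore.train.callback import LossMonitor, RunContext, _InternalCallbackParam

cb_params = _InternalCallbackParam()
cb_params.cur_epoch_num = 1
cb_params.cur_step_num = 1
cb_params.net_outputs = Tensor(np.array(2.0, np.float32))  # assumed loss output
run_context = RunContext(cb_params)

loss_cb = LossMonitor(1)  # in non-sink mode, a loss is reported every step
loss_cb.step_end(run_context)
```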