diff --git a/mindspore/ccsrc/frontend/parallel/auto_parallel/rec_core/rec_cost.cc b/mindspore/ccsrc/frontend/parallel/auto_parallel/rec_core/rec_cost.cc
index 21f073e34964402eb3c4a16810d5ed3d7c2a3a8c..db1c1a025d1b5f38da034bd5400812f9af919f2e 100644
--- a/mindspore/ccsrc/frontend/parallel/auto_parallel/rec_core/rec_cost.cc
+++ b/mindspore/ccsrc/frontend/parallel/auto_parallel/rec_core/rec_cost.cc
@@ -26,7 +26,6 @@
 
 namespace mindspore {
 namespace parallel {
-
 // Compute redistributed cost
 double CostRedis(const Graph::NodeType &node,
                  const std::vector<std::pair<std::string, StrategyRec>> &node_name_to_strategy,
diff --git a/mindspore/ccsrc/frontend/parallel/auto_parallel/rec_core/rec_generate_strategy.cc b/mindspore/ccsrc/frontend/parallel/auto_parallel/rec_core/rec_generate_strategy.cc
index e51bd579f3cc2d80ada7e36eb5c594b70a251077..46cb6a6e502d3ccf24ec96d0e56f16471cebdb06 100644
--- a/mindspore/ccsrc/frontend/parallel/auto_parallel/rec_core/rec_generate_strategy.cc
+++ b/mindspore/ccsrc/frontend/parallel/auto_parallel/rec_core/rec_generate_strategy.cc
@@ -746,7 +746,6 @@ Strategys CheckBroadcast(const std::vector<std::shared_ptr<OperatorInfo>> &ops,
 
   size_t first_tensor_dim = ops[iter_ops]->inputs_tensor_info()[0].shape().size();
   size_t second_tensor_dim = ops[iter_ops]->inputs_tensor_info()[1].shape().size();
-
   // Do Broadcasting in the second tensor.
   if (second_tensor_dim < first_tensor_dim) {
     bool braoadcast_first_tensor = false;
@@ -964,7 +963,6 @@ void GenerateEliminatedOperatorStrategyBackward(const std::vector<std::shared_ptr<OperatorInfo>> &ops,
     auto iter_ops = no_stra_op_list->at(iter_list - 1);
     Strategys stra;
     Dimensions s = CopyOutgoingOperatorInputStrategy(ops, input_tensor_names, iter_ops);
-
     if (s.size() != 0 && ops[iter_ops]->type() == SQUEEZE) {
       s = ModifyStrategyIfSqueezeOutgoing(ops, iter_ops, s);
     }
diff --git a/mindspore/ccsrc/frontend/parallel/auto_parallel/rec_core/rec_partition.cc b/mindspore/ccsrc/frontend/parallel/auto_parallel/rec_core/rec_partition.cc
index 9b6417a1971a3c3f03870a83c9823f6425f787df..ae52047071277744d448fe83d5ef51ca4065d1d7 100644
--- a/mindspore/ccsrc/frontend/parallel/auto_parallel/rec_core/rec_partition.cc
+++ b/mindspore/ccsrc/frontend/parallel/auto_parallel/rec_core/rec_partition.cc
@@ -204,7 +204,9 @@ Status PartitionForAllDevices(const size_t num_device, const double device_memor
 
   // Comopute iter times
   int iter_times = static_cast<int>(log2(num_device));
-
+  if (iter_times > 10) {
+    MS_LOG(EXCEPTION) << "ERROR: Number of iter_times can't be larger than 10.";
+  }
   // N-cuts loop
   for (int loop = 0; loop < iter_times; loop++) {
     // Sort by weights
diff --git a/mindspore/ccsrc/frontend/parallel/step_parallel.cc b/mindspore/ccsrc/frontend/parallel/step_parallel.cc
index fcafa177efa657dba34cc43d5cb965779fa241a3..38fdf4e55208ea98f4611ef69d3742fed667b0da 100644
--- a/mindspore/ccsrc/frontend/parallel/step_parallel.cc
+++ b/mindspore/ccsrc/frontend/parallel/step_parallel.cc
@@ -341,7 +341,6 @@ void Redistribution(const std::pair<AnfNodePtr, int> &node_pair, const OperatorI
   TensorInfo tensorinfo_out = next_distribute_operator->inputs_tensor_info()[IntToSize(index - 1)];
   TensorLayout tensorlayout_out = tensorinfo_out.tensor_layout();
   TensorLayout tensorlayout_in = GetTensorInLayout(middle_node, middle_prim, distribute_operator);
-
   if (tensor_redistribution.Init(tensorlayout_in, tensorlayout_out, dev_list) == FAILED) {
     MS_LOG(ERROR) << "Redistribution: middle_prim " << middle_prim->name() << " next_prim : " << next_prim_name;
     MS_LOG(ERROR) << "Redistribution: middle_node " << middle_node->ToString() << " next_node "