From 6f6fc75ba571bb899cd23a2110ff32863f97c8f0 Mon Sep 17 00:00:00 2001
From: liubuyu <liubuyu1@huawei.com>
Date: Mon, 25 May 2020 20:34:50 +0800
Subject: [PATCH] fix aicpu kernel selection, pad insertion for NMSWithMask, and transdata split

- kernel_select_ascend.cc: when falling back to AICPU kernel selection, match
  against the queried aicpu_kernel_info_list instead of the TBE
  kernel_info_list.
- insert_pad_for_nms_with_mask.cc: drop the manual kernel build info setup for
  the inserted Pad node and let kernel selection handle it; rename
  INsertPadToGraph to InsertPadToGraph and remove the now-unused format and
  device type parameters.
- transdata_split.cc: refresh the kernel build info on the newly created
  transdata node instead of the transpose node.
---
 .../device/ascend/kernel_select_ascend.cc     |  2 +-
 .../enhancer/insert_pad_for_nms_with_mask.cc  | 26 ++-----------------
 .../ascend/ir_fission/transdata_split.cc      |  2 +-
 3 files changed, 4 insertions(+), 26 deletions(-)

diff --git a/mindspore/ccsrc/device/ascend/kernel_select_ascend.cc b/mindspore/ccsrc/device/ascend/kernel_select_ascend.cc
index 704763404..699d4d5d4 100644
--- a/mindspore/ccsrc/device/ascend/kernel_select_ascend.cc
+++ b/mindspore/ccsrc/device/ascend/kernel_select_ascend.cc
@@ -512,7 +512,7 @@ KernelSelectStatus SelectKernelInfo(const CNodePtr &kernel_node) {
     MS_LOG(WARNING) << "The node [" << kernel_node->DebugString()
                     << "] cannot find valid TBE kernel info, try to get aicpu kernel info";
     kernel::AICPUQuery(kernel_node, &aicpu_kernel_info_list);
-    select_status = SetMatchedKernelInfo(kernel_node, kernel_info_list);
+    select_status = SetMatchedKernelInfo(kernel_node, aicpu_kernel_info_list);
     AnfAlgo::SetNodeAttr(kAttrIsAICPUKernel, MakeValue(true), kernel_node);
   }
   // The kernel info not finded both in the aicpu kernel list & aicore kernel list
diff --git a/mindspore/ccsrc/pre_activate/ascend/enhancer/insert_pad_for_nms_with_mask.cc b/mindspore/ccsrc/pre_activate/ascend/enhancer/insert_pad_for_nms_with_mask.cc
index 20a10e7d2..b73fe6c83 100644
--- a/mindspore/ccsrc/pre_activate/ascend/enhancer/insert_pad_for_nms_with_mask.cc
+++ b/mindspore/ccsrc/pre_activate/ascend/enhancer/insert_pad_for_nms_with_mask.cc
@@ -33,8 +33,7 @@ const BaseRef InsertPadForNMSWithMask::DefinePattern() const {
   return VectorRef({prim::kPrimNMSWithMask, Xs});
 }
 
-AnfNodePtr INsertPadToGraph(const FuncGraphPtr &func_graph, const AnfNodePtr &input, const std::string &format,
-                            const TypeId &input_type, const TypeId &output_type, const TypeId &origin_type,
+AnfNodePtr InsertPadToGraph(const FuncGraphPtr &func_graph, const AnfNodePtr &input, const TypeId &origin_type,
                             const std::vector<size_t> &origin_shape) {
   MS_EXCEPTION_IF_NULL(func_graph);
   std::vector<AnfNodePtr> new_pad_inputs;
@@ -43,25 +42,6 @@ AnfNodePtr INsertPadToGraph(const FuncGraphPtr &func_graph, const AnfNodePtr &in
   new_pad_inputs.push_back(input);
   CNodePtr pad = func_graph->NewCNode(new_pad_inputs);
   MS_EXCEPTION_IF_NULL(pad);
-  // set kernel build info
-  kernel::KernelBuildInfo::KernelBuildInfoBuilder builder;
-  builder.SetInputsFormat({format});
-  builder.SetOutputsFormat({format});
-  builder.SetInputsDeviceType({input_type});
-  builder.SetOutputsDeviceType({output_type});
-  builder.SetFusionType(kernel::FusionType::OPAQUE);
-  builder.SetProcessor(kernel::Processor::AICORE);
-  if (kernel::OpLib::FindOp(prim::kPrimPad->name(), kernel::kTBE) != nullptr) {
-    builder.SetKernelType(KernelType::TBE_KERNEL);
-  } else {
-    builder.SetKernelType(KernelType::AICPU_KERNEL);
-  }
-
-  if (pad->kernel_info() == nullptr) {
-    auto kernel_info = std::make_shared<device::KernelInfo>();
-    pad->set_kernel_info(kernel_info);
-  }
-  AnfAlgo::SetSelectKernelBuildInfo(builder.Build(), pad.get());
   AnfAlgo::SetOutputInferTypeAndShape({origin_type}, {origin_shape}, pad.get());
   return pad;
 }
@@ -81,14 +61,12 @@ const AnfNodePtr InsertPadForNMSWithMask::Process(const FuncGraphPtr &func_graph
   for (size_t input_idx = 0; input_idx < AnfAlgo::GetInputTensorNum(cnode); input_idx++) {
     auto cur_input = AnfAlgo::GetInputNode(cnode, input_idx);
     auto origin_type = AnfAlgo::GetPrevNodeOutputInferDataType(cnode, input_idx);
-    auto format = AnfAlgo::GetPrevNodeOutputFormat(cnode, input_idx);
     auto origin_shape = AnfAlgo::GetPrevNodeOutputInferShape(cnode, input_idx);
     if (!(origin_shape.size() == 2 && origin_shape[1] == 5)) {
       return nullptr;
     }
     origin_shape[1] = 8;
-    auto device_type = AnfAlgo::GetPrevNodeOutputDeviceDataType(cnode, input_idx);
-    auto pad = INsertPadToGraph(func_graph, cur_input, format, origin_type, device_type, origin_type, origin_shape);
+    auto pad = InsertPadToGraph(func_graph, cur_input, origin_type, origin_shape);
     MS_EXCEPTION_IF_NULL(pad);
     pad->set_scope(cnode->scope());
     AnfAlgo::SetNodeAttr("paddings", MakeValue(std::vector<std::vector<int>>{{0, 0}, {0, 3}}), pad);
diff --git a/mindspore/ccsrc/pre_activate/ascend/ir_fission/transdata_split.cc b/mindspore/ccsrc/pre_activate/ascend/ir_fission/transdata_split.cc
index 035455db5..0305104f5 100644
--- a/mindspore/ccsrc/pre_activate/ascend/ir_fission/transdata_split.cc
+++ b/mindspore/ccsrc/pre_activate/ascend/ir_fission/transdata_split.cc
@@ -90,7 +90,7 @@ bool TransDataSplit::DoSplit(const FuncGraphPtr &func_graph, const AnfNodePtr &n
     new_transdata_node =
       NewTransOpNode(func_graph, new_transpose_node, kernel_select_, false, prim::KPrimTransData->name());
     RefreshKernelBuildInfo(kOpFormat_HWCN, output_format, AnfAlgo::GetOutputDeviceDataType(new_transdata_node, 0),
-                           new_transpose_node);
+                           new_transdata_node);
     new_replace_node = new_transdata_node;
   }
   FuncGraphManagerPtr manager = func_graph->manager();
-- 
GitLab