diff --git a/mindspore/ccsrc/device/kernel_runtime.cc b/mindspore/ccsrc/device/kernel_runtime.cc
index 7f3d31d8d08008fb876ae6dd062b069fe947abf2..d1a068b5841ddf241d74efcf765fe633f7c9e435 100644
--- a/mindspore/ccsrc/device/kernel_runtime.cc
+++ b/mindspore/ccsrc/device/kernel_runtime.cc
@@ -135,10 +135,11 @@ void KernelRuntime::AssignMemory(session::KernelGraph *graph) {
 }
 
 void KernelRuntime::RunOpAssignMemory(const std::vector<tensor::TensorPtr> &input_tensors,
-                                      const session::KernelGraph *graph) {
+                                      session::KernelGraph *graph) {
   MS_EXCEPTION_IF_NULL(graph);
   // assign memory for input nodes
   RunOpAssignInputMemory(input_tensors, graph);
+  AssignStaticMemoryValueNode(graph);
   for (const auto &cnode : graph->execution_order()) {
     // assign memory for output nodes
     RunOpAssignOutputMemory(cnode);
diff --git a/mindspore/ccsrc/device/kernel_runtime.h b/mindspore/ccsrc/device/kernel_runtime.h
index 61b43fd5c0a5fa1387941911baae16696054620e..8f4f769f551c15174cd7a4d925919c570ae46e59 100644
--- a/mindspore/ccsrc/device/kernel_runtime.h
+++ b/mindspore/ccsrc/device/kernel_runtime.h
@@ -46,7 +46,7 @@ class KernelRuntime {
   virtual ~KernelRuntime();
   virtual bool Init() = 0;
   virtual void AssignMemory(session::KernelGraph *graph);
-  void RunOpAssignMemory(const std::vector<tensor::TensorPtr> &input_tensors, const session::KernelGraph *graph);
+  void RunOpAssignMemory(const std::vector<tensor::TensorPtr> &input_tensors, session::KernelGraph *graph);
   virtual bool Run(session::KernelGraph *graph);
   virtual bool DumpData(session::KernelGraph *graph);
   virtual bool RunTask(const session::KernelGraph *graph);
diff --git a/mindspore/ccsrc/pre_activate/ascend/ascend_backend_optimization.cc b/mindspore/ccsrc/pre_activate/ascend/ascend_backend_optimization.cc
index 0de609f44131b345edd4cbff6a4bb72ff1a0459e..496a9b276febe63daba27d6d9e79fb014c079f82 100644
--- a/mindspore/ccsrc/pre_activate/ascend/ascend_backend_optimization.cc
+++ b/mindspore/ccsrc/pre_activate/ascend/ascend_backend_optimization.cc
@@ -222,6 +222,7 @@ void RunOpAscendBackendIRFusionOptimization(const std::shared_ptr<session::KernelGraph> &kernel_graph) {
   auto optimizer = std::make_shared<GraphOptimizer>();
   auto ir_fusion_pm = std::make_shared<PassManager>("ir_fusion_pm");
   ir_fusion_pm->AddPass(std::make_shared());
+  ir_fusion_pm->AddPass(std::make_shared());
   optimizer->AddPassManager(ir_fusion_pm);
   (void)optimizer->Optimize(kernel_graph);
diff --git a/mindspore/ops/_op_impl/tbe/assign_add.py b/mindspore/ops/_op_impl/tbe/assign_add.py
index fbbb9a997f519fe56409571b184bafb0134af603..2b20a7781dd97818329752b19c27b2bd21a36f14 100644
--- a/mindspore/ops/_op_impl/tbe/assign_add.py
+++ b/mindspore/ops/_op_impl/tbe/assign_add.py
@@ -25,7 +25,7 @@ assign_add_op_info = TBERegOp("AssignAdd") \
     .partial_flag(True) \
     .input(0, "ref", False, "required", "all") \
     .input(1, "value", False, "required", "all") \
-    .output(0, "output_ref", False, "required", "all") \
+    .output(0, "ref", False, "required", "all") \
     .dtype_format(DataType.I8_Default, DataType.I8_Default, DataType.I8_Default) \
     .dtype_format(DataType.I8_5HD, DataType.I8_5HD, DataType.I8_5HD) \
     .dtype_format(DataType.U8_Default, DataType.U8_Default, DataType.U8_Default) \
diff --git a/mindspore/ops/operations/debug_ops.py b/mindspore/ops/operations/debug_ops.py
index 1d8fdedc260404d653d4ebbe3140dade5b16176d..97fa883bac2ced5285dce665b819c98caf17e210 100644
--- a/mindspore/ops/operations/debug_ops.py
+++ b/mindspore/ops/operations/debug_ops.py
@@ -210,6 +210,10 @@ class Print(PrimitiveWithInfer):
     def __init__(self):
         pass
 
+    def __call__(self, *args):
+        for arg in args:
+            print(arg)
+
     def infer_shape(self, *inputs):
         return [1]