magicwindyyd / mindspore (forked from MindSpore / mindspore)
Commit 4508134c
Authored May 28, 2020 by leopz

add tensor_minnie and separate py from ir

Parent: 14f9a6e3
Showing 80 changed files with 1,536 additions and 1,248 deletions (+1536 -1248).
mindspore/ccsrc/debug/anf_ir_utils.cc  +2 -1
mindspore/ccsrc/debug/draw.cc  +10 -6
mindspore/ccsrc/debug/trace.cc  +2 -2
mindspore/ccsrc/device/ascend/ascend_device_address.cc  +2 -2
mindspore/ccsrc/device/convert_tensor_utils.h  +2 -2
mindspore/ccsrc/device/kernel_adjust.h  +1 -1
mindspore/ccsrc/device/kernel_runtime.h  +2 -2
mindspore/ccsrc/ir/anf.cc  +2 -2
mindspore/ccsrc/ir/anf_extends.cc  +1 -0
mindspore/ccsrc/ir/func_graph.cc  +4 -394
mindspore/ccsrc/ir/func_graph.h  +13 -3
mindspore/ccsrc/ir/func_graph_extends.cc  +422 -0
mindspore/ccsrc/ir/manager.cc  +5 -2
mindspore/ccsrc/ir/manager.h  +2 -1
mindspore/ccsrc/ir/meta_tensor.cc  +3 -484
mindspore/ccsrc/ir/meta_tensor.h  +2 -239
mindspore/ccsrc/ir/meta_tensor_extends.cc  +41 -0
mindspore/ccsrc/ir/tensor.cc  +494 -0
mindspore/ccsrc/ir/tensor.h  +278 -0
mindspore/ccsrc/kernel/kernel.h  +2 -2
mindspore/ccsrc/minnie/param_value_minnie.h  +4 -3
mindspore/ccsrc/minnie/tensor_minnie.cc  +34 -0
mindspore/ccsrc/minnie/tensor_minnie.h  +77 -0
mindspore/ccsrc/operator/ops.cc  +1 -13
mindspore/ccsrc/operator/ops.h  +2 -2
mindspore/ccsrc/operator/ops_extends.cc  +36 -0
mindspore/ccsrc/parallel/context.h  +2 -1
mindspore/ccsrc/parallel/graph_util/get_parallel_info.cc  +2 -1
mindspore/ccsrc/parallel/ops_info/gather_v2_info.cc  +1 -1
mindspore/ccsrc/parallel/ops_info/operator_info.cc  +2 -2
mindspore/ccsrc/parallel/ops_info/reduce_method_info.h  +2 -2
mindspore/ccsrc/parallel/step_auto_parallel.cc  +2 -2
mindspore/ccsrc/parallel/step_parallel.cc  +2 -2
mindspore/ccsrc/pipeline/parse/resolve.h  +2 -1
mindspore/ccsrc/pipeline/pipeline.h  +2 -2
mindspore/ccsrc/pipeline/pipeline_ge.cc  +1 -1
mindspore/ccsrc/pipeline/remove_value_node_dup.cc  +2 -2
mindspore/ccsrc/pipeline/static_analysis/abstract_value.h  +2 -2
mindspore/ccsrc/pipeline/static_analysis/prim.cc  +2 -2
mindspore/ccsrc/pipeline/static_analysis/static_analysis.cc  +2 -2
mindspore/ccsrc/pre_activate/common/pass_manager.cc  +2 -1
mindspore/ccsrc/predict/converter/attr_utils/convert_util.h  +2 -2
mindspore/ccsrc/predict/converter/executor_tensor.h  +2 -2
mindspore/ccsrc/session/ascend_session.cc  +2 -2
mindspore/ccsrc/session/cpu_session.cc  +2 -2
mindspore/ccsrc/session/session_basic.h  +2 -2
mindspore/ccsrc/session/session_context.h  +2 -2
mindspore/ccsrc/transform/convert.h  +2 -2
mindspore/ccsrc/transform/graph_runner.h  +2 -2
mindspore/ccsrc/transform/types.h  +2 -2
mindspore/ccsrc/transform/util.h  +2 -2
mindspore/ccsrc/utils/callbacks.h  +2 -2
mindspore/ccsrc/utils/callbacks_ge.h  +1 -1
mindspore/ccsrc/utils/context/ms_context.cc  +1 -1
mindspore/ccsrc/utils/convert_utils.cc  +2 -2
mindspore/ccsrc/utils/graph_utils.h  +2 -2
mindspore/ccsrc/utils/symbolic.h  +2 -1
mindspore/ccsrc/utils/tensorprint_utils.cc  +1 -1
mindspore/ccsrc/vm/vmimpl.cc  +3 -4
mindspore/ccsrc/vm/vmimpl.h  +2 -2
tests/ut/cpp/ir/meta_tensor_test.cc  +1 -1
tests/ut/cpp/operator/ops_test.cc  +1 -0
tests/ut/cpp/pipeline/static_analysis/prim_test.cc  +1 -1
tests/ut/cpp/pipeline/static_analysis/static_analysis_test.cc  +1 -1
tests/ut/cpp/pre_activate/ascend/enhancer/add_memcpy_async_test.cc  +1 -1
tests/ut/cpp/pre_activate/ascend/enhancer/getnext_memcpy_elimination.cc  +1 -1
tests/ut/cpp/pre_activate/ascend/format_type/check_consistency_test.cc  +2 -2
tests/ut/cpp/pre_activate/ascend/format_type/insert_cast_test.cc  +2 -2
tests/ut/cpp/pre_activate/ascend/format_type/merge_cast_to_op_test.cc  +2 -2
tests/ut/cpp/pre_activate/ascend/ir_fission/bn_grad_split_test.cc  +2 -2
tests/ut/cpp/pre_activate/ascend/ir_fission/bn_split_test.cc  +2 -2
tests/ut/cpp/pre_activate/ascend/ir_fission/layer_norm_grad_split_test.cc  +1 -1
tests/ut/cpp/pre_activate/pass/allreduce_fusion_test.cc  +2 -2
tests/ut/cpp/pre_activate/pass/common_subexpression_elimination_test.cc  +2 -2
tests/ut/cpp/pre_activate/pass/convert_const_input_to_tensor_input_test.cc  +1 -1
tests/ut/cpp/pre_activate/pass/convert_tuple_input_to_dynamic_input_test.cc  +1 -1
tests/ut/cpp/pre_activate/pass/convert_tuple_output_to_maketuple_test.cc  +1 -1
tests/ut/cpp/pre_activate/pass/eliminate_redundant_op_test.cc  +2 -2
tests/ut/cpp/transform/transform_base_test.h  +1 -1
tests/ut/cpp/vm/segment_runner_test.cc  +1 -1
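Per the commit message, the point of this change is to separate the Python (pybind11) layer from the core IR: ir/meta_tensor.h loses its pybind11/numpy dependencies, the new ir/tensor.h and ir/tensor.cc take over the Python-facing Tensor, and minnie/tensor_minnie.h adds a minimal tensor for the predict ("minnie") build. A rough sketch of the resulting layering, with illustrative class bodies rather than the real declarations:

// Illustrative layering only; the real declarations live in the files above.
#include <memory>
#include <vector>

namespace tensor {
// ir/meta_tensor.h: metadata only (dtype, shape), no pybind11/numpy includes.
class MetaTensor {
 public:
  std::vector<int> shape() const { return shape_; }
 protected:
  std::vector<int> shape_;
};

// ir/tensor.h: adds the data buffer and (in the real code) the numpy bridge.
class Tensor : public MetaTensor {
 public:
  void *data() { return data_.get(); }
 private:
  std::shared_ptr<void> data_;
};

// minnie/tensor_minnie.h: a thin tensor wrapper for the predict/minnie build.
class TensorMinnie : public MetaTensor {};
}  // namespace tensor

int main() {
  tensor::Tensor t;
  (void)t;
  return 0;
}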
mindspore/ccsrc/debug/anf_ir_utils.cc
 /**
- * Copyright 2019 Huawei Technologies Co., Ltd
+ * Copyright 2019-2020 Huawei Technologies Co., Ltd
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.
 ...
@@ -34,6 +34,7 @@
 #include "utils/ordered_set.h"
 #include "utils/utils.h"
 #include "debug/trace.h"
+#include "debug/label.h"
 #include "utils/context/ms_context.h"
 #include "operator/ops.h"
 ...
mindspore/ccsrc/debug/draw.cc
 /**
- * Copyright 2019 Huawei Technologies Co., Ltd
+ * Copyright 2019-2020 Huawei Technologies Co., Ltd
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.
 ...
@@ -16,7 +16,9 @@
 #include "debug/draw.h"
+#include <algorithm>
 #include <iostream>
+#include <iterator>
 #include <map>
 #include <vector>
 #include <string>
 ...
@@ -28,7 +30,7 @@
 #include "utils/graph_utils.h"
 #include "utils/utils.h"
 #include "operator/composite/composite.h"
-#include "ir/meta_tensor.h"
+#include "ir/tensor.h"

 namespace py = pybind11;
 ...
@@ -323,15 +325,17 @@ void BaseDigraph::FuncGraphParameters(const FuncGraphPtr &key) {
   auto py_p = param_value->value();
   if (py::hasattr(py_p, "default_input")) {
     py_p = py_p.attr("default_input");
+    std::vector<int> shape;
     if (py::hasattr(py_p, PYTHON_TENSOR_FLAG)) {
       auto m_tensor = py_p.cast<std::shared_ptr<tensor::Tensor>>();
-      py::tuple shape = m_tensor->GetPyTupleShape();
-      buffer_ << "[" << py::str(shape) << "]";
+      shape = m_tensor->shape();
     } else if (py::hasattr(py_p, PYTHON_META_TENSOR_FLAG)) {
       auto m_tensor = py_p.cast<std::shared_ptr<tensor::MetaTensor>>();
-      py::tuple shape = m_tensor->GetPyTupleShape();
-      buffer_ << "[" << py::str(shape) << "]";
+      shape = m_tensor->shape();
     }
+    std::ostringstream shape_str;
+    std::copy(shape.begin(), shape.end(), std::ostream_iterator<int>(shape_str, ","));
+    buffer_ << "[" << shape_str.str() << "]";
   }
   buffer_ << "</td></tr>";
 ...
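The rewritten shape-printing path above drops the Python round-trip (py::str on a py::tuple) in favor of plain C++ stream formatting. A minimal standalone sketch of the same idiom; note that std::ostream_iterator emits its delimiter after every element, so the output keeps a trailing comma, which the new draw.cc code accepts:

#include <algorithm>
#include <iostream>
#include <iterator>
#include <sstream>
#include <vector>

int main() {
  std::vector<int> shape{2, 3, 224, 224};
  std::ostringstream shape_str;
  // Copy each dimension into the stream, inserting "," after every element.
  std::copy(shape.begin(), shape.end(), std::ostream_iterator<int>(shape_str, ","));
  std::cout << "[" << shape_str.str() << "]\n";  // prints [2,3,224,224,]
  return 0;
}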
mindspore/ccsrc/debug/trace.cc
 /**
- * Copyright 2020 Huawei Technologies Co., Ltd
+ * Copyright 2019-2020 Huawei Technologies Co., Ltd
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.
 ...
@@ -30,7 +30,7 @@
 #include "ir/meta_func_graph.h"
 #include "utils/graph_utils.h"
 #include "operator/composite/composite.h"
-#include "ir/meta_tensor.h"
+#include "ir/tensor.h"
 #include "debug/anf_ir_utils.h"
 #include "pipeline/static_analysis/evaluator.h"
 ...
mindspore/ccsrc/device/ascend/ascend_device_address.cc
 /**
- * Copyright 2019 Huawei Technologies Co., Ltd
+ * Copyright 2019-2020 Huawei Technologies Co., Ltd
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.
 ...
@@ -22,7 +22,7 @@
 #include "device/kernel_runtime_manager.h"
 #include "device/convert_tensor_utils.h"
 #include "ir/dtype/type.h"
-#include "ir/meta_tensor.h"
+#include "ir/tensor.h"
 #include "kernel/common_utils.h"
 #include "utils/utils.h"
 #include "common/utils.h"
 ...
mindspore/ccsrc/device/convert_tensor_utils.h
 /**
- * Copyright 2019 Huawei Technologies Co., Ltd
+ * Copyright 2019-2020 Huawei Technologies Co., Ltd
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.
 ...
@@ -19,7 +19,7 @@
 #include <iostream>
 #include <vector>
-#include "ir/meta_tensor.h"
+#include "ir/tensor.h"

 namespace mindspore {
 namespace device {
 ...
mindspore/ccsrc/device/kernel_adjust.h
 ...
@@ -26,7 +26,7 @@
 #include "session/kernel_graph.h"
 #include "kernel/kernel_build_info.h"
 #include "session/session_context.h"
-#include "ir/meta_tensor.h"
+#include "ir/tensor.h"
 #include "device/ascend/profiling/profiling_utils.h"
 #include "device/kernel_info.h"
 ...
mindspore/ccsrc/device/kernel_runtime.h
 /**
- * Copyright 2019 Huawei Technologies Co., Ltd
+ * Copyright 2019-2020 Huawei Technologies Co., Ltd
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.
 ...
@@ -22,7 +22,7 @@
 #include <map>
 #include "device/device_address.h"
-#include "ir/meta_tensor.h"
+#include "ir/tensor.h"
 #include "predict/generator/utils/ir_model_util.h"
 #ifdef ENABLE_DUMP_E2E
 #include "debug/e2e_dump.h"
 ...
mindspore/ccsrc/ir/anf.cc
 /**
  * This is the C++ adaptation and derivative work of Myia (https://github.com/mila-iqia/myia/).
  *
- * Copyright 2019 Huawei Technologies Co., Ltd
+ * Copyright 2019-2020 Huawei Technologies Co., Ltd
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.
 ...
@@ -24,7 +24,7 @@
 #include <unordered_map>
 #include "ir/func_graph.h"
-#include "ir/primitive.h"
+#include "ir/primitive_base.h"

 namespace mindspore {
 // namespace to support intermediate representation definition
 ...
mindspore/ccsrc/ir/anf_extends.cc
 ...
@@ -25,6 +25,7 @@
 #include "pipeline/static_analysis/static_analysis.h"
 #include "operator/ops.h"
 #include "parallel/ops_info/ops_utils.h"
+#include "debug/label.h"

 namespace mindspore {
 // namespace to support intermediate representation definition
 ...
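anf_extends.cc is one of several *_extends.cc files this commit touches or introduces (func_graph_extends.cc, meta_tensor_extends.cc, ops_extends.cc): the declaration stays in one header while the debug- and Python-heavy method definitions live in a separate translation unit that lighter builds can omit. A toy sketch of that split, using hypothetical names (Node, node_extends.cc) and collapsed into one compilable file:

#include <sstream>
#include <string>

// core.h (hypothetical): the class declaration only.
struct Node {
  int id = 0;
  int Id() const;                   // defined in node.cc
  std::string DebugString() const;  // defined in node_extends.cc
};

// node.cc (hypothetical): lightweight methods the minimal build needs.
int Node::Id() const { return id; }

// node_extends.cc (hypothetical): debug-facing methods; a minimal ("minnie")
// build can simply leave this translation unit out of the link.
std::string Node::DebugString() const {
  std::ostringstream oss;
  oss << "Node(" << id << ")";
  return oss.str();
}

int main() { return Node{7}.Id() == 7 ? 0 : 1; }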
mindspore/ccsrc/ir/func_graph.cc
 /**
  * This is the C++ adaptation and derivative work of Myia (https://github.com/mila-iqia/myia/).
  *
- * Copyright 2019 Huawei Technologies Co., Ltd
+ * Copyright 2019-2020 Huawei Technologies Co., Ltd
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.
 ...
@@ -22,24 +22,14 @@
 #include <sstream>
 #include <utility>
-#include "debug/trace.h"
 #include "ir/manager.h"
-#include "ir/func_graph_cloner.h"
 #include "operator/ops.h"
-#include "pybind_api/export_flags.h"
 #include "utils/ordered_set.h"
-#include "pipeline/static_analysis/static_analysis.h"
-#include "pipeline/static_analysis/abstract_function.h"
-#include "debug/anf_ir_dump.h"
-#include "debug/draw.h"
-#include "debug/label.h"
+#include "utils/convert_utils.h"
+#include "debug/trace.h"

 namespace mindspore {
-using mindspore::abstract::AbstractFunction;
-using mindspore::abstract::AbstractFunctionPtr;
-using mindspore::abstract::AnalysisContextPtr;
-using mindspore::abstract::PrimitiveAbstractClosure;
-using mindspore::abstract::VirtualAbstractClosure;
 /*
  * Methods of Graph
  */
 ...
@@ -59,34 +49,6 @@ FuncGraph::FuncGraph()
   debug_info_ = std::make_shared<GraphDebugInfo>();
 }

-AbstractFunctionPtr FuncGraph::abstract() {
-  AbstractBasePtrList args_spec_list;
-  for (auto &p : parameters_) {
-    MS_EXCEPTION_IF_NULL(p);
-    if (p->abstract() == nullptr) {
-      MS_LOG(ERROR) << "Error!!";
-      return nullptr;
-    }
-    args_spec_list.push_back(p->abstract());
-  }
-  if (nullptr == output()) {
-    MS_LOG(ERROR) << "Error func graph no output";
-    return nullptr;
-  }
-  return std::make_shared<VirtualAbstractClosure>(args_spec_list, output()->abstract());
-}
-
-abstract::AbstractBasePtr FuncGraph::MakeAbstractClosure(const abstract::AnalysisContextPtr &context) {
-  AnalysisContextPtr temp_context = context;
-  if (temp_context == nullptr) {
-    temp_context = abstract::AnalysisContext::DummyContext();
-  }
-  return std::make_shared<abstract::FuncGraphAbstractClosure>(shared_from_base<FuncGraph>(), temp_context);
-}
-
 AnfNodePtr FuncGraph::output() const {
   // If return value is set, return should have two inputs.
   if (return_ != nullptr && return_->inputs().size() == 2) {
 ...
@@ -97,28 +59,6 @@ AnfNodePtr FuncGraph::output() const {
   }
 }

-void FuncGraph::set_output(const AnfNodePtr &value, bool force_new_ret) {
-  if (force_new_ret || return_ == nullptr) {
-    std::vector<AnfNodePtr> params({NewValueNode(prim::kPrimReturn), value});
-    FuncGraphPtr this_graph = shared_from_base<FuncGraph>();
-    return_ = this_graph->NewCNode(params);
-  } else {
-    if (manager_.lock()) {
-      manager_.lock()->SetEdge(return_, 1, value);
-    } else {
-      return_->set_input(1, value);
-    }
-  }
-  return_->set_abstract(value->abstract());
-  AnfNodePtr input0 = return_->input(0);
-  PrimitivePtr return_prim = prim::kPrimReturn;
-  auto f = std::make_shared<PrimitiveAbstractClosure>(return_prim, input0);
-  input0->set_abstract(f);
-}
-
 ParameterPtr FuncGraph::add_parameter() {
   FuncGraphPtr this_func_graph = shared_from_base<FuncGraph>();
   ParameterPtr p = std::make_shared<Parameter>(this_func_graph);
 ...
@@ -469,8 +409,6 @@ std::shared_ptr<std::list<FuncGraphPtr>> FuncGraph::recursive_graphs() {
   return mng->recursive_graphs(shared_from_base<FuncGraph>());
 }

-void FuncGraph::DumpFuncGraph(const std::string &path) { draw::Draw(path + ".dot", shared_from_base<FuncGraph>()); }
-
 AnfNodePtr FuncGraph::GetDefaultValueByName(const std::string &name) {
   auto itr = this->parameter_default_value_.find(name);
   if (itr == parameter_default_value_.end()) {
 ...
@@ -594,207 +532,6 @@ AnfNodePtr FuncGraph::GetParameterByName(const std::string &name) {
   return nullptr;
 }

-void FuncGraph::GenerateVarParams(const FuncGraphPtr &specialized_graph,
-                                  std::vector<AnfNodePtr> *specialized_parameter_list,
-                                  std::unordered_map<AnfNodePtr, AnfNodePtr> *repl_nodes,
-                                  int variable_args_count, int pos_args_input_count) {
-  // if there is variable argument, pass the input arguments that does not match positional args to it as a tuple
-  if (specialized_graph->has_vararg()) {
-    TraceManager::DebugTrace(
-      std::make_shared<TraceGenerateVarArg>(specialized_graph->GetVariableArgParameter()->debug_info()));
-    std::vector<AnfNodePtr> var_param_tuple_nodes;
-    var_param_tuple_nodes.push_back(NewValueNode(prim::kPrimMakeTuple));
-    if (variable_args_count < 0) {
-      MS_LOG(EXCEPTION) << "Function:" << this->ToString() << ", variable_args_count " << variable_args_count
-                        << " were given.";
-    }
-    // for python variable argument input , there is no upper limit
-    for (int i = 0; i < variable_args_count; ++i) {
-      ParameterPtr p = std::make_shared<Parameter>(specialized_graph);
-      std::string param_name = specialized_graph->GetVariableArgName() + std::to_string(i);
-      p->set_name(param_name);
-      MS_EXCEPTION_IF_NULL(p->debug_info());
-      p->debug_info()->set_name(param_name);
-      var_param_tuple_nodes.push_back(p);
-      MS_EXCEPTION_IF_NULL(specialized_parameter_list);
-      specialized_parameter_list->push_back(p);
-    }
-    auto var_tuple_param = specialized_graph->NewCNode(var_param_tuple_nodes);
-    (void)repl_nodes->emplace(specialized_graph->GetVariableArgParameter(), var_tuple_param);
-    TraceManager::EndTrace();
-  } else if (variable_args_count > 0) {
-    MS_LOG(EXCEPTION) << "Function:" << this->ToString() << " takes " << this->GetPositionalArgsCount()
-                      << " positional arguments, but " << pos_args_input_count << " were given.";
-  }
-}
-
-void FuncGraph::GenerateKwParams(const FuncGraphPtr &specialized_graph,
-                                 std::vector<AnfNodePtr> *specialized_parameter_list,
-                                 const std::vector<abstract::AbstractKeywordArgPtr> &kwarg_list,
-                                 std::unordered_map<AnfNodePtr, AnfNodePtr> *repl_nodes) {
-  std::vector<AnfNodePtr> kwarg_keys_tuple_nodes = {NewValueNode(prim::kPrimMakeTuple)};
-  std::vector<AnfNodePtr> kwarg_values_tuple_nodes = {NewValueNode(prim::kPrimMakeTuple)};
-  for (const auto &kwarg : kwarg_list) {
-    MS_EXCEPTION_IF_NULL(kwarg);
-    std::string kw_param_name = kwarg->get_key();
-    MS_EXCEPTION_IF_NULL(specialized_graph);
-    AnfNodePtr param_node = specialized_graph->GetParameterByName(kw_param_name);
-    // if not find correspoding parameter node
-    if (param_node == nullptr) {
-      if (!has_kwarg()) {
-        MS_LOG(EXCEPTION) << "Got unexpected keyword argument: " << kw_param_name;
-      } else {
-        ParameterPtr p = std::make_shared<Parameter>(specialized_graph);
-        std::string param_name = specialized_graph->GetVariableKwargName() + "[" + kw_param_name + "]";
-        MS_EXCEPTION_IF_NULL(specialized_parameter_list);
-        auto find_kw_arg_in_list =
-          std::any_of(specialized_parameter_list->begin(), specialized_parameter_list->end(),
-                      [param_name](const AnfNodePtr &node) {
-                        MS_EXCEPTION_IF_NULL(node);
-                        auto param = node->cast<ParameterPtr>();
-                        return param != nullptr && param->name() == param_name;
-                      });
-        if (find_kw_arg_in_list) {
-          MS_LOG(EXCEPTION) << "Multiply values for keyword argument:" << kw_param_name;
-        }
-        p->set_name(param_name);
-        p->debug_info()->set_name(param_name);
-        kwarg_keys_tuple_nodes.push_back(NewValueNode(kw_param_name));
-        auto extract_node =
-          specialized_graph->NewCNode({NewValueNode(prim::kPrimExtractKeywordArg), NewValueNode(kw_param_name), p});
-        kwarg_values_tuple_nodes.push_back(extract_node);
-        specialized_parameter_list->push_back(p);
-      }
-    } else {
-      auto node_itr = std::find(specialized_parameter_list->begin(), specialized_parameter_list->end(), param_node);
-      // multiply values found given for parameter
-      if (node_itr != specialized_parameter_list->end()) {
-        MS_LOG(EXCEPTION) << "Multiply values for specific argument:" << kw_param_name;
-      } else {
-        specialized_parameter_list->push_back(param_node);
-        auto extract_node = specialized_graph->NewCNode(
-          {NewValueNode(prim::kPrimExtractKeywordArg), NewValueNode(kw_param_name), param_node});
-        (void)repl_nodes->emplace(param_node, extract_node);
-      }
-    }
-  }
-  GenerateKwargReplNode(specialized_graph, repl_nodes, kwarg_keys_tuple_nodes, kwarg_values_tuple_nodes);
-}
-
-void FuncGraph::GenerateKwargReplNode(const FuncGraphPtr &specialized_graph,
-                                      std::unordered_map<AnfNodePtr, AnfNodePtr> *repl_nodes,
-                                      const std::vector<AnfNodePtr> &kwarg_keys_tuple_nodes,
-                                      const std::vector<AnfNodePtr> &kwarg_values_tuple_nodes) {
-  if (has_kwarg()) {
-    MS_EXCEPTION_IF_NULL(specialized_graph);
-    TraceManager::DebugTrace(
-      std::make_shared<TraceGenerateKwArg>(specialized_graph->GetVariableKwargParameter()->debug_info()));
-    auto make_tuple_keys = specialized_graph->NewCNode(kwarg_keys_tuple_nodes);
-    auto make_tuple_values = specialized_graph->NewCNode(kwarg_values_tuple_nodes);
-    auto make_dict_node =
-      specialized_graph->NewCNode({NewValueNode(prim::kPrimMakeDict), make_tuple_keys, make_tuple_values});
-    MS_EXCEPTION_IF_NULL(repl_nodes);
-    (void)repl_nodes->emplace(specialized_graph->GetVariableKwargParameter(), make_dict_node);
-    TraceManager::EndTrace();
-  }
-}
-
-bool FuncGraph::NeedGenerate(const std::vector<abstract::AbstractKeywordArgPtr> &kwarg_list) {
-  // if the function does not have any vararg/kwarg/kwonly/default value/kw args input
-  // return the original graph
-  if (!has_vararg() && kwonlyargs_count() == 0 && !has_kwarg() && GetDefaultValueCount() == 0 &&
-      kwarg_list.empty()) {
-    return false;
-  }
-  // if the graph is generated for specific input, do not need to generate again
-  if (is_generated()) {
-    return false;
-  }
-  return true;
-}
-
-void FuncGraph::GenerateDefaultValue(const FuncGraphPtr &specialized_graph,
-                                     const std::vector<AnfNodePtr> &specialized_parameter_list,
-                                     std::unordered_map<AnfNodePtr, AnfNodePtr> *repl_nodes) {
-  MS_EXCEPTION_IF_NULL(specialized_graph);
-  for (size_t i = 0; i < specialized_graph->parameters().size() - hyper_param_count(); ++i) {
-    auto param_node = specialized_graph->parameters()[i];
-    MS_EXCEPTION_IF_NULL(param_node);
-    auto param_name = param_node->cast<ParameterPtr>()->name();
-    auto node_itr = std::find(specialized_parameter_list.begin(), specialized_parameter_list.end(), param_node);
-    if (node_itr != specialized_parameter_list.end()) {
-      continue;
-    }
-    if (param_name == specialized_graph->GetVariableArgName() ||
-        param_name == specialized_graph->GetVariableKwargName()) {
-      continue;
-    }
-    auto default_value = specialized_graph->GetDefaultValueByName(param_name);
-    if (default_value == nullptr) {
-      MS_LOG(EXCEPTION) << "Miss argument input for parameter:" << param_name;
-    }
-    MS_EXCEPTION_IF_NULL(repl_nodes);
-    (void)repl_nodes->emplace(param_node, default_value);
-  }
-}
-
-FuncGraphPtr FuncGraph::GenerateGraph(const AbstractBasePtrList &args_spec_list) {
-  std::vector<abstract::AbstractKeywordArgPtr> kwarg_list;
-  size_t arguments_count = args_spec_list.size();
-  for (const auto &arg : args_spec_list) {
-    // if it is a keyword argument
-    MS_EXCEPTION_IF_NULL(arg);
-    if (arg->isa<abstract::AbstractKeywordArg>()) {
-      kwarg_list.push_back(dyn_cast<abstract::AbstractKeywordArg>(arg));
-    }
-  }
-  if (!NeedGenerate(kwarg_list)) {
-    return shared_from_base<FuncGraph>();
-  }
-  FuncGraphPtr specialized_graph = BasicClone(shared_from_base<FuncGraph>());
-  size_t kwarg_count = kwarg_list.size();
-  int pos_args_input_count = SizeToInt(arguments_count - kwarg_count - hyper_param_count());
-  int pos_args_count = std::min(pos_args_input_count, this->GetPositionalArgsCount());
-  int variable_args_count = pos_args_input_count - pos_args_count;
-  std::vector<AnfNodePtr> specialized_parameter_list;
-  std::unordered_map<AnfNodePtr, AnfNodePtr> repl_nodes;
-  // the parameters that has arg input, copy from original parameters
-  for (size_t i = 0; i < IntToSize(pos_args_count); ++i) {
-    specialized_parameter_list.push_back(specialized_graph->parameters()[i]);
-  }
-  GenerateVarParams(specialized_graph, &specialized_parameter_list, &repl_nodes, variable_args_count,
-                    pos_args_input_count);
-  GenerateKwParams(specialized_graph, &specialized_parameter_list, kwarg_list, &repl_nodes);
-  GenerateDefaultValue(specialized_graph, specialized_parameter_list, &repl_nodes);
-  // append hyper parameter to specialized_parameter_list
-  MS_EXCEPTION_IF_NULL(specialized_graph);
-  auto params = specialized_graph->parameters();
-  (void)std::transform(params.end() - SizeToInt(hyper_param_count()), params.end(),
-                       std::back_inserter(specialized_parameter_list),
-                       [](const AnfNodePtr &node) { return node; });
-  std::shared_ptr<mindspore::FuncGraphManager> manager = mindspore::Manage(specialized_graph, false);
-  auto tr = manager->Transact();
-  for (auto &node_pair : repl_nodes) {
-    MS_LOG(DEBUG) << "GenerateGraph replace:" << node_pair.first->DebugString() << "-"
-                  << node_pair.second->DebugString();
-    (void)tr.Replace(node_pair.first, node_pair.second);
-  }
-  tr.SetParameters(specialized_graph, specialized_parameter_list);
-  tr.Commit();
-  specialized_graph->set_has_kwarg(false);
-  specialized_graph->set_has_vararg(false);
-  specialized_graph->set_kwonlyargs_count(0);
-  specialized_graph->ClearDefaultValues();
-  specialized_graph->set_is_generate(true);
-  return specialized_graph;
-}
-
 void FuncGraph::add_parameter_obj_node(const AnfNodePtr &p) { paramter_obj_nodes_.push_back(p); }

 std::list<CNodePtr> FuncGraph::GetOrderedCnodes() {
 ...
@@ -873,133 +610,6 @@ void FuncGraph::CheckOrder() {
   }
 }

-const char kPrimHasEffect[] = "_side_effect_flag";
-
-bool FuncGraph::HasEffect(const CNodePtr &cnode) {
-  auto prim = GetCNodePrimitive(cnode);
-  if (prim != nullptr && prim->isa<prim::DoSignaturePrimitive>()) {
-    auto do_sig = prim->cast<prim::DoSignaturePrimitivePtr>();
-    auto prim_val = do_sig->function();
-    if (prim_val != nullptr && prim_val->isa<Primitive>()) {
-      prim = prim_val->cast<PrimitivePtr>();
-    } else {
-      prim = nullptr;
-    }
-  }
-  if (prim != nullptr) {
-    auto effect_val = prim->GetAttr(kPrimHasEffect);
-    if (effect_val && effect_val->isa<BoolImm>()) {
-      auto effect_bool = GetValue<bool>(effect_val);
-      return effect_bool;
-    }
-  }
-  return false;
-}
-
-std::shared_ptr<OrderedSet<CNodePtr>> FindRoots(const std::vector<CNodePtr> &segment) {
-  std::shared_ptr<OrderedSet<CNodePtr>> roots = std::make_shared<OrderedSet<CNodePtr>>(segment);
-  for (const auto &node : segment) {
-    if (roots->size() == 1) {
-      return roots;
-    }
-    auto input_size = node->size();
-    for (size_t i = 0; i < input_size; i++) {
-      auto in_node = node->input(i);
-      auto in_cnode = in_node->cast<CNodePtr>();
-      if (in_cnode != nullptr) {
-        (void)roots->erase(in_cnode);
-      }
-    }
-  }
-  return roots;
-}
-
-std::shared_ptr<OrderedSet<CNodePtr>> FindLeaves(const std::vector<CNodePtr> &segment) {
-  std::shared_ptr<OrderedSet<CNodePtr>> nodes = std::make_shared<OrderedSet<CNodePtr>>(segment);
-  for (const auto &node : segment) {
-    if (nodes->size() == 1) {
-      return nodes;
-    }
-    if (IsPrimitiveCNode(node, prim::kPrimSwitch)) {
-      (void)nodes->erase(node);
-      continue;
-    }
-    auto input_size = node->size();
-    for (size_t i = 0; i < input_size; i++) {
-      auto in_node = node->input(i);
-      if (!in_node->isa<CNode>()) {
-        continue;
-      }
-      auto in_cnode = in_node->cast<CNodePtr>();
-      if (in_cnode != nullptr) {
-        if (std::find(segment.begin(), segment.end(), in_cnode) != segment.end()) {
-          (void)nodes->erase(node);
-          break;
-        }
-      }
-    }
-  }
-  return nodes;
-}
-
-void FuncGraph::ReleaseFullOrderToEffectOrder() {
-  MS_LOG(DEBUG) << "Flag has_effect " << has_flag(GRAPH_FLAG_HAS_EFFECT) << ".";
-  if (has_flag(GRAPH_FLAG_HAS_EFFECT)) {
-    std::list<AnfNodePtr> depends_order;
-    std::vector<CNodePtr> segment;
-    for (const auto &cnode : order_) {
-      if (IsPrimitiveCNode(cnode, prim::kPrimReturn)) {
-        continue;
-      }
-      if (HasEffect(cnode)) {
-        MS_LOG(DEBUG) << "Meet a effect node " << cnode->DebugString() << ".";
-        if (segment.size() > 0) {
-          auto roots = FindRoots(segment);
-          for (auto iter = roots->begin(); iter != roots->end(); (void)iter++) {
-            depends_order.push_back(*iter);
-          }
-        }
-        segment.clear();
-        depends_order.push_back(cnode);
-      } else {
-        MS_LOG(DEBUG) << "Meet a general node " << cnode->DebugString() << ".";
-        segment.push_back(cnode);
-      }
-    }
-    if (segment.size() > 1) {
-      auto roots = FindRoots(segment);
-      for (auto iter = roots->begin(); iter != roots->end(); (void)iter++) {
-        depends_order.push_back(*iter);
-      }
-    }
-    std::vector<AnfNodePtr> depend_inputs;
-    auto old_ret = output();
-    for (auto iter = depends_order.rbegin(); iter != depends_order.rend(); (void)iter++) {
-      if (*iter != old_ret) {
-        depend_inputs.push_back(*iter);
-      }
-    }
-    set_flags(GRAPH_FLAG_HAS_EFFECT, false);
-    set_flags(GRAPH_FLAG_EFFECT_PATIAL_ORDER, true);
-    if (!depend_inputs.empty()) {
-      SetEffectDepends(depend_inputs);
-    }
-  }
-}
-
-void FuncGraph::SetEffectDepends(const std::vector<AnfNodePtr> &depend_inputs) {
-  auto old_ret = output();
-  std::vector<AnfNodePtr> inputs{NewValueNode(prim::kPrimDepend), old_ret};
-  (void)inputs.insert(inputs.end(), depend_inputs.begin(), depend_inputs.end());
-  auto new_ret = NewCNode(inputs);
-  auto mng = manager();
-  if (mng) {
-    (void)mng->Replace(old_ret, new_ret);
-  } else {
-    return_->set_input(1, new_ret);
-  }
-}
-
 size_t NewFgSeenGeneration() {
   static size_t fg_seen_generation = 0;
   return ++fg_seen_generation;
 ...
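Most of the deletions above are the graph-specialization helpers, which reappear verbatim in the new func_graph_extends.cc later in this diff. The argument bookkeeping in GenerateGraph is the subtle part, so here is a small worked example of the counts it derives, with hypothetical numbers rather than values from the diff:

#include <algorithm>
#include <cstdio>

int main() {
  // Hypothetical call: def f(a, b, *args, **kwargs) invoked as f(1, 2, 3, 4, k=5).
  int arguments_count = 5;    // every abstract argument, keyword args included
  int kwarg_count = 1;        // k=5
  int hyper_param_count = 0;  // no hyper-parameters appended to the graph
  int positional_params = 2;  // a, b

  int pos_args_input_count = arguments_count - kwarg_count - hyper_param_count;  // 4
  int pos_args_count = std::min(pos_args_input_count, positional_params);        // 2
  int variable_args_count = pos_args_input_count - pos_args_count;  // 2 -> *args = (3, 4)

  std::printf("%d %d %d\n", pos_args_input_count, pos_args_count, variable_args_count);
  return 0;
}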
mindspore/ccsrc/ir/func_graph.h
 /**
  * This is the C++ adaptation and derivative work of Myia (https://github.com/mila-iqia/myia/).
  *
- * Copyright 2019 Huawei Technologies Co., Ltd
+ * Copyright 2019-2020 Huawei Technologies Co., Ltd
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.
 ...
@@ -30,9 +30,9 @@
 #include "ir/anf.h"
-#include "ir/manager.h"
+#include "utils/any.h"
 #include "utils/ordered_set.h"
-#include "pipeline/static_analysis/abstract_value.h"
+#include "utils/ordered_map.h"
+#include "utils/base_ref.h"

 namespace mindspore {
 using BaseRefCounterMap = OrderedMap<BaseRef, int, BaseRefHash>;
 ...
@@ -50,6 +50,16 @@ const char FUNC_GRAPH_FLAG_DEFER_INLINE[] = "defer_inline";
 const char FUNC_GRAPH_FLAG_CORE[] = "core";
 const char FUNC_GRAPH_FLAG_SPECIALIZE_PARAMETER[] = "spec_param";

+namespace abstract {
+class AbstractKeywordArg;
+using AbstractKeywordArgPtr = std::shared_ptr<AbstractKeywordArg>;
+class AbstractFunction;
+using AbstractFunctionPtr = std::shared_ptr<AbstractFunction>;
+}  // namespace abstract
+
+class FuncGraphManager;
+using FuncGraphManagerPtr = std::shared_ptr<FuncGraphManager>;
+
 // ANF transform class
 // either a primitive or a func_graph
 class FuncGraphTransform {
 ...
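Instead of including ir/manager.h and the static-analysis headers, func_graph.h now forward-declares FuncGraphManager and the abstract types it only names through pointers. A minimal sketch of that idiom with a made-up Manager type:

#include <memory>

// A forward declaration is enough to declare smart-pointer members and
// aliases; the full definition is only needed where the type is used.
class Manager;  // hypothetical heavyweight class; its header is not included
using ManagerPtr = std::shared_ptr<Manager>;

class Graph {
 public:
  void set_manager(const ManagerPtr &m) { manager_ = m; }
 private:
  ManagerPtr manager_;  // fine: std::shared_ptr<T> tolerates incomplete T here
};

int main() {
  Graph g;
  (void)g;
  return 0;
}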
mindspore/ccsrc/ir/func_graph_extends.cc (new file, mode 100644)
/**
 * Copyright 2020 Huawei Technologies Co., Ltd
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#include "ir/func_graph.h"

#include <algorithm>
#include <sstream>
#include <utility>

#include "ir/manager.h"
#include "ir/func_graph_cloner.h"
#include "operator/ops.h"
#include "utils/ordered_set.h"
#include "pipeline/static_analysis/abstract_value.h"
#include "pipeline/static_analysis/static_analysis.h"
#include "pipeline/static_analysis/abstract_function.h"
#include "debug/anf_ir_dump.h"
#include "debug/trace.h"
#include "debug/draw.h"
#include "debug/label.h"

namespace mindspore {
using mindspore::abstract::AbstractFunction;
using mindspore::abstract::AbstractFunctionPtr;
using mindspore::abstract::AnalysisContextPtr;
using mindspore::abstract::PrimitiveAbstractClosure;
using mindspore::abstract::VirtualAbstractClosure;

AbstractFunctionPtr FuncGraph::abstract() {
  AbstractBasePtrList args_spec_list;
  for (auto &p : parameters_) {
    MS_EXCEPTION_IF_NULL(p);
    if (p->abstract() == nullptr) {
      MS_LOG(ERROR) << "Error!!";
      return nullptr;
    }
    args_spec_list.push_back(p->abstract());
  }
  if (nullptr == output()) {
    MS_LOG(ERROR) << "Error func graph no output";
    return nullptr;
  }
  return std::make_shared<VirtualAbstractClosure>(args_spec_list, output()->abstract());
}

abstract::AbstractBasePtr FuncGraph::MakeAbstractClosure(const abstract::AnalysisContextPtr &context) {
  AnalysisContextPtr temp_context = context;
  if (temp_context == nullptr) {
    temp_context = abstract::AnalysisContext::DummyContext();
  }
  return std::make_shared<abstract::FuncGraphAbstractClosure>(shared_from_base<FuncGraph>(), temp_context);
}

void FuncGraph::set_output(const AnfNodePtr &value, bool force_new_ret) {
  if (force_new_ret || return_ == nullptr) {
    std::vector<AnfNodePtr> params({NewValueNode(prim::kPrimReturn), value});
    FuncGraphPtr this_graph = shared_from_base<FuncGraph>();
    return_ = this_graph->NewCNode(params);
  } else {
    if (manager_.lock()) {
      manager_.lock()->SetEdge(return_, 1, value);
    } else {
      return_->set_input(1, value);
    }
  }
  return_->set_abstract(value->abstract());
  AnfNodePtr input0 = return_->input(0);
  PrimitivePtr return_prim = prim::kPrimReturn;
  auto f = std::make_shared<PrimitiveAbstractClosure>(return_prim, input0);
  input0->set_abstract(f);
}

void FuncGraph::DumpFuncGraph(const std::string &path) { draw::Draw(path + ".dot", shared_from_base<FuncGraph>()); }

void FuncGraph::GenerateVarParams(const FuncGraphPtr &specialized_graph,
                                  std::vector<AnfNodePtr> *specialized_parameter_list,
                                  std::unordered_map<AnfNodePtr, AnfNodePtr> *repl_nodes,
                                  int variable_args_count, int pos_args_input_count) {
  // if there is variable argument, pass the input arguments that does not match positional args to it as a tuple
  if (specialized_graph->has_vararg()) {
    TraceManager::DebugTrace(
      std::make_shared<TraceGenerateVarArg>(specialized_graph->GetVariableArgParameter()->debug_info()));
    std::vector<AnfNodePtr> var_param_tuple_nodes;
    var_param_tuple_nodes.push_back(NewValueNode(prim::kPrimMakeTuple));
    if (variable_args_count < 0) {
      MS_LOG(EXCEPTION) << "Function:" << this->ToString() << ", variable_args_count " << variable_args_count
                        << " were given.";
    }
    // for python variable argument input , there is no upper limit
    for (int i = 0; i < variable_args_count; ++i) {
      ParameterPtr p = std::make_shared<Parameter>(specialized_graph);
      std::string param_name = specialized_graph->GetVariableArgName() + std::to_string(i);
      p->set_name(param_name);
      MS_EXCEPTION_IF_NULL(p->debug_info());
      p->debug_info()->set_name(param_name);
      var_param_tuple_nodes.push_back(p);
      MS_EXCEPTION_IF_NULL(specialized_parameter_list);
      specialized_parameter_list->push_back(p);
    }
    auto var_tuple_param = specialized_graph->NewCNode(var_param_tuple_nodes);
    (void)repl_nodes->emplace(specialized_graph->GetVariableArgParameter(), var_tuple_param);
    TraceManager::EndTrace();
  } else if (variable_args_count > 0) {
    MS_LOG(EXCEPTION) << "Function:" << this->ToString() << " takes " << this->GetPositionalArgsCount()
                      << " positional arguments, but " << pos_args_input_count << " were given.";
  }
}

void FuncGraph::GenerateKwParams(const FuncGraphPtr &specialized_graph,
                                 std::vector<AnfNodePtr> *specialized_parameter_list,
                                 const std::vector<abstract::AbstractKeywordArgPtr> &kwarg_list,
                                 std::unordered_map<AnfNodePtr, AnfNodePtr> *repl_nodes) {
  std::vector<AnfNodePtr> kwarg_keys_tuple_nodes = {NewValueNode(prim::kPrimMakeTuple)};
  std::vector<AnfNodePtr> kwarg_values_tuple_nodes = {NewValueNode(prim::kPrimMakeTuple)};
  for (const auto &kwarg : kwarg_list) {
    MS_EXCEPTION_IF_NULL(kwarg);
    std::string kw_param_name = kwarg->get_key();
    MS_EXCEPTION_IF_NULL(specialized_graph);
    AnfNodePtr param_node = specialized_graph->GetParameterByName(kw_param_name);
    // if not find correspoding parameter node
    if (param_node == nullptr) {
      if (!has_kwarg()) {
        MS_LOG(EXCEPTION) << "Got unexpected keyword argument: " << kw_param_name;
      } else {
        ParameterPtr p = std::make_shared<Parameter>(specialized_graph);
        std::string param_name = specialized_graph->GetVariableKwargName() + "[" + kw_param_name + "]";
        MS_EXCEPTION_IF_NULL(specialized_parameter_list);
        auto find_kw_arg_in_list =
          std::any_of(specialized_parameter_list->begin(), specialized_parameter_list->end(),
                      [param_name](const AnfNodePtr &node) {
                        MS_EXCEPTION_IF_NULL(node);
                        auto param = node->cast<ParameterPtr>();
                        return param != nullptr && param->name() == param_name;
                      });
        if (find_kw_arg_in_list) {
          MS_LOG(EXCEPTION) << "Multiply values for keyword argument:" << kw_param_name;
        }
        p->set_name(param_name);
        p->debug_info()->set_name(param_name);
        kwarg_keys_tuple_nodes.push_back(NewValueNode(kw_param_name));
        auto extract_node =
          specialized_graph->NewCNode({NewValueNode(prim::kPrimExtractKeywordArg), NewValueNode(kw_param_name), p});
        kwarg_values_tuple_nodes.push_back(extract_node);
        specialized_parameter_list->push_back(p);
      }
    } else {
      auto node_itr = std::find(specialized_parameter_list->begin(), specialized_parameter_list->end(), param_node);
      // multiply values found given for parameter
      if (node_itr != specialized_parameter_list->end()) {
        MS_LOG(EXCEPTION) << "Multiply values for specific argument:" << kw_param_name;
      } else {
        specialized_parameter_list->push_back(param_node);
        auto extract_node = specialized_graph->NewCNode(
          {NewValueNode(prim::kPrimExtractKeywordArg), NewValueNode(kw_param_name), param_node});
        (void)repl_nodes->emplace(param_node, extract_node);
      }
    }
  }
  GenerateKwargReplNode(specialized_graph, repl_nodes, kwarg_keys_tuple_nodes, kwarg_values_tuple_nodes);
}

void FuncGraph::GenerateKwargReplNode(const FuncGraphPtr &specialized_graph,
                                      std::unordered_map<AnfNodePtr, AnfNodePtr> *repl_nodes,
                                      const std::vector<AnfNodePtr> &kwarg_keys_tuple_nodes,
                                      const std::vector<AnfNodePtr> &kwarg_values_tuple_nodes) {
  if (has_kwarg()) {
    MS_EXCEPTION_IF_NULL(specialized_graph);
    TraceManager::DebugTrace(
      std::make_shared<TraceGenerateKwArg>(specialized_graph->GetVariableKwargParameter()->debug_info()));
    auto make_tuple_keys = specialized_graph->NewCNode(kwarg_keys_tuple_nodes);
    auto make_tuple_values = specialized_graph->NewCNode(kwarg_values_tuple_nodes);
    auto make_dict_node =
      specialized_graph->NewCNode({NewValueNode(prim::kPrimMakeDict), make_tuple_keys, make_tuple_values});
    MS_EXCEPTION_IF_NULL(repl_nodes);
    (void)repl_nodes->emplace(specialized_graph->GetVariableKwargParameter(), make_dict_node);
    TraceManager::EndTrace();
  }
}

bool FuncGraph::NeedGenerate(const std::vector<abstract::AbstractKeywordArgPtr> &kwarg_list) {
  // if the function does not have any vararg/kwarg/kwonly/default value/kw args input
  // return the original graph
  if (!has_vararg() && kwonlyargs_count() == 0 && !has_kwarg() && GetDefaultValueCount() == 0 &&
      kwarg_list.empty()) {
    return false;
  }
  // if the graph is generated for specific input, do not need to generate again
  if (is_generated()) {
    return false;
  }
  return true;
}

void FuncGraph::GenerateDefaultValue(const FuncGraphPtr &specialized_graph,
                                     const std::vector<AnfNodePtr> &specialized_parameter_list,
                                     std::unordered_map<AnfNodePtr, AnfNodePtr> *repl_nodes) {
  MS_EXCEPTION_IF_NULL(specialized_graph);
  for (size_t i = 0; i < specialized_graph->parameters().size() - hyper_param_count(); ++i) {
    auto param_node = specialized_graph->parameters()[i];
    MS_EXCEPTION_IF_NULL(param_node);
    auto param_name = param_node->cast<ParameterPtr>()->name();
    auto node_itr = std::find(specialized_parameter_list.begin(), specialized_parameter_list.end(), param_node);
    if (node_itr != specialized_parameter_list.end()) {
      continue;
    }
    if (param_name == specialized_graph->GetVariableArgName() ||
        param_name == specialized_graph->GetVariableKwargName()) {
      continue;
    }
    auto default_value = specialized_graph->GetDefaultValueByName(param_name);
    if (default_value == nullptr) {
      MS_LOG(EXCEPTION) << "Miss argument input for parameter:" << param_name;
    }
    MS_EXCEPTION_IF_NULL(repl_nodes);
    (void)repl_nodes->emplace(param_node, default_value);
  }
}

FuncGraphPtr FuncGraph::GenerateGraph(const AbstractBasePtrList &args_spec_list) {
  std::vector<abstract::AbstractKeywordArgPtr> kwarg_list;
  size_t arguments_count = args_spec_list.size();
  for (const auto &arg : args_spec_list) {
    // if it is a keyword argument
    MS_EXCEPTION_IF_NULL(arg);
    if (arg->isa<abstract::AbstractKeywordArg>()) {
      kwarg_list.push_back(dyn_cast<abstract::AbstractKeywordArg>(arg));
    }
  }
  if (!NeedGenerate(kwarg_list)) {
    return shared_from_base<FuncGraph>();
  }
  FuncGraphPtr specialized_graph = BasicClone(shared_from_base<FuncGraph>());
  size_t kwarg_count = kwarg_list.size();
  int pos_args_input_count = SizeToInt(arguments_count - kwarg_count - hyper_param_count());
  int pos_args_count = std::min(pos_args_input_count, this->GetPositionalArgsCount());
  int variable_args_count = pos_args_input_count - pos_args_count;
  std::vector<AnfNodePtr> specialized_parameter_list;
  std::unordered_map<AnfNodePtr, AnfNodePtr> repl_nodes;
  // the parameters that has arg input, copy from original parameters
  for (size_t i = 0; i < IntToSize(pos_args_count); ++i) {
    specialized_parameter_list.push_back(specialized_graph->parameters()[i]);
  }
  GenerateVarParams(specialized_graph, &specialized_parameter_list, &repl_nodes, variable_args_count,
                    pos_args_input_count);
  GenerateKwParams(specialized_graph, &specialized_parameter_list, kwarg_list, &repl_nodes);
  GenerateDefaultValue(specialized_graph, specialized_parameter_list, &repl_nodes);
  // append hyper parameter to specialized_parameter_list
  MS_EXCEPTION_IF_NULL(specialized_graph);
  auto params = specialized_graph->parameters();
  (void)std::transform(params.end() - SizeToInt(hyper_param_count()), params.end(),
                       std::back_inserter(specialized_parameter_list),
                       [](const AnfNodePtr &node) { return node; });
  std::shared_ptr<mindspore::FuncGraphManager> manager = mindspore::Manage(specialized_graph, false);
  auto tr = manager->Transact();
  for (auto &node_pair : repl_nodes) {
    MS_LOG(DEBUG) << "GenerateGraph replace:" << node_pair.first->DebugString() << "-"
                  << node_pair.second->DebugString();
    (void)tr.Replace(node_pair.first, node_pair.second);
  }
  tr.SetParameters(specialized_graph, specialized_parameter_list);
  tr.Commit();
  specialized_graph->set_has_kwarg(false);
  specialized_graph->set_has_vararg(false);
  specialized_graph->set_kwonlyargs_count(0);
  specialized_graph->ClearDefaultValues();
  specialized_graph->set_is_generate(true);
  return specialized_graph;
}

const char kPrimHasEffect[] = "_side_effect_flag";

bool FuncGraph::HasEffect(const CNodePtr &cnode) {
  auto prim = GetCNodePrimitive(cnode);
  if (prim != nullptr && prim->isa<prim::DoSignaturePrimitive>()) {
    auto do_sig = prim->cast<prim::DoSignaturePrimitivePtr>();
    auto prim_val = do_sig->function();
    if (prim_val != nullptr && prim_val->isa<Primitive>()) {
      prim = prim_val->cast<PrimitivePtr>();
    } else {
      prim = nullptr;
    }
  }
  if (prim != nullptr) {
    auto effect_val = prim->GetAttr(kPrimHasEffect);
    if (effect_val && effect_val->isa<BoolImm>()) {
      auto effect_bool = GetValue<bool>(effect_val);
      return effect_bool;
    }
  }
  return false;
}

std::shared_ptr<OrderedSet<CNodePtr>> FindRoots(const std::vector<CNodePtr> &segment) {
  std::shared_ptr<OrderedSet<CNodePtr>> roots = std::make_shared<OrderedSet<CNodePtr>>(segment);
  for (const auto &node : segment) {
    if (roots->size() == 1) {
      return roots;
    }
    auto input_size = node->size();
    for (size_t i = 0; i < input_size; i++) {
      auto in_node = node->input(i);
      auto in_cnode = in_node->cast<CNodePtr>();
      if (in_cnode != nullptr) {
        (void)roots->erase(in_cnode);
      }
    }
  }
  return roots;
}

std::shared_ptr<OrderedSet<CNodePtr>> FindLeaves(const std::vector<CNodePtr> &segment) {
  std::shared_ptr<OrderedSet<CNodePtr>> nodes = std::make_shared<OrderedSet<CNodePtr>>(segment);
  for (const auto &node : segment) {
    if (nodes->size() == 1) {
      return nodes;
    }
    if (IsPrimitiveCNode(node, prim::kPrimSwitch)) {
      (void)nodes->erase(node);
      continue;
    }
    auto input_size = node->size();
    for (size_t i = 0; i < input_size; i++) {
      auto in_node = node->input(i);
      if (!in_node->isa<CNode>()) {
        continue;
      }
      auto in_cnode = in_node->cast<CNodePtr>();
      if (in_cnode != nullptr) {
        if (std::find(segment.begin(), segment.end(), in_cnode) != segment.end()) {
          (void)nodes->erase(node);
          break;
        }
      }
    }
  }
  return nodes;
}

void FuncGraph::ReleaseFullOrderToEffectOrder() {
  MS_LOG(DEBUG) << "Flag has_effect " << has_flag(GRAPH_FLAG_HAS_EFFECT) << ".";
  if (has_flag(GRAPH_FLAG_HAS_EFFECT)) {
    std::list<AnfNodePtr> depends_order;
    std::vector<CNodePtr> segment;
    for (const auto &cnode : order_) {
      if (IsPrimitiveCNode(cnode, prim::kPrimReturn)) {
        continue;
      }
      if (HasEffect(cnode)) {
        MS_LOG(DEBUG) << "Meet a effect node " << cnode->DebugString() << ".";
        if (segment.size() > 0) {
          auto roots = FindRoots(segment);
          for (auto iter = roots->begin(); iter != roots->end(); (void)iter++) {
            depends_order.push_back(*iter);
          }
        }
        segment.clear();
        depends_order.push_back(cnode);
      } else {
        MS_LOG(DEBUG) << "Meet a general node " << cnode->DebugString() << ".";
        segment.push_back(cnode);
      }
    }
    if (segment.size() > 1) {
      auto roots = FindRoots(segment);
      for (auto iter = roots->begin(); iter != roots->end(); (void)iter++) {
        depends_order.push_back(*iter);
      }
    }
    std::vector<AnfNodePtr> depend_inputs;
    auto old_ret = output();
    for (auto iter = depends_order.rbegin(); iter != depends_order.rend(); (void)iter++) {
      if (*iter != old_ret) {
        depend_inputs.push_back(*iter);
      }
    }
    set_flags(GRAPH_FLAG_HAS_EFFECT, false);
    set_flags(GRAPH_FLAG_EFFECT_PATIAL_ORDER, true);
    if (!depend_inputs.empty()) {
      SetEffectDepends(depend_inputs);
    }
  }
}

void FuncGraph::SetEffectDepends(const std::vector<AnfNodePtr> &depend_inputs) {
  auto old_ret = output();
  std::vector<AnfNodePtr> inputs{NewValueNode(prim::kPrimDepend), old_ret};
  (void)inputs.insert(inputs.end(), depend_inputs.begin(), depend_inputs.end());
  auto new_ret = NewCNode(inputs);
  auto mng = manager();
  if (mng) {
    (void)mng->Replace(old_ret, new_ret);
  } else {
    return_->set_input(1, new_ret);
  }
}
}  // namespace mindspore
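Among the relocated helpers, FindRoots is worth a second look: it starts from the whole segment and erases every node that occurs as an input of some other node in the segment, so the survivors are exactly the nodes no segment member consumes. The same idea on a toy adjacency list (hypothetical data, not MindSpore types):

#include <iostream>
#include <set>
#include <vector>

int main() {
  // Toy segment: index -> list of that node's inputs. Node 2 consumes 0 and 1.
  std::vector<std::vector<int>> inputs = {{}, {}, {0, 1}};
  std::set<int> roots = {0, 1, 2};
  for (std::size_t node = 0; node < inputs.size(); ++node) {
    for (int in : inputs[node]) {
      roots.erase(in);  // an input of another segment node cannot be a root
    }
  }
  for (int r : roots) std::cout << r << '\n';  // prints 2
  return 0;
}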
mindspore/ccsrc/ir/manager.cc
 /**
  * This is the C++ adaptation and derivative work of Myia (https://github.com/mila-iqia/myia/).
  *
- * Copyright 2019 Huawei Technologies Co., Ltd
+ * Copyright 2019-2020 Huawei Technologies Co., Ltd
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.
 ...
@@ -17,11 +17,14 @@
  */
 #include "ir/manager.h"
 #include <algorithm>
 #include <numeric>
 #include <list>
+#include "./common.h"
+#include "ir/func_graph.h"
 #include "utils/profile.h"
+#include "utils/convert_utils.h"
 #include "operator/ops.h"
 #include "debug/trace.h"
 ...
mindspore/ccsrc/ir/manager.h
 /**
  * This is the C++ adaptation and derivative work of Myia (https://github.com/mila-iqia/myia/).
  *
- * Copyright 2019 Huawei Technologies Co., Ltd
+ * Copyright 2019-2020 Huawei Technologies Co., Ltd
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.
 ...
@@ -37,6 +37,7 @@
 #include "utils/graph_utils.h"
 #include "utils/counter.h"
 #include "utils/hashing.h"
+#include "utils/base_ref.h"
 #include "ir/anf.h"

 namespace mindspore {
 ...
mindspore/ccsrc/ir/meta_tensor.cc
(diff collapsed)
mindspore/ccsrc/ir/meta_tensor.h
 /**
- * Copyright 2019 Huawei Technologies Co., Ltd
+ * Copyright 2019-2020 Huawei Technologies Co., Ltd
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.
 ...
@@ -21,80 +21,12 @@
 #include <vector>
 #include <memory>
 #include <string>
-#include "device/device_address.h"
-#include "pybind11/numpy.h"
-#include "pybind11/pybind11.h"
-#include "Eigen/Core"
 #include "ir/base.h"
 #include "ir/dtype.h"
-#include "utils/log_adapter.h"
 #include "utils/convert_utils.h"
 #include "utils/hashing.h"

-namespace py = pybind11;
-
-using float16 = Eigen::half;
-
-namespace pybind11 {
-namespace detail {
-// Similar to enums in `pybind11/numpy.h`. Determined by doing:
-// python3 -c 'import numpy as np; print(np.dtype(np.float16).num)'
-constexpr int NPY_FLOAT16 = 23;
-
-template <typename T>
-struct npy_scalar_caster {
-  PYBIND11_TYPE_CASTER(T, _("PleaseOverride"));
-  using Array = array_t<T>;
-
-  bool load(handle src, bool convert) {
-    // Taken from Eigen casters. Permits either scalar dtype or scalar array.
-    handle type = dtype::of<T>().attr("type");
-    if (!convert && !isinstance<Array>(src) && !isinstance(src, type)) return false;
-
-    Array tmp = Array::ensure(src);
-    if (tmp && tmp.size() == 1 && tmp.ndim() == 0) {
-      this->value = *tmp.data();
-      return true;
-    }
-
-    return false;
-  }
-
-  static handle cast(T src, return_value_policy, handle) {
-    Array tmp({1});
-    tmp.mutable_at(0) = src;
-    tmp.resize({});
-
-    // You could also just return the array if you want a scalar array.
-    object scalar = tmp[tuple()];
-    return scalar.release();
-  }
-};
-
-template <>
-struct npy_format_descriptor<float16> {
-  static constexpr auto name = "float16";
-  static pybind11::dtype dtype() {
-    handle ptr = npy_api::get().PyArray_DescrFromType_(NPY_FLOAT16);
-    return reinterpret_borrow<pybind11::dtype>(ptr);
-  }
-  virtual ~npy_format_descriptor<float16>() {}
-};
-
-template <>
-struct type_caster<float16> : public npy_scalar_caster<float16> {
-  static constexpr auto name = "float16";
-};
-}  // namespace detail
-}  // namespace pybind11
-
-using mindspore::device::DeviceAddress;
-using DeviceAddressPtr = std::shared_ptr<mindspore::device::DeviceAddress>;
-
 // brief mindspore namespace.
 //
 // mindspore namespace is the top level namespace of Mindsporeession project.
 ...
@@ -133,7 +65,7 @@ class MetaTensor : public Value {
   // param shape The shape of the tensor.
   MetaTensor(const TypeId data_type, const std::vector<int> &shape);

-  MetaTensor(const TypePtr &type_ptr, const py::tuple &shape);
+  MetaTensor(const TypePtr &type_ptr, const std::vector<int> &shape);
   // brief Constructs a MetaTensor object from an existing MetaTensor instance.
   //
   // The constructed MetaTensor object will have the same data type and shape as the
 ...
@@ -164,7 +96,6 @@ class MetaTensor : public Value {
   // All the types are defined in "ir/dtype.h".
   TypePtr Dtype() const;
   abstract::AbstractBasePtr ToAbstract() override;
-  py::tuple GetPyTupleShape() const;
   TypeId data_type() const { return data_type_; }
   std::string ToString() const override;
   std::string DumpText() const override;
 ...
@@ -256,175 +187,7 @@ class MetaTensor : public Value {
   DeviceInfo device_info_;
 };

-// Tensor entity class
-class Tensor : public MetaTensor {
- public:
-  Tensor() = default;
-
-  abstract::AbstractBasePtr ToAbstract() override;
-
-  // brief Constructor for Python.
-  //
-  // param type_ptr [TypePty] Data type of the tensor.
-  // param py_shape [py::tuple] The shape represented by py::tuple of the tensor.
-  Tensor(const TypePtr &type_ptr, const py::tuple &shape);
-
-  // brief Constructor for C++.
-  //
-  // param data_type [TypeId] Data type of the tensor.
-  // param shape The shape represented by std::vector<int> of the tensor.
-  Tensor(TypeId data_type, const std::vector<int> &shape);
-
-  // brief Constructor for Python.
-  //
-  // param input [py::array] Data value of the tensor.
-  // param data_type [TypeId] Data type of the tensor.
-  explicit Tensor(const py::array &input, const TypePtr &data_type = nullptr);
-
-  // brief Constructor
-  //
-  // param input [py::list] the data for tensor
-  // param data_type [TypeId] data type
-  explicit Tensor(const py::list &input, const TypePtr &data_type = nullptr);
-
-  // brief Constructor
-  //
-  // param input [py::tuple] the data for tensor
-  // param data_type [TypeId] data type
-  explicit Tensor(const py::tuple &input, const TypePtr &data_type = nullptr);
-
-  // brief Constructor
-  //
-  // param input [py::float_] the data for tensor
-  // param data_type [TypeId] data type
-  explicit Tensor(const py::float_ &input, const TypePtr &data_type = nullptr);
-
-  // brief Constructor
-  //
-  // param input [py::int_] the data for tensor
-  // param data_type [TypeId] data type
-  explicit Tensor(const py::int_ &input, const TypePtr &data_type = nullptr);
-
-  // brief Constructor
-  //
-  // param input [Tensor] the data for tensor
-  // param data_type [TypeId] data type
-  Tensor(const Tensor &tensor, const TypePtr &data_type = nullptr);
-
-  ~Tensor() override = default;
-
-  MS_DECLARE_PARENT(Tensor, MetaTensor);
-
-  // brief Overloads operator = for Tensor.
-  //
-  // The constructed Tensor object has the same type and shape with tensor.
-  //
-  // param tensor An existing Tensor object.
-  Tensor &operator=(const Tensor &tensor);
-
-  // brief Compares two Tensor objects.
-  //
-  // Compare two tensor objects to see if they have same data type, shape and
-  // data value.
-  //
-  // param tensor The Tensor object to be compared.
-  // return true: If having same type, shape and data, return true, or return false.
-  bool operator==(const Tensor &tensor) const;
-
-  // It is different from 'operator==' which just compare shape/type/address, it do real value comparison.
-  bool ValueEqual(const Tensor &other) const;
-
-  bool operator==(const Value &other) const override {
-    if (other.isa<Tensor>()) {
-      auto other_ = static_cast<const Tensor &>(other);
-      return *this == other_;
-    } else {
-      return false;
-    }
-  }
-
-  // brief Gets tensor's dimension
-  //
-  // return The number of dimensions of the tensor data.
-  int DataDim() const;
-
-  // brief Getting tensor data size
-  //
-  // return The total number of elements of the tensor data.
-  int DataSize() const;
-
-  // brief Tensor's data value.
-  //
-  // return [py::array] The tensor's data in py::array.
-  py::array data() const;
-
-  // brief Get the data type fo the tensor for C++
-  //
-  // return [int] The tensor's data type will be cast to int to return.
-  int data_type_c() const;
-
-  // brief Get the tensor's shape for C++
-  //
-  // return [std::vector<int>]
-  std::vector<int> shape_c(void) const;
-
-  // brief Get Tensor data pointer for c++ type
-  //
-  // param writable true if writable, false if read only
-  // return The pointer to the object
-  void *data_c(bool writable = false);
-
-  // brief Get data type from tensor data.
-  //
-  // param buf The buffer info of the py::array data.
-  // return The [TypeId] of the tensor data.
-  TypeId GetDataType(const py::buffer_info &buf) const;
-
-  // brief Sets the data type of a tensor.
-  //
-  // param data_type The data type of the tensor to be set.
-  //
-  TypeId set_data_type(const TypeId data_type) override;
-  TypePtr SetDtype(const TypePtr type_ptr) override;
-  std::string GetShapeAndDataTypeInfo() const;
-  std::string ToString() const override;
-  std::string ToStringRepr() const;
-  py::array data_;  // < Tensor's data value
-  const bool parse_info_ = true;
-  bool is_init();
-  void set_init_flag(bool flag);
-
- private:
-  // brief init tensor
-  //
-  // param input [py::array] the data for tensor
-  // param data_type [TypeId] data type
-  // return true if succeed, false if failed.
-  void init(const py::array &input, const TypeId &data_type);
-  void init(const py::array &input, const TypePtr &type_ptr);
-  bool init_flag_{false};
-  // brief init tensor attribute
-  //
-  // param data_type [TypeId] Data type of the tensor.
-  // param shape [py::array] The shape of the tensor.
-  // return true if succeed, false if failed.
-  void init(TypeId data_type, const std::vector<int> &shape, py::array *data);
-  bool convert_data(const py::array &in, const TypeId in_data_type, py::array *out, const TypeId out_data_type);
-
- public:
-  bool is_dirty() const { return dirty_; }
-  void set_dirty(const bool dirty) { dirty_ = dirty; }
-  DeviceAddressPtr device_address() const { return device_address_; }
-  void set_device_address(const DeviceAddressPtr &device_address) { device_address_ = device_address; }
-  py::array data_sync();
-
- private:
-  bool dirty_{true};
-  DeviceAddressPtr device_address_{nullptr};
-};
-
-using TensorPtr = std::shared_ptr<Tensor>;
 using MetaTensorPtr = std::shared_ptr<MetaTensor>;
-using TensorPtrList = std::vector<std::shared_ptr<Tensor>>;
 }  // namespace tensor
 }  // namespace mindspore
 ...
mindspore/ccsrc/ir/meta_tensor_extends.cc (new file)

/**
 * Copyright 2020 Huawei Technologies Co., Ltd
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "ir/meta_tensor.h"

#include <functional>
#include <numeric>
#include <vector>
#include <sstream>
#include <string>

#include "pipeline/static_analysis/abstract_value.h"

namespace mindspore {
namespace tensor {
abstract::AbstractBasePtr MetaTensor::ToAbstract() {
  auto tens = shared_from_base<MetaTensor>();
  auto dtype = tens->Dtype();
  if (!IsSubType(dtype, kNumber)) {
    MS_LOG(EXCEPTION) << "Expect MetaTensor type kNumber but got: " << dtype->ToString() << ".";
  }
  auto tensor_shape = tens->shape();
  auto abs_tensor = std::make_shared<abstract::AbstractTensor>(dtype, tensor_shape);
  abs_tensor->set_value(shared_from_base<MetaTensor>());
  return abs_tensor;
}
}  // namespace tensor
}  // namespace mindspore
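During type inference every value is lifted to an abstract value, and ToAbstract() lifts a MetaTensor to an AbstractTensor carrying its dtype, shape, and itself as the concrete value. A hedged sketch of a caller, depending on MindSpore internals rather than being a standalone program (LiftToAbstract is a hypothetical name):

#include "ir/meta_tensor.h"
#include "pipeline/static_analysis/abstract_value.h"

void LiftToAbstract(const mindspore::tensor::MetaTensorPtr &meta) {
  // ToAbstract() raises via MS_LOG(EXCEPTION) unless Dtype() is a kNumber
  // subtype, so callers can rely on the resulting abstract being numeric.
  auto abs = meta->ToAbstract();
  MS_LOG(DEBUG) << "Abstract of meta tensor: " << abs->ToString();
}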
mindspore/ccsrc/ir/tensor.cc (new file)

/**
 * Copyright 2020 Huawei Technologies Co., Ltd
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "ir/tensor.h"

#include <functional>
#include <numeric>
#include <vector>
#include <sstream>
#include <string>

#include "device/device_address.h"
#include "pybind_api/api_register.h"
#include "pybind_api/export_flags.h"
#include "pipeline/static_analysis/abstract_value.h"

namespace mindspore {
namespace tensor {
void DataBuf2Contiguous(const py::array &src, py::array *const dest) {
  if (dest == nullptr) {
    MS_LOG(EXCEPTION) << "Failed to copy data to a contiguous buffer as dest is nullptr!";
  }

  Py_buffer pybuf_src;
  if (PyObject_GetBuffer(src.ptr(), &pybuf_src, PyBUF_ANY_CONTIGUOUS)) {
    MS_LOG(EXCEPTION) << "Failed to get buffer info from the src!";
  }

  if (!PyBuffer_IsContiguous(&pybuf_src, 'C')) {
    if (PyBuffer_ToContiguous(dest->request(true).ptr, &pybuf_src, pybuf_src.len, 'C')) {
      MS_LOG(EXCEPTION) << "Can't copy numpy.ndarray to a contiguous buffer.";
    }
  } else {
    *dest = src;
  }

  PyBuffer_Release(&pybuf_src);
}

Tensor::Tensor(const TypePtr &type_ptr, const py::tuple &shape) {
  TypeId data_type = TypeId::kTypeUnknown;
  if (type_ptr != nullptr) {
    data_type = type_ptr->type_id();
  }
  data_type_ = data_type;
  shape_.resize(shape.size());
  for (size_t i = 0; i < shape.size(); ++i) {
    shape_[i] = py::int_(shape[i]);
  }
  init(data_type_, shape_, &data_);
}

Tensor::Tensor(TypeId data_type, const std::vector<int> &shape) { init(data_type, shape, &data_); }

Tensor::Tensor(const py::array &input, const TypePtr &data_type) { init(input, data_type); }

Tensor::Tensor(const py::list &input, const TypePtr &data_type) { init(py::array(input), data_type); }

Tensor::Tensor(const py::tuple &input, const TypePtr &data_type) { init(py::array(input), data_type); }

Tensor::Tensor(const py::float_ &input, const TypePtr &data_type) { init(py::array(input), data_type); }

Tensor::Tensor(const py::int_ &input, const TypePtr &data_type) { init(py::array(input), data_type); }

Tensor::Tensor(const Tensor &tensor, const TypePtr &data_type)
    : MetaTensor(tensor), device_address_(tensor.device_address_) {
  init(tensor.data_, data_type);
  dirty_ = tensor.is_dirty();
}

Tensor &Tensor::operator=(const Tensor &tensor) {
  if (this != &tensor) {
    MetaTensor::operator=(tensor);
    dirty_ = tensor.is_dirty();
    device_address_ = tensor.device_address();
    data_ = tensor.data_;
  }
  return *this;
}

bool Tensor::operator==(const Tensor &tensor) const {
  return (MetaTensor::operator==(tensor) && data_ == tensor.data_);
}

bool Tensor::ValueEqual(const Tensor &other) const {
  auto equal = [&other, this]() -> bool {
    auto np = py::module::import("numpy");
    auto equal = np.attr("equal")(data_, other.data_);
    auto all_equal = np.attr("all")(equal);
    return all_equal.cast<bool>();
  };
  return (MetaTensor::operator==(other) && (data_.is(other.data_) || equal()));
}

py::tuple Tensor::GetPyTupleShape() const {
  std::vector<int> shape = this->shape();
  py::tuple dims(shape.size());
  for (size_t i = 0; i < dims.size(); ++i) {
    dims[i] = py::int_(shape[i]);
  }
  return dims;
}

int Tensor::DataDim() const { return static_cast<int>(data_.ndim()); }

int Tensor::DataSize() const { return static_cast<int>(data_.size()); }

py::array Tensor::data() const { return data_; }

int Tensor::data_type_c() const { return static_cast<int>(data_type_); }

std::vector<int> Tensor::shape_c(void) const { return shape(); }

void *Tensor::data_c(bool writable) {
  // operand of bit operation should be unsigned int.
  unsigned int flags = ((unsigned int)data_.flags()) & pybind11::detail::npy_api::NPY_ARRAY_C_CONTIGUOUS_;
  bool is_c_contiguous = (flags != 0) ? true : false;
  if (!is_c_contiguous) {
    py::array data_c;
    init(data_type_, shape_, &data_c);
    DataBuf2Contiguous(data_, &data_c);
    data_ = data_c;
  }
  return data_.request(writable).ptr;
}

TypeId Tensor::GetDataType(const py::buffer_info &buf) const {
  TypeId data_type = TypeId::kTypeUnknown;
  if (buf.format.compare("e") == 0) {
    data_type = TypeId::kNumberTypeFloat16;
  } else if (buf.format.compare("f") == 0) {
    data_type = TypeId::kNumberTypeFloat32;
  } else if (buf.format.compare("d") == 0) {
    data_type = TypeId::kNumberTypeFloat64;
  } else if (buf.format.compare("B") == 0) {
    data_type = TypeId::kNumberTypeUInt8;
  } else if (buf.format.compare("H") == 0) {
    data_type = TypeId::kNumberTypeUInt16;
  } else if (buf.format.compare("I") == 0) {
    data_type = TypeId::kNumberTypeUInt32;
  } else if (buf.format.compare("L") == 0 || buf.format.compare("Q") == 0) {
    data_type = TypeId::kNumberTypeUInt64;
  } else if (buf.format.compare("b") == 0) {
    data_type = TypeId::kNumberTypeInt8;
  } else if (buf.format.compare("h") == 0) {
    data_type = TypeId::kNumberTypeInt16;
  } else if (buf.format.compare("i") == 0) {
    data_type = TypeId::kNumberTypeInt32;
  } else if (buf.format.compare("l") == 0 || buf.format.compare("q") == 0) {
    data_type = TypeId::kNumberTypeInt64;
  } else if (buf.format.compare("?") == 0) {
    data_type = TypeId::kNumberTypeBool;
  } else {
    MS_LOG(WARNING) << "Get unsupported DataType " << buf.format << ".";
  }
  return data_type;
}

void Tensor::init(const py::array &input, const TypePtr &type_ptr) {
  TypeId data_type = TypeId::kTypeUnknown;
  if (type_ptr != nullptr) {
    data_type = type_ptr->type_id();
  }
  init(input, data_type);
}

void Tensor::init(const py::array &input, const TypeId &data_type) {
  py::buffer_info buf = input.request();

  data_type_ = GetDataType(buf);
  if (TypeId::kTypeUnknown == data_type && TypeId::kTypeUnknown == data_type_) {
    MS_LOG(EXCEPTION) << "Unsupported tensor type!";
  }

  std::vector<ssize_t> tm = buf.shape;
  size_t len = tm.size();
  std::vector<int> dims(len);
  for (size_t i = 0; i < len; ++i) {
    dims[i] = static_cast<int>(tm[i]);
  }
  (void)set_shape(dims);

  if (TypeId::kTypeUnknown != data_type && TypeId::kTypeUnknown != data_type_ && data_type_ != data_type) {
    // If user defined data type is not same as GetDataType from the data
    bool success = convert_data(input, data_type_, &data_, data_type);
    if (success) {
      data_type_ = data_type;
    } else {
      data_type_ = TypeId::kTypeUnknown;
      MS_LOG(EXCEPTION) << "Convert data from " << data_type_ << " to " << data_type << " failed!";
    }
  } else {
    data_ = input;
  }
  dirty_ = true;
}

void Tensor::init(TypeId data_type, const std::vector<int> &shape, py::array *const data) {
  data_type_ = data_type;
  shape_ = shape;
  switch (data_type) {
    case kNumberTypeBool:
      *data = py::array_t<bool, py::array::c_style>(shape);
      break;
    case kNumberTypeInt8:
      *data = py::array_t<int8_t, py::array::c_style>(shape);
      break;
    case kNumberTypeInt16:
      *data = py::array_t<int16_t, py::array::c_style>(shape);
      break;
    case kNumberTypeInt32:
      *data = py::array_t<int32_t, py::array::c_style>(shape);
      break;
    case kNumberTypeInt64:
      *data = py::array_t<int64_t, py::array::c_style>(shape);
      break;
    case kNumberTypeUInt8:
      *data = py::array_t<uint8_t, py::array::c_style>(shape);
      break;
    case kNumberTypeUInt16:
      *data = py::array_t<uint16_t, py::array::c_style>(shape);
      break;
    case kNumberTypeUInt32:
      *data = py::array_t<uint32_t, py::array::c_style>(shape);
      break;
    case kNumberTypeUInt64:
      *data = py::array_t<uint64_t, py::array::c_style>(shape);
      break;
    case kNumberTypeFloat16:
      *data = py::array_t<float16, py::array::c_style>(shape);
      break;
    case kNumberTypeFloat32:
      *data = py::array_t<float, py::array::c_style>(shape);
      break;
    case kNumberTypeFloat64:
      *data = py::array_t<double, py::array::c_style>(shape);
      break;
    default:
      MS_LOG(EXCEPTION) << "Cannot construct Tensor because of unsupported data type: " << data_type << ".";
      break;
  }
}

TypePtr Tensor::SetDtype(const TypePtr type_ptr) {
  MS_EXCEPTION_IF_NULL(type_ptr);
  (void)set_data_type(type_ptr->type_id());
  return type_ptr;
}

TypeId Tensor::set_data_type(const TypeId data_type) {
  if (data_.size() > 0 && data_type_ != data_type) {
    bool success = convert_data(data_, data_type_, &data_, data_type);
    if (success) {
      data_type_ = data_type;
    } else {
      MS_LOG(EXCEPTION) << "Convert data from " << data_type_ << " to " << data_type << " failed!";
    }
  } else if (data_.size() == 0) {
    data_type_ = data_type;
  }
  return data_type_;
}

bool Tensor::is_init() { return init_flag_; }

void Tensor::set_init_flag(bool flag) { init_flag_ = flag; }

bool Tensor::convert_data(const py::array &in, const TypeId in_data_type, py::array *const out,
                          const TypeId out_data_type) {
  if (out == nullptr) {
    return false;
  }

  bool result = true;
  if (TypeId::kTypeUnknown == in_data_type || TypeId::kTypeUnknown == out_data_type) {
    result = false;
  } else if (in_data_type == out_data_type) {
    *out = in;
  } else if (TypeId::kNumberTypeFloat64 == out_data_type) {
    *out = in.attr("astype").cast<py::function>()("float64").cast<py::array>();
  } else if (TypeId::kNumberTypeFloat32 == out_data_type) {
    *out = in.attr("astype").cast<py::function>()("float32").cast<py::array>();
  } else if (TypeId::kNumberTypeFloat16 == out_data_type) {
    *out = in.attr("astype").cast<py::function>()("float16").cast<py::array>();
  } else if (TypeId::kNumberTypeInt64 == out_data_type) {
    *out = in.attr("astype").cast<py::function>()("int64").cast<py::array>();
  } else if (TypeId::kNumberTypeInt32 == out_data_type) {
    *out = in.attr("astype").cast<py::function>()("int32").cast<py::array>();
  } else if (TypeId::kNumberTypeInt16 == out_data_type) {
    *out = in.attr("astype").cast<py::function>()("int16").cast<py::array>();
  } else if (TypeId::kNumberTypeInt8 == out_data_type) {
    *out = in.attr("astype").cast<py::function>()("int8").cast<py::array>();
  } else if (TypeId::kNumberTypeUInt8 == out_data_type) {
    *out = in.attr("astype").cast<py::function>()("uint8").cast<py::array>();
  } else if (TypeId::kNumberTypeUInt16 == out_data_type) {
    *out = in.attr("astype").cast<py::function>()("uint16").cast<py::array>();
  } else if (TypeId::kNumberTypeUInt32 == out_data_type) {
    *out = in.attr("astype").cast<py::function>()("uint32").cast<py::array>();
  } else if (TypeId::kNumberTypeUInt64 == out_data_type) {
    *out = in.attr("astype").cast<py::function>()("uint64").cast<py::array>();
  } else {
    data_type_ = TypeId::kTypeUnknown;
    MS_LOG(EXCEPTION) << "Cannot convert from " << TypeIdLabel(in_data_type) << " to " << TypeIdLabel(out_data_type)
                      << ".";
  }
  return result;
}

abstract::AbstractBasePtr Tensor::ToAbstract() {
  auto tens = shared_from_base<Tensor>();
  auto dtype = tens->Dtype();
  if (!IsSubType(dtype, kNumber)) {
    MS_LOG(EXCEPTION) << "Expect tensor type kNumber but got: " << dtype->ToString() << ".";
  }
  auto tensor_shape = tens->shape();
  auto abs_tensor = std::make_shared<abstract::AbstractTensor>(dtype, tensor_shape);
  abs_tensor->set_value(shared_from_base<Tensor>());
  return abs_tensor;
}

std::string Tensor::GetShapeAndDataTypeInfo() const {
  std::ostringstream buf;
  buf << "Tensor \nshape:[" << shape() << "]" << this->Dtype()->ToString();
  return buf.str();
}

std::string Tensor::ToString() const {
  const int small_tensor_size = 30;
  std::ostringstream buf;
  buf << "Tensor \nshape:[" << shape() << "]" << this->Dtype()->ToString();
  // only print small tensor
  if (DataSize() < small_tensor_size) {
    buf << "val:" << std::string(py::str(data()));
  }
  return buf.str();
}

std::string Tensor::ToStringRepr() const {
  std::ostringstream buf;
  auto type_ptr = this->Dtype();
  MS_EXCEPTION_IF_NULL(type_ptr);
  buf << "Tensor shape:[" << shape() << "]" << type_ptr->ToString();
  buf << "\nval:" << std::string(py::str(data()));
  return buf.str();
}

py::array Tensor::data_sync() {
  if (device_address_ != nullptr) {
    if (!device_address_->SyncDeviceToHost(this->shape(), static_cast<size_t>(this->data().nbytes()),
                                           this->data_type(), this->data_c(true))) {
      MS_LOG(EXCEPTION) << "SyncDeviceToHost when asnumpy.";
    }
  }
  return data_;
}

REGISTER_PYBIND_DEFINE(Tensor, ([](const py::module *m) {
  // dtype should define before Tensor, because Tensor init depend dtype
  (void)py::class_<Tensor, std::shared_ptr<Tensor>>(*m, "Tensor")
    .def(py::init<TypePtr, py::tuple>(), py::arg("dtype"), py::arg("shape"))
    .def(py::init<py::array, TypePtr>(), py::arg("input"), py::arg("dtype") = nullptr)
    .def(py::init<py::float_, TypePtr>(), py::arg("input"), py::arg("dtype") = nullptr)
    .def(py::init<py::int_, TypePtr>(), py::arg("input"), py::arg("dtype") = nullptr)
    .def(py::init<py::list, TypePtr>(), py::arg("input"), py::arg("dtype") = nullptr)
    .def(py::init<py::tuple, TypePtr>(), py::arg("input"), py::arg("dtype") = nullptr)
    .def(py::init<Tensor, TypePtr>(), py::arg("input"), py::arg("dtype") = nullptr)
    .def_readonly(PYTHON_TENSOR_FLAG, &Tensor::parse_info_)
    .def("asnumpy", &Tensor::data_sync, R"mydelimiter(
        Convert tensor to numpy.ndarray.

        Returns:
            numpy.ndarray.

        Examples:
            >>> data = mindspore.Tensor(np.ones((2, 3)))
            >>> array = data.asnumpy()
            >>> array
            array([[1., 1., 1.],
                   [1., 1., 1.]])
        )mydelimiter")
    .def("size", &Tensor::DataSize, R"mydelimiter(
        Get tensor's data size.

        Returns:
            int, the size of tensor.

        Examples:
            >>> data = mindspore.Tensor(np.ones((2, 3)))
            >>> data.size()
            6
        )mydelimiter")
    .def("is_init", &Tensor::is_init, R"mydelimiter(
        Get tensor init_flag.

        Returns:
            bool, whether the tensor init.

        Examples:
            >>> data = mindspore.Tensor(np.ones((2, 3)))
            >>> data.is_init()
            False
        )mydelimiter")
    .def("set_init_flag", &Tensor::set_init_flag, R"mydelimiter(
        Set tensor init_flag.

        Examples:
            >>> data = mindspore.Tensor(np.ones((2, 3)))
            >>> data.set_init_flag(True)
        )mydelimiter")
    .def("dim", &Tensor::DataDim, R"mydelimiter(
        Get tensor's data dimension.

        Returns:
            int, the dimension of tensor.

        Examples:
            >>> data = mindspore.Tensor(np.ones((2, 3)))
            >>> data.dim()
            2
        )mydelimiter")
    .def("dtype", &Tensor::Dtype, R"mydelimiter(
        Get the tensor's data type.

        Returns:
            type, the data type of tensor.

        Examples:
            >>> data = mindspore.Tensor(np.ones((2, 1), np.int32))
            >>> data.dtype()
            Int32
        )mydelimiter")
    .def("set_dtype", &Tensor::SetDtype, R"mydelimiter(
        Set the tensor's data type.

        Arg:
            dtype (:class:`mindspore.dtype`): The type of output tensor.

        Examples:
            >>> data = mindspore.Tensor(np.ones((1, 2), np.float32))
            >>> data.set_dtype(mindspore.int32)
            mindspore.int32
        )mydelimiter")
    .def("shape", &Tensor::GetPyTupleShape, R"mydelimiter(
        Get the tensor's shape.

        Returns:
            tuple[int], the shape of tensor.

        Examples:
            >>> data = mindspore.Tensor(np.ones((3, 3)))
            >>> data.shape()
            (3, 3)
        )mydelimiter")
    .def("__str__", &Tensor::ToString)
    .def("__repr__", &Tensor::ToStringRepr)
    .def(py::pickle(
      [](const Tensor &t) {  // __getstate__
        /* Return a tuple that fully encodes the state of the object */
        return py::make_tuple(t.data());
      },
      [](const py::tuple &t) {  // __setstate__
        if (t.size() != 1) {
          throw std::runtime_error("Invalid state!");
        }
        /* Create a new C++ instance */
        Tensor tensor(t[0].cast<py::array>());
        return tensor;
      }));
  (void)py::class_<MetaTensor, std::shared_ptr<MetaTensor>>(*m, "MetaTensor")
    .def(py::init<TypePtr, const std::vector<int>>(), py::arg("dtype"), py::arg("shape"))
    .def_readonly(PYTHON_META_TENSOR_FLAG, &MetaTensor::parse_info_)
    .def("dtype", &MetaTensor::Dtype, "Get the MetaTensor's dtype.")
    .def("shape", &MetaTensor::shape, "Get the MetaTensor's shape.");
}));
}  // namespace tensor
}  // namespace mindspore
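A hedged usage sketch of the C++-side API defined above (InspectTensor is a hypothetical name; this requires an initialized Python interpreter, since Tensor wraps a py::array):

#include <vector>
#include "ir/tensor.h"

void InspectTensor() {
  using mindspore::tensor::Tensor;
  // Allocate an uninitialized float32 tensor of shape [2, 3].
  Tensor t(mindspore::kNumberTypeFloat32, std::vector<int>{2, 3});
  // DataSize() counts elements (6 here), DataDim() counts dimensions (2).
  MS_LOG(INFO) << t.GetShapeAndDataTypeInfo() << " elements=" << t.DataSize();
  // data_c(true) forces a C-contiguous buffer and hands back a writable
  // pointer; set_data_type() converts in place via numpy's astype.
  auto *raw = static_cast<float *>(t.data_c(true));
  raw[0] = 1.0f;
  (void)t.set_data_type(mindspore::kNumberTypeFloat64);
}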
mindspore/ccsrc/ir/tensor.h (new file)

/**
 * Copyright 2020 Huawei Technologies Co., Ltd
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef MINDSPORE_CCSRC_IR_TENSOR_H_
#define MINDSPORE_CCSRC_IR_TENSOR_H_

#include <memory>
#include <string>
#include <vector>
#include "pybind11/numpy.h"
#include "pybind11/pybind11.h"
#include "Eigen/Core"
#include "device/device_address.h"
#include "ir/meta_tensor.h"
#include "utils/log_adapter.h"

namespace py = pybind11;

using float16 = Eigen::half;

namespace pybind11 {
namespace detail {
// Similar to enums in `pybind11/numpy.h`. Determined by doing:
// python3 -c 'import numpy as np; print(np.dtype(np.float16).num)'
constexpr int NPY_FLOAT16 = 23;

template <typename T>
struct npy_scalar_caster {
  PYBIND11_TYPE_CASTER(T, _("PleaseOverride"));
  using Array = array_t<T>;

  bool load(handle src, bool convert) {
    // Taken from Eigen casters. Permits either scalar dtype or scalar array.
    handle type = dtype::of<T>().attr("type");
    if (!convert && !isinstance<Array>(src) && !isinstance(src, type)) return false;

    Array tmp = Array::ensure(src);
    if (tmp && tmp.size() == 1 && tmp.ndim() == 0) {
      this->value = *tmp.data();
      return true;
    }

    return false;
  }

  static handle cast(T src, return_value_policy, handle) {
    Array tmp({1});
    tmp.mutable_at(0) = src;
    tmp.resize({});

    // You could also just return the array if you want a scalar array.
    object scalar = tmp[tuple()];
    return scalar.release();
  }
};

template <>
struct npy_format_descriptor<float16> {
  static constexpr auto name = "float16";
  static pybind11::dtype dtype() {
    handle ptr = npy_api::get().PyArray_DescrFromType_(NPY_FLOAT16);
    return reinterpret_borrow<pybind11::dtype>(ptr);
  }
  virtual ~npy_format_descriptor<float16>() {}
};

template <>
struct type_caster<float16> : public npy_scalar_caster<float16> {
  static constexpr auto name = "float16";
};
}  // namespace detail
}  // namespace pybind11

using mindspore::device::DeviceAddress;
using DeviceAddressPtr = std::shared_ptr<mindspore::device::DeviceAddress>;

// brief mindspore namespace.
//
// mindspore namespace is the top level namespace of Mindsporeession project.
// Other namespace should be a sub namespace of mindspore namespace in the ME project.
namespace mindspore {
// brief mindspore::tensor namespace
//
// A sub namespace in ME to support tensor related definition.
namespace tensor {
// Tensor entity class
class Tensor : public MetaTensor {
 public:
  Tensor() = default;

  abstract::AbstractBasePtr ToAbstract() override;

  // brief Constructor for Python.
  //
  // param type_ptr [TypePty] Data type of the tensor.
  // param py_shape [py::tuple] The shape represented by py::tuple of the tensor.
  Tensor(const TypePtr &type_ptr, const py::tuple &shape);

  // brief Constructor for C++.
  //
  // param data_type [TypeId] Data type of the tensor.
  // param shape The shape represented by std::vector<int> of the tensor.
  Tensor(TypeId data_type, const std::vector<int> &shape);

  // brief Constructor for Python.
  //
  // param input [py::array] Data value of the tensor.
  // param data_type [TypeId] Data type of the tensor.
  explicit Tensor(const py::array &input, const TypePtr &data_type = nullptr);

  // brief Constructor
  //
  // param input [py::list] the data for tensor
  // param data_type [TypeId] data type
  explicit Tensor(const py::list &input, const TypePtr &data_type = nullptr);

  // brief Constructor
  //
  // param input [py::tuple] the data for tensor
  // param data_type [TypeId] data type
  explicit Tensor(const py::tuple &input, const TypePtr &data_type = nullptr);

  // brief Constructor
  //
  // param input [py::float_] the data for tensor
  // param data_type [TypeId] data type
  explicit Tensor(const py::float_ &input, const TypePtr &data_type = nullptr);

  // brief Constructor
  //
  // param input [py::int_] the data for tensor
  // param data_type [TypeId] data type
  explicit Tensor(const py::int_ &input, const TypePtr &data_type = nullptr);

  // brief Constructor
  //
  // param input [Tensor] the data for tensor
  // param data_type [TypeId] data type
  Tensor(const Tensor &tensor, const TypePtr &data_type = nullptr);

  ~Tensor() override = default;

  MS_DECLARE_PARENT(Tensor, MetaTensor);

  // brief Overloads operator = for Tensor.
  //
  // The constructed Tensor object has the same type and shape with tensor.
  //
  // param tensor An existing Tensor object.
  Tensor &operator=(const Tensor &tensor);

  // brief Compares two Tensor objects.
  //
  // Compare two tensor objects to see if they have same data type, shape and
  // data value.
  //
  // param tensor The Tensor object to be compared.
  // return true: If having same type, shape and data, return true, or return false.
  bool operator==(const Tensor &tensor) const;

  // It is different from 'operator==' which just compare shape/type/address, it do real value comparison.
  bool ValueEqual(const Tensor &other) const;

  bool operator==(const Value &other) const override {
    if (other.isa<Tensor>()) {
      auto other_ = static_cast<const Tensor &>(other);
      return *this == other_;
    } else {
      return false;
    }
  }

  py::tuple GetPyTupleShape() const;

  // brief Gets tensor's dimension
  //
  // return The number of dimensions of the tensor data.
  int DataDim() const;

  // brief Getting tensor data size
  //
  // return The total number of elements of the tensor data.
  int DataSize() const;

  // brief Tensor's data value.
  //
  // return [py::array] The tensor's data in py::array.
  py::array data() const;

  // brief Get the data type fo the tensor for C++
  //
  // return [int] The tensor's data type will be cast to int to return.
  int data_type_c() const;

  // brief Get the tensor's shape for C++
  //
  // return [std::vector<int>]
  std::vector<int> shape_c(void) const;

  // brief Get Tensor data pointer for c++ type
  //
  // param writable true if writable, false if read only
  // return The pointer to the object
  void *data_c(bool writable = false);

  // brief Get data type from tensor data.
  //
  // param buf The buffer info of the py::array data.
  // return The [TypeId] of the tensor data.
  TypeId GetDataType(const py::buffer_info &buf) const;

  // brief Sets the data type of a tensor.
  //
  // param data_type The data type of the tensor to be set.
  //
  TypeId set_data_type(const TypeId data_type) override;
  TypePtr SetDtype(const TypePtr type_ptr) override;
  std::string GetShapeAndDataTypeInfo() const;
  std::string ToString() const override;
  std::string ToStringRepr() const;
  py::array data_;  // < Tensor's data value
  const bool parse_info_ = true;
  bool is_init();
  void set_init_flag(bool flag);

 private:
  // brief init tensor
  //
  // param input [py::array] the data for tensor
  // param data_type [TypeId] data type
  // return true if succeed, false if failed.
  void init(const py::array &input, const TypeId &data_type);
  void init(const py::array &input, const TypePtr &type_ptr);
  bool init_flag_{false};
  // brief init tensor attribute
  //
  // param data_type [TypeId] Data type of the tensor.
  // param shape [py::array] The shape of the tensor.
  // return true if succeed, false if failed.
  void init(TypeId data_type, const std::vector<int> &shape, py::array *data);
  bool convert_data(const py::array &in, const TypeId in_data_type, py::array *out, const TypeId out_data_type);

 public:
  bool is_dirty() const { return dirty_; }
  void set_dirty(const bool dirty) { dirty_ = dirty; }
  DeviceAddressPtr device_address() const { return device_address_; }
  void set_device_address(const DeviceAddressPtr &device_address) { device_address_ = device_address; }
  py::array data_sync();

 private:
  bool dirty_{true};
  DeviceAddressPtr device_address_{nullptr};
};

using TensorPtr = std::shared_ptr<Tensor>;
using TensorPtrList = std::vector<std::shared_ptr<Tensor>>;
}  // namespace tensor
}  // namespace mindspore

#endif  // MINDSPORE_CCSRC_IR_TENSOR_H_
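The npy_scalar_caster specialization above is what lets np.float16 scalars cross the Python/C++ boundary as Eigen::half. A hedged sketch of the effect (half_demo and HalfIdentity are hypothetical names, not part of the commit):

#include "pybind11/pybind11.h"
#include "ir/tensor.h"  // brings in the float16 type_caster specialization

// A np.float16 scalar arriving from Python loads into Eigen::half, and
// returning one produces a zero-dimensional float16 array scalar.
float16 HalfIdentity(float16 v) { return v; }

PYBIND11_MODULE(half_demo, m) {
  // With the caster registered, pybind11 handles float16 like any scalar.
  m.def("half_identity", &HalfIdentity, "Round-trip a numpy float16 scalar");
}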
mindspore/ccsrc/kernel/kernel.h
 /**
- * Copyright 2019 Huawei Technologies Co., Ltd
+ * Copyright 2019-2020 Huawei Technologies Co., Ltd
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.
 ...
@@ -22,7 +22,7 @@
 #include "ir/anf.h"
 #include "ir/dtype.h"
 #include "utils/utils.h"
-#include "ir/meta_tensor.h"
+#include "ir/tensor.h"
 #include "pipeline/static_analysis/dshape.h"
 #include "utils/log_adapter.h"
 ...
mindspore/ccsrc/ir/param_value_minnie.h → mindspore/ccsrc/minnie/param_value_minnie.h
 ...
@@ -14,8 +14,8 @@
  * limitations under the License.
  */

-#ifndef MINDSPORE_CCSRC_IR_PARAM_VALUE_MINNIE_H_
-#define MINDSPORE_CCSRC_IR_PARAM_VALUE_MINNIE_H_
+#ifndef MINDSPORE_CCSRC_MINNIE_PARAM_VALUE_MINNIE_H_
+#define MINDSPORE_CCSRC_MINNIE_PARAM_VALUE_MINNIE_H_

 #include <memory>
 ...
@@ -39,5 +39,6 @@ class ParamValueMinnie : public ParamValue {
 };

 using ParamValueMinniePtr = std::shared_ptr<ParamValueMinnie>;
 }  // namespace mindspore

-#endif  // MINDSPORE_CCSRC_IR_PARAM_VALUE_MINNIE_H_
+#endif  // MINDSPORE_CCSRC_MINNIE_PARAM_VALUE_MINNIE_H_
mindspore/ccsrc/minnie/tensor_minnie.cc (new file)

/**
 * Copyright 2020 Huawei Technologies Co., Ltd
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "minnie/tensor_minnie.h"

namespace mindspore {
namespace tensor {
TensorMinnie &TensorMinnie::operator=(const TensorMinnie &tensor) {
  if (&tensor == this) {
    return *this;
  }
  this->tensor_addr_ = tensor.tensor_addr();
  this->tensor_size_ = tensor.tensor_size();
  return *this;
}

bool TensorMinnie::operator==(const TensorMinnie &tensor) {
  return tensor_addr_ == tensor.tensor_addr() && tensor_size_ == tensor.tensor_size();
}
}  // namespace tensor
}  // namespace mindspore
mindspore/ccsrc/minnie/tensor_minnie.h (new file)

/**
 * Copyright 2020 Huawei Technologies Co., Ltd
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef MINDSPORE_CCSRC_MINNIE_TENSOR_MINNIE_H_
#define MINDSPORE_CCSRC_MINNIE_TENSOR_MINNIE_H_

#include <memory>

#include "ir/meta_tensor.h"

namespace mindspore {
namespace tensor {
// definition of Tensor Minnie
class TensorMinnie : public MetaTensor {
 public:
  TensorMinnie() : MetaTensor() {}
  ~TensorMinnie() override = default;
  MS_DECLARE_PARENT(TensorMinnie, MetaTensor)

  // brief Overloads operator = for TensorMinnie.
  //
  // The constructed TensorMinnie object has the same type and shape with tensor_base.
  //
  // param meta_tensor An existing TensorMinnie object.
  virtual TensorMinnie &operator=(const TensorMinnie &tensor);

  // brief Compares two TensorMinnie objects.
  //
  // The constructed TensorMinnie object has the same type and shape with tensor_base.
  //
  // param meta_tensor The TensorMinnie object to be compared.
  // return true: If having same type and shape, return true, or return false.
  virtual bool operator==(const TensorMinnie &tensor);

  // brief Get the tensor's size for C++
  //
  // return size_t
  size_t tensor_size() const { return tensor_size_; }

  // brief Set Tensor data size for c++ type
  void set_tensor_size(size_t size) { tensor_size_ = size; }

  // brief Get Tensor data pointer for c++ type
  //
  // return The pointer to the object
  void *tensor_addr() const { return tensor_addr_; }

  // brief Set Tensor data pointer for c++ type
  void set_tensor_addr(void *addr) { tensor_addr_ = addr; }

 protected:
  // brief Data addr of the tensor.
  void *tensor_addr_;

  // brief Data size of the tensor.
  size_t tensor_size_;
};

using TensorMinniePtr = std::shared_ptr<TensorMinnie>;
}  // namespace tensor
}  // namespace mindspore

#endif  // MINDSPORE_CCSRC_MINNIE_TENSOR_MINNIE_H_
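TensorMinnie carries only a raw buffer pointer and its byte size, so a runtime can describe memory without any Python dependency. A hedged usage sketch (WrapBuffer is a hypothetical name):

#include <memory>
#include "minnie/tensor_minnie.h"

void WrapBuffer(void *dev_ptr, size_t nbytes) {
  auto t = std::make_shared<mindspore::tensor::TensorMinnie>();
  t->set_tensor_addr(dev_ptr);
  t->set_tensor_size(nbytes);
  // Note: operator== compares address and size only, never buffer contents.
}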
mindspore/ccsrc/operator/ops.cc
 /**
- * Copyright 2019 Huawei Technologies Co., Ltd
+ * Copyright 2019-2020 Huawei Technologies Co., Ltd
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.
 ...
@@ -17,8 +17,6 @@
 #include "operator/ops.h"
 #include <memory>
 #include <string>
-#include "pipeline/parse/python_adapter.h"
-#include "pipeline/parse/data_converter.h"

 namespace mindspore {
 // namespace to support primitive operators
 ...
@@ -255,15 +253,5 @@ const PrimitivePtr kPrimScalarSummary = std::make_shared<Primitive>("ScalarSumma
 const PrimitivePtr kPrimImageSummary = std::make_shared<Primitive>("ImageSummary");
 const PrimitivePtr kPrimTensorSummary = std::make_shared<Primitive>("TensorSummary");
 const PrimitivePtr kPrimHistogramSummary = std::make_shared<Primitive>("HistogramSummary");
-
-ValuePtr GetPythonOps(const std::string &op_name, const std::string &module_name) {
-  py::object obj = parse::python_adapter::GetPyFn(module_name, op_name);
-  ValuePtr node = nullptr;
-  bool succ = parse::ConvertData(obj, &node);
-  if (!succ) {
-    MS_LOG(EXCEPTION) << "get Python op " << op_name << " from " << module_name << " fail";
-  }
-  return node;
-}
 }  // namespace prim
 }  // namespace mindspore
mindspore/ccsrc/operator/ops.h
 /**
- * Copyright 2019 Huawei Technologies Co., Ltd
+ * Copyright 2019-2020 Huawei Technologies Co., Ltd
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.
 ...
@@ -21,7 +21,7 @@
 #include <string>
 #include <memory>
 #include "ir/anf.h"
-#include "ir/primitive.h"
+#include "ir/primitive_base.h"

 namespace mindspore {
 // namespace to support primitive operators
 ...
mindspore/ccsrc/operator/ops_extends.cc (new file)

/**
 * Copyright 2020 Huawei Technologies Co., Ltd
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "operator/ops.h"
#include <memory>
#include <string>
#include "pipeline/parse/python_adapter.h"
#include "pipeline/parse/data_converter.h"

namespace mindspore {
// namespace to support primitive operators
namespace prim {
ValuePtr GetPythonOps(const std::string &op_name, const std::string &module_name) {
  py::object obj = parse::python_adapter::GetPyFn(module_name, op_name);
  ValuePtr node = nullptr;
  bool succ = parse::ConvertData(obj, &node);
  if (!succ) {
    MS_LOG(EXCEPTION) << "get Python op " << op_name << " from " << module_name << " fail";
  }
  return node;
}
}  // namespace prim
}  // namespace mindspore
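GetPythonOps fetches a Python-defined symbol and converts it into a graph ValuePtr, which is why it now lives in this Python-facing extension file rather than in ops.cc. A hedged sketch of a caller (LoadPythonOp and both name strings are illustrative, not taken from the commit):

#include "operator/ops.h"

void LoadPythonOp() {
  // Imports the named Python module, looks up the symbol, and converts it;
  // raises via MS_LOG(EXCEPTION) if the lookup or conversion fails.
  auto value = mindspore::prim::GetPythonOps("hyper_map", "mindspore.ops.composite");
  MS_LOG(INFO) << "Resolved Python op: " << value->ToString();
}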
mindspore/ccsrc/parallel/context.h
 /**
- * Copyright 2019 Huawei Technologies Co., Ltd
+ * Copyright 2019-2020 Huawei Technologies Co., Ltd
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.
 ...
@@ -29,6 +29,7 @@
 #include "ir/anf.h"
 #include "ir/func_graph.h"
 #include "debug/info.h"
+#include "pipeline/static_analysis/abstract_value.h"

 namespace mindspore {
 namespace parallel {
 ...
mindspore/ccsrc/parallel/graph_util/get_parallel_info.cc
 /**
- * Copyright 2019 Huawei Technologies Co., Ltd
+ * Copyright 2019-2020 Huawei Technologies Co., Ltd
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.
 ...
@@ -23,6 +23,7 @@
 #include "common/utils.h"
 #include "ir/func_graph.h"
+#include "parallel/ops_info/operator_info.h"
 #include "parallel/graph_util/graph_info.h"
 #include "parallel/strategy.h"
 #include "parallel/tensor_layout/tensor_layout.h"
 ...
mindspore/ccsrc/parallel/ops_info/gather_v2_info.cc
 ...
@@ -20,7 +20,7 @@
 #include <utility>
 #include <vector>

-#include "ir/meta_tensor.h"
+#include "ir/tensor.h"
 #include "ir/value.h"
 #include "parallel/auto_parallel/costmodel.h"
 #include "parallel/device_matrix.h"
 ...
mindspore/ccsrc/parallel/ops_info/operator_info.cc
 /**
- * Copyright 2019 Huawei Technologies Co., Ltd
+ * Copyright 2019-2020 Huawei Technologies Co., Ltd
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.
 ...
@@ -25,7 +25,7 @@
 #include <vector>

 #include "ir/dtype.h"
-#include "ir/meta_tensor.h"
+#include "ir/tensor.h"
 #include "ir/value.h"
 #include "parallel/auto_parallel/edge_costmodel.h"
 #include "parallel/auto_parallel/graph_costmodel.h"
 ...
mindspore/ccsrc/parallel/ops_info/reduce_method_info.h
 /**
- * Copyright 2019 Huawei Technologies Co., Ltd
+ * Copyright 2019-2020 Huawei Technologies Co., Ltd
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.
 ...
@@ -22,7 +22,7 @@
 #include <unordered_map>
 #include <vector>

-#include "ir/meta_tensor.h"
+#include "ir/tensor.h"
 #include "ir/value.h"
 #include "parallel/auto_parallel/operator_costmodel.h"
 #include "parallel/ops_info/activation_info.h"
 ...
mindspore/ccsrc/parallel/step_auto_parallel.cc
 /**
- * Copyright 2019 Huawei Technologies Co., Ltd
+ * Copyright 2019-2020 Huawei Technologies Co., Ltd
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.
 ...
@@ -29,7 +29,7 @@
 #include "ir/anf.h"
 #include "ir/param_value_py.h"
-#include "ir/meta_tensor.h"
+#include "ir/tensor.h"
 #include "optimizer/opt.h"
 #include "optimizer/optimizer.h"
 #include "parallel/auto_parallel/dp_algo_costmodel.h"
 ...
mindspore/ccsrc/parallel/step_parallel.cc
 /**
- * Copyright 2019 Huawei Technologies Co., Ltd
+ * Copyright 2019-2020 Huawei Technologies Co., Ltd
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.
 ...
@@ -27,7 +27,7 @@
 #include <unordered_map>
 #include <utility>

-#include "ir/meta_tensor.h"
+#include "ir/tensor.h"
 #include "ir/param_value_py.h"
 #include "operator/ops.h"
 #include "optimizer/optimizer.h"
 ...
mindspore/ccsrc/pipeline/parse/resolve.h
 /**
- * Copyright 2019 Huawei Technologies Co., Ltd
+ * Copyright 2019-2020 Huawei Technologies Co., Ltd
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.
 ...
@@ -23,6 +23,7 @@
 #include "ir/manager.h"
 #include "pipeline/parse/python_adapter.h"
 #include "pipeline/parse/parse_base.h"
+#include "pipeline/static_analysis/abstract_value.h"
 #include "utils/log_adapter.h"

 // forward declaration of ResourceBase
 ...
mindspore/ccsrc/pipeline/pipeline.h
 /**
- * Copyright 2019 Huawei Technologies Co., Ltd
+ * Copyright 2019-2020 Huawei Technologies Co., Ltd
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.
 ...
@@ -26,7 +26,7 @@
 #include <mutex>
 #include "debug/draw.h"
 #include "ir/anf.h"
-#include "ir/meta_tensor.h"
+#include "ir/tensor.h"
 #include "pipeline/action.h"
 #include "vm/segment_runner.h"
 #include "vm/transform.h"
 ...
mindspore/ccsrc/pipeline/pipeline_ge.cc
 ...
@@ -23,7 +23,7 @@
 #include <algorithm>

 #include "debug/anf_ir_dump.h"
-#include "ir/meta_tensor.h"
+#include "ir/tensor.h"
 #include "transform/convert.h"
 #include "transform/df_graph_manager.h"
 #include "transform/graph_builder.h"
 ...
mindspore/ccsrc/pipeline/remove_value_node_dup.cc
 /**
- * Copyright 2019 Huawei Technologies Co., Ltd
+ * Copyright 2019-2020 Huawei Technologies Co., Ltd
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.
 ...
@@ -16,7 +16,7 @@
 #include "pipeline/remove_value_node_dup.h"
 #include "ir/anf.h"
-#include "ir/meta_tensor.h"
+#include "ir/tensor.h"
 #include "ir/manager.h"
 #include "optimizer/cse.h"
 #include "utils/log_adapter.h"
 ...
mindspore/ccsrc/pipeline/static_analysis/abstract_value.h
 /**
  * This is the C++ adaptation and derivative work of Myia (https://github.com/mila-iqia/myia/).
  *
- * Copyright 2019 Huawei Technologies Co., Ltd
+ * Copyright 2019-2020 Huawei Technologies Co., Ltd
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.
 ...
@@ -30,7 +30,7 @@
 #include "ir/base.h"
 #include "ir/dtype.h"
 #include "ir/value.h"
-#include "ir/meta_tensor.h"
+#include "ir/tensor.h"
 #include "pipeline/static_analysis/dshape.h"

 namespace mindspore {
 ...
mindspore/ccsrc/pipeline/static_analysis/prim.cc
 /**
  * This is the C++ adaptation and derivative work of Myia (https://github.com/mila-iqia/myia/).
  *
- * Copyright 2019 Huawei Technologies Co., Ltd
+ * Copyright 2019-2020 Huawei Technologies Co., Ltd
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.
 ...
@@ -34,7 +34,7 @@
 #include "./common.h"
 #include "pipeline/resource.h"
 #include "pipeline/parse/resolve.h"
-#include "ir/meta_tensor.h"
+#include "ir/tensor.h"
 #include "utils/convert_utils.h"
 #include "pipeline/parse/data_converter.h"
 #include "pipeline/static_analysis/param_validator.h"
...
mindspore/ccsrc/pipeline/static_analysis/static_analysis.cc
 /**
  * This is the C++ adaptation and derivative work of Myia (https://github.com/mila-iqia/myia/).
  *
- * Copyright 2019 Huawei Technologies Co., Ltd
+ * Copyright 2019-2020 Huawei Technologies Co., Ltd
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.
 ...
@@ -25,7 +25,7 @@
 #include "pipeline/static_analysis/prim.h"
 #include "operator/ops.h"
 #include "utils/symbolic.h"
-#include "ir/meta_tensor.h"
+#include "ir/tensor.h"
 #include "ir/func_graph_cloner.h"
 #include "./common.h"
 #include "pipeline/parse/data_converter.h"
...
mindspore/ccsrc/pre_activate/common/pass_manager.cc
 /**
- * Copyright 2019 Huawei Technologies Co., Ltd
+ * Copyright 2019-2020 Huawei Technologies Co., Ltd
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.
 ...
@@ -15,6 +15,7 @@
  */
 #include "pre_activate/common/pass_manager.h"
+#include <sys/time.h>
 #include <unordered_set>
 #include <deque>
 #include <string>
...
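Aside from the copyright bump, the only change to pass_manager.cc is the new <sys/time.h> include, which points at wall-clock timing of individual passes via gettimeofday. A plausible sketch of that instrumentation (the helper and the Run call below are illustrative, not the actual PassManager code):

#include <sys/time.h>

// Current wall-clock time in microseconds (sketch).
static inline double NowInUs() {
  struct timeval tv;
  (void)gettimeofday(&tv, nullptr);
  return static_cast<double>(tv.tv_sec) * 1e6 + tv.tv_usec;
}

// Hypothetical use around a pass invocation:
//   double start = NowInUs();
//   bool changed = pass->Run(func_graph);
//   MS_LOG(INFO) << "Run pass cost " << (NowInUs() - start) << " us";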
mindspore/ccsrc/predict/converter/attr_utils/convert_util.h
 /**
- * Copyright 2019 Huawei Technologies Co., Ltd
+ * Copyright 2019-2020 Huawei Technologies Co., Ltd
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.
 ...
@@ -24,7 +24,7 @@
 #include <unordered_map>
 #include <string>
 #include <fstream>
-#include "ir/meta_tensor.h"
+#include "ir/tensor.h"
 #include "session/anf_runtime_algorithm.h"
 #include "predict/schema/inner/ms_generated.h"
...
mindspore/ccsrc/predict/converter/executor_tensor.h
 /**
- * Copyright 2019 Huawei Technologies Co., Ltd
+ * Copyright 2019-2020 Huawei Technologies Co., Ltd
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.
 ...
@@ -21,7 +21,7 @@
 #include <memory>
 #include <unordered_map>
 #include <utility>
-#include "ir/meta_tensor.h"
+#include "ir/tensor.h"
 namespace mindspore {
 namespace executor {
...
mindspore/ccsrc/session/ascend_session.cc
 /**
- * Copyright 2019 Huawei Technologies Co., Ltd
+ * Copyright 2019-2020 Huawei Technologies Co., Ltd
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.
 ...
@@ -20,7 +20,7 @@
 #include <set>
 #include <list>
 #include "operator/ops.h"
-#include "ir/meta_tensor.h"
+#include "ir/tensor.h"
 #include "ir/anf.h"
 #include "common/trans.h"
 #include "device/kernel_runtime.h"
...
mindspore/ccsrc/session/cpu_session.cc
 /**
- * Copyright 2019 Huawei Technologies Co., Ltd
+ * Copyright 2019-2020 Huawei Technologies Co., Ltd
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.
 ...
@@ -16,7 +16,7 @@
 #include "session/cpu_session.h"
 #include <algorithm>
-#include "ir/meta_tensor.h"
+#include "ir/tensor.h"
 #include "ir/anf.h"
 #include "kernel/kernel.h"
 #include "common/utils.h"
...
mindspore/ccsrc/session/session_basic.h
 /**
- * Copyright 2019 Huawei Technologies Co., Ltd
+ * Copyright 2019-2020 Huawei Technologies Co., Ltd
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.
 ...
@@ -25,7 +25,7 @@
 #include "session/session_context.h"
 #include "session/kernel_graph.h"
 #include "ir/anf.h"
-#include "ir/meta_tensor.h"
+#include "ir/tensor.h"
 #include "utils/any.h"
 #include "utils/base_ref.h"
 #include "utils/contract.h"
...
mindspore/ccsrc/session/session_context.h
 /**
- * Copyright 2019 Huawei Technologies Co., Ltd
+ * Copyright 2019-2020 Huawei Technologies Co., Ltd
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.
 ...
@@ -22,7 +22,7 @@
 #include <utility>
 #include <string>
-#include "ir/meta_tensor.h"
+#include "ir/tensor.h"
 #include "pipeline/resource.h"
 #include "utils/context/ms_context.h"
 namespace mindspore {
...
mindspore/ccsrc/transform/convert.h
 /**
- * Copyright 2019 Huawei Technologies Co., Ltd
+ * Copyright 2019-2020 Huawei Technologies Co., Ltd
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.
 ...
@@ -32,7 +32,7 @@
 #include "ir/anf.h"
 #include "ir/func_graph.h"
 #include "transform/util.h"
-#include "ir/meta_tensor.h"
+#include "ir/tensor.h"
 #include "transform/df_graph_manager.h"
 #include "utils/config_manager.h"
 #include "transform/op_declare.h"
...
mindspore/ccsrc/transform/graph_runner.h
 /**
- * Copyright 2019 Huawei Technologies Co., Ltd
+ * Copyright 2019-2020 Huawei Technologies Co., Ltd
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.
 ...
@@ -25,7 +25,7 @@
 #include "transform/types.h"
 #include "transform/util.h"
-#include "ir/meta_tensor.h"
+#include "ir/tensor.h"
 #include "transform/df_graph_manager.h"
 namespace mindspore {
...
mindspore/ccsrc/transform/types.h
 /**
- * Copyright 2019 Huawei Technologies Co., Ltd
+ * Copyright 2019-2020 Huawei Technologies Co., Ltd
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.
 ...
@@ -24,7 +24,7 @@
 #include <unordered_map>
 #include "ir/anf.h"
 #include "ir/dtype.h"
-#include "ir/meta_tensor.h"
+#include "ir/tensor.h"
 #include "graph/tensor.h"
 #ifdef OPEN_SOURCE
...
mindspore/ccsrc/transform/util.h
 /**
- * Copyright 2019 Huawei Technologies Co., Ltd
+ * Copyright 2019-2020 Huawei Technologies Co., Ltd
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.
 ...
@@ -24,7 +24,7 @@
 #include "securec/include/securec.h"
 #include "ir/anf.h"
 #include "ir/dtype.h"
-#include "ir/meta_tensor.h"
+#include "ir/tensor.h"
 #include "transform/types.h"
 #include "graph/tensor.h"
...
mindspore/ccsrc/utils/callbacks.h
 /**
- * Copyright 2019 Huawei Technologies Co., Ltd
+ * Copyright 2019-2020 Huawei Technologies Co., Ltd
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.
 ...
@@ -20,7 +20,7 @@
 #include <string>
 #include <vector>
 #include <memory>
-#include "ir/meta_tensor.h"
+#include "ir/tensor.h"
 namespace mindspore {
 namespace callbacks {
...
mindspore/ccsrc/utils/callbacks_ge.h
@@ -22,7 +22,7 @@
 #include <memory>
 #include "transform/types.h"
 #include "transform/util.h"
-#include "ir/meta_tensor.h"
+#include "ir/tensor.h"
 namespace mindspore {
 namespace callbacks {
...
mindspore/ccsrc/utils/context/ms_context.cc
@@ -29,7 +29,7 @@
 #ifdef ENABLE_GE
 #include "transform/df_graph_manager.h"
 #endif
-#include "ir/meta_tensor.h"
+#include "ir/tensor.h"
 namespace mindspore {
 #ifdef ENABLE_GE
...
mindspore/ccsrc/utils/convert_utils.cc
 /**
- * Copyright 2019 Huawei Technologies Co., Ltd
+ * Copyright 2019-2020 Huawei Technologies Co., Ltd
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.
 ...
@@ -26,7 +26,7 @@
 #include "pybind11/pybind11.h"
-#include "ir/meta_tensor.h"
+#include "ir/tensor.h"
 #include "pipeline/parse/parse.h"
 #include "pipeline/parse/parse_base.h"
 #include "ir/value.h"
...
mindspore/ccsrc/utils/graph_utils.h
 /**
  * This is the C++ adaptation and derivative work of Myia (https://github.com/mila-iqia/myia/).
  *
- * Copyright 2019 Huawei Technologies Co., Ltd
+ * Copyright 2019-2020 Huawei Technologies Co., Ltd
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.
 ...
@@ -31,7 +31,7 @@
 #include "ir/anf.h"
 #include "ir/primitive.h"
 #include "ir/scalar.h"
-#include "ir/meta_tensor.h"
+#include "ir/tensor.h"
 #include "debug/label.h"
 namespace mindspore {
...
mindspore/ccsrc/utils/symbolic.h
 /**
  * This is the C++ adaptation and derivative work of Myia (https://github.com/mila-iqia/myia/).
  *
- * Copyright 2019 Huawei Technologies Co., Ltd
+ * Copyright 2019-2020 Huawei Technologies Co., Ltd
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.
 ...
@@ -27,6 +27,7 @@
 #include "ir/anf.h"
 #include "pipeline/static_analysis/abstract_value.h"
+#include "utils/any.h"
 namespace mindspore {
...
mindspore/ccsrc/utils/tensorprint_utils.cc
@@ -20,7 +20,7 @@
 #include <memory>
 #include <string>
 #include <vector>
-#include "ir/meta_tensor.h"
+#include "ir/tensor.h"
 #include "device/convert_tensor_utils.h"
 #include "./securec.h"
 #ifndef NO_DLIB
...
mindspore/ccsrc/vm/vmimpl.cc
 /**
  * This is the C++ adaptation and derivative work of Myia (https://github.com/mila-iqia/myia/).
  *
- * Copyright 2019 Huawei Technologies Co., Ltd
+ * Copyright 2019-2020 Huawei Technologies Co., Ltd
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.
 ...
@@ -26,10 +26,11 @@
 #include <memory>
 #include <set>
-#include "ir/meta_tensor.h"
+#include "ir/tensor.h"
 #include "operator/ops.h"
 #include "ir/manager.h"
 #include "ir/func_graph_cloner.h"
+#include "ir/primitive.h"
 #include "utils/convert_utils.h"
 #include "utils/primitive_utils.h"
 #include "debug/draw.h"
...
@@ -37,8 +38,6 @@
 namespace mindspore {
 namespace compile {
-using PrimitivePyPtr = std::shared_ptr<PrimitivePy>;
 // Indicate a call to a new frame.
 struct CallWrap : public Base {
   explicit CallWrap(const VMFramePtr &vm_frame) : frame(vm_frame) {}
...
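The two vmimpl.cc hunks pair up: the file gains #include "ir/primitive.h" and loses its file-local PrimitivePyPtr alias, which suggests the alias now comes from that shared header. A sketch of the hoisted declaration, assuming that is where it landed:

// ir/primitive.h (assumed fragment, not verbatim)
#include <memory>

namespace mindspore {
class PrimitivePy;  // the pybind-backed primitive
using PrimitivePyPtr = std::shared_ptr<PrimitivePy>;
}  // namespace mindspore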
mindspore/ccsrc/vm/vmimpl.h
 /**
  * This is the C++ adaptation and derivative work of Myia (https://github.com/mila-iqia/myia/).
  *
- * Copyright 2019 Huawei Technologies Co., Ltd
+ * Copyright 2019-2020 Huawei Technologies Co., Ltd
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.
 ...
@@ -26,7 +26,7 @@
 #include "ir/anf.h"
 #include "ir/manager.h"
-#include "ir/meta_tensor.h"
+#include "ir/tensor.h"
 #include "utils/base_ref.h"
 namespace mindspore {
...
tests/ut/cpp/ir/meta_tensor_test.cc
@@ -21,7 +21,7 @@
 #include "common/py_func_graph_fetcher.h"
 #include "securec/include/securec.h"
-#include "ir/meta_tensor.h"
+#include "ir/tensor.h"
 namespace mindspore {
 namespace tensor {
...
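The tensor unit test keeps its meta_tensor_test.cc name but now pulls the concrete class from "ir/tensor.h". For orientation, the kind of check such a test performs looks roughly like the following; the constructor signature and the kNumberTypeFloat32 type id are assumptions, not quotes from the test file.

#include <vector>
#include "gtest/gtest.h"
#include "ir/tensor.h"

namespace mindspore {
namespace tensor {

TEST(TestMetaTensor, ShapeRoundTrip) {
  std::vector<int> shape = {2, 3};
  // Assumed constructor: element type id plus shape.
  MetaTensor t(kNumberTypeFloat32, shape);
  EXPECT_EQ(shape, t.shape());
}

}  // namespace tensor
}  // namespace mindspore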
tests/ut/cpp/operator/ops_test.cc
@@ -19,6 +19,7 @@
 #include "common/common_test.h"
 #include "ir/value.h"
+#include "ir/primitive.h"
 #include "operator/ops.h"
 #include "./common.h"
...
tests/ut/cpp/pipeline/static_analysis/prim_test.cc
@@ -25,7 +25,7 @@
 #include "pipeline/static_analysis/helper.h"
 #include "operator/ops.h"
 #include "debug/draw.h"
-#include "ir/meta_tensor.h"
+#include "ir/tensor.h"
 #include "utils/symbolic.h"
 #include "./common.h"
...
tests/ut/cpp/pipeline/static_analysis/static_analysis_test.cc
@@ -21,7 +21,7 @@
 #include "common/common_test.h"
 #include "common/py_func_graph_fetcher.h"
 #include "ir/manager.h"
-#include "ir/meta_tensor.h"
+#include "ir/tensor.h"
 #include "operator/ops.h"
 #include "pipeline/parse/parse.h"
 #include "pipeline/parse/data_converter.h"
...
tests/ut/cpp/pre_activate/ascend/enhancer/add_memcpy_async_test.cc
@@ -17,7 +17,7 @@
 #include "common/py_func_graph_fetcher.h"
 #include "session/anf_runtime_algorithm.h"
 #include "operator/ops.h"
-#include "ir/meta_tensor.h"
+#include "ir/tensor.h"
 #include "debug/anf_ir_dump.h"
 #include "utils/utils.h"
 #include "kernel/kernel_build_info.h"
...
tests/ut/cpp/pre_activate/ascend/enhancer/getnext_memcpy_elimination.cc
@@ -17,7 +17,7 @@
 #include "common/py_func_graph_fetcher.h"
 #include "session/anf_runtime_algorithm.h"
 #include "operator/ops.h"
-#include "ir/meta_tensor.h"
+#include "ir/tensor.h"
 #include "debug/anf_ir_dump.h"
 #include "utils/utils.h"
 #include "kernel/kernel_build_info.h"
...
tests/ut/cpp/pre_activate/ascend/format_type/check_consistency_test.cc
 /**
- * Copyright 2019 Huawei Technologies Co., Ltd
+ * Copyright 2019-2020 Huawei Technologies Co., Ltd
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.
 ...
@@ -21,7 +21,7 @@
 #include "pipeline/resource.h"
 #include "pipeline/action.h"
 #include "operator/ops.h"
-#include "ir/meta_tensor.h"
+#include "ir/tensor.h"
 #include "ir/manager.h"
 #include "debug/anf_ir_dump.h"
 #include "utils/utils.h"
...
tests/ut/cpp/pre_activate/ascend/format_type/insert_cast_test.cc
 /**
- * Copyright 2019 Huawei Technologies Co., Ltd
+ * Copyright 2019-2020 Huawei Technologies Co., Ltd
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.
 ...
@@ -15,7 +15,7 @@
  */
 #include "common/backend_common_test.h"
 #include "operator/ops.h"
-#include "ir/meta_tensor.h"
+#include "ir/tensor.h"
 #include "ir/manager.h"
 #include "debug/anf_ir_dump.h"
 #include "common/py_func_graph_fetcher.h"
...
tests/ut/cpp/pre_activate/ascend/format_type/merge_cast_to_op_test.cc
 /**
- * Copyright 2019 Huawei Technologies Co., Ltd
+ * Copyright 2019-2020 Huawei Technologies Co., Ltd
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.
 ...
@@ -17,7 +17,7 @@
 #include "common/py_func_graph_fetcher.h"
 #include "session/anf_runtime_algorithm.h"
 #include "operator/ops.h"
-#include "ir/meta_tensor.h"
+#include "ir/tensor.h"
 #include "debug/anf_ir_dump.h"
 #include "utils/utils.h"
 #include "kernel/kernel_build_info.h"
...
tests/ut/cpp/pre_activate/ascend/ir_fission/bn_grad_split_test.cc
 /**
- * Copyright 2019 Huawei Technologies Co., Ltd
+ * Copyright 2019-2020 Huawei Technologies Co., Ltd
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.
 ...
@@ -17,7 +17,7 @@
 #include "common/py_func_graph_fetcher.h"
 #include "session/anf_runtime_algorithm.h"
 #include "operator/ops.h"
-#include "ir/meta_tensor.h"
+#include "ir/tensor.h"
 #include "debug/anf_ir_dump.h"
 #include "utils/utils.h"
 #include "kernel/kernel_build_info.h"
...
tests/ut/cpp/pre_activate/ascend/ir_fission/bn_split_test.cc
 /**
- * Copyright 2019 Huawei Technologies Co., Ltd
+ * Copyright 2019-2020 Huawei Technologies Co., Ltd
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.
 ...
@@ -19,7 +19,7 @@
 #include "session/anf_runtime_algorithm.h"
 #include "pipeline/resource.h"
 #include "operator/ops.h"
-#include "ir/meta_tensor.h"
+#include "ir/tensor.h"
 #include "ir/manager.h"
 #include "debug/anf_ir_dump.h"
 #include "utils/utils.h"
...
tests/ut/cpp/pre_activate/ascend/ir_fission/layer_norm_grad_split_test.cc
@@ -17,7 +17,7 @@
 #include "common/py_func_graph_fetcher.h"
 #include "session/anf_runtime_algorithm.h"
 #include "operator/ops.h"
-#include "ir/meta_tensor.h"
+#include "ir/tensor.h"
 #include "debug/anf_ir_dump.h"
 #include "utils/utils.h"
 #include "kernel/kernel_build_info.h"
...
tests/ut/cpp/pre_activate/pass/allreduce_fusion_test.cc
 /**
- * Copyright 2019 Huawei Technologies Co., Ltd
+ * Copyright 2019-2020 Huawei Technologies Co., Ltd
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.
 ...
@@ -16,7 +16,7 @@
 #include "common/backend_common_test.h"
 #include "common/py_func_graph_fetcher.h"
 #include "operator/ops.h"
-#include "ir/meta_tensor.h"
+#include "ir/tensor.h"
 #include "ir/manager.h"
 #include "debug/anf_ir_dump.h"
 #include "session/anf_runtime_algorithm.h"
...
tests/ut/cpp/pre_activate/pass/common_subexpression_elimination_test.cc
 /**
- * Copyright 2019 Huawei Technologies Co., Ltd
+ * Copyright 2019-2020 Huawei Technologies Co., Ltd
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.
 ...
@@ -15,7 +15,7 @@
  */
 #include "common/backend_common_test.h"
 #include "operator/ops.h"
-#include "ir/meta_tensor.h"
+#include "ir/tensor.h"
 #include "ir/manager.h"
 #include "debug/anf_ir_dump.h"
 #include "common/py_func_graph_fetcher.h"
...
tests/ut/cpp/pre_activate/pass/convert_const_input_to_tensor_input_test.cc
@@ -15,7 +15,7 @@
  */
 #include "common/backend_common_test.h"
 #include "ir/anf.h"
-#include "ir/meta_tensor.h"
+#include "ir/tensor.h"
 #include "debug/anf_ir_dump.h"
 #include "common/py_func_graph_fetcher.h"
 #include "session/anf_runtime_algorithm.h"
...
tests/ut/cpp/pre_activate/pass/convert_tuple_input_to_dynamic_input_test.cc
@@ -15,7 +15,7 @@
  */
 #include "common/backend_common_test.h"
 #include "ir/anf.h"
-#include "ir/meta_tensor.h"
+#include "ir/tensor.h"
 #include "debug/anf_ir_dump.h"
 #include "common/py_func_graph_fetcher.h"
 #include "session/anf_runtime_algorithm.h"
...
tests/ut/cpp/pre_activate/pass/convert_tuple_output_to_maketuple_test.cc
@@ -15,7 +15,7 @@
  */
 #include "common/backend_common_test.h"
 #include "ir/anf.h"
-#include "ir/meta_tensor.h"
+#include "ir/tensor.h"
 #include "debug/anf_ir_dump.h"
 #include "common/py_func_graph_fetcher.h"
 #include "session/anf_runtime_algorithm.h"
...
tests/ut/cpp/pre_activate/pass/eliminate_redundant_op_test.cc
 /**
- * Copyright 2019 Huawei Technologies Co., Ltd
+ * Copyright 2019-2020 Huawei Technologies Co., Ltd
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.
 ...
@@ -17,7 +17,7 @@
 #include "common/backend_common_test.h"
 #include "kernel/kernel.h"
 #include "operator/ops.h"
-#include "ir/meta_tensor.h"
+#include "ir/tensor.h"
 #include "ir/manager.h"
 #include "debug/anf_ir_dump.h"
 #include "common/py_func_graph_fetcher.h"
...
tests/ut/cpp/transform/transform_base_test.h
@@ -21,7 +21,7 @@
 #include <memory>
 #include <vector>
 #include "transform/util.h"
-#include "ir/meta_tensor.h"
+#include "ir/tensor.h"
 #include "common/common_test.h"
 #include "pipeline/parse/parse.h"
...
tests/ut/cpp/vm/segment_runner_test.cc
@@ -27,7 +27,7 @@
 #include "operator/ops.h"
 #include "vm/segment_runner.h"
 #include "vm/transform.h"
-#include "ir/meta_tensor.h"
+#include "ir/tensor.h"
 #include "utils/convert_utils.h"
 #include "utils/log_adapter.h"
...