MegEngine 天元 / MegEngine

Commit 750bde95
Authored May 13, 2021 by Megvii Engine Team
Parent: 4f240ec2

refactor(mge/jit): remove is_compiled flag in cpp tensor

GitOrigin-RevId: 15f90af735e6e5e53d2ac823bd4b9382d8144144
6 changed files with 19 additions and 60 deletions (+19, -60)
imperative/python/megengine/jit/__init__.py   +0   -6
imperative/python/megengine/jit/tracing.py    +10  -13
imperative/python/src/graph_rt.cpp            +0   -1
imperative/python/src/tensor.cpp              +5   -23
imperative/python/src/tensor.h                +1   -2
imperative/python/src/trace.cpp               +3   -15
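In short: the C++ layer previously kept a global is_compiled flag (toggled from Python via set_compiled/unset_compiled) to choose between the "record ops" and "replay compiled graph" code paths. After this commit that flag is gone; the Python tracing layer decides by itself, by checking whether the active trace already owns a compiled graph (active_trace._graph), as the tracing.py and trace.cpp hunks below show. A self-contained toy sketch of that pattern (illustrative only; every name below is invented and this is not MegEngine code):

    class Trace:
        """Toy stand-in for a trace object: the execution mode is derived from
        state the object already carries (_graph), not from a global flag."""

        def __init__(self):
            self._graph = None  # populated once the trace has been compiled

        def apply(self, op, *args):
            if self._graph is not None:
                # "compiled mode": replay through the previously compiled graph
                return self._graph(op, *args)
            # "tracing mode": record the op for later compilation
            return ("recorded", op, *args)

        def compile(self):
            self._graph = lambda op, *args: ("replayed", op, *args)


    t = Trace()
    print(t.apply("add", 1, 2))  # ('recorded', 'add', 1, 2)
    t.compile()
    print(t.apply("add", 1, 2))  # ('replayed', 'add', 1, 2)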
imperative/python/megengine/jit/__init__.py

@@ -7,15 +7,11 @@
 # software distributed under the License is distributed on an
 # "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 from ..core._imperative_rt.core2 import (
-    set_cpp_apply_compiled_mode,
-    set_cpp_apply_const_compiled_mode,
     set_cpp_apply_const_with_tracing,
     set_cpp_apply_with_tracing,
 )
 from .sublinear_memory_config import SublinearMemoryConfig
 from .tracing import (
-    apply_compiled_mode,
-    apply_const_compiled_mode,
     apply_const_with_tracing,
     apply_with_tracing,
     exclude_from_trace,
@@ -24,5 +20,3 @@ from .tracing import (

 set_cpp_apply_with_tracing(apply_with_tracing)
 set_cpp_apply_const_with_tracing(apply_const_with_tracing)
-set_cpp_apply_compiled_mode(apply_compiled_mode)
-set_cpp_apply_const_compiled_mode(apply_const_compiled_mode)
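For orientation, after these removals __init__.py wires up only the tracing-mode hooks; roughly, the tail of the file now reads as follows (reconstructed from the unchanged context lines above, so treat it as an approximation rather than a verbatim copy):

    from ..core._imperative_rt.core2 import (
        set_cpp_apply_const_with_tracing,
        set_cpp_apply_with_tracing,
    )
    from .tracing import apply_const_with_tracing, apply_with_tracing

    # The C++ dispatcher calls back into these two Python hooks whenever tracing
    # is active; the *_compiled_mode hooks disappear because compiled-mode
    # dispatch now happens inside apply_with_tracing / apply_const_with_tracing.
    set_cpp_apply_with_tracing(apply_with_tracing)
    set_cpp_apply_const_with_tracing(apply_const_with_tracing)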
imperative/python/megengine/jit/tracing.py

@@ -12,20 +12,16 @@ import functools
 import itertools
 import json
 import os
 import typing
 import weakref

 import numpy as np

-from ..core._imperative_rt import GraphProfiler, common
+from ..core._imperative_rt import GraphProfiler
 from ..core._imperative_rt.core2 import Tensor as RawTensor
 from ..core._imperative_rt.core2 import (
     TensorWeakRef,
     apply,
-    set_compiled,
     set_tracing,
     skip_tracing,
-    unset_compiled,
     unset_tracing,
 )
 from ..core._imperative_rt.ops import (
@@ -394,7 +390,6 @@ class trace:
             if self._untraced:
                 self._init_trace(self._symbolic)
             else:
-                set_compiled()
                 if self._graph is None:
                     self._compile()
                 self._graph.execute()
@@ -442,7 +437,6 @@ class trace:
             self._tensor_remaps = None
             self._set_active(False)
             set_symbolic_shape(self._save_symbolic_shape)
-            unset_compiled()
             unset_tracing()

         def do_exit():
@@ -989,11 +983,6 @@ class trace:
             raise RuntimeError("trace is not set with profiling=True")
         return json.loads(self._profiler.get())

-    def __del__(self):
-        for x in self._tinfo:
-            if getattr(x, "bound_data", None):
-                x.bound_data = None
-
     def trace(self, *args, **kwargs):
         raise NotImplementedError(
             "trace is deemed unbeneficial with the new "
@@ -1148,6 +1137,9 @@ def apply_const_compiled_mode(value, dtype, device, is_const, no_cache, name):


 def apply_with_tracing(op: OpDef, *args: RawTensor):
+    if active_trace._graph:
+        # if member _graph exits, then is_compiled
+        return apply_compiled_mode(op, *args)
     if hasattr(op, "scope"):
         op.scope = AutoNaming.get_scope()
     if active_trace._symbolic:
@@ -1162,11 +1154,16 @@ def apply_with_tracing(op: OpDef, *args: RawTensor):


 def apply_const_with_tracing(value, dtype, device, is_const, no_cache, name):
+    if active_trace._graph:
+        return apply_const_compiled_mode(value, dtype, device, is_const, no_cache, name)
     if active_trace._symbolic:
         outputs = apply_const_symbolic_mode(value, dtype, device, name)
     else:
         unset_tracing()
-        outputs = (RawTensor(value, dtype, device, False, name),)
+        outputs = RawTensor(value, dtype, device, False, name)
+        if np.array(value).ndim == 0:
+            setscalar(outputs)
+        outputs = (outputs,)
         set_tracing()
     active_trace._record_const(outputs)
     return list(outputs)
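None of this changes the user-facing jit.trace API: the first call of a traced function is still recorded through apply_with_tracing / apply_const_with_tracing, and later calls take the new active_trace._graph branch and replay the compiled graph. A minimal usage sketch (ordinary megengine.jit.trace usage, shown here only for context; it is not part of the diff):

    import numpy as np
    import megengine as mge
    from megengine.jit import trace

    @trace(symbolic=True)
    def double(x):
        return x * 2

    x = mge.tensor(np.arange(4, dtype="float32"))
    print(double(x))  # first call: ops are recorded via apply_with_tracing
    print(double(x))  # later calls: the trace replays its compiled graph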
imperative/python/src/graph_rt.cpp

@@ -23,7 +23,6 @@
 #include "./common.h"
 #include "./ops.h"
 #include "megbrain/gopt/inference.h"
-#include "megbrain/imperative/ops/utility.h"

 namespace py = pybind11;
imperative/python/src/tensor.cpp

@@ -36,12 +36,9 @@ namespace mgb::imperative::python {

 interpreter::Interpreter::Channel* interpreter_for_py;

-PyObject *cpp_apply_with_tracing, *cpp_apply_const_with_tracing,
-        *cpp_apply_compiled_mode, *cpp_apply_const_compiled_mode;
+PyObject *cpp_apply_with_tracing, *cpp_apply_const_with_tracing;
 PyObject *cpp_apply_backward_varnode;

 #define REGISTE_APPLY_FUNC(mode) \
     void set_##mode(py::object pyf) { \
         mode = pyf.ptr(); \
@@ -49,14 +46,11 @@ PyObject *cpp_apply_backward_varnode;

 REGISTE_APPLY_FUNC(cpp_apply_with_tracing)
 REGISTE_APPLY_FUNC(cpp_apply_const_with_tracing)
-REGISTE_APPLY_FUNC(cpp_apply_compiled_mode)
-REGISTE_APPLY_FUNC(cpp_apply_const_compiled_mode)
 REGISTE_APPLY_FUNC(cpp_apply_backward_varnode)

 #undef REGISTE_APPLY_FUNC

 bool is_tracing = false;
-bool is_compiled = false;

 #define SET_UNSET_PROP(mode) \
     void set_##mode() { \
@@ -67,7 +61,6 @@ bool is_compiled = false;
     } \

 SET_UNSET_PROP(tracing)
-SET_UNSET_PROP(compiled)

 #undef SET_UNSET_PROP
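For readers unfamiliar with these helpers: REGISTE_APPLY_FUNC(x) generates a set_x(py::object) function that stores a Python callable into the global x, and SET_UNSET_PROP(mode) generates a set_<mode>/unset_<mode> pair that flips a global bool named is_<mode>; that is why deleting SET_UNSET_PROP(compiled) removes set_compiled()/unset_compiled() in a single line. An approximate expansion of the surviving instantiation, inferred from the macro fragments visible above (not a verbatim copy of the source):

    // Given the global declared earlier in this file:
    bool is_tracing = false;

    // SET_UNSET_PROP(tracing) roughly expands to:
    void set_tracing() {
        is_tracing = true;
    }
    void unset_tracing() {
        is_tracing = false;
    }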
@@ -263,14 +256,7 @@ TensorWrapper::TensorWrapper(PyObject* args, PyObject* kwargs) {

         // const op
         if (is_const && is_tracing) {
-            PyObject* pyf;
-            if (is_compiled) {
-                pyf = cpp_apply_const_compiled_mode;
-            } else {
-                pyf = cpp_apply_const_with_tracing;
-            }
-
-            auto py_ret = PyObject_Call(pyf, tup.ptr(), nullptr);
+            auto py_ret = PyObject_Call(cpp_apply_const_with_tracing, tup.ptr(), nullptr);
             if (!py_ret) throw py::error_already_set();
             auto py_list = py::reinterpret_steal<py::list>(py_ret);
             if (auto* t = try_cast(py_list[0].ptr())) {
@@ -961,8 +947,6 @@ void init_tensor(py::module m) {

     m.def("set_cpp_apply_with_tracing", &set_cpp_apply_with_tracing);
     m.def("set_cpp_apply_const_with_tracing", &set_cpp_apply_const_with_tracing);
-    m.def("set_cpp_apply_compiled_mode", &set_cpp_apply_compiled_mode);
-    m.def("set_cpp_apply_const_compiled_mode", &set_cpp_apply_const_compiled_mode);
     m.def("set_cpp_apply_backward_varnode", &set_cpp_apply_backward_varnode);
     m.attr("skip_tracing") = &skip_tracing;
@@ -979,8 +963,6 @@ void init_tensor(py::module m) {

     m.def("set_tracing", &set_tracing);
     m.def("unset_tracing", &unset_tracing);
-    m.def("set_compiled", &set_compiled);
-    m.def("unset_compiled", &unset_compiled);
 }

 #undef MGE_PY_INTERFACE
imperative/python/src/tensor.h

@@ -237,7 +237,6 @@ template <typename... Args>
 constexpr bool is_all_tensor_ptr = (... && std::is_same_v<decltype(resolve_arrow(std::declval<Args>())), Tensor*>);

 extern bool is_tracing; // FIXME: should use ApplyContext::global_enable
-extern bool is_compiled;

 template <typename... Args, std::enable_if_t<is_all_tensor_ptr<Args...>, int> = 0>
 apply_result_t apply(std::shared_ptr<OpDef> op, Args&&... args) {
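The is_all_tensor_ptr context line kept above is a C++17 fold expression: the && is folded over the parameter pack, so the constant is true only when every argument resolves to Tensor*. A reduced, self-contained sketch of the same idiom (toy code, unrelated to MegEngine's types):

    #include <type_traits>

    // AND-fold over the pack: true only if *every* Args is exactly int.
    template <typename... Args>
    constexpr bool all_int = (... && std::is_same_v<Args, int>);

    static_assert(all_int<int, int, int>);
    static_assert(!all_int<int, float>);

    int main() { return 0; }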
@@ -282,7 +281,7 @@ inline auto apply(std::shared_ptr<OpDef> op, Tensor*const* args, size_t nargs) {

 void init_tensor(pybind11::module);

-extern PyObject *cpp_apply_with_tracing, *cpp_apply_compiled_mode;
+extern PyObject *cpp_apply_with_tracing;
 extern PyObject *cpp_apply_backward_varnode;

 } // namespace mgb::imperative::python
imperative/python/src/trace.cpp

@@ -22,7 +22,6 @@ apply_result_t apply_trace(ApplyContext& ctx) {
     apply_result_t outputs;

     if (ctx.backward) {
-        // reach here when compiled=True
         // call megbrain_graph.py apply(BackwardGraph, *args)
         auto args = py::tuple(ctx.nargs + 1);
         args[0] = py::cast(ctx.op);
@@ -42,27 +41,16 @@ apply_result_t apply_trace(ApplyContext& ctx) {
         return outputs;
     }

-    PyObject* pyf;
-    if (is_compiled) {
-        // run apply in compiled mode, step 2, 3, etc
-        pyf = cpp_apply_compiled_mode;
-    } else {
-        // run first step, both symbolic and non symbolic
-        pyf = cpp_apply_with_tracing;
-    }
-
     auto args = py::tuple(ctx.nargs + 1);
     args[0] = py::cast(ctx.op);
     for (size_t i = 0; i < ctx.nargs; i++) {
         args[i + 1] = TensorWrapper::make(ctx.args[i]->shared_from_this());
     }
-    auto pyout = PyObject_Call(pyf, args.ptr(), nullptr);
+    auto pyout = PyObject_Call(cpp_apply_with_tracing, args.ptr(), nullptr);
     if (!pyout) throw py::error_already_set();
-    auto ret = py::reinterpret_steal<py::object>(pyout);
-
     // assumption: python function always returns PyList
-    auto tup = py::reinterpret_borrow<py::list>(ret);
-    for (auto i = 0; i < tup.size(); i++) {
+    auto tup = py::reinterpret_steal<py::list>(pyout);
+    for (size_t i = 0; i < tup.size(); i++) {
         auto tw = TensorWrapper::try_cast(tup[i].ptr());
         outputs.emplace_back(tw->m_tensor);
     }
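Two smaller cleanups ride along with the deleted is_compiled branch in this hunk: the intermediate ret object is dropped, with reinterpret_steal now adopting the list directly from the new reference returned by PyObject_Call, and the loop index becomes size_t to match py::list::size(). A minimal pybind11 sketch of the steal-versus-borrow distinction (illustrative only, not MegEngine code; call_and_adopt is an invented name):

    #include <pybind11/pybind11.h>

    namespace py = pybind11;

    // PyObject_Call returns a *new* reference. reinterpret_steal<> adopts it
    // without touching the refcount; reinterpret_borrow<> would INCREF instead,
    // which is what the removed intermediate `ret` object relied on.
    py::list call_and_adopt(PyObject* func, const py::tuple& args) {
        PyObject* out = PyObject_Call(func, args.ptr(), nullptr);
        if (!out) throw py::error_already_set();
        return py::reinterpret_steal<py::list>(out);  // ownership transferred here
    }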