Skip to content
体验新版
项目
组织
正在加载...
登录
切换导航
打开侧边栏
MegEngine 天元
MegEngine
提交
23437864
MegEngine
项目概览
MegEngine 天元
/
MegEngine
大约 1 年 前同步成功
通知
399
Star
4705
Fork
582
代码
文件
提交
分支
Tags
贡献者
分支图
Diff
Issue
0
列表
看板
标记
里程碑
合并请求
0
DevOps
流水线
流水线任务
计划
Wiki
0
Wiki
分析
仓库
DevOps
项目成员
Pages
MegEngine
项目概览
项目概览
详情
发布
仓库
仓库
文件
提交
分支
标签
贡献者
分支图
比较
Issue
0
Issue
0
列表
看板
标记
里程碑
合并请求
0
合并请求
0
Pages
DevOps
DevOps
流水线
流水线任务
计划
分析
分析
仓库分析
DevOps
Wiki
0
Wiki
成员
成员
收起侧边栏
关闭侧边栏
动态
分支图
创建新Issue
流水线任务
提交
Issue看板
提交
23437864
编写于
9月 10, 2020
作者:
M
Megvii Engine Team
浏览文件
操作
浏览文件
下载
电子邮件补丁
差异文件
fix(mgb/jit): mlir doesn't support broadcast
GitOrigin-RevId: 08bfc4c34aa378b61835269a0c0de90d04602bb6
上级
f87bba68
变更
5
隐藏空白更改
内联
并排
Showing
5 changed files
with
69 additions
and
8 deletions
+69
-8
src/jit/impl/fusion_pass.cpp
src/jit/impl/fusion_pass.cpp
+25
-3
src/jit/impl/mlir/compiler.cpp
src/jit/impl/mlir/compiler.cpp
+0
-3
src/jit/impl/mlir/ir/create_gpu_kernel_outlining_pass.cpp
src/jit/impl/mlir/ir/create_gpu_kernel_outlining_pass.cpp
+1
-1
src/jit/impl/mlir/ir/types.h
src/jit/impl/mlir/ir/types.h
+1
-1
src/jit/test/fusion.cpp
src/jit/test/fusion.cpp
+42
-0
未找到文件。
src/jit/impl/fusion_pass.cpp
浏览文件 @
23437864
...
...
@@ -291,8 +291,27 @@ void JITFusionPass::Impl::process_opr(OperatorNodeBase* opr) {
cond_cn
=
opr
->
output
(
0
)
->
comp_node
()
==
ig_gen
->
output
()
->
comp_node
(),
cond_shp
=
check_shape
(
opr
,
ig_gen
),
cond_nr_inp
=
ig_gen
->
get_cnt_input_if_add
(
opr
)
<=
max_nr_input
;
if
(
cond_readers
&&
cond_cn
&&
cond_shp
&&
cond_nr_inp
)
{
cond_nr_inp
=
ig_gen
->
get_cnt_input_if_add
(
opr
)
<=
max_nr_input
,
cond_mlir_specific
=
true
;
#if MGB_JIT_MLIR
//! FIXME mlir doesn't support broadcast currently.
auto
backend
=
MGB_GETENV
(
"MGB_JIT_BACKEND"
);
if
(
!
strcmp
(
backend
,
"MLIR"
))
{
for
(
VarNode
*
var
:
opr
->
input
())
{
if
(
!
SymbolVar
{
var
}.
as_immutable_scalar
().
valid
())
{
if
(
opr
->
node_prop
().
dep_map
().
at
(
var
)
&
DepType
::
DEV_VALUE
)
{
if
(
!
var
->
shape
().
eq_shape
(
opr
->
output
(
0
)
->
shape
()))
{
cond_mlir_specific
=
false
;
}
}
}
}
}
#endif
if
(
cond_readers
&&
cond_cn
&&
cond_shp
&&
cond_nr_inp
&&
cond_mlir_specific
)
{
ig_gen
->
add_opr
(
opr
);
}
else
{
if
(
opr
->
same_type
<
opr
::
Dimshuffle
>
())
{
...
...
@@ -344,7 +363,10 @@ bool JITFusionPass::Impl::can_be_fused(cg::OperatorNodeBase* opr) const {
}
//! As the MLIR backend has some constraints
auto
backend
=
MGB_GETENV
(
"MGB_JIT_BACKEND"
);
const
char
*
backend
=
MGB_GETENV
(
"MGB_JIT_BACKEND"
);
if
(
!
backend
)
{
backend
=
"DEFAULT"
;
}
// float elemwise
if
(
auto
elem
=
gopt
::
try_cast_as_op
<
opr
::
Elemwise
>
(
opr
))
{
bool
ret
=
true
;
...
...
src/jit/impl/mlir/compiler.cpp
浏览文件 @
23437864
...
...
@@ -222,9 +222,6 @@ void MLIRCompiler::run_lowering_pass(mlir::OwningModuleRef& module,
std
::
unique_ptr
<
Executable
>
MLIRCompiler
::
do_compile
(
const
InternalGraph
&
graph
,
const
JITExecutor
::
Args
&
args
)
{
MGB_MARK_USED_VAR
(
graph
);
MGB_MARK_USED_VAR
(
args
);
mlir
::
MLIRContext
ctx
;
ctx
.
printStackTraceOnDiagnostic
(
true
);
ctx
.
printOpOnDiagnostic
(
true
);
...
...
src/jit/impl/mlir/ir/create_gpu_kernel_outlining_pass.cpp
浏览文件 @
23437864
...
...
@@ -19,7 +19,7 @@
* implied.
*
* This file has been modified by Megvii ("Megvii Modifications").
* All Megvii Modifications are Copyright (C) 2014-20
19
Megvii Inc. All rights
* All Megvii Modifications are Copyright (C) 2014-20
20
Megvii Inc. All rights
* reserved.
*
*/
...
...
src/jit/impl/mlir/ir/types.h
浏览文件 @
23437864
...
...
@@ -19,7 +19,7 @@
namespace
mgb
{
namespace
jit
{
inline
bool
is_elemwise_float
(
const
mlir
::
Type
&
dt
)
{
inline
const
bool
is_elemwise_float
(
const
mlir
::
Type
&
dt
)
{
if
(
auto
cast
=
dt
.
dyn_cast_or_null
<
mlir
::
MemRefType
>
())
{
if
(
cast
.
getElementType
().
getKind
()
==
mlir
::
StandardTypes
::
F32
)
{
return
true
;
...
...
src/jit/test/fusion.cpp
浏览文件 @
23437864
...
...
@@ -1553,6 +1553,48 @@ TEST(TestJITExecutor, GradBehavior) {
}
}
#if MGB_JIT_MLIR
//! Exercise JIT fusion with the MLIR backend on the given comp node.
//! Builds a graph mixing same-shape and broadcasting elemwise inputs and
//! checks the fusion result against a non-fused reference execution.
void run_mlir(CompNode cn) {
    set_backend(Backend::MLIR);
    HostTensorGenerator<> gen;
    // NOTE(review): the original also generated an unused sixth tensor
    // (shape {23, 1}); it was never fed into the graph, so it is dropped.
    auto host_x0 = gen({23, 42}, cn), host_x1 = gen({23, 1}, cn),
         host_x2 = gen({1, 42}, cn), host_x3 = gen({23, 42}, cn),
         host_x4 = gen({1, 42}, cn);
    // dst = a + max(b, c) + max(d, e); b, c and e do not match the
    // (23, 42) output shape, i.e. they require broadcasting.
    auto make_dst = [&](ComputingGraph& graph) {
        auto a = opr::Host2DeviceCopy::make(graph, host_x0),
             b = opr::Host2DeviceCopy::make(graph, host_x1),
             c = opr::Host2DeviceCopy::make(graph, host_x2),
             d = opr::Host2DeviceCopy::make(graph, host_x3),
             e = opr::Host2DeviceCopy::make(graph, host_x4);
        return a + opr::max(b, c) + opr::max(d, e);
    };
    HostTensorND host_y1, host_y2;
    // Builds a reference function (host_y1) and a fused function (host_y2);
    // the trailing 2 presumably selects the graph-opt/JIT level — confirm
    // against make_func_pair's definition.
    auto funcs = make_func_pair(host_y1, host_y2, make_dst, 2);
    funcs.first->execute();
    funcs.second->execute();
    // Fused and non-fused executions must agree exactly.
    MGB_ASSERT_TENSOR_EQ(host_y1, host_y2);
    JITExecutor* jit;
    unpack_vector(find_oprs<JITExecutor>(*funcs.second), jit);
    // The MLIR backend does not support broadcast, so some Elemwise oprs
    // must remain outside the fused kernel: expect exactly two surviving
    // Elemwise oprs and three inputs to the single JIT kernel.
    ASSERT_EQ(2u, find_oprs<opr::Elemwise>(*funcs.second).size());
    ASSERT_EQ(3u, jit->input().size());
}
// Run the MLIR fusion test on the CPU comp node.
TEST(TestJITExecutor, TestJITMlirFusion) {
    run_mlir(CompNode::load("cpu0"));
}
// Same fusion test on the GPU comp node; REQUIRE_GPU(1) gates the test on
// at least one available GPU (presumably skipping otherwise — see the
// macro's definition).
TEST(TestJITExecutor, TestJITMlirFusionGpu) {
    REQUIRE_GPU(1);
    run_mlir(CompNode::load("gpu0"));
}
#endif // MGB_JIT_MLIR
#endif // MGB_JIT
// vim: syntax=cpp.doxygen foldmethod=marker foldmarker=f{{{,f}}}
编辑
预览
Markdown
is supported
0%
请重试
或
添加新附件
.
添加附件
取消
You are about to add
0
people
to the discussion. Proceed with caution.
先完成此消息的编辑!
取消
想要评论请
注册
或
登录