Skip to content
体验新版
项目
组织
正在加载...
登录
切换导航
打开侧边栏
MegEngine 天元
MegEngine
提交
a78c1109
MegEngine
项目概览
MegEngine 天元
/
MegEngine
大约 1 年 前同步成功
通知
396
Star
4705
Fork
582
代码
文件
提交
分支
Tags
贡献者
分支图
Diff
Issue
0
列表
看板
标记
里程碑
合并请求
0
DevOps
流水线
流水线任务
计划
Wiki
0
Wiki
分析
仓库
DevOps
项目成员
Pages
MegEngine
项目概览
项目概览
详情
发布
仓库
仓库
文件
提交
分支
标签
贡献者
分支图
比较
Issue
0
Issue
0
列表
看板
标记
里程碑
合并请求
0
合并请求
0
Pages
DevOps
DevOps
流水线
流水线任务
计划
分析
分析
仓库分析
DevOps
Wiki
0
Wiki
成员
成员
收起侧边栏
关闭侧边栏
动态
分支图
创建新Issue
流水线任务
提交
Issue看板
体验新版 GitCode,发现更多精彩内容 >>
提交
a78c1109
编写于
12月 14, 2020
作者:
M
Megvii Engine Team
浏览文件
操作
浏览文件
下载
电子邮件补丁
差异文件
fix(imperative): add param(axis) for GetVarShape
GitOrigin-RevId: 0b8f821929dc2ad640ac8c5d0a6c13bad519a952
上级
cde9727a
变更
2
隐藏空白更改
内联
并排
Showing
2 changed files
with
30 additions
and
20 deletions
+30
-20
imperative/src/impl/ops/tensor_manip.cpp
imperative/src/impl/ops/tensor_manip.cpp
+29
-19
src/core/include/megbrain/ir/ops.td
src/core/include/megbrain/ir/ops.td
+1
-1
未找到文件。
imperative/src/impl/ops/tensor_manip.cpp
浏览文件 @
a78c1109
...
...
@@ -20,22 +20,30 @@ namespace {
// Build the graph-mode GetVarShape operator from the imperative op
// definition. The op's param (which carries the optional `axis`) is
// forwarded to opr::GetVarShape::make so a single-axis query survives
// the translation to the computing graph.
cg::OperatorNodeBase* apply_on_var_node(
        const OpDef& def,
        const VarNodeArray& inputs) {
    auto&& op_def = def.cast_final_safe<GetVarShape>();
    return opr::GetVarShape::make(inputs, op_def.param()).node()->owner_opr();
}
// Eagerly evaluate GetVarShape on a concrete tensor.
//
// def:    must be a GetVarShape op; its param().axis selects behavior.
// inputs: exactly one tensor whose layout must already be known.
//
// Returns a single int32 host tensor:
//   - axis == INVALID_AXIS: 1-D tensor of length ndim holding the full shape;
//   - otherwise: 1-element tensor holding the extent of that axis
//     (asserts axis < ndim).
SmallVector<TensorPtr> apply_on_physical_tensor(
        const OpDef& def,
        const SmallVector<TensorPtr>& inputs) {
    auto&& op_def = def.cast_final_safe<GetVarShape>();
    mgb_assert(inputs.size() == 1, "GetVarShape take 1 input, got %lu", inputs.size());
    auto&& inp = inputs[0];
    auto&& shp = inp->layout();
    mgb_assert(shp.ndim != 0, "input shape invalid");
    HostTensorND hv;
    if (op_def.axis == opr::GetVarShape::Param::INVALID_AXIS) {
        // No axis requested: materialize the whole shape vector.
        hv = HostTensorND(inp->comp_node(), {shp.ndim}, dtype::Int32());
        auto* ptr = hv.ptr<dt_int32>();
        for (size_t i = 0; i < shp.ndim; ++i) {
            ptr[i] = shp.shape[i];
        }
    } else {
        // Single-axis query: result is a scalar-like 1-element tensor.
        mgb_assert(op_def.axis < shp.ndim);
        hv = HostTensorND(inp->comp_node(), {1}, dtype::Int32());
        auto* ptr = hv.ptr<dt_int32>();
        ptr[0] = shp.shape[op_def.axis];
    }
    return {Tensor::make(std::move(hv))};
}
...
...
@@ -43,29 +51,31 @@ SmallVector<TensorPtr> apply_on_physical_tensor(
// Infer the output descriptor (and, when possible, the constant value)
// of GetVarShape without executing it.
//
// def:    must be a GetVarShape op; its param().axis selects behavior.
// inputs: exactly one logical tensor descriptor.
//
// Returns {descs, validated}. If the input layout is still unknown
// (ndim == 0) only the dtype/comp_node are reported and no value is
// attached. Otherwise the shape (or the single selected axis extent,
// asserting axis < ndim) is precomputed into an int32 tensor on the
// default CPU comp node and returned as a known value.
std::tuple<SmallVector<LogicalTensorDesc>, bool> infer_output_attrs_fallible(
        const OpDef& def,
        const SmallVector<LogicalTensorDesc>& inputs) {
    auto&& op_def = def.cast_final_safe<GetVarShape>();
    mgb_assert(inputs.size() == 1, "GetVarShape take 1 input, got %lu", inputs.size());
    auto&& desc = inputs[0];
    if (!desc.layout.ndim) {
        // Input shape not known yet: dtype is fixed (Int32) but the
        // value cannot be inferred at this point.
        return {{{TensorLayout(dtype::Int32()), desc.comp_node}}, true};
    }
    DeviceTensorND value;
    if (op_def.axis == opr::GetVarShape::Param::INVALID_AXIS) {
        // Full shape vector of length ndim.
        value = DeviceTensorND(CompNode::default_cpu(), {desc.layout.ndim}, dtype::Int32());
        auto* ptr = value.ptr<dt_int32>();
        for (size_t i = 0; i < desc.layout.ndim; ++i) {
            ptr[i] = desc.layout[i];
        }
    } else {
        // Single-axis query: 1-element tensor.
        mgb_assert(op_def.axis < desc.layout.ndim);
        value = DeviceTensorND(CompNode::default_cpu(), {1}, dtype::Int32());
        auto* ptr = value.ptr<dt_int32>();
        ptr[0] = desc.layout[op_def.axis];
    }
    return {{{value.layout(), desc.comp_node, std::move(value)}}, true};
}
// Convert a graph-mode opr::GetVarShape node back into an imperative
// OpDef, preserving its param (axis).
//
// If the node's OperatorNodeConfig carries extra information that the
// imperative GetVarShape cannot represent (an explicit comp_node or an
// output dtype), fall back to the generic OprAttr representation so no
// configuration is silently dropped.
std::shared_ptr<OpDef> make_from_op_node(cg::OperatorNodeBase* node_) {
    auto* node = &node_->cast_final_safe<opr::GetVarShape>();
    if (node->config().comp_node().size() ||
        node->config().output_dtype().valid()) {
        mgb_log_debug("weird GetVarShape");
        return OpTrait::find_by_typeinfo(OprAttr::typeinfo())->make_from_op_node(node);
    }
    // The axis param is now representable, so it is carried over directly.
    return GetVarShape::make(node->param());
}
OP_TRAIT_REG
(
GetVarShape
,
GetVarShape
,
opr
::
GetVarShape
)
...
...
src/core/include/megbrain/ir/ops.td
浏览文件 @
a78c1109
...
...
@@ -122,7 +122,7 @@ def Eye: MgbHashableOp<"Eye", [EyeParam]> {
);
}
// GetVarShape returns a tensor's shape; OptionalAxisV1Param adds an
// `axis` field (INVALID_AXIS selects the full shape, a valid index
// selects that single dimension's extent).
def GetVarShape : MgbHashableOp<"GetVarShape", [OptionalAxisV1Param]>;
def Concat: MgbHashableOp<"Concat", [AxisParam]> {
let extraArguments = (ins
...
...
编辑
预览
Markdown
is supported
0%
请重试
或
添加新附件
.
添加附件
取消
You are about to add
0
people
to the discussion. Proceed with caution.
先完成此消息的编辑!
取消
想要评论请
注册
或
登录