MegEngine 天元 / MegEngine · Commit 787f187e

Authored by Megvii Engine Team on Jun 10, 2021
Committed by huangxinda on Jul 19, 2021
fix(imperative/src): fix dot backward error
GitOrigin-RevId: 02ba44a0e6d8cd2ca863ae0058542b260e0b755d
Parent: f35687ca
Showing 2 changed files with 45 additions and 12 deletions (+45 -12)
imperative/python/test/unit/core/test_autodiff.py    +15 -0
imperative/src/impl/ops/tensor_manip.cpp             +30 -12
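Taken together, the change teaches GetVarShape to accept multiple inputs and deduce their broadcast shape, which the backward pass of F.dot needs. A minimal reproducer through the public API, mirroring the regression test added below (the GradManager usage here is an illustration, not taken from this commit):

import numpy as np
import megengine as mge
import megengine.functional as F
from megengine.autodiff import GradManager

# Scalar y = u^T (X v); before this fix, taking its gradient w.r.t. X is the
# kind of dot-backward case the commit title refers to (assumed reproducer).
x = mge.Tensor(np.random.rand(2, 2).astype("float32"))
u = F.ones((2,))
v = F.ones((2,))

gm = GradManager().attach([x])
with gm:
    y = F.dot(u, F.matmul(x, v))
    gm.backward(y)

print(x.grad.numpy())  # expected: a 2x2 matrix of ones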
imperative/python/test/unit/core/test_autodiff.py
@@ -442,3 +442,18 @@ def test_removeAxis():
     grad(y, F.ones_like(y))
     np.testing.assert_equal(
         np.ones((3, 3, 1, 1), dtype=np.float32), x.grad.numpy())
+
+
+def test_dot():
+    x = np.random.rand(2, 2).astype("float32")
+    x = mge.Tensor(x)
+    u = F.ones((2,))
+    v = F.ones((2,))
+    grad = Grad().wrt(x, callback=save_to(x))
+
+    def f(x):
+        return F.dot(u, F.matmul(x, v))
+
+    y = f(x)
+    grad(y, F.ones_like(y))
+    np.testing.assert_equal(np.ones((2, 2), dtype=np.float32), x.grad.numpy())
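The new test encodes the gradient identity for y = u^T (X v): dy/dX = u v^T (outer product), and with u and v both all-ones vectors of length 2 the expected gradient is a 2x2 matrix of ones. A standalone NumPy check of that identity, independent of MegEngine and for illustration only:

import numpy as np

u = np.ones((2,), dtype=np.float32)
v = np.ones((2,), dtype=np.float32)
x = np.random.rand(2, 2).astype("float32")

# Numerical gradient of y = u @ (x @ v) w.r.t. x, via central differences.
eps = 1e-3
num_grad = np.zeros_like(x)
for i in range(2):
    for j in range(2):
        xp, xm = x.copy(), x.copy()
        xp[i, j] += eps
        xm[i, j] -= eps
        num_grad[i, j] = (u @ (xp @ v) - u @ (xm @ v)) / (2 * eps)

# Analytic gradient: outer(u, v), i.e. ones((2, 2)) for all-ones u and v.
np.testing.assert_allclose(num_grad, np.outer(u, v), rtol=1e-3, atol=1e-3)

Assuming the suite follows the usual pytest layout, the new case should be runnable with: pytest imperative/python/test/unit/core/test_autodiff.py -k test_dot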
imperative/src/impl/ops/tensor_manip.cpp
@@ -33,7 +33,7 @@ DispatchMode decide_dispatch_mode(
         const SmallVector<LogicalTensorDesc>& inputs) {
     bool host_computable = true;
     for (auto&& inp : inputs) {
-        // FIXME(czh): remove value chech after proxy graph's
+        // FIXME(czh): remove value check after proxy graph's
         // apply_on_device_tensornd is supported and output Tensor
         // is made before add_task.
         // then if layout is valid, ptr->layout must be ready
@@ -50,9 +50,18 @@ void apply_on_device_tensornd(
         const SmallVector<DeviceTensorND>& inputs,
         SmallVector<DeviceTensorND>* outputs) {
     auto&& op_def = def.cast_final_safe<GetVarShape>();
-    mgb_assert(inputs.size() == 1, "GetVarShape take 1 input, got %lu", inputs.size());
-    auto&& inp = inputs[0];
-    auto&& shp = inp.layout();
+    TensorShape shp;
+    if (inputs.size() == 1) {
+        shp = inputs[0].layout();
+    } else {
+        TensorShapeArray src(inputs.size());
+        for (size_t i = 0; i < inputs.size(); ++i) {
+            src[i] = inputs[i].layout();
+        }
+        megdnn::Elemwise::deduce_shape(src, shp);
+    }
     mgb_assert(shp.ndim != 0, "input shape invalid");
     mgb_assert((*outputs)[0].comp_node() == CompNode::default_cpu(),
         "GetVarShape's apply_on_device_tensornd should receive default_cpu outputs.");
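The key change: apply_on_device_tensornd no longer asserts a single input; with several inputs it collects their layouts and lets megdnn::Elemwise::deduce_shape produce the combined shape. Assuming deduce_shape follows NumPy-style broadcasting (an assumption, based on how elemwise shape deduction is normally defined), a minimal Python sketch of that rule:

# Hypothetical re-implementation of NumPy-style broadcast-shape deduction,
# for illustration only; the real work is done by megdnn::Elemwise::deduce_shape.
def deduce_broadcast_shape(shapes):
    ndim = max(len(s) for s in shapes)
    out = [1] * ndim
    for s in shapes:
        # Right-align each shape against the output, as broadcasting does.
        padded = (1,) * (ndim - len(s)) + tuple(s)
        for i, d in enumerate(padded):
            if d == 1 or d == out[i]:
                continue
            if out[i] == 1:
                out[i] = d
            else:
                raise ValueError(f"shapes not broadcastable: {shapes}")
    return tuple(out)

assert deduce_broadcast_shape([(2, 1), (1, 3)]) == (2, 3)
assert deduce_broadcast_shape([(2,), (2, 2)]) == (2, 2)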
@@ -99,27 +108,36 @@ std::tuple<SmallVector<LogicalTensorDesc>, bool> infer_output_attrs_fallible(
         const OpDef& def,
         const SmallVector<LogicalTensorDesc>& inputs) {
     auto&& op_def = def.cast_final_safe<GetVarShape>();
-    mgb_assert(inputs.size() == 1, "GetVarShape take 1 input, got %lu", inputs.size());
     auto&& desc = inputs[0];
-    if (!desc.layout.ndim) {
+    TensorShape shp;
+    if (inputs.size() == 1) {
+        shp = desc.layout;
+    } else {
+        TensorShapeArray src(inputs.size());
+        for (size_t i = 0; i < inputs.size(); ++i) {
+            src[i] = inputs[i].layout;
+        }
+        megdnn::Elemwise::deduce_shape(src, shp);
+    }
+    if (!shp.ndim) {
         return {{{TensorLayout(dtype::Int32()), desc.comp_node}}, false};
     }
     DeviceTensorND value;
     if (op_def.axis == opr::GetVarShape::Param::INVALID_AXIS) {
-        value = DeviceTensorND(CompNode::default_cpu(), {desc.layout.ndim}, dtype::Int32());
+        value = DeviceTensorND(CompNode::default_cpu(), {shp.ndim}, dtype::Int32());
         auto* ptr = value.ptr<dt_int32>();
-        for (size_t i = 0; i < desc.layout.ndim; ++i) {
-            ptr[i] = desc.layout[i];
+        for (size_t i = 0; i < shp.ndim; ++i) {
+            ptr[i] = shp[i];
         }
     } else {
         int32_t axis = op_def.axis;
         if (axis < 0) {
-            axis += desc.layout.ndim;
+            axis += shp.ndim;
         }
-        mgb_assert(axis >= 0 && axis < (int32_t)desc.layout.ndim);
+        mgb_assert(axis >= 0 && axis < (int32_t)shp.ndim);
         value = DeviceTensorND(CompNode::default_cpu(), {1}, dtype::Int32());
         auto* ptr = value.ptr<dt_int32>();
-        ptr[0] = desc.layout[axis];
+        ptr[0] = shp[axis];
     }
     return {{{value.layout(), desc.comp_node, std::move(value)}}, true};
 }
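infer_output_attrs_fallible is generalized the same way: first deduce the result shape (a single input's layout, or the broadcast of all of them), then either materialize the whole shape vector (INVALID_AXIS) or the single extent along axis, wrapping negative axes. A small Python sketch of that value path; the names are illustrative, not MegEngine API, and np.broadcast_shapes stands in for megdnn::Elemwise::deduce_shape:

import numpy as np  # np.broadcast_shapes stands in for megdnn::Elemwise::deduce_shape

def get_var_shape_value(shapes, axis=None):
    # Deduce the result shape: one layout, or the broadcast of all input
    # layouts (mirroring the branch added in the C++ code above).
    shp = tuple(shapes[0]) if len(shapes) == 1 else np.broadcast_shapes(*shapes)
    if axis is None:          # models GetVarShape::Param::INVALID_AXIS
        return list(shp)      # full shape vector, one entry per dimension
    if axis < 0:
        axis += len(shp)      # wrap negative axes, as the C++ code does
    assert 0 <= axis < len(shp)
    return [shp[axis]]        # single entry: the extent along `axis`

assert get_var_shape_value([(2, 1), (1, 3)]) == [2, 3]
assert get_var_shape_value([(2, 1), (1, 3)], axis=-1) == [3]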