Commit e954b8f9
Authored Jul 30, 2021 by Megvii Engine Team

feat(mgb/opr): let Subtensor support empty IO

GitOrigin-RevId: a768498104fbb6ece0b54f5fe4b901b07e2026ac
Parent: 1e83ab63
Showing 7 changed files with 120 additions and 14 deletions (+120, -14)
imperative/python/test/unit/core/test_indexing_op.py    +24  -0
src/core/impl/tensor.cpp                                 +24  -9
src/core/include/megbrain/tensor.h                        +7  -3
src/opr/impl/internal/indexing_helper.cpp                 +3  -1
src/opr/impl/tensor_manip.cpp                            +20  -1
src/opr/include/megbrain/opr/tensor_manip.h               +1  -0
src/opr/test/tensor_manip.cpp                            +41  -0
imperative/python/test/unit/core/test_indexing_op.py

@@ -15,6 +15,7 @@ from utils import make_tensor
 import megengine
 import megengine.core.tensor.megbrain_graph as G
 import megengine.functional as F
+import megengine.jit as jit
 from megengine.core._imperative_rt.core2 import apply
 from megengine.core._trace_option import use_symbolic_shape
 from megengine.core.ops import builtin
@@ -584,3 +585,26 @@ def test_advance_indexing_with_bool(test_varnode):
     np.testing.assert_equal(
         a[:, b, 0:2, [True, False]], aa[:, bb, 0:2, [True, False]].numpy()
     )
+
+
+@pytest.mark.parametrize("symbolic", [True, False, None])
+def test_subtensor_on_empty_tensor(symbolic):
+    np_x = np.array([], dtype=np.float32).reshape(10, 0, 10)
+    mge_x = megengine.tensor(np_x)
+
+    def run_test(fn):
+        out_ref = fn(np_x)
+        if symbolic is not None:
+            fn = jit.trace(symbolic=symbolic)(fn)
+        for i in range(3):
+            out = fn(mge_x)
+            np.testing.assert_equal(out.numpy(), out_ref)
+
+    run_test(lambda x: x[0:1, :, :])
+    run_test(lambda x: x[1:100:2, :, :])
+    run_test(lambda x: x[-10:5:2, :, :])
+    run_test(lambda x: x[5:1:-1, :, :])
+    run_test(lambda x: x[3, 10:1:1, 5])
+    run_test(lambda x: x[3, 10:1:1, 5:-1])
+    run_test(lambda x: x[:100, :100, :100])
+    run_test(lambda x: x[100:200, 300:400, 500:600])
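At the Python level, the behaviour the new test checks is simply that slicing an empty MegEngine tensor gives the same result (values and empty shape) as NumPy. A minimal sketch distilled from one of the parametrized cases above:

import numpy as np
import megengine

np_x = np.array([], dtype=np.float32).reshape(10, 0, 10)
mge_x = megengine.tensor(np_x)

# A strided slice over an empty tensor goes through Subtensor and now
# returns an empty result, which is what this commit enables.
out = mge_x[1:100:2, :, :]
print(out.numpy().shape)  # (5, 0, 10), matching np_x[1:100:2, :, :].shape
np.testing.assert_equal(out.numpy(), np_x[1:100:2, :, :])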
src/core/impl/tensor.cpp

@@ -133,27 +133,42 @@ SubTensorSpec Slice::apply(TensorLayout layout, int axis) const {
             return "None";
         return std::to_string(v.val());
     };
-    auto mod_size = [size_ax](ptrdiff_t v) {
+    auto mod_size = [size_ax](ptrdiff_t v) -> ptrdiff_t {
+        if (size_ax == 0)
+            return 0;
         return v < 0 ? v + size_ax : v;
     };
     MGB_MARK_USED_VAR(tostr);
-#define CHECK(cond) \
-    mgb_assert(cond, \
-            "index out of bound: layout=%s; request begin=%s end=%s step=%s " \
-            "axis=%d", \
-            layout.to_string().c_str(), tostr(m_begin).c_str(), \
-            tostr(m_end).c_str(), tostr(m_step).c_str(), axis)
+#define CHECK(cond) \
+    if (m_is_scalar_idx) { \
+        mgb_assert(cond, \
+                "index out of bound: layout=%s; request index=%s, axis=%d", \
+                layout.to_string().c_str(), tostr(m_begin).c_str(), axis); \
+    } else { \
+        mgb_assert(cond, \
+                "index out of bound: layout=%s; request begin=%s end=%s step=%s " \
+                "axis=%d", \
+                layout.to_string().c_str(), tostr(m_begin).c_str(), \
+                tostr(m_end).c_str(), tostr(m_step).c_str(), axis); \
+    }

     if (step > 0) {
         begin = mod_size(m_begin.val_with_default(0));
         end = mod_size(m_end.val_with_default(size_ax));
-        CHECK(begin >= 0 && end >= begin && end <= size_ax);
+        if (!m_is_scalar_idx) {
+            end = std::min(end, size_ax);
+            begin = std::min(begin, end);
+        }
+        CHECK(begin >= 0 && end >= begin && end <= size_ax)
     } else {
         begin = mod_size(m_begin.val_with_default(size_ax - 1));
         end = m_end.valid() ? mod_size(m_end.val()) : -1;
-        CHECK(step < 0 && begin >= 0 && end <= begin && begin < size_ax && end >= -1);
+        if (!m_is_scalar_idx) {
+            begin = std::min(begin, std::max<ptrdiff_t>(size_ax - 1, 0));
+            end = std::min(end, begin);
+        }
+        CHECK(step < 0 && begin >= 0 && end <= begin && begin < size_ax &&
+                end >= -1)
     }
     auto step_abs = std::abs(step);
     layout.shape[axis] = (std::abs(end - begin) + step_abs - 1) / step_abs;
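The net effect of the Slice::apply changes: for a non-scalar slice, out-of-range begin/end values are clamped instead of tripping the assertion, and an axis of size 0 maps every bound to 0. The following is a small Python sketch of the positive-step branch only (illustrative, not MegEngine code), cross-checked against Python's own slicing semantics:

# Sketch of the clamping now done in Slice::apply for a non-scalar slice
# with a positive step; size_ax is the length of the sliced axis.
def slice_len(size_ax, begin=None, end=None, step=1):
    def mod_size(v):
        if size_ax == 0:          # new: an empty axis maps every bound to 0
            return 0
        return v + size_ax if v < 0 else v

    assert step > 0               # only the step > 0 branch, for brevity
    b = mod_size(0 if begin is None else begin)
    e = mod_size(size_ax if end is None else end)
    e = min(e, size_ax)           # new: clamp end into the axis
    b = min(b, e)                 # new: clamp begin against the clamped end
    return (abs(e - b) + step - 1) // step

# Agrees with Python/NumPy slicing, including cases from the new tests.
for size, (b, e, s) in [(0, (1, 100, 2)), (0, (-10, 5, 2)),
                        (10, (100, None, 1)), (100, (0, -10, 2))]:
    assert slice_len(size, b, e, s) == len(range(size)[slice(b, e, s)])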
src/core/include/megbrain/tensor.h

@@ -83,16 +83,20 @@ class SubTensorSpec {
 /*!
  * \brief slice along some axis; index as in Python, with negative indices
- * supported
+ * supported. Scalar index can also be represented as a Slice, where
+ * m_begin = idx, m_end = idx+1 and m_step = 1. The flag m_is_scalar_idx
+ * indicates whether the Slice comes from a scalar index.
  */
 class Slice {
     Maybe<ptrdiff_t> m_begin, m_end, m_step;
+    bool m_is_scalar_idx;

     public:
         Slice(Maybe<ptrdiff_t> begin = None, Maybe<ptrdiff_t> end = None,
-                Maybe<ptrdiff_t> step = None):
-            m_begin{begin}, m_end{end}, m_step{step}
+                Maybe<ptrdiff_t> step = None, bool is_scalar_idx = false):
+            m_begin{begin}, m_end{end}, m_step{step},
+            m_is_scalar_idx{is_scalar_idx}
         {
         }

         /*!
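The new m_is_scalar_idx flag records whether a Slice was synthesized from a scalar index (idx, idx+1, step 1). The distinction matters because slices and scalar indices behave differently at the boundaries in Python-style indexing, and the clamping in Slice::apply must preserve that difference. A NumPy illustration of the semantics the flag encodes:

import numpy as np

x = np.arange(12).reshape(3, 4)

# A scalar index i selects the same data as the slice i:i+1, minus the axis.
assert (x[1] == x[1:2][0]).all()

# But out-of-range bounds are treated differently: a slice is clamped to an
# empty result, while a scalar index must still raise.
assert x[100:200].shape == (0, 4)
try:
    x[100]
except IndexError:
    pass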
src/opr/impl/internal/indexing_helper.cpp

@@ -178,7 +178,9 @@ SubTensorSpec FancyIndexingHelper::do_make_sub_spec(
                 i.axis.get_raw(), axis);
         prev_axis = axis;
         Maybe<ptrdiff_t> begin, end, step;
+        bool is_scalar_idx = false;
         if (i.idx.node()) {
+            is_scalar_idx = true;
             if (!m_require_scalar_index) {
                 continue;
             }
@@ -195,7 +197,7 @@ SubTensorSpec FancyIndexingHelper::do_make_sub_spec(
             step = next_iv();
         }
-        spec.merge_with(Slice(begin, end, step).apply(spec.layout(), axis));
+        spec.merge_with(Slice(begin, end, step, is_scalar_idx).apply(spec.layout(), axis));
     }

     mgb_assert(iv_iter == m_value_infer_result.end());
src/opr/impl/tensor_manip.cpp

@@ -660,7 +660,19 @@ MGB_IMPL_OPR_GRAD(AxisAddRemove) {
 /* f{{{ ======================= Subtensor ======================= */

-MGB_IMPL_FANCY_INDEXING_OPR_GET(Subtensor, "subtensor", true);
+Subtensor::Subtensor(VarNode *inp, const IndexDesc &desc,
+        const OperatorNodeConfig &config):
+    Super({inp->owner_graph(), config, "subtensor", {inp}}, inp, nullptr,
+            desc, true)
+{
+    output(0)->add_flag(VarNode::Flag::ALLOW_EMPTY_SHAPE);
+}
+
+SymbolVar Subtensor::make(SymbolVar inp, const IndexDesc &desc,
+        const OperatorNodeConfig &config) {
+    return inp.insert_single_output_opr<Subtensor>(inp.node(), desc, config);
+}
+
+MGB_DYN_TYPE_OBJ_FINAL_IMPL(Subtensor);

 #if MGB_ENABLE_GRAD
 MGB_IMPL_OPR_GRAD(Subtensor) {
@@ -722,6 +734,13 @@ void Subtensor::init_rt_force_dynamic_mem_alloc_imply_chain() {
         out->add_rt_force_dynamic_mem_alloc_imply_chain(inp);
     }
 }

+Subtensor::NodeProp* Subtensor::do_make_node_prop() const {
+    auto ret = Super::do_make_node_prop();
+    ret->add_dep_type_existing_var(input(0),
+            NodeProp::DepType::VALUE_ALLOW_EMPTY);
+    return ret;
+}
+
 // f}}}

 /* f{{{ ================== ModifySubtensorImplHelper ================== */
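ALLOW_EMPTY_SHAPE on the output var and the VALUE_ALLOW_EMPTY dependency type on the input are what let a compiled graph containing Subtensor execute with empty tensors instead of asserting. From Python, the graph path is exercised by tracing a slicing function, as the new unit test does; a minimal sketch based on that test:

import numpy as np
import megengine
import megengine.jit as jit

np_x = np.array([], dtype=np.float32).reshape(10, 0, 10)
mge_x = megengine.tensor(np_x)

# Traced (graph) execution of a Subtensor with empty input and output.
@jit.trace(symbolic=True)
def f(x):
    return x[3, 10:1:1, 5:-1]

for _ in range(3):
    out = f(mge_x)
np.testing.assert_equal(out.numpy(), np_x[3, 10:1:1, 5:-1])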
src/opr/include/megbrain/opr/tensor_manip.h

@@ -358,6 +358,7 @@ MGB_DEFINE_OPR_CLASS(Subtensor,
         void scn_do_execute() override;
         void mem_plan_fwd_in2out_readonly() override;
         void init_rt_force_dynamic_mem_alloc_imply_chain() override;
+        NodeProp* do_make_node_prop() const override;

     public:
         Subtensor(VarNode *inp, const IndexDesc &desc,
src/opr/test/tensor_manip.cpp

@@ -894,6 +894,47 @@ TEST(TestTensorManip, SubtensorIdxChange) {
     run(false);
 }

+TEST(TestTensorManip, SubtensorEmptyIO) {
+    using AIdx = opr::Subtensor::AxisIndexer;
+    using IndexDesc = std::vector<AIdx>;
+    using IndexDescCreater = thin_function<IndexDesc(SymbolVar)>;
+    HostTensorGenerator<> gen;
+    auto run = [&](const TensorShape& inp_shp, const TensorShape& out_shp,
+                   const IndexDescCreater& c) {
+        auto host_x = gen(inp_shp);
+        auto graph = ComputingGraph::make();
+        auto x = opr::Host2DeviceCopy::make(*graph, host_x);
+        auto y = opr::Subtensor::make(x, c(x));
+        HostTensorND host_y;
+        auto func = graph->compile({make_callback_copy(y, host_y)});
+        func->execute();
+        ASSERT_EQ(host_y.shape(), out_shp);
+        ASSERT_TRUE(host_y.empty());
+    };
+    // x.shape = {0}, x[:0]
+    run({0}, {0}, [&](SymbolVar x) -> IndexDesc {
+        return {AIdx::make_interval(0, None, x.make_scalar(0), None)};
+    });
+    // x.shape = {100, 0}, x[0:-10:2]
+    run({100, 0}, {45, 0}, [&](SymbolVar x) -> IndexDesc {
+        return {AIdx::make_interval(0, x.make_scalar(0), x.make_scalar(-10),
+                x.make_scalar(2))};
+    });
+    // x.shape = {100, 0}, x[10:-10:2, 0:0]
+    run({100, 0}, {40, 0}, [&](SymbolVar x) -> IndexDesc {
+        return {AIdx::make_interval(0, x.make_scalar(10), x.make_scalar(-10),
+                        x.make_scalar(2)),
+                AIdx::make_interval(1, x.make_scalar(0), x.make_scalar(0), None)};
+    });
+    // x.shape = {10, 0, 10}, x[5, 10:-10:-2]
+    run({10, 0, 10}, {0, 10}, [&](SymbolVar x) -> IndexDesc {
+        return {AIdx::make_index(0, x.make_scalar(5)),
+                AIdx::make_interval(1, x.make_scalar(10), x.make_scalar(-10),
+                        x.make_scalar(2))};
+    });
+    // x.shape = {10}, x[100:]
+    run({10}, {0}, [&](SymbolVar x) -> IndexDesc {
+        return {AIdx::make_interval(0, x.make_scalar(100), None, None)};
+    });
+}
+
 namespace {

 void test_subtensor_fwdonly(bool dyn_inp, bool dyn_idx) {
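The expected output shapes asserted in SubtensorEmptyIO follow directly from Python slicing semantics; for instance, with x.shape = {100, 0}, x[10:-10:2] keeps len(range(100)[10:-10:2]) = 40 rows along the first axis. The cases above can be checked against NumPy for reference (using the steps actually passed in the IndexDesc):

import numpy as np

assert np.empty((0,))[:0].shape == (0,)
assert np.empty((100, 0))[0:-10:2].shape == (45, 0)
assert np.empty((100, 0))[10:-10:2, 0:0].shape == (40, 0)
assert np.empty((10, 0, 10))[5, 10:-10:2].shape == (0, 10)
assert np.empty((10,))[100:].shape == (0,)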