Skip to content
体验新版
项目
组织
正在加载...
登录
切换导航
打开侧边栏
OpenDILab开源决策智能平台
DI-treetensor
提交
3d94d2ee
D
DI-treetensor
项目概览
OpenDILab开源决策智能平台
/
DI-treetensor
大约 1 年 前同步成功
通知
44
Star
172
Fork
11
代码
文件
提交
分支
Tags
贡献者
分支图
Diff
Issue
0
列表
看板
标记
里程碑
合并请求
0
DevOps
流水线
流水线任务
计划
Wiki
0
Wiki
分析
仓库
DevOps
项目成员
Pages
D
DI-treetensor
项目概览
项目概览
详情
发布
仓库
仓库
文件
提交
分支
标签
贡献者
分支图
比较
Issue
0
Issue
0
列表
看板
标记
里程碑
合并请求
0
合并请求
0
Pages
DevOps
DevOps
流水线
流水线任务
计划
分析
分析
仓库分析
DevOps
Wiki
0
Wiki
成员
成员
收起侧边栏
关闭侧边栏
动态
分支图
创建新Issue
流水线任务
提交
Issue看板
体验新版 GitCode,发现更多精彩内容 >>
提交
3d94d2ee
编写于
12月 27, 2021
作者:
HansBug
😆
浏览文件
操作
浏览文件
下载
差异文件
Merge branch 'dev-unet-demo' of github.com:opendilab/DI-treetensor into dev-unet-demo
上级
2b8d5da0
8bfe7024
变更
2
隐藏空白更改
内联
并排
Showing
2 changed files
with
123 additions
and
1 deletion
+123
-1
test/variable_size/test_sequence.py
test/variable_size/test_sequence.py
+91
-0
test/variable_size/test_unet.py
test/variable_size/test_unet.py
+32
-1
未找到文件。
test/variable_size/test_sequence.py
0 → 100644
浏览文件 @
3d94d2ee
import
time
import
numpy
as
np
import
torch
import
nestedtensor
from
ding.torch_utils
import
Transformer
def generate_variable_sequence(N, M, sample_range):
    """Build ``N`` zero tensors of shape ``(1, L, M)`` with random lengths.

    Each length ``L`` is drawn independently via
    ``np.random.randint(*sample_range)``, so the returned sequences vary in
    their second dimension while sharing batch size 1 and feature size ``M``.
    """
    sequences = []
    for _ in range(N):
        length = np.random.randint(*sample_range)
        sequences.append(torch.zeros(1, length, M))
    return sequences
def naive_method(model, data, cuda, test_loop=3):
    """Time ``test_loop`` passes that feed the samples through one at a time.

    Prints the mean and max wall-clock duration over the timed loops, then
    returns the outputs of the final pass together with the list of per-loop
    durations (seconds).
    """
    timings = []
    for _ in range(test_loop):
        tick = time.time()
        with torch.no_grad():
            output = [model(sample) for sample in data]
        if cuda:
            # Block until queued GPU kernels finish so the timing is honest.
            torch.cuda.synchronize()
        timings.append(time.time() - tick)
    print('naive_method test time avg: {}, max: {}'.format(np.mean(timings), np.max(timings)))
    return output, timings
def padding_method(model, data, cuda, test_loop=3):
    """Time ``test_loop`` batched passes that pad all sequences to one length.

    Every ``(1, n_i, M)`` sample is padded with zeros up to the longest
    sequence in ``data`` so the model can process a single ``(B, max_n, M)``
    batch with a boolean validity ``mask``.  Prints the mean and max loop
    duration, then returns the de-padded outputs of the last pass (one
    ``(1, n_i, ...)`` tensor per input) and the list of per-loop durations.
    """
    result = []
    for _ in range(test_loop):
        t_start = time.time()
        with torch.no_grad():
            # padding: grow every sequence to the longest length in the batch
            max_n = max([d.shape[1] for d in data])
            new_data = torch.zeros(len(data), max_n, data[0].shape[-1]).to(data[0].device)
            mask = torch.zeros(len(data), max_n)
            for i in range(len(data)):
                # BUGFIX: copy the actual sequence into the padded buffer —
                # previously new_data stayed all zeros and the model was
                # benchmarked on meaningless input (the unet variant of this
                # benchmark does perform this copy).
                new_data[i, :data[i].shape[1]] = data[i].squeeze(0)
                # mark the valid (non-padding) positions of sample i
                mask[i, :data[i].shape[1]].add_(1)
            mask = mask.bool().to(data[0].device)
            padding_output = model(new_data, mask=mask)
            output = []
            for i in range(len(data)):
                # strip the padding back off so outputs align with inputs
                output.append(padding_output[i, :data[i].shape[1]].unsqueeze(0))
        if cuda:
            torch.cuda.synchronize()
        t_end = time.time()
        result.append(t_end - t_start)
    print('padding_method test time avg: {}, max: {}'.format(np.mean(result), np.max(result)))
    return output, result
def nestedtensor_method(model, data, cuda, test_loop=3):
    """Time ``test_loop`` passes using a ``nestedtensor`` batch.

    Currently disabled: it always raises ``NotImplementedError`` because the
    nestedtensor library does not yet support the ops the model needs.
    """
    raise NotImplementedError("nestedtensor doesn't support chunk op now")
    # NOTE: the code below is unreachable until nestedtensor support lands;
    # it is kept as a reference implementation.
    result = []
    data = nestedtensor.nested_tensor([sample.squeeze(0) for sample in data])
    for _ in range(test_loop):
        tick = time.time()
        with torch.no_grad():
            output = model(data)
        if cuda:
            torch.cuda.synchronize()
        result.append(time.time() - tick)
    print('nestedtensor_method test time avg: {}, max: {}'.format(np.mean(result), np.max(result)))
    output = [item.unsqueeze(0) for item in output]
    return output, result
def main(cuda):
    """Run the variable-length-sequence benchmarks on a Transformer model.

    Generates a fixed random dataset (seeded for reproducibility), warms the
    model up, then times the naive per-sample method and the padded-batch
    method.  ``cuda`` moves both model and data onto the GPU first.
    """
    batch_size, feature_dim = 64, 128
    length_range = [32, 64]
    np.random.seed(0)
    data = generate_variable_sequence(batch_size, feature_dim, length_range)
    model = Transformer(input_dim=feature_dim)
    print(model)
    if cuda:
        model.cuda()
        data = [sample.cuda() for sample in data]
    # warm up: let lazy initialisation finish before anything is timed
    for _ in range(10):
        with torch.no_grad():
            model(data[0])
    naive_output, naive_result = naive_method(model, data, cuda)
    padding_output, padding_result = padding_method(model, data, cuda)
    # nest_output, nest_result = nestedtensor_method(model, data, cuda)
# Script entry point: run the benchmark suite on CPU by default.
if __name__ == "__main__":
    main(cuda=False)
test/variable_size/test_unet.py
浏览文件 @
3d94d2ee
...
@@ -34,6 +34,36 @@ def naive_method(model, data, cuda, test_loop=3):
...
@@ -34,6 +34,36 @@ def naive_method(model, data, cuda, test_loop=3):
return
output
,
result
return
output
,
result
def padding_method(model, data, cuda, test_loop=3):
    """Time ``test_loop`` batched passes that pad variable-size images.

    Each ``(C, h_i, w_i)`` image is placed bottom-right-aligned on a zero
    canvas of size ``(C, max_h, max_w)`` so the model can run one batched
    forward pass; the outputs are cropped back to the original spatial
    sizes.  Prints the mean and max loop duration, then returns the cropped
    outputs of the last pass and the list of per-loop durations (seconds).
    """
    result = []
    for _ in range(test_loop):
        t_start = time.time()
        with torch.no_grad():
            # padding: grow every image to the largest H and W in the batch
            max_h = max([d.shape[-2] for d in data])
            max_w = max([d.shape[-1] for d in data])
            # Generalized: read the channel count from the data instead of
            # hard-coding 3, so non-RGB inputs also work (unchanged for RGB).
            num_channels = data[0].shape[-3]
            new_data = torch.zeros(len(data), num_channels, max_h, max_w).to(data[0].device)
            # offsets that anchor each image at the bottom-right corner
            start_h = [max_h - d.shape[-2] for d in data]
            start_w = [max_w - d.shape[-1] for d in data]
            for i in range(len(data)):
                new_data[i, :, start_h[i]:, start_w[i]:] = data[i]
            padding_output = model(new_data)
            output = []
            for i in range(len(data)):
                # crop each result back to its original spatial extent
                output.append(
                    padding_output[i, :, start_h[i]:start_h[i] + data[i].shape[-2],
                                   start_w[i]:start_w[i] + data[i].shape[-1]]
                )
        if cuda:
            torch.cuda.synchronize()
        t_end = time.time()
        result.append(t_end - t_start)
    print('padding_method test time avg: {}, max: {}'.format(np.mean(result), np.max(result)))
    return output, result
def
nestedtensor_method
(
model
,
data
,
cuda
,
test_loop
=
3
):
def
nestedtensor_method
(
model
,
data
,
cuda
,
test_loop
=
3
):
result
=
[]
result
=
[]
data
=
nestedtensor
.
nested_tensor
([
d
.
squeeze
(
0
)
for
d
in
data
])
data
=
nestedtensor
.
nested_tensor
([
d
.
squeeze
(
0
)
for
d
in
data
])
...
@@ -74,7 +104,8 @@ def main(cuda):
...
@@ -74,7 +104,8 @@ def main(cuda):
same_output
,
same_result
=
same_test
(
model
,
data
,
cuda
)
same_output
,
same_result
=
same_test
(
model
,
data
,
cuda
)
naive_output
,
naive_result
=
naive_method
(
model
,
data
,
cuda
)
naive_output
,
naive_result
=
naive_method
(
model
,
data
,
cuda
)
assert
len
(
naive_output
)
==
B
assert
len
(
naive_output
)
==
B
,
len
(
naive_output
)
padding_output
,
padding_result
=
padding_method
(
model
,
data
,
cuda
)
nest_output
,
nest_result
=
nestedtensor_method
(
model
,
data
,
cuda
)
nest_output
,
nest_result
=
nestedtensor_method
(
model
,
data
,
cuda
)
print
(
naive_output
[
0
][
0
,
0
,
0
,
:
10
])
print
(
naive_output
[
0
][
0
,
0
,
0
,
:
10
])
print
(
nest_output
[
0
][
0
,
0
,
0
,
:
10
])
print
(
nest_output
[
0
][
0
,
0
,
0
,
:
10
])
...
...
编辑
预览
Markdown
is supported
0%
请重试
或
添加新附件
.
添加附件
取消
You are about to add
0
people
to the discussion. Proceed with caution.
先完成此消息的编辑!
取消
想要评论请
注册
或
登录