Commit 66c514ce (unverified)
Authored Jan 19, 2021 by Zhang Ting; committed via GitHub on Jan 19, 2021
Parent: 7a0a576e

[2.0 API] device guard (#30307)

* add 2.0 API: device_guard
Showing 3 changed files with 78 additions and 102 deletions (+78 -102)
python/paddle/fluid/framework.py                             +14 -13
python/paddle/fluid/tests/unittests/test_device_guard.py     +63 -89
python/paddle/static/__init__.py                             +1 -0
python/paddle/fluid/framework.py

...
@@ -5740,27 +5740,28 @@ def device_guard(device=None):
     Examples:
         .. code-block:: python

-            import paddle.fluid as fluid
+            import paddle

-            support_gpu = fluid.is_compiled_with_cuda()
-            place = fluid.CPUPlace()
+            paddle.enable_static()
+            support_gpu = paddle.is_compiled_with_cuda()
+            place = paddle.CPUPlace()
             if support_gpu:
-                place = fluid.CUDAPlace(0)
+                place = paddle.CUDAPlace(0)

             # if GPU is supported, the three OPs below will be automatically assigned to CUDAPlace(0)
-            data1 = fluid.layers.fill_constant(shape=[1, 3, 8, 8], value=0.5, dtype='float32')
-            data2 = fluid.layers.fill_constant(shape=[1, 3, 5, 5], value=0.5, dtype='float32')
-            shape = fluid.layers.shape(data2)
+            data1 = paddle.full(shape=[1, 3, 8, 8], fill_value=0.5, dtype='float32')
+            data2 = paddle.full(shape=[1, 3, 64], fill_value=0.5, dtype='float32')
+            shape = paddle.shape(data2)

-            with fluid.device_guard("cpu"):
+            with paddle.static.device_guard("cpu"):
                 # Ops created here will be placed on CPUPlace
-                shape = fluid.layers.slice(shape, axes=[0], starts=[0], ends=[4])
-            with fluid.device_guard('gpu'):
+                shape = paddle.slice(shape, axes=[0], starts=[0], ends=[4])
+            with paddle.static.device_guard('gpu'):
                 # if GPU is supported, OPs created here will be placed on CUDAPlace(0), otherwise on CPUPlace
-                out = fluid.layers.crop_tensor(data1, shape=shape)
+                out = paddle.reshape(data1, shape=shape)

-            exe = fluid.Executor(place)
-            exe.run(fluid.default_startup_program())
+            exe = paddle.static.Executor(place)
+            exe.run(paddle.static.default_startup_program())
             result = exe.run(fetch_list=[out])
     """
...
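For reference, the added (+) lines above assemble into the following standalone script; this is only the post-commit docstring example collected in one place, not an additional change (the final print is added here for illustration). The example now calls paddle.enable_static() up front, since Paddle 2.0 defaults to dynamic-graph mode while device_guard annotates the ops of a static program.

import paddle

paddle.enable_static()
support_gpu = paddle.is_compiled_with_cuda()
place = paddle.CPUPlace()
if support_gpu:
    place = paddle.CUDAPlace(0)

# if GPU is supported, the three OPs below will be automatically assigned to CUDAPlace(0)
data1 = paddle.full(shape=[1, 3, 8, 8], fill_value=0.5, dtype='float32')
data2 = paddle.full(shape=[1, 3, 64], fill_value=0.5, dtype='float32')
shape = paddle.shape(data2)

with paddle.static.device_guard("cpu"):
    # Ops created here will be placed on CPUPlace
    shape = paddle.slice(shape, axes=[0], starts=[0], ends=[4])
with paddle.static.device_guard('gpu'):
    # if GPU is supported, OPs created here will be placed on CUDAPlace(0), otherwise on CPUPlace
    out = paddle.reshape(data1, shape=shape)

exe = paddle.static.Executor(place)
exe.run(paddle.static.default_startup_program())
result = exe.run(fetch_list=[out])
# the reshape target comes from the sliced shape tensor, so this should print (1, 3, 64)
print(result[0].shape)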
python/paddle/fluid/tests/unittests/test_device_guard.py

...
@@ -18,17 +18,18 @@ import unittest
 from op_test import OpTest
 import numpy as np
 import paddle
 import paddle.fluid as fluid
 import paddle.fluid.core as core
 import warnings


 def execute(main_program, startup_program):
-    if core.is_compiled_with_cuda():
-        place = core.CUDAPlace(0)
+    if paddle.is_compiled_with_cuda():
+        place = paddle.CUDAPlace(0)
     else:
-        place = core.CPUPlace()
-    exe = fluid.Executor(place)
+        place = paddle.CPUPlace()
+    exe = paddle.static.Executor(place)
     exe.run(startup_program)
     exe.run(main_program)
...
@@ -43,18 +44,17 @@ def get_vaild_warning_num(warning, w):
 class TestDeviceGuard(unittest.TestCase):
     def test_device_guard(self):
-        main_program = fluid.Program()
-        startup_program = fluid.Program()
-        with fluid.program_guard(main_program, startup_program):
-            data1 = fluid.layers.fill_constant(shape=[1, 3, 8, 8], value=0.5, dtype='float32')
-            data2 = fluid.layers.fill_constant(shape=[1, 3, 5, 5], value=0.5, dtype='float32')
-            shape = fluid.layers.shape(data2)
-            with fluid.device_guard("cpu"):
-                shape = fluid.layers.slice(shape, axes=[0], starts=[0], ends=[4])
-            with fluid.device_guard("gpu"):
+        main_program = paddle.static.Program()
+        startup_program = paddle.static.Program()
+        with paddle.static.program_guard(main_program, startup_program):
+            data1 = paddle.full(shape=[1, 3, 8, 8], fill_value=0.5, dtype='float32')
+            data2 = paddle.full(shape=[1, 3, 5, 5], fill_value=0.5, dtype='float32')
+            shape = paddle.shape(data2)
+            with paddle.static.device_guard("cpu"):
+                shape = paddle.slice(shape, axes=[0], starts=[0], ends=[4])
+            with paddle.static.device_guard("gpu"):
                 out = fluid.layers.crop_tensor(data1, shape=shape)

             # check if the device attr is set correctly
             all_ops = main_program.global_block().ops
...
@@ -68,18 +68,17 @@ class TestDeviceGuard(unittest.TestCase):
         execute(main_program, startup_program)

     def test_device_guard_with_id(self):
-        main_program = fluid.Program()
-        startup_program = fluid.Program()
-        with fluid.program_guard(main_program, startup_program):
-            data1 = fluid.layers.fill_constant(shape=[1, 3, 8, 8], value=0.5, dtype='float32')
-            data2 = fluid.layers.fill_constant(shape=[1, 3, 5, 5], value=0.5, dtype='float32')
-            shape = fluid.layers.shape(data2)
-            with fluid.device_guard("cpu"):
-                shape = fluid.layers.slice(shape, axes=[0], starts=[0], ends=[4])
-            with fluid.device_guard("gpu:1"):
+        main_program = paddle.static.Program()
+        startup_program = paddle.static.Program()
+        with paddle.static.program_guard(main_program, startup_program):
+            data1 = paddle.full(shape=[1, 3, 8, 8], fill_value=0.5, dtype='float32')
+            data2 = paddle.full(shape=[1, 3, 5, 5], fill_value=0.5, dtype='float32')
+            shape = paddle.shape(data2)
+            with paddle.static.device_guard("cpu"):
+                shape = paddle.slice(shape, axes=[0], starts=[0], ends=[4])
+            with paddle.static.device_guard("gpu:1"):
                 out = fluid.layers.crop_tensor(data1, shape=shape)

             # check if the device attr is set correctly
             all_ops = main_program.global_block().ops
...
@@ -93,23 +92,22 @@ class TestDeviceGuard(unittest.TestCase):
         execute(main_program, startup_program)

     def test_cpu_only_op(self):
-        main_program = fluid.Program()
-        startup_program = fluid.Program()
-        with fluid.program_guard(main_program, startup_program):
-            x = fluid.layers.fill_constant(shape=[2, 255, 13, 13], value=0.3, dtype='float32')
-            gt_box = fluid.layers.fill_constant(shape=[2, 6, 4], value=0.5, dtype='float32')
-            gt_label = fluid.layers.fill_constant(shape=[2, 6], value=1.0, dtype='int32')
-            gt_score = fluid.layers.fill_constant(shape=[2, 6], value=0.5, dtype='float32')
+        main_program = paddle.static.Program()
+        startup_program = paddle.static.Program()
+        with paddle.static.program_guard(main_program, startup_program):
+            x = paddle.full(shape=[2, 255, 13, 13], fill_value=0.3, dtype='float32')
+            gt_box = paddle.full(shape=[2, 6, 4], fill_value=0.5, dtype='float32')
+            gt_label = paddle.full(shape=[2, 6], fill_value=1.0, dtype='int32')
+            gt_score = paddle.full(shape=[2, 6], fill_value=0.5, dtype='float32')
             anchors = [10, 13, 16, 30, 33, 23, 30, 61, 62, 45, 59, 119, 116, 90, 156, 198, 373, 326]
             anchor_mask = [0, 1, 2]
-            with fluid.device_guard("gpu"):
+            with paddle.static.device_guard("gpu"):
                 # yolov3_loss only has cpu kernel, so its cpu kernel will be executed
                 loss = fluid.layers.yolov3_loss(x=x,
...
@@ -125,20 +123,19 @@ class TestDeviceGuard(unittest.TestCase):
         execute(main_program, startup_program)

     def test_without_kernel_op(self):
-        main_program = fluid.Program()
-        startup_program = fluid.Program()
-        with fluid.program_guard(main_program, startup_program):
-            i = fluid.layers.fill_constant(shape=[1], dtype='int64', value=0)
-            loop_len = fluid.layers.fill_constant(shape=[1], dtype='int64', value=10)
-            cond = fluid.layers.less_than(x=i, y=loop_len)
+        main_program = paddle.static.Program()
+        startup_program = paddle.static.Program()
+        with paddle.static.program_guard(main_program, startup_program):
+            i = paddle.full(shape=[1], dtype='int64', fill_value=0)
+            loop_len = paddle.full(shape=[1], dtype='int64', fill_value=10)
+            cond = paddle.less_than(x=i, y=loop_len)

             with warnings.catch_warnings(record=True) as w:
                 warnings.simplefilter("always")
-                with fluid.device_guard("cpu"):
+                with paddle.static.device_guard("cpu"):
                     while_op = fluid.layers.While(cond=cond)
                     with while_op.block():
-                        i = fluid.layers.increment(x=i, value=1, in_place=True)
+                        i = paddle.increment(x=i, value=1)
                         fluid.layers.less_than(x=i, y=loop_len, cond=cond)

         warning = "The Op(while) is not support to set device."
...
@@ -155,55 +152,32 @@ class TestDeviceGuard(unittest.TestCase):
     def test_error(self):
         def device_attr():
-            with fluid.device_guard("cpu1"):
-                out = fluid.layers.fill_constant(shape=[1], value=0.2, dtype='float32')
+            with paddle.static.device_guard("cpu1"):
+                out = paddle.full(shape=[1], fill_value=0.2, dtype='float32')

         def device_attr2():
-            with fluid.device_guard("cpu:1"):
-                out = fluid.layers.fill_constant(shape=[1], value=0.2, dtype='float32')
+            with paddle.static.device_guard("cpu:1"):
+                out = paddle.full(shape=[1], fill_value=0.2, dtype='float32')

         self.assertRaises(ValueError, device_attr)
         self.assertRaises(ValueError, device_attr2)

-    def test_warning(self):
-        main_program = fluid.Program()
-        startup_program = fluid.Program()
-        with fluid.program_guard(main_program, startup_program):
-            with warnings.catch_warnings(record=True) as w:
-                warnings.simplefilter("always")
-                with fluid.device_guard("gpu"):
-                    x = fluid.layers.fill_constant(shape=[1], value=3.0, dtype='float32', force_cpu=True)
-                    y = fluid.layers.fill_constant(shape=[1], value=4.0, dtype='float32')
-                    result = fluid.layers.less_than(x=x, y=y, force_cpu=False)
-
-        warning = "\'device_guard\' has higher priority when they are used at the same time."
-        warning_num = get_vaild_warning_num(warning, w)
-        assert warning_num == 2
-
-        all_ops = main_program.global_block().ops
-        device_attr_name = core.op_proto_and_checker_maker.kOpDeviceAttrName()
-        for op in all_ops:
-            self.assertEqual(op.desc.attr(device_attr_name), "gpu")
-
     # check if op_descs have op_device attr
     def test_op_descs_device_attr(self):
-        main_program = fluid.Program()
-        startup_program = fluid.Program()
-        with fluid.program_guard(main_program, startup_program):
-            data1 = fluid.layers.data(name="data_1", shape=[2], dtype="float32")
-            data2 = fluid.layers.data(name="data_2", shape=[2], dtype="float32")
-            label = fluid.layers.data(name="label", shape=[1], dtype="int64")
-            fc1 = fluid.layers.fc(input=data1, size=10)
-            fc2 = fluid.layers.fc(input=fc1, size=10)
-            with fluid.device_guard("gpu"):
-                out = fluid.layers.softmax_with_cross_entropy(
+        main_program = paddle.static.Program()
+        startup_program = paddle.static.Program()
+        with paddle.static.program_guard(main_program, startup_program):
+            data1 = paddle.static.data(name="data_1", shape=[4, 2], dtype="float32")
+            label = paddle.static.data(name="label", shape=[4, 1], dtype="int64")
+            fc1 = paddle.static.nn.fc(x=data1, size=10)
+            fc2 = paddle.static.nn.fc(x=fc1, size=10)
+            with paddle.static.device_guard("gpu"):
+                out = paddle.nn.functional.softmax_with_cross_entropy(
                     logits=fc1 + fc2, label=label)
-                loss = fluid.layers.mean(out)
-                opt = fluid.optimizer.SGDOptimizer(0.1)
+                loss = paddle.mean(out)
+                opt = paddle.optimizer.SGD(0.1)
                 opt.minimize(loss)

         all_ops = main_program.global_block().ops
...
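The assertions sit in the collapsed ("...") parts of these hunks; as the removed test_warning body shows, placement is verified by reading each op's device attribute via core.op_proto_and_checker_maker.kOpDeviceAttrName() and op.desc.attr(...). Below is a minimal sketch of that check against the 2.0-style API; the tiny program it builds is illustrative and not taken from the test file.

import paddle
import paddle.fluid.core as core

paddle.enable_static()

main_program = paddle.static.Program()
startup_program = paddle.static.Program()
with paddle.static.program_guard(main_program, startup_program):
    data = paddle.full(shape=[1, 3, 8, 8], fill_value=0.5, dtype='float32')
    with paddle.static.device_guard("cpu"):
        shape = paddle.shape(data)

# Every op records its target device in a dedicated attribute;
# kOpDeviceAttrName() returns that attribute's registered name.
device_attr_name = core.op_proto_and_checker_maker.kOpDeviceAttrName()
for op in main_program.global_block().ops:
    # ops created outside any device_guard are expected to carry an empty device string
    print(op.type, repr(op.desc.attr(device_attr_name)))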
python/paddle/static/__init__.py

...
@@ -72,6 +72,7 @@ from ..fluid.compiler import CompiledProgram  #DEFINE_ALIAS
 from ..fluid.compiler import ExecutionStrategy  #DEFINE_ALIAS
 from ..fluid.framework import default_main_program  #DEFINE_ALIAS
 from ..fluid.framework import default_startup_program  #DEFINE_ALIAS
+from ..fluid.framework import device_guard  #DEFINE_ALIAS
 from ..fluid.framework import Program  #DEFINE_ALIAS
 from ..fluid.framework import name_scope  #DEFINE_ALIAS
 from ..fluid.framework import program_guard  #DEFINE_ALIAS
...
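This single alias line is what the updated docstring and tests rely on: it makes the context manager reachable from the public paddle.static namespace. A minimal usage sketch (the full() call mirrors the one in test_error above):

import paddle
from paddle.static import device_guard  # exposed by the line added above

paddle.enable_static()
with device_guard("cpu"):
    x = paddle.full(shape=[1], fill_value=0.2, dtype='float32')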