Commit af149f25 (unverified)
Authored Apr 15, 2020 by gfwm0502; committed by GitHub on Apr 15, 2020
OP(compare/get_places/shrink_rnn_memory) error message enhancement (#23780)
As the title.
Parent commit: 47629418
Showing 6 changed files with 80 additions and 18 deletions (+80 -18)
paddle/fluid/operators/controlflow/compare_op.cc (+1 -1)
paddle/fluid/operators/controlflow/get_places_op.cc (+6 -4)
paddle/fluid/operators/shrink_rnn_memory_op.cc (+23 -11)
python/paddle/fluid/layers/control_flow.py (+3 -0)
python/paddle/fluid/tests/unittests/test_get_places_op.py (+15 -1)
python/paddle/fluid/tests/unittests/test_shrink_rnn_memory.py (+32 -1)
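All six files follow the same recipe: a bare-string `PADDLE_ENFORCE(cond, "msg")` check becomes `PADDLE_ENFORCE_NOT_NULL` or `OP_INOUT_CHECK` paired with a typed error from `platform::errors::*`, so a failure reports an error class plus the exact variable and operator involved. The block below is only a minimal, self-contained sketch of that idea in plain C++; `ENFORCE_NOT_NULL` and `NotFound` here are illustrative stand-ins, not Paddle's actual macro or error implementations.

```cpp
#include <cstdio>
#include <stdexcept>
#include <string>

// Stand-in for platform::errors::NotFound: the message is prefixed with an
// error class so the caller can tell what kind of failure occurred.
struct NotFound : std::runtime_error {
  explicit NotFound(const std::string& msg)
      : std::runtime_error("NotFoundError: " + msg) {}
};

// Stand-in for PADDLE_ENFORCE_NOT_NULL: instead of a bare "must be set"
// string, the call site supplies a typed error naming the variable and op.
#define ENFORCE_NOT_NULL(ptr, error)     \
  do {                                   \
    if ((ptr) == nullptr) throw (error); \
  } while (0)

int main() {
  const float* x_var = nullptr;  // pretend scope.FindVar(Input("X")) failed
  try {
    ENFORCE_NOT_NULL(x_var,
                     NotFound("Input(X) of ShrinkRNNMemoryOp is not found."));
  } catch (const std::exception& e) {
    // Prints: NotFoundError: Input(X) of ShrinkRNNMemoryOp is not found.
    std::printf("%s\n", e.what());
  }
  return 0;
}
```

The message format "Input(X) of ShrinkRNNMemoryOp is not found." mirrors the strings introduced in the diffs below.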
paddle/fluid/operators/controlflow/compare_op.cc

@@ -81,7 +81,7 @@ class CompareOp : public framework::OperatorWithKernel {
   void InferShape(framework::InferShapeContext *context) const override {
     OpComment comment;
     OP_INOUT_CHECK(context->HasInput("X"), "Input", "X", comment.type);
-    OP_INOUT_CHECK(context->HasInput("Y"), "Output", "Y", comment.type);
+    OP_INOUT_CHECK(context->HasInput("Y"), "Input", "Y", comment.type);
     auto dim_x = context->GetInputDim("X");
     auto dim_y = context->GetInputDim("Y");
paddle/fluid/operators/controlflow/get_places_op.cc

@@ -51,8 +51,9 @@ class GetPlacesOp : public framework::OperatorBase {
       device_count =
           is_gpu ? CUDADevCount() : std::thread::hardware_concurrency();
     }
-    PADDLE_ENFORCE_NE(device_count, 0UL, "Cannot indicate %s device count",
-                      is_gpu ? "GPU" : "CPU");
+    PADDLE_ENFORCE_NE(device_count, 0UL,
+                      platform::errors::InvalidArgument(
+                          "Cannot indicate %s device count", is_gpu ? "GPU" : "CPU"));
 
     auto out_var_name = Output("Out");
     auto &places = *(GET_DATA_SAFELY(scope.FindVar(out_var_name), "Output",
@@ -61,8 +62,9 @@ class GetPlacesOp : public framework::OperatorBase {
     places.reserve(device_count);
     if (is_gpu) {
       PADDLE_ENFORCE_LE(device_count, CUDADevCount(),
-                        "Only %d CUDA devices found, cannot set to %d",
-                        CUDADevCount(), device_count);
+                        platform::errors::InvalidArgument(
+                            "Only %d CUDA devices found, cannot set to %d",
+                            CUDADevCount(), device_count));
       for (size_t i = 0; i < device_count; ++i) {
         places.emplace_back(platform::CUDAPlace(static_cast<int>(i)));
       }
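Both hunks keep the printf-style message but wrap it in `platform::errors::InvalidArgument(...)`, so the formatted arguments travel together with an error type instead of as a trailing string. Below is a rough, self-contained analogue of that formatting step in plain C++; `InvalidArgument` and its `Format` helper are illustrative stand-ins, not Paddle's implementation.

```cpp
#include <cstdio>
#include <stdexcept>
#include <string>

// Stand-in for platform::errors::InvalidArgument: format arguments are folded
// into the message up front, and the result carries an error-class prefix.
struct InvalidArgument : std::runtime_error {
  template <typename... Args>
  explicit InvalidArgument(const char* fmt, Args... args)
      : std::runtime_error(Format(fmt, args...)) {}

 private:
  template <typename... Args>
  static std::string Format(const char* fmt, Args... args) {
    char buf[256];
    std::snprintf(buf, sizeof(buf), fmt, args...);
    return std::string("InvalidArgumentError: ") + buf;
  }
};

int main() {
  int found_devices = 2;  // pretend CUDADevCount() returned 2
  int requested = 8;      // pretend device_count was set to 8
  try {
    if (requested > found_devices) {
      throw InvalidArgument("Only %d CUDA devices found, cannot set to %d",
                            found_devices, requested);
    }
  } catch (const std::exception& e) {
    // Prints: InvalidArgumentError: Only 2 CUDA devices found, cannot set to 8
    std::printf("%s\n", e.what());
  }
  return 0;
}
```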
paddle/fluid/operators/shrink_rnn_memory_op.cc

@@ -31,11 +31,16 @@ class ShrinkRNNMemoryOp : public ArrayOp {
   void RunImpl(const framework::Scope &scope,
                const platform::Place &place) const override {
     auto *x_var = scope.FindVar(Input("X"));
-    PADDLE_ENFORCE(x_var != nullptr, "Input X must be set");
+    PADDLE_ENFORCE_NOT_NULL(x_var,
+                            platform::errors::NotFound(
+                                "Input(X) of ShrinkRNNMemoryOp is not found."));
     auto &x_tensor = x_var->Get<framework::LoDTensor>();
     size_t offset = this->GetOffset(scope, place);
     auto *rank_table_var = scope.FindVar(Input("RankTable"));
-    PADDLE_ENFORCE(rank_table_var != nullptr, "RankTable must be set");
+    PADDLE_ENFORCE_NOT_NULL(
+        rank_table_var,
+        platform::errors::NotFound(
+            "Input(RankTable) of ShrinkRNNMemoryOp is not found."));
     auto &rank_table = rank_table_var->Get<framework::LoDRankTable>();
     auto &rank_items = rank_table.items();
@@ -46,7 +51,9 @@ class ShrinkRNNMemoryOp : public ArrayOp {
         rank_items.begin();
 
     auto *out_var = scope.FindVar(Output("Out"));
-    PADDLE_ENFORCE(out_var != nullptr, "Output(Out) must be set.");
+    PADDLE_ENFORCE_NOT_NULL(out_var,
+                            platform::errors::NotFound(
+                                "Output(Out) of ShrinkRNNMemoryOp is not found."));
     auto &out_tensor = *out_var->GetMutable<framework::LoDTensor>();
 
     size_t height = dst_num_rows;
@@ -96,9 +103,10 @@ batch size for the next time step.
 class ShrinkRNNMemoryInferShape : public framework::InferShapeBase {
  public:
   void operator()(framework::InferShapeContext *context) const override {
-    PADDLE_ENFORCE(context->HasInput("X"));
-    PADDLE_ENFORCE(context->HasInput("I"));
-    PADDLE_ENFORCE(context->HasInput("RankTable"));
+    OP_INOUT_CHECK(context->HasInput("X"), "Input", "X", "ShrinkRNNMemory");
+    OP_INOUT_CHECK(context->HasInput("I"), "Input", "I", "ShrinkRNNMemory");
+    OP_INOUT_CHECK(context->HasInput("RankTable"), "Input", "RankTable",
+                   "ShrinkRNNMemory");
     context->SetOutputDim("Out", context->GetInputDim("X"));
     // For runtime, output's lod is computed according to input's lod, but
     // remove the finished sequence. It is set in detail kernel implementation.
@@ -121,10 +129,13 @@ class ShrinkRNNMemoryGradOp : public ArrayOp {
                const platform::Place &place) const override {
     auto *dout_var = scope.FindVar(Input(framework::GradVarName("Out")));
     auto *dx_var = scope.FindVar(Output(framework::GradVarName("X")));
-    PADDLE_ENFORCE(dx_var != nullptr, "Input Gradient should not be nullptr");
+    PADDLE_ENFORCE_NOT_NULL(
+        dx_var, platform::errors::NotFound(
+                    "Input(X@GRAD) of ShrinkRNNMemoryGradOp is not found."));
     auto *x_var = scope.FindVar(Input("X"));
-    PADDLE_ENFORCE(x_var != nullptr);
+    PADDLE_ENFORCE_NOT_NULL(
+        x_var, platform::errors::NotFound(
+                   "Input(x) of ShrinkRNNMemoryGradOp is not found."));
     auto &x_tensor = x_var->Get<framework::LoDTensor>();
     auto &dx_tensor = *dx_var->GetMutable<framework::LoDTensor>();
     dx_tensor.Resize(x_tensor.dims());
@@ -154,8 +165,9 @@ class ShrinkRNNMemoryGradOp : public ArrayOp {
 class ShrinkRNNMemoryGradInferShape : public framework::InferShapeBase {
  public:
   void operator()(framework::InferShapeContext *context) const override {
-    PADDLE_ENFORCE(context->HasInput("X"));
-    PADDLE_ENFORCE(context->HasOutput(framework::GradVarName("X")));
+    OP_INOUT_CHECK(context->HasInput("X"), "Input", "X", "ShrinkRNNMemoryGrad");
+    OP_INOUT_CHECK(context->HasOutput(framework::GradVarName("X")), "Output",
+                   "X", "ShrinkRNNMemoryGrad");
     context->ShareDim("X", /*->*/ framework::GradVarName("X"));
     context->ShareLoD("X", /*->*/ framework::GradVarName("X"));
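The `InferShape` hunks above replace unconditional `PADDLE_ENFORCE(context->HasInput(...))` calls with `OP_INOUT_CHECK(check, role, name, op_type)`, which folds the argument role ("Input"/"Output"), the argument name, and the operator type into the failure message. A minimal sketch of a check with that shape, as plain self-contained C++ (`OpInOutCheck` is a hypothetical stand-in, not the real macro):

```cpp
#include <cstdio>
#include <stdexcept>
#include <string>

// Stand-in for OP_INOUT_CHECK: when the presence check fails, the error spells
// out the role ("Input"/"Output"), the argument name, and the operator type.
void OpInOutCheck(bool has_arg, const std::string& role,
                  const std::string& name, const std::string& op_type) {
  if (!has_arg) {
    throw std::runtime_error("NotFoundError: No " + role + "(" + name +
                             ") found for operator " + op_type + ".");
  }
}

int main() {
  bool has_rank_table = false;  // pretend context->HasInput("RankTable") failed
  try {
    OpInOutCheck(has_rank_table, "Input", "RankTable", "ShrinkRNNMemory");
  } catch (const std::exception& e) {
    // Prints: NotFoundError: No Input(RankTable) found for operator ShrinkRNNMemory.
    std::printf("%s\n", e.what());
  }
  return 0;
}
```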
python/paddle/fluid/layers/control_flow.py

@@ -1816,6 +1816,9 @@ def shrink_memory(x, i, table):
         usage.
     """
     helper = LayerHelper('shrink_memory', **locals())
+    check_type(x, 'x', Variable, 'shrink_memory')
+    check_type(i, 'i', Variable, 'shrink_memory')
+    check_type(table, 'table', Variable, 'shrink_memory')
     out = helper.create_variable_for_type_inference(dtype=x.dtype)
     helper.append_op(
         type='shrink_rnn_memory',
python/paddle/fluid/tests/unittests/test_get_places_op.py

@@ -15,6 +15,7 @@
 from __future__ import print_function
 import paddle.fluid as fluid
+import paddle.fluid.core as core
 from paddle.fluid.layers.device import get_places
 from decorator_helper import prog_scope
 import unittest
@@ -22,13 +23,26 @@ import unittest
 class TestGetPlaces(unittest.TestCase):
     @prog_scope()
-    def test_get_places(self):
+    def check_get_cpu_places(self):
         places = get_places()
         cpu = fluid.CPUPlace()
         exe = fluid.Executor(cpu)
         exe.run(fluid.default_main_program())
         self.assertEqual(places.type, fluid.core.VarDesc.VarType.PLACE_LIST)
 
+    @prog_scope()
+    def check_get_gpu_places(self):
+        places = get_places(device_type='CUDA')
+        gpu = fluid.CUDAPlace(0)
+        exe = fluid.Executor(gpu)
+        exe.run(fluid.default_main_program())
+        self.assertEqual(places.type, fluid.core.VarDesc.VarType.PLACE_LIST)
+
+    def test_main(self):
+        if core.is_compiled_with_cuda():
+            self.check_get_gpu_places()
+        self.check_get_cpu_places()
+
 
 if __name__ == '__main__':
     unittest.main()
python/paddle/fluid/tests/unittests/test_shrink_rnn_memory.py

@@ -20,7 +20,7 @@ from paddle.fluid.executor import Executor
 import paddle.fluid.layers as layers
 from paddle.fluid.backward import append_backward
 from paddle.fluid.framework import default_main_program, switch_main_program
-from paddle.fluid.framework import Program
+from paddle.fluid.framework import Program, program_guard
 import numpy as np
 from paddle.fluid.layers.control_flow import shrink_memory
@@ -104,5 +104,36 @@ class TestShrinkRNNMemoryNoLoD(TestShrinkRNNMemoryBase):
         self.assertAlmostEqual(1.0, self.sum_lodtensor(outs[3]), delta=0.01)
 
 
+class TestShrinkRNNMemoryOpError(unittest.TestCase):
+    def test_erroes(self):
+        with program_guard(Program(), Program()):
+            x = layers.zeros(dtype='int64', shape=[3, 100])
+            i = layers.zeros(dtype='int64', shape=[1])
+            rank_table_tensor = core.LoDTensor()
+            rank_table_tensor.set_recursive_sequence_lengths([[1, 2, 3]])
+            rank_table_tensor.set(
+                np.random.random(size=(6, 1)).astype('float32'),
+                core.CPUPlace())
+            rank_table = np.random.random(size=(6, 1)).astype('float32')
+
+            # The type of x in shrink_rnn_memory must be Variable.
+            def test_x_type():
+                out = shrink_memory(x=1, i=i, table=rank_table_tensor)
+
+            self.assertRaises(TypeError, test_x_type)
+
+            # The type of i in shrink_rnn_memory must be Variable.
+            def test_i_type():
+                out = shrink_memory(x=x, i=0, table=rank_table_tensor)
+
+            self.assertRaises(TypeError, test_i_type)
+
+            # The type of table in shrink_rnn_memory must be Variable.
+            def test_table_type():
+                out = shrink_memory(x=x, i=i, table=rank_table)
+
+            self.assertRaises(TypeError, test_table_type)
+
+
 if __name__ == '__main__':
     unittest.main()