PaddlePaddle / Paddle
Commit a164b10d (unverified)
Authored Apr 23, 2020 by 0YuanZhang0; committed via GitHub on Apr 23, 2020
Parent: 652e804b

API/OP error message enhancement (#23717)

* test=develop
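Every hunk in this commit follows the same pattern: a bare PADDLE_ENFORCE(cond, "msg") becomes a typed comparison macro (PADDLE_ENFORCE_EQ/_NE/_GE/_GT/_LT) whose message is wrapped in a platform::errors category — NotFound for missing inputs/outputs, InvalidArgument for bad shapes or values — and interpolates the offending value with printf-style placeholders. Below is a minimal standalone sketch of the idea; EnforceEq and ErrorType are hypothetical stand-ins for Paddle's real macros and error classes, not its API.

// A minimal standalone sketch of the error-message enhancement pattern,
// assuming a hypothetical EnforceEq helper and ErrorType enum in place of
// Paddle's PADDLE_ENFORCE_EQ macro and platform::errors classes.
#include <iostream>
#include <sstream>
#include <stdexcept>
#include <string>

enum class ErrorType { kNotFound, kInvalidArgument };

template <typename A, typename B>
void EnforceEq(const A& a, const B& b, ErrorType type, const std::string& msg) {
  if (a == b) return;
  std::ostringstream os;
  os << (type == ErrorType::kNotFound ? "NotFound: " : "InvalidArgument: ")
     << msg << " (expected " << b << ", received " << a << ")";
  throw std::runtime_error(os.str());
}

int main() {
  int second_dim = 3;
  try {
    // Old style reported a fixed string; the enhanced style carries an
    // error category plus the value actually observed.
    EnforceEq(second_dim, 1, ErrorType::kInvalidArgument,
              "the 2nd dimension of Input(MaxProbs) should be 1");
  } catch (const std::runtime_error& e) {
    std::cout << e.what() << "\n";
  }
  return 0;
}

The payoff is in the failure report: it now states what was expected and what was received, instead of a fixed message.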
Showing 18 changed files with 459 additions and 114 deletions (+459 −114)
paddle/fluid/operators/metrics/precision_recall_op.cc                   +63 −27
paddle/fluid/operators/metrics/precision_recall_op.h                    +22 −5
paddle/fluid/operators/sequence_ops/sequence_concat_op.cc               +19 −6
paddle/fluid/operators/sequence_ops/sequence_concat_op.h                +21 −7
paddle/fluid/operators/sequence_ops/sequence_pad_op.cc                  +55 −24
paddle/fluid/operators/sequence_ops/sequence_pad_op.h                   +2 −1
paddle/fluid/operators/sequence_ops/sequence_reshape_op.cc              +20 −10
paddle/fluid/operators/sequence_ops/sequence_reshape_op.h               +20 −9
paddle/fluid/operators/sequence_ops/sequence_reverse_op.h               +25 −9
paddle/fluid/operators/sequence_ops/sequence_unpad_op.cc                +29 −11
paddle/fluid/operators/truncated_gaussian_random_op.cc                  +10 −5
python/paddle/fluid/layers/nn.py                                        +9 −0
python/paddle/fluid/layers/sequence_lod.py                              +23 −0
python/paddle/fluid/tests/unittests/sequence/test_sequence_concat.py    +38 −0
python/paddle/fluid/tests/unittests/sequence/test_sequence_pad_op.py    +31 −0
python/paddle/fluid/tests/unittests/sequence/test_sequence_reshape.py   +22 −0
python/paddle/fluid/tests/unittests/sequence/test_sequence_reverse.py   +17 −0
python/paddle/fluid/tests/unittests/sequence/test_sequence_unpad_op.py  +33 −0
paddle/fluid/operators/metrics/precision_recall_op.cc

@@ -22,18 +22,30 @@ class PrecisionRecallOp : public framework::OperatorWithKernel {
   using framework::OperatorWithKernel::OperatorWithKernel;

   void InferShape(framework::InferShapeContext *ctx) const override {
-    PADDLE_ENFORCE(ctx->HasInput("MaxProbs"),
-                   "Input(MaxProbs) should not be null.");
-    PADDLE_ENFORCE(ctx->HasInput("Indices"),
-                   "Input(Indices) should not be null.");
-    PADDLE_ENFORCE(ctx->HasInput("Labels"),
-                   "Input(Labels) should not be null.");
-    PADDLE_ENFORCE(ctx->HasOutput("BatchMetrics"),
-                   "Output(BatchMetrics) should not be null.");
-    PADDLE_ENFORCE(ctx->HasOutput("AccumMetrics"),
-                   "Output(AccumMetrics) should not be null.");
-    PADDLE_ENFORCE(ctx->HasOutput("AccumStatesInfo"),
-                   "Output(AccumStatesInfo) should not be null.");
+    PADDLE_ENFORCE_EQ(ctx->HasInput("MaxProbs"), true,
+                      platform::errors::NotFound(
+                          "PrecisionRecallOp Input(MaxProbs) should not be null."));
+    PADDLE_ENFORCE_EQ(ctx->HasInput("Indices"), true,
+                      platform::errors::NotFound(
+                          "PrecisionRecallOp Input(Indices) should not be null."));
+    PADDLE_ENFORCE_EQ(ctx->HasInput("Labels"), true,
+                      platform::errors::NotFound(
+                          "PrecisionRecallOp Input(Labels) should not be null."));
+    PADDLE_ENFORCE_EQ(ctx->HasOutput("BatchMetrics"), true,
+                      platform::errors::NotFound(
+                          "PrecisionRecallOp Output(BatchMetrics) should not be null."));
+    PADDLE_ENFORCE_EQ(ctx->HasOutput("AccumMetrics"), true,
+                      platform::errors::NotFound(
+                          "PrecisionRecallOp Output(AccumMetrics) should not be null."));
+    PADDLE_ENFORCE_EQ(ctx->HasOutput("AccumStatesInfo"), true,
+                      platform::errors::NotFound(
+                          "PrecisionRecallOp Output(AccumStatesInfo) should not be null."));

     int64_t cls_num = static_cast<int64_t>(ctx->Attrs().Get<int>("class_number"));

@@ -42,37 +54,61 @@ class PrecisionRecallOp : public framework::OperatorWithKernel {
     if (ctx->IsRuntime()) {
-      PADDLE_ENFORCE_EQ(max_probs_dims[1], 1,
-                        "Each instance contains one max probability, so the "
-                        "shape of Input(MaxProbs) should be [batch_size, 1].");
+      PADDLE_ENFORCE_EQ(max_probs_dims[1], 1,
+                        platform::errors::InvalidArgument(
+                            "Each instance of PrecisionRecallOp "
+                            "Input(MaxProbs) contains one max probability, "
+                            "the shape of Input(MaxProbs) should be "
+                            "[batch_size, 1], the 2nd dimension of "
+                            "Input(MaxProbs) should be 1. But the 2nd "
+                            "dimension we received is %d",
+                            max_probs_dims[1]));
-      PADDLE_ENFORCE_EQ(
-          ctx->GetInputDim("Indices"), max_probs_dims,
-          "The shape of Input(Indices) should bes same with max_probs_dims");
+      PADDLE_ENFORCE_EQ(
+          ctx->GetInputDim("Indices"), max_probs_dims,
+          platform::errors::InvalidArgument(
+              "The shape of PrecisionRecallOp Input(Indices) should be same "
+              "with "
+              "max_probs_dims. But received the shape of Input(Indices) is "
+              "[%d, %d], max_probs_dims is [%d, %d]",
+              ctx->GetInputDim("Indices")[0], ctx->GetInputDim("Indices")[1],
+              max_probs_dims[0], max_probs_dims[1]));
-      PADDLE_ENFORCE_EQ(max_probs_dims[0], labels_dims[0],
-                        "The 1st dimension of Input(MaxProbs) and "
-                        "Input(Labels) both are batch_size and the shape should "
-                        "be the same.");
+      PADDLE_ENFORCE_EQ(
+          max_probs_dims[0], labels_dims[0],
+          platform::errors::InvalidArgument(
+              "The 1st dimension of PrecisionRecallOp Input(MaxProbs) and "
+              "Input(Labels) both should be batch_size"
+              "But the 1st dimension we received max_probs_dims[0] = %d, "
+              "labels_dims[0] = %d",
+              max_probs_dims[0], labels_dims[0]));
-      PADDLE_ENFORCE_EQ(labels_dims[1], 1,
-                        "The 2nd dimension of Input(Labels) contains instance "
-                        "label and the shape should be equal to 1.");
+      PADDLE_ENFORCE_EQ(labels_dims[1], 1,
+                        platform::errors::InvalidArgument(
+                            "The 2nd dimension of PrecisionRecallOp "
+                            "Input(Labels) contains instance label and "
+                            "the shape should be equal to 1. But the 2nd "
+                            "dimension we received is %d",
+                            labels_dims[1]));
     }
     if (ctx->HasInput("Weights")) {
       auto weights_dims = ctx->GetInputDim("Weights");
       if (ctx->IsRuntime()) {
-        PADDLE_ENFORCE_EQ(weights_dims,
-                          framework::make_ddim({max_probs_dims[0], 1}),
-                          "The shape of Input(Weights) should be "
-                          "[batch_size, 1].");
+        PADDLE_ENFORCE_EQ(
+            weights_dims, framework::make_ddim({max_probs_dims[0], 1}),
+            platform::errors::InvalidArgument(
+                "The shape of PrecisionRecallOp Input(Weights) should be "
+                "[batch_size, 1]. But the shape we received is [%d, %d]",
+                weights_dims[0], weights_dims[1]));
       }
     }
     if (ctx->HasInput("StatesInfo")) {
       auto states_dims = ctx->GetInputDim("StatesInfo");
       if (ctx->IsRuntime()) {
-        PADDLE_ENFORCE_EQ(states_dims, framework::make_ddim({cls_num, 4}),
-                          "The shape of Input(StatesInfo) should be "
-                          "[class_number, 4].");
+        PADDLE_ENFORCE_EQ(
+            states_dims, framework::make_ddim({cls_num, 4}),
+            platform::errors::InvalidArgument(
+                "The shape of PrecisionRecallOp Input(StatesInfo) should be "
+                "[class_number, 4]. But the shape we received is [%d, %d]",
+                states_dims[0], states_dims[1]));
       }
     }
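A note on the hunks above: the shape comparisons run only inside ctx->IsRuntime(). At graph-construction time a dimension such as batch_size may still be the placeholder -1, so equality checks against concrete sizes are deferred to execution. A minimal sketch of that guard, assuming -1 as the "not yet inferred" sentinel (MaxProbsShapeOk is an illustrative helper, not Paddle API):

// Sketch of the IsRuntime() guard: before execution a dimension may be -1
// ("not yet inferred"), so comparing it against a concrete size would raise
// spurious errors; the check is deferred until dims are concrete.
#include <iostream>
#include <vector>

bool MaxProbsShapeOk(const std::vector<long>& dims, bool is_runtime) {
  if (!is_runtime) return true;  // dims may still contain -1; defer the check
  return dims.size() == 2 && dims[1] == 1;  // expect [batch_size, 1]
}

int main() {
  std::vector<long> compile_time = {-1, 1};  // batch size unknown so far
  std::vector<long> run_time = {32, 1};      // concrete at execution
  std::cout << MaxProbsShapeOk(compile_time, false) << "\n";  // 1 (deferred)
  std::cout << MaxProbsShapeOk(run_time, true) << "\n";       // 1 (checked)
  return 0;
}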
paddle/fluid/operators/metrics/precision_recall_op.h

@@ -58,11 +58,28 @@ class PrecisionRecallKernel : public framework::OpKernel<T> {
       size_t idx = ids_data[i];
       size_t label = labels_data[i];
-      PADDLE_ENFORCE(idx >= 0 && idx < cls_num,
-                     "Class index of each instance should be in "
-                     "[0, class_number).");
-      PADDLE_ENFORCE(label >= 0 && label < cls_num,
-                     "Label of each instance should be in [0, class_number).");
+      PADDLE_ENFORCE_GE(idx, 0,
+                        platform::errors::InvalidArgument(
+                            "Class index of each instance should be "
+                            "greater than or equal to 0, But the index we "
+                            "received is %d",
+                            idx));
+      PADDLE_ENFORCE_LT(idx, cls_num,
+                        platform::errors::InvalidArgument(
+                            "Class index of each instance should be less than "
+                            "cls_num = %d, But the index we received is %d",
+                            cls_num, idx));
+      PADDLE_ENFORCE_GE(label, 0,
+                        platform::errors::InvalidArgument(
+                            "Label of each instance should be greater than or "
+                            "equal to 0, But the label we received is %d",
+                            label));
+      PADDLE_ENFORCE_LT(label, cls_num,
+                        platform::errors::InvalidArgument(
+                            "Label of each instance should be less than "
+                            "cls_num = %d, But the label we received is %d",
+                            cls_num, label));
       T w = weights_data ? weights_data[i] : 1.0;
       if (idx == label) {
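The kernel hunk above also splits the compound check idx >= 0 && idx < cls_num into separate GE and LT enforcements, so a failure reports which bound was violated and the value seen. A standalone sketch of that decomposition (EnforceGe/EnforceLt are illustrative helpers, not Paddle's macros):

#include <iostream>
#include <sstream>
#include <stdexcept>

// Each bound gets its own check and its own message, so the report names
// the exact bound that failed and the value observed.
void EnforceGe(long v, long lo, const char* what) {
  if (v < lo) {
    std::ostringstream os;
    os << what << " should be >= " << lo << ", but received " << v;
    throw std::invalid_argument(os.str());
  }
}
void EnforceLt(long v, long hi, const char* what) {
  if (v >= hi) {
    std::ostringstream os;
    os << what << " should be < " << hi << ", but received " << v;
    throw std::invalid_argument(os.str());
  }
}

int main() {
  long cls_num = 10, idx = 12;
  try {
    EnforceGe(idx, 0, "class index");
    EnforceLt(idx, cls_num, "class index");  // fails, names the bound
  } catch (const std::invalid_argument& e) {
    std::cout << e.what() << "\n";  // class index should be < 10, but received 12
  }
  return 0;
}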
paddle/fluid/operators/sequence_ops/sequence_concat_op.cc

@@ -41,13 +41,21 @@ class SequenceConcatOp : public framework::OperatorWithKernel {
 protected:
   void InferShape(framework::InferShapeContext *context) const override {
-    PADDLE_ENFORCE(context->HasInputs("X"),
-                   "Input(X) of Sequence Concat Op should not be null.");
-    PADDLE_ENFORCE(context->HasOutput("Out"),
-                   "Output(Out) of Sequence Concat Op should not be null.");
+    PADDLE_ENFORCE_EQ(context->HasInputs("X"), true,
+                      platform::errors::NotFound(
+                          "SequenceConcatOp Input(X) of Sequence "
+                          "Concat Op should not be null."));
+    PADDLE_ENFORCE_EQ(context->HasOutput("Out"), true,
+                      platform::errors::NotFound(
+                          "SequenceConcatOp Output(Out) of Sequence "
+                          "Concat Op should not be null."));
-    PADDLE_ENFORCE_GT(context->Inputs("X").size(), 1,
-                      "The number of input sequences is at least two.");
+    PADDLE_ENFORCE_GT(
+        context->Inputs("X").size(), 1,
+        platform::errors::InvalidArgument(
+            "The number of SequenceConcatOp inputs should be "
+            "greater than 1. But "
+            "the number of inputs we received is %d",
+            context->Inputs("X").size()));
     auto x_dims = context->GetInputsDim("X");
     int64_t batch_size = 0;
     int64_t feature_size = 0;

@@ -62,7 +70,12 @@ class SequenceConcatOp : public framework::OperatorWithKernel {
       } else {
         PADDLE_ENFORCE_EQ(
             feature_size, framework::product(x_dim) / x_dim[0],
-            "Inputs of sequence concat must have same feature size");
+            platform::errors::InvalidArgument(
+                "Each input of SequenceConcatOp inputs must have same feature "
+                "size, But "
+                "the feature size we received is %d, the feature size of 1st "
+                "input is %d",
+                feature_size, framework::product(x_dim) / x_dim[0]));
       }
     }
     if (batch_size < 0) {
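The feature-size check in the second hunk divides each input's element count by its leading (batch/sequence) dimension: an input of shape [5, 3, 4] has feature size 3 * 4 = 12, and all inputs to the concat must agree on it. A small sketch of the computation (FeatureSize is an assumed helper name, not Paddle's):

#include <functional>
#include <iostream>
#include <numeric>
#include <vector>

// Feature size = product of all dims / leading dim, i.e. the number of
// elements per sequence step. All concat inputs must agree on this value.
long FeatureSize(const std::vector<long>& dims) {
  long total = std::accumulate(dims.begin(), dims.end(), 1L,
                               std::multiplies<long>());
  return total / dims[0];
}

int main() {
  std::vector<long> a = {5, 3, 4};  // feature size 12
  std::vector<long> b = {8, 3, 4};  // feature size 12: compatible with a
  std::vector<long> c = {8, 2, 4};  // feature size 8: would be rejected
  std::cout << FeatureSize(a) << " " << FeatureSize(b) << " "
            << FeatureSize(c) << "\n";  // 12 12 8
  return 0;
}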
paddle/fluid/operators/sequence_ops/sequence_concat_op.h

@@ -73,16 +73,25 @@ class SeqConcatKernel : public framework::OpKernel<T> {
     for (auto &x : xs) {
       if (lod_size == 0) {
         PADDLE_ENFORCE_EQ(x.get().lod().empty(), false,
-                          "Input(X) Tensor of SequenceConcatOp does not "
-                          "contain LoD information.");
+                          platform::errors::NotFound(
+                              "Input(X) Tensor of SequenceConcatOp does not "
+                              "contain LoD information."));
         lod_size = x.get().lod()[0].size();
       } else {
         PADDLE_ENFORCE_EQ(
             lod_size, x.get().lod()[0].size(),
-            "The number of sequence must be same between each input");
+            platform::errors::InvalidArgument(
+                "The lod size of each input must be the same, "
+                "But the lod size of input we received is %d, "
+                "the first input is %d",
+                x.get().lod()[0].size(), lod_size));
       }
     }
-    PADDLE_ENFORCE_NE(lod_size, 0,
-                      "Each input must have sequence information");
+    PADDLE_ENFORCE_NE(
+        lod_size, 0,
+        platform::errors::InvalidArgument(
+            "Each input must have sequence lod information. But we "
+            "received input lod size is %d",
+            lod_size));

     std::vector<framework::Tensor> x_in_order;
     out.set_lod(detail::ConcatLoD(xs, &x_in_order));

@@ -100,7 +109,12 @@ class SeqConcatGradKernel : public framework::OpKernel<T> {
     auto xs = context.MultiInput<framework::LoDTensor>("X");
     auto dxs =
         context.MultiOutput<framework::LoDTensor>(framework::GradVarName("X"));
-    PADDLE_ENFORCE_EQ(xs.size(), dxs.size());
+    PADDLE_ENFORCE_EQ(
+        xs.size(), dxs.size(),
+        platform::errors::InvalidArgument(
+            "The rank of Input X and Output Grad X must be "
+            "same, But the rank of Input X we received is %d, "
+            "the rank of Output Grad X is %d",
+            xs.size(), dxs.size()));
     for (size_t i = 0; i < dxs.size(); ++i) {
       if (dxs[i] != nullptr) {
         dxs[i]->set_lod(xs[i]->lod());
paddle/fluid/operators/sequence_ops/sequence_pad_op.cc

@@ -26,24 +26,35 @@ class SequencePadOp : public framework::OperatorWithKernel {
 protected:
   void InferShape(framework::InferShapeContext* ctx) const override {
-    PADDLE_ENFORCE_EQ(ctx->HasInput("X"), true,
-                      "Input(X) of SequencePadOp should not be null.");
-    PADDLE_ENFORCE_EQ(ctx->HasInput("PadValue"), true,
-                      "Input(PadValue) of SequencePadOp should not be null.");
+    PADDLE_ENFORCE_EQ(
+        ctx->HasInput("X"), true,
+        platform::errors::NotFound(
+            "Input(X) of SequencePadOp should not be null."));
+    PADDLE_ENFORCE_EQ(
+        ctx->HasInput("PadValue"), true,
+        platform::errors::NotFound(
+            "Input(PadValue) of SequencePadOp should not be null."));
-    PADDLE_ENFORCE_EQ(ctx->HasOutput("Out"), true,
-                      "Output(Out) of SequencePadOp should not be null.");
-    PADDLE_ENFORCE_EQ(ctx->HasOutput("Length"), true,
-                      "Output(Length) of SequencePadOp should not be null.");
+    PADDLE_ENFORCE_EQ(
+        ctx->HasOutput("Out"), true,
+        platform::errors::NotFound(
+            "Output(Out) of SequencePadOp should not be null."));
+    PADDLE_ENFORCE_EQ(
+        ctx->HasOutput("Length"), true,
+        platform::errors::NotFound(
+            "Output(Length) of SequencePadOp should not be null."));

     auto x_dims = ctx->GetInputDim("X");
-    PADDLE_ENFORCE_GE(x_dims.size(), 2,
-                      "The rank of Input(X) can't be less than 2.");
+    PADDLE_ENFORCE_GE(x_dims.size(), 2,
+                      platform::errors::InvalidArgument(
+                          "The rank of SequencePadOp Input(X) can't be less "
+                          "than 2. But the rank we received is %d",
+                          x_dims.size()));
     auto time_step_dims = framework::slice_ddim(x_dims, 1, x_dims.size());
     auto pad_value_dims = ctx->GetInputDim("PadValue");
-    PADDLE_ENFORCE_EQ(pad_value_dims == framework::make_ddim({1}) ||
-                          pad_value_dims == time_step_dims,
-                      true,
-                      "The Input(PadValue) must be a scalar or a tensor whose "
-                      "shape equals to time steps in sequences");
+    PADDLE_ENFORCE_EQ(
+        pad_value_dims == framework::make_ddim({1}) ||
+            pad_value_dims == time_step_dims,
+        true,
+        platform::errors::InvalidArgument(
+            "The SequencePadOp Input(PadValue) must be a scalar or a tensor "
+            "whose shape equals to time steps in sequences"));

     int out_dim_0 = -1;

@@ -54,22 +65,37 @@ class SequencePadOp : public framework::OperatorWithKernel {
           boost::get<framework::Variable*>(ctx->GetInputVarPtrs("X")[0]);
       const auto& x_lod = x_var->Get<LoDTensor>().lod();
-      PADDLE_ENFORCE_EQ(x_lod.empty(), false,
-                        "The Input(X) must hold lod info.");
+      PADDLE_ENFORCE_EQ(x_lod.empty(), false,
+                        platform::errors::NotFound(
+                            "The SequencePadOp Input(X) must hold lod info."));
       const auto& x_lod_0 = x_lod[0];
-      PADDLE_ENFORCE_GE(x_lod_0.size(), 2,
-                        "The Input(X)'s lod info is corrupted.");
-      PADDLE_ENFORCE_EQ(
-          x_dims[0], static_cast<int64_t>(x_lod_0.back()),
-          "The Input(X)'s lod info mismatches the actual tensor shape.");
+      PADDLE_ENFORCE_GE(
+          x_lod_0.size(), 2,
+          platform::errors::InvalidArgument(
+              "The size of SequencePadOp Input(X)'s lod info can't be less "
+              "than 2. But the size we received is %d",
+              x_lod_0.size()));
+      PADDLE_ENFORCE_EQ(
+          x_dims[0], static_cast<int64_t>(x_lod_0.back()),
+          platform::errors::InvalidArgument(
+              "The SequencePadOp Input(X)'s lod info mismatches "
+              "the actual tensor shape. The 1st dimension of "
+              "Input(X)'s lod info is %d, the 1st dimension of "
+              "actual tensor shape is %d",
+              x_dims[0], static_cast<int64_t>(x_lod_0.back())));

       int seq_num = x_lod_0.size() - 1;
       int max_seq_len = math::MaximumSequenceLength(x_lod_0);
       if (padded_length == -1) {
         padded_length = max_seq_len;
       }
-      PADDLE_ENFORCE_GE(padded_length, max_seq_len,
-                        "The Attr(padded_length) must be -1 or an int greater "
-                        "than the length of the longest original sequence.");
+      PADDLE_ENFORCE_GE(
+          padded_length, max_seq_len,
+          platform::errors::InvalidArgument(
+              "The SequencePadOp Attr(padded_length) should be greater than or "
+              "equal to the "
+              "length of the longest original sequence. But the padded_length "
+              "we received is %d, the length of the longest original sequence "
+              "is %d",
+              padded_length, max_seq_len));
       out_dim_0 = seq_num;
     } else {
       // compile time

@@ -78,7 +104,10 @@ class SequencePadOp : public framework::OperatorWithKernel {
       }
       PADDLE_ENFORCE_GT(
           ctx->GetLoDLevel("X"), 0,
-          "The LoD level Input(X) of sequence_pad should be larger than 0.");
+          platform::errors::InvalidArgument(
+              "The LoD level of SequencePadOp Input(X) should be "
+              "larger than 0. But the LoD level we received is %d",
+              ctx->GetLoDLevel("X")));
     }

     std::vector<int> out_dims_vec{out_dim_0, padded_length};

@@ -185,10 +214,12 @@ class SequencePadGradOp : public framework::OperatorWithKernel {
   void InferShape(framework::InferShapeContext* ctx) const override {
-    PADDLE_ENFORCE_EQ(ctx->HasInput("X"), true,
-                      "Input(X) of SequencePadGradOp should not be null.");
+    PADDLE_ENFORCE_EQ(
+        ctx->HasInput("X"), true,
+        platform::errors::NotFound(
+            "Input(X) of SequencePadGradOp should not be null."));
     PADDLE_ENFORCE_EQ(
         ctx->HasInput(framework::GradVarName("Out")), true,
-        "Input(Out@GRAD) of SequencePadGradOp should not be null.");
+        platform::errors::NotFound(
+            "Input(Out@GRAD) of SequencePadGradOp should not be null."));

     if (ctx->HasOutput(framework::GradVarName("X"))) {
       ctx->SetOutputDim(framework::GradVarName("X"), ctx->GetInputDim("X"));
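On the padded_length logic above: an attribute value of -1 means "pad to the longest sequence in the batch", and any explicit value must be at least that maximum, which is what the PADDLE_ENFORCE_GE guards. A sketch under those assumptions (MaxSequenceLength and ResolvePaddedLength are hypothetical helpers):

#include <algorithm>
#include <iostream>
#include <stdexcept>
#include <vector>

// LoD level 0 stores cumulative offsets: sequence i spans
// [lod0[i], lod0[i+1]), so its length is lod0[i+1] - lod0[i].
long MaxSequenceLength(const std::vector<long>& lod0) {
  long max_len = 0;
  for (size_t i = 0; i + 1 < lod0.size(); ++i)
    max_len = std::max(max_len, lod0[i + 1] - lod0[i]);
  return max_len;
}

// padded_length == -1 means "pad to the longest sequence"; otherwise it
// must be >= the longest sequence, mirroring the enforcement above.
long ResolvePaddedLength(long padded_length, const std::vector<long>& lod0) {
  long max_len = MaxSequenceLength(lod0);
  if (padded_length == -1) return max_len;
  if (padded_length < max_len)
    throw std::invalid_argument("padded_length shorter than longest sequence");
  return padded_length;
}

int main() {
  std::vector<long> lod0 = {0, 2, 5, 9};  // sequence lengths 2, 3, 4
  std::cout << ResolvePaddedLength(-1, lod0) << " "   // 4
            << ResolvePaddedLength(6, lod0) << "\n";  // 6
  return 0;
}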
paddle/fluid/operators/sequence_ops/sequence_pad_op.h

@@ -37,7 +37,8 @@ class SequencePadOpKernel : public framework::OpKernel<T> {
     PADDLE_ENFORCE_EQ(
         x->lod().empty(), false,
-        "Input(X) Tensor of SequencePadOp does not contain LoD information.");
+        platform::errors::NotFound("Input(X) Tensor of SequencePadOp does not "
+                                   "contain LoD information."));

     const auto* pad_value = ctx.Input<LoDTensor>("PadValue");
paddle/fluid/operators/sequence_ops/sequence_reshape_op.cc

@@ -23,13 +23,20 @@ class SequenceReshapeOp : public framework::OperatorWithKernel {
 public:
   using framework::OperatorWithKernel::OperatorWithKernel;
   void InferShape(framework::InferShapeContext* ctx) const override {
-    PADDLE_ENFORCE(ctx->HasInput("X"),
-                   "Input(X) of SequenceReshapeOp should not be null.");
-    PADDLE_ENFORCE(ctx->HasOutput("Out"),
-                   "Output(Out) of SequenceReshapeOp should not be null.");
+    PADDLE_ENFORCE_EQ(ctx->HasInput("X"), true,
+                      platform::errors::NotFound(
+                          "Input(X) of SequenceReshapeOp should not be null."));
+    PADDLE_ENFORCE_EQ(
+        ctx->HasOutput("Out"), true,
+        platform::errors::NotFound(
+            "Output(Out) of SequenceReshapeOp should not be null."));
     auto x_dims = ctx->GetInputDim("X");
     auto x_numel = product(x_dims);
-    PADDLE_ENFORCE_EQ(x_dims.size(), 2U, "Rank of Input(X) should be 2.");
+    PADDLE_ENFORCE_EQ(x_dims.size(), 2U,
+                      platform::errors::InvalidArgument(
+                          "The rank of SequenceReshapeOp Input(X) should be 2. "
+                          "But the rank we received is %d",
+                          x_dims.size()));
     int new_dim = ctx->Attrs().Get<int>("new_dim");
     if (ctx->IsRuntime()) {
       ctx->SetOutputDim("Out",

@@ -90,11 +97,14 @@ class SequenceReshapeGradOp : public framework::OperatorWithKernel {
   using framework::OperatorWithKernel::OperatorWithKernel;
   void InferShape(framework::InferShapeContext* ctx) const override {
-    PADDLE_ENFORCE(
-        ctx->HasInput(framework::GradVarName("Out")),
-        "Input(Out@GRAD) of SequenceReshapeGradOp should not be null.");
-    PADDLE_ENFORCE(ctx->HasInput("X"),
-                   "Input(X) of SequenceReshapeGradOp should not be null.");
+    PADDLE_ENFORCE_EQ(
+        ctx->HasInput(framework::GradVarName("Out")), true,
+        platform::errors::NotFound(
+            "Input(Out@GRAD) of SequenceReshapeGradOp should not be null."));
+    PADDLE_ENFORCE_EQ(
+        ctx->HasInput("X"), true,
+        platform::errors::NotFound(
+            "Input(X) of SequenceReshapeGradOp should not be null."));

     ctx->ShareDim("X", /*->*/ framework::GradVarName("X"));
     ctx->ShareLoD("X", /*->*/ framework::GradVarName("X"));
paddle/fluid/operators/sequence_ops/sequence_reshape_op.h

@@ -33,13 +33,22 @@ class SequenceReshapeKernel : public framework::OpKernel<T> {
     auto& in_lod = in->lod();
     PADDLE_ENFORCE_EQ(in_lod.empty(), false,
-                      "Input(X) Tensor of SequenceReshapeOp does not contain "
-                      "LoD information.");
+                      platform::errors::NotFound(
+                          "Input(X) Tensor of SequenceReshapeOp does not "
+                          "contain LoD information."));
-    PADDLE_ENFORCE_EQ(in_lod.size(), 1UL,
-                      "Only support one level sequence now.");
+    PADDLE_ENFORCE_EQ(in_lod.size(), 1UL,
+                      platform::errors::InvalidArgument(
+                          "Input(X) Tensor of SequenceReshapeOp Only support "
+                          "one level sequence now. But lod size "
+                          "of Input(X) is %d",
+                          in_lod.size()));
     PADDLE_ENFORCE_EQ(
         (uint64_t)in_dims[0], in_lod[0].back(),
-        "Inconsistent size between X.shape[0] and X.lod()[0].back().");
+        platform::errors::InvalidArgument(
+            "The size of SequenceReshapeOp X.shape[0] and X.lod()[0].back() "
+            "should "
+            "be same. But X.shape[0] = %d, X.lod()[0].back() = %d",
+            (uint64_t)in_dims[0], in_lod[0].back()));

     auto in_lod_l0 = in_lod[0];
     int seq_num = in_lod_l0.size() - 1;

@@ -55,11 +64,13 @@ class SequenceReshapeKernel : public framework::OpKernel<T> {
         size_t seq_len = in_lod_l0[i + 1] - in_lod_l0[i];
         size_t offset = 0;
         offset = (seq_len * in_width) / out_width;
-        PADDLE_ENFORCE_EQ(offset * out_width, seq_len * in_width,
-                          "Please make sure (sequence_length * dimension) can "
-                          "be divided by new_dim with no remainder for each "
-                          "sequence. The %dth sequence is invalid.",
-                          i + 1);
+        PADDLE_ENFORCE_EQ(
+            offset * out_width, seq_len * in_width,
+            platform::errors::InvalidArgument(
+                "Please make sure (sequence_length * dimension) "
+                "can be divided by context Attr(new_dim) with no remainder for "
+                "each sequence. But the %dth sequence is invalid.",
+                i + 1));
         out_lod[0][i + 1] = out_lod[0][i] + offset;
       }
     }
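The divisibility check in the second hunk preserves each sequence's element count across the reshape: seq_len steps of width in_width become offset = (seq_len * in_width) / out_width steps of width out_width, and offset * out_width == seq_len * in_width rejects sequences whose elements don't divide evenly. For example, 4 steps of width 6 (24 elements) reshape cleanly to width 4 but not to width 5. A sketch (CanReshape is an illustrative helper):

#include <iostream>

// A sequence reshape is valid only if seq_len * in_width is divisible by
// out_width; integer division would otherwise silently drop elements.
bool CanReshape(long seq_len, long in_width, long out_width) {
  long offset = (seq_len * in_width) / out_width;
  return offset * out_width == seq_len * in_width;
}

int main() {
  std::cout << CanReshape(4, 6, 4) << "\n";  // 1: 24 elements -> 6 steps of 4
  std::cout << CanReshape(4, 6, 5) << "\n";  // 0: 24 is not a multiple of 5
  return 0;
}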
paddle/fluid/operators/sequence_ops/sequence_reverse_op.h

@@ -27,12 +27,21 @@ class SequenceReverseOp : public framework::OperatorWithKernel {
   using framework::OperatorWithKernel::OperatorWithKernel;

   void InferShape(framework::InferShapeContext *ctx) const override {
-    PADDLE_ENFORCE(ctx->HasInput("X"), "Input(X) must exist");
-    PADDLE_ENFORCE(ctx->HasOutput("Y"), "Output(Y) must exist");
+    PADDLE_ENFORCE_EQ(ctx->HasInput("X"), true,
+                      platform::errors::NotFound(
+                          "Input(X) of SequenceReverse must exist"));
+    PADDLE_ENFORCE_EQ(ctx->HasOutput("Y"), true,
+                      platform::errors::NotFound(
+                          "Output(Y) of SequenceReverse must exist"));

     auto x_dim = ctx->GetInputDim("X");
-    PADDLE_ENFORCE_GE(x_dim.size(), 2,
-                      "Rank of Input(X) must be not less than 2.");
+    PADDLE_ENFORCE_GE(
+        x_dim.size(), 2,
+        platform::errors::InvalidArgument(
+            "The rank of SequenceReverseOp Input(X) must be greater "
+            "than or equal to 2. But the Input(X) tensor's rank we received is "
+            "%d",
+            x_dim.size()));

     ctx->SetOutputDim("Y", x_dim);
     ctx->ShareLoD("X", "Y");

@@ -108,10 +117,15 @@ class SequenceReverseOpKernel : public framework::OpKernel<T> {
     auto *y = ctx.Output<LoDTensor>("Y");

     PADDLE_ENFORCE_EQ(x.lod().empty(), false,
-                      "Input(X) Tensor of SequenceReverseOp does not contain "
-                      "LoD information.");
+                      platform::errors::NotFound(
+                          "Input(X) Tensor of SequenceReverseOp does not "
+                          "contain LoD information."));
-    PADDLE_ENFORCE_EQ(x.lod().size(), 1,
-                      "SequenceReverse Op only support one level lod.");
+    PADDLE_ENFORCE_EQ(x.lod().size(), 1,
+                      platform::errors::InvalidArgument(
+                          "SequenceReverseOp only support one "
+                          "level lod. But the Input(X) lod size is %d",
+                          x.lod().size()));

     const size_t *lod;
     size_t lod_count = x.lod()[0].size();

@@ -131,8 +145,10 @@ class SequenceReverseOpKernel : public framework::OpKernel<T> {
     auto *x_data = x.data<T>();
     auto *y_data = y->mutable_data<T>(ctx.GetPlace());

-    PADDLE_ENFORCE_NE(x_data, y_data,
-                      "SequenceReverse Op does not support in-place operation");
+    PADDLE_ENFORCE_NE(
+        x_data, y_data,
+        platform::errors::InvalidArgument(
+            "SequenceReverse Op does not support in-place operation"));

     if (platform::is_cpu_place(ctx.GetPlace())) {
       for (size_t idx = 0; idx < lod_count - 1; idx++) {
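The final hunk rejects in-place execution by comparing the raw input and output pointers: if the buffers alias, reversing would overwrite elements that still need to be read. A trivial sketch of that aliasing test (IsInPlace is an illustrative helper):

#include <iostream>
#include <vector>

// Reversing element ranges in place would clobber values not yet copied,
// so the kernel requires distinct input/output buffers. Pointer equality
// is a cheap (if partial) aliasing test.
bool IsInPlace(const float* x_data, const float* y_data) {
  return x_data == y_data;
}

int main() {
  std::vector<float> x(8), y(8);
  std::cout << IsInPlace(x.data(), y.data()) << "\n";  // 0: distinct buffers
  std::cout << IsInPlace(x.data(), x.data()) << "\n";  // 1: would be rejected
  return 0;
}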
paddle/fluid/operators/sequence_ops/sequence_unpad_op.cc

@@ -26,22 +26,37 @@ class SequenceUnpadOp : public framework::OperatorWithKernel {
 protected:
   void InferShape(framework::InferShapeContext* ctx) const override {
-    PADDLE_ENFORCE_EQ(ctx->HasInput("X"), true,
-                      "Input(X) of SequenceUnpadOp should not be null.");
-    PADDLE_ENFORCE_EQ(ctx->HasInput("Length"), true,
-                      "Input(Length) of SequenceUnpadOp should not be null.");
-    PADDLE_ENFORCE_EQ(ctx->HasOutput("Out"), true,
-                      "Output(Out) of SequenceUnpadOp should not be null.");
+    PADDLE_ENFORCE_EQ(
+        ctx->HasInput("X"), true,
+        platform::errors::NotFound(
+            "Input(X) of SequenceUnpadOp should not be null."));
+    PADDLE_ENFORCE_EQ(
+        ctx->HasInput("Length"), true,
+        platform::errors::NotFound(
+            "Input(Length) of SequenceUnpadOp should not be null."));
+    PADDLE_ENFORCE_EQ(
+        ctx->HasOutput("Out"), true,
+        platform::errors::NotFound(
+            "Output(Out) of SequenceUnpadOp should not be null."));

     auto x_dims = ctx->GetInputDim("X");
-    PADDLE_ENFORCE_GE(x_dims.size(), 2,
-                      "The rank of Input(X) can't be less than 2.");
+    PADDLE_ENFORCE_GE(x_dims.size(), 2,
+                      platform::errors::InvalidArgument(
+                          "The rank of Input(X) can't be less than 2. But the "
+                          "rank we received is %d",
+                          x_dims.size()));

     auto len_dims = ctx->GetInputDim("Length");
-    PADDLE_ENFORCE_EQ(len_dims.size(), 1,
-                      "The shape of Input(Length) should be [batch_size].");
+    PADDLE_ENFORCE_EQ(len_dims.size(), 1,
+                      platform::errors::InvalidArgument(
+                          "The rank of SequenceUnpadOp Input(Length) should "
+                          "be 1. But the rank we received is %d",
+                          len_dims.size()));
     PADDLE_ENFORCE_EQ(
         len_dims[0], x_dims[0],
-        "Input(X) and Input(Length) should have the same first dimension.");
+        platform::errors::InvalidArgument(
+            "The 1st dimension of SequenceUnpadOp Input(X) and Input(Length)"
+            "should be same. But the 1st dimension of "
+            "Input(X) is %d, Input(Length) is %d",
+            x_dims[0], len_dims[0]));

     int64_t out_dim_0 = -1;
     if (ctx->IsRuntime()) {

@@ -115,11 +130,14 @@ class SequenceUnpadGradOp : public framework::OperatorWithKernel {
   using framework::OperatorWithKernel::OperatorWithKernel;

   void InferShape(framework::InferShapeContext* ctx) const override {
-    PADDLE_ENFORCE_EQ(ctx->HasInput("X"), true,
-                      "Input(X) of SequenceUnpadGradOp should not be null.");
+    PADDLE_ENFORCE_EQ(
+        ctx->HasInput("X"), true,
+        platform::errors::NotFound(
+            "Input(X) of SequenceUnpadGradOp should not be null."));
     PADDLE_ENFORCE_EQ(
         ctx->HasInput(framework::GradVarName("Out")), true,
-        "Input(Out@GRAD) of SequenceUnpadGradOp should not be null.");
+        platform::errors::NotFound(
+            "Input(Out@GRAD) of SequenceUnpadGradOp should not be null."));

     if (ctx->HasOutput(framework::GradVarName("X"))) {
       ctx->SetOutputDim(framework::GradVarName("X"), ctx->GetInputDim("X"));
paddle/fluid/operators/truncated_gaussian_random_op.cc

@@ -182,17 +182,22 @@ class TruncatedGaussianRandomOp : public framework::OperatorWithKernel {
   using framework::OperatorWithKernel::OperatorWithKernel;

   void InferShape(framework::InferShapeContext* ctx) const override {
-    PADDLE_ENFORCE(
-        ctx->HasOutput("Out"),
-        "Output(Out) of TruncatedGaussianRandomOp should not be null.");
+    PADDLE_ENFORCE_EQ(
+        ctx->HasOutput("Out"), true,
+        platform::errors::NotFound(
+            "Output(Out) of TruncatedGaussianRandomOp should not be null."));
     auto shape = ctx->Attrs().Get<std::vector<int>>("shape");
     std::vector<int64_t> out_dim;
     out_dim.reserve(shape.size());
     for (auto dim : shape) {
       out_dim.push_back(static_cast<int64_t>(dim));
     }
-    PADDLE_ENFORCE(shape.size() > 0UL,
-                   "shape can be one int or array. shape must be set.");
+    PADDLE_ENFORCE_GT(shape.size(), 0UL,
+                      platform::errors::InvalidArgument(
+                          "the input shape of TruncatedGaussianRandomOp must "
+                          "be set, But the rank of shape we received is %d",
+                          shape.size()));
     ctx->SetOutputDim("Out", framework::make_ddim(out_dim));
   }
python/paddle/fluid/layers/nn.py

@@ -10046,6 +10046,9 @@ def gaussian_random(shape, mean=0.0, std=1.0, seed=0, dtype='float32'):
     """
     helper = LayerHelper('gaussian_random', **locals())
+    check_type(shape, 'shape', (list, tuple), 'fluid.layers.gaussian_random')
+    check_dtype(dtype, 'dtype', ['float32', 'float64'],
+                'fluid.layers.gaussian_random')
     out = helper.create_variable_for_type_inference(dtype)
     c_dtype = convert_np_dtype_to_dtype_(dtype)
     helper.append_op(

@@ -10139,6 +10142,12 @@ def gaussian_random_batch_size_like(input,
     """
     helper = LayerHelper('gaussian_random_batch_size_like', **locals())
+    check_type(input, 'input', (Variable),
+               'fluid.layers.gaussian_random_batch_size_like')
+    check_type(shape, 'shape', (list, tuple),
+               'fluid.layers.gaussian_random_batch_size_like')
+    check_dtype(dtype, 'dtype', ['float16', 'float32', 'int'],
+                'fluid.layers.gaussian_random_batch_size_like')
     out = helper.create_variable_for_type_inference(dtype)
     c_dtype = convert_np_dtype_to_dtype_(dtype)
     helper.append_op(
python/paddle/fluid/layers/sequence_lod.py

@@ -17,6 +17,7 @@ from __future__ import print_function
 from .layer_function_generator import templatedoc
 from ..framework import Variable, in_dygraph_mode
 from ..layer_helper import LayerHelper
+from ..data_feeder import check_variable_and_dtype, check_type, check_dtype

 __all__ = [
     'sequence_conv',

@@ -405,6 +406,13 @@ def sequence_concat(input, name=None):
     assert not in_dygraph_mode(), (
         "sequence layer is not supported in dygraph mode yet.")
     helper = LayerHelper('sequence_concat', **locals())
+
+    check_type(input, 'input', list, 'fluid.layers.sequence_concat')
+    for i, input_x in enumerate(input):
+        check_variable_and_dtype(input_x, 'input[' + str(i) + ']',
+                                 ['int64', 'float32', 'float64'],
+                                 'fluid.layers.sequence_concat')
+
     out = helper.create_variable_for_type_inference(dtype=helper.input_dtype())
     helper.append_op(
         type='sequence_concat', inputs={'X': input}, outputs={'Out': [out]})

@@ -926,6 +934,11 @@ def sequence_pad(x, pad_value, maxlen=None, name=None):
     assert not in_dygraph_mode(), (
         "sequence layer is not supported in dygraph mode yet.")
     helper = LayerHelper('sequence_pad', input=x, **locals())
+    check_variable_and_dtype(x, 'x', ['float32', 'float64', 'int32', 'int64'],
+                             'fluid.layers.sequence_pad')
+    check_variable_and_dtype(pad_value, 'pad_value',
+                             ['float32', 'float64', 'int32', 'int64'],
+                             'fluid.layers.sequence_pad')
     dtype = helper.input_dtype()
     out = helper.create_variable_for_type_inference(dtype)
     length = helper.create_variable_for_type_inference(dtype)

@@ -1001,6 +1014,10 @@ def sequence_unpad(x, length, name=None):
     assert not in_dygraph_mode(), (
         "sequence layer is not supported in dygraph mode yet.")
     helper = LayerHelper('sequence_unpad', input=x, **locals())
+    check_variable_and_dtype(x, 'x', ['float32', 'float64', 'int32', 'int64'],
+                             'fluid.layers.sequence_unpad')
+    check_variable_and_dtype(length, 'length', ['int64'],
+                             'fluid.layers.sequence_unpad')
     dtype = helper.input_dtype()
     out = helper.create_variable_for_type_inference(dtype)

@@ -1062,6 +1079,9 @@ def sequence_reshape(input, new_dim):
     assert not in_dygraph_mode(), (
         "sequence layer is not supported in dygraph mode yet.")
     helper = LayerHelper('sequence_reshape', **locals())
+    check_variable_and_dtype(input, 'input',
+                             ['float32', 'float64', 'int32', 'int64'],
+                             'fluid.layers.sequence_reshape')
     out = helper.create_variable_for_type_inference(helper.input_dtype())
     helper.append_op(
         type='sequence_reshape',

@@ -1334,6 +1354,9 @@ def sequence_reverse(x, name=None):
     assert not in_dygraph_mode(), (
         "sequence layer is not supported in dygraph mode yet.")
     helper = LayerHelper("sequence_reverse", **locals())
+    check_variable_and_dtype(x, 'x',
+                             ['float32', 'float64', 'int8', 'int32', 'int64'],
+                             'fluid.layers.sequence_reverse')
     out = helper.create_variable_for_type_inference(dtype=x.dtype)
     helper.append_op(
python/paddle/fluid/tests/unittests/sequence/test_sequence_concat.py

@@ -20,6 +20,8 @@ import sys
 sys.path.append("../")
 from op_test import OpTest
+from paddle import fluid


 class TestSequenceConcat(OpTest):
     def setLoD(self):

@@ -76,5 +78,41 @@ class TestSequenceConcatCase5(TestSequenceConcat):
         self.out_lod = [20, 10]


+class TestSequenceConcatOpError(unittest.TestCase):
+    def test_errors(self):
+        def test_input_list():
+            # the input type must be list
+            x_data = fluid.layers.data(name='x', shape=[4], dtype='float32')
+            fluid.layers.sequence_concat(input=x_data)
+
+        self.assertRaises(TypeError, test_input_list)
+
+        def test_variable1():
+            # the input element type must be Variable
+            x1_data = np.array([[3, 5]]).astype('float32')
+            y1_data = fluid.layers.data(name='y1', shape=[4], dtype='float32')
+            fluid.layers.sequence_concat(input=[x1_data, y1_data])
+
+        def test_variable2():
+            x2_data = np.array([[3, 5]]).astype('float32')
+            y2_data = fluid.layers.data(name='y2', shape=[4], dtype='float32')
+            fluid.layers.sequence_concat(input=[y2_data, x2_data])
+
+        for i in range(2):
+            if i == 0:
+                self.assertRaises(TypeError, test_variable1)
+            else:
+                self.assertRaises(TypeError, test_variable2)
+
+        def test_dtype():
+            # dtype must be 'float32', 'float64', 'int64'
+            x3_data = fluid.layers.data(name="x3", shape=[3, 5], dtype='int32')
+            y3_data = fluid.layers.data(name="y3", shape=[3, 5], dtype='int16')
+            input_list = [x3_data, y3_data]
+            fluid.layers.sequence_concat(input=input_list)
+
+        self.assertRaises(TypeError, test_dtype)
+
+
 if __name__ == '__main__':
     unittest.main()
python/paddle/fluid/tests/unittests/sequence/test_sequence_pad_op.py

@@ -18,6 +18,8 @@ import sys
 sys.path.append("../")
 from op_test import OpTest
+import paddle.fluid as fluid


 class TestSequencePadOp(OpTest):
     def set_attr(self):

@@ -143,5 +145,34 @@ class TestSequencePadOp8(TestSequencePadOp):
         self.dtype = 'float64'


+class TestSequencePadOpError(unittest.TestCase):
+    def test_error(self):
+        def test_x_variable():
+            # the input x type must be Variable
+            x = np.random.random((2, 4)).astype("float32")
+            pad_value = fluid.layers.assign(
+                input=np.array([0.0], dtype=np.float32))
+            fluid.layers.sequence_pad(x=x, pad_value=pad_value)
+
+        self.assertRaises(TypeError, test_x_variable)
+
+        def test_pad_value_variable():
+            x1 = fluid.layers.data(
+                name='x1', shape=[10, 5], dtype='float32', lod_level=1)
+            pad_value1 = np.array([0.0], dtype=np.float32)
+            fluid.layers.sequence_pad(x=x1, pad_value=pad_value1)
+
+        self.assertRaises(TypeError, test_pad_value_variable)
+
+        def test_dtype():
+            x2 = fluid.layers.data(
+                name='x2', shape=[10, 5], dtype='int16', lod_level=1)
+            pad_value2 = fluid.layers.assign(
+                input=np.array([0.0], dtype=np.int32))
+            fluid.layers.sequence_pad(x=x2, pad_value=pad_value2)
+
+        self.assertRaises(TypeError, test_dtype)
+
+
 if __name__ == '__main__':
     unittest.main()
python/paddle/fluid/tests/unittests/sequence/test_sequence_reshape.py

@@ -21,6 +21,8 @@ import sys
 sys.path.append("../")
 from op_test import OpTest
+import paddle.fluid as fluid


 class TestSequenceReshape(OpTest):
     def init_data(self):

@@ -83,5 +85,25 @@ class TestSequenceReshape_reduce_seq_len0_case1(TestSequenceReshape):
         self.x = np.random.uniform(0.1, 1, [12, 12]).astype('float64')


+class TestSequenceReshapeOpError(unittest.TestCase):
+    def test_error(self):
+        def test_variable():
+            x = np.random.random((2, 4)).astype("float32")
+            fluid.layers.sequence_reshape(x=x, new_dim=4)
+
+        self.assertRaises(TypeError, test_variable)
+
+        def test_dtype():
+            x1 = fluid.layers.data(
+                name='x1',
+                shape=[2, 6],
+                append_batch_size=False,
+                dtype='float16',
+                lod_level=1)
+            fluid.layers.sequence_reshape(x=x1, new_dim=4)
+
+        self.assertRaises(TypeError, test_dtype)
+
+
 if __name__ == '__main__':
     unittest.main()
python/paddle/fluid/tests/unittests/sequence/test_sequence_reverse.py

@@ -79,5 +79,22 @@ class TestSequenceReverse3(TestSequenceReverseBase):
         self.lod = [0, 2, 10, 0]


+class TestSequenceReverseOpError(unittest.TestCase):
+    def test_error(self):
+        def test_variable():
+            # the input type must be Variable
+            x_data = np.random.random((2, 4)).astype("float32")
+            fluid.layers.sequence_reverse(x=x_data)
+
+        self.assertRaises(TypeError, test_variable)
+
+        def test_dtype():
+            # dtype must be 'float32', 'float64', 'int8', 'int32', 'int64'
+            x2_data = fluid.layers.data(name='x2', shape=[4], dtype='float16')
+            fluid.layers.sequence_reverse(x=x2_data)
+
+        self.assertRaises(TypeError, test_dtype)
+
+
 if __name__ == '__main__':
     unittest.main()
python/paddle/fluid/tests/unittests/sequence/test_sequence_unpad_op.py

@@ -19,6 +19,8 @@ import sys
 sys.path.append("../")
 from op_test import OpTest
+import paddle.fluid as fluid


 class TestSequenceUnpadOp(OpTest):
     def init(self):

@@ -84,5 +86,36 @@ class TestSequenceUnpadOp4(TestSequenceUnpadOp):
         self.dtype = "float64"


+class TestSequenceUnpadOpError(unittest.TestCase):
+    def test_error(self):
+        def test_x_variable():
+            x = np.random.random((10, 5)).astype("float64")
+            len = fluid.data(name='length2', shape=[10], dtype='int64')
+            fluid.layers.sequence_pad(x=x, length=len)
+
+        self.assertRaises(TypeError, test_x_variable)
+
+        def test_length_variable():
+            x1 = fluid.data(name='x1', shape=[10, 5], dtype='float32')
+            len1 = np.random.random((10)).astype("int64")
+            fluid.layers.sequence_pad(x=x1, length=len1)
+
+        self.assertRaises(TypeError, test_length_variable)
+
+        def test_x_dtype():
+            x2 = fluid.data(name='x2', shape=[10, 5], dtype='float16')
+            len2 = fluid.data(name='length2', shape=[10], dtype='int64')
+            fluid.layers.sequence_pad(x=x2, length=len2)
+
+        self.assertRaises(TypeError, test_x_dtype)
+
+        def test_length_dtype():
+            x3 = fluid.data(name='x3', shape=[10, 5], dtype='float64')
+            len3 = fluid.data(name='length3', shape=[10], dtype='int32')
+            fluid.layers.sequence_pad(x=x3, length=len3)
+
+        self.assertRaises(TypeError, test_length_dtype)
+
+
 if __name__ == '__main__':
     unittest.main()