Skip to content
体验新版
项目
组织
正在加载...
登录
切换导航
打开侧边栏
BaiXuePrincess
Paddle
提交
d8bfe83d
P
Paddle
项目概览
BaiXuePrincess
/
Paddle
与 Fork 源项目一致
Fork自
PaddlePaddle / Paddle
通知
1
Star
1
Fork
0
代码
文件
提交
分支
Tags
贡献者
分支图
Diff
Issue
0
列表
看板
标记
里程碑
合并请求
0
Wiki
0
Wiki
分析
仓库
DevOps
项目成员
Pages
P
Paddle
项目概览
项目概览
详情
发布
仓库
仓库
文件
提交
分支
标签
贡献者
分支图
比较
Issue
0
Issue
0
列表
看板
标记
里程碑
合并请求
0
合并请求
0
Pages
分析
分析
仓库分析
DevOps
Wiki
0
Wiki
成员
成员
收起侧边栏
关闭侧边栏
动态
分支图
创建新Issue
提交
Issue看板
未验证
提交
d8bfe83d
编写于
9月 10, 2021
作者:
zhouweiwei2014
提交者:
GitHub
9月 10, 2021
浏览文件
操作
浏览文件
下载
电子邮件补丁
差异文件
add the extra for op rnn/sequence_conv/sequence_pool/sequence_softmax (#35554)
上级
47d15a30
变更
5
显示空白变更内容
内联
并排
Showing
5 changed files
with
26 additions
and
13 deletions
+26
-13
paddle/fluid/operators/rnn_op.cc
paddle/fluid/operators/rnn_op.cc
+3
-1
paddle/fluid/operators/rnn_op.h
paddle/fluid/operators/rnn_op.h
+5
-4
paddle/fluid/operators/sequence_ops/sequence_pool_op.cc
paddle/fluid/operators/sequence_ops/sequence_pool_op.cc
+2
-1
paddle/fluid/operators/sequence_ops/sequence_pool_op.h
paddle/fluid/operators/sequence_ops/sequence_pool_op.h
+2
-1
paddle/fluid/operators/sequence_ops/sequence_softmax_op.cc
paddle/fluid/operators/sequence_ops/sequence_softmax_op.cc
+14
-6
未找到文件。
paddle/fluid/operators/rnn_op.cc
浏览文件 @
d8bfe83d
...
...
@@ -162,8 +162,10 @@ class RNNOpMaker : public framework::OpProtoAndCheckerMaker {
AddAttr
<
std
::
string
>
(
"mode"
,
"(string) rnn types, including: LSTM, GRU, RNN_RELU, RNN_TANH."
);
AddAttr
<
bool
>
(
"is_test"
,
"True if in test phase."
).
SetDefault
(
false
);
AddAttr
<
int
>
(
"seed"
,
"seed to used if fix_seed is True"
).
SetDefault
(
0
);
AddAttr
<
bool
>
(
"is_test"
,
"True if in test phase."
)
.
SetDefault
(
false
)
.
AsExtra
();
AddComment
(
R"DOC(
)DOC"
);
}
...
...
paddle/fluid/operators/rnn_op.h
浏览文件 @
d8bfe83d
...
...
@@ -230,7 +230,7 @@ template <typename T>
void
dropout_cpu_function_inplace
(
const
framework
::
ExecutionContext
&
context
,
Tensor
*
x
,
Tensor
*
y
,
Tensor
*
mask
,
const
float
&
dropout_prob
,
const
int
&
seed_number
,
const
bool
&
is_test
,
const
int
&
seed_number
,
bool
is_test
,
bool
*
is_has_reset
)
{
if
(
is_test
)
{
return
;
...
...
@@ -816,7 +816,7 @@ void RnnFunc(const framework::ExecutionContext& ctx, const Tensor* input,
Tensor
*
dropout_mask
,
const
int
&
num_layers
,
const
int
&
gate_num
,
const
int
&
input_size
,
const
int
&
hidden_size
,
const
bool
&
is_bidirec
,
const
std
::
string
&
cell_type
,
const
float
&
dropout_prob
,
const
bool
&
is_test
,
const
int
&
seed
,
const
float
&
dropout_prob
,
bool
is_test
,
const
int
&
seed
,
Tensor
*
reserve_data
)
{
const
int
&
direction_num
=
is_bidirec
?
2
:
1
;
const
auto
&
init_h_dims
=
init_h
->
dims
();
...
...
@@ -952,8 +952,8 @@ class RNNCPUKernel : public framework::OpKernel<T> {
const
int
&
hidden_size
=
ctx
.
Attr
<
int
>
(
"hidden_size"
);
const
float
&
dropout_prob
=
ctx
.
Attr
<
float
>
(
"dropout_prob"
);
const
std
::
string
&
mode
=
ctx
.
Attr
<
std
::
string
>
(
"mode"
);
const
bool
&
is_test
=
ctx
.
Attr
<
bool
>
(
"is_test"
);
const
int
&
seed
=
ctx
.
Attr
<
int
>
(
"seed"
);
bool
is_test
=
ctx
.
HasAttr
(
"is_test"
)
?
ctx
.
Attr
<
bool
>
(
"is_test"
)
:
false
;
bool
has_seq_length
=
ctx
.
HasInput
(
"SequenceLength"
);
const
Tensor
*
sequence_length
=
nullptr
;
...
...
@@ -1809,7 +1809,8 @@ void RnnGradFunc(const framework::ExecutionContext& context,
const
int
&
num_layers
=
context
.
Attr
<
int
>
(
"num_layers"
);
const
bool
&
is_bidirec
=
context
.
Attr
<
bool
>
(
"is_bidirec"
);
const
float
&
dropout_prob
=
context
.
Attr
<
float
>
(
"dropout_prob"
);
const
bool
&
is_test
=
context
.
Attr
<
bool
>
(
"is_test"
);
bool
is_test
=
context
.
HasAttr
(
"is_test"
)
?
context
.
Attr
<
bool
>
(
"is_test"
)
:
false
;
// get the input_size, batch_size, time_step, hidden_size
const
int
&
time_step
=
input
->
dims
()[
0
];
...
...
paddle/fluid/operators/sequence_ops/sequence_pool_op.cc
浏览文件 @
d8bfe83d
...
...
@@ -61,7 +61,8 @@ class SequencePoolOpMaker : public framework::OpProtoAndCheckerMaker {
AddAttr
<
bool
>
(
"is_test"
,
"(bool, default false) Set to true for inference only, false "
"for training. Some layers may run faster when this is true."
)
.
SetDefault
(
false
);
.
SetDefault
(
false
)
.
AsExtra
();
AddAttr
<
std
::
string
>
(
"pooltype"
,
"(string, default 'AVERAGE') the pooling pooltype of SequencePoolOp."
)
...
...
paddle/fluid/operators/sequence_ops/sequence_pool_op.h
浏览文件 @
d8bfe83d
...
...
@@ -67,7 +67,8 @@ class SequencePoolKernel : public framework::OpKernel<T> {
out
->
mutable_data
<
T
>
(
context
.
GetPlace
());
Tensor
*
index
=
nullptr
;
const
bool
is_test
=
context
.
Attr
<
bool
>
(
"is_test"
);
bool
is_test
=
context
.
HasAttr
(
"is_test"
)
?
context
.
Attr
<
bool
>
(
"is_test"
)
:
false
;
// Do not create index buffer for inference (is_test) mode
// TODO(jczaja): Skip index buffer creation for other devices eg. GPU
...
...
paddle/fluid/operators/sequence_ops/sequence_softmax_op.cc
浏览文件 @
d8bfe83d
...
...
@@ -34,7 +34,8 @@ class SequenceSoftmaxOp : public framework::OperatorWithKernel {
framework
::
OpKernelType
GetExpectedKernelType
(
const
framework
::
ExecutionContext
&
ctx
)
const
override
{
// choose cudnn kernel if the runtime supported.
bool
use_cudnn
=
ctx
.
Attr
<
bool
>
(
"use_cudnn"
);
bool
use_cudnn
=
ctx
.
HasAttr
(
"use_cudnn"
)
?
ctx
.
Attr
<
bool
>
(
"use_cudnn"
)
:
false
;
bool
runtime_cudnn_support
=
false
;
#if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
if
(
platform
::
is_gpu_place
(
ctx
.
GetPlace
()))
{
...
...
@@ -47,7 +48,9 @@ class SequenceSoftmaxOp : public framework::OperatorWithKernel {
if
(
use_cudnn
&&
runtime_cudnn_support
)
{
library_
=
framework
::
LibraryType
::
kCUDNN
;
}
std
::
string
data_format
=
ctx
.
Attr
<
std
::
string
>
(
"data_format"
);
std
::
string
data_format
=
ctx
.
HasAttr
(
"data_format"
)
?
ctx
.
Attr
<
std
::
string
>
(
"data_format"
)
:
"AnyLayout"
;
return
framework
::
OpKernelType
(
OperatorWithKernel
::
IndicateVarDataType
(
ctx
,
"X"
),
ctx
.
GetPlace
(),
framework
::
StringToDataLayout
(
data_format
),
library_
);
...
...
@@ -66,14 +69,16 @@ class SequenceSoftmaxOpMaker : public framework::OpProtoAndCheckerMaker {
AddAttr
<
bool
>
(
"use_cudnn"
,
"(bool, default false) Only used in cudnn kernel, need install cudnn"
)
.
SetDefault
(
false
);
.
SetDefault
(
false
)
.
AsExtra
();
AddAttr
<
std
::
string
>
(
"data_format"
,
"(string, default NCHW) Only used in "
"An optional string from:
\"
NHWC
\"
,
\"
NCHW
\"
. "
"Defaults to
\"
NHWC
\"
. Specify the data format of the output data, "
"the input will be transformed automatically. "
)
.
SetDefault
(
"AnyLayout"
);
.
SetDefault
(
"AnyLayout"
)
.
AsExtra
();
AddComment
(
R"DOC(
Sequence Softmax Operator.
...
...
@@ -130,7 +135,8 @@ class SequenceSoftmaxGradOp : public framework::OperatorWithKernel {
framework
::
OpKernelType
GetExpectedKernelType
(
const
framework
::
ExecutionContext
&
ctx
)
const
override
{
// choose cudnn kernel if the runtime supported.
bool
use_cudnn
=
ctx
.
Attr
<
bool
>
(
"use_cudnn"
);
bool
use_cudnn
=
ctx
.
HasAttr
(
"use_cudnn"
)
?
ctx
.
Attr
<
bool
>
(
"use_cudnn"
)
:
false
;
bool
runtime_cudnn_support
=
false
;
#if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
if
(
platform
::
is_gpu_place
(
ctx
.
GetPlace
()))
{
...
...
@@ -143,7 +149,9 @@ class SequenceSoftmaxGradOp : public framework::OperatorWithKernel {
if
(
use_cudnn
&&
runtime_cudnn_support
)
{
library_
=
framework
::
LibraryType
::
kCUDNN
;
}
std
::
string
data_format
=
ctx
.
Attr
<
std
::
string
>
(
"data_format"
);
std
::
string
data_format
=
ctx
.
HasAttr
(
"data_format"
)
?
ctx
.
Attr
<
std
::
string
>
(
"data_format"
)
:
"AnyLayout"
;
return
framework
::
OpKernelType
(
OperatorWithKernel
::
IndicateVarDataType
(
ctx
,
"Out"
),
ctx
.
GetPlace
(),
framework
::
StringToDataLayout
(
data_format
),
library_
);
...
...
编辑
预览
Markdown
is supported
0%
请重试
或
添加新附件
.
添加附件
取消
You are about to add
0
people
to the discussion. Proceed with caution.
先完成此消息的编辑!
取消
想要评论请
注册
或
登录