s920243400 / PaddleDetection (forked from PaddlePaddle / PaddleDetection)
Commit 62a98210 (unverified)
Merge pull request #13346 from tensor-tang/refine/infershape

Refine/infershape

Authored Sep 12, 2018 by tensor-tang; committed via GitHub on Sep 12, 2018
Parents: 6abe03be, b0b5f515

Showing 4 changed files with 48 additions and 51 deletions (+48, -51):
paddle/fluid/framework/operator.cc           +17 -17
paddle/fluid/operators/attention_lstm_op.cc  +11 -11
paddle/fluid/operators/fusion_gru_op.cc       +8  -8
paddle/fluid/operators/fusion_lstm_op.cc     +12 -15
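Summary of the change set: RuntimeInferShapeContext::HasInput/HasOutput in operator.cc are rewritten to resolve an input or output slot with a single lookup in the operator's input/output maps, and the PADDLE_ENFORCE messages in the attention_lstm, fusion_gru, and fusion_lstm operators are reworded to match the "exactly one variable per slot" semantics those checks now enforce.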
paddle/fluid/framework/operator.cc

@@ -464,35 +464,35 @@ class RuntimeInferShapeContext : public InferShapeContext {
       : op_(op), scope_(scope) {}

   bool HasInput(const std::string& name) const override {
-    if (!op_.HasInputs(name)) {
+    // has only one input
+    const auto& ins = op_.Inputs();
+    auto it = ins.find(name);
+    if (it == ins.end()) {
       return false;
     }
-    auto& ins = Inputs(name);
-    size_t length = ins.size();
-    if (length == 0) {
+    const auto& in = it->second;
+    if (in.size() == 0 || in[0] == kEmptyVarName) {
       return false;
     }
-    PADDLE_ENFORCE_EQ(length, 1UL,
+    PADDLE_ENFORCE_EQ(in.size(), 1UL,
                       "Input %s should not have more than one inputs", name);
-    auto ipt = ins[0];
-    auto* var = ipt == kEmptyVarName ? nullptr : scope_.FindVar(ipt);
-    return var != nullptr;
+    return scope_.FindVar(in[0]) != nullptr;
   }

   bool HasOutput(const std::string& name) const override {
-    if (!op_.HasOutputs(name)) {
+    // has only one output
+    const auto& outs = op_.Outputs();
+    auto it = outs.find(name);
+    if (it == outs.end()) {
       return false;
     }
-    auto& outs = Outputs(name);
-    size_t length = outs.size();
-    if (length == 0) {
+    const auto& out = it->second;
+    if (out.size() == 0 || out[0] == kEmptyVarName) {
       return false;
     }
-    PADDLE_ENFORCE_EQ(length, 1UL,
-                      "Output %s should not have more than one inputs", name);
-    auto ipt = outs[0];
-    auto* var = ipt == kEmptyVarName ? nullptr : scope_.FindVar(ipt);
-    return var != nullptr;
+    PADDLE_ENFORCE_EQ(out.size(), 1UL,
+                      "Output %s should not have more than one outputs", name);
+    return scope_.FindVar(out[0]) != nullptr;
   }

   bool HasInputs(const std::string& name) const override {
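The rewritten HasInput/HasOutput above replace the HasInputs(name)/Inputs(name) pair (two lookups on the same map) with a single find on op_.Inputs()/op_.Outputs(): a missing key, an empty slot, or the kEmptyVarName placeholder is treated as "not provided", a slot bound to more than one variable trips the PADDLE_ENFORCE_EQ, and only a real single entry is finally resolved against the scope with FindVar. The snippet below is a minimal standalone sketch of that lookup-and-validate idiom, with hypothetical names (VarMap, has_exactly_one) and without the scope lookup; it is not Paddle's API.

```cpp
// Minimal sketch of the lookup-and-validate idiom used by the new
// HasInput/HasOutput. VarMap, kEmptyVarName and has_exactly_one are
// illustrative stand-ins, not Paddle types or functions.
#include <cassert>
#include <map>
#include <string>
#include <vector>

using VarMap = std::map<std::string, std::vector<std::string>>;

// Placeholder for an explicitly empty slot (Paddle uses a similar sentinel
// named kEmptyVarName).
const char* const kEmptyVarName = "@EMPTY@";

// True only if `name` maps to exactly one real variable name.
bool has_exactly_one(const VarMap& vars, const std::string& name) {
  auto it = vars.find(name);            // single map lookup
  if (it == vars.end()) return false;   // slot not declared at all
  const auto& entries = it->second;
  if (entries.empty() || entries[0] == kEmptyVarName) return false;  // declared but unset
  // The real code raises via PADDLE_ENFORCE_EQ here; assert() stands in.
  assert(entries.size() == 1 && "slot should not hold more than one variable");
  return true;
}

int main() {
  VarMap inputs = {{"X", {"x_tensor"}}, {"H0", {kEmptyVarName}}};
  assert(has_exactly_one(inputs, "X"));    // bound to one real variable
  assert(!has_exactly_one(inputs, "H0"));  // declared but left empty
  assert(!has_exactly_one(inputs, "C0"));  // never declared
  return 0;
}
```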
paddle/fluid/operators/attention_lstm_op.cc

@@ -24,28 +24,28 @@ namespace operators {
 void AttentionLSTMOp::InferShape(framework::InferShapeContext* ctx) const {
   PADDLE_ENFORCE(ctx->HasInput("X"),
-                 "Input(X) of AttentionLSTM should not be null.");
+                 "Assert only one Input(X) of AttentionLSTM.");
   PADDLE_ENFORCE(ctx->HasInput("C0"),
-                 "Input(C0) of AttentionLSTM should not be null.");
+                 "Assert only one Input(C0) of AttentionLSTM.");
   PADDLE_ENFORCE(ctx->HasInput("LSTMWeight"),
-                 "Input(LSTMWeight) of AttentionLSTM should not be null.");
+                 "Assert only one Input(LSTMWeight) of AttentionLSTM.");
   PADDLE_ENFORCE(ctx->HasInput("LSTMBias"),
-                 "Input(LSTMBias) of AttentionLSTM should not be null.");
+                 "Assert only one Input(LSTMBias) of AttentionLSTM.");
   PADDLE_ENFORCE(ctx->HasInput("AttentionWeight"),
-                 "Input(AttentionWeight) of AttentionLSTM should not be null.");
+                 "Assert only one Input(AttentionWeight) of AttentionLSTM.");
   PADDLE_ENFORCE(ctx->HasOutput("Hidden"),
-                 "Output(Hidden) of AttentionLSTM should not be null.");
+                 "Assert only one Output(Hidden) of AttentionLSTM.");
   PADDLE_ENFORCE(ctx->HasOutput("Cell"),
-                 "Output(Cell) of AttentionLSTM should not be null.");
+                 "Assert only one Output(Cell) of AttentionLSTM.");
   PADDLE_ENFORCE(ctx->HasOutput("AttentionedX"),
-                 "Output(AttentionedX) of AttentionLSTM should not be null.");
+                 "Assert only one Output(AttentionedX) of AttentionLSTM.");
   PADDLE_ENFORCE(ctx->HasOutput("AttentionFCOut"),
-                 "Output(AttentionFCOut) of AttentionLSTM should not be null.");
+                 "Assert only one Output(AttentionFCOut) of AttentionLSTM.");
   PADDLE_ENFORCE(ctx->HasOutput("LSTMX"),
-                 "Output(LSTMX) of AttentionLSTM should not be null.");
+                 "Assert only one Output(LSTMX) of AttentionLSTM.");
   PADDLE_ENFORCE(ctx->HasOutput("LSTMOUT"),
-                 "Output(LSTMOUT) of AttentionLSTM should not be null.");
+                 "Assert only one Output(LSTMOUT) of AttentionLSTM.");

   auto x_dims = ctx->GetInputDim("X");
   const int M = x_dims[1];
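In attention_lstm_op.cc, and likewise in the two fusion ops below, only the PADDLE_ENFORCE message strings change: "Input(X) of AttentionLSTM should not be null." becomes "Assert only one Input(X) of AttentionLSTM.", reflecting that the refactored HasInput/HasOutput now also reject slots bound to more than one variable.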
paddle/fluid/operators/fusion_gru_op.cc

@@ -25,14 +25,14 @@ namespace paddle {
 namespace operators {

 void FusionGRUOp::InferShape(framework::InferShapeContext* ctx) const {
-  PADDLE_ENFORCE(ctx->HasInput("X"), "Input(X) of GRU should not be null.");
+  PADDLE_ENFORCE(ctx->HasInput("X"), "Assert only one Input(X) of GRU.");
   PADDLE_ENFORCE(ctx->HasInput("WeightX"),
-                 "Input(WeightX) of GRU should not be null.");
+                 "Assert only one Input(WeightX) of GRU.");
   PADDLE_ENFORCE(ctx->HasInput("WeightH"),
-                 "Input(WeightH) of GRU should not be null.");
-  PADDLE_ENFORCE(ctx->HasOutput("XX"), "Output(XX) of GRU should not be null.");
+                 "Assert only one Input(WeightH) of GRU.");
+  PADDLE_ENFORCE(ctx->HasOutput("XX"), "Assert only one Output(XX) of GRU.");
   PADDLE_ENFORCE(ctx->HasOutput("Hidden"),
-                 "Output(Hidden) of GRU should not be null.");
+                 "Assert only one Output(Hidden) of GRU.");

   auto x_dims = ctx->GetInputDim("X");
   PADDLE_ENFORCE_EQ(x_dims.size(), 2, "Input(X)'s rank must be 2.");

@@ -80,11 +80,11 @@ void FusionGRUOp::InferShape(framework::InferShapeContext* ctx) const {
   } else {
     xx_width = x_dims[1] > wx_dims[1] ? wx_dims[1] : x_dims[1];
     PADDLE_ENFORCE(ctx->HasOutput("ReorderedH0"),
-                   "Output(ReorderedH0) of GRU should not be null.");
+                   "Assert only one Output(ReorderedH0) of GRU.");
     PADDLE_ENFORCE(ctx->HasOutput("BatchedInput"),
-                   "Output(BatchedInput) of GRU should not be null.");
+                   "Assert only one Output(BatchedInput) of GRU.");
     PADDLE_ENFORCE(ctx->HasOutput("BatchedOut"),
-                   "Output(BatchedOut) of GRU should not be null.");
+                   "Assert only one Output(BatchedOut) of GRU.");
     ctx->SetOutputDim("BatchedInput", {x_dims[0], wx_dims[1]});
     ctx->SetOutputDim("BatchedOut", out_dims);
   }
paddle/fluid/operators/fusion_lstm_op.cc

@@ -24,20 +24,17 @@ namespace paddle {
 namespace operators {

 void FusionLSTMOp::InferShape(framework::InferShapeContext* ctx) const {
-  PADDLE_ENFORCE(ctx->HasInput("X"), "Input(X) of LSTM should not be null.");
+  PADDLE_ENFORCE(ctx->HasInput("X"), "Assert only one Input(X) of LSTM.");
   PADDLE_ENFORCE(ctx->HasInput("WeightX"),
-                 "Input(WeightX) of LSTM should not be null.");
+                 "Assert only one Input(WeightX) of LSTM.");
   PADDLE_ENFORCE(ctx->HasInput("WeightH"),
-                 "Input(WeightH) of LSTM should not be null.");
-  PADDLE_ENFORCE(ctx->HasInput("Bias"),
-                 "Input(Bias) of LSTM should not be null.");
-  PADDLE_ENFORCE(ctx->HasOutput("XX"),
-                 "Output(XX) of LSTM should not be null.");
+                 "Assert only one Input(WeightH) of LSTM.");
+  PADDLE_ENFORCE(ctx->HasInput("Bias"), "Assert only one Input(Bias) of LSTM.");
+  PADDLE_ENFORCE(ctx->HasOutput("XX"), "Assert only one Output(XX) of LSTM.");
   PADDLE_ENFORCE(ctx->HasOutput("Hidden"),
-                 "Output(Hidden) of LSTM should not be null.");
+                 "Assert only one Output(Hidden) of LSTM.");
   PADDLE_ENFORCE(ctx->HasOutput("Cell"),
-                 "Output(Cell) of LSTM should not be null.");
+                 "Assert only one Output(Cell) of LSTM.");

   auto x_dims = ctx->GetInputDim("X");
   PADDLE_ENFORCE_EQ(x_dims.size(), 2, "Input(X)'s rank must be 2.");

@@ -96,15 +93,15 @@ void FusionLSTMOp::InferShape(framework::InferShapeContext* ctx) const {
   } else {
     xx_width = x_dims[1] > wx_dims[1] ? wx_dims[1] : x_dims[1];
     PADDLE_ENFORCE(ctx->HasOutput("BatchedInput"),
-                   "Output(BatchedInput) of LSTM should not be null.");
+                   "Assert only one Output(BatchedInput) of LSTM.");
     PADDLE_ENFORCE(ctx->HasOutput("BatchedHidden"),
-                   "Output(BatchedHidden) of LSTM should not be null.");
+                   "Assert only one Output(BatchedHidden) of LSTM.");
     PADDLE_ENFORCE(ctx->HasOutput("BatchedCell"),
-                   "Output(BatchedCell) of LSTM should not be null.");
+                   "Assert only one Output(BatchedCell) of LSTM.");
     PADDLE_ENFORCE(ctx->HasOutput("ReorderedH0"),
-                   "Output(ReorderedH0) of LSTM should not be null.");
+                   "Assert only one Output(ReorderedH0) of LSTM");
     PADDLE_ENFORCE(ctx->HasOutput("ReorderedC0"),
-                   "Output(ReorderedC0) of LSTM should not be null.");
+                   "Assert only one Output(ReorderedC0) of LSTM.");
     ctx->SetOutputDim("BatchedInput", {x_dims[0], wx_dims[1]});
     ctx->SetOutputDim("BatchedHidden", out_dims);
     ctx->SetOutputDim("BatchedCell", out_dims);