PaddlePaddle / Paddle
Commit 90886443
Authored Sep 15, 2017 by dangqingqing

Merge branch 'develop' of https://github.com/PaddlePaddle/Paddle into seq_op_test

Parents: d6a0280e, bc55c20f
Showing 44 changed files with 480 additions and 159 deletions (+480 −159)
paddle/gserver/layers/MKLDNNConvLayer.cpp                          +5   −4
paddle/gserver/layers/MKLDNNFcLayer.cpp                            +181 −96
paddle/gserver/layers/MKLDNNFcLayer.h                              +59  −0
paddle/math/MKLDNNMatrix.h                                         +6   −5
paddle/operators/accuracy_op.cc                                    +8   −3
paddle/operators/add_op.cc                                         +7   −0
paddle/operators/concat_op.cc                                      +3   −0
paddle/operators/cond_op.cc                                        +16  −5
paddle/operators/cos_sim_op.cc                                     +10  −2
paddle/operators/elementwise_mul_op.cc                             +8   −2
paddle/operators/elementwise_mul_op.h                              +0   −2
paddle/operators/fill_zeros_like_op.cc                             +7   −0
paddle/operators/gather_op.cc                                      +7   −0
paddle/operators/gaussian_random_op.cc                             +6   −2
paddle/operators/identity_op.cc                                    +5   −0
paddle/operators/lookup_table_op.cc                                +11  −4
paddle/operators/mean_op.cc                                        +3   −1
paddle/operators/minus_op.cc                                       +7   −2
paddle/operators/mul_op.cc                                         +7   −0
paddle/operators/onehot_cross_entropy_op.cc                        +10  −0
paddle/operators/pad_op.cc                                         +5   −0
paddle/operators/reshape_op.cc                                     +5   −1
paddle/operators/rowwise_add_op.cc                                 +7   −0
paddle/operators/scale_op.cc                                       +5   −0
paddle/operators/scatter_op.cc                                     +9   −0
paddle/operators/sequence_avg_pool_op.cc                           +6   −3
paddle/operators/sgd_op.cc                                         +7   −0
paddle/operators/sigmoid_op.cc                                     +5   −0
paddle/operators/softmax_op.cc                                     +5   −0
paddle/operators/squared_l2_distance_op.cc                         +12  −6
paddle/operators/sum_op.cc                                         +5   −0
paddle/operators/top_k_op.cc                                       +6   −1
paddle/operators/uniform_random_op.cc                              +4   −0
python/paddle/v2/framework/tests/test_add_op.py                    +0   −0
python/paddle/v2/framework/tests/test_gaussian_random_op.py        +1   −1
python/paddle/v2/framework/tests/test_identity_op.py               +20  −0
python/paddle/v2/framework/tests/test_lookup_table_op.py           +0   −0
python/paddle/v2/framework/tests/test_minus_op.py                  +1   −1
python/paddle/v2/framework/tests/test_onehot_cross_entropy_op.py   +1   −1
python/paddle/v2/framework/tests/test_scale_op.py                  +1   −14
python/paddle/v2/framework/tests/test_sgd_op.py                    +1   −1
python/paddle/v2/framework/tests/test_sigmoid_op.py                +1   −1
python/paddle/v2/framework/tests/test_top_k_op.py                  +6   −0
python/paddle/v2/framework/tests/test_uniform_random_op.py         +1   −1
paddle/gserver/layers/MKLDNNConvLayer.cpp

@@ -285,10 +285,9 @@ void MKLDNNConvLayer::resetWgtBiasValue(
   wgt = MKLDNNMatrix::create(weight_->getW(), pd->weights_primitive_desc());
   VLOG(MKLDNN_FMTS) << "Weight value format: " << wgt->getFormat();

-  bias = nullptr;
-  if (biases_ && biases_->getW()) {
-    bias = MKLDNNMatrix::create(biases_->getW(), pd->bias_primitive_desc());
-  }
+  bias = (biases_ && biases_->getW())
+             ? MKLDNNMatrix::create(biases_->getW(), pd->bias_primitive_desc())
+             : nullptr;
 }

 void MKLDNNConvLayer::resetOutValue(
@@ -356,6 +355,7 @@ void MKLDNNConvLayer::resetBwdWgtPD(

 void MKLDNNConvLayer::resetBwdDataPD(
     std::shared_ptr<conv_bwdData::primitive_desc>& pd) {
+  pd = nullptr;
   if (inputLayers_[0]->getOutput().grad == nullptr) {
     return;
   }
@@ -476,6 +476,7 @@ void MKLDNNConvLayer::resetWgtBiasGrad(
       << "primitive desc of weight grad and value should be equal";
   VLOG(MKLDNN_FMTS) << "weight grad format: " << wgt->getFormat();

+  bias = nullptr;
   if (biasVal_ == nullptr) {
     return;
   }
paddle/gserver/layers/MKLDNNFcLayer.cpp

@@ -17,9 +17,6 @@ limitations under the License. */
 using namespace mkldnn;  // NOLINT
 typedef memory::format format;
-typedef inner_product_forward fc_fwd;
-typedef inner_product_backward_weights fc_bwdWgt;
-typedef inner_product_backward_data fc_bwdData;

 namespace paddle {
@@ -93,35 +90,88 @@ void MKLDNNFcLayer::reshape(
   printSizeInfo();
 }

-void MKLDNNFcLayer::resetFwd(std::vector<mkldnn::primitive>& pipeline,
+void MKLDNNFcLayer::resetFwd(std::vector<primitive>& pipeline,
                              MKLDNNMatrixPtr& in,
                              MKLDNNMatrixPtr& wgt,
                              MKLDNNMatrixPtr& bias,
                              MKLDNNMatrixPtr& out) {
-  pipeline.clear();
-  bool hasBias = biases_ && biases_->getW();
-  const MatrixPtr& wgtVal = weight_->getW();
-  const MatrixPtr& biasVal = hasBias ? biases_->getW() : nullptr;
-  const MatrixPtr& outVal = output_.value;
+  resetFwdBuffers(in, wgt, bias, out);
+
+  resetFwdPD(fwdPD_, in, wgt, bias, out);
+
+  resetFwdPipeline(pipeline, fwdPD_, in, wgt, bias, out);
+
+  printValueFormatFlow();
+}
+
+void MKLDNNFcLayer::resetBwd(std::vector<primitive>& pipeline,
+                             MKLDNNMatrixPtr& in,
+                             MKLDNNMatrixPtr& wgt,
+                             MKLDNNMatrixPtr& bias,
+                             MKLDNNMatrixPtr& out) {
+  std::shared_ptr<fc_bwdWgt::primitive_desc> bwdWgtPD;
+  std::shared_ptr<fc_bwdData::primitive_desc> bwdDataPD;
+
+  resetBwdBuffers(in, wgt, bias, out);
+
+  resetBwdWgtPD(bwdWgtPD, wgt, bias, out);
+
+  resetBwdDataPD(bwdDataPD, in, out);
+
+  resetBwdPipeline(pipeline, bwdWgtPD, bwdDataPD, in, wgt, bias, out);
+
+  printGradFormatFlow();
+}
+
+void MKLDNNFcLayer::updateInputData() {
+  inVal_->setData(getInputValue(0, CPU_DEVICE)->getData());
+}
+
+void MKLDNNFcLayer::updateWeights(const UpdateCallback& callback) {
+  weight_->getParameterPtr()->incUpdate(callback);
+  if (biases_ && biases_->getWGrad()) {
+    biases_->getParameterPtr()->incUpdate(callback);
+  }
+}
+
+void MKLDNNFcLayer::resetFwdBuffers(MKLDNNMatrixPtr& in,
+                                    MKLDNNMatrixPtr& wgt,
+                                    MKLDNNMatrixPtr& bias,
+                                    MKLDNNMatrixPtr& out) {
+  resetInValue(in);
+
+  resetWgtBiasValue(wgt, bias);
+
+  resetOutValue(out);
+}

+void MKLDNNFcLayer::resetInValue(MKLDNNMatrixPtr& in) {
   if (inputIsOnlyMKLDNN()) {
-    const MatrixPtr& inVal = getInputValue(0);
-    in = std::dynamic_pointer_cast<MKLDNNMatrix>(inVal);
+    const MatrixPtr& dnnIn = getInputValue(0);
+    in = std::dynamic_pointer_cast<MKLDNNMatrix>(dnnIn);
     CHECK(in) << "Input should be MKLDNNMatrix";
   } else {
     CHECK_EQ(getPrev(0)->getDeviceId(), CPU_DEVICE) << "Only support CPU yet";
-    const MatrixPtr& inVal = getInputValue(0, CPU_DEVICE);
+    const MatrixPtr& cpuIn = getInputValue(0, CPU_DEVICE);
     in = MKLDNNMatrix::create(
-        inVal, memory::dims{bs_, ic_, ih_, iw_}, format::nchw, engine_);
+        cpuIn, {bs_, ic_, ih_, iw_}, format::nchw, engine_);
   }
   in->downSpatial();
+}

+void MKLDNNFcLayer::resetWgtBiasValue(MKLDNNMatrixPtr& wgt,
+                                      MKLDNNMatrixPtr& bias) {
   wgt = MKLDNNMatrix::create(
-      wgtVal, memory::dims{oc_, ic_, ih_, iw_}, format::oihw, engine_);
+      weight_->getW(), {oc_, ic_, ih_, iw_}, format::oihw, engine_);
   wgt->downSpatial();
-  bias = hasBias ? MKLDNNMatrix::create(biasVal, {oc_}, format::x, engine_)
-                 : nullptr;
-  out = MKLDNNMatrix::create(outVal, {bs_, oc_}, format::nc, engine_);

+  bias = (biases_ && biases_->getW())
+             ? MKLDNNMatrix::create(biases_->getW(), {oc_}, format::x, engine_)
+             : nullptr;
+}
+
+void MKLDNNFcLayer::resetOutValue(MKLDNNMatrixPtr& out) {
+  out = MKLDNNMatrix::create(output_.value, {bs_, oc_}, format::nc, engine_);
   // change original output value to mkldnn output value
   output_.value = std::dynamic_pointer_cast<Matrix>(out);
   if (!outputIsOnlyMKLDNN()) {
@@ -129,46 +179,59 @@ void MKLDNNFcLayer::resetFwd(std::vector<mkldnn::primitive>& pipeline,
     // just share point
     getOutput(CPU_DEVICE).value->setData(output_.value->getData());
   }
+}

+// create forward handle
+void MKLDNNFcLayer::resetFwdPD(std::shared_ptr<fc_fwd::primitive_desc>& pd,
+                               MKLDNNMatrixPtr in,
+                               MKLDNNMatrixPtr wgt,
+                               MKLDNNMatrixPtr bias,
+                               MKLDNNMatrixPtr out) {
+  CHECK(in);
+  CHECK(wgt);
+  CHECK(out);
   prop_kind pk = prop_kind::forward;
-  fc_fwd::desc fwdDesc = hasBias ? fc_fwd::desc(pk,
-                                                in->getMemoryDesc(),
-                                                wgt->getMemoryDesc(),
-                                                bias->getMemoryDesc(),
-                                                out->getMemoryDesc())
-                                 : fc_fwd::desc(pk,
-                                                in->getMemoryDesc(),
-                                                wgt->getMemoryDesc(),
-                                                out->getMemoryDesc());
-  fc_fwd::primitive_desc fwdPD = fc_fwd::primitive_desc(fwdDesc, engine_);
-
-  if (hasBias) {
-    fwd_.reset(new fc_fwd(fwdPD, *in, *wgt, *bias, *out));
+  fc_fwd::desc fwdDesc = bias != nullptr ? fc_fwd::desc(pk,
+                                                        in->getMemoryDesc(),
+                                                        wgt->getMemoryDesc(),
+                                                        bias->getMemoryDesc(),
+                                                        out->getMemoryDesc())
+                                         : fc_fwd::desc(pk,
+                                                        in->getMemoryDesc(),
+                                                        wgt->getMemoryDesc(),
+                                                        out->getMemoryDesc());
+  pd.reset(new fc_fwd::primitive_desc(fwdDesc, engine_));
+}
+
+void MKLDNNFcLayer::resetFwdPipeline(
+    std::vector<primitive>& pipeline,
+    std::shared_ptr<fc_fwd::primitive_desc>& pd,
+    MKLDNNMatrixPtr& in,
+    MKLDNNMatrixPtr& wgt,
+    MKLDNNMatrixPtr& bias,
+    MKLDNNMatrixPtr& out) {
+  pipeline.clear();
+
+  if (bias) {
+    fwd_.reset(new fc_fwd(*pd, *in, *wgt, *bias, *out));
   } else {
-    fwd_.reset(new fc_fwd(fwdPD, *in, *wgt, *out));
+    fwd_.reset(new fc_fwd(*pd, *in, *wgt, *out));
   }
-  printValueFormatFlow();
   pipeline.push_back(*fwd_);
 }

-void MKLDNNFcLayer::resetBwd(std::vector<mkldnn::primitive>& pipeline,
-                             MKLDNNMatrixPtr& in,
-                             MKLDNNMatrixPtr& wgt,
-                             MKLDNNMatrixPtr& bias,
-                             MKLDNNMatrixPtr& out) {
-  pipeline.clear();
-  if (!needResetBwd_) {
-    return;
-  }
-  needResetBwd_ = false;
-  bool hasBias = biases_ && biases_->getWGrad();
+void MKLDNNFcLayer::resetBwdBuffers(MKLDNNMatrixPtr& in,
+                                    MKLDNNMatrixPtr& wgt,
+                                    MKLDNNMatrixPtr& bias,
+                                    MKLDNNMatrixPtr& out) {
+  resetOutGrad(out);

-  /// backward weight
-  CHECK(inVal_) << "Should have input value";
-  const MatrixPtr& wgtGrad = weight_->getWGrad();
-  const MatrixPtr& biasGrad = hasBias ? biases_->getWGrad() : nullptr;
+  resetWgtBiasGrad(wgt, bias);

+  resetInGrad(in);
+}
+
+void MKLDNNFcLayer::resetOutGrad(MKLDNNMatrixPtr& out) {
   // TODO(TJ): merge outgrad
   int device = outputIsOnlyMKLDNN() ? MKLDNN_DEVICE : CPU_DEVICE;
   // for MKLDNN device:
@@ -178,66 +241,88 @@ void MKLDNNFcLayer::resetBwd(std::vector<mkldnn::primitive>& pipeline,
   // for CPU device:
   // fc do not need to convert from cpu device since output is always nc format
   // only need create from cpu device
-  const MatrixPtr& outGrad = getOutput(device).grad;
-  out = MKLDNNMatrix::create(outGrad, outVal_->getPrimitiveDesc());
-  wgt = MKLDNNMatrix::create(wgtGrad, wgtVal_->getPrimitiveDesc());
-  bias = hasBias ? MKLDNNMatrix::create(biasGrad, biasVal_->getPrimitiveDesc())
-                 : nullptr;
-
-  // create memory primitive desc
-  fc_fwd::desc fwdDesc = fc_fwd::desc(prop_kind::forward,
-                                      inVal_->getMemoryDesc(),
-                                      wgt->getMemoryDesc(),
-                                      out->getMemoryDesc());
-  fc_fwd::primitive_desc fwdPD = fc_fwd::primitive_desc(fwdDesc, engine_);
-  fc_bwdWgt::desc bwdWgtDesc = hasBias
-                                   ? fc_bwdWgt::desc(inVal_->getMemoryDesc(),
-                                                     wgt->getMemoryDesc(),
-                                                     bias->getMemoryDesc(),
-                                                     out->getMemoryDesc())
-                                   : fc_bwdWgt::desc(inVal_->getMemoryDesc(),
-                                                     wgt->getMemoryDesc(),
-                                                     out->getMemoryDesc());
-  fc_bwdWgt::primitive_desc bwdWgtPD =
-      fc_bwdWgt::primitive_desc(bwdWgtDesc, engine_, fwdPD);
-
-  if (hasBias) {
-    bwdWgt_.reset(new fc_bwdWgt(bwdWgtPD, *inVal_, *out, *wgt, *bias));
-  } else {
-    bwdWgt_.reset(new fc_bwdWgt(bwdWgtPD, *inVal_, *out, *wgt));
-  }
-  pipeline.push_back(*bwdWgt_);
-
-  /// backward data
-  const MatrixPtr& inGrad = inputLayers_[0]->getOutput().grad;
-  if (inGrad == nullptr) {
-    return;
-  }
-  // TODO(TJ): use outputMaps_ ways to get the inGrad_ when merge outgrad done
-  in = MKLDNNMatrix::create(inGrad, inVal_->getPrimitiveDesc());
-
-  fc_bwdData::desc bwdDataDesc = fc_bwdData::desc(inVal_->getMemoryDesc(),
-                                                  wgt->getMemoryDesc(),
-                                                  out->getMemoryDesc());
-  fc_bwdData::primitive_desc bwdDataPD =
-      fc_bwdData::primitive_desc(bwdDataDesc, engine_, fwdPD);
-
-  CHECK(wgtVal_) << "Should have weight memory";
-  bwdData_.reset(new fc_bwdData(bwdDataPD, *out, *wgtVal_, *in));
-  printGradFormatFlow();
-  pipeline.push_back(*bwdData_);
+  CHECK(outVal_);
+  out =
+      MKLDNNMatrix::create(getOutput(device).grad, outVal_->getPrimitiveDesc());
 }

-void MKLDNNFcLayer::updateInputData() {
-  inVal_->setData(getInputValue(0, CPU_DEVICE)->getData());
+void MKLDNNFcLayer::resetWgtBiasGrad(MKLDNNMatrixPtr& wgt,
+                                     MKLDNNMatrixPtr& bias) {
+  CHECK(wgtVal_);
+  wgt = MKLDNNMatrix::create(weight_->getWGrad(), wgtVal_->getPrimitiveDesc());
+
+  bias = nullptr;
+  if (biasVal_ == nullptr) {
+    return;
+  }
+  bias = MKLDNNMatrix::create(biases_->getWGrad(), biasVal_->getPrimitiveDesc());
 }

-void MKLDNNFcLayer::updateWeights(const UpdateCallback& callback) {
-  weight_->getParameterPtr()->incUpdate(callback);
-  if (biases_ && biases_->getWGrad()) {
-    biases_->getParameterPtr()->incUpdate(callback);
+void MKLDNNFcLayer::resetInGrad(MKLDNNMatrixPtr& in) {
+  in = nullptr;
+  const MatrixPtr& inGrad = inputLayers_[0]->getOutput().grad;
+  if (inGrad == nullptr) {
+    return;
+  }
+  if (getInput(0, MKLDNN_DEVICE).getAllCount() > 1) {
+    // TODO(TJ): use outputMaps_ ways to get the inGrad_ when merge outgrad done
+    CHECK(inVal_);
+  } else {
+    in = MKLDNNMatrix::create(inGrad, inVal_->getPrimitiveDesc());
   }
 }
+
+void MKLDNNFcLayer::resetBwdWgtPD(
+    std::shared_ptr<fc_bwdWgt::primitive_desc>& pd,
+    MKLDNNMatrixPtr& wgt,
+    MKLDNNMatrixPtr& bias,
+    MKLDNNMatrixPtr& out) {
+  CHECK(inVal_);
+  fc_bwdWgt::desc bwdWgtDesc = bias ? fc_bwdWgt::desc(inVal_->getMemoryDesc(),
+                                                      wgt->getMemoryDesc(),
+                                                      bias->getMemoryDesc(),
+                                                      out->getMemoryDesc())
+                                    : fc_bwdWgt::desc(inVal_->getMemoryDesc(),
+                                                      wgt->getMemoryDesc(),
+                                                      out->getMemoryDesc());
+  pd.reset(new fc_bwdWgt::primitive_desc(bwdWgtDesc, engine_, *fwdPD_));
+}
+
+void MKLDNNFcLayer::resetBwdDataPD(
+    std::shared_ptr<fc_bwdData::primitive_desc>& pd,
+    MKLDNNMatrixPtr& in,
+    MKLDNNMatrixPtr& out) {
+  pd = nullptr;
+  if (in == nullptr) {
+    return;
+  }
+  CHECK(wgtVal_);
+  fc_bwdData::desc bwdDataDesc = fc_bwdData::desc(
+      in->getMemoryDesc(), wgtVal_->getMemoryDesc(), out->getMemoryDesc());
+  pd.reset(new fc_bwdData::primitive_desc(bwdDataDesc, engine_, *fwdPD_));
+}
+
+void MKLDNNFcLayer::resetBwdPipeline(
+    std::vector<primitive>& pipeline,
+    std::shared_ptr<fc_bwdWgt::primitive_desc>& bwdWgtPD,
+    std::shared_ptr<fc_bwdData::primitive_desc>& bwdDataPD,
+    MKLDNNMatrixPtr& in,
+    MKLDNNMatrixPtr& wgt,
+    MKLDNNMatrixPtr& bias,
+    MKLDNNMatrixPtr& out) {
+  pipeline.clear();
+  CHECK(inVal_);
+  if (bias) {
+    bwdWgt_.reset(new fc_bwdWgt(*bwdWgtPD, *inVal_, *out, *wgt, *bias));
+  } else {
+    bwdWgt_.reset(new fc_bwdWgt(*bwdWgtPD, *inVal_, *out, *wgt));
+  }
+  pipeline.push_back(*bwdWgt_);
+
+  if (bwdDataPD == nullptr) {
+    return;
+  }
+  CHECK(wgtVal_) << "Should have weight memory";
+  bwdData_.reset(new fc_bwdData(*bwdDataPD, *out, *wgtVal_, *in));
+  pipeline.push_back(*bwdData_);
+}

 }  // namespace paddle
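After this refactor, resetFwd() and resetBwd() are thin drivers: they rebuild the buffers, then the primitive descriptors, then the primitive pipeline, and the layer only has to submit that pipeline each iteration. A minimal sketch of the consuming side (not part of this diff), assuming the MKL-DNN 0.x stream API that these std::vector<mkldnn::primitive> pipelines are written against:

    #include <vector>
    #include "mkldnn.hpp"

    // Run a rebuilt pipeline once: an eager stream executes the queued
    // primitives immediately, in the order the reset* functions pushed them.
    void submitPipeline(const std::vector<mkldnn::primitive>& pipeline) {
      mkldnn::stream(mkldnn::stream::kind::eager).submit(pipeline).wait();
    }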
paddle/gserver/layers/MKLDNNFcLayer.h

@@ -18,6 +18,9 @@ limitations under the License. */
 #include "mkldnn.hpp"

 namespace paddle {
+typedef mkldnn::inner_product_forward fc_fwd;
+typedef mkldnn::inner_product_backward_weights fc_bwdWgt;
+typedef mkldnn::inner_product_backward_data fc_bwdData;

 /**
  * @brief A subclass of MKLDNNLayer fc layer.
@@ -32,6 +35,9 @@ protected:
   // if has already init the weight
   bool hasInitedWgt_;

+  // save forward primitive_desc, which can be used backward
+  std::shared_ptr<fc_fwd::primitive_desc> fwdPD_;
+
   // fc weight and bias
   std::unique_ptr<Weight> weight_;
   std::unique_ptr<Weight> biases_;
@@ -67,6 +73,59 @@ public:
   void convertWeightsFromPaddle() override;

   void convertWeightsToPaddle() override;
+
+protected:
+  /**
+   * Forward functions: reset buffers(input, output, weight and bias),
+   *                    reset primitive descriptor,
+   *                    reset pipeline.
+   */
+  void resetFwdBuffers(MKLDNNMatrixPtr& in,
+                       MKLDNNMatrixPtr& wgt,
+                       MKLDNNMatrixPtr& bias,
+                       MKLDNNMatrixPtr& out);
+  void resetInValue(MKLDNNMatrixPtr& in);
+  void resetWgtBiasValue(MKLDNNMatrixPtr& wgt, MKLDNNMatrixPtr& bias);
+  void resetOutValue(MKLDNNMatrixPtr& out);
+  void resetFwdPD(std::shared_ptr<fc_fwd::primitive_desc>& pd,
+                  MKLDNNMatrixPtr in,
+                  MKLDNNMatrixPtr wgt,
+                  MKLDNNMatrixPtr bias,
+                  MKLDNNMatrixPtr out);
+  void resetFwdPipeline(std::vector<mkldnn::primitive>& pipeline,
+                        std::shared_ptr<fc_fwd::primitive_desc>& pd,
+                        MKLDNNMatrixPtr& in,
+                        MKLDNNMatrixPtr& wgt,
+                        MKLDNNMatrixPtr& bias,
+                        MKLDNNMatrixPtr& out);
+
+  /**
+   * Backward functions: reset buffers(input, output, weight and bias),
+   *                     reset primitive descriptor for backward weight,
+   *                     reset primitive descriptor for backward data,
+   *                     reset pipeline.
+   */
+  void resetBwdBuffers(MKLDNNMatrixPtr& in,
+                       MKLDNNMatrixPtr& wgt,
+                       MKLDNNMatrixPtr& bias,
+                       MKLDNNMatrixPtr& out);
+  void resetOutGrad(MKLDNNMatrixPtr& out);
+  void resetWgtBiasGrad(MKLDNNMatrixPtr& wgt, MKLDNNMatrixPtr& bias);
+  void resetInGrad(MKLDNNMatrixPtr& in);
+  void resetBwdWgtPD(std::shared_ptr<fc_bwdWgt::primitive_desc>& pd,
+                     MKLDNNMatrixPtr& wgt,
+                     MKLDNNMatrixPtr& bias,
+                     MKLDNNMatrixPtr& out);
+  void resetBwdDataPD(std::shared_ptr<fc_bwdData::primitive_desc>& pd,
+                      MKLDNNMatrixPtr& in,
+                      MKLDNNMatrixPtr& out);
+  void resetBwdPipeline(std::vector<mkldnn::primitive>& pipeline,
+                        std::shared_ptr<fc_bwdWgt::primitive_desc>& bwdWgtPD,
+                        std::shared_ptr<fc_bwdData::primitive_desc>& bwdDataPD,
+                        MKLDNNMatrixPtr& in,
+                        MKLDNNMatrixPtr& wgt,
+                        MKLDNNMatrixPtr& bias,
+                        MKLDNNMatrixPtr& out);
 };

 }  // namespace paddle
paddle/math/MKLDNNMatrix.h

@@ -66,11 +66,12 @@ public:
   /**
    * Create reorder primitive.
    * Create a mkldnn::reorder handle for converting src MKLDNNMatrix to dst.
-   * checkData: for whether to check the data handle of src and dst is the same.
-   *            if true, means check it and do not want support inplace reorder;
-   *            otherwise do not check data which means the created reorder
-   *            maybe inplace buffer and do not guarantee the logical is correct
-   *            since not all format or conversion support inplace.
+   * checkData: whether to check the data handle of src and dst.
+   *            if true, it will check the data and do not allow them equal;
+   *            otherwise, it will not check them, then the reorder created
+   *            may have inplace buffer.
+   *            Do not set false, if you can not guarantee the inplace logical
+   *            would work with your reorder.
    */
   static std::shared_ptr<mkldnn::reorder> createReorder(
       const MKLDNNMatrixPtr& src,
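The rewritten comment tightens the createReorder contract: with checkData enabled the reorder refuses a src and dst that share one data handle, because not every format conversion supports in-place buffers. A hypothetical usage sketch follows (the trailing parameters of createReorder are truncated in this diff; a dst matrix and a defaulted checkData flag are assumed here):

    // Append a format conversion to an MKLDNN pipeline, keeping the
    // documented safe default of checkData == true.
    void appendConversion(std::vector<mkldnn::primitive>& pipeline,
                          const MKLDNNMatrixPtr& src,
                          const MKLDNNMatrixPtr& dst) {
      auto cvt = MKLDNNMatrix::createReorder(src, dst);
      if (cvt) {
        pipeline.push_back(*cvt);  // reorder src into dst's format
      }
    }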
paddle/operators/accuracy_op.cc

@@ -23,10 +23,15 @@ class AccuracyOp : public framework::OperatorWithKernel {
 protected:
  void InferShape(const framework::InferShapeContext &ctx) const override {
-    PADDLE_ENFORCE_NOT_NULL(ctx.InputVar("Inference"),
-                            "Input of Inference must be initialized.");
-    PADDLE_ENFORCE_NOT_NULL(ctx.InputVar("Label"),
-                            "Input of Inference must be initialized.");
+    PADDLE_ENFORCE_NOT_NULL(
+        ctx.InputVar("Inference"),
+        "Input(Inference) of AccuracyOp should not be null.");
+    PADDLE_ENFORCE_NOT_NULL(ctx.InputVar("Label"),
+                            "Input(Label) of AccuracyOp should not be null.");
+    PADDLE_ENFORCE_NOT_NULL(
+        ctx.OutputVar("Accuracy"),
+        "Output(Accuracy) of AccuracyOp should not be null.");
+
     auto *inference = ctx.Input<framework::Tensor>("Inference");
     auto *label = ctx.Input<framework::Tensor>("Label");
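Most of the operator changes below repeat the convention introduced in accuracy_op.cc: verify every declared input and output variable before any tensor is touched, and name both the variable and the operator in the failure message. Distilled to a sketch (OpName and the variable names are placeholders, not a real operator):

    void InferShape(const framework::InferShapeContext &ctx) const override {
      PADDLE_ENFORCE_NOT_NULL(ctx.InputVar("X"),
                              "Input(X) of OpName should not be null.");
      PADDLE_ENFORCE_NOT_NULL(ctx.OutputVar("Out"),
                              "Output(Out) of OpName should not be null.");
      // ... shape checks and the output Resize() follow once the vars exist.
    }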
paddle/operators/add_op.cc

@@ -23,6 +23,13 @@ class AddOp : public framework::OperatorWithKernel {
 protected:
  void InferShape(const framework::InferShapeContext &ctx) const override {
+    PADDLE_ENFORCE_NOT_NULL(ctx.InputVar("X"),
+                            "Input(X) of AddOp should not be null.");
+    PADDLE_ENFORCE_NOT_NULL(ctx.InputVar("Y"),
+                            "Input(Y) of AddOp should not be null.");
+    PADDLE_ENFORCE_NOT_NULL(ctx.OutputVar("Out"),
+                            "Output(Out) of AddOp should not be null.");
+
     PADDLE_ENFORCE_EQ(ctx.Input<Tensor>("X")->dims(),
                       ctx.Input<Tensor>("Y")->dims(),
                       "Two input of Add Op's dimension must be same.");
paddle/operators/concat_op.cc

@@ -25,6 +25,9 @@ class ConcatOp : public framework::OperatorWithKernel {
 protected:
  void InferShape(const framework::InferShapeContext &ctx) const override {
+    PADDLE_ENFORCE_NOT_NULL(ctx.OutputVar("Out"),
+                            "Output(Out) of ConcatOp should not be null.");
+
     auto ins = ctx.MultiInput<framework::Tensor>("X");
     auto *out = ctx.Output<framework::LoDTensor>("Out");
     size_t axis = static_cast<size_t>(ctx.Attr<int>("axis"));
paddle/operators/cond_op.cc

@@ -33,7 +33,8 @@ using DDim = framework::DDim;
 void CondOp::CreateScope(const Scope& scope) const {
   auto sub_scopes_var = scope.FindVar("SubScopes");
-  PADDLE_ENFORCE(sub_scopes_var != nullptr, "");
+  PADDLE_ENFORCE_NOT_NULL(sub_scopes_var,
+                          "Output(SubScopes) of CondOp should not be null.");
   auto sub_scopes = sub_scopes_var->GetMutable<std::vector<Scope*>>();
   auto& sub_scope = scope.NewScope();
   sub_scopes->push_back(&sub_scope);
@@ -41,7 +42,8 @@ void CondOp::CreateScope(const Scope& scope) const {
 void CondOp::CreateIndexTensor(const Scope& scope) const {
   auto index_tensors_var = scope.FindVar("IndexTensors");
-  PADDLE_ENFORCE(index_tensors_var != nullptr, "");
+  PADDLE_ENFORCE_NOT_NULL(index_tensors_var,
+                          "Output(IndexTensors) of CondOp should not be null.");
   auto& index_tensors =
       *index_tensors_var->GetMutable<std::vector<LoDTensor>>();
   index_tensors.push_back(LoDTensor());
@@ -49,7 +51,8 @@ void CondOp::CreateIndexTensor(const Scope& scope) const {
 void CondOp::InferShape(const Scope& scope) const {
   auto sub_scopes_var = scope.FindVar("SubScopes");
-  PADDLE_ENFORCE_NOT_NULL(sub_scopes_var);
+  PADDLE_ENFORCE_NOT_NULL(sub_scopes_var,
+                          "Output(SubScopes) of CondOp should not be null.");
   auto& sub_scopes = *sub_scopes_var->GetMutable<std::vector<Scope*>>();
   for (int i = 0; i < 2; ++i) {
@@ -63,7 +66,8 @@ void CondOp::InferShape(const Scope& scope) const {
   // branch
   CreateIndexTensor(scope);

-  PADDLE_ENFORCE(!Inputs("Xs").empty(), "Inputs can't be empty");
+  PADDLE_ENFORCE(!Inputs("Xs").empty(),
+                 "Inputs(Xs) of CondOp can't be empty.");
   for (auto& input : Inputs("Xs")) {
     // Create a new tensor in sub-scope for input-type tensor
     Variable* v = sub_scopes[i]->NewVar(input);
@@ -108,13 +112,18 @@ void CondOp::InferShape(const Scope& scope) const {
 void CondOp::Run(const Scope& scope,
                  const platform::DeviceContext& dev_ctx) const {
   auto* sub_scopes_var = scope.FindVar("SubScopes");
+  PADDLE_ENFORCE_NOT_NULL(sub_scopes_var,
+                          "Output(SubScopes) of CondOp should not be null.");
   auto sub_scopes = sub_scopes_var->Get<std::vector<Scope*>>();
   auto* index_tensors_var = scope.FindVar("IndexTensors");
+  PADDLE_ENFORCE_NOT_NULL(index_tensors_var,
+                          "Output(IndexTensors) of CondOp should not be null.");
   auto index_tensors = index_tensors_var->Get<std::vector<LoDTensor>>();

   std::string cond_name = Input("Cond");
   Variable* cond_var = scope.FindVar(cond_name);
-  PADDLE_ENFORCE_NOT_NULL(cond_var);
+  PADDLE_ENFORCE_NOT_NULL(cond_var,
+                          "Input(Cond) of CondOp should not be null.");
   const LoDTensor* cond = cond_var->GetMutable<LoDTensor>();

   // Step 1: get the true/false index at runtime
@@ -171,6 +180,8 @@ void CondOp::Run(const Scope& scope,
   }

   // Step 4: merge output results
+  PADDLE_ENFORCE(!Outputs("Outs").empty(),
+                 "Outputs(Outs) of CondOp can't be empty.");
   for (int i = 0; i < 2; ++i) {
     // i= 0/i for True and False branches respectively
     for (auto& output : Outputs("Outs")) {
paddle/operators/cos_sim_op.cc

@@ -26,8 +26,16 @@ class CosSimOp : public framework::OperatorWithKernel {
 protected:
  void InferShape(const framework::InferShapeContext &ctx) const override {
     // notnull check
-    PADDLE_ENFORCE_NOT_NULL(ctx.InputVar("X"), "Input(X) must not be null.");
-    PADDLE_ENFORCE_NOT_NULL(ctx.InputVar("Y"), "Input(Y) must not be null.");
+    PADDLE_ENFORCE_NOT_NULL(ctx.InputVar("X"),
+                            "Input(X) of CosSimOp should not be null.");
+    PADDLE_ENFORCE_NOT_NULL(ctx.InputVar("Y"),
+                            "Input(Y) of CosSimOp should not be null.");
+    PADDLE_ENFORCE_NOT_NULL(ctx.OutputVar("Out"),
+                            "Output(Out) of CosSimOp should not be null.");
+    PADDLE_ENFORCE_NOT_NULL(ctx.OutputVar("XNorm"),
+                            "Output(XNorm) of CosSimOp should not be null.");
+    PADDLE_ENFORCE_NOT_NULL(ctx.OutputVar("YNorm"),
+                            "Output(YNorm) of CosSimOp should not be null.");

     // shape check
     auto x_dims = ctx.Input<Tensor>("X")->dims();
paddle/operators/elementwise_mul_op.cc

@@ -25,8 +25,14 @@ class ElementWiseMulOp : public framework::OperatorWithKernel {
 protected:
  void InferShape(const framework::InferShapeContext &ctx) const override {
-    PADDLE_ENFORCE_NOT_NULL(ctx.InputVar("X"), "Input(X) should not be null");
-    PADDLE_ENFORCE_NOT_NULL(ctx.InputVar("Y"), "Input(Y) should not be null");
+    PADDLE_ENFORCE_NOT_NULL(
+        ctx.InputVar("X"), "Input(X) of ElementWiseMulOp should not be null.");
+    PADDLE_ENFORCE_NOT_NULL(
+        ctx.InputVar("Y"), "Input(Y) of ElementWiseMulOp should not be null.");
+    PADDLE_ENFORCE_NOT_NULL(
+        ctx.OutputVar("Out"),
+        "Output(Out) of ElementWiseMulOp should not be null.");
+
     auto x_dim = ctx.Input<Tensor>("X")->dims();
     auto y_dim = ctx.Input<Tensor>("Y")->dims();
     PADDLE_ENFORCE_GE(x_dim.size(), y_dim.size(),
paddle/operators/elementwise_mul_op.h

@@ -13,10 +13,8 @@
 limitations under the License. */

 #pragma once
-#include <iostream>
 #include "paddle/framework/eigen.h"
 #include "paddle/framework/op_registry.h"
-#include "paddle/operators/math/math_function.h"

 namespace paddle {
 namespace operators {
paddle/operators/fill_zeros_like_op.cc

@@ -23,6 +23,13 @@ class FillZerosLikeOp : public framework::OperatorWithKernel {
 protected:
  void InferShape(const framework::InferShapeContext &ctx) const override {
+    PADDLE_ENFORCE_NOT_NULL(
+        ctx.InputVar("Src"),
+        "Input(Src) of FillZerosLikeOp should not be null.");
+    PADDLE_ENFORCE_NOT_NULL(
+        ctx.OutputVar("Dst"),
+        "Output(Dst) of FillZerosLikeOp should not be null.");
+
     ctx.Output<framework::LoDTensor>("Dst")->Resize(
         ctx.Input<framework::Tensor>("Src")->dims());
   }
paddle/operators/gather_op.cc

@@ -24,6 +24,13 @@ class GatherOp : public framework::OperatorWithKernel {
 protected:
  void InferShape(const framework::InferShapeContext &ctx) const override {
+    PADDLE_ENFORCE_NOT_NULL(ctx.InputVar("X"),
+                            "Input(X) of GatherOp should not be null.");
+    PADDLE_ENFORCE_NOT_NULL(ctx.InputVar("Index"),
+                            "Input(Index) of GatherOp should not be null.");
+    PADDLE_ENFORCE_NOT_NULL(ctx.OutputVar("Out"),
+                            "Output(Out) of GatherOp should not be null.");
+
     int batch_size = ctx.Input<Tensor>("Index")->dims()[0];
     PADDLE_ENFORCE_GE(batch_size, 0, "Batch size must be >0");
     framework::DDim output_dims(ctx.Input<Tensor>("X")->dims());
paddle/operators/gaussian_random_op.cc

@@ -43,8 +43,12 @@ class GaussianRandomOp : public framework::OperatorWithKernel {
   using framework::OperatorWithKernel::OperatorWithKernel;

 protected:
-  void InferShape(const framework::InferShapeContext& context) const override {
-    auto* tensor = context.Output<framework::LoDTensor>("Out");
+  void InferShape(const framework::InferShapeContext& ctx) const override {
+    PADDLE_ENFORCE_NOT_NULL(
+        ctx.OutputVar("Out"),
+        "Output(Out) of GaussianRandomOp should not be null.");
+
+    auto* tensor = ctx.Output<framework::LoDTensor>("Out");
     auto dims = Attr<std::vector<int>>("dims");
     std::vector<int64_t> temp;
     temp.reserve(dims.size());
paddle/operators/identity_op.cc

@@ -42,6 +42,11 @@ class IdentityOp : public NetOp {
              const framework::VariableNameMap &outputs,
              const framework::AttributeMap &attrs)
       : NetOp(type, inputs, outputs, attrs) {
+    PADDLE_ENFORCE_NE(Input("X"), framework::kEmptyVarName,
+                      "Input(X) of IdentityOp should not be null.");
+    PADDLE_ENFORCE_NE(Output("Out"), framework::kEmptyVarName,
+                      "Output(Out) of IdentityOp should not be null.");
+
     AppendOp(framework::OpRegistry::CreateOp(
         "scale", {{"X", {Input("X")}}}, {{"Out", {Output("Out")}}},
         {{"scale", static_cast<AttrType>(1)}}));
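IdentityOp is a NetOp rather than an OperatorWithKernel, so no InferShapeContext is available at this point; the check therefore runs in the constructor and compares the resolved variable names against framework::kEmptyVarName, the sentinel an unbound input or output resolves to.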
paddle/operators/lookup_table_op.cc

@@ -22,10 +22,17 @@ class LookupTableOp : public framework::OperatorWithKernel {
   using framework::OperatorWithKernel::OperatorWithKernel;

 protected:
-  void InferShape(const framework::InferShapeContext &context) const override {
-    auto table_t = context.Input<Tensor>("W");
-    auto ids_t = context.Input<Tensor>("Ids");
-    auto output_t = context.Output<framework::LoDTensor>("Out");
+  void InferShape(const framework::InferShapeContext &ctx) const override {
+    PADDLE_ENFORCE_NOT_NULL(ctx.InputVar("W"),
+                            "Input(W) of LookupTableOp should not be null.");
+    PADDLE_ENFORCE_NOT_NULL(ctx.InputVar("Ids"),
+                            "Input(Ids) of LookupTableOp should not be null.");
+    PADDLE_ENFORCE_NOT_NULL(ctx.OutputVar("Out"),
+                            "Output(Out) of LookupTableOp should not be null.");
+
+    auto table_t = ctx.Input<Tensor>("W");
+    auto ids_t = ctx.Input<Tensor>("Ids");
+    auto output_t = ctx.Output<framework::LoDTensor>("Out");

     output_t->Resize({ids_t->dims()[0], table_t->dims()[1]});
   }
paddle/operators/mean_op.cc

@@ -24,7 +24,9 @@ class MeanOp : public framework::OperatorWithKernel {
 protected:
  void InferShape(const framework::InferShapeContext &ctx) const override {
     PADDLE_ENFORCE_NOT_NULL(ctx.InputVar("X"),
-                            "Input of MeanOp must be initialized.");
+                            "Input(X) of MeanOp should not be null.");
+    PADDLE_ENFORCE_NOT_NULL(ctx.OutputVar("Out"),
+                            "Output(Out) of MeanOp should not be null.");
     ctx.Output<framework::LoDTensor>("Out")->Resize({1});
   }
 };
paddle/operators/minus_op.cc

@@ -27,6 +27,13 @@ class MinusOp : public framework::OperatorWithKernel {
 protected:
  void InferShape(const framework::InferShapeContext &ctx) const override {
+    PADDLE_ENFORCE_NOT_NULL(ctx.InputVar("X"),
+                            "Input(X) of MinusOp should not be null.");
+    PADDLE_ENFORCE_NOT_NULL(ctx.InputVar("Y"),
+                            "Input(Y) of MinusOp should not be null.");
+    PADDLE_ENFORCE_NOT_NULL(ctx.OutputVar("Out"),
+                            "Output(Out) of MinusOp should not be null.");
+
     auto *left_tensor = ctx.Input<framework::Tensor>("X");
     auto *right_tensor = ctx.Input<framework::Tensor>("Y");
@@ -77,8 +84,6 @@ class MinusGradOp : public NetOp {
 }  // namespace operators
 }  // namespace paddle

-USE_OP(scale);
-USE_NO_KERNEL_OP(identity);
 namespace ops = paddle::operators;
 REGISTER_OP(minus, ops::MinusOp, ops::MinusOpMaker, minus_grad,
             ops::MinusGradOp<float>);
paddle/operators/mul_op.cc

@@ -26,6 +26,13 @@ class MulOp : public framework::OperatorWithKernel {
 protected:
  void InferShape(const framework::InferShapeContext &ctx) const override {
+    PADDLE_ENFORCE_NOT_NULL(ctx.InputVar("X"),
+                            "Input(X) of MulOp should not be null.");
+    PADDLE_ENFORCE_NOT_NULL(ctx.InputVar("Y"),
+                            "Input(Y) of MulOp should not be null.");
+    PADDLE_ENFORCE_NOT_NULL(ctx.OutputVar("Out"),
+                            "Output(Out) of MulOp should not be null.");
+
     auto x_dims = ctx.Input<Tensor>("X")->dims();
     auto y_dims = ctx.Input<Tensor>("Y")->dims();
     int x_num_col_dims = Attr<int>("x_num_col_dims");
paddle/operators/onehot_cross_entropy_op.cc

@@ -23,6 +23,16 @@ class OnehotCrossEntropyOp : public framework::OperatorWithKernel {
 protected:
  void InferShape(const framework::InferShapeContext &ctx) const override {
+    PADDLE_ENFORCE_NOT_NULL(
+        ctx.InputVar("X"),
+        "Input(X) of OnehotCrossEntropyOp should not be null.");
+    PADDLE_ENFORCE_NOT_NULL(
+        ctx.InputVar("label"),
+        "Input(label) of OnehotCrossEntropyOp should not be null.");
+    PADDLE_ENFORCE_NOT_NULL(
+        ctx.OutputVar("Y"),
+        "Output(Y) of OnehotCrossEntropyOp should not be null.");
+
     auto *X = ctx.Input<Tensor>("X");
     auto *label = ctx.Input<Tensor>("label");
paddle/operators/pad_op.cc

@@ -25,6 +25,11 @@ class PadOp : public framework::OperatorWithKernel {
 protected:
  void InferShape(const framework::InferShapeContext &ctx) const override {
+    PADDLE_ENFORCE_NOT_NULL(ctx.InputVar("X"),
+                            "Input(X) of PadOp should not be null.");
+    PADDLE_ENFORCE_NOT_NULL(ctx.OutputVar("Out"),
+                            "Output(Out) of PadOp should not be null.");
+
     auto x_dim = ctx.Input<Tensor>("X")->dims();
     auto paddings = Attr<std::vector<int>>("paddings");
     PADDLE_ENFORCE_EQ(x_dim.size() * 2, int64_t(paddings.size()),
paddle/operators/reshape_op.cc

@@ -28,7 +28,11 @@ class ReshapeOp : public framework::OperatorWithKernel {
 protected:
  void InferShape(const framework::InferShapeContext &ctx) const override {
     // input check
-    PADDLE_ENFORCE_NOT_NULL(ctx.InputVar("X"), "Input(X) shouldn't be null");
+    PADDLE_ENFORCE_NOT_NULL(ctx.InputVar("X"),
+                            "Input(X) of ReshapeOp should not be null.");
+    PADDLE_ENFORCE_NOT_NULL(ctx.OutputVar("Out"),
+                            "Output(Out) of ReshapeOp should not be null.");
+
     auto shape = ctx.Attr<std::vector<int>>("shape");
     PADDLE_ENFORCE(shape.size() > 0, "Attr(shape) shouldn't be empty.");
     for (auto dim : shape) {
paddle/operators/rowwise_add_op.cc

@@ -25,6 +25,13 @@ class RowwiseAddOp : public framework::OperatorWithKernel {
 protected:
  void InferShape(const framework::InferShapeContext &ctx) const override {
+    PADDLE_ENFORCE_NOT_NULL(ctx.InputVar("X"),
+                            "Input(X) of RowwiseAddOp should not be null.");
+    PADDLE_ENFORCE_NOT_NULL(ctx.InputVar("b"),
+                            "Input(b) of RowwiseAddOp should not be null.");
+    PADDLE_ENFORCE_NOT_NULL(ctx.OutputVar("Out"),
+                            "Output(Out) of RowwiseAddOp should not be null.");
+
     auto x_dims = ctx.Input<Tensor>("X")->dims();
     auto b_dims = ctx.Input<Tensor>("b")->dims();
     PADDLE_ENFORCE_GT(
paddle/operators/scale_op.cc

@@ -27,6 +27,11 @@ class ScaleOp : public framework::OperatorWithKernel {
 protected:
  void InferShape(const framework::InferShapeContext &ctx) const override {
+    PADDLE_ENFORCE_NOT_NULL(ctx.InputVar("X"),
+                            "Input(X) of ScaleOp should not be null.");
+    PADDLE_ENFORCE_NOT_NULL(ctx.OutputVar("Out"),
+                            "Output(Out) of ScaleOp should not be null.");
+
     auto *in = ctx.Input<framework::Tensor>("X");
     auto *out = ctx.Output<framework::LoDTensor>("Out");
     out->Resize(in->dims());
paddle/operators/scatter_op.cc

@@ -24,6 +24,15 @@ class ScatterOp : public framework::OperatorWithKernel {
 protected:
  void InferShape(const framework::InferShapeContext &ctx) const override {
+    PADDLE_ENFORCE_NOT_NULL(ctx.InputVar("Ref"),
+                            "Input(Ref) of ScatterOp should not be null.");
+    PADDLE_ENFORCE_NOT_NULL(ctx.InputVar("Index"),
+                            "Input(Index) of ScatterOp should not be null.");
+    PADDLE_ENFORCE_NOT_NULL(ctx.InputVar("Updates"),
+                            "Input(Updates) of ScatterOp should not be null.");
+    PADDLE_ENFORCE_NOT_NULL(ctx.OutputVar("Out"),
+                            "Output(Out) of ScatterOp should not be null.");
+
     PADDLE_ENFORCE_EQ(ctx.Input<Tensor>("Index")->dims().size(), 1,
                       "Update Index should be 1-D.");
     PADDLE_ENFORCE_EQ(ctx.Input<Tensor>("Ref")->dims().size(),
paddle/operators/sequence_avg_pool_op.cc

@@ -23,9 +23,12 @@ class SequenceAvgPoolOp : public framework::OperatorWithKernel {
 protected:
  void InferShape(const framework::InferShapeContext& ctx) const override {
-    PADDLE_ENFORCE_NOT_NULL(ctx.InputVar("X"),
-                            "Input of SequenceAvgPoolOp"
-                            "must be initialized.");
+    PADDLE_ENFORCE_NOT_NULL(
+        ctx.InputVar("X"),
+        "Input(X) of SequenceAvgPoolOp should not be null.");
+    PADDLE_ENFORCE_NOT_NULL(
+        ctx.OutputVar("Out"),
+        "Output(Out) of SequenceAvgPoolOp should not be null.");
+
     auto* x = ctx.Input<framework::LoDTensor>("X");
     auto dims = x->dims();
     auto lod = x->lod();
paddle/operators/sgd_op.cc

@@ -23,6 +23,13 @@ class SGDOp : public framework::OperatorWithKernel {
 protected:
  void InferShape(const framework::InferShapeContext &ctx) const override {
+    PADDLE_ENFORCE_NOT_NULL(ctx.InputVar("param"),
+                            "Input(param) of SGDOp should not be null.");
+    PADDLE_ENFORCE_NOT_NULL(ctx.InputVar("grad"),
+                            "Input(grad) of SGDOp should not be null.");
+    PADDLE_ENFORCE_NOT_NULL(ctx.OutputVar("param_out"),
+                            "Output(param_out) of SGDOp should not be null.");
+
     PADDLE_ENFORCE_EQ(ctx.Input<Tensor>("param")->dims(),
                       ctx.Input<Tensor>("grad")->dims(),
                       "Two input of SGD Op's dimension must be same.");
paddle/operators/sigmoid_op.cc

@@ -23,6 +23,11 @@ class SigmoidOp : public framework::OperatorWithKernel {
 protected:
  void InferShape(const framework::InferShapeContext &ctx) const override {
+    PADDLE_ENFORCE_NOT_NULL(ctx.InputVar("X"),
+                            "Input(X) of SigmoidOp should not be null.");
+    PADDLE_ENFORCE_NOT_NULL(ctx.OutputVar("Y"),
+                            "Output(Y) of SigmoidOp should not be null.");
+
     ctx.Output<framework::LoDTensor>("Y")->Resize(
         ctx.Input<Tensor>("X")->dims());
   }
paddle/operators/softmax_op.cc

@@ -23,6 +23,11 @@ class SoftmaxOp : public framework::OperatorWithKernel {
 protected:
  void InferShape(const framework::InferShapeContext &ctx) const override {
+    PADDLE_ENFORCE_NOT_NULL(ctx.InputVar("X"),
+                            "Input(X) of SoftmaxOp should not be null.");
+    PADDLE_ENFORCE_NOT_NULL(ctx.OutputVar("Y"),
+                            "Output(Y) of SoftmaxOp should not be null.");
+
     PADDLE_ENFORCE(ctx.Input<Tensor>("X")->dims().size() == 2UL,
                    "The input of softmax op must be a matrix.");
     ctx.Output<framework::LoDTensor>("Y")->Resize(
paddle/operators/squared_l2_distance_op.cc

@@ -23,12 +23,18 @@ class SquaredL2DistanceOp : public framework::OperatorWithKernel {
 protected:
  void InferShape(const framework::InferShapeContext& ctx) const override {
-    PADDLE_ENFORCE_NOT_NULL(ctx.InputVar("X"),
-                            "Input of SquaredL2DistanceOp "
-                            "must be initialized.");
-    PADDLE_ENFORCE_NOT_NULL(ctx.InputVar("Y"),
-                            "Target of SquaredL2DistanceOp "
-                            "must be initialized.");
+    PADDLE_ENFORCE_NOT_NULL(
+        ctx.InputVar("X"),
+        "Input(X) of SquaredL2DistanceOp should not be null.");
+    PADDLE_ENFORCE_NOT_NULL(
+        ctx.InputVar("Y"),
+        "Input(Y) of SquaredL2DistanceOp should not be null.");
+    PADDLE_ENFORCE_NOT_NULL(
+        ctx.OutputVar("sub_result"),
+        "Output(sub_result) of SquaredL2DistanceOp should not be null.");
+    PADDLE_ENFORCE_NOT_NULL(
+        ctx.OutputVar("Out"),
+        "Output(Out) of SquaredL2DistanceOp should not be null.");

     auto* x = ctx.Input<Tensor>("X");
     auto x_dims = x->dims();
paddle/operators/sum_op.cc

@@ -22,6 +22,11 @@ class SumOp : public framework::OperatorWithKernel {
 protected:
  void InferShape(const framework::InferShapeContext &ctx) const override {
+    PADDLE_ENFORCE(!ctx.MultiInputVar("X").empty(),
+                   "Input(X) of SumOp should not be null.");
+    PADDLE_ENFORCE_NOT_NULL(ctx.OutputVar("Out"),
+                            "Output(Out) of SumOp should not be null.");
+
     auto ins = ctx.MultiInput<framework::Tensor>("X");
     auto *out = ctx.Output<framework::LoDTensor>("Out");
     int N = ins.size();
paddle/operators/top_k_op.cc

@@ -24,7 +24,12 @@ class TopkOp : public framework::OperatorWithKernel {
 protected:
  void InferShape(const framework::InferShapeContext &ctx) const override {
     PADDLE_ENFORCE_NOT_NULL(ctx.InputVar("X"),
-                            "Input of TopkOP must be initialized.");
+                            "Input(X) of TopkOp should not be null.");
+    PADDLE_ENFORCE_NOT_NULL(ctx.OutputVar("Out"),
+                            "Output(Out) of TopkOp should not be null.");
+    PADDLE_ENFORCE_NOT_NULL(ctx.OutputVar("Indices"),
+                            "Output(Indices) of TopkOp should not be null.");
+
     auto *input = ctx.Input<framework::Tensor>("X");
     const int k = static_cast<int>(ctx.Attr<int>("k"));
paddle/operators/uniform_random_op.cc

@@ -48,6 +48,10 @@ class UniformRandomOp : public framework::OperatorWithKernel {
 protected:
  void InferShape(const framework::InferShapeContext& ctx) const override {
+    PADDLE_ENFORCE_NOT_NULL(
+        ctx.OutputVar("Out"),
+        "Output(Out) of UniformRandomOp should not be null.");
+
     PADDLE_ENFORCE(Attr<float>("min") < Attr<float>("max"),
                    "uniform_random's min must less then max");
     auto* tensor = ctx.Output<framework::LoDTensor>("Out");
python/paddle/v2/framework/tests/test_add_two_op.py → python/paddle/v2/framework/tests/test_add_op.py

File moved.
python/paddle/v2/framework/tests/test_gaussian_random_op.py

@@ -4,7 +4,7 @@ from paddle.v2.framework.op import Operator
 import numpy

-class GaussianRandomTest(unittest.TestCase):
+class TestGaussianRandomOp(unittest.TestCase):
     def test_cpu(self):
         self.gaussian_random_test(place=core.CPUPlace())
python/paddle/v2/framework/tests/test_identity_op.py (new file, mode 100644)

+import unittest
+import numpy as np
+from op_test import OpTest
+
+
+class TestIdentityOp(OpTest):
+    def setUp(self):
+        self.op_type = "identity"
+        self.inputs = {'X': np.random.random((10, 10)).astype("float32")}
+        self.outputs = {'Out': self.inputs['X']}
+
+    def test_check_output(self):
+        self.check_output()
+
+    def test_check_grad(self):
+        self.check_grad(['X'], 'Out')
+
+
+if __name__ == "__main__":
+    unittest.main()
python/paddle/v2/framework/tests/test_lookup_table.py → python/paddle/v2/framework/tests/test_lookup_table_op.py

File moved.
python/paddle/v2/framework/tests/test_minus_op.py

@@ -3,7 +3,7 @@ import numpy as np
 from op_test import OpTest

-class MinusOpTest(OpTest):
+class TestMinusOp(OpTest):
     def setUp(self):
         self.op_type = "minus"
         self.inputs = {
python/paddle/v2/framework/tests/test_cross_entropy_op.py → python/paddle/v2/framework/tests/test_onehot_cross_entropy_op.py

@@ -3,7 +3,7 @@ import numpy
 from op_test import OpTest

-class TestCrossEntropy(OpTest):
+class TestOnehotCrossEntropyOp(OpTest):
     def setUp(self):
         self.op_type = "onehot_cross_entropy"
         batch_size = 30
python/paddle/v2/framework/tests/test_scale_and_identity_op.py → python/paddle/v2/framework/tests/test_scale_op.py

@@ -3,20 +3,7 @@ import numpy as np
 from op_test import OpTest

-class IdentityTest(OpTest):
-    def setUp(self):
-        self.op_type = "identity"
-        self.inputs = {'X': np.random.random((10, 10)).astype("float32")}
-        self.outputs = {'Out': self.inputs['X']}
-
-    def test_check_output(self):
-        self.check_output()
-
-    def test_check_grad(self):
-        self.check_grad(['X'], 'Out')
-
-
-class ScaleTest(OpTest):
+class TestScaleOp(OpTest):
     def setUp(self):
         self.op_type = "scale"
         self.inputs = {'X': np.random.random((10, 10)).astype("float32")}
python/paddle/v2/framework/tests/test_sgd_op.py

@@ -3,7 +3,7 @@ import numpy as np
 from op_test import OpTest

-class TestSGD(OpTest):
+class TestSGDOp(OpTest):
     def setUp(self):
         self.op_type = "sgd"
         w = np.random.random((102, 105)).astype("float32")
python/paddle/v2/framework/tests/test_sigmoid_op.py

@@ -3,7 +3,7 @@ import numpy as np
 from op_test import OpTest

-class TestSigmoid(OpTest):
+class TestSigmoidOp(OpTest):
     def setUp(self):
         self.op_type = "sigmoid"
         self.inputs = {
python/paddle/v2/framework/tests/test_top_k_op.py

@@ -21,6 +21,9 @@ class TestTopkOp(OpTest):
         self.outputs = {'Out': output, 'Indices': indices}

+    def test_check_output(self):
+        self.check_output()
+

 class TestTopkOp3d(OpTest):
     def setUp(self):
@@ -42,6 +45,9 @@ class TestTopkOp3d(OpTest):
         self.outputs = {'Out': output, 'Indices': indices}

+    def test_check_output(self):
+        self.check_output()
+

 if __name__ == "__main__":
     unittest.main()
python/paddle/v2/framework/tests/test_uniform_random_op.py

@@ -4,7 +4,7 @@ import paddle.v2.framework.core as core
 import numpy

-class UniformRandomTest(unittest.TestCase):
+class TestUniformRandomOp(unittest.TestCase):
     def test_uniform_random_cpu(self):
         self.uniform_random_test(place=core.CPUPlace())