Commit 384368f4

Authored Sep 15, 2017 by zchen0211

Merge branch 'develop' of https://github.com/PaddlePaddle/Paddle into develop

Parents: 490ca5f1, 7bcb1fc3

Showing 51 changed files with 596 additions and 361 deletions (+596 -361)
Changed files:

paddle/gserver/layers/ExpandConvBaseLayer.cpp  +0 -124
paddle/gserver/layers/ExpandConvBaseLayer.h  +0 -57
paddle/gserver/layers/ExpandConvLayer.cpp  +38 -7
paddle/gserver/layers/ExpandConvLayer.h  +5 -4
paddle/gserver/layers/MKLDNNConvLayer.cpp  +5 -4
paddle/gserver/layers/MKLDNNFcLayer.cpp  +181 -96
paddle/gserver/layers/MKLDNNFcLayer.h  +59 -0
paddle/math/MKLDNNMatrix.h  +6 -5
paddle/operators/accuracy_op.cc  +8 -3
paddle/operators/add_op.cc  +7 -0
paddle/operators/concat_op.cc  +3 -0
paddle/operators/cond_op.cc  +16 -5
paddle/operators/cos_sim_op.cc  +10 -2
paddle/operators/elementwise_mul_op.cc  +8 -2
paddle/operators/elementwise_mul_op.h  +0 -2
paddle/operators/fill_zeros_like_op.cc  +7 -0
paddle/operators/gather_op.cc  +7 -0
paddle/operators/gaussian_random_op.cc  +6 -2
paddle/operators/identity_op.cc  +5 -0
paddle/operators/lookup_table_op.cc  +11 -4
paddle/operators/mean_op.cc  +3 -1
paddle/operators/minus_op.cc  +7 -2
paddle/operators/mul_op.cc  +7 -0
paddle/operators/onehot_cross_entropy_op.cc  +10 -0
paddle/operators/pad_op.cc  +5 -0
paddle/operators/reshape_op.cc  +5 -1
paddle/operators/rowwise_add_op.cc  +7 -0
paddle/operators/scale_op.cc  +5 -0
paddle/operators/scatter_op.cc  +9 -0
paddle/operators/sequence_avg_pool_op.cc  +9 -4
paddle/operators/sequence_avg_pool_op.h  +8 -5
paddle/operators/sgd_op.cc  +7 -0
paddle/operators/sigmoid_op.cc  +5 -0
paddle/operators/softmax_op.cc  +5 -0
paddle/operators/squared_l2_distance_op.cc  +12 -6
paddle/operators/sum_op.cc  +5 -0
paddle/operators/top_k_op.cc  +6 -1
paddle/operators/uniform_random_op.cc  +4 -0
python/paddle/v2/framework/tests/op_test.py  +11 -4
python/paddle/v2/framework/tests/test_add_op.py  +0 -0
python/paddle/v2/framework/tests/test_gaussian_random_op.py  +1 -1
python/paddle/v2/framework/tests/test_identity_op.py  +20 -0
python/paddle/v2/framework/tests/test_lookup_table_op.py  +0 -0
python/paddle/v2/framework/tests/test_minus_op.py  +1 -1
python/paddle/v2/framework/tests/test_onehot_cross_entropy_op.py  +1 -1
python/paddle/v2/framework/tests/test_scale_op.py  +1 -14
python/paddle/v2/framework/tests/test_seq_pool.py  +51 -0
python/paddle/v2/framework/tests/test_sgd_op.py  +1 -1
python/paddle/v2/framework/tests/test_sigmoid_op.py  +1 -1
python/paddle/v2/framework/tests/test_top_k_op.py  +6 -0
python/paddle/v2/framework/tests/test_uniform_random_op.py  +1 -1
paddle/gserver/layers/ExpandConvBaseLayer.cpp (deleted, 100644 → 0)
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include "ExpandConvBaseLayer.h"
#include "paddle/utils/Logging.h"
namespace
paddle
{
bool
ExpandConvBaseLayer
::
init
(
const
LayerMap
&
layerMap
,
const
ParameterMap
&
parameterMap
)
{
/* Initialize the basic convolutional parent class */
ConvBaseLayer
::
init
(
layerMap
,
parameterMap
);
int
index
=
0
;
for
(
auto
&
inputConfig
:
config_
.
inputs
())
{
const
ConvConfig
&
conf
=
inputConfig
.
conv_conf
();
/* Consistent caffe mode for multiple input */
caffeMode_
=
conf
.
caffe_mode
();
// create a new weight
size_t
height
,
width
;
height
=
filterPixels_
[
index
]
*
filterChannels_
[
index
];
width
=
(
!
isDeconv_
)
?
numFilters_
:
channels_
[
index
];
CHECK_EQ
(
parameters_
[
index
]
->
getSize
(),
width
*
height
);
Weight
*
w
=
new
Weight
(
height
,
width
,
parameters_
[
index
]);
weights_
.
emplace_back
(
w
);
index
++
;
}
if
(
biasParameter_
.
get
())
{
if
(
sharedBiases_
)
{
CHECK_EQ
((
size_t
)
numFilters_
,
biasParameter_
->
getSize
());
biases_
=
std
::
unique_ptr
<
Weight
>
(
new
Weight
(
numFilters_
,
1
,
biasParameter_
));
}
else
{
biases_
=
std
::
unique_ptr
<
Weight
>
(
new
Weight
(
getSize
(),
1
,
biasParameter_
));
}
}
getOutputSize
();
return
true
;
}
size_t
ExpandConvBaseLayer
::
getOutputSize
()
{
CHECK_NE
(
inputLayers_
.
size
(),
0UL
);
size_t
layerSize
=
ConvBaseLayer
::
calOutputSize
();
return
layerSize
;
}
void
ExpandConvBaseLayer
::
addSharedBias
()
{
size_t
mapW
=
getOutputSize
()
/
numFilters_
;
size_t
mapH
=
getOutputValue
()
->
getElementCnt
()
/
mapW
;
MatrixPtr
out
=
Matrix
::
create
(
getOutputValue
()
->
getData
(),
mapH
,
mapW
,
false
,
useGpu_
);
Matrix
::
resizeOrCreate
(
transOutValue_
,
mapW
,
mapH
,
false
,
useGpu_
);
out
->
transpose
(
transOutValue_
,
false
);
// false means no memory allocation
transOutValue_
->
reshape
(
transOutValue_
->
getElementCnt
()
/
numFilters_
,
numFilters_
);
MatrixPtr
bias
=
Matrix
::
create
(
biases_
->
getW
()
->
getData
(),
1
,
biases_
->
getW
()
->
getElementCnt
(),
false
,
useGpu_
);
transOutValue_
->
addBias
(
*
bias
,
1.0
f
);
transOutValue_
->
reshape
(
mapW
,
mapH
);
transOutValue_
->
transpose
(
out
,
false
);
// false means no memory allocation
out
->
clear
();
bias
->
clear
();
}
void
ExpandConvBaseLayer
::
addUnsharedBias
()
{
MatrixPtr
outValue
=
getOutputValue
();
MatrixPtr
bias
=
Matrix
::
create
(
biases_
->
getW
()
->
getData
(),
1
,
biases_
->
getW
()
->
getElementCnt
(),
false
,
useGpu_
);
outValue
->
addBias
(
*
bias
,
1.0
f
);
}
void
ExpandConvBaseLayer
::
bpropSharedBias
(
MatrixPtr
biases
,
MatrixPtr
v
)
{
size_t
mapW
=
getOutputSize
()
/
numFilters_
;
size_t
mapH
=
v
->
getElementCnt
()
/
mapW
;
MatrixPtr
vTmp
=
Matrix
::
create
(
v
->
getData
(),
mapH
,
mapW
,
false
,
useGpu_
);
Matrix
::
resizeOrCreate
(
transOutValue_
,
mapW
,
mapH
,
false
,
useGpu_
);
vTmp
->
transpose
(
transOutValue_
,
false
);
// false means no memory allocation
transOutValue_
->
reshape
(
transOutValue_
->
getElementCnt
()
/
numFilters_
,
numFilters_
);
biases
->
collectBias
(
*
transOutValue_
,
1.0
f
);
}
void
ExpandConvBaseLayer
::
bpropBiases
(
MatrixPtr
v
)
{
MatrixPtr
biases
=
Matrix
::
create
(
biases_
->
getWGrad
()
->
getData
(),
1
,
biases_
->
getWGrad
()
->
getElementCnt
(),
false
,
useGpu_
);
if
(
sharedBiases_
)
{
bpropSharedBias
(
biases
,
v
);
}
else
{
biases
->
collectBias
(
*
v
,
1.0
f
);
}
biases
->
clear
();
}
}
// namespace paddle
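The shared-bias path above adds one bias value per output channel across that channel's whole feature map; the transpose/reshape dance exists only so that addBias can broadcast row-wise. A minimal standalone sketch of the same arithmetic in plain C++ (hypothetical helper, no Paddle types):

#include <cstddef>
#include <vector>

// Illustration only: one bias value per filter (output channel),
// broadcast over every spatial position of that filter's feature map.
void addSharedBiasSketch(std::vector<float> &out,         // numFilters * mapSize
                         const std::vector<float> &bias,  // numFilters
                         std::size_t numFilters,
                         std::size_t mapSize) {
  for (std::size_t f = 0; f < numFilters; ++f) {
    for (std::size_t i = 0; i < mapSize; ++i) {
      out[f * mapSize + i] += bias[f];
    }
  }
}

bpropSharedBias is the mirror image: it sums the incoming gradient over each channel's map into that channel's single bias gradient.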
paddle/gserver/layers/ExpandConvBaseLayer.h (deleted, 100644 → 0)
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#pragma once
#include <vector>
#include "ConvBaseLayer.h"
#include "paddle/math/Matrix.h"
namespace paddle {

/**
 * @brief A subclass of ConvBaseLayer that is a superclass of both
 * ExpandConvLayer and ExpandConvTransLayer
 */
class ExpandConvBaseLayer : public ConvBaseLayer {
protected:
  /// The transpose of output, which is an auxiliary matrix.
  MatrixPtr transOutValue_;

public:
  explicit ExpandConvBaseLayer(const LayerConfig &config)
      : ConvBaseLayer(config) {}

  ~ExpandConvBaseLayer() {}

  bool init(const LayerMap &layerMap,
            const ParameterMap &parameterMap) override;

  size_t getOutputSize();

  /**
   * Add shared bias.
   */
  void addSharedBias();

  /**
   * Add unshared bias.
   */
  void addUnsharedBias();

  void bpropSharedBias(MatrixPtr biases, MatrixPtr v);
  void bpropBiases(MatrixPtr v);
};

}  // namespace paddle
paddle/gserver/layers/ExpandConvLayer.cpp

@@ -36,7 +36,36 @@ inline bool isDepthwiseConv(int channels, int groups) {
 bool ExpandConvLayer::init(const LayerMap &layerMap,
                            const ParameterMap &parameterMap) {
   /* Initialize the basic convolutional parent class */
-  ExpandConvBaseLayer::init(layerMap, parameterMap);
+  ConvBaseLayer::init(layerMap, parameterMap);
+
+  int index = 0;
+  for (auto &inputConfig : config_.inputs()) {
+    const ConvConfig &conf = inputConfig.conv_conf();
+    /* Consistent caffe mode for multiple input */
+    caffeMode_ = conf.caffe_mode();
+
+    // create a new weight
+    size_t height, width;
+    height = filterPixels_[index] * filterChannels_[index];
+    width = (!isDeconv_) ? numFilters_ : channels_[index];
+    CHECK_EQ(parameters_[index]->getSize(), width * height);
+    Weight *w = new Weight(height, width, parameters_[index]);
+    weights_.emplace_back(w);
+    index++;
+  }
+
+  if (biasParameter_.get()) {
+    if (sharedBiases_) {
+      CHECK_EQ((size_t)numFilters_, biasParameter_->getSize());
+      biases_ = std::unique_ptr<Weight>(
+          new Weight(1, numFilters_, biasParameter_, 0));
+    } else {
+      biases_ =
+          std::unique_ptr<Weight>(new Weight(1, getSize(), biasParameter_, 0));
+    }
+  }
+
+  getOutputSize();

   size_t numInputs = config_.inputs_size();
   inputShape_.resize(numInputs);

@@ -108,6 +137,12 @@ bool ExpandConvLayer::init(const LayerMap &layerMap,
   return true;
 }

+size_t ExpandConvLayer::getOutputSize() {
+  CHECK_NE(inputLayers_.size(), 0UL);
+  size_t layerSize = ConvBaseLayer::calOutputSize();
+  return layerSize;
+}
+
 // i is the index of input layers
 #define BACKWARD_INPUT(i, inputs, outputs) \
   backward_[2 * i]->calc(inputs, outputs)

@@ -155,11 +190,7 @@ void ExpandConvLayer::forward(PassType passType) {
   /* add the bias-vector */
   if (biases_.get()) {
-    if (sharedBiases_) {
-      addSharedBias();
-    } else {
-      addUnsharedBias();
-    }
+    output_.value->addBias(*biases_->getW(), 1.0, sharedBiases_);
   }

   /* activation */

@@ -171,7 +202,7 @@ void ExpandConvLayer::backward(const UpdateCallback &callback) {
   MatrixPtr outGrad = getOutputGrad();
   if (biases_ && biases_->getWGrad()) {
-    bpropBiases(outGrad);
+    biases_->getWGrad()->collectBias(*getOutputGrad(), 1, sharedBiases_);
     /* Increasing the number of gradient */
     biases_->getParameterPtr()->incUpdate(callback);
   }
paddle/gserver/layers/ExpandConvLayer.h

@@ -15,7 +15,7 @@ limitations under the License. */
 #pragma once

 #include <vector>
-#include "ExpandConvBaseLayer.h"
+#include "ConvBaseLayer.h"
 #include "paddle/math/Matrix.h"

 namespace paddle {

@@ -28,10 +28,9 @@ namespace paddle {
  * The config file api is img_conv_layer.
  */
-class ExpandConvLayer : public ExpandConvBaseLayer {
+class ExpandConvLayer : public ConvBaseLayer {
 public:
   explicit ExpandConvLayer(const LayerConfig &config)
-      : ExpandConvBaseLayer(config) {}
+      : ConvBaseLayer(config) {}

   ~ExpandConvLayer() {}

@@ -41,6 +40,8 @@ public:
   void forward(PassType passType) override;
   void backward(const UpdateCallback &callback) override;

+  size_t getOutputSize();
+
 protected:
   std::vector<TensorShape> inputShape_;
   std::vector<TensorShape> filterShape_;
paddle/gserver/layers/MKLDNNConvLayer.cpp

@@ -285,10 +285,9 @@ void MKLDNNConvLayer::resetWgtBiasValue(
   wgt = MKLDNNMatrix::create(weight_->getW(), pd->weights_primitive_desc());
   VLOG(MKLDNN_FMTS) << "Weight value format: " << wgt->getFormat();

-  bias = nullptr;
-  if (biases_ && biases_->getW()) {
-    bias = MKLDNNMatrix::create(biases_->getW(), pd->bias_primitive_desc());
-  }
+  bias = (biases_ && biases_->getW())
+             ? MKLDNNMatrix::create(biases_->getW(), pd->bias_primitive_desc())
+             : nullptr;
 }

 void MKLDNNConvLayer::resetOutValue(

@@ -356,6 +355,7 @@ void MKLDNNConvLayer::resetBwdWgtPD(
 void MKLDNNConvLayer::resetBwdDataPD(
     std::shared_ptr<conv_bwdData::primitive_desc>& pd) {
+  pd = nullptr;
   if (inputLayers_[0]->getOutput().grad == nullptr) {
     return;
   }

@@ -476,6 +476,7 @@ void MKLDNNConvLayer::resetWgtBiasGrad(
       << "primitive desc of weight grad and value should be equal";
   VLOG(MKLDNN_FMTS) << "weight grad format: " << wgt->getFormat();

+  bias = nullptr;
   if (biasVal_ == nullptr) {
     return;
   }
paddle/gserver/layers/MKLDNNFcLayer.cpp

@@ -17,9 +17,6 @@ limitations under the License. */
 using namespace mkldnn;  // NOLINT
 typedef memory::format format;
-typedef inner_product_forward fc_fwd;
-typedef inner_product_backward_weights fc_bwdWgt;
-typedef inner_product_backward_data fc_bwdData;

 namespace paddle {

@@ -93,35 +90,88 @@ void MKLDNNFcLayer::reshape(
   printSizeInfo();
 }

-void MKLDNNFcLayer::resetFwd(std::vector<mkldnn::primitive>& pipeline,
+void MKLDNNFcLayer::resetFwd(std::vector<primitive>& pipeline,
                              MKLDNNMatrixPtr& in,
                              MKLDNNMatrixPtr& wgt,
                              MKLDNNMatrixPtr& bias,
                              MKLDNNMatrixPtr& out) {
-  pipeline.clear();
-  bool hasBias = biases_ && biases_->getW();
-  const MatrixPtr& wgtVal = weight_->getW();
-  const MatrixPtr& biasVal = hasBias ? biases_->getW() : nullptr;
-  const MatrixPtr& outVal = output_.value;
+  resetFwdBuffers(in, wgt, bias, out);
+
+  resetFwdPD(fwdPD_, in, wgt, bias, out);
+
+  resetFwdPipeline(pipeline, fwdPD_, in, wgt, bias, out);
+
+  printValueFormatFlow();
+}
+
+void MKLDNNFcLayer::resetBwd(std::vector<primitive>& pipeline,
+                             MKLDNNMatrixPtr& in,
+                             MKLDNNMatrixPtr& wgt,
+                             MKLDNNMatrixPtr& bias,
+                             MKLDNNMatrixPtr& out) {
+  std::shared_ptr<fc_bwdWgt::primitive_desc> bwdWgtPD;
+  std::shared_ptr<fc_bwdData::primitive_desc> bwdDataPD;
+
+  resetBwdBuffers(in, wgt, bias, out);
+
+  resetBwdWgtPD(bwdWgtPD, wgt, bias, out);
+
+  resetBwdDataPD(bwdDataPD, in, out);
+
+  resetBwdPipeline(pipeline, bwdWgtPD, bwdDataPD, in, wgt, bias, out);
+
+  printGradFormatFlow();
+}
+
+void MKLDNNFcLayer::updateInputData() {
+  inVal_->setData(getInputValue(0, CPU_DEVICE)->getData());
+}
+
+void MKLDNNFcLayer::updateWeights(const UpdateCallback& callback) {
+  weight_->getParameterPtr()->incUpdate(callback);
+  if (biases_ && biases_->getWGrad()) {
+    biases_->getParameterPtr()->incUpdate(callback);
+  }
+}
+
+void MKLDNNFcLayer::resetFwdBuffers(MKLDNNMatrixPtr& in,
+                                    MKLDNNMatrixPtr& wgt,
+                                    MKLDNNMatrixPtr& bias,
+                                    MKLDNNMatrixPtr& out) {
+  resetInValue(in);
+
+  resetWgtBiasValue(wgt, bias);
+
+  resetOutValue(out);
+}

+void MKLDNNFcLayer::resetInValue(MKLDNNMatrixPtr& in) {
   if (inputIsOnlyMKLDNN()) {
-    const MatrixPtr& inVal = getInputValue(0);
-    in = std::dynamic_pointer_cast<MKLDNNMatrix>(inVal);
+    const MatrixPtr& dnnIn = getInputValue(0);
+    in = std::dynamic_pointer_cast<MKLDNNMatrix>(dnnIn);
     CHECK(in) << "Input should be MKLDNNMatrix";
   } else {
     CHECK_EQ(getPrev(0)->getDeviceId(), CPU_DEVICE) << "Only support CPU yet";
-    const MatrixPtr& inVal = getInputValue(0, CPU_DEVICE);
+    const MatrixPtr& cpuIn = getInputValue(0, CPU_DEVICE);
     in = MKLDNNMatrix::create(
-        inVal, memory::dims{bs_, ic_, ih_, iw_}, format::nchw, engine_);
+        cpuIn, {bs_, ic_, ih_, iw_}, format::nchw, engine_);
   }
   in->downSpatial();
+}
+
+void MKLDNNFcLayer::resetWgtBiasValue(MKLDNNMatrixPtr& wgt,
+                                      MKLDNNMatrixPtr& bias) {
   wgt = MKLDNNMatrix::create(
-      wgtVal, memory::dims{oc_, ic_, ih_, iw_}, format::oihw, engine_);
+      weight_->getW(), {oc_, ic_, ih_, iw_}, format::oihw, engine_);
   wgt->downSpatial();
-  bias = hasBias ? MKLDNNMatrix::create(biasVal, {oc_}, format::x, engine_)
-                 : nullptr;
+
+  bias = (biases_ && biases_->getW())
+             ? MKLDNNMatrix::create(biases_->getW(), {oc_}, format::x, engine_)
+             : nullptr;
+}

-  out = MKLDNNMatrix::create(outVal, {bs_, oc_}, format::nc, engine_);
+void MKLDNNFcLayer::resetOutValue(MKLDNNMatrixPtr& out) {
+  out = MKLDNNMatrix::create(output_.value, {bs_, oc_}, format::nc, engine_);
   // change original output value to mkldnn output value
   output_.value = std::dynamic_pointer_cast<Matrix>(out);
   if (!outputIsOnlyMKLDNN()) {

@@ -129,10 +179,18 @@ void MKLDNNFcLayer::resetFwd(std::vector<mkldnn::primitive>& pipeline,
     // just share point
     getOutput(CPU_DEVICE).value->setData(output_.value->getData());
   }
+}

+// create forward handle
+void MKLDNNFcLayer::resetFwdPD(std::shared_ptr<fc_fwd::primitive_desc>& pd,
+                               MKLDNNMatrixPtr in,
+                               MKLDNNMatrixPtr wgt,
+                               MKLDNNMatrixPtr bias,
+                               MKLDNNMatrixPtr out) {
+  CHECK(in);
+  CHECK(wgt);
+  CHECK(out);
   prop_kind pk = prop_kind::forward;
-  fc_fwd::desc fwdDesc = hasBias ? fc_fwd::desc(pk,
+  fc_fwd::desc fwdDesc = bias != nullptr ? fc_fwd::desc(pk,
                                                 in->getMemoryDesc(),
                                                 wgt->getMemoryDesc(),
                                                 bias->getMemoryDesc(),

@@ -141,34 +199,39 @@ void MKLDNNFcLayer::resetFwd(std::vector<mkldnn::primitive>& pipeline,
                                                 in->getMemoryDesc(),
                                                 wgt->getMemoryDesc(),
                                                 out->getMemoryDesc());
-  fc_fwd::primitive_desc fwdPD = fc_fwd::primitive_desc(fwdDesc, engine_);
-  if (hasBias) {
-    fwd_.reset(new fc_fwd(fwdPD, *in, *wgt, *bias, *out));
+  pd.reset(new fc_fwd::primitive_desc(fwdDesc, engine_));
+}
+
+void MKLDNNFcLayer::resetFwdPipeline(
+    std::vector<primitive>& pipeline,
+    std::shared_ptr<fc_fwd::primitive_desc>& pd,
+    MKLDNNMatrixPtr& in,
+    MKLDNNMatrixPtr& wgt,
+    MKLDNNMatrixPtr& bias,
+    MKLDNNMatrixPtr& out) {
+  pipeline.clear();
+
+  if (bias) {
+    fwd_.reset(new fc_fwd(*pd, *in, *wgt, *bias, *out));
   } else {
-    fwd_.reset(new fc_fwd(fwdPD, *in, *wgt, *out));
+    fwd_.reset(new fc_fwd(*pd, *in, *wgt, *out));
   }
-  printValueFormatFlow();
   pipeline.push_back(*fwd_);
 }

-void MKLDNNFcLayer::resetBwd(std::vector<mkldnn::primitive>& pipeline,
-                             MKLDNNMatrixPtr& in,
+void MKLDNNFcLayer::resetBwdBuffers(MKLDNNMatrixPtr& in,
                              MKLDNNMatrixPtr& wgt,
                              MKLDNNMatrixPtr& bias,
                              MKLDNNMatrixPtr& out) {
-  pipeline.clear();
-  if (!needResetBwd_) {
-    return;
-  }
-  needResetBwd_ = false;
-  bool hasBias = biases_ && biases_->getWGrad();
+  resetOutGrad(out);

-  /// backward weight
-  CHECK(inVal_) << "Should have input value";
-  const MatrixPtr& wgtGrad = weight_->getWGrad();
-  const MatrixPtr& biasGrad = hasBias ? biases_->getWGrad() : nullptr;
+  resetWgtBiasGrad(wgt, bias);
+
+  resetInGrad(in);
+}

+void MKLDNNFcLayer::resetOutGrad(MKLDNNMatrixPtr& out) {
   // TODO(TJ): merge outgrad
   int device = outputIsOnlyMKLDNN() ? MKLDNN_DEVICE : CPU_DEVICE;
   // for MKLDNN device:

@@ -178,66 +241,88 @@ void MKLDNNFcLayer::resetBwd(std::vector<mkldnn::primitive>& pipeline,
   // for CPU device:
   // fc do not need to convert from cpu device since output is always nc format
   // only need create from cpu device
-  const MatrixPtr& outGrad = getOutput(device).grad;
-  out = MKLDNNMatrix::create(outGrad, outVal_->getPrimitiveDesc());
-  wgt = MKLDNNMatrix::create(wgtGrad, wgtVal_->getPrimitiveDesc());
-  bias = hasBias ? MKLDNNMatrix::create(biasGrad, biasVal_->getPrimitiveDesc())
-                 : nullptr;
-
-  // create memory primitive desc
-  fc_fwd::desc fwdDesc = fc_fwd::desc(prop_kind::forward,
-                                      inVal_->getMemoryDesc(),
-                                      wgt->getMemoryDesc(),
-                                      out->getMemoryDesc());
-  fc_fwd::primitive_desc fwdPD = fc_fwd::primitive_desc(fwdDesc, engine_);
-  fc_bwdWgt::desc bwdWgtDesc = hasBias
-                                   ? fc_bwdWgt::desc(inVal_->getMemoryDesc(),
-                                                     wgt->getMemoryDesc(),
-                                                     bias->getMemoryDesc(),
-                                                     out->getMemoryDesc())
-                                   : fc_bwdWgt::desc(inVal_->getMemoryDesc(),
-                                                     wgt->getMemoryDesc(),
-                                                     out->getMemoryDesc());
-  fc_bwdWgt::primitive_desc bwdWgtPD =
-      fc_bwdWgt::primitive_desc(bwdWgtDesc, engine_, fwdPD);
-
-  if (hasBias) {
-    bwdWgt_.reset(new fc_bwdWgt(bwdWgtPD, *inVal_, *out, *wgt, *bias));
-  } else {
-    bwdWgt_.reset(new fc_bwdWgt(bwdWgtPD, *inVal_, *out, *wgt));
-  }
-  pipeline.push_back(*bwdWgt_);
-
-  /// backward data
-  const MatrixPtr& inGrad = inputLayers_[0]->getOutput().grad;
-  if (inGrad == nullptr) {
-    return;
-  }
-  if (getInput(0, MKLDNN_DEVICE).getAllCount() > 1) {
-    // TODO(TJ): use outputMaps_ ways to get the inGrad_ when merge outgrad done
-  } else {
-    in = MKLDNNMatrix::create(inGrad, inVal_->getPrimitiveDesc());
-  }
-
-  fc_bwdData::desc bwdDataDesc = fc_bwdData::desc(inVal_->getMemoryDesc(),
-                                                  wgt->getMemoryDesc(),
-                                                  out->getMemoryDesc());
-  fc_bwdData::primitive_desc bwdDataPD =
-      fc_bwdData::primitive_desc(bwdDataDesc, engine_, fwdPD);
-
-  CHECK(wgtVal_) << "Should have weight memory";
-  bwdData_.reset(new fc_bwdData(bwdDataPD, *out, *wgtVal_, *in));
-  printGradFormatFlow();
-  pipeline.push_back(*bwdData_);
-}
-
-void MKLDNNFcLayer::updateInputData() {
-  inVal_->setData(getInputValue(0, CPU_DEVICE)->getData());
-}
-
-void MKLDNNFcLayer::updateWeights(const UpdateCallback& callback) {
-  weight_->getParameterPtr()->incUpdate(callback);
-  if (biases_ && biases_->getWGrad()) {
-    biases_->getParameterPtr()->incUpdate(callback);
-  }
+  CHECK(outVal_);
+  out =
+      MKLDNNMatrix::create(getOutput(device).grad, outVal_->getPrimitiveDesc());
+}
+
+void MKLDNNFcLayer::resetWgtBiasGrad(MKLDNNMatrixPtr& wgt,
+                                     MKLDNNMatrixPtr& bias) {
+  CHECK(wgtVal_);
+  wgt = MKLDNNMatrix::create(weight_->getWGrad(), wgtVal_->getPrimitiveDesc());
+
+  bias = nullptr;
+  if (biasVal_ == nullptr) {
+    return;
+  }
+  bias =
+      MKLDNNMatrix::create(biases_->getWGrad(), biasVal_->getPrimitiveDesc());
+}
+
+void MKLDNNFcLayer::resetInGrad(MKLDNNMatrixPtr& in) {
+  in = nullptr;
+  const MatrixPtr& inGrad = inputLayers_[0]->getOutput().grad;
+  if (inGrad == nullptr) {
+    return;
+  }
+  // TODO(TJ): use outputMaps_ ways to get the inGrad_ when merge outgrad done
+  CHECK(inVal_);
+  in = MKLDNNMatrix::create(inGrad, inVal_->getPrimitiveDesc());
+}
+
+void MKLDNNFcLayer::resetBwdWgtPD(
+    std::shared_ptr<fc_bwdWgt::primitive_desc>& pd,
+    MKLDNNMatrixPtr& wgt,
+    MKLDNNMatrixPtr& bias,
+    MKLDNNMatrixPtr& out) {
+  CHECK(inVal_);
+  fc_bwdWgt::desc bwdWgtDesc = bias ? fc_bwdWgt::desc(inVal_->getMemoryDesc(),
+                                                      wgt->getMemoryDesc(),
+                                                      bias->getMemoryDesc(),
+                                                      out->getMemoryDesc())
+                                    : fc_bwdWgt::desc(inVal_->getMemoryDesc(),
+                                                      wgt->getMemoryDesc(),
+                                                      out->getMemoryDesc());
+  pd.reset(new fc_bwdWgt::primitive_desc(bwdWgtDesc, engine_, *fwdPD_));
+}
+
+void MKLDNNFcLayer::resetBwdDataPD(
+    std::shared_ptr<fc_bwdData::primitive_desc>& pd,
+    MKLDNNMatrixPtr& in,
+    MKLDNNMatrixPtr& out) {
+  pd = nullptr;
+  if (in == nullptr) {
+    return;
+  }
+  CHECK(wgtVal_);
+  fc_bwdData::desc bwdDataDesc = fc_bwdData::desc(
+      in->getMemoryDesc(), wgtVal_->getMemoryDesc(), out->getMemoryDesc());
+  pd.reset(new fc_bwdData::primitive_desc(bwdDataDesc, engine_, *fwdPD_));
+}
+
+void MKLDNNFcLayer::resetBwdPipeline(
+    std::vector<primitive>& pipeline,
+    std::shared_ptr<fc_bwdWgt::primitive_desc>& bwdWgtPD,
+    std::shared_ptr<fc_bwdData::primitive_desc>& bwdDataPD,
+    MKLDNNMatrixPtr& in,
+    MKLDNNMatrixPtr& wgt,
+    MKLDNNMatrixPtr& bias,
+    MKLDNNMatrixPtr& out) {
+  pipeline.clear();
+  CHECK(inVal_);
+  if (bias) {
+    bwdWgt_.reset(new fc_bwdWgt(*bwdWgtPD, *inVal_, *out, *wgt, *bias));
+  } else {
+    bwdWgt_.reset(new fc_bwdWgt(*bwdWgtPD, *inVal_, *out, *wgt));
+  }
+  pipeline.push_back(*bwdWgt_);
+
+  if (bwdDataPD == nullptr) {
+    return;
+  }
+  CHECK(wgtVal_) << "Should have weight memory";
+  bwdData_.reset(new fc_bwdData(*bwdDataPD, *out, *wgtVal_, *in));
+  pipeline.push_back(*bwdData_);
 }

 }  // namespace paddle
paddle/gserver/layers/MKLDNNFcLayer.h

@@ -18,6 +18,9 @@ limitations under the License. */
 #include "mkldnn.hpp"

 namespace paddle {
+typedef mkldnn::inner_product_forward fc_fwd;
+typedef mkldnn::inner_product_backward_weights fc_bwdWgt;
+typedef mkldnn::inner_product_backward_data fc_bwdData;

 /**
  * @brief A subclass of MKLDNNLayer fc layer.

@@ -32,6 +35,9 @@ protected:
   // if has already init the weight
   bool hasInitedWgt_;

+  // save forward primitive_desc, which can be used backward
+  std::shared_ptr<fc_fwd::primitive_desc> fwdPD_;
+
   // fc weight and bias
   std::unique_ptr<Weight> weight_;
   std::unique_ptr<Weight> biases_;

@@ -67,6 +73,59 @@ public:
   void convertWeightsFromPaddle() override;

   void convertWeightsToPaddle() override;

+protected:
+  /**
+   * Forward functions: reset buffers(input, output, weight and bias),
+   *                    reset primitive descriptor,
+   *                    reset pipeline.
+   */
+  void resetFwdBuffers(MKLDNNMatrixPtr& in,
+                       MKLDNNMatrixPtr& wgt,
+                       MKLDNNMatrixPtr& bias,
+                       MKLDNNMatrixPtr& out);
+  void resetInValue(MKLDNNMatrixPtr& in);
+  void resetWgtBiasValue(MKLDNNMatrixPtr& wgt, MKLDNNMatrixPtr& bias);
+  void resetOutValue(MKLDNNMatrixPtr& out);
+  void resetFwdPD(std::shared_ptr<fc_fwd::primitive_desc>& pd,
+                  MKLDNNMatrixPtr in,
+                  MKLDNNMatrixPtr wgt,
+                  MKLDNNMatrixPtr bias,
+                  MKLDNNMatrixPtr out);
+  void resetFwdPipeline(std::vector<mkldnn::primitive>& pipeline,
+                        std::shared_ptr<fc_fwd::primitive_desc>& pd,
+                        MKLDNNMatrixPtr& in,
+                        MKLDNNMatrixPtr& wgt,
+                        MKLDNNMatrixPtr& bias,
+                        MKLDNNMatrixPtr& out);
+
+  /**
+   * Backward functions: reset buffers(input, output, weight and bias),
+   *                     reset primitive descriptor for backward weight,
+   *                     reset primitive descriptor for backward data,
+   *                     reset pipeline.
+   */
+  void resetBwdBuffers(MKLDNNMatrixPtr& in,
+                       MKLDNNMatrixPtr& wgt,
+                       MKLDNNMatrixPtr& bias,
+                       MKLDNNMatrixPtr& out);
+  void resetOutGrad(MKLDNNMatrixPtr& out);
+  void resetWgtBiasGrad(MKLDNNMatrixPtr& wgt, MKLDNNMatrixPtr& bias);
+  void resetInGrad(MKLDNNMatrixPtr& in);
+  void resetBwdWgtPD(std::shared_ptr<fc_bwdWgt::primitive_desc>& pd,
+                     MKLDNNMatrixPtr& wgt,
+                     MKLDNNMatrixPtr& bias,
+                     MKLDNNMatrixPtr& out);
+  void resetBwdDataPD(std::shared_ptr<fc_bwdData::primitive_desc>& pd,
+                      MKLDNNMatrixPtr& in,
+                      MKLDNNMatrixPtr& out);
+  void resetBwdPipeline(std::vector<mkldnn::primitive>& pipeline,
+                        std::shared_ptr<fc_bwdWgt::primitive_desc>& bwdWgtPD,
+                        std::shared_ptr<fc_bwdData::primitive_desc>& bwdDataPD,
+                        MKLDNNMatrixPtr& in,
+                        MKLDNNMatrixPtr& wgt,
+                        MKLDNNMatrixPtr& bias,
+                        MKLDNNMatrixPtr& out);
 };

 }  // namespace paddle
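The comment blocks above spell out the intended call order: reset the buffers, then the primitive descriptors, then the pipeline, which the layer finally submits to an mkldnn stream. As a rough, self-contained sketch of the underlying MKL-DNN v0.x inner-product flow these typedefs wrap (sizes and values are made up; this is a standalone illustration, not Paddle code):

#include <mkldnn.hpp>
#include <vector>

// Minimal MKL-DNN v0.x inner-product (fully connected) forward pass,
// mirroring the fc_fwd typedef above. Hypothetical shapes for illustration.
int main() {
  using namespace mkldnn;
  engine eng(engine::cpu, 0);

  const int bs = 2, ic = 3, oc = 4;  // batch, input channels, output channels
  std::vector<float> src(bs * ic, 1.f), wei(oc * ic, 0.5f), dst(bs * oc);

  memory::desc srcMd({bs, ic}, memory::data_type::f32, memory::format::nc);
  memory::desc weiMd({oc, ic}, memory::data_type::f32, memory::format::oi);
  memory::desc dstMd({bs, oc}, memory::data_type::f32, memory::format::nc);

  memory srcMem({srcMd, eng}, src.data());
  memory weiMem({weiMd, eng}, wei.data());
  memory dstMem({dstMd, eng}, dst.data());

  // primitive_desc corresponds to the cached fwdPD_ member above.
  inner_product_forward::desc fwdDesc(prop_kind::forward, srcMd, weiMd, dstMd);
  inner_product_forward::primitive_desc fwdPD(fwdDesc, eng);

  std::vector<primitive> pipeline;
  pipeline.push_back(inner_product_forward(fwdPD, srcMem, weiMem, dstMem));
  stream(stream::kind::eager).submit(pipeline).wait();
  return 0;
}

Splitting resetFwd/resetBwd into these smaller reset* steps also explains the new fwdPD_ member: the forward primitive_desc is cached precisely so that the backward-weight and backward-data descriptors can be built from it (note the *fwdPD_ hint in resetBwdWgtPD/resetBwdDataPD above).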
paddle/math/MKLDNNMatrix.h

@@ -66,11 +66,12 @@ public:
   /**
    * Create reorder primitive.
    * Create a mkldnn::reorder handle for converting src MKLDNNMatrix to dst.
-   * checkData: for whether to check the data handle of src and dst is the same.
-   *            if true, means check it and do not want support inplace reorder;
-   *            otherwise do not check data which means the created reorder
-   *            maybe inplace buffer and do not guarantee the logical is correct
-   *            since not all format or conversion support inplace.
+   * checkData: whether to check the data handle of src and dst.
+   *            if true, it will check the data and do not allow them equal;
+   *            otherwise, it will not check them, then the reorder created
+   *            may have inplace buffer.
+   *            Do not set false, if you can not guarantee the inplace logical
+   *            would work with your reorder.
    */
   static std::shared_ptr<mkldnn::reorder> createReorder(
       const MKLDNNMatrixPtr& src,
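A hedged usage sketch of the documented contract; only the src parameter is visible in the hunk above, so the dst argument is inferred from the comment rather than quoted from the header:

// Sketch, not verbatim API: build a reorder that converts src's layout
// into dst's layout. Per the comment, with checkData=true the call is
// expected to reject src and dst sharing one data handle (no in-place).
auto cvt = MKLDNNMatrix::createReorder(src, dst);
if (cvt) {
  pipeline.push_back(*cvt);  // executed along with the other primitives
}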
paddle/operators/accuracy_op.cc

@@ -23,10 +23,15 @@ class AccuracyOp : public framework::OperatorWithKernel {
 protected:
   void InferShape(const framework::InferShapeContext &ctx) const override {
-    PADDLE_ENFORCE_NOT_NULL(ctx.InputVar("Inference"),
-                            "Input of Inference must be initialized.");
-    PADDLE_ENFORCE_NOT_NULL(ctx.InputVar("Label"),
-                            "Input of Inference must be initialized.");
+    PADDLE_ENFORCE_NOT_NULL(
+        ctx.InputVar("Inference"),
+        "Input(Inference) of AccuracyOp should not be null.");
+    PADDLE_ENFORCE_NOT_NULL(ctx.InputVar("Label"),
+                            "Input(Label) of AccuracyOp should not be null.");
+    PADDLE_ENFORCE_NOT_NULL(
+        ctx.OutputVar("Accuracy"),
+        "Output(Accuracy) of AccuracyOp should not be null.");

     auto *inference = ctx.Input<framework::Tensor>("Inference");
     auto *label = ctx.Input<framework::Tensor>("Label");
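The same hardening pattern repeats through every operator file below: InferShape first checks that each declared input and output variable actually exists before dereferencing it, with uniform "Input(Name) of SomeOp should not be null." messages. Schematically (operator and variable names are placeholders, not a specific operator):

// Generic shape of the checks added across these operators.
void InferShape(const framework::InferShapeContext &ctx) const override {
  PADDLE_ENFORCE_NOT_NULL(ctx.InputVar("X"),
                          "Input(X) of SomeOp should not be null.");
  PADDLE_ENFORCE_NOT_NULL(ctx.OutputVar("Out"),
                          "Output(Out) of SomeOp should not be null.");
  // ...only now is it safe to call ctx.Input<Tensor>() / ctx.Output<...>()
  // and read or resize dims.
}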
paddle/operators/add_op.cc

@@ -23,6 +23,13 @@ class AddOp : public framework::OperatorWithKernel {
 protected:
   void InferShape(const framework::InferShapeContext &ctx) const override {
+    PADDLE_ENFORCE_NOT_NULL(ctx.InputVar("X"),
+                            "Input(X) of AddOp should not be null.");
+    PADDLE_ENFORCE_NOT_NULL(ctx.InputVar("Y"),
+                            "Input(Y) of AddOp should not be null.");
+    PADDLE_ENFORCE_NOT_NULL(ctx.OutputVar("Out"),
+                            "Output(Out) of AddOp should not be null.");
+
     PADDLE_ENFORCE_EQ(ctx.Input<Tensor>("X")->dims(),
                       ctx.Input<Tensor>("Y")->dims(),
                       "Two input of Add Op's dimension must be same.");
paddle/operators/concat_op.cc

@@ -25,6 +25,9 @@ class ConcatOp : public framework::OperatorWithKernel {
 protected:
   void InferShape(const framework::InferShapeContext &ctx) const override {
+    PADDLE_ENFORCE_NOT_NULL(ctx.OutputVar("Out"),
+                            "Output(Out) of ConcatOp should not be null.");
+
     auto ins = ctx.MultiInput<framework::Tensor>("X");
     auto *out = ctx.Output<framework::LoDTensor>("Out");
     size_t axis = static_cast<size_t>(ctx.Attr<int>("axis"));
paddle/operators/cond_op.cc

@@ -33,7 +33,8 @@ using DDim = framework::DDim;
 void CondOp::CreateScope(const Scope& scope) const {
   auto sub_scopes_var = scope.FindVar("SubScopes");
-  PADDLE_ENFORCE(sub_scopes_var != nullptr, "");
+  PADDLE_ENFORCE_NOT_NULL(sub_scopes_var,
+                          "Output(SubScopes) of CondOp should not be null.");
   auto sub_scopes = sub_scopes_var->GetMutable<std::vector<Scope*>>();
   auto& sub_scope = scope.NewScope();
   sub_scopes->push_back(&sub_scope);

@@ -41,7 +42,8 @@ void CondOp::CreateScope(const Scope& scope) const {
 void CondOp::CreateIndexTensor(const Scope& scope) const {
   auto index_tensors_var = scope.FindVar("IndexTensors");
-  PADDLE_ENFORCE(index_tensors_var != nullptr, "");
+  PADDLE_ENFORCE_NOT_NULL(index_tensors_var,
+                          "Output(IndexTensors) of CondOp should not be null.");
   auto& index_tensors =
       *index_tensors_var->GetMutable<std::vector<LoDTensor>>();
   index_tensors.push_back(LoDTensor());

@@ -49,7 +51,8 @@ void CondOp::CreateIndexTensor(const Scope& scope) const {
 void CondOp::InferShape(const Scope& scope) const {
   auto sub_scopes_var = scope.FindVar("SubScopes");
-  PADDLE_ENFORCE_NOT_NULL(sub_scopes_var);
+  PADDLE_ENFORCE_NOT_NULL(sub_scopes_var,
+                          "Output(SubScopes) of CondOp should not be null.");
   auto& sub_scopes = *sub_scopes_var->GetMutable<std::vector<Scope*>>();
   for (int i = 0; i < 2; ++i) {

@@ -63,7 +66,8 @@ void CondOp::InferShape(const Scope& scope) const {
   // branch
   CreateIndexTensor(scope);

-  PADDLE_ENFORCE(!Inputs("Xs").empty(), "Inputs can't be empty");
+  PADDLE_ENFORCE(!Inputs("Xs").empty(),
+                 "Inputs(Xs) of CondOp can't be empty.");
   for (auto& input : Inputs("Xs")) {
     // Create a new tensor in sub-scope for input-type tensor
     Variable* v = sub_scopes[i]->NewVar(input);

@@ -108,13 +112,18 @@ void CondOp::InferShape(const Scope& scope) const {
 void CondOp::Run(const Scope& scope,
                  const platform::DeviceContext& dev_ctx) const {
   auto* sub_scopes_var = scope.FindVar("SubScopes");
+  PADDLE_ENFORCE_NOT_NULL(sub_scopes_var,
+                          "Output(SubScopes) of CondOp should not be null.");
   auto sub_scopes = sub_scopes_var->Get<std::vector<Scope*>>();
   auto* index_tensors_var = scope.FindVar("IndexTensors");
+  PADDLE_ENFORCE_NOT_NULL(index_tensors_var,
+                          "Output(IndexTensors) of CondOp should not be null.");
   auto index_tensors = index_tensors_var->Get<std::vector<LoDTensor>>();

   std::string cond_name = Input("Cond");
   Variable* cond_var = scope.FindVar(cond_name);
-  PADDLE_ENFORCE_NOT_NULL(cond_var);
+  PADDLE_ENFORCE_NOT_NULL(cond_var,
+                          "Input(Cond) of CondOp should not be null.");
   const LoDTensor* cond = cond_var->GetMutable<LoDTensor>();

   // Step 1: get the true/false index at runtime

@@ -171,6 +180,8 @@ void CondOp::Run(const Scope& scope,
   }

   // Step 4: merge output results
+  PADDLE_ENFORCE(!Outputs("Outs").empty(),
+                 "Outputs(Outs) of CondOp can't be empty.");
   for (int i = 0; i < 2; ++i) {
     // i= 0/i for True and False branches respectively
     for (auto& output : Outputs("Outs")) {
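CondOp's Run (Step 1 through Step 4 above) gathers each branch's rows by a true/false index, runs the two subnets, and scatters the branch outputs back into one buffer. A toy, Paddle-free sketch of that final merge (hypothetical helper, flat float rows for simplicity):

#include <cstddef>
#include <vector>

// Toy illustration of CondOp's Step 4: rows that were routed to each
// branch by index are scattered back into a single output buffer.
void mergeBranches(std::vector<float> &out,
                   const std::vector<std::size_t> &trueIdx,
                   const std::vector<float> &trueOut,
                   const std::vector<std::size_t> &falseIdx,
                   const std::vector<float> &falseOut) {
  for (std::size_t i = 0; i < trueIdx.size(); ++i) {
    out[trueIdx[i]] = trueOut[i];
  }
  for (std::size_t i = 0; i < falseIdx.size(); ++i) {
    out[falseIdx[i]] = falseOut[i];
  }
}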
paddle/operators/cos_sim_op.cc

@@ -26,8 +26,16 @@ class CosSimOp : public framework::OperatorWithKernel {
 protected:
   void InferShape(const framework::InferShapeContext &ctx) const override {
     // notnull check
-    PADDLE_ENFORCE_NOT_NULL(ctx.InputVar("X"), "Input(X) must not be null.");
-    PADDLE_ENFORCE_NOT_NULL(ctx.InputVar("Y"), "Input(Y) must not be null.");
+    PADDLE_ENFORCE_NOT_NULL(ctx.InputVar("X"),
+                            "Input(X) of CosSimOp should not be null.");
+    PADDLE_ENFORCE_NOT_NULL(ctx.InputVar("Y"),
+                            "Input(Y) of CosSimOp should not be null.");
+    PADDLE_ENFORCE_NOT_NULL(ctx.OutputVar("Out"),
+                            "Output(Out) of CosSimOp should not be null.");
+    PADDLE_ENFORCE_NOT_NULL(ctx.OutputVar("XNorm"),
+                            "Output(XNorm) of CosSimOp should not be null.");
+    PADDLE_ENFORCE_NOT_NULL(ctx.OutputVar("YNorm"),
+                            "Output(YNorm) of CosSimOp should not be null.");

     // shape check
     auto x_dims = ctx.Input<Tensor>("X")->dims();
paddle/operators/elementwise_mul_op.cc

@@ -25,8 +25,14 @@ class ElementWiseMulOp : public framework::OperatorWithKernel {
 protected:
   void InferShape(const framework::InferShapeContext &ctx) const override {
-    PADDLE_ENFORCE_NOT_NULL(ctx.InputVar("X"), "Input(X) should not be null");
-    PADDLE_ENFORCE_NOT_NULL(ctx.InputVar("Y"), "Input(Y) should not be null");
+    PADDLE_ENFORCE_NOT_NULL(
+        ctx.InputVar("X"),
+        "Input(X) of ElementWiseMulOp should not be null.");
+    PADDLE_ENFORCE_NOT_NULL(
+        ctx.InputVar("Y"),
+        "Input(Y) of ElementWiseMulOp should not be null.");
+    PADDLE_ENFORCE_NOT_NULL(
+        ctx.OutputVar("Out"),
+        "Output(Out) of ElementWiseMulOp should not be null.");
     auto x_dim = ctx.Input<Tensor>("X")->dims();
     auto y_dim = ctx.Input<Tensor>("Y")->dims();
     PADDLE_ENFORCE_GE(x_dim.size(), y_dim.size(),
paddle/operators/elementwise_mul_op.h

@@ -13,10 +13,8 @@
 limitations under the License. */

 #pragma once
-#include <iostream>
 #include "paddle/framework/eigen.h"
 #include "paddle/framework/op_registry.h"
-#include "paddle/operators/math/math_function.h"

 namespace paddle {
 namespace operators {
paddle/operators/fill_zeros_like_op.cc

@@ -23,6 +23,13 @@ class FillZerosLikeOp : public framework::OperatorWithKernel {
 protected:
   void InferShape(const framework::InferShapeContext &ctx) const override {
+    PADDLE_ENFORCE_NOT_NULL(
+        ctx.InputVar("Src"),
+        "Input(Src) of FillZerosLikeOp should not be null.");
+    PADDLE_ENFORCE_NOT_NULL(
+        ctx.OutputVar("Dst"),
+        "Output(Dst) of FillZerosLikeOp should not be null.");
+
     ctx.Output<framework::LoDTensor>("Dst")->Resize(
         ctx.Input<framework::Tensor>("Src")->dims());
   }
paddle/operators/gather_op.cc

@@ -24,6 +24,13 @@ class GatherOp : public framework::OperatorWithKernel {
 protected:
   void InferShape(const framework::InferShapeContext &ctx) const override {
+    PADDLE_ENFORCE_NOT_NULL(ctx.InputVar("X"),
+                            "Input(X) of GatherOp should not be null.");
+    PADDLE_ENFORCE_NOT_NULL(ctx.InputVar("Index"),
+                            "Input(Index) of GatherOp should not be null.");
+    PADDLE_ENFORCE_NOT_NULL(ctx.OutputVar("Out"),
+                            "Output(Out) of GatherOp should not be null.");
+
     int batch_size = ctx.Input<Tensor>("Index")->dims()[0];
     PADDLE_ENFORCE_GE(batch_size, 0, "Batch size must be >0");
     framework::DDim output_dims(ctx.Input<Tensor>("X")->dims());
paddle/operators/gaussian_random_op.cc

@@ -43,8 +43,12 @@ class GaussianRandomOp : public framework::OperatorWithKernel {
   using framework::OperatorWithKernel::OperatorWithKernel;

 protected:
-  void InferShape(const framework::InferShapeContext& context) const override {
-    auto* tensor = context.Output<framework::LoDTensor>("Out");
+  void InferShape(const framework::InferShapeContext& ctx) const override {
+    PADDLE_ENFORCE_NOT_NULL(
+        ctx.OutputVar("Out"),
+        "Output(Out) of GaussianRandomOp should not be null.");
+
+    auto* tensor = ctx.Output<framework::LoDTensor>("Out");
     auto dims = Attr<std::vector<int>>("dims");
     std::vector<int64_t> temp;
     temp.reserve(dims.size());
paddle/operators/identity_op.cc

@@ -42,6 +42,11 @@ class IdentityOp : public NetOp {
              const framework::VariableNameMap &outputs,
              const framework::AttributeMap &attrs)
       : NetOp(type, inputs, outputs, attrs) {
+    PADDLE_ENFORCE_NE(Input("X"), framework::kEmptyVarName,
+                      "Input(X) of IdentityOp should not be null.");
+    PADDLE_ENFORCE_NE(Output("Out"), framework::kEmptyVarName,
+                      "Output(Out) of IdentityOp should not be null.");
+
     AppendOp(framework::OpRegistry::CreateOp(
         "scale", {{"X", {Input("X")}}}, {{"Out", {Output("Out")}}},
         {{"scale", static_cast<AttrType>(1)}}));
paddle/operators/lookup_table_op.cc

@@ -22,10 +22,17 @@ class LookupTableOp : public framework::OperatorWithKernel {
   using framework::OperatorWithKernel::OperatorWithKernel;

 protected:
-  void InferShape(const framework::InferShapeContext &context) const override {
-    auto table_t = context.Input<Tensor>("W");
-    auto ids_t = context.Input<Tensor>("Ids");
-    auto output_t = context.Output<framework::LoDTensor>("Out");
+  void InferShape(const framework::InferShapeContext &ctx) const override {
+    PADDLE_ENFORCE_NOT_NULL(ctx.InputVar("W"),
+                            "Input(W) of LookupTableOp should not be null.");
+    PADDLE_ENFORCE_NOT_NULL(ctx.InputVar("Ids"),
+                            "Input(Ids) of LookupTableOp should not be null.");
+    PADDLE_ENFORCE_NOT_NULL(ctx.OutputVar("Out"),
+                            "Output(Out) of LookupTableOp should not be null.");
+
+    auto table_t = ctx.Input<Tensor>("W");
+    auto ids_t = ctx.Input<Tensor>("Ids");
+    auto output_t = ctx.Output<framework::LoDTensor>("Out");

     output_t->Resize({ids_t->dims()[0], table_t->dims()[1]});
   }
paddle/operators/mean_op.cc

@@ -24,7 +24,9 @@ class MeanOp : public framework::OperatorWithKernel {
 protected:
   void InferShape(const framework::InferShapeContext &ctx) const override {
     PADDLE_ENFORCE_NOT_NULL(ctx.InputVar("X"),
-                            "Input of MeanOp must be initialized.");
+                            "Input(X) of MeanOp should not be null.");
+    PADDLE_ENFORCE_NOT_NULL(ctx.OutputVar("Out"),
+                            "Output(Out) of MeanOp should not be null.");
     ctx.Output<framework::LoDTensor>("Out")->Resize({1});
   }
 };
paddle/operators/minus_op.cc

@@ -27,6 +27,13 @@ class MinusOp : public framework::OperatorWithKernel {
 protected:
   void InferShape(const framework::InferShapeContext &ctx) const override {
+    PADDLE_ENFORCE_NOT_NULL(ctx.InputVar("X"),
+                            "Input(X) of MinusOp should not be null.");
+    PADDLE_ENFORCE_NOT_NULL(ctx.InputVar("Y"),
+                            "Input(Y) of MinusOp should not be null.");
+    PADDLE_ENFORCE_NOT_NULL(ctx.OutputVar("Out"),
+                            "Output(Out) of MinusOp should not be null.");
+
     auto *left_tensor = ctx.Input<framework::Tensor>("X");
     auto *right_tensor = ctx.Input<framework::Tensor>("Y");

@@ -77,8 +84,6 @@ class MinusGradOp : public NetOp {
 }  // namespace operators
 }  // namespace paddle

-USE_OP(scale);
-USE_NO_KERNEL_OP(identity);
 namespace ops = paddle::operators;
 REGISTER_OP(minus, ops::MinusOp, ops::MinusOpMaker, minus_grad,
             ops::MinusGradOp<float>);
paddle/operators/mul_op.cc

@@ -26,6 +26,13 @@ class MulOp : public framework::OperatorWithKernel {
 protected:
   void InferShape(const framework::InferShapeContext &ctx) const override {
+    PADDLE_ENFORCE_NOT_NULL(ctx.InputVar("X"),
+                            "Input(X) of MulOp should not be null.");
+    PADDLE_ENFORCE_NOT_NULL(ctx.InputVar("Y"),
+                            "Input(Y) of MulOp should not be null.");
+    PADDLE_ENFORCE_NOT_NULL(ctx.OutputVar("Out"),
+                            "Output(Out) of MulOp should not be null.");
+
     auto x_dims = ctx.Input<Tensor>("X")->dims();
     auto y_dims = ctx.Input<Tensor>("Y")->dims();
     int x_num_col_dims = Attr<int>("x_num_col_dims");
paddle/operators/onehot_cross_entropy_op.cc

@@ -23,6 +23,16 @@ class OnehotCrossEntropyOp : public framework::OperatorWithKernel {
 protected:
   void InferShape(const framework::InferShapeContext &ctx) const override {
+    PADDLE_ENFORCE_NOT_NULL(
+        ctx.InputVar("X"),
+        "Input(X) of OnehotCrossEntropyOp should not be null.");
+    PADDLE_ENFORCE_NOT_NULL(
+        ctx.InputVar("label"),
+        "Input(label) of OnehotCrossEntropyOp should not be null.");
+    PADDLE_ENFORCE_NOT_NULL(
+        ctx.OutputVar("Y"),
+        "Output(Y) of OnehotCrossEntropyOp should not be null.");
+
     auto *X = ctx.Input<Tensor>("X");
     auto *label = ctx.Input<Tensor>("label");
paddle/operators/pad_op.cc

@@ -25,6 +25,11 @@ class PadOp : public framework::OperatorWithKernel {
 protected:
   void InferShape(const framework::InferShapeContext &ctx) const override {
+    PADDLE_ENFORCE_NOT_NULL(ctx.InputVar("X"),
+                            "Input(X) of PadOp should not be null.");
+    PADDLE_ENFORCE_NOT_NULL(ctx.OutputVar("Out"),
+                            "Output(Out) of PadOp should not be null.");
+
     auto x_dim = ctx.Input<Tensor>("X")->dims();
     auto paddings = Attr<std::vector<int>>("paddings");
     PADDLE_ENFORCE_EQ(x_dim.size() * 2, int64_t(paddings.size()),
paddle/operators/reshape_op.cc

@@ -28,7 +28,11 @@ class ReshapeOp : public framework::OperatorWithKernel {
 protected:
   void InferShape(const framework::InferShapeContext &ctx) const override {
     // input check
-    PADDLE_ENFORCE_NOT_NULL(ctx.InputVar("X"), "Input(X) shouldn't be null");
+    PADDLE_ENFORCE_NOT_NULL(ctx.InputVar("X"),
+                            "Input(X) of ReshapeOp should not be null.");
+    PADDLE_ENFORCE_NOT_NULL(ctx.OutputVar("Out"),
+                            "Output(Out) of ReshapeOp should not be null.");
+
     auto shape = ctx.Attr<std::vector<int>>("shape");
     PADDLE_ENFORCE(shape.size() > 0, "Attr(shape) shouldn't be empty.");
     for (auto dim : shape) {
paddle/operators/rowwise_add_op.cc
View file @ 384368f4
...
@@ -25,6 +25,13 @@ class RowwiseAddOp : public framework::OperatorWithKernel {
 protected:
  void InferShape(const framework::InferShapeContext &ctx) const override {
+    PADDLE_ENFORCE_NOT_NULL(ctx.InputVar("X"),
+                            "Input(X) of RowwiseAddOp should not be null.");
+    PADDLE_ENFORCE_NOT_NULL(ctx.InputVar("b"),
+                            "Input(b) of RowwiseAddOp should not be null.");
+    PADDLE_ENFORCE_NOT_NULL(ctx.OutputVar("Out"),
+                            "Output(Out) of RowwiseAddOp should not be null.");
     auto x_dims = ctx.Input<Tensor>("X")->dims();
     auto b_dims = ctx.Input<Tensor>("b")->dims();
     PADDLE_ENFORCE_GT(
...
paddle/operators/scale_op.cc
View file @ 384368f4
...
@@ -27,6 +27,11 @@ class ScaleOp : public framework::OperatorWithKernel {
 protected:
  void InferShape(const framework::InferShapeContext &ctx) const override {
+    PADDLE_ENFORCE_NOT_NULL(ctx.InputVar("X"),
+                            "Input(X) of ScaleOp should not be null.");
+    PADDLE_ENFORCE_NOT_NULL(ctx.OutputVar("Out"),
+                            "Output(Out) of ScaleOp should not be null.");
     auto *in = ctx.Input<framework::Tensor>("X");
     auto *out = ctx.Output<framework::LoDTensor>("Out");
     out->Resize(in->dims());
...
paddle/operators/scatter_op.cc
View file @ 384368f4
...
@@ -24,6 +24,15 @@ class ScatterOp : public framework::OperatorWithKernel {
 protected:
  void InferShape(const framework::InferShapeContext &ctx) const override {
+    PADDLE_ENFORCE_NOT_NULL(ctx.InputVar("Ref"),
+                            "Input(Ref) of ScatterOp should not be null.");
+    PADDLE_ENFORCE_NOT_NULL(ctx.InputVar("Index"),
+                            "Input(Index) of ScatterOp should not be null.");
+    PADDLE_ENFORCE_NOT_NULL(ctx.InputVar("Updates"),
+                            "Input(Updates) of ScatterOp should not be null.");
+    PADDLE_ENFORCE_NOT_NULL(ctx.OutputVar("Out"),
+                            "Output(Out) of ScatterOp should not be null.");
     PADDLE_ENFORCE_EQ(ctx.Input<Tensor>("Index")->dims().size(), 1,
                       "Update Index should be 1-D.");
     PADDLE_ENFORCE_EQ(ctx.Input<Tensor>("Ref")->dims().size(),
...
paddle/operators/sequence_avg_pool_op.cc
View file @ 384368f4
...
@@ -23,9 +23,12 @@ class SequenceAvgPoolOp : public framework::OperatorWithKernel {
 protected:
  void InferShape(const framework::InferShapeContext &ctx) const override {
-    PADDLE_ENFORCE_NOT_NULL(ctx.InputVar("X"),
-                            "Input of SequenceAvgPoolOp"
-                            "must be initialized.");
+    PADDLE_ENFORCE_NOT_NULL(
+        ctx.InputVar("X"), "Input(X) of SequenceAvgPoolOp should not be null.");
+    PADDLE_ENFORCE_NOT_NULL(
+        ctx.OutputVar("Out"),
+        "Output(Out) of SequenceAvgPoolOp should not be null.");
     auto *x = ctx.Input<framework::LoDTensor>("X");
     auto dims = x->dims();
     auto lod = x->lod();
...
@@ -60,7 +63,9 @@ class SequenceAvgPoolGradOp : public framework::OperatorWithKernel {
 protected:
  void InferShape(const framework::InferShapeContext &ctx) const override {
     PADDLE_ENFORCE_NOT_NULL(ctx.InputVar(framework::GradVarName("Out")),
-                            "Gradient of Out should not be null");
+                            "Gradient of Out should not be null.");
+    PADDLE_ENFORCE_NOT_NULL(ctx.InputVar("X"),
+                            "The input X should not be null.");
     auto og_dims =
         ctx.Input<framework::LoDTensor>(framework::GradVarName("Out"))->dims();
     auto x_dims = ctx.Input<framework::LoDTensor>("X")->dims();
...
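For reference, the computation whose shapes this InferShape guards: sequence_avg_pool collapses each level-0 LoD segment of X into a single averaged row, so Out has one row per sequence. A small numpy sketch of the semantics (not the actual kernel):

import numpy as np

# Average-pool each LoD segment of x into one output row.
x = np.random.uniform(0.1, 1, (11, 23)).astype("float32")
lod = [0, 4, 5, 8, 11]  # 4 sequences
out = np.stack([x[lod[i]:lod[i + 1]].mean(axis=0)
                for i in range(len(lod) - 1)])
assert out.shape == (4, 23)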
paddle/operators/sequence_avg_pool_op.h
View file @ 384368f4
...
@@ -21,6 +21,9 @@ namespace operators {
 using Tensor = framework::Tensor;
 using LoDTensor = framework::LoDTensor;
+template <typename T, int MajorType = Eigen::RowMajor,
+          typename IndexType = Eigen::DenseIndex>
+using EigenVector = framework::EigenVector<T, MajorType, IndexType>;
 template <typename T, int MajorType = Eigen::RowMajor,
           typename IndexType = Eigen::DenseIndex>
 using EigenMatrix = framework::EigenMatrix<T, MajorType, IndexType>;
...
@@ -43,8 +46,8 @@ class SequenceAvgPoolKernel : public framework::OpKernel {
                      static_cast<int>(lod[0][i + 1]));
       Tensor out_t = out->Slice<T>(i, i + 1);
       int64_t h = static_cast<int64_t>(lod[0][i + 1] - lod[0][i]);
-      auto in_e = EigenMatrix<T>::From(in_t, {h, w});
-      auto out_e = EigenMatrix<T>::From(out_t, {h, w});
+      auto in_e = EigenMatrix<T>::From(in_t, framework::make_ddim({h, w}));
+      auto out_e = EigenVector<T>::Flatten(out_t);
       out_e.device(place) = in_e.mean(Eigen::array<int, 1>({{0}}));
     }
   }
...
@@ -54,9 +57,9 @@ template <typename Place, typename T>
 class SequenceAvgPoolGradKernel : public framework::OpKernel {
  public:
  void Compute(const framework::ExecutionContext& context) const override {
-    auto* in = context.Output<LoDTensor>("X");
-    auto* in_g = context.Output<LoDTensor>(framework::GradVarName("X"));
-    auto* out_g = context.Input<LoDTensor>(framework::GradVarName("Out"));
+    auto* in = context.Input<LoDTensor>("X");
+    auto* out_g = context.Input<LoDTensor>(framework::GradVarName("Out"));
+    auto* in_g = context.Output<LoDTensor>(framework::GradVarName("X"));
     auto dims = in->dims();
     auto lod = in->lod();
...
@@ -71,7 +74,7 @@ class SequenceAvgPoolGradKernel : public framework::OpKernel {
       int64_t h = static_cast<int64_t>(lod[0][i + 1] - lod[0][i]);
       auto in_g_e = EigenMatrix<T>::From(in_g_t, {h, w});
       auto out_g_e = EigenMatrix<T>::From(out_g_t, {1, w});
-      Eigen::DSizes<int, 2> bcast(h, w);
+      Eigen::DSizes<int, 2> bcast(h, 1);
       in_g_e.device(place) = (out_g_e / static_cast<T>(h)).broadcast(bcast);
     }
   }
...
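The bcast fix in the gradient kernel is easiest to see in numpy: out_g_t already spans the full width w, so the gradient of each of the h rows of a sequence is out_g / h, replicated along the row axis only. A sketch of the corrected broadcast for one sequence:

import numpy as np

# h rows each receive out_g / h; replicate h times along rows only,
# which is what Eigen::DSizes<int, 2> bcast(h, 1) expresses.
h, w = 3, 5
out_g = np.ones((1, w))            # gradient of the pooled row
in_g = np.tile(out_g / h, (h, 1))  # shape (h, w)
assert in_g.shape == (h, w)
assert np.allclose(in_g.sum(axis=0), out_g[0])

Broadcasting with (h, w) instead would replicate the width axis as well and produce a (h, w*w) tensor, hence the one-character fix.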
paddle/operators/sgd_op.cc
View file @ 384368f4
...
@@ -23,6 +23,13 @@ class SGDOp : public framework::OperatorWithKernel {
 protected:
  void InferShape(const framework::InferShapeContext &ctx) const override {
+    PADDLE_ENFORCE_NOT_NULL(ctx.InputVar("param"),
+                            "Input(param) of SGDOp should not be null.");
+    PADDLE_ENFORCE_NOT_NULL(ctx.InputVar("grad"),
+                            "Input(grad) of SGDOp should not be null.");
+    PADDLE_ENFORCE_NOT_NULL(ctx.OutputVar("param_out"),
+                            "Output(param_out) of SGDOp should not be null.");
     PADDLE_ENFORCE_EQ(ctx.Input<Tensor>("param")->dims(),
                       ctx.Input<Tensor>("grad")->dims(),
                       "Two input of SGD Op's dimension must be same.");
...
paddle/operators/sigmoid_op.cc
View file @ 384368f4
...
@@ -23,6 +23,11 @@ class SigmoidOp : public framework::OperatorWithKernel {
 protected:
  void InferShape(const framework::InferShapeContext &ctx) const override {
+    PADDLE_ENFORCE_NOT_NULL(ctx.InputVar("X"),
+                            "Input(X) of SigmoidOp should not be null.");
+    PADDLE_ENFORCE_NOT_NULL(ctx.OutputVar("Y"),
+                            "Output(Y) of SigmoidOp should not be null.");
     ctx.Output<framework::LoDTensor>("Y")->Resize(
         ctx.Input<Tensor>("X")->dims());
   }
...
paddle/operators/softmax_op.cc
View file @ 384368f4
...
@@ -23,6 +23,11 @@ class SoftmaxOp : public framework::OperatorWithKernel {
 protected:
  void InferShape(const framework::InferShapeContext &ctx) const override {
+    PADDLE_ENFORCE_NOT_NULL(ctx.InputVar("X"),
+                            "Input(X) of SoftmaxOp should not be null.");
+    PADDLE_ENFORCE_NOT_NULL(ctx.OutputVar("Y"),
+                            "Output(Y) of SoftmaxOp should not be null.");
     PADDLE_ENFORCE(ctx.Input<Tensor>("X")->dims().size() == 2UL,
                    "The input of softmax op must be a matrix.");
     ctx.Output<framework::LoDTensor>("Y")->Resize(
...
paddle/operators/squared_l2_distance_op.cc
View file @ 384368f4
...
@@ -23,12 +23,18 @@ class SquaredL2DistanceOp : public framework::OperatorWithKernel {
 protected:
  void InferShape(const framework::InferShapeContext &ctx) const override {
-    PADDLE_ENFORCE_NOT_NULL(ctx.InputVar("X"),
-                            "Input of SquaredL2DistanceOp "
-                            "must be initialized.");
-    PADDLE_ENFORCE_NOT_NULL(ctx.InputVar("Y"),
-                            "Target of SquaredL2DistanceOp "
-                            "must be initialized.");
+    PADDLE_ENFORCE_NOT_NULL(
+        ctx.InputVar("X"),
+        "Input(X) of SquaredL2DistanceOp should not be null.");
+    PADDLE_ENFORCE_NOT_NULL(
+        ctx.InputVar("Y"),
+        "Input(Y) of SquaredL2DistanceOp should not be null.");
+    PADDLE_ENFORCE_NOT_NULL(
+        ctx.OutputVar("sub_result"),
+        "Output(sub_result) of SquaredL2DistanceOp should not be null.");
+    PADDLE_ENFORCE_NOT_NULL(
+        ctx.OutputVar("Out"),
+        "Output(Out) of SquaredL2DistanceOp should not be null.");
     auto *x = ctx.Input<Tensor>("X");
     auto x_dims = x->dims();
...
paddle/operators/sum_op.cc
View file @ 384368f4
...
@@ -22,6 +22,11 @@ class SumOp : public framework::OperatorWithKernel {
 protected:
  void InferShape(const framework::InferShapeContext &ctx) const override {
+    PADDLE_ENFORCE(!ctx.MultiInputVar("X").empty(),
+                   "Input(X) of SumOp should not be null.");
+    PADDLE_ENFORCE_NOT_NULL(ctx.OutputVar("Out"),
+                            "Output(Out) of SumOp should not be null.");
     auto ins = ctx.MultiInput<framework::Tensor>("X");
     auto *out = ctx.Output<framework::LoDTensor>("Out");
     int N = ins.size();
...
paddle/operators/top_k_op.cc
View file @ 384368f4
...
@@ -24,7 +24,12 @@ class TopkOp : public framework::OperatorWithKernel {
 protected:
  void InferShape(const framework::InferShapeContext &ctx) const override {
     PADDLE_ENFORCE_NOT_NULL(ctx.InputVar("X"),
-                            "Input of TopkOP must be initialized.");
+                            "Input(X) of TopkOp should not be null.");
+    PADDLE_ENFORCE_NOT_NULL(ctx.OutputVar("Out"),
+                            "Output(Out) of TopkOp should not be null.");
+    PADDLE_ENFORCE_NOT_NULL(ctx.OutputVar("Indices"),
+                            "Output(Indices) of TopkOp should not be null.");
     auto *input = ctx.Input<framework::Tensor>("X");
     const int k = static_cast<int>(ctx.Attr<int>("k"));
...
paddle/operators/uniform_random_op.cc
View file @ 384368f4
...
@@ -48,6 +48,10 @@ class UniformRandomOp : public framework::OperatorWithKernel {
 protected:
  void InferShape(const framework::InferShapeContext &ctx) const override {
+    PADDLE_ENFORCE_NOT_NULL(
+        ctx.OutputVar("Out"),
+        "Output(Out) of UniformRandomOp should not be null.");
     PADDLE_ENFORCE(Attr<float>("min") < Attr<float>("max"),
                    "uniform_random's min must less then max");
     auto *tensor = ctx.Output<framework::LoDTensor>("Out");
...
python/paddle/v2/framework/tests/op_test.py
View file @ 384368f4
...
@@ -47,17 +47,24 @@ def set_input(scope, op, inputs, place):
         if in_name in inputs:
             if in_dup:
                 sub_in = inputs[in_name]
-                for sub_in_name, sub_in_array in sub_in:
+                for sub_in_name, sub_in_val in sub_in:
                     var = scope.find_var(sub_in_name)
                     tensor = var.get_tensor()
+                    sub_in_array = sub_in_val[0] \
+                        if isinstance(sub_in_val, tuple) else sub_in_val
                     tensor.set_dims(sub_in_array.shape)
                     tensor.set(sub_in_array, place)
+                    if isinstance(sub_in_val, tuple):
+                        tensor.set_lod(sub_in_val[1])
             else:
                 var = scope.find_var(in_name)
                 tensor = var.get_tensor()
-                arr = inputs[in_name]
-                tensor.set_dims(arr.shape)
-                tensor.set(arr, place)
+                in_val = inputs[in_name]
+                in_array = in_val[0] if isinstance(in_val, tuple) else in_val
+                tensor.set_dims(in_array.shape)
+                tensor.set(in_array, place)
+                if isinstance(in_val, tuple):
+                    tensor.set_lod(in_val[1])


 def set_output_grad(scope, op, outputs, place):
...
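The net effect on test authors: an input value may now be either a plain ndarray or an (ndarray, lod) tuple, and set_input forwards the LoD to the tensor only in the tuple case. A hedged usage sketch in the style of the tests below (variable names are illustrative):

import numpy as np

# Plain Tensor inputs stay as bare arrays; LoDTensor inputs become tuples.
x = np.random.random((11, 23)).astype("float32")
w = np.random.random((23, 7)).astype("float32")
inputs = {
    'X': (x, [[0, 4, 5, 8, 11]]),  # LoDTensor: values plus level-0 offsets
    'W': w,                        # plain Tensor: just the array
}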
python/paddle/v2/framework/tests/test_add_two_op.py → python/paddle/v2/framework/tests/test_add_op.py
View file @ 384368f4
File moved
python/paddle/v2/framework/tests/test_gaussian_random_op.py
View file @ 384368f4
...
@@ -4,7 +4,7 @@ from paddle.v2.framework.op import Operator
 import numpy


-class GaussianRandomTest(unittest.TestCase):
+class TestGaussianRandomOp(unittest.TestCase):
     def test_cpu(self):
         self.gaussian_random_test(place=core.CPUPlace())
...
python/paddle/v2/framework/tests/test_identity_op.py
0 → 100644
View file @ 384368f4
import unittest
import numpy as np
from op_test import OpTest


class TestIdentityOp(OpTest):
    def setUp(self):
        self.op_type = "identity"
        self.inputs = {'X': np.random.random((10, 10)).astype("float32")}
        self.outputs = {'Out': self.inputs['X']}

    def test_check_output(self):
        self.check_output()

    def test_check_grad(self):
        self.check_grad(['X'], 'Out')


if __name__ == "__main__":
    unittest.main()
python/paddle/v2/framework/tests/test_lookup_table.py → python/paddle/v2/framework/tests/test_lookup_table_op.py
View file @ 384368f4
File moved
python/paddle/v2/framework/tests/test_minus_op.py
View file @ 384368f4
...
@@ -3,7 +3,7 @@ import numpy as np
 from op_test import OpTest


-class MinusOpTest(OpTest):
+class TestMinusOp(OpTest):
     def setUp(self):
         self.op_type = "minus"
         self.inputs = {
...
python/paddle/v2/framework/tests/test_cross_entropy_op.py → python/paddle/v2/framework/tests/test_onehot_cross_entropy_op.py
View file @ 384368f4
...
@@ -3,7 +3,7 @@ import numpy
 from op_test import OpTest


-class TestCrossEntropy(OpTest):
+class TestOnehotCrossEntropyOp(OpTest):
     def setUp(self):
         self.op_type = "onehot_cross_entropy"
         batch_size = 30
...
python/paddle/v2/framework/tests/test_scale_and_identity_op.py → python/paddle/v2/framework/tests/test_scale_op.py
View file @ 384368f4
...
@@ -3,20 +3,7 @@ import numpy as np
 from op_test import OpTest


-class IdentityTest(OpTest):
-    def setUp(self):
-        self.op_type = "identity"
-        self.inputs = {'X': np.random.random((10, 10)).astype("float32")}
-        self.outputs = {'Out': self.inputs['X']}
-
-    def test_check_output(self):
-        self.check_output()
-
-    def test_check_grad(self):
-        self.check_grad(['X'], 'Out')
-
-
-class ScaleTest(OpTest):
+class TestScaleOp(OpTest):
     def setUp(self):
         self.op_type = "scale"
         self.inputs = {'X': np.random.random((10, 10)).astype("float32")}
...
python/paddle/v2/framework/tests/test_seq_pool.py
0 → 100644
View file @ 384368f4
import unittest
import numpy as np
from op_test import OpTest


class TestSeqAvgPool1D(OpTest):
    def setUp(self):
        self.op_type = 'sequence_avg_pool'
        # one level, batch size is 4
        x = np.random.uniform(0.1, 1, [11, 23]).astype('float32')
        lod = [[0, 4, 5, 8, 11]]

        out = np.zeros((4, 23)).astype('float32')
        for i in range(4):
            sub_x = x[lod[0][i]:lod[0][i + 1], :]
            out[i] = sub_x.mean(axis=0)

        self.inputs = {'X': (x, lod)}
        self.outputs = {'Out': out}

    def test_check_output(self):
        self.check_output()

    def test_check_grad(self):
        self.check_grad(["X"], "Out")


class TestSeqAvgPool2D(OpTest):
    def setUp(self):
        self.op_type = 'sequence_avg_pool'
        # one level, batch size is 4
        x = np.random.uniform(0.1, 1, [13, 3, 17]).astype('float32')
        lod = [[0, 4, 5, 8, 13]]

        out = np.zeros((4, 3, 17)).astype('float32')
        for i in range(4):
            sub_x = np.reshape(x[lod[0][i]:lod[0][i + 1], :], (-1, 3 * 17))
            out[i] = np.reshape(sub_x.mean(axis=0), (3, 17))

        self.inputs = {'X': (x, lod)}
        self.outputs = {'Out': out}

    def test_check_output(self):
        self.check_output()

    def test_check_grad(self):
        self.check_grad(["X"], "Out")


if __name__ == '__main__':
    unittest.main()
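Note that the level-0 LoD in these tests is a list of offsets, not lengths: sequence i occupies rows lod[0][i] through lod[0][i + 1]. A quick check of the convention:

# Offsets [0, 4, 5, 8, 11] describe 4 sequences of lengths 4, 1, 3 and 3.
lod = [0, 4, 5, 8, 11]
lengths = [lod[i + 1] - lod[i] for i in range(len(lod) - 1)]
assert lengths == [4, 1, 3, 3]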
python/paddle/v2/framework/tests/test_sgd_op.py
View file @ 384368f4
...
@@ -3,7 +3,7 @@ import numpy as np
 from op_test import OpTest


-class TestSGD(OpTest):
+class TestSGDOp(OpTest):
     def setUp(self):
         self.op_type = "sgd"
         w = np.random.random((102, 105)).astype("float32")
...
python/paddle/v2/framework/tests/test_sigmoid_op.py
View file @ 384368f4
...
@@ -3,7 +3,7 @@ import numpy as np
 from op_test import OpTest


-class TestSigmoid(OpTest):
+class TestSigmoidOp(OpTest):
     def setUp(self):
         self.op_type = "sigmoid"
         self.inputs = {
...
python/paddle/v2/framework/tests/test_top_k_op.py
View file @ 384368f4
...
@@ -21,6 +21,9 @@ class TestTopkOp(OpTest):
         self.outputs = {'Out': output, 'Indices': indices}

+    def test_check_output(self):
+        self.check_output()
+

 class TestTopkOp3d(OpTest):
     def setUp(self):
...
@@ -42,6 +45,9 @@ class TestTopkOp3d(OpTest):
         self.outputs = {'Out': output, 'Indices': indices}

+    def test_check_output(self):
+        self.check_output()
+

 if __name__ == "__main__":
     unittest.main()
python/paddle/v2/framework/tests/test_uniform_random_op.py
View file @ 384368f4
...
@@ -4,7 +4,7 @@ import paddle.v2.framework.core as core
 import numpy


-class UniformRandomTest(unittest.TestCase):
+class TestUniformRandomOp(unittest.TestCase):
     def test_uniform_random_cpu(self):
         self.uniform_random_test(place=core.CPUPlace())
...