Commit 69fbc542
Authored Sep 05, 2017 by fengjiayi

Merge branch 'develop' of https://github.com/PaddlePaddle/Paddle into dev_add_axis

Parents: af0264aa, b64aac54

Showing 17 changed files with 186 additions and 94 deletions (+186 -94)
doc/howto/dev/new_op_cn.md                                    +41  -13
paddle/framework/backward.cc                                   +1   -1
paddle/framework/op_registry.h                                 +2   -0
paddle/gserver/layers/Conv3DLayer.cpp                         +17   -6
paddle/gserver/layers/DeConv3DLayer.cpp                       +16   -6
paddle/operators/minus_op.cc                                   +1   -1
paddle/operators/mul_op.cc                                     +2   -2
paddle/operators/mul_op.h                                     +42  -38
paddle/operators/recurrent_op.cc                               +1   -1
paddle/operators/rowwise_add_op.cc                             +4   -2
paddle/operators/rowwise_add_op.h                             +14  -10
paddle/pybind/pybind.cc                                        +2   -2
python/paddle/trainer_config_helpers/tests/configs/protostr/test_sub_nested_seq_select_layer.protostr  +0  -0
python/paddle/v2/framework/op.py                               +1   -1
python/paddle/v2/framework/tests/gradient_checker.py           +3   -1
python/paddle/v2/framework/tests/test_mul_op.py               +26   -5
python/paddle/v2/framework/tests/test_rowwise_add_op.py       +13   -5
doc/howto/dev/new_op_cn.md @ 69fbc542

````diff
@@ -227,6 +227,12 @@ make mul_op
 USE_CPU_ONLY_OP(gather);
 ```
 
+If an Op has no kernel, use `USE_NO_KERNEL_OP`:
+
+```
+USE_NO_KERNEL_OP(recurrent);
+```
+
 Use `USE_OP` to tell the compiler that the Op's object file needs to be linked; see the [code comments](https://github.com/PaddlePaddle/Paddle/blob/develop/paddle/framework/op_registry.h#L81) for a detailed explanation.

@@ -280,28 +286,50 @@ class TestMulOp(unittest.TestCase):
 The backward Op unit test inherits from `GradientChecker`, which in turn inherits from `unittest.TestCase`, so backward unit test functions must also start with `test_`.
 
 ```
-class MulGradOpTest(GradientChecker):
-    def test_mul(self):
-        op = create_op("mul")
-        inputs = {
+class TestMulGradOp(GradientChecker):
+    def setUp(self):
+        self.op = create_op("mul")
+        self.inputs = {
             'X': np.random.random((32, 84)).astype("float32"),
             'Y': np.random.random((84, 100)).astype("float32")
         }
-        self.compare_grad(op, inputs)
+
+    def test_cpu_gpu_compare(self):
+        self.compare_grad(self.op, self.inputs)
+
+    def test_normal(self):
         # mul op will enlarge the relative error
         self.check_grad(
-            op, inputs, set(["X", "Y"]), "Out", max_relative_error=0.5)
+            self.op, self.inputs, ["X", "Y"], "Out", max_relative_error=0.5)
+
+    def test_ignore_x(self):
+        self.check_grad(
+            self.op,
+            self.inputs, ["Y"],
+            "Out",
+            max_relative_error=0.5,
+            no_grad_set={"X"})
+
+    def test_ignore_y(self):
+        self.check_grad(
+            self.op,
+            self.inputs, ["X"],
+            "Out",
+            max_relative_error=0.5,
+            no_grad_set={"Y"})
 ```
 
 Some key points:
 
 - Calling `create_op("mul")` creates the forward Op corresponding to the backward Op.
 - Define the inputs `inputs`.
 - Call `compare_grad` to compare the CPU and GPU results.
-- Call `check_grad` to check gradient stability; numerical gradients are used to verify correctness.
-  - The first argument, `op`: the forward op.
-  - The second argument, `inputs`: the input dict; its keys must match the `ProtoMaker` definition.
-  - The third argument, `set(["X", "Y"])`: run the gradient check for the input variables `X` and `Y`.
+- In `test_normal`, call `check_grad` to check gradient stability; numerical gradients are used to verify correctness.
+  - The first argument, `self.op`: the forward Op.
+  - The second argument, `self.inputs`: the input dict; its keys must match the `ProtoMaker` definition.
+  - The third argument, `["X", "Y"]`: run the gradient check for the input variables `X` and `Y`.
   - The fourth argument, `"Out"`: the final output target variable `Out` of the forward network.
+- `test_ignore_x` and `test_ignore_y` test the case where only one input's gradient needs to be computed.
 
 ### Compile and run
````
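The tests above lean on `check_grad`, which compares numerically estimated gradients against the operator's analytic gradients. As a framework-free sketch of the underlying idea (central differences; a simplified stand-in, not Paddle's actual implementation):

```python
import numpy as np

def numeric_grad(f, x, eps=1e-4):
    """Estimate d f() / d x by central differences, one element at a time."""
    grad = np.zeros_like(x)
    flat_x, flat_g = x.ravel(), grad.ravel()  # views into x and grad
    for i in range(flat_x.size):
        orig = flat_x[i]
        flat_x[i] = orig + eps
        f_plus = f()
        flat_x[i] = orig - eps
        f_minus = f()
        flat_x[i] = orig                       # restore the element
        flat_g[i] = (f_plus - f_minus) / (2 * eps)
    return grad

# Analytic check for mul: if Out = X @ Y and loss = sum(Out),
# then dLoss/dX = ones_like(Out) @ Y.T
X = np.random.random((5, 4))
Y = np.random.random((4, 3))
numeric = numeric_grad(lambda: (X @ Y).sum(), X)
analytic = np.ones((5, 3)) @ Y.T
assert np.allclose(numeric, analytic, rtol=1e-3, atol=1e-6)
```

The loose `max_relative_error=0.5` in the mul test reflects the in-test comment that the mul op enlarges the relative error of this estimate.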
paddle/framework/backward.cc @ 69fbc542

```diff
@@ -182,7 +182,7 @@ static std::unique_ptr<OperatorBase> BackwardRecursive(
   });
 
   // process recurrent gradient op as a special operator.
-  if (forwardOp.Type() == "recurrent_op") {
+  if (forwardOp.Type() == "recurrent") {
     // NOTE clean up cycle call somewhere (RNN's stepnet constains itself), or
     // this will result in infinite loop.
     const auto& rnnop =
```
paddle/framework/op_registry.h @ 69fbc542

```diff
@@ -199,6 +199,8 @@ class OpKernelRegistrar : public Registrar {
   USE_OP_DEVICE_KERNEL(op_type, GPU)
 #endif
 
+#define USE_NO_KERNEL_OP(op_type) USE_OP_ITSELF(op_type);
+
 #define USE_CPU_ONLY_OP(op_type) \
   USE_OP_ITSELF(op_type);        \
   USE_OP_DEVICE_KERNEL(op_type, CPU);
```
paddle/gserver/layers/Conv3DLayer.cpp @ 69fbc542

```diff
@@ -42,10 +42,10 @@ bool Conv3DLayer::init(const LayerMap &layerMap,
   if (sharedBiases_) {
     CHECK_EQ((size_t)numFilters_, biasParameter_->getSize());
     biases_ =
-        std::unique_ptr<Weight>(new Weight(1, numFilters_, biasParameter_));
+        std::unique_ptr<Weight>(new Weight(numFilters_, 1, biasParameter_));
   } else {
     biases_ =
-        std::unique_ptr<Weight>(new Weight(1, getSize(), biasParameter_));
+        std::unique_ptr<Weight>(new Weight(getSize(), 1, biasParameter_));
   }
 }
 return true;

@@ -224,20 +224,31 @@ void Conv3DLayer::bpropData(int i) {
 }
 
 void Conv3DLayer::bpropBiases() {
+  MatrixPtr biases = Matrix::create(biases_->getWGrad()->getData(),
+                                    1,
+                                    biases_->getWGrad()->getElementCnt(),
+                                    false,
+                                    useGpu_);
   MatrixPtr outGradMat = getOutputGrad();
   if (this->sharedBiases_) {
-    biases_->getWGrad()->collectSharedBias(*outGradMat, 1.0f);
+    biases->collectSharedBias(*outGradMat, 1.0f);
   } else {
-    biases_->getWGrad()->collectBias(*outGradMat, 1.0f);
+    biases->collectBias(*outGradMat, 1.0f);
   }
 }
 
 void Conv3DLayer::addBias() {
   MatrixPtr outMat = getOutputValue();
+  MatrixPtr bias = Matrix::create(biases_->getW()->getData(),
+                                  1,
+                                  biases_->getW()->getElementCnt(),
+                                  false,
+                                  useGpu_);
   if (this->sharedBiases_) {
-    outMat->addSharedBias(*(biases_->getW()), 1.0f);
+    outMat->addSharedBias(*(bias), 1.0f);
   } else {
-    outMat->addBias(*(biases_->getW()), 1.0f);
+    outMat->addBias(*(bias), 1.0f);
   }
 }
```
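The rewritten `bpropBiases` and `addBias` wrap the raw bias buffer in a temporary 1 x N `Matrix` view instead of using the stored `Weight` matrix directly, which is consistent with the shape change in `init`: the bias is the same N numbers whether stored as 1 x N or N x 1. As a rough NumPy picture of collecting a shared bias gradient, assuming each filter's single bias receives the summed gradient over batch and spatial extent (an assumption about the data layout, for illustration only):

```python
import numpy as np

batch, num_filters, spatial = 4, 6, 10
# Output gradient laid out as (batch, num_filters * spatial); each filter
# has one bias shared by all of its spatial positions (layout assumption).
out_grad = np.random.random((batch, num_filters * spatial))

# Collecting the shared bias gradient sums over batch and spatial extent,
# leaving one value per filter; whether that vector is stored as 1 x N or
# N x 1 does not change its contents.
bias_grad = out_grad.reshape(batch, num_filters, spatial).sum(axis=(0, 2))
print(bias_grad.shape)  # (6,): one entry per filter
```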
paddle/gserver/layers/DeConv3DLayer.cpp @ 69fbc542

```diff
@@ -42,10 +42,10 @@ bool DeConv3DLayer::init(const LayerMap &layerMap,
   if (sharedBiases_) {
     CHECK_EQ((size_t)numFilters_, biasParameter_->getSize());
     biases_ =
-        std::unique_ptr<Weight>(new Weight(1, numFilters_, biasParameter_));
+        std::unique_ptr<Weight>(new Weight(numFilters_, 1, biasParameter_));
   } else {
     biases_ =
-        std::unique_ptr<Weight>(new Weight(1, getSize(), biasParameter_));
+        std::unique_ptr<Weight>(new Weight(getSize(), 1, biasParameter_));
   }
 }
 return true;

@@ -191,21 +191,31 @@ void DeConv3DLayer::bpropWeights(int i) {}
 void DeConv3DLayer::bpropData(int i) {}
 
 void DeConv3DLayer::bpropBiases() {
+  MatrixPtr biases = Matrix::create(biases_->getWGrad()->getData(),
+                                    1,
+                                    biases_->getWGrad()->getElementCnt(),
+                                    false,
+                                    useGpu_);
   const MatrixPtr &outGradMat = getOutputGrad();
   if (this->sharedBiases_) {
-    biases_->getWGrad()->collectSharedBias(*outGradMat, 1.0f);
+    biases->collectSharedBias(*outGradMat, 1.0f);
   } else {
-    biases_->getWGrad()->collectBias(*outGradMat, 1.0f);
+    biases->collectBias(*outGradMat, 1.0f);
   }
 }
 
 void DeConv3DLayer::addBias() {
   MatrixPtr outMat = getOutputValue();
+  MatrixPtr bias = Matrix::create(biases_->getW()->getData(),
+                                  1,
+                                  biases_->getW()->getElementCnt(),
+                                  false,
+                                  useGpu_);
   if (this->sharedBiases_) {
-    outMat->addSharedBias(*(biases_->getW()), 1.0f);
+    outMat->addSharedBias(*(bias), 1.0f);
   } else {
-    outMat->addBias(*(biases_->getW()), 1.0f);
+    outMat->addBias(*(bias), 1.0f);
   }
 }
```
paddle/operators/minus_op.cc @ 69fbc542

```diff
@@ -79,7 +79,7 @@ class MinusGradOp : public NetOp {
 }  // namespace paddle
 
 USE_OP(scale);
-USE_OP_ITSELF(identity);
+USE_NO_KERNEL_OP(identity);
 namespace ops = paddle::operators;
 REGISTER_OP(minus, ops::MinusOp, ops::MinusOpMaker, minus_grad,
             ops::MinusGradOp<float>);
```
paddle/operators/mul_op.cc @ 69fbc542

```diff
@@ -107,8 +107,8 @@ class MulOpGrad : public framework::OperatorWithKernel {
                    "The second dimension of Out@GRAD must equal to the second "
                    "dimension of the second operand.");
 
-    x_grad->Resize(x_dims);
-    y_grad->Resize(y_dims);
+    if (x_grad) x_grad->Resize(x_dims);
+    if (y_grad) y_grad->Resize(y_dims);
   }
 };
```
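The new `if (x_grad)` / `if (y_grad)` guards let `MulOpGrad` skip shape inference for a gradient output that was not created, which is what `no_grad_set` produces in the Python tests further down. A NumPy sketch of that contract (the `mul_grad` helper is hypothetical, for illustration only):

```python
import numpy as np

def mul_grad(x, y, dout, need_dx=True, need_dy=True):
    """Gradients of Out = X @ Y; skip any gradient the caller did not request."""
    dx = dout @ y.T if need_dx else None  # dX: M x K = (M x N) @ (N x K)
    dy = x.T @ dout if need_dy else None  # dY: K x N = (K x M) @ (M x N)
    return dx, dy

x = np.random.random((32, 84))
y = np.random.random((84, 100))
dout = np.ones((32, 100))

dx, dy = mul_grad(x, y, dout, need_dy=False)  # e.g. "Y" is in no_grad_set
assert dx.shape == (32, 84) and dy is None
```

Skipping an unrequested gradient avoids both its allocation and its matmul.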
paddle/operators/mul_op.h @ 69fbc542

```diff
@@ -2,13 +2,13 @@
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
+you may obtain a copy of the License at
 
     http://www.apache.org/licenses/LICENSE-2.0
 
 Unless required by applicable law or agreed to in writing, software
 distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+WITHOUT WARRANTIES OR CONDITIONS OF ANy KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License. */

@@ -31,24 +31,24 @@ template <typename Place, typename T>
 class MulKernel : public framework::OpKernel {
  public:
   void Compute(const framework::ExecutionContext& context) const override {
-    const Tensor* X = context.Input<Tensor>("X");
-    const Tensor* Y = context.Input<Tensor>("Y");
+    const Tensor* x = context.Input<Tensor>("X");
+    const Tensor* y = context.Input<Tensor>("Y");
     Tensor* Z = context.Output<Tensor>("Out");
-    const Tensor X_matrix =
-        X->dims().size() > 2
+    const Tensor x_matrix =
+        x->dims().size() > 2
             ? framework::FlattenToMatrix<T>(
-                  *X, context.template GetAttr<int>("x_num_row_dims"))
-            : *X;
-    const Tensor Y_matrix =
-        Y->dims().size() > 2
+                  *x, context.template GetAttr<int>("x_num_row_dims"))
+            : *x;
+    const Tensor y_matrix =
+        y->dims().size() > 2
             ? framework::FlattenToMatrix<T>(
-                  *Y, context.template GetAttr<int>("y_num_row_dims"))
-            : *Y;
+                  *y, context.template GetAttr<int>("y_num_row_dims"))
+            : *y;
 
     Z->mutable_data<T>(context.GetPlace());
     auto* device_context =
         const_cast<platform::DeviceContext*>(context.device_context_);
-    math::matmul<Place, T>(X_matrix, false, Y_matrix, false, 1, Z, 0,
+    math::matmul<Place, T>(x_matrix, false, y_matrix, false, 1, Z, 0,
                            device_context);
   }
 };

@@ -59,34 +59,38 @@ class MulGradKernel : public framework::OpKernel {
   void Compute(const framework::ExecutionContext& ctx) const override {
     int x_num_row_dims = ctx.template GetAttr<int>("x_num_row_dims");
     int y_num_row_dims = ctx.template GetAttr<int>("y_num_row_dims");
-    const Tensor* X = ctx.Input<Tensor>("X");
-    const Tensor* Y = ctx.Input<Tensor>("Y");
-    const Tensor X_matrix =
-        X->dims().size() > 2 ? framework::FlattenToMatrix<T>(*X, x_num_row_dims)
-                             : *X;
-    const Tensor Y_matrix =
-        Y->dims().size() > 2 ? framework::FlattenToMatrix<T>(*Y, y_num_row_dims)
-                             : *Y;
-    const Tensor* dOut = ctx.Input<Tensor>(framework::GradVarName("Out"));
+    const Tensor* x = ctx.Input<Tensor>("X");
+    const Tensor* y = ctx.Input<Tensor>("Y");
+    const Tensor x_matrix =
+        x->dims().size() > 2 ? framework::FlattenToMatrix<T>(*x, x_num_row_dims)
+                             : *x;
+    const Tensor y_matrix =
+        y->dims().size() > 2 ? framework::FlattenToMatrix<T>(*y, y_num_row_dims)
+                             : *y;
+    const Tensor* dout = ctx.Input<Tensor>(framework::GradVarName("Out"));
 
-    Tensor* dX = ctx.Output<Tensor>(framework::GradVarName("X"));
-    Tensor* dY = ctx.Output<Tensor>(framework::GradVarName("Y"));
-    dX->mutable_data<T>(ctx.GetPlace());
-    dY->mutable_data<T>(ctx.GetPlace());
-    Tensor dX_matrix = dX->dims().size() > 2
-                           ? framework::FlattenToMatrix<T>(*dX, x_num_row_dims)
-                           : *dX;
-    Tensor dY_matrix = dY->dims().size() > 2
-                           ? framework::FlattenToMatrix<T>(*dY, y_num_row_dims)
-                           : *dY;
+    Tensor* dx = ctx.Output<Tensor>(framework::GradVarName("X"));
+    Tensor* dy = ctx.Output<Tensor>(framework::GradVarName("Y"));
     auto* device_context =
         const_cast<platform::DeviceContext*>(ctx.device_context_);
-    // dX = dOut * Y'. dX: M x K, dOut : M x N, Y : K x N
-    math::matmul<Place, T>(*dOut, false, Y_matrix, true, 1, &dX_matrix, 0,
-                           device_context);
-    // dY = X' * dOut. dY: K x N, dOut : M x N, X : M x K
-    math::matmul<Place, T>(X_matrix, true, *dOut, false, 1, &dY_matrix, 0,
-                           device_context);
+    if (dx) {
+      dx->mutable_data<T>(ctx.GetPlace());
+      Tensor dx_matrix = dx->dims().size() > 2
+                             ? framework::FlattenToMatrix<T>(*dx, x_num_row_dims)
+                             : *dx;
+      // dx = dout * y'. dx: M x K, dout : M x N, y : K x N
+      math::matmul<Place, T>(*dout, false, y_matrix, true, 1, &dx_matrix, 0,
+                             device_context);
+    }
+    if (dy) {
+      dy->mutable_data<T>(ctx.GetPlace());
+      Tensor dy_matrix = dy->dims().size() > 2
+                             ? framework::FlattenToMatrix<T>(*dy, y_num_row_dims)
+                             : *dy;
+      // dy = x' * dout. dy K x N, dout : M x N, x : M x K
+      math::matmul<Place, T>(x_matrix, true, *dout, false, 1, &dy_matrix, 0,
+                             device_context);
+    }
   }
 };
```
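Both kernels flatten inputs with rank above 2 into matrices before calling `math::matmul`. A minimal NumPy sketch of what such a flattening could look like, assuming `num_row_dims` counts the leading axes that collapse into matrix rows (an assumption for illustration; the exact semantics of `framework::FlattenToMatrix` are defined in the C++ source):

```python
import numpy as np

def flatten_to_matrix(t, num_row_dims):
    """Collapse the leading num_row_dims axes into rows and the rest into
    columns; an assumed reading of FlattenToMatrix, for illustration only."""
    rows = int(np.prod(t.shape[:num_row_dims]))
    return t.reshape(rows, -1)

x = np.zeros((2, 3, 4, 5))
print(flatten_to_matrix(x, 2).shape)  # (6, 20): 2*3 rows, 4*5 columns
```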
paddle/operators/recurrent_op.cc @ 69fbc542

```diff
@@ -235,5 +235,5 @@ RecurrentGradientOp::RecurrentGradientOp(
 }  // namespace paddle
 
 REGISTER_OP_WITHOUT_GRADIENT(
-    recurrent_op, paddle::operators::RecurrentOp,
+    recurrent, paddle::operators::RecurrentOp,
     paddle::operators::RecurrentAlgorithmProtoAndCheckerMaker);
```
paddle/operators/rowwise_add_op.cc @ 69fbc542

```diff
@@ -64,8 +64,10 @@ class RowwiseAddGradOp : public framework::OperatorWithKernel {
     auto dims0 = ctx.Input<Tensor>("X")->dims();
     auto dims1 = ctx.Input<Tensor>("b")->dims();
     PADDLE_ENFORCE_EQ(1, dims1.size(), "b dims should be 1")
-    ctx.Output<Tensor>(framework::GradVarName("X"))->Resize(dims0);
-    ctx.Output<Tensor>(framework::GradVarName("b"))->Resize(dims1);
+    auto *dx = ctx.Output<Tensor>(framework::GradVarName("X"));
+    auto *db = ctx.Output<Tensor>(framework::GradVarName("b"));
+    if (dx) dx->Resize(dims0);
+    if (db) db->Resize(dims1);
   }
 };
```
paddle/operators/rowwise_add_op.h @ 69fbc542

```diff
@@ -51,20 +51,24 @@ template <typename Place, typename T>
 class RowwiseAddGradKernel : public framework::OpKernel {
  public:
   void Compute(const framework::ExecutionContext& context) const override {
-    auto* dOut = context.Input<Tensor>(framework::GradVarName("Out"));
-    auto* dX = context.Output<Tensor>(framework::GradVarName("X"));
+    auto* dout = context.Input<Tensor>(framework::GradVarName("Out"));
+    auto* dx = context.Output<Tensor>(framework::GradVarName("X"));
     auto* db = context.Output<Tensor>(framework::GradVarName("b"));
-    dX->mutable_data<T>(context.GetPlace());
-    db->mutable_data<T>(context.GetPlace());
 
-    auto OutGrad = EigenMatrix<T>::From(*dOut);
+    auto out_grad = EigenMatrix<T>::From(*dout);
     auto place = context.GetEigenDevice<Place>();
-    EigenMatrix<T>::From(*dX).device(place) = OutGrad;
 
-    // https://eigen.tuxfamily.org/dox/unsupported/TensorBase_8h_source.html
-    // colwise add
-    Eigen::array<int, 1> dims{{0}}; /* dimension to reduce */
-    EigenVector<T>::Flatten(*db).device(place) = OutGrad.sum(dims);
+    if (dx) {
+      dx->mutable_data<T>(context.GetPlace());
+      EigenMatrix<T>::From(*dx).device(place) = out_grad;
+    }
+
+    if (db) {
+      db->mutable_data<T>(context.GetPlace());
+      // https://eigen.tuxfamily.org/dox/unsupported/TensorBase_8h_source.html
+      // colwise add
+      Eigen::array<int, 1> dims{{0}}; /* dimension to reduce */
+      EigenVector<T>::Flatten(*db).device(place) = out_grad.sum(dims);
+    }
   }
 };
 }  // namespace operators
```
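Apart from the new `if (dx)` / `if (db)` guards, the math is unchanged: the gradient of a row-wise add passes the output gradient straight through to `X` and reduces it over the row dimension for `b`, which is what `out_grad.sum(dims)` with `dims{{0}}` computes. The same in NumPy:

```python
import numpy as np

x = np.random.uniform(0.1, 1, (5, 10))
b = np.random.uniform(0.1, 1, 10)
out = x + b                   # forward: b broadcast across the rows of x

dout = np.ones_like(out)      # upstream gradient d(loss)/d(out)
dx = dout                     # rowwise-add passes the gradient through
db = dout.sum(axis=0)         # and sums it over rows for the bias

assert dx.shape == x.shape and db.shape == b.shape
```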
paddle/pybind/pybind.cc @ 69fbc542

```diff
@@ -39,12 +39,12 @@ USE_OP(sigmoid);
 USE_OP(softmax);
 USE_OP(rowwise_add);
 USE_OP(fill_zeros_like);
-USE_OP_ITSELF(recurrent_op);
+USE_NO_KERNEL_OP(recurrent);
 USE_OP(gaussian_random);
 USE_OP(uniform_random);
 USE_OP(lookup_table);
 USE_OP(scale);
-USE_OP_ITSELF(identity);
+USE_NO_KERNEL_OP(identity);
 USE_OP(minus);
 USE_CPU_ONLY_OP(gather);
 USE_CPU_ONLY_OP(scatter);
```
python/paddle/trainer_config_helpers/tests/configs/protostr/test_seq_select_layers.protostr → python/paddle/trainer_config_helpers/tests/configs/protostr/test_sub_nested_seq_select_layer.protostr @ 69fbc542

File moved.
python/paddle/v2/framework/op.py @ 69fbc542

```diff
@@ -179,7 +179,7 @@ class OperatorFactory(object):
 class __RecurrentOp__(object):
     __proto__ = None
-    type = 'recurrent_op'
+    type = 'recurrent'
 
     def __init__(self):
         # cache recurrent_op's proto
```
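This is the same `recurrent_op` to `recurrent` rename as in `backward.cc`, `recurrent_op.cc`, and `pybind.cc`: the type string is the lookup key shared between C++ registration and this Python factory, so all sides must agree. A toy registry makes the constraint concrete (hypothetical sketch, not Paddle's registry):

```python
# One shared registry keyed by the op type string.
registry = {}

def register(op_type, builder):
    registry[op_type] = builder

def create(op_type):
    if op_type not in registry:
        raise KeyError("no op registered under %r" % op_type)
    return registry[op_type]()

register("recurrent", lambda: "RecurrentOp instance")  # registration side
print(create("recurrent"))  # factory side must use the same key
# create("recurrent_op") would raise KeyError: the old name no longer exists
```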
python/paddle/v2/framework/tests/gradient_checker.py @ 69fbc542

```diff
@@ -286,6 +286,9 @@ class GradientChecker(unittest.TestCase):
         for no_grad in no_grad_set:
             if no_grad not in in_names:
                 raise ValueError("no_grad should be in in_names")
+            if no_grad in inputs_to_check:
+                raise ValueError("no_grad should not be in inputs_to_check")
+
         backward_op = core.Operator.backward(forward_op, no_grad_set)
 
         places = [core.CPUPlace()]

@@ -301,7 +304,6 @@ class GradientChecker(unittest.TestCase):
         check_names = [grad_var_name(name) for name in inputs_to_check]
         for place in places:
-            # get analytical gradients according to different device
             analytic_grads = self.__get_gradient(forward_op, backward_op,
                                                  input_vars, check_names, place)
             self.__assert_is_close(numeric_grads, analytic_grads, check_names,
```
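The added check rejects an input that is simultaneously gradient-checked and excluded. A standalone restatement of the new validation logic (illustrative only, not the real class):

```python
def validate_no_grad(in_names, inputs_to_check, no_grad_set):
    """Standalone restatement of the checks added to GradientChecker."""
    for no_grad in no_grad_set:
        if no_grad not in in_names:
            raise ValueError("no_grad should be in in_names")
        if no_grad in inputs_to_check:
            raise ValueError("no_grad should not be in inputs_to_check")

validate_no_grad(["X", "Y"], ["Y"], {"X"})      # ok: X skipped, Y checked
try:
    validate_no_grad(["X", "Y"], ["X"], {"X"})  # X both checked and skipped
except ValueError as e:
    print(e)  # no_grad should not be in inputs_to_check
```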
python/paddle/v2/framework/tests/test_mul_op.py @ 69fbc542

```diff
@@ -16,16 +16,37 @@ class TestMulOp(unittest.TestCase):
         self.outputs = {'Out': np.dot(self.inputs['X'], self.inputs['Y'])}
 
-class MulGradOpTest(GradientChecker):
-    def test_mul(self):
-        op = create_op("mul")
-        inputs = {
+class TestMulGradOp(GradientChecker):
+    def setUp(self):
+        self.op = create_op("mul")
+        self.inputs = {
             'X': np.random.random((32, 84)).astype("float32"),
             'Y': np.random.random((84, 100)).astype("float32")
         }
-        self.compare_grad(op, inputs)
+
+    def test_cpu_gpu_compare(self):
+        self.compare_grad(self.op, self.inputs)
+
+    def test_normal(self):
         # mul op will enlarge the relative error
         self.check_grad(
-            op, inputs, set(["X", "Y"]), "Out", max_relative_error=0.5)
+            self.op, self.inputs, ["X", "Y"], "Out", max_relative_error=0.5)
+
+    def test_ignore_x(self):
+        self.check_grad(
+            self.op,
+            self.inputs, ["Y"],
+            "Out",
+            max_relative_error=0.5,
+            no_grad_set={"X"})
+
+    def test_ignore_y(self):
+        self.check_grad(
+            self.op,
+            self.inputs, ["X"],
+            "Out",
+            max_relative_error=0.5,
+            no_grad_set={"Y"})
 
 # TODO(dzh,qijun) : mulgrad test case need transpose feature of blas library
```
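Moving fixture construction from the test body into `setUp` is stock `unittest` behavior: `setUp` runs before every `test_*` method, so all four tests share the same recipe for `self.op` and `self.inputs` without duplicating it. A minimal standalone example:

```python
import unittest

class Example(unittest.TestCase):
    def setUp(self):
        # Runs before every test_* method, so each test gets fresh fixtures.
        self.data = [1, 2, 3]

    def test_sum(self):
        self.assertEqual(sum(self.data), 6)

    def test_len(self):
        self.assertEqual(len(self.data), 3)

if __name__ == "__main__":
    unittest.main()
```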
python/paddle/v2/framework/tests/test_rowwise_add_op.py @ 69fbc542

```diff
@@ -16,14 +16,22 @@ class TestRowwiseAddOp(unittest.TestCase):
         self.outputs = {'Out': np.add(self.inputs['X'], self.inputs['b'])}
 
-class RowwiseAddGradOpTest(GradientChecker):
-    def test_rowwise_add(self):
-        op = create_op("rowwise_add")
-        inputs = {
+class TestRowwiseAddGradOp(GradientChecker):
+    def setUp(self):
+        self.op = create_op("rowwise_add")
+        self.inputs = {
             "X": np.random.uniform(0.1, 1, [5, 10]).astype("float32"),
             "b": np.random.uniform(0.1, 1, [10]).astype("float32")
         }
-        self.check_grad(op, inputs, set(["X", "b"]), "Out")
+
+    def test_normal(self):
+        self.check_grad(self.op, self.inputs, ["X", "b"], "Out")
+
+    def test_ignore_b(self):
+        self.check_grad(self.op, self.inputs, ["X"], "Out", no_grad_set={"b"})
+
+    def test_ignore_x(self):
+        self.check_grad(self.op, self.inputs, ["b"], "Out", no_grad_set={"X"})
 
 if __name__ == '__main__':
```