Commit f2a66ffa
Authored Sep 06, 2017 by fengjiayi
Parent: 256d6a33
Changes: 11
Showing 11 changed files with 73 additions and 69 deletions (+73 / -69)
paddle/framework/attribute.h                      +2  -2
paddle/framework/ddim.cc                          +5  -3
paddle/framework/ddim.h                           +1  -1
paddle/framework/eigen.h                          +8  -8
paddle/framework/tensor_impl.h                    +2  -2
paddle/framework/tensor_test.cc                   +2  -2
paddle/operators/mul_op.cc                        +17 -17
paddle/operators/mul_op.h                         +18 -18
paddle/operators/rowwise_add_op.cc                +8  -8
paddle/operators/rowwise_add_op.h                 +8  -6
python/paddle/v2/framework/tests/test_mul_op.py   +2  -2
paddle/framework/attribute.h (view file @ f2a66ffa)

@@ -44,7 +44,7 @@ class LargerThanChecker {
  public:
   explicit LargerThanChecker(T lower_bound) : lower_bound_(lower_bound) {}
   void operator()(T& value) const {
-    PADDLE_ENFORCE(value > lower_bound_, "larger_than check fail");
+    PADDLE_ENFORCE(value > lower_bound_, "larger_than check fails.");
   }

  private:
@@ -56,7 +56,7 @@ class EqualLargerThanChecker {
  public:
   explicit EqualLargerThanChecker(T lower_bound) : lower_bound_(lower_bound) {}
   void operator()(T& value) const {
-    PADDLE_ENFORCE(value >= lower_bound_, "equal_larger_than check fail");
+    PADDLE_ENFORCE(value >= lower_bound_, "equal_larger_than check fails.");
   }

  private:
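For context, these checkers are the functors that operator makers chain onto attributes (see .SetDefault(1).EqualLargerThan(1) in paddle/operators/mul_op.cc below). A minimal sketch, not part of this commit, of how they behave when invoked directly; it assumes the checker class templates are usable as declared in this header under paddle::framework:

#include "paddle/framework/attribute.h"

// Sketch only: both functors validate a value against a lower bound and fail
// through PADDLE_ENFORCE with the messages adjusted above.
void attribute_checker_sketch() {
  namespace fw = paddle::framework;
  int num_col_dims = 2;
  fw::EqualLargerThanChecker<int>(1)(num_col_dims);  // passes: 2 >= 1
  // fw::LargerThanChecker<int>(2)(num_col_dims);    // would fail: 2 > 2 is false,
  //                                                 // printing "larger_than check fails."
}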
paddle/framework/ddim.cc (view file @ f2a66ffa)

@@ -284,11 +284,13 @@ DDim::DDim(std::initializer_list<int> init_list) {
   *this = make_ddim(init_list);
 }
 
-DDim flatten_to_2d(const DDim& src, int num_row_dims) {
+// Reshape a tensor to a matrix. The matrix's first dimension(column length)
+// will be the product of tensor's first `num_col_dims` dimensions
+DDim flatten_to_2d(const DDim& src, int num_col_dims) {
   int rank = src.size();
   return make_ddim(
-      {static_cast<int>(product(slice_ddim(src, 0, rank - num_row_dims))),
-       static_cast<int>(product(slice_ddim(src, rank - num_row_dims, rank)))});
+      {static_cast<int>(product(slice_ddim(src, 0, num_col_dims))),
+       static_cast<int>(product(slice_ddim(src, num_col_dims, rank)))});
 }
 
 DDim flatten_to_1d(const DDim& src) {
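To make the renamed parameter concrete, here is a minimal sketch (not part of this commit) of the new flatten_to_2d semantics, using the same shape as the updated mul tests further down; it assumes only the declarations from paddle/framework/ddim.h:

#include "paddle/framework/ddim.h"

void flatten_to_2d_sketch() {
  using paddle::framework::DDim;
  using paddle::framework::flatten_to_2d;
  using paddle::framework::make_ddim;

  // num_col_dims = 2: the first two dimensions collapse into the matrix's
  // first dimension, the remaining ones into its second dimension.
  DDim src = make_ddim({15, 4, 12, 10});
  DDim mat = flatten_to_2d(src, 2);  // {15 * 4, 12 * 10} == {60, 120}
  (void)mat;

  // The old num_row_dims parameter counted from the back instead:
  // num_row_dims == k produced the same 2-D shape as num_col_dims == rank - k.
}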
paddle/framework/ddim.h (view file @ f2a66ffa)

@@ -115,7 +115,7 @@ int arity(const DDim& ddim);
 
 std::ostream& operator<<(std::ostream&, const DDim&);
 
-DDim flatten_to_2d(const DDim& src, int num_row_dims);
+DDim flatten_to_2d(const DDim& src, int num_col_dims);
 
 DDim flatten_to_1d(const DDim& src);
paddle/framework/eigen.h (view file @ f2a66ffa)

@@ -64,21 +64,21 @@ struct EigenTensor {
 template <typename T, int MajorType = Eigen::RowMajor,
           typename IndexType = Eigen::DenseIndex>
 struct EigenMatrix : public EigenTensor<T, 2, MajorType, IndexType> {
-  static typename EigenMatrix::Type Reshape(Tensor& tensor, int num_row_dims) {
+  static typename EigenMatrix::Type Reshape(Tensor& tensor, int num_col_dims) {
     int rank = tensor.dims_.size();
-    PADDLE_ENFORCE(num_row_dims > 0 && num_row_dims < rank,
-                   "`num_row_dims` must be between (0, rank_of_tensor).");
+    PADDLE_ENFORCE(num_col_dims > 0 && num_col_dims < rank,
+                   "`num_col_dims` must be between (0, rank_of_tensor).");
     return EigenMatrix::From(tensor,
-                             flatten_to_2d(tensor.dims(), num_row_dims));
+                             flatten_to_2d(tensor.dims(), num_col_dims));
   }
 
   static typename EigenMatrix::ConstType Reshape(const Tensor& tensor,
-                                                 int num_row_dims) {
+                                                 int num_col_dims) {
     int rank = tensor.dims_.size();
-    PADDLE_ENFORCE(num_row_dims > 0 && num_row_dims < rank,
-                   "`num_row_dims` must be between (0, rank_of_tensor).");
+    PADDLE_ENFORCE(num_col_dims > 0 && num_col_dims < rank,
+                   "`num_col_dims` must be between (0, rank_of_tensor).");
     return EigenMatrix::From(tensor,
-                             flatten_to_2d(tensor.dims(), num_row_dims));
+                             flatten_to_2d(tensor.dims(), num_col_dims));
   }
 };
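The two Reshape overloads are what kernels use to view an N-D Tensor as a 2-D Eigen expression; rowwise_add_op.h below calls them with the derived num_col_dims. A minimal usage sketch, not part of this commit, assuming an already-allocated float tensor of shape {15, 4, 12, 10}:

#include "paddle/framework/eigen.h"
#include "paddle/framework/tensor.h"

namespace fw = paddle::framework;

// Sketch only: num_col_dims must lie strictly between 0 and the tensor's
// rank, otherwise the PADDLE_ENFORCE above fires.
void eigen_reshape_sketch(fw::Tensor& t) {
  auto mat = fw::EigenMatrix<float>::Reshape(t, /*num_col_dims=*/2);
  // mat is a {15 * 4, 12 * 10} == {60, 120} matrix view over t's data.
  (void)mat;
}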
paddle/framework/tensor_impl.h (view file @ f2a66ffa)

@@ -149,10 +149,10 @@ inline Tensor& Tensor::Resize(const DDim& dims) {
 inline const DDim& Tensor::dims() const { return dims_; }
 
 template <typename T>
-inline Tensor FlattenToMatrix(const Tensor& src, int num_row_dims) {
+inline Tensor ReshapeToMatrix(const Tensor& src, int num_col_dims) {
   Tensor res;
   res.ShareDataWith<T>(src);
-  res.Resize(flatten_to_2d(src.dims(), num_row_dims));
+  res.Resize(flatten_to_2d(src.dims(), num_col_dims));
   return res;
 }
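ReshapeToMatrix is the tensor-level counterpart: it shares storage with the source and only installs the 2-D shape computed by flatten_to_2d. A minimal sketch, not part of this commit, mirroring the updated test that follows (the include path is an assumption):

#include "paddle/framework/tensor.h"

namespace fw = paddle::framework;

// Sketch only: no data is copied; the result aliases `src`.
fw::Tensor reshape_to_matrix_sketch(const fw::Tensor& src) {
  // For a {2, 3, 4, 9} source and num_col_dims = 2 the result has dims
  // {6, 36}, which is exactly what TEST(Tensor, ReshapeToMatrix) asserts.
  return fw::ReshapeToMatrix<int>(src, /*num_col_dims=*/2);
}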
paddle/framework/tensor_test.cc (view file @ f2a66ffa)

@@ -263,7 +263,7 @@ TEST(Tensor, CopyFrom) {
 #endif
 }
 
-TEST(Tensor, FlattenToMatrix) {
+TEST(Tensor, ReshapeToMatrix) {
   using namespace paddle::framework;
   using namespace paddle::platform;
   Tensor src;
@@ -271,7 +271,7 @@ TEST(Tensor, FlattenToMatrix) {
   for (int i = 0; i < 2 * 3 * 4 * 9; ++i) {
     src_ptr[i] = i;
   }
-  Tensor res = FlattenToMatrix<int>(src, 2);
+  Tensor res = ReshapeToMatrix<int>(src, 2);
   ASSERT_EQ(res.dims()[0], 2 * 3);
   ASSERT_EQ(res.dims()[1], 4 * 9);
 }
\ No newline at end of file
paddle/operators/mul_op.cc (view file @ f2a66ffa)

@@ -27,20 +27,20 @@ class MulOp : public framework::OperatorWithKernel {
   void InferShape(const framework::InferShapeContext& ctx) const override {
     auto x_dims = ctx.Input<Tensor>("X")->dims();
     auto y_dims = ctx.Input<Tensor>("Y")->dims();
-    int x_num_row_dims = GetAttr<int>("x_num_row_dims");
-    int y_num_row_dims = GetAttr<int>("y_num_row_dims");
+    int x_num_col_dims = GetAttr<int>("x_num_col_dims");
+    int y_num_col_dims = GetAttr<int>("y_num_col_dims");
 
-    PADDLE_ENFORCE(x_dims.size() > x_num_row_dims,
+    PADDLE_ENFORCE(x_dims.size() > x_num_col_dims,
                    "The rank of input tensor X(%s) should be larger than "
-                   "`mul_op`'s `x_num_row_dims`.",
+                   "`mul_op`'s `x_num_col_dims`.",
                    ctx.op().Input("X"));
-    PADDLE_ENFORCE(y_dims.size() > y_num_row_dims,
+    PADDLE_ENFORCE(y_dims.size() > y_num_col_dims,
                    "The rank of input tensor Y(%s) should be larger than "
-                   "`mul_op`'s `y_num_row_dims`.",
+                   "`mul_op`'s `y_num_col_dims`.",
                    ctx.op().Input("Y"));
 
-    auto x_mat_dims = framework::flatten_to_2d(x_dims, x_num_row_dims);
-    auto y_mat_dims = framework::flatten_to_2d(y_dims, y_num_row_dims);
+    auto x_mat_dims = framework::flatten_to_2d(x_dims, x_num_col_dims);
+    auto y_mat_dims = framework::flatten_to_2d(y_dims, y_num_col_dims);
 
     PADDLE_ENFORCE_EQ(
         x_mat_dims[1], y_mat_dims[0],

@@ -57,19 +57,19 @@ class MulOpMaker : public framework::OpProtoAndCheckerMaker {
     AddInput("Y", "The second input of mul op");
     AddOutput("Out", "The output of mul op");
     AddAttr<int>(
-        "x_num_row_dims",
+        "x_num_col_dims",
         "mul_op can take tensors with more than two dimensions as input `X`, "
-        "in that case, tensors will be flattened to a matrix. The matrix's "
-        "second dimension(row length) will be the product of tensor's last "
-        "`num_row_dims` dimensions, and the matrix's first dimension(column "
-        "length) will be the product of tensor's first `rank - num_row_dims` "
+        "in that case, tensors will be reshaped to a matrix. The matrix's "
+        "first dimension(column length) will be the product of tensor's last "
+        "`num_col_dims` dimensions, and the matrix's second dimension(row "
+        "length) will be the product of tensor's first `rank - num_col_dims` "
         "dimensions.")
         .SetDefault(1)
         .EqualLargerThan(1);
     AddAttr<int>(
-        "y_num_row_dims",
+        "y_num_col_dims",
         "mul_op can take tensors with more than two dimensions as input `Y`, "
-        "in that case, tensors will be flattened to a matrix. Just like input "
+        "in that case, tensors will be reshaped to a matrix. Just like input "
         "`X`.")
         .SetDefault(1)
         .EqualLargerThan(1);

@@ -98,9 +98,9 @@ class MulOpGrad : public framework::OperatorWithKernel {
     auto *y_grad = ctx.Output<Tensor>(framework::GradVarName("Y"));
 
     auto x_mat_dims =
-        framework::flatten_to_2d(x_dims, GetAttr<int>("x_num_row_dims"));
+        framework::flatten_to_2d(x_dims, GetAttr<int>("x_num_col_dims"));
     auto y_mat_dims =
-        framework::flatten_to_2d(y_dims, GetAttr<int>("y_num_row_dims"));
+        framework::flatten_to_2d(y_dims, GetAttr<int>("y_num_col_dims"));
 
     PADDLE_ENFORCE_EQ(
         x_mat_dims[0], out_dims[0],
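The shape inference above can be traced against the updated Python test at the end of this commit. A small sketch, not part of the commit, that reproduces the arithmetic with the flatten_to_2d helper changed above:

#include "paddle/framework/ddim.h"

// Sketch only; shapes taken from TestMulOp2 in test_mul_op.py below.
void mul_infer_shape_sketch() {
  using namespace paddle::framework;
  auto x_mat = flatten_to_2d(make_ddim({15, 4, 12, 10}), 2);   // {60, 120}
  auto y_mat = flatten_to_2d(make_ddim({4, 30, 8, 2, 9}), 2);  // {120, 144}
  // x_mat[1] == y_mat[0] == 120, so the PADDLE_ENFORCE_EQ above passes and
  // MulOp infers Out as a {60, 144} matrix.
  (void)x_mat;
  (void)y_mat;
}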
paddle/operators/mul_op.h (view file @ f2a66ffa)

 /* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
 
 Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-you may obtain a copy of the License at
+You may not use this file except in compliance with the License.
+You may obtain a copy of the License at
 
     http://www.apache.org/licenses/LICENSE-2.0
 
 Unless required by applicable law or agreed to in writing, software
 distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANy KIND, either express or implied.
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License. */

@@ -33,22 +33,22 @@ class MulKernel : public framework::OpKernel {
   void Compute(const framework::ExecutionContext& context) const override {
     const Tensor* x = context.Input<Tensor>("X");
     const Tensor* y = context.Input<Tensor>("Y");
-    Tensor* Z = context.Output<Tensor>("Out");
+    Tensor* z = context.Output<Tensor>("Out");
     const Tensor x_matrix =
         x->dims().size() > 2
-            ? framework::FlattenToMatrix<T>(
-                  *x, context.template GetAttr<int>("x_num_row_dims"))
+            ? framework::ReshapeToMatrix<T>(
+                  *x, context.template GetAttr<int>("x_num_col_dims"))
             : *x;
     const Tensor y_matrix =
         y->dims().size() > 2
-            ? framework::FlattenToMatrix<T>(
-                  *y, context.template GetAttr<int>("y_num_row_dims"))
+            ? framework::ReshapeToMatrix<T>(
+                  *y, context.template GetAttr<int>("y_num_col_dims"))
            : *y;
 
-    Z->mutable_data<T>(context.GetPlace());
+    z->mutable_data<T>(context.GetPlace());
     auto* device_context =
         const_cast<platform::DeviceContext*>(context.device_context_);
-    math::matmul<Place, T>(x_matrix, false, y_matrix, false, 1, Z, 0,
+    math::matmul<Place, T>(x_matrix, false, y_matrix, false, 1, z, 0,
                            device_context);
   }
 };

@@ -57,15 +57,15 @@ template <typename Place, typename T>
 class MulGradKernel : public framework::OpKernel {
  public:
   void Compute(const framework::ExecutionContext& ctx) const override {
-    int x_num_row_dims = ctx.template GetAttr<int>("x_num_row_dims");
-    int y_num_row_dims = ctx.template GetAttr<int>("y_num_row_dims");
+    int x_num_col_dims = ctx.template GetAttr<int>("x_num_col_dims");
+    int y_num_col_dims = ctx.template GetAttr<int>("y_num_col_dims");
     const Tensor* x = ctx.Input<Tensor>("X");
     const Tensor* y = ctx.Input<Tensor>("Y");
     const Tensor x_matrix =
-        x->dims().size() > 2 ? framework::FlattenToMatrix<T>(*x, x_num_row_dims)
+        x->dims().size() > 2 ? framework::ReshapeToMatrix<T>(*x, x_num_col_dims)
                              : *x;
     const Tensor y_matrix =
-        y->dims().size() > 2 ? framework::FlattenToMatrix<T>(*y, y_num_row_dims)
+        y->dims().size() > 2 ? framework::ReshapeToMatrix<T>(*y, y_num_col_dims)
                              : *y;
     const Tensor* dout = ctx.Input<Tensor>(framework::GradVarName("Out"));

@@ -75,8 +75,8 @@ class MulGradKernel : public framework::OpKernel {
         const_cast<platform::DeviceContext*>(ctx.device_context_);
     if (dx) {
       dx->mutable_data<T>(ctx.GetPlace());
-      Tensor dx_matrix = dx->dims().size() > 2
-                             ? framework::FlattenToMatrix<T>(*dx, x_num_row_dims)
+      Tensor dx_matrix = dx->dims().size() > 2
+                             ? framework::ReshapeToMatrix<T>(*dx, x_num_col_dims)
                              : *dx;
       // dx = dout * y'. dx: M x K, dout : M x N, y : K x N
       math::matmul<Place, T>(*dout, false, y_matrix, true, 1, &dx_matrix, 0,

@@ -84,8 +84,8 @@ class MulGradKernel : public framework::OpKernel {
     }
     if (dy) {
       dy->mutable_data<T>(ctx.GetPlace());
-      Tensor dy_matrix = dy->dims().size() > 2
-                             ? framework::FlattenToMatrix<T>(*dy, y_num_row_dims)
+      Tensor dy_matrix = dy->dims().size() > 2
+                             ? framework::ReshapeToMatrix<T>(*dy, y_num_col_dims)
                              : *dy;
       // dy = x' * dout. dy K x N, dout : M x N, x : M x K
       math::matmul<Place, T>(x_matrix, true, *dout, false, 1, &dy_matrix, 0,
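For readers following the gradient kernel, the in-code comments above ("dx = dout * y'", "dy = x' * dout") work out as follows for the shapes used in TestMulGradTest2 below (a worked example, not part of the commit):

// M = 60 (= 15 * 4), K = 120 (= 12 * 10 = 4 * 30), N = 144 (= 8 * 2 * 9)
//   x_matrix : M x K = 60 x 120      y_matrix : K x N = 120 x 144
//   dout     : M x N = 60 x 144
//   dx = dout * y_matrix^T -> 60 x 120, written through dx_matrix, which
//        aliases dx and leaves its original {15, 4, 12, 10} dims untouched
//   dy = x_matrix^T * dout -> 120 x 144, likewise aliasing dy's {4, 30, 8, 2, 9}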
paddle/operators/rowwise_add_op.cc (view file @ f2a66ffa)

@@ -31,11 +31,11 @@ class RowwiseAddOp : public framework::OperatorWithKernel {
         x_dims.size(), b_dims.size(),
         "The rank of input `X` must be larger than the one of input `b`.");
-    int num_row_dims = b_dims.size();
-    PADDLE_ENFORCE_EQ(framework::slice_ddim(
-                          x_dims, x_dims.size() - num_row_dims, x_dims.size()),
-                      b_dims, "The width of two operands must be same");
+    int num_col_dims = x_dims.size() - b_dims.size();
+    PADDLE_ENFORCE_EQ(
+        framework::slice_ddim(x_dims, num_col_dims, x_dims.size()), b_dims,
+        "The width of two operands must be same");
     PADDLE_ENFORCE_EQ(ctx.OutputSize("Out"), 1, "The output size must be 1");
     ctx.Output<Tensor>("Out")->Resize(x_dims);
   }

@@ -72,10 +72,10 @@ class RowwiseAddGradOp : public framework::OperatorWithKernel {
         x_dims.size(), b_dims.size(),
         "The rank of input `X` must be larger than the one of input `b`.");
-    int num_row_dims = b_dims.size();
-    PADDLE_ENFORCE_EQ(framework::slice_ddim(
-                          x_dims, x_dims.size() - num_row_dims, x_dims.size()),
-                      b_dims, "The width of two operands must be same");
+    int num_col_dims = x_dims.size() - b_dims.size();
+    PADDLE_ENFORCE_EQ(
+        framework::slice_ddim(x_dims, num_col_dims, x_dims.size()), b_dims,
+        "The width of two operands must be same");
     auto *dx = ctx.Output<Tensor>(framework::GradVarName("X"));
     auto *db = ctx.Output<Tensor>(framework::GradVarName("b"));
     if (dx) dx->Resize(x_dims);
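Here num_col_dims is no longer an attribute but is derived from the operands: it is the number of leading X dimensions not covered by b. A minimal sketch, not part of this commit, with assumed example shapes X = {2, 3, 4, 5} and b = {4, 5}:

#include "paddle/framework/ddim.h"

// Sketch only: how the renamed check lines up for the assumed shapes.
void rowwise_add_shape_sketch() {
  using namespace paddle::framework;
  DDim x_dims = make_ddim({2, 3, 4, 5});
  DDim b_dims = make_ddim({4, 5});
  int num_col_dims = x_dims.size() - b_dims.size();  // 4 - 2 = 2
  // slice_ddim(x_dims, num_col_dims, x_dims.size()) == {4, 5} == b_dims, so the
  // width check passes; the kernel in rowwise_add_op.h then views X as a
  // {6, 20} matrix via EigenMatrix<T>::Reshape(X, num_col_dims) and adds the
  // flattened length-20 bias to every row.
  (void)num_col_dims;
}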
paddle/operators/rowwise_add_op.h (view file @ f2a66ffa)

@@ -33,11 +33,12 @@ class RowwiseAddKernel : public framework::OpKernel {
   void Compute(const framework::ExecutionContext& context) const override {
     auto out = context.Output<Tensor>("Out");
     out->mutable_data<T>(context.GetPlace());
-    int num_row_dims = context.Input<Tensor>("b")->dims().size();
+    int num_col_dims = context.Input<Tensor>("X")->dims().size() -
+                       context.Input<Tensor>("b")->dims().size();
     auto input =
-        EigenMatrix<T>::Reshape(*context.Input<Tensor>("X"), num_row_dims);
+        EigenMatrix<T>::Reshape(*context.Input<Tensor>("X"), num_col_dims);
     auto bias = EigenVector<T>::Flatten(*context.Input<Tensor>("b"));
-    auto output = EigenMatrix<T>::Reshape(*out, num_row_dims);
+    auto output = EigenMatrix<T>::Reshape(*out, num_col_dims);
 
     const int bias_size = bias.dimension(0);
     const int rest_size = input.size() / bias_size;

@@ -55,14 +56,15 @@ class RowwiseAddGradKernel : public framework::OpKernel {
     auto* dout = context.Input<Tensor>(framework::GradVarName("Out"));
     auto* dx = context.Output<Tensor>(framework::GradVarName("X"));
     auto* db = context.Output<Tensor>(framework::GradVarName("b"));
-    int num_row_dims = context.Input<Tensor>("b")->dims().size();
+    int num_col_dims = context.Input<Tensor>("X")->dims().size() -
+                       context.Input<Tensor>("b")->dims().size();
 
-    auto out_grad = EigenMatrix<T>::Reshape(*dout, num_row_dims);
+    auto out_grad = EigenMatrix<T>::Reshape(*dout, num_col_dims);
     auto place = context.GetEigenDevice<Place>();
 
     if (dx) {
       dx->mutable_data<T>(context.GetPlace());
-      EigenMatrix<T>::Reshape(*dx, num_row_dims).device(place) = out_grad;
+      EigenMatrix<T>::Reshape(*dx, num_col_dims).device(place) = out_grad;
     }
 
     if (db) {
python/paddle/v2/framework/tests/test_mul_op.py (view file @ f2a66ffa)

@@ -26,7 +26,7 @@ class TestMulOp2(unittest.TestCase):
             'X': np.random.random((15, 4, 12, 10)).astype("float32"),
             'Y': np.random.random((4, 30, 8, 2, 9)).astype("float32")
         }
-        self.attrs = {'x_num_row_dims': 2, 'y_num_row_dims': 3}
+        self.attrs = {'x_num_col_dims': 2, 'y_num_col_dims': 2}
         self.outputs = {
             'Out': np.dot(self.inputs['X'].reshape(15 * 4, 12 * 10),
                           self.inputs['Y'].reshape(4 * 30, 8 * 2 * 9))

@@ -69,7 +69,7 @@ class TestMulGradOp(GradientChecker):
 class TestMulGradTest2(GradientChecker):
     def setUp(self):
         self.op = Operator(
-            "mul", X="X", Y="Y", Out="Out", x_num_row_dims=2, y_num_row_dims=3)
+            "mul", X="X", Y="Y", Out="Out", x_num_col_dims=2, y_num_col_dims=2)
         self.inputs = {
             "X": np.random.random((15, 4, 12, 10)).astype("float32"),
             "Y": np.random.random((4, 30, 8, 2, 9)).astype("float32")