Crayon鑫 / Paddle (forked from PaddlePaddle / Paddle)

Commit abf9832c ("tiny change to save memory")
Author: sneaxiy
Date: Sep 17, 2018
Parent: f86198e6

Showing 8 changed files with 103 additions and 13 deletions (+103, -13):
  paddle/fluid/framework/grad_op_desc_maker.h   +3   -0
  paddle/fluid/operators/elementwise_mul_op.cc  +38  -1
  paddle/fluid/operators/elementwise_mul_op.h   +2   -1
  paddle/fluid/operators/matmul_op.cc           +7   -2
  paddle/fluid/operators/mul_op.cc              +19  -2
  paddle/fluid/operators/scale_op.cc            +2   -0
  paddle/fluid/operators/scale_op.h             +17  -4
  python/paddle/fluid/layers/nn.py              +15  -3

paddle/fluid/framework/grad_op_desc_maker.h

@@ -129,6 +129,9 @@ class GradOpDescMakerBase {
+  std::string ForwardOpType() const { return this->fwd_op_.Type(); }
+
  protected:
   const OpDesc& ForwardOp() const { return fwd_op_; }

  private:
   const OpDesc& fwd_op_;
   const std::unordered_set<std::string>& no_grad_set_;

paddle/fluid/operators/elementwise_mul_op.cc

@@ -13,9 +13,46 @@ See the License for the specific language governing permissions and
 limitations under the License. */

 #include "paddle/fluid/operators/elementwise_mul_op.h"
+#include <string>
 #include "paddle/fluid/operators/elementwise_op.h"

+namespace paddle {
+namespace operators {
+
+class ElementwiseMulOpGradDescMaker : public framework::SingleGradOpDescMaker {
+ public:
+  using framework::SingleGradOpDescMaker::SingleGradOpDescMaker;
+
+ protected:
+  std::unique_ptr<framework::OpDesc> Apply() const override {
+    std::unique_ptr<framework::OpDesc> op(new framework::OpDesc());
+    op->SetType("elementwise_mul_grad");
+    op->SetInput("X", Input("X"));
+    op->SetInput("Y", Input("Y"));
+    op->SetInput(framework::GradVarName("Out"), OutputGrad("Out"));
+    op->SetAttrMap(Attrs());
+    op->SetOutput(::paddle::framework::GradVarName("X"), InputGrad("X"));
+    op->SetOutput(::paddle::framework::GradVarName("Y"), InputGrad("Y"));
+    return op;
+  }
+};
+
+class ElementwiseMulOpMaker : public ElementwiseOpMaker {
+ protected:
+  virtual std::string GetName() const { return "Mul"; }
+  virtual std::string GetEquation() const { return "Out = X \\\\odot Y"; }
+};
+
+}  // namespace operators
+}  // namespace paddle
+
 namespace ops = paddle::operators;
-REGISTER_ELEMWISE_OP(elementwise_mul, "Mul", "Out = X \\\\odot Y");
+// REGISTER_ELEMWISE_OP(elementwise_mul, "Mul", "Out = X \\\\odot Y");
+REGISTER_OPERATOR(elementwise_mul, ops::ElementwiseOp,
+                  ops::ElementwiseMulOpMaker, ops::ElementwiseOpInferVarType,
+                  ops::ElementwiseMulOpGradDescMaker);
+REGISTER_OPERATOR(elementwise_mul_grad, ops::ElementwiseOpGrad);
+
 REGISTER_OP_CPU_KERNEL(
     elementwise_mul,
     ops::ElementwiseMulKernel<paddle::platform::CPUDeviceContext, float>,

paddle/fluid/operators/elementwise_mul_op.h

@@ -57,8 +57,9 @@ class ElementwiseMulGradKernel : public framework::OpKernel<T> {
     auto* x = ctx.Input<Tensor>("X");
     auto* y = ctx.Input<Tensor>("Y");
-    auto* out = ctx.Input<Tensor>("Out");
+    // auto* out = ctx.Input<Tensor>("Out");
     auto* dout = ctx.Input<Tensor>(framework::GradVarName("Out"));
+    auto* out = dout;  // out is not necessary
     auto* dx = ctx.Output<Tensor>(framework::GradVarName("X"));
     auto* dy = ctx.Output<Tensor>(framework::GradVarName("Y"));
     int axis = ctx.Attr<int>("axis");

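Note on the change above: for elementwise multiplication the backward pass needs only X, Y and the output gradient, since with Out = X * Y the gradients are dX = dOut * Y and dY = dOut * X; the forward output is never read, so aliasing `out` to `dout` lets the framework avoid keeping Out alive for the backward pass. A minimal NumPy sketch of the same-shape case (illustrative only, not Paddle code):

    import numpy as np

    # Illustrative only: gradients of Out = X * Y (elementwise), same-shape case.
    # dX and dY depend on X, Y and dOut alone; the forward output Out is unused,
    # which is why the kernel above can alias `out` to `dout`.
    X = np.random.rand(2, 3)
    Y = np.random.rand(2, 3)
    dOut = np.ones_like(X)   # upstream gradient

    dX = dOut * Y            # d(X*Y)/dX = Y
    dY = dOut * X            # d(X*Y)/dY = X
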
paddle/fluid/operators/matmul_op.cc

@@ -59,7 +59,9 @@ class MatMulKernel : public framework::OpKernel<T> {
         RowMatrixFromVector(x.dims()), 0, context.Attr<bool>("transpose_X"));
     auto mat_dim_b = math::CreateMatrixDescriptor(
         ColumnMatrixFromVector(y.dims()), 0, context.Attr<bool>("transpose_Y"));
-    blas.MatMul(x, mat_dim_a, y, mat_dim_b, T(1), out, T(0));
+    auto scale = static_cast<T>(context.Attr<float>("scale"));
+    auto bias = static_cast<T>(context.Attr<float>("bias"));
+    blas.MatMul(x, mat_dim_a, y, mat_dim_b, scale, out, bias);
   }
 };

@@ -185,7 +187,8 @@ class MatMulGradKernel : public framework::OpKernel<T> {
     auto blas = math::GetBlas<DeviceContext, T>(context);
     auto mat_dim_a = math::CreateMatrixDescriptor(a.dims(), 0, trans_a);
     auto mat_dim_b = math::CreateMatrixDescriptor(b.dims(), 0, trans_b);
-    blas.MatMul(a, mat_dim_a, b, mat_dim_b, T(1), out, T(0));
+    blas.MatMul(a, mat_dim_a, b, mat_dim_b,
+                static_cast<T>(context.Attr<float>("scale")), out, T(0));
   }

   void CalcInputGrad(const framework::ExecutionContext& context,

@@ -334,6 +337,8 @@ class MatMulOpMaker : public framework::OpProtoAndCheckerMaker {
              R"DOC(If true, use the transpose of `Y`.
         )DOC")
         .SetDefault(false);
+    AddAttr<float>("scale", "Scale").SetDefault(1.0f);
+    AddAttr<float>("bias", "Bias").SetDefault(0.0f);
     AddComment(R"DOC(
 MatMul Operator.

paddle/fluid/operators/mul_op.cc

@@ -156,12 +156,29 @@ class MulGradOp : public framework::OperatorWithKernel {
   }
 };

+class MulOpGradMaker : public framework::SingleGradOpDescMaker {
+ public:
+  using framework::SingleGradOpDescMaker::SingleGradOpDescMaker;
+
+ protected:
+  std::unique_ptr<framework::OpDesc> Apply() const override {
+    std::unique_ptr<framework::OpDesc> retv(new framework::OpDesc());
+    retv->SetType("mul_grad");
+    retv->SetInput("X", Input("X"));
+    retv->SetInput("Y", Input("Y"));
+    retv->SetInput(framework::GradVarName("Out"), OutputGrad("Out"));
+    retv->SetOutput(framework::GradVarName("X"), InputGrad("X"));
+    retv->SetOutput(framework::GradVarName("Y"), InputGrad("Y"));
+    retv->SetAttrMap(Attrs());
+    return retv;
+  }
+};
+
 }  // namespace operators
 }  // namespace paddle

 namespace ops = paddle::operators;
-REGISTER_OPERATOR(mul, ops::MulOp, ops::MulOpMaker,
-                  paddle::framework::DefaultGradOpDescMaker<true>);
+REGISTER_OPERATOR(mul, ops::MulOp, ops::MulOpMaker, ops::MulOpGradMaker);
 REGISTER_OPERATOR(mul_grad, ops::MulGradOp);
 REGISTER_OP_CPU_KERNEL(
     mul, ops::MulKernel<paddle::platform::CPUDeviceContext, float>,

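Note on the change above: replacing paddle::framework::DefaultGradOpDescMaker&lt;true&gt; with the hand-written MulOpGradMaker means the generated mul_grad op declares only X, Y and GRAD(Out) as inputs, instead of every forward input and output, so the forward output Out no longer has to be retained for the backward pass. A NumPy sketch of the 2-D case showing that the gradients of a matrix product do not involve Out (illustrative only):

    import numpy as np

    # Illustrative only: gradients of Out = X.dot(Y) for 2-D inputs.
    # As with elementwise_mul, the backward pass uses X, Y and dOut,
    # but never the forward output Out itself.
    X = np.random.rand(4, 3)
    Y = np.random.rand(3, 5)
    dOut = np.ones((4, 5))   # upstream gradient, same shape as Out

    dX = dOut.dot(Y.T)       # shape (4, 3)
    dY = X.T.dot(dOut)       # shape (3, 5)
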
paddle/fluid/operators/scale_op.cc

@@ -49,6 +49,7 @@ $$Out = scale*X$$
 )DOC");
     AddAttr<float>("scale", "The scaling factor of the scale operator.")
         .SetDefault(1.0);
+    AddAttr<float>("bias", "The bias of the scale operator.").SetDefault(0.0);
   }
 };

@@ -62,6 +63,7 @@ class ScaleGradMaker : public framework::SingleGradOpDescMaker {
     grad_op->SetInput("X", OutputGrad("Out"));
     grad_op->SetOutput("Out", InputGrad("X"));
     grad_op->SetAttr("scale", GetAttr("scale"));
+    grad_op->SetAttr("bias", 0.0f);
     return std::unique_ptr<framework::OpDesc>(grad_op);
   }
 };

paddle/fluid/operators/scale_op.h

@@ -29,11 +29,24 @@ class ScaleKernel : public framework::OpKernel<T> {
-    auto scale = static_cast<T>(context.Attr<float>("scale"));
-    auto eigen_out = framework::EigenVector<T>::Flatten(*tensor);
+    PADDLE_ENFORCE_EQ(in->dims(), out->dims(),
+                      "in and out should have the same dim");
+
+    auto scale = static_cast<T>(ctx.Attr<float>("scale"));
+    auto bias = static_cast<T>(ctx.Attr<float>("bias"));
+
+    if (in_var->IsType<framework::SelectedRows>() && in_var != out_var) {
+      auto& in_slr = in_var->Get<framework::SelectedRows>();
+      auto* out_slr = out_var->GetMutable<framework::SelectedRows>();
+      out_slr->set_rows(in_slr.rows());
+      out_slr->set_height(in_slr.height());
+    }
+
+    auto eigen_out = framework::EigenVector<T>::Flatten(*out);
     auto eigen_in = framework::EigenVector<T>::Flatten(*in);
-    auto& dev = *context.template device_context<DeviceContext>().eigen_device();
-    eigen_out.device(dev) = scale * eigen_in;
+    auto& dev = *ctx.template device_context<DeviceContext>().eigen_device();
+    eigen_out.device(dev) =
+        static_cast<T>(scale) * eigen_in + static_cast<T>(bias);
   }
 };

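Note on the change above: the reworked kernel reads both attributes and computes out = scale * in + bias in a single Eigen expression, copying the rows/height metadata when the input is a SelectedRows variable. A NumPy analogue of the dense path (illustrative only):

    import numpy as np

    # Dense-path analogue of the updated ScaleKernel: out = scale * in + bias.
    x = np.random.rand(2, 3).astype('float32')
    scale, bias = 2.0, 0.5
    out = scale * x + bias
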
python/paddle/fluid/layers/nn.py

@@ -3314,7 +3314,13 @@ def l2_normalize(x, axis, epsilon=1e-12, name=None):
     return out


-def matmul(x, y, transpose_x=False, transpose_y=False, name=None):
+def matmul(x,
+           y,
+           transpose_x=False,
+           transpose_y=False,
+           scale=1.0,
+           bias=0.0,
+           name=None):
     """
     Applies matrix multiplication to two tensors.

@@ -3348,6 +3354,8 @@ def matmul(x, y, transpose_x=False, transpose_y=False, name=None):
         y (Variable): The input variable which is a Tensor or LoDTensor.
         transpose_x (bool): Whether to transpose :math:`x` before multiplication.
         transpose_y (bool): Whether to transpose :math:`y` before multiplication.
+        scale (float): The scale of output. Default 1.0.
+        bias (float): The bias added to output. Default 0.0.
         name(str|None): A name for this layer(optional). If set None, the layer
             will be named automatically.

@@ -3415,8 +3423,12 @@ def matmul(x, y, transpose_x=False, transpose_y=False, name=None):
         inputs={'X': x, 'Y': y},
         outputs={'Out': out},
-        attrs={'transpose_X': transpose_x,
-               'transpose_Y': transpose_y})
+        attrs={
+            'transpose_X': transpose_x,
+            'transpose_Y': transpose_y,
+            'scale': scale,
+            'bias': bias
+        })
     return out

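Usage illustration for the extended layer, assuming this branch's signature (released Paddle versions may not accept the scale and bias arguments); per the updated docstring the result is roughly scale * matmul(x, y) + bias:

    import paddle.fluid as fluid

    # Sketch only: calling the extended matmul layer with the new arguments.
    x = fluid.layers.data(name='x', shape=[4, 3], dtype='float32')
    y = fluid.layers.data(name='y', shape=[3, 5], dtype='float32')
    out = fluid.layers.matmul(x, y, transpose_x=False, transpose_y=False,
                              scale=2.0, bias=1.0)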