Skip to content
体验新版
项目
组织
正在加载...
登录
切换导航
打开侧边栏
机器未来
Paddle
提交
5a933b44
P
Paddle
项目概览
机器未来
/
Paddle
与 Fork 源项目一致
Fork自
PaddlePaddle / Paddle
通知
1
Star
1
Fork
0
代码
文件
提交
分支
Tags
贡献者
分支图
Diff
Issue
1
列表
看板
标记
里程碑
合并请求
0
Wiki
0
Wiki
分析
仓库
DevOps
项目成员
Pages
P
Paddle
项目概览
项目概览
详情
发布
仓库
仓库
文件
提交
分支
标签
贡献者
分支图
比较
Issue
1
Issue
1
列表
看板
标记
里程碑
合并请求
0
合并请求
0
Pages
分析
分析
仓库分析
DevOps
Wiki
0
Wiki
成员
成员
收起侧边栏
关闭侧边栏
动态
分支图
创建新Issue
提交
Issue看板
提交
5a933b44
编写于
3月 20, 2017
作者:
Q
qingqing01
提交者:
GitHub
3月 20, 2017
浏览文件
操作
浏览文件
下载
差异文件
Merge pull request #1592 from Noplz/smooth_l1_layer
Smooth l1 layer for SSD
上级
24b00ac6
b3313f2b
变更
5
隐藏空白更改
内联
并排
Showing
5 changed files
with
150 additions
and
0 deletions
+150
-0
paddle/gserver/layers/CostLayer.cpp
paddle/gserver/layers/CostLayer.cpp
+53
-0
paddle/gserver/layers/CostLayer.h
paddle/gserver/layers/CostLayer.h
+23
-0
paddle/gserver/tests/test_LayerGrad.cpp
paddle/gserver/tests/test_LayerGrad.cpp
+14
-0
paddle/math/Matrix.cpp
paddle/math/Matrix.cpp
+49
-0
paddle/math/Matrix.h
paddle/math/Matrix.h
+11
-0
未找到文件。
paddle/gserver/layers/CostLayer.cpp
浏览文件 @
5a933b44
...
@@ -192,6 +192,59 @@ void SumOfSquaresCostLayer::backwardImp(Matrix& output,
...
@@ -192,6 +192,59 @@ void SumOfSquaresCostLayer::backwardImp(Matrix& output,
outputG
.
sumOfSquaresBp
(
output
,
*
label
.
value
);
outputG
.
sumOfSquaresBp
(
output
,
*
label
.
value
);
}
}
//
// class SmoothL1CostLayer
//

// Register this layer so a config "type" of "smooth_l1" constructs it.
REGISTER_LAYER(smooth_l1, SmoothL1CostLayer);

// Initialize the layer. No extra configuration is needed beyond what the
// CostLayer base class handles, so simply delegate.
bool SmoothL1CostLayer::init(const LayerMap& layerMap,
                             const ParameterMap& parameterMap) {
  return CostLayer::init(layerMap, parameterMap);
}
// Forward pass: write the per-sample smooth L1 cost of `output` vs the
// label into `target`. Matrix::smoothL1 only has a CPU implementation,
// so when running on GPU the operands are staged through host copies.
void SmoothL1CostLayer::forwardImp(Matrix& output,
                                   Argument& label,
                                   Matrix& target) {
  if (!useGpu_) {
    // Everything already lives on the host; compute in place.
    target.smoothL1(output, *label.value);
    return;
  }

  // Allocate host-side (useGpu = false) matrices with matching shapes.
  MatrixPtr cpuTarget =
      Matrix::create(target.getHeight(), target.getWidth(), false, false);
  MatrixPtr cpuOutput =
      Matrix::create(output.getHeight(), output.getWidth(), false, false);
  MatrixPtr cpuLabel = Matrix::create(
      label.value->getHeight(), label.value->getWidth(), false, false);

  cpuTarget->copyFrom(target);
  cpuOutput->copyFrom(output);
  cpuLabel->copyFrom(*label.value);

  // Compute on the host, then move the result back to the device matrix.
  cpuTarget->smoothL1(*cpuOutput, *cpuLabel);
  target.copyFrom(*cpuTarget);
}
// Backward pass: write d(cost)/d(output) into outputG.
// Matrix::smoothL1Bp only has a CPU implementation, so on GPU the
// operands are copied to host matrices, the gradient is computed there,
// and the result is copied back into outputG.
void SmoothL1CostLayer::backwardImp(Matrix& output,
                                    Argument& label,
                                    Matrix& outputG) {
  MatrixPtr outputGCpu, outputCpu, labelCpu;
  if (useGpu_) {
    // Host-side (useGpu = false) copies with matching shapes.
    outputGCpu =
        Matrix::create(outputG.getHeight(), outputG.getWidth(), false, false);
    outputCpu =
        Matrix::create(output.getHeight(), output.getWidth(), false, false);
    labelCpu = Matrix::create(
        label.value->getHeight(), label.value->getWidth(), false, false);
    outputGCpu->copyFrom(outputG);
    outputCpu->copyFrom(output);
    labelCpu->copyFrom(*label.value);
    outputGCpu->smoothL1Bp(*outputCpu, *labelCpu);
    // Move the CPU-computed gradient back to the (possibly GPU) matrix.
    outputG.copyFrom(*outputGCpu);
  } else {
    outputG.smoothL1Bp(output, *label.value);
  }
}
//
//
// class RankingCost
// class RankingCost
//
//
...
...
paddle/gserver/layers/CostLayer.h
浏览文件 @
5a933b44
...
@@ -159,6 +159,29 @@ public:
...
@@ -159,6 +159,29 @@ public:
Matrix
&
outputGrad
)
override
;
Matrix
&
outputGrad
)
override
;
};
};
/**
 * This cost layer computes the smooth L1 loss for real-valued regression
 * tasks.
 * \f[
 * L = \begin{cases}
 *       0.5 \, (output - label)^2, & |output - label| < 1 \\
 *       |output - label| - 0.5,    & \text{otherwise}
 *     \end{cases}
 * \f]
 */
class SmoothL1CostLayer : public CostLayer {
public:
  explicit SmoothL1CostLayer(const LayerConfig& config)
      : CostLayer(config) {}

  bool init(const LayerMap& layerMap,
            const ParameterMap& parameterMap) override;

  void forwardImp(Matrix& output, Argument& label, Matrix& cost) override;

  void backwardImp(Matrix& outputValue,
                   Argument& label,
                   Matrix& outputGrad) override;
};
/**
/**
 * A cost layer for learning to rank (LTR) task. This layer contains at least
 * A cost layer for learning to rank (LTR) task. This layer contains at least
* three inputs.
* three inputs.
...
...
paddle/gserver/tests/test_LayerGrad.cpp
浏览文件 @
5a933b44
...
@@ -1602,6 +1602,20 @@ TEST(Layer, PadLayer) {
...
@@ -1602,6 +1602,20 @@ TEST(Layer, PadLayer) {
}
}
}
}
// Gradient check for the smooth_l1 cost layer: one data input and one
// target input, both of width 1, verified on both CPU and GPU.
TEST(Layer, smooth_l1) {
  TestConfig config;
  config.layerConfig.set_type("smooth_l1");

  config.inputDefs.push_back({INPUT_DATA, "layer_0", 1, 0});
  config.inputDefs.push_back({INPUT_DATA_TARGET, "layer_1", 1, 0});
  config.layerConfig.add_inputs();
  config.layerConfig.add_inputs();

  for (auto useGpu : {false, true}) {
    // batchSize = 100; epsilon = 2.0 loosens the numeric-gradient check,
    // since the loss is only piecewise smooth.
    testLayerGrad(config, "smooth_l1", 100, false, useGpu, false, 2.0);
  }
}
int
main
(
int
argc
,
char
**
argv
)
{
int
main
(
int
argc
,
char
**
argv
)
{
testing
::
InitGoogleTest
(
&
argc
,
argv
);
testing
::
InitGoogleTest
(
&
argc
,
argv
);
initMain
(
argc
,
argv
);
initMain
(
argc
,
argv
);
...
...
paddle/math/Matrix.cpp
浏览文件 @
5a933b44
...
@@ -3590,6 +3590,55 @@ void CpuMatrix::sumOfSquaresBp(Matrix& output, Matrix& label) {
...
@@ -3590,6 +3590,55 @@ void CpuMatrix::sumOfSquaresBp(Matrix& output, Matrix& label) {
}
}
}
}
/**
 * Smooth L1 loss, forward.
 *
 * For each sample (row) of `output` vs `label`, computes
 *   cost_i = sum_j f(output_ij - label_ij), where
 *   f(x) = 0.5 * x^2  if |x| < 1
 *        = |x| - 0.5  otherwise,
 * and stores the per-sample sum in this matrix, which must be a single
 * column (numSamples x 1), as enforced by the CHECK below.
 *
 * Fix: the previous code wrote `dim` values per sample into this
 * numSamples x 1 matrix (and advanced the cost pointer by `dim`),
 * overrunning the buffer whenever dim > 1; it only worked for dim == 1.
 * The loss is now reduced over each row into cost[i].
 */
void CpuMatrix::smoothL1(Matrix& output, Matrix& label) {
  CHECK(output.useGpu_ == false && label.useGpu_ == false)
      << "Matrix type are not equal";

  size_t numSamples = getHeight();
  size_t dim = output.getWidth();
  CHECK_EQ(label.getHeight(), numSamples);
  CHECK_EQ(output.getHeight(), numSamples);
  CHECK_EQ(label.getWidth(), dim);
  CHECK_EQ(getWidth(), (size_t)1);  // one cost value per sample

  real* out = output.getData();
  real* cost = getData();
  real* lbl = label.getData();

  for (size_t i = 0; i < numSamples; ++i, out += dim, lbl += dim) {
    cost[i] = 0;
    for (size_t j = 0; j < dim; ++j) {
      real absVal = std::fabs(out[j] - lbl[j]);
      if (absVal < 1.0) {
        cost[i] += 0.5 * absVal * absVal;
      } else {
        cost[i] += absVal - 0.5;
      }
    }
  }
}
/**
 * Smooth L1 loss, backward.
 *
 * Writes the elementwise derivative of the smooth L1 loss,
 *   f'(x) = x        if |x| < 1
 *         = sign(x)  otherwise,   with x = output - label,
 * into this matrix, which has the same shape as `output`
 * (one gradient entry per output element).
 *
 * Fix: the previous CHECK_EQ(getWidth(), (size_t)1) contradicted the
 * loop below, which writes `dim` gradient entries per sample with a
 * stride of `dim`; the gradient matrix is numSamples x dim, so the
 * width must be checked against `dim` (the old check only happened to
 * pass in the dim == 1 case).
 */
void CpuMatrix::smoothL1Bp(Matrix& output, Matrix& label) {
  CHECK(output.useGpu_ == false && label.useGpu_ == false)
      << "Matrix type are not equal";

  size_t numSamples = getHeight();
  size_t dim = output.getWidth();
  CHECK_EQ(label.getHeight(), numSamples);
  CHECK_EQ(output.getHeight(), numSamples);
  CHECK_EQ(label.getWidth(), dim);
  CHECK_EQ(getWidth(), dim);  // gradient matches the output's shape

  real* out = output.getData();
  real* grad = getData();
  real* lbl = label.getData();

  // f'(x) = x if |x| < 1
  //       = sign(x) otherwise
  for (size_t i = 0; i < numSamples;
       ++i, out += dim, grad += dim, lbl += dim) {
    for (size_t j = 0; j < dim; ++j) {
      grad[j] = out[j] - lbl[j];
      if (std::fabs(grad[j]) >= 1) {
        // (0 < x) - (x < 0) is a branch-free sign(x) for x != 0.
        grad[j] = (0 < grad[j]) - (grad[j] < 0);
      }
    }
  }
}
void
CpuMatrix
::
tanh
(
Matrix
&
output
)
{
void
CpuMatrix
::
tanh
(
Matrix
&
output
)
{
CHECK
(
isContiguous
());
CHECK
(
isContiguous
());
CHECK
(
output
.
isContiguous
());
CHECK
(
output
.
isContiguous
());
...
...
paddle/math/Matrix.h
浏览文件 @
5a933b44
...
@@ -783,6 +783,14 @@ public:
...
@@ -783,6 +783,14 @@ public:
LOG
(
FATAL
)
<<
"Not implemented"
;
LOG
(
FATAL
)
<<
"Not implemented"
;
}
}
/// Compute the smooth L1 loss of `output` against `label`, storing the
/// per-sample result in this matrix. Base-class stub: aborts unless a
/// subclass (CpuMatrix) overrides it.
virtual void smoothL1(Matrix& output, Matrix& label) {
  LOG(FATAL) << "Not implemented";
}
/// Compute the elementwise gradient of the smooth L1 loss of `outputV`
/// against `label`, storing it in this matrix. Base-class stub: aborts
/// unless a subclass (CpuMatrix) overrides it.
virtual void smoothL1Bp(Matrix& outputV, Matrix& label) {
  LOG(FATAL) << "Not implemented";
}
virtual
void
tanh
(
Matrix
&
output
)
{
LOG
(
FATAL
)
<<
"Not implemented"
;
}
virtual
void
tanh
(
Matrix
&
output
)
{
LOG
(
FATAL
)
<<
"Not implemented"
;
}
virtual
void
tanhDerivative
(
Matrix
&
output
)
{
virtual
void
tanhDerivative
(
Matrix
&
output
)
{
...
@@ -1720,6 +1728,9 @@ public:
...
@@ -1720,6 +1728,9 @@ public:
/// gradient of sumOfSquares.
/// gradient of sumOfSquares.
void
sumOfSquaresBp
(
Matrix
&
outputV
,
Matrix
&
label
);
void
sumOfSquaresBp
(
Matrix
&
outputV
,
Matrix
&
label
);
/// Smooth L1 loss forward; see Matrix::smoothL1.
void smoothL1(Matrix& output, Matrix& label);
/// Gradient of the smooth L1 loss; see Matrix::smoothL1Bp.
void smoothL1Bp(Matrix& output, Matrix& label);
void
tanh
(
Matrix
&
output
);
void
tanh
(
Matrix
&
output
);
void
tanhDerivative
(
Matrix
&
output
);
void
tanhDerivative
(
Matrix
&
output
);
...
...
编辑
预览
Markdown
is supported
0%
请重试
或
添加新附件
.
添加附件
取消
You are about to add
0
people
to the discussion. Proceed with caution.
先完成此消息的编辑!
取消
想要评论请
注册
或
登录