Skip to content
体验新版
项目
组织
正在加载...
登录
切换导航
打开侧边栏
PaddlePaddle
PaddleDetection
提交
5c6ecb27
P
PaddleDetection
项目概览
PaddlePaddle
/
PaddleDetection
大约 1 年 前同步成功
通知
694
Star
11112
Fork
2696
代码
文件
提交
分支
Tags
贡献者
分支图
Diff
Issue
184
列表
看板
标记
里程碑
合并请求
40
Wiki
0
Wiki
分析
仓库
DevOps
项目成员
Pages
P
PaddleDetection
项目概览
项目概览
详情
发布
仓库
仓库
文件
提交
分支
标签
贡献者
分支图
比较
Issue
184
Issue
184
列表
看板
标记
里程碑
合并请求
40
合并请求
40
Pages
分析
分析
仓库分析
DevOps
Wiki
0
Wiki
成员
成员
收起侧边栏
关闭侧边栏
动态
分支图
创建新Issue
提交
Issue看板
体验新版 GitCode,发现更多精彩内容 >>
提交
5c6ecb27
编写于
9月 13, 2016
作者:
E
emailweixu
提交者:
GitHub
9月 13, 2016
浏览文件
操作
浏览文件
下载
差异文件
Merge pull request
#1
from baidu/master
Update from the original
上级
674d69ce
3fc99a23
变更
9
隐藏空白更改
内联
并排
Showing
9 changed files
with
329 additions
and
55 deletions
+329
-55
paddle/gserver/gradientmachines/NeuralNetwork.cpp
paddle/gserver/gradientmachines/NeuralNetwork.cpp
+1
-0
paddle/gserver/layers/CudnnBatchNormLayer.cpp
paddle/gserver/layers/CudnnBatchNormLayer.cpp
+15
-12
paddle/utils/CustomStackTrace.cpp
paddle/utils/CustomStackTrace.cpp
+35
-0
paddle/utils/CustomStackTrace.h
paddle/utils/CustomStackTrace.h
+128
-36
paddle/utils/Util.cpp
paddle/utils/Util.cpp
+1
-7
paddle/utils/tests/CMakeLists.txt
paddle/utils/tests/CMakeLists.txt
+10
-0
paddle/utils/tests/test_CustomStackTrace.cpp
paddle/utils/tests/test_CustomStackTrace.cpp
+95
-0
paddle/utils/tests/test_CustomStackTracePrint.cpp
paddle/utils/tests/test_CustomStackTracePrint.cpp
+29
-0
paddle/utils/tests/test_CustomStackTracePrint.sh
paddle/utils/tests/test_CustomStackTracePrint.sh
+15
-0
未找到文件。
paddle/gserver/gradientmachines/NeuralNetwork.cpp
浏览文件 @
5c6ecb27
...
...
@@ -277,6 +277,7 @@ void NeuralNetwork::getState(MachineState& machineState) {
}
void
NeuralNetwork
::
backward
(
const
UpdateCallback
&
callback
)
{
gLayerStackTrace
.
pop
(
""
);
// tell layer trace is during backward.
FOR_EACH_R
(
layer
,
layers_
)
{
REGISTER_TIMER_INFO
(
"BackwardTimer"
,
(
*
layer
)
->
getName
().
c_str
());
if
((
*
layer
)
->
needGradient
())
{
...
...
paddle/gserver/layers/CudnnBatchNormLayer.cpp
浏览文件 @
5c6ecb27
...
...
@@ -114,27 +114,30 @@ void CudnnBatchNormLayer::backward(const UpdateCallback& callback) {
}
else
{
create
(
tmpBiasGrad_
,
1
,
channels_
,
&
betaGrad
);
}
#if CUDNN_VERSION < 5000
// because of the different api of cudnn v4 and v5.
if
(
weight_
->
getWGrad
())
{
create
(
tmpWGrad_
,
1
,
channels_
,
&
gammaGrad
);
}
if
(
biases_
&&
biases_
->
getWGrad
())
{
create
(
tmpBiasGrad_
,
1
,
channels_
,
&
betaGrad
);
if
(
hl_get_cudnn_lib_version
()
<
5000
)
{
if
(
weight_
->
getWGrad
())
{
create
(
tmpWGrad_
,
1
,
channels_
,
&
gammaGrad
);
}
if
(
biases_
&&
biases_
->
getWGrad
())
{
create
(
tmpBiasGrad_
,
1
,
channels_
,
&
betaGrad
);
}
}
#endif
hl_batch_norm_backward
(
ioDesc_
,
input
,
ioDesc_
,
outGrad
,
ioDesc_
,
inGrad
,
bnParamDesc_
,
gamma
,
gammaGrad
,
betaGrad
,
EPS
,
savedMean
,
savedInvVar
);
#if CUDNN_VERSION < 5000
// because of the different api of cudnn v4 and v5.
if
(
weight_
->
getWGrad
()
&&
biases_
->
getWGrad
())
{
weight_
->
getWGrad
()
->
add
(
*
tmpWGrad_
);
biases_
->
getWGrad
()
->
add
(
*
tmpBiasGrad_
);
if
(
hl_get_cudnn_lib_version
()
<
5000
)
{
if
(
weight_
->
getWGrad
()
&&
biases_
->
getWGrad
())
{
weight_
->
getWGrad
()
->
add
(
*
tmpWGrad_
);
biases_
->
getWGrad
()
->
add
(
*
tmpBiasGrad_
);
}
}
#endif
{
REGISTER_TIMER_INFO
(
"WeightUpdate"
,
getName
().
c_str
());
biases_
->
getParameterPtr
()
->
incUpdate
(
callback
);
...
...
paddle/utils/CustomStackTrace.cpp
浏览文件 @
5c6ecb27
...
...
@@ -14,9 +14,44 @@ limitations under the License. */
#include "CustomStackTrace.h"
#include "CommandLineParser.h"
#include <iostream>
P_DEFINE_bool(layer_stack_error_only_current_thread, true,
              "Dump current thread or whole process layer stack when signal error "
              "occurred. true means only dump current thread layer stack");

namespace paddle {

// Global layer forward/backward call-stack trace shared across the process.
CustomStackTrace<std::string> gLayerStackTrace;

// Serializes concurrent failure-writer invocations so the dump is not garbled.
static std::mutex gLayerStackTraceMtx;

/**
 * @brief Install a glog failure writer that, before echoing the failure
 *        message, dumps the per-thread layer stack grouped by thread id and
 *        tagged with the last stack action (Forwarding/Backwarding).
 */
void installLayerStackTracer() {
  logging::installFailureWriter([](const char* data, int sz) {
    std::lock_guard<std::mutex> guard(gLayerStackTraceMtx);
    if (!gLayerStackTrace.empty()) {
      // Sentinel meaning "no thread printed yet"; used to insert a newline
      // between the per-thread sections of the dump.
      size_t curTid = -1UL;
      std::hash<std::thread::id> hasher;
      gLayerStackTrace.dump(
          [&curTid, &hasher](std::thread::id tid, bool* isForwarding,
                             const std::string& layerName) {
            if (curTid != hasher(tid)) {
              if (curTid != -1UL) {
                std::cerr << std::endl;
              }
              curTid = hasher(tid);
              std::cerr << "Thread [" << tid << "] ";
              // isForwarding is null when the thread never recorded a
              // push/pop direction.
              if (isForwarding) {
                std::cerr << (*isForwarding ? "Forwarding " : "Backwarding ");
              }
            }
            std::cerr << layerName << ", ";
          },
          FLAGS_layer_stack_error_only_current_thread);
      std::cerr << std::endl;
    }
    // Finally forward the original failure message unchanged.
    std::cerr.write(data, sz);
  });
}

}  // namespace paddle
paddle/utils/CustomStackTrace.h
浏览文件 @
5c6ecb27
...
...
@@ -15,6 +15,9 @@ limitations under the License. */
#pragma once
#include <stack>
#include <thread>
#include <unordered_map>
#include <functional>
#include "ThreadLocal.h"
...
...
@@ -29,25 +32,18 @@ namespace paddle {
* @code{.cpp}
*
* paddle::CustomStackTrace<std::string> stack;
* PASS_TEST=0;
* for (auto& layer : layers){
* stack.push(layer->getName());
* layer->forward(
passType
);
* layer->forward();
* }
* for (auto& layer : layers){
*
* stack.pop(""); // mark under pop stage.
*
* for (auto it = layers.rbegin(); it != layers.rend(); ++it){
* auto& layer = *it;
* layer->backward(passType);
* stack.pop(layer->getName());
* }
*
* if(passType == PASS_TEST) {
* stack.clear();
* }
* else {
* stack.dump([](const std::string& layername){
* LOG(INFO) << "LayerName: " << layername;
* })
* }
*
*
* @endcode
*/
...
...
@@ -55,45 +51,141 @@ template <typename T>
class
CustomStackTrace
{
public:
/**
* @brief Pop out an item from the top of the stack
. For safety the item
*
will be poped should equal to ip
.
* @brief Pop out an item from the top of the stack
if item == top.
*
Else, just set status to popping
.
*/
void
pop
(
const
T
&
ip
)
{
auto
&
p
=
*
logstack_
;
CHECK_EQ
(
ip
,
p
.
top
());
p
.
pop
();
void
pop
(
const
T
&
item
)
{
pushing
()
=
false
;
auto
&
s
=
this
->
stack
();
if
(
item
==
s
.
top
())
{
s
.
pop
();
}
}
/**
* @brief Empty the stack by sequence from top to button.
* @param[in] callback A function deal with each item while dumping.
* It must have and only have a in parameter which is the stack item.
* @brief clear current thread stack.
*/
template
<
typename
Callback
>
void
dump
(
Callback
callback
)
{
auto
&
p
=
*
logstack_
;
while
(
!
p
.
empty
())
{
callback
(
p
.
top
());
p
.
pop
();
void
clear
()
{
auto
&
s
=
stack
();
while
(
!
s
.
empty
())
{
s
.
pop
();
}
}
/**
* @brief Only empty the stack.
* @brief return true if all thread's stack is empty.
* @return true if empty
*/
void
clear
()
{
dump
([](
const
T
&
ip
){});
bool
empty
()
const
{
std
::
lock_guard
<
std
::
mutex
>
g
(
this
->
mtx_
);
for
(
auto
p
:
this
->
stackBuffers_
)
{
std
::
stack
<
T
>&
s
=
*
p
.
second
;
if
(
!
s
.
empty
())
{
return
false
;
}
}
return
true
;
}
/**
* @brief DumpCallback Type. It will be invoked many times by dump method.
*
* The first parameter is stack thread id.
* The second parameter is the last action of stack is push or not.
* The third parameter is the item in stack.
*/
typedef
std
::
function
<
void
(
const
std
::
thread
::
id
&
/*threadId*/
,
bool
*
/*isPushing*/
,
const
T
&
/*item*/
)
>
DumpCallback
;
/**
* Dump all thread stack, and all stack will be cleared.
*/
void
dump
(
const
DumpCallback
&
callback
,
bool
onlyCurrentThread
=
false
)
{
std
::
lock_guard
<
std
::
mutex
>
g
(
this
->
mtx_
);
for
(
auto
p
:
this
->
stackBuffers_
)
{
std
::
thread
::
id
tid
=
p
.
first
;
if
(
onlyCurrentThread
&&
tid
!=
std
::
this_thread
::
get_id
())
{
continue
;
}
std
::
stack
<
T
>&
s
=
*
p
.
second
;
bool
*
isPush
=
nullptr
;
auto
it
=
this
->
pushingBuffers_
.
find
(
tid
);
if
(
it
!=
this
->
pushingBuffers_
.
end
())
{
isPush
=
it
->
second
;
}
while
(
!
s
.
empty
())
{
callback
(
tid
,
isPush
,
s
.
top
());
s
.
pop
();
}
}
}
/**
* @brief Push item
ip to the top of the
stack.
* @brief Push item
to current thread
stack.
*/
void
push
(
const
T
&
ip
)
{
auto
&
p
=
*
logstack_
;
p
.
push
(
ip
);
void
push
(
const
T
&
item
)
{
pushing
()
=
true
;
auto
&
p
=
this
->
stack
();
p
.
push
(
item
);
}
private:
ThreadLocalD
<
std
::
stack
<
T
>
>
logstack_
;
/**
* Get thread local attribute, and save them into a map (threadId => TYPE*)
*
* @tparam TYPE thread local attribute type.
* @param threadLocal Thread Local object.
* @param buffers a map from threadId to TYPE*
*/
template
<
typename
TYPE
>
inline
TYPE
&
getThreadLocal
(
ThreadLocal
<
TYPE
>&
threadLocal
,
std
::
unordered_map
<
std
::
thread
::
id
,
TYPE
*>&
buffers
)
{
TYPE
*
retv
=
threadLocal
.
get
(
false
);
if
(
retv
)
{
return
*
retv
;
}
else
{
std
::
lock_guard
<
std
::
mutex
>
guard
(
this
->
mtx_
);
retv
=
threadLocal
.
get
();
auto
id
=
std
::
this_thread
::
get_id
();
buffers
.
insert
({
id
,
retv
});
return
*
retv
;
}
}
/**
* @brief Get thread local stack reference.
*/
std
::
stack
<
T
>&
stack
()
{
return
this
->
getThreadLocal
(
this
->
logStack_
,
this
->
stackBuffers_
);
}
/**
* @brief Get thread local pushing flag.
*/
bool
&
pushing
()
{
return
this
->
getThreadLocal
(
this
->
isPushing_
,
this
->
pushingBuffers_
);
}
private:
mutable
std
::
mutex
mtx_
;
std
::
unordered_map
<
std
::
thread
::
id
,
std
::
stack
<
T
>*
>
stackBuffers_
;
std
::
unordered_map
<
std
::
thread
::
id
,
bool
*
>
pushingBuffers_
;
ThreadLocal
<
bool
>
isPushing_
;
ThreadLocal
<
std
::
stack
<
T
>
>
logStack_
;
};
extern
CustomStackTrace
<
std
::
string
>
gLayerStackTrace
;
/**
* @brief Install a failure handler to print layer stack when error.
*/
extern
void
installLayerStackTracer
();
}
// namespace paddle
paddle/utils/Util.cpp
浏览文件 @
5c6ecb27
...
...
@@ -129,13 +129,7 @@ void runInitFunctions() {
void
initMain
(
int
argc
,
char
**
argv
)
{
initializeLogging
(
argc
,
argv
);
logging
::
installFailureWriter
([](
const
char
*
data
,
int
sz
)
{
std
::
cerr
<<
"Current Layer forward/backward stack is "
<<
std
::
endl
;
gLayerStackTrace
.
dump
([](
const
std
::
string
&
layername
){
std
::
cerr
<<
"LayerName: "
<<
layername
<<
std
::
endl
;
});
std
::
cerr
.
write
(
data
,
sz
);
});
installLayerStackTracer
();
std
::
string
line
;
for
(
int
i
=
0
;
i
<
argc
;
++
i
)
{
line
+=
argv
[
i
];
...
...
paddle/utils/tests/CMakeLists.txt
浏览文件 @
5c6ecb27
...
...
@@ -2,3 +2,13 @@ add_simple_unittest(test_CommandLineParser)
add_simple_unittest(test_Logging)
add_simple_unittest(test_Thread)
add_simple_unittest(test_StringUtils)
add_simple_unittest(test_CustomStackTrace)

# test_CustomStackTracePrint deliberately crashes, so it is driven by a
# shell script that checks the failure output instead of a gtest runner.
add_executable(
    test_CustomStackTracePrint
    test_CustomStackTracePrint.cpp)
link_paddle_exe(test_CustomStackTracePrint)
add_test(NAME test_CustomStackTracePrint
  COMMAND ${PROJ_ROOT}/paddle/utils/tests/test_CustomStackTracePrint.sh
  WORKING_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR})
paddle/utils/tests/test_CustomStackTrace.cpp
0 → 100644
浏览文件 @
5c6ecb27
/* Copyright (c) 2016 Baidu, Inc. All Rights Reserve.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include <gtest/gtest.h>
#include <chrono>
#include "paddle/utils/CustomStackTrace.h"
#include "paddle/utils/CommandLineParser.h"
#include "paddle/utils/Util.h"
#include "paddle/utils/Locks.h"
P_DEFINE_int32(test_thread_num, 10, "testing thread number");

/**
 * @brief Run `callback` on FLAGS_test_thread_num worker threads against a
 *        shared tracer, `countDown` rounds; after each round (synchronized
 *        via the two barriers) the tracer must report empty.
 */
void testNormalImpl(
    const std::function<void(paddle::CustomStackTrace<std::string>&,
                             size_t,
                             size_t,
                             paddle::ThreadBarrier&,
                             paddle::ThreadBarrier&)>& callback) {
  paddle::CustomStackTrace<std::string> tracer;
  // +1: the main thread also participates in both barriers.
  paddle::ThreadBarrier doneBarrier(FLAGS_test_thread_num + 1);
  paddle::ThreadBarrier startBarrier(FLAGS_test_thread_num + 1);
  constexpr size_t countDown = 10;
  constexpr size_t layerSize = 1000;

  std::vector<std::unique_ptr<std::thread>> threads;
  threads.reserve(FLAGS_test_thread_num);
  for (int32_t i = 0; i < FLAGS_test_thread_num; ++i) {
    threads.emplace_back(new std::thread([&tracer,
                                          &countDown,
                                          &layerSize,
                                          &startBarrier,
                                          &doneBarrier,
                                          &callback] {
      callback(tracer, countDown, layerSize, startBarrier, doneBarrier);
    }));
  }

  size_t cntDown = countDown;
  while (cntDown-- > 0) {
    startBarrier.wait();  // release workers for one round
    doneBarrier.wait();   // wait until every worker finished the round
    ASSERT_TRUE(tracer.empty());
  }

  for (auto& thread : threads) {
    thread->join();
  }
}
// Training-style round trip: push every layer, mark the backward phase with
// pop(""), then pop layers in reverse order.
TEST(CustomStackTrace, normalTrain) {
  testNormalImpl([](paddle::CustomStackTrace<std::string>& tracer,
                    size_t countDown,
                    size_t layerSize,
                    paddle::ThreadBarrier& start,
                    paddle::ThreadBarrier& finish) {
    while (countDown-- > 0) {
      start.wait();
      for (size_t i = 0; i < layerSize; ++i) {
        tracer.push("layer_" + std::to_string(i));
      }
      tracer.pop("");  // switch the tracer into popping state
      for (size_t i = 0; i < layerSize; ++i) {
        tracer.pop("layer_" + std::to_string(layerSize - 1 - i));
      }
      finish.wait();
    }
  });
}
// Inference-style round trip: push every layer, then discard the whole
// stack at once via clear().
TEST(CustomStackTrace, normalTest) {
  testNormalImpl([](paddle::CustomStackTrace<std::string>& tracer,
                    size_t countDown,
                    size_t layerSize,
                    paddle::ThreadBarrier& start,
                    paddle::ThreadBarrier& finish) {
    while (countDown-- > 0) {
      start.wait();
      for (size_t i = 0; i < layerSize; ++i) {
        tracer.push("layer_" + std::to_string(i));
      }
      tracer.clear();  // in forward test, tracer will clear after forward.
      finish.wait();
    }
  });
}
// Standard gtest entry point; initMain installs Paddle's logging and
// failure handlers before the tests run.
int main(int argc, char** argv) {
  testing::InitGoogleTest(&argc, argv);
  paddle::initMain(argc, argv);
  return RUN_ALL_TESTS();
}
paddle/utils/tests/test_CustomStackTracePrint.cpp
0 → 100644
浏览文件 @
5c6ecb27
/* Copyright (c) 2016 Baidu, Inc. All Rights Reserve.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include "paddle/utils/Util.h"
#include "paddle/utils/CustomStackTrace.h"
// Deliberately crashes after pushing 999 layers so the companion shell
// script can verify that the installed failure writer prints the layer
// stack. The uncaught throw is the point of the test, not a bug.
int main(int argc, char** argv) {
  paddle::initMain(argc, argv);
  for (size_t i = 0; i < 1000; ++i) {
    paddle::gLayerStackTrace.push("layer_" + std::to_string(i));
    if (i == 998) {
      throw "Unhandle exception";
    }
  }
  return 0;
}
paddle/utils/tests/test_CustomStackTracePrint.sh
0 → 100755
浏览文件 @
5c6ecb27
#!/bin/bash
echo "Test Custom Stack Trace print correct result when fail"
./test_CustomStackTracePrint >customStackTraceLog 2>&1
# The binary must die abnormally; a zero exit status means the test failed.
if [ $? -eq 0 ]; then
  exit 1
else
  set -e
  # Rebuild the expected dump: layer_998, layer_997, ..., layer_0,
  # prefixed with the forward-phase tag.
  TEXT=""
  for ((i=0; i<=998; i++))
  do
    TEXT="layer_$i, "$TEXT
  done
  TEXT="Forwarding "$TEXT
  grep -q "$TEXT" customStackTraceLog
fi
编辑
预览
Markdown
is supported
0%
请重试
或
添加新附件
.
添加附件
取消
You are about to add
0
people
to the discussion. Proceed with caution.
先完成此消息的编辑!
取消
想要评论请
注册
或
登录