PaddleDetection (fork of PaddlePaddle/PaddleDetection)

Commit a816443e
Authored on Nov 30, 2016 by Liu Yiqun
Add submodule warp-ctc.
Parent: 5a97c98d
Changes: 10 changed files with 62 additions and 73 deletions (+62 -73)
Changed files:

.gitmodules                                  +3   -0
.pre-commit-config.yaml                      +2   -0
paddle/cuda/include/hl_dso_loader.h          +1   -1
paddle/cuda/include/hl_warpctc_wrap.h        +0   -1
paddle/cuda/src/hl_cuda_sequence.cu          +6   -18
paddle/cuda/src/hl_dso_loader.cc             +1   -1
paddle/cuda/src/hl_warpctc_wrap.cc           +14  -14
paddle/gserver/layers/WarpCTCLayer.cpp       +6   -12
paddle/gserver/tests/test_WarpCTCLayer.cpp   +28  -26
warp-ctc                                     +1   -0
.gitmodules
@@ -0,0 +1,3 @@
+[submodule "warp-ctc"]
+	path = warp-ctc
+	url = https://github.com/baidu-research/warp-ctc.git
.pre-commit-config.yaml
@@ -2,6 +2,7 @@
     sha: c25201a00e6b0514370501050cf2a8538ac12270
     hooks:
     -   id: remove-crlf
+        files: (?!.*warp-ctc)^.*$
 -   repo: https://github.com/reyoung/mirrors-yapf.git
     sha: v0.13.2
     hooks:
@@ -13,6 +14,7 @@
     -   id: check-merge-conflict
     -   id: check-symlinks
     -   id: detect-private-key
+        files: (?!.*warp-ctc)^.*$
     -   id: end-of-file-fixer
 -   repo: https://github.com/PaddlePaddle/clang-format-pre-commit-hook.git
     sha: 28c0ea8a67a3e2dbbf4822ef44e85b63a0080a29
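Both new files: entries use the same negative-lookahead pattern, (?!.*warp-ctc)^.*$: a path matches (and is therefore still checked by the hook) only if it does not contain "warp-ctc" anywhere, which keeps the hooks from rewriting the vendored submodule. A minimal sketch verifying the pattern's behavior; std::regex's default ECMAScript grammar supports the lookahead, and the two paths are illustrative:

#include <iostream>
#include <regex>
#include <string>

int main() {
  // Match any path that does not contain "warp-ctc"; such paths are the
  // ones the pre-commit hooks will still process.
  std::regex filter("(?!.*warp-ctc)^.*$");

  for (const std::string path : {"paddle/cuda/src/hl_dso_loader.cc",
                                 "warp-ctc/include/ctc.h"}) {
    std::cout << path << " -> "
              << (std::regex_match(path, filter) ? "checked" : "skipped")
              << "\n";
  }
  return 0;
}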
paddle/cuda/include/hl_dso_loader.h
@@ -58,6 +58,6 @@ void GetCurandDsoHandle(void** dso_handle);
  * @param **dso_handle dso handler
  *
  */
-void GetWarpctcDsoHandle(void** dso_handle);
+void GetWarpCTCDsoHandle(void** dso_handle);

 #endif  // HL_DSO_LOADER_H_
paddle/cuda/include/hl_warpctc_wrap.h
@@ -16,7 +16,6 @@ limitations under the License. */
 #define HL_WARPCTC_WRAP_H_

 #include "hl_base.h"
-/// #include "hl_cuda.h"
 #include "warp-ctc/include/ctc.h"

 typedef ctcStatus_t hl_warpctc_status_t;
paddle/cuda/src/hl_cuda_sequence.cu
@@ -463,30 +463,18 @@ void KeSequence2BatchPadding(real* batch,
   int batchBaseIdx = (sequenceIdx * numSequences + batchIdx) * sequenceWidth;
   int sequenceBaseIdx = (sequenceStart + sequenceIdx) * sequenceWidth;

+  real scale = normByTimes ? (1.0f / (real)sequenceLength) : 1.0f;
+
   if (sequenceIdx < sequenceLength) {
     if (seq2batch) {
       /* sequence -> batch */
-      if (normByTimes) {
-        real scale = 1.0f / (real)sequenceLength;
-        for (int i = threadIdx.x; i < sequenceWidth; i += blockDim.x) {
-          batch[batchBaseIdx + i] = scale * sequence[sequenceBaseIdx + i];
-        }
-      } else {
-        for (int i = threadIdx.x; i < sequenceWidth; i += blockDim.x) {
-          batch[batchBaseIdx + i] = sequence[sequenceBaseIdx + i];
-        }
+      for (int i = threadIdx.x; i < sequenceWidth; i += blockDim.x) {
+        batch[batchBaseIdx + i] = scale * sequence[sequenceBaseIdx + i];
       }
     } else {
       /* batch -> sequence */
-      if (normByTimes) {
-        real scale = 1.0f / (real)sequenceLength;
-        for (int i = threadIdx.x; i < sequenceWidth; i += blockDim.x) {
-          sequence[sequenceBaseIdx + i] = scale * batch[batchBaseIdx + i];
-        }
-      } else {
-        for (int i = threadIdx.x; i < sequenceWidth; i += blockDim.x) {
-          sequence[sequenceBaseIdx + i] = batch[batchBaseIdx + i];
-        }
+      for (int i = threadIdx.x; i < sequenceWidth; i += blockDim.x) {
+        sequence[sequenceBaseIdx + i] = scale * batch[batchBaseIdx + i];
       }
     }
   } else if (sequenceIdx < maxSequenceLength) {
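The kernel refactor hoists the time-step normalization out of the copy loops: scale is computed once per thread as 1/sequenceLength when normByTimes is set and 1.0f otherwise, so the four branch-specific loops collapse into one scaled copy per direction. A host-side sketch of the same idea, with illustrative names rather than the kernel's thread-strided indexing:

#include <cstddef>

// Copy src into dst, optionally normalizing by the sequence length.
// Folding the flag into a multiplicative scale removes the branch from
// the inner loop: scale == 1.0f reproduces the plain copy exactly.
void scaledCopy(const float* src, float* dst, std::size_t n,
                bool normByTimes, int sequenceLength) {
  float scale = normByTimes ? (1.0f / static_cast<float>(sequenceLength)) : 1.0f;
  for (std::size_t i = 0; i < n; ++i) {
    dst[i] = scale * src[i];
  }
}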
paddle/cuda/src/hl_dso_loader.cc
@@ -163,7 +163,7 @@ void GetCurandDsoHandle(void** dso_handle) {
 #endif
 }

-void GetWarpctcDsoHandle(void** dso_handle) {
+void GetWarpCTCDsoHandle(void** dso_handle) {
 #if defined(__APPLE__) || defined(__OSX__)
   GetDsoHandleFromSearchPath(FLAGS_warpctc_dir, "libwarpctc.dylib", dso_handle);
 #else
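Besides the rename to GetWarpCTCDsoHandle, the function keeps the platform split: macOS builds look for libwarpctc.dylib, other platforms for libwarpctc.so. GetDsoHandleFromSearchPath is Paddle's own helper; the sketch below approximates the pattern with plain dlopen, and loadWarpCTC plus the searchDir parameter are hypothetical:

#include <dlfcn.h>
#include <string>

// Resolve the warp-ctc shared library using the platform's naming
// convention: .dylib on macOS, .so elsewhere. Returns nullptr on failure.
void* loadWarpCTC(const std::string& searchDir) {
#if defined(__APPLE__) || defined(__OSX__)
  const std::string libName = searchDir + "/libwarpctc.dylib";
#else
  const std::string libName = searchDir + "/libwarpctc.so";
#endif
  return dlopen(libName.c_str(), RTLD_LAZY);
}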
paddle/cuda/src/hl_warpctc_wrap.cc
@@ -30,32 +30,32 @@ void* warpctc_dso_handle = nullptr;
  * the linked-libs of paddle or to LD_PRELOAD.
  */
 #ifdef PADDLE_USE_DSO
-#define DYNAMIC_LOAD_WARPCTC_WRAP(__name, __type)                          \
+#define DYNAMIC_LOAD_WARPCTC_WRAP(__name)                                  \
   struct DynLoad__##__name {                                               \
     template <typename... Args>                                            \
-    __type operator()(Args... args) {                                      \
-      typedef __type (*warpctcFunc)(Args...);                              \
+    auto operator()(Args... args) -> decltype(__name(args...)) {           \
+      using warpctcFunc = decltype(__name(args...)) (*)(Args...);          \
       std::call_once(                                                      \
-          warpctc_dso_flag, GetWarpctcDsoHandle, &warpctc_dso_handle);     \
+          warpctc_dso_flag, GetWarpCTCDsoHandle, &warpctc_dso_handle);     \
       void* p_##_name = dlsym(warpctc_dso_handle, #__name);                \
       return reinterpret_cast<warpctcFunc>(p_##_name)(args...);            \
     }                                                                      \
   } __name;  // struct DynLoad__##__name
 #else
-#define DYNAMIC_LOAD_WARPCTC_WRAP(__name, __type) \
-  struct DynLoad__##__name {                      \
-    template <typename... Args>                   \
-    __type operator()(Args... args) {             \
-      return __name(args...);                     \
-    }                                             \
+#define DYNAMIC_LOAD_WARPCTC_WRAP(__name)                                  \
+  struct DynLoad__##__name {                                               \
+    template <typename... Args>                                            \
+    auto operator()(Args... args) -> decltype(__name(args...)) {           \
+      return __name(args...);                                              \
+    }                                                                      \
   } __name;  // struct DynLoad__##__name
 #endif

 // include all needed warp-ctc functions
-DYNAMIC_LOAD_WARPCTC_WRAP(get_warpctc_version, int)
-DYNAMIC_LOAD_WARPCTC_WRAP(ctcGetStatusString, const char*)
-DYNAMIC_LOAD_WARPCTC_WRAP(compute_ctc_loss, hl_warpctc_status_t)
-DYNAMIC_LOAD_WARPCTC_WRAP(get_workspace_size, hl_warpctc_status_t)
+DYNAMIC_LOAD_WARPCTC_WRAP(get_warpctc_version)
+DYNAMIC_LOAD_WARPCTC_WRAP(ctcGetStatusString)
+DYNAMIC_LOAD_WARPCTC_WRAP(compute_ctc_loss)
+DYNAMIC_LOAD_WARPCTC_WRAP(get_workspace_size)
 #undef DYNAMIC_LOAD_WARPCTC_WRAP
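The macro no longer takes an explicit __type argument: the trailing return type decltype(__name(args...)) deduces the wrapper's return type from the declaration of the wrapped function (visible via warp-ctc/include/ctc.h), so a wrapped function can no longer be registered with a mismatched type, and the DSO and statically linked variants now share one signature. A self-contained sketch of the trick; my_add, libmy.so, and the _dyn suffix are hypothetical stand-ins (the real code also caches the handle with std::call_once):

#include <dlfcn.h>

int my_add(int a, int b);  // declaration only; the definition lives in libmy.so

// The wrapper's return type is deduced from my_add's declaration via
// decltype, so the macro needs only the function name.
#define DYNAMIC_LOAD_WRAP(__name)                                   \
  struct DynLoad__##__name {                                        \
    template <typename... Args>                                     \
    auto operator()(Args... args) -> decltype(__name(args...)) {    \
      using FuncType = decltype(__name(args...)) (*)(Args...);      \
      void* handle = dlopen("libmy.so", RTLD_LAZY);                 \
      void* sym = dlsym(handle, #__name);                           \
      return reinterpret_cast<FuncType>(sym)(args...);              \
    }                                                               \
  } __name##_dyn;

DYNAMIC_LOAD_WRAP(my_add)

// Usage: int r = my_add_dyn(1, 2);  // resolves my_add from libmy.so at runtime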
paddle/gserver/layers/WarpCTCLayer.cpp
@@ -100,8 +100,8 @@ void WarpCTCLayer::forward(PassType passType) {
   /* labels always in CPU memory */
   Matrix::resizeOrCreate(cpuCosts_,
-                         /* width */ numSequences,
-                         /* height */ 1,
+                         /* height */ numSequences,
+                         /* width */ 1,
                          /* trans */ false,
                          /* useGpu */ false);
@@ -209,17 +209,11 @@ void WarpCTCLayer::batch2seqPadding(const MatrixPtr& seqValue,
     int sequenceStart = seqStartPositionsData[i];
     int sequenceLength =
         seqStartPositionsData[i + 1] - seqStartPositionsData[i];
+    real scale = normByTimes ? (1.0f / (real)sequenceLength) : 1.0f;
     for (int j = 0; j < sequenceLength; j++) {
-      if (normByTimes) {
-        for (size_t k = 0; k < numClasses_; k++) {
-          seqData[(sequenceStart + j) * numClasses_ + k] =
-              batchData[(j * numSequences + i) * numClasses_ + k] /
-              sequenceLength;
-        }
-      } else {
-        memcpy(seqData + (sequenceStart + j) * numClasses_,
-               batchData + (j * numSequences + i) * numClasses_,
-               numClasses_ * sizeof(real));
+      for (size_t k = 0; k < numClasses_; k++) {
+        seqData[(sequenceStart + j) * numClasses_ + k] =
+            batchData[(j * numSequences + i) * numClasses_ + k] * scale;
       }
     }
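Two changes land in this layer. The resizeOrCreate comments are corrected: cpuCosts_ is a numSequences x 1 column vector (height = numSequences, width = 1) holding one CTC cost per sequence. And batch2seqPadding gets the same hoisted scale as the CUDA kernel: the divide-per-element branch and the memcpy branch collapse into a single loop that multiplies by scale, which is 1/sequenceLength or 1. A plain-array sketch of the cost layout; CostBuffer is a hypothetical stand-in for Paddle's Matrix:

#include <cstddef>
#include <vector>

// One CTC cost per sequence, stored as a numSequences x 1 column vector.
struct CostBuffer {
  std::size_t height;  // numSequences: one row per sequence
  std::size_t width;   // 1: a single cost value per row
  std::vector<float> data;

  explicit CostBuffer(std::size_t numSequences)
      : height(numSequences), width(1), data(numSequences, 0.0f) {}

  float& costOf(std::size_t sequenceIdx) { return data[sequenceIdx]; }
};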
paddle/gserver/tests/test_WarpCTCLayer.cpp
@@ -30,7 +30,7 @@ P_DECLARE_bool(use_gpu);
 const real* getData(const Matrix& matrix) {
   if (matrix.useGpu()) {
     MatrixPtr cpuMatrix = Matrix::create(
-        matrix.getWidth(), matrix.getHeight(), matrix.isTransposed(), false);
+        matrix.getHeight(), matrix.getWidth(), matrix.isTransposed(), false);
     cpuMatrix->copyFrom(matrix);
     return cpuMatrix->getData();
   } else {
@@ -200,41 +200,43 @@ LayerPtr createWarpCTCLayer(string name,
 TEST(Layer, WarpCTCLayer) {
   for (auto layerSize : {10, 64, 128}) {
     for (auto batchSize : {1, 10, 20, 64}) {
-      for (auto useGpu : {false, true}) {
+      for (auto normByTimes : {false, true}) {
+        for (auto useGpu : {false, true}) {
 #ifdef PADDLE_ONLY_CPU
-        if (useGpu) continue;
+          if (useGpu) continue;
 #endif
-        LOG(INFO) << " layerSize=" << layerSize << " batchSize=" << batchSize
-                  << " useGpu=" << useGpu;
+          LOG(INFO) << " layerSize=" << layerSize << " batchSize=" << batchSize
+                    << " normByTimes = " << normByTimes << " useGpu=" << useGpu;

-        FLAGS_use_gpu = useGpu;
+          FLAGS_use_gpu = useGpu;

-        Argument data0;
-        initArgument(batchSize, layerSize, useGpu, data0);
+          Argument data0;
+          initArgument(batchSize, layerSize, useGpu, data0);

-        Argument data1;
-        data1.resizeAndCopyFrom(data0);
+          Argument data1;
+          data1.resizeAndCopyFrom(data0);

-        LayerPtr dataLayer0 =
-            createDataLayer("data", batchSize, layerSize, useGpu, data0);
-        LayerPtr dataLayer1 =
-            createDataLayer("data", batchSize, layerSize, useGpu, data1);
+          LayerPtr dataLayer0 =
+              createDataLayer("data", batchSize, layerSize, useGpu, data0);
+          LayerPtr dataLayer1 =
+              createDataLayer("data", batchSize, layerSize, useGpu, data1);

-        LayerPtr labelLayer =
-            createLabelLayer("label", batchSize, layerSize, useGpu);
+          LayerPtr labelLayer =
+              createLabelLayer("label", batchSize, layerSize, useGpu);

-        LayerPtr warpctcLayer = createWarpCTCLayer(
-            "cost", layerSize, useGpu, false, dataLayer0, labelLayer);
-        LayerPtr ctcLayer = createCTCLayer(
-            "cost", layerSize, useGpu, false, dataLayer1, labelLayer);
+          LayerPtr warpctcLayer = createWarpCTCLayer(
+              "cost", layerSize, useGpu, normByTimes, dataLayer0, labelLayer);
+          LayerPtr ctcLayer = createCTCLayer(
+              "cost", layerSize, useGpu, normByTimes, dataLayer1, labelLayer);

-        /// Check loss
-        checkError(*(warpctcLayer->getOutput().value),
-                   *(ctcLayer->getOutput().value));
+          /// Check loss
+          checkError(*(warpctcLayer->getOutput().value),
+                     *(ctcLayer->getOutput().value));

-        /// Check gradients
-        checkError(*(dataLayer0->getOutput().grad),
-                   *(dataLayer1->getOutput().grad));
+          /// Check gradients
+          checkError(*(dataLayer0->getOutput().grad),
+                     *(dataLayer1->getOutput().grad));
+        }
       }
     }
   }
 }
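The test sweep gains normByTimes as a fourth loop and passes it through to createWarpCTCLayer and createCTCLayer instead of hard-coding false, doubling the configurations from 24 to 48 (3 layer sizes x 4 batch sizes x 2 normByTimes x 2 useGpu). A sketch of the expanded sweep; runCase is a hypothetical stand-in for the layer-comparison body:

#include <cstdio>

// Stand-in for the body that builds both layers and compares loss/gradients.
void runCase(int layerSize, int batchSize, bool normByTimes, bool useGpu) {
  std::printf("layerSize=%d batchSize=%d normByTimes=%d useGpu=%d\n",
              layerSize, batchSize, normByTimes, useGpu);
}

int main() {
  for (int layerSize : {10, 64, 128})
    for (int batchSize : {1, 10, 20, 64})
      for (bool normByTimes : {false, true})
        for (bool useGpu : {false, true})
          runCase(layerSize, batchSize, normByTimes, useGpu);  // 48 cases
  return 0;
}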
warp-ctc (new submodule) @ bd535c8d
Subproject commit bd535c8d44e03c8ebd2d768e06c8c05fdccd11d2