Commit 18b85e55 authored by Liu Yiqun

Add a script to auto compile the warp-ctc submodule.

Parent 25f1fbc4
@@ -18,8 +18,7 @@ set(CUDA_CXX_WITH_GPU_SOURCES
     src/hl_cudart_wrap.cc
     src/hl_cuda_cublas.cc
     src/hl_cuda_cudnn.cc
-    src/hl_cuda_device.cc
-)
+    src/hl_cuda_device.cc)

 if(WITH_GPU)
     set(CUDA_CXX_SOURCES
...
@@ -71,9 +71,13 @@ add_unittest(test_RecurrentLayer
 ############### test_WarpCTCLayer #######################
 if(NOT WITH_DOUBLE)
-    add_unittest(test_WarpCTCLayer
+    add_unittest_without_exec(test_WarpCTCLayer
         test_WarpCTCLayer.cpp
         TestUtil.cpp)
+
+    add_test(NAME test_WarpCTCLayer
+        COMMAND ${CMAKE_CURRENT_BINARY_DIR}/test_WarpCTCLayer --warpctc_dir=${PROJ_ROOT}/warp-ctc/build
+        WORKING_DIRECTORY ${PROJ_ROOT}/paddle)
 endif()

 ############### test_RecurrentGradientMachine ###############
...
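A note on the CMake change above: judging by its name, add_unittest_without_exec builds the test binary without registering a default CTest entry, so that add_test() can register it manually with the extra --warpctc_dir argument and a fixed working directory. Under that assumption, the registered test is roughly equivalent to running the binary by hand:

    # Hypothetical manual invocation mirroring the add_test() above;
    # <build-dir> stands for CMAKE_CURRENT_BINARY_DIR on the local machine.
    cd ${PROJ_ROOT}/paddle
    <build-dir>/test_WarpCTCLayer --warpctc_dir=${PROJ_ROOT}/warp-ctc/build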
@@ -38,7 +38,7 @@ const real* getData(const Matrix& matrix) {
   }
 }

-void checkError(const Matrix& matrix1, const Matrix& matrix2) {
+int checkError(const Matrix& matrix1, const Matrix& matrix2) {
   CHECK_EQ(matrix1.getHeight(), matrix2.getHeight());
   CHECK_EQ(matrix1.getWidth(), matrix2.getWidth());
   CHECK_EQ(matrix1.isTransposed(), matrix2.isTransposed());

@@ -62,6 +62,7 @@ void checkError(const Matrix& matrix1, const Matrix& matrix2) {
     }
   }
   EXPECT_EQ(count, 0) << "There are " << count << " different element.";
+  return count;
 }

 void initArgument(size_t batchSize,
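For context, the diff only shows the edges of checkError; a minimal sketch of the updated function, with the element-wise comparison loop and the tolerance value assumed rather than taken from the source:

    // Sketch: the signature, CHECK_EQ guards, getData(), count, and the new
    // return statement come from the hunks above; the loop body and the 1e-3
    // tolerance are assumptions for illustration.
    int checkError(const Matrix& matrix1, const Matrix& matrix2) {
      CHECK_EQ(matrix1.getHeight(), matrix2.getHeight());
      CHECK_EQ(matrix1.getWidth(), matrix2.getWidth());
      CHECK_EQ(matrix1.isTransposed(), matrix2.isTransposed());

      const real* data1 = getData(matrix1);
      const real* data2 = getData(matrix2);
      int count = 0;
      for (size_t i = 0; i < matrix1.getHeight() * matrix1.getWidth(); ++i) {
        if (fabs(data1[i] - data2[i]) > 1e-3) {
          ++count;
        }
      }
      EXPECT_EQ(count, 0) << "There are " << count << " different element.";
      // New in this commit: return the mismatch count so callers can log it.
      return count;
    }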
@@ -72,7 +73,6 @@ void initArgument(size_t batchSize,
   data.grad = Matrix::create(batchSize, layerSize, false, useGpu);
   data.value->randomizeUniform();
   data.value->add(-0.5);
-  /// data.value->sigmoid(*data.value);
   data.grad->zeroMem();

   generateSequenceStartPositions(batchSize, data.sequenceStartPositions);

@@ -90,9 +90,6 @@ LayerPtr createDataLayer(
   dataLayer->setData(data);
   dataLayer->forward(PASS_GC);
-
-  /// std::cout << "dataLayer: " << std::endl;
-  /// (dataLayer->getOutput().value)->print(std::cout);

   return layer;
 }
@@ -198,14 +195,14 @@ LayerPtr createWarpCTCLayer(string name,
 }

 TEST(Layer, WarpCTCLayer) {
-  for (auto layerSize : {10, 64, 128}) {
-    for (auto batchSize : {1, 10, 20, 64}) {
+  for (auto layerSize : {10, 64}) {
+    for (auto batchSize : {1, 10, 32}) {
       for (auto normByTimes : {false, true}) {
         for (auto useGpu : {false, true}) {
 #ifdef PADDLE_ONLY_CPU
           if (useGpu) continue;
 #endif
-          LOG(INFO) << " layerSize=" << layerSize << " batchSize=" << batchSize
+          LOG(INFO) << "layerSize=" << layerSize << " batchSize=" << batchSize
                     << " normByTimes = " << normByTimes << " useGpu=" << useGpu;

           FLAGS_use_gpu = useGpu;

@@ -229,13 +226,17 @@ TEST(Layer, WarpCTCLayer) {
           LayerPtr ctcLayer = createCTCLayer(
               "cost", layerSize, useGpu, normByTimes, dataLayer1, labelLayer);

-          /// Check loss
-          checkError(*(warpctcLayer->getOutput().value),
-                     *(ctcLayer->getOutput().value));
+          /// Check cost
+          LOG(INFO) << "Check cost: "
+                    << checkError(*(warpctcLayer->getOutput().value),
+                                  *(ctcLayer->getOutput().value))
+                    << " different elements.";

           /// Check gradients
-          checkError(*(dataLayer0->getOutput().grad),
-                     *(dataLayer1->getOutput().grad));
+          LOG(INFO) << "Check gradients: "
+                    << checkError(*(dataLayer0->getOutput().grad),
+                                  *(dataLayer1->getOutput().grad))
+                    << " different elements";
         }
       }
     }
...
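The --warpctc_dir argument passed by add_test() has to be consumed inside the test binary. The test already uses gflags (see FLAGS_use_gpu above), so the flag is presumably declared along these lines; the exact macro and the code that uses the directory are not shown in this diff:

    // Hypothetical gflags declaration for --warpctc_dir; Paddle's actual
    // macro name and how the warp-ctc library is loaded from this path
    // may differ.
    DEFINE_string(warpctc_dir, "", "Directory of the pre-built warp-ctc library.");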
 #!/bin/bash
+./submodules.sh
 source ./common.sh

 CMAKE_EXTRA=""
 if [[ "$TRAVIS_OS_NAME" == "osx" ]]; then
...
#!/bin/bash
set -e

PROJ_ROOT=$(git rev-parse --show-cdup)
SUBMODULES=$(grep path ${PROJ_ROOT}.gitmodules | sed 's/^.*path = //')

for module in $SUBMODULES
do
  case $module in
    "warp-ctc")
      if [ -d ${PROJ_ROOT}warp-ctc/build ]; then
        rm -rf ${PROJ_ROOT}warp-ctc/build
      fi
      mkdir ${PROJ_ROOT}warp-ctc/build
      cd ${PROJ_ROOT}warp-ctc/build
      cmake ..; make
      ;;
  esac
done
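One detail worth noting in the script above: git rev-parse --show-cdup prints the relative path from the current directory up to the repository root, and an empty string when already at the root, which is why ${PROJ_ROOT} is concatenated without a separating slash. For example (directory names illustrative):

    # Two levels below the repository root:
    $ cd paddle/scripts && git rev-parse --show-cdup
    ../../
    # At the root it prints nothing, so ${PROJ_ROOT}warp-ctc resolves to
    # warp-ctc there and to ../../warp-ctc from the subdirectory.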