提交 936b0ed1 编写于 作者: X xutianbing

add paddle_test_util static lib to simplify unit test.

上级 495649af
...@@ -156,6 +156,7 @@ function(link_paddle_test TARGET_NAME) ...@@ -156,6 +156,7 @@ function(link_paddle_test TARGET_NAME)
link_paddle_exe(${TARGET_NAME}) link_paddle_exe(${TARGET_NAME})
target_link_libraries(${TARGET_NAME} target_link_libraries(${TARGET_NAME}
paddle_test_main paddle_test_main
paddle_test_util
${GTEST_LIBRARIES}) ${GTEST_LIBRARIES})
endfunction() endfunction()
......
...@@ -17,9 +17,7 @@ if(WITH_TESTING) ...@@ -17,9 +17,7 @@ if(WITH_TESTING)
# file(GLOB test_files . *OpTest.cpp) # file(GLOB test_files . *OpTest.cpp)
# add_executable(${test_bin} EXCLUDE_FROM_ALL ${test_files}) # add_executable(${test_bin} EXCLUDE_FROM_ALL ${test_files})
add_simple_unittest(CrossMapNormalOpTest) add_simple_unittest(CrossMapNormalOpTest)
add_unittest(ContextProjectionOpTest add_simple_unittest(ContextProjectionOpTest)
ContextProjectionOpTest.cpp
../gserver/tests/TestUtil.cpp)
endif() endif()
endif() endif()
......
...@@ -14,8 +14,8 @@ limitations under the License. */ ...@@ -14,8 +14,8 @@ limitations under the License. */
#include <gtest/gtest.h> #include <gtest/gtest.h>
#include "FunctionTest.h" #include "FunctionTest.h"
#include "paddle/gserver/tests/TestUtil.h"
#include "paddle/math/Matrix.h" #include "paddle/math/Matrix.h"
#include "paddle/testing/TestUtil.h"
using namespace paddle; // NOLINT using namespace paddle; // NOLINT
......
...@@ -2,8 +2,7 @@ ...@@ -2,8 +2,7 @@
################### test_ProtoDataProvider ############ ################### test_ProtoDataProvider ############
add_unittest_without_exec(test_ProtoDataProvider add_unittest_without_exec(test_ProtoDataProvider
test_ProtoDataProvider.cpp test_ProtoDataProvider.cpp)
TestUtil.cpp)
# test_ProtoDataProvider will mkdir as same name, # test_ProtoDataProvider will mkdir as same name,
# so if WORKING_DIRECTORY is default directory, then # so if WORKING_DIRECTORY is default directory, then
...@@ -15,53 +14,46 @@ add_test(NAME test_ProtoDataProvider ...@@ -15,53 +14,46 @@ add_test(NAME test_ProtoDataProvider
################# test_LayerGrad ####################### ################# test_LayerGrad #######################
add_unittest_without_exec(test_LayerGrad add_unittest_without_exec(test_LayerGrad
test_LayerGrad.cpp test_LayerGrad.cpp
LayerGradUtil.cpp LayerGradUtil.cpp)
TestUtil.cpp)
add_test(NAME test_LayerGrad add_test(NAME test_LayerGrad
COMMAND test_LayerGrad) COMMAND test_LayerGrad)
add_unittest_without_exec(test_ActivationGrad add_unittest_without_exec(test_ActivationGrad
test_ActivationGrad.cpp test_ActivationGrad.cpp
LayerGradUtil.cpp LayerGradUtil.cpp)
TestUtil.cpp)
add_test(NAME test_ActivationGrad add_test(NAME test_ActivationGrad
COMMAND test_ActivationGrad) COMMAND test_ActivationGrad)
################# test_ConvTrans ####################### ################# test_ConvTrans #######################
add_unittest_without_exec(test_ConvTrans add_unittest_without_exec(test_ConvTrans
test_ConvTrans.cpp test_ConvTrans.cpp
LayerGradUtil.cpp LayerGradUtil.cpp)
TestUtil.cpp)
add_test(NAME test_ConvTrans add_test(NAME test_ConvTrans
COMMAND test_ConvTrans) COMMAND test_ConvTrans)
################# test_PriorBox ####################### ################# test_PriorBox #######################
add_unittest_without_exec(test_PriorBox add_unittest_without_exec(test_PriorBox
test_PriorBox.cpp test_PriorBox.cpp
LayerGradUtil.cpp LayerGradUtil.cpp)
TestUtil.cpp)
add_test(NAME test_PriorBox add_test(NAME test_PriorBox
COMMAND test_PriorBox) COMMAND test_PriorBox)
################# test_ConvUnify ####################### ################# test_ConvUnify #######################
add_unittest_without_exec(test_ConvUnify add_unittest_without_exec(test_ConvUnify
test_ConvUnify.cpp test_ConvUnify.cpp
LayerGradUtil.cpp LayerGradUtil.cpp)
TestUtil.cpp)
add_test(NAME test_ConvUnify add_test(NAME test_ConvUnify
COMMAND test_ConvUnify) COMMAND test_ConvUnify)
################# test_BatchNorm ####################### ################# test_BatchNorm #######################
add_unittest_without_exec(test_BatchNorm add_unittest_without_exec(test_BatchNorm
test_BatchNorm.cpp test_BatchNorm.cpp
LayerGradUtil.cpp LayerGradUtil.cpp)
TestUtil.cpp)
add_test(NAME test_BatchNorm add_test(NAME test_BatchNorm
COMMAND test_BatchNorm) COMMAND test_BatchNorm)
################## test_Evaluator ####################### ################## test_Evaluator #######################
add_unittest(test_Evaluator add_unittest(test_Evaluator
test_Evaluator.cpp test_Evaluator.cpp)
TestUtil.cpp)
################ test_LinearChainCRF #################### ################ test_LinearChainCRF ####################
add_simple_unittest(test_LinearChainCRF) add_simple_unittest(test_LinearChainCRF)
...@@ -72,8 +64,7 @@ add_simple_unittest(test_MultinomialSampler) ...@@ -72,8 +64,7 @@ add_simple_unittest(test_MultinomialSampler)
############## test_PyDataProvider ######################## ############## test_PyDataProvider ########################
if(WITH_PYTHON) if(WITH_PYTHON)
add_unittest_without_exec(test_PyDataProvider add_unittest_without_exec(test_PyDataProvider
test_PyDataProvider.cpp test_PyDataProvider.cpp)
TestUtil.cpp)
add_test(NAME test_PyDataProvider add_test(NAME test_PyDataProvider
COMMAND .set_python_path.sh -d ./gserver/tests:${PROJ_ROOT}/python/ ${CMAKE_CURRENT_BINARY_DIR}/test_PyDataProvider COMMAND .set_python_path.sh -d ./gserver/tests:${PROJ_ROOT}/python/ ${CMAKE_CURRENT_BINARY_DIR}/test_PyDataProvider
...@@ -81,15 +72,12 @@ if(WITH_PYTHON) ...@@ -81,15 +72,12 @@ if(WITH_PYTHON)
endif() endif()
############### test_RecurrentLayer ####################### ############### test_RecurrentLayer #######################
add_unittest(test_RecurrentLayer add_simple_unittest(test_RecurrentLayer)
test_RecurrentLayer.cpp
TestUtil.cpp)
############### test_WarpCTCLayer ####################### ############### test_WarpCTCLayer #######################
if(NOT WITH_DOUBLE) if(NOT WITH_DOUBLE)
add_unittest_without_exec(test_WarpCTCLayer add_unittest_without_exec(test_WarpCTCLayer
test_WarpCTCLayer.cpp test_WarpCTCLayer.cpp)
TestUtil.cpp)
add_test(NAME test_WarpCTCLayer add_test(NAME test_WarpCTCLayer
COMMAND ${CMAKE_CURRENT_BINARY_DIR}/test_WarpCTCLayer --warpctc_dir=${PROJ_ROOT}/warp-ctc/build COMMAND ${CMAKE_CURRENT_BINARY_DIR}/test_WarpCTCLayer --warpctc_dir=${PROJ_ROOT}/warp-ctc/build
...@@ -108,8 +96,7 @@ add_test(NAME test_RecurrentGradientMachine ...@@ -108,8 +96,7 @@ add_test(NAME test_RecurrentGradientMachine
WORKING_DIRECTORY ${PROJ_ROOT}/paddle) WORKING_DIRECTORY ${PROJ_ROOT}/paddle)
add_unittest_without_exec(test_NetworkCompare add_unittest_without_exec(test_NetworkCompare
test_NetworkCompare.cpp test_NetworkCompare.cpp)
TestUtil.cpp)
if(WITH_GPU) if(WITH_GPU)
add_test(NAME test_NetworkCompare add_test(NAME test_NetworkCompare
COMMAND .set_python_path.sh -d ${PROJ_ROOT}/python ${CMAKE_CURRENT_BINARY_DIR}/test_NetworkCompare --use_gpu=true COMMAND .set_python_path.sh -d ${PROJ_ROOT}/python ${CMAKE_CURRENT_BINARY_DIR}/test_NetworkCompare --use_gpu=true
......
...@@ -17,7 +17,7 @@ limitations under the License. */ ...@@ -17,7 +17,7 @@ limitations under the License. */
#include "paddle/gserver/layers/DataLayer.h" #include "paddle/gserver/layers/DataLayer.h"
#include "paddle/trainer/Trainer.h" #include "paddle/trainer/Trainer.h"
#include "TestUtil.h" #include "paddle/testing/TestUtil.h"
using namespace std; // NOLINT using namespace std; // NOLINT
namespace paddle { namespace paddle {
......
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include "TestUtil.h"
#include <gflags/gflags.h>
#include "paddle/math/SparseMatrix.h"
DEFINE_int32(fixed_seq_length, 0, "Produce some sequence of fixed length");
namespace paddle {
// Generate a random string of length `len` drawn from [0-9A-Za-z].
// Uses rand(), so results depend on the global seed (srand).
std::string randStr(const int len) {
  static const std::string charset =
      "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz";
  std::string s;
  s.reserve(len);
  for (int i = 0; i < len; ++i) {
    // Derive the modulus from the charset itself instead of a hard-coded
    // 62, so the constant cannot drift out of sync with the string.
    s += charset[rand() % charset.size()];  // NOLINT
  }
  return s;
}
// Build a height x width random sparse matrix in CSR format.
// Row i gets min(k, width) entries, where k is drawn from [0, 10);
// with equalNnzPerSample the same non-zero k is reused for every row.
// Column indices are random in [0, width); when withValue is true each
// entry additionally carries a random value in [0, 1].
MatrixPtr makeRandomSparseMatrix(size_t height,
                                 size_t width,
                                 bool withValue,
                                 bool useGpu,
                                 bool equalNnzPerSample) {
  std::vector<int64_t> ids(height);
  std::vector<int64_t> indices(height + 1);  // CSR row-offset array
  indices[0] = 0;

  // Per-row non-zero count generator.
  std::function<size_t()> randomer = [] { return uniformRandom(10); };
  if (equalNnzPerSample) {
    // Draw one non-zero count in [1, 10) and reuse it for all rows.
    size_t n = 0;
    do {
      n = uniformRandom(10);
    } while (!n);
    randomer = [=] { return n; };
  }

  // Fill row offsets (clamped to the matrix width) and row ids.
  for (size_t i = 0; i < height; ++i) {
    indices[i + 1] = indices[i] + std::min(randomer(), width);
    ids[i] = i;
  }

  if (!withValue) {
    // Structure-only matrix (NO_VALUE): entries hold column indices only.
    // NOTE(review): column indices are drawn independently, so duplicates
    // within a row are possible — confirm callers tolerate that.
    std::vector<sparse_non_value_t> data;
    data.resize(indices[height] - indices[0]);
    for (size_t i = 0; i < data.size(); ++i) {
      data[i].col = uniformRandom(width);
    }
    auto mat = Matrix::createSparseMatrix(
        height, width, data.size(), NO_VALUE, SPARSE_CSR, false, useGpu);
    if (useGpu) {
      std::dynamic_pointer_cast<GpuSparseMatrix>(mat)->copyFrom(
          ids.data(), indices.data(), data.data(), HPPL_STREAM_DEFAULT);
    } else {
      std::dynamic_pointer_cast<CpuSparseMatrix>(mat)->copyFrom(
          ids.data(), indices.data(), data.data());
    }
    return mat;
  } else {
    // Valued matrix (FLOAT_VALUE): each entry also gets a value in [0, 1].
    std::vector<sparse_float_value_t> data;
    data.resize(indices[height] - indices[0]);
    for (size_t i = 0; i < data.size(); ++i) {
      data[i].col = uniformRandom(width);
      data[i].value = rand() / static_cast<float>(RAND_MAX);  // NOLINT
    }
    auto mat = Matrix::createSparseMatrix(
        height, width, data.size(), FLOAT_VALUE, SPARSE_CSR, false, useGpu);
    if (useGpu) {
      std::dynamic_pointer_cast<GpuSparseMatrix>(mat)->copyFrom(
          ids.data(), indices.data(), data.data(), HPPL_STREAM_DEFAULT);
    } else {
      std::dynamic_pointer_cast<CpuSparseMatrix>(mat)->copyFrom(
          ids.data(), indices.data(), data.data());
    }
    return mat;
  }
}
// Convenience overload producing an IVector: delegates to the
// ICpuGpuVector overload, then hands back that vector's CPU-side view
// (the `false` argument mirrors the CPU flag used throughout this file).
void generateSequenceStartPositions(size_t batchSize,
                                    IVectorPtr& sequenceStartPositions) {
  ICpuGpuVectorPtr gpuCpuVec;
  generateSequenceStartPositions(batchSize, gpuCpuVec);
  sequenceStartPositions = gpuCpuVec->getMutableVector(false);
}
// Fill sequenceStartPositions with offsets partitioning a batch of
// batchSize samples into sequences.  With --fixed_seq_length != 0 every
// sequence has that length; otherwise lengths are random, with each
// remaining sequence guaranteed at least one sample.  The result has
// numSeqs + 1 entries; the last entry equals batchSize.
void generateSequenceStartPositions(size_t batchSize,
                                    ICpuGpuVectorPtr& sequenceStartPositions) {
  int numSeqs;
  if (FLAGS_fixed_seq_length != 0) {
    numSeqs = std::ceil((float)batchSize / (float)FLAGS_fixed_seq_length);
  } else {
    numSeqs = batchSize / 10 + 1;  // roughly ten samples per sequence
  }
  sequenceStartPositions =
      ICpuGpuVector::create(numSeqs + 1, /* useGpu= */ false);
  int* buf = sequenceStartPositions->getMutableData(false);
  int64_t pos = 0;
  int len = FLAGS_fixed_seq_length;
  int maxLen = 2 * batchSize / numSeqs;  // cap on one random length
  for (int i = 0; i < numSeqs; ++i) {
    if (FLAGS_fixed_seq_length == 0) {
      // Budget: leave at least one sample for each sequence still to come
      // (numSeqs - i of them, including this one); +1 keeps len >= 1.
      len = uniformRandom(
                std::min<int64_t>(maxLen, batchSize - pos - numSeqs + i)) +
            1;
    }
    buf[i] = pos;
    pos += len;
    VLOG(1) << " len=" << len;
  }
  // Final offset closes the last sequence exactly at the batch boundary.
  buf[numSeqs] = batchSize;
}
// Split each sequence into two random sub-sequences; sequences of length
// one cannot be split and stay as a single sub-sequence.  Emits the
// combined sub-sequence offsets (numSubSeqs + 1 entries).
void generateSubSequenceStartPositions(
    const ICpuGpuVectorPtr& sequenceStartPositions,
    ICpuGpuVectorPtr& subSequenceStartPositions) {
  int numSeqs = sequenceStartPositions->getSize() - 1;
  const int* buf = sequenceStartPositions->getData(false);
  // Count the length-1 sequences: they contribute one sub-seq, not two.
  int numOnes = 0;
  for (int i = 0; i < numSeqs; ++i) {
    if (buf[i + 1] - buf[i] == 1) {
      ++numOnes;
    }
  }
  // each seq has two sub-seq except length 1
  int numSubSeqs = numSeqs * 2 - numOnes;
  subSequenceStartPositions =
      ICpuGpuVector::create(numSubSeqs + 1, /* useGpu= */ false);
  int* subBuf = subSequenceStartPositions->getMutableData(false);
  int j = 0;
  for (int i = 0; i < numSeqs; ++i) {
    if (buf[i + 1] - buf[i] == 1) {
      subBuf[j++] = buf[i];
    } else {
      // Split point drawn uniformly inside the sequence; the +1 keeps
      // both halves non-empty.
      int len = uniformRandom(buf[i + 1] - buf[i] - 1) + 1;
      subBuf[j++] = buf[i];
      subBuf[j++] = buf[i] + len;
    }
  }
  // Close the last sub-sequence at the end of the last sequence.
  subBuf[j] = buf[numSeqs];
}
// Produce a (height, width) dim pair for each sequence such that
// height * width == sequence length.
// NOTE(review): the divisor loop pushes the literal 1 for every divisor
// of len, so dimVec contains only 1s and the result is always (1, len).
// Presumably `dimVec.push_back(j + 1)` was intended — confirm before
// relying on varied dims.
void generateMDimSequenceData(const IVectorPtr& sequenceStartPositions,
                              IVectorPtr& cpuSequenceDims) {
  /* generate sequences with 2 dims */
  int numSeqs = sequenceStartPositions->getSize() - 1;
  int numDims = 2;

  cpuSequenceDims = IVector::create(numSeqs * numDims, /* useGpu= */ false);
  int* bufStarts = sequenceStartPositions->getData();
  int* bufDims = cpuSequenceDims->getData();

  for (int i = 0; i < numSeqs; i++) {
    int len = bufStarts[i + 1] - bufStarts[i];
    /* get width and height randomly */
    std::vector<int> dimVec;
    for (int j = 0; j < len; j++) {
      // One entry per divisor of len (j + 1 divides len).
      if (len % (j + 1) == 0) {
        dimVec.push_back(1);
      }
    }
    // len >= 1 guarantees dimVec is non-empty (1 always divides len).
    int idx = rand() % dimVec.size();  // NOLINT use rand_r
    bufDims[i * numDims] = dimVec[idx];
    bufDims[i * numDims + 1] = len / dimVec[idx];
  }
}
// ICpuGpuVector overload of generateMDimSequenceData: same dim
// generation, reading start positions from the CPU side (`false`).
// NOTE(review): as in the IVector overload, the loop pushes the literal
// 1 for every divisor of len, so dims are always (1, len) — presumably
// `dimVec.push_back(j + 1)` was intended; confirm.
void generateMDimSequenceData(const ICpuGpuVectorPtr& sequenceStartPositions,
                              IVectorPtr& cpuSequenceDims) {
  /* generate sequences with 2 dims */
  int numSeqs = sequenceStartPositions->getSize() - 1;
  int numDims = 2;

  cpuSequenceDims = IVector::create(numSeqs * numDims, /* useGpu= */ false);
  const int* bufStarts = sequenceStartPositions->getData(false);
  int* bufDims = cpuSequenceDims->getData();

  for (int i = 0; i < numSeqs; i++) {
    int len = bufStarts[i + 1] - bufStarts[i];
    /* get width and height randomly */
    std::vector<int> dimVec;
    for (int j = 0; j < len; j++) {
      // One entry per divisor of len (j + 1 divides len).
      if (len % (j + 1) == 0) {
        dimVec.push_back(1);
      }
    }
    // len >= 1 guarantees dimVec is non-empty (1 always divides len).
    int idx = rand() % dimVec.size();  // NOLINT use rand_r
    bufDims[i * numDims] = dimVec[idx];
    bufDims[i * numDims + 1] = len / dimVec[idx];
  }
}
void checkMatrixEqual(const MatrixPtr& a, const MatrixPtr& b) {
EXPECT_EQ(a->getWidth(), b->getWidth());
EXPECT_EQ(a->getHeight(), b->getHeight());
EXPECT_EQ(a->isTransposed(), b->isTransposed());
for (size_t r = 0; r < a->getHeight(); ++r) {
for (size_t c = 0; c < a->getWidth(); ++c) {
EXPECT_FLOAT_EQ(a->getElement(r, c), b->getElement(r, c));
}
}
}
void checkVectorEqual(const IVectorPtr& a, const IVectorPtr& b) {
EXPECT_EQ(a->getSize(), b->getSize());
for (size_t r = 0; r < a->getSize(); ++r) {
EXPECT_FLOAT_EQ(a->get(r), b->get(r));
}
}
} // namespace paddle
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#pragma once

#include <gtest/gtest.h>
#include "paddle/math/Matrix.h"

namespace paddle {

// Generate a random string of length `len` drawn from [0-9A-Za-z].
std::string randStr(const int len);
// Random integer in [0, n); returns 0 when n == 0, guarding the
// modulo against division by zero.
inline int uniformRandom(int n) {
  return (n != 0) ? rand() % n : 0;
}
// True when |a - b| is within epsilon relative to the larger of the two
// magnitudes (relative-tolerance comparison).
inline bool approximatelyEqual(float a, float b, float epsilon) {
  const float larger = fabs(a) < fabs(b) ? fabs(b) : fabs(a);
  return fabs(a - b) <= larger * epsilon;
}
// Build a height x width random sparse matrix in CSR format.
//   withValue         - store random float values (FLOAT_VALUE) instead of
//                       structure only (NO_VALUE)
//   useGpu            - create the matrix in GPU memory
//   equalNnzPerSample - give every row the same number of non-zeros
MatrixPtr makeRandomSparseMatrix(size_t height,
                                 size_t width,
                                 bool withValue,
                                 bool useGpu,
                                 bool equalNnzPerSample = false);

/**
 * @brief generate sequenceStartPositions for INPUT_SEQUENCE_DATA,
 * INPUT_HASSUB_SEQUENCE_DATA and INPUT_SEQUENCE_LABEL
 *
 * @param batchSize batchSize
 *        sequenceStartPositions[out] generation output
 */
void generateSequenceStartPositions(size_t batchSize,
                                    IVectorPtr& sequenceStartPositions);

void generateSequenceStartPositions(size_t batchSize,
                                    ICpuGpuVectorPtr& sequenceStartPositions);

/**
 * @brief generate subSequenceStartPositions for INPUT_HASSUB_SEQUENCE_DATA
 * according to sequenceStartPositions
 *
 * @param sequenceStartPositions[in] input
 *        subSequenceStartPositions[out] generation output
 */
void generateSubSequenceStartPositions(const IVectorPtr& sequenceStartPositions,
                                       IVectorPtr& subSequenceStartPositions);

void generateSubSequenceStartPositions(
    const ICpuGpuVectorPtr& sequenceStartPositions,
    ICpuGpuVectorPtr& subSequenceStartPositions);

/**
 * @brief generate cpuSequenceDims for INPUT_SEQUENCE_MDIM_DATA according to
 * sequenceStartPositions
 *
 * @param sequenceStartPositions[in] input
 *        cpuSequenceDims[out] generation output
 */
void generateMDimSequenceData(const IVectorPtr& sequenceStartPositions,
                              IVectorPtr& cpuSequenceDims);

void generateMDimSequenceData(const ICpuGpuVectorPtr& sequenceStartPositions,
                              IVectorPtr& cpuSequenceDims);

// Element-wise gtest comparison of two matrices (dimensions, transpose
// flag, and every element).
void checkMatrixEqual(const MatrixPtr& a, const MatrixPtr& b);

// Element-wise gtest comparison of two integer vectors.
void checkVectorEqual(const IVectorPtr& a, const IVectorPtr& b);

}  // namespace paddle
...@@ -20,7 +20,7 @@ limitations under the License. */ ...@@ -20,7 +20,7 @@ limitations under the License. */
#include "paddle/trainer/Trainer.h" #include "paddle/trainer/Trainer.h"
#include "LayerGradUtil.h" #include "LayerGradUtil.h"
#include "TestUtil.h" #include "paddle/testing/TestUtil.h"
using namespace paddle; // NOLINT using namespace paddle; // NOLINT
using namespace std; // NOLINT using namespace std; // NOLINT
......
...@@ -22,7 +22,7 @@ limitations under the License. */ ...@@ -22,7 +22,7 @@ limitations under the License. */
#include "paddle/utils/GlobalConstants.h" #include "paddle/utils/GlobalConstants.h"
#include "LayerGradUtil.h" #include "LayerGradUtil.h"
#include "TestUtil.h" #include "paddle/testing/TestUtil.h"
using namespace paddle; // NOLINT using namespace paddle; // NOLINT
using namespace std; // NOLINT using namespace std; // NOLINT
......
...@@ -23,7 +23,7 @@ limitations under the License. */ ...@@ -23,7 +23,7 @@ limitations under the License. */
#include "paddle/utils/GlobalConstants.h" #include "paddle/utils/GlobalConstants.h"
#include "LayerGradUtil.h" #include "LayerGradUtil.h"
#include "TestUtil.h" #include "paddle/testing/TestUtil.h"
using namespace paddle; // NOLINT using namespace paddle; // NOLINT
using namespace std; // NOLINT using namespace std; // NOLINT
......
...@@ -23,7 +23,7 @@ limitations under the License. */ ...@@ -23,7 +23,7 @@ limitations under the License. */
#include "paddle/utils/GlobalConstants.h" #include "paddle/utils/GlobalConstants.h"
#include "LayerGradUtil.h" #include "LayerGradUtil.h"
#include "TestUtil.h" #include "paddle/testing/TestUtil.h"
using namespace paddle; // NOLINT using namespace paddle; // NOLINT
using namespace std; // NOLINT using namespace std; // NOLINT
......
...@@ -15,7 +15,7 @@ limitations under the License. */ ...@@ -15,7 +15,7 @@ limitations under the License. */
#include <gtest/gtest.h> #include <gtest/gtest.h>
#include <vector> #include <vector>
#include "ModelConfig.pb.h" #include "ModelConfig.pb.h"
#include "TestUtil.h" #include "paddle/testing/TestUtil.h"
#include "paddle/trainer/Trainer.h" #include "paddle/trainer/Trainer.h"
using namespace paddle; // NOLINT using namespace paddle; // NOLINT
......
...@@ -21,7 +21,7 @@ limitations under the License. */ ...@@ -21,7 +21,7 @@ limitations under the License. */
#include "paddle/trainer/Trainer.h" #include "paddle/trainer/Trainer.h"
#include "LayerGradUtil.h" #include "LayerGradUtil.h"
#include "TestUtil.h" #include "paddle/testing/TestUtil.h"
using namespace paddle; // NOLINT using namespace paddle; // NOLINT
using namespace std; // NOLINT using namespace std; // NOLINT
......
...@@ -18,7 +18,7 @@ limitations under the License. */ ...@@ -18,7 +18,7 @@ limitations under the License. */
#include <algorithm> #include <algorithm>
#include <cstdlib> #include <cstdlib>
#include "TestUtil.h" #include "paddle/testing/TestUtil.h"
#include "paddle/trainer/Trainer.h" #include "paddle/trainer/Trainer.h"
#include "paddle/utils/Stat.h" #include "paddle/utils/Stat.h"
......
...@@ -17,7 +17,7 @@ limitations under the License. */ ...@@ -17,7 +17,7 @@ limitations under the License. */
#include <vector> #include <vector>
#include "LayerGradUtil.h" #include "LayerGradUtil.h"
#include "TestUtil.h" #include "paddle/testing/TestUtil.h"
using namespace paddle; // NOLINT using namespace paddle; // NOLINT
using namespace std; // NOLINT using namespace std; // NOLINT
......
...@@ -20,7 +20,7 @@ limitations under the License. */ ...@@ -20,7 +20,7 @@ limitations under the License. */
#include "paddle/gserver/dataproviders/ProtoDataProvider.h" #include "paddle/gserver/dataproviders/ProtoDataProvider.h"
#include "paddle/utils/Util.h" #include "paddle/utils/Util.h"
#include "TestUtil.h" #include "paddle/testing/TestUtil.h"
using namespace std; // NOLINT using namespace std; // NOLINT
......
...@@ -20,7 +20,7 @@ limitations under the License. */ ...@@ -20,7 +20,7 @@ limitations under the License. */
#include "paddle/gserver/dataproviders/PyDataProvider.h" #include "paddle/gserver/dataproviders/PyDataProvider.h"
#include "paddle/utils/Util.h" #include "paddle/utils/Util.h"
#include "TestUtil.h" #include "paddle/testing/TestUtil.h"
using namespace std; // NOLINT using namespace std; // NOLINT
using namespace paddle; // NOLINT using namespace paddle; // NOLINT
......
...@@ -19,7 +19,7 @@ limitations under the License. */ ...@@ -19,7 +19,7 @@ limitations under the License. */
#include "paddle/gserver/layers/DataLayer.h" #include "paddle/gserver/layers/DataLayer.h"
#include "paddle/gserver/layers/Layer.h" #include "paddle/gserver/layers/Layer.h"
#include "TestUtil.h" #include "paddle/testing/TestUtil.h"
using namespace paddle; // NOLINT using namespace paddle; // NOLINT
using namespace std; // NOLINT using namespace std; // NOLINT
......
...@@ -20,7 +20,7 @@ limitations under the License. */ ...@@ -20,7 +20,7 @@ limitations under the License. */
#include "paddle/gserver/layers/Layer.h" #include "paddle/gserver/layers/Layer.h"
#include "paddle/gserver/layers/WarpCTCLayer.h" #include "paddle/gserver/layers/WarpCTCLayer.h"
#include "TestUtil.h" #include "paddle/testing/TestUtil.h"
using namespace paddle; // NOLINT using namespace paddle; // NOLINT
using namespace std; // NOLINT using namespace std; // NOLINT
......
...@@ -7,8 +7,7 @@ add_simple_unittest(test_SparseMatrix) ...@@ -7,8 +7,7 @@ add_simple_unittest(test_SparseMatrix)
# TODO(yuyang18): Refactor TestUtil.cpp. Remove this cross module reference. # TODO(yuyang18): Refactor TestUtil.cpp. Remove this cross module reference.
add_unittest(test_matrixCompare add_unittest(test_matrixCompare
test_matrixCompare.cpp test_matrixCompare.cpp)
../../gserver/tests/TestUtil.cpp)
add_simple_unittest(test_sparseMatrixCompare) add_simple_unittest(test_sparseMatrixCompare)
add_simple_unittest(test_perturbation) add_simple_unittest(test_perturbation)
......
...@@ -15,9 +15,9 @@ limitations under the License. */ ...@@ -15,9 +15,9 @@ limitations under the License. */
#ifndef PADDLE_ONLY_CPU #ifndef PADDLE_ONLY_CPU
#include <gtest/gtest.h> #include <gtest/gtest.h>
#include "paddle/gserver/tests/TestUtil.h"
#include "paddle/math/Matrix.h" #include "paddle/math/Matrix.h"
#include "paddle/math/SparseMatrix.h" #include "paddle/math/SparseMatrix.h"
#include "paddle/testing/TestUtil.h"
#include "paddle/utils/Stat.h" #include "paddle/utils/Stat.h"
#include "paddle/utils/Util.h" #include "paddle/utils/Util.h"
......
...@@ -18,9 +18,9 @@ limitations under the License. */ ...@@ -18,9 +18,9 @@ limitations under the License. */
#include <gtest/gtest.h> #include <gtest/gtest.h>
#include "TensorCheck.h" #include "TensorCheck.h"
#include "paddle/gserver/tests/TestUtil.h"
#include "paddle/math/Matrix.h" #include "paddle/math/Matrix.h"
#include "paddle/math/SparseMatrix.h" #include "paddle/math/SparseMatrix.h"
#include "paddle/testing/TestUtil.h"
#include "paddle/utils/Stat.h" #include "paddle/utils/Stat.h"
#include "paddle/utils/Util.h" #include "paddle/utils/Util.h"
......
...@@ -3,4 +3,6 @@ ...@@ -3,4 +3,6 @@
if(WITH_TESTING) if(WITH_TESTING)
add_library(paddle_test_main STATIC TestMain.cpp) add_library(paddle_test_main STATIC TestMain.cpp)
add_dependencies(paddle_test_main gen_proto_cpp) add_dependencies(paddle_test_main gen_proto_cpp)
add_library(paddle_test_util STATIC TestUtil.cpp)
add_dependencies(paddle_test_util gen_proto_cpp)
endif() endif()
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册