diff --git a/paddle/math/tests/test_matrixCompare.cpp b/paddle/math/tests/test_matrixCompare.cpp
index a0101d3f30e855ab23e01e409cecc68db4876c94..65a37cefe66f5cd8c59475ee37e48c9fda84dfaf 100644
--- a/paddle/math/tests/test_matrixCompare.cpp
+++ b/paddle/math/tests/test_matrixCompare.cpp
@@ -30,8 +30,6 @@ using namespace std;  // NOLINT
 using autotest::TensorCheckEqual;
 using autotest::TensorCheckErr;
 
-// clang-format off
-
 void testMatrixMaxSequence(int batchSize, int inputDim) {
   // forward
   MatrixPtr cpuInput = std::make_shared<CpuMatrix>(batchSize, inputDim);
@@ -1160,11 +1158,10 @@ void testBatch2seqPadding(int batchSize, int inputDim) {
   cpuOutput->zero();
   gpuOutput->zero();
 
-  size_t maxSeqLen = 0;
   size_t numSeq = cpuSequence->getSize() - 1;
-  maxSeqLen = *std::max_element(
-      cpuSequence->getData(), cpuSequence->getData() + numSeq);
+  size_t maxSeqLen = *std::max_element(cpuSequence->getData(),
+                                       cpuSequence->getData() + numSeq);
 
   MatrixPtr cBatch = std::make_shared<CpuMatrix>(numSeq * maxSeqLen, inputDim);
   MatrixPtr gBatch = std::make_shared<GpuMatrix>(numSeq * maxSeqLen, inputDim);
@@ -1180,8 +1177,6 @@ void testBatch2seqPadding(int batchSize, int inputDim) {
                            true);
   cCheck->copyFrom(*gBatch);
 
-  // CPU
-
   int* seqStart = cpuSequence->getData();
   float* batchData = cBatch->getData();
   float* seqData = cpuInput->getData();
@@ -1204,12 +1199,11 @@ void testBatch2seqPadding(int batchSize, int inputDim) {
   TensorCheckErr(*cBatch, *cCheck);
 }
 
-
 TEST(Matrix, warpCTC) {
   for (auto batchSize : {51, 1285, 3884}) {
     for (auto inputDim : {32, 512, 3026}) {
-      VLOG(3) << " batchSize=" << batchSize << " inputDim=" << inputDim;
-      testBatch2seqPadding(batchSize, inputDim);
+      VLOG(3) << " batchSize=" << batchSize << " inputDim=" << inputDim;
+      testBatch2seqPadding(batchSize, inputDim);
     }
   }
 }
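
For context on what TensorCheckErr(*cBatch, *cCheck) verifies in the hunks above: testBatch2seqPadding builds a CPU reference of the padded batch (cBatch) from the raw sequence data (seqStart, seqData) and compares it against the GPU result copied into cCheck. The sketch below is a self-contained illustration of such a reference loop under stated assumptions, not the patch's actual code: the function name, the use of std::vector instead of Paddle's Matrix/IVector types, and the sequence-major row mapping (step j of sequence i lands in row i * maxSeqLen + j) are all assumptions made for illustration; the real kernel and test may use a time-major layout instead.

#include <algorithm>
#include <vector>

// Hypothetical reference: copy numSeq variable-length sequences into a
// zero-padded [numSeq * maxSeqLen, inputDim] row-major batch matrix.
// seqStart holds numSeq + 1 start offsets, like cpuSequence in the test.
void seq2BatchPaddingRef(const std::vector<int>& seqStart,
                         const std::vector<float>& seqData,  // [batchSize, inputDim]
                         std::vector<float>& batchData,      // [numSeq * maxSeqLen, inputDim]
                         size_t maxSeqLen,
                         size_t inputDim) {
  size_t numSeq = seqStart.size() - 1;
  std::fill(batchData.begin(), batchData.end(), 0.0f);  // padding rows stay zero
  for (size_t i = 0; i < numSeq; ++i) {
    size_t seqLen = seqStart[i + 1] - seqStart[i];
    for (size_t j = 0; j < seqLen && j < maxSeqLen; ++j) {
      // Assumed layout: step j of sequence i goes to batch row i * maxSeqLen + j.
      std::copy(seqData.begin() + (seqStart[i] + j) * inputDim,
                seqData.begin() + (seqStart[i] + j + 1) * inputDim,
                batchData.begin() + (i * maxSeqLen + j) * inputDim);
    }
  }
}

The TEST(Matrix, warpCTC) case at the end of the patch then runs this padding check over several batchSize and inputDim combinations.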