Commit ee52c759 authored by hedaoyuan

Delete BaseMatrixCompare and add AutoCompare::cmpWithoutArg

Parent c4103821
...@@ -15,11 +15,28 @@ limitations under the License. */

#pragma once

/**
 * This file provides an AutoCompare class to simplify the comparison of CPU
 * and GPU member functions.
 *
 * Using it takes two steps:
 * 1. Construct an AutoCompare object.
 *    When constructing an AutoCompare object, you can pass the err argument
 *    to specify the maximum allowed error between the CPU and GPU results.
 *
 * 2. Call the template function cmpWithArg or cmpWithoutArg.
 *    A. [cmpWithArg] The caller constructs the CPU arguments:
 *
 *       AutoCompare test;
 *       Init Argument arg1, arg2...
 *       test.cmpWithArg(function, arg1, arg2...)
 *
 *    B. [cmpWithoutArg] The caller does not need to construct arguments.
 *       This works when every matrix used as a function argument has the
 *       same size, such as the element-wise and aggregate functions defined
 *       in BaseMatrix.cpp:
 *
 *       AutoCompare test;
 *       test.cmpWithoutArg<I...>(function, height, width)
 */

#include <gtest/gtest.h>
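As a concrete illustration of the two entry points, here is a minimal usage sketch condensed from the test files changed in this commit; the sizes and the 1e-5 error bound are illustrative values, not part of the original change.

// Sketch only: mirrors testMatrixAddDotMulMMV (cmpWithArg) and the
// BaseMatrix_BaseMatrix test (cmpWithoutArg) further down in this commit.
size_t height = 100;
size_t width = 200;

// A. cmpWithArg: the caller constructs the CPU-side arguments; autoArgs
// mirrors them onto the GPU before the second call.
autotest::AutoCompare test1(height, width);
paddle::CpuMatrix arg1(height, width);
paddle::CpuMatrix arg2(1, width);
arg1.randomizeUniform();
arg2.randomizeUniform();
test1.cmpWithArg(&paddle::BaseMatrix::addDotMulMMV, arg1, arg2);

// B. cmpWithoutArg: the arguments at positions 0 and 1 are constructed
// automatically as height x width matrices.
autotest::AutoCompare test2(height, width, 1e-5);
test2.cmpWithoutArg<0, 1>(&paddle::BaseMatrix::dotMul, height, width);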
...@@ -30,6 +47,8 @@ limitations under the License. */

namespace autotest {
using paddle::BaseMatrix;
using paddle::CpuMatrix;
using paddle::GpuMatrix;
using paddle::CpuIVector;
using paddle::GpuIVector;
using paddle::CpuSparseMatrix;
...@@ -154,47 +173,6 @@ R call(C& obj, R (FC::*f)(FArgs...), Args&&... args) {
  return (obj.*f)(args...);
}
template <bool AsRowVector,
bool AsColVector,
std::size_t... I,
typename C,
typename R,
typename... Args,
typename AssertEq>
void BaseMatrixCompare(R (C::*f)(Args...), AssertEq compare) {
for (auto height : {1, 11, 73, 128, 200, 330}) {
for (auto width : {1, 3, 32, 100, 512, 1000}) {
CpuMatrix obj1(AsRowVector ? 1 : height, AsColVector ? 1 : width);
GpuMatrix obj2(AsRowVector ? 1 : height, AsColVector ? 1 : width);
init(obj1);
copy(obj2, obj1);
auto tuple1 = std::make_tuple(
construct<typename ReplaceType<
typename std::decay<
typename std::tuple_element<I,
std::tuple<Args...>>::type>::type,
CpuMatrix>::type>(height, width)...);
auto tuple2 = std::make_tuple(
construct<typename ReplaceType<
typename std::decay<
typename std::tuple_element<I,
std::tuple<Args...>>::type>::type,
GpuMatrix>::type>(height, width)...);
initTuple(tuple1);
copyTuple(tuple2, tuple1);
call(obj1, f, std::get<I>(tuple1)...);
call(obj2, f, std::get<I>(tuple2)...);
TensorCheck(compare, obj1, obj2);
}
}
}
// AutoCompare
template <typename T>
class ReturnType {
public:
...@@ -252,64 +230,60 @@ GpuSparseMatrix autoArgs(CpuSparseMatrix& v) {
class AutoCompare {
public:
  /**
   * err is the allowed calculation error.
   * The smaller the value of err,
   * the stricter the comparison is between CPU and GPU calculations.
   */
  AutoCompare(size_t height, size_t width, real err = 1e-3)
      : cpu(height, width), gpu(height, width), compare(err) {
    init(cpu);
    copy(gpu, cpu);
  }

  template <typename C, typename R, typename... FArgs, typename... Args>
  void cmpWithArg(R (C::*f)(FArgs...), Args&&... args) {
    static_assert(sizeof...(FArgs) == sizeof...(Args),
                  "size of parameter packs are not equal");
    call(cpu, f, args...);
    call(gpu, f, autoArgs(args)...);
    TensorCheck(compare, cpu, gpu);
  }

  template <std::size_t... I, typename C, typename R, typename... Args>
  void cmpWithoutArg(R (C::*f)(Args...), size_t height, size_t width) {
    static_assert(sizeof...(I) == sizeof...(Args),
                  "size of parameter packs are not equal");
    (void)height;
    (void)width;
    auto tuple1 = std::make_tuple(
        construct<typename ReplaceType<
            typename std::decay<
                typename std::tuple_element<I,
                                            std::tuple<Args...>>::type>::type,
            CpuMatrix>::type>(height, width)...);

    auto tuple2 = std::make_tuple(
        construct<typename ReplaceType<
            typename std::decay<
                typename std::tuple_element<I,
                                            std::tuple<Args...>>::type>::type,
            GpuMatrix>::type>(height, width)...);

    initTuple(tuple1);
    copyTuple(tuple2, tuple1);

    call(cpu, f, std::get<I>(tuple1)...);
    call(gpu, f, std::get<I>(tuple2)...);

    TensorCheck(compare, cpu, gpu);
  }

protected:
  CpuMatrix cpu;
  GpuMatrix gpu;
  AssertEqual compare;
};

}  // namespace autotest
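For intuition, the index pack I... simply names the argument positions that get replaced by automatically constructed matrices. Written out by hand, test.cmpWithoutArg<0, 1>(&BaseMatrix::dotMul, height, width) behaves roughly like the following; this is an illustration of what happens inside the class, not code from this commit, and copyFrom stands in for whatever copy helper copyTuple actually uses.

// Hand-written illustration; the real code builds these with construct,
// initTuple, copyTuple and ReplaceType as shown above.
CpuMatrix cpuArg0(height, width), cpuArg1(height, width);
GpuMatrix gpuArg0(height, width), gpuArg1(height, width);
cpuArg0.randomizeUniform();       // initTuple: randomize the CPU arguments
cpuArg1.randomizeUniform();
gpuArg0.copyFrom(cpuArg0);        // copyTuple: mirror them on the GPU
gpuArg1.copyFrom(cpuArg1);
cpu.dotMul(cpuArg0, cpuArg1);     // member call on the CpuMatrix member
gpu.dotMul(gpuArg0, gpuArg1);     // same call on the GpuMatrix member
TensorCheck(compare, cpu, gpu);   // element-wise comparison within err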
...@@ -14,201 +14,237 @@ limitations under the License. */
#ifndef PADDLE_ONLY_CPU
/**
 * This test file uses autotest::AutoCompare and cmpWithoutArg to compare the
 * CPU and GPU implementations of the member functions in BaseMatrix.cpp and
 * Matrix.cpp.
 */

#include <gtest/gtest.h>
#include "paddle/utils/Util.h"
#include "paddle/math/BaseMatrix.h"
#include "TestUtils.h"

using paddle::BaseMatrix;
using paddle::Matrix;
using autotest::AutoCompare;
// Test all void (BaseMatrix::*)() function
TEST(BaseMatrix, void) {
  for (auto height : {1, 3, 11, 73, 128, 200, 330}) {
    for (auto width : {1, 3, 32, 100, 512, 1000, 3210}) {
      auto compare = [height, width](void (BaseMatrix::*f)()) {
        AutoCompare test(height, width, 1e-5);
        test.cmpWithoutArg(f, height, width);
      };

      compare(&BaseMatrix::neg);
      compare(&BaseMatrix::exp);
      compare(&BaseMatrix::log);
      compare(&BaseMatrix::sqrt);
      compare(&BaseMatrix::square);
      compare(&BaseMatrix::reciprocal);
      compare(&BaseMatrix::abs);
      compare(&BaseMatrix::sign);
      compare(&BaseMatrix::zero);
      compare(&BaseMatrix::one);
    }
  }
}
// Test all void (BaseMatrix::*)(real) function
TEST(BaseMatrix, real) {
  for (auto height : {1, 3, 11, 73, 128, 200, 330}) {
    for (auto width : {1, 3, 32, 100, 512, 1000, 3210}) {
      auto compare = [height, width](void (BaseMatrix::*f)(real)) {
        AutoCompare test(height, width, 1e-5);
        test.cmpWithoutArg<0>(f, height, width);
      };

      compare(&BaseMatrix::pow);
      compare(&BaseMatrix::subScalar);
      compare(&BaseMatrix::mulScalar);
      compare(&BaseMatrix::divScalar);
      compare(&BaseMatrix::assign);
      compare(&BaseMatrix::add);
      compare(&BaseMatrix::biggerThanScalar);
      compare(&BaseMatrix::downClip);
    }
  }
}
// Test all void (BaseMatrix::*)(BaseMatrix&) function
TEST(BaseMatrix, BaseMatrix) {
  for (auto height : {1, 3, 11, 73, 128, 200, 330}) {
    for (auto width : {1, 3, 32, 100, 512, 1000, 3210}) {
      auto compare = [height, width](void (BaseMatrix::*f)(BaseMatrix&)) {
        AutoCompare test(height, width, 1e-5);
        test.cmpWithoutArg<0>(f, height, width);
      };

      compare(&BaseMatrix::assign);
      compare(&BaseMatrix::add);
      compare(&BaseMatrix::relu);
      compare(&BaseMatrix::reluDerivative);
      compare(&BaseMatrix::softrelu);
      compare(&BaseMatrix::softreluDerivative);
      compare(&BaseMatrix::brelu);
      compare(&BaseMatrix::breluDerivative);
      compare(&BaseMatrix::square);
      compare(&BaseMatrix::squareDerivative);
      compare(&BaseMatrix::tanh);
      compare(&BaseMatrix::tanhDerivative);
      compare(&BaseMatrix::reciprocal);
      compare(&BaseMatrix::reciprocalDerivative);
      compare(&BaseMatrix::abs);
      compare(&BaseMatrix::absDerivative);
      compare(&BaseMatrix::sigmoid);
      compare(&BaseMatrix::sigmoidDerivative);
      compare(&BaseMatrix::expDerivative);
      compare(&BaseMatrix::sign);
      compare(&BaseMatrix::exp);
      compare(&BaseMatrix::log);
      compare(&BaseMatrix::sqrt);
      compare(&BaseMatrix::dotMul);
      compare(&BaseMatrix::dotMulSquare);
      compare(&BaseMatrix::dotSquareMul);
      compare(&BaseMatrix::addColVector);
      compare(&BaseMatrix::addRowVector);
      compare(&BaseMatrix::mulRowVector);
      compare(&BaseMatrix::divRowVector);
      compare(&BaseMatrix::addP2P);
      compare(&BaseMatrix::invSqrt);
    }
  }
}
// Test all void (BaseMatrix::*)(real, real) function
TEST(BaseMatrix, real_real) {
  for (auto height : {1, 3, 11, 73, 128, 200, 330}) {
    for (auto width : {1, 3, 32, 100, 512, 1000, 3210}) {
      auto compare = [height, width](void (BaseMatrix::*f)(real, real)) {
        AutoCompare test(height, width, 1e-5);
        test.cmpWithoutArg<0, 1>(f, height, width);
      };

      compare(&BaseMatrix::add);
      compare(&BaseMatrix::clip);
    }
  }
}
// Test all void (BaseMatrix::*)(BaseMatrix&, real) function
TEST(BaseMatrix, BaseMatrix_real) {
  for (auto height : {1, 3, 11, 73, 128, 200, 330}) {
    for (auto width : {1, 3, 32, 100, 512, 1000, 3210}) {
      auto compare = [height, width](void (BaseMatrix::*f)(BaseMatrix&, real)) {
        AutoCompare test(height, width, 1e-5);
        test.cmpWithoutArg<0, 1>(f, height, width);
      };

      compare(&BaseMatrix::addBias);
      compare(&BaseMatrix::add);
      compare(&BaseMatrix::sub);
      compare(&BaseMatrix::pow);
      compare(&BaseMatrix::addScalar);
      compare(&BaseMatrix::subScalar);
      compare(&BaseMatrix::mulScalar);
      compare(&BaseMatrix::divScalar);
      compare(&BaseMatrix::scalarDiv);
      compare(&BaseMatrix::addSquare);
      compare(&BaseMatrix::isEqualTo);
    }
  }
}
// Test all void (BaseMatrix::*)(BaseMatrix&, BaseMatrix&) function
TEST(BaseMatrix, BaseMatrix_BaseMatrix) {
  for (auto height : {1, 3, 11, 73, 128, 200, 330}) {
    for (auto width : {1, 3, 32, 100, 512, 1000, 3210}) {
      auto compare = [height,
                      width](void (BaseMatrix::*f)(BaseMatrix&, BaseMatrix&)) {
        AutoCompare test(height, width, 1e-5);
        test.cmpWithoutArg<0, 1>(f, height, width);
      };

      compare(&BaseMatrix::softCrossEntropy);
      compare(&BaseMatrix::softCrossEntropyBp);
      compare(&BaseMatrix::binaryLabelCrossEntropy);
      compare(&BaseMatrix::binaryLabelCrossEntropyBp);
      compare(&BaseMatrix::sub);
      compare(&BaseMatrix::add2);
      compare(&BaseMatrix::dotMul);
      compare(&BaseMatrix::dotDiv);
      compare(&BaseMatrix::logisticRegressionLoss);
      compare(&BaseMatrix::logisticRegressionLossBp);
      compare(&BaseMatrix::biggerThan);
      compare(&BaseMatrix::max);
      compare(&BaseMatrix::dotMulSquare);
      compare(&BaseMatrix::dotSquareSquare);
    }
  }
}
void TestEelementWise(size_t height, size_t width) {
  AutoCompare rowScale(height, width);
  rowScale.cmpWithoutArg<0, 1, 2>(&BaseMatrix::rowScale, height, width);

  AutoCompare rowDotMul(height, width);
  rowDotMul.cmpWithoutArg<0, 1, 2>(&BaseMatrix::rowDotMul, height, width);

  AutoCompare binaryClassificationError(height, width);
  binaryClassificationError.cmpWithoutArg<0, 1, 2, 3>(
      &BaseMatrix::binaryClassificationError, height, width);

  AutoCompare sumOfSquaresBp(height, width);
  sumOfSquaresBp.cmpWithoutArg<0, 1>(&Matrix::sumOfSquaresBp, height, width);
}

void TestAggregateToRow(size_t height, size_t width) {
  AutoCompare maxCols(1, width);
  maxCols.cmpWithoutArg<0>(&BaseMatrix::maxCols, height, width);

  AutoCompare minCols(1, width);
  minCols.cmpWithoutArg<0>(&BaseMatrix::minCols, height, width);

  AutoCompare addDotMulVMM(1, width);
  addDotMulVMM.cmpWithoutArg<0, 1>(&BaseMatrix::addDotMulVMM, height, width);

  AutoCompare sumCols(1, width);
  sumCols.cmpWithoutArg<0, 1, 2>(&BaseMatrix::sumCols, height, width);

  AutoCompare collectBias(1, width);
  collectBias.cmpWithoutArg<0, 1>(
      static_cast<void (Matrix::*)(Matrix&, real)>(&Matrix::collectBias),
      height,
      width);
}

void TestAggregateToCol(size_t height, size_t width) {
  AutoCompare maxRows(height, 1);
  maxRows.cmpWithoutArg<0>(&BaseMatrix::maxRows, height, width);

  AutoCompare minRows(height, 1);
  minRows.cmpWithoutArg<0>(&BaseMatrix::minRows, height, width);

  AutoCompare sumRows(height, 1);
  sumRows.cmpWithoutArg<0, 1, 2>(&BaseMatrix::sumRows, height, width);

  AutoCompare sumOfSquares(height, 1);
  sumOfSquares.cmpWithoutArg<0, 1>(&Matrix::sumOfSquares, height, width);
}

TEST(BaseMatrix, Other) {
  for (auto height : {1, 3, 11, 73, 128, 200, 330}) {
    for (auto width : {1, 3, 32, 100, 512, 1000, 3210}) {
      TestEelementWise(height, width);
      TestAggregateToRow(height, width);
      TestAggregateToCol(height, width);
    }
  }
}
int main(int argc, char** argv) {
  testing::InitGoogleTest(&argc, argv);
  paddle::initMain(argc, argv);
  return RUN_ALL_TESTS();
}
...
...@@ -14,20 +14,8 @@ limitations under the License. */
#ifndef PADDLE_ONLY_CPU
/**
 * This test file uses autotest::AutoCompare and cmpWithArg to compare the
 * CPU and GPU implementations of the member functions in Matrix.cpp.
 */

#include <gtest/gtest.h>
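cmpWithArg is not limited to Matrix arguments: autoArgs also mirrors CPU-side IVector and sparse-matrix arguments onto the GPU. Below is a condensed sketch of the selectRows case from this file; the sizes, variable names, and the construction lines hidden by the collapsed diff are my own reconstruction, not code from the commit.

// Sketch of testMatrixSelectRows; sizes are illustrative.
int numSamples = 10, tableSize = 100, inputDim = 20;
AutoCompare test(numSamples, inputDim);
CpuMatrix table(tableSize, inputDim);   // lookup table (assumed shape)
CpuIVector ids(numSamples);             // row indices into the table
table.randomizeUniform();
ids.rand(tableSize);
test.cmpWithArg(&Matrix::selectRows, table, ids);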
...@@ -38,11 +26,6 @@ using paddle::Matrix;
using paddle::CpuMatrix;
using paddle::CpuIVector;
using paddle::CpuSparseMatrix;
using paddle::SparseValueType;
using paddle::SparseFormat;
using paddle::NO_VALUE;
using paddle::SPARSE_CSR;
using paddle::initMain;
using autotest::AutoCompare;

void testBilinearFwdBwd(int numSamples,
...@@ -57,7 +40,7 @@ void testBilinearFwdBwd(int numSamples,
  AutoCompare forward(numSamples, outWidth);
  CpuMatrix arg1(numSamples, inWidth);
  arg1.randomizeUniform();
  forward.cmpWithArg(&Matrix::bilinearForward,
                     arg1,
                     imgSizeH,
                     imgSizeW,
...@@ -70,7 +53,7 @@ void testBilinearFwdBwd(int numSamples,
  AutoCompare backward(numSamples, inWidth);
  CpuMatrix arg2(numSamples, outWidth);
  arg2.randomizeUniform();
  backward.cmpWithArg(&Matrix::bilinearBackward,
                      arg2,
                      2 * imgSizeH,
                      2 * imgSizeW,
...@@ -99,7 +82,8 @@ void testMatrixAddBias(int height, int width, real scale) {
  AutoCompare test(height, width);
  CpuMatrix arg1(1, width);
  arg1.randomizeUniform();
  test.cmpWithArg(
      static_cast<void (Matrix::*)(Matrix&, real)>(&Matrix::addBias),
      arg1,
      scale);
}
...@@ -110,7 +94,7 @@ void testMatrixAddDotMulMMV(int height, int width) {
  CpuMatrix arg2(1, width);
  arg1.randomizeUniform();
  arg2.randomizeUniform();
  test.cmpWithArg(&BaseMatrix::addDotMulMMV, arg1, arg2);
}

TEST(Matrix, unary) {
...@@ -128,14 +112,14 @@ void testMatrixAddAtOffset(int height, int width1, int width2, int offset) {
  AutoCompare test(height, width2);
  CpuMatrix arg1(height, width1);
  arg1.randomizeUniform();
  test.cmpWithArg(&Matrix::addAtOffset, arg1, offset);
}

void testMatrixAssignAtOffset(int height, int width1, int width2, int offset) {
  AutoCompare test(height, width2);
  CpuMatrix arg1(height, width1);
  arg1.randomizeUniform();
  test.cmpWithArg(&Matrix::assignAtOffset, arg1, offset);
}

TEST(Matrix, AtOffset) {
...@@ -162,7 +146,7 @@ void testMatrixSelectRows(int numSamples, int tableSize, int inputDim) {
  CpuIVector arg2(numSamples);
  arg1.randomizeUniform();
  arg2.rand(tableSize);
  test.cmpWithArg(&Matrix::selectRows, arg1, arg2);
}

TEST(Matrix, tableProjection) {
...@@ -183,7 +167,7 @@ void testMatrixCopyByRowIndex(int outHeight, int inHeight, int width) {
  CpuIVector arg2(outHeight);
  arg1.randomizeUniform();
  arg2.rand(inHeight);
  test.cmpWithArg(&Matrix::copyByRowIndex, arg1, arg2);
}

TEST(Matrix, copyByRowIndex) {
...@@ -204,7 +188,7 @@ void testCosSim(int heightX, int heightY, int width, real scale) {
  arg1.randomizeUniform();
  arg2.randomizeUniform();
  arg2.add(-0.5);
  test.cmpWithArg(&Matrix::cosSim, arg1, arg2, scale);
}

TEST(Matrix, cosSim) {
...@@ -226,7 +210,7 @@ void testParamReluForward(int height, int width, int w_height, int w_width) {
  arg1.randomizeUniform();
  arg2.randomizeUniform();
  arg1.add(-0.5);
  test.cmpWithArg(&Matrix::paramReluForward, arg1, arg2);
}

void testParamReluBackwardW(int height, int width, int w_height, int w_width) {
...@@ -236,7 +220,7 @@ void testParamReluBackwardW(int height, int width, int w_height, int w_width) {
  arg1.randomizeUniform();
  arg2.randomizeUniform();
  arg2.add(-0.5);
  test.cmpWithArg(&Matrix::paramReluBackwardW, arg1, arg2);
}

TEST(Matrix, paramRelu) {
...@@ -256,14 +240,14 @@ void testAddSharedBias(int numSamples, int dim, int channel) {
  AutoCompare test(numSamples, dim);
  CpuMatrix arg1(1, channel);
  arg1.randomizeUniform();
  test.cmpWithArg(&Matrix::addSharedBias, arg1, 1.0);
}

void testCollectSharedBias(int numSamples, int dim, int channel) {
  AutoCompare test(1, channel);
  CpuMatrix arg1(numSamples, dim);
  arg1.randomizeUniform();
  test.cmpWithArg(&Matrix::collectSharedBias, arg1, 1.0);
}

TEST(Matrix, sharedBias) {
...@@ -282,7 +266,8 @@ TEST(Matrix, sharedBias) {
void testMultiBinaryLabelCrossEntropy(int numSamples, int dim) {
  AutoCompare forward(numSamples, 1);
  CpuMatrix arg1(numSamples, dim);
  CpuSparseMatrix arg2(
      numSamples, dim, numSamples, paddle::NO_VALUE, paddle::SPARSE_CSR);
  CpuMatrix output1(numSamples, dim);
  output1.randomizeUniform();
...@@ -291,10 +276,10 @@ void testMultiBinaryLabelCrossEntropy(int numSamples, int dim) {
    const unsigned int id = std::rand() % dim;
    arg2.setRow(i, 1, &id, nullptr);
  }
  forward.cmpWithArg(&Matrix::multiBinaryLabelCrossEntropy, arg1, arg2);

  AutoCompare backward(numSamples, dim);
  backward.cmpWithArg(&Matrix::multiBinaryLabelCrossEntropyBp, arg1, arg2);
}

TEST(Matrix, multiBinaryCrossEntropy) {
...@@ -308,7 +293,7 @@ TEST(Matrix, multiBinaryCrossEntropy) {
int main(int argc, char** argv) {
  testing::InitGoogleTest(&argc, argv);
  paddle::initMain(argc, argv);
  return RUN_ALL_TESTS();
}
...