Commit 1df826e7 authored by hedaoyuan

Add an AutoCompare and move some tests from test_matrixCompare.cpp to test_Matrix.cpp

Parent f70fc4a4
......@@ -13,11 +13,14 @@ See the License for the specific language governing permissions and
limitations under the License. */
#include <cmath>
#include <gtest/gtest.h>
#include "paddle/math/Matrix.h"
using namespace paddle; // NOLINT
using namespace std; // NOLINT
using paddle::Matrix;
using paddle::CpuMatrix;
using paddle::GpuMatrix;
using paddle::VectorT;
using paddle::CpuVectorT;
using paddle::GpuVectorT;
namespace autotest {
......@@ -71,6 +74,53 @@ private:
CpuMatrix arg_;
};
template <>
class CopyToCpu<Matrix> {
public:
explicit CopyToCpu(const Matrix& arg)
: arg_(arg.getHeight(), arg.getWidth()) {
arg_.copyFrom(arg);
}
CpuMatrix& copiedArg() { return arg_; }
private:
CpuMatrix arg_;
};
template <typename T>
class CopyToCpu<CpuVectorT<T>> {
public:
explicit CopyToCpu(const CpuVectorT<T>& arg) : arg_(arg) {}
const CpuVectorT<T>& copiedArg() const { return arg_; }
private:
const CpuVectorT<T>& arg_;
};
template <typename T>
class CopyToCpu<GpuVectorT<T>> {
public:
explicit CopyToCpu(const GpuVectorT<T>& arg) : arg_(arg.getSize()) {
arg_.copyFrom(arg);
}
CpuVectorT<T>& copiedArg() { return arg_; }
private:
CpuVectorT<T> arg_;
};
template <typename T>
class CopyToCpu<VectorT<T>> {
public:
explicit CopyToCpu(const VectorT<T>& arg) : arg_(arg.getSize()) {
arg_.copyFrom(arg);
}
CpuVectorT<T>& copiedArg() { return arg_; }
private:
CpuVectorT<T> arg_;
};
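// The CopyToCpu specializations above normalize any tensor argument to a
// CPU-side object before comparison: GPU matrices and vectors are copied to
// the host, while the CpuVectorT specialization simply holds a reference,
// since no copy is needed.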
template <typename AssertEq>
void TensorCheck(AssertEq compare,
const CpuMatrix& matrix1,
......@@ -95,10 +145,30 @@ void TensorCheck(AssertEq compare,
EXPECT_EQ(count, 0) << "There are " << count << " different elements.";
}
template <typename AssertEq, class T>
void TensorCheck(AssertEq compare,
const CpuVectorT<T>& vector1,
const CpuVectorT<T>& vector2) {
CHECK(vector1.getSize() == vector2.getSize());
const T* data1 = vector1.getData();
const T* data2 = vector2.getData();
size_t size = vector1.getSize();
int count = 0;
for (size_t i = 0; i < size; i++) {
real a = data1[i];
real b = data2[i];
if (!compare(a, b)) {
count++;
}
}
EXPECT_EQ(count, 0) << "There are " << count << " different elements.";
}
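// The generic TensorCheck overload below copies both tensors to the CPU via
// CopyToCpu and then dispatches to one of the element-wise checks above.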
template <typename AssertEq, typename Tensor1, typename Tensor2>
extern void TensorCheck(AssertEq compare,
const Tensor1& tensor1,
const Tensor2& tensor2) {
void TensorCheck(AssertEq compare,
const Tensor1& tensor1,
const Tensor2& tensor2) {
TensorCheck(compare,
CopyToCpu<Tensor1>(tensor1).copiedArg(),
CopyToCpu<Tensor2>(tensor2).copiedArg());
......@@ -116,4 +186,24 @@ void TensorCheck(AssertEq compare, size_t args1, size_t args2) {
<< ", args2 = " << args2;
}
template <typename Tensor1, typename Tensor2>
void TensorCheckEqual(const Tensor1& tensor1, const Tensor2& tensor2) {
AssertEqual compare(0);
TensorCheck(compare,
CopyToCpu<Tensor1>(tensor1).copiedArg(),
CopyToCpu<Tensor2>(tensor2).copiedArg());
}
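// TensorCheckErr compares with a tolerance instead of exact equality: an
// absolute error of 1e-3 is allowed in single-precision builds, and 1e-10
// when PADDLE_TYPE_DOUBLE is defined.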
template <typename Tensor1, typename Tensor2>
void TensorCheckErr(const Tensor1& tensor1, const Tensor2& tensor2) {
#ifndef PADDLE_TYPE_DOUBLE
AssertEqual compare(1e-3);
#else
AssertEqual compare(1e-10);
#endif
TensorCheck(compare,
CopyToCpu<Tensor1>(tensor1).copiedArg(),
CopyToCpu<Tensor2>(tensor2).copiedArg());
}
} // namespace autotest
......@@ -14,21 +14,19 @@ limitations under the License. */
/**
 * TestUtils.h is used to automatically check that the CPU and GPU
 * implementations of a function are consistent.
 *
 * Auto-compare a BaseMatrix member function:
 * Use case:
 * a. void BaseMatrix::tanh(BaseMatrixT& b);
 *    Compare method: BaseMatrixCompare<0>(&BaseMatrix::tanh);
 *
 * Refer to test_Matrix.cpp and test_BaseMatrix.cpp for how to use autotest.
*/
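// A minimal sketch of the pattern (assuming BaseMatrix::neg() takes no
// arguments): each index in the template parameter list names a
// member-function argument that autotest generates and converts
// automatically (see BaseMatrixCompare in this file).
//
//   BaseMatrixCompare(&BaseMatrix::neg);      // member function with no arguments
//   BaseMatrixCompare<0>(&BaseMatrix::tanh);  // argument 0 is a matrix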
#include <gtest/gtest.h>
#include "paddle/math/Matrix.h"
#include "paddle/math/SparseMatrix.h"
#include "TensorCheck.h"
using namespace paddle; // NOLINT
using paddle::BaseMatrix;
using paddle::CpuIVector;
using paddle::GpuIVector;
using paddle::CpuSparseMatrix;
using paddle::GpuSparseMatrix;
namespace autotest {
......@@ -196,9 +194,7 @@ template <bool AsRowVector,
typename R,
typename... Args,
typename AssertEq>
void BaseMatrixCompare(R (C::*f)(Args...),
AssertEq compare,
bool checkArgs = false) {
void BaseMatrixCompare(R (C::*f)(Args...), AssertEq compare) {
for (auto height : {1, 11, 73, 128, 200, 330}) {
for (auto width : {1, 3, 32, 100, 512, 1000}) {
CpuMatrix obj1(AsRowVector ? 1 : height, AsColVector ? 1 : width);
......@@ -227,17 +223,91 @@ void BaseMatrixCompare(R (C::*f)(Args...),
call(obj2, f, std::get<I>(tuple2)...);
TensorCheck(compare, obj1, obj2);
if (checkArgs) {
checkTuple(tuple1, tuple2, compare);
}
}
}
}
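// ReturnType maps each CPU tensor type to its GPU counterpart, and autoArgs
// converts a CPU-side argument into that GPU type by copying the data to the
// device. AutoCompare uses them below to replay a call made with CPU
// arguments on the GPU side.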
template <typename T>
class ReturnType {
public:
typedef T type;
};
template <>
class ReturnType<CpuMatrix> {
public:
typedef GpuMatrix type;
};
template <>
class ReturnType<CpuIVector> {
public:
typedef GpuIVector type;
};
template <>
class ReturnType<CpuSparseMatrix> {
public:
typedef GpuSparseMatrix type;
};
template <typename T>
typename ReturnType<T>::type autoArgs(T v) {
return v;
}
template <>
GpuMatrix autoArgs(CpuMatrix v) {
GpuMatrix a(v.getHeight(), v.getWidth());
a.copyFrom(v);
return a;
}
template <>
GpuIVector autoArgs(CpuIVector v) {
GpuIVector a(v.getSize());
a.copyFrom(v);
return a;
}
template <>
GpuSparseMatrix autoArgs(CpuSparseMatrix v) {
GpuSparseMatrix a(v.getHeight(),
v.getWidth(),
v.getElementCnt(),
v.getValueType(),
v.getFormat());
a.copyFrom(v, HPPL_STREAM_DEFAULT);
hl_stream_synchronize(HPPL_STREAM_DEFAULT);
return a;
}
class AutoCompare {
public:
AutoCompare(size_t height, size_t width)
: cpu(height, width), gpu(height, width) {
init(cpu);
copy(gpu, cpu);
}
template <typename C, typename R, typename... FArgs, typename... Args>
void operator()(R (C::*f)(FArgs...), Args&&... args) {
call(cpu, f, args...);
call(gpu, f, autoArgs(args)...);
TensorCheckErr(cpu, gpu);
}
protected:
CpuMatrix cpu;
GpuMatrix gpu;
};
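// Usage sketch (mirroring testMatrixAddBias in test_Matrix.cpp; addBias is
// assumed to have the signature void Matrix::addBias(Matrix& b, real scale)):
//
//   AutoCompare test(height, width);
//   CpuMatrix bias(1, width);
//   bias.randomizeUniform();
//   test(static_cast<void (Matrix::*)(Matrix&, real)>(&Matrix::addBias),
//        bias, 1.0);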
} // namespace autotest
template <std::size_t... I, typename C, typename R, typename... Args>
void BaseMatrixCompare(R (C::*f)(Args...), bool checkArgs = false) {
void BaseMatrixCompare(R (C::*f)(Args...)) {
static_assert(sizeof...(I) == sizeof...(Args),
"size of parameter packs are not equal");
......@@ -247,7 +317,7 @@ void BaseMatrixCompare(R (C::*f)(Args...), bool checkArgs = false) {
autotest::AssertEqual compare(1e-10);
#endif
autotest::BaseMatrixCompare<false, false, I...>(f, compare, checkArgs);
autotest::BaseMatrixCompare<false, false, I...>(f, compare);
}
template <std::size_t... I, typename C, typename R, typename... Args>
......
......@@ -15,7 +15,7 @@ limitations under the License. */
#ifndef PADDLE_ONLY_CPU
/**
* This test file compares the CPU and GPU implementations of functions
* in BaseMatrix.cpp.
* in BaseMatrix.cpp or Matrix.cpp.
*/
#include <gtest/gtest.h>
......@@ -188,17 +188,22 @@ TEST(BaseMatrix, Other) {
BaseMatrixCompare<0, 1, 2>(&BaseMatrix::rowScale);
BaseMatrixCompare<0, 1, 2>(&BaseMatrix::rowDotMul);
BaseMatrixCompare<0, 1, 2, 3>(&BaseMatrix::binaryClassificationError);
BaseMatrixCompare<0, 1>(&Matrix::sumOfSquaresBp);
}
TEST(BaseMatrix, Aggregate) {
BaseMatrixAsColVector<0>(&BaseMatrix::maxRows);
BaseMatrixAsColVector<0>(&BaseMatrix::minRows);
BaseMatrixAsColVector<0, 1, 2>(&BaseMatrix::sumRows);
BaseMatrixAsColVector<0, 1>(&Matrix::sumOfSquares);
BaseMatrixAsRowVector<0>(&BaseMatrix::maxCols);
BaseMatrixAsRowVector<0>(&BaseMatrix::minCols);
BaseMatrixAsRowVector<0, 1>(&BaseMatrix::addDotMulVMM);
BaseMatrixAsRowVector<0, 1, 2>(&BaseMatrix::sumCols);
BaseMatrixAsRowVector<0, 1>(
static_cast<void (Matrix::*)(Matrix&, real)>(&Matrix::collectBias));
}
int main(int argc, char** argv) {
......
......@@ -14,25 +14,295 @@ limitations under the License. */
#ifndef PADDLE_ONLY_CPU
/**
* This test file compares the implementation of CPU and GPU function
* in Matrix.cpp.
* This test file uses AutoCompare to compare the CPU and GPU
* implementations of the member functions in Matrix.cpp.
*
* 1. Construct an AutoCompare object; an AutoCompare object contains
*    a CpuMatrix and a GpuMatrix.
* 2. Initialize the arguments required by the member function;
*    only the CPU arguments need to be initialized.
* 3. Run the test through the operator() template, which invokes the
*    member function on both matrices and compares the results.
*
* Use case:
*   AutoCompare test(...);
*   // initialize arguments arg1, arg2, ...
*   test(function, arg1, arg2, ...);
*
*/
#include <gtest/gtest.h>
#include "TestUtils.h"
using namespace paddle; // NOLINT
using paddle::CpuMatrix;
using paddle::SparseValueType;
using paddle::SparseFormat;
using paddle::NO_VALUE;
using paddle::SPARSE_CSR;
using paddle::initMain;
using autotest::TensorCheckEqual;
using autotest::TensorCheckErr;
using autotest::AutoCompare;
TEST(Matrix, Matrix) {
BaseMatrixCompare<0>(&Matrix::softmax, true);
BaseMatrixCompare<0, 1>(&Matrix::sumOfSquaresBp);
void testBilinearFwdBwd(int numSamples,
int imgSizeH,
int imgSizeW,
int channels) {
int inWidth = imgSizeH * imgSizeW * channels;
int outWidth = 2 * imgSizeH * 2 * imgSizeW * channels;
real ratioH = 0.5;
real ratioW = 0.5;
AutoCompare forward(numSamples, outWidth);
CpuMatrix arg1(numSamples, inWidth);
arg1.randomizeUniform();
forward(&Matrix::bilinearForward,
arg1,
imgSizeH,
imgSizeW,
2 * imgSizeH,
2 * imgSizeW,
channels,
ratioH,
ratioW);
AutoCompare backward(numSamples, inWidth);
CpuMatrix arg2(numSamples, outWidth);
arg2.randomizeUniform();
backward(&Matrix::bilinearBackward,
arg2,
2 * imgSizeH,
2 * imgSizeW,
imgSizeH,
imgSizeW,
channels,
ratioH,
ratioW);
}
TEST(Matrix, BilinearFwdBwd) {
for (auto numSamples : {5, 10}) {
for (auto channels : {8, 16}) {
for (auto imgSizeH : {14, 28}) {
for (auto imgSizeW : {16, 30}) {
VLOG(3) << " numSamples=" << numSamples << " channels=" << channels
<< " imgSizeH=" << imgSizeH << " imgSizeW=" << imgSizeW;
testBilinearFwdBwd(numSamples, imgSizeH, imgSizeW, channels);
}
}
}
}
}
void testMatrixAddBias(int height, int width, real scale) {
AutoCompare test(height, width);
CpuMatrix arg1(1, width);
arg1.randomizeUniform();
test(static_cast<void (Matrix::*)(Matrix&, real)>(&Matrix::addBias),
arg1,
scale);
}
void testMatrixAddDotMulMMV(int height, int width) {
AutoCompare test(height, width);
CpuMatrix arg1(height, width);
CpuMatrix arg2(1, width);
arg1.randomizeUniform();
arg2.randomizeUniform();
test(&BaseMatrix::addDotMulMMV, arg1, arg2);
}
TEST(Matrix, unary) {
for (auto height : {1, 3, 11, 73, 128, 200, 330}) {
for (auto width : {1, 3, 32, 100, 512, 1000, 3210}) {
VLOG(3) << " height=" << height << " width=" << width;
testMatrixAddBias(height, width, 1.0);
testMatrixAddBias(height, width, 3.5);
testMatrixAddDotMulMMV(height, width);
}
}
}
void testMatrixAddAtOffset(int height, int width1, int width2, int offset) {
AutoCompare test(height, width2);
CpuMatrix arg1(height, width1);
arg1.randomizeUniform();
test(&Matrix::addAtOffset, arg1, offset);
}
void testMatrixAssignAtOffset(int height, int width1, int width2, int offset) {
AutoCompare test(height, width2);
CpuMatrix arg1(height, width1);
arg1.randomizeUniform();
test(&Matrix::assignAtOffset, arg1, offset);
}
TEST(Matrix, AtOffset) {
for (auto height : {1, 11, 73, 128, 200}) {
for (auto width1 : {1, 32, 100, 512, 1000}) {
for (auto width2 : {1, 32, 100, 512, 1000}) {
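// Choose a random column offset strictly smaller than the width
// difference, so the narrower matrix always fits inside the wider one.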
int columnOffset = 0;
int offset = std::abs(width1 - width2);
if (offset) {
columnOffset = std::rand() % offset;
}
VLOG(3) << " height=" << height << " width1=" << width1
<< " width2=" << width2 << " columnOffset = " << columnOffset;
testMatrixAddAtOffset(height, width1, width2, columnOffset);
testMatrixAssignAtOffset(height, width1, width2, columnOffset);
}
}
}
}
void testMatrixSelectRows(int numSamples, int tableSize, int inputDim) {
AutoCompare test(numSamples, inputDim);
CpuMatrix arg1(tableSize, inputDim);
CpuIVector arg2(numSamples);
arg1.randomizeUniform();
arg2.rand(tableSize);
test(&Matrix::selectRows, arg1, arg2);
}
TEST(Matrix, tableProjection) {
for (auto numSamples : {10, 100, 1000, 10000, 80000}) {
for (auto tableSize : {10, 100}) {
for (auto inputDim : {20, 50}) {
VLOG(3) << " numSamples=" << numSamples << " tableSize=" << tableSize
<< " inputDim=" << inputDim;
testMatrixSelectRows(numSamples, tableSize, inputDim);
}
}
}
}
void testMatrixCopyByRowIndex(int outHeight, int inHeight, int width) {
AutoCompare test(outHeight, width);
CpuMatrix arg1(inHeight, width);
CpuIVector arg2(outHeight);
arg1.randomizeUniform();
arg2.rand(inHeight);
test(&Matrix::copyByRowIndex, arg1, arg2);
}
TEST(Matrix, Aggregate) {
BaseMatrixAsRowVector<0, 1>(
static_cast<void (Matrix::*)(Matrix&, real)>(&Matrix::collectBias));
TEST(Matrix, copyByRowIndex) {
for (auto outHeight : {31, 500, 1000}) {
for (auto inHeight : {17, 257, 500, 1200}) {
for (auto width : {512, 1024}) {
VLOG(3) << outHeight << " " << inHeight << " " << width;
testMatrixCopyByRowIndex(outHeight, inHeight, width);
}
}
}
}
void testCosSim(int heightX, int heightY, int width, real scale) {
AutoCompare test(heightX, 1);
CpuMatrix arg1(heightX, width);
CpuMatrix arg2(heightY, width);
arg1.randomizeUniform();
arg2.randomizeUniform();
arg2.add(-0.5);
test(&Matrix::cosSim, arg1, arg2, scale);
}
TEST(Matrix, cosSim) {
for (auto heightX : {10, 100, 1000}) {
for (auto heightY : {1, heightX}) {
for (auto width : {10, 100, 1000}) {
for (auto scale : {1.0, 2.0}) {
testCosSim(heightX, heightY, width, scale);
}
}
}
}
}
void testParamReluForward(int height, int width, int w_height, int w_width) {
AutoCompare test(height, width);
CpuMatrix arg1(height, width);
CpuMatrix arg2(w_height, w_width);
arg1.randomizeUniform();
arg2.randomizeUniform();
arg1.add(-0.5);
test(&Matrix::paramReluForward, arg1, arg2);
}
void testParamReluBackwardW(int height, int width, int w_height, int w_width) {
AutoCompare test(w_height, w_width);
CpuMatrix arg1(height, width);
CpuMatrix arg2(height, width);
arg1.randomizeUniform();
arg2.randomizeUniform();
arg2.add(-0.5);
test(&Matrix::paramReluBackwardW, arg1, arg2);
}
TEST(Matrix, paramRelu) {
for (auto height : {10, 100}) {
for (auto width : {10, 100}) {
for (auto w_height : {1, 2}) {
for (auto w_width : {1, 2}) {
testParamReluForward(height, width, w_height, w_width);
testParamReluBackwardW(height, width, w_height, w_width);
}
}
}
}
}
void testAddSharedBias(int numSamples, int dim, int channel) {
AutoCompare test(numSamples, dim);
CpuMatrix arg1(1, channel);
arg1.randomizeUniform();
test(&Matrix::addSharedBias, arg1, 1.0);
}
void testCollectSharedBias(int numSamples, int dim, int channel) {
AutoCompare test(1, channel);
CpuMatrix arg1(numSamples, dim);
arg1.randomizeUniform();
test(&Matrix::collectSharedBias, arg1, 1.0);
}
TEST(Matrix, sharedBias) {
for (auto numSamples : {1, 100, 520}) {
for (auto dim : {100 * 16, 100 * 32}) {
for (auto channel : {8, 16}) {
VLOG(3) << " numSamples=" << numSamples << " dim=" << dim
<< " channel=" << channel;
testAddSharedBias(numSamples, dim, channel);
testCollectSharedBias(numSamples, dim, channel);
}
}
}
}
void testMultiBinaryLabelCrossEntropy(int numSamples, int dim) {
AutoCompare forward(numSamples, 1);
CpuMatrix arg1(numSamples, dim);
CpuSparseMatrix arg2(numSamples, dim, numSamples, NO_VALUE, SPARSE_CSR);
CpuMatrix output1(numSamples, dim);
output1.randomizeUniform();
output1.softmax(arg1);
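// Mark one random positive label per sample in the sparse CSR label matrix.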
for (int i = 0; i < numSamples; i++) {
const unsigned int id = std::rand() % dim;
arg2.setRow(i, 1, &id, nullptr);
}
forward(&Matrix::multiBinaryLabelCrossEntropy, arg1, arg2);
AutoCompare backward(numSamples, dim);
backward(&Matrix::multiBinaryLabelCrossEntropyBp, arg1, arg2);
}
BaseMatrixAsColVector<0, 1>(&Matrix::sumOfSquares);
TEST(Matrix, multiBinaryCrossEntropy) {
for (auto numSamples : {100, 1000, 10000}) {
for (auto dim : {100, 1000, 10000}) {
VLOG(3) << " numSamples=" << numSamples << " dim=" << dim;
testMultiBinaryLabelCrossEntropy(numSamples, dim);
}
}
}
int main(int argc, char** argv) {
......