Commit 85e0cd70 authored by: H hedaoyuan

Move some BaseMatrix tests from test_matrixCompare.cpp to test_BaseMatrix.cpp

Parent ff7b4284
@@ -21,60 +21,29 @@ using namespace std; // NOLINT
namespace autotest {
class CheckEqual {
class AssertEqual {
public:
inline int operator()(real a, real b) {
if (a != b) {
return 1;
}
return 0;
}
};
AssertEqual(real err = 0) : err_(err) {}
class CheckWithErr {
public:
CheckWithErr() {
#ifndef PADDLE_TYPE_DOUBLE
err_ = 1e-5;
#else
err_ = 1e-10;
#endif
}
inline int operator()(real a, real b) {
if (std::fabs(a - b) > err_) {
if ((std::fabs(a - b) / std::fabs(a)) > (err_ / 10.0f)) {
return 1;
inline bool operator()(real a, real b) {
if (err_ == 0) {
if (a != b) {
return false;
}
} else {
if (std::fabs(a - b) > err_) {
if ((std::fabs(a - b) / std::fabs(a)) > (err_ / 10.0f)) {
return false;
}
}
}
return 0;
return true;
}
private:
real err_;
};
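For orientation while reading this diff, here is a standalone sketch of the comparison rule AssertEqual implements (hypothetical helper, assuming `real` is `float`, i.e. PADDLE_TYPE_DOUBLE is not defined): with err_ == 0 the check is exact; otherwise a pair counts as different only when the absolute error exceeds err_ and the relative error exceeds err_ / 10.

```cpp
#include <cassert>
#include <cmath>

using real = float;  // assumption: PADDLE_TYPE_DOUBLE not defined

// Standalone re-statement of the rule in AssertEqual::operator() above.
static bool approxEqual(real a, real b, real err) {
  if (err == 0) return a == b;  // exact comparison when no tolerance is given
  // different only if both the absolute and the relative error are too large
  return !(std::fabs(a - b) > err &&
           (std::fabs(a - b) / std::fabs(a)) > (err / 10.0f));
}

int main() {
  assert(approxEqual(1.0f, 1.0f, 0));              // exact mode, identical values pass
  assert(!approxEqual(1.0f, 1.0f + 1e-3f, 0));     // exact mode, any difference fails
  assert(approxEqual(1.0f, 1.0f + 1e-6f, 1e-5f));  // within the absolute tolerance
  assert(approxEqual(1000.0f, 1000.0001f, 1e-5f)); // absolute error exceeds err, but the
                                                   // relative error stays below err / 10
  return 0;
}
```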
template<typename Check>
void TensorCheck(Check op, const CpuMatrix& matrix1, const CpuMatrix& matrix2) {
CHECK(matrix1.getHeight() == matrix2.getHeight());
CHECK(matrix1.getWidth() == matrix2.getWidth());
int height = matrix1.getHeight();
int width = matrix1.getWidth();
const real* data1 = matrix1.getData();
const real* data2 = matrix2.getData();
int count = 0;
for (int i = 0; i < height; i++) {
for (int j = 0; j < width; j++) {
real a = data1[i * width + j];
real b = data2[i * width + j];
count += op(a, b);
}
}
EXPECT_EQ(count, 0) << "There are " << count << " different elements.";
}
template <typename Tensor>
class CopyToCpu;
@@ -101,10 +70,36 @@ private:
CpuMatrix arg_;
};
template<typename Tensor1, typename Tensor2>
extern void TensorCheckErr(const Tensor1& tensor1, const Tensor2& tensor2) {
template<typename AssertEq>
void TensorCheck(AssertEq compare,
const CpuMatrix& matrix1,
const CpuMatrix& matrix2) {
CHECK(matrix1.getHeight() == matrix2.getHeight());
CHECK(matrix1.getWidth() == matrix2.getWidth());
int height = matrix1.getHeight();
int width = matrix1.getWidth();
const real* data1 = matrix1.getData();
const real* data2 = matrix2.getData();
int count = 0;
for (int i = 0; i < height; i++) {
for (int j = 0; j < width; j++) {
real a = data1[i * width + j];
real b = data2[i * width + j];
if (!compare(a, b)) {
count++;
}
}
}
EXPECT_EQ(count, 0) << "There are " << count << " different elements.";
}
template<typename AssertEq, typename Tensor1, typename Tensor2>
extern void TensorCheck(AssertEq compare,
const Tensor1& tensor1,
const Tensor2& tensor2) {
TensorCheck(
CheckWithErr(),
compare,
CopyToCpu<Tensor1>(tensor1).copiedArg(),
CopyToCpu<Tensor2>(tensor2).copiedArg());
}
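The two TensorCheck overloads above implement a copy-then-compare pattern: every tensor argument is first normalized to host memory through CopyToCpu, and one CpuMatrix routine performs the element-wise check. A toy, self-contained sketch of that dispatch (hypothetical Toy* types, not the Paddle classes):

```cpp
#include <cmath>
#include <cstddef>
#include <iostream>
#include <vector>

struct ToyCpuTensor { std::vector<float> data; };
struct ToyGpuTensor { std::vector<float> data; };  // stand-in for device memory

template <typename Tensor> struct CopyToHost;

template <> struct CopyToHost<ToyCpuTensor> {
  explicit CopyToHost(const ToyCpuTensor& t) : arg_(t) {}
  const ToyCpuTensor& copiedArg() const { return arg_; }
  const ToyCpuTensor& arg_;  // already on the host, no copy needed
};

template <> struct CopyToHost<ToyGpuTensor> {
  explicit CopyToHost(const ToyGpuTensor& t) { arg_.data = t.data; }  // "device-to-host" copy
  const ToyCpuTensor& copiedArg() const { return arg_; }
  ToyCpuTensor arg_;
};

// Host-side comparison: counts the elements the comparator rejects.
template <typename Compare>
int tensorCheck(Compare cmp, const ToyCpuTensor& a, const ToyCpuTensor& b) {
  int count = 0;
  for (std::size_t i = 0; i < a.data.size(); ++i) {
    if (!cmp(a.data[i], b.data[i])) ++count;
  }
  return count;
}

// Generic entry point: normalize both arguments to host memory, then compare.
template <typename Compare, typename T1, typename T2>
int tensorCheck(Compare cmp, const T1& t1, const T2& t2) {
  return tensorCheck(cmp, CopyToHost<T1>(t1).copiedArg(), CopyToHost<T2>(t2).copiedArg());
}

int main() {
  ToyCpuTensor cpu{{1.0f, 2.0f, 3.0f}};
  ToyGpuTensor gpu{{1.0f, 2.0f, 3.00001f}};
  auto cmp = [](float x, float y) { return std::fabs(x - y) <= 1e-4f; };
  std::cout << tensorCheck(cmp, cpu, gpu) << " different elements\n";  // prints "0 ..."
}
```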
@@ -107,12 +107,16 @@ R call(C& obj, R (FC::*f)(FArgs...), Args&&... args) {
return (obj.*f)(args...);
}
template <std::size_t... I, typename C, typename R, typename ...Args>
void BaseMatrixCompare(R (C::*f)(Args...)) {
template <bool ApplyRow, bool ApplyCol,
std::size_t... I, typename C, typename R, typename ...Args,
typename AssertEq>
void BaseMatrixCompare(R (C::*f)(Args...), AssertEq compare) {
for (auto height : {1, 11, 73, 128, 200, 330}) {
for (auto width : {1, 3, 32, 100, 512, 1000, 3210}) {
CpuMatrix obj1(height, width);
GpuMatrix obj2(height, width);
for (auto width : {1, 3, 32, 100, 512, 1000}) {
CpuMatrix obj1(ApplyCol ? 1 : height,
ApplyRow ? 1 : width);
GpuMatrix obj2(ApplyCol ? 1 : height,
ApplyRow ? 1 : width);
init(obj1);
copy(obj2, obj1);
@@ -132,7 +136,7 @@ void BaseMatrixCompare(R (C::*f)(Args...)) {
call(obj1, f, std::get<I>(tuple1)...);
call(obj2, f, std::get<I>(tuple2)...);
TensorCheckErr(obj1, obj2);
TensorCheck(compare, obj1, obj2);
}
}
}
@@ -144,6 +148,39 @@ void BaseMatrixCompare(R (C::*f)(Args...)) {
static_assert(sizeof...(I) == sizeof...(Args),
"size of parameter packs are not equal");
autotest::BaseMatrixCompare<I...>(f);
#ifndef PADDLE_TYPE_DOUBLE
autotest::AssertEqual compare(1e-5);
#else
autotest::AssertEqual compare(1e-10);
#endif
autotest::BaseMatrixCompare<false, false, I...>(f, compare);
}
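The call(obj, f, std::get<I>(tuple)...) pattern used above is what lets one BaseMatrixCompare template drive member functions with any number of arguments: the explicitly supplied index pack decides which tuple elements become call arguments. A minimal standalone illustration of that mechanism (hypothetical Counter type, not the Paddle code):

```cpp
#include <cstddef>
#include <iostream>
#include <tuple>

struct Counter {
  void add(double p)            { value += p; }
  void madd(double a, double b) { value += a * b; }
  double value = 0;
};

// The index pack I... selects which tuple elements are forwarded to the call,
// mirroring call(obj, f, std::get<I>(tuple)...) in the test framework.
template <std::size_t... I, typename C, typename R, typename... Args, typename Tuple>
void invoke(C& obj, R (C::*f)(Args...), const Tuple& args) {
  (obj.*f)(std::get<I>(args)...);
}

int main() {
  Counter c;
  auto args = std::make_tuple(2.0, 3.0);
  invoke<0>(c, &Counter::add, args);      // uses tuple element 0 only
  invoke<0, 1>(c, &Counter::madd, args);  // uses tuple elements 0 and 1
  std::cout << c.value << "\n";           // 2 + 2 * 3 = 8
}
```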
template <std::size_t... I, typename C, typename R, typename ...Args>
void BaseMatrixApplyRow(R (C::*f)(Args...)) {
static_assert(sizeof...(I) == sizeof...(Args),
"size of parameter packs are not equal");
#ifndef PADDLE_TYPE_DOUBLE
autotest::AssertEqual compare(1e-3);
#else
autotest::AssertEqual compare(1e-8);
#endif
autotest::BaseMatrixCompare<true, false, I...>(f, compare);
}
template <std::size_t... I, typename C, typename R, typename ...Args>
void BaseMatrixApplyCol(R (C::*f)(Args...)) {
static_assert(sizeof...(I) == sizeof...(Args),
"size of parameter packs are not equal");
#ifndef PADDLE_TYPE_DOUBLE
autotest::AssertEqual compare(1e-3);
#else
autotest::AssertEqual compare(1e-8);
#endif
autotest::BaseMatrixCompare<false, true, I...>(f, compare);
}
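BaseMatrixApplyRow and BaseMatrixApplyCol deliberately use a looser tolerance (1e-3 for float versus 1e-5 in BaseMatrixCompare) because aggregate operations such as sumRows and sumCols fold a whole row or column into a single value, and single-precision rounding error grows with the number of accumulated terms. A small, illustrative demonstration of that effect (not part of the test code):

```cpp
#include <cmath>
#include <iostream>

int main() {
  // Accumulate the same 1000 single-precision terms with float and double accumulators.
  float sumF = 0.0f;
  double sumD = 0.0;
  for (int i = 0; i < 1000; ++i) {
    sumF += 0.1f;
    sumD += 0.1f;
  }
  // The float accumulator typically drifts by roughly 1e-3, which is far above a
  // 1e-5 per-element tolerance but still within the 1e-3 aggregate tolerance.
  std::cout << std::fabs(sumF - static_cast<float>(sumD)) << "\n";
}
```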
@@ -26,25 +26,224 @@ limitations under the License. */
using namespace paddle; // NOLINT
using namespace std; // NOLINT
TEST(BaseMatrix, apply) {
// member function with no argument
BaseMatrixCompare(&BaseMatrix::neg);
/**
* Test member functions whose prototype is
* void (BaseMatrix::*)().
*/
TEST(BaseMatrix, void) {
typedef void (BaseMatrix::*FunctionProto)();
#define BASEMATRIXCOMPARE(function) \
BaseMatrixCompare(static_cast<FunctionProto>(&BaseMatrix::function));
BASEMATRIXCOMPARE(neg);
BASEMATRIXCOMPARE(exp);
BASEMATRIXCOMPARE(log);
BASEMATRIXCOMPARE(sqrt);
BASEMATRIXCOMPARE(square);
BASEMATRIXCOMPARE(reciprocal);
BASEMATRIXCOMPARE(abs);
BASEMATRIXCOMPARE(sign);
BASEMATRIXCOMPARE(zero);
BASEMATRIXCOMPARE(one);
#undef BASEMATRIXCOMPARE
}
/**
* Test member functions whose prototype is
* void (BaseMatrix::*)(real).
*/
TEST(BaseMatrix, real) {
typedef void (BaseMatrix::*FunctionProto)(real);
#define BASEMATRIXCOMPARE(function) \
BaseMatrixCompare<0>(static_cast<FunctionProto>(&BaseMatrix::function));
BASEMATRIXCOMPARE(pow);
BASEMATRIXCOMPARE(subScalar);
BASEMATRIXCOMPARE(mulScalar);
BASEMATRIXCOMPARE(divScalar);
BASEMATRIXCOMPARE(assign);
BASEMATRIXCOMPARE(add);
BASEMATRIXCOMPARE(biggerThanScalar);
BASEMATRIXCOMPARE(downClip);
#undef BASEMATRIXCOMPARE
}
/**
* Test member functions whose prototype is
* void (BaseMatrix::*)(real, real).
*/
TEST(BaseMatrix, real_real) {
typedef void (BaseMatrix::*FunctionProto)(real, real);
#define BASEMATRIXCOMPARE(function) \
BaseMatrixCompare<0, 1>(static_cast<FunctionProto>(&BaseMatrix::function));
BASEMATRIXCOMPARE(add);
BASEMATRIXCOMPARE(clip);
#undef BASEMATRIXCOMPARE
}
/**
* Test member functions whose prototype is
* void (BaseMatrix::*)(BaseMatrix&).
*/
TEST(BaseMatrix, BaseMatrix) {
typedef void (BaseMatrix::*FunctionProto)(BaseMatrix&);
#define BASEMATRIXCOMPARE(function) \
BaseMatrixCompare<0>(static_cast<FunctionProto>(&BaseMatrix::function));
BASEMATRIXCOMPARE(assign);
BASEMATRIXCOMPARE(add);
BASEMATRIXCOMPARE(relu);
BASEMATRIXCOMPARE(reluDerivative);
BASEMATRIXCOMPARE(softrelu);
BASEMATRIXCOMPARE(softreluDerivative);
BASEMATRIXCOMPARE(brelu);
BASEMATRIXCOMPARE(breluDerivative);
BASEMATRIXCOMPARE(square);
BASEMATRIXCOMPARE(squareDerivative);
BASEMATRIXCOMPARE(tanh);
BASEMATRIXCOMPARE(tanhDerivative);
BASEMATRIXCOMPARE(reciprocal);
BASEMATRIXCOMPARE(reciprocalDerivative);
BASEMATRIXCOMPARE(abs);
BASEMATRIXCOMPARE(absDerivative);
BASEMATRIXCOMPARE(sigmoid);
BASEMATRIXCOMPARE(sigmoidDerivative);
BASEMATRIXCOMPARE(expDerivative);
BASEMATRIXCOMPARE(sign);
BASEMATRIXCOMPARE(exp);
BASEMATRIXCOMPARE(log);
BASEMATRIXCOMPARE(sqrt);
BASEMATRIXCOMPARE(dotMul);
BASEMATRIXCOMPARE(dotMulSquare);
BASEMATRIXCOMPARE(dotSquareMul);
BASEMATRIXCOMPARE(addColVector);
BASEMATRIXCOMPARE(addRowVector);
BASEMATRIXCOMPARE(mulRowVector);
BASEMATRIXCOMPARE(divRowVector);
BASEMATRIXCOMPARE(addP2P);
BASEMATRIXCOMPARE(invSqrt);
#undef BASEMATRIXCOMPARE
}
/**
* Test member functions whose prototype is
* void (BaseMatrix::*)(BaseMatrix&, real).
*/
TEST(BaseMatrix, BaseMatrix_real) {
typedef void (BaseMatrix::*FunctionProto)(BaseMatrix&, real);
#define BASEMATRIXCOMPARE(function) \
BaseMatrixCompare<0, 1>(static_cast<FunctionProto>(&BaseMatrix::function));
// If the member function is overloaded, use static_cast to specify which
// member function needs to be tested.
BaseMatrixCompare(
static_cast<void (BaseMatrix::*)()>(&BaseMatrix::exp));
BaseMatrixCompare(
static_cast<void (BaseMatrix::*)()>(&BaseMatrix::sqrt));
BASEMATRIXCOMPARE(addBias);
BASEMATRIXCOMPARE(add);
BASEMATRIXCOMPARE(sub);
BASEMATRIXCOMPARE(pow);
BASEMATRIXCOMPARE(addScalar);
BASEMATRIXCOMPARE(subScalar);
BASEMATRIXCOMPARE(mulScalar);
BASEMATRIXCOMPARE(divScalar);
BASEMATRIXCOMPARE(scalarDiv);
BASEMATRIXCOMPARE(addSquare);
BASEMATRIXCOMPARE(isEqualTo);
#undef BASEMATRIXCOMPARE
}
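As the comment above notes, several BaseMatrix members (for example exp, sqrt, add, and pow) are overloaded, so taking their address requires a static_cast that names the exact overload; the BASEMATRIXCOMPARE macros rely on this. A standalone illustration with a hypothetical struct S (not BaseMatrix):

```cpp
#include <iostream>

struct S {
  void add(double) {}      // analogous to BaseMatrix::add(real)
  void add(S&, double) {}  // analogous to BaseMatrix::add(BaseMatrix&, real)
};

// Deduces the member function's signature from a pointer-to-member argument.
template <typename C, typename R, typename... Args>
void describe(R (C::*)(Args...)) {
  std::cout << "member function taking " << sizeof...(Args) << " argument(s)\n";
}

int main() {
  // describe(&S::add);  // would not compile: &S::add names an overload set
  describe(static_cast<void (S::*)(double)>(&S::add));      // prints "... 1 argument(s)"
  describe(static_cast<void (S::*)(S&, double)>(&S::add));  // prints "... 2 argument(s)"
}
```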
/**
* Test member functions whose prototype is
* void (BaseMatrix::*)(BaseMatrix&, BaseMatrix&).
*/
TEST(BaseMatrix, BaseMatrix_BaseMatrix) {
typedef void (BaseMatrix::*FunctionProto)(BaseMatrix&, BaseMatrix&);
#define BASEMATRIXCOMPARE(function) \
BaseMatrixCompare<0, 1>(static_cast<FunctionProto>(&BaseMatrix::function));
BASEMATRIXCOMPARE(softCrossEntropy);
BASEMATRIXCOMPARE(softCrossEntropyBp);
BASEMATRIXCOMPARE(binaryLabelCrossEntropy);
BASEMATRIXCOMPARE(binaryLabelCrossEntropyBp);
BASEMATRIXCOMPARE(sub);
BASEMATRIXCOMPARE(add2);
BASEMATRIXCOMPARE(dotMul);
BASEMATRIXCOMPARE(dotDiv);
BASEMATRIXCOMPARE(logisticRegressionLoss);
BASEMATRIXCOMPARE(logisticRegressionLossBp);
BASEMATRIXCOMPARE(biggerThan);
BASEMATRIXCOMPARE(max);
BASEMATRIXCOMPARE(dotMulSquare);
BASEMATRIXCOMPARE(dotSquareSquare);
#undef BASEMATRIXCOMPARE
}
/**
* Test aggregate member functions whose prototype is
* void (BaseMatrix::*)(BaseMatrix&).
*/
TEST(Aggregate, BaseMatrix) {
typedef void (BaseMatrix::*FunctionProto)(BaseMatrix&);
#define BASEMATRIXAPPLYROW(function) \
BaseMatrixApplyRow<0>(static_cast<FunctionProto>(&BaseMatrix::function));
#define BASEMATRIXAPPLYCOL(function) \
BaseMatrixApplyCol<0>(static_cast<FunctionProto>(&BaseMatrix::function));
BASEMATRIXAPPLYROW(maxRows);
BASEMATRIXAPPLYROW(minRows);
BASEMATRIXAPPLYCOL(sumCols);
BASEMATRIXAPPLYCOL(maxCols);
BASEMATRIXAPPLYCOL(minCols);
#undef BASEMATRIXAPPLYROW
#undef BASEMATRIXAPPLYCOL
}
/**
* Test aggregate member functions whose prototype is
* void (BaseMatrix::*)(BaseMatrix&, BaseMatrix&).
*/
TEST(Aggregate, BaseMatrix_BaseMatrix) {
typedef void (BaseMatrix::*FunctionProto)(BaseMatrix&, BaseMatrix&);
#define BASEMATRIXAPPLYROW(function) \
BaseMatrixApplyRow<0, 1>(static_cast<FunctionProto>(&BaseMatrix::function));
#define BASEMATRIXAPPLYCOL(function) \
BaseMatrixApplyCol<0, 1>(static_cast<FunctionProto>(&BaseMatrix::function));
BASEMATRIXAPPLYCOL(addDotMulVMM);
#undef BASEMATRIXAPPLYROW
#undef BASEMATRIXAPPLYCOL
}
/**
* Test aggregate member functions whose prototype is
* void (BaseMatrix::*)(BaseMatrix&, real, real).
*/
TEST(Aggregate, BaseMatrix_real_real) {
typedef void (BaseMatrix::*FunctionProto)(BaseMatrix&, real, real);
#define BASEMATRIXAPPLYROW(function) \
BaseMatrixApplyRow<0, 1, 2>(\
static_cast<FunctionProto>(&BaseMatrix::function));
// member function with one argument
#define BASEMATRIXAPPLYCOL(function) \
BaseMatrixApplyCol<0, 1, 2>(\
static_cast<FunctionProto>(&BaseMatrix::function));
BaseMatrixCompare<0>(&BaseMatrix::tanh);
BASEMATRIXAPPLYROW(sumRows);
BASEMATRIXAPPLYCOL(sumCols);
BaseMatrixCompare<0>(
static_cast<void (BaseMatrix::*)(real)>(&BaseMatrix::assign));
BaseMatrixCompare<0>(
static_cast<void (BaseMatrix::*)(real)>(&BaseMatrix::pow));
#undef BASEMATRIXAPPLYROW
#undef BASEMATRIXAPPLYCOL
}
int main(int argc, char** argv) {
@@ -448,125 +448,6 @@ void testMatrixZeroAtOffset(int height, int width) {
MatrixCheckEqual(*cpuA, *cpuTest);
}
void testMatrixBinaryAdd(int height, int width) {
MatrixPtr cpuA = std::make_shared<CpuMatrix>(height, width);
MatrixPtr cpuB = std::make_shared<CpuMatrix>(height, width);
MatrixPtr gpuA = std::make_shared<GpuMatrix>(height, width);
MatrixPtr gpuB = std::make_shared<GpuMatrix>(height, width);
cpuA->randomizeUniform();
cpuB->randomizeUniform();
gpuA->copyFrom(*cpuA);
gpuB->copyFrom(*cpuB);
cpuA->add(*cpuB);
gpuA->add(*gpuB);
MatrixPtr outputCheck = std::make_shared<CpuMatrix>(height, width);
outputCheck->copyFrom(*gpuA);
MatrixCheckEqual(*cpuA, *outputCheck);
}
void testMatrixAssign(int height, int width) {
MatrixPtr cpuA = std::make_shared<CpuMatrix>(height, width);
MatrixPtr gpuA = std::make_shared<GpuMatrix>(height, width);
cpuA->randomizeUniform();
gpuA->copyFrom(*cpuA);
cpuA->assign(2.5);
gpuA->assign(2.5);
MatrixPtr outputCheck = std::make_shared<CpuMatrix>(height, width);
outputCheck->copyFrom(*gpuA);
MatrixCheckEqual(*cpuA, *outputCheck);
}
void testMatrixAdd(int height, int width) {
MatrixPtr cpuA = std::make_shared<CpuMatrix>(height, width);
MatrixPtr gpuA = std::make_shared<GpuMatrix>(height, width);
cpuA->randomizeUniform();
gpuA->copyFrom(*cpuA);
cpuA->add(2.5);
gpuA->add(2.5);
MatrixPtr outputCheck = std::make_shared<CpuMatrix>(height, width);
outputCheck->copyFrom(*gpuA);
MatrixCheckEqual(*cpuA, *outputCheck);
}
void testMatrixSqrt(int height, int width) {
MatrixPtr cpuA = std::make_shared<CpuMatrix>(height, width);
MatrixPtr gpuA = std::make_shared<GpuMatrix>(height, width);
cpuA->randomizeUniform();
gpuA->copyFrom(*cpuA);
cpuA->sqrt();
gpuA->sqrt();
MatrixPtr outputCheck = std::make_shared<CpuMatrix>(height, width);
outputCheck->copyFrom(*gpuA);
MatrixCheckErr(*cpuA, *outputCheck);
}
void testMatrixTanhDerivative(int height, int width) {
MatrixPtr cpuA = std::make_shared<CpuMatrix>(height, width);
MatrixPtr cpuB = std::make_shared<CpuMatrix>(height, width);
MatrixPtr gpuA = std::make_shared<GpuMatrix>(height, width);
MatrixPtr gpuB = std::make_shared<GpuMatrix>(height, width);
cpuA->randomizeUniform();
cpuB->randomizeUniform();
gpuA->copyFrom(*cpuA);
gpuB->copyFrom(*cpuB);
cpuA->tanhDerivative(*cpuB);
gpuA->tanhDerivative(*gpuB);
MatrixPtr outputCheck = std::make_shared<CpuMatrix>(height, width);
outputCheck->copyFrom(*gpuA);
MatrixCheckErr(*cpuA, *outputCheck);
}
void testMatrixTanh(int height, int width) {
MatrixPtr cpuA = std::make_shared<CpuMatrix>(height, width);
MatrixPtr cpuB = std::make_shared<CpuMatrix>(height, width);
MatrixPtr gpuA = std::make_shared<GpuMatrix>(height, width);
MatrixPtr gpuB = std::make_shared<GpuMatrix>(height, width);
cpuA->randomizeUniform();
cpuB->randomizeUniform();
gpuA->copyFrom(*cpuA);
gpuB->copyFrom(*cpuB);
cpuA->tanh(*cpuB);
gpuA->tanh(*gpuB);
MatrixPtr outputCheck = std::make_shared<CpuMatrix>(height, width);
outputCheck->copyFrom(*gpuA);
MatrixCheckErr(*cpuA, *outputCheck);
}
void testMatrixTernarySub(int height, int width) {
MatrixPtr cpuA = std::make_shared<CpuMatrix>(height, width);
MatrixPtr cpuB = std::make_shared<CpuMatrix>(height, width);
MatrixPtr cpuC = std::make_shared<CpuMatrix>(height, width);
MatrixPtr gpuA = std::make_shared<GpuMatrix>(height, width);
MatrixPtr gpuB = std::make_shared<GpuMatrix>(height, width);
MatrixPtr gpuC = std::make_shared<GpuMatrix>(height, width);
cpuA->randomizeUniform();
cpuB->randomizeUniform();
cpuC->randomizeUniform();
gpuA->copyFrom(*cpuA);
gpuB->copyFrom(*cpuB);
gpuC->copyFrom(*cpuC);
cpuA->sub(*cpuB, *cpuC);
gpuA->sub(*gpuB, *gpuC);
MatrixPtr outputCheck = std::make_shared<CpuMatrix>(height, width);
outputCheck->copyFrom(*gpuA);
MatrixCheckEqual(*cpuA, *outputCheck);
}
void testMatrixSumOfSquaresBp(int height, int width) {
MatrixPtr cpuA = std::make_shared<CpuMatrix>(height, width);
MatrixPtr cpuB = std::make_shared<CpuMatrix>(height, width);
@@ -789,18 +670,7 @@ TEST(Matrix, unary) {
for (auto width : {1, 3, 32, 100, 512, 1000, 3210}) {
VLOG(3) << " height=" << height << " width=" << width;
// applyUnary
testMatrixAssign(height, width);
testMatrixAdd(height, width);
testMatrixSqrt(height, width);
// applyBinary
testMatrixBinaryAdd(height, width);
testMatrixTanh(height, width);
testMatrixTanhDerivative(height, width);
// applyTernary
testMatrixTernarySub(height, width);
testMatrixSumOfSquaresBp(height, width);
// asRowVector
@@ -931,165 +801,6 @@ TEST(Matrix, softmax) {
}
}
void testMatrixAddDotMulVMM(int height, int width, int endCol = 0) {
MatrixPtr cpuA = std::make_shared<CpuMatrix>(1, width);
MatrixPtr cpuB = std::make_shared<CpuMatrix>(height, width);
MatrixPtr cpuC = std::make_shared<CpuMatrix>(height, width);
MatrixPtr gpuA = std::make_shared<GpuMatrix>(1, width);
MatrixPtr gpuB = std::make_shared<GpuMatrix>(height, width);
MatrixPtr gpuC = std::make_shared<GpuMatrix>(height, width);
MatrixPtr cpuA1 = std::make_shared<CpuMatrix>(1, width);
MatrixPtr cpuB1 = std::make_shared<CpuMatrix>(height, width);
MatrixPtr cpuC1 = std::make_shared<CpuMatrix>(height, width);
cpuA->randomizeUniform();
cpuB->randomizeUniform();
cpuC->randomizeUniform();
gpuA->copyFrom(*cpuA);
gpuB->copyFrom(*cpuB);
gpuC->copyFrom(*cpuC);
cpuA1->copyFrom(*cpuA);
cpuB1->copyFrom(*cpuB);
cpuC1->copyFrom(*cpuC);
if (!endCol) {
cpuA->addDotMulVMM(*cpuB, *cpuC);
gpuA->addDotMulVMM(*gpuB, *gpuC);
cpuA1->addDotMulVMM2(*cpuB1, *cpuC1);
MatrixCheckErr(*cpuA, *cpuA1);
} else {
MatrixPtr subCpuA = cpuA->subColMatrix(0, endCol);
MatrixPtr subCpuB = cpuB->subColMatrix(0, endCol);
MatrixPtr subCpuC = cpuC->subColMatrix(0, endCol);
MatrixPtr subGpuA = gpuA->subColMatrix(0, endCol);
MatrixPtr subGpuB = gpuB->subColMatrix(0, endCol);
MatrixPtr subGpuC = gpuC->subColMatrix(0, endCol);
subCpuA->addDotMulVMM(*subCpuB, *subCpuC);
subGpuA->addDotMulVMM(*subGpuB, *subGpuC);
}
MatrixPtr outputCheck = std::make_shared<CpuMatrix>(1, width);
outputCheck->copyFrom(*gpuA);
MatrixCheckErr(*cpuA, *outputCheck);
}
void testMatrixRowSum(int height, int width) {
MatrixPtr cpuA = std::make_shared<CpuMatrix>(height, 1);
MatrixPtr cpuB = std::make_shared<CpuMatrix>(height, width);
MatrixPtr gpuA = std::make_shared<GpuMatrix>(height, 1);
MatrixPtr gpuB = std::make_shared<GpuMatrix>(height, width);
MatrixPtr cpuA1 = std::make_shared<CpuMatrix>(height, 1);
MatrixPtr cpuB1 = std::make_shared<CpuMatrix>(height, width);
MatrixPtr gpuA1 = std::make_shared<GpuMatrix>(height, 1);
MatrixPtr gpuB1 = std::make_shared<GpuMatrix>(height, width);
cpuA->randomizeUniform();
cpuB->randomizeUniform();
gpuA->copyFrom(*cpuA);
gpuB->copyFrom(*cpuB);
cpuA1->copyFrom(*cpuA);
cpuB1->copyFrom(*cpuB);
gpuA1->copyFrom(*cpuA);
gpuB1->copyFrom(*cpuB);
cpuA->colMerge(*cpuB);
gpuA->colMerge(*gpuB);
cpuB1->rowSum(*cpuA1);
gpuB1->rowSum(*gpuA1);
MatrixPtr outputCheck = std::make_shared<CpuMatrix>(height, 1);
outputCheck->copyFrom(*gpuA);
MatrixCheckErr(*cpuA, *outputCheck);
outputCheck->copyFrom(*gpuA1);
MatrixCheckErr(*cpuA1, *outputCheck);
}
void testMatrixRowMax(int height, int width, int endCol = 0) {
MatrixPtr cpuA = std::make_shared<CpuMatrix>(height, 1);
MatrixPtr cpuB = std::make_shared<CpuMatrix>(height, width);
MatrixPtr gpuA = std::make_shared<GpuMatrix>(height, 1);
MatrixPtr gpuB = std::make_shared<GpuMatrix>(height, width);
cpuA->randomizeUniform();
cpuB->randomizeUniform();
gpuA->copyFrom(*cpuA);
gpuB->copyFrom(*cpuB);
if (!endCol) {
cpuB->rowMax(*cpuA);
gpuB->rowMax(*gpuA);
} else {
MatrixPtr subCpuB = cpuB->subColMatrix(0, endCol);
MatrixPtr subGpuB = gpuB->subColMatrix(0, endCol);
subCpuB->rowMax(*cpuA);
subGpuB->rowMax(*gpuA);
}
MatrixPtr outputCheck = std::make_shared<CpuMatrix>(height, 1);
outputCheck->copyFrom(*gpuA);
MatrixCheckErr(*cpuA, *outputCheck);
}
void testMatrixColSum(int height, int width, int endCol = 0) {
MatrixPtr cpuA = std::make_shared<CpuMatrix>(1, width);
MatrixPtr cpuB = std::make_shared<CpuMatrix>(height, width);
MatrixPtr gpuA = std::make_shared<GpuMatrix>(1, width);
MatrixPtr gpuB = std::make_shared<GpuMatrix>(height, width);
cpuA->randomizeUniform();
cpuB->randomizeUniform();
gpuA->copyFrom(*cpuA);
gpuB->copyFrom(*cpuB);
if (!endCol) {
cpuA->accumulateColSum(*cpuB);
gpuA->accumulateColSum(*gpuB);
} else {
MatrixPtr subCpuA = cpuA->subColMatrix(0, endCol);
MatrixPtr subGpuA = gpuA->subColMatrix(0, endCol);
MatrixPtr subCpuB = cpuB->subColMatrix(0, endCol);
MatrixPtr subGpuB = gpuB->subColMatrix(0, endCol);
subCpuA->accumulateColSum(*subCpuB);
subGpuA->accumulateColSum(*subGpuB);
}
MatrixPtr outputCheck = std::make_shared<CpuMatrix>(1, width);
outputCheck->copyFrom(*gpuA);
MatrixCheckErr(*cpuA, *outputCheck);
}
void testMatrixColMax(int height, int width, int endCol = 0) {
MatrixPtr cpuA = std::make_shared<CpuMatrix>(1, width);
MatrixPtr cpuB = std::make_shared<CpuMatrix>(height, width);
MatrixPtr gpuA = std::make_shared<GpuMatrix>(1, width);
MatrixPtr gpuB = std::make_shared<GpuMatrix>(height, width);
cpuA->randomizeUniform();
cpuB->randomizeUniform();
gpuA->copyFrom(*cpuA);
gpuB->copyFrom(*cpuB);
if (!endCol) {
cpuB->colMax(*cpuA);
gpuB->colMax(*gpuA);
} else {
MatrixPtr subCpuA = cpuA->subColMatrix(0, endCol);
MatrixPtr subGpuA = gpuA->subColMatrix(0, endCol);
MatrixPtr subCpuB = cpuB->subColMatrix(0, endCol);
MatrixPtr subGpuB = gpuB->subColMatrix(0, endCol);
subCpuB->colMax(*subCpuA);
subGpuB->colMax(*subGpuA);
}
MatrixPtr outputCheck = std::make_shared<CpuMatrix>(1, width);
outputCheck->copyFrom(*gpuA);
MatrixCheckErr(*cpuA, *outputCheck);
}
void testMatrixCollectBias(int height, int width) {
MatrixPtr cpuA = std::make_shared<CpuMatrix>(1, width);
MatrixPtr cpuB = std::make_shared<CpuMatrix>(height, width);
@@ -1182,13 +893,8 @@ TEST(Matrix, aggregate) {
for (auto height : {1, 11, 16, 32, 64, 73, 128, 200, 1024, 2345}) {
for (auto width : {1, 9, 16, 32, 64, 100, 512, 1000, 1024, 2453}) {
VLOG(3) << " height=" << height << " width=" << width;
testMatrixRowSum(height, width);
testMatrixRowMax(height, width);
testMatrixColSum(height, width);
testMatrixColMax(height, width);
testMatrixCollectBias(height, width);
testMatrixTernaryRowDotMul(height, width);
testMatrixAddDotMulVMM(height, width);
testMatrixSumOfSquares(height, width);
testMatrixBinaryClassificationError(height, width);
@@ -1203,11 +909,7 @@ TEST(Matrix, aggregate2) {
VLOG(3) << " height=" << height << " width=" << width;
int endCol = rand() % width; // NOLINT
testMatrixRowMax(height, width, endCol);
testMatrixSumOfSquares(height, width, endCol);
testMatrixColSum(height, width, endCol);
testMatrixColMax(height, width, endCol);
testMatrixAddDotMulVMM(height, width, endCol);
}
}
}