From 9b17c3ff2ec6e3179db7ddffbe7a84a4704e1186 Mon Sep 17 00:00:00 2001
From: xutianbing
Date: Wed, 25 Jan 2017 13:53:28 -0800
Subject: [PATCH] rewrite unit test using Daoyuan's new FunctionTest.

---
 paddle/function/CosSimOp.cpp     |  14 ++--
 paddle/function/CosSimOpTest.cpp | 122 ++++++-------------------------
 paddle/function/FunctionTest.h   |  17 ++++-
 3 files changed, 47 insertions(+), 106 deletions(-)

diff --git a/paddle/function/CosSimOp.cpp b/paddle/function/CosSimOp.cpp
index 130ee56f3..7ece7b2df 100644
--- a/paddle/function/CosSimOp.cpp
+++ b/paddle/function/CosSimOp.cpp
@@ -76,12 +76,12 @@ class CosSimForwardFunc : public FunctionBase {
   }
 
   void calc(const BufferArgs& inputs, const BufferArgs& outputs) override {
-    CHECK_EQ(inputs.size(), 2);
-    CHECK_EQ(outputs.size(), 1);
+    CHECK_EQ(inputs.size(), 2UL);
+    CHECK_EQ(outputs.size(), 1UL);
 
-    CHECK_EQ(inputs[0].shape().ndims(), (size_t)2);
-    CHECK_EQ(inputs[1].shape().ndims(), (size_t)2);
-    CHECK_EQ(outputs[0].shape().ndims(), (size_t)2);
+    CHECK_EQ(inputs[0].shape().ndims(), 2UL);
+    CHECK_EQ(inputs[1].shape().ndims(), 2UL);
+    CHECK_EQ(outputs[0].shape().ndims(), 2UL);
 
     CHECK_EQ(inputs[0].shape()[0], outputs[0].shape()[0]);
     CHECK_EQ(inputs[0].shape()[1], inputs[1].shape()[1]);
@@ -196,8 +196,8 @@ class CosSimBackwardFunc : public FunctionBase {
   }
 
   void calc(const BufferArgs& inputs, const BufferArgs& outputs) override {
-    CHECK_EQ(inputs.size(), 4);
-    CHECK_EQ(outputs.size(), 2);
+    CHECK_EQ(inputs.size(), 4UL);
+    CHECK_EQ(outputs.size(), 2UL);
     /// dim of out_grad and out_val == 1, column vector
     CHECK_EQ(inputs[0].shape()[1], 1UL);
     CHECK_EQ(inputs[1].shape()[1], 1UL);
diff --git a/paddle/function/CosSimOpTest.cpp b/paddle/function/CosSimOpTest.cpp
index dce959e81..48c815f02 100644
--- a/paddle/function/CosSimOpTest.cpp
+++ b/paddle/function/CosSimOpTest.cpp
@@ -22,114 +22,40 @@ void testCosSimForward(size_t height_x,
                        size_t height_y,
                        size_t width,
                        real scale) {
-  FunctionCompare compare("CosSimForward", FuncConfig().set("scale", scale));
-
-  CpuMatrix cpu_arg1(height_x, width);
-  CpuMatrix gpu_arg1(height_x, width);
-  CpuMatrix cpu_arg2(height_y, width);
-  CpuMatrix gpu_arg2(height_y, width);
-  cpu_arg1.randomizeUniform();
-  gpu_arg1.copyFrom(cpu_arg1);
-  cpu_arg2.randomizeUniform();
-  cpu_arg2.add(-0.5);
-  gpu_arg2.copyFrom(cpu_arg2);
-  CpuMatrix cpu_out(height_x, 1);
-  GpuMatrix gpu_out(height_x, 1);
-
-  BufferArgs cpu_inputs;
-  BufferArgs cpu_outputs;
-  cpu_inputs.addArg(cpu_arg1);
-  cpu_inputs.addArg(cpu_arg2);
-  cpu_outputs.addArg(cpu_out, ASSIGN_TO);
-
-  BufferArgs gpu_inputs;
-  BufferArgs gpu_outputs;
-  gpu_inputs.addArg(gpu_arg1);
-  gpu_inputs.addArg(gpu_arg2);
-  gpu_outputs.addArg(gpu_out, ASSIGN_TO);
-
-  compare.getCpuFunction()->calc(cpu_inputs, cpu_outputs);
-  compare.getGpuFunction()->calc(gpu_inputs, gpu_outputs);
-
-  autotest::TensorCheckErr(cpu_out, gpu_out);
-}
-
-TEST(Matrix, cosSimForward) {
-  for (auto height_x : {10, 100, 1000}) {
-    for (auto height_y : {1, height_x}) {
-      for (auto width : {10, 100, 1000}) {
-        for (auto scale : {1.0, 2.0}) {
-          testCosSimForward(height_x, height_y, width, scale);
-        }
-      }
-    }
-  }
+  FunctionCompare test("CosSimForward", FuncConfig().set("scale", scale));
+  // prepare input arguments
+  test.addInputs(BufferArg(VALUE_TYPE_FLOAT, TensorShape{height_x, width}));
+  test.addInputs(BufferArg(VALUE_TYPE_FLOAT, TensorShape{height_y, width}));
+  test.addOutputs(BufferArg(VALUE_TYPE_FLOAT, TensorShape{height_x, 1}),
+                  ASSIGN_TO);
+  // run Function
+  test.run();
 }
 
 void testCosSimBackward(size_t height_x,
                         size_t height_y,
                         size_t width,
                         real scale) {
-  FunctionCompare compare("CosSimBackward", FuncConfig().set("scale", scale));
-
-  CpuMatrix cpu_out_grad(height_x, 1);
-  CpuMatrix cpu_out_val(height_x, 1);
-  CpuMatrix cpu_in1_val(height_x, width);
-  CpuMatrix cpu_in2_val(height_x, width);
-  CpuMatrix cpu_in1_grad(height_x, width);
-  CpuMatrix cpu_in2_grad(height_x, width);
-
-  cpu_out_grad.randomizeUniform();
-  cpu_out_val.randomizeUniform();
-  cpu_in1_val.randomizeUniform();
-  cpu_in2_val.randomizeUniform();
-  cpu_in1_grad.randomizeUniform();
-  cpu_in2_grad.randomizeUniform();
-
-  GpuMatrix gpu_out_grad(height_x, 1);
-  GpuMatrix gpu_out_val(height_x, 1);
-  GpuMatrix gpu_in1_val(height_x, width);
-  GpuMatrix gpu_in2_val(height_x, width);
-  GpuMatrix gpu_in1_grad(height_x, width);
-  GpuMatrix gpu_in2_grad(height_x, width);
-
-  gpu_out_grad.copyFrom(cpu_out_grad);
-  gpu_out_val.copyFrom(cpu_out_val);
-  gpu_in1_val.copyFrom(cpu_in1_val);
-  gpu_in2_val.copyFrom(cpu_in2_val);
-  gpu_in1_grad.copyFrom(cpu_in1_grad);
-  gpu_in2_grad.copyFrom(cpu_in2_grad);
-
-  BufferArgs cpu_inputs;
-  BufferArgs cpu_outputs;
-  cpu_inputs.addArg(cpu_out_grad);
-  cpu_inputs.addArg(cpu_out_val);
-  cpu_inputs.addArg(cpu_in1_val);
-  cpu_inputs.addArg(cpu_in2_val);
-  cpu_outputs.addArg(cpu_in1_grad, ADD_TO);
-  cpu_outputs.addArg(cpu_in2_grad, ADD_TO);
-
-  BufferArgs gpu_inputs;
-  BufferArgs gpu_outputs;
-  gpu_inputs.addArg(gpu_out_grad);
-  gpu_inputs.addArg(gpu_out_val);
-  gpu_inputs.addArg(gpu_in1_val);
-  gpu_inputs.addArg(gpu_in2_val);
-  gpu_outputs.addArg(gpu_in1_grad, ADD_TO);
-  gpu_outputs.addArg(gpu_in2_grad, ADD_TO);
-
-  compare.getCpuFunction()->calc(cpu_inputs, cpu_outputs);
-  compare.getGpuFunction()->calc(gpu_inputs, gpu_outputs);
-
-  autotest::TensorCheckErr(cpu_in1_grad, gpu_in1_grad);
-  autotest::TensorCheckErr(cpu_in2_grad, gpu_in2_grad);
+  FunctionCompare test("CosSimBackward", FuncConfig().set("scale", scale));
+  // prepare input arguments
+  test.addInputs(BufferArg(VALUE_TYPE_FLOAT, TensorShape{height_x, 1}));
+  test.addInputs(BufferArg(VALUE_TYPE_FLOAT, TensorShape{height_x, 1}));
+  test.addInputs(BufferArg(VALUE_TYPE_FLOAT, TensorShape{height_x, width}));
+  test.addInputs(BufferArg(VALUE_TYPE_FLOAT, TensorShape{height_y, width}));
+  test.addOutputs(BufferArg(VALUE_TYPE_FLOAT, TensorShape{height_x, width}),
+                  ADD_TO);
+  test.addOutputs(BufferArg(VALUE_TYPE_FLOAT, TensorShape{height_y, width}),
+                  ADD_TO);
+  // run Function
+  test.run();
 }
 
-TEST(Matrix, cosSimBackward) {
-  for (auto height_x : {1, 10, 100}) {
+TEST(Matrix, cosSim) {
+  for (auto height_x : {10, 100, 1000}) {
     for (auto height_y : {1, height_x}) {
-      for (auto width : {1, 10, 100}) {
+      for (auto width : {10, 100, 1000}) {
         for (auto scale : {1.0, 2.0}) {
+          testCosSimForward(height_x, height_y, width, scale);
           testCosSimBackward(height_x, height_y, width, scale);
         }
       }
diff --git a/paddle/function/FunctionTest.h b/paddle/function/FunctionTest.h
index 35de3a65d..68587ab1a 100644
--- a/paddle/function/FunctionTest.h
+++ b/paddle/function/FunctionTest.h
@@ -157,6 +157,7 @@ public:
     cpuSparse_->randomizeUniform();
     gpuSparse_->copyFrom(*cpuSparse_, stream);
     hl_stream_synchronize(stream);
+
   void addInputs(const SequenceArg& input) {
     size_t batchSize = input.shape()[0];
     size_t numSeqs = batchSize / 10 + 1;
@@ -300,6 +301,20 @@ protected:
   }
 
   void initOutputs() {
+    for (size_t i = 0; i < cpuOutputs_.size(); i++) {
+      initArg(*cpuOutputs_[i]);
+
+      // TODO: Need a BufferCopy used to copy from one BufferArg to another.
+      CpuVector cpuVector(cpuOutputs_[i]->shape().getElements(),
+                          (real*)cpuOutputs_[i]->data());
+      GpuVector gpuVector(gpuOutputs_[i]->shape().getElements(),
+                          (real*)gpuOutputs_[i]->data());
+
+      gpuVector.copyFrom(cpuVector);
+    }
+  }
+
+  void compareOutputs() {
     for (size_t i = 0; i < cpuOutputs_.size(); i++) {
       if (cpuOutputs_[i]->isSparseArg()) {
         continue;  /// sparse matrix already init
@@ -334,7 +349,6 @@ protected:
   }
 
 protected:
-<<<<<<< HEAD
   std::shared_ptr<FunctionBase> cpuFunc_;
   std::shared_ptr<FunctionBase> gpuFunc_;
   std::vector<CpuMemHandlePtr> cpuMemory_;
--
GitLab
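
Note on the test pattern above: with FunctionCompare, a Function test only declares the
operator name, its FuncConfig, and the shapes and value types of its arguments; the harness
owns the CPU and GPU buffers, runs both Function objects, and checks that their outputs
agree. As a rough usage sketch, a test for some other operator would follow the same three
steps. The operator name "FooForward" and its shapes below are made up for illustration;
the FunctionCompare / addInputs / addOutputs / run calls are the ones exercised by this
patch, and the includes and using-directive are assumed to match the existing test files.

  #include "FunctionTest.h"

  using namespace paddle;  // NOLINT

  // Sketch only: "FooForward" is a hypothetical operator name, not one
  // registered in this patch.
  void testFooForward(size_t m, size_t n) {
    FunctionCompare test("FooForward", FuncConfig());
    // declare argument shapes only; no manual CpuMatrix/GpuMatrix setup
    test.addInputs(BufferArg(VALUE_TYPE_FLOAT, TensorShape{m, n}));
    test.addOutputs(BufferArg(VALUE_TYPE_FLOAT, TensorShape{m, 1}), ASSIGN_TO);
    // run the CPU and GPU Functions and compare their outputs
    test.run();
  }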
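
On the TODO in initOutputs() ("Need a BufferCopy used to copy from one BufferArg to
another"): one possible shape for that helper, shown only as a sketch. The name BufferCopy
and its signature are not part of this patch; the body simply factors out the
CpuVector/GpuVector copy that initOutputs() currently writes inline, so it covers only the
host-to-device direction needed here, not arbitrary BufferArg pairs.

  // Hypothetical helper, not in this patch: assumes src lives on the CPU,
  // dst lives on the GPU, and both hold the same number of real elements.
  void BufferCopy(BufferArg& dst, BufferArg& src) {
    CpuVector srcVector(src.shape().getElements(), (real*)src.data());
    GpuVector dstVector(dst.shape().getElements(), (real*)dst.data());
    dstVector.copyFrom(srcVector);
  }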