Commit 9b17c3ff authored by xutianbing

rewrite unit test using Daoyuan's new FunctionTest.

Parent 877decdc
paddle/function/CosSimOp.cpp

@@ -76,12 +76,12 @@ class CosSimForwardFunc : public FunctionBase {
   }

   void calc(const BufferArgs& inputs, const BufferArgs& outputs) override {
-    CHECK_EQ(inputs.size(), 2);
-    CHECK_EQ(outputs.size(), 1);
+    CHECK_EQ(inputs.size(), 2UL);
+    CHECK_EQ(outputs.size(), 1UL);

-    CHECK_EQ(inputs[0].shape().ndims(), (size_t)2);
-    CHECK_EQ(inputs[1].shape().ndims(), (size_t)2);
-    CHECK_EQ(outputs[0].shape().ndims(), (size_t)2);
+    CHECK_EQ(inputs[0].shape().ndims(), 2UL);
+    CHECK_EQ(inputs[1].shape().ndims(), 2UL);
+    CHECK_EQ(outputs[0].shape().ndims(), 2UL);
     CHECK_EQ(inputs[0].shape()[0], outputs[0].shape()[0]);
     CHECK_EQ(inputs[0].shape()[1], inputs[1].shape()[1]);

@@ -196,8 +196,8 @@ class CosSimBackwardFunc : public FunctionBase {
   }

   void calc(const BufferArgs& inputs, const BufferArgs& outputs) override {
-    CHECK_EQ(inputs.size(), 4);
-    CHECK_EQ(outputs.size(), 2);
+    CHECK_EQ(inputs.size(), 4UL);
+    CHECK_EQ(outputs.size(), 2UL);
     /// dim of out_grad and out_val == 1, column vector
     CHECK_EQ(inputs[0].shape()[1], 1UL);
     CHECK_EQ(inputs[1].shape()[1], 1UL);
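Note: the only change in CosSimOp.cpp is the literal suffix. glog's CHECK_EQ compares its two operands through a template, so checking a size_t value such as inputs.size() against a plain int literal is a mixed-signedness comparison that strict (-Wsign-compare / -Werror) builds flag; the UL suffix keeps both operands unsigned. A minimal stand-in sketch (not glog itself) showing the effect:

#include <cstddef>
#include <cstdio>

// Simplified stand-in for a CHECK_EQ-style macro: the comparison happens
// between the deduced types A and B, not between constants.
template <typename A, typename B>
void CheckEqSketch(const A& a, const B& b, const char* expr) {
  if (!(a == b)) {  // with A = size_t, B = int, -Wsign-compare warns here
    std::fprintf(stderr, "Check failed: %s\n", expr);
  }
}

int main() {
  std::size_t size = 2;
  CheckEqSketch(size, 2, "size == 2");      // size_t vs. int: mixed signedness
  CheckEqSketch(size, 2UL, "size == 2UL");  // size_t vs. unsigned long: types agree
  return 0;
}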
paddle/function/CosSimOpTest.cpp

@@ -22,114 +22,40 @@ void testCosSimForward(size_t height_x,
                        size_t height_y,
                        size_t width,
                        real scale) {
-  FunctionCompare compare("CosSimForward", FuncConfig().set("scale", scale));
-
-  CpuMatrix cpu_arg1(height_x, width);
-  CpuMatrix gpu_arg1(height_x, width);
-  CpuMatrix cpu_arg2(height_y, width);
-  CpuMatrix gpu_arg2(height_y, width);
-  cpu_arg1.randomizeUniform();
-  gpu_arg1.copyFrom(cpu_arg1);
-  cpu_arg2.randomizeUniform();
-  cpu_arg2.add(-0.5);
-  gpu_arg2.copyFrom(cpu_arg2);
-  CpuMatrix cpu_out(height_x, 1);
-  GpuMatrix gpu_out(height_x, 1);
-
-  BufferArgs cpu_inputs;
-  BufferArgs cpu_outputs;
-  cpu_inputs.addArg(cpu_arg1);
-  cpu_inputs.addArg(cpu_arg2);
-  cpu_outputs.addArg(cpu_out, ASSIGN_TO);
-
-  BufferArgs gpu_inputs;
-  BufferArgs gpu_outputs;
-  gpu_inputs.addArg(gpu_arg1);
-  gpu_inputs.addArg(gpu_arg2);
-  gpu_outputs.addArg(gpu_out, ASSIGN_TO);
-
-  compare.getCpuFunction()->calc(cpu_inputs, cpu_outputs);
-  compare.getGpuFunction()->calc(gpu_inputs, gpu_outputs);
-
-  autotest::TensorCheckErr(cpu_out, gpu_out);
-}
-
-TEST(Matrix, cosSimForward) {
-  for (auto height_x : {10, 100, 1000}) {
-    for (auto height_y : {1, height_x}) {
-      for (auto width : {10, 100, 1000}) {
-        for (auto scale : {1.0, 2.0}) {
-          testCosSimForward(height_x, height_y, width, scale);
-        }
-      }
-    }
-  }
+  FunctionCompare test("CosSimForward", FuncConfig().set("scale", scale));
+  // prepare input arguments
+  test.addInputs(BufferArg(VALUE_TYPE_FLOAT, TensorShape{height_x, width}));
+  test.addInputs(BufferArg(VALUE_TYPE_FLOAT, TensorShape{height_y, width}));
+  test.addOutputs(BufferArg(VALUE_TYPE_FLOAT, TensorShape{height_x, 1}),
+                  ASSIGN_TO);
+  // run Function
+  test.run();
 }

 void testCosSimBackward(size_t height_x,
                         size_t height_y,
                         size_t width,
                         real scale) {
-  FunctionCompare compare("CosSimBackward", FuncConfig().set("scale", scale));
-
-  CpuMatrix cpu_out_grad(height_x, 1);
-  CpuMatrix cpu_out_val(height_x, 1);
-  CpuMatrix cpu_in1_val(height_x, width);
-  CpuMatrix cpu_in2_val(height_x, width);
-  CpuMatrix cpu_in1_grad(height_x, width);
-  CpuMatrix cpu_in2_grad(height_x, width);
-
-  cpu_out_grad.randomizeUniform();
-  cpu_out_val.randomizeUniform();
-  cpu_in1_val.randomizeUniform();
-  cpu_in2_val.randomizeUniform();
-  cpu_in1_grad.randomizeUniform();
-  cpu_in2_grad.randomizeUniform();
-
-  GpuMatrix gpu_out_grad(height_x, 1);
-  GpuMatrix gpu_out_val(height_x, 1);
-  GpuMatrix gpu_in1_val(height_x, width);
-  GpuMatrix gpu_in2_val(height_x, width);
-  GpuMatrix gpu_in1_grad(height_x, width);
-  GpuMatrix gpu_in2_grad(height_x, width);
-
-  gpu_out_grad.copyFrom(cpu_out_grad);
-  gpu_out_val.copyFrom(cpu_out_val);
-  gpu_in1_val.copyFrom(cpu_in1_val);
-  gpu_in2_val.copyFrom(cpu_in2_val);
-  gpu_in1_grad.copyFrom(cpu_in1_grad);
-  gpu_in2_grad.copyFrom(cpu_in2_grad);
-
-  BufferArgs cpu_inputs;
-  BufferArgs cpu_outputs;
-  cpu_inputs.addArg(cpu_out_grad);
-  cpu_inputs.addArg(cpu_out_val);
-  cpu_inputs.addArg(cpu_in1_val);
-  cpu_inputs.addArg(cpu_in2_val);
-  cpu_outputs.addArg(cpu_in1_grad, ADD_TO);
-  cpu_outputs.addArg(cpu_in2_grad, ADD_TO);
-
-  BufferArgs gpu_inputs;
-  BufferArgs gpu_outputs;
-  gpu_inputs.addArg(gpu_out_grad);
-  gpu_inputs.addArg(gpu_out_val);
-  gpu_inputs.addArg(gpu_in1_val);
-  gpu_inputs.addArg(gpu_in2_val);
-  gpu_outputs.addArg(gpu_in1_grad, ADD_TO);
-  gpu_outputs.addArg(gpu_in2_grad, ADD_TO);
-
-  compare.getCpuFunction()->calc(cpu_inputs, cpu_outputs);
-  compare.getGpuFunction()->calc(gpu_inputs, gpu_outputs);
-
-  autotest::TensorCheckErr(cpu_in1_grad, gpu_in1_grad);
-  autotest::TensorCheckErr(cpu_in2_grad, gpu_in2_grad);
+  FunctionCompare test("CosSimBackward", FuncConfig().set("scale", scale));
+  // prepare input arguments
+  test.addInputs(BufferArg(VALUE_TYPE_FLOAT, TensorShape{height_x, 1}));
+  test.addInputs(BufferArg(VALUE_TYPE_FLOAT, TensorShape{height_x, 1}));
+  test.addInputs(BufferArg(VALUE_TYPE_FLOAT, TensorShape{height_x, width}));
+  test.addInputs(BufferArg(VALUE_TYPE_FLOAT, TensorShape{height_y, width}));
+  test.addOutputs(BufferArg(VALUE_TYPE_FLOAT, TensorShape{height_x, width}),
+                  ADD_TO);
+  test.addOutputs(BufferArg(VALUE_TYPE_FLOAT, TensorShape{height_y, width}),
+                  ADD_TO);
+  // run Function
+  test.run();
 }

-TEST(Matrix, cosSimBackward) {
-  for (auto height_x : {1, 10, 100}) {
+TEST(Matrix, cosSim) {
+  for (auto height_x : {10, 100, 1000}) {
     for (auto height_y : {1, height_x}) {
-      for (auto width : {1, 10, 100}) {
+      for (auto width : {10, 100, 1000}) {
         for (auto scale : {1.0, 2.0}) {
+          testCosSimForward(height_x, height_y, width, scale);
           testCosSimBackward(height_x, height_y, width, scale);
         }
       }
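Note: the test bodies shrink from roughly 114 lines to 40 because FunctionCompare now owns the whole CPU-vs-GPU comparison: the test only declares argument shapes, and run() allocates buffers, random-initializes them, calls both implementations, and checks the outputs. A self-contained sketch of that harness pattern, with hypothetical names (this is not the actual Paddle API, just the shape of the idea):

#include <cmath>
#include <cstdio>
#include <functional>
#include <random>
#include <vector>

using Buffer = std::vector<float>;
using Func =
    std::function<void(const std::vector<Buffer>&, std::vector<Buffer>&)>;

// Hypothetical mini-harness in the spirit of FunctionCompare: tests declare
// sizes, the harness fills inputs with random data, runs a reference and a
// test implementation, and compares the outputs elementwise.
struct CompareHarness {
  Func refFunc, testFunc;  // e.g. the CPU and GPU versions of one Function
  std::vector<Buffer> inputs;
  std::vector<Buffer> refOut, testOut;

  void addInput(size_t size) {
    std::mt19937 gen(inputs.size());  // deterministic seed per argument
    std::uniform_real_distribution<float> dist(-0.5f, 0.5f);
    Buffer b(size);
    for (auto& v : b) v = dist(gen);
    inputs.push_back(std::move(b));
  }
  void addOutput(size_t size) {
    refOut.emplace_back(size);   // both sides get identically sized,
    testOut.emplace_back(size);  // identically initialized buffers
  }
  void run(float tol = 1e-5f) {
    refFunc(inputs, refOut);
    testFunc(inputs, testOut);
    for (size_t i = 0; i < refOut.size(); ++i)
      for (size_t j = 0; j < refOut[i].size(); ++j)
        if (std::fabs(refOut[i][j] - testOut[i][j]) > tol)
          std::fprintf(stderr, "mismatch at output %zu[%zu]\n", i, j);
  }
};

int main() {
  // trivial usage: both "implementations" copy input 0 to output 0
  auto copyFn = [](const std::vector<Buffer>& in, std::vector<Buffer>& out) {
    out[0] = in[0];
  };
  CompareHarness test{copyFn, copyFn};
  test.addInput(128);
  test.addOutput(128);
  test.run();  // prints nothing: the two implementations agree
  return 0;
}

The design payoff is the same as in the diff above: adding a new Function test becomes a handful of addInputs/addOutputs calls instead of a page of buffer bookkeeping duplicated per device.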
paddle/function/FunctionTest.h

@@ -157,6 +157,7 @@ public:
     cpuSparse_->randomizeUniform();
     gpuSparse_->copyFrom(*cpuSparse_, stream);
     hl_stream_synchronize(stream);
+

   void addInputs(const SequenceArg& input) {
     size_t batchSize = input.shape()[0];
     size_t numSeqs = batchSize / 10 + 1;

@@ -300,6 +301,21 @@ protected:
   }

   void initOutputs() {
+    for (size_t i = 0; i < cpuOutputs_.size(); i++) {
+      initArg(*cpuOutputs_[i]);
+
+      // TODO: Need a BufferCopy used to copy from one BufferArg to another.
+      CpuVector cpuVector(cpuOutputs_[i]->shape().getElements(),
+                          (real*)cpuOutputs_[i]->data());
+      GpuVector gpuVector(gpuOutputs_[i]->shape().getElements(),
+                          (real*)gpuOutputs_[i]->data());
+
+      gpuVector.copyFrom(cpuVector);
+    }
+  }
+
+  void compareOutputs() {
+>>>>>>> rewrite unit test using Daoyuan's new FunctionTest.
     for (size_t i = 0; i < cpuOutputs_.size(); i++) {
       if (cpuOutputs_[i]->isSparseArg()) {
         continue;  /// sparse matrix already init

@@ -334,7 +350,6 @@ protected:
   }

 protected:
-<<<<<<< HEAD
   std::shared_ptr<FunctionBase> cpuFunc_;
   std::shared_ptr<FunctionBase> gpuFunc_;
   std::vector<CpuMemHandlePtr> cpuMemory_;
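Note: the new initOutputs() also explains the TODO it carries. BufferArg exposes only untyped memory, so the harness temporarily wraps each output buffer in a CpuVector/GpuVector purely to reuse their copyFrom routines; a dedicated BufferCopy would remove that detour. The copy matters because the outputs above use ADD_TO: both implementations accumulate into their output buffers, so the CPU and GPU sides must start from identical initial values. A simplified, hypothetical sketch of the wrap-and-copy idea (plain host memory here, no GPU):

#include <cstddef>
#include <cstring>

using real = float;

// Non-owning view over a raw buffer, standing in for CpuVector/GpuVector.
struct VectorView {
  real* data;
  std::size_t size;
  void copyFrom(const VectorView& src) {
    // same-device copy in this sketch; the real code dispatches to a
    // host-to-device copy when the destination lives on the GPU
    std::memcpy(data, src.data, src.size * sizeof(real));
  }
};

// Mirror the CPU-initialized output into the GPU-side buffer so that both
// ADD_TO accumulators start from the same values before run().
void syncOutputs(real* cpuBuf, real* gpuBuf, std::size_t elements) {
  VectorView cpuView{cpuBuf, elements};
  VectorView gpuView{gpuBuf, elements};
  gpuView.copyFrom(cpuView);
}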