From 248149f44863820baeed54ca24b0231c46624402 Mon Sep 17 00:00:00 2001
From: xzl
Date: Fri, 21 Jul 2017 00:13:54 +0800
Subject: [PATCH] add depthwise conv test and fix a small bug in ConvOpTest

---
 paddle/function/ConvOpTest.cpp | 349 +++++++++++++++++++--------------
 1 file changed, 202 insertions(+), 147 deletions(-)

diff --git a/paddle/function/ConvOpTest.cpp b/paddle/function/ConvOpTest.cpp
index f0c45c97b1..7f32c73479 100644
--- a/paddle/function/ConvOpTest.cpp
+++ b/paddle/function/ConvOpTest.cpp
@@ -38,76 +38,76 @@ public:
         for (size_t filterSize : {1, 3, 5}) {
           for (size_t inputChannels : {3, 64}) {
             for (size_t outputChannels : {3, 64}) {
-              for (size_t groups : {1, 3, 64}) {
-                if (inputChannels > outputChannels) break;
-                if (groups != 1 &&
-                    (inputChannels != groups || outputChannels % groups != 0))
-                  continue;
-                if (!useGroups) groups = 1;
-
-                for (size_t stride : {1, 2}) {
-                  for (size_t padding : {0, 1}) {
-                    if (padding >= filterSize) break;
-                    size_t outputSize =
-                        (inputSize - filterSize + 2 * padding + stride) /
-                        stride;
-                    VLOG(3) << " batchSize=" << batchSize
-                            << " inputChannels=" << inputChannels
-                            << " inputHeight=" << inputSize
-                            << " inputWidth=" << inputSize
-                            << " outputChannels=" << outputChannels
-                            << " filterHeight=" << filterSize
-                            << " filterWidth=" << filterSize
-                            << " outputHeight=" << outputSize
-                            << " outputWidth=" << outputSize
-                            << " stride=" << stride << " padding=" << padding;
-
-                    std::vector<size_t> paddings = {padding, padding};
-                    std::vector<size_t> strides = {stride, stride};
-                    Compare2Function<DType1, DType2> test(
-                        conv1,
-                        conv2,
-                        FuncConfig()
-                            .set("paddings", paddings)
-                            .set("strides", strides)
-                            .set("groups", groups)
-                            .set("algo", algo));
-
-                    TensorShape input{
-                        batchSize, inputChannels, inputSize, inputSize};
-
-                    TensorShape filter;
-                    if (groups > 1)
-                      filter = TensorShape({groups,
-                                            outputChannels / groups,
-                                            inputChannels / groups,
-                                            filterSize,
-                                            filterSize});
-                    else
-                      filter = TensorShape({outputChannels,
-                                            inputChannels,
-                                            filterSize,
-                                            filterSize});
-                    TensorShape output{
-                        batchSize, outputChannels, outputSize, outputSize};
-
-                    if (type == kForwardTest) {
-                      test.addInputs(BufferArg(VALUE_TYPE_FLOAT, input));
-                      test.addInputs(BufferArg(VALUE_TYPE_FLOAT, filter));
-                      test.addOutputs(BufferArg(VALUE_TYPE_FLOAT, output));
-                      test.run();
-                    } else if (type == kBackwardInputTest) {
-                      test.addInputs(BufferArg(VALUE_TYPE_FLOAT, output));
-                      test.addInputs(BufferArg(VALUE_TYPE_FLOAT, filter));
-                      test.addOutputs(BufferArg(VALUE_TYPE_FLOAT, input),
-                                      ADD_TO);
-                      test.run();
-                    } else if (type == kBackwardFilterTest) {
-                      test.addInputs(BufferArg(VALUE_TYPE_FLOAT, output));
-                      test.addInputs(BufferArg(VALUE_TYPE_FLOAT, input));
-                      test.addOutputs(BufferArg(VALUE_TYPE_FLOAT, filter));
-                      test.run();
-                    }
+              if (inputChannels > outputChannels) break;
+              size_t groups;
+              if (!useGroups) {
+                groups = 1;
+              } else {
+                if (outputChannels % inputChannels != 0) continue;
+                groups = inputChannels;
+              }
+
+              for (size_t stride : {1, 2}) {
+                for (size_t padding : {0, 1}) {
+                  if (padding >= filterSize) break;
+                  size_t outputSize =
+                      (inputSize - filterSize + 2 * padding + stride) / stride;
+                  VLOG(3) << " batchSize=" << batchSize
+                          << " inputChannels=" << inputChannels
+                          << " inputHeight=" << inputSize
+                          << " inputWidth=" << inputSize
+                          << " outputChannels=" << outputChannels
+                          << " filterHeight=" << filterSize
+                          << " filterWidth=" << filterSize
+                          << " outputHeight=" << outputSize
+                          << " outputWidth=" << outputSize
+                          << " stride=" << stride << " padding=" << padding;
+
+                  std::vector<size_t> paddings = {padding, padding};
+                  std::vector<size_t> strides = {stride, stride};
+                  Compare2Function<DType1, DType2> test(
+                      conv1,
+                      conv2,
+                      FuncConfig()
+                          .set("paddings", paddings)
+                          .set("strides", strides)
+                          .set("groups", groups)
+                          .set("algo", algo));
+
+                  TensorShape input{
+                      batchSize, inputChannels, inputSize, inputSize};
+
+                  TensorShape filter;
+                  if (groups > 1)
+                    filter = TensorShape({groups,
+                                          outputChannels / groups,
+                                          inputChannels / groups,
+                                          filterSize,
+                                          filterSize});
+                  else
+                    filter = TensorShape({outputChannels,
+                                          inputChannels,
+                                          filterSize,
+                                          filterSize});
+                  TensorShape output{
+                      batchSize, outputChannels, outputSize, outputSize};
+
+                  if (type == kForwardTest) {
+                    test.addInputs(BufferArg(VALUE_TYPE_FLOAT, input));
+                    test.addInputs(BufferArg(VALUE_TYPE_FLOAT, filter));
+                    test.addOutputs(BufferArg(VALUE_TYPE_FLOAT, output));
+                    test.run();
+                  } else if (type == kBackwardInputTest) {
+                    test.addInputs(BufferArg(VALUE_TYPE_FLOAT, output));
+                    test.addInputs(BufferArg(VALUE_TYPE_FLOAT, filter));
+                    test.addOutputs(BufferArg(VALUE_TYPE_FLOAT, input), ADD_TO);
+                    test.run();
+                  } else if (type == kBackwardFilterTest) {
+                    test.addInputs(BufferArg(VALUE_TYPE_FLOAT, output));
+                    test.addInputs(BufferArg(VALUE_TYPE_FLOAT, input));
+                    test.addOutputs(BufferArg(VALUE_TYPE_FLOAT, filter),
+                                    ADD_TO);
+                    test.run();
                   }
                 }
               }
@@ -136,77 +136,78 @@ public:
             for (size_t filterWidth : {3, 7}) {
               for (size_t inputChannels : {7}) {
                 for (size_t outputChannels : {7}) {
-                  for (size_t groups : {1, 7}) {
-                    if (groups != 1 && (inputChannels != groups ||
-                                        outputChannels % groups != 0))
-                      continue;
-                    if (!useGroups) groups = 1;
-
-                    size_t stride = 1;
-                    size_t padding = 0;
-                    size_t outputHeight =
-                        (inputHeight - filterHeight + 2 * padding + stride) /
-                        stride;
-                    size_t outputWidth =
-                        (inputWidth - filterWidth + 2 * padding + stride) /
-                        stride;
-                    VLOG(3) << " batchSize=" << batchSize
-                            << " inputChannels=" << inputChannels
-                            << " inputHeight=" << inputHeight
-                            << " inputWidth=" << inputWidth
-                            << " outputChannels=" << outputChannels
-                            << " filterHeight=" << filterHeight
-                            << " filterWidth=" << filterWidth
-                            << " outputHeight=" << outputHeight
-                            << " outputWidth=" << outputWidth
-                            << " stride=" << stride << " padding=" << padding;
-
-                    std::vector<size_t> paddings = {padding, padding};
-                    std::vector<size_t> strides = {stride, stride};
-                    Compare2Function<DType1, DType2> test(
-                        conv1,
-                        conv2,
-                        FuncConfig()
-                            .set("paddings", paddings)
-                            .set("strides", strides)
-                            .set("groups", groups)
-                            .set("algo", algo));
-
-                    TensorShape input{
-                        batchSize, inputChannels, inputHeight, inputWidth};
-
-                    TensorShape filter;
-                    if (groups > 1)
-                      filter = TensorShape({groups,
-                                            outputChannels / groups,
-                                            inputChannels / groups,
-                                            filterHeight,
-                                            filterWidth});
-                    else
-                      filter = TensorShape({outputChannels,
-                                            inputChannels,
-                                            filterHeight,
-                                            filterWidth});
-                    TensorShape output{
-                        batchSize, outputChannels, outputHeight, outputWidth};
-
-                    if (type == kForwardTest) {
-                      test.addInputs(BufferArg(VALUE_TYPE_FLOAT, input));
-                      test.addInputs(BufferArg(VALUE_TYPE_FLOAT, filter));
-                      test.addOutputs(BufferArg(VALUE_TYPE_FLOAT, output));
-                      test.run();
-                    } else if (type == kBackwardInputTest) {
-                      test.addInputs(BufferArg(VALUE_TYPE_FLOAT, output));
-                      test.addInputs(BufferArg(VALUE_TYPE_FLOAT, filter));
-                      test.addOutputs(BufferArg(VALUE_TYPE_FLOAT, input),
-                                      ADD_TO);
-                      test.run();
-                    } else if (type == kBackwardFilterTest) {
-                      test.addInputs(BufferArg(VALUE_TYPE_FLOAT, output));
-                      test.addInputs(BufferArg(VALUE_TYPE_FLOAT, input));
-                      test.addOutputs(BufferArg(VALUE_TYPE_FLOAT, filter));
-                      test.run();
-                    }
-                  }
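+                  // Depthwise case: use one group per input channel, which
+                  // requires outputChannels to be a multiple of inputChannels.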
+                  size_t groups;
+                  if (!useGroups) {
+                    groups = 1;
+                  } else {
+                    if (outputChannels % inputChannels != 0) continue;
+                    groups = inputChannels;
+                  }
+
+                  size_t stride = 1;
+                  size_t padding = 0;
+                  size_t outputHeight =
+                      (inputHeight - filterHeight + 2 * padding + stride) /
+                      stride;
+                  size_t outputWidth =
+                      (inputWidth - filterWidth + 2 * padding + stride) /
+                      stride;
+                  VLOG(3) << " batchSize=" << batchSize
+                          << " inputChannels=" << inputChannels
+                          << " inputHeight=" << inputHeight
+                          << " inputWidth=" << inputWidth
+                          << " outputChannels=" << outputChannels
+                          << " filterHeight=" << filterHeight
+                          << " filterWidth=" << filterWidth
+                          << " outputHeight=" << outputHeight
+                          << " outputWidth=" << outputWidth
+                          << " stride=" << stride << " padding=" << padding;
+
+                  std::vector<size_t> paddings = {padding, padding};
+                  std::vector<size_t> strides = {stride, stride};
+                  Compare2Function<DType1, DType2> test(
+                      conv1,
+                      conv2,
+                      FuncConfig()
+                          .set("paddings", paddings)
+                          .set("strides", strides)
+                          .set("groups", groups)
+                          .set("algo", algo));
+
+                  TensorShape input{
+                      batchSize, inputChannels, inputHeight, inputWidth};
+
+                  TensorShape filter;
+                  if (groups > 1)
+                    filter = TensorShape({groups,
+                                          outputChannels / groups,
+                                          inputChannels / groups,
+                                          filterHeight,
+                                          filterWidth});
+                  else
+                    filter = TensorShape({outputChannels,
+                                          inputChannels,
+                                          filterHeight,
+                                          filterWidth});
+                  TensorShape output{
+                      batchSize, outputChannels, outputHeight, outputWidth};
+
+                  if (type == kForwardTest) {
+                    test.addInputs(BufferArg(VALUE_TYPE_FLOAT, input));
+                    test.addInputs(BufferArg(VALUE_TYPE_FLOAT, filter));
+                    test.addOutputs(BufferArg(VALUE_TYPE_FLOAT, output));
+                    test.run();
+                  } else if (type == kBackwardInputTest) {
+                    test.addInputs(BufferArg(VALUE_TYPE_FLOAT, output));
+                    test.addInputs(BufferArg(VALUE_TYPE_FLOAT, filter));
+                    test.addOutputs(BufferArg(VALUE_TYPE_FLOAT, input), ADD_TO);
+                    test.run();
+                  } else if (type == kBackwardFilterTest) {
+                    test.addInputs(BufferArg(VALUE_TYPE_FLOAT, output));
+                    test.addInputs(BufferArg(VALUE_TYPE_FLOAT, input));
+                    test.addOutputs(BufferArg(VALUE_TYPE_FLOAT, filter),
+                                    ADD_TO);
+                    test.run();
                   }
                 }
               }
@@ -218,6 +219,8 @@ public:
   }
 };
 
+// ======Start Convolution TEST======
+
 TEST(Forward, GEMM) {
   ConvolutionTest<DEVICE_TYPE_CPU, DEVICE_TYPE_CPU> test(
       "NaiveConv-CPU", "GemmConv-CPU", kForwardTest, false);
@@ -228,24 +231,76 @@ TEST(Forward, GEMM) {
 #ifndef PADDLE_ONLY_CPU
 TEST(Forward, GEMM2) {
   ConvolutionTest<DEVICE_TYPE_CPU, DEVICE_TYPE_GPU> test(
-      "GemmConv-CPU", "GemmConv-GPU", kForwardTest);
+      "GemmConv-CPU", "GemmConv-GPU", kForwardTest, false);
   ConvolutionTest2<DEVICE_TYPE_CPU, DEVICE_TYPE_GPU> test2(
-      "GemmConv-CPU", "GemmConv-GPU", kForwardTest);
+      "GemmConv-CPU", "GemmConv-GPU", kForwardTest, false);
 }
 
 TEST(BackwardInput, GEMM) {
   ConvolutionTest<DEVICE_TYPE_CPU, DEVICE_TYPE_GPU> test(
-      "GemmConvGradInput-CPU", "GemmConvGradInput-GPU", kBackwardInputTest);
+      "GemmConvGradInput-CPU",
+      "GemmConvGradInput-GPU",
+      kBackwardInputTest,
+      false);
   ConvolutionTest2<DEVICE_TYPE_CPU, DEVICE_TYPE_GPU> test2(
-      "GemmConvGradInput-CPU", "GemmConvGradInput-GPU", kBackwardInputTest);
+      "GemmConvGradInput-CPU",
+      "GemmConvGradInput-GPU",
+      kBackwardInputTest,
+      false);
 }
 
 TEST(BackwardFilter, GEMM) {
   ConvolutionTest<DEVICE_TYPE_CPU, DEVICE_TYPE_GPU> test(
-      "GemmConvGradFilter-CPU", "GemmConvGradFilter-GPU", kBackwardFilterTest);
+      "GemmConvGradFilter-CPU",
+      "GemmConvGradFilter-GPU",
+      kBackwardFilterTest,
+      false);
   ConvolutionTest2<DEVICE_TYPE_CPU, DEVICE_TYPE_GPU> test2(
-      "GemmConvGradFilter-CPU", "GemmConvGradFilter-GPU", kBackwardFilterTest);
+      "GemmConvGradFilter-CPU",
+      "GemmConvGradFilter-GPU",
+      kBackwardFilterTest,
+      false);
 }
 #endif
+// ======End Convolution TEST======
+
+// ======Start DepthwiseConvolution TEST======
+
+// TODO(zhaolong): the CPU test for depthwise convolution will be added once
+// the CPU version of depthwiseConv is implemented.
+
+#ifndef PADDLE_ONLY_CPU
+
+TEST(DepthwiseConvForward, GEMM2) {
+  ConvolutionTest<DEVICE_TYPE_CPU, DEVICE_TYPE_GPU> test(
+      "GemmConv-CPU", "DepthwiseConv-GPU", kForwardTest);
+  ConvolutionTest2<DEVICE_TYPE_CPU, DEVICE_TYPE_GPU> test2(
+      "GemmConv-CPU", "DepthwiseConv-GPU", kForwardTest);
+}
+
+TEST(DepthwiseConvBackwardInput, GEMM) {
+  ConvolutionTest<DEVICE_TYPE_CPU, DEVICE_TYPE_GPU> test(
+      "GemmConvGradInput-CPU",
+      "DepthwiseConvGradInput-GPU",
+      kBackwardInputTest);
+  ConvolutionTest2<DEVICE_TYPE_CPU, DEVICE_TYPE_GPU> test2(
+      "GemmConvGradInput-CPU",
+      "DepthwiseConvGradInput-GPU",
+      kBackwardInputTest);
+}
+
+TEST(DepthwiseConvBackwardFilter, GEMM) {
+  ConvolutionTest<DEVICE_TYPE_CPU, DEVICE_TYPE_GPU> test(
+      "GemmConvGradFilter-CPU",
+      "DepthwiseConvGradFilter-GPU",
+      kBackwardFilterTest);
+  ConvolutionTest2<DEVICE_TYPE_CPU, DEVICE_TYPE_GPU> test2(
+      "GemmConvGradFilter-CPU",
+      "DepthwiseConvGradFilter-GPU",
+      kBackwardFilterTest);
+}
+
+#endif
+// ======End DepthwiseConvolution TEST======
 
 }  // namespace paddle
-- 
GitLab