提交 1f516fa0 编写于 作者: X xzl

Modify formatting, and update the layer gradient test and op test.

上级 81998868
......@@ -25,40 +25,32 @@ enum TestType {
kBackwardFilterTest = 2,
};
enum LayerType {
convolutionType = 0,
depthwiseConvolutionType = 1,
};
template <DeviceType DType1, DeviceType DType2>
class ConvolutionTest {
public:
ConvolutionTest(const std::string& conv1,
const std::string& conv2,
LayerType layerType,
TestType type,
bool useGroups = true,
std::string algo = "auto") {
for (size_t batchSize : {1, 32}) {
for (size_t inputSize : {7, 14, 54}) {
for (size_t filterSize : {1, 3, 5}) {
for (size_t inputChannels : {3, 64}) {
for (size_t outputChannels : {3, 64, 128}) {
for (size_t groups : {1, 3, 64}) {
if (inputChannels > outputChannels) break;
if (layerType == depthwiseConvolutionType &&
outputChannels % inputChannels != 0)
break;
size_t groups = 1;
if (layerType == depthwiseConvolutionType) {
groups = inputChannels;
}
if (groups != 1 &&
(inputChannels != groups || outputChannels % groups != 0))
continue;
if (!useGroups) groups = 1;
for (size_t stride : {1, 2}) {
for (size_t padding : {0, 1}) {
if (padding >= filterSize) break;
size_t outputSize =
(inputSize - filterSize + 2 * padding + stride) / stride;
(inputSize - filterSize + 2 * padding + stride) /
stride;
VLOG(3) << " batchSize=" << batchSize
<< " inputChannels=" << inputChannels
<< " inputHeight=" << inputSize
......@@ -85,10 +77,10 @@ public:
batchSize, inputChannels, inputSize, inputSize};
TensorShape filter;
if (layerType == depthwiseConvolutionType)
if (groups > 1)
filter = TensorShape({groups,
outputChannels / groups,
(size_t)1,
inputChannels / groups,
filterSize,
filterSize});
else
......@@ -107,7 +99,8 @@ public:
} else if (type == kBackwardInputTest) {
test.addInputs(BufferArg(VALUE_TYPE_FLOAT, output));
test.addInputs(BufferArg(VALUE_TYPE_FLOAT, filter));
test.addOutputs(BufferArg(VALUE_TYPE_FLOAT, input), ADD_TO);
test.addOutputs(BufferArg(VALUE_TYPE_FLOAT, input),
ADD_TO);
test.run();
} else if (type == kBackwardFilterTest) {
test.addInputs(BufferArg(VALUE_TYPE_FLOAT, output));
......@@ -123,6 +116,7 @@ public:
}
}
}
}
};
// Mainly used to test cases where the height and width (input, filter)
......@@ -132,8 +126,8 @@ class ConvolutionTest2 {
public:
ConvolutionTest2(const std::string& conv1,
const std::string& conv2,
LayerType layerType,
TestType type,
bool useGroups = true,
std::string algo = "auto") {
for (size_t batchSize : {16}) {
for (size_t inputHeight : {7, 31}) {
......@@ -142,15 +136,13 @@ public:
for (size_t filterWidth : {3, 7}) {
for (size_t inputChannels : {7}) {
for (size_t outputChannels : {7, 32}) {
if (layerType == depthwiseConvolutionType &&
outputChannels % inputChannels != 0)
break;
for (size_t groups : {1, 7}) {
if (!useGroups && groups != 1 &&
(inputChannels != groups ||
outputChannels % groups != 0))
continue;
if (!useGroups) groups = 1;
size_t groups = 1;
if (layerType == depthwiseConvolutionType) {
groups = inputChannels;
}
size_t stride = 1;
size_t padding = 0;
size_t outputHeight =
......@@ -185,10 +177,10 @@ public:
batchSize, inputChannels, inputHeight, inputWidth};
TensorShape filter;
if (layerType == depthwiseConvolutionType)
if (groups > 1)
filter = TensorShape({groups,
outputChannels / groups,
(size_t)1,
inputChannels / groups,
filterHeight,
filterWidth});
else
......@@ -207,7 +199,8 @@ public:
} else if (type == kBackwardInputTest) {
test.addInputs(BufferArg(VALUE_TYPE_FLOAT, output));
test.addInputs(BufferArg(VALUE_TYPE_FLOAT, filter));
test.addOutputs(BufferArg(VALUE_TYPE_FLOAT, input), ADD_TO);
test.addOutputs(BufferArg(VALUE_TYPE_FLOAT, input),
ADD_TO);
test.run();
} else if (type == kBackwardFilterTest) {
test.addInputs(BufferArg(VALUE_TYPE_FLOAT, output));
......@@ -223,109 +216,37 @@ public:
}
}
}
}
};
// ======Start Convolution TEST======
TEST(Forward, GEMM) {
ConvolutionTest<DEVICE_TYPE_CPU, DEVICE_TYPE_CPU> test(
"NaiveConv-CPU", "GemmConv-CPU", convolutionType, kForwardTest);
"NaiveConv-CPU", "GemmConv-CPU", kForwardTest, false);
ConvolutionTest2<DEVICE_TYPE_CPU, DEVICE_TYPE_CPU> test2(
"NaiveConv-CPU", "GemmConv-CPU", convolutionType, kForwardTest);
"NaiveConv-CPU", "GemmConv-CPU", kForwardTest, false);
}
#ifndef PADDLE_ONLY_CPU
TEST(Forward, GEMM2) {
ConvolutionTest<DEVICE_TYPE_CPU, DEVICE_TYPE_GPU> test(
"GemmConv-CPU", "GemmConv-GPU", convolutionType, kForwardTest);
"GemmConv-CPU", "GemmConv-GPU", kForwardTest);
ConvolutionTest2<DEVICE_TYPE_CPU, DEVICE_TYPE_GPU> test2(
"GemmConv-CPU", "GemmConv-GPU", convolutionType, kForwardTest);
"GemmConv-CPU", "GemmConv-GPU", kForwardTest);
}
TEST(BackwardInput, GEMM) {
ConvolutionTest<DEVICE_TYPE_CPU, DEVICE_TYPE_GPU> test(
"GemmConvGradInput-CPU",
"GemmConvGradInput-GPU",
convolutionType,
kBackwardInputTest);
"GemmConvGradInput-CPU", "GemmConvGradInput-GPU", kBackwardInputTest);
ConvolutionTest2<DEVICE_TYPE_CPU, DEVICE_TYPE_GPU> test2(
"GemmConvGradInput-CPU",
"GemmConvGradInput-GPU",
convolutionType,
kBackwardInputTest);
"GemmConvGradInput-CPU", "GemmConvGradInput-GPU", kBackwardInputTest);
}
TEST(BackwardFilter, GEMM) {
ConvolutionTest<DEVICE_TYPE_CPU, DEVICE_TYPE_GPU> test(
"GemmConvGradFilter-CPU",
"GemmConvGradFilter-GPU",
convolutionType,
kBackwardFilterTest);
ConvolutionTest2<DEVICE_TYPE_CPU, DEVICE_TYPE_GPU> test2(
"GemmConvGradFilter-CPU",
"GemmConvGradFilter-GPU",
convolutionType,
kBackwardFilterTest);
}
#endif
// ======End Convolution TEST======
// ======Start DepthwiseConvolution TEST======
// TODO(zhaolong) The depthwise convolution cpu test will be added when the cpu
// version of depthwiseConv is implemented.
#ifndef PADDLE_ONLY_CPU
TEST(DepthwiseConvForward, GEMM) {
ConvolutionTest<DEVICE_TYPE_GPU, DEVICE_TYPE_GPU> test(
"GemmConv-GPU",
"DepthwiseConv-GPU",
depthwiseConvolutionType,
kForwardTest);
ConvolutionTest2<DEVICE_TYPE_CPU, DEVICE_TYPE_GPU> test2(
"GemmConv-GPU",
"DepthwiseConv-GPU",
depthwiseConvolutionType,
kForwardTest);
}
TEST(DepthwiseConvForward, GEMM2) {
ConvolutionTest<DEVICE_TYPE_GPU, DEVICE_TYPE_GPU> test(
"DepthwiseConv-GPU",
"DepthwiseConv-GPU",
depthwiseConvolutionType,
kForwardTest);
ConvolutionTest2<DEVICE_TYPE_CPU, DEVICE_TYPE_GPU> test2(
"DepthwiseConv-GPU",
"DepthwiseConv-GPU",
depthwiseConvolutionType,
kForwardTest);
}
TEST(DepthwiseConvBackwardInput, GEMM) {
ConvolutionTest<DEVICE_TYPE_CPU, DEVICE_TYPE_GPU> test(
"DepthwiseConvGradInput-GPU",
"DepthwiseConvGradInput-GPU",
depthwiseConvolutionType,
kBackwardInputTest);
ConvolutionTest2<DEVICE_TYPE_CPU, DEVICE_TYPE_GPU> test2(
"DepthwiseConvGradInput-GPU",
"DepthwiseConvGradInput-GPU",
depthwiseConvolutionType,
kBackwardInputTest);
}
TEST(DepthwiseConvBackwardFilter, GEMM) {
ConvolutionTest<DEVICE_TYPE_CPU, DEVICE_TYPE_GPU> test(
"DepthwiseConvGradFilter-GPU",
"DepthwiseConvGradFilter-GPU",
depthwiseConvolutionType,
kBackwardFilterTest);
"GemmConvGradFilter-CPU", "GemmConvGradFilter-GPU", kBackwardFilterTest);
ConvolutionTest2<DEVICE_TYPE_CPU, DEVICE_TYPE_GPU> test2(
"DepthwiseConvGradFilter-GPU",
"DepthwiseConvGradFilter-GPU",
depthwiseConvolutionType,
kBackwardFilterTest);
"GemmConvGradFilter-CPU", "GemmConvGradFilter-GPU", kBackwardFilterTest);
}
#endif
// ======End DepthwiseConvolution TEST======
} // namespace paddle
......@@ -39,21 +39,22 @@ bool ExpandConvLayer::init(const LayerMap &layerMap,
filterShape_.resize(numInputs);
outputShape_.resize(numInputs);
string convType;
string convGradInputType;
string convGradFilterType;
std::string convType;
std::string convGradInputType;
std::string convGradFilterType;
for (int i = 0; i < config_.inputs_size(); i++) {
std::vector<size_t> paddings = {(size_t)paddingY_[i], (size_t)padding_[i]};
std::vector<size_t> strides = {(size_t)strideY_[i], (size_t)stride_[i]};
if (useGpu_ && (size_t)groups_[i] == (size_t)channels_[i] && !isDeconv_) {
convType = "DepthwiseConv" convGradInputType =
"DepthwiseConvGradInput" convGradFilterType =
"DepthwiseConvGradFilter"
convType = "DepthwiseConv";
convGradInputType = "DepthwiseConvGradInput";
convGradFilterType = "DepthwiseConvGradFilter";
} else {
convType = "GemmConv" convGradInputType =
"GemmConvGradInput" convGradFilterType = "GemmConvGradFilter"
convType = "GemmConv";
convGradInputType = "GemmConvGradInput";
convGradFilterType = "GemmConvGradFilter";
}
if (FLAGS_use_nnpack) {
......
......@@ -349,13 +349,13 @@ TEST(Layer, CosSimVecMatLayer) {
void testDepthwiseConvLayer(const string& type, bool useGpu) {
TestConfig config;
config.biasSize = 16;
config.biasSize = 32;
config.layerConfig.set_type(type);
config.layerConfig.set_num_filters(16);
config.layerConfig.set_num_filters(32);
config.layerConfig.set_partial_sum(1);
config.layerConfig.set_shared_biases(true);
config.inputDefs.push_back({INPUT_DATA, "layer_0", 2048, 192 / 2});
config.inputDefs.push_back({INPUT_DATA, "layer_0", 2048, 192});
LayerInputConfig* input = config.layerConfig.add_inputs();
ConvConfig* conv = input->mutable_conv_conf();
conv->set_filter_size(2);
......@@ -388,8 +388,11 @@ void testDepthwiseConvLayer(const string& type, bool useGpu) {
}
TEST(Layer, depthwiseConvLayer) {
// 'depthwise_conv' is a special case of 'exconv' whose
// group count equals the number of input channels.
testDepthwiseConvLayer("exconv", /* useGpu= */ false);
#ifndef PADDLE_ONLY_CPU
testDepthwiseConvLayer("depthwise_conv", /* useGpu= */ true);
testDepthwiseConvLayer("exconv", /* useGpu= */ true);
#endif
}
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册