From fd925943532e7b33314cd8826afe2fbd39233330 Mon Sep 17 00:00:00 2001
From: wangyang59
Date: Mon, 21 Nov 2016 11:44:14 -0800
Subject: [PATCH] added test_ConvUnify

---
 paddle/gserver/tests/CMakeLists.txt     |   7 ++
 paddle/gserver/tests/test_ConvUnify.cpp | 133 ++++++++++++++++++++++++
 2 files changed, 140 insertions(+)
 create mode 100644 paddle/gserver/tests/test_ConvUnify.cpp

diff --git a/paddle/gserver/tests/CMakeLists.txt b/paddle/gserver/tests/CMakeLists.txt
index 0651d0b4733..79741bef2fe 100644
--- a/paddle/gserver/tests/CMakeLists.txt
+++ b/paddle/gserver/tests/CMakeLists.txt
@@ -34,7 +34,14 @@ add_unittest_without_exec(test_ConvTrans
 
 add_test(NAME test_ConvTrans
          COMMAND test_ConvTrans)
 
+################# test_ConvUnify #######################
+add_unittest_without_exec(test_ConvUnify
+    test_ConvUnify.cpp
+    LayerGradUtil.cpp
+    TestUtil.cpp)
+add_test(NAME test_ConvUnify
+         COMMAND test_ConvUnify)
 ################## test_Evaluator #######################
 add_unittest(test_Evaluator
     test_Evaluator.cpp
diff --git a/paddle/gserver/tests/test_ConvUnify.cpp b/paddle/gserver/tests/test_ConvUnify.cpp
new file mode 100644
index 00000000000..f1442ca7b83
--- /dev/null
+++ b/paddle/gserver/tests/test_ConvUnify.cpp
@@ -0,0 +1,133 @@
+/* Copyright (c) 2016 Baidu, Inc. All Rights Reserve.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License. */
+
+#include <gtest/gtest.h>
+#include <string>
+#include <vector>
+#include "paddle/gserver/layers/DataLayer.h"
+#include "ModelConfig.pb.h"
+#include "paddle/trainer/Trainer.h"
+#include "paddle/utils/GlobalConstants.h"
+#include "paddle/gserver/layers/ExpandConvTransLayer.h"
+#include "paddle/math/MathUtils.h"
+
+#include "TestUtil.h"
+#include "LayerGradUtil.h"
+
+using namespace paddle;  // NOLINT
+using namespace std;     // NOLINT
+
+P_DECLARE_bool(use_gpu);
+P_DECLARE_int32(gpu_id);
+P_DECLARE_double(checkgrad_eps);
+P_DECLARE_bool(thread_local_rand_use_global_seed);
+P_DECLARE_bool(prev_batch_state);
+
+// Do one forward pass of a conv layer (exconv on CPU, cudnn_conv on GPU)
+// with the given configuration and parameters, and return its output value
+// so the CPU and GPU results can be compared.
+MatrixPtr doOneConvTest(size_t imgSize, size_t output_x, size_t stride,
+                        size_t padding, size_t filter_size, size_t channel,
+                        size_t numfilters, MatrixPtr& inputData,
+                        real* param, bool useGpu) {
+  TestConfig config;
+  config.biasSize = numfilters;
+  if (useGpu) {
+    config.layerConfig.set_type("cudnn_conv");
+  } else {
+    config.layerConfig.set_type("exconv");
+  }
+  config.layerConfig.set_num_filters(numfilters);
+  config.layerConfig.set_partial_sum(1);
+  config.layerConfig.set_shared_biases(true);
+
+  config.inputDefs.push_back({INPUT_DATA, "layer_0",
+                              imgSize * imgSize * channel,
+                              channel * filter_size * filter_size *
+                                  config.layerConfig.num_filters()});
+  LayerInputConfig* input = config.layerConfig.add_inputs();
+  ConvConfig* conv = input->mutable_conv_conf();
+  conv->set_filter_size(filter_size);
+  conv->set_filter_size_y(filter_size);
+  conv->set_channels(channel);
+  conv->set_padding(padding);
+  conv->set_padding_y(padding);
+  conv->set_stride(stride);
+  conv->set_stride_y(stride);
+  conv->set_groups(1);
+  conv->set_filter_channels(channel);
+  conv->set_img_size(imgSize);
+  conv->set_output_x(output_x);
+
+  config.layerConfig.set_size(conv->output_x() * conv->output_x() *
+                              config.layerConfig.num_filters());
+  config.layerConfig.set_name("conv");
+
+  std::vector<DataLayerPtr> dataLayers;
+  LayerMap layerMap;
+  vector<Argument> datas;
+  initDataLayer(config, &dataLayers, &datas, &layerMap, "conv",
+                1, false, useGpu);
+  dataLayers[0]->getOutputValue()->zeroMem();
+  dataLayers[0]->getOutputValue()->copyFrom(*inputData);
+
+  // test layer initialize
+  std::vector<ParameterPtr> parameters;
+  LayerPtr convLayer;
+  initTestLayer(config, &layerMap, &parameters, &convLayer);
+  convLayer->getBiasParameter()->zeroMem();
+  convLayer->getParameters()[0]->zeroMem();
+  convLayer->getParameters()[0]->getBuf(PARAMETER_VALUE)->copyFrom(param, 18);
+  convLayer->forward(PASS_GC);
+
+  return convLayer->getOutputValue();
+}
+
+TEST(Layer, convTransLayerFwd2) {
+  MatrixPtr input, resultCpu, resultGpu;
+  input = Matrix::create(1, 4 * 4, false, false);
+  float inputData[] = {1, 2, 3, 4,
+                       5, 6, 7, 8,
+                       9, 10, 11, 12,
+                       13, 14, 15, 16};
+  float param[] = {1, 2, 3, 4, 5, 6, 7, 8, 9,
+                   9, 8, 7, 6, 5, 4, 3, 2, 1};
+
+  input->setData(inputData);
+
+  resultCpu = doOneConvTest(/* imgSize */ 4,
+                            /* output_x */ 2,
+                            /* stride */ 1,
+                            /* padding */ 0,
+                            /* filter_size */ 3,
+                            /* channel */ 1,
+                            /* numfilters */ 2,
+                            input, param, false);
+
+  resultGpu = doOneConvTest(/* imgSize */ 4,
+                            /* output_x */ 2,
+                            /* stride */ 1,
+                            /* padding */ 0,
+                            /* filter_size */ 3,
+                            /* channel */ 1,
+                            /* numfilters */ 2,
+                            input, param, true);
+  checkMatrixEqual(resultCpu, resultGpu);
+}
+
+int main(int argc, char** argv) {
+  testing::InitGoogleTest(&argc, argv);
+  initMain(argc, argv);
+  FLAGS_thread_local_rand_use_global_seed = true;
+  srand(1);
+  return RUN_ALL_TESTS();
+}
-- 
GitLab
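
Not part of the patch above, just an aid for checking the shapes by hand: a minimal standalone C++ sketch of the naive cross-correlation described by the test's parameters (4x4 single-channel input, two 3x3 filters, stride 1, no padding). It only illustrates how imgSize, filter_size, stride and padding determine output_x; it does not claim to reproduce the exact weight layout or kernel-flipping convention used by exconv or cudnn_conv, and its printed values are illustrative rather than asserted reference outputs.

// Standalone sketch (assumption: plain cross-correlation over a row-major
// layout); not taken from the PaddlePaddle sources.
#include <cstdio>
#include <vector>

int main() {
  const int imgSize = 4, filterSize = 3, stride = 1, padding = 0;
  // Usual output-size formula: (imgSize + 2*padding - filterSize)/stride + 1,
  // which gives 2 here and matches the output_x passed to doOneConvTest.
  const int outputX = (imgSize + 2 * padding - filterSize) / stride + 1;

  std::vector<float> img = {1, 2,  3,  4,  5,  6,  7,  8,
                            9, 10, 11, 12, 13, 14, 15, 16};
  // Two 3x3 filters, row-major, mirroring the test's param array.
  std::vector<std::vector<float> > filters = {
      {1, 2, 3, 4, 5, 6, 7, 8, 9}, {9, 8, 7, 6, 5, 4, 3, 2, 1}};

  for (size_t f = 0; f < filters.size(); ++f) {
    std::printf("filter %d:\n", static_cast<int>(f));
    for (int oy = 0; oy < outputX; ++oy) {
      for (int ox = 0; ox < outputX; ++ox) {
        float sum = 0;
        // Slide the filter window; the bounds check only matters if padding > 0.
        for (int ky = 0; ky < filterSize; ++ky) {
          for (int kx = 0; kx < filterSize; ++kx) {
            const int iy = oy * stride - padding + ky;
            const int ix = ox * stride - padding + kx;
            if (iy >= 0 && iy < imgSize && ix >= 0 && ix < imgSize) {
              sum += img[iy * imgSize + ix] * filters[f][ky * filterSize + kx];
            }
          }
        }
        std::printf("%8.1f", sum);
      }
      std::printf("\n");
    }
  }
  return 0;
}

Any C++11 compiler can build this sketch. The point is only that both the exconv and the cudnn_conv path in the test are configured for the same 2x2-per-filter output, which is what allows checkMatrixEqual to compare the two results element by element.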