diff --git a/paddle/gserver/layers/ConvBaseLayer.cpp b/paddle/gserver/layers/ConvBaseLayer.cpp
index 607fb99cf6288dfc6906147fbcafc388b87ee48b..5bc22f477932c9fe10c6c833e2bc641cf031b78c 100644
--- a/paddle/gserver/layers/ConvBaseLayer.cpp
+++ b/paddle/gserver/layers/ConvBaseLayer.cpp
@@ -48,8 +48,20 @@ bool ConvBaseLayer::init(const LayerMap& layerMap,
     outputW_.push_back(conf.output_x());
   }
 
+  CHECK(inputLayers_.size() == parameters_.size());
+  for (size_t i = 0; i < inputLayers_.size(); i++) {
+    size_t height, width;
+    height = filterPixels_[i] * filterChannels_[i];
+    width = (!isDeconv_) ? numFilters_ : channels_[i];
+
+    // create a new weight
+    CHECK_EQ(parameters_[i]->getSize(), width * height);
+    Weight* w = new Weight(height, width, parameters_[i]);
+    weights_.emplace_back(w);
+  }
+
   /* initialize the biases_ */
-  if (biasParameter_.get() != NULL) {
+  if (biasParameter_.get()) {
     if (sharedBiases_) {
       CHECK_EQ((size_t)numFilters_, biasParameter_->getSize());
       biases_ =
@@ -76,25 +88,46 @@ size_t ConvBaseLayer::calOutputSize() {
   clearAndReserve(&outputH_);
   clearAndReserve(&outputW_);
   size_t layerSize = 0;
-  for (size_t i = 0; i < inputLayers_.size(); i++) {
-    imgSizeH_.push_back(inputLayers_[i]->getOutput().getFrameHeight());
-    imgSizeW_.push_back(inputLayers_[i]->getOutput().getFrameWidth());
-    if (imgSizeH_[i] == 0)
-      imgSizeH_[i] = config_.inputs(i).conv_conf().img_size();
-    if (imgSizeW_[i] == 0)
-      imgSizeW_[i] = config_.inputs(i).conv_conf().img_size();
-    outputH_.push_back(outputSize(imgSizeH_[i], filterSizeY_[i], paddingY_[i],
-                                  strideY_[i], caffeMode_));
-    outputW_.push_back(outputSize(imgSizeW_[i], filterSize_[i], padding_[i],
-                                  stride_[i], caffeMode_));
-    CHECK_EQ(outputH_[i], outputH_[0]);
-    CHECK_EQ(outputW_[i], outputW_[0]);
+
+  if (!isDeconv_) {
+    for (size_t i = 0; i < inputLayers_.size(); i++) {
+      imgSizeH_.push_back(inputLayers_[i]->getOutput().getFrameHeight());
+      imgSizeW_.push_back(inputLayers_[i]->getOutput().getFrameWidth());
+      if (imgSizeH_[i] == 0)
+        imgSizeH_[i] = config_.inputs(i).conv_conf().img_size();
+      if (imgSizeW_[i] == 0)
+        imgSizeW_[i] = config_.inputs(i).conv_conf().img_size();
+      outputH_.push_back(
+          outputSize(imgSizeH_[i], filterSizeY_[i], paddingY_[i], strideY_[i]));
+      outputW_.push_back(
+          outputSize(imgSizeW_[i], filterSize_[i], padding_[i], stride_[i]));
+      CHECK_EQ(outputH_[i], outputH_[0]);
+      CHECK_EQ(outputW_[i], outputW_[0]);
+    }
+    getOutput().setFrameHeight(outputH_[0]);
+    getOutput().setFrameWidth(outputW_[0]);
+    layerSize = outputH_[0] * outputW_[0] * size_t(numFilters_);
+  } else {
+    for (size_t i = 0; i < inputLayers_.size(); i++) {
+      outputH_.push_back(inputLayers_[i]->getOutput().getFrameHeight());
+      outputW_.push_back(inputLayers_[i]->getOutput().getFrameWidth());
+      if (outputH_[i] == 0)
+        outputH_[i] = config_.inputs(i).conv_conf().output_x();
+      if (outputW_[i] == 0)
+        outputW_[i] = config_.inputs(i).conv_conf().output_x();
+      imgSizeH_.push_back(
+          imageSize(outputH_[i], filterSizeY_[i], paddingY_[i], strideY_[i]));
+      imgSizeW_.push_back(
+          imageSize(outputW_[i], filterSize_[i], padding_[i], stride_[i]));
+      CHECK_EQ(imgSizeH_[i], imgSizeH_[0]);
+      CHECK_EQ(imgSizeW_[i], imgSizeW_[0]);
+    }
+    getOutput().setFrameHeight(imgSizeH_[0]);
+    getOutput().setFrameWidth(imgSizeW_[0]);
+    layerSize = imgSizeH_[0] * imgSizeW_[0] * size_t(numFilters_);
   }
-  getOutput().setFrameHeight(outputH_[0]);
-  getOutput().setFrameWidth(outputW_[0]);
-  layerSize = outputH_[0] * outputW_[0] * size_t(numFilters_);
-  return layerSize;
+  return layerSize;
 }
 
 }  // namespace paddle
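Note: the two branches of calOutputSize() are inverse size mappings, so outputSize() and imageSize() must round-trip exactly. A minimal standalone sketch of the caffe-mode formulas involved (illustrative helper names, not Paddle's actual declarations):

    #include <cstddef>

    // Convolution: image -> feature map (caffe-mode rounding assumed).
    size_t convOutputSize(size_t imageSize, size_t filterSize, size_t padding,
                          size_t stride) {
      return (imageSize - filterSize + 2 * padding) / stride + 1;
    }

    // Transposed convolution inverts the mapping: feature map -> image.
    size_t deconvImageSize(size_t outputSize, size_t filterSize, size_t padding,
                           size_t stride) {
      return (outputSize - 1) * stride + filterSize - 2 * padding;
    }

For example, a 10-pixel row with filterSize=4, padding=1, stride=2 maps to (10 - 4 + 2) / 2 + 1 = 5 outputs, and deconvImageSize(5, 4, 1, 2) recovers (5 - 1) * 2 + 4 - 2 = 10.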
diff --git a/paddle/gserver/layers/ExpandConvBaseLayer.cpp b/paddle/gserver/layers/ExpandConvBaseLayer.cpp
index 9693ad82450f45545ef284007e540163323ef688..75ac8245d8829a98fb6b7f8d72ab8bd3570042a1 100644
--- a/paddle/gserver/layers/ExpandConvBaseLayer.cpp
+++ b/paddle/gserver/layers/ExpandConvBaseLayer.cpp
@@ -45,15 +45,27 @@ bool ExpandConvBaseLayer::init(const LayerMap &layerMap,
     caffeMode_ = conf.caffe_mode();
   }
 
+  getOutputSize();
+
   return true;
 }
 
+size_t ExpandConvBaseLayer::getOutputSize() {
+  CHECK_NE(inputLayers_.size(), 0UL);
+  size_t layerSize = ConvBaseLayer::calOutputSize();
+  subN_.clear();
+  for (size_t i = 0; i < inputLayers_.size(); i++) {
+    subN_.push_back(outputH_[i] * outputW_[i]);
+  }
+  return layerSize;
+}
+
 void ExpandConvBaseLayer::resetExpandInput(size_t height, size_t width) {
   Matrix::resizeOrCreate(expandInput_, height, width, false, useGpu_);
 }
 
 void ExpandConvBaseLayer::addSharedBias() {
-  size_t mapW = getSize() / numFilters_;
+  size_t mapW = getOutputSize() / numFilters_;
   size_t mapH = getOutputValue()->getElementCnt() / mapW;
   MatrixPtr out =
       Matrix::create(getOutputValue()->getData(), mapH, mapW, false, useGpu_);
@@ -224,7 +236,7 @@ void ExpandConvBaseLayer::bpropWeights(MatrixPtr image, MatrixPtr out,
 }
 
 void ExpandConvBaseLayer::bpropSharedBias(MatrixPtr biases, MatrixPtr v) {
-  size_t mapW = getSize() / numFilters_;
+  size_t mapW = getOutputSize() / numFilters_;
   size_t mapH = v->getElementCnt() / mapW;
   MatrixPtr vTmp = Matrix::create(v->getData(), mapH, mapW, false, useGpu_);
 
diff --git a/paddle/gserver/layers/ExpandConvBaseLayer.h b/paddle/gserver/layers/ExpandConvBaseLayer.h
index 418c9dd6ce2ad6ecd9783c79957d87fb4cf02829..9858fa348c3fc85fdea0c017ca44fa047a6eaf42 100644
--- a/paddle/gserver/layers/ExpandConvBaseLayer.h
+++ b/paddle/gserver/layers/ExpandConvBaseLayer.h
@@ -34,14 +34,6 @@ protected:
   IntV subN_;
   /// subK_ = channels_ * filterPixels_ * groups_.
   IntV subK_;
-  /// The spatial dimensions of height of input feature map.
-  IntV imgSizeH_;
-  /// The spatial dimensions of width of input feature map.
-  IntV imgSizeW_;
-  /// The spatial dimensions of height of output feature map.
-  IntV outputH_;
-  /// The spatial dimensions of width of output feature map.
-  IntV outputW_;
 
   /*The expandInput_ and transOutValue_ are used for CPU expand conv calc
    * Expand one sample at a time. shape:
@@ -59,6 +51,7 @@ public:
 
   bool init(const LayerMap& layerMap, const ParameterMap& parameterMap);
 
+  size_t getOutputSize();
   /**
    * Create or resize expandInput_.
   */
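The getSize() → getOutputSize() swap in the shared-bias helpers makes the bias path recompute the current spatial geometry through calOutputSize() instead of reading the cached layer size. A small self-contained sketch of the reshape those helpers perform (sample dimensions are illustrative):

    #include <cassert>
    #include <cstddef>

    int main() {
      const size_t numFilters = 16, outputH = 8, outputW = 8, batchSize = 4;
      const size_t layerSize = outputH * outputW * numFilters;  // getOutputSize()
      // addSharedBias() views the (batchSize x layerSize) output as a
      // (mapH x mapW) matrix where each row holds one filter's map for one
      // sample, so a single bias value can be broadcast across each map.
      const size_t mapW = layerSize / numFilters;
      const size_t mapH = batchSize * layerSize / mapW;
      assert(mapW == outputH * outputW);
      assert(mapH == batchSize * numFilters);
      return 0;
    }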
diff --git a/paddle/gserver/layers/ExpandConvLayer.cpp b/paddle/gserver/layers/ExpandConvLayer.cpp
index 379823a6feb45551eea4cd1e262dfb924a046ddf..9f30fcf00a422599dc9ce34c14a3fbc7af378510 100644
--- a/paddle/gserver/layers/ExpandConvLayer.cpp
+++ b/paddle/gserver/layers/ExpandConvLayer.cpp
@@ -28,16 +28,6 @@ bool ExpandConvLayer::init(const LayerMap &layerMap,
   return true;
 }
 
-size_t ExpandConvLayer::getOutputSize() {
-  CHECK_NE(inputLayers_.size(), 0UL);
-  size_t layerSize = ConvBaseLayer::calOutputSize();
-  subN_.clear();
-  for (size_t i = 0; i < inputLayers_.size(); i++) {
-    subN_.push_back(outputH_[i] * outputW_[i]);
-  }
-  return layerSize;
-}
-
 void ExpandConvLayer::forward(PassType passType) {
   Layer::forward(passType);
 
diff --git a/paddle/gserver/layers/ExpandConvLayer.h b/paddle/gserver/layers/ExpandConvLayer.h
index b5cb448bdfcde9baf96a0e89f61cf157f8895e9c..c07188a406183416cd57e2d027ba1205f6b65176 100644
--- a/paddle/gserver/layers/ExpandConvLayer.h
+++ b/paddle/gserver/layers/ExpandConvLayer.h
@@ -38,8 +38,6 @@ public:
 
   bool init(const LayerMap& layerMap, const ParameterMap& parameterMap);
 
-  size_t getOutputSize();
-
   void forward(PassType passType);
   void backward(const UpdateCallback& callback);
 };
diff --git a/paddle/gserver/layers/ExpandConvTransLayer.cpp b/paddle/gserver/layers/ExpandConvTransLayer.cpp
index 1d630a4ecd031a7cd6ef2697fcbe03448ebe07b2..4c4016c30168f9980984ed1b73b50e4a43af08ad 100644
--- a/paddle/gserver/layers/ExpandConvTransLayer.cpp
+++ b/paddle/gserver/layers/ExpandConvTransLayer.cpp
@@ -34,34 +34,6 @@ bool ExpandConvTransLayer::init(const LayerMap &layerMap,
   return true;
 }
 
-// Why this is necessary after calling init?
-size_t ExpandConvTransLayer::getSize() {
-  CHECK_NE(inputLayers_.size(), 0UL);
-  imgSizeH_.clear();
-  imgSizeW_.clear();
-  outputH_.clear();
-  outputW_.clear();
-  subN_.clear();
-  size_t layerSize = 0;
-  for (size_t i = 0; i < inputLayers_.size(); i++) {
-    outputH_.push_back(inputLayers_[i]->getOutput().getFrameHeight());
-    outputW_.push_back(inputLayers_[i]->getOutput().getFrameWidth());
-    if (outputH_[i] == 0) outputH_[i] = outputX_[i];
-    if (outputW_[i] == 0) outputW_[i] = outputX_[i];
-    imgSizeH_.push_back(
-        imageSize(outputH_[i], filterSize_[i], padding_[i], stride_[i]));
-    imgSizeW_.push_back(
-        imageSize(outputW_[i], filterSize_[i], padding_[i], stride_[i]));
-    subN_.push_back(outputH_[i] * outputW_[i]);
-    CHECK(layerSize == 0 ||
-          imgSizeH_[i] * imgSizeW_[i] * (size_t)numFilters_ == layerSize);
-    layerSize = imgSizeH_[i] * imgSizeW_[i] * numFilters_;
-  }
-  getOutput().setFrameHeight(imgSizeH_[0]);
-  getOutput().setFrameWidth(imgSizeW_[0]);
-  return layerSize;
-}
-
 void ExpandConvTransLayer::forward(PassType passType) {
   Layer::forward(passType);
 
@@ -69,7 +41,7 @@ void ExpandConvTransLayer::forward(PassType passType) {
   /* note: one sample correspond to one colum, and the
    * transOutValue correspond sample to one row */
   int batchSize = inputLayers_[0]->getOutputValue()->getHeight();
-  resetOutput(batchSize, getSize());
+  resetOutput(batchSize, getOutputSize());
 
   MatrixPtr output = nullptr;
   for (size_t i = 0; i < inputLayers_.size(); ++i) {
diff --git a/paddle/gserver/layers/ExpandConvTransLayer.h b/paddle/gserver/layers/ExpandConvTransLayer.h
index ebcb34f073494553d44d54efbbe292dda40c46ac..d0c0469c351aa100dbe644470b158d74b16dc2f8 100644
--- a/paddle/gserver/layers/ExpandConvTransLayer.h
+++ b/paddle/gserver/layers/ExpandConvTransLayer.h
@@ -37,8 +37,6 @@ public:
 
   bool init(const LayerMap& layerMap, const ParameterMap& parameterMap);
 
-  size_t getSize();
-
   void forward(PassType passType);
   void backward(const UpdateCallback& callback);
 };
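Net effect of the layer changes: the duplicated size logic (the removed ExpandConvLayer::getOutputSize() and ExpandConvTransLayer::getSize()) now lives once in ExpandConvBaseLayer::getOutputSize() on top of the isDeconv_ branch in ConvBaseLayer::calOutputSize(). A heavily abridged, hypothetical skeleton of the resulting ownership:

    #include <cstddef>

    // Placeholder pixel counts; the real classes derive them per input.
    struct ConvBaseSketch {
      bool isDeconv = false;
      size_t outPixels = 36, imgPixels = 64, numFilters = 16;
      // Mirrors ConvBaseLayer::calOutputSize(): conv layers report the
      // feature-map size, deconv layers the reconstructed image size.
      size_t calOutputSize() const {
        return (isDeconv ? imgPixels : outPixels) * numFilters;
      }
    };

    struct ExpandConvBaseSketch : ConvBaseSketch {
      // Shared by ExpandConvLayer and ExpandConvTransLayer; the real method
      // also refreshes subN_ from outputH_/outputW_ as a side effect.
      size_t getOutputSize() const { return calOutputSize(); }
    };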
diff --git a/paddle/gserver/tests/test_ConvTrans.cpp b/paddle/gserver/tests/test_ConvTrans.cpp
index 787113d242391d7ec652b6e85d9f744afa051da5..756faf26516fe6513b0d147300ccd981760120d5 100644
--- a/paddle/gserver/tests/test_ConvTrans.cpp
+++ b/paddle/gserver/tests/test_ConvTrans.cpp
@@ -43,11 +43,11 @@ TEST(Layer, convTransLayerFwd) {
   configt.layerConfig.set_partial_sum(1);
   configt.layerConfig.set_shared_biases(true);
 
-  configt.inputDefs.push_back({INPUT_DATA, "layer_0", 1024, 288});
+  configt.inputDefs.push_back({INPUT_DATA, "layer_0", 1024, 384});
   LayerInputConfig* input = configt.layerConfig.add_inputs();
   ConvConfig* conv = input->mutable_conv_conf();
   conv->set_filter_size(2);
-  conv->set_filter_size_y(3);
+  conv->set_filter_size_y(4);
   conv->set_channels(16);
   conv->set_padding(0);
   conv->set_padding_y(1);
@@ -86,11 +86,11 @@ TEST(Layer, convTransLayerFwd) {
   config.layerConfig.set_partial_sum(1);
   config.layerConfig.set_shared_biases(true);
 
-  config.inputDefs.push_back({INPUT_DATA, "layer_1", 768, 288});
+  config.inputDefs.push_back({INPUT_DATA, "layer_1", 768, 384});
   input = config.layerConfig.add_inputs();
   conv = input->mutable_conv_conf();
   conv->set_filter_size(2);
-  conv->set_filter_size_y(3);
+  conv->set_filter_size_y(4);
   conv->set_channels(3);
   conv->set_padding(0);
   conv->set_padding_y(1);
diff --git a/python/paddle/trainer/config_parser.py b/python/paddle/trainer/config_parser.py
index b17ec6c6f6bf7dfee0ed0eb58ab212e2ffb05e67..b3d17a47a965f31d9ee39207054318c641bd04ab 100644
--- a/python/paddle/trainer/config_parser.py
+++ b/python/paddle/trainer/config_parser.py
@@ -1670,11 +1670,13 @@ class ConvTransLayerBase(LayerBase):
         if self.layer_type == "cudnn_convt":
             config_assert(use_gpu, "cudnn_convt only support GPU")
 
-        if (use_gpu == 1 and self.layer_type != "exconvt" and
-                (parallel_nn == 0 or self.config.device > -1)):
-            self.layer_type = "cudnn_convt"
-        else:
-            self.layer_type = "exconvt"
+#        if (use_gpu == 1 and self.layer_type != "exconvt" and
+#                (parallel_nn == 0 or self.config.device > -1)):
+#            self.layer_type = "cudnn_convt"
+#        else:
+#            self.layer_type = "exconvt"
+        # cudnn_convt has not been implemented so use exconvt only
+        self.layer_type = "exconvt"
         # need to specify layer in config
         self.config.type = self.layer_type
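Why the test constants move in lockstep: the new CHECK_EQ(parameters_[i]->getSize(), width * height) in ConvBaseLayer::init() ties the parameter count to filterPixels (filter_size * filter_size_y), so growing the filter from 2x3 to 2x4 scales the weights from 288 to 384 while the remaining factor stays at 48. A quick arithmetic check (values lifted from the test above):

    #include <cassert>

    int main() {
      const int oldPixels = 2 * 3;  // filter_size x filter_size_y before
      const int newPixels = 2 * 4;  // filter_size x filter_size_y after
      // Per-pixel factor (filterChannels x width) is unchanged: 48.
      assert(288 / oldPixels == 48 && 288 % oldPixels == 0);
      assert(384 / newPixels == 48 && 384 % newPixels == 0);
      return 0;
    }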