Commit b8afb140 authored by: W wangyang59

cudnn deconv implementation

Parent 5a933b44
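This commit adds a cuDNN-based deconvolution (convolution transpose) path. The logic shared with the existing convolution code is factored out: ConvProjection's implementation moves into a new ConvBaseProjection, and a thin ConvTransProjection (registered as projection type "convt") drives the same cuDNN kernels with the image and output roles swapped. Likewise, CudnnConvLayer is split into a CudnnConvBaseLayer plus thin CudnnConvLayer and CudnnConvTransLayer subclasses (the latter registered as "cudnn_convt"), and the projection and layer tests are extended to cover the transposed variants.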
@@ -25,12 +25,16 @@ filter_test(GSERVER_HEADER)
 filter_test(GSERVER_SOURCES)
 if(NOT WITH_GPU)
   list(REMOVE_ITEM GSERVER_HEADER
+      layers/CudnnConvBaseLayer.h
       layers/CudnnConvLayer.h
+      layers/CudnnConvTransLayer.h
       layers/CudnnPoolLayer.h
       layers/CudnnBatchNormLayer.h)
   list(REMOVE_ITEM GSERVER_SOURCES
+      layers/CudnnConvBaseLayer.cpp
       layers/CudnnConvLayer.cpp
+      layers/CudnnConvTransLayer.cpp
       layers/CudnnPoolLayer.cpp
       layers/CudnnBatchNormLayer.cpp)
   compile_cu_as_cpp(layers/LstmCompute.cu)
...
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include "ConvBaseProjection.h"
#include "paddle/utils/Stat.h"
namespace paddle {
ThreadLocalD<std::vector<MemoryHandle *>> ConvBaseProjection::convMem_;
ConvBaseProjection::ConvBaseProjection(const ProjectionConfig &config,
ParameterPtr parameter,
bool useGpu)
: Projection(config, parameter, useGpu) {
CHECK(useGpu); // only support GPU
getConvParams();
initCudnn();
size_t height = filterH_ * filterW_ * channels_ / groups_;
size_t width = numFilters_;
weight_.reset(new Weight(height, width, parameter));
weightOffset_ = height * width / groups_;
}
void ConvBaseProjection::getConvParams() {
const ConvConfig &conf = config_.conv_conf();
paddingH_ = conf.padding_y();
paddingW_ = conf.padding();
strideH_ = conf.stride_y();
strideW_ = conf.stride();
filterH_ = conf.filter_size_y();
filterW_ = conf.filter_size();
configImgH_ = conf.has_img_size_y() ? conf.img_size_y() : conf.img_size();
configImgW_ = conf.img_size();
configOutH_ = conf.has_output_y() ? conf.output_y() : conf.output_x();
configOutW_ = conf.output_x();
configChannels_ = conf.channels();
configNumFilters_ = config_.num_filters();
isDeconv_ = (config_.type() == "conv") ? false : true;
channels_ = (isDeconv_) ? configNumFilters_ : configChannels_;
numFilters_ = (isDeconv_) ? configChannels_ : configNumFilters_;
groups_ = conf.groups();
CHECK_EQ(channels_ % groups_, 0);
CHECK_EQ(numFilters_ % groups_, 0);
}
void ConvBaseProjection::initCudnn() {
hl_create_filter_descriptor(&filterDesc_,
channels_ / groups_,
numFilters_ / groups_,
filterH_,
filterW_);
hl_create_tensor_descriptor(&imageDesc_);
hl_create_tensor_descriptor(&outputDesc_);
hl_create_convolution_descriptor(&convDesc_,
imageDesc_,
filterDesc_,
paddingH_,
paddingW_,
strideH_,
strideW_);
// initialize all to default algorithms
fwdAlgo_ = 0;
bwdFilterAlgo_ = 0;
bwdDataAlgo_ = 0;
fwdLimitBytes_ = 0;
bwdDataLimitBytes_ = 0;
bwdFilterLimitBytes_ = 0;
workSpaceInBytes_ = 0;
batchNum_ = 0;
isSelectAlgo_ = false;
}
void ConvBaseProjection::reshapeTensorDesc(int batchSize) {
hl_tensor_reshape(imageDesc_,
batchSize,
channels_ / groups_,
imageH_,
imageW_,
channels_ * imageH_ * imageW_,
imageH_ * imageW_,
imageW_,
1);
hl_reset_convolution_descriptor(convDesc_,
imageDesc_,
filterDesc_,
paddingH_,
paddingW_,
strideH_,
strideW_);
// The stride between two consecutive images in ConvProjection may not be 1,
// for example, in the case of a ConcatenateLayer2 with two ConvProjections,
// the stride is the output_size of the ConcatenateLayer2.
// So the calculation of nStride differs from CudnnConvLayer's.
// In fact, "nStride = out_->value->getStride()" alone is sufficient.
// size_t nStride = numFilters_ * outputH_ * outputW_;
// if (out_->value->isContiguous()) {
// CHECK_EQ(nStride, out_->value->getWidth());
// } else {
// nStride = out_->value->getStride();
// }
size_t nStride = out_->value->getStride();
hl_tensor_reshape(outputDesc_,
batchSize,
numFilters_ / groups_,
outputH_,
outputW_,
nStride,
outputH_ * outputW_,
outputW_,
1);
}
void ConvBaseProjection::reshape(int batchSize) {
size_t width = calOutputSize();
CHECK_EQ(width, out_->value->getWidth());
if (isDeconv_) {
CHECK_EQ(static_cast<size_t>(configChannels_ * outputH_ * outputW_),
in_->value->getWidth())
<< "Wrong input size for convolution transpose"
<< " channels=" << configChannels_ << " outputH=" << outputH_
<< " outputW=" << outputW_ << " inputSize=" << in_->value->getWidth();
} else {
CHECK_EQ(static_cast<size_t>(configChannels_ * imageH_ * imageW_),
in_->value->getWidth())
<< "Wrong input size for convolution"
<< " channels=" << configChannels_ << " imageH=" << imageH_
<< " imageW=" << imageW_ << " inputSize=" << in_->value->getWidth();
}
isSelectAlgo_ = (batchSize == batchNum_);
batchNum_ = batchSize;
if (!isSelectAlgo_) {
reshapeTensorDesc(batchSize);
hl_conv_workspace(imageDesc_,
outputDesc_,
filterDesc_,
convDesc_,
&fwdAlgo_,
&fwdLimitBytes_,
&bwdDataAlgo_,
&bwdDataLimitBytes_,
&bwdFilterAlgo_,
&bwdFilterLimitBytes_);
size_t maxWorkSpace = 0;
maxWorkSpace = std::max(fwdLimitBytes_, bwdDataLimitBytes_);
maxWorkSpace = std::max(maxWorkSpace, bwdFilterLimitBytes_);
workSpaceInBytes_ = maxWorkSpace;
VLOG(3) << getName() << " Fwd / BwdData / BwdFilter algo: " << fwdAlgo_
<< " / " << bwdDataAlgo_ << " / " << bwdFilterAlgo_;
}
isSelectAlgo_ = true;
}
void *ConvBaseProjection::getSpaceBytes(size_t size) {
std::vector<MemoryHandle *> &convMem = *convMem_;
if (convMem.empty()) {
int numDevices = hl_get_device_count();
convMem.resize(numDevices);
}
int devId = hl_get_device();
MemoryHandle **localMem = &(convMem[devId]);
if (NULL == *localMem || size > (*localMem)->getAllocSize()) {
*localMem = new GpuMemoryHandle(size);
}
return (*localMem)->getBuf();
}
ConvBaseProjection::~ConvBaseProjection() {
hl_destroy_tensor_descriptor(imageDesc_);
hl_destroy_tensor_descriptor(outputDesc_);
hl_destroy_filter_descriptor(filterDesc_);
hl_destroy_convolution_descriptor(convDesc_);
}
} // namespace paddle
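Note on the workspace handling above: getSpaceBytes() keeps one lazily grown buffer per GPU device in a thread-local vector, so repeated reshape() calls reuse the largest workspace allocated so far. A minimal standalone sketch of the same grow-only caching pattern, with plain C++ stand-ins for ThreadLocalD, MemoryHandle, and GpuMemoryHandle (all names below are illustrative):

#include <cstddef>
#include <memory>
#include <vector>

// Stand-in for GpuMemoryHandle: remembers its allocation size.
struct Buffer {
  explicit Buffer(size_t n) : allocSize(n), data(new char[n]) {}
  size_t allocSize;
  std::unique_ptr<char[]> data;
};

// One cached buffer per device, grown only when a larger request arrives,
// mirroring ConvBaseProjection::getSpaceBytes().
void* getSpaceBytes(std::vector<std::unique_ptr<Buffer>>& convMem,
                    int devId, size_t size) {
  // The real code sizes the vector once with hl_get_device_count().
  if (static_cast<int>(convMem.size()) <= devId) convMem.resize(devId + 1);
  std::unique_ptr<Buffer>& localMem = convMem[devId];
  if (!localMem || size > localMem->allocSize) localMem.reset(new Buffer(size));
  return localMem->data.get();
}

int main() {
  std::vector<std::unique_ptr<Buffer>> convMem;
  void* p1 = getSpaceBytes(convMem, 0, 1024);  // allocates 1 KiB
  void* p2 = getSpaceBytes(convMem, 0, 512);   // reuses the 1 KiB buffer
  return p1 == p2 ? 0 : 1;
}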
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#pragma once
#include "Projection.h"
#include "paddle/math/MathUtils.h"
namespace paddle {
/**
* @brief Base class for ConvProjection and ConvTransProjection.
*/
class ConvBaseProjection : public Projection {
public:
/**
* Constructor.
*/
ConvBaseProjection(const ProjectionConfig& config,
ParameterPtr parameter,
bool useGpu);
~ConvBaseProjection();
protected:
void getConvParams();
void initCudnn();
void reshapeTensorDesc(int batchSize);
void reshape(int batchSize);
size_t calOutputSize() {
if (isDeconv_) {
outputH_ = in_->getFrameHeight();
outputW_ = in_->getFrameWidth();
if (outputH_ == 0) outputH_ = configOutH_;
if (outputW_ == 0) outputW_ = configOutW_;
imageH_ = imageSize(outputH_,
filterH_,
paddingH_,
strideH_,
/* caffeMode */ true);
imageW_ = imageSize(outputW_,
filterW_,
paddingW_,
strideW_,
/* caffeMode */ true);
const_cast<Argument*>(out_)->setFrameHeight(imageH_);
const_cast<Argument*>(out_)->setFrameWidth(imageW_);
inputOffset_ = (configChannels_ / groups_) * outputH_ * outputW_;
outputOffset_ = (configNumFilters_ / groups_) * imageH_ * imageW_;
return imageH_ * imageW_ * configNumFilters_;
} else {
imageH_ = in_->getFrameHeight();
imageW_ = in_->getFrameWidth();
if (imageH_ == 0) imageH_ = configImgH_;
if (imageW_ == 0) imageW_ = configImgW_;
outputH_ = outputSize(imageH_,
filterH_,
paddingH_,
strideH_,
/* caffeMode */ true);
outputW_ = outputSize(imageW_,
filterW_,
paddingW_,
strideW_,
/* caffeMode */ true);
const_cast<Argument*>(out_)->setFrameHeight(outputH_);
const_cast<Argument*>(out_)->setFrameWidth(outputW_);
inputOffset_ = (configChannels_ / groups_) * imageH_ * imageW_;
outputOffset_ = (configNumFilters_ / groups_) * outputH_ * outputW_;
return outputH_ * outputW_ * configNumFilters_;
}
}
static void* getSpaceBytes(size_t size);
/// True for a deconv (ConvTransProjection) projection, false for
/// a ConvProjection.
bool isDeconv_;
/// imageH_ and imageW_ / outputH_ and outputW_
/// are calculated from the input layer.
int imageH_, imageW_;
int outputH_, outputW_;
/// configImgH_ and configImgW_ / configOutH_ and configOutW_
/// are obtained from the config.
int configImgH_, configImgW_;
int configOutH_, configOutW_;
/// channels_ and numFilters_ are defined in terms of convolution semantics
int channels_, numFilters_;
/// configChannels_ and configNumFilters_ are obtained from the config.
/// For Conv they are the same as channels_ and numFilters_.
/// For ConvTrans they are swapped relative to channels_ and numFilters_.
int configChannels_, configNumFilters_;
int paddingH_, paddingW_;
int strideH_, strideW_;
int filterH_, filterW_;
/// One group offset of input data.
int inputOffset_;
/// One group offset of output data.
int outputOffset_;
/// One group offset of weight.
int weightOffset_;
int groups_;
/// Cudnn tensor descriptor for input.
hl_tensor_descriptor imageDesc_;
/// Cudnn tensor descriptor for output.
hl_tensor_descriptor outputDesc_;
/// Cudnn filter descriptor.
hl_filter_descriptor filterDesc_;
/// Cudnn convolution descriptor for the convolution operation.
hl_convolution_descriptor convDesc_;
/// Record the algorithm for forward convolution, obtained from the cuDNN
/// API that searches for the best-suited algorithm.
int fwdAlgo_;
/// Record the algorithm for computing convolution gradient with respect to
/// filter coefficients.
int bwdFilterAlgo_;
/// Record the algorithm for computing convolution gradient with respect to
/// the output.
int bwdDataAlgo_;
/// Amount of GPU memory needed as workspace to be able to execute a
/// forward convolution with the specified algo.
size_t fwdLimitBytes_;
/// Amount of GPU memory needed as workspace to be able to execute a
/// backwardData with the specified algo.
size_t bwdDataLimitBytes_;
/// Amount of GPU memory needed as workspace to be able to execute a
/// backwardFilter with the specified algo.
size_t bwdFilterLimitBytes_;
/// Size of total work space.
size_t workSpaceInBytes_;
/// Whether to call cuDNN api to choose conv algorithm.
bool isSelectAlgo_;
/// batchNum_ records the batch size. If the batch size changes,
/// algorithm selection is run again.
int batchNum_;
bool bias_;
std::unique_ptr<Weight> weight_;
static ThreadLocalD<std::vector<MemoryHandle*>> convMem_;
};
} // namespace paddle
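calOutputSize() leans on outputSize() and imageSize() from paddle/math/MathUtils.h being inverses of each other: the deconv branch derives its image size from the input's spatial size by running the conv shape formula backwards. A standalone sketch of the caffeMode arithmetic; the formulas below are assumed from MathUtils rather than copied from it:

#include <cassert>

// Assumed caffeMode formulas (paddle/math/MathUtils.h):
//   conv:   out = (in - filter + 2 * pad) / stride + 1
//   deconv: in  = (out - 1) * stride + filter - 2 * pad   (the inverse)
int outputSizeRef(int imageSize, int filterSize, int padding, int stride) {
  return (imageSize - filterSize + 2 * padding) / stride + 1;
}
int imageSizeRef(int outputSize, int filterSize, int padding, int stride) {
  return (outputSize - 1) * stride + filterSize - 2 * padding;
}

int main() {
  // Round trip with the shapes used in testProjectionConv: a 16x16 image,
  // 2x2 filter, stride 2, no padding gives an 8x8 output, and imageSizeRef
  // recovers 16. This inverse relation is why the deconv branch of
  // calOutputSize() can compute its image size with the same helpers.
  int out = outputSizeRef(16, 2, 0, 2);
  assert(out == 8);
  assert(imageSizeRef(out, 2, 0, 2) == 16);
  return 0;
}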
@@ -19,151 +19,6 @@ namespace paddle {
 REGISTER_PROJECTION(conv, ConvProjection);
ThreadLocalD<std::vector<MemoryHandle *>> ConvProjection::convMem_;
ConvProjection::ConvProjection(const ProjectionConfig &config,
ParameterPtr parameter,
bool useGpu)
: Projection(config, parameter, useGpu) {
CHECK(useGpu); // only support GPU
getConvParams();
initCudnn();
size_t height = filterH_ * filterW_ * channels_ / groups_;
size_t width = numFilters_;
weight_.reset(new Weight(height, width, parameter));
weightOffset_ = height * width / groups_;
}
void ConvProjection::getConvParams() {
const ConvConfig &conf = config_.conv_conf();
paddingH_ = conf.padding_y();
paddingW_ = conf.padding();
strideH_ = conf.stride_y();
strideW_ = conf.stride();
filterH_ = conf.filter_size_y();
filterW_ = conf.filter_size();
configImgH_ = conf.has_img_size_y() ? conf.img_size_y() : conf.img_size();
configImgW_ = conf.img_size();
channels_ = conf.channels();
numFilters_ = config_.num_filters();
groups_ = conf.groups();
CHECK_EQ(channels_ % groups_, 0);
CHECK_EQ(numFilters_ % groups_, 0);
}
void ConvProjection::initCudnn() {
hl_create_filter_descriptor(&filterDesc_,
channels_ / groups_,
numFilters_ / groups_,
filterH_,
filterW_);
hl_create_tensor_descriptor(&inputDesc_);
hl_create_tensor_descriptor(&outputDesc_);
hl_create_convolution_descriptor(&convDesc_,
inputDesc_,
filterDesc_,
paddingH_,
paddingW_,
strideH_,
strideW_);
// initialize all to default algorithms
fwdAlgo_ = 0;
bwdFilterAlgo_ = 0;
bwdDataAlgo_ = 0;
fwdLimitBytes_ = 0;
bwdDataLimitBytes_ = 0;
bwdFilterLimitBytes_ = 0;
workSpaceInBytes_ = 0;
batchNum_ = 0;
isSelectAlgo_ = false;
}
void ConvProjection::reshapeTensorDesc(int batchSize) {
hl_tensor_reshape(inputDesc_,
batchSize,
channels_ / groups_,
imageH_,
imageW_,
channels_ * imageH_ * imageW_,
imageH_ * imageW_,
imageW_,
1);
hl_reset_convolution_descriptor(convDesc_,
inputDesc_,
filterDesc_,
paddingH_,
paddingW_,
strideH_,
strideW_);
// The stride between two consecutive images in ConvProjection may not be 1,
// for example, in the case of layer ConcatenateLayer2 with two
// ConvProjection, the stride is the output_size of layer ConcatenateLayer2.
// So the calculation of nStride is different from CudnnConvLayer.
// In fact, only "nStride = out_->value->getStride()" is ok.
size_t nStride = numFilters_ * outputH_ * outputW_;
if (out_->value->isContiguous()) {
CHECK_EQ(nStride, out_->value->getWidth());
} else {
nStride = out_->value->getStride();
}
hl_tensor_reshape(outputDesc_,
batchSize,
numFilters_ / groups_,
outputH_,
outputW_,
nStride,
outputH_ * outputW_,
outputW_,
1);
}
void ConvProjection::reshape(int batchSize) {
size_t width = calOutputSize();
CHECK_EQ(width, out_->value->getWidth());
CHECK_EQ(static_cast<size_t>(channels_ * imageH_ * imageW_),
in_->value->getWidth())
<< "Wrong input size for convolution"
<< " channels=" << channels_ << " imageH=" << imageH_
<< " imageW=" << imageW_ << " inputSize=" << in_->value->getWidth();
isSelectAlgo_ = (batchSize == batchNum_);
batchNum_ = batchSize;
if (!isSelectAlgo_) {
reshapeTensorDesc(batchSize);
hl_conv_workspace(inputDesc_,
outputDesc_,
filterDesc_,
convDesc_,
&fwdAlgo_,
&fwdLimitBytes_,
&bwdDataAlgo_,
&bwdDataLimitBytes_,
&bwdFilterAlgo_,
&bwdFilterLimitBytes_);
size_t maxWorkSpace = 0;
maxWorkSpace = std::max(fwdLimitBytes_, bwdDataLimitBytes_);
maxWorkSpace = std::max(maxWorkSpace, bwdFilterLimitBytes_);
workSpaceInBytes_ = maxWorkSpace;
VLOG(3) << getName() << " Fwd / BwdData / BwdFilter algo: " << fwdAlgo_
<< " / " << bwdDataAlgo_ << " / " << bwdFilterAlgo_;
}
isSelectAlgo_ = true;
}
void ConvProjection::forward() {
  int batchSize = in_->value->getHeight();
  reshape(batchSize);
@@ -179,7 +34,7 @@ void ConvProjection::forward() {
    real *inputData = in_->value->getData() + g * inputOffset_;
    real *wgtData = weight_->getW()->getData() + g * weightOffset_;
    real *outData = out_->value->getData() + g * outputOffset_;
-    hl_convolution_forward(inputDesc_,
+    hl_convolution_forward(imageDesc_,
                           inputData,
                           outputDesc_,
                           outData,
@@ -205,7 +60,7 @@ void ConvProjection::backward(const UpdateCallback &callback) {
    if (weight_->getWGrad()) {
      real *inputData = in_->value->getData() + g * inputOffset_;
      real *weightGrad = weight_->getWGrad()->getData() + g * weightOffset_;
-      hl_convolution_backward_filter(inputDesc_,
+      hl_convolution_backward_filter(imageDesc_,
                                     inputData,
                                     outputDesc_,
                                     outGrad,
@@ -221,7 +76,7 @@ void ConvProjection::backward(const UpdateCallback &callback) {
    if (NULL != preGrad) {
      real *inputGrad = preGrad->getData() + g * inputOffset_;
      real *wgtData = weight_->getW()->getData() + g * weightOffset_;
-      hl_convolution_backward_data(inputDesc_,
+      hl_convolution_backward_data(imageDesc_,
                                   inputGrad,
                                   outputDesc_,
                                   outGrad,
@@ -237,26 +92,4 @@ void ConvProjection::backward(const UpdateCallback &callback) {
  weight_->getParameterPtr()->incUpdate(callback);
}
void *ConvProjection::getSpaceBytes(size_t size) {
std::vector<MemoryHandle *> &convMem = *convMem_;
if (convMem.empty()) {
int numDevices = hl_get_device_count();
convMem.resize(numDevices);
}
int devId = hl_get_device();
MemoryHandle **localMem = &(convMem[devId]);
if (NULL == *localMem || size > (*localMem)->getAllocSize()) {
*localMem = new GpuMemoryHandle(size);
}
return (*localMem)->getBuf();
}
ConvProjection::~ConvProjection() {
hl_destroy_tensor_descriptor(inputDesc_);
hl_destroy_tensor_descriptor(outputDesc_);
hl_destroy_filter_descriptor(filterDesc_);
hl_destroy_convolution_descriptor(convDesc_);
}
} // namespace paddle
@@ -14,7 +14,7 @@ limitations under the License. */
 #pragma once

-#include "Projection.h"
+#include "ConvBaseProjection.h"
 #include "paddle/math/MathUtils.h"

 namespace paddle {
@@ -22,109 +22,20 @@ namespace paddle {
 /**
  * @brief Convolution projection does the same calculation as CudnnConvLayer.
  */
-class ConvProjection : public Projection {
+class ConvProjection : public ConvBaseProjection {
 public:
  /**
   * Constructor.
   */
  ConvProjection(const ProjectionConfig& config,
                 ParameterPtr parameter,
-                bool useGpu);
+                bool useGpu)
+      : ConvBaseProjection(config, parameter, useGpu) {}
-  ~ConvProjection();
+  ~ConvProjection() {}

  virtual void forward();
  virtual void backward(const UpdateCallback& callback);
protected:
void getConvParams();
void initCudnn();
void reshapeTensorDesc(int batchSize);
void reshape(int batchSize);
size_t calOutputSize() {
imageH_ = in_->getFrameHeight();
imageW_ = in_->getFrameWidth();
if (imageH_ == 0) imageH_ = configImgH_;
if (imageW_ == 0) imageW_ = configImgW_;
outputH_ = outputSize(imageH_,
filterH_,
paddingH_,
strideH_,
/* caffeMode */ true);
outputW_ = outputSize(imageW_,
filterW_,
paddingW_,
strideW_,
/* caffeMode */ true);
const_cast<Argument*>(out_)->setFrameHeight(outputH_);
const_cast<Argument*>(out_)->setFrameWidth(outputW_);
inputOffset_ = (channels_ / groups_) * imageH_ * imageW_;
outputOffset_ = (numFilters_ / groups_) * outputH_ * outputW_;
return outputH_ * outputW_ * numFilters_;
}
static void* getSpaceBytes(size_t size);
/// imageH_ and imageW_ is calculated from the input layer.
int imageH_, imageW_;
/// configImgH_ and configImgW_ is obtained from config.
int configImgH_, configImgW_;
int outputH_, outputW_;
int channels_, numFilters_;
int paddingH_, paddingW_;
int strideH_, strideW_;
int filterH_, filterW_;
/// One group offset of input data.
int inputOffset_;
/// One group offset of output data.
int outputOffset_;
/// One group offset of weight.
int weightOffset_;
int groups_;
/// Cudnn tensor descriptor for input.
hl_tensor_descriptor inputDesc_;
/// Cudnn tensor descriptor for output.
hl_tensor_descriptor outputDesc_;
/// Cudnn tensor descriptor for filter.
hl_filter_descriptor filterDesc_;
/// Cudnn tensor descriptor for a convolution operation.
hl_convolution_descriptor convDesc_;
/// Record the algorithm for forward convolution, which is obtained by cudnn
/// api to search the best suited algorithm.
int fwdAlgo_;
/// Record the algorithm for computing convolution gradient with respect to
/// filter coefficients.
int bwdFilterAlgo_;
/// Record the algorithm for computing convolution gradient with respect to
/// the output.
int bwdDataAlgo_;
/// Amount of GPU memory needed as workspace to be able to execute a
/// forward convolution with the specified algo.
size_t fwdLimitBytes_;
/// Amount of GPU memory needed as workspace to be able to execute a
/// backwardFilter with the specified algo.
size_t bwdDataLimitBytes_;
/// Amount of GPU memory needed as workspace to be able to execute a
/// backwardData with the specified algo.
size_t bwdFilterLimitBytes_;
/// Size of total work space.
size_t workSpaceInBytes_;
/// Whether to call cuDNN api to choose conv algorithm.
bool isSelectAlgo_;
/// batchNum is used to record batch size. If the batch size is changed,
/// the selection algorithm will be called.
int batchNum_;
bool bias_;
std::unique_ptr<Weight> weight_;
static ThreadLocalD<std::vector<MemoryHandle*>> convMem_;
};

} // namespace paddle
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include "ConvTransProjection.h"
#include "paddle/utils/Stat.h"
namespace paddle {
REGISTER_PROJECTION(convt, ConvTransProjection);
void ConvTransProjection::forward() {
int batchSize = in_->value->getHeight();
reshape(batchSize);
void *workSpace = NULL;
if (workSpaceInBytes_ > 0) {
workSpace = getSpaceBytes(workSpaceInBytes_);
}
for (int g = 0; g < groups_; ++g) {
REGISTER_TIMER_INFO("CudnnConvTransFwTimer", getName().c_str());
real *inData = in_->value->getData() + g * inputOffset_;
real *wgtData = weight_->getW()->getData() + g * weightOffset_;
real *outData = out_->value->getData() + g * outputOffset_;
hl_convolution_backward_data(imageDesc_,
outData,
outputDesc_,
inData,
filterDesc_,
wgtData,
convDesc_,
workSpace,
bwdDataLimitBytes_,
bwdDataAlgo_);
}
}
void ConvTransProjection::backward(const UpdateCallback &callback) {
REGISTER_TIMER_INFO("CudnnConvTransBpTimer", getName().c_str());
void *workSpace = NULL;
if (workSpaceInBytes_ > 0) {
workSpace = getSpaceBytes(workSpaceInBytes_);
}
for (int g = 0; g < groups_; ++g) {
real *outGrad = out_->grad->getData() + g * outputOffset_;
if (weight_->getWGrad()) {
real *inData = in_->value->getData() + g * inputOffset_;
real *weightGrad = weight_->getWGrad()->getData() + g * weightOffset_;
hl_convolution_backward_filter(imageDesc_,
outGrad,
outputDesc_,
inData,
filterDesc_,
weightGrad,
convDesc_,
workSpace,
bwdFilterLimitBytes_,
bwdFilterAlgo_);
}
MatrixPtr preGrad = in_->grad;
if (NULL != preGrad) {
real *inGrad = preGrad->getData() + g * inputOffset_;
real *wgtData = weight_->getW()->getData() + g * weightOffset_;
hl_convolution_forward(imageDesc_,
outGrad,
outputDesc_,
inGrad,
filterDesc_,
wgtData,
convDesc_,
workSpace,
fwdLimitBytes_,
fwdAlgo_);
}
}
weight_->getParameterPtr()->incUpdate(callback);
}
} // namespace paddle
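The two functions above are the core of the deconv implementation: forward() calls hl_convolution_backward_data, the weight gradient still comes from hl_convolution_backward_filter with the image and output arguments swapped, and the input gradient uses hl_convolution_forward. This role swap is valid because convolution transpose is the adjoint (transpose) of convolution. A self-contained 1-D check of the adjoint identity <conv(x), y> = <x, convT(y)>, using reference loops with stride 1 and no padding (illustrative code, not part of Paddle):

#include <cassert>
#include <cstdio>
#include <vector>

// conv maps length n -> n - k + 1; convTranspose (the adjoint, i.e. what
// cuDNN's backward-data kernel computes) maps length m -> m + k - 1.
std::vector<double> conv(const std::vector<double>& x,
                         const std::vector<double>& w) {
  std::vector<double> y(x.size() - w.size() + 1, 0.0);
  for (size_t i = 0; i < y.size(); ++i)
    for (size_t j = 0; j < w.size(); ++j) y[i] += w[j] * x[i + j];
  return y;
}

std::vector<double> convTranspose(const std::vector<double>& y,
                                  const std::vector<double>& w) {
  std::vector<double> x(y.size() + w.size() - 1, 0.0);
  for (size_t i = 0; i < y.size(); ++i)
    for (size_t j = 0; j < w.size(); ++j) x[i + j] += w[j] * y[i];
  return x;
}

int main() {
  std::vector<double> x = {1, 2, 3, 4, 5}, w = {1, -1, 2};
  std::vector<double> y = {0.5, -1, 2};
  std::vector<double> cx = conv(x, w);           // length 3, matches y
  std::vector<double> ty = convTranspose(y, w);  // length 5, matches x
  double lhs = 0, rhs = 0;
  for (size_t i = 0; i < cx.size(); ++i) lhs += cx[i] * y[i];
  for (size_t i = 0; i < x.size(); ++i) rhs += x[i] * ty[i];
  assert(lhs == rhs);  // 13.5 == 13.5, exact for these values
  std::printf("adjoint check passed: %g == %g\n", lhs, rhs);
  return 0;
}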
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#pragma once
#include "ConvBaseProjection.h"
#include "paddle/math/MathUtils.h"
namespace paddle {
/**
 * @brief Convolution transpose projection, doing the same calculation as
 * CudnnConvTransLayer.
*/
class ConvTransProjection : public ConvBaseProjection {
public:
/**
* Constructor.
*/
ConvTransProjection(const ProjectionConfig& config,
ParameterPtr parameter,
bool useGpu)
: ConvBaseProjection(config, parameter, useGpu) {}
~ConvTransProjection() {}
virtual void forward();
virtual void backward(const UpdateCallback& callback);
};
} // namespace paddle
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include "CudnnConvBaseLayer.h"
#include "paddle/utils/Logging.h"
#include "paddle/utils/Stat.h"
namespace paddle {
bool CudnnConvBaseLayer::init(const LayerMap &layerMap,
const ParameterMap &parameterMap) {
if (!ConvBaseLayer::init(layerMap, parameterMap)) return false;
CHECK(useGpu_) << "CudnnConvLayer only support gpu";
CHECK_EQ(inputLayers_.size(), parameters_.size());
projections_.reserve(inputLayers_.size());
projConf_.reserve(inputLayers_.size());
numFilters_ = config_.num_filters();
CHECK(config_.shared_biases());
for (size_t i = 0; i < inputLayers_.size(); i++) {
ProjectionConfig *conf = new ProjectionConfig();
if (isDeconv_) {
conf->set_type("convt");
} else {
conf->set_type("conv");
}
conf->set_num_filters(numFilters_);
ConvConfig *convConf = conf->mutable_conv_conf();
*convConf = *(config_.mutable_inputs(i)->mutable_conv_conf());
conf->set_input_size(getPrev(i)->getSize());
conf->set_output_size(getSize());
projConf_.emplace_back(conf);
projections_.emplace_back(
Projection::create(*projConf_[i], parameters_[i], useGpu_));
}
if (biases_.get() && sharedBiases_) {
hl_create_tensor_descriptor(&biasDesc_);
hl_create_tensor_descriptor(&outputDesc_);
hl_tensor_reshape(biasDesc_, 1, numFilters_, 1, 1);
}
return true;
}
void CudnnConvBaseLayer::forward(PassType passType) {
Layer::forward(passType);
int batchSize = getInput(0).getBatchSize();
resetOutput(batchSize, calOutputSize());
for (size_t i = 0; i != inputLayers_.size(); ++i) {
projections_[i]->forward(&getInput(i), &getOutput(), passType);
}
if (biases_) {
REGISTER_TIMER_INFO("CudnnConvBiasTimer", getName().c_str());
int batchSize = inputLayers_[0]->getOutputValue()->getHeight();
int outH, outW;
if (isDeconv_) {
outH = imgSizeH_[0];
outW = imgSizeW_[0];
} else {
outH = outputH_[0];
outW = outputW_[0];
}
hl_tensor_reshape(outputDesc_,
batchSize,
numFilters_,
outH,
outW,
numFilters_ * outH * outW,
outH * outW,
outW,
1);
real *outData = getOutputValue()->getData();
real *biasData = biases_->getW()->getData();
hl_convolution_forward_add_bias(biasDesc_, biasData, outputDesc_, outData);
}
forwardActivation();
}
void CudnnConvBaseLayer::backward(const UpdateCallback &callback) {
backwardActivation();
if (biases_ && biases_->getWGrad()) {
REGISTER_TIMER_INFO("CudnnConvBpBiasTimer", getName().c_str());
real *biasGrad = biases_->getWGrad()->getData();
real *outGrad = getOutputGrad()->getData();
hl_convolution_backward_bias(biasDesc_, biasGrad, outputDesc_, outGrad);
biases_->getParameterPtr()->incUpdate(callback);
}
for (size_t i = 0; i != inputLayers_.size(); ++i) {
projections_[i]->backward(callback);
}
}
CudnnConvBaseLayer::~CudnnConvBaseLayer() {
if (biases_) {
hl_destroy_tensor_descriptor(biasDesc_);
hl_destroy_tensor_descriptor(outputDesc_);
}
}
} // namespace paddle
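For the bias path above, the bias is held as a (1, numFilters_, 1, 1) tensor and hl_convolution_forward_add_bias broadcasts it across the (N, C, H, W) output; in the deconv case the spatial size of that output is the image size (imgSizeH_[0], imgSizeW_[0]). A plain-C++ sketch of the broadcast the call performs conceptually (illustrative, not the cuDNN implementation):

#include <vector>

// Adds a per-channel bias (logical shape 1 x C x 1 x 1) to an N x C x H x W
// tensor in row-major NCHW layout -- the effect of
// hl_convolution_forward_add_bias with the descriptors set up in init().
void addBias(std::vector<float>& out, const std::vector<float>& bias,
             int N, int C, int H, int W) {
  for (int n = 0; n < N; ++n)
    for (int c = 0; c < C; ++c)
      for (int i = 0; i < H * W; ++i)
        out[(n * C + c) * H * W + i] += bias[c];
}

int main() {
  std::vector<float> out(2 * 3 * 4 * 4, 0.f);
  std::vector<float> bias = {1.f, 2.f, 3.f};
  addBias(out, bias, 2, 3, 4, 4);
  return out[16] == 2.f ? 0 : 1;  // first element of channel 1, sample 0
}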
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#pragma once
#include <vector>
#include "ConvBaseLayer.h"
#include "Projection.h"
#include "paddle/math/Matrix.h"
namespace paddle {
/**
 * @brief A 2-dimensional conv layer implemented by cuDNN. It only
 * supports GPU mode. We automatically select CudnnConvLayer for GPU
 * mode and ExpandConvLayer for CPU mode when the layer type is "conv".
 * Users can also specify the type as "exconv" or "cudnn_conv" to pick a
 * particular implementation.
*
* The config file api is img_conv_layer.
*/
class CudnnConvBaseLayer : public ConvBaseLayer {
protected:
std::vector<std::unique_ptr<ProjectionConfig>> projConf_;
std::vector<std::unique_ptr<Projection>> projections_;
hl_tensor_descriptor biasDesc_;
hl_tensor_descriptor outputDesc_;
public:
explicit CudnnConvBaseLayer(const LayerConfig& config)
: ConvBaseLayer(config) {}
~CudnnConvBaseLayer();
void forward(PassType passType) override;
void backward(const UpdateCallback& callback) override;
bool init(const LayerMap& layerMap, const ParameterMap& parameterMap)
override;
};
} // namespace paddle
@@ -20,97 +20,4 @@ namespace paddle {
 REGISTER_LAYER(cudnn_conv, CudnnConvLayer);
bool CudnnConvLayer::init(const LayerMap &layerMap,
const ParameterMap &parameterMap) {
if (!ConvBaseLayer::init(layerMap, parameterMap)) return false;
CHECK(useGpu_) << "CudnnConvLayer only support gpu";
CHECK_EQ(inputLayers_.size(), parameters_.size());
projections_.reserve(inputLayers_.size());
projConf_.reserve(inputLayers_.size());
numFilters_ = config_.num_filters();
CHECK(config_.shared_biases());
for (size_t i = 0; i < inputLayers_.size(); i++) {
ProjectionConfig *conf = new ProjectionConfig();
conf->set_type("conv");
conf->set_num_filters(numFilters_);
ConvConfig *convConf = conf->mutable_conv_conf();
*convConf = *(config_.mutable_inputs(i)->mutable_conv_conf());
conf->set_input_size(getPrev(i)->getSize());
conf->set_output_size(getSize());
projConf_.emplace_back(conf);
projections_.emplace_back(
Projection::create(*projConf_[i], parameters_[i], useGpu_));
}
if (biases_.get() && sharedBiases_) {
hl_create_tensor_descriptor(&biasDesc_);
hl_create_tensor_descriptor(&outputDesc_);
hl_tensor_reshape(biasDesc_, 1, numFilters_ / groups_[0], 1, 1);
biasOffset_ = numFilters_ / groups_[0];
}
return true;
}
void CudnnConvLayer::forward(PassType passType) {
Layer::forward(passType);
int batchSize = getInput(0).getBatchSize();
resetOutput(batchSize, calOutputSize());
for (size_t i = 0; i != inputLayers_.size(); ++i) {
projections_[i]->forward(&getInput(i), &getOutput(), passType);
}
if (biases_) {
REGISTER_TIMER_INFO("CudnnConvBiasTimer", getName().c_str());
int batchSize = inputLayers_[0]->getOutputValue()->getHeight();
hl_tensor_reshape(outputDesc_,
batchSize,
numFilters_ / groups_[0],
outputH_[0],
outputW_[0],
numFilters_ * outputH_[0] * outputW_[0],
outputH_[0] * outputW_[0],
outputW_[0],
1);
outputOffset_ = getOutputValue()->getWidth() / groups_[0];
for (int g = 0; g < groups_[0]; ++g) {
real *biasData = biases_->getW()->getData() + biasOffset_ * g;
real *outData = getOutputValue()->getData() + outputOffset_ * g;
hl_convolution_forward_add_bias(
biasDesc_, biasData, outputDesc_, outData);
}
}
forwardActivation();
}
void CudnnConvLayer::backward(const UpdateCallback &callback) {
backwardActivation();
if (biases_ && biases_->getWGrad()) {
REGISTER_TIMER_INFO("CudnnConvBpBiasTimer", getName().c_str());
for (int g = 0; g < groups_[0]; ++g) {
real *biasGrad = biases_->getWGrad()->getData() + biasOffset_ * g;
real *outGrad = getOutputGrad()->getData() + outputOffset_ * g;
hl_convolution_backward_bias(biasDesc_, biasGrad, outputDesc_, outGrad);
}
biases_->getParameterPtr()->incUpdate(callback);
}
for (size_t i = 0; i != inputLayers_.size(); ++i) {
projections_[i]->backward(callback);
}
}
CudnnConvLayer::~CudnnConvLayer() {
if (biases_) {
hl_destroy_tensor_descriptor(biasDesc_);
hl_destroy_tensor_descriptor(outputDesc_);
}
}
} // namespace paddle
@@ -15,7 +15,7 @@ limitations under the License. */
 #pragma once

 #include <vector>
-#include "ConvBaseLayer.h"
+#include "CudnnConvBaseLayer.h"
 #include "Projection.h"
 #include "paddle/math/Matrix.h"
@@ -30,27 +30,12 @@ namespace paddle {
 *
 * The config file api is img_conv_layer.
 */
-class CudnnConvLayer : public ConvBaseLayer {
+class CudnnConvLayer : public CudnnConvBaseLayer {
-protected:
-  std::vector<std::unique_ptr<ProjectionConfig>> projConf_;
-  std::vector<std::unique_ptr<Projection>> projections_;
-  hl_tensor_descriptor biasDesc_;
-  hl_tensor_descriptor outputDesc_;
-  int biasOffset_;
-  int outputOffset_;
 public:
-  explicit CudnnConvLayer(const LayerConfig& config) : ConvBaseLayer(config) {}
+  explicit CudnnConvLayer(const LayerConfig& config)
+      : CudnnConvBaseLayer(config) {}
-  ~CudnnConvLayer();
+  ~CudnnConvLayer() {}
-  bool init(const LayerMap& layerMap,
-            const ParameterMap& parameterMap) override;
-  void forward(PassType passType) override;
-  void backward(const UpdateCallback& callback) override;
-  void addBiases();
-  void bpropBiases();
 };

 } // namespace paddle
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include "CudnnConvTransLayer.h"
#include "paddle/utils/Logging.h"
#include "paddle/utils/Stat.h"
namespace paddle {
REGISTER_LAYER(cudnn_convt, CudnnConvTransLayer);
} // namespace paddle
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#pragma once
#include <vector>
#include "CudnnConvBaseLayer.h"
#include "Projection.h"
#include "paddle/math/Matrix.h"
namespace paddle {
/**
 * @brief A 2-dimensional conv transpose (deconv) layer implemented by
 * cuDNN. It only supports GPU mode. We automatically select
 * CudnnConvTransLayer for GPU mode and ExpandConvTransLayer for CPU mode
 * when the layer type is "convt". Users can also specify the type as
 * "exconvt" or "cudnn_convt" to pick a particular implementation.
*
* The config file api is img_conv_layer.
*/
class CudnnConvTransLayer : public CudnnConvBaseLayer {
public:
explicit CudnnConvTransLayer(const LayerConfig& config)
: CudnnConvBaseLayer(config) {}
~CudnnConvTransLayer() {}
};
} // namespace paddle
@@ -34,8 +34,7 @@ DECLARE_double(checkgrad_eps);
 DECLARE_bool(thread_local_rand_use_global_seed);
 DECLARE_bool(prev_batch_state);

-// Do one forward pass of convTrans layer and check to see if its output
-// matches the given result
+// Do one forward pass of ConvLayer using either exconv or cudnn_conv
 MatrixPtr doOneConvTest(size_t imgSize,
                         size_t output_x,
                         size_t stride,
...
@@ -166,15 +166,19 @@ TEST(Projection, scaling) {
  }
}

-void testProjectionConv(size_t groups) {
+void testProjectionConv(size_t groups, bool isDeconv) {
  const int NUM_FILTERS = 18;
  const int FILTER_SIZE = 2;
-  const int FILTER_SIZE_Y = 3;
+  const int FILTER_SIZE_Y = 4;
  const int CHANNELS = 3;
  const int IMAGE_SIZE = 16;

  ProjectionConfig conf;
+  if (isDeconv) {
+    conf.set_type("convt");
+  } else {
    conf.set_type("conv");
+  }
  conf.set_num_filters(NUM_FILTERS);

  ConvConfig* conv = conf.mutable_conv_conf();
@@ -186,7 +190,11 @@ void testProjectionConv(size_t groups) {
  conv->set_stride(2);
  conv->set_stride_y(2);
  conv->set_groups(groups);
+  if (isDeconv) {
+    conv->set_filter_channels(NUM_FILTERS / conv->groups());
+  } else {
    conv->set_filter_channels(conv->channels() / conv->groups());
+  }
  conv->set_img_size(IMAGE_SIZE);
  int output_x = outputSize(conv->img_size(),
                            conv->filter_size(),
@@ -199,8 +207,14 @@ void testProjectionConv(size_t groups) {
                            conv->stride_y(),
                            /* caffeMode */ true);
  conv->set_output_x(output_x);
+  conv->set_output_y(output_y);
+  if (isDeconv) {
+    conf.set_input_size(output_x * output_y * CHANNELS);
+    conf.set_output_size(IMAGE_SIZE * IMAGE_SIZE * NUM_FILTERS);
+  } else {
    conf.set_input_size(IMAGE_SIZE * IMAGE_SIZE * CHANNELS);
    conf.set_output_size(output_x * output_y * NUM_FILTERS);
+  }
  testProjectionGrad(conf,
                     INPUT_DATA,
@@ -215,8 +229,12 @@ void testProjectionConv(size_t groups) {
#ifndef PADDLE_ONLY_CPU
TEST(Projection, conv) {
-  testProjectionConv(1);
-  testProjectionConv(3);
+  /// test ConvProjection
+  testProjectionConv(1, false);
+  testProjectionConv(3, false);
+  /// test ConvTransProjection
+  testProjectionConv(1, true);
+  testProjectionConv(3, true);
}
#endif
@@ -385,11 +403,11 @@ void testConvTransLayer(const string& type, bool trans, bool useGpu) {
  config.layerConfig.set_partial_sum(1);
  config.layerConfig.set_shared_biases(true);

-  config.inputDefs.push_back({INPUT_DATA, "layer_0", 1024, 288});
+  config.inputDefs.push_back({INPUT_DATA, "layer_0", 1024, 384});
  LayerInputConfig* input = config.layerConfig.add_inputs();
  ConvConfig* conv = input->mutable_conv_conf();
  conv->set_filter_size(2);
-  conv->set_filter_size_y(3);
+  conv->set_filter_size_y(4);
  conv->set_channels(16);
  conv->set_padding(0);
  conv->set_padding_y(1);
@@ -416,6 +434,9 @@ TEST(Layer, convTransLayer) {
  for (auto useGpu : {false, true}) {
    testConvTransLayer("exconvt", /* trans= */ false, /* useGpu= */ useGpu);
  }
+#ifndef PADDLE_ONLY_CPU
+  testConvTransLayer("cudnn_convt", /* trans= */ false, /* useGpu= */ true);
+#endif
}

TEST(Layer, blockExpandLayer) {
...
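With these changes, testProjectionConv exercises both ConvProjection and the new ConvTransProjection (with groups of 1 and 3 each), and TEST(Layer, convTransLayer) covers cudnn_convt alongside exconvt. The filter height changes from 3 to 4, so the declared parameter count grows from 288 to 384 (2 x 3 x 16 x 3 versus 2 x 4 x 16 x 3) to match.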