Commit 707a9c9b authored by gaoyuan

Fix variable name and add the annotation

Parent commit: c0076084
@@ -17,6 +17,15 @@ limitations under the License. */
 #include "paddle/math/BaseMatrix.h"
 namespace paddle {
+/**
+ * @brief A layer for generating prior box locations and variances.
+ * - Input: Two and only two input layers are accepted; they must be a data
+ *   output layer and a convolution output layer.
+ * - Output: The prior box locations and variances of the input data.
+ * Reference:
+ *   Wei Liu, Dragomir Anguelov, Dumitru Erhan, Christian Szegedy, Scott Reed,
+ *   Cheng-Yang Fu, Alexander C. Berg. SSD: Single Shot MultiBox Detector
+ */
 class PriorBoxLayer : public Layer {
 public:
@@ -24,106 +33,84 @@ public:
   bool init(const LayerMap& layerMap, const ParameterMap& parameterMap);
   void forward(PassType passType);
   void backward(const UpdateCallback& callback) {}
-  void forwardImp(const Argument& featureMap, const Argument& imageShape);
   int numPriors_;
   std::vector<int> minSize_;
   std::vector<int> maxSize_;
   std::vector<float> aspectRatio_;
   std::vector<float> variance_;
-  std::vector<Argument> tmpCpuInput_;
   MatrixPtr buffer_;
 };
 bool PriorBoxLayer::init(const LayerMap& layerMap,
                          const ParameterMap& parameterMap) {
   Layer::init(layerMap, parameterMap);
-  auto pb_conf = config_.inputs(0).priorbox_conf();
-  std::copy(pb_conf.min_size().begin(),
-            pb_conf.min_size().end(),
+  auto pbConf = config_.inputs(0).priorbox_conf();
+  std::copy(pbConf.min_size().begin(),
+            pbConf.min_size().end(),
             std::back_inserter(minSize_));
-  std::copy(pb_conf.max_size().begin(),
-            pb_conf.max_size().end(),
+  std::copy(pbConf.max_size().begin(),
+            pbConf.max_size().end(),
             std::back_inserter(maxSize_));
-  std::copy(pb_conf.aspect_ratio().begin(),
-            pb_conf.aspect_ratio().end(),
+  std::copy(pbConf.aspect_ratio().begin(),
+            pbConf.aspect_ratio().end(),
             std::back_inserter(aspectRatio_));
-  std::copy(pb_conf.variance().begin(),
-            pb_conf.variance().end(),
+  std::copy(pbConf.variance().begin(),
+            pbConf.variance().end(),
             std::back_inserter(variance_));
   // flip
-  int input_ratio_length = aspectRatio_.size();
-  for (int index = 0; index < input_ratio_length; index++)
+  int inputRatioLength = aspectRatio_.size();
+  for (int index = 0; index < inputRatioLength; index++)
     aspectRatio_.push_back(1 / aspectRatio_[index]);
   aspectRatio_.push_back(1.);
   numPriors_ = aspectRatio_.size();
   if (maxSize_.size() > 0) numPriors_++;
-  buffer_ = Matrix::create(1, 1, false, false);
-  if (useGpu_) {
-    tmpCpuInput_.reserve(inputLayers_.size());
-    for (size_t i = 0; i < inputLayers_.size(); i++) {
-      tmpCpuInput_.push_back(Argument());
-    }
-  }
   return true;
 }
 void PriorBoxLayer::forward(PassType passType) {
   Layer::forward(passType);
-  if (useGpu_) {
-    for (size_t i = 0; i < inputLayers_.size(); i++) {
-      tmpCpuInput_[i].resizeAndCopyFrom(
-          getInput(i), false, HPPL_STREAM_DEFAULT);
-      hl_stream_synchronize(HPPL_STREAM_DEFAULT);
-      forwardImp(tmpCpuInput_[0], tmpCpuInput_[1]);
-    }
-  } else {
-    forwardImp(getInput(0), getInput(1));
-  }
-}
-void PriorBoxLayer::forwardImp(const Argument& featureMap,
-                               const Argument& imageShape) {
-  int layer_width = featureMap.getFrameWidth();
-  int layer_height = featureMap.getFrameHeight();
-  MatrixPtr inV1 = imageShape.value;
-  int image_width = inV1->getElement(0, 0);
-  int image_height = inV1->getElement(0, 1);
-  float step_w = static_cast<float>(image_width) / layer_width;
-  float step_h = static_cast<float>(image_height) / layer_height;
-  int dim = layer_height * layer_width * numPriors_ * 4;
+  auto input = getInput(0);
+  int layerWidth = input.getFrameWidth();
+  int layerHeight = input.getFrameHeight();
+  auto image = getInput(1);
+  int imageWidth = image.getFrameWidth();
+  int imageHeight = image.getFrameHeight();
+  float stepW = static_cast<float>(imageWidth) / layerWidth;
+  float stepH = static_cast<float>(imageHeight) / layerHeight;
+  int dim = layerHeight * layerWidth * numPriors_ * 4;
   reserveOutput(1, dim * 2);
   // use a cpu buffer to compute
   Matrix::resizeOrCreate(buffer_, 1, dim * 2, false, false);
-  auto* tmp_ptr = buffer_->getData();
+  auto* tmpPtr = buffer_->getData();
   int idx = 0;
-  for (int h = 0; h < layer_height; ++h) {
-    for (int w = 0; w < layer_width; ++w) {
-      float center_x = (w + 0.5) * step_w;
-      float center_y = (h + 0.5) * step_h;
-      int min_size = 0;
+  for (int h = 0; h < layerHeight; ++h) {
+    for (int w = 0; w < layerWidth; ++w) {
+      float centerX = (w + 0.5) * stepW;
+      float centerY = (h + 0.5) * stepH;
+      int minSize = 0;
       for (size_t s = 0; s < minSize_.size(); s++) {
         // first prior.
-        min_size = minSize_[s];
-        int box_width = min_size;
-        int box_height = min_size;
+        minSize = minSize_[s];
+        int boxWidth = minSize;
+        int boxHeight = minSize;
         // xmin, ymin, xmax, ymax.
-        tmp_ptr[idx++] = (center_x - box_width / 2.) / image_width;
-        tmp_ptr[idx++] = (center_y - box_height / 2.) / image_height;
-        tmp_ptr[idx++] = (center_x + box_width / 2.) / image_width;
-        tmp_ptr[idx++] = (center_y + box_height / 2.) / image_height;
+        tmpPtr[idx++] = (centerX - boxWidth / 2.) / imageWidth;
+        tmpPtr[idx++] = (centerY - boxHeight / 2.) / imageHeight;
+        tmpPtr[idx++] = (centerX + boxWidth / 2.) / imageWidth;
+        tmpPtr[idx++] = (centerY + boxHeight / 2.) / imageHeight;
         if (maxSize_.size() > 0) {
           CHECK_EQ(minSize_.size(), maxSize_.size());
           // second prior.
           for (size_t s = 0; s < maxSize_.size(); s++) {
-            int max_size = maxSize_[s];
-            box_width = box_height = sqrt(min_size * max_size);
-            tmp_ptr[idx++] = (center_x - box_width / 2.) / image_width;
-            tmp_ptr[idx++] = (center_y - box_height / 2.) / image_height;
-            tmp_ptr[idx++] = (center_x + box_width / 2.) / image_width;
-            tmp_ptr[idx++] = (center_y + box_height / 2.) / image_height;
+            int maxSize = maxSize_[s];
+            boxWidth = boxHeight = sqrt(minSize * maxSize);
+            tmpPtr[idx++] = (centerX - boxWidth / 2.) / imageWidth;
+            tmpPtr[idx++] = (centerY - boxHeight / 2.) / imageHeight;
+            tmpPtr[idx++] = (centerX + boxWidth / 2.) / imageWidth;
+            tmpPtr[idx++] = (centerY + boxHeight / 2.) / imageHeight;
           }
         }
       }
@@ -131,27 +118,26 @@ void PriorBoxLayer::forwardImp(const Argument& featureMap,
       for (size_t r = 0; r < aspectRatio_.size(); r++) {
         float ar = aspectRatio_[r];
         if (fabs(ar - 1.) < 1e-6) continue;
-        float box_width = min_size * sqrt(ar);
-        float box_height = min_size / sqrt(ar);
-        tmp_ptr[idx++] = (center_x - box_width / 2.) / image_width;
-        tmp_ptr[idx++] = (center_y - box_height / 2.) / image_height;
-        tmp_ptr[idx++] = (center_x + box_width / 2.) / image_width;
-        tmp_ptr[idx++] = (center_y + box_height / 2.) / image_height;
+        float boxWidth = minSize * sqrt(ar);
+        float boxHeight = minSize / sqrt(ar);
+        tmpPtr[idx++] = (centerX - boxWidth / 2.) / imageWidth;
+        tmpPtr[idx++] = (centerY - boxHeight / 2.) / imageHeight;
+        tmpPtr[idx++] = (centerX + boxWidth / 2.) / imageWidth;
+        tmpPtr[idx++] = (centerY + boxHeight / 2.) / imageHeight;
       }
     }
   }
   // clip the prior's coordinate such that it is within [0, 1]
   for (int d = 0; d < dim; ++d)
-    tmp_ptr[d] = std::min(std::max(tmp_ptr[d], (float)0.), (float)1.);
+    tmpPtr[d] = std::min(std::max(tmpPtr[d], (float)0.), (float)1.);
   // set the variance.
-  for (int h = 0; h < layer_height; h++)
-    for (int w = 0; w < layer_width; w++)
+  for (int h = 0; h < layerHeight; h++)
+    for (int w = 0; w < layerWidth; w++)
       for (int i = 0; i < numPriors_; i++)
-        for (int j = 0; j < 4; j++) tmp_ptr[idx++] = variance_[j];
+        for (int j = 0; j < 4; j++) tmpPtr[idx++] = variance_[j];
   MatrixPtr outV = getOutputValue();
   outV->copyFrom(buffer_->data_, dim * 2);
 }
 REGISTER_LAYER(priorbox, PriorBoxLayer);
 } // namespace paddle
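
The shape bookkeeping above is easy to misread, so here is a small illustrative sketch of how the output size follows from init() and forward(). This is not part of the commit; the function name and arguments below are made up for this note.

# numPriors_ counts each configured aspect ratio, its flipped (reciprocal)
# value, the ratio-1 prior, and one extra prior when max_size is given.
# Every prior contributes 4 coordinates, and the output holds the box
# locations followed by the per-coordinate variances.
def prior_box_output_size(layer_h, layer_w, num_aspect_ratios, has_max_size):
    num_priors = 2 * num_aspect_ratios + 1
    if has_max_size:
        num_priors += 1  # the sqrt(min_size * max_size) prior
    dim = layer_h * layer_w * num_priors * 4  # xmin, ymin, xmax, ymax
    return dim * 2  # matches reserveOutput(1, dim * 2): boxes + variances

# e.g. a 3x3 feature map, aspect_ratio=[2.0], with a max_size configured:
assert prior_box_output_size(3, 3, 1, True) == 288
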
@@ -1589,8 +1589,6 @@ class PriorBoxLayer(LayerBase):
         self.config.inputs[0].priorbox_conf.aspect_ratio.extend(aspect_ratio)
         self.config.inputs[0].priorbox_conf.variance.extend(variance)
         self.config.size = size
-        input_layer0 = self.get_input_layer(0)
-        input_layer1 = self.get_input_layer(1)
 @config_layer('data')
......
@@ -938,7 +938,7 @@ def print_layer(input, name=None):
 @wrap_name_default("priorbox")
 def priorbox_layer(input,
-                   img_shape,
+                   image,
                    aspect_ratio,
                    variance,
                    min_size,
@@ -951,8 +951,8 @@ def priorbox_layer(input,
     :type name: basestring
     :param input: The input layer.
     :type input: LayerOutput
-    :param img_shape: The width and height of the network input image.
-    :type img_shape: LayerOutput
+    :param image: The network input image.
+    :type image: LayerOutput
     :param aspect_ratio: The aspect ratio.
     :type aspect_ratio: list
     :param variance: The bounding box variance.
@@ -968,7 +968,7 @@ def priorbox_layer(input,
     Layer(
         name=name,
         type=LayerType.PRIORBOX_LAYER,
-        inputs=[input.name, img_shape.name],
+        inputs=[input.name, image.name],
         size=size,
         min_size=min_size,
         max_size=max_size,
@@ -977,7 +977,7 @@ def priorbox_layer(input,
     return LayerOutput(
         name,
         LayerType.PRIORBOX_LAYER,
-        parents=[input, img_shape],
+        parents=[input, image],
         num_filters=num_filters,
         size=size)
......
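
With the rename from img_shape to image, a config now passes the network's image input layer itself rather than a separate width/height layer. A minimal usage sketch follows; it assumes the usual trainer_config_helpers import, and the layer names, sizes, and hyper-parameter values are illustrative only, not taken from this commit.

from paddle.trainer_config_helpers import *

# Hypothetical input layers for illustration: a data layer holding the image
# and a convolutional feature map computed from it.
img = data_layer(name="image", size=3 * 300 * 300)
features = img_conv_layer(input=img, filter_size=3, num_filters=12,
                          num_channels=3, stride=1, padding=1,
                          act=ReluActivation())

# The second argument is now the image layer, not an img_shape layer.
prior = priorbox_layer(input=features,
                       image=img,
                       min_size=[60],
                       max_size=[120],
                       aspect_ratio=[2.0],
                       variance=[0.1, 0.1, 0.2, 0.2])
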