Commit fc0ad904 authored by xuwei06

Repeat layer for column vector

Parent 14c0e71d
......@@ -40,6 +40,7 @@ namespace paddle {
class FeatureMapExpandLayer : public Layer {
private:
int numFilters_;
bool asRowVector_;
public:
explicit FeatureMapExpandLayer(const LayerConfig& config) : Layer(config) {}
......@@ -62,6 +63,7 @@ bool FeatureMapExpandLayer::init(const LayerMap& layerMap,
CHECK_EQ(inputLayers_.size(), 1UL);
numFilters_ = config_.num_filters();
asRowVector_ = config_.user_arg() != "as_col_vec";
return true;
}
......@@ -76,6 +78,7 @@ void FeatureMapExpandLayer::forward(PassType passType) {
{
AsyncGpuBlock asyncGpuBlock;
if (asRowVector_) {
for (size_t i = 0; i < batchSize; i++) {
MatrixPtr outVTmp =
Matrix::create(outputV->getData() + i * imgSize * numFilters_,
......@@ -87,6 +90,19 @@ void FeatureMapExpandLayer::forward(PassType passType) {
inputV->getData() + i * imgSize, 1, imgSize, false, useGpu_);
outVTmp->addRowVector(*inVTmp);
}
} else {
for (size_t i = 0; i < batchSize; i++) {
MatrixPtr outVTmp =
Matrix::create(outputV->getData() + i * imgSize * numFilters_,
imgSize,
numFilters_,
false,
useGpu_);
MatrixPtr inVTmp = Matrix::create(
inputV->getData() + i * imgSize, imgSize, 1, false, useGpu_);
outVTmp->addColVector(*inVTmp);
}
}
}
/* activation */ {
REGISTER_TIMER_INFO("FwAtvTimer", getName().c_str());
......@@ -102,8 +118,13 @@ void FeatureMapExpandLayer::backward(const UpdateCallback& callback) {
MatrixPtr outGrad = getOutputGrad();
size_t batchSize = getInput(0).getBatchSize();
int imgSize = inGrad->getWidth();
/* Do derivation */ {
REGISTER_TIMER_INFO("BpAvtTimer", getName().c_str());
backwardActivation();
}
{
AsyncGpuBlock asyncGpuBlock;
if (asRowVector_) {
for (size_t i = 0; i < batchSize; i++) {
MatrixPtr outGradTmp =
Matrix::create(outGrad->getData() + i * imgSize * numFilters_,
......@@ -115,10 +136,19 @@ void FeatureMapExpandLayer::backward(const UpdateCallback& callback) {
inGrad->getData() + i * imgSize, 1, imgSize, false, useGpu_);
inGradTmp->collectBias(*outGradTmp, 1);
}
} else {
for (size_t i = 0; i < batchSize; i++) {
MatrixPtr outGradTmp =
Matrix::create(outGrad->getData() + i * imgSize * numFilters_,
imgSize,
numFilters_,
false,
useGpu_);
MatrixPtr inGradTmp = Matrix::create(
inGrad->getData() + i * imgSize, imgSize, 1, false, useGpu_);
inGradTmp->sumRows(*outGradTmp, 1, 1);
}
}
/* Do derivation */ {
REGISTER_TIMER_INFO("BpAvtTimer", getName().c_str());
backwardActivation();
}
}
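The gradient of a repeat is a sum: every input element feeds num_filters output elements, so its gradient collects all of theirs. In the hunk above, collectBias sums over the filter rows (row-vector mode) and sumRows sums over the filter columns (column-vector mode). A NumPy sketch of the same reduction (illustrative only, not the Paddle API):

import numpy as np

def featmap_expand_grad(dy, num_filters, img_size, as_row_vector=True):
    if as_row_vector:
        # forward viewed the output as (num_filters, img_size);
        # sum over the filter rows, as collectBias does above
        return dy.reshape(num_filters, img_size).sum(axis=0)
    # forward viewed the output as (img_size, num_filters);
    # sum over the filter columns, as sumRows does above
    return dy.reshape(img_size, num_filters).sum(axis=1)

dy = np.arange(6.0)                          # output grad for n=3, F=2
print(featmap_expand_grad(dy, 2, 3, True))   # [3. 5. 7.]
print(featmap_expand_grad(dy, 2, 3, False))  # [1. 5. 9.]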
......
......@@ -1598,6 +1598,8 @@ TEST(Layer, FeatureMapExpandLayer) {
/* paraSize= */ 0});
config.layerConfig.add_inputs();
for (auto useGpu : {false, true}) {
for (auto asRowVec : {false, true}) {
config.layerConfig.set_user_arg(asRowVec ? "as_row_vec" : "as_col_vec");
testLayerGrad(config,
"featmap_expand",
/*batch_size*/ 100,
......@@ -1605,6 +1607,7 @@ TEST(Layer, FeatureMapExpandLayer) {
useGpu,
/* useWeight */ true);
}
}
}
TEST(Layer, MultiplexLayer) {
......
......@@ -2428,7 +2428,13 @@ class ExpandLayer(LayerBase):
@config_layer('featmap_expand')
class FeatMapExpandLayer(LayerBase):
def __init__(self, name, inputs, device=None, num_filters=None, bias=False):
def __init__(self,
name,
inputs,
device=None,
num_filters=None,
as_row_vector=True,
bias=False):
super(FeatMapExpandLayer, self).__init__(
name, 'featmap_expand', 0, inputs=inputs, device=device)
config_assert(
......@@ -2437,6 +2443,8 @@ class FeatMapExpandLayer(LayerBase):
self.config.num_filters = num_filters
else:
logger.fatal("FeatMapExpandLayer must specify num_filters.")
if not as_row_vector:
self.config.user_arg = "as_col_vec"
self.set_layer_size(self.get_input_layer(0).size * num_filters)
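Note how the flag is threaded through without a proto change: the Python config writes the string "as_col_vec" into the generic user_arg field only when as_row_vector is False, and the C++ init above reads asRowVector_ = config_.user_arg() != "as_col_vec", so an unset user_arg (the default, and any pre-existing config) keeps the original row-vector behaviour.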
......
......@@ -1566,13 +1566,21 @@ def expand_layer(input,
@wrap_name_default()
@layer_support()
def repeat_layer(input, num_repeats, name=None, layer_attr=None):
def repeat_layer(input,
num_repeats,
as_row_vector=True,
name=None,
layer_attr=None):
"""
A layer for repeating the input for num_repeats times. This is equivalent
to applying concat_layer() with num_repeats copies of the same input.
A layer for repeating the input for num_repeats times.
If as_row_vector:
.. math::
y = [x, x, \cdots, x]
y = [x_1,\cdots, x_n, \cdots, x_1, \cdots, x_n]
If not as_row_vector:
.. math::
y = [x_1,\cdots, x_1, \cdots, x_n, \cdots, x_n]
The example usage is:
......@@ -1585,6 +1593,12 @@ def repeat_layer(input, num_repeats, name=None, layer_attr=None):
:param num_repeats: Repeat the input so many times
:type num_repeats: int
:param name: Layer name.
:param as_row_vector: True for treating the input as a row vector and repeating
in the column direction; this is equivalent to applying
concat_layer() with num_repeats copies of the same input.
False for treating the input as a column vector and repeating
in the row direction.
:type as_row_vector: bool
:type name: basestring
:param layer_attr: extra layer attributes.
:type layer_attr: ExtraLayerAttribute.
......@@ -1596,6 +1610,7 @@ def repeat_layer(input, num_repeats, name=None, layer_attr=None):
inputs=[input.name],
name=name,
num_filters=num_repeats,
as_row_vector=as_row_vector,
type=LayerType.FEATURE_MAP_EXPAND_LAYER,
**ExtraAttr.to_kwargs(layer_attr))
return LayerOutput(
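A usage sketch of the new flag (assuming the usual v1 trainer_config_helpers import path; layer names are illustrative):

from paddle.trainer_config_helpers import data_layer, repeat_layer

x = data_layer(name="x", size=3)  # one sample: [x1, x2, x3]
row = repeat_layer(input=x, num_repeats=2)                       # [x1, x2, x3, x1, x2, x3]
col = repeat_layer(input=x, num_repeats=2, as_row_vector=False)  # [x1, x1, x2, x2, x3, x3]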
......@@ -2846,17 +2861,19 @@ def seq_concat_layer(a, b, act=None, name=None, layer_attr=None,
Concat sequence a with sequence b.
Inputs:
- a = [a1, a2, ..., an]
- a = [a1, a2, ..., am]
- b = [b1, b2, ..., bn]
- Note that the length of a and b should be the same.
Output: [a1, b1, a2, b2, ..., an, bn]
Output: [a1, ..., am, b1, ..., bn]
Note that the above computation is for one sample. Multiple samples are
processed in one batch.
The example usage is:
.. code-block:: python
concat = seq_concat_layer(a=layer1, b=layer2)
:param name: Layer name.
:type name: basestring
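A toy plain-Python illustration of the corrected per-sample ordering (made-up data, not the Paddle API):

a = ["a1", "a2", "a3"]  # sequence a, m = 3 steps
b = ["b1", "b2"]        # sequence b, n = 2 steps
assert a + b == ["a1", "a2", "a3", "b1", "b2"]  # b appended after a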
......