Commit 54d4968d authored by dangqingqing

Refine code and comments for CMRNormLayer.

ISSUE=4612671

git-svn-id: https://svn.baidu.com/idl/trunk/paddle@1501 1ad973e4-5ce8-4261-8a94-b56d1f490c56
Parent 2c3c0278
@@ -82,12 +82,6 @@ img_cmrnorm_layer
:members: img_cmrnorm_layer
:noindex:
img_rnorm_layer
-----------------
.. automodule:: paddle.trainer_config_helpers.layers
:members: img_rnorm_layer
:noindex:
batch_norm_layer
---------------------
.. automodule:: paddle.trainer_config_helpers.layers
......
@@ -7,7 +7,7 @@
## API Reference
* [Trainer Config Helpers](api/trainer_config_helpers/index.md)
* [Model Config Interface](api/trainer_config_helpers/index.md)
## Command Line Argument
......
@@ -46,9 +46,6 @@ bool CMRProjectionNormLayer::init(const LayerMap& layerMap,
/* the size of inputs for norm-layer is 1 */
CHECK_EQ(config_.inputs_size(), 1);
auto& inputConfig = config_.inputs(0);
blocked_ = inputConfig.norm_conf().blocked();
return true;
}
@@ -69,7 +66,7 @@ void CMRProjectionNormLayer::forward(PassType passType) {
denoms_->zeroMem();
outV->crossMapNormalFwd(*input, imgSizeH_, imgSizeW_, *denoms_, channels_,
size_, scale_, pow_, blocked_);
size_, scale_, pow_);
}
void CMRProjectionNormLayer::backward(const UpdateCallback& callback) {
@@ -86,6 +83,6 @@ void CMRProjectionNormLayer::backward(const UpdateCallback& callback) {
preOutGrad->crossMapNormalBwd(*localGrad, *denoms_, *preOutV, *localOutV,
channels_, imgSizeH_, imgSizeW_, size_, scale_,
pow_, blocked_);
pow_);
}
} // namespace paddle
@@ -29,9 +29,6 @@ class CMRProjectionNormLayer : public ResponseNormLayer {
size_t imgSizeH_, imgSizeW_;
size_t outputH_, outputW_;
protected:
bool blocked_;
public:
explicit CMRProjectionNormLayer(const LayerConfig& config)
: ResponseNormLayer(config) {}
......
@@ -943,7 +943,7 @@ void GpuMatrix::avgPoolBackward(Matrix& outGrad, size_t imgSizeH,
void GpuMatrix::crossMapNormalFwd(Matrix& input, size_t imgSizeH,
size_t imgSizeW, Matrix& denoms,
size_t channels, size_t sizeX, float scale,
float pow, bool blocked) {
float pow) {
size_t num = input.getHeight();
size_t height = imgSizeH;
size_t width = imgSizeW;
@@ -960,7 +960,7 @@ void GpuMatrix::crossMapNormalBwd(Matrix& localGrad, Matrix& denoms,
Matrix& preOutV, Matrix& localOutV,
size_t channels, size_t imgSizeH,
size_t imgSizeW, size_t sizeX, float scale,
float pow, bool blocked) {
float pow) {
size_t num = preOutV.getHeight();
size_t height = imgSizeH;
size_t width = imgSizeW;
@@ -1602,7 +1602,7 @@ void CpuMatrix::avgPoolBackward(Matrix& input, size_t imgSizeH, size_t imgSizeW,
void CpuMatrix::crossMapNormalFwd(Matrix& input, size_t imgSizeH,
size_t imgSizeW, Matrix& denoms,
size_t channels, size_t sizeX, float scale,
float pow, bool blocked) {
float pow) {
size_t num = input.getHeight();
size_t height = imgSizeH;
size_t width = imgSizeW;
@@ -1655,7 +1655,7 @@ void CpuMatrix::crossMapNormalBwd(Matrix& localGrad, Matrix& denoms,
Matrix& preOutV, Matrix& localOutV,
size_t channels, size_t imgSizeH,
size_t imgSizeW, size_t size, float scale,
float pow, bool blocked) {
float pow) {
LOG(FATAL) << "Not implemented";
CHECK(imgSizeH * imgSizeW * channels == preOutV.getWidth());
......
@@ -774,7 +774,7 @@ public:
virtual void crossMapNormalFwd(Matrix& input, size_t imgSizeH,
size_t imgSizeW, Matrix& denoms,
size_t channels, size_t sizeX, float scale,
float pow, bool blocked) {
float pow) {
LOG(FATAL) << "Not implemeted";
}
@@ -782,7 +782,7 @@ public:
Matrix& preOutV, Matrix& localOutV,
size_t channels, size_t imgSizeH,
size_t imgSizeW, size_t size, float scale,
float pow, bool blocked) {
float pow) {
LOG(FATAL) << "Not implemeted";
}
@@ -1149,12 +1149,12 @@ public:
void crossMapNormalFwd(Matrix& input, size_t imgSizeH, size_t imgSizeW,
Matrix& denoms, size_t channels, size_t sizeX,
float scale, float pow, bool blocked);
float scale, float pow);
void crossMapNormalBwd(Matrix& localGrad, Matrix& denoms, Matrix& preOutV,
Matrix& localOutV, size_t channels, size_t imgSizeH,
size_t imgSizeW, size_t sizeX, float scale, float pow,
bool blocked);
size_t imgSizeW, size_t sizeX,
float scale, float pow);
void maxSequenceForward(Matrix& input, const IVector& sequence,
IVector& index);
@@ -1260,12 +1260,12 @@ public:
void crossMapNormalFwd(Matrix& input, size_t imgSizeH, size_t imgSizeW,
Matrix& denoms, size_t channels, size_t sizeX,
float scale, float pow, bool blocked);
float scale, float pow);
void crossMapNormalBwd(Matrix& localGrad, Matrix& denoms, Matrix& preOutV,
Matrix& localOutV, size_t channels, size_t imgSizeH,
size_t imgSizeW, size_t sizeX, float scale, float pow,
bool blocked);
size_t imgSizeW, size_t sizeX,
float scale, float pow);
void maxSequenceForward(Matrix& input, const IVector& sequence,
IVector& index);
......
@@ -1500,7 +1500,7 @@ def img_pool_layer(input, pool_size, name=None,
def __img_norm_layer__(name, input, size, norm_type, scale, power,
num_channels, blocked, layer_attr):
num_channels, blocked=0, layer_attr):
if num_channels is None:
assert input.num_filters is not None
num_channels = input.num_filters
@@ -1522,9 +1522,9 @@ def img_cmrnorm_layer(input, size, scale=0.0128, power=0.75,
@layer_support()
def img_cmrnorm_layer(input, size, scale=0.0128, power=0.75,
name=None, num_channels=None,
blocked=0, layer_attr=None):
layer_attr=None):
"""
Convolution cross-map-response-normalize layer.
Response normalization across feature maps.
For details, please refer to
`Alex's paper <http://www.cs.toronto.edu/~fritz/absps/imagenet.pdf>`_.
@@ -1532,7 +1532,7 @@ def img_cmrnorm_layer(input, size, scale=0.0128, power=0.75,
:type name: None|basestring
:param input: layer's input.
:type input: LayerOutput
:param size: cross map response size.
:param size: Normalize across :math:`size` neighboring feature maps.
:type size: int
:param scale: The hyper-parameter.
:type scale: float
@@ -1547,30 +1547,7 @@ def img_cmrnorm_layer(input, size, scale=0.0128, power=0.75,
:rtype: LayerOutput
"""
return __img_norm_layer__(name, input, size, "cmrnorm-projection", scale,
power, num_channels, blocked, layer_attr)
@wrap_name_default("rnorm")
@layer_support()
def img_rnorm_layer(input, size, scale, power, name=None, num_channels=None,
layer_attr=None):
"""
Normalize the input within a local region, namely response normalization
across feature maps.
:param name: The name of this layer.
:type name: None|basestring
:param input: The input of this layer.
:param size:
:param scale:
:param power:
:param num_channels:
:param layer_attr:
:return: LayerOutput object.
:rtype: LayerOutput
"""
return __img_norm_layer__(name, input, size, 'rnorm', scale, power,
num_channels, 0, layer_attr)
power, num_channels, 0, layer_attr)
@wrap_bias_attr_default()
......
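With the `blocked` argument dropped from `img_cmrnorm_layer`, the layer is configured with only `size`, `scale`, and `power` (plus the usual `name`, `num_channels`, and `layer_attr`). Below is a minimal config sketch of the updated call, assuming the standard `paddle.trainer_config_helpers` API; the layer names and shapes are made up for illustration.

```python
# Minimal sketch of the post-change config API; names and sizes are hypothetical.
from paddle.trainer_config_helpers import *

data = data_layer(name='image', size=3 * 32 * 32)
conv1 = img_conv_layer(input=data, filter_size=5, num_filters=32,
                       num_channels=3, act=ReluActivation())

# Cross-map response normalization: no `blocked` argument any more;
# size, scale and power keep the defaults shown in the diff above.
norm1 = img_cmrnorm_layer(input=conv1, size=5, scale=0.0128, power=0.75)
```

Internally this still goes through `__img_norm_layer__` with the "cmrnorm-projection" norm type and `blocked` fixed to 0, as shown in the hunk above.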