Commit 54d4968d authored by dangqingqing

Refine code and comments for CMRNormLayer.

ISSUE=4612671

git-svn-id: https://svn.baidu.com/idl/trunk/paddle@1501 1ad973e4-5ce8-4261-8a94-b56d1f490c56
Parent 2c3c0278
...
@@ -82,12 +82,6 @@ img_cmrnorm_layer
    :members: img_cmrnorm_layer
    :noindex:
-img_rnorm_layer
------------------
-.. automodule:: paddle.trainer_config_helpers.layers
-   :members: img_rnorm_layer
-   :noindex:
 batch_norm_layer
 ---------------------
 .. automodule:: paddle.trainer_config_helpers.layers
...
...
@@ -7,7 +7,7 @@
 ## API Reference
-* [Trainer Config Helpers](api/trainer_config_helpers/index.md)
+* [Model Config Interface](api/trainer_config_helpers/index.md)
 ## Command Line Argument
...
...
@@ -46,9 +46,6 @@ bool CMRProjectionNormLayer::init(const LayerMap& layerMap,
   /* the size of inputs for norm-layer is 1 */
   CHECK_EQ(config_.inputs_size(), 1);
-  auto& inputConfig = config_.inputs(0);
-  blocked_ = inputConfig.norm_conf().blocked();
   return true;
 }
@@ -69,7 +66,7 @@ void CMRProjectionNormLayer::forward(PassType passType) {
   denoms_->zeroMem();
   outV->crossMapNormalFwd(*input, imgSizeH_, imgSizeW_, *denoms_, channels_,
-                          size_, scale_, pow_, blocked_);
+                          size_, scale_, pow_);
 }
 void CMRProjectionNormLayer::backward(const UpdateCallback& callback) {
@@ -86,6 +83,6 @@ void CMRProjectionNormLayer::backward(const UpdateCallback& callback) {
   preOutGrad->crossMapNormalBwd(*localGrad, *denoms_, *preOutV, *localOutV,
                                 channels_, imgSizeH_, imgSizeW_, size_, scale_,
-                                pow_, blocked_);
+                                pow_);
 }
 } // namespace paddle
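For reference, CMRProjectionNormLayer performs cross-map response normalization (LRN across channels) as described in Alex's paper cited in the img_cmrnorm_layer docstring below. Here is a minimal numpy sketch of what crossMapNormalFwd computes for a single image; the constant 1 in the denominator and the clipping of the channel window at the borders are assumptions for illustration, not taken from this commit.

```python
import numpy as np

def cross_map_normal_fwd(x, size, scale, power):
    # x: activations of one image, shape (channels, height, width).
    # denoms[c] = 1 + scale * sum of squared activations over the `size`
    # neighboring channels centered at c (window clipped at the borders).
    channels = x.shape[0]
    half = size // 2
    denoms = np.ones_like(x)
    for c in range(channels):
        lo, hi = max(0, c - half), min(channels, c + half + 1)
        denoms[c] += scale * np.sum(x[lo:hi] ** 2, axis=0)
    # The output scales each activation by denoms ** -power.
    return x / denoms ** power, denoms
```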
...
@@ -23,15 +23,12 @@ namespace paddle {
 /**
  * @brief response normalization across feature maps
  * namely normalize in number of size_ channels
  */
 class CMRProjectionNormLayer : public ResponseNormLayer {
   size_t imgSizeH_, imgSizeW_;
   size_t outputH_, outputW_;
-protected:
-  bool blocked_;
 public:
   explicit CMRProjectionNormLayer(const LayerConfig& config)
       : ResponseNormLayer(config) {}
...
...
@@ -943,7 +943,7 @@ void GpuMatrix::avgPoolBackward(Matrix& outGrad, size_t imgSizeH,
 void GpuMatrix::crossMapNormalFwd(Matrix& input, size_t imgSizeH,
                                   size_t imgSizeW, Matrix& denoms,
                                   size_t channels, size_t sizeX, float scale,
-                                  float pow, bool blocked) {
+                                  float pow) {
   size_t num = input.getHeight();
   size_t height = imgSizeH;
   size_t width = imgSizeW;
@@ -960,7 +960,7 @@ void GpuMatrix::crossMapNormalBwd(Matrix& localGrad, Matrix& denoms,
                                   Matrix& preOutV, Matrix& localOutV,
                                   size_t channels, size_t imgSizeH,
                                   size_t imgSizeW, size_t sizeX, float scale,
-                                  float pow, bool blocked) {
+                                  float pow) {
   size_t num = preOutV.getHeight();
   size_t height = imgSizeH;
   size_t width = imgSizeW;
@@ -1602,7 +1602,7 @@ void CpuMatrix::avgPoolBackward(Matrix& input, size_t imgSizeH, size_t imgSizeW,
 void CpuMatrix::crossMapNormalFwd(Matrix& input, size_t imgSizeH,
                                   size_t imgSizeW, Matrix& denoms,
                                   size_t channels, size_t sizeX, float scale,
-                                  float pow, bool blocked) {
+                                  float pow) {
   size_t num = input.getHeight();
   size_t height = imgSizeH;
   size_t width = imgSizeW;
@@ -1655,7 +1655,7 @@ void CpuMatrix::crossMapNormalBwd(Matrix& localGrad, Matrix& denoms,
                                   Matrix& preOutV, Matrix& localOutV,
                                   size_t channels, size_t imgSizeH,
                                   size_t imgSizeW, size_t size, float scale,
-                                  float pow, bool blocked) {
+                                  float pow) {
   LOG(FATAL) << "Not implemented";
   CHECK(imgSizeH * imgSizeW * channels == preOutV.getWidth());
...
@@ -585,7 +585,7 @@ public:
    * \f[
    * a[i] = \sum_{j=-(N-1)/2}^{(N-1)/2} b_{i+j} * c_{j}
    * \f]
    *
    * b contains M elements,
    * c contains N elements (N is odd),
    * b's index arithmetic is computed modulo M,
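The Doxygen comment above describes a circular convolution with modular indexing; a small numpy sketch of that formula follows. How the index j maps onto c's storage order is an assumption, not stated in this diff.

```python
import numpy as np

def circular_conv(b, c):
    # a[i] = sum_{j=-(N-1)/2}^{(N-1)/2} b[(i + j) mod M] * c[j],
    # with M = len(b), N = len(c), and N odd.
    M, N = len(b), len(c)
    half = (N - 1) // 2
    a = np.zeros(M)
    for i in range(M):
        for j in range(-half, half + 1):
            a[i] += b[(i + j) % M] * c[j + half]
    return a
```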
@@ -774,7 +774,7 @@ public:
   virtual void crossMapNormalFwd(Matrix& input, size_t imgSizeH,
                                  size_t imgSizeW, Matrix& denoms,
                                  size_t channels, size_t sizeX, float scale,
-                                 float pow, bool blocked) {
+                                 float pow) {
     LOG(FATAL) << "Not implemeted";
   }
@@ -782,7 +782,7 @@ public:
                                  Matrix& preOutV, Matrix& localOutV,
                                  size_t channels, size_t imgSizeH,
                                  size_t imgSizeW, size_t size, float scale,
-                                 float pow, bool blocked) {
+                                 float pow) {
     LOG(FATAL) << "Not implemeted";
   }
@@ -883,7 +883,7 @@ public:
    * @code
    * this[i] = -sum(label[i][j]*log(output[i][j])
    *                + (1-label[i][j])*log(1-output[i][j]))
    * @endcode
    */
   virtual void multiBinaryLabelCrossEntropy(Matrix& output, Matrix& label) {
     LOG(FATAL) << "Not implemented";
@@ -895,7 +895,7 @@ public:
    * @code
    * this[i][j] = -label[i][j]/output[i][j]
    *              + (1-label[i][j])/(1-output[i][j])
    * @endcode
    */
   virtual void multiBinaryLabelCrossEntropyBp(Matrix& output, Matrix& label) {
     LOG(FATAL) << "Not implemented";
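The two @code blocks above are the forward loss and its gradient for multi-binary-label cross entropy. A compact numpy sketch of both formulas, with illustrative function names that are not part of the Paddle API:

```python
import numpy as np

def multi_binary_label_cross_entropy(output, label):
    # this[i] = -sum_j(label[i][j]*log(output[i][j])
    #                  + (1-label[i][j])*log(1-output[i][j]))
    return -np.sum(label * np.log(output)
                   + (1 - label) * np.log(1 - output), axis=1)

def multi_binary_label_cross_entropy_bp(output, label):
    # this[i][j] = -label[i][j]/output[i][j]
    #              + (1-label[i][j])/(1-output[i][j])
    return -label / output + (1 - label) / (1 - output)
```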
@@ -903,12 +903,12 @@ public:
  /**
   * @brief Calculate the classification error for multi binary labels
   *
   * @code
   * this[i] = sum((output[i][j] >= threshold && label[i][j] == 0)
   *            || (output[i][j] < threshold && label[i][j] == 1))
   *            / output->getWidth()
   * @endcode
   */
  virtual void classificationErrorMulti(Matrix& output, Matrix& label,
                                        real threshold) {
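A numpy sketch of the error described in the @code block above: the fraction of label positions per sample that fall on the wrong side of the threshold. The function name and array layout are illustrative only.

```python
import numpy as np

def classification_error_multi(output, label, threshold):
    # Wrong when predicted positive (>= threshold) but label is 0,
    # or predicted negative (< threshold) but label is 1.
    wrong = (((output >= threshold) & (label == 0))
             | ((output < threshold) & (label == 1)))
    # Dividing by output width is the mean over labels in each row.
    return wrong.mean(axis=1)
```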
@@ -1149,12 +1149,12 @@ public:
   void crossMapNormalFwd(Matrix& input, size_t imgSizeH, size_t imgSizeW,
                          Matrix& denoms, size_t channels, size_t sizeX,
-                         float scale, float pow, bool blocked);
+                         float scale, float pow);
   void crossMapNormalBwd(Matrix& localGrad, Matrix& denoms, Matrix& preOutV,
                          Matrix& localOutV, size_t channels, size_t imgSizeH,
-                         size_t imgSizeW, size_t sizeX, float scale, float pow,
-                         bool blocked);
+                         size_t imgSizeW, size_t sizeX,
+                         float scale, float pow);
   void maxSequenceForward(Matrix& input, const IVector& sequence,
                           IVector& index);
@@ -1260,12 +1260,12 @@ public:
   void crossMapNormalFwd(Matrix& input, size_t imgSizeH, size_t imgSizeW,
                          Matrix& denoms, size_t channels, size_t sizeX,
-                         float scale, float pow, bool blocked);
+                         float scale, float pow);
   void crossMapNormalBwd(Matrix& localGrad, Matrix& denoms, Matrix& preOutV,
                          Matrix& localOutV, size_t channels, size_t imgSizeH,
-                         size_t imgSizeW, size_t sizeX, float scale, float pow,
-                         bool blocked);
+                         size_t imgSizeW, size_t sizeX,
+                         float scale, float pow);
   void maxSequenceForward(Matrix& input, const IVector& sequence,
                           IVector& index);
@@ -1307,14 +1307,14 @@ public:
   * @code
   * table.row[ids[i]] += this.row[i]
   * @endcode
   */
  virtual void addToRows(Matrix& table, IVector& ids);
  /**
   * @code
   * this[i] = table[i, id[i]]
   * @endcode
   */
  virtual void selectElements(Matrix& table, IVector& ids);
  /**
...
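The addToRows and selectElements @code comments above map directly onto simple gather/scatter loops; a numpy sketch of both (names are illustrative, not the Paddle API):

```python
import numpy as np

def add_to_rows(this, table, ids):
    # table.row[ids[i]] += this.row[i]
    for i, row_id in enumerate(ids):
        table[row_id] += this[i]

def select_elements(this, table, ids):
    # this[i] = table[i, id[i]]
    for i, col_id in enumerate(ids):
        this[i] = table[i, col_id]
```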
@@ -1500,7 +1500,7 @@ def img_pool_layer(input, pool_size, name=None,
 def __img_norm_layer__(name, input, size, norm_type, scale, power,
-                       num_channels, blocked, layer_attr):
+                       num_channels, blocked=0, layer_attr):
     if num_channels is None:
         assert input.num_filters is not None
         num_channels = input.num_filters
@@ -1522,9 +1522,9 @@ def __img_norm_layer__(name, input, size, norm_type, scale, power,
 @layer_support()
 def img_cmrnorm_layer(input, size, scale=0.0128, power=0.75,
                       name=None, num_channels=None,
-                      blocked=0, layer_attr=None):
+                      layer_attr=None):
     """
-    Convolution cross-map-response-normalize layer.
+    Response normalization across feature maps.
     The details please refer to
     `Alex's paper <http://www.cs.toronto.edu/~fritz/absps/imagenet.pdf>`_.
@@ -1532,7 +1532,7 @@ def img_cmrnorm_layer(input, size, scale=0.0128, power=0.75,
     :type name: None|basestring
     :param input: layer's input.
     :type input: LayerOutput
-    :param size: cross map response size.
+    :param size: Normalize in number of :math:`size` feature maps.
     :type size: int
     :param scale: The hyper-parameter.
     :type scale: float
@@ -1547,30 +1547,7 @@ def img_cmrnorm_layer(input, size, scale=0.0128, power=0.75,
     :rtype: LayerOutput
     """
     return __img_norm_layer__(name, input, size, "cmrnorm-projection", scale,
-                              power, num_channels, blocked, layer_attr)
+                              power, num_channels, 0, layer_attr)
-@wrap_name_default("rnorm")
-@layer_support()
-def img_rnorm_layer(input, size, scale, power, name=None, num_channels=None,
-                    layer_attr=None):
-    """
-    Normalize the input in local region, namely response normalization
-    across feature maps.
-    :param name: The name of this layer.
-    :rtype name: None|basestring
-    :param input: The input of this layer.
-    :param size:
-    :param scale:
-    :param power:
-    :param num_channels:
-    :param layer_attr:
-    :return: LayerOutput object.
-    :rtype: LayerOutput
-    """
-    return __img_norm_layer__(name, input, size, 'rnorm', scale, power,
-                              num_channels, 0, layer_attr)
 @wrap_bias_attr_default()
...
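After this change img_cmrnorm_layer no longer exposes a blocked argument and img_rnorm_layer is removed. A hypothetical config snippet using the remaining interface; the layer names and hyper-parameter values are illustrative, only the img_cmrnorm_layer arguments come from this diff:

```python
from paddle.trainer_config_helpers import *

data = data_layer(name='image', size=3 * 32 * 32)
conv = img_conv_layer(input=data, filter_size=5, num_filters=32,
                      num_channels=3, act=ReluActivation())
# Normalize each response across `size` neighboring feature maps.
norm = img_cmrnorm_layer(input=conv, size=5, scale=0.0128, power=0.75)
```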