Skip to content
体验新版
项目
组织
正在加载...
登录
切换导航
打开侧边栏
BaiXuePrincess
Paddle
提交
3bce32ba
P
Paddle
项目概览
BaiXuePrincess
/
Paddle
与 Fork 源项目一致
Fork自
PaddlePaddle / Paddle
通知
1
Star
1
Fork
0
代码
文件
提交
分支
Tags
贡献者
分支图
Diff
Issue
0
列表
看板
标记
里程碑
合并请求
0
Wiki
0
Wiki
分析
仓库
DevOps
项目成员
Pages
P
Paddle
项目概览
项目概览
详情
发布
仓库
仓库
文件
提交
分支
标签
贡献者
分支图
比较
Issue
0
Issue
0
列表
看板
标记
里程碑
合并请求
0
合并请求
0
Pages
分析
分析
仓库分析
DevOps
Wiki
0
Wiki
成员
成员
收起侧边栏
关闭侧边栏
动态
分支图
创建新Issue
提交
Issue看板
提交
3bce32ba
编写于
3月 23, 2017
作者:
G
gaoyuan
浏览文件
操作
浏览文件
下载
电子邮件补丁
差异文件
Add create matrix pointer function
上级
17c697c7
变更
2
显示空白变更内容
内联
并排
Showing
2 changed files
with
42 additions
and
39 deletions
+42
-39
paddle/gserver/layers/CrossChannelNormLayer.cpp
paddle/gserver/layers/CrossChannelNormLayer.cpp
+40
-38
paddle/gserver/layers/NormLayer.h
paddle/gserver/layers/NormLayer.h
+2
-1
未找到文件。
paddle/gserver/layers/CrossChannelNormLayer.cpp
浏览文件 @
3bce32ba
...
...
@@ -19,6 +19,23 @@ limitations under the License. */
namespace
paddle
{
// Builds a channels_ x spatialDim matrix positioned over the iter-th sample
// of `data`; each sample occupies channels_ * spatialDim consecutive values.
// NOTE(review): Matrix::create over a raw data pointer presumably wraps the
// existing buffer without copying — confirm ownership semantics in Matrix.h.
MatrixPtr CrossChannelNormLayer::createSampleMatrix(MatrixPtr data,
                                                    size_t iter,
                                                    size_t spatialDim) {
  const size_t sampleSize = channels_ * spatialDim;
  auto* sampleData = data->getData() + iter * sampleSize;
  return Matrix::create(
      sampleData, channels_, spatialDim, /* trans= */ false, useGpu_);
}
// Builds a 1 x spatialDim row matrix positioned over the iter-th spatial
// slice of `data` (e.g. one sample of the per-position norm buffer).
// NOTE(review): Matrix::create over a raw data pointer presumably wraps the
// existing buffer without copying — confirm ownership semantics in Matrix.h.
MatrixPtr CrossChannelNormLayer::createSpatialMatrix(MatrixPtr data,
                                                     size_t iter,
                                                     size_t spatialDim) {
  auto* sliceData = data->getData() + iter * spatialDim;
  return Matrix::create(sliceData, 1, spatialDim, /* trans= */ false, useGpu_);
}
void
CrossChannelNormLayer
::
forward
(
PassType
passType
)
{
Layer
::
forward
(
passType
);
MatrixPtr
inV
=
getInputValue
(
0
);
...
...
@@ -40,25 +57,19 @@ void CrossChannelNormLayer::forward(PassType passType) {
normBuffer_
->
addScalar
(
*
normBuffer_
,
1e-6
);
inV
->
square2
(
*
dataBuffer_
);
for
(
size_t
i
=
0
;
i
<
batchSize
;
i
++
)
{
MatrixPtr
inTmp
=
Matrix
::
create
(
inV
->
getData
()
+
i
*
dataDim
,
channels_
,
spatialDim
,
false
,
useGpu_
);
MatrixPtr
dataTmp
=
Matrix
::
create
(
dataBuffer_
->
getData
()
+
i
*
dataDim
,
channels_
,
spatialDim
,
false
,
useGpu_
);
MatrixPtr
outTmp
=
Matrix
::
create
(
outV
->
getData
()
+
i
*
dataDim
,
channels_
,
spatialDim
,
false
,
useGpu_
);
MatrixPtr
normTmp
=
Matrix
::
create
(
normBuffer_
->
getData
()
+
i
*
spatialDim
,
1
,
spatialDim
,
false
,
useGpu_
);
const
MatrixPtr
inVTmp
=
createSampleMatrix
(
inV
,
i
,
spatialDim
);
const
MatrixPtr
dataTmp
=
createSampleMatrix
(
dataBuffer_
,
i
,
spatialDim
);
MatrixPtr
outVTmp
=
createSampleMatrix
(
outV
,
i
,
spatialDim
);
MatrixPtr
normTmp
=
createSpatialMatrix
(
normBuffer_
,
i
,
spatialDim
);
// compute norm.
spatialBuffer_
->
sumCols
(
*
dataTmp
,
1
,
1
);
spatialBuffer_
->
sumCols
(
*
dataTmp
,
1
,
0
);
spatialBuffer_
->
sqrt2
(
*
spatialBuffer_
);
normTmp
->
copyFrom
(
*
spatialBuffer_
);
out
Tmp
->
copyFrom
(
*
in
Tmp
);
outTmp
->
divRowVector
(
*
spatialBuffer_
);
out
VTmp
->
copyFrom
(
*
inV
Tmp
);
out
V
Tmp
->
divRowVector
(
*
spatialBuffer_
);
// scale the layer.
outTmp
->
mulColVector
(
*
scale_
->
getW
());
out
V
Tmp
->
mulColVector
(
*
scale_
->
getW
());
}
}
...
...
@@ -78,40 +89,31 @@ void CrossChannelNormLayer::backward(const UpdateCallback& callback) {
Matrix
::
resizeOrCreate
(
sampleBuffer_
,
channels_
,
spatialDim
,
false
,
useGpu_
);
scaleDiff_
->
zeroMem
();
for
(
size_t
i
=
0
;
i
<
batchSize
;
i
++
)
{
// propagate to param.
MatrixPtr
dataBufferTmp
=
Matrix
::
create
(
dataBuffer_
->
getData
()
+
i
*
dataDim
,
channels_
,
spatialDim
,
false
,
useGpu_
);
const
MatrixPtr
inValueTmp
=
Matrix
::
create
(
inV
->
getData
()
+
i
*
dataDim
,
channels_
,
spatialDim
,
false
,
useGpu_
);
const
MatrixPtr
outGradTmp
=
Matrix
::
create
(
outG
->
getData
()
+
i
*
dataDim
,
channels_
,
spatialDim
,
false
,
useGpu_
);
MatrixPtr
inGradTmp
=
Matrix
::
create
(
inG
->
getData
()
+
i
*
dataDim
,
channels_
,
spatialDim
,
false
,
useGpu_
);
const
MatrixPtr
normTmp
=
Matrix
::
create
(
normBuffer_
->
getData
()
+
i
*
spatialDim
,
1
,
spatialDim
,
false
,
useGpu_
);
channelBuffer_
->
sumRows
(
*
dataBufferTmp
,
1
,
1
);
MatrixPtr
outGTmp
=
createSampleMatrix
(
outG
,
i
,
spatialDim
);
const
MatrixPtr
dataTmp
=
createSampleMatrix
(
dataBuffer_
,
i
,
spatialDim
);
const
MatrixPtr
inVTmp
=
createSampleMatrix
(
inV
,
i
,
spatialDim
);
const
MatrixPtr
inGTmp
=
createSampleMatrix
(
inG
,
i
,
spatialDim
);
const
MatrixPtr
normTmp
=
createSpatialMatrix
(
normBuffer_
,
i
,
spatialDim
);
channelBuffer_
->
sumRows
(
*
dataTmp
,
1
,
0
);
channelBuffer_
->
dotDiv
(
*
channelBuffer_
,
*
(
scale_
->
getW
()));
// store a / scale[i] in scaleDiff_ temporary
scaleDiff_
->
add
(
*
channelBuffer_
,
1.
);
sampleBuffer_
->
dotMul
(
*
inV
alueTmp
,
*
outGrad
Tmp
);
sampleBuffer_
->
dotMul
(
*
inV
Tmp
,
*
outG
Tmp
);
spatialBuffer_
->
sumCols
(
*
sampleBuffer_
,
1.
,
1.
);
// scale the grad
inG
radTmp
->
copyFrom
(
*
inValue
Tmp
);
inG
rad
Tmp
->
mulRowVector
(
*
spatialBuffer_
);
inG
Tmp
->
copyFrom
(
*
inV
Tmp
);
inGTmp
->
mulRowVector
(
*
spatialBuffer_
);
// divide by square of norm
spatialBuffer_
->
dotMul
(
*
normTmp
,
*
normTmp
);
inG
rad
Tmp
->
divRowVector
(
*
spatialBuffer_
);
inGTmp
->
divRowVector
(
*
spatialBuffer_
);
// subtract
inG
radTmp
->
add
(
*
outGrad
Tmp
,
-
1
,
1
);
inG
Tmp
->
add
(
*
outG
Tmp
,
-
1
,
1
);
// divide by norm
inG
rad
Tmp
->
divRowVector
(
*
normTmp
);
inGTmp
->
divRowVector
(
*
normTmp
);
// scale the diff
inG
rad
Tmp
->
mulColVector
(
*
scale_
->
getW
());
inGTmp
->
mulColVector
(
*
scale_
->
getW
());
}
// updata scale
if
(
scale_
->
getWGrad
())
scale_
->
getWGrad
()
->
copyFrom
(
*
scaleDiff_
);
...
...
paddle/gserver/layers/NormLayer.h
浏览文件 @
3bce32ba
...
...
@@ -80,9 +80,10 @@ public:
// Constructs the layer from its protobuf config, delegating entirely to the
// NormLayer base constructor; remaining setup happens in init().
explicit CrossChannelNormLayer(const LayerConfig& config)
    : NormLayer(config) {}
bool
init
(
const
LayerMap
&
layerMap
,
const
ParameterMap
&
parameterMap
);
void
forward
(
PassType
passType
);
void
backward
(
const
UpdateCallback
&
callback
);
MatrixPtr
createSampleMatrix
(
MatrixPtr
data
,
size_t
iter
,
size_t
spatialDim
);
MatrixPtr
createSpatialMatrix
(
MatrixPtr
data
,
size_t
iter
,
size_t
spatialDim
);
protected:
size_t
channels_
;
...
...
编辑
预览
Markdown
is supported
0%
请重试
或
添加新附件
.
添加附件
取消
You are about to add
0
people
to the discussion. Proceed with caution.
先完成此消息的编辑!
取消
想要评论请
注册
或
登录