BaiXuePrincess / Paddle (forked from PaddlePaddle / Paddle)
Commit c792ef7d
Authored Aug 18, 2017 by chengduoZH
fix DeConv3D, Conv3D
Parent: 424b325d

Showing 2 changed files with 229 additions and 205 deletions (+229, -205):
  paddle/gserver/layers/Conv3DLayer.cpp    (+134, -114)
  paddle/gserver/layers/DeConv3DLayer.cpp  (+95, -91)
paddle/gserver/layers/Conv3DLayer.cpp
@@ -12,9 +12,9 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License. */
-#include "Conv3DLayer.h"
 #include "paddle/utils/Logging.h"
 #include "paddle/utils/Stat.h"
+#include "Conv3DLayer.h"
 namespace paddle {
@@ -22,32 +22,30 @@ REGISTER_LAYER(conv3d, Conv3DLayer);

 bool Conv3DLayer::init(const LayerMap &layerMap,
                        const ParameterMap &parameterMap) {
-  if (!ConvBaseLayer::init(layerMap, parameterMap)) return false;
+  if (!ConvBaseLayer::init(layerMap, parameterMap)) return false;
   int index = 0;
   for (auto &inputConfig : config_.inputs()) {
-    const ConvConfig &conf = inputConfig.conv_conf();
-    M_.push_back(numFilters_ / conf.groups());
-    K_.push_back(conf.filter_channels() * conf.filter_size_z() * \
-                 conf.filter_size_y() * conf.filter_size());
-    weights_[index]->getW()->reshape(weights_[index]->getW()->getWidth(),
-                                     weights_[index]->getW()->getHeight());
-    weights_[index]->getWGrad()->reshape(
-        weights_[index]->getWGrad()->getWidth(),
-        weights_[index]->getWGrad()->getHeight());
-    ++index;
+    const ConvConfig &conf = inputConfig.conv_conf();
+    M_.push_back(numFilters_ / conf.groups());
+    K_.push_back(filterPixels_[index] * filterChannels_[index]);
+    if (nullptr != weights_[index]->getW())
+      weights_[index]->getW()->reshape(weights_[index]->getW()->getWidth(),
+                                       weights_[index]->getW()->getHeight());
+    if (nullptr != weights_[index]->getWGrad())
+      weights_[index]->getWGrad()->reshape(
+          weights_[index]->getWGrad()->getWidth(),
+          weights_[index]->getWGrad()->getHeight());
+    ++index;
   }
-  biases_->getWGrad()->reshape(biases_->getWGrad()->width_,
-                               biases_->getWGrad()->height_);
-  biases_->getW()->reshape(biases_->getW()->width_, biases_->getW()->height_);
+  if (nullptr != biases_->getWGrad())
+    biases_->getWGrad()->reshape(biases_->getWGrad()->width_,
+                                 biases_->getWGrad()->height_);
+  if (nullptr != biases_->getW())
+    biases_->getW()->reshape(biases_->getW()->width_, biases_->getW()->height_);
   CHECK(inputLayers_.size() == parameters_.size());
   return true;
 }

 size_t Conv3DLayer::getSize() {
   CHECK_NE(inputLayers_.size(), 0UL);
   // imgSizeH_.clear();
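For orientation, the M_ and K_ values set up in init are the per-group GEMM dimensions used later in forward: each group multiplies an (M x K) weight slice by a (K x N) vol2col buffer. After the fix, K_ comes from the cached filterPixels_ and filterChannels_ members, which should equal the conf.filter_channels() * filter_size_z() * filter_size_y() * filter_size() product the old code computed, while the added nullptr guards let init survive configurations where a weight or its gradient buffer is absent. A minimal standalone sketch of the bookkeeping, with example numbers assumed rather than taken from any real config:

    #include <cstdio>

    int main() {
      // Assumed example values; the real ones come from the layer config.
      int numFilters = 64, groups = 2;        // output channels, conv groups
      int filterChannels = 3;                 // input channels per group
      int kD = 3, kH = 3, kW = 3;             // 3-D kernel extents
      int outD = 8, outH = 16, outW = 16;     // output volume

      int filterPixels = kD * kH * kW;
      int M = numFilters / groups;            // rows of each per-group GEMM
      int K = filterPixels * filterChannels;  // shared inner dimension
      int N = outD * outH * outW;             // one column per output voxel

      // Forward pass per group: out (M x N) = W (M x K) * colBuf (K x N)
      std::printf("M=%d K=%d N=%d\n", M, K, N);
      return 0;
    }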
@@ -59,22 +57,19 @@ size_t Conv3DLayer::getSize() {
   N_.clear();
   size_t layerSize = 0;
   for (size_t i = 0; i < inputLayers_.size(); ++i) {
-    // imgSizeH_.push_back(inputLayers_[i]->getOutput().getFrameHeight());
-    // imgSizeW_.push_back(inputLayers_[i]->getOutput().getFrameWidth());
-    // imgSizeD_.push_back(inputLayers_[i]->getOutput().getFrameDepth());
-    outputW_.push_back(outputSize(
-        imgSizeW_[i], filterSize_[i], padding_[i], stride_[i], true));
-    outputH_.push_back(outputSize(
-        imgSizeH_[i], filterSizeY_[i], paddingY_[i], strideY_[i], true));
-    outputD_.push_back(outputSize(
-        imgSizeD_[i], filterSizeZ_[i], paddingZ_[i], strideZ_[i], true));
-    N_.push_back(outputD_[i] * outputH_[i] * outputW_[i]);
-    CHECK(layerSize == 0 || N_[i] * size_t(numFilters_) == layerSize);
-    layerSize += N_[i] * numFilters_;
+    // imgSizeH_.push_back(inputLayers_[i]->getOutput().getFrameHeight());
+    // imgSizeW_.push_back(inputLayers_[i]->getOutput().getFrameWidth());
+    // imgSizeD_.push_back(inputLayers_[i]->getOutput().getFrameDepth());
+    outputW_.push_back(
+        outputSize(imgSizeW_[i], filterSize_[i], padding_[i], stride_[i], true));
+    outputH_.push_back(
+        outputSize(imgSizeH_[i], filterSizeY_[i], paddingY_[i], strideY_[i], true));
+    outputD_.push_back(
+        outputSize(imgSizeD_[i], filterSizeZ_[i], paddingZ_[i], strideZ_[i], true));
+    N_.push_back(outputD_[i] * outputH_[i] * outputW_[i]);
+    CHECK(layerSize == 0 || N_[i] * size_t(numFilters_) == layerSize);
+    layerSize += N_[i] * numFilters_;
   }
   getOutput().setFrameHeight(outputH_[0]);
   getOutput().setFrameWidth(outputW_[0]);
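The outputSize calls above use Paddle's caffe-mode rounding (the trailing true argument). The underlying rule is the standard convolution output extent; a sketch of just the arithmetic, not the library helper itself:

    #include <cstdio>

    // The conv output-size rule assumed by the calls above (caffe mode):
    // out = (in - filter + 2 * pad) / stride + 1
    int convOutputSize(int imgSize, int filterSize, int padding, int stride) {
      return (imgSize - filterSize + 2 * padding) / stride + 1;
    }

    int main() {
      // e.g. a 16-voxel axis, 3-wide kernel, pad 1, stride 2 -> 8
      std::printf("%d\n", convOutputSize(16, 3, 1, 2));
      return 0;
    }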
@@ -88,38 +83,46 @@ void Conv3DLayer::forward(PassType passType) {
   int batchSize = inputLayers_[0]->getOutputValue()->getHeight();
   int outWidth = getSize();
   resetOutput(batchSize, outWidth);
-  const MatrixPtr outMat = getOutputValue();
   for (size_t i = 0; i != inputLayers_.size(); ++i) {
-    REGISTER_TIMER_INFO("FwdConv3D", getName().c_str());
-    const MatrixPtr &inMat = getInputValue(i);
-    int width = inMat->getWidth();
-    int M = M_[i];
-    int N = N_[i];
-    int K = K_[i];
-    Matrix::resizeOrCreate(colBuf_, K * groups_[i], N, false, useGpu_);
-    MatrixPtr wMat = weights_[i]->getW();
-    for (int n = 0; n < batchSize; ++n) {
-      colBuf_->vol2Col(inMat->getData() + n * width,
-                       channels_[i],
-                       imgSizeD_[i],
-                       imgSizeH_[i],
-                       imgSizeW_[i],
-                       filterSizeZ_[i],
-                       filterSizeY_[i],
-                       filterSize_[i],
-                       strideZ_[i],
-                       strideY_[i],
-                       stride_[i],
-                       paddingZ_[i],
-                       paddingY_[i],
-                       padding_[i]);
-      real *outData = outMat->getData() + n * outWidth;
-      MatrixPtr outMatSub =
-          Matrix::create(outData, groups_[i] * M, N, false, useGpu_);
-      for (int g = 0; g < groups_[i]; g++) {
-        MatrixPtr wMatSub = wMat->subMatrix(g * M, M);
-        MatrixPtr in = colBuf_->subMatrix(g * K, K);
-        MatrixPtr out = outMatSub->subMatrix(g * M, M);
-        out->mul(*wMatSub, *in, 1.0, 0.0);
-      }
+    REGISTER_TIMER_INFO("FwdConv3D", getName().c_str());
+    const MatrixPtr &inMat = getInputValue(i);
+    const MatrixPtr &outMat = getOutputValue();
+    int M = M_[i];
+    int N = N_[i];
+    int K = K_[i];
+    Matrix::resizeOrCreate(colBuf_, K * groups_[i], N, false, useGpu_);
+    MatrixPtr wMat = weights_[i]->getW();
+    for (int n = 0; n < batchSize; ++n) {
+      colBuf_->vol2Col(inMat->getData() + n * inMat->getStride(),
+                       channels_[i],
+                       imgSizeD_[i],
+                       imgSizeH_[i],
+                       imgSizeW_[i],
+                       filterSizeZ_[i],
+                       filterSizeY_[i],
+                       filterSize_[i],
+                       strideZ_[i],
+                       strideY_[i],
+                       stride_[i],
+                       paddingZ_[i],
+                       paddingY_[i],
+                       padding_[i]);
+      real *outData = outMat->getData() + n * outMat->getStride();
+      MatrixPtr outMatSub =
+          Matrix::create(outData, groups_[i] * M, N, false, useGpu_);
+      for (int g = 0; g < groups_[i]; g++) {
+        MatrixPtr wMatSub = wMat->subMatrix(g * M, M);
+        MatrixPtr in = colBuf_->subMatrix(g * K, K);
+        MatrixPtr out = outMatSub->subMatrix(g * M, M);
+        out->mul(*wMatSub, *in, 1.0, 1.0);
+      }
     }
   }
   if (nullptr != this->biasParameter_) {
-    REGISTER_TIMER_INFO("FwBiasTimer", getName().c_str());
-    this->addBias();
+    REGISTER_TIMER_INFO("FwBiasTimer", getName().c_str());
+    this->addBias();
   }
   forwardActivation();
 }
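The recurring change in this hunk is n * width (or n * outWidth) becoming n * inMat->getStride() and n * outMat->getStride(). That matters whenever a matrix is a view whose physical row pitch differs from its logical width; stepping by width would drift off the row boundaries after the first sample. A minimal sketch of the idea with a hypothetical row-major buffer, unrelated to Paddle's Matrix class:

    #include <vector>

    int main() {
      // A 2-row "view" of logical width 4 inside a buffer whose physical
      // row pitch (stride) is 6. Row n starts at n * stride; using
      // n * width would land mid-row for every row after the first.
      const int width = 4, stride = 6, rows = 2;
      std::vector<float> buf(rows * stride, 0.0f);
      for (int n = 0; n < rows; ++n) {
        float *row = buf.data() + n * stride;  // pitch-aware offset
        for (int j = 0; j < width; ++j) row[j] = 1.0f;
      }
      return 0;
    }

The other behavioral change here, out->mul(*wMatSub, *in, 1.0, 1.0) in place of the old trailing 0.0, switches the per-group GEMM from overwrite to accumulate, so multiple input layers add into the same output block.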
@@ -128,20 +131,20 @@ void Conv3DLayer::backward(const UpdateCallback &callback) {
   backwardActivation();
   if (biases_ && biases_->getWGrad()) {
-    bpropBiases();
-    biases_->getParameterPtr()->incUpdate(callback);
+    bpropBiases();
+    biases_->getParameterPtr()->incUpdate(callback);
   }
   for (size_t i = 0; i != inputLayers_.size(); ++i) {
-    REGISTER_TIMER_INFO("BwdConv3D", getName().c_str());
-    if (weights_[i]->getWGrad()) {
-      bpropWeights(i);
-    }
-    if (this->needGradient_) {
-      bpropData(i);
-    }
-    REGISTER_TIMER_INFO("WeightUpdate", getName().c_str());
-    weights_[i]->getParameterPtr()->incUpdate(callback);
+    REGISTER_TIMER_INFO("BwdConv3D", getName().c_str());
+    if (weights_[i]->getWGrad()) {
+      bpropWeights(i);
+    }
+    if (getInputGrad(i)) {
+      bpropData(i);
+    }
+    REGISTER_TIMER_INFO("WeightUpdate", getName().c_str());
+    weights_[i]->getParameterPtr()->incUpdate(callback);
   }
 }
@@ -149,28 +152,36 @@ void Conv3DLayer::bpropWeights(int i) {
   int M = M_[i];
   int N = N_[i];
   int K = K_[i];
-  const MatrixPtr &inMat = getInputValue(i);
-  int width = inMat->getWidth();
+  const MatrixPtr &inMat = getInputValue(i);
   Matrix::resizeOrCreate(colBuf_, K * groups_[i], N, false, useGpu_);
   MatrixPtr wGradMat = weights_[i]->getWGrad();
-  real *outGradData = getOutputGrad()->getData();
   int batchSize = inputLayers_[0]->getOutputValue()->getHeight();
   for (int n = 0; n < batchSize; ++n) {
-    colBuf_->vol2Col(inMat->getData() + n * width,
-                     channels_[i],
-                     imgSizeD_[i],
-                     imgSizeH_[i],
-                     imgSizeW_[i],
-                     filterSizeZ_[i],
-                     filterSizeY_[i],
-                     filterSize_[i],
-                     strideZ_[i],
-                     strideY_[i],
-                     stride_[i],
-                     paddingZ_[i],
-                     paddingY_[i],
-                     padding_[i]);
-    outGradData += n * getOutputGrad()->getWidth();
-    MatrixPtr outGradSub =
-        Matrix::create(outGradData, groups_[i] * M, N, false, useGpu_);
-    for (int g = 0; g < groups_[i]; ++g) {
-      MatrixPtr inMatSub = colBuf_->subMatrix(g * K, K);
-      MatrixPtr outG = outGradSub->subMatrix(g * M, M);
-      MatrixPtr wGradSub = wGradMat->subMatrix(g * M, M);
-      wGradSub->mul(*outG, *(inMatSub->getTranspose()), 1.0, 1.0);
-    }
+    colBuf_->vol2Col(inMat->getData() + n * inMat->getStride(),
+                     channels_[i],
+                     imgSizeD_[i],
+                     imgSizeH_[i],
+                     imgSizeW_[i],
+                     filterSizeZ_[i],
+                     filterSizeY_[i],
+                     filterSize_[i],
+                     strideZ_[i],
+                     strideY_[i],
+                     stride_[i],
+                     paddingZ_[i],
+                     paddingY_[i],
+                     padding_[i]);
+    real *outGradData =
+        getOutputGrad()->getData() + n * getOutputGrad()->getStride();
+    MatrixPtr outGradSub =
+        Matrix::create(outGradData, groups_[i] * M, N, false, useGpu_);
+    for (int g = 0; g < groups_[i]; ++g) {
+      MatrixPtr inMatSub = colBuf_->subMatrix(g * K, K);
+      MatrixPtr outG = outGradSub->subMatrix(g * M, M);
+      MatrixPtr wGradSub = wGradMat->subMatrix(g * M, M);
+      wGradSub->mul(*outG, *(inMatSub->getTranspose()), 1.0, 1.0);
+    }
   }
 }
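The wGradSub->mul call above accumulates the usual im2col weight gradient, dW += dOut * colBuf^T, one group at a time. A tiny self-contained sketch of that accumulation with assumed sizes, standing in for what the Matrix::mul call computes here:

    #include <cstdio>

    int main() {
      const int M = 2, N = 3, K = 2;  // assumed tiny GEMM dimensions
      double dOut[M][N] = {{1, 2, 3}, {4, 5, 6}};
      double col[K][N] = {{1, 0, 1}, {0, 1, 0}};
      double dW[M][K] = {{0, 0}, {0, 0}};
      // dW (M x K) += dOut (M x N) * col^T (N x K)
      for (int m = 0; m < M; ++m)
        for (int k = 0; k < K; ++k)
          for (int n = 0; n < N; ++n)
            dW[m][k] += dOut[m][n] * col[k][n];  // col indexed transposed
      std::printf("dW = {{%g, %g}, {%g, %g}}\n",
                  dW[0][0], dW[0][1], dW[1][0], dW[1][1]);
      return 0;
    }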
@@ -180,45 +191,54 @@ void Conv3DLayer::bpropData(int i) {
   int K = K_[i];
   Matrix::resizeOrCreate(colBuf_, K * groups_[i], N, false, useGpu_);
   MatrixPtr wMat = weights_[i]->getW();
-  real *outGradData = getOutputGrad()->getData();
-  real *preGradData = getInputGrad(i)->getData();
   int batchSize = inputLayers_[0]->getOutputValue()->getHeight();
   for (int n = 0; n < batchSize; ++n) {
-    outGradData += n * getOutputGrad()->getWidth();
-    preGradData += n * getInputGrad(i)->getWidth();
-    MatrixPtr outGradSub =
-        Matrix::create(outGradData, M * groups_[i], N, false, useGpu_);
-    for (int g = 0; g < groups_[i]; ++g) {
-      MatrixPtr wMatSub = wMat->subMatrix(g * M, M);
-      MatrixPtr outG = outGradSub->subMatrix(g * M, M);
-      MatrixPtr inGradMatSub = colBuf_->subMatrix(g * K, K);
-      inGradMatSub->mul(*(wMatSub->getTranspose()), *outG, 1.0, 0.0);
-    }
-    colBuf_->col2Vol(preGradData,
-                     channels_[i],
-                     imgSizeD_[i],
-                     imgSizeH_[i],
-                     imgSizeW_[i],
-                     filterSizeZ_[i],
-                     filterSizeY_[i],
-                     filterSize_[i],
-                     strideZ_[i],
-                     strideY_[i],
-                     stride_[i],
-                     paddingZ_[i],
-                     paddingY_[i],
-                     padding_[i],
-                     1.0,
-                     1.0);
+    real *outGradData =
+        getOutputGrad()->getData() + n * getOutputGrad()->getStride();
+    real *preGradData =
+        getInputGrad(i)->getData() + n * getInputGrad(i)->getStride();
+    MatrixPtr outGradSub =
+        Matrix::create(outGradData, M * groups_[i], N, false, useGpu_);
+    for (int g = 0; g < groups_[i]; ++g) {
+      MatrixPtr wMatSub = wMat->subMatrix(g * M, M);
+      MatrixPtr outG = outGradSub->subMatrix(g * M, M);
+      MatrixPtr inGradMatSub = colBuf_->subMatrix(g * K, K);
+      inGradMatSub->mul(*(wMatSub->getTranspose()), *outG, 1.0, 0.0);
+    }
+    colBuf_->col2Vol(preGradData,
+                     channels_[i],
+                     imgSizeD_[i],
+                     imgSizeH_[i],
+                     imgSizeW_[i],
+                     filterSizeZ_[i],
+                     filterSizeY_[i],
+                     filterSize_[i],
+                     strideZ_[i],
+                     strideY_[i],
+                     stride_[i],
+                     paddingZ_[i],
+                     paddingY_[i],
+                     padding_[i],
+                     1.0,
+                     1.0);
   }
 }

 void Conv3DLayer::bpropBiases() {
   MatrixPtr outGradMat = getOutputGrad();
   if (this->sharedBiases_) {
-    biases_->getWGrad()->collectSharedBias(*outGradMat, 1.0f);
+    biases_->getWGrad()->collectSharedBias(*outGradMat, 1.0f);
   } else {
-    biases_->getWGrad()->collectBias(*outGradMat, 1.0f);
+    biases_->getWGrad()->collectBias(*outGradMat, 1.0f);
   }
 }

 void Conv3DLayer::addBias() {
   MatrixPtr outMat = getOutputValue();
   if (this->sharedBiases_) {
-    outMat->addSharedBias(*(biases_->getW()), 1.0f);
+    outMat->addSharedBias(*(biases_->getW()), 1.0f);
   } else {
-    outMat->addBias(*(biases_->getW()), 1.0f);
+    outMat->addBias(*(biases_->getW()), 1.0f);
   }
 }
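In bpropData above, each group computes a column-buffer gradient as W^T * dOut, and col2Vol then scatter-adds that buffer back into the input gradient; overlapping receptive fields must accumulate, which is why the trailing scale arguments are (1.0, 1.0). A 1-D sketch of that scatter-add, with assumed sizes:

    #include <cstdio>

    int main() {
      // 1-D stand-in for col2Vol: input length 4, kernel 2, stride 1
      // -> 3 windows. Window gradients scatter-add into the input grid.
      const int inLen = 4, k = 2, windows = 3;
      double colGrad[k][windows] = {{1, 1, 1}, {2, 2, 2}};
      double inGrad[inLen] = {0, 0, 0, 0};
      for (int w = 0; w < windows; ++w)
        for (int j = 0; j < k; ++j)
          inGrad[w + j] += colGrad[j][w];  // overlaps accumulate
      std::printf("%g %g %g %g\n", inGrad[0], inGrad[1], inGrad[2], inGrad[3]);
      return 0;
    }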
paddle/gserver/layers/DeConv3DLayer.cpp
@@ -12,43 +12,42 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License. */
-#include "DeConv3DLayer.h"
 #include "paddle/utils/Logging.h"
 #include "paddle/utils/Stat.h"
+#include "DeConv3DLayer.h"
 namespace paddle {

 REGISTER_LAYER(deconv3d, DeConv3DLayer);

 #define DECONV_OUTPUT_SIZE(IN_SIZE, STRID, PAD, KSIZE) \
-  (((IN_SIZE) - 1) * (STRID) - 2 * (PAD) + (KSIZE))
+  (((IN_SIZE)-1) * (STRID)-2 * (PAD) + (KSIZE))

 bool DeConv3DLayer::init(const LayerMap &layerMap,
-                         const ParameterMap &parameterMap) {
+                         const ParameterMap &parameterMap) {
   if (!ConvBaseLayer::init(layerMap, parameterMap)) return false;
   // for Deconv, the dimension of Kernel is
   // channel * output * depth * height * weigth
   // Matrix storage format: (output * depth * height * weigth) x channel
   for (int index = 0; index < config_.inputs().size(); ++index) {
     M_.push_back(filterChannels_[index]);
-    K_.push_back(filterPixels_[index] * (numFilters_ / groups_[index]));
-    weights_[index]->getW()->reshape(filterPixels_[index] * numFilters_,
-                                     filterChannels_[index]);
-    weights_[index]->getWGrad()->reshape(filterPixels_[index] * numFilters_,
-                                         filterChannels_[index]);
+    K_.push_back(filterPixels_[index] * (numFilters_ / groups_[index]));
+    if (weights_[index]->getW())
+      weights_[index]->getW()->reshape(filterPixels_[index] * numFilters_,
+                                       filterChannels_[index]);
+    if (weights_[index]->getWGrad())
+      weights_[index]->getWGrad()->reshape(filterPixels_[index] * numFilters_,
+                                           filterChannels_[index]);
   }
-  biases_->getWGrad()->reshape(biases_->getWGrad()->width_,
-                               biases_->getWGrad()->height_);
-  biases_->getW()->reshape(biases_->getW()->width_, biases_->getW()->height_);
+  if (biases_->getWGrad())
+    biases_->getWGrad()->reshape(biases_->getWGrad()->width_,
+                                 biases_->getWGrad()->height_);
+  if (biases_->getW())
+    biases_->getW()->reshape(biases_->getW()->width_, biases_->getW()->height_);
   CHECK(inputLayers_.size() == parameters_.size());
   return true;
 }

 size_t DeConv3DLayer::getSize() {
   CHECK_NE(inputLayers_.size(), 0UL);
   // imgSizeH_.clear();
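The DECONV_OUTPUT_SIZE macro is the standard transposed-convolution output rule, out = (in - 1) * stride - 2 * pad + kernel, the inverse of the conv rule used in Conv3DLayer::getSize. A quick check of the arithmetic with assumed numbers:

    #include <cstdio>

    #define DECONV_OUTPUT_SIZE(IN_SIZE, STRID, PAD, KSIZE) \
      (((IN_SIZE)-1) * (STRID)-2 * (PAD) + (KSIZE))

    int main() {
      // e.g. a 5-voxel axis, stride 2, pad 1, kernel 3 -> (5-1)*2 - 2 + 3 = 9
      std::printf("%d\n", DECONV_OUTPUT_SIZE(5, 2, 1, 3));
      return 0;
    }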
@@ -64,18 +63,12 @@ size_t DeConv3DLayer::getSize() {
     // imgSizeH_.push_back(inputLayers_[i]->getOutput().getFrameHeight());
     // imgSizeW_.push_back(inputLayers_[i]->getOutput().getFrameWidth());
     // imgSizeD_.push_back(inputLayers_[i]->getOutput().getFrameDepth());
-    outputW_.push_back(DECONV_OUTPUT_SIZE(
-        imgSizeW_[i], stride_[i], padding_[i], filterSize_[i]));
-    outputH_.push_back(DECONV_OUTPUT_SIZE(
-        imgSizeH_[i], strideY_[i], paddingY_[i], filterSizeY_[i]));
-    outputD_.push_back(DECONV_OUTPUT_SIZE(
-        imgSizeD_[i], strideZ_[i], paddingZ_[i], filterSizeZ_[i]));
+    outputW_.push_back(
+        DECONV_OUTPUT_SIZE(imgSizeW_[i], stride_[i], padding_[i], filterSize_[i]));
+    outputH_.push_back(
+        DECONV_OUTPUT_SIZE(imgSizeH_[i], strideY_[i], paddingY_[i], filterSizeY_[i]));
+    outputD_.push_back(
+        DECONV_OUTPUT_SIZE(imgSizeD_[i], strideZ_[i], paddingZ_[i], filterSizeZ_[i]));
     No_.push_back(outputD_[i] * outputH_[i] * outputW_[i]);
     N_.push_back(imgSizeD_[i] * imgSizeH_[i] * imgSizeW_[i]);
     CHECK(layerSize == 0 || N_[i] * size_t(numFilters_) == layerSize);
@@ -96,32 +89,37 @@ void DeConv3DLayer::forward(PassType passType) {
   for (size_t i = 0; i != inputLayers_.size(); ++i) {
     REGISTER_TIMER_INFO("FwdDeConv3D", getName().c_str());
-    const MatrixPtr &inMat = getInputValue(i);
-    int width = inMat->getWidth();
+    const MatrixPtr &inMat = getInputValue(i);
     int M = M_[i];
     int N = N_[i];
     int K = K_[i];
     MatrixPtr wMat = weights_[i]->getW();
-    Matrix::resizeOrCreate(colBuf_, K * groups_[i] , N, false, useGpu_);
+    Matrix::resizeOrCreate(colBuf_, K * groups_[i], N, false, useGpu_);
     for (int n = 0; n < batchSize; ++n) {
-      real *inData = inMat->getData() + n * width;
-      real *colBufData = colBuf_->getData();
-      for (int g = 0; g < groups_[i]; g++) {
-        MatrixPtr wMatSub = wMat->subMatrix(g * K, K);
-        MatrixPtr inMatSub = Matrix::create(inData, M, N, false, useGpu_);
-        MatrixPtr colBufDataSub = Matrix::create(colBufData, K, N, false, useGpu_);
-        colBufDataSub->mul(*wMatSub, *inMatSub, 1.0, 0.0);
-        colBufData += K * N;
-        inData += M * N;
+      real *inData = inMat->getData() + n * inMat->getStride();
+      for (int g = 0; g < groups_[i]; ++g) {
+        MatrixPtr inMatSub = Matrix::create(inData, M, N, false, useGpu_);
+        MatrixPtr wMatSub = wMat->subMatrix(g * K, K);
+        MatrixPtr colBufDataSub = colBuf_->subMatrix(g * K, K);
+        colBufDataSub->mul(*wMatSub, *inMatSub, 1.0, 0.0);
+        inData += M * N;
       }
-      colBuf_->col2Vol(outMat->getData() + n * outMat->getWidth(),
-                       numFilters_,
-                       outputD_[i],
-                       outputH_[i],
-                       outputW_[i],
-                       filterSizeZ_[i],
-                       filterSizeY_[i],
-                       filterSize_[i],
-                       strideZ_[i],
-                       strideY_[i],
-                       stride_[i],
-                       paddingZ_[i],
-                       paddingY_[i],
-                       padding_[i],
-                       1.0,
-                       1.0);
+      colBuf_->col2Vol(outMat->getData() + n * outMat->getStride(),
+                       numFilters_,
+                       outputD_[i],
+                       outputH_[i],
+                       outputW_[i],
+                       filterSizeZ_[i],
+                       filterSizeY_[i],
+                       filterSize_[i],
+                       strideZ_[i],
+                       strideY_[i],
+                       stride_[i],
+                       paddingZ_[i],
+                       paddingY_[i],
+                       padding_[i],
+                       1.0,
+                       1.0);
     }
   }
   if (nullptr != this->biasParameter_) {
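Note the trailing (1.0, 1.0) on both col2Vol calls: these read as alpha/beta-style scale arguments, so the column buffer is accumulated into the output rather than overwriting it, matching the forward mul fix in Conv3DLayer. A sketch of that convention with a stand-in function, not Paddle's API:

    #include <cstdio>

    // dst = alpha * src + beta * dst: beta = 0 discards the old value,
    // beta = 1 accumulates, which is what the fixed code relies on.
    void axpby(const double *src, double *dst, int n, double alpha, double beta) {
      for (int i = 0; i < n; ++i) dst[i] = alpha * src[i] + beta * dst[i];
    }

    int main() {
      double src[2] = {1, 2}, dst[2] = {10, 10};
      axpby(src, dst, 2, 1.0, 1.0);  // accumulate -> {11, 12}
      std::printf("%g %g\n", dst[0], dst[1]);
      return 0;
    }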
@@ -134,63 +132,69 @@ void DeConv3DLayer::forward(PassType passType) {

 void DeConv3DLayer::backward(const UpdateCallback &callback) {
   backwardActivation();
   int batchSize = getOutputGrad()->getHeight();
-  int outputWidth = getOutputGrad()->getWidth();
   if (biases_ && biases_->getWGrad()) {
     bpropBiases();
     biases_->getParameterPtr()->incUpdate(callback);
   }
-  for (size_t i = 0; i < inputLayers_.size(); ++i) {
-    int M = M_[i];
-    int N = N_[i];
-    int K = K_[i];
-    Matrix::resizeOrCreate(colBuf_, K * groups_[i], N, false, useGpu_);
-    const MatrixPtr &inMat = getInputValue(i);
-    for (int n = 0; n < batchSize; ++n) {
-      if (weights_[i]->getWGrad() || this->needGradient_) {
-        colBuf_->vol2Col(getOutputGrad()->getData() + n * outputWidth,
-                         numFilters_,
-                         outputD_[i],
-                         outputH_[i],
-                         outputW_[i],
-                         filterSizeZ_[i],
-                         filterSizeY_[i],
-                         filterSize_[i],
-                         strideZ_[i],
-                         strideY_[i],
-                         stride_[i],
-                         paddingZ_[i],
-                         paddingY_[i],
-                         padding_[i]);
-      }
-      if (weights_[i]->getWGrad()) {
-        real *inData = inMat->getData() + n * inMat->getWidth();;
-        real *wGradData = weights_[i]->getWGrad()->getData();
-        for (int g = 0; g < groups_[i]; g++) {
-          MatrixPtr colBufDataSub = colBuf_->subMatrix(g * K, K);
-          MatrixPtr inMatSub = Matrix::create(inData, M, N, false, useGpu_);
-          MatrixPtr wGradMatSub = Matrix::create(wGradData, K, M, false, useGpu_);
-          wGradMatSub->mul(*colBufDataSub, *(inMatSub->getTranspose()), 1.0, 1.0);
-          wGradData += K * M;
-          inData += M * N;
-        }
-        weights_[i]->getParameterPtr()->incUpdate(callback);
-      }
-      if (this->needGradient_) {
-        real *preGrad = getInputGrad(i)->getData();
-        for (int g = 0; g < groups_[i]; ++g) {
-          MatrixPtr w = weights_[i]->getW()->subMatrix(g * K, K);
-          MatrixPtr outGradMat = colBuf_->subMatrix(g * K, K);
-          MatrixPtr inGradMatSub = Matrix::create(preGrad, M, N, false, useGpu_);
-          inGradMatSub->mul(*(w->getTranspose()), *outGradMat, 1.0, 0.0);
-          preGrad += M * N;
-        }
-      }
-    }
-  }
-}
+  for (size_t i = 0; i < inputLayers_.size(); ++i) {
+    if (weights_[i]->getWGrad() || this->needGradient_) {
+      int M = M_[i];
+      int N = N_[i];
+      int K = K_[i];
+      REGISTER_TIMER_INFO("BwdDeConv3D", getName().c_str());
+      Matrix::resizeOrCreate(colBuf_, K * groups_[i], N, false, useGpu_);
+      const MatrixPtr &inMat = getInputValue(i);
+      for (int n = 0; n < batchSize; ++n) {
+        colBuf_->vol2Col(
+            getOutputGrad()->getData() + n * getOutputGrad()->getStride(),
+            numFilters_,
+            outputD_[i],
+            outputH_[i],
+            outputW_[i],
+            filterSizeZ_[i],
+            filterSizeY_[i],
+            filterSize_[i],
+            strideZ_[i],
+            strideY_[i],
+            stride_[i],
+            paddingZ_[i],
+            paddingY_[i],
+            padding_[i]);
+        if (weights_[i]->getWGrad()) {
+          real *inData = inMat->getData() + n * inMat->getStride();
+          for (int g = 0; g < groups_[i]; ++g) {
+            MatrixPtr colBufDataSub = colBuf_->subMatrix(g * K, K);
+            MatrixPtr wGradMatSub = weights_[i]->getWGrad()->subMatrix(g * K, K);
+            MatrixPtr inMatSub = Matrix::create(inData, M, N, false, useGpu_);
+            wGradMatSub->mul(*colBufDataSub, *(inMatSub->getTranspose()), 1.0, 1.0);
+            inData += M * N;
+          }
+        }
+        if (getInputGrad(i)) {
+          real *preGrad =
+              getInputGrad(i)->getData() + n * getInputGrad(i)->getStride();
+          for (int g = 0; g < groups_[i]; ++g) {
+            MatrixPtr w = weights_[i]->getW()->subMatrix(g * K, K);
+            MatrixPtr outGradMat = colBuf_->subMatrix(g * K, K);
+            MatrixPtr inGradMatSub = Matrix::create(preGrad, M, N, false, useGpu_);
+            inGradMatSub->mul(*(w->getTranspose()), *outGradMat, 1.0, 1.0);
+            preGrad += M * N;
+          }
+        }
+      }
+      REGISTER_TIMER_INFO("WeightUpdate", getName().c_str());
+      weights_[i]->getParameterPtr()->incUpdate(callback);
+    }
+  }
+}

-void DeConv3DLayer::bpropWeights(int i) { }
-void DeConv3DLayer::bpropData(int i) { }
+void DeConv3DLayer::bpropWeights(int i) {}
+void DeConv3DLayer::bpropData(int i) {}

 void DeConv3DLayer::bpropBiases() {
-  MatrixPtr outGradMat = getOutputGrad();
+  const MatrixPtr &outGradMat = getOutputGrad();
   if (this->sharedBiases_) {
     biases_->getWGrad()->collectSharedBias(*outGradMat, 1.0f);
...