Commit fa10677a (parent 5229df52)
Author: xzl
Date: Aug 02, 2017

modify skipIm2col to need2col, delete useless variable colBuffer
Showing 2 changed files with 26 additions and 27 deletions:

  paddle/function/ConvOp.h        +4  -4
  paddle/function/GemmConvOp.cpp  +22 -23
paddle/function/ConvOp.h
@@ -110,8 +110,8 @@ protected:
   }
 
   // determine whether im2col needs to be performed
-  inline bool isSkipIm2col(const TensorShape& filter) const {
-    return (getFilterHeight(filter) == 1 && getFilterWidth(filter) == 1 &&
-            strideH() == 1 && strideW() == 1 && paddingH() == 0 &&
-            paddingW() == 0);
+  inline bool isNeedIm2col(const TensorShape& filter) const {
+    return !(getFilterHeight(filter) == 1 && getFilterWidth(filter) == 1 &&
+             strideH() == 1 && strideW() == 1 && paddingH() == 0 &&
+             paddingW() == 0);
   }
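The rename inverts the sense of the predicate: the old isSkipIm2col asked "can im2col be skipped?", while isNeedIm2col asks "does im2col need to run?", which reads more directly at every call site. As a minimal sketch of the same condition, written here as a hypothetical free function (the real member function uses the class getters such as strideH() and paddingW()), im2col is unnecessary exactly for a 1x1 filter with unit stride and zero padding:

#include <cstdio>

// Hypothetical stand-alone version of the predicate, for illustration only.
// im2col can be skipped exactly when each output pixel reads a single input
// pixel, i.e. 1x1 filter, stride 1, no padding.
inline bool isNeedIm2col(int filterH, int filterW, int strideH, int strideW,
                         int paddingH, int paddingW) {
  return !(filterH == 1 && filterW == 1 && strideH == 1 && strideW == 1 &&
           paddingH == 0 && paddingW == 0);
}

int main() {
  // Prints 0: 1x1 convolution, the input can feed GEMM directly.
  std::printf("1x1, stride 1, pad 0 -> %d\n", isNeedIm2col(1, 1, 1, 1, 0, 0));
  // Prints 1: 3x3 convolution, a column buffer has to be built first.
  std::printf("3x3, stride 1, pad 1 -> %d\n", isNeedIm2col(3, 3, 1, 1, 1, 1));
  return 0;
}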
paddle/function/GemmConvOp.cpp
@@ -66,15 +66,15 @@ public:
     real* inputData = inputs[0].data<real>();
     real* filterData = inputs[1].data<real>();
     real* outputData = outputs[0].data<real>();
-    bool skipIm2col = isSkipIm2col(filter);
+    bool needIm2col = isNeedIm2col(filter);
 
     TensorShape imShape =
         TensorShape({inputChannels / groups_, inputHeight, inputWidth});
 
     TensorShape colShape;
-    real *colBuffer, *colData = NULL;
+    real* colData = NULL;
 
-    if (!skipIm2col) {
+    if (needIm2col) {
       colShape = TensorShape({inputChannels / groups_,
                               filterHeight,
                               filterWidth,
@@ -93,8 +93,7 @@ public:
     for (size_t i = 0; i < batchSize; i++) {
       for (size_t g = 0; g < groups_; g++) {
-        colBuffer = inputData + g * inputOffset;
-        if (!skipIm2col) {
+        if (needIm2col) {
           im2col(inputData + g * inputOffset,
                  imShape,
                  colData,
@@ -103,7 +102,8 @@ public:
                  strideW(),
                  paddingH(),
                  paddingW());
-          colBuffer = colData;
+        } else {
+          colData = inputData + g * inputOffset;
         }
         int M = outputChannels / groups_;
         int N = outputHeight * outputWidth;
@@ -116,7 +116,7 @@ public:
              1.0f,
              filterData + g * filterOffset,
              K,
-             colBuffer,
+             colData,
              N,
              beta,
              outputData + g * outputOffset,
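The new else branch makes the aliasing explicit: when needIm2col is false, colData simply points at the current input slice instead of a separate column buffer. That is safe because, for a 1x1 filter with unit stride and no padding, the im2col matrix is exactly the input laid out as (channels) x (height * width). A self-contained check of that fact, using a naive im2col stand-in (stride 1 and zero padding assumed; this is not Paddle's implementation):

#include <cassert>
#include <cstddef>
#include <cstdio>
#include <vector>

// Naive im2col restricted to stride 1 / zero padding, used only to
// illustrate the layout: output is [channels * filterH * filterW] rows by
// [outH * outW] columns, stored row-major.
std::vector<float> naiveIm2col(const std::vector<float>& im, int channels,
                               int height, int width, int filterH,
                               int filterW) {
  int outH = height - filterH + 1;
  int outW = width - filterW + 1;
  std::vector<float> col;
  col.reserve(static_cast<std::size_t>(channels) * filterH * filterW * outH *
              outW);
  for (int c = 0; c < channels; ++c)
    for (int fh = 0; fh < filterH; ++fh)
      for (int fw = 0; fw < filterW; ++fw)
        for (int oh = 0; oh < outH; ++oh)
          for (int ow = 0; ow < outW; ++ow)
            col.push_back(im[(c * height + oh + fh) * width + ow + fw]);
  return col;
}

int main() {
  int channels = 2, height = 3, width = 4;
  std::vector<float> im(static_cast<std::size_t>(channels) * height * width);
  for (std::size_t i = 0; i < im.size(); ++i) im[i] = static_cast<float>(i);

  // For a 1x1 filter the column matrix is bit-for-bit the input itself,
  // so the forward pass can point colData at the input slice instead of
  // filling a separate buffer.
  assert(naiveIm2col(im, channels, height, width, 1, 1) == im);
  std::printf("1x1 im2col equals the input: no column buffer needed\n");
  return 0;
}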
@@ -169,15 +169,15 @@ public:
     real* outputGrad = inputs[0].data<real>();
     real* filterData = inputs[1].data<real>();
     real* inputGrad = outputs[0].data<real>();
-    bool skipIm2col = isSkipIm2col(filter);
+    bool needIm2col = isNeedIm2col(filter);
 
     TensorShape imShape =
         TensorShape({inputChannels / groups_, inputHeight, inputWidth});
 
     TensorShape colShape;
-    real *colBuffer, *colData = NULL;
+    real* colData = NULL;
 
-    if (!skipIm2col) {
+    if (needIm2col) {
       colShape = TensorShape({inputChannels / groups_,
                               filterHeight,
                               filterWidth,
@@ -200,10 +200,9 @@ public:
         int K = outputChannels / groups_;
         int N = outputHeight * outputWidth;
         int M = inputChannels / groups_ * filterHeight * filterWidth;
-        colBuffer = colData;
         real scale = 0.0f;
-        if (skipIm2col) {
-          colBuffer = inputGrad + g * inputOffset;
+        if (!needIm2col) {
+          colData = inputGrad + g * inputOffset;
           scale = 1.0f;
         }
         gemm(CblasTrans,
@@ -217,12 +216,12 @@ public:
              outputGrad + g * outputOffset,
              N,
              scale,
-             colBuffer,
+             colData,
              N);
-        if (!skipIm2col) {
+        if (needIm2col) {
           col2im(inputGrad + g * inputOffset,
                  imShape,
-                 colBuffer,
+                 colData,
                  colShape,
                  strideH(),
                  strideW(),
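In this backward-data kernel the flag also decides where GEMM writes: with needIm2col false the product lands directly in inputGrad + g * inputOffset and scale is raised to 1.0f so it accumulates onto the gradient already stored there; otherwise it lands in colData with scale = 0.0f and col2im scatters it back into the image. The sketch below shows the two paths agree for the 1x1 case; the row-major C = scale * C + A^T * B helper and the assumption that col2im accumulates are simplifications for illustration, not Paddle's actual kernels:

#include <cassert>
#include <cstddef>
#include <cstdio>
#include <vector>

// Naive C = scale * C + A^T * B with A (K x M), B (K x N), C (M x N),
// all row-major: a stand-in for the CblasTrans gemm call above.
void gemmAtB(const std::vector<float>& A, const std::vector<float>& B,
             std::vector<float>& C, int M, int N, int K, float scale) {
  for (int m = 0; m < M; ++m)
    for (int n = 0; n < N; ++n) {
      float acc = 0.0f;
      for (int k = 0; k < K; ++k) acc += A[k * M + m] * B[k * N + n];
      C[m * N + n] = scale * C[m * N + n] + acc;
    }
}

int main() {
  // Hypothetical sizes for one group of a 1x1 convolution:
  // M = input channels, N = outputHeight * outputWidth, K = output channels.
  int M = 3, N = 4, K = 2;
  std::vector<float> filter(K * M), outGrad(K * N), inputGrad(M * N, 0.5f);
  for (std::size_t i = 0; i < filter.size(); ++i)
    filter[i] = 0.1f * static_cast<float>(i);
  for (std::size_t i = 0; i < outGrad.size(); ++i)
    outGrad[i] = 0.2f * static_cast<float>(i);

  // Path taken when needIm2col == false: accumulate straight into inputGrad,
  // scale = 1.0f keeps the gradient that is already there.
  std::vector<float> direct = inputGrad;
  gemmAtB(filter, outGrad, direct, M, N, K, 1.0f);

  // General path, specialized to a 1x1 filter: gemm into a column buffer
  // with scale = 0.0f, then "col2im", which here degenerates to addition.
  std::vector<float> colData(static_cast<std::size_t>(M) * N, 0.0f);
  gemmAtB(filter, outGrad, colData, M, N, K, 0.0f);
  std::vector<float> viaCol = inputGrad;
  for (int i = 0; i < M * N; ++i) viaCol[i] += colData[i];

  for (int i = 0; i < M * N; ++i) assert(direct[i] == viaCol[i]);
  std::printf("direct accumulation matches gemm + col2im for 1x1 filters\n");
  return 0;
}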
@@ -281,15 +280,15 @@ public:
     real* outputGrad = inputs[0].data<real>();
     real* inputData = inputs[1].data<real>();
     real* filterGrad = outputs[0].data<real>();
-    bool skipIm2col = isSkipIm2col(filter);
+    bool needIm2col = isNeedIm2col(filter);
 
     TensorShape imShape =
         TensorShape({inputChannels / groups_, inputHeight, inputWidth});
 
     TensorShape colShape;
-    real *colBuffer, *colData = NULL;
+    real* colData = NULL;
 
-    if (!skipIm2col) {
+    if (needIm2col) {
       colShape = TensorShape({inputChannels / groups_,
                               filterHeight,
                               filterWidth,
@@ -307,8 +306,7 @@ public:
     size_t filterOffset = filter.getElements() / groups_;
     for (size_t i = 0; i < batchSize; i++) {
       for (size_t g = 0; g < groups_; g++) {
-        colBuffer = inputData + g * inputOffset;
-        if (!skipIm2col) {
+        if (needIm2col) {
           im2col(inputData + g * inputOffset,
                  imShape,
                  colData,
@@ -317,7 +315,8 @@ public:
                  strideW(),
                  paddingH(),
                  paddingW());
-          colBuffer = colData;
+        } else {
+          colData = inputData + g * inputOffset;
         }
         int M = outputChannels / groups_;
         int K = outputHeight * outputWidth;
@@ -330,7 +329,7 @@ public:
              1.0f,
              outputGrad + g * outputOffset,
              K,
-             colBuffer,
+             colData,
              K,
              i == 0 ? beta : 1.0f,
              filterGrad + g * filterOffset,