Skip to content
体验新版
项目
组织
正在加载...
登录
切换导航
打开侧边栏
机器未来
Paddle
提交
67db9d35
P
Paddle
项目概览
机器未来
/
Paddle
与 Fork 源项目一致
Fork自
PaddlePaddle / Paddle
通知
1
Star
1
Fork
0
代码
文件
提交
分支
Tags
贡献者
分支图
Diff
Issue
1
列表
看板
标记
里程碑
合并请求
0
Wiki
0
Wiki
分析
仓库
DevOps
项目成员
Pages
P
Paddle
项目概览
项目概览
详情
发布
仓库
仓库
文件
提交
分支
标签
贡献者
分支图
比较
Issue
1
Issue
1
列表
看板
标记
里程碑
合并请求
0
合并请求
0
Pages
分析
分析
仓库分析
DevOps
Wiki
0
Wiki
成员
成员
收起侧边栏
关闭侧边栏
动态
分支图
创建新Issue
提交
Issue看板
提交
67db9d35
编写于
9月 13, 2017
作者:
H
hedaoyuan
浏览文件
操作
浏览文件
下载
电子邮件补丁
差异文件
Refine the GemmConvKernel.
上级
a7c18722
变更
1
显示空白变更内容
内联
并排
Showing
1 changed file
with
27 additions
and
20 deletions
+27
-20
paddle/operators/gemm_conv_op.h
paddle/operators/gemm_conv_op.h
+27
-20
未找到文件。
paddle/operators/gemm_conv_op.h
浏览文件 @
67db9d35
...
@@ -29,61 +29,68 @@ class GemmConvKernel : public framework::OpKernel {
...
@@ -29,61 +29,68 @@ class GemmConvKernel : public framework::OpKernel {
public:
public:
void
Compute
(
const
framework
::
ExecutionContext
&
context
)
const
override
{
void
Compute
(
const
framework
::
ExecutionContext
&
context
)
const
override
{
const
Tensor
*
input
=
context
.
Input
<
Tensor
>
(
"Input"
);
const
Tensor
*
input
=
context
.
Input
<
Tensor
>
(
"Input"
);
Tensor
*
filter
=
const_cast
<
Tensor
*>
(
context
.
Input
<
Tensor
>
(
"Filter"
));
// The filter will be reshaped in the calculations,
// so here use an assignment operation,
// that avoids modifying the variable in the Scope.
Tensor
filter
=
*
context
.
Input
<
Tensor
>
(
"Filter"
);
Tensor
*
output
=
context
.
Output
<
Tensor
>
(
"Output"
);
Tensor
*
output
=
context
.
Output
<
Tensor
>
(
"Output"
);
output
->
mutable_data
<
T
>
(
context
.
GetPlace
());
output
->
mutable_data
<
T
>
(
context
.
GetPlace
());
std
::
vector
<
int
>
strides
=
context
.
Attr
<
std
::
vector
<
int
>>
(
"strides"
);
std
::
vector
<
int
>
strides
=
context
.
Attr
<
std
::
vector
<
int
>>
(
"strides"
);
std
::
vector
<
int
>
paddings
=
context
.
Attr
<
std
::
vector
<
int
>>
(
"paddings"
);
std
::
vector
<
int
>
paddings
=
context
.
Attr
<
std
::
vector
<
int
>>
(
"paddings"
);
auto
filter_dims
=
filter
->
dims
();
int
batch_size
=
input
->
dims
()[
0
];
int
batch_size
=
input
->
dims
()[
0
];
int
input_channels
=
input
->
dims
()[
1
];
int
input_channels
=
input
->
dims
()[
1
];
int
filter_height
=
filter
->
dims
()[
filter
->
dims
().
size
()
-
2
];
int
filter_height
=
filter
.
dims
()[
filter
.
dims
().
size
()
-
2
];
int
filter_width
=
filter
->
dims
()[
filter
->
dims
().
size
()
-
1
];
int
filter_width
=
filter
.
dims
()[
filter
.
dims
().
size
()
-
1
];
int
output_channels
=
output
->
dims
()[
1
];
int
output_height
=
output
->
dims
()[
2
];
int
output_height
=
output
->
dims
()[
2
];
int
output_width
=
output
->
dims
()[
3
];
int
output_width
=
output
->
dims
()[
3
];
paddle
::
operators
::
math
::
Im2ColFunctor
<
paddle
::
operators
::
math
::
Im2ColFunctor
<
paddle
::
operators
::
math
::
ColFormat
::
kCFO
,
Place
,
T
>
paddle
::
operators
::
math
::
ColFormat
::
kCFO
,
Place
,
T
>
im2col
;
im2col
;
// use col_shape in the im2col calculation
framework
::
DDim
col_shape
=
{
input_channels
,
filter_height
,
filter_width
,
framework
::
DDim
col_shape
=
{
input_channels
,
filter_height
,
filter_width
,
output_height
,
output_width
};
output_height
,
output_width
};
// use col_matrix_shape in the gemm calculation
framework
::
DDim
col_matrix_shape
=
{
input_channels
*
filter_height
*
filter_width
,
output_height
*
output_width
};
Tensor
col
;
Tensor
col
;
col
.
mutable_data
<
float
>
(
col_shape
,
context
.
GetPlace
());
col
.
mutable_data
<
float
>
(
col_shape
,
context
.
GetPlace
());
// col_matrix shares the same piece of data with col,
auto
*
device_context
=
// but will be reshaped into a two-dimensional matrix shape
const_cast
<
platform
::
DeviceContext
*>
(
context
.
device_context_
);
// to call the matrix multiplication interface.
Tensor
col_matrix
=
col
;
col_matrix
.
Resize
(
col_matrix_shape
);
framework
::
DDim
input_shape
=
{
input
->
dims
()[
1
],
input
->
dims
()[
2
],
framework
::
DDim
input_shape
=
{
input
->
dims
()[
1
],
input
->
dims
()[
2
],
input
->
dims
()[
3
]};
input
->
dims
()[
3
]};
framework
::
DDim
filter_matrix_shape
=
{
framework
::
DDim
filter_matrix_shape
=
{
filter
->
dims
()[
0
],
output_channels
,
framework
::
product
(
filter
.
dims
())
/
output_channels
};
filter
->
dims
()[
1
]
*
filter
->
dims
()[
2
]
*
filter
->
dims
()[
3
]}
;
filter
.
Resize
(
filter_matrix_shape
)
;
framework
::
DDim
col_matrix_shape
=
{
input_channels
*
filter_height
*
filter_width
,
framework
::
DDim
output_matrix_shape
=
{
output_channels
,
output_height
*
output_width
};
output_height
*
output_width
};
framework
::
DDim
output_matrix_shape
=
{
output
->
dims
()[
1
],
output
->
dims
()[
2
]
*
output
->
dims
()[
3
]};
auto
*
device_context
=
filter
->
Resize
(
filter_matrix_shape
);
const_cast
<
platform
::
DeviceContext
*>
(
context
.
device_context_
);
// convolution operator: im2col + gemm
// convolution operator: im2col + gemm
for
(
int
i
=
0
;
i
<
batch_size
;
i
++
)
{
for
(
int
i
=
0
;
i
<
batch_size
;
i
++
)
{
// im2col
// im2col
Tensor
in_slice
=
input
->
Slice
<
T
>
(
i
,
i
+
1
);
Tensor
in_slice
=
input
->
Slice
<
T
>
(
i
,
i
+
1
);
in_slice
.
Resize
(
input_shape
);
in_slice
.
Resize
(
input_shape
);
col
.
Resize
(
col_shape
);
im2col
(
in_slice
,
col
,
strides
[
0
],
strides
[
1
],
paddings
[
0
],
paddings
[
1
],
im2col
(
in_slice
,
col
,
strides
[
0
],
strides
[
1
],
paddings
[
0
],
paddings
[
1
],
device_context
);
device_context
);
// gemm
// gemm
Tensor
out_slice
=
output
->
Slice
<
T
>
(
i
,
i
+
1
);
Tensor
out_slice
=
output
->
Slice
<
T
>
(
i
,
i
+
1
);
out_slice
.
Resize
(
output_matrix_shape
);
out_slice
.
Resize
(
output_matrix_shape
);
col
.
Resize
(
col_matrix_shape
);
math
::
matmul
<
Place
,
T
>
(
filter
,
false
,
col_matrix
,
false
,
T
(
1.0
),
math
::
matmul
<
Place
,
T
>
(
*
filter
,
false
,
col
,
false
,
T
(
1.0
),
&
out_slice
,
&
out_slice
,
T
(
0.0
),
device_context
);
T
(
0.0
),
device_context
);
}
}
filter
->
Resize
(
filter_dims
);
}
}
};
};
...
...
编辑
预览
Markdown
is supported
0%
请重试
或
添加新附件
.
添加附件
取消
You are about to add
0
people
to the discussion. Proceed with caution.
先完成此消息的编辑!
取消
想要评论请
注册
或
登录