Commit 43cee33a
Repository: 机器未来 / Paddle (forked from PaddlePaddle / Paddle)
Authored on Aug 02, 2018 by tensor-tang
Parent: 0964de11

add mkl packed gemm
Showing 3 changed files with 118 additions and 0 deletions (+118, -0):
paddle/fluid/operators/math/blas.h        +37  -0
paddle/fluid/operators/math/blas_impl.h   +73  -0
paddle/fluid/platform/dynload/mklml.h     +8   -0
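Taken together, the three changes expose MKL's packed GEMM routines (cblas_?gemm_alloc/pack/compute/free) through Paddle's Blas wrapper: blas.h declares GEMM_ALLOC, GEMM_PACK, GEMM_COMPUTE and GEMM_FREE, blas_impl.h forwards them to the dynamically loaded MKLML symbols, and mklml.h registers those symbols with the loader. The sketch below is not part of the commit; it is a minimal usage example assuming a CPU device context, the pre-existing GetBlas helper, and a build with MKLML, with matrix shapes and leading dimensions chosen only for illustration.

// Hypothetical usage (not from the commit): pack B once, then reuse it
// for a GEMM through the interface added in blas.h.
#include "paddle/fluid/operators/math/blas.h"
#include "paddle/fluid/platform/device_context.h"

namespace math = paddle::operators::math;
namespace platform = paddle::platform;

void PackedGemm(const platform::CPUDeviceContext& ctx, const float* A,
                const float* B, float* C, int M, int N, int K) {
  auto blas = math::GetBlas<platform::CPUDeviceContext, float>(ctx);

  // Ask MKL for internal storage sized for the packed form of B.
  float* packed_B = blas.GEMM_ALLOC(CblasBMatrix, M, N, K);

  // Reorder row-major B (K x N, not transposed) into the packed buffer,
  // folding alpha = 1.0f into the packed data.
  blas.GEMM_PACK(CblasBMatrix, CblasNoTrans, M, N, K, 1.0f, B, N, packed_B);

  // C = A * packed(B); CblasPacked marks the pre-packed operand.
  blas.GEMM_COMPUTE(CblasNoTrans, CblasPacked, M, N, K, A, K, packed_B, N,
                    0.0f, C, N);

  blas.GEMM_FREE(packed_B);
}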
paddle/fluid/operators/math/blas.h
@@ -90,6 +90,23 @@ class Blas {
  void GEMM(bool transA, bool transB, int M, int N, int K, T alpha,
            const T* A, int lda, const T* B, int ldb, T beta, T* C,
            int ldc) const;

  template <typename T>
  T* GEMM_ALLOC(const CBLAS_IDENTIFIER id, const int M, const int N,
                const int K) const;

  template <typename T>
  void GEMM_PACK(const CBLAS_IDENTIFIER id, const CBLAS_TRANSPOSE trans,
                 int M, int N, int K, const T alpha, const T* src,
                 const int ld, T* dst) const;

  template <typename T>
  void GEMM_COMPUTE(int transA, int transB, int M, int N, int K, const T* A,
                    const int lda, const T* B, const int ldb, T beta, T* C,
                    const int ldc) const;

  template <typename T>
  void GEMM_FREE(T* data) const;

  template <typename T>
  void MatMul(const framework::Tensor& mat_a, bool trans_a,
              const framework::Tensor& mat_b, bool trans_b, T alpha,
@@ -146,6 +163,26 @@ class BlasT : private Blas<DeviceContext> {
    Base()->template GEMM<T>(args...);
  }

  template <typename... ARGS>
  T* GEMM_ALLOC(ARGS... args) const {
    Base()->template GEMM_ALLOC<T>(args...);
  }

  template <typename... ARGS>
  void GEMM_PACK(ARGS... args) const {
    Base()->template GEMM_PACK<T>(args...);
  }

  template <typename... ARGS>
  void GEMM_COMPUTE(ARGS... args) const {
    Base()->template GEMM_COMPUTE<T>(args...);
  }

  template <typename... ARGS>
  void GEMM_FREE(ARGS... args) const {
    Base()->template GEMM_FREE<T>(args...);
  }

  template <typename... ARGS>
  void MatMul(ARGS... args) const {
    Base()->template MatMul<T>(args...);
paddle/fluid/operators/math/blas_impl.h
@@ -31,6 +31,26 @@ struct CBlas<float> {
    platform::dynload::cblas_sgemm(args...);
  }

  template <typename... ARGS>
  static float* GEMM_ALLOC(ARGS... args) {
    return platform::dynload::cblas_sgemm_alloc(args...);
  }

  template <typename... ARGS>
  static void GEMM_PACK(ARGS... args) {
    platform::dynload::cblas_sgemm_pack(args...);
  }

  template <typename... ARGS>
  static void GEMM_COMPUTE(ARGS... args) {
    platform::dynload::cblas_sgemm_compute(args...);
  }

  template <typename... ARGS>
  static void GEMM_FREE(ARGS... args) {
    platform::dynload::cblas_sgemm_free(args...);
  }

#ifdef PADDLE_WITH_LIBXSMM
  template <typename... ARGS>
  static void SMM_GEMM(ARGS... args) {
@@ -71,6 +91,26 @@ struct CBlas<double> {
    platform::dynload::cblas_dgemm(args...);
  }

  template <typename... ARGS>
  static double* GEMM_ALLOC(ARGS... args) {
    return platform::dynload::cblas_dgemm_alloc(args...);
  }

  template <typename... ARGS>
  static void GEMM_PACK(ARGS... args) {
    platform::dynload::cblas_dgemm_pack(args...);
  }

  template <typename... ARGS>
  static void GEMM_COMPUTE(ARGS... args) {
    platform::dynload::cblas_dgemm_compute(args...);
  }

  template <typename... ARGS>
  static void GEMM_FREE(ARGS... args) {
    platform::dynload::cblas_dgemm_free(args...);
  }

#ifdef PADDLE_WITH_LIBXSMM
  template <typename... ARGS>
  static void SMM_GEMM(ARGS... args) {
@@ -224,6 +264,39 @@ inline void GEMM_WARP(CBLAS_ORDER order, CBLAS_TRANSPOSE transA,
                       beta, C, ldc);
}

template <>
template <typename T>
T* Blas<platform::CPUDeviceContext>::GEMM_ALLOC(const CBLAS_IDENTIFIER id,
                                                const int M, const int N,
                                                const int K) const {
  return CBlas<T>::GEMM_ALLOC(id, M, N, K);
}

template <>
template <typename T>
void Blas<platform::CPUDeviceContext>::GEMM_PACK(const CBLAS_IDENTIFIER id,
                                                 const CBLAS_TRANSPOSE trans,
                                                 int M, int N, int K,
                                                 const T alpha, const T* src,
                                                 const int ld, T* dst) const {
  CBlas<T>::GEMM_PACK(CblasRowMajor, id, trans, M, N, K, alpha, src, ld, dst);
}

template <>
template <typename T>
void Blas<platform::CPUDeviceContext>::GEMM_COMPUTE(
    int transA, int transB, int M, int N, int K, const T* A, const int lda,
    const T* B, const int ldb, T beta, T* C, const int ldc) const {
  CBlas<T>::GEMM_COMPUTE(CblasRowMajor, transA, transB, M, N, K, A, lda, B,
                         ldb, beta, C, ldc);
}

template <>
template <typename T>
void Blas<platform::CPUDeviceContext>::GEMM_FREE(T* data) const {
  CBlas<T>::GEMM_FREE(data);
}

template <>
template <typename T>
void Blas<platform::CPUDeviceContext>::GEMM(CBLAS_TRANSPOSE transA,
paddle/fluid/platform/dynload/mklml.h
@@ -60,6 +60,14 @@ extern void* mklml_dso_handle;
  __macro(cblas_dgemm_batch);   \
  __macro(vsAdd);               \
  __macro(vdAdd);               \
  __macro(cblas_sgemm_alloc);   \
  __macro(cblas_sgemm_pack);    \
  __macro(cblas_sgemm_compute); \
  __macro(cblas_sgemm_free);    \
  __macro(cblas_dgemm_alloc);   \
  __macro(cblas_dgemm_pack);    \
  __macro(cblas_dgemm_compute); \
  __macro(cblas_dgemm_free);    \
  __macro(MKL_Set_Num_Threads)

MKLML_ROUTINE_EACH(DECLARE_DYNAMIC_LOAD_MKLML_WRAP);
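The mklml.h change only adds the new routine names to the list that Paddle's dynamic loader wraps, so that platform::dynload::cblas_sgemm_pack and the other packed-GEMM entry points resolve from the MKLML shared library at runtime. The snippet below is an illustrative sketch of that general pattern under the assumption of a dlopen/dlsym-based loader; it is not Paddle's actual DECLARE_DYNAMIC_LOAD_MKLML_WRAP macro, whose definition lies outside this diff.

// Illustrative only: lazily resolve one MKLML routine from the already
// opened shared object and forward calls through the cached pointer.
#include <dlfcn.h>

extern void* mklml_dso_handle;  // declared in mklml.h, opened elsewhere

inline void cblas_sgemm_free_dyn(float* dest) {
  using FreeFn = void (*)(float*);
  static FreeFn fn =
      reinterpret_cast<FreeFn>(dlsym(mklml_dso_handle, "cblas_sgemm_free"));
  fn(dest);
}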