机器未来 / Paddle · forked from PaddlePaddle / Paddle
Commit 08159359
Authored Aug 07, 2017 by qijun

fix typo error

Parent: 090247dd

Showing 1 changed file with 26 additions and 95 deletions (+26 −95)
paddle/operators/math/math_function.cc (+26 −95)
@@ -19,74 +19,29 @@ namespace operators {

Both sides of this hunk contain the same declarations; the commit only re-wraps the long parameter lists. After the change the hunk reads:

namespace math {

template <>
void gemm<platform::CPUPlace, float>(const CBLAS_TRANSPOSE transA,
                                     const CBLAS_TRANSPOSE transB, const int M,
                                     const int N, const int K, const float alpha,
                                     const float* A, const int lda,
                                     const float* B, const int ldb,
                                     const float beta, float* C, const int ldc,
                                     platform::DeviceContext* context) {
  cblas_sgemm(CblasRowMajor, transA, transB, M, N, K, alpha, A, lda, B, ldb,
              beta, C, ldc);
}

template <>
void gemm<platform::CPUPlace, double>(const CBLAS_TRANSPOSE transA,
                                      const CBLAS_TRANSPOSE transB, const int M,
                                      const int N, const int K,
                                      const double alpha, const double* A,
                                      const int lda, const double* B,
                                      const int ldb, const double beta,
                                      double* C, const int ldc,
                                      platform::DeviceContext* context) {
  cblas_dgemm(CblasRowMajor, transA, transB, M, N, K, alpha, A, lda, B, ldb,
              beta, C, ldc);
}

template <>
void matmul<platform::CPUPlace, float>(const framework::Tensor& in1, bool in1_T,
                                       const framework::Tensor& in2, bool in2_T,
                                       float alpha, framework::Tensor* out,
                                       float beta,
                                       platform::DeviceContext* context) {
  auto in1_dim = in1.dims();
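For context, the CPU gemm specializations above are thin forwards to CBLAS. Below is a minimal standalone sketch of the same call, not part of this commit, assuming a CBLAS implementation such as OpenBLAS is installed (link with -lopenblas); it computes C = alpha * A * B + beta * C for row-major matrices.

// Illustrative sketch only: a direct cblas_sgemm call equivalent to what
// gemm<platform::CPUPlace, float> forwards for the no-transpose case.
#include <cblas.h>
#include <cstdio>

int main() {
  const int M = 2, N = 2, K = 3;
  // A is M x K, B is K x N, C is M x N, all stored row-major.
  float A[M * K] = {1, 2, 3,
                    4, 5, 6};
  float B[K * N] = {7,  8,
                    9, 10,
                   11, 12};
  float C[M * N] = {0, 0,
                    0, 0};
  // For row-major, non-transposed inputs the leading dimensions are the row
  // widths: lda = K, ldb = N, ldc = N -- the same values matmul passes below.
  cblas_sgemm(CblasRowMajor, CblasNoTrans, CblasNoTrans, M, N, K,
              /*alpha=*/1.0f, A, /*lda=*/K, B, /*ldb=*/N,
              /*beta=*/0.0f, C, /*ldc=*/N);
  for (int i = 0; i < M; ++i) {
    for (int j = 0; j < N; ++j) {
      std::printf("%6.1f ", C[i * N + j]);
    }
    std::printf("\n");
  }
  return 0;
}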
@@ -111,30 +66,17 @@ void matmul<platform::CPUPlace, float>(const framework::Tensor& in1,

The gemm call at the end of matmul<platform::CPUPlace, float> is likewise only re-wrapped. The specialization that follows carries the actual typo fix: the declaration matmul<platform::GPUPlace, double> becomes matmul<platform::CPUPlace, double>, matching the CPU gemm it calls:

  CBLAS_TRANSPOSE in1_Trans = (in1_T == false) ? CblasNoTrans : CblasTrans;
  CBLAS_TRANSPOSE in2_Trans = (in1_T == false) ? CblasNoTrans : CblasTrans;

  gemm<platform::CPUPlace, float>(in1_Trans, in2_Trans, M, N, K, alpha,
                                  in1.data<float>(), K, in2.data<float>(), N,
                                  beta, out->data<float>(), N, context);
}

template <>
void matmul<platform::CPUPlace, double>(const framework::Tensor& in1,
                                        bool in1_T,
                                        const framework::Tensor& in2,
                                        bool in2_T, float alpha,
                                        framework::Tensor* out, float beta,
                                        platform::DeviceContext* context) {
  auto in1_dim = in1.dims();
  auto in2_dim = in2.dims();
@@ -157,20 +99,9 @@ void matmul<platform::GPUPlace, double>(const framework::Tensor& in1,

The body of the double-precision matmul is re-wrapped in the same way and continues to dispatch to the CPU gemm specialization:

  CBLAS_TRANSPOSE in1_Trans = (in1_T == false) ? CblasNoTrans : CblasTrans;
  CBLAS_TRANSPOSE in2_Trans = (in1_T == false) ? CblasNoTrans : CblasTrans;

  gemm<platform::CPUPlace, double>(in1_Trans, in2_Trans, M, N, K, alpha,
                                   in1.data<double>(), K, in2.data<double>(), N,
                                   beta, out->data<double>(), N, context);
}

}  // namespace math
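As a closing illustration, not from the commit, the operation these wrappers request from BLAS can be written as plain loops for the no-transpose, row-major case used by matmul. The helper name gemm_reference is hypothetical.

// Reference loops for C = alpha * A * B + beta * C, with A: M x K,
// B: K x N, C: M x N, all row-major, so lda == K, ldb == N, ldc == N
// as in the code above.
void gemm_reference(int M, int N, int K, float alpha, const float* A,
                    const float* B, float beta, float* C) {
  for (int i = 0; i < M; ++i) {
    for (int j = 0; j < N; ++j) {
      float acc = 0.0f;
      for (int k = 0; k < K; ++k) {
        acc += A[i * K + k] * B[k * N + j];
      }
      C[i * N + j] = alpha * acc + beta * C[i * N + j];
    }
  }
}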