PaddlePaddle / Paddle
Commit f45aced5, authored Mar 24, 2019 by dengkaipeng
add jit test. develop=test
Parent: 51536f7f
Showing 6 changed files with 112 additions and 26 deletions.
paddle/fluid/operators/jit/more/mix/mix.cc    +5  -5
paddle/fluid/operators/jit/more/mix/mix.h     +1  -1
paddle/fluid/operators/jit/more/mkl/mkl.cc    +4  -4
paddle/fluid/operators/jit/more/mkl/mkl.h     +5  -5
paddle/fluid/operators/jit/refer/refer.h      +11 -7
paddle/fluid/operators/jit/test.cc            +86 -4
paddle/fluid/operators/jit/more/mix/mix.cc
@@ -50,7 +50,7 @@ void VTanh(const T* x, T* y, int n) {
   compute_addbias(&b, y, y, n);
 }
 
-void Softmax(const T* x, T* y, int n, int bs, int m) {
+void Softmax(const T* x, T* y, int n, int bs, int remain) {
   auto compute_hmax = KernelFuncs<HMaxTuple<T>, CPUPlace>::Cache().At(n);
   auto compute_hsum = KernelFuncs<HSumTuple<T>, CPUPlace>::Cache().At(n);
   auto compute_vscal = KernelFuncs<VScalTuple<T>, CPUPlace>::Cache().At(n);
@@ -66,15 +66,15 @@ void Softmax(const T* x, T* y, int n, int bs, int m) {
     scalar = static_cast<T>(0) - scalar;
     compute_vaddbias(&scalar, x, y, n);  // x - max
     compute_vexp(y, y, n);
-    if (m == 1) {
+    if (remain == 1) {
       compute_hsum(y, &scalar, n);
       scalar = static_cast<T>(1) / scalar;
       compute_vscal(&scalar, y, y, n);
     } else {
-      for (int j = 0; j < m; ++j) {
-        compute_stridesum(&y[j], &scalar, n, m);
+      for (int j = 0; j < remain; ++j) {
+        compute_stridesum(&y[j], &scalar, n, remain);
         scalar = static_cast<T>(1) / scalar;
-        compute_stridescal(&scalar, &y[j], &y[j], n, m);
+        compute_stridescal(&scalar, &y[j], &y[j], n, remain);
       }
     }
     x += n;
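The rename from `m` to `remain` makes the extra Softmax parameter self-describing: with `remain > 1`, each length-`n` row is normalized per interleaved column rather than as a whole. A minimal sketch of that semantics, assuming plain floats and a hypothetical function name (this is not the Paddle kernel plumbing itself):

#include <algorithm>
#include <cmath>

// Sketch: softmax over bs rows of length n. When remain > 1, column j
// (j = 0..remain-1) is normalized on its own, visiting y[j], y[j + remain],
// ... -- the role played by compute_stridesum / compute_stridescal above.
void softmax_sketch(const float* x, float* y, int n, int bs, int remain) {
  for (int i = 0; i < bs; ++i) {
    const float mx = *std::max_element(x, x + n);
    for (int k = 0; k < n; ++k) y[k] = std::exp(x[k] - mx);  // x - max
    for (int j = 0; j < remain; ++j) {
      float sum = 0.f;
      for (int k = j; k < n; k += remain) sum += y[k];
      for (int k = j; k < n; k += remain) y[k] /= sum;
    }
    x += n;
    y += n;
  }
}

With remain == 1 the inner loop degenerates to normalizing the whole row, matching the fast path above.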
paddle/fluid/operators/jit/more/mix/mix.h
@@ -26,7 +26,7 @@ using T = float;
 void VSigmoid(const T* x, T* y, int n);
 void VTanh(const T* x, T* y, int n);
-void Softmax(const T* x, T* y, int n, int bs, int m);
+void Softmax(const T* x, T* y, int n, int bs, int remain);
 
 void LSTMCtHt(lstm_t* step, const lstm_attr_t* attr);
 void LSTMC1H1(lstm_t* step, const lstm_attr_t* attr);
paddle/fluid/operators/jit/more/mkl/mkl.cc
@@ -81,7 +81,7 @@ void VScal<double>(const double* a, const double* x, double* y, int n) {
 template <>
 void StrideScal<float>(const float* a, const float* x, float* y, int n,
                        int stride) {
   if (x == y) {
-    platform::dynload::cblas_sscal(n, *a, y, stride);
+    platform::dynload::cblas_sscal(n / stride, *a, y, stride);
   } else {
     refer::StrideScal<float>(a, x, y, n, stride);
   }
@@ -90,7 +90,7 @@ void StrideScal<float>(const float* a, const float* x, float* y, int n, int stride) {
 template <>
 void StrideScal<double>(const double* a, const double* x, double* y, int n,
                         int stride) {
   if (x == y) {
-    platform::dynload::cblas_dscal(n, *a, y, stride);
+    platform::dynload::cblas_dscal(n / stride, *a, y, stride);
   } else {
     refer::StrideScal<double>(a, x, y, n, stride);
   }
@@ -148,12 +148,12 @@ void ASum<double>(const double* x, double* res, int n) {
 template <>
 void StrideASum<float>(const float* x, float* res, int n, int stride) {
-  res[0] = platform::dynload::cblas_sasum(n, x, stride);
+  res[0] = platform::dynload::cblas_sasum(n / stride, x, stride);
 }
 
 template <>
 void StrideASum<double>(const double* x, double* res, int n, int stride) {
-  res[0] = platform::dynload::cblas_dasum(n, x, stride);
+  res[0] = platform::dynload::cblas_dasum(n / stride, x, stride);
 }
 
 // TODO(TJ): tuning me carefully on AVX, AVX2 and AVX512
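The MKL fix follows the CBLAS contract: cblas_sscal(N, alpha, X, incX) updates the N elements X[0], X[incX], ..., X[(N-1)*incX], so a buffer of length n scaled at stride `stride` holds only n / stride such elements, and passing n would walk past the end of the buffer. A small demo against stock CBLAS (Paddle resolves the same symbols through platform::dynload); a sketch rather than Paddle code:

#include <cblas.h>
#include <iostream>

int main() {
  float y[6] = {1, 2, 3, 4, 5, 6};
  const int n = 6, stride = 2;
  // n / stride = 3 strided elements: y[0], y[2], y[4].
  cblas_sscal(n / stride, 10.0f, y, stride);
  for (float v : y) std::cout << v << ' ';  // prints: 10 2 30 4 50 6
  std::cout << '\n';
  return 0;
}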
paddle/fluid/operators/jit/more/mkl/mkl.h
@@ -135,7 +135,7 @@ template <typename T>
 void StrideScal(const T* a, const T* x, T* y, int n, int stride);
 
 template <typename T>
-void Softmax(const T* x, T* y, int n, int bs, int m = 1) {
+void Softmax(const T* x, T* y, int n, int bs, int remain = 1) {
   std::vector<T> entities(bs);
   for (int i = 0; i < bs; ++i) {
     entities[i] = x[i * n];
@@ -149,15 +149,15 @@ void Softmax(const T* x, T* y, int n, int bs, int m=1) {
   VExp(y, y, n * bs);
   for (int i = 0; i < bs; ++i) {
     T sum;
-    if (m == 1) {
+    if (remain == 1) {
       ASum(&y[i * n], &sum, n);
       sum = static_cast<T>(1) / sum;
       VScal(&sum, &y[i * n], &y[i * n], n);
     } else {
-      for (int j = 0; j < m; ++j) {
-        StrideASum(&y[i * n + j], &sum, n / m, m);
+      for (int j = 0; j < remain; ++j) {
+        StrideASum(&y[i * n + j], &sum, n, remain);
         sum = static_cast<T>(1) / sum;
-        StrideScal(&sum, &y[i * n + j], &y[i * n + j], n / m, m);
+        StrideScal(&sum, &y[i * n + j], &y[i * n + j], n, remain);
       }
     }
   }
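Note how these call sites pair with the mkl.cc change above: StrideASum and StrideScal now receive the full row length n and derive the element count internally, instead of every caller pre-dividing by the stride. A sketch of the adjusted contract with a hypothetical helper name (not Paddle's exact reference code):

#include <cmath>

// x has n elements in memory; the kernel itself visits only the
// n / stride entries x[0], x[stride], x[2 * stride], ...
template <typename T>
void stride_asum_sketch(const T* x, T* res, int n, int stride) {
  T sum = T(0);
  for (int i = 0; i < n; i += stride) sum += std::abs(x[i]);
  *res = sum;
}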
paddle/fluid/operators/jit/refer/refer.h
@@ -421,30 +421,34 @@ void StrideASum(const T* x, T* res, int n, int stride) {
 template <typename T>
 void StrideScal(const T* a, const T* x, T* y, int n, int stride) {
-  for (int i = 0; i < n; i += stride) {
-    y[i] = x[i] * a[0];
+  for (int i = 0; i < n; ++i) {
+    if (i % stride == 0) {
+      y[i] = x[i] * a[0];
+    } else {
+      y[i] = x[i];
+    }
   }
 }
 
 // y = e^(x - max(x))
 // y = y / sum(y)
 template <typename T>
-void Softmax(const T* x, T* y, int n, int bs = 1, int m = 1) {
+void Softmax(const T* x, T* y, int n, int bs = 1, int remain = 1) {
   for (int i = 0; i < bs; ++i) {
     T scalar;
     HMax(x, &scalar, n);
     scalar = static_cast<T>(0) - scalar;
     VAddBias(&scalar, x, y, n);  // x - max
     VExp(y, y, n);
-    if (m == 1) {
+    if (remain == 1) {
       HSum(y, &scalar, n);
       scalar = static_cast<T>(1) / scalar;
       VScal(&scalar, y, y, n);
     } else {
-      for (int j = 0; j < m; j++) {
-        StrideASum(&y[j], &scalar, n, m);
+      for (int j = 0; j < remain; j++) {
+        StrideASum(&y[j], &scalar, n, remain);
         scalar = static_cast<T>(1) / scalar;
-        StrideScal(&scalar, &y[j], &y[j], n, m);
+        StrideScal(&scalar, &y[j], &y[j], n, remain);
       }
     }
     x += n;
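The reference StrideScal now writes every output element, scaling the stride-multiple positions and copying the rest through; the old loop skipped the in-between positions, which left y holding stale data whenever x != y. A self-contained illustration of the new behavior (a sketch, not the Paddle source):

#include <iostream>
#include <vector>

template <typename T>
void stride_scal_sketch(const T* a, const T* x, T* y, int n, int stride) {
  for (int i = 0; i < n; ++i) {
    y[i] = (i % stride == 0) ? x[i] * a[0] : x[i];  // scale or pass through
  }
}

int main() {
  const std::vector<float> x = {1, 2, 3, 4, 5, 6};
  std::vector<float> y(x.size());
  const float a = 10.0f;
  stride_scal_sketch(&a, x.data(), y.data(), static_cast<int>(x.size()), 2);
  for (float v : y) std::cout << v << ' ';  // prints: 10 2 30 4 50 6
  std::cout << '\n';
  return 0;
}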
paddle/fluid/operators/jit/test.cc
@@ -723,11 +723,10 @@ void TestKernelSoftmax() {
   VLOG(10) << "Test JITKernel: " << jit::to_string(KernelTuple::kernel_type);
   for (int bs : {1, 2, 10}) {
     for (int n : TestSizes()) {
-      for (int m : {1, 2}) {
+      for (int m : {1, 2, 3}) {  // remain
         if (m > n || n % m != 0) {
           continue;
         }
-        VLOG(10) << "Softmax: " << bs << ", " << n << ", " << m;
         auto ref = jit::GetReferFunc<KernelTuple>();
         EXPECT_TRUE(ref != nullptr);
         std::vector<T> x(bs * n), y(bs * n);
@@ -766,6 +765,86 @@ void TestKernelSoftmax() {
   }
 }
 
+template <typename KernelTuple, typename PlaceType>
+void TestKernelStrideASum() {
+  using T = typename KernelTuple::data_type;
+  VLOG(10) << "Test JITKernel: " << jit::to_string(KernelTuple::kernel_type);
+  for (int d : TestSizes()) {
+    for (int m : {1, 2, 3}) {  // stride
+      if (m > d || d % m != 0) {
+        continue;
+      }
+      auto ref = jit::GetReferFunc<KernelTuple>();
+      EXPECT_TRUE(ref != nullptr);
+      std::vector<T> x(d);
+      RandomVec<T>(d, x.data());
+      T ref_res;
+      ref(x.data(), &ref_res, d, m);
+
+      auto verifier = [](const typename KernelTuple::func_type tgt,
+                         const std::vector<T>& x, const T ref_res,
+                         const int m) {
+        EXPECT_TRUE(tgt != nullptr);
+        T tgt_res;
+        tgt(x.data(), &tgt_res, x.size(), m);
+        ExpectEQ<T>(&tgt_res, &ref_res, 1);
+      };
+      TestAllImpls<KernelTuple, PlaceType>(d, verifier, x, ref_res, m);
+    }
+  }
+}
+
+template <typename KernelTuple, typename PlaceType>
+void TestKernelStrideScal() {
+  using T = typename KernelTuple::data_type;
+  VLOG(10) << "Test JITKernel: " << jit::to_string(KernelTuple::kernel_type);
+  // for (int d : TestSizes()) {
+  //   for (int m : {1, 2, 3}) {  // stride
+  for (int d : {4}) {
+    for (int m : {2}) {  // stride
+      if (m > d || d % m != 0) {
+        continue;
+      }
+      auto ref = jit::GetReferFunc<KernelTuple>();
+      EXPECT_TRUE(ref != nullptr);
+      const T a = static_cast<T>(3);
+      std::vector<T> x(d), yref(d);
+      std::vector<T> xinp(d);  // inplace test
+      RandomVec<T>(d, x.data());
+      std::copy(x.begin(), x.end(), xinp.begin());
+
+      const T* x_data = x.data();
+      T* yref_data = yref.data();
+      T* xinp_data = xinp.data();
+      // test refer code inplace
+      ref(&a, x_data, yref_data, d, m);
+      ref(&a, xinp_data, xinp_data, d, m);
+      ExpectEQ<T>(xinp_data, yref_data, d);
+
+      auto verifier = [](const typename KernelTuple::func_type tgt, const T a,
+                         const std::vector<T>& x, const std::vector<T>& yref,
+                         const int m) {
+        EXPECT_TRUE(tgt != nullptr);
+        EXPECT_EQ(yref.size(), x.size());
+        const T* x_data = x.data();
+        const T* yref_data = yref.data();
+        const int d = yref.size();
+        std::vector<T> ytgt(d);
+        T* ytgt_data = ytgt.data();
+        // test normal
+        tgt(&a, x_data, ytgt_data, d, m);
+        ExpectEQ<T>(ytgt_data, yref_data, d);
+        // test inplace x
+        std::copy(x.begin(), x.end(), ytgt.begin());
+        tgt(&a, ytgt_data, ytgt_data, d, m);
+        ExpectEQ<T>(ytgt_data, yref_data, d);
+      };
+      TestAllImpls<KernelTuple, PlaceType>(d, verifier, a, x, yref, m);
+    }
+  }
+}
+
 template <typename KernelTuple, typename PlaceType>
 void TestKernelSgd() {
   using T = typename KernelTuple::data_type;
@@ -918,7 +997,7 @@ TEST(JITKernel_pool, more) {
   EXPECT_EQ(kers.size(), 10UL);
 #else
 #ifdef PADDLE_WITH_MKLML
-  EXPECT_EQ(kers.size(), 21UL);
+  EXPECT_EQ(kers.size(), 22UL);
 #else
   EXPECT_EQ(kers.size(), 8UL);
 #endif
@@ -927,7 +1006,7 @@ TEST(JITKernel_pool, more) {
 TEST(JITKernel_pool, refer) {
   const auto& kers = jit::ReferKernelPool::Instance().AllKernels();
-  EXPECT_EQ(kers.size(), 29UL);
+  EXPECT_EQ(kers.size(), 31UL);
 }
 
 // test helper
@@ -1298,3 +1377,6 @@ TEST_CPU_KERNEL(MatMul);
 TEST_CPU_KERNEL(Softmax);
 TEST_CPU_KERNEL(Sgd);
 TEST_CPU_KERNEL(VBroadcast);
+TEST_CPU_KERNEL(StrideASum);
+TEST_CPU_KERNEL(StrideScal);
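The two new TEST_CPU_KERNEL lines register the test functions above with gtest. Assuming the macro follows the same name-pasting pattern as the other kernels in this file (its body is outside this diff, so this expansion is an assumption to verify against test.cc), each registration amounts to roughly:

// Assumed expansion of TEST_CPU_KERNEL(StrideASum); check the macro
// definition in test.cc before relying on the exact tuple names.
TEST(JITKernel, StrideASum) {
  TestKernelStrideASum<jit::StrideASumTuple<float>, CPUPlace>();
  TestKernelStrideASum<jit::StrideASumTuple<double>, CPUPlace>();
}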