Crayon鑫 / Paddle
Forked from PaddlePaddle / Paddle
Commit e54f203c
Authored Apr 15, 2018 by dzhwinter

"move to a new PR"

Parent 494c262a
Showing 5 changed files with 246 additions and 354 deletions (+246 −354)
paddle/fluid/operators/activation_op.cc                     +186  −330
paddle/fluid/operators/activation_op.h                       +48   −10
paddle/fluid/pybind/pybind.cc                                 +4    −0
python/paddle/fluid/layer_helper.py                           +5    −1
python/paddle/fluid/tests/unittests/test_activation_op.py     +3   −13
paddle/fluid/operators/activation_op.cc

This diff is collapsed in the page view (186 additions, 330 deletions not shown).
paddle/fluid/operators/activation_op.h
@@ -10,6 +10,9 @@ See the License for the specific language governing permissions and
 limitations under the License. */
 
 #pragma once
+#include <glog/logging.h>
+#include <string>
+#include <unordered_set>
 #include <utility>
 #include <vector>
@@ -25,6 +28,16 @@ limitations under the License. */
 namespace paddle {
 namespace operators {
 
+/* Use ugly global variable, for the using in python layer side
+   Please refer to the layer_helper.py and get the details.
+ */
+static std::unordered_set<std::string> InplaceOpSet = {
+    "sigmoid", "exp",        "relu",  "tanh",      "sqrt",         "ceil",
+    "floor",   "reciprocal", "relu6", "soft_relu", "hard_sigmoid",
+};
+static bool IsInplace(std::string op) { return InplaceOpSet.count(op); }
+
 template <typename DeviceContext, typename Functor>
 class ActivationKernel
     : public framework::OpKernel<typename Functor::ELEMENT_TYPE> {
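For reference while reading the Python side of this commit, here is a plain-Python mirror of the whitelist check (illustration only; the real check is the C++ IsInplace above, which a later hunk exports to Python):

    # Plain-Python mirror of InplaceOpSet / IsInplace, not part of the commit.
    INPLACE_OP_SET = {
        "sigmoid", "exp", "relu", "tanh", "sqrt", "ceil",
        "floor", "reciprocal", "relu6", "soft_relu", "hard_sigmoid",
    }

    def is_inplace(op_name):
        # True when the op may reuse its input's memory for the output.
        return op_name in INPLACE_OP_SET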
@@ -60,7 +73,6 @@ class ActivationGradKernel
  public:
   using T = typename Functor::ELEMENT_TYPE;
   void Compute(const framework::ExecutionContext& context) const override {
-    auto* X = context.Input<framework::Tensor>("X");
     auto* Out = context.Input<framework::Tensor>("Out");
     auto* dOut =
         context.Input<framework::Tensor>(framework::GradVarName("Out"));
@@ -68,7 +80,6 @@ class ActivationGradKernel
     dX->mutable_data<T>(context.GetPlace());
     auto dout = framework::EigenVector<T>::Flatten(*dOut);
-    auto x = framework::EigenVector<T>::Flatten(*X);
     auto out = framework::EigenVector<T>::Flatten(*Out);
     auto dx = framework::EigenVector<T>::Flatten(*dX);
     auto* place =
@@ -78,7 +89,16 @@ class ActivationGradKernel
     for (auto& attr : attrs) {
       *attr.second = context.Attr<float>(attr.first);
     }
-    functor(*place, x, out, dout, dx);
+    bool inplace = functor.Inplace();
+    if (!inplace) {
+      auto* X = context.Input<framework::Tensor>("X");
+      auto x = framework::EigenVector<T>::Flatten(*X);
+      functor(*place, x, out, dout, dx);
+    } else {
+      VLOG(10) << " Inplace activation ";
+      auto x = framework::EigenVector<T>::Flatten(*dX);
+      functor(*place, x, out, dout, dx);
+    }
   }
 };
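In words: when the forward op ran inplace, Out has overwritten X's buffer, so the backward kernel no longer fetches "X" at all; it binds the functor's x argument to the dX buffer as a placeholder and relies on the whitelisted functors (rewritten in the hunks below) computing the gradient from out alone. A minimal sketch of that dispatch in plain Python (names are illustrative, not Paddle API):

    def activation_grad(context, functor):
        # `context` is a plain dict here; in the C++ kernel these are tensors.
        out, dout, dx = context["Out"], context["Out@GRAD"], context["X@GRAD"]
        if not functor.inplace:
            x = context["X"]   # forward input is still alive
        else:
            x = dx             # "X" was overwritten by Out; pass a placeholder
        functor(x, out, dout, dx)  # whitelisted functors never read x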
@@ -89,6 +109,14 @@ struct BaseActivationFunctor {
   using AttrPair = std::vector<std::pair<const char*, float*>>;
 
   AttrPair GetAttrs() { return AttrPair(); }
+
+  /* NOTE(*): Output reuse X memory if X is not dependented by its Gradient.
+     For example, sigmoid op's gradient didn't involve x, so its output can
+     reuse
+     input memory. But abs op's gradient use x, it can not be inplaced.
+     gradient did use x.
+   */
+  bool Inplace() const { return false; }
 };
 
 // sigmoid(x) = 1 / (1 + exp(-x))
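The identities behind that NOTE are standard calculus: every whitelisted op has a derivative expressible in the forward output alone, e.g.

    \sigma'(x) = \mathrm{out}\,(1 - \mathrm{out}), \quad
    \tanh'(x) = 1 - \mathrm{out}^{2}, \quad
    (\sqrt{x})' = \frac{1}{2\,\mathrm{out}}, \quad
    (e^{x})' = \mathrm{out}, \quad
    (1/x)' = -\mathrm{out}^{2}

whereas \frac{d}{dx}\lvert x\rvert = \operatorname{sign}(x) cannot be recovered from \mathrm{out} = \lvert x\rvert alone, which is the NOTE's abs counterexample.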
@@ -102,6 +130,7 @@ struct SigmoidFunctor : public BaseActivationFunctor<T> {
 template <typename T>
 struct SigmoidGradFunctor : public BaseActivationFunctor<T> {
+  bool Inplace() const { return IsInplace("sigmoid"); }
   template <typename Device, typename X, typename Out, typename dOut,
             typename dX>
   void operator()(Device d, X x, Out out, dOut dout, dX dx) const {
@@ -156,6 +185,7 @@ struct ExpFunctor : public BaseActivationFunctor<T> {
 template <typename T>
 struct ExpGradFunctor : public BaseActivationFunctor<T> {
+  bool Inplace() const { return IsInplace("exp"); }
   template <typename Device, typename X, typename Out, typename dOut,
             typename dX>
   void operator()(Device d, X x, Out out, dOut dout, dX dx) const {
@@ -174,10 +204,11 @@ struct ReluFunctor : public BaseActivationFunctor<T> {
 template <typename T>
 struct ReluGradFunctor : public BaseActivationFunctor<T> {
+  bool Inplace() const { return IsInplace("relu"); }
   template <typename Device, typename X, typename Out, typename dOut,
             typename dX>
   void operator()(Device d, X x, Out out, dOut dout, dX dx) const {
-    dx.device(d) = dout * (x > static_cast<T>(0)).template cast<T>();
+    dx.device(d) = dout * (out > static_cast<T>(0)).template cast<T>();
   }
 };
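The x-to-out rewrite above is an identity, not an approximation: with \mathrm{out} = \max(x, 0),

    \mathbf{1}[x > 0] = \mathbf{1}[\mathrm{out} > 0]

elementwise (both masks are 0 exactly when x \le 0), so the gradient is unchanged even though x's buffer may now hold out.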
@@ -192,6 +223,7 @@ struct TanhFunctor : public BaseActivationFunctor<T> {
 template <typename T>
 struct TanhGradFunctor : public BaseActivationFunctor<T> {
+  bool Inplace() const { return IsInplace("tanh"); }
   template <typename Device, typename X, typename Out, typename dOut,
             typename dX>
   void operator()(Device d, X x, Out out, dOut dout, dX dx) const {
@@ -297,6 +329,7 @@ struct SqrtFunctor : public BaseActivationFunctor<T> {
 template <typename T>
 struct SqrtGradFunctor : public BaseActivationFunctor<T> {
+  bool Inplace() const { return IsInplace("sqrt"); }
   template <typename Device, typename X, typename Out, typename dOut,
             typename dX>
   void operator()(Device d, X x, Out out, dOut dout, dX dx) const {
@@ -316,10 +349,11 @@ struct CeilFunctor : public BaseActivationFunctor<T> {
 template <typename T>
 struct ZeroGradFunctor : public BaseActivationFunctor<T> {
+  bool Inplace() const { return IsInplace("ceil"); }
   template <typename Device, typename X, typename Out, typename dOut,
             typename dX>
   void operator()(Device d, X x, Out out, dOut dout, dX dx) const {
-    dx.device(d) = static_cast<T>(0) / x;
+    dx.device(d) = static_cast<T>(0) / out;
   }
 };
@@ -432,6 +466,7 @@ struct ReciprocalFunctor : public BaseActivationFunctor<T> {
 template <typename T>
 struct ReciprocalGradFunctor : public BaseActivationFunctor<T> {
+  bool Inplace() const { return IsInplace("reciprocal"); }
   template <typename Device, typename X, typename Out, typename dOut,
             typename dX>
   void operator()(Device d, X x, Out out, dOut dout, dX dx) const {
@@ -531,12 +566,14 @@ struct Relu6GradFunctor : public BaseActivationFunctor<T> {
   typename BaseActivationFunctor<T>::AttrPair GetAttrs() {
     return {{"threshold", &threshold}};
   }
+  bool Inplace() const { return IsInplace("relu6"); }
   template <typename Device, typename X, typename Out, typename dOut,
             typename dX>
   void operator()(Device d, X x, Out out, dOut dout, dX dx) const {
-    dx.device(d) = dout *
-                   ((x > static_cast<T>(0)) * (x < static_cast<T>(threshold)))
-                       .template cast<T>();
+    dx.device(d) =
+        dout *
+        ((out > static_cast<T>(0)) * (out < static_cast<T>(threshold)))
+            .template cast<T>();
   }
 };
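The same mask identity covers relu6: with \mathrm{out} = \min(\max(x, 0), t) for t = \mathrm{threshold}, out equals x exactly on the open interval where the gradient is nonzero, and both masks vanish outside it, so

    \mathbf{1}[0 < x < t] = \mathbf{1}[0 < \mathrm{out} < t]

elementwise.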
@@ -611,11 +648,12 @@ struct SoftReluGradFunctor : public BaseActivationFunctor<T> {
   typename BaseActivationFunctor<T>::AttrPair GetAttrs() {
     return {{"threshold", &threshold}};
   }
+  bool Inplace() const { return IsInplace("softrelu"); }
   template <typename Device, typename X, typename Out, typename dOut,
             typename dX>
   void operator()(Device d, X x, Out out, dOut dout, dX dx) const {
     auto tmp = static_cast<T>(threshold);
-    auto temp = ((x > -tmp) * (x < tmp)).template cast<T>().eval();
+    auto temp = ((out > -tmp) * (out < tmp)).template cast<T>().eval();
     dx.device(d) = dout * (static_cast<T>(1) - (-out).exp()) * temp;
   }
 };
@@ -791,7 +829,7 @@ struct HardSigmoidGradFunctor : public BaseActivationFunctor<T> {
   typename BaseActivationFunctor<T>::AttrPair GetAttrs() {
     return {{"slope", &slope}, {"offset", &offset}};
   }
+  bool Inplace() { return IsInplace("hard_sigmoid"); }
   template <typename Device, typename X, typename Out, typename dOut,
             typename dX>
   void operator()(Device d, X x, Out out, dOut dout, dX dx) const {
paddle/fluid/pybind/pybind.cc
@@ -33,6 +33,7 @@ limitations under the License. */
 #include "paddle/fluid/framework/prune.h"
 #include "paddle/fluid/framework/reader.h"
 #include "paddle/fluid/framework/selected_rows.h"
+#include "paddle/fluid/operators/activation_op.h"
 #include "paddle/fluid/platform/enforce.h"
 #include "paddle/fluid/platform/place.h"
 #include "paddle/fluid/platform/profiler.h"
@@ -461,6 +462,9 @@ All parameter, weight, gradient are variables in Paddle.
         self.back().set_lod(t.lod());
       });
 
+  m.def("IsInplace",
+        [](std::string op) -> bool { return operators::IsInplace(op); });
+
   m.def("op_support_gpu", OpSupportGPU);
 #ifdef PADDLE_WITH_CUDA
   m.def("get_cuda_device_count", platform::GetCUDADeviceCount);
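With this binding in place the C++ whitelist becomes queryable from Python; a hedged usage sketch (module path as imported by layer_helper.py below):

    import paddle.fluid.core as core

    core.IsInplace("relu")  # True: relu is in InplaceOpSet
    core.IsInplace("abs")   # False: abs's gradient needs x, no inplace reuse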
python/paddle/fluid/layer_helper.py
@@ -19,6 +19,7 @@ from framework import Variable, Parameter, default_main_program, default_startup
 import unique_name
 from paddle.fluid.initializer import Constant, Xavier
 from param_attr import ParamAttr, WeightNormParamAttr
+import core
 
 
 class LayerHelper(object):
@@ -398,13 +399,16 @@ class LayerHelper(object):
             return input_var
         if isinstance(act, basestring):
             act = {'type': act}
-        tmp = self.create_tmp_variable(dtype=input_var.dtype)
         if 'use_mkldnn' in self.kwargs:
             act['use_mkldnn'] = self.kwargs.get('use_mkldnn')
         act_type = act.pop('type')
         if 'use_mkldnn' in self.kwargs:
             act['use_mkldnn'] = self.kwargs.get('use_mkldnn')
+        tmp = input_var
+        # NOTE(dzhwinter): some activation support inplace compution.
+        if not core.IsInplace(act_type):
+            tmp = self.create_tmp_variable(dtype=input_var.dtype)
         self.append_op(
             type=act_type,
             inputs={"X": [input_var]},
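The user-visible effect, sketched against the fluid API of this era (assuming fluid.layers.data and fluid.layers.fc as they existed in early 2018): for a whitelisted act, the activation appended by a layer writes back into that layer's output variable instead of allocating a fresh temporary.

    import paddle.fluid as fluid

    x = fluid.layers.data(name='x', shape=[8], dtype='float32')
    # 'relu' is whitelisted, so the activation appended after fc reuses
    # fc's output variable rather than a new temporary.
    y = fluid.layers.fc(input=x, size=8, act='relu')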
python/paddle/fluid/tests/unittests/test_activation_op.py
@@ -361,10 +361,7 @@ class TestCeil(OpTest):
     def test_check_output(self):
         self.check_output()
 
-    def test_check_grad(self):
-        if self.dtype == np.float16:
-            return
-        self.check_grad(['X'], 'Out', max_relative_error=0.007)
+    # The same reason with TestFloor
 
     def init_dtype(self):
         pass
@@ -396,10 +393,8 @@ class TestFloor(OpTest):
     def test_check_output(self):
         self.check_output()
 
-    def test_check_grad(self):
-        if self.dtype == np.float16:
-            return
-        self.check_grad(['X'], 'Out', max_relative_error=0.007)
+    # the gradient on floor, ceil, round is undefined.
+    # we return zero as gradient, but the numpy return nan
 
     def init_dtype(self):
         pass
@@ -501,11 +496,6 @@ class TestRound(OpTest):
     def test_check_output(self):
         self.check_output()
 
-    def test_check_grad(self):
-        if self.dtype == np.float16:
-            return
-        self.check_grad(['X'], 'Out', max_relative_error=0.007)
-
     def init_dtype(self):
         pass
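Why these gradient checks were dropped, in a standalone NumPy illustration (not Paddle code): the commit defines the gradient of floor/ceil/round as zero everywhere, but a finite-difference estimate explodes whenever the probe straddles an integer boundary, so a numeric comparison against the analytic gradient is meaningless.

    import numpy as np

    x, eps = 0.9999, 1e-3
    numeric = (np.floor(x + eps) - np.floor(x - eps)) / (2 * eps)  # 500.0
    analytic = 0.0  # the convention adopted here: zero gradient everywhere
    print(numeric, analytic)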