Crayon鑫 / Paddle (forked from PaddlePaddle / Paddle)
Commit 67248018
Authored on Sep 21, 2018 by sneaxiy

fix conflict

Parents: 3ad3635d, a54c423e
Showing 10 changed files with 310 additions and 30 deletions (+310, -30).
paddle/fluid/API.spec                                     +19  -19
paddle/fluid/framework/op_desc.cc                          +5   -0
paddle/fluid/framework/shape_inference.cc                 +10   -0
paddle/fluid/framework/shape_inference.h                   +2   -0
paddle/fluid/operators/concat_op.cc                       +14   -2
python/paddle/fluid/clip.py                                +1   -1
python/paddle/fluid/layers/layer_function_generator.py    +27   -1
python/paddle/fluid/layers/nn.py                          +71   -0
python/paddle/fluid/layers/ops.py                          +9   -7
python/paddle/fluid/tests/unittests/test_layers.py       +152   -0
paddle/fluid/API.spec
@@ -167,6 +167,9 @@ paddle.fluid.layers.stanh ArgSpec(args=['x', 'scale_a', 'scale_b', 'name'], vara
 paddle.fluid.layers.hard_sigmoid ArgSpec(args=['x', 'slope', 'offset', 'name'], varargs=None, keywords=None, defaults=(0.2, 0.5, None))
 paddle.fluid.layers.swish ArgSpec(args=['x', 'beta', 'name'], varargs=None, keywords=None, defaults=(1.0, None))
 paddle.fluid.layers.prelu ArgSpec(args=['x', 'mode', 'param_attr', 'name'], varargs=None, keywords=None, defaults=(None, None))
+paddle.fluid.layers.brelu ArgSpec(args=['x', 't_min', 't_max', 'name'], varargs=None, keywords=None, defaults=(0.0, 24.0, None))
+paddle.fluid.layers.leaky_relu ArgSpec(args=['x', 'alpha', 'name'], varargs=None, keywords=None, defaults=(0.02, None))
+paddle.fluid.layers.soft_relu ArgSpec(args=['x', 'threshold', 'name'], varargs=None, keywords=None, defaults=(40.0, None))
 paddle.fluid.layers.flatten ArgSpec(args=['x', 'axis', 'name'], varargs=None, keywords=None, defaults=(1, None))
 paddle.fluid.layers.sequence_mask ArgSpec(args=['x', 'maxlen', 'dtype', 'name'], varargs=None, keywords=None, defaults=(None, 'int64', None))
 paddle.fluid.layers.stack ArgSpec(args=['x', 'axis'], varargs=None, keywords=None, defaults=(0,))
@@ -262,26 +265,23 @@ paddle.fluid.layers.sum ArgSpec(args=[], varargs='args', keywords='kwargs', defa
 paddle.fluid.layers.slice ArgSpec(args=[], varargs='args', keywords='kwargs', defaults=None)
 paddle.fluid.layers.shape ArgSpec(args=[], varargs='args', keywords='kwargs', defaults=None)
 paddle.fluid.layers.maxout ArgSpec(args=[], varargs='args', keywords='kwargs', defaults=None)
-paddle.fluid.layers.sigmoid ArgSpec(args=[], varargs='args', keywords='kwargs', defaults=None)
-paddle.fluid.layers.logsigmoid ArgSpec(args=[], varargs='args', keywords='kwargs', defaults=None)
-paddle.fluid.layers.exp ArgSpec(args=[], varargs='args', keywords='kwargs', defaults=None)
-paddle.fluid.layers.tanh ArgSpec(args=[], varargs='args', keywords='kwargs', defaults=None)
-paddle.fluid.layers.tanh_shrink ArgSpec(args=[], varargs='args', keywords='kwargs', defaults=None)
+paddle.fluid.layers.sigmoid ArgSpec(args=['x', 'name'], varargs=None, keywords=None, defaults=(None,))
+paddle.fluid.layers.logsigmoid ArgSpec(args=['x', 'name'], varargs=None, keywords=None, defaults=(None,))
+paddle.fluid.layers.exp ArgSpec(args=['x', 'name'], varargs=None, keywords=None, defaults=(None,))
+paddle.fluid.layers.tanh ArgSpec(args=['x', 'name'], varargs=None, keywords=None, defaults=(None,))
+paddle.fluid.layers.tanh_shrink ArgSpec(args=['x', 'name'], varargs=None, keywords=None, defaults=(None,))
 paddle.fluid.layers.softshrink ArgSpec(args=[], varargs='args', keywords='kwargs', defaults=None)
-paddle.fluid.layers.sqrt ArgSpec(args=[], varargs='args', keywords='kwargs', defaults=None)
-paddle.fluid.layers.abs ArgSpec(args=[], varargs='args', keywords='kwargs', defaults=None)
-paddle.fluid.layers.ceil ArgSpec(args=[], varargs='args', keywords='kwargs', defaults=None)
-paddle.fluid.layers.floor ArgSpec(args=[], varargs='args', keywords='kwargs', defaults=None)
-paddle.fluid.layers.cos ArgSpec(args=[], varargs='args', keywords='kwargs', defaults=None)
-paddle.fluid.layers.sin ArgSpec(args=[], varargs='args', keywords='kwargs', defaults=None)
-paddle.fluid.layers.round ArgSpec(args=[], varargs='args', keywords='kwargs', defaults=None)
-paddle.fluid.layers.reciprocal ArgSpec(args=[], varargs='args', keywords='kwargs', defaults=None)
-paddle.fluid.layers.square ArgSpec(args=[], varargs='args', keywords='kwargs', defaults=None)
-paddle.fluid.layers.softplus ArgSpec(args=[], varargs='args', keywords='kwargs', defaults=None)
-paddle.fluid.layers.softsign ArgSpec(args=[], varargs='args', keywords='kwargs', defaults=None)
-paddle.fluid.layers.brelu ArgSpec(args=[], varargs='args', keywords='kwargs', defaults=None)
-paddle.fluid.layers.leaky_relu ArgSpec(args=[], varargs='args', keywords='kwargs', defaults=None)
-paddle.fluid.layers.soft_relu ArgSpec(args=[], varargs='args', keywords='kwargs', defaults=None)
+paddle.fluid.layers.sqrt ArgSpec(args=['x', 'name'], varargs=None, keywords=None, defaults=(None,))
+paddle.fluid.layers.abs ArgSpec(args=['x', 'name'], varargs=None, keywords=None, defaults=(None,))
+paddle.fluid.layers.ceil ArgSpec(args=['x', 'name'], varargs=None, keywords=None, defaults=(None,))
+paddle.fluid.layers.floor ArgSpec(args=['x', 'name'], varargs=None, keywords=None, defaults=(None,))
+paddle.fluid.layers.cos ArgSpec(args=['x', 'name'], varargs=None, keywords=None, defaults=(None,))
+paddle.fluid.layers.sin ArgSpec(args=['x', 'name'], varargs=None, keywords=None, defaults=(None,))
+paddle.fluid.layers.round ArgSpec(args=['x', 'name'], varargs=None, keywords=None, defaults=(None,))
+paddle.fluid.layers.reciprocal ArgSpec(args=['x', 'name'], varargs=None, keywords=None, defaults=(None,))
+paddle.fluid.layers.square ArgSpec(args=['x', 'name'], varargs=None, keywords=None, defaults=(None,))
+paddle.fluid.layers.softplus ArgSpec(args=['x', 'name'], varargs=None, keywords=None, defaults=(None,))
+paddle.fluid.layers.softsign ArgSpec(args=['x', 'name'], varargs=None, keywords=None, defaults=(None,))
 paddle.fluid.layers.uniform_random ArgSpec(args=['shape', 'dtype', 'min', 'max', 'seed'], varargs=None, keywords=None, defaults=(None, None, None, None))
 paddle.fluid.layers.hard_shrink ArgSpec(args=['x', 'threshold'], varargs=None, keywords=None, defaults=(None,))
 paddle.fluid.layers.cumsum ArgSpec(args=['x', 'axis', 'exclusive', 'reverse'], varargs=None, keywords=None, defaults=(None, None, None))
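Each API.spec entry is an inspect-style ArgSpec, and the second hunk above records these activations moving from an opaque *args/**kwargs signature to an explicit (x, name=None) one. A minimal sketch of how such a line corresponds to a Python signature (a stand-in using the standard library, not Paddle's actual spec generator):

    import inspect

    def tanh(x, name=None):  # stand-in with the new explicit signature
        pass

    print(inspect.getargspec(tanh))
    # ArgSpec(args=['x', 'name'], varargs=None, keywords=None, defaults=(None,))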
paddle/fluid/framework/op_desc.cc
@@ -54,6 +54,10 @@ class CompileTimeInferShapeContext : public InferShapeContext {
                 size_t j = 0) const override {
     PADDLE_ENFORCE_LT(i, Inputs(in).size());
     PADDLE_ENFORCE_LT(j, Outputs(out).size());
+    PADDLE_ENFORCE(Inputs(in)[i] != framework::kEmptyVarName,
+                   "The %s[%d] is @EMPTY@", in, i);
+    PADDLE_ENFORCE(Outputs(out)[j] != framework::kEmptyVarName,
+                   "The %s[%d] is @EMPTY@", out, j);
     auto *in_var = block_.FindVarRecursive(Inputs(in)[i]);
     auto *out_var = block_.FindVarRecursive(Outputs(out)[j]);
     if (in_var->GetType() != proto::VarType::LOD_TENSOR) {
@@ -63,6 +67,7 @@ class CompileTimeInferShapeContext : public InferShapeContext {
     PADDLE_ENFORCE_EQ(in_var->GetType(), proto::VarType::LOD_TENSOR,
                       "The %d-th output of Output(%s) must be LoDTensor.", j,
                       out);
     out_var->SetLoDLevel(in_var->GetLoDLevel());
   }
paddle/fluid/framework/shape_inference.cc
@@ -46,6 +46,16 @@ std::vector<DDim> InferShapeContext::GetReaderDims(
   return this->GetRepeatedDims(arg_names[0]);
 }
 
+void InferShapeContext::ShareLoDs(const std::string &in,
+                                  const std::string &out) const {
+  PADDLE_ENFORCE_EQ(Inputs(in).size(), Outputs(out).size(),
+                    "The number of arguments in %s and %s is not equal.", in,
+                    out);
+  for (size_t i = 0; i < in.size(); ++i) {
+    ShareLoD(in, out, i, i);
+  }
+}
+
 DDim InferShapeContext::GetInputsElementDim(const std::string &name,
                                             int idx) const {
   const std::vector<std::string> &names = Inputs(name);
paddle/fluid/framework/shape_inference.h
@@ -56,6 +56,8 @@ class InferShapeContext {
   virtual const std::vector<std::string> &Outputs(
       const std::string &name) const = 0;
 
+  void ShareLoDs(const std::string &in, const std::string &out) const;
+
   virtual void ShareLoD(const std::string &in, const std::string &out,
                         size_t i = 0, size_t j = 0) const = 0;
paddle/fluid/operators/concat_op.cc
@@ -94,8 +94,20 @@ class ConcatOpGrad : public framework::OperatorWithKernel {
       : OperatorWithKernel(type, inputs, outputs, attrs) {}
 
   void InferShape(framework::InferShapeContext *ctx) const override {
-    ctx->SetOutputsDim(framework::GradVarName("X"), ctx->GetInputsDim("X"));
-    ctx->ShareLoD("X", framework::GradVarName("X"));
+    auto in_x = "X";
+    auto out_x_g_n = framework::GradVarName(in_x);
+    ctx->SetOutputsDim(out_x_g_n, ctx->GetInputsDim(in_x));
+    auto &in_names = ctx->Inputs(in_x);
+    auto &out_names = ctx->Outputs(out_x_g_n);
+    PADDLE_ENFORCE_EQ(
+        in_names.size(), out_names.size(),
+        "The number of arguments in %s[%d] and %s[%d] is not equal.", in_x,
+        in_names.size(), out_x_g_n, out_names.size());
+    for (size_t i = 0; i < in_names.size(); ++i) {
+      if (out_names[i] != framework::kEmptyVarName) {
+        ctx->ShareLoD(in_x, out_x_g_n, i, i);
+      }
+    }
   }
 };
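The rewritten ConcatOpGrad::InferShape pairs the i-th element of Input("X") with the i-th element of Output(X@GRAD) and skips slots whose gradient is not required. A rough Python rendering of that pairing rule (share_lods_elementwise and its arguments are hypothetical, for illustration only):

    def share_lods_elementwise(in_names, out_names, share_lod):
        # Lengths must match, as the PADDLE_ENFORCE_EQ above demands.
        assert len(in_names) == len(out_names), \
            "The number of arguments in X and X@GRAD is not equal."
        for i, out_name in enumerate(out_names):
            # An empty string models framework::kEmptyVarName: no gradient
            # is needed for that slot, so no LoD is shared.
            if out_name:
                share_lod(i, i)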
python/paddle/fluid/clip.py
@@ -280,7 +280,7 @@ class GradientClipByGlobalNorm(BaseGradientClipAttr):
         group_scale_name = self.group_name + "_scale"
         if group_scale_name not in self.context:
             group_norm_var = layers.sums(input=self.context[self.group_name])
-            layers.sqrt(x=group_norm_var, out=group_norm_var)
+            group_norm_var = layers.sqrt(x=group_norm_var)
             clip_var = self.context[self.group_name + "_clip"]
             group_scale_var = layers.elementwise_div(
                 x=clip_var,
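This one-line change follows from the generator switch in this commit: sqrt is now a generated no-attribute layer with the signature sqrt(x, name=None), which returns a fresh temporary rather than writing into an out argument. A sketch of the before/after call, in the GradientClipByGlobalNorm context above:

    # Before: in-place form, no longer supported by the generated sqrt
    #   layers.sqrt(x=group_norm_var, out=group_norm_var)
    # After: rebind the name to the returned temporary
    group_norm_var = layers.sqrt(x=group_norm_var)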
python/paddle/fluid/layers/layer_function_generator.py
@@ -23,7 +23,10 @@ from ..proto import framework_pb2
 from ..framework import OpProtoHolder, Variable
 from ..layer_helper import LayerHelper
 
-__all__ = ['deprecated', 'generate_layer_fn', 'autodoc', 'templatedoc']
+__all__ = [
+    'deprecated', 'generate_layer_fn', 'generate_layer_fn_noattr', 'autodoc',
+    'templatedoc'
+]
 
 
 def _convert_(name):
@@ -212,6 +215,29 @@ def generate_layer_fn(op_type):
     return func
 
 
+def generate_layer_fn_noattr(op_type):
+    """Register the Python layer for an Operator without Attribute.
+
+    Args:
+       op_type: The name of the operator to be created.
+
+    This function takes in the operator type (sigmoid, exp , tanh etc) and
+    creates the operator functionality.
+
+    """
+    op_proto = OpProtoHolder.instance().get_op_proto(op_type)
+
+    def func(x, name=None):
+        helper = LayerHelper(op_type, **locals())
+        output = helper.create_tmp_variable(dtype=x.dtype)
+        helper.append_op(type=op_type, inputs={"X": x}, outputs={"Out": output})
+        return output
+
+    func.__name__ = op_type
+    func.__doc__ = _generate_doc_string_(op_proto)
+    return func
+
+
 def deprecated(func_or_class):
     """
     Deprecated warning decorator. It will result a warning message.
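A short sketch of how the new generator is consumed (this mirrors what ops.py does below; it assumes a normal Paddle build where the sigmoid operator is registered):

    from paddle.fluid.layers.layer_function_generator import (
        generate_layer_fn_noattr)

    # Each no-attribute operator becomes a module-level function with the
    # uniform signature f(x, name=None); the docstring comes from its OpProto.
    sigmoid = generate_layer_fn_noattr('sigmoid')
    assert sigmoid.__name__ == 'sigmoid'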
python/paddle/fluid/layers/nn.py
@@ -114,6 +114,9 @@ __all__ = [
     'hard_sigmoid',
     'swish',
     'prelu',
+    'brelu',
+    'leaky_relu',
+    'soft_relu',
     'flatten',
     'sequence_mask',
     'stack',
@@ -6104,6 +6107,74 @@ def prelu(x, mode, param_attr=None, name=None):
     return out
 
 
+@templatedoc()
+def brelu(x, t_min=0.0, t_max=24.0, name=None):
+    """
+    ${comment}
+    Args:
+        x(${x_type}): ${x_comment}
+        t_min(${t_min_type}|0.0): ${t_min_comment}
+        t_max(${t_max_type}|24.0): ${t_max_comment}
+        name(str|None): A name for this layer(optional). If set None, the layer
+                        will be named automatically.
+    Returns:
+        output(${out_type}): ${out_comment}
+    """
+    helper = LayerHelper('brelu', **locals())
+    out = helper.create_tmp_variable(dtype=x.dtype)
+    helper.append_op(
+        type='brelu',
+        inputs={'X': x},
+        outputs={'Out': out},
+        attrs={'t_min': t_min,
+               't_max': t_max})
+    return out
+
+
+@templatedoc()
+def leaky_relu(x, alpha=0.02, name=None):
+    """
+    ${comment}
+    Args:
+        x(${x_type}): ${x_comment}
+        alpha(${alpha_type}|0.02): ${alpha_comment}
+        name(str|None): A name for this layer(optional). If set None, the layer
+                        will be named automatically.
+    Returns:
+        output(${out_type}): ${out_comment}
+    """
+    helper = LayerHelper('leaky_relu', **locals())
+    out = helper.create_tmp_variable(dtype=x.dtype)
+    helper.append_op(
+        type='leaky_relu',
+        inputs={'X': x},
+        outputs={'Out': out},
+        attrs={'alpha': alpha})
+    return out
+
+
+@templatedoc()
+def soft_relu(x, threshold=40.0, name=None):
+    """
+    ${comment}
+    Args:
+        x(${x_type}): ${x_comment}
+        threshold(${threshold_type}|40.0): ${threshold_comment}
+        name(str|None): A name for this layer(optional). If set None, the layer
+                        will be named automatically.
+    Returns:
+        output(${out_type}): ${out_comment}
+    """
+    helper = LayerHelper('soft_relu', **locals())
+    out = helper.create_tmp_variable(dtype=x.dtype)
+    helper.append_op(
+        type='soft_relu',
+        inputs={'X': x},
+        outputs={'Out': out},
+        attrs={'threshold': threshold})
+    return out
+
+
 def flatten(x, axis=1, name=None):
     """
     **Flatten layer**
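Usage of the three layers added here, with their documented defaults (a minimal sketch; the comments paraphrase the usual definitions of these activations and are not part of the diff):

    import paddle.fluid as fluid

    x = fluid.layers.data(name="x", shape=[16], dtype="float32")
    b = fluid.layers.brelu(x, t_min=0.0, t_max=24.0)  # clip output to [t_min, t_max]
    l = fluid.layers.leaky_relu(x, alpha=0.02)        # max(x, alpha * x)
    s = fluid.layers.soft_relu(x, threshold=40.0)     # smooth, thresholded relu variant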
python/paddle/fluid/layers/ops.py
@@ -13,15 +13,14 @@
 # limitations under the License.
 
 from __future__ import print_function
-from .layer_function_generator import generate_layer_fn
+from .layer_function_generator import generate_layer_fn, generate_layer_fn_noattr
 
-__activations__ = [
+__activations_noattr__ = [
     'sigmoid',
     'logsigmoid',
     'exp',
     'tanh',
     'tanh_shrink',
-    'softshrink',
     'sqrt',
     'abs',
     'ceil',
@@ -33,9 +32,6 @@ __activations__ = [
     'square',
     'softplus',
     'softsign',
-    'brelu',
-    'leaky_relu',
-    'soft_relu',
 ]
 
 __all__ = [
@@ -56,7 +52,8 @@ __all__ = [
     'slice',
     'shape',
     'maxout',
-] + __activations__
+    'softshrink',
+]
 
 for _OP in set(__all__):
     globals()[_OP] = generate_layer_fn(_OP)
@@ -66,6 +63,11 @@ for _OP in set(__all__):
 # e.g.: test_program_code.py, test_dist_train.py
 globals()['_scale'] = generate_layer_fn('scale')
 
+__all__ += __activations_noattr__
+
+for _OP in set(__activations_noattr__):
+    globals()[_OP] = generate_layer_fn_noattr(_OP)
+
 __all__ += ["uniform_random"]
 
 _uniform_random_ = generate_layer_fn('uniform_random')
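After this registration pass both families are plain attributes of paddle.fluid.layers: ops with attributes (softshrink, maxout, and the rest of __all__) keep the kwargs-based generate_layer_fn wrapper, while everything in __activations_noattr__ gets the fixed (x, name=None) signature. A small sketch:

    import paddle.fluid as fluid

    x = fluid.layers.data(name="x", shape=[16], dtype="float32")
    # exp now comes from generate_layer_fn_noattr, so its signature is
    # exactly exp(x, name=None):
    y = fluid.layers.exp(x, name='exp_out')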
python/paddle/fluid/tests/unittests/test_layers.py
@@ -573,6 +573,158 @@ class TestBook(unittest.TestCase):
         self.assertIsNotNone(out)
         print(str(program))
 
+    def test_brelu(self):
+        program = Program()
+        with program_guard(program):
+            input = layers.data(name="input", shape=[16], dtype="float32")
+            out = layers.brelu(input, t_min=1.0, t_max=20.0, name='brelu')
+            self.assertIsNotNone(out)
+        print(str(program))
+
+    def test_leaky_relu(self):
+        program = Program()
+        with program_guard(program):
+            input = layers.data(name="input", shape=[16], dtype="float32")
+            out = layers.leaky_relu(input, alpha=0.1, name='leaky_relu')
+            self.assertIsNotNone(out)
+        print(str(program))
+
+    def test_soft_relu(self):
+        program = Program()
+        with program_guard(program):
+            input = layers.data(name="input", shape=[16], dtype="float32")
+            out = layers.soft_relu(input, threshold=30.0, name='soft_relu')
+            self.assertIsNotNone(out)
+        print(str(program))
+
+    def test_sigmoid(self):
+        program = Program()
+        with program_guard(program):
+            input = layers.data(name="input", shape=[16], dtype="float32")
+            out = layers.sigmoid(input, name='sigmoid')
+            self.assertIsNotNone(out)
+        print(str(program))
+
+    def test_logsigmoid(self):
+        program = Program()
+        with program_guard(program):
+            input = layers.data(name="input", shape=[16], dtype="float32")
+            out = layers.logsigmoid(input, name='logsigmoid')
+            self.assertIsNotNone(out)
+        print(str(program))
+
+    def test_exp(self):
+        program = Program()
+        with program_guard(program):
+            input = layers.data(name="input", shape=[16], dtype="float32")
+            out = layers.exp(input, name='exp')
+            self.assertIsNotNone(out)
+        print(str(program))
+
+    def test_tanh(self):
+        program = Program()
+        with program_guard(program):
+            input = layers.data(name="input", shape=[16], dtype="float32")
+            out = layers.tanh(input, name='tanh')
+            self.assertIsNotNone(out)
+        print(str(program))
+
+    def test_tanh_shrink(self):
+        program = Program()
+        with program_guard(program):
+            input = layers.data(name="input", shape=[16], dtype="float32")
+            out = layers.tanh_shrink(input, name='tanh_shrink')
+            self.assertIsNotNone(out)
+        print(str(program))
+
+    def test_sqrt(self):
+        program = Program()
+        with program_guard(program):
+            input = layers.data(name="input", shape=[16], dtype="float32")
+            out = layers.sqrt(input, name='sqrt')
+            self.assertIsNotNone(out)
+        print(str(program))
+
+    def test_abs(self):
+        program = Program()
+        with program_guard(program):
+            input = layers.data(name="input", shape=[16], dtype="float32")
+            out = layers.abs(input, name='abs')
+            self.assertIsNotNone(out)
+        print(str(program))
+
+    def test_ceil(self):
+        program = Program()
+        with program_guard(program):
+            input = layers.data(name="input", shape=[16], dtype="float32")
+            out = layers.ceil(input, name='ceil')
+            self.assertIsNotNone(out)
+        print(str(program))
+
+    def test_floor(self):
+        program = Program()
+        with program_guard(program):
+            input = layers.data(name="input", shape=[16], dtype="float32")
+            out = layers.floor(input, name='floor')
+            self.assertIsNotNone(out)
+        print(str(program))
+
+    def test_cos(self):
+        program = Program()
+        with program_guard(program):
+            input = layers.data(name="input", shape=[16], dtype="float32")
+            out = layers.cos(input, name='cos')
+            self.assertIsNotNone(out)
+        print(str(program))
+
+    def test_sin(self):
+        program = Program()
+        with program_guard(program):
+            input = layers.data(name="input", shape=[16], dtype="float32")
+            out = layers.sin(input, name='sin')
+            self.assertIsNotNone(out)
+        print(str(program))
+
+    def test_round(self):
+        program = Program()
+        with program_guard(program):
+            input = layers.data(name="input", shape=[16], dtype="float32")
+            out = layers.round(input, name='round')
+            self.assertIsNotNone(out)
+        print(str(program))
+
+    def test_reciprocal(self):
+        program = Program()
+        with program_guard(program):
+            input = layers.data(name="input", shape=[16], dtype="float32")
+            out = layers.reciprocal(input, name='reciprocal')
+            self.assertIsNotNone(out)
+        print(str(program))
+
+    def test_square(self):
+        program = Program()
+        with program_guard(program):
+            input = layers.data(name="input", shape=[16], dtype="float32")
+            out = layers.square(input, name='square')
+            self.assertIsNotNone(out)
+        print(str(program))
+
+    def test_softplus(self):
+        program = Program()
+        with program_guard(program):
+            input = layers.data(name="input", shape=[16], dtype="float32")
+            out = layers.softplus(input, name='softplus')
+            self.assertIsNotNone(out)
+        print(str(program))
+
+    def test_softsign(self):
+        program = Program()
+        with program_guard(program):
+            input = layers.data(name="input", shape=[16], dtype="float32")
+            out = layers.softsign(input, name='softsign')
+            self.assertIsNotNone(out)
+        print(str(program))
+
     def test_roi_perspective_transform(self):
         program = Program()
         with program_guard(program):
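All nineteen new tests follow the file's existing pattern: build a Program, add the layer under program_guard, assert the output variable exists, and print the program. Any one of them can be run on its own, for example (assuming the Paddle test environment):

    import unittest

    suite = unittest.defaultTestLoader.loadTestsFromName(
        'test_layers.TestBook.test_brelu')
    unittest.TextTestRunner().run(suite)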