Commit 9bd44b94 (unverified)
Authored by lujun on Apr 10, 2019; committed via GitHub on Apr 10, 2019

Merge pull request #16561 from junjun315/move-api-to-root

Move dygraph api to root

Parents: 95410652, 14db0680
Showing 26 changed files with 731 additions and 295 deletions (+731 -295)
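The user-visible effect of the merge is that the dygraph building blocks (Layer, PyLayer, FC, Conv2D, ...) are re-exported at the fluid root, and several dygraph Variable methods lose their leading underscore. A minimal before/after sketch, assuming the fluid 1.x dygraph API as of this commit:

import numpy as np
import paddle.fluid as fluid

with fluid.dygraph.guard():
    # Before this commit: fluid.dygraph.FC; after it, FC is re-exported at the root.
    fc = fluid.FC("fc", size=4)
    x = fluid.dygraph.to_variable(np.ones([3, 2], dtype="float32"))
    y = fc(x)
    print(y.numpy())  # Variable.numpy() replaces the old private Variable._numpy()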
paddle/fluid/API.spec                                               +1   -0
python/paddle/fluid/__init__.py                                     +2   -0
python/paddle/fluid/dygraph/base.py                                 +1   -1
python/paddle/fluid/dygraph/checkpoint.py                           +1   -9
python/paddle/fluid/dygraph/layer_object_helper.py                  +1   -1
python/paddle/fluid/dygraph/layers.py                               +3   -3
python/paddle/fluid/dygraph/nn.py                                   +515 -73
python/paddle/fluid/framework.py                                    +33  -29
python/paddle/fluid/initializer.py                                  +8   -8
python/paddle/fluid/layer_helper.py                                 +1   -1
python/paddle/fluid/layer_helper_base.py                            +4   -4
python/paddle/fluid/layers/nn.py                                    +22  -22
python/paddle/fluid/optimizer.py                                    +4   -4
python/paddle/fluid/tests/unittests/test_base_layer.py              +5   -5
python/paddle/fluid/tests/unittests/test_imperative_basic.py        +28  -28
python/paddle/fluid/tests/unittests/test_imperative_checkpoint.py   +7   -7
python/paddle/fluid/tests/unittests/test_imperative_deepcf.py       +14  -17
python/paddle/fluid/tests/unittests/test_imperative_gan.py          +9   -9
python/paddle/fluid/tests/unittests/test_imperative_gnn.py          +5   -8
python/paddle/fluid/tests/unittests/test_imperative_mnist.py        +5   -5
python/paddle/fluid/tests/unittests/test_imperative_optimizer.py    +5   -5
python/paddle/fluid/tests/unittests/test_imperative_ptb_rnn.py      +10  -11
python/paddle/fluid/tests/unittests/test_imperative_resnet.py       +10  -10
python/paddle/fluid/tests/unittests/test_imperative_se_resnext.py   +6   -6
python/paddle/fluid/tests/unittests/test_imperative_transformer.py  +10  -8
python/paddle/fluid/tests/unittests/test_layers.py                  +21  -21
paddle/fluid/API.spec (view file @ 9bd44b94)

@@ -13,6 +13,7 @@ paddle.fluid.name_scope (ArgSpec(args=['prefix'], varargs=None, keywords=None, d
 paddle.fluid.cuda_places (ArgSpec(args=['device_ids'], varargs=None, keywords=None, defaults=(None,)), ('document', '7d9a51fc9cf3c5245b5227080a8064c3'))
 paddle.fluid.cpu_places (ArgSpec(args=['device_count'], varargs=None, keywords=None, defaults=(None,)), ('document', '4c0cd83f0b401fc2ff84c70974e5d210'))
 paddle.fluid.cuda_pinned_places (ArgSpec(args=['device_count'], varargs=None, keywords=None, defaults=(None,)), ('document', 'd0c3ebd813c39958c92b78e3eef7e912'))
+paddle.fluid.in_dygraph_mode (ArgSpec(args=[], varargs=None, keywords=None, defaults=None), ('document', 'f06314a1cb30c96b5808dde2219c2dae'))
 paddle.fluid.Executor.__init__ (ArgSpec(args=['self', 'place'], varargs=None, keywords=None, defaults=None), ('document', '6adf97f83acf6453d4a6a4b1070f3754'))
 paddle.fluid.Executor.close (ArgSpec(args=['self'], varargs=None, keywords=None, defaults=None), ('document', 'f5369953dd0c443961cf79f7a00e1a03'))
 paddle.fluid.Executor.infer_from_dataset (ArgSpec(args=['self', 'program', 'dataset', 'scope', 'thread', 'debug', 'fetch_list', 'fetch_info', 'print_period'], varargs=None, keywords=None, defaults=(None, None, None, 0, False, None, None, 100)), ('document', '9c7decb955b9c4f718114179c8985581'))
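The only API.spec addition is the newly public paddle.fluid.in_dygraph_mode. A minimal sketch of its behavior, assuming the semantics in the framework.py hunks further below (true exactly while a dygraph tracer is installed):

import paddle.fluid as fluid

print(fluid.in_dygraph_mode())      # False: no dygraph tracer outside a guard

with fluid.dygraph.guard():
    print(fluid.in_dygraph_mode())  # True: the guard installs a tracer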
python/paddle/fluid/__init__.py (view file @ 9bd44b94)

@@ -66,6 +66,8 @@ from . import compiler
 from .compiler import *
 from paddle.fluid.layers.math_op_patch import monkey_patch_variable
 from . import install_check
+from .dygraph.nn import *
+from .dygraph.layers import *

 Tensor = LoDTensor
python/paddle/fluid/dygraph/base.py (view file @ 9bd44b94)

@@ -22,7 +22,7 @@ __all__ = ['enabled', 'guard', 'to_variable']

 def enabled():
-    return framework._in_dygraph_mode()
+    return framework.in_dygraph_mode()

 @signature_safe_contextmanager
python/paddle/fluid/dygraph/checkpoint.py (view file @ 9bd44b94)

@@ -97,20 +97,12 @@ def load_persistables(vardict, dirname, filename=None):
     Examples:
         .. code-block:: python
-            my_layer = layer(fluid.dygraph.Layer)
+            my_layer = layer(fluid.Layer)
             param_path = "./my_paddle_model"
             param_dict = fluid.dygraph.load_persistables(my_layer.parameters(), param_path)
             param_1 = param_dict['PtbModel_0.w_1']
-            or:
-            my_layer = layer(fluid.dygraph.Layer)
-            param_path = "./my_paddle_model"
-            filename = "model.file"
-            param_dict = fluid.dygraph.load_persistables(my_layer.state_dict(), param_path,
-                                                         filename=filename)
-            param_1 = param_dict['PtbModel_0.w_1']
     """
     if isinstance(vardict, collections.OrderedDict):
         return _load_var_from_file(vardict, dirname, filename)
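For context, the save/load round trip this docstring documents looks roughly as follows; a sketch based on the updated test_imperative_checkpoint.py later in this diff (the FC layer and the "save_dir" path are placeholders):

import numpy as np
import paddle.fluid as fluid

with fluid.dygraph.guard():
    fc = fluid.FC("fc", size=10)
    # One forward pass so the lazily built parameters exist.
    out = fc(fluid.dygraph.to_variable(np.ones([2, 8], dtype="float32")))
    fluid.dygraph.save_persistables(fc, "save_dir")                # persist parameters
    fc.load_dict(fluid.dygraph.load_persistables(fc, "save_dir"))  # restore them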
python/paddle/fluid/dygraph/layer_object_helper.py (view file @ 9bd44b94)

@@ -16,7 +16,7 @@ from __future__ import print_function

 import copy
 import six
-from ..framework import Parameter, _in_dygraph_mode
+from ..framework import Parameter, in_dygraph_mode
 from ..param_attr import ParamAttr
 from .. import core
 from six.moves import zip
python/paddle/fluid/dygraph/layers.py (view file @ 9bd44b94)

@@ -139,14 +139,14 @@ class Layer(core.Layer):
     def clear_gradients(self):
         for p in self.parameters():
-            p._clear_gradient()
+            p.clear_gradient()

-    def _build_once(self, *args):
+    def build_once(self, *args):
         pass

     def __call__(self, *inputs):
         if not self._built:
-            self._build_once(*inputs)
+            self.build_once(*inputs)
         outputs = self.forward(*inputs)
         self._built = True
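Since build_once is now part of the public Layer contract, a custom layer can override it to create shape-dependent parameters lazily, right before the first forward. A minimal sketch, assuming Layer.create_parameter as used elsewhere in this diff (MyFC and its names are hypothetical):

import numpy as np
import paddle.fluid as fluid

class MyFC(fluid.Layer):  # hypothetical example layer
    def __init__(self, name_scope, size):
        super(MyFC, self).__init__(name_scope)
        self._size = size

    def build_once(self, input):
        # Runs once, before the first forward, when input shapes are known
        # (renamed from _build_once in this commit).
        self._w = self.create_parameter(
            attr=fluid.ParamAttr(),
            shape=[input.shape[1], self._size],
            dtype='float32')

    def forward(self, input):
        return fluid.layers.matmul(input, self._w)

with fluid.dygraph.guard():
    out = MyFC("my_fc", 4)(
        fluid.dygraph.to_variable(np.ones([3, 8], dtype='float32')))
    print(out.shape)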
python/paddle/fluid/dygraph/nn.py (view file @ 9bd44b94)

This diff is collapsed in the page capture (515 additions, 73 deletions; content not shown).
python/paddle/fluid/framework.py (view file @ 9bd44b94)

@@ -67,6 +67,7 @@ __all__ = [
     'cuda_places',
     'cpu_places',
     'cuda_pinned_places',
+    'in_dygraph_mode',
 ]

 EMPTY_VAR_NAME = core.kEmptyVarName()

@@ -79,7 +80,10 @@ _dygraph_tracer_ = None
 _dygraph_current_expected_place_ = None

-def _in_dygraph_mode():
+def in_dygraph_mode():
+    '''
+    Returns(bool): True if the program is running in dynamic graph mode
+    '''
     return _dygraph_tracer_ is not None

@@ -396,7 +400,7 @@ class Variable(object):
         if not isinstance(dtype, core.VarDesc.VarType):
             dtype = convert_np_dtype_to_dtype_(dtype)

-        if _in_dygraph_mode():
+        if in_dygraph_mode():
             # record vars in tracer rather than blocks
             self._ivar = kwargs.get("ivar", None)
             if not self._ivar:

@@ -482,21 +486,21 @@ class Variable(object):
             self.block.vars[name] = self
         self.op = None
-        self.stop_gradient = stop_gradient
+        self._stop_gradient = stop_gradient
         self.is_data = is_data

-    def _numpy(self):
+    def numpy(self):
         new_ivar = self._ivar._copy_to(core.CPUPlace(), True)
         return np.array(new_ivar.value().get_tensor())

-    def _backward(self):
+    def backward(self):
         self._ivar._run_backward()

-    def _gradient(self):
+    def gradient(self):
         new_ivar = self._ivar._grad_ivar()._copy_to(core.CPUPlace(), True)
         return np.array(new_ivar.value().get_tensor())

-    def _clear_gradient(self):
+    def clear_gradient(self):
         self._ivar._clear_gradient()

     def __str__(self):

@@ -516,7 +520,7 @@ class Variable(object):
         Returns:
             str: The debug string.
         """
-        if _in_dygraph_mode():
+        if in_dygraph_mode():
             # TODO(panyx0718): add more dygraph debug info.
             return 'name %s, dtype: %s shape: %s' % (self.name, self.dtype,
                                                      self.shape)

@@ -535,7 +539,7 @@ class Variable(object):
     __repr__ = __str__

-    def _set_desc(self, input):
+    def set_desc(self, input):
         """
         Set the variable description.

@@ -548,43 +552,43 @@ class Variable(object):
         self.desc = input

     @property
-    def _stop_gradient(self):
-        if _in_dygraph_mode():
+    def stop_gradient(self):
+        if in_dygraph_mode():
             return self._ivar.stop_gradient
         else:
-            return self.stop_gradient
+            return self._stop_gradient

-    @_stop_gradient.setter
-    def _stop_gradient(self, s):
-        if _in_dygraph_mode():
+    @stop_gradient.setter
+    def stop_gradient(self, s):
+        if in_dygraph_mode():
             self._ivar.stop_gradient = s
         else:
-            self.stop_gradient = s
+            self._stop_gradient = s

     @property
     def persistable(self):
-        if _in_dygraph_mode():
+        if in_dygraph_mode():
             return self._ivar.persistable
         else:
             return self.desc.persistable()

     @persistable.setter
     def persistable(self, p):
-        if _in_dygraph_mode():
+        if in_dygraph_mode():
             return self._ivar.persistable
         else:
             self.desc.set_persistable(p)

     @property
     def name(self):
-        if _in_dygraph_mode():
+        if in_dygraph_mode():
             return self._ivar.name
         else:
             return cpt.to_text(self.desc.name())

     @name.setter
     def name(self, new_name):
-        if _in_dygraph_mode():
+        if in_dygraph_mode():
             self._ivar.name = new_name
         else:
             self.desc.set_name(new_name)

@@ -592,14 +596,14 @@ class Variable(object):
     @property
     def shape(self):
         # convert to tuple, make it as same as numpy API.
-        if _in_dygraph_mode():
+        if in_dygraph_mode():
             return self._ivar.shape
         else:
             return tuple(self.desc.shape())

     @property
     def dtype(self):
-        if _in_dygraph_mode():
+        if in_dygraph_mode():
             return self._ivar.dtype
         else:
             return self.desc.dtype()

@@ -611,7 +615,7 @@ class Variable(object):
     @property
     def type(self):
-        if _in_dygraph_mode():
+        if in_dygraph_mode():
             return self._ivar.dtype
         else:
             return self.desc.type()

@@ -721,7 +725,7 @@ class Variable(object):
                 name=unique_name.generate(".".join(self.name)),
                 dtype=self.dtype,
                 persistable=self.persistable,
-                stop_gradient=self._stop_gradient, )
+                stop_gradient=self.stop_gradient, )
         else:
             return self

@@ -930,7 +934,7 @@ class Operator(object):
                  inputs=None,
                  outputs=None,
                  attrs=None):
-        if _in_dygraph_mode():
+        if in_dygraph_mode():
             if type is None:
                 raise ValueError(
                     "`type` to initialized an Operator can not be None.")

@@ -1049,7 +1053,7 @@ class Operator(object):
                 for arg in out_args:
                     out_arg_names.append(cpt.to_text(arg.name))
                     # TODO(minqiyang): could we remove variable's op in static mode?
-                    if not _in_dygraph_mode():
+                    if not in_dygraph_mode():
                         arg.op = self
                 self.desc.set_output(out_proto.name, out_arg_names)

@@ -1095,7 +1099,7 @@ class Operator(object):
     @property
     def type(self):
-        if _in_dygraph_mode():
+        if in_dygraph_mode():
             return self.iop.type
         else:
             return self.desc.type()

@@ -1638,7 +1642,7 @@ class Block(object):
         Returns:
             Operator: the append Operator.
         """
-        if _in_dygraph_mode():
+        if in_dygraph_mode():
             op = Operator(
                 block=self,
                 desc=None,

@@ -1710,7 +1714,7 @@ class Block(object):
         return self.ops[start:end]

     def _prepend_op(self, *args, **kwargs):
-        if _in_dygraph_mode():
+        if in_dygraph_mode():
             op = Operator(
                 self,
                 None,
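Taken together, the Variable changes promote the dygraph endpoints numpy, backward, gradient, and clear_gradient (plus the stop_gradient property) to public API. A minimal sketch of the new spellings, assuming a dygraph guard:

import numpy as np
import paddle.fluid as fluid

with fluid.dygraph.guard():
    x = fluid.dygraph.to_variable(np.ones([2, 2], dtype="float32"))
    loss = fluid.layers.reduce_sum(x)
    loss.backward()        # was loss._backward()
    print(loss.numpy())    # was loss._numpy()
    print(x.gradient())    # was x._gradient()
    x.clear_gradient()     # was x._clear_gradient()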
python/paddle/fluid/initializer.py (view file @ 9bd44b94)

@@ -165,7 +165,7 @@ class ConstantInitializer(Initializer):
                 'force_cpu': self._force_cpu or force_init_on_cpu()
             },
             stop_gradient=True)

-        if not framework._in_dygraph_mode():
+        if not framework.in_dygraph_mode():
             var.op = op
         return op

@@ -245,7 +245,7 @@ class UniformInitializer(Initializer):
                 attrs={"in_dtype": out_var.dtype,
                        "out_dtype": var.dtype})

-        if not framework._in_dygraph_mode():
+        if not framework.in_dygraph_mode():
             var.op = op
         return op

@@ -324,7 +324,7 @@ class NormalInitializer(Initializer):
                 outputs={"Out": var},
                 attrs={"in_dtype": out_var.dtype,
                        "out_dtype": var.dtype})

-        if not framework._in_dygraph_mode():
+        if not framework.in_dygraph_mode():
             var.op = op
         return op

@@ -403,7 +403,7 @@ class TruncatedNormalInitializer(Initializer):
                 outputs={"Out": var},
                 attrs={"in_dtype": out_var.dtype,
                        "out_dtype": var.dtype})

-        if not framework._in_dygraph_mode():
+        if not framework.in_dygraph_mode():
             var.op = op
         return op

@@ -509,7 +509,7 @@ class XavierInitializer(Initializer):
                 "seed": self._seed
             },
             stop_gradient=True)

-        if not framework._in_dygraph_mode():
+        if not framework.in_dygraph_mode():
             var.op = op
         return op

@@ -610,7 +610,7 @@ class MSRAInitializer(Initializer):
                 "seed": self._seed
             },
             stop_gradient=True)

-        if not framework._in_dygraph_mode():
+        if not framework.in_dygraph_mode():
             var.op = op
         return op

@@ -709,7 +709,7 @@ class BilinearInitializer(Initializer):
                 'shape': list(shape),
                 value_name: values
             })

-        if not framework._in_dygraph_mode():
+        if not framework.in_dygraph_mode():
             var.op = op
         return op

@@ -768,7 +768,7 @@ class NumpyArrayInitializer(Initializer):
                 value_name: values
             },
             stop_gradient=True)

-        if not framework._in_dygraph_mode():
+        if not framework.in_dygraph_mode():
             var.op = op
         return op
python/paddle/fluid/layer_helper.py (view file @ 9bd44b94)

@@ -17,7 +17,7 @@ from __future__ import print_function
 import copy
 import six

-from .framework import Parameter, dtype_is_floating, _in_dygraph_mode
+from .framework import Parameter, dtype_is_floating, in_dygraph_mode
 from . import unique_name
 from paddle.fluid.initializer import Constant, Xavier
 from .param_attr import ParamAttr
python/paddle/fluid/layer_helper_base.py (view file @ 9bd44b94)

@@ -17,7 +17,7 @@ from __future__ import print_function
 import copy
 import numpy as np

-from .framework import Variable, default_main_program, default_startup_program, _in_dygraph_mode, _current_expected_place
+from .framework import Variable, default_main_program, default_startup_program, in_dygraph_mode, _current_expected_place
 from . import unique_name
 from .param_attr import ParamAttr, WeightNormParamAttr
 from . import core

@@ -54,7 +54,7 @@ class LayerHelperBase(object):
             Return Variable construct from value
         """
         if isinstance(value, np.ndarray):
-            assert _in_dygraph_mode(
+            assert in_dygraph_mode(
             ), "to_variable could only be called in dygraph mode"

             if not block:

@@ -302,7 +302,7 @@ class LayerHelperBase(object):
             param = self._create_weight_normalize(attr, shape, dtype)
             WeightNormParamAttr.params_with_weight_norm.append(param)
             return param
-        if _in_dygraph_mode():
+        if in_dygraph_mode():
             # In dygraph mode, we want the returned parameter to be
             # initialized so that it can be used imperatively.
             return self.main_program.global_block().create_parameter(

@@ -370,7 +370,7 @@ class LayerHelperBase(object):
             initializer: initializer to use
         """
         assert isinstance(var, Variable)
-        if _in_dygraph_mode():
+        if in_dygraph_mode():
             initializer(var, var.block)
         else:
             self.startup_program.global_block().create_var(
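The assertion above also pins down when to_variable is legal: only while a dygraph tracer is active. A minimal sketch:

import numpy as np
import paddle.fluid as fluid

arr = np.ones([2, 2], dtype="float32")
with fluid.dygraph.guard():
    v = fluid.dygraph.to_variable(arr)  # fine: a dygraph tracer is active
    print(v.numpy())
# Calling to_variable on an ndarray outside the guard trips the assertion:
# "to_variable could only be called in dygraph mode"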
python/paddle/fluid/layers/nn.py (view file @ 9bd44b94)

@@ -23,7 +23,7 @@ import os
 import inspect

 from ..layer_helper import LayerHelper
 from ..initializer import Normal, Constant, NumpyArrayInitializer
-from ..framework import Variable, OpProtoHolder, _in_dygraph_mode
+from ..framework import Variable, OpProtoHolder, in_dygraph_mode
 from ..dygraph import base
 from ..param_attr import ParamAttr
 from .layer_function_generator import autodoc, templatedoc, _generate_doc_string_

@@ -481,7 +481,7 @@ def dynamic_lstm(input,
           forward, _ = fluid.layers.dynamic_lstm(
               input=forward_proj, size=hidden_dim * 4, use_peepholes=False)
     """
-    assert _in_dygraph_mode(
+    assert in_dygraph_mode(
     ) is not True, "please use lstm instead of dynamic_lstm in dygraph mode!"
     assert bias_attr is not False, "bias_attr should not be False in dynamic_lstmp."
     helper = LayerHelper('lstm', **locals())

@@ -867,7 +867,7 @@ def dynamic_lstmp(input,
                                          proj_activation="tanh")
     """
-    assert _in_dygraph_mode(
+    assert in_dygraph_mode(
     ) is not True, "please use lstm instead of dynamic_lstmp in dygraph mode!"
     assert bias_attr is not False, "bias_attr should not be False in dynamic_lstmp."

@@ -1041,7 +1041,7 @@ def dynamic_gru(input,
         hidden = fluid.layers.dynamic_gru(input=x, size=hidden_dim)
     """
-    assert _in_dygraph_mode(
+    assert in_dygraph_mode(
     ) is not True, "please use gru instead of dynamic_gru in dygraph mode!"
     helper = LayerHelper('gru', **locals())

@@ -1760,7 +1760,7 @@ def sequence_conv(input,
         Variable: output of sequence_conv
     """
-    assert not _in_dygraph_mode(), (
+    assert not in_dygraph_mode(), (
         "sequence layer is not supported in dygraph mode yet.")
     helper = LayerHelper('sequence_conv', **locals())
     dtype = helper.input_dtype()

@@ -1821,7 +1821,7 @@ def sequence_softmax(input, use_cudnn=False, name=None):
                              dtype='float32', lod_level=1)
         x_sequence_softmax = fluid.layers.sequence_softmax(input=x)
     """
-    assert not _in_dygraph_mode(), (
+    assert not in_dygraph_mode(), (
         "sequence layer is not supported in dygraph mode yet.")
     helper = LayerHelper('sequence_softmax', **locals())
     dtype = helper.input_dtype()

@@ -2315,7 +2315,7 @@ def sequence_pool(input, pool_type, is_test=False):
         last_x = fluid.layers.sequence_pool(input=x, pool_type='last')
         first_x = fluid.layers.sequence_pool(input=x, pool_type='first')
     """
-    assert not _in_dygraph_mode(), (
+    assert not in_dygraph_mode(), (
         "sequence layer is not supported in dygraph mode yet.")
     helper = LayerHelper('sequence_pool', **locals())
     dtype = helper.input_dtype()

@@ -2356,7 +2356,7 @@ def sequence_concat(input, name=None):
         out = fluid.layers.sequence_concat(input=[seq1, seq2, seq3])
     """
-    assert not _in_dygraph_mode(), (
+    assert not in_dygraph_mode(), (
         "sequence layer is not supported in dygraph mode yet.")
     helper = LayerHelper('sequence_concat', **locals())
     out = helper.create_variable_for_type_inference(dtype=helper.input_dtype())

@@ -2485,7 +2485,7 @@ def sequence_slice(input, offset, length, name=None):
         subseqs = fluid.layers.sequence_slice(input=seqs, offset=offset,
                                               length=length)
     """
-    assert not _in_dygraph_mode(), (
+    assert not in_dygraph_mode(), (
         "sequence layer is not supported in dygraph mode yet.")
     helper = LayerHelper("sequence_slice", **locals())
     dtype = helper.input_dtype()

@@ -3307,7 +3307,7 @@ def layer_norm(input,
         >>>                          dtype='float32')
         >>> x = fluid.layers.layer_norm(input=data, begin_norm_axis=1)
     """
-    assert _in_dygraph_mode(
+    assert in_dygraph_mode(
     ) is not True, "please use FC instead of fc in dygraph mode!"
     helper = LayerHelper('layer_norm', **locals())
     dtype = helper.input_dtype()

@@ -3946,7 +3946,7 @@ def sequence_expand(x, y, ref_level=-1, name=None):
                          dtype='float32', lod_level=1)
         out = layers.sequence_expand(x=x, y=y, ref_level=0)
     """
-    assert not _in_dygraph_mode(), (
+    assert not in_dygraph_mode(), (
         "sequence layer is not supported in dygraph mode yet.")
     helper = LayerHelper('sequence_expand', input=x, **locals())
     dtype = helper.input_dtype()

@@ -4014,7 +4014,7 @@ def sequence_expand_as(x, y, name=None):
                          dtype='float32', lod_level=1)
         out = layers.sequence_expand_as(x=x, y=y)
     """
-    assert not _in_dygraph_mode(), (
+    assert not in_dygraph_mode(), (
         "sequence layer is not supported in dygraph mode yet.")
     helper = LayerHelper('sequence_expand_as', input=x, **locals())
     dtype = helper.input_dtype()

@@ -4062,7 +4062,7 @@ def sequence_pad(x, pad_value, maxlen=None, name=None):
         out = fluid.layers.sequence_pad(x=x, pad_value=pad_value)
     """
-    assert not _in_dygraph_mode(), (
+    assert not in_dygraph_mode(), (
         "sequence layer is not supported in dygraph mode yet.")
     helper = LayerHelper('sequence_pad', input=x, **locals())
     dtype = helper.input_dtype()

@@ -4130,7 +4130,7 @@ def sequence_unpad(x, length, name=None):
         out = fluid.layers.sequence_unpad(x=x, length=len)
     """
-    assert not _in_dygraph_mode(), (
+    assert not in_dygraph_mode(), (
         "sequence layer is not supported in dygraph mode yet.")
     helper = LayerHelper('sequence_unpad', input=x, **locals())
     dtype = helper.input_dtype()

@@ -5305,7 +5305,7 @@ def sequence_reshape(input, new_dim):
         x = fluid.layers.data(shape=[5, 20], dtype='float32', lod_level=1)
         x_reshaped = fluid.layers.sequence_reshape(input=x, new_dim=10)
     """
-    assert not _in_dygraph_mode(), (
+    assert not in_dygraph_mode(), (
         "sequence layer is not supported in dygraph mode yet.")
     helper = LayerHelper('sequence_reshape', **locals())
     out = helper.create_variable_for_type_inference(helper.input_dtype())

@@ -5841,7 +5841,7 @@ def im2sequence(input,
             input=layer, stride=[1, 1], filter_size=[2, 2])
     """
-    assert not _in_dygraph_mode(), (
+    assert not in_dygraph_mode(), (
         "sequence layer is not supported in dygraph mode yet.")

     if isinstance(filter_size, int):

@@ -6485,7 +6485,7 @@ def squeeze(input, axes, name=None):
             x = layers.data(name='x', shape=[5, 1, 10])
             y = layers.sequeeze(input=x, axes=[1])
     """
-    assert not _in_dygraph_mode(), (
+    assert not in_dygraph_mode(), (
         "squeeze layer is not supported in dygraph mode yet.")
     helper = LayerHelper("squeeze", **locals())
     out = helper.create_variable_for_type_inference(dtype=input.dtype)

@@ -7631,7 +7631,7 @@ def sequence_scatter(input, index, updates, name=None):
         output = fluid.layers.sequence_scatter(input, index, updates)
     """
-    assert not _in_dygraph_mode(), (
+    assert not in_dygraph_mode(), (
         "sequence layer is not supported in dygraph mode yet.")
     helper = LayerHelper('sequence_scatter', **locals())
     dtype = helper.input_dtype()

@@ -8721,7 +8721,7 @@ def sequence_enumerate(input, win_size, pad_value=0, name=None):
         x = fluid.layers.data(shape[30, 1], dtype='int32', lod_level=1)
         out = fluid.layers.sequence_enumerate(input=x, win_size=3, pad_value=0)
     """
-    assert not _in_dygraph_mode(), (
+    assert not in_dygraph_mode(), (
         "sequence layer is not supported in dygraph mode yet.")
     helper = LayerHelper('sequence_enumerate', **locals())
     out = helper.create_variable_for_type_inference(

@@ -8762,7 +8762,7 @@ def sequence_mask(x, maxlen=None, dtype='int64', name=None):
         Variable: The output sequence mask.

     """
-    assert not _in_dygraph_mode(), (
+    assert not in_dygraph_mode(), (
         "sequence layer is not supported in dygraph mode yet.")

     helper = LayerHelper('sequence_mask', **locals())

@@ -9241,7 +9241,7 @@ def _elementwise_op(helper):
     op_type = helper.layer_type
     x = helper.kwargs.get('x', None)
     y = helper.kwargs.get('y', None)
-    if _in_dygraph_mode():
+    if in_dygraph_mode():
         x = base.to_variable(x)
         y = base.to_variable(y)

@@ -9814,7 +9814,7 @@ def sequence_reverse(x, name=None):
     Returns:
         out(${y_type}): ${y_comment}
     """
-    assert not _in_dygraph_mode(), (
+    assert not in_dygraph_mode(), (
         "sequence layer is not supported in dygraph mode yet.")
     helper = LayerHelper("sequence_reverse", **locals())
     if name is None:
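All the sequence_* layers keep their dygraph guard; only the assert now calls the public in_dygraph_mode. A sketch of the resulting failure mode when one of them is invoked under a guard:

import numpy as np
import paddle.fluid as fluid

with fluid.dygraph.guard():
    x = fluid.dygraph.to_variable(np.ones([3, 4], dtype="float32"))
    try:
        fluid.layers.sequence_pool(input=x, pool_type="last")
    except AssertionError as e:
        # The guard assert fires before any op is built.
        print(e)  # "sequence layer is not supported in dygraph mode yet."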
python/paddle/fluid/optimizer.py (view file @ 9bd44b94)

@@ -55,7 +55,7 @@ class Optimizer(object):
     """

     def __init__(self, learning_rate, regularization=None, name=None):
-        if framework._in_dygraph_mode():
+        if framework.in_dygraph_mode():
             if not isinstance(learning_rate, float) and \
                     not isinstance(learning_rate, LearningRateDecay):
                 raise TypeError(

@@ -205,7 +205,7 @@ class Optimizer(object):
             name = self._name + "_" + name
         if (name in self._accumulators and
                 param.name in self._accumulators[name]):
-            if framework._in_dygraph_mode():
+            if framework.in_dygraph_mode():
                 return self._accumulators[name][param.name]
             raise Exception("Accumulator {} already exists for parameter {}".
                             format(name, param.name))

@@ -363,7 +363,7 @@ class Optimizer(object):
             See examples in `apply_gradients`.
         """
         self._dtype = loss.dtype
-        if framework._in_dygraph_mode():
+        if framework.in_dygraph_mode():
             if parameter_list is not None:
                 parameters = parameter_list
             else:

@@ -448,7 +448,7 @@ class Optimizer(object):
         Returns:
             list: A list of operators appended to the current program.
         """
-        if framework._in_dygraph_mode():
+        if framework.in_dygraph_mode():
             with program_guard(framework.default_main_program(),
                                framework.default_startup_program()):
                 optimize_ops = self._create_optimization_pass(params_grads)
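With the renames applied, a bare dygraph training step reads as below; a minimal sketch assuming the optimizer behavior shown in this hunk (in dygraph mode, minimize applies updates directly under the default programs, with no Executor):

import numpy as np
import paddle.fluid as fluid
from paddle.fluid.optimizer import SGDOptimizer

with fluid.dygraph.guard():
    fc = fluid.FC("fc", size=10)
    sgd = SGDOptimizer(learning_rate=1e-3)
    x = fluid.dygraph.to_variable(np.random.rand(4, 8).astype("float32"))
    loss = fluid.layers.reduce_mean(fc(x))
    loss.backward()      # public name after this commit (was _backward)
    sgd.minimize(loss)   # dygraph branch applies the update immediately
    fc.clear_gradients()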
python/paddle/fluid/tests/unittests/test_base_layer.py (view file @ 9bd44b94)

@@ -18,7 +18,7 @@ import numpy as np
 import paddle.fluid as fluid

-class L1(fluid.dygraph.Layer):
+class L1(fluid.Layer):
     def __init__(self, prefix):
         super(L1, self).__init__(prefix)
         self._param_attr = fluid.ParamAttr(

@@ -32,7 +32,7 @@ class L1(fluid.Layer):
         return self.w1 + self.w2

-class L2(fluid.dygraph.Layer):
+class L2(fluid.Layer):
     def __init__(self, prefix):
         super(L2, self).__init__(prefix)
         self.layer1 = L1(self.full_name())

@@ -42,7 +42,7 @@ class L2(fluid.Layer):
         return self.layer1() + self.layer2()

-class L3(fluid.dygraph.Layer):
+class L3(fluid.Layer):
     def __init__(self, prefix):
         super(L3, self).__init__(prefix)
         self.layer1 = L2(self.full_name())

@@ -59,7 +59,7 @@ class TestBaseLayer(unittest.TestCase):
             ret = l()
             self.assertEqual(l.w1.name, "test_one_level/L1_0.w_0")
             self.assertEqual(l.w2.name, "test_one_level/L1_0.w_1")
-            self.assertTrue(np.allclose(ret._numpy(), 0.2 * np.ones([2, 2])))
+            self.assertTrue(np.allclose(ret.numpy(), 0.2 * np.ones([2, 2])))

     def test_three_level(self):
         with fluid.dygraph.guard():

@@ -72,7 +72,7 @@ class TestBaseLayer(unittest.TestCase):
             self.assertEqual(names[3], "test_three_level/L3_0/L2_0/L1_1.w_1")
             self.assertEqual(names[4], "test_three_level/L3_0/L2_1/L1_0.w_0")
             self.assertEqual(names[5], "test_three_level/L3_0/L2_1/L1_0.w_1")
-            self.assertTrue(np.allclose(ret._numpy(), 0.8 * np.ones([2, 2])))
+            self.assertTrue(np.allclose(ret.numpy(), 0.8 * np.ones([2, 2])))

 if __name__ == '__main__':
python/paddle/fluid/tests/unittests/test_imperative_basic.py (view file @ 9bd44b94)

@@ -18,11 +18,11 @@ import numpy as np
 import paddle.fluid as fluid
 from paddle.fluid import core
-from paddle.fluid.dygraph.nn import FC
+from paddle.fluid import FC
 from test_imperative_base import new_program_scope

-class MyLayer(fluid.dygraph.Layer):
+class MyLayer(fluid.Layer):
     def __init__(self, name_scope):
         super(MyLayer, self).__init__(name_scope)

@@ -34,7 +34,7 @@ class MyLayer(fluid.Layer):
         return [x]

-class MyPyLayer(fluid.dygraph.PyLayer):
+class MyPyLayer(fluid.PyLayer):
     def __init__(self):
         super(MyPyLayer, self).__init__()

@@ -48,7 +48,7 @@ class MyPyLayer(fluid.PyLayer):
         return np.array(dout) * (1 - np.square(np.array(out)))

-class MLP(fluid.dygraph.Layer):
+class MLP(fluid.Layer):
     def __init__(self, name_scope):
         super(MLP, self).__init__(name_scope)
         self._fc1 = FC(self.full_name(),

@@ -71,7 +71,7 @@ class MLP(fluid.Layer):
         return x

-class SimpleRNNCell(fluid.dygraph.Layer):
+class SimpleRNNCell(fluid.Layer):
     def __init__(self, name_scope, step_input_size, hidden_size, output_size,
                  param_attr):
         super(SimpleRNNCell, self).__init__(name_scope)

@@ -81,7 +81,7 @@ class SimpleRNNCell(fluid.Layer):
         self._dtype = core.VarDesc.VarType.FP32
         self.param_attr = param_attr

-    def _build_once(self, inputs, pre_hidden):
+    def build_once(self, inputs, pre_hidden):
         i2h_param_shape = [self.step_input_size, self.hidden_size]
         h2h_param_shape = [self.hidden_size, self.hidden_size]
         h2o_param_shape = [self.output_size, self.hidden_size]

@@ -159,7 +159,7 @@ class SimpleRNNCell(fluid.Layer):
         return reduce_out, hidden

-class SimpleRNN(fluid.dygraph.Layer):
+class SimpleRNN(fluid.Layer):
     def __init__(self, name_scope):
         super(SimpleRNN, self).__init__(name_scope)
         self.seq_len = 4

@@ -200,22 +200,22 @@ class TestImperative(unittest.TestCase):
                 inputs.append(fluid.dygraph.base.to_variable(x))
             ret = fluid.layers.sums(inputs)
             loss = fluid.layers.reduce_sum(ret)
-            loss._backward()
-            self.assertTrue(np.allclose(ret._numpy(), x * 10))
-            self.assertTrue(np.allclose(inputs[0]._gradient(), x))
+            loss.backward()
+            self.assertTrue(np.allclose(ret.numpy(), x * 10))
+            self.assertTrue(np.allclose(inputs[0].gradient(), x))

     def test_layer(self):
         with fluid.dygraph.guard():
             cl = core.Layer()
             cl.forward([])
-            l = fluid.dygraph.Layer("l")
+            l = fluid.Layer("l")
             self.assertRaises(NotImplementedError, l.forward, [])

     def test_pylayer_func_id(self):

         with fluid.dygraph.guard():

-            class PyLayer1(fluid.dygraph.PyLayer):
+            class PyLayer1(fluid.PyLayer):
                 def __init__(self):
                     super(PyLayer1, self).__init__()

@@ -227,7 +227,7 @@ class TestImperative(unittest.TestCase):
                 def backward(input):
                     return input

-            class PyLayer2(fluid.dygraph.PyLayer):
+            class PyLayer2(fluid.PyLayer):
                 def __init__(self):
                     super(PyLayer2, self).__init__()

@@ -257,9 +257,9 @@ class TestImperative(unittest.TestCase):
             my_py_layer = MyPyLayer()
             var_inp = fluid.dygraph.base.to_variable(np_inp)
             outs = my_py_layer(var_inp)
-            dy_out = np.sum(outs[0]._numpy())
-            outs[0]._backward()
-            dy_grad = var_inp._gradient()
+            dy_out = np.sum(outs[0].numpy())
+            outs[0].backward()
+            dy_grad = var_inp.gradient()

         with new_program_scope():
             inp = fluid.layers.data(

@@ -287,9 +287,9 @@ class TestImperative(unittest.TestCase):
             l = MyLayer("my_layer")
             x = l(var_inp)[0]
             self.assertIsNotNone(x)
-            dy_out = x._numpy()
-            x._backward()
-            dy_grad = l._x_for_debug._gradient()
+            dy_out = x.numpy()
+            x.backward()
+            dy_grad = l._x_for_debug.gradient()

         with new_program_scope():
             inp = fluid.layers.data(

@@ -314,9 +314,9 @@ class TestImperative(unittest.TestCase):
             var_inp = fluid.dygraph.base.to_variable(np_inp)
             mlp = MLP("mlp")
             out = mlp(var_inp)
-            dy_out = out._numpy()
-            out._backward()
-            dy_grad = mlp._fc1._w._gradient()
+            dy_out = out.numpy()
+            out.backward()
+            dy_grad = mlp._fc1._w.gradient()

         with new_program_scope():
             inp = fluid.layers.data(

@@ -358,7 +358,7 @@ class TestImperative(unittest.TestCase):
                 x = fluid.layers.elementwise_add(inp1, inp2)
             else:
                 x = fluid.layers.elementwise_sub(inp1, inp2)
-            dygraph_result = x._numpy()
+            dygraph_result = x.numpy()

         # static graph
         with new_program_scope():

@@ -407,11 +407,11 @@ class TestImperative(unittest.TestCase):
             var_inp = fluid.layers.reshape(var_inp, shape=[1, 4, 3])
             simple_rnn = SimpleRNN("simple_rnn")
             outs, pre_hiddens = simple_rnn.forward(var_inp)
-            dy_out = outs[3]._numpy()
-            outs[3]._backward()
-            dy_grad_h2o = simple_rnn._cell._h2o_w._gradient()
-            dy_grad_h2h = simple_rnn._cell._h2h_w._gradient()
-            dy_grad_i2h = simple_rnn._cell._i2h_w._gradient()
+            dy_out = outs[3].numpy()
+            outs[3].backward()
+            dy_grad_h2o = simple_rnn._cell._h2o_w.gradient()
+            dy_grad_h2h = simple_rnn._cell._h2h_w.gradient()
+            dy_grad_i2h = simple_rnn._cell._i2h_w.gradient()

         with new_program_scope():
             inp = fluid.layers.data(
python/paddle/fluid/tests/unittests/test_imperative_checkpoint.py (view file @ 9bd44b94)

@@ -18,11 +18,11 @@ import numpy as np
 import paddle
 import paddle.fluid as fluid
 from paddle.fluid.optimizer import SGDOptimizer
-from paddle.fluid.dygraph.nn import Conv2D, Pool2D, FC
+from paddle.fluid import Conv2D, Pool2D, FC
 from paddle.fluid.dygraph.base import to_variable

-class SimpleImgConvPool(fluid.dygraph.Layer):
+class SimpleImgConvPool(fluid.Layer):
     def __init__(self,
                  name_scope,
                  num_channels,

@@ -71,7 +71,7 @@ class SimpleImgConvPool(fluid.Layer):
         return x

-class MNIST(fluid.dygraph.Layer):
+class MNIST(fluid.Layer):
     def __init__(self, name_scope):
         super(MNIST, self).__init__(name_scope)

@@ -125,21 +125,21 @@ class TestDygraphCheckpoint(unittest.TestCase):
                     img = to_variable(dy_x_data)
                     label = to_variable(y_data)
-                    label._stop_gradient = True
+                    label.stop_gradient = True

                     cost = mnist(img)
                     loss = fluid.layers.cross_entropy(cost, label)
                     avg_loss = fluid.layers.mean(loss)

-                    dy_out = avg_loss._numpy()
+                    dy_out = avg_loss.numpy()

-                    avg_loss._backward()
+                    avg_loss.backward()
                     sgd.minimize(avg_loss)
                     fluid.dygraph.save_persistables(mnist, "save_dir")
                     mnist.clear_gradients()

                     for param in mnist.parameters():
-                        dy_param_init_value[param.name] = param._numpy()
+                        dy_param_init_value[param.name] = param.numpy()

                     mnist.load_dict(
                         fluid.dygraph.load_persistables(mnist, "save_dir"))
python/paddle/fluid/tests/unittests/test_imperative_deepcf.py (view file @ 9bd44b94)

@@ -32,11 +32,11 @@ NUM_BATCHES = int(os.environ.get('NUM_BATCHES', 5))
 NUM_EPOCHES = int(os.environ.get('NUM_EPOCHES', 1))

-class DMF(fluid.dygraph.Layer):
+class DMF(fluid.Layer):
     def __init__(self, name_scope):
         super(DMF, self).__init__(name_scope)
-        self._user_latent = fluid.dygraph.FC(self.full_name(), 256)
-        self._item_latent = fluid.dygraph.FC(self.full_name(), 256)
+        self._user_latent = fluid.FC(self.full_name(), 256)
+        self._item_latent = fluid.FC(self.full_name(), 256)

         self._user_layers = []
         self._item_layers = []

@@ -45,13 +45,11 @@ class DMF(fluid.Layer):
             self._user_layers.append(
                 self.add_sublayer(
                     'user_layer_%d' % i,
-                    fluid.dygraph.FC(
-                        self.full_name(), self._hid_sizes[i], act='relu')))
+                    fluid.FC(self.full_name(), self._hid_sizes[i], act='relu')))
             self._item_layers.append(
                 self.add_sublayer(
                     'item_layer_%d' % i,
-                    fluid.dygraph.FC(
-                        self.full_name(), self._hid_sizes[i], act='relu')))
+                    fluid.FC(self.full_name(), self._hid_sizes[i], act='relu')))

     def forward(self, users, items):
         users = self._user_latent(users)

@@ -63,19 +61,18 @@ class DMF(fluid.Layer):
         return fluid.layers.elementwise_mul(users, items)

-class MLP(fluid.dygraph.Layer):
+class MLP(fluid.Layer):
     def __init__(self, name_scope):
         super(MLP, self).__init__(name_scope)
-        self._user_latent = fluid.dygraph.FC(self.full_name(), 256)
-        self._item_latent = fluid.dygraph.FC(self.full_name(), 256)
+        self._user_latent = fluid.FC(self.full_name(), 256)
+        self._item_latent = fluid.FC(self.full_name(), 256)
         self._match_layers = []
         self._hid_sizes = [128, 64]
         for i in range(len(self._hid_sizes)):
             self._match_layers.append(
                 self.add_sublayer(
                     'match_layer_%d' % i,
-                    fluid.dygraph.FC(
-                        self.full_name(), self._hid_sizes[i], act='relu')))
+                    fluid.FC(self.full_name(), self._hid_sizes[i], act='relu')))
         self._mat

     def forward(self, users, items):

@@ -88,7 +85,7 @@ class MLP(fluid.Layer):
         return match_vec

-class DeepCF(fluid.dygraph.Layer):
+class DeepCF(fluid.Layer):
     def __init__(self, name_scope, num_users, num_items, matrix):
         super(DeepCF, self).__init__(name_scope)
         self._num_users = num_users

@@ -99,11 +96,11 @@ class DeepCF(fluid.Layer):
             matrix.dtype,
             is_bias=False,
             default_initializer=fluid.initializer.NumpyArrayInitializer(matrix))
-        self._rating_matrix._stop_gradient = True
+        self._rating_matrix.stop_gradient = True

         self._mlp = MLP(self.full_name())
         self._dmf = DMF(self.full_name())
-        self._match_fc = fluid.dygraph.FC(self.full_name(), 1, act='sigmoid')
+        self._match_fc = fluid.FC(self.full_name(), 1, act='sigmoid')

     def forward(self, users, items):
         # users_emb = self._user_emb(users)

@@ -255,10 +252,10 @@ class TestDygraphDeepCF(unittest.TestCase):
                             fluid.layers.log_loss(prediction,
                                                   to_variable(labels_np[
                                                       slice:slice + BATCH_SIZE])))
-                        loss._backward()
+                        loss.backward()
                         adam.minimize(loss)
                         deepcf.clear_gradients()
-                        dy_loss = loss._numpy()
+                        dy_loss = loss.numpy()
                         sys.stderr.write('dynamic loss: %s %s\n' % (slice, dy_loss))

         self.assertEqual(static_loss, dy_loss)
python/paddle/fluid/tests/unittests/test_imperative_gan.py (view file @ 9bd44b94)

@@ -22,12 +22,12 @@ import paddle
 import paddle.fluid as fluid
 import paddle.fluid.core as core
 from paddle.fluid.optimizer import SGDOptimizer
-from paddle.fluid.dygraph.nn import Conv2D, Pool2D, FC
+from paddle.fluid import Conv2D, Pool2D, FC
 from test_imperative_base import new_program_scope
 from paddle.fluid.dygraph.base import to_variable

-class Discriminator(fluid.dygraph.Layer):
+class Discriminator(fluid.Layer):
     def __init__(self, name_scope):
         super(Discriminator, self).__init__(name_scope)
         self._fc1 = FC(self.full_name(), size=32, act='elu')

@@ -38,7 +38,7 @@ class Discriminator(fluid.Layer):
         return self._fc2(x)

-class Generator(fluid.dygraph.Layer):
+class Generator(fluid.Layer):
     def __init__(self, name_scope):
         super(Generator, self).__init__(name_scope)
         self._fc1 = FC(self.full_name(), size=64, act='elu')

@@ -150,7 +150,7 @@ class TestDygraphGAN(unittest.TestCase):
                     x=d_fake, label=to_variable(np.zeros([2, 1], np.float32))))

             d_loss = d_loss_real + d_loss_fake
-            d_loss._backward()
+            d_loss.backward()
             sgd.minimize(d_loss)
             discriminator.clear_gradients()
             generator.clear_gradients()

@@ -160,15 +160,15 @@ class TestDygraphGAN(unittest.TestCase):
             g_loss = fluid.layers.reduce_mean(
                 fluid.layers.sigmoid_cross_entropy_with_logits(
                     x=d_fake, label=to_variable(np.ones([2, 1], np.float32))))
-            g_loss._backward()
+            g_loss.backward()
             sgd.minimize(g_loss)
             for p in discriminator.parameters():
-                dy_params[p.name] = p._numpy()
+                dy_params[p.name] = p.numpy()
             for p in generator.parameters():
-                dy_params[p.name] = p._numpy()
+                dy_params[p.name] = p.numpy()

-            dy_g_loss = g_loss._numpy()
-            dy_d_loss = d_loss._numpy()
+            dy_g_loss = g_loss.numpy()
+            dy_d_loss = d_loss.numpy()

         self.assertEqual(dy_g_loss, static_g_loss)
         self.assertEqual(dy_d_loss, static_d_loss)
python/paddle/fluid/tests/unittests/test_imperative_gnn.py (view file @ 9bd44b94)

@@ -15,14 +15,12 @@
 import contextlib
 import unittest
 import numpy as np
-import six
 import sys

 import paddle
 import paddle.fluid as fluid
 import paddle.fluid.core as core
 from paddle.fluid.optimizer import AdamOptimizer
-from paddle.fluid.dygraph.nn import Conv2D, Pool2D, FC
 from test_imperative_base import new_program_scope
 from paddle.fluid.dygraph.base import to_variable

@@ -31,7 +29,7 @@ def gen_data():
     pass

-class GraphConv(fluid.dygraph.Layer):
+class GraphConv(fluid.Layer):
     def __init__(self, name_scope, in_features, out_features):
         super(GraphConv, self).__init__(name_scope)

@@ -50,7 +48,7 @@ class GraphConv(fluid.Layer):
         return fluid.layers.matmul(adj, support) + self.bias

-class GCN(fluid.dygraph.Layer):
+class GCN(fluid.Layer):
     def __init__(self, name_scope, num_hidden):
         super(GCN, self).__init__(name_scope)
         self.gc = GraphConv(self.full_name(), num_hidden, 32)

@@ -134,10 +132,9 @@ class TestDygraphGNN(unittest.TestCase):
             loss = fluid.layers.reduce_sum(loss)
             adam = AdamOptimizer(learning_rate=1e-3)
             adam.minimize(loss)
-            self.assertEqual(static_loss, loss._numpy())
-            self.assertTrue(
-                np.allclose(static_weight, model.gc.weight._numpy()))
-            sys.stderr.write('%s %s\n' % (static_loss, loss._numpy()))
+            self.assertEqual(static_loss, loss.numpy())
+            self.assertTrue(np.allclose(static_weight, model.gc.weight.numpy()))
+            sys.stderr.write('%s %s\n' % (static_loss, loss.numpy()))

 if __name__ == '__main__':
python/paddle/fluid/tests/unittests/test_imperative_mnist.py (view file @ 9bd44b94)

@@ -128,25 +128,25 @@ class TestImperativeMnist(unittest.TestCase):
                 img = to_variable(dy_x_data)
                 label = to_variable(y_data)
-                label._stop_gradient = True
+                label.stop_gradient = True

                 cost = mnist(img)
                 loss = fluid.layers.cross_entropy(cost, label)
                 avg_loss = fluid.layers.mean(loss)

-                dy_out = avg_loss._numpy()
+                dy_out = avg_loss.numpy()

                 if epoch == 0 and batch_id == 0:
                     for param in mnist.parameters():
-                        dy_param_init_value[param.name] = param._numpy()
+                        dy_param_init_value[param.name] = param.numpy()

-                avg_loss._backward()
+                avg_loss.backward()
                 sgd.minimize(avg_loss)
                 mnist.clear_gradients()

                 dy_param_value = {}
                 for param in mnist.parameters():
-                    dy_param_value[param.name] = param._numpy()
+                    dy_param_value[param.name] = param.numpy()

         with new_program_scope():
             fluid.default_startup_program().random_seed = seed
python/paddle/fluid/tests/unittests/test_imperative_optimizer.py (view file @ 9bd44b94)

@@ -28,7 +28,7 @@ from paddle.fluid.dygraph.base import to_variable
 from test_imperative_base import new_program_scope

-class MLP(fluid.dygraph.Layer):
+class MLP(fluid.Layer):
     def __init__(self, name_scope, param_attr=None, bias_attr=None):
         super(MLP, self).__init__(name_scope)

@@ -75,18 +75,18 @@ class TestImperativeOptimizerBase(unittest.TestCase):
                 cost = mlp(img)
                 avg_loss = fluid.layers.reduce_mean(cost)
-                dy_out = avg_loss._numpy()
+                dy_out = avg_loss.numpy()

                 if batch_id == 0:
                     for param in mlp.parameters():
-                        dy_param_init_value[param.name] = param._numpy()
+                        dy_param_init_value[param.name] = param.numpy()

-                avg_loss._backward()
+                avg_loss.backward()
                 optimizer.minimize(avg_loss)
                 mlp.clear_gradients()

                 dy_param_value = {}
                 for param in mlp.parameters():
-                    dy_param_value[param.name] = param._numpy()
+                    dy_param_value[param.name] = param.numpy()

         with new_program_scope():
             fluid.default_startup_program().random_seed = seed
python/paddle/fluid/tests/unittests/test_imperative_ptb_rnn.py (view file @ 9bd44b94)

@@ -24,10 +24,9 @@ from paddle.fluid.dygraph.base import to_variable
 from test_imperative_base import new_program_scope
 import numpy as np
 import six
-from paddle.fluid.backward import append_backward

-class SimpleLSTMRNN(fluid.dygraph.Layer):
+class SimpleLSTMRNN(fluid.Layer):
     def __init__(self,
                  name_scope,
                  hidden_size,

@@ -45,7 +44,7 @@ class SimpleLSTMRNN(fluid.Layer):
         self.cell_array = []
         self.hidden_array = []

-    def _build_once(self, input_embedding, init_hidden=None, init_cell=None):
+    def build_once(self, input_embedding, init_hidden=None, init_cell=None):
         self.weight_1_arr = []
         self.weight_2_arr = []
         self.bias_arr = []

@@ -132,7 +131,7 @@ class SimpleLSTMRNN(fluid.Layer):
         return real_res, last_hidden, last_cell

-class PtbModel(fluid.dygraph.Layer):
+class PtbModel(fluid.Layer):
     def __init__(self,
                  name_scope,
                  hidden_size,

@@ -177,7 +176,7 @@ class PtbModel(fluid.Layer):
                 default_initializer=fluid.initializer.UniformInitializer(
                     low=-self.init_scale, high=self.init_scale))

-    def _build_once(self, input, label, init_hidden, init_cell):
+    def build_once(self, input, label, init_hidden, init_cell):
         pass

     def forward(self, input, label, init_hidden, init_cell):

@@ -260,13 +259,13 @@ class TestDygraphPtbRnn(unittest.TestCase):
                                                           init_cell)
                 if i == 0:
                     for param in ptb_model.parameters():
-                        dy_param_init[param.name] = param._numpy()
-                dy_loss._backward()
+                        dy_param_init[param.name] = param.numpy()
+                dy_loss.backward()
                 sgd.minimize(dy_loss)
                 ptb_model.clear_gradients()
                 if i == batch_num - 1:
                     for param in ptb_model.parameters():
-                        dy_param_updated[param.name] = param._numpy()
+                        dy_param_updated[param.name] = param.numpy()

         with new_program_scope():
             fluid.default_startup_program().random_seed = seed

@@ -334,11 +333,11 @@ class TestDygraphPtbRnn(unittest.TestCase):
                 static_param_updated[static_param_name_list[k - 3]] = out[k]

-        self.assertTrue(np.array_equal(static_loss_value, dy_loss._numpy()))
+        self.assertTrue(np.array_equal(static_loss_value, dy_loss.numpy()))
         self.assertTrue(
-            np.array_equal(static_last_cell_value, last_cell._numpy()))
+            np.array_equal(static_last_cell_value, last_cell.numpy()))
         self.assertTrue(
-            np.array_equal(static_last_hidden_value, last_hidden._numpy()))
+            np.array_equal(static_last_hidden_value, last_hidden.numpy()))
         for key, value in six.iteritems(static_param_init):
             self.assertTrue(np.array_equal(value, dy_param_init[key]))
         for key, value in six.iteritems(static_param_updated):
python/paddle/fluid/tests/unittests/test_imperative_resnet.py (view file @ 9bd44b94)

@@ -21,7 +21,7 @@ import paddle
 import paddle.fluid as fluid
 from paddle.fluid import core
 from paddle.fluid.layer_helper import LayerHelper
-from paddle.fluid.dygraph.nn import Conv2D, Pool2D, BatchNorm, FC
+from paddle.fluid import Conv2D, Pool2D, BatchNorm, FC
 from paddle.fluid.dygraph.base import to_variable
 from test_imperative_base import new_program_scope

@@ -68,7 +68,7 @@ def optimizer_setting(params):
     return optimizer

-class ConvBNLayer(fluid.dygraph.Layer):
+class ConvBNLayer(fluid.Layer):
     def __init__(self,
                  name_scope,
                  num_channels,

@@ -99,7 +99,7 @@ class ConvBNLayer(fluid.Layer):
         return y

-class BottleneckBlock(fluid.dygraph.Layer):
+class BottleneckBlock(fluid.Layer):
     def __init__(self,
                  name_scope,
                  num_channels,

@@ -156,7 +156,7 @@ class BottleneckBlock(fluid.Layer):
         return layer_helper.append_activation(y)

-class ResNet(fluid.dygraph.Layer):
+class ResNet(fluid.Layer):
     def __init__(self, name_scope, layers=50, class_dim=102):
         super(ResNet, self).__init__(name_scope)

@@ -247,7 +247,7 @@ class TestDygraphResnet(unittest.TestCase):
             dy_param_init_value = {}
             for param in resnet.parameters():
-                dy_param_init_value[param.name] = param._numpy()
+                dy_param_init_value[param.name] = param.numpy()

             for batch_id, data in enumerate(train_reader()):
                 if batch_id >= batch_num:

@@ -260,20 +260,20 @@ class TestDygraphResnet(unittest.TestCase):
                 img = to_variable(dy_x_data)
                 label = to_variable(y_data)
-                label._stop_gradient = True
+                label.stop_gradient = True

                 out = resnet(img)
                 loss = fluid.layers.cross_entropy(input=out, label=label)
                 avg_loss = fluid.layers.mean(x=loss)

-                dy_out = avg_loss._numpy()
+                dy_out = avg_loss.numpy()

                 if batch_id == 0:
                     for param in resnet.parameters():
                         if param.name not in dy_param_init_value:
-                            dy_param_init_value[param.name] = param._numpy()
+                            dy_param_init_value[param.name] = param.numpy()

-                avg_loss._backward()
+                avg_loss.backward()

                 dy_grad_value = {}
                 for param in resnet.parameters():

@@ -288,7 +288,7 @@ class TestDygraphResnet(unittest.TestCase):
                 dy_param_value = {}
                 for param in resnet.parameters():
-                    dy_param_value[param.name] = param._numpy()
+                    dy_param_value[param.name] = param.numpy()

         with new_program_scope():
             fluid.default_startup_program().random_seed = seed
python/paddle/fluid/tests/unittests/test_imperative_se_resnext.py (view file @ 9bd44b94)

@@ -333,7 +333,7 @@ class TestImperativeResneXt(unittest.TestCase):
             dy_param_init_value = {}
             for param in se_resnext.parameters():
-                dy_param_init_value[param.name] = param._numpy()
+                dy_param_init_value[param.name] = param.numpy()
             for epoch_id in range(epoch_num):
                 for batch_id, data in enumerate(train_reader()):

@@ -349,19 +349,19 @@ class TestImperativeResneXt(unittest.TestCase):
                     img = to_variable(dy_x_data)
                     label = to_variable(y_data)
-                    label._stop_gradient = True
+                    label.stop_gradient = True

                     out = se_resnext(img)
                     loss = fluid.layers.cross_entropy(input=out, label=label)
                     avg_loss = fluid.layers.mean(x=loss)

-                    dy_out = avg_loss._numpy()
+                    dy_out = avg_loss.numpy()

                     if batch_id == 0:
                         for param in se_resnext.parameters():
                             if param.name not in dy_param_init_value:
-                                dy_param_init_value[param.name] = param._numpy()
-                    avg_loss._backward()
+                                dy_param_init_value[param.name] = param.numpy()
+                    avg_loss.backward()

                     #dy_grad_value = {}
                     #for param in se_resnext.parameters():

@@ -375,7 +375,7 @@ class TestImperativeResneXt(unittest.TestCase):
                     dy_param_value = {}
                     for param in se_resnext.parameters():
-                        dy_param_value[param.name] = param._numpy()
+                        dy_param_value[param.name] = param.numpy()

         with new_program_scope():
             fluid.default_startup_program().random_seed = seed
python/paddle/fluid/tests/unittests/test_imperative_transformer.py (view file @ 9bd44b94)

@@ -16,7 +16,8 @@ from __future__ import print_function
 import unittest
 import paddle.fluid as fluid
-from paddle.fluid.dygraph import Embedding, LayerNorm, FC, to_variable, Layer, guard
+from paddle.fluid import Embedding, LayerNorm, FC, Layer
+from paddle.fluid.dygraph import to_variable, guard
 from test_imperative_base import new_program_scope
 from paddle.fluid import core
 import numpy as np

@@ -992,14 +993,14 @@ class TestDygraphTransformer(unittest.TestCase):
                     enc_inputs, dec_inputs, label, weights)
                 if i == 0:
                     for param in transformer.parameters():
-                        dy_param_init[param.name] = param._numpy()
+                        dy_param_init[param.name] = param.numpy()

-                dy_avg_cost._backward()
+                dy_avg_cost.backward()
                 optimizer.minimize(dy_avg_cost)
                 transformer.clear_gradients()
                 if i == batch_num - 1:
                     for param in transformer.parameters():
-                        dy_param_updated[param.name] = param._numpy()
+                        dy_param_updated[param.name] = param.numpy()

         with new_program_scope():
             fluid.default_startup_program().random_seed = seed

@@ -1076,13 +1077,14 @@ class TestDygraphTransformer(unittest.TestCase):
                         4]] = out[k]

         self.assertTrue(
-            np.array_equal(static_avg_cost_value, dy_avg_cost._numpy()))
+            np.array_equal(static_avg_cost_value, dy_avg_cost.numpy()))
         self.assertTrue(
-            np.array_equal(static_sum_cost_value, dy_sum_cost._numpy()))
+            np.array_equal(static_sum_cost_value, dy_sum_cost.numpy()))
         self.assertTrue(
-            np.array_equal(static_predict_value, dy_predict._numpy()))
+            np.array_equal(static_predict_value, dy_predict.numpy()))
         self.assertTrue(
-            np.array_equal(static_token_num_value, dy_token_num._numpy()))
+            np.array_equal(static_token_num_value, dy_token_num.numpy()))
         for key, value in six.iteritems(static_param_init):
             self.assertTrue(np.array_equal(value, dy_param_init[key]))
         for key, value in six.iteritems(static_param_updated):
python/paddle/fluid/tests/unittests/test_layers.py (view file @ 9bd44b94)

@@ -114,7 +114,7 @@ class TestLayer(LayerTest):
             dy_ret = fc2(ret)

         self.assertTrue(np.array_equal(static_ret, static_ret2))
-        self.assertTrue(np.array_equal(static_ret, dy_ret._numpy()))
+        self.assertTrue(np.array_equal(static_ret, dy_ret.numpy()))

     def test_layer_norm(self):
         inp = np.ones([3, 32, 32], dtype='float32')

@@ -142,7 +142,7 @@ class TestLayer(LayerTest):
            dy_ret = lm(base.to_variable(inp))

        self.assertTrue(np.allclose(static_ret, static_ret2))
-        self.assertTrue(np.allclose(dy_ret._numpy(), static_ret2))
+        self.assertTrue(np.allclose(dy_ret.numpy(), static_ret2))

     def test_relu(self):
         with self.static_graph():

@@ -156,7 +156,7 @@ class TestLayer(LayerTest):
             t = np.ones([3, 3], dtype='float32')
             dy_ret = layers.relu(base.to_variable(t))

-        self.assertTrue(np.allclose(static_ret, dy_ret._numpy()))
+        self.assertTrue(np.allclose(static_ret, dy_ret.numpy()))

     def test_matmul(self):
         with self.static_graph():

@@ -177,7 +177,7 @@ class TestLayer(LayerTest):
             t2 = np.ones([3, 3], dtype='float32')
             dy_ret = layers.matmul(base.to_variable(t), base.to_variable(t2))

-        self.assertTrue(np.allclose(static_ret, dy_ret._numpy()))
+        self.assertTrue(np.allclose(static_ret, dy_ret.numpy()))

     def test_conv2d(self):
         with self.static_graph():

@@ -204,7 +204,7 @@ class TestLayer(LayerTest):
                 'conv2d', num_channels=3, num_filters=3, filter_size=[2, 2])
             dy_ret = conv2d(base.to_variable(images))

-        self.assertTrue(np.allclose(static_ret, dy_ret._numpy()))
+        self.assertTrue(np.allclose(static_ret, dy_ret.numpy()))
         self.assertTrue(np.allclose(static_ret, static_ret2))

     def test_gru_unit(self):

@@ -246,7 +246,7 @@ class TestLayer(LayerTest):
         for i in range(len(static_ret)):
             self.assertTrue(np.allclose(static_ret[i], static_ret2[i]))
-            self.assertTrue(np.allclose(static_ret[i], dy_ret[i]._numpy()))
+            self.assertTrue(np.allclose(static_ret[i], dy_ret[i].numpy()))

     def test_elementwise_math(self):
         n = np.ones([3, 3], dtype='float32')

@@ -288,8 +288,8 @@ class TestLayer(LayerTest):
             ret = layers.elementwise_sub(ret, n5)
             dy_ret = layers.elementwise_mul(ret, n6)
         self.assertTrue(
-            np.allclose(static_ret, dy_ret._numpy()),
-            '%s vs %s' % (static_ret, dy_ret._numpy()))
+            np.allclose(static_ret, dy_ret.numpy()),
+            '%s vs %s' % (static_ret, dy_ret.numpy()))

     def test_elementwise_minmax(self):
         n = np.ones([3, 3], dtype='float32')

@@ -299,8 +299,8 @@ class TestLayer(LayerTest):
             min_ret = layers.elementwise_min(n, n2)
             max_ret = layers.elementwise_max(n, n2)

-        self.assertTrue(np.allclose(n, min_ret._numpy()))
-        self.assertTrue(np.allclose(n2, max_ret._numpy()))
+        self.assertTrue(np.allclose(n, min_ret.numpy()))
+        self.assertTrue(np.allclose(n2, max_ret.numpy()))

     def test_sequence_conv(self):
         inp_np = np.arange(12).reshape([3, 4]).astype('float32')

@@ -367,7 +367,7 @@ class TestLayer(LayerTest):
                 'conv2d_transpose', num_filters=10, output_size=28)
             dy_rlt = conv2d_transpose(base.to_variable(inp_np))
         self.assertTrue(np.allclose(static_rlt2, static_rlt))
-        self.assertTrue(np.allclose(dy_rlt._numpy(), static_rlt))
+        self.assertTrue(np.allclose(dy_rlt.numpy(), static_rlt))

     def test_bilinear_tensor_product(self):
         inp_np_x = np.array([[1, 2, 3]]).astype('float32')

@@ -410,7 +410,7 @@ class TestLayer(LayerTest):
             dy_rlt = btp(base.to_variable(inp_np_x), base.to_variable(inp_np_y))

         self.assertTrue(np.allclose(static_rlt2, static_rlt))
-        self.assertTrue(np.allclose(dy_rlt._numpy(), static_rlt))
+        self.assertTrue(np.allclose(dy_rlt.numpy(), static_rlt))

     def test_prelu(self):
         inp_np = np.ones([5, 200, 100, 100]).astype('float32')

@@ -451,7 +451,7 @@ class TestLayer(LayerTest):
             dy_rlt = prelu(base.to_variable(inp_np))

         self.assertTrue(np.allclose(static_rlt2, static_rlt))
-        self.assertTrue(np.allclose(dy_rlt._numpy(), static_rlt))
+        self.assertTrue(np.allclose(dy_rlt.numpy(), static_rlt))

     def test_embeding(self):
         inp_word = np.array([[[1]]]).astype('int64')

@@ -484,7 +484,7 @@ class TestLayer(LayerTest):
             static_rlt3 = emb2(base.to_variable(inp_word))

         self.assertTrue(np.allclose(static_rlt2, static_rlt))
-        self.assertTrue(np.allclose(static_rlt3._numpy(), static_rlt))
+        self.assertTrue(np.allclose(static_rlt3.numpy(), static_rlt))

     def test_nce(self):
         window_size = 5

@@ -598,7 +598,7 @@ class TestLayer(LayerTest):
             nce_loss3 = nce(embs3, words[label_word])

         self.assertTrue(np.allclose(static_rlt2, static_rlt))
-        self.assertTrue(np.allclose(nce_loss3._numpy(), static_rlt))
+        self.assertTrue(np.allclose(nce_loss3.numpy(), static_rlt))

     def test_conv3d(self):
         with self.static_graph():

@@ -625,7 +625,7 @@ class TestLayer(LayerTest):
             conv3d = nn.Conv3D('conv3d', num_filters=3, filter_size=2)
             dy_ret = conv3d(base.to_variable(images))

-        self.assertTrue(np.allclose(static_ret, dy_ret._numpy()))
+        self.assertTrue(np.allclose(static_ret, dy_ret.numpy()))
         self.assertTrue(np.allclose(static_ret, static_ret2))

     def test_row_conv(self):

@@ -719,7 +719,7 @@ class TestLayer(LayerTest):
             groupNorm = nn.GroupNorm('GroupNorm', groups=2)
             dy_ret = groupNorm(base.to_variable(input))

-        self.assertTrue(np.allclose(static_ret, dy_ret._numpy()))
+        self.assertTrue(np.allclose(static_ret, dy_ret.numpy()))
         self.assertTrue(np.allclose(static_ret, static_ret2))

     def test_spectral_norm(self):

@@ -769,7 +769,7 @@ class TestLayer(LayerTest):
             spectralNorm = nn.SpectralNorm('SpectralNorm', dim=1, power_iters=2)
             dy_ret = spectralNorm(base.to_variable(input))

-        self.assertTrue(np.allclose(static_ret, dy_ret._numpy()))
+        self.assertTrue(np.allclose(static_ret, dy_ret.numpy()))
         self.assertTrue(np.allclose(static_ret, static_ret2))

     def test_tree_conv(self):

@@ -842,7 +842,7 @@ class TestLayer(LayerTest):
             dy_ret = treeConv(base.to_variable(vectors), base.to_variable(adj))

         self.assertTrue(np.allclose(static_ret, static_ret2))
-        self.assertTrue(np.allclose(static_ret, dy_ret._numpy()))
+        self.assertTrue(np.allclose(static_ret, dy_ret.numpy()))

     def test_conv3d_transpose(self):
         input_array = np.arange(0, 48).reshape(

@@ -872,7 +872,7 @@ class TestLayer(LayerTest):
                 use_cudnn=False)
             dy_rlt = conv3d_transpose(base.to_variable(input_array))
         self.assertTrue(np.allclose(static_rlt2, static_rlt))
-        self.assertTrue(np.allclose(dy_rlt._numpy(), static_rlt))
+        self.assertTrue(np.allclose(dy_rlt.numpy(), static_rlt))

 class TestBook(LayerTest):

@@ -907,7 +907,7 @@ class TestBook(LayerTest):
             if isinstance(dy_result, tuple):
                 dy_result = dy_result[0]

-            self.assertTrue(np.array_equal(static_result[0], dy_result._numpy()))
+            self.assertTrue(np.array_equal(static_result[0], dy_result.numpy()))

     def _get_np_data(self, shape, dtype, append_batch_size=True):
         np.random.seed(self.seed)