Commit d4571470 (unverified)
Authored Mar 30, 2023 by wanghuancoder; committed via GitHub on Mar 30, 2023.
Del old dygraph varbase (#52236)
* delete old dygraph op test
Parent commit: b8850521
Showing 60 changed files with 310 additions and 3,699 deletions (+310, -3699).
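Nearly every hunk below applies the same migration: code that branched on the legacy dygraph mode, or that accepted both the legacy `core.VarBase` and the eager `core.eager.Tensor`, is collapsed so that `core.eager.Tensor` is the only dygraph tensor type. A minimal sketch of that recurring rewrite (illustrative only, not code from the commit):

```python
from paddle.fluid import core

def is_dygraph_tensor_old(x):
    # before this commit: accept both the legacy VarBase and the eager Tensor
    return isinstance(x, (core.VarBase, core.eager.Tensor))

def is_dygraph_tensor_new(x):
    # after this commit: core.eager.Tensor is the only dygraph tensor type
    return isinstance(x, core.eager.Tensor)
```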
python/paddle/__init__.py: +1, -5
python/paddle/amp/grad_scaler.py: +1, -1
python/paddle/distributed/auto_parallel/engine.py: +1, -1
python/paddle/distributed/fleet/recompute/recompute.py: +5, -5
python/paddle/distributed/fleet/recompute/recompute_hybrid.py: +3, -3
python/paddle/distributed/fleet/utils/hybrid_parallel_util.py: +2, -2
python/paddle/distributed/parallel.py: +5, -7
python/paddle/fluid/data_feeder.py: +4, -14
python/paddle/fluid/dygraph/base.py: +28, -62
python/paddle/fluid/dygraph/math_op_patch.py: +4, -14
python/paddle/fluid/dygraph/varbase_patch_methods.py: +25, -45
python/paddle/fluid/framework.py: +24, -264
python/paddle/fluid/layer_helper_base.py: +9, -19
python/paddle/fluid/layers/control_flow.py: +1, -1
python/paddle/fluid/optimizer.py: +2, -2
python/paddle/fluid/tests/unittests/dygraph_to_static/test_len.py: +1, -1
python/paddle/fluid/tests/unittests/dygraph_to_static/test_loop.py: +1, -1
python/paddle/fluid/tests/unittests/dygraph_to_static/test_partial_program.py: +1, -3
python/paddle/fluid/tests/unittests/dygraph_to_static/test_return.py: +1, -1
python/paddle/fluid/tests/unittests/dygraph_to_static/test_save_inference_model.py: +2, -2
python/paddle/fluid/tests/unittests/dygraph_to_static/test_typehint.py: +1, -1
python/paddle/fluid/tests/unittests/mkldnn/test_pool2d_bf16_mkldnn_op.py: +1, -1
python/paddle/fluid/tests/unittests/op_test.py: +0, -2853
python/paddle/fluid/tests/unittests/test_base_layer.py: +19, -44
python/paddle/fluid/tests/unittests/test_conv2d_op_depthwise_conv.py: +7, -7
python/paddle/fluid/tests/unittests/test_eager_run_program.py: +1, -1
python/paddle/fluid/tests/unittests/test_einsum_v2.py: +1, -1
python/paddle/fluid/tests/unittests/test_imperative_save_load_v2.py: +8, -8
python/paddle/fluid/tests/unittests/test_multiprocess_dataloader_dataset.py: +7, -21
python/paddle/fluid/tests/unittests/test_nonzero_api.py: +1, -1
python/paddle/fluid/tests/unittests/test_paddle_save_load.py: +6, -6
python/paddle/fluid/tests/unittests/test_parameter.py: +1, -4
python/paddle/fluid/tests/unittests/test_pylayer_op.py: +0, -5
python/paddle/fluid/tests/unittests/test_run_program_op.py: +3, -8
python/paddle/fluid/tests/unittests/test_trapezoid.py: +0, -6
python/paddle/fluid/tests/unittests/test_var_base.py: +4, -6
python/paddle/fluid/tests/unittests/xpu/test_unbind_op_xpu.py: +1, -3
python/paddle/framework/__init__.py: +1, -2
python/paddle/framework/io.py: +12, -22
python/paddle/framework/io_utils.py: +3, -1
python/paddle/hapi/model.py: +2, -2
python/paddle/hapi/model_summary.py: +3, -1
python/paddle/jit/api.py: +5, -6
python/paddle/jit/dy2static/base_transformer.py: +1, -1
python/paddle/jit/dy2static/function_spec.py: +2, -2
python/paddle/jit/dy2static/partial_program.py: +37, -80
python/paddle/jit/translated_layer.py: +42, -102
python/paddle/nn/layer/layers.py: +4, -6
python/paddle/optimizer/optimizer.py: +1, -1
python/paddle/static/input.py: +1, -1
python/paddle/static/nn/control_flow.py: +1, -1
python/paddle/static/quantization/tests/test_imperative_ptq.py: +0, -5
python/paddle/static/quantization/tests/test_imperative_qat_user_defined.py: +0, -3
python/paddle/tensor/creation.py: +6, -6
python/paddle/tensor/logic.py: +1, -5
python/paddle/tensor/manipulation.py: +0, -1
python/paddle/utils/cpp_extension/extension_utils.py: +1, -1
tools/check_file_diff_approvals.sh: +0, -16
tools/count_api_without_core_ops.py: +1, -1
tools/jetson_infer_op.py: +4, -4
python/paddle/__init__.py (+1, -5)

```diff
@@ -56,11 +56,7 @@ from .framework.dtype import bool  # noqa: F401
 from .framework.dtype import complex64  # noqa: F401
 from .framework.dtype import complex128  # noqa: F401
 
-if fluid.framework.global_var._in_eager_mode_:
-    Tensor = framework.core.eager.Tensor
-else:
-    from .framework import VarBase as Tensor  # noqa: F401
+Tensor = framework.core.eager.Tensor  # noqa: F401
 Tensor.__qualname__ = 'Tensor'  # noqa: F401
 
 import paddle.distributed  # noqa: F401
 import paddle.sysconfig  # noqa: F401
```
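With the branch gone, `paddle.Tensor` names the eager tensor class unconditionally. A quick sanity check on a build that includes this commit (hedged sketch):

```python
import paddle

# paddle.Tensor is now always the eager Tensor class
t = paddle.to_tensor([1.0, 2.0])
assert isinstance(t, paddle.Tensor)
assert paddle.Tensor.__qualname__ == 'Tensor'
```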
python/paddle/amp/grad_scaler.py (+1, -1)

```diff
@@ -179,7 +179,7 @@ class AmpScaler:
                 scaled.backward()
                 scaler.minimize(optimizer, scaled)
         """
-        check_type(var, "var", core.VarBase, 'AmpScaler.scale()')
+        check_type(var, "var", core.eager.Tensor, 'AmpScaler.scale()')
 
         if not self._enable:
             return var
```
python/paddle/distributed/auto_parallel/engine.py (+1, -1)

```diff
@@ -266,7 +266,7 @@ class Engine:
                     specs.append(spec)
                 else:
                     specs.append(spec.batch(batch_size))
-            elif isinstance(item, (Variable, core.VarBase, core.eager.Tensor)):
+            elif isinstance(item, (Variable, core.eager.Tensor)):
                 spec = InputSpec.from_tensor(item, name)
                 _adjust_item_spec(num_shards, spec)
                 if batch_size is None:
```
python/paddle/distributed/fleet/recompute/recompute.py (+5, -5)

```diff
@@ -31,7 +31,7 @@ __all__ = []
 def detach_variable(inputs):
     out = []
     for inp in inputs:
-        if not isinstance(inp, (core.eager.Tensor, core.VarBase)):
+        if not isinstance(inp, core.eager.Tensor):
             out.append(inp)
             continue
@@ -172,7 +172,7 @@ class RecomputeFunction(PyLayer):
             detached_inputs = detach_variable(tuple(inputs))
             outputs = ctx.run_function(*detached_inputs, **ctx.kwargs)
 
-            if isinstance(outputs, (core.VarBase, core.eager.Tensor)):
+            if isinstance(outputs, core.eager.Tensor):
                 outputs = (outputs,)
             assert len(outputs) == len(args)
@@ -185,7 +185,7 @@ class RecomputeFunction(PyLayer):
             backward_inputs_with_grad = []
             for i in range(len(outputs)):
                 if (
-                    isinstance(outputs[i], (core.VarBase, core.eager.Tensor))
+                    isinstance(outputs[i], core.eager.Tensor)
                     and not outputs[i].stop_gradient
                 ):
                     forward_outputs_with_grad.append(outputs[i])
@@ -206,13 +206,13 @@ class RecomputeFunction(PyLayer):
                 grads = tuple(
                     inp._grad_ivar()
                     for inp in detached_inputs
-                    if isinstance(inp, (core.VarBase, core.eager.Tensor))
+                    if isinstance(inp, core.eager.Tensor)
                 )
             else:
                 grads = [
                     inp._grad_ivar()
                     for inp in detached_inputs
-                    if isinstance(inp, (core.VarBase, core.eager.Tensor))
+                    if isinstance(inp, core.eager.Tensor)
                 ]
             return grads
```
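All four hunks apply the same type narrowing to the recompute helpers. For context, a self-contained sketch of what `detach_variable` does after the change (the body past the type check is paraphrased from the surrounding function, not copied from the file):

```python
from paddle.fluid import core

def detach_variable(inputs):
    # Non-tensor items pass through untouched; tensors are detached from
    # the autograd graph so the forward pass can be replayed in backward.
    out = []
    for inp in inputs:
        if not isinstance(inp, core.eager.Tensor):
            out.append(inp)
            continue
        x = inp.detach()
        x.stop_gradient = inp.stop_gradient
        out.append(x)
    return tuple(out)
```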
python/paddle/distributed/fleet/recompute/recompute_hybrid.py (+3, -3)

```diff
@@ -220,7 +220,7 @@ class _HPRecomputeFunction(PyLayer):
         detached_inputs = detach_variable(tuple(inputs))
         outputs = ctx.run_function(*detached_inputs, **ctx.kwargs)
 
-        if isinstance(outputs, (core.VarBase, core.eager.Tensor)):
+        if isinstance(outputs, core.eager.Tensor):
             outputs = (outputs,)
         assert len(outputs) == len(args)
@@ -229,7 +229,7 @@ class _HPRecomputeFunction(PyLayer):
         for i in range(len(outputs)):
             if (
-                isinstance(outputs[i], (core.VarBase, core.eager.Tensor))
+                isinstance(outputs[i], core.eager.Tensor)
                 and not outputs[i].stop_gradient
             ):
                 forward_outputs_with_grad.append(outputs[i])
@@ -245,7 +245,7 @@ class _HPRecomputeFunction(PyLayer):
         grads = tuple(
             inp._grad_ivar()
             for inp in detached_inputs
-            if isinstance(inp, (core.VarBase, core.eager.Tensor))
+            if isinstance(inp, core.eager.Tensor)
         )
         return grads
```
python/paddle/distributed/fleet/utils/hybrid_parallel_util.py (+2, -2)

```diff
@@ -156,7 +156,7 @@ def broadcast_input_data(hcg, *inputs, **kwargs):
     place = eval(f"paddle.{dev.upper()}Place")(dev_idx)
 
     for v in inputs:
-        if isinstance(v, (core.VarBase, core.eager.Tensor)):
+        if isinstance(v, core.eager.Tensor):
             with framework.no_grad():
                 if in_dygraph_mode() and not eval(f"v.place.is_{dev}_place")():
                     v_gpu = v._copy_to(place, True)
@@ -167,7 +167,7 @@ def broadcast_input_data(hcg, *inputs, **kwargs):
             logger.warning("it doesn't support data type {}".format(type(v)))
 
     for k, v in kwargs.items():
-        if isinstance(v, (core.VarBase, core.eager.Tensor)):
+        if isinstance(v, core.eager.Tensor):
             with framework.no_grad():
                 if in_dygraph_mode() and not eval(f"v.place.is_{dev}_place")():
                     v_gpu = v._copy_to(place, True)
```
python/paddle/distributed/parallel.py (+5, -7)

```diff
@@ -44,7 +44,7 @@ from paddle.distributed.fleet.base.private_helper_function import (  # noqa: F40
 from paddle.distributed.fleet.launch_utils import check_backend
 
 # (TODO: GhostScreaming) It will be removed later.
-from paddle.framework import ParamBase, _set_expected_place
+from paddle.framework import _set_expected_place
 from paddle.framework import base as imperative_base
 from paddle.framework import core, in_dygraph_mode, to_variable
 from paddle.nn.layer import layers
@@ -158,14 +158,14 @@ def sync_params_buffers(
 ):
     model_vars = []
     for _, param in model._obtain_parameters_buffers().items():
-        if not isinstance(param, (core.VarBase, core.eager.Tensor)):
+        if not isinstance(param, core.eager.Tensor):
             raise TypeError(
                 "The data type of '%s' must be Varbase or eager.Tensor"
                 % param.name
             )
 
         # is_distributed param not need to sync when in mp mode
-        if isinstance(param, (ParamBase, core.eager.Tensor)):
+        if isinstance(param, core.eager.Tensor):
             if is_model_parallel:
                 if hasattr(param, "is_distributed") and param.is_distributed:
                     continue
@@ -379,9 +379,7 @@ class DataParallel(layers.Layer):
         self.find_unused_parameters = find_unused_parameters
         self.grad_need_sync = True
         self.group = group
-        self.var_dtype = (
-            core.eager.Tensor if in_dygraph_mode() else core.VarBase
-        )
+        self.var_dtype = core.eager.Tensor
 
         # NOTE(chenweihang): The ParallelStrategy here is not strictly a strategy.
         # It just stores some environment variables, which can be constructed by
@@ -491,7 +489,7 @@ class DataParallel(layers.Layer):
         )
 
     def _find_varbase(self, obj):
-        var_type = core.eager.Tensor if in_dygraph_mode() else core.VarBase
+        var_type = core.eager.Tensor
         if isinstance(obj, var_type):
             return [obj]
         if isinstance(obj, (list, tuple)):
```
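`_find_varbase` keeps its recursive shape; only the type constant changes. A hedged sketch of the traversal it performs (the dict branch is an assumption from the surrounding class, not shown in this hunk):

```python
from paddle.fluid import core

def find_tensors(obj):
    # Collect every eager Tensor from a nested list/tuple/dict structure,
    # mirroring DataParallel._find_varbase after this commit.
    if isinstance(obj, core.eager.Tensor):
        return [obj]
    if isinstance(obj, (list, tuple)):
        return [t for item in obj for t in find_tensors(item)]
    if isinstance(obj, dict):
        return [t for v in obj.values() for t in find_tensors(v)]
    return []
```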
python/paddle/fluid/data_feeder.py (+4, -14)

```diff
@@ -116,32 +116,22 @@ def check_type(input, input_name, expected_type, op_name, extra_message=''):
         return
 
     # NOTE: `in_declarative_mode` is used to determined whether this op is called under
-    # @to_static in transformation from dygrah to static layer. We add VarBase in
-    # expected_type to skip checking because varBase may be created and used in unusual way.
+    # @to_static in transformation from dygrah to static layer. We add Tensor in
+    # expected_type to skip checking because Tensor may be created and used in unusual way.
    from .dygraph.base import in_declarative_mode
 
     # Need a better design to be fix this.
     if in_declarative_mode():
         if not isinstance(expected_type, tuple):
             expected_type = (expected_type,)
-        expected_type += (core.VarBase,)
-        if _in_eager_without_dygraph_check():
-            expected_type += (core.eager.Tensor,)
-    elif isinstance(input, core.VarBase):
+        expected_type += (core.eager.Tensor,)
+    elif isinstance(input, core.eager.Tensor):
         raise TypeError(
             "Please use `with fluid.dygraph.guard()` as context or `fluid.enable_dygraph()` to switch to imperative mode firstly. "
             "Because received '{}' in {} is a imperative Variable.".format(
                 input_name, op_name
             )
         )
-    elif hasattr(core, "eager"):
-        if isinstance(input, core.eager.Tensor):
-            raise TypeError(
-                "Please use `with fluid.dygraph.guard()` as context or `fluid.enable_dygraph()` to switch to imperative mode firstly. "
-                "Because received '{}' in {} is a imperative Variable.".format(
-                    input_name, op_name
-                )
-            )
     if not isinstance(input, expected_type):
         raise TypeError(
             "The type of '%s' in %s must be %s, but received %s. %s"
```
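Condensed, the surviving logic reads as below (a paraphrase of the hunk, not an importable copy): under `@to_static`, eager Tensors are whitelisted so dy2static-created tensors skip the static-graph check, while an eager Tensor passed where a static `Variable` is expected raises immediately:

```python
from paddle.fluid import core

# Paraphrased control flow of check_type after this commit
def check_type_sketch(input, expected_type, in_declarative_mode):
    if in_declarative_mode:
        if not isinstance(expected_type, tuple):
            expected_type = (expected_type,)
        # dy2static may create tensors in unusual ways; accept them
        expected_type += (core.eager.Tensor,)
    elif isinstance(input, core.eager.Tensor):
        raise TypeError("switch to imperative mode first")
    if not isinstance(input, expected_type):
        raise TypeError("unexpected type %s" % type(input))
```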
python/paddle/fluid/dygraph/base.py (+28, -62)

```diff
@@ -127,20 +127,18 @@ def _convert_into_variable(tensor):
     """
     Convert Varbase into Variable.
     """
-    if isinstance(tensor, (core.eager.Tensor, core.VarBase)):
+    if isinstance(tensor, core.eager.Tensor):
         # Check whether has been created before.
         new_var = tensor.block._find_var_recursive(tensor.name)
         if new_var is not None:
             assert isinstance(new_var, framework.Variable)
-        # Convert ParamBase into Parameter with same attributes in dy2stat.
-        elif isinstance(
-            tensor, (framework.EagerParamBase, framework.ParamBase)
-        ):
+        # Convert EagerParamBase into Parameter with same attributes in dy2stat.
+        elif isinstance(tensor, framework.EagerParamBase):
             new_var = tensor._to_static_var(to_parameter=True)
         else:
-            # Note(Aurelius84): Convert VarBase in self._buffers into Variable with
+            # Note(Aurelius84): Convert Tensor in self._buffers into Variable with
             # same attributes and set persistable=True to allow saving this var.
-            # Because users can create a VarBase in `__init__` like a
+            # Because users can create a Tensor in `__init__` like a
             # `mask` Tensor or `hidden_0` in RNN layers, which is equivalent to a Parameter
             # and necessary for inferring. It will be pruned if it's not necessary for inferring.
@@ -587,7 +585,6 @@ def guard(place=None):
     train = framework.Program()
     startup = framework.Program()
     tracer = Tracer()
-    VarBase = core.VarBase
 
     if place is not None:
         expected_place = _get_paddle_place(place)
@@ -757,24 +754,14 @@ def grad(
        if isinstance(in_out_list, (list, tuple)):
            assert len(in_out_list) > 0, "{} cannot be empty".format(name)
            for each_var in in_out_list:
-                if _in_eager_without_dygraph_check():
-                    assert isinstance(
-                        each_var, core.eager.Tensor
-                    ), "Elements of {} must be Tensor".format(name)
-                else:
-                    assert isinstance(
-                        each_var, core.VarBase
-                    ), "Elements of {} must be Variable".format(name)
+                assert isinstance(
+                    each_var, core.eager.Tensor
+                ), "Elements of {} must be Tensor".format(name)
            return in_out_list
        else:
-            if _in_eager_without_dygraph_check():
-                assert isinstance(
-                    in_out_list, core.eager.Tensor
-                ), "{} must be Tensor or list of Tensor".format(name)
-            else:
-                assert isinstance(
-                    in_out_list, core.VarBase
-                ), "{} must be Variable or list of Variable".format(name)
+            assert isinstance(
+                in_out_list, core.eager.Tensor
+            ), "{} must be Tensor or list of Tensor".format(name)
            return [in_out_list]

    outputs = check_in_out(outputs, 'outputs')
@@ -786,14 +773,9 @@ def grad(
        for each_var in grad_outputs:
            if each_var is not None:
-                if _in_eager_without_dygraph_check():
-                    assert isinstance(
-                        each_var, core.eager.Tensor
-                    ), "grad_outputs must be None, a Variable or a list containing None or Variables"
-                else:
-                    assert isinstance(
-                        each_var, core.VarBase
-                    ), "grad_outputs must be None, a Variable or a list containing None or Variables"
+                assert isinstance(
+                    each_var, core.eager.Tensor
+                ), "grad_outputs must be None, a Variable or a list containing None or Variables"
    else:
        grad_outputs = []
@@ -804,21 +786,16 @@ def grad(
    if no_grad_vars is None:
        no_grad_vars = []
-    elif isinstance(no_grad_vars, (core.VarBase, core.eager.Tensor)):
-        no_grad_vars = [no_grad_vars]
    elif isinstance(no_grad_vars, core.eager.Tensor):
        no_grad_vars = [no_grad_vars]
    elif isinstance(no_grad_vars, (list, tuple, set)):
        no_grad_vars = list(no_grad_vars)
        for var in no_grad_vars:
-            if _in_eager_without_dygraph_check():
-                assert isinstance(
-                    var, core.eager.Tensor
-                ), "no_grad_vars can only contains Tensor"
-            else:
-                assert isinstance(
-                    var, core.VarBase
-                ), "no_grad_vars can only contains Variable"
+            assert isinstance(
+                var, core.eager.Tensor
+            ), "no_grad_vars can only contains Tensor"
    else:
        if _in_eager_without_dygraph_check():
            raise AssertionError(
@@ -932,7 +909,6 @@ def to_variable(value, name=None, zero_copy=None, dtype=None):
        tuple,
        np.ndarray,
        core.eager.Tensor,
-        core.VarBase,
        framework.Variable,
        core.Tensor,
        core.LoDTensor,
@@ -942,10 +918,10 @@ def to_variable(value, name=None, zero_copy=None, dtype=None):
            "The type of 'value' in fluid.dygraph.to_variable must be %s, but received %s."
            % (support_type, type(value))
        )
-    if isinstance(value, (core.eager.Tensor, core.VarBase, framework.Variable)):
+    if isinstance(value, (core.eager.Tensor, framework.Variable)):
        return value
    elif isinstance(value, (core.Tensor, core.LoDTensor)):
-        return core.VarBase(value)
+        return core.eager.Tensor(value)
    else:
        if isinstance(
            framework._current_expected_place(), framework.core.CPUPlace
@@ -974,21 +950,11 @@ def to_variable(value, name=None, zero_copy=None, dtype=None):
        if value.dtype != dtype:
            value = value.astype(dtype)

-        if _in_eager_without_dygraph_check():
-            return core.eager.Tensor(
-                value,
-                framework._current_expected_place(),
-                False,
-                zero_copy,
-                name if name else None,
-                True,
-            )
-        else:
-            py_var = core.VarBase(
-                value=value,
-                place=framework._current_expected_place(),
-                persistable=False,
-                zero_copy=zero_copy,
-                name=name if name else '',
-            )
-            return py_var
+        return core.eager.Tensor(
+            value,
+            framework._current_expected_place(),
+            False,
+            zero_copy,
+            name if name else None,
+            True,
+        )
```
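After these hunks, `to_variable` has a single construction path; its call signature is unchanged. A hedged usage example:

```python
import numpy as np

import paddle.fluid as fluid
from paddle.fluid import core

with fluid.dygraph.guard():
    # to_variable now always builds a core.eager.Tensor
    x = fluid.dygraph.to_variable(np.ones([2, 3], dtype='float32'))
    assert isinstance(x, core.eager.Tensor)
```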
python/paddle/fluid/dygraph/math_op_patch.py (+4, -14)

```diff
@@ -62,7 +62,6 @@ _complex_dtypes = [
     core.VarDesc.VarType.COMPLEX128,
 ]
 
-_already_patch_varbase = False
 _already_patch_eager_tensor = False
@@ -251,10 +250,7 @@ def monkey_patch_math_varbase():
             # 2. create varbase for scalar
             lhs_dtype = self.dtype
-            if framework.global_var._in_eager_mode_:
-                other_var_should_be = core.eager.Tensor
-            else:
-                other_var_should_be = core.VarBase
+            other_var_should_be = core.eager.Tensor
             if not isinstance(other_var, other_var_should_be):
                 if isinstance(other_var, complex):
                     import paddle
@@ -483,17 +479,11 @@ def monkey_patch_math_varbase():
         '__ne__',
     ]
 
-    global _already_patch_varbase
     global _already_patch_eager_tensor
 
-    if framework.global_var._in_eager_mode_:
-        local_already_patch = _already_patch_eager_tensor
-        _already_patch_eager_tensor = True
-        local_tensor = core.eager.Tensor
-    else:
-        local_already_patch = _already_patch_varbase
-        _already_patch_varbase = True
-        local_tensor = core.VarBase
+    local_already_patch = _already_patch_eager_tensor
+    _already_patch_eager_tensor = True
+    local_tensor = core.eager.Tensor
 
     if not local_already_patch:
         if framework.global_var._in_eager_mode_:
```
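The patch-once guard that survives is easy to state on its own (illustrative sketch; `install` stands in for the real setattr loop over operator overloads):

```python
_already_patch_eager_tensor = False

def monkey_patch_math_sketch(install):
    # Install operator overloads on core.eager.Tensor exactly once,
    # no matter how many times the patcher is invoked.
    global _already_patch_eager_tensor
    local_already_patch = _already_patch_eager_tensor
    _already_patch_eager_tensor = True
    if not local_already_patch:
        install()
```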
python/paddle/fluid/dygraph/varbase_patch_methods.py (+25, -45)

```diff
@@ -26,7 +26,6 @@ from .. import unique_name
 from ..framework import (
     Variable,
     Parameter,
-    ParamBase,
     _getitem_impl_,
     _setitem_impl_,
     EagerParamBase,
@@ -94,12 +93,12 @@ def monkey_patch_varbase():
         **Notes**:
             **This API is ONLY available in Dygraph mode**
 
-        Transform a VarBase into static Variable with same attributes. It's a low level interface used
+        Transform a Tensor into static Variable with same attributes. It's a low level interface used
         in dy2static and shall not be called directly.
 
         Args:
-            to_parameter (bool): It takes effect only if the input a VarBase. If set True,
-                the VarBase will be converted into framework.Parameters. Otherwise, it will
+            to_parameter (bool): It takes effect only if the input a Tensor. If set True,
+                the Tensor will be converted into framework.Parameters. Otherwise, it will
                 be converted into framework.Variable. Default False.
 
         Examples:
@@ -120,7 +119,7 @@ def monkey_patch_varbase():
         # It will fail. So, for propery that different between dynamic and static graph, should not getattr(self, attr, None).
         attr_not_need_keys = ['grad', 'T', 'place', '_place_str']
         param_keys = ['stop_gradient', 'trainable']
-        if isinstance(self, (ParamBase, EagerParamBase)):
+        if isinstance(self, EagerParamBase):
             attr_kwargs = self.__dict__.copy()
             for key in param_keys:
                 attr_kwargs[key] = getattr(self, key)
@@ -144,7 +143,7 @@ def monkey_patch_varbase():
         attr_kwargs.update(kwargs)
 
-        if to_parameter or isinstance(self, (ParamBase, EagerParamBase)):
+        if to_parameter or isinstance(self, EagerParamBase):
             del attr_kwargs['persistable']
             # NOTE(Aurelius84): All parameters should be placed into global block.
             attr_kwargs['block'] = attr_kwargs['block'].program.global_block()
@@ -183,13 +182,10 @@ def monkey_patch_varbase():
                 out = linear(t)  # call with different weight
 
         """
-        if framework.global_var._in_eager_mode_:
-            base_tensor = core.eager.Tensor
-        else:
-            base_tensor = core.VarBase
+        base_tensor = core.eager.Tensor
         assert isinstance(
             value, (np.ndarray, base_tensor, dict, str)
-        ), "Variable set_value function, arguments type only support Variable, numpy, VarBase, dict, string."
+        ), "Variable set_value function, arguments type only support Variable, numpy, Tensor, dict, string."
 
         if isinstance(value, (dict, str)):
             assert len(self) == len(
@@ -219,8 +215,7 @@ def monkey_patch_varbase():
                 self.name, self.dtype, dtype
             )
 
-        # NOTE(wuweilong): self could be VarBase or Tensor, the subsequent behavior are defined in different files
-        # if self is VarBase, method value() return Variable that bindded in imperative.cc, get_tensor() bindded in pybind.cc
+        # NOTE(wuweilong): self could be Tensor, the subsequent behavior are defined in different files
+        # if self is Tensor, method value() return self that defined in this file, get_tensor() defined in eager_method.cc
         # this Interface behavior will be unifed in the future.
         self.value().get_tensor().set(
@@ -665,7 +660,7 @@ def monkey_patch_varbase():
     def __str__(self):
         """
-        Convert a VarBase object to a readable string.
+        Convert a Tensor object to a readable string.
 
         Returns(str): A readable string.
@@ -680,14 +675,9 @@ def monkey_patch_varbase():
             #        [[0.30574632, 0.55739117, 0.30902600, 0.39413780, 0.44830436],
             #         [0.79010487, 0.53972793, 0.09495186, 0.44267157, 0.72112119]])
         """
-        if framework.global_var._in_eager_mode_:
-            from paddle.tensor.to_string import tensor_to_string
-
-            return tensor_to_string(self)
-        else:
-            from paddle.tensor.to_string import to_string
-
-            return to_string(self)
+        from paddle.tensor.to_string import tensor_to_string
+
+        return tensor_to_string(self)
 
     def __deepcopy__(self, memo):
         """
@@ -714,10 +704,7 @@ def monkey_patch_varbase():
             raise RuntimeError(
                 "Only Leaf Tensor support the deepcopy at the moment, non-Leaf Tensors contains graph information that does't support deepcopy"
             )
-        if framework.global_var._in_eager_mode_:
-            new_varbase = core.eager.Tensor()
-        else:
-            new_varbase = core.VarBase()
+        new_varbase = core.eager.Tensor()
         new_varbase.name = self.name + unique_name.generate("_deepcopy")
         memo[id(self)] = new_varbase
         new_varbase.copy_(self, True)
@@ -1055,26 +1042,19 @@ def monkey_patch_varbase():
         ("to_dense", to_dense),
         ("to_sparse_coo", to_sparse_coo),
     ):
-        if framework.global_var._in_eager_mode_:
-            setattr(core.eager.Tensor, method_name, method)
-        else:
-            setattr(core.VarBase, method_name, method)
-
-    if framework.global_var._in_eager_mode_:
-        setattr(core.eager.Tensor, "_set_grad_ivar", _set_grad_ivar)
-        setattr(core.eager.Tensor, "value", value)
-        setattr(core.eager.Tensor, "cpu", cpu)
-        setattr(core.eager.Tensor, "cuda", cuda)
-        setattr(core.eager.Tensor, "pin_memory", pin_memory)
-        setattr(core.eager.Tensor, "_slice", _slice)
-        setattr(core.eager.Tensor, "_numel", _numel)
-        setattr(core.eager.Tensor, "_uva", _uva)
-        setattr(core.eager.Tensor, "_clear_data", _clear_data)
-        setattr(core.eager.Tensor, "__hash__", __hash__)
-        setattr(core.eager.Tensor, "_use_gpudnn", _use_gpudnn)
-    else:
-        setattr(core.VarBase, "__name__", "Tensor")
-        setattr(core.VarBase, "grad", grad)
+        setattr(core.eager.Tensor, method_name, method)
+
+    setattr(core.eager.Tensor, "_set_grad_ivar", _set_grad_ivar)
+    setattr(core.eager.Tensor, "value", value)
+    setattr(core.eager.Tensor, "cpu", cpu)
+    setattr(core.eager.Tensor, "cuda", cuda)
+    setattr(core.eager.Tensor, "pin_memory", pin_memory)
+    setattr(core.eager.Tensor, "_slice", _slice)
+    setattr(core.eager.Tensor, "_numel", _numel)
+    setattr(core.eager.Tensor, "_uva", _uva)
+    setattr(core.eager.Tensor, "_clear_data", _clear_data)
+    setattr(core.eager.Tensor, "__hash__", __hash__)
+    setattr(core.eager.Tensor, "_use_gpudnn", _use_gpudnn)
 
     global _already_patch_repr
     if not _already_patch_repr:
```
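All of these methods are plain Python functions attached to the pybind-defined tensor class; the mechanism, reduced to its core, is just `setattr`. An illustrative sketch with a hypothetical helper name (not one of the functions from the diff):

```python
from paddle.fluid import core

def _numel_sketch(self):
    # hypothetical helper standing in for the real patched methods
    n = 1
    for d in self.shape:
        n *= d
    return n

# attach it as a method on every eager Tensor instance
setattr(core.eager.Tensor, '_numel_sketch', _numel_sketch)
```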
python/paddle/fluid/framework.py (+24, -264)

```diff
@@ -192,9 +192,9 @@ extra_op_attrs = {
 # to make sure in most case, we find new dygraph mode first with only one if statement.
 
 
-def _update_monkey_methods(is_eager):
+def _update_monkey_methods():
     """
-    Update monkey methods of VarBase or eager.Tensor while
+    Update monkey methods of Tensor or eager.Tensor while
     switching eager mode and legacy mode.
     """
     from paddle import _C_ops, _legacy_C_ops
@@ -204,46 +204,23 @@ def _update_monkey_methods(is_eager):
     global _already_patch_eager_tensor
     global _already_patch_varbase
 
-    assert isinstance(is_eager, bool)
-    # switch into eager mode
-    if is_eager:
-        if not _already_patch_eager_tensor:
-            monkey_patch_varbase()
-            monkey_patch_math_varbase()
+    if not _already_patch_eager_tensor:
+        monkey_patch_varbase()
+        monkey_patch_math_varbase()
 
-            _already_patch_eager_tensor = True
-    # switch back into legacy mode
-    else:
-        if not _already_patch_varbase:
-            monkey_patch_varbase()
-            monkey_patch_math_varbase()
-
-            _already_patch_varbase = True
+        _already_patch_eager_tensor = True
 
     # switch Paddle.Tensor bind type
-    _switch_tensor_bind_type(is_eager)
+    _switch_tensor_bind_type()
 
 
-def _switch_tensor_bind_type(is_eager):
+def _switch_tensor_bind_type():
     import paddle
 
-    if is_eager:
-        paddle.Tensor = core.eager.Tensor
-    else:
-        paddle.Tensor = core.VarBase
+    paddle.Tensor = core.eager.Tensor
     paddle.Tensor.__qualname__ = 'Tensor'
 
 
-def _enable_legacy_dygraph():
-    global_var._in_eager_mode_ = False
-    _update_monkey_methods(is_eager=False)
-
-
-def _disable_legacy_dygraph():
-    global_var._in_eager_mode_ = True
-    _update_monkey_methods(is_eager=True)
-
-
 def _in_eager_without_dygraph_check():
     return global_var._in_eager_mode_
@@ -253,36 +230,6 @@ def _in_eager_without_dygraph_check():
 _is_first_import_ = True
 
 
-def _fallback_legacy_dygraph():
-    global _is_first_import_
-    need_fallback = False
-    # Only enable eager on CPU/GPU/XPU
-    is_not_support = (
-        core.is_compiled_with_npu()
-        or core.is_compiled_with_ipu()
-        or core.is_compiled_with_mlu()
-    )
-
-    if global_var._in_eager_mode_ and is_not_support:
-        # switch into legacy dygraph mode
-        warnings.warn(
-            "We will fallback into legacy dygraph on NPU/XPU/MLU/IPU/ROCM devices. Because we only support new eager dygraph mode on CPU/GPU currently. "
-        )
-        global_var._in_eager_mode_ = False
-        if not _is_first_import_:
-            _enable_legacy_dygraph()
-        need_fallback = True
-
-    need_fallback = False
-    _is_first_import_ = False
-
-    return need_fallback
-
-
-# switch into legacy mode if need while import paddle
-_fallback_legacy_dygraph()
-
-
 def in_dygraph_mode():
     """
@@ -319,19 +266,6 @@ def _non_static_mode():
     return global_var._dygraph_tracer_ is not None
 
 
-@signature_safe_contextmanager
-def _test_eager_guard(place=None):
-    # FIXME(dev): We haven't fully verified eager mode on NPU et.al but
-    # only GPU/CPU/XPU. Remove this after we improve this feature.
-    already_fallback = _fallback_legacy_dygraph()
-    if not already_fallback:
-        _disable_legacy_dygraph()
-    try:
-        yield
-    finally:
-        pass
-
-
 global_ipu_index = -1
 global_ipu_stage = -1
 ipu_index_attr_name = 'ipu_index'
@@ -634,11 +568,11 @@ def _set_pipeline_stage(stage):
 # NOTE(zhiqiu): This decorator is used for the APIs of Variable which is only
-# used to make Variable and VarBase has same interfaces, like numpy. Since VarBase is not exposed in our
-# official docments, logically, we want to keep VarBase and logically consistent. While, actually,
+# used to make Variable and Tensor has same interfaces, like numpy. Since Tensor is not exposed in our
+# official docments, logically, we want to keep Tensor and logically consistent. While, actually,
 # in our implementation, there some APIs not supported, like numpy, because Variable contains the desc.
 # So, those APIs are listed under class Variable to generate docs only.
-# TODO(zhiqiu): We should make VarBase consistent with Variable in future, for example, by inheritting
+# TODO(zhiqiu): We should make Tensor consistent with Variable in future, for example, by inheritting
 # same base class.
 def _fake_interface_only_(func):
     def __impl__(*args, **kwargs):
@@ -759,23 +693,6 @@ def _set_expected_place(place):
     _set_dygraph_tracer_expected_place(place)
 
 
-# TODO(zhiqiu): remove this function.
-def _var_base_to_np(var_base):
-    """
-    convert VarBase tp numpy
-
-    Args:
-        var_base(VarBase) : the VarBase to convert
-    Returns (np.ndarray): the np.ndarray contain the value of VarBase
-    """
-
-    warnings.warn(
-        "paddle.fluid.framework._var_base_to_np is deprecated, please use var_base.numpy() instead of _var_base_to_np(var_base)."
-    )
-
-    return var_base.numpy(False)
-
-
 def _cpu_num():
     if "CPU_NUM" not in os.environ.keys():
         if multiprocessing.cpu_count() > 1:
@@ -1392,24 +1309,15 @@ def _varbase_creator(
     if not isinstance(dtype, core.VarDesc.VarType):
         dtype = convert_np_dtype_to_dtype_(dtype)
 
-    if global_var._in_eager_mode_:
-        eager_tensor = core.eager.Tensor(
-            dtype if dtype else core.VarDesc.VarType.FP32,
-            list(shape) if shape else [],
-            name,
-            type if type else core.VarDesc.VarType.LOD_TENSOR,
-            True if persistable else False,
-        )
-        eager_tensor.retain_grads()
-        return eager_tensor
-    else:
-        return core.VarBase(
-            dtype if dtype else core.VarDesc.VarType.FP32,
-            list(shape) if shape else [],
-            name,
-            type if type else core.VarDesc.VarType.LOD_TENSOR,
-            True if persistable else False,
-        )
+    eager_tensor = core.eager.Tensor(
+        dtype if dtype else core.VarDesc.VarType.FP32,
+        list(shape) if shape else [],
+        name,
+        type if type else core.VarDesc.VarType.LOD_TENSOR,
+        True if persistable else False,
+    )
+    eager_tensor.retain_grads()
+    return eager_tensor
 
 
 def _all_is_type(vals, expected_type):
@@ -3141,7 +3049,7 @@ class Operator:
                     in_arg_names.append(arg)
                 elif isinstance(arg, bytes):
                     in_arg_names.append(arg.decode())
-                elif isinstance(arg, (Variable, core.VarBase)):
+                elif isinstance(arg, (Variable, core.eager.Tensor)):
                     in_arg_names.append(arg.name)
                 else:
                     raise TypeError(
@@ -4274,7 +4182,7 @@ class Block:
         op_desc = self.desc.append_op()
         inputs = kwargs.get("inputs", None)
         outputs = kwargs.get("outputs", None)
-        # NOTE(Aurelius84): In case of @to_static, all VarBase(s) should
+        # NOTE(Aurelius84): In case of @to_static, all Tensor(s) should
         # be converted into Variable(s) with same name and block location.
         # This is ONE and ONLY logic of type transformation of dy2static.
         ignore_ops = {
@@ -7205,155 +7113,7 @@ class Parameter(Variable, metaclass=ParameterMetaClass):
     __repr__ = __str__
 
 
-class ParamBase(core.VarBase):
-    """
-    ParamBase is derived from Tensor( Which is the concept in Dygraph Mode).
-    A ParamBase is a persistable Tensor, and will be updated by optimizers
-    after each iteration.
-    The training of a neural network is essentially the updating of
-    its ParamBase.
-
-    Relative to a general Tensor, a ParamBase has several its own
-    member variables:
-
-    Args:
-        trainable(bool): True if the ParamBase need to be updated after
-            iterations.
-        optimize_attr(map): ParamBase attributes related with optimizing.
-            Currently, it only contains 'learning_rate'.
-            Default: {'learning_rate': 1.0}
-        regularizer(WeightDecayRegularizer): The Regularizer which will
-            be applied on the ParamBase. Default: None
-        do_model_average(bool): True if the model average strategy will
-            be applied on this ParamBase.
-        need_clip (bool): Whether the parameter gradient need to be cliped
-            in optimizer. Default is True.
-    """
-
-    @dygraph_only
-    def __init__(self, shape, dtype, **kwargs):
-        if shape is None:
-            raise ValueError("The shape of Parameter should not be None")
-        if dtype is None:
-            raise ValueError("The dtype of Parameter should not be None")
-
-        for each in shape:
-            if each < 0:
-                raise ValueError(
-                    "Each dimension of shape for Parameter must be greater than 0, but received %s"
-                    % list(shape)
-                )
-
-        if dtype is not None:
-            if not isinstance(dtype, core.VarDesc.VarType):
-                dtype = convert_np_dtype_to_dtype_(dtype)
-
-        name = kwargs.get('name', unique_name.generate('_param_base'))
-
-        super().__init__(
-            dtype if dtype else core.VarDesc.VarType.FP32,
-            list(shape) if shape else [],
-            name,
-            core.VarDesc.VarType.LOD_TENSOR,
-            True,
-        )
-
-        trainable = kwargs.get('trainable', True)
-        self.stop_gradient = not trainable
-
-        self.optimize_attr = kwargs.get('optimize_attr', {'learning_rate': 1.0})
-
-        self.regularizer = kwargs.get('regularizer', None)
-
-        self.do_model_average = kwargs.get('do_model_average', None)
-
-        self.need_clip = kwargs.get('need_clip', True)
-
-        self.is_distributed = kwargs.get('is_distributed', False)
-        # self.block = default_main_program().global_block()
-
-    @property
-    def trainable(self):
-        return not self.stop_gradient
-
-    @trainable.setter
-    def trainable(self, trainable):
-        if isinstance(trainable, bool):
-            self.stop_gradient = not trainable
-        else:
-            raise ValueError(
-                "The type of trainable MUST be bool, but the type is ",
-                type(trainable),
-            )
-
-    def __str__(self):
-        """
-        Convert a ParamBase object to a readable string.
-
-        Returns(str): A readable string.
-
-        Examples:
-            .. code-block:: python
-
-                import paddle
-                linear = paddle.nn.Linear(3, 3)
-                print(linear.weight)
-                # Parameter containing:
-                # Tensor(shape=[3, 3], dtype=float32, place=CUDAPlace(0), stop_gradient=False,
-                #        [[ 0.48948765,  0.05829060, -0.25524026],
-                #         [-0.70368278,  0.52986908, -0.68742192],
-                #         [-0.54217887,  0.48439729,  0.34082305]])
-        """
-        return "Parameter containing:\n{tensor}".format(tensor=super().__str__())
-
-    def __deepcopy__(self, memo):
-        """
-        Deep copy parameter, it will always performs Tensor copy.
-
-        Examples:
-            .. code-block:: python
-
-                import paddle
-                import copy
-                linear = paddle.nn.Linear(1, 3)
-                linear_copy = copy.deepcopy(linear)
-
-                print(linear.weight)
-                # Parameter containing:
-                # Tensor(shape=[1, 3], dtype=float32, place=CPUPlace, stop_gradient=False,
-                #        [[-0.30929261, -0.90929240, -1.07851017]])
-
-                print(linear_copy.weight)
-                # Parameter containing:
-                # Tensor(shape=[1, 3], dtype=float32, place=CPUPlace, stop_gradient=False,
-                #        [[-0.30929261, -0.90929240, -1.07851017]])
-        """
-        state = copy.deepcopy(self.__dict__, memo)
-        state["name"] = self.name + unique_name.generate("_deepcopy")
-        new_param = ParamBase(self.shape, self.dtype, **state)
-        memo[id(self)] = new_param
-        new_param.copy_(self, True)
-        return new_param
-
-    def _copy_to(self, device, blocking):
-        state = copy.deepcopy(self.__dict__)
-        new_param = ParamBase(self.shape, self.dtype, **state)
-        core.varbase_copy(self, new_param, device, blocking)
-        return new_param
-
-    __repr__ = __str__
-
-
-if hasattr(core, "eager"):
-    _core_eager_eagertensor = core.eager.Tensor
-else:
-    _core_eager_eagertensor = object
-
-
-class EagerParamBase(_core_eager_eagertensor):
+class EagerParamBase(core.eager.Tensor):
     """
     EagerParamBase is derived from Tensor( Which is the concept in Eager-Dygraph Mode).
     A EagerParamBase is a persistable Tensor, and will be updated by optimizers
```
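With `ParamBase` removed, `EagerParamBase` (now declared directly on `core.eager.Tensor`) is the only dygraph parameter type. Construction matches what the updated tests below use (hedged example; the trainable default is taken from the deleted ParamBase, which EagerParamBase mirrors):

```python
import paddle
from paddle.fluid.framework import EagerParamBase

p = EagerParamBase([2, 3], 'float32')
assert p.trainable                   # defaults to True, as ParamBase did
assert isinstance(p, paddle.Tensor)  # paddle.Tensor is core.eager.Tensor
```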
python/paddle/fluid/layer_helper_base.py (+9, -19)

```diff
@@ -88,25 +88,15 @@ class LayerHelperBase:
         """
         if isinstance(value, np.ndarray):
-            if _in_eager_without_dygraph_check():
-                return core.eager.Tensor(
-                    value,
-                    _current_expected_place(),
-                    False,
-                    False,
-                    name if name else None,
-                    True,
-                )
-            else:
-                py_var = core.VarBase(
-                    value=value,
-                    name=name if name else '',
-                    persistable=False,
-                    place=_current_expected_place(),
-                    zero_copy=False,
-                )
-                return py_var
-        elif isinstance(value, (core.VarBase, Variable, core.eager.Tensor)):
+            return core.eager.Tensor(
+                value,
+                _current_expected_place(),
+                False,
+                False,
+                name if name else None,
+                True,
+            )
+        elif isinstance(value, (Variable, core.eager.Tensor)):
             return value
         else:
             raise TypeError(
```
python/paddle/fluid/layers/control_flow.py (+1, -1)

```diff
@@ -1053,7 +1053,7 @@ def assign_skip_lod_tensor_array(input, output):
                 return True
         return False
 
-    if not isinstance(input, (Variable, core.VarBase)):
+    if not isinstance(input, (Variable, core.eager.Tensor)):
         if isinstance(output, Variable) and isinstance(
             input, support_ret_buildin_type
         ):
```
python/paddle/fluid/optimizer.py (+2, -2)

```diff
@@ -309,7 +309,7 @@ class Optimizer:
                 self._learning_rate.step_num = global_step[0]
             else:
                 raise RuntimeError(
-                    "Type not supprt, value in state dict must be [VarBase, Variable, numpy], the type is ",
+                    "Type not supprt, value in state dict must be [Tensor, Variable, numpy], the type is ",
                     type(global_step),
                 )
@@ -320,7 +320,7 @@ class Optimizer:
                 load_para = state_dict[param.name]
                 if isinstance(load_para, Variable):
                     load_para_np = load_para.numpy()
-                elif isinstance(load_para, core.VarBase):
+                elif isinstance(load_para, core.eager.Tensor):
                     load_para_np = load_para.numpy()
                 elif isinstance(load_para, np.ndarray):
                     load_para_np = load_para
```
python/paddle/fluid/tests/unittests/dygraph_to_static/test_len.py (+1, -1)

```diff
@@ -62,7 +62,7 @@ class TestLen(unittest.TestCase):
             else:
                 out = self.func(self.x_data)
 
-            if isinstance(out, fluid.core.VarBase):
+            if isinstance(out, fluid.core.eager.Tensor):
                 out = out.numpy()
             return out
```
python/paddle/fluid/tests/unittests/dygraph_to_static/test_loop.py (+1, -1)

```diff
@@ -320,7 +320,7 @@ class TestTransformWhileLoop(unittest.TestCase):
     def _run(self, to_static):
         with fluid.dygraph.guard(self.place):
-            # Set the input of dyfunc to VarBase
+            # Set the input of dyfunc to Tensor
             tensor_x = fluid.dygraph.to_variable(self.x, zero_copy=False)
             if to_static:
                 ret = paddle.jit.to_static(self.dyfunc)(tensor_x)
```
python/paddle/fluid/tests/unittests/dygraph_to_static/test_partial_program.py (+1, -3)

```diff
@@ -116,9 +116,7 @@ class TestWithNestedOutput(unittest.TestCase):
         self.assertTrue(len(dygraph_res) == len(static_res))
 
         for dy_var, st_var in zip(dygraph_res, static_res):
-            if isinstance(
-                dy_var, (fluid.core.VarBase, fluid.core.eager.Tensor)
-            ):
+            if isinstance(dy_var, fluid.core.eager.Tensor):
                 np.testing.assert_allclose(
                     dy_var.numpy(), st_var.numpy(), rtol=1e-05
                 )
```
python/paddle/fluid/tests/unittests/dygraph_to_static/test_return.py (+1, -1)

```diff
@@ -282,7 +282,7 @@ class TestReturnBase(unittest.TestCase):
             res = self.dygraph_func(self.input)
             if isinstance(res, (tuple, list)):
                 return tuple(r.numpy() for r in res)
-            elif isinstance(res, (core.VarBase, core.eager.Tensor)):
+            elif isinstance(res, core.eager.Tensor):
                 return res.numpy()
             return res
```
python/paddle/fluid/tests/unittests/dygraph_to_static/test_save_inference_model.py (+2, -2)

```diff
@@ -161,13 +161,13 @@ class TestPartialProgramRaiseError(unittest.TestCase):
             concrete_program.parameters = params[0]
             # TypeError: Type of self._params should be list or tuple,
-            # but received <class 'paddle.fluid.framework.ParamBase'>.
+            # but received <class 'paddle.fluid.framework.EagerParamBase'>.
             with self.assertRaises(TypeError):
                 partial_program_from(concrete_program)
 
             params[0] = "linear.w.0"
             concrete_program.parameters = params
-            # TypeError: Type of self._params[0] should be framework.ParamBase,
+            # TypeError: Type of self._params[0] should be framework.EagerParamBase,
             # but received <type 'str'>.
             with self.assertRaises(TypeError):
                 partial_program_from(concrete_program)
```
python/paddle/fluid/tests/unittests/dygraph_to_static/test_typehint.py (+1, -1)

```diff
@@ -53,7 +53,7 @@ class TestTransformWhileLoop(unittest.TestCase):
     def _run(self, to_static):
         with fluid.dygraph.guard(self.place):
-            # Set the input of dyfunc to VarBase
+            # Set the input of dyfunc to Tensor
             tensor_x = fluid.dygraph.to_variable(self.x, zero_copy=False)
             if to_static:
                 ret = paddle.jit.to_static(self.dyfunc)(tensor_x)
```
python/paddle/fluid/tests/unittests/mkldnn/test_pool2d_bf16_mkldnn_op.py (+1, -1)

```diff
@@ -21,7 +21,7 @@ from paddle import enable_static
 from paddle.fluid import core
 
 sys.path.append("..")
-from op_test import OpTest, OpTestTool, convert_float_to_uint16
+from eager_op_test import OpTest, OpTestTool, convert_float_to_uint16
 from test_pool2d_op import (
     TestPool2D_Op_Mixin,
     adaptive_end_index,
```
python/paddle/fluid/tests/unittests/op_test.py (deleted, file mode 100644 → 0)
The whole 2,853-line file was removed; its diff is collapsed in the original view.
python/paddle/fluid/tests/unittests/test_base_layer.py (+19, -44)

```diff
@@ -19,7 +19,7 @@ import numpy as np
 import paddle
 from paddle import fluid
 from paddle.fluid.dygraph import to_variable
-from paddle.fluid.framework import EagerParamBase, ParamBase, in_dygraph_mode
+from paddle.fluid.framework import EagerParamBase
 
 
 class L1(paddle.nn.Layer):
@@ -184,14 +184,9 @@ class TestBuffer(unittest.TestCase):
         with self.assertRaisesRegex(
             TypeError, "buffer should be a Paddle.Tensor"
         ):
-            if in_dygraph_mode():
-                net.register_buffer(
-                    "buffer_name", EagerParamBase([2, 2], 'float32')
-                )
-            else:
-                net.register_buffer(
-                    "buffer_name", ParamBase([2, 2], 'float32')
-                )
+            net.register_buffer(
+                "buffer_name", EagerParamBase([2, 2], 'float32')
+            )
 
         with self.assertRaisesRegex(
             KeyError, "name of buffer can not contain"
@@ -208,10 +203,7 @@ class TestBuffer(unittest.TestCase):
         net.register_buffer("attr_name", var)
         del net.attr_name
-        if in_dygraph_mode():
-            net.attr_name = EagerParamBase([2, 2], 'float32')
-        else:
-            net.attr_name = ParamBase([2, 2], 'float32')
+        net.attr_name = EagerParamBase([2, 2], 'float32')
         with self.assertRaisesRegex(KeyError, "already exists"):
             net.register_buffer("attr_name", var)
@@ -278,11 +270,8 @@ class TestBuffer(unittest.TestCase):
         self.assertEqual(len(net.buffers()), 1)
         self.assertEqual(len(net.state_dict()), 0)
 
-        # Re-assign a ParamBase will remove the buffer.
-        if in_dygraph_mode():
-            net.buffer_name = EagerParamBase([2, 2], 'float32')
-        else:
-            net.buffer_name = ParamBase([2, 2], 'float32')
+        # Re-assign a EagerParamBase will remove the buffer.
+        net.buffer_name = EagerParamBase([2, 2], 'float32')
         self.assertEqual(len(net.buffers()), 0)
         self.assertEqual(len(net.state_dict()), 1)
@@ -403,12 +392,9 @@ class TestLayerTo(unittest.TestCase):
             paddle.fluid.core.VarDesc.VarType.FP64,
         )
         for p in self.linear.parameters():
-            if in_dygraph_mode():
-                self.assertTrue(
-                    isinstance(p, paddle.fluid.framework.EagerParamBase)
-                )
-            else:
-                self.assertTrue(isinstance(p, paddle.fluid.framework.ParamBase))
+            self.assertTrue(
+                isinstance(p, paddle.fluid.framework.EagerParamBase)
+            )
 
         if paddle.fluid.is_compiled_with_cuda():
             self.linear.to(device=paddle.CUDAPlace(0))
@@ -435,14 +421,9 @@ class TestLayerTo(unittest.TestCase):
                 self.linear.weight._grad_ivar().place.gpu_device_id(), 0
             )
             for p in self.linear.parameters():
-                if in_dygraph_mode():
-                    self.assertTrue(
-                        isinstance(p, paddle.fluid.framework.EagerParamBase)
-                    )
-                else:
-                    self.assertTrue(
-                        isinstance(p, paddle.fluid.framework.ParamBase)
-                    )
+                self.assertTrue(
+                    isinstance(p, paddle.fluid.framework.EagerParamBase)
+                )
 
             self.linear.to(device=paddle.CPUPlace())
             self.assertTrue(self.linear.weight.place.is_cpu_place())
@@ -489,12 +470,9 @@ class TestLayerTo(unittest.TestCase):
             paddle.fluid.core.VarDesc.VarType.FP64,
         )
         for p in self.linear.parameters():
-            if in_dygraph_mode():
-                self.assertTrue(
-                    isinstance(p, paddle.fluid.framework.EagerParamBase)
-                )
-            else:
-                self.assertTrue(isinstance(p, paddle.fluid.framework.ParamBase))
+            self.assertTrue(
+                isinstance(p, paddle.fluid.framework.EagerParamBase)
+            )
 
     def func_test_to_api_numpy_dtype(self):
         self.linear.to(dtype=np.float64)
@@ -527,12 +505,9 @@ class TestLayerTo(unittest.TestCase):
             paddle.fluid.core.VarDesc.VarType.FP64,
         )
         for p in self.linear.parameters():
-            if in_dygraph_mode():
-                self.assertTrue(
-                    isinstance(p, paddle.fluid.framework.EagerParamBase)
-                )
-            else:
-                self.assertTrue(isinstance(p, paddle.fluid.framework.ParamBase))
+            self.assertTrue(
+                isinstance(p, paddle.fluid.framework.EagerParamBase)
+            )
 
     def func_test_to_api_none_buffer(self):
         model = paddle.nn.Linear(2, 4)
```
python/paddle/fluid/tests/unittests/test_conv2d_op_depthwise_conv.py (+7, -7)

```diff
@@ -30,7 +30,7 @@ from test_conv2d_op import (
 )
 
 from paddle.fluid import core
-from paddle.fluid.tests.unittests.op_test import get_numeric_gradient
+from paddle.fluid.tests.unittests.eager_op_test import get_numeric_gradient
 from paddle.fluid.tests.unittests.testsuite import create_op
 
 # ----------------TestDepthwiseConv -----
@@ -416,14 +416,14 @@ def create_test_fp16_class(parent, grad_check=True):
             place = core.CUDAPlace(0)
             if core.is_float16_supported(place) and grad_check:
                 self.check_grad_with_place(
-                    place, ['Input'], 'Output', no_grad_set=set(['Filter'])
+                    place, ['Input'], 'Output', no_grad_set={'Filter'}
                 )
 
         def test_check_grad_no_input(self):
             place = core.CUDAPlace(0)
             if core.is_float16_supported(place) and grad_check:
                 self.check_grad_with_place(
-                    place, ['Filter'], 'Output', no_grad_set=set(['Input'])
+                    place, ['Filter'], 'Output', no_grad_set={'Input'}
                 )
 
     cls_name = "{0}_{1}".format(parent.__name__, "FP16OP")
@@ -464,7 +464,7 @@ def create_test_bf16_class(parent, atol=1e-2):
                 place,
                 ['Input'],
                 'Output',
-                no_grad_set=set(['Filter']),
+                no_grad_set={'Filter'},
                 user_defined_grads=[numeric_grads],
             )
@@ -475,7 +475,7 @@ def create_test_bf16_class(parent, atol=1e-2):
                 place,
                 ['Filter'],
                 'Output',
-                no_grad_set=set(['Input']),
+                no_grad_set={'Input'},
                 user_defined_grads=[numeric_grads],
             )
@@ -503,14 +503,14 @@ def create_test_channel_last_fp16_class(parent, grad_check=True):
             place = core.CUDAPlace(0)
             if core.is_float16_supported(place) and grad_check:
                 self.check_grad_with_place(
-                    place, ['Input'], 'Output', no_grad_set=set(['Filter'])
+                    place, ['Input'], 'Output', no_grad_set={'Filter'}
                 )
 
         def test_check_grad_no_input(self):
             place = core.CUDAPlace(0)
             if core.is_float16_supported(place) and grad_check:
                 self.check_grad_with_place(
-                    place, ['Filter'], 'Output', no_grad_set=set(['Input'])
+                    place, ['Filter'], 'Output', no_grad_set={'Input'}
                 )
 
     def init_data_format(self):
```
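Besides the `eager_op_test` import, these hunks swap `set([...])` for set literals; the two forms are equivalent, the literal simply skips a constructor call:

```python
# set([...]) and {...} build identical sets; the literal form is idiomatic
assert set(['Filter']) == {'Filter'}
assert {'Input'} == set(('Input',))
```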
python/paddle/fluid/tests/unittests/test_eager_run_program.py (+1, -1)

```diff
@@ -44,7 +44,7 @@ def _append_backward_desc(main_program, outs):
 # def _set_grad_type(params, train_program):
 #     # NOTE: if user set sparse gradient mode, the param's gradient
 #     # will be SelectedRows, not LoDTensor. But tracer will just
-#     # set param grad VarBase by forward VarBase(LoDTensor)
+#     # set param grad Tensor by forward Tensor(LoDTensor)
 #     # If we don't change grad_var type here, RunProgramOp need
 #     # transform SelectedRows to LoDTensor forcibly, it may not
 #     # be user wanted result.
```
python/paddle/fluid/tests/unittests/test_einsum_v2.py (+1, -1)

```diff
@@ -600,7 +600,7 @@ class TestSimpleUndiagonal2(unittest.TestCase):
 class TestSimpleComplexGrad(unittest.TestCase):
     """
-    EinsumOp support complex grad. but op_test don't support numeric grad for complex dtype.
+    EinsumOp support complex grad. but eager_op_test don't support numeric grad for complex dtype.
     """
 
     def test_shape(self):
```
python/paddle/fluid/tests/unittests/test_imperative_save_load_v2.py (+8, -8)

```diff
@@ -321,7 +321,7 @@ class TestDygraphPtbRnn(unittest.TestCase):
         self.opti_dict = adam.state_dict()
         self.base_opti = {}
         for k, v in self.opti_dict.items():
-            if isinstance(v, (core.VarBase, core.eager.Tensor)):
+            if isinstance(v, core.eager.Tensor):
                 self.base_opti[v.name] = v.numpy()
                 self.assertTrue(np.sum(np.abs(v.numpy())) != 0)
             else:
@@ -423,7 +423,7 @@ class TestDygraphPtbRnn(unittest.TestCase):
             opti_dict = adam.state_dict()
             # set to zero
             for k, v in opti_dict.items():
-                if isinstance(v, (core.VarBase, core.eager.Tensor)):
+                if isinstance(v, core.eager.Tensor):
                     np_t = v.numpy()
                     var = v.value().get_tensor()
                     var.set(np.zeros_like(np_t), place)
@@ -440,7 +440,7 @@ class TestDygraphPtbRnn(unittest.TestCase):
             opti_dict = adam.state_dict()
             for k, v in opti_dict.items():
-                if isinstance(v, (core.VarBase, core.eager.Tensor)):
+                if isinstance(v, core.eager.Tensor):
                     np.testing.assert_array_equal(
                         v.numpy(), self.base_opti[v.name]
                     )
@@ -545,7 +545,7 @@ class TestDygraphPtbRnn(unittest.TestCase):
             opti_dict = adam.state_dict()
             # set to zero
             for k, v in opti_dict.items():
-                if isinstance(v, (core.VarBase, core.eager.Tensor)):
+                if isinstance(v, core.eager.Tensor):
                     np_t = v.numpy()
                     var = v.value().get_tensor()
                     var.set(np.zeros_like(np_t), place)
@@ -558,7 +558,7 @@ class TestDygraphPtbRnn(unittest.TestCase):
             adam.set_state_dict(self.opti_dict)
             opti_dict = adam.state_dict()
             for k, v in opti_dict.items():
-                if isinstance(v, (core.VarBase, core.eager.Tensor)):
+                if isinstance(v, core.eager.Tensor):
                     np.testing.assert_array_equal(
                         v.numpy(), self.base_opti[v.name]
                     )
@@ -664,7 +664,7 @@ class TestDygraphPtbRnn(unittest.TestCase):
            np_opti_dict = {}
            # set to zero
            for k, v in opti_dict.items():
-                if isinstance(v, (core.VarBase, core.eager.Tensor)):
+                if isinstance(v, core.eager.Tensor):
                    np_t = v.numpy()
                    np_opti_dict[v.name] = np_t
                    var = v.value().get_tensor()
@@ -680,7 +680,7 @@ class TestDygraphPtbRnn(unittest.TestCase):
            opti_dict = adam.state_dict()
            for k, v in opti_dict.items():
-                if isinstance(v, (core.VarBase, core.eager.Tensor)):
+                if isinstance(v, core.eager.Tensor):
                    np.testing.assert_array_equal(
                        v.numpy(), self.base_opti[v.name]
                    )
@@ -953,7 +953,7 @@ class TestDygraphPtbRnn(unittest.TestCase):
        np_state_dict = {}
        for k, v in self.opti_dict.items():
-            if isinstance(v, (core.VarBase, core.eager.Tensor)):
+            if isinstance(v, core.eager.Tensor):
                np_opti_dict[v.name] = v.numpy()
            else:
                np_opti_dict[k] = v
```
python/paddle/fluid/tests/unittests/test_multiprocess_dataloader_dataset.py
浏览文件 @
d4571470
...
...
@@ -82,12 +82,8 @@ class TestTensorDataset(unittest.TestCase):
assert
len
(
label
)
==
1
assert
input
.
shape
==
[
1
,
3
,
4
]
assert
label
.
shape
==
[
1
,
1
]
assert
isinstance
(
input
,
(
fluid
.
core
.
VarBase
,
fluid
.
core
.
eager
.
Tensor
)
)
assert
isinstance
(
label
,
(
fluid
.
core
.
VarBase
,
fluid
.
core
.
eager
.
Tensor
)
)
assert
isinstance
(
input
,
fluid
.
core
.
eager
.
Tensor
)
assert
isinstance
(
label
,
fluid
.
core
.
eager
.
Tensor
)
assert
np
.
allclose
(
input
.
numpy
(),
input_np
[
i
])
assert
np
.
allclose
(
label
.
numpy
(),
label_np
[
i
])
...
...
@@ -184,12 +180,8 @@ class TestSubsetDataset(unittest.TestCase):
assert
len
(
label
)
==
1
assert
input
.
shape
==
[
1
,
3
,
4
]
assert
label
.
shape
==
[
1
,
1
]
assert
isinstance
(
input
,
(
fluid
.
core
.
VarBase
,
fluid
.
core
.
eager
.
Tensor
)
)
assert
isinstance
(
label
,
(
fluid
.
core
.
VarBase
,
fluid
.
core
.
eager
.
Tensor
)
)
assert
isinstance
(
input
,
fluid
.
core
.
eager
.
Tensor
)
assert
isinstance
(
label
,
fluid
.
core
.
eager
.
Tensor
)
elements_list
=
[]
for
_
,
(
input
,
label
)
in
enumerate
(
dataloader
()):
...
...
@@ -285,12 +277,8 @@ class TestNumpyMixTensorDataset(TestTensorDataset):
assert
len
(
label
)
==
1
assert
input
.
shape
==
[
1
,
IMAGE_SIZE
]
assert
label
.
shape
==
[
1
,
1
]
assert
isinstance
(
input
,
(
fluid
.
core
.
VarBase
,
fluid
.
core
.
eager
.
Tensor
)
)
assert
isinstance
(
label
,
(
fluid
.
core
.
VarBase
,
fluid
.
core
.
eager
.
Tensor
)
)
assert
isinstance
(
input
,
fluid
.
core
.
eager
.
Tensor
)
assert
isinstance
(
label
,
fluid
.
core
.
eager
.
Tensor
)
class
ComplextDataset
(
Dataset
):
...
...
@@ -385,9 +373,7 @@ class TestSingleFieldDataset(unittest.TestCase):
         )
         for i, data in enumerate(dataloader()):
-            assert isinstance(
-                data, (fluid.core.VarBase, fluid.core.eager.Tensor)
-            )
+            assert isinstance(data, fluid.core.eager.Tensor)
             assert data.shape == [2, 2, 3]

     def test_main(self):
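In user terms, these four hunks assert one fact: in eager mode (the only mode left) a DataLoader yields core.eager.Tensor batches. A minimal sketch with toy shapes (my own example, mirroring TestTensorDataset):

import numpy as np
import paddle
from paddle.fluid import core
from paddle.io import DataLoader, TensorDataset

input_np = np.random.random([16, 3, 4]).astype('float32')
label_np = np.random.random([16, 1]).astype('float32')
dataset = TensorDataset(
    [paddle.to_tensor(input_np), paddle.to_tensor(label_np)]
)
loader = DataLoader(dataset, batch_size=1, num_workers=0)

for input, label in loader:
    assert isinstance(input, core.eager.Tensor)
    assert isinstance(label, core.eager.Tensor)
    assert input.shape == [1, 3, 4] and label.shape == [1, 1]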
python/paddle/fluid/tests/unittests/test_nonzero_api.py
@@ -20,7 +20,7 @@ from eager_op_test import OpTest
 import paddle
 from paddle import fluid
 from paddle.fluid import Program, program_guard
-from paddle.fluid.tests.unittests.op_test import convert_float_to_uint16
+from paddle.fluid.tests.unittests.eager_op_test import convert_float_to_uint16


 def call_nonzero(x):
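Only the import source changes here; convert_float_to_uint16 itself packs float32 values into the uint16 bit pattern of bfloat16. A rough standalone equivalent of that packing (my own truncation-based sketch, not the helper's exact code):

import numpy as np

def float_to_bf16_bits(x):
    # bfloat16 keeps the upper 16 bits of the float32 representation
    x = np.ascontiguousarray(x, dtype=np.float32)
    return (x.view(np.uint32) >> 16).astype(np.uint16)

bits = float_to_bf16_bits([1.0, -2.5, 3.14159])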
python/paddle/fluid/tests/unittests/test_paddle_save_load.py
@@ -360,7 +360,7 @@ class TestSaveLoadAny(unittest.TestCase):
             self.assertTrue(
                 isinstance(
                     t_dygraph,
-                    (paddle.fluid.core.VarBase, paddle.fluid.core.eager.Tensor),
+                    paddle.fluid.core.eager.Tensor,
                 )
             )
             np.testing.assert_array_equal(tensor.numpy(), np_dygraph)
@@ -788,14 +788,14 @@ class TestSaveLoadAny(unittest.TestCase):
             self.assertTrue(
                 isinstance(
                     load_tensor3[0],
-                    (fluid.core.VarBase, fluid.core.eager.Tensor),
+                    fluid.core.eager.Tensor,
                 )
             )
             np.testing.assert_array_equal(load_tensor3[0].numpy(), obj3[0])
             self.assertTrue(
                 isinstance(
                     load_tensor3[1],
-                    (fluid.core.VarBase, fluid.core.eager.Tensor),
+                    fluid.core.eager.Tensor,
                 )
             )
             np.testing.assert_array_equal(load_tensor3[1].numpy(), obj3[1])
@@ -804,7 +804,7 @@ class TestSaveLoadAny(unittest.TestCase):
             self.assertTrue(
                 isinstance(
                     load_tensor3[2]["state_dict"][k],
-                    (fluid.core.VarBase, fluid.core.eager.Tensor),
+                    fluid.core.eager.Tensor,
                 )
             )
             np.testing.assert_array_equal(
@@ -815,7 +815,7 @@ class TestSaveLoadAny(unittest.TestCase):
             self.assertTrue(
                 isinstance(
                     load_tensor3[2]["opt"][k],
-                    (fluid.core.VarBase, fluid.core.eager.Tensor),
+                    fluid.core.eager.Tensor,
                 )
             )
             np.testing.assert_array_equal(
@@ -825,7 +825,7 @@ class TestSaveLoadAny(unittest.TestCase):
             self.assertTrue(
                 isinstance(
                     load_tensor4[0],
-                    (fluid.core.VarBase, fluid.core.eager.Tensor),
+                    fluid.core.eager.Tensor,
                 )
             )
             np.testing.assert_array_equal(load_tensor4[0].numpy(), obj4[0])
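All five assertions in this file tighten the same way: whatever structure paddle.save wrote, paddle.load hands tensors back as eager Tensors in dygraph. A minimal roundtrip sketch (the path and object are illustrative only):

import numpy as np
import paddle
from paddle.fluid import core

obj = [paddle.to_tensor(np.ones([2, 2], dtype='float32')), {'step': 3}]
paddle.save(obj, 'tmp_obj.pdt')
loaded = paddle.load('tmp_obj.pdt')

assert isinstance(loaded[0], core.eager.Tensor)
np.testing.assert_array_equal(loaded[0].numpy(), obj[0].numpy())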
python/paddle/fluid/tests/unittests/test_parameter.py
@@ -21,7 +21,7 @@ import paddle
 from paddle.fluid import core
 from paddle.fluid.dygraph import guard
 from paddle.fluid.executor import Executor
-from paddle.fluid.framework import ParamBase, Variable, default_main_program
+from paddle.fluid.framework import Variable, default_main_program

 paddle.enable_static()
 main_program = default_main_program()
@@ -73,9 +73,6 @@ class ParameterChecks(unittest.TestCase):
         pram_copy2 = copy.deepcopy(param, memo)
         self.assertEqual(id(param_copy), id(pram_copy2))

-        zero_dim_param = ParamBase(shape=[], dtype='float32')
-        self.assertEqual(zero_dim_param.shape, [])
-
     def func_exception(self):
         b = main_program.global_block()
         with self.assertRaises(ValueError):
python/paddle/fluid/tests/unittests/test_pylayer_op.py
@@ -20,11 +20,6 @@ import paddle
 from paddle.autograd.py_layer import PyLayer


-class FakeTensor(paddle.fluid.core.VarBase):
-    def __init__(self):
-        pass
-
-
 class TestPyLayer(unittest.TestCase):
     def test_simple_pylayer_multiple_output(self):
         class tanh(PyLayer):
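The deleted FakeTensor stub subclassed the now-removed core.VarBase and nothing else in the file used it. For context, the PyLayer pattern these tests exercise looks like the standard custom-tanh example (reproduced from the documented PyLayer API, slightly abridged):

import paddle
from paddle.autograd import PyLayer

class cus_tanh(PyLayer):
    @staticmethod
    def forward(ctx, x):
        y = paddle.tanh(x)
        ctx.save_for_backward(y)  # stash the activation for backward
        return y

    @staticmethod
    def backward(ctx, dy):
        (y,) = ctx.saved_tensor()
        return dy * (1 - paddle.square(y))

x = paddle.randn([2, 3])
x.stop_gradient = False
cus_tanh.apply(x).sum().backward()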
python/paddle/fluid/tests/unittests/test_run_program_op.py
@@ -175,14 +175,9 @@ class RunProgramOpTest(unittest.TestCase):
     def prepare_dygraph_input(self, place, return_param_list=False):
         def create_var_base(is_input, name, np_value, stop_gradient):
-            if global_var._in_eager_mode_:
-                var = core.eager.Tensor(
-                    value=np_value, name=name, place=place, zero_copy=True
-                )
-            else:
-                var = core.VarBase(
-                    value=np_value, name=name, place=place, zero_copy=True
-                )
+            var = core.eager.Tensor(
+                value=np_value, name=name, place=place, zero_copy=True
+            )
             var.stop_gradient = stop_gradient
             return var
python/paddle/fluid/tests/unittests/test_trapezoid.py
@@ -17,7 +17,6 @@ import unittest
 import numpy as np

 import paddle
-from paddle.fluid.framework import _test_eager_guard


 class TestTrapezoidAPI(unittest.TestCase):
@@ -61,9 +60,6 @@ class TestTrapezoidAPI(unittest.TestCase):
         np.testing.assert_allclose(out, self.output, rtol=1e-05)

     def test_dygraph(self):
-        with _test_eager_guard():
-            self.setUp()
-            self.func_dygraph()
         self.setUp()
         self.func_dygraph()
@@ -267,8 +263,6 @@ class Testfp16Trapezoid(TestTrapezoidAPI):
                 out = self.paddle_api(y=y, x=x)

     def test_fp16_dygraph(self):
-        with _test_eager_guard():
-            self.func_dygraph()
         self.func_dygraph()
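With _test_eager_guard removed, each dygraph test now runs once, in the default eager mode. The API under test can be called directly; a minimal sketch of the keyword form used above (assuming paddle.trapezoid with the y/x signature these tests exercise):

import numpy as np
import paddle

y = paddle.to_tensor([1.0, 2.0, 3.0])
x = paddle.to_tensor([0.0, 1.0, 2.0])
out = paddle.trapezoid(y=y, x=x)  # composite trapezoidal rule
np.testing.assert_allclose(out.numpy(), 4.0, rtol=1e-05)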
python/paddle/fluid/tests/unittests/test_var_base.py
@@ -1018,9 +1018,7 @@ class TestVarBase(unittest.TestCase):
     def test_var_base_to_np(self):
         with fluid.dygraph.guard():
             var = fluid.dygraph.to_variable(self.array)
-            np.testing.assert_array_equal(
-                var.numpy(), fluid.framework._var_base_to_np(var)
-            )
+            np.testing.assert_array_equal(var.numpy(), var.numpy(False))

     def test_var_base_as_np(self):
         with fluid.dygraph.guard():
@@ -1051,7 +1049,7 @@ class TestVarBase(unittest.TestCase):
     def test_to_static_var(self):
         with fluid.dygraph.guard():
-            # Convert VarBase into Variable or Parameter
+            # Convert Tensor into Variable or Parameter
             var_base = fluid.dygraph.to_variable(self.array, name="var_base_1")
             static_var = var_base._to_static_var()
             self._assert_to_static(var_base, static_var)
@@ -1060,7 +1058,7 @@ class TestVarBase(unittest.TestCase):
             static_param = var_base._to_static_var(to_parameter=True)
             self._assert_to_static(var_base, static_param, True)

-            # Convert ParamBase into Parameter
+            # Convert EagerParamBase into Parameter
             fc = paddle.nn.Linear(
                 10,
                 20,
@@ -1078,7 +1076,7 @@ class TestVarBase(unittest.TestCase):
         if is_param:
             self.assertTrue(isinstance(static_var, fluid.framework.Parameter))
             self.assertTrue(static_var.persistable, True)
-            if isinstance(var_base, fluid.framework.ParamBase):
+            if isinstance(var_base, fluid.framework.EagerParamBase):
                 for attr in ['trainable', 'is_distributed', 'do_model_average']:
                     self.assertEqual(
                         getattr(var_base, attr), getattr(static_var, attr)
python/paddle/fluid/tests/unittests/xpu/test_unbind_op_xpu.py
@@ -27,7 +27,6 @@ from xpu.get_test_cover_info import (
 import paddle
 from paddle import fluid, tensor
 from paddle.fluid import Program, program_guard
-from paddle.fluid.framework import _test_eager_guard

 paddle.enable_static()
@@ -74,8 +73,7 @@ class XPUTestUnbindOP(XPUOpTestWrapper):
             np.testing.assert_array_equal(x.grad.numpy(), np_grad)

         def test_unbind_dygraph_final_state(self):
-            with _test_eager_guard():
-                self.test_unbind_dygraph()
+            self.test_unbind_dygraph()


 class TestLayersUnbind(unittest.TestCase):
     def test_layers_unbind(self):
python/paddle/framework/__init__.py
@@ -27,7 +27,6 @@ from ..fluid.core import CUDAPinnedPlace  # noqa: F401
 from ..fluid.core import NPUPlace  # noqa: F401
 from ..fluid.core import MLUPlace  # noqa: F401
 from ..fluid.core import CustomPlace  # noqa: F401
-from ..fluid.core import VarBase  # noqa: F401
 from ..fluid import core  # noqa: F401
 from ..fluid.dygraph import base, to_variable
@@ -51,7 +50,7 @@ from ..fluid.dygraph import monkey_patch_math_varbase
 from ..fluid.framework import disable_signal_handler  # noqa: F401
 from ..fluid.framework import get_flags  # noqa: F401
 from ..fluid.framework import set_flags  # noqa: F401
-from ..fluid.framework import Parameter, ParamBase
+from ..fluid.framework import Parameter
 from ..fluid.dygraph.base import enable_dygraph as disable_static  # noqa: F401
 from ..fluid.dygraph.base import disable_dygraph as enable_static  # noqa: F401
 from ..fluid.framework import _non_static_mode as in_dynamic_mode  # noqa: F401
python/paddle/framework/io.py
@@ -29,7 +29,6 @@ from paddle import fluid
 from paddle.fluid import core
 from paddle.fluid.framework import (
     EagerParamBase,
-    ParamBase,
     Program,
     Variable,
     _current_expected_place,
@@ -55,7 +54,7 @@ def _build_saved_state_dict(state_dict):
     save_dict = {}
     name_table = {}
     for key, value in state_dict.items():
-        if isinstance(value, (Variable, core.VarBase, core.eager.Tensor)):
+        if isinstance(value, (Variable, core.eager.Tensor)):
             if value.type == core.VarDesc.VarType.VOCAB:
                 save_dict[key] = value.value().get_map_tensor()
             else:
@@ -118,8 +117,8 @@ def _load_state_dict_from_save_inference_model(model_path, config):
 def _load_state_dict_from_save_params(model_path):
-    # Try to load all the files in the directory in VarBase format,
-    # the file name is used as the name of VarBase
+    # Try to load all the files in the directory in Tensor format,
+    # the file name is used as the name of Tensor
     load_var_list = []

     # 1. load file names
@@ -131,7 +130,7 @@ def _load_state_dict_from_save_params(model_path):
         var_name = tmp_var_name.replace("\\", "/")
         var_name_list.append(var_name)

-    # 2. create and load VarBase
+    # 2. create and load Tensor
     with fluid.dygraph.guard():
         for name in var_name_list:
             new_var = _varbase_creator(name=name, persistable=True)
@@ -320,17 +319,13 @@ def _pickle_save(obj, f, protocol):
     def add_dispatch_table():
         # This is not a good method, because the pickle module has been modified.
-        pickle.dispatch_table[core.VarBase] = reduce_varbase
-        pickle.dispatch_table[ParamBase] = reduce_varbase
         pickle.dispatch_table[core.eager.Tensor] = reduce_varbase
         pickle.dispatch_table[EagerParamBase] = reduce_varbase
         pickle.dispatch_table[core.LoDTensor] = reduce_LoDTensor
         pickle.dispatch_table.update(dispatch_table_layer)

     def pop_dispatch_table():
-        pickle.dispatch_table.pop(core.VarBase)
         pickle.dispatch_table.pop(core.LoDTensor)
-        pickle.dispatch_table.pop(ParamBase)
         pickle.dispatch_table.pop(core.eager.Tensor)
         pickle.dispatch_table.pop(EagerParamBase)
         for k in dispatch_table_layer:
@@ -349,9 +344,7 @@ def _pickle_save(obj, f, protocol):
         pickler = pickle.Pickler(f, protocol)
         pickler.dispatch_table = copyreg.dispatch_table.copy()
-        pickler.dispatch_table[core.VarBase] = reduce_varbase
         pickler.dispatch_table[core.LoDTensor] = reduce_LoDTensor
-        pickler.dispatch_table[ParamBase] = reduce_varbase
         pickler.dispatch_table[core.eager.Tensor] = reduce_varbase
         pickler.dispatch_table[EagerParamBase] = reduce_varbase
         pickler.dispatch_table.update(dispatch_table_layer)
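Both helpers lean on the standard pickle dispatch-table mechanism: map a class to a reducer returning a (constructor, args) pair, either globally via pickle.dispatch_table or per Pickler instance. A self-contained illustration with a toy class (nothing Paddle-specific):

import copyreg
import io
import pickle

class Box:
    def __init__(self, payload):
        self.payload = payload

def reduce_box(obj):
    # pickle will call Box(payload) again on load
    return (Box, (obj.payload,))

buf = io.BytesIO()
pickler = pickle.Pickler(buf, protocol=2)
pickler.dispatch_table = copyreg.dispatch_table.copy()
pickler.dispatch_table[Box] = reduce_box  # per-pickler override
pickler.dump(Box(42))
assert pickle.loads(buf.getvalue()).payload == 42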
@@ -390,24 +383,21 @@ def _is_state_dict(obj):
             (
                 paddle.nn.Layer,
                 Program,
-                core.VarBase,
                 core.eager.Tensor,
                 core.LoDTensor,
                 core.SelectedRows,
             ),
         )

-    # If the value of a dict is a core.VarBase/LoDTensor or a dict
-    # that does not contain a paddle type(Layer, Program, VarBase, LoDTensor, SelectedRows),
+    # If the value of a dict is a core.eager.Tensor/LoDTensor or a dict
+    # that does not contain a paddle type(Layer, Program, Tensor, LoDTensor, SelectedRows),
     # the dict is considered to be a state_ dict.
     for key, value in obj.items():
         if isinstance(value, dict):
             for k, v in value.items():
                 if _contain_x(v, condition):
                     return False
-        elif not isinstance(
-            value, (core.VarBase, core.eager.Tensor, core.LoDTensor)
-        ):
+        elif not isinstance(value, (core.eager.Tensor, core.LoDTensor)):
             return False
     return True
@@ -415,8 +405,8 @@ def _is_state_dict(obj):
 def _transformed_from_varbase(obj):
-    # In paddle2.1 version, VarBase is saved as tuple(tensor.name, tensor.numpy()).
-    # When executing paddle.load, use this function to determine whether to restore to VarBase/LoDTensor.
+    # In paddle2.1 version, Tensor is saved as tuple(tensor.name, tensor.numpy()).
+    # When executing paddle.load, use this function to determine whether to restore to Tensor/LoDTensor.
     if isinstance(obj, tuple) and len(obj) == 2:
         name_types = str
         if isinstance(obj[0], name_types) and isinstance(obj[1], np.ndarray):
@@ -426,7 +416,7 @@ def _transformed_from_varbase(obj):
 def _transformed_from_lodtensor(obj):
     # In paddle2.1 version, LoDTensor is saved as np.array(tensor).
-    # When executing paddle.load, use this function to determine whether to restore to VarBase/LoDTensor.
+    # When executing paddle.load, use this function to determine whether to restore to Tensor/LoDTensor.
     if isinstance(obj, np.ndarray):
         return True
     return False
@@ -498,7 +488,7 @@ def _parse_every_object(obj, condition_func, convert_func):
     else:
         if isinstance(obj, Iterable) and not isinstance(
             obj,
-            (str, np.ndarray, core.VarBase, core.eager.Tensor, core.LoDTensor),
+            (str, np.ndarray, core.eager.Tensor, core.LoDTensor),
         ):
             raise NotImplementedError(
                 "The iteratable objects supported are tuple, list, dict, OrderedDict, string. But received {}.".format(
@@ -642,7 +632,7 @@ def _save_binary_var(obj, path):
         _save_lod_tensor(obj, path)
     elif isinstance(obj, core.SelectedRows):
         _save_selected_rows(obj, path)
-    elif isinstance(obj, (core.VarBase, core.eager.Tensor)):
+    elif isinstance(obj, core.eager.Tensor):
         _save_lod_tensor(obj.value().get_tensor(), path)
     else:
         # Since the concept of 'Tensor' is only exposed to users, the error message can only contain tensor instead of 'LoDTensor' or 'SelectedRows'
python/paddle/framework/io_utils.py
@@ -180,7 +180,9 @@ def _load_program_scope(main=None, startup=None, scope=None):
 @static_only
 def _legacy_static_save(param_dict, model_path, protocol=2):
     def get_tensor(var):
-        if isinstance(var, (core.VarBase, core.eager.Tensor, core.LoDTensor)):
+        if isinstance(var, core.eager.Tensor):
+            return np.array(var)
+        elif isinstance(var, core.LoDTensor):
             return np.array(var)
         return var
python/paddle/hapi/model.py
@@ -58,9 +58,9 @@ def to_list(value):
 def to_numpy(var):
     assert isinstance(
-        var, (Variable, fluid.core.VarBase, fluid.core.eager.Tensor)
+        var, (Variable, fluid.core.eager.Tensor)
     ), "not a variable"
-    if isinstance(var, (fluid.core.VarBase, fluid.core.eager.Tensor)):
+    if isinstance(var, fluid.core.eager.Tensor):
         return np.array(var)
     t = global_scope().find_var(var.name).get_tensor()
     return np.array(t)
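After the change, to_numpy handles exactly two cases: an eager Tensor converts through the numpy protocol, and a static Variable is fetched from the global scope. A quick usage sketch of the eager branch (toy values):

import numpy as np
import paddle

t = paddle.to_tensor([[1.0, 2.0]])
arr = np.array(t)  # eager Tensors convert to ndarray directly
assert arr.shape == (1, 2)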
python/paddle/hapi/model_summary.py
@@ -261,7 +261,9 @@ def summary_string(model, input_size=None, dtypes=None, input=None):
     depth = len(list(model.sublayers()))

     def _get_shape_from_tensor(x):
-        if isinstance(x, (paddle.fluid.Variable, paddle.fluid.core.VarBase)):
+        if isinstance(
+            x, (paddle.fluid.Variable, paddle.fluid.core.eager.Tensor)
+        ):
             return list(x.shape)
         elif isinstance(x, (list, tuple)):
             return [_get_shape_from_tensor(xx) for xx in x]
python/paddle/jit/api.py
@@ -59,7 +59,6 @@ from paddle.nn import Layer
 from paddle.fluid.executor import Executor, scope_guard
 from paddle.fluid.framework import (
     Block,
-    ParamBase,
     Program,
     Variable,
     Parameter,
@@ -390,7 +389,7 @@ class _SaveLoadConfig:
                 % type(input)
             )
         for var in spec:
-            if not isinstance(var, core.VarBase):
+            if not isinstance(var, core.eager.Tensor):
                 raise TypeError(
                     "The element in config `output_spec` list should be 'Variable', but received element's type is %s."
                     % type(var)
@@ -543,7 +542,7 @@ def _get_input_var_names(inputs, input_spec):
             # name is None, the input_spec only can be InputSpec
             raise ValueError(name_none_error % spec)
         elif spec.name not in input_var_names:
-            # the input_spec can be `InputSpec` or `VarBase`
+            # the input_spec can be `InputSpec` or `Tensor`
             raise ValueError(name_no_exists_error % spec.name)
         else:
             result_list.append(spec.name)
@@ -973,7 +972,7 @@ def save(layer, path, input_spec=None, **configs):
         for var in paddle.utils.flatten(input_spec):
             if isinstance(var, paddle.static.InputSpec):
                 inner_input_spec.append(var)
-            elif isinstance(var, (core.VarBase, core.eager.Tensor, Variable)):
+            elif isinstance(var, (core.eager.Tensor, Variable)):
                 inner_input_spec.append(
                     paddle.static.InputSpec.from_tensor(var)
                 )
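For paddle.jit.save callers nothing changes: input_spec may still mix InputSpec objects with live tensors, and tensors are normalized via InputSpec.from_tensor. A minimal sketch (toy layer, hypothetical output path):

import paddle
from paddle.static import InputSpec

layer = paddle.nn.Linear(8, 2)
paddle.jit.save(
    layer, 'saved/linear', input_spec=[InputSpec([None, 8], 'float32')]
)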
@@ -1132,7 +1131,7 @@ def save(layer, path, input_spec=None, **configs):
             extra_info_dict['stop_gradient'] = param_or_buffer.stop_gradient
-            if isinstance(param_or_buffer, (ParamBase, EagerParamBase)):
+            if isinstance(param_or_buffer, EagerParamBase):
                 extra_info_dict['trainable'] = param_or_buffer.trainable
             extra_var_info[param_or_buffer.name] = extra_info_dict
@@ -1151,7 +1150,7 @@ def save(layer, path, input_spec=None, **configs):
         # NOTE(chenweihang): [ Get output variables ]
         # the rule is like [ Get input variables name ]. For output var,
-        # we only support VarBase spec, and actually, we only need the
+        # we only support Tensor spec, and actually, we only need the
         # var name of output, and we don't recommended to use output_spec
         # print(concrete_program.main_program)
         # print(concrete_program.outputs, configs.output_spec)
python/paddle/jit/dy2static/base_transformer.py
@@ -205,7 +205,7 @@ class ForNodeVisitor:
     In this process, the semantics of for does not change.

-    Now only can parse 3 type statements (Here var is VarBase(Tensor) or python variable):
+    Now only can parse 3 type statements (Here var is Tensor(Tensor) or python variable):
         1). for x in range(var[*]|var.numpy()[*])
         2). for x in var|var.numpy()
         3). for i, x enumerate(var|var.numpy())
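Concretely, the three statement forms the docstring says ForNodeVisitor can parse look like this in user code (a sketch only; the `pass` bodies stand in for real work):

import paddle

var = paddle.to_tensor([1.0, 2.0, 3.0])
n = paddle.to_tensor([3])

for x in range(n.numpy()[0]):  # form 1): range over var.numpy()[*]
    pass
for x in var:                  # form 2): iterate the Tensor directly
    pass
for i, x in enumerate(var):    # form 3): enumerate the Tensor
    pass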
python/paddle/jit/dy2static/function_spec.py
@@ -286,7 +286,7 @@ def _replace_value_with_input_spec(args):
         if isinstance(input_var, np.ndarray):
             input_var = paddle.static.InputSpec.from_numpy(input_var)
             input_var.stop_gradient = True
-        elif isinstance(input_var, (core.VarBase, core.eager.Tensor)):
+        elif isinstance(input_var, core.eager.Tensor):
             stop_gradient = input_var.stop_gradient
             input_var = paddle.static.InputSpec.from_tensor(input_var)
             input_var.stop_gradient = stop_gradient
@@ -341,7 +341,7 @@ def convert_to_input_spec(inputs, input_spec):
         # without specific InputSpec, raise warning.
         if len(inputs) > len(input_spec):
             for rest_input in inputs[len(input_spec):]:
-                if isinstance(rest_input, (core.VarBase, np.ndarray)):
+                if isinstance(rest_input, (core.eager.Tensor, np.ndarray)):
                     logging_utils.warn(
                         "The inputs constain `{}` without specificing InputSpec, its shape and dtype will be treated immutable. "
                         "Please specific InputSpec information in `@to_static` if you expect them as mutable inputs.".format(
python/paddle/jit/dy2static/partial_program.py
@@ -59,9 +59,7 @@ class NestSequence:
     def _get_var_ids(self):
         var_ids = []
         for idx, var in enumerate(self.__input_list):
-            if isinstance(
-                var, (framework.Variable, core.VarBase, core.eager.Tensor)
-            ):
+            if isinstance(var, (framework.Variable, core.eager.Tensor)):
                 var_ids.append(idx)
         return var_ids
@@ -73,9 +71,7 @@ class NestSequence:
         if need_check:
             warning_types = set()
             for var in self.__input_list:
-                if not isinstance(
-                    var, (framework.Variable, core.VarBase, core.eager.Tensor)
-                ):
+                if not isinstance(var, (framework.Variable, core.eager.Tensor)):
                     warning_types.add(type(var))
             if warning_types:
                 logging_utils.warn(
@@ -161,7 +157,7 @@ class PartialProgramLayer:
         main_program(Program): The main program that contains ops need to be executed.
         inputs(list[Variable]): The input list of the decorated function by `@to_static`.
         outputs(list[Variable]): The output list of the decorated function by `@to_static`.
-        parameters(list[VarBase]|None): All trainable parameters included in the program. Default None.
+        parameters(list[Tensor]|None): All trainable parameters included in the program. Default None.

     Returns:
         Layer: A Layer object that run all ops internally in static graph mode.
@@ -669,22 +665,13 @@
             if "@GRAD" in name:
                 var_desc = block.vars[name].desc
-                var_base = None
-                if not framework.global_var._in_eager_mode_:
-                    var_base = core.VarBase(
-                        var_desc.dtype(),
-                        var_desc.shape(),
-                        var_desc.name(),
-                        var_desc.type(),
-                        False,
-                    )
-                else:
-                    var_base = core.eager.Tensor(
-                        var_desc.dtype(),
-                        var_desc.shape(),
-                        var_desc.name(),
-                        var_desc.type(),
-                        False,
-                    )
+                var_base = core.eager.Tensor(
+                    var_desc.dtype(),
+                    var_desc.shape(),
+                    var_desc.name(),
+                    var_desc.type(),
+                    False,
+                )
                 double_grads.append(var_base)
         return self._valid_vars(double_grads)
@@ -866,29 +853,20 @@
         assert isinstance(inputs, (tuple, list))
         # Flatten inputs with nested structure into single list.
         flatten_inputs = paddle.utils.flatten(inputs)
-        # Convert variable into VarBase and feed in training data.
+        # Convert variable into Tensor and feed in training data.
         input_vars = []
         expected_place = framework._current_expected_place()
         for i, value in enumerate(flatten_inputs):
             if isinstance(value, np.ndarray):
-                var = None
-                if not framework.global_var._in_eager_mode_:
-                    var = core.VarBase(
-                        value=value,
-                        name=self._inputs[i].desc.name(),
-                        persistable=False,
-                        place=expected_place,
-                        zero_copy=True,
-                    )
-                else:
-                    var = core.eager.Tensor(
-                        value=value,
-                        name=self._inputs[i].desc.name(),
-                        persistable=False,
-                        place=expected_place,
-                        zero_copy=True,
-                    )
-            elif isinstance(value, (core.VarBase, core.eager.Tensor)):
+                var = core.eager.Tensor(
+                    value=value,
+                    name=self._inputs[i].desc.name(),
+                    persistable=False,
+                    place=expected_place,
+                    zero_copy=True,
+                )
+            elif isinstance(value, core.eager.Tensor):
                 # NOTE(Aurelius84): If var is on CPUPlace, it will be transformed multi times
                 # into CUDAPlace when it's as input of multi Ops. so we move it in advance
                 # to avoid this problem.
@@ -904,7 +882,7 @@
                 continue
             input_vars.append(var)

-        # mapping from name(string) -> VarBase
+        # mapping from name(string) -> Tensor
         out_varbase_map = {}

         def create_out(var_id):
@@ -916,27 +894,18 @@
             if var_desc.name() in out_varbase_map:
                 return out_varbase_map[var_desc.name()]

-            if not framework.global_var._in_eager_mode_:
-                var_base = core.VarBase(
-                    var_desc.dtype(),
-                    var_desc.shape(),
-                    var_desc.name(),
-                    var_desc.type(),
-                    False,
-                )
-            else:
-                var_base = core.eager.Tensor(
-                    var_desc.dtype(),
-                    var_desc.shape(),
-                    var_desc.name(),
-                    var_desc.type(),
-                    False,
-                )
+            var_base = core.eager.Tensor(
+                var_desc.dtype(),
+                var_desc.shape(),
+                var_desc.name(),
+                var_desc.type(),
+                False,
+            )
             var_base.stop_gradient = var.stop_gradient
             out_varbase_map[var_desc.name()] = var_base
             return var_base

-        # Create VarBase to receive output data.
+        # Create Tensor to receive output data.
         out_vars = list(map(create_out, self._outputs.var_ids))
         return input_vars, out_vars
@@ -947,21 +916,11 @@
         inner_scope = self._get_scope(
             program_id=program_id, use_scope_cache=use_scope_cache
         )
-        if not framework.global_var._in_eager_mode_:
-            tmp_scope_vec = core.VarBase(
-                core.VarDesc.VarType.FP32,
-                [],
-                "program_out_scope",
-                core.VarDesc.VarType.STEP_SCOPES,
-                True,
-            )
-            tmp_scope_vec.value().set_scope(inner_scope)
-        else:
-            tmp_scope_vec = [inner_scope]
+        tmp_scope_vec = [inner_scope]
         return tmp_scope_vec

     def _create_cuda_graph_vec(self):
-        var = core.VarBase(
+        var = core.eager.Tensor(
             core.VarDesc.VarType.FP32,
             [],
             "cuda_graph",
@@ -973,7 +932,7 @@
     def _restore_out(self, out_vars):
         """
-        Restores same nested outputs by only replacing the Variable with VarBase.
+        Restores same nested outputs by only replacing the Variable with Tensor.
         """

         flatten_outputs = self._outputs.tolist()
@@ -990,9 +949,7 @@
         return main_program.clone(for_test=True)

     def _is_no_value(self, var):
-        if isinstance(
-            var, (core.VarBase, core.eager.Tensor)
-        ) and var.shape == [1]:
+        if isinstance(var, core.eager.Tensor) and var.shape == [1]:
             # NOTE: .numpy() will insert MemcpySync operation, it hits performance.
             if var.numpy()[0] == RETURN_NO_VALUE_MAGIC_NUM:
                 return True
@@ -1002,7 +959,7 @@
         """
         Removes invalid value for various-length return statement
         """
-        if isinstance(out_vars, (core.VarBase, core.eager.Tensor)):
+        if isinstance(out_vars, core.eager.Tensor):
             if self._is_no_value(out_vars):
                 return None
             return out_vars
@@ -1029,7 +986,7 @@
     def _set_grad_type(self, params, train_program):
         # NOTE: if user set sparse gradient mode, the param's gradient
         # will be SelectedRows, not LoDTensor. But tracer will just
-        # set param grad VarBase by forward VarBase (LoDTensor)
+        # set param grad Tensor by forward Tensor (LoDTensor)
         # If we don't change grad_var type here, RunProgramOp need
         # transform SelectedRows to LoDTensor forcibly, it may not
        # be user wanted result.
@@ -1057,9 +1014,9 @@
     def _check_params_all_inited(self, main_program):
         """
         Check all params from main program are already initialized, see details as follows:
-            1. all parameters in self._params should be type `framework.ParamBase` which are created in dygraph.
+            1. all parameters in self._params should be type `framework.EagerParamBase` which are created in dygraph.
             2. all parameters from transformed program can be found in self._params.
-               Because they share same data with ParamBase of original dygraph.
+               Because they share same data with EagerParamBase of original dygraph.
         """
         if not isinstance(self._params, (list, tuple)):
             raise TypeError(
@@ -1070,7 +1027,7 @@
         param_and_buffer_names_set = set()
         for i, var in enumerate(self._params):
             # self._params constains parameters and buffers with persistable=True.
-            if not isinstance(var, (core.VarBase, core.eager.Tensor)):
+            if not isinstance(var, core.eager.Tensor):
                 raise TypeError(
                     'Type of self._params[{}] in PartialProgramLayer should be Parameter or Variable, but received {}.'.format(
                         i, type(var)
python/paddle/jit/translated_layer.py
@@ -648,22 +648,13 @@ def _load_persistable_vars_by_program(
         orig_each_name = program_holder._suffix_varname_dict[each_var.name()]
         if _is_parameter(each_var, program_holder.infer_program):
             # create output varbase
-            if framework._in_eager_without_dygraph_check():
-                new_var = framework.EagerParamBase(
-                    shape=each_var.shape(),
-                    dtype=each_var.dtype(),
-                    name=each_var.name(),
-                    type=each_var.type(),
-                    persistable=True,
-                )
-            else:
-                new_var = framework.ParamBase(
-                    shape=each_var.shape(),
-                    dtype=each_var.dtype(),
-                    name=each_var.name(),
-                    type=each_var.type(),
-                    persistable=True,
-                )
+            new_var = framework.EagerParamBase(
+                shape=each_var.shape(),
+                dtype=each_var.dtype(),
+                name=each_var.name(),
+                type=each_var.type(),
+                persistable=True,
+            )
         else:
             new_var = framework._varbase_creator(
                 type=each_var.type(),
@@ -746,24 +737,12 @@ def _load_persistable_vars(
             # create output varbase
             if extra_var_info[name].get('trainable', None) is not None:
                 # use default shape and dtype
-                if framework._in_eager_without_dygraph_check():
-                    new_var = framework.EagerParamBase(
-                        shape=[1],  # only to pass check, this shape is not meaningful
-                        dtype=core.VarDesc.VarType.FP32,
-                        name=new_name,
-                        persistable=True,
-                    )
-                else:
-                    new_var = framework.ParamBase(
-                        shape=[1],  # only to pass check, this shape is not meaningful
-                        dtype=core.VarDesc.VarType.FP32,
-                        name=new_name,
-                        persistable=True,
-                    )
+                new_var = framework.EagerParamBase(
+                    shape=[1],  # only to pass check, this shape is not meaningful
+                    dtype=core.VarDesc.VarType.FP32,
+                    name=new_name,
+                    persistable=True,
+                )
             else:
                 new_var = framework._varbase_creator(
                     name=new_name, persistable=True
@@ -901,29 +880,20 @@ def _run_dygraph(instance, input, program_holder):
     # 1. prepare inputs, outputs, attrs
     input_vars = []
     for i, value in enumerate(input):
-        if not isinstance(value, (np.ndarray, core.VarBase, core.eager.Tensor)):
+        if not isinstance(value, (np.ndarray, core.eager.Tensor)):
             raise TypeError(
-                "The type of input in TranslatedLayer must be numpy array or Variable(VarBase), but received %s."
+                "The type of input in TranslatedLayer must be numpy array or Variable(Tensor), but received %s."
                 % type(value)
             )
-        # NOTE: In order to unify the API, firstly convert the input to VarBase
+        # NOTE: In order to unify the API, firstly convert the input to Tensor
         if isinstance(value, np.ndarray):
-            if framework._in_eager_without_dygraph_check():
-                var = core.eager.Tensor(
-                    value=value,
-                    name=program_holder.input_descs[i].name(),
-                    persistable=False,
-                    place=framework._current_expected_place(),
-                    zero_copy=True,
-                )
-            else:
-                var = core.VarBase(
-                    value=value,
-                    name=program_holder.input_descs[i].name(),
-                    persistable=False,
-                    place=framework._current_expected_place(),
-                    zero_copy=True,
-                )
+            var = core.eager.Tensor(
+                value=value,
+                name=program_holder.input_descs[i].name(),
+                persistable=False,
+                place=framework._current_expected_place(),
+                zero_copy=True,
+            )
         else:
             var = value
             # NOTE: we changed var name here,
@@ -950,55 +920,27 @@ def _run_dygraph(instance, input, program_holder):
     output_vars = []
     for var_desc in program_holder.output_descs:
-        if framework._in_eager_without_dygraph_check():
-            var = core.eager.Tensor(
-                dtype=var_desc.dtype(),
-                dims=var_desc.shape(),
-                name=var_desc.name(),
-                type=var_desc.type(),
-                persistable=False,
-            )
-        else:
-            var = core.VarBase(
-                var_desc.dtype(),
-                var_desc.shape(),
-                var_desc.name(),
-                var_desc.type(),
-                False,
-            )
+        var = core.eager.Tensor(
+            dtype=var_desc.dtype(),
+            dims=var_desc.shape(),
+            name=var_desc.name(),
+            type=var_desc.type(),
+            persistable=False,
+        )
         output_vars.append(var)

     # hold forward variables
-    if framework._in_eager_without_dygraph_check():
-        tmp_scope_vec = [program_holder.scope]
-    else:
-        tmp_scope_vec = core.VarBase(
-            core.VarDesc.VarType.FP32,
-            [],
-            "program_out_scope",
-            core.VarDesc.VarType.STEP_SCOPES,
-            True,
-        )
-        tmp_scope_vec.value().set_scope(program_holder.scope)
+    tmp_scope_vec = [program_holder.scope]

     double_grad_vars = []
     for var_desc in program_holder.double_grad_descs:
-        if framework._in_eager_without_dygraph_check():
-            var = core.eager.Tensor(
-                dtype=var_desc.dtype(),
-                dims=var_desc.shape(),
-                name=var_desc.name(),
-                type=var_desc.type(),
-                persistable=False,
-            )
-        else:
-            var = core.VarBase(
-                var_desc.dtype(),
-                var_desc.shape(),
-                var_desc.name(),
-                var_desc.type(),
-                False,
-            )
+        var = core.eager.Tensor(
+            dtype=var_desc.dtype(),
+            dims=var_desc.shape(),
+            name=var_desc.name(),
+            type=var_desc.type(),
+            persistable=False,
+        )
         double_grad_vars.append(var)

     # 2. run program by op
@@ -1064,7 +1006,7 @@ def _run_dygraph(instance, input, program_holder):
     # NOTE: [ why need set param's gradient type here ]
     # if user set sparse gradient mode, the param's gradient
     # will be SelectedRows, not LoDTensor. But tracer will just
-    # set param grad VarBase by forward VarBase (LoDTensor)
+    # set param grad Tensor by forward Tensor (LoDTensor)
     # If we don't change grad_var type here, RunProgramOp need
     # transform SelectedRows to LoDTensor forcibly, it may not
     # be user wanted result.
@@ -1454,20 +1396,18 @@ class TranslatedLayer(layers.Layer):
         # When add parameter or buffer to Layer by follow apis,
         # the variable name can't contain `.`, beccause which may cause
         # AttributeError when access the newly added parameter or buffer
-        # in the form of `self.**.**``, but the ParamBase or BarBase
+        # in the form of `self.**.**``, but the EagerParamBase or BarBase
         # name contains `.` originally, such as `linear_0.w_0`, so here
         # need to generate new var name for each var
         self._persistable_var_name_dict = {}
         # the TranslatedLayer object holded var names count started from 0
         with unique_name.guard():
             for name, var in persistable_vars.items():
-                if isinstance(
-                    var, (framework.ParamBase, framework.EagerParamBase)
-                ):
+                if isinstance(var, framework.EagerParamBase):
                     dy_name = _generate_unique_var_name(PARAMETER_NAME_PREFIX)
                     self._persistable_var_name_dict[name] = dy_name
                     self.add_parameter(dy_name, var)
-                elif isinstance(var, (core.VarBase, core.eager.Tensor)):
+                elif isinstance(var, core.eager.Tensor):
                     dy_name = _generate_unique_var_name(BUFFER_NAME_PREFIX)
                     self._persistable_var_name_dict[name] = dy_name
                     self.register_buffer(dy_name, var)
python/paddle/nn/layer/layers.py
(diff collapsed)
python/paddle/optimizer/optimizer.py
@@ -383,7 +383,7 @@ class Optimizer:
             if isinstance(load_para, Variable):
                 load_para_np = np.array(load_para)
-            elif isinstance(load_para, core.VarBase):
+            elif isinstance(load_para, core.eager.Tensor):
                 load_para_np = np.array(load_para)
             elif isinstance(load_para, np.ndarray):
                 load_para_np = load_para
python/paddle/static/input.py
@@ -195,7 +195,7 @@ class InputSpec:
             print(x_spec)  # InputSpec(shape=(2, 2), dtype=paddle.float32, name=x)

         """
-        if isinstance(tensor, (Variable, core.VarBase, core.eager.Tensor)):
+        if isinstance(tensor, (Variable, core.eager.Tensor)):
             return cls(tensor.shape, tensor.dtype, name or tensor.name)
         else:
             raise ValueError(
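from_tensor now accepts a Variable or an eager Tensor, nothing else. Typical usage, matching the docstring above:

import paddle
from paddle.static import InputSpec

x = paddle.ones([2, 2], dtype='float32')
x_spec = InputSpec.from_tensor(x, name='x')
print(x_spec)  # InputSpec(shape=(2, 2), dtype=paddle.float32, name=x)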
python/paddle/static/nn/control_flow.py
@@ -368,7 +368,7 @@ def assign_skip_lod_tensor_array(input, output):
             return True
         return False

-    if not isinstance(input, (Variable, core.VarBase)):
+    if not isinstance(input, (Variable, core.eager.Tensor)):
         if isinstance(output, Variable) and isinstance(
             input, support_ret_buildin_type
         ):
python/paddle/static/quantization/tests/test_imperative_ptq.py
(diff collapsed)
python/paddle/static/quantization/tests/test_imperative_qat_user_defined.py
(diff collapsed)
python/paddle/tensor/creation.py
(diff collapsed)
python/paddle/tensor/logic.py
(diff collapsed)
python/paddle/tensor/manipulation.py
(diff collapsed)
python/paddle/utils/cpp_extension/extension_utils.py
(diff collapsed)
tools/check_file_diff_approvals.sh
(diff collapsed)
tools/count_api_without_core_ops.py
(diff collapsed)
tools/jetson_infer_op.py
(diff collapsed)