PaddlePaddle / Paddle · Commit 73df2b1e
Unverified commit 73df2b1e
Authored on Mar 29, 2023 by zhouweiwei2014; committed via GitHub on Mar 29, 2023
[Zero-Dim] change Tensor.numpy() usage to other equivalent usage, avoid hack (#52197)
Parent: d966301e

Showing 19 changed files with 46 additions and 47 deletions (+46 -47)
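Taken together, the diffs below replace internal Tensor.numpy() hacks with equivalent calls. A minimal sketch of the old and new idioms (not part of the commit, assuming a recent Paddle 2.x install running in dynamic graph mode):

import numpy as np
import paddle

x = paddle.to_tensor([3.14])       # single-element Tensor
v = x.item()                       # replaces the x.numpy()[0] hack; returns a plain Python float
lst = paddle.randperm(5).tolist()  # replaces .numpy().tolist()
arr = np.array(x)                  # replaces x.numpy() where a real numpy array is needed
print(v, lst, arr.dtype)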
paddle/fluid/pybind/eager_method.cc                                           +4  -3
python/paddle/distributed/fleet/meta_parallel/pp_utils/p2p_communication.py  +1  -1
python/paddle/fluid/dataloader/dataset.py                                     +1  -1
python/paddle/fluid/dygraph/math_op_patch.py                                  +4  -4
python/paddle/fluid/dygraph/varbase_patch_methods.py                          +4  -4
python/paddle/fluid/layers/control_flow.py                                    +2  -2
python/paddle/framework/io.py                                                 +4  -4
python/paddle/framework/io_utils.py                                           +1  -3
python/paddle/hapi/model.py                                                   +1  -1
python/paddle/metric/metrics.py                                               +7  -7
python/paddle/nn/decode.py                                                    +1  -1
python/paddle/nn/functional/common.py                                         +1  -1
python/paddle/nn/functional/pooling.py                                        +2  -2
python/paddle/optimizer/optimizer.py                                          +2  -2
python/paddle/quantization/imperative/ptq_quantizer.py                        +4  -4
python/paddle/static/nn/metric.py                                             +1  -1
python/paddle/tensor/array.py                                                 +2  -2
python/paddle/tensor/manipulation.py                                          +2  -2
python/paddle/utils/layers_utils.py                                           +2  -2
paddle/fluid/pybind/eager_method.cc
@@ -141,10 +141,11 @@ static PyObject* tensor_method_numpy(TensorObject* self,
         "order to avoid this problem, "
         "0D Tensor will be changed to 1D numpy currently, but it's not "
         "correct and will be "
-        "removed in future. Please modify "
-        " 'Tensor.numpy()[0]' to 'float(Tensor)' as soon as "
+        "removed in future. For Tensor contain only one element, Please "
+        "modify "
+        " 'Tensor.numpy()[0]' to 'Tensor.item()' as soon as "
         "possible, "
-        "otherwise 'Tensor.numpy()[0]' will raise error";
+        "otherwise 'Tensor.numpy()[0]' will raise error in future.";
     py_rank = 1;
     py_dims[0] = 1;
     py_strides[0] = sizeof_dtype * numel;
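The reworded warning above now steers single-element uses of Tensor.numpy()[0] toward Tensor.item(). A small illustration (values are made up):

import paddle

t = paddle.to_tensor([2.5])
print(t.item())    # 2.5 — the replacement the updated message recommends
print(float(t))    # still valid for a Tensor holding exactly one element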
python/paddle/distributed/fleet/meta_parallel/pp_utils/p2p_communication.py
@@ -83,7 +83,7 @@ class SendRecvMeta:
         # recv stop_gradient
         stop_grad = paddle.to_tensor([0])
         paddle.distributed.recv(stop_grad, src=src_rank, group=group)
-        return shape.numpy().tolist(), dtype.item(), stop_grad.item()
+        return shape.tolist(), dtype.item(), stop_grad.item()

     def recv_meta(self, group):
         tensor_type = paddle.to_tensor([0])
python/paddle/fluid/dataloader/dataset.py
@@ -514,7 +514,7 @@ def random_split(dataset, lengths, generator=None):
         )
     # TODO(@Joejiong): support Variable or Tensor type with .tolist class member function.
     # For example var.item() and var.tolist()
-    indices = paddle.randperm(sum(lengths)).numpy().tolist()
+    indices = paddle.randperm(sum(lengths)).tolist()
     return [
         Subset(dataset, indices[offset - length : offset])
         for offset, length in zip(_accumulate(lengths), lengths)
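random_split above now takes the permutation with Tensor.tolist() directly. A usage sketch through the public paddle.io entry points (the dataset and split sizes are made up):

import paddle
from paddle.io import TensorDataset, random_split

data = paddle.arange(10, dtype='float32').reshape([10, 1])
full = TensorDataset([data])
train, val = random_split(full, [7, 3])
print(len(train), len(val))   # 7 3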
python/paddle/fluid/dygraph/math_op_patch.py
@@ -140,21 +140,21 @@ def monkey_patch_math_varbase():
         ), "only one element variable can be converted to float."
         tensor = var.value().get_tensor()
         assert tensor._is_initialized(), "variable's tensor is not initialized"
-        return float(var.numpy().flatten()[0])
+        return float(var.item())

     def _long_(var):
         numel = np.prod(var.shape)
         assert numel == 1, "only one element variable can be converted to long."
         tensor = var.value().get_tensor()
         assert tensor._is_initialized(), "variable's tensor is not initialized"
-        return int(var.numpy().flatten()[0])
+        return int(var.item())

     def _int_(var):
         numel = np.prod(var.shape)
         assert numel == 1, "only one element variable can be converted to int."
         tensor = var.value().get_tensor()
         assert tensor._is_initialized(), "variable's tensor is not initialized"
-        return int(var.numpy().flatten()[0])
+        return int(var.item())

     def _len_(var):
         assert var.ndim > 0, "len() of a 0D tensor is wrong"
@@ -172,7 +172,7 @@ def monkey_patch_math_varbase():
         ), "only one element variable can be converted to python index."
         tensor = var.value().get_tensor()
         assert tensor._is_initialized(), "variable's tensor is not initialized"
-        return int(var.numpy().flatten()[0])
+        return int(var.item())

     @property
     def _ndim_(var):
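With the patches above, Python-level conversions of a one-element dygraph Tensor are backed by Tensor.item(). A brief illustration (values are made up):

import paddle

v = paddle.to_tensor([7], dtype='int64')
print(int(v), float(v))   # 7 7.0 — both now go through v.item()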
python/paddle/fluid/dygraph/varbase_patch_methods.py
@@ -379,8 +379,8 @@ def monkey_patch_varbase():
             if self.grad is None:
                 return None
             if self.grad.is_selected_rows():
-                return (np.array(self.grad.numpy()), np.array(self.grad.rows()))
-            return self.grad.numpy()
+                return (np.array(self.grad), np.array(self.grad.rows()))
+            return np.array(self.grad)
         else:
             if self._grad_ivar() is None:
                 return None
@@ -735,11 +735,11 @@ def monkey_patch_varbase():
         ), "When Variable is used as the condition of if/while , Variable can only contain one element."
         if framework.global_var._in_eager_mode_:
             assert self._is_initialized(), "tensor not initialized"
-            return bool(np.all(self.numpy() > 0))
+            return bool(self.item() > 0)
         else:
             tensor = self.value().get_tensor()
             assert tensor._is_initialized(), "tensor not initialized"
-            return bool(np.all(tensor.__array__() > 0))
+            return bool(self.item() > 0)

     def __bool__(self):
         return self.__nonzero__()
python/paddle/fluid/layers/control_flow.py
@@ -1150,7 +1150,7 @@ def while_loop(cond, body, loop_vars, is_test=False, name=None):
         )

     if in_dygraph_mode():
-        now_cond = pre_cond.numpy().item()
+        now_cond = pre_cond.item()
         while now_cond:
             output_vars = body(*loop_vars)
             if not isinstance(output_vars, (list, tuple)):
@@ -1160,7 +1160,7 @@ def while_loop(cond, body, loop_vars, is_test=False, name=None):
                     "body in while_loop should return the same arity "
                     "(length and structure) and types as loop_vars"
                 )
-            now_cond = cond(*output_vars).numpy().item()
+            now_cond = cond(*output_vars).item()
             map_structure(assign_skip_lod_tensor_array, output_vars, loop_vars)
         return loop_vars
     else:
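In dynamic mode the loop above now reads each condition value with Tensor.item(). A usage sketch patterned on the public while_loop API, with made-up loop bounds (paddle.static.nn.while_loop is assumed to dispatch to this helper):

import paddle

def cond(i, ten):
    return i < ten            # a boolean Tensor; the dygraph path calls .item() on it

def body(i, ten):
    return [i + 1, ten]

i = paddle.full(shape=[1], fill_value=0, dtype='int64')
ten = paddle.full(shape=[1], fill_value=10, dtype='int64')
i, ten = paddle.static.nn.while_loop(cond, body, [i, ten])
print(i.item())               # 10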
python/paddle/framework/io.py
@@ -63,7 +63,7 @@ def _build_saved_state_dict(state_dict):
                 raise ValueError(
                     "The saved tensor is not initialized. If you used group sharded, please use save_group_sharded_model."
                 )
-            save_dict[key] = value.numpy()
+            save_dict[key] = np.array(value)
             name_table[key] = value.name
         else:
             save_dict[key] = value
@@ -92,7 +92,7 @@ def _load_state_dict_from_save_inference_model(model_path, config):
     # 3. construct state_dict
     load_param_dict = {}
     for var_name in persistable_var_dict:
-        load_param_dict[var_name] = persistable_var_dict[var_name].numpy()
+        load_param_dict[var_name] = np.array(persistable_var_dict[var_name])

     # if *.info exists, we can recover structured_name
     var_info_filename = str(config.params_filename) + ".info"
@@ -146,7 +146,7 @@ def _load_state_dict_from_save_params(model_path):
     # 3. construct state_dict
     load_param_dict = {}
     for var in load_var_list:
-        load_param_dict[var.name] = var.numpy()
+        load_param_dict[var.name] = np.array(var)

     return load_param_dict
@@ -291,7 +291,7 @@ def _pickle_save(obj, f, protocol):
         )

     def reduce_varbase(self):
-        data = self.numpy()
+        data = np.array(self)
         name = self.name

         return (tuple, ((name, data),))
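The save/load helpers above now convert parameters with np.array(...) before pickling. A round trip through the public API that exercises them (layer sizes and the file name are arbitrary):

import paddle

layer = paddle.nn.Linear(2, 4)
paddle.save(layer.state_dict(), 'linear.pdparams')
state = paddle.load('linear.pdparams')
layer.set_state_dict(state)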
python/paddle/framework/io_utils.py
@@ -180,9 +180,7 @@ def _load_program_scope(main=None, startup=None, scope=None):
 @static_only
 def _legacy_static_save(param_dict, model_path, protocol=2):
     def get_tensor(var):
-        if isinstance(var, (core.VarBase, core.eager.Tensor)):
-            return var.numpy()
-        elif isinstance(var, core.LoDTensor):
+        if isinstance(var, (core.VarBase, core.eager.Tensor, core.LoDTensor)):
             return np.array(var)
         return var
python/paddle/hapi/model.py
@@ -61,7 +61,7 @@ def to_numpy(var):
         var, (Variable, fluid.core.VarBase, fluid.core.eager.Tensor)
     ), "not a variable"
     if isinstance(var, (fluid.core.VarBase, fluid.core.eager.Tensor)):
-        return var.numpy()
+        return np.array(var)
     t = global_scope().find_var(var.name).get_tensor()
     return np.array(t)
python/paddle/metric/metrics.py
@@ -292,7 +292,7 @@ class Accuracy(Metric):
             Tensor: the accuracy of current step.
         """
         if isinstance(correct, (paddle.Tensor, paddle.fluid.core.eager.Tensor)):
-            correct = correct.numpy()
+            correct = np.array(correct)
         num_samples = np.prod(np.array(correct.shape[:-1]))
         accs = []
         for i, k in enumerate(self.topk):
@@ -420,12 +420,12 @@ class Precision(Metric):
                 The data type is 'int32' or 'int64'.
         """
         if isinstance(preds, (paddle.Tensor, paddle.fluid.core.eager.Tensor)):
-            preds = preds.numpy()
+            preds = np.array(preds)
         elif not _is_numpy_(preds):
             raise ValueError("The 'preds' must be a numpy ndarray or Tensor.")

         if isinstance(labels, (paddle.Tensor, paddle.fluid.core.eager.Tensor)):
-            labels = labels.numpy()
+            labels = np.array(labels)
         elif not _is_numpy_(labels):
             raise ValueError("The 'labels' must be a numpy ndarray or Tensor.")
@@ -553,12 +553,12 @@ class Recall(Metric):
                 Shape: [batch_size, 1], Dtype: 'int32' or 'int64'.
         """
         if isinstance(preds, (paddle.Tensor, paddle.fluid.core.eager.Tensor)):
-            preds = preds.numpy()
+            preds = np.array(preds)
         elif not _is_numpy_(preds):
             raise ValueError("The 'preds' must be a numpy ndarray or Tensor.")

         if isinstance(labels, (paddle.Tensor, paddle.fluid.core.eager.Tensor)):
-            labels = labels.numpy()
+            labels = np.array(labels)
         elif not _is_numpy_(labels):
             raise ValueError("The 'labels' must be a numpy ndarray or Tensor.")
@@ -705,12 +705,12 @@ class Auc(Metric):
                 representing the label of the instance i.
         """
         if isinstance(labels, (paddle.Tensor, paddle.fluid.core.eager.Tensor)):
-            labels = labels.numpy()
+            labels = np.array(labels)
         elif not _is_numpy_(labels):
             raise ValueError("The 'labels' must be a numpy ndarray or Tensor.")

         if isinstance(preds, (paddle.Tensor, paddle.fluid.core.eager.Tensor)):
-            preds = preds.numpy()
+            preds = np.array(preds)
         elif not _is_numpy_(preds):
             raise ValueError("The 'preds' must be a numpy ndarray or Tensor.")
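The metric update() methods above now coerce Tensor inputs with np.array(...). A short Accuracy example with toy predictions:

import paddle

m = paddle.metric.Accuracy()
preds = paddle.to_tensor([[0.1, 0.9], [0.8, 0.2]])
labels = paddle.to_tensor([[1], [0]])
correct = m.compute(preds, labels)
m.update(correct)
print(m.accumulate())   # 1.0 on this toy batch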
python/paddle/nn/decode.py
@@ -712,7 +712,7 @@ def _dynamic_decode_imperative(
     step_idx = 0
     step_idx_tensor = paddle.full(shape=[1], fill_value=step_idx, dtype="int64")
-    while cond.numpy():
+    while cond.item():
         (step_outputs, next_states, next_inputs, next_finished) = decoder.step(
             step_idx_tensor, inputs, states, **kwargs
         )
python/paddle/nn/functional/common.py
@@ -490,7 +490,7 @@ def interpolate(
     else:
         if in_dynamic_mode():
             if isinstance(out_shape, Variable):
-                out_shape = list(out_shape.numpy())
+                out_shape = list(out_shape.numpy(False))
             else:
                 out_shape = list(out_shape)
python/paddle/nn/functional/pooling.py
@@ -706,7 +706,7 @@ def _unpool_output_size(x, kernel_size, stride, padding, output_size):
     else:
         for i, var in enumerate(output_size):
             if isinstance(var, Variable):
-                output_size[i] = var.numpy().item()
+                output_size[i] = var.item()

     if len(output_size) == len(kernel_size) + 2:
         output_size = output_size[2:]
@@ -1609,7 +1609,7 @@ def adaptive_avg_pool2d(x, output_size, data_format='NCHW', name=None):
     if in_dygraph_mode():
         output_size = [
-            item.numpy().item(0) if isinstance(item, Variable) else item
+            item.item(0) if isinstance(item, Variable) else item
             for item in output_size
         ]
     # output_size support Variable in static graph mode
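adaptive_avg_pool2d above now unpacks any Tensor-valued entry of output_size with Tensor.item(0) on the dygraph path. A basic call with plain integer sizes (the shapes are illustrative):

import paddle
import paddle.nn.functional as F

x = paddle.rand([1, 3, 32, 32])
out = F.adaptive_avg_pool2d(x, output_size=[8, 8])
print(out.shape)   # [1, 3, 8, 8]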
python/paddle/optimizer/optimizer.py
@@ -382,9 +382,9 @@ class Optimizer:
                 load_para = state_dict[var_tmp.name]
                 if isinstance(load_para, Variable):
-                    load_para_np = load_para.numpy()
+                    load_para_np = np.array(load_para)
                 elif isinstance(load_para, core.VarBase):
-                    load_para_np = load_para.numpy()
+                    load_para_np = np.array(load_para)
                 elif isinstance(load_para, np.ndarray):
                     load_para_np = load_para
                 else:
python/paddle/quantization/imperative/ptq_quantizer.py
@@ -54,13 +54,13 @@ def combine_abs_max_and_hist(
         return origin_max, origin_hist
     elif origin_max == 0.0:
         new_hist, _ = np.histogram(
-            paddle.abs(tensor).numpy(), range=(0, new_max), bins=bins
+            paddle.abs(tensor).numpy(False), range=(0, new_max), bins=bins
         )
         new_hist = new_hist.astype(np.float32)
         return new_max, new_hist
     elif new_max <= origin_max:
         new_hist, _ = np.histogram(
-            paddle.abs(tensor).numpy(), range=(0, origin_max), bins=bins
+            paddle.abs(tensor).numpy(False), range=(0, origin_max), bins=bins
         )
         new_hist = new_hist.astype(np.float32)
         new_hist += origin_hist
@@ -84,7 +84,7 @@ def combine_abs_max_and_hist(
         sampled_hist = sampled_hist.astype(np.float32)
         new_hist, _ = np.histogram(
-            paddle.abs(tensor).numpy(), range=(0, new_max), bins=bins
+            paddle.abs(tensor).numpy(False), range=(0, new_max), bins=bins
         )
         new_hist = new_hist.astype(np.float32)
         new_hist += sampled_hist
@@ -189,7 +189,7 @@ class BaseHistQuantizer(BaseQuantizer, metaclass=abc.ABCMeta):
                 self.hists.append(None)
             else:
                 hist, _ = np.histogram(
-                    paddle.abs(tensor).numpy(),
+                    paddle.abs(tensor).numpy(False),
                     range=(0.0, abs_max_vals[idx]),
                     bins=self.bins,
                 )
python/paddle/static/nn/metric.py
@@ -76,7 +76,7 @@ def accuracy(input, label, k=1, correct=None, total=None):
         if total is None:
             total = _varbase_creator(dtype="int32")

-        _k = k.numpy().item(0) if isinstance(k, Variable) else k
+        _k = k.item(0) if isinstance(k, Variable) else k
         topk_out, topk_indices = _legacy_C_ops.top_k_v2(
             input, 'k', _k, 'sorted', False
         )
python/paddle/tensor/array.py
@@ -119,7 +119,7 @@ def array_read(array, i):
         assert i.shape == [
             1
         ], "The shape of index 'i' should be [1] in dygraph mode"
-        i = i.numpy().item(0)
+        i = i.item(0)
         return array[i]
     else:
         check_variable_and_dtype(i, 'i', ['int64'], 'array_read')
@@ -179,7 +179,7 @@ def array_write(x, i, array=None):
         assert i.shape == [
             1
         ], "The shape of index 'i' should be [1] in dygraph mode"
-        i = i.numpy().item(0)
+        i = i.item(0)
         if array is None:
             array = create_array(x.dtype)
         assert isinstance(
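array_write/array_read above now unpack the index Tensor with i.item(0) in dygraph. A small sketch using the public paddle.tensor array helpers (assuming they behave in dygraph as the asserts above describe):

import paddle

arr = paddle.tensor.create_array(dtype='float32')
i = paddle.zeros(shape=[1], dtype='int64')          # index shape must be [1] in dygraph
arr = paddle.tensor.array_write(paddle.full([3], 1.5), i, array=arr)
print(paddle.tensor.array_read(arr, i))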
python/paddle/tensor/manipulation.py
@@ -334,7 +334,7 @@ def slice(input, axes, starts, ends):
                 for item in starts
             ]
         elif isinstance(starts, tmp_tensor_type):
-            tensor_t = starts.numpy()
+            tensor_t = starts.numpy(False)
             starts = [ele for ele in tensor_t]
             infer_flags = [-1 for i in range(len(axes))]
@@ -344,7 +344,7 @@ def slice(input, axes, starts, ends):
                 for item in ends
             ]
         elif isinstance(ends, tmp_tensor_type):
-            tensor_t = ends.numpy()
+            tensor_t = ends.numpy(False)
             ends = [ele for ele in tensor_t]
             infer_flags = [-1 for i in range(len(axes))]
python/paddle/utils/layers_utils.py
@@ -456,12 +456,12 @@ def convert_shape_to_list(shape):
     if isinstance(shape, (list, tuple)):
         shape = list(
             map(
-                lambda x: x.numpy().flat[0] if isinstance(x, Variable) else x,
+                lambda x: x.item(0) if isinstance(x, Variable) else x,
                 shape,
             )
         )
     else:
-        shape = shape.numpy().astype(int).tolist()
+        shape = shape.astype(int).tolist()
     return shape