PaddlePaddle / Paddle
Commit d7035454 (unverified)
Authored Mar 20, 2023 by zhouweiwei2014 · committed via GitHub on Mar 20, 2023
[Zero-Dim] fix Tensor.numpy, control whether to hack process to 1D (#51757)
Parent: e8530a35
Showing 19 changed files with 78 additions and 68 deletions (+78 −68)
paddle/fluid/pybind/eager_method.cc                                  +24  -10
python/paddle/distributed/fleet/meta_parallel/pipeline_parallel.py    +4   -4
python/paddle/fluid/dygraph/varbase_patch_methods.py                  +1   -1
python/paddle/fluid/framework.py                                      +1   -1
python/paddle/fluid/layers/nn.py                                      +2   -2
python/paddle/fluid/optimizer.py                                      +2   -2
python/paddle/fluid/tests/unittests/test_zero_dim_tensor.py           +9   -1
python/paddle/geometric/message_passing/utils.py                      +1   -1
python/paddle/incubate/operators/graph_send_recv.py                   +1   -1
python/paddle/nn/functional/common.py                                 +2   -2
python/paddle/nn/functional/vision.py                                 +2   -6
python/paddle/nn/initializer/assign.py                                +1   -1
python/paddle/optimizer/adam.py                                       +4   -4
python/paddle/optimizer/adamw.py                                      +2   -2
python/paddle/quantization/imperative/ptq_quantizer.py                +1   -1
python/paddle/static/nn/control_flow.py                               +3   -3
python/paddle/tensor/manipulation.py                                 +11  -20
python/paddle/tensor/math.py                                          +4   -4
python/paddle/tensor/to_string.py                                     +3   -2
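In short, Tensor.numpy() gains an optional flag that controls whether a 0D Tensor is still hacked into a 1D ndarray. A minimal usage sketch, distilled from the updated test in test_zero_dim_tensor.py below (it assumes a Paddle build containing this commit):

    import paddle

    x = paddle.full([], 0.5)       # a 0D (zero-dimensional) Tensor

    print(x.numpy().shape)         # (1,)  -- default keeps the legacy 1D hack
    print(x.numpy(False).shape)    # ()    -- False disables the hack, true 0D

    # Preferred ways to read the scalar out, per the warning in this commit:
    print(float(x))                # 0.5
    print(x.item())                # 0.5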
paddle/fluid/pybind/eager_method.cc

@@ -123,15 +123,29 @@ static PyObject* tensor_method_numpy(TensorObject* self,
   size_t py_rank = tensor_dims.size();
   size_t numel = 1;
   if (py_rank == 0) {
-    // 0D Tensor hack process to 1D numpy, will remove in future
-    VLOG(0) << "Warning:: 0D Tensor cannot be used as Tensor.numpy()[0], Now "
-               "0D will be changed to 1D numpy to avoid this problem, but it's "
-               "not correct and will be removed in future. Please change "
-               "'Tensor.numpy()[0]' to 'float(Tensor)' or "
-               "'Tensor.numpy().item()' as soon as possible.";
-    py_rank = 1;
-    py_dims[0] = 1;
-    py_strides[0] = sizeof_dtype * numel;
+    Py_ssize_t args_num = PyTuple_Size(args);
+    bool set_to_1d = true;
+    if (args_num == (Py_ssize_t)1) {
+      PyObject* obj = PyTuple_GET_ITEM(args, 0);
+      if (obj == Py_False) {
+        set_to_1d = false;
+      }
+    }
+    if (set_to_1d) {
+      // 0D Tensor hack process to 1D numpy, will remove in future
+      VLOG(0) << "Warning:: 0D Tensor cannot be used as 'Tensor.numpy()[0]'. "
+                 "In order to avoid this problem, 0D Tensor will be changed "
+                 "to 1D numpy currently, but it's not correct and will be "
+                 "removed in future. Please modify 'Tensor.numpy()[0]' to "
+                 "'float(Tensor)' as soon as possible, otherwise "
+                 "'Tensor.numpy()[0]' will raise an error";
+      py_rank = 1;
+      py_dims[0] = 1;
+      py_strides[0] = sizeof_dtype * numel;
+    }
   } else {
     for (int i = tensor_dims.size() - 1; i >= 0; --i) {
       py_dims[i] = static_cast<size_t>(tensor_dims[i]);

@@ -143,7 +157,7 @@ static PyObject* tensor_method_numpy(TensorObject* self,
   PyObject* array =
       api.PyArray_NewFromDescr_(api.PyArray_Type_,
                                 api.PyArray_DescrFromType_(numpy_dtype),
-                                tensor_dims.size(),
+                                py_rank,
                                 py_dims,
                                 py_strides,
                                 nullptr,
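The warning above exists because indexing with [0] is only legal on arrays of rank >= 1; this is plain NumPy behavior, independent of Paddle:

    import numpy as np

    a0 = np.array(0.5)       # 0D ndarray, shape ()
    a1 = a0.reshape(1)       # the legacy 1D view, shape (1,)

    print(a1[0])             # 0.5 -- works on the hacked 1D result
    print(a0.item())         # 0.5 -- the correct scalar access for 0D
    # a0[0] raises IndexError: too many indices for array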
python/paddle/distributed/fleet/meta_parallel/pipeline_parallel.py

@@ -415,9 +415,9 @@ class PipelineParallel(MetaParallelBase):
             ), "train_batch() in last stage should obtain valid loss"
             loss = self.total_loss.detach()
             is_fp32 = (
-                paddle.to_tensor(1)
+                paddle.full([], 1, 'int64')
                 if loss.dtype == paddle.float32
-                else paddle.to_tensor(0)
+                else paddle.full([], 0, 'int64')
             )
             paddle.distributed.broadcast(
                 is_fp32, src=self.global_rank, sync_op=True, group=self.pp_group

@@ -426,7 +426,7 @@ class PipelineParallel(MetaParallelBase):
                 loss, src=self.global_rank, sync_op=True, group=self.pp_group
             )
         else:
-            is_fp32 = paddle.to_tensor(1)
+            is_fp32 = paddle.full([], 1, 'int64')
             paddle.distributed.broadcast(
                 is_fp32,
                 src=self._hcg.get_rank_from_stage(self.num_stages - 1),

@@ -435,7 +435,7 @@ class PipelineParallel(MetaParallelBase):
             )
             loss = (
                 paddle.zeros(shape=[1], dtype="float32")
-                if is_fp32.numpy()[0]
+                if is_fp32.item()
                 else paddle.zeros(shape=[1], dtype="float16")
             )
             paddle.distributed.broadcast(
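The broadcast flag is now created as a true 0D Tensor and read back with .item(); a standalone sketch of that pattern (no distributed setup, illustrative only):

    import paddle

    is_fp32 = paddle.full([], 1, 'int64')    # 0D int64 flag, shape []
    dtype = "float32" if is_fp32.item() else "float16"
    print(dtype)                             # float32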
python/paddle/fluid/dygraph/varbase_patch_methods.py

@@ -764,7 +764,7 @@ def monkey_patch_varbase():
                 print(type(x_array)) #<class 'numpy.ndarray'>
                 print(x_array.shape) #(2, 2)
         """
-        array = self.numpy()
+        array = self.numpy(False)
         if dtype:
             array = array.astype(dtype)
         return array
python/paddle/fluid/framework.py

@@ -773,7 +773,7 @@ def _var_base_to_np(var_base):
         "paddle.fluid.framework._var_base_to_np is deprecated, please use var_base.numpy() instead of _var_base_to_np(var_base)."
     )
-    return var_base.numpy()
+    return var_base.numpy(False)

 def _cpu_num():
python/paddle/fluid/layers/nn.py

@@ -698,10 +698,10 @@ def unsqueeze(input, axes, name=None):
         if isinstance(axes, int):
             axes = [axes]
         elif isinstance(axes, Variable):
-            axes = axes.numpy().tolist()
+            axes = axes.tolist()
         elif isinstance(axes, (list, tuple)):
             axes = [
-                item.numpy().item(0) if isinstance(item, Variable) else item
+                item.item(0) if isinstance(item, Variable) else item
                 for item in axes
             ]
         return _C_ops.unsqueeze(input, axes)
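Tensor.tolist() as used here converts a Tensor of axes straight to a Python list, with no intermediate ndarray; a quick sketch:

    import paddle

    axes = paddle.to_tensor([1, 3])
    print(axes.tolist())        # [1, 3] -- no .numpy() round-trip needed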
python/paddle/fluid/optimizer.py

@@ -2432,12 +2432,12 @@ class AdamOptimizer(Optimizer):
             _beta1 = (
                 self._beta1
                 if not isinstance(self._beta1, Variable)
-                else self._beta1.numpy().item(0)
+                else self._beta1.item(0)
             )
             _beta2 = (
                 self._beta2
                 if not isinstance(self._beta2, Variable)
-                else self._beta2.numpy().item(0)
+                else self._beta2.item(0)
             )
             master_weight = None
             _, _, _, _, _, _ = _legacy_C_ops.adam(
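The same beta unwrapping recurs in adam.py and adamw.py below: a scalar hyperparameter that may be stored as a Tensor is read via item(0) instead of numpy().item(0). A sketch:

    import paddle

    beta1 = paddle.full([], 0.9)    # beta stored as a Tensor
    print(beta1.item(0))            # 0.9 -- direct scalar read, no numpy()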
python/paddle/fluid/tests/unittests/test_zero_dim_tensor.py

@@ -952,7 +952,15 @@ class TestSundryAPI(unittest.TestCase):
     def test_numpy(self):
         x = paddle.full([], 0.5)
-        np.testing.assert_array_equal(x.numpy(), np.array(0.5))
+        # 0D Tensor is hacked to 1D numpy by default, will remove in future
+        x_np = x.numpy()
+        np.testing.assert_array_equal(x_np.shape, (1,))
+        np.testing.assert_array_equal(x_np, np.array([0.5]))
+
+        # numpy(False) returns the original, correct 0D numpy
+        x_np = x.numpy(False)
+        np.testing.assert_array_equal(x_np.shape, ())
+        np.testing.assert_array_equal(x_np, np.array(0.5))

     def test_numel(self):
         out = paddle.numel(self.x)
python/paddle/geometric/message_passing/utils.py

@@ -29,7 +29,7 @@ def convert_out_size_to_list(out_size):
     elif isinstance(out_size, (int, np.int32, np.int64)):
         out_size = [out_size]
     else:
-        out_size = [out_size.numpy().astype(int)[0]]
+        out_size = [int(out_size)]
     return out_size
python/paddle/incubate/operators/graph_send_recv.py

@@ -182,7 +182,7 @@ def convert_out_size_to_list(out_size):
     elif isinstance(out_size, (int, np.int32, np.int64)):
         out_size = [out_size]
     else:
-        out_size = [out_size.numpy().astype(int)[0]]
+        out_size = [int(out_size)]
     return out_size
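Both copies of convert_out_size_to_list now rely on int() accepting a one-element Tensor, which sidesteps the numpy() hack entirely; for example:

    import paddle

    out_size = paddle.full([], 10, 'int64')
    print([int(out_size)])      # [10] -- int() reads the scalar directly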
python/paddle/nn/functional/common.py

@@ -502,7 +502,7 @@ def interpolate(
         for i, dim in enumerate(out_shape):
             if isinstance(dim, Variable):
-                out_shape[i] = dim.numpy().item()
+                out_shape[i] = dim.item()
         if not (_is_list_or_turple_(out_shape)):
             raise TypeError("size should be a list or tuple or Variable.")
         # Validate the shape

@@ -1692,7 +1692,7 @@ def pad(x, pad, mode='constant', value=0.0, data_format="NCHW", name=None):
     if in_dygraph_mode():
         if isinstance(pad, Variable):
-            pad = pad.numpy().tolist()
+            pad = pad.tolist()
         out = _C_ops.pad3d(x, pad, mode, value, data_format)
     else:
         attrs = {'mode': mode, 'value': value, 'data_format': data_format}
python/paddle/nn/functional/vision.py

@@ -86,17 +86,13 @@ def affine_grid(theta, out_shape, align_corners=True, name=None):
     if in_dygraph_mode():
         _out_shape = (
-            out_shape.numpy().tolist()
-            if isinstance(out_shape, Variable)
-            else out_shape
+            out_shape.tolist() if isinstance(out_shape, Variable) else out_shape
         )
         theta = theta._use_gpudnn(use_cudnn)
         return _C_ops.affine_grid(theta, _out_shape, align_corners)
     elif in_dynamic_mode():
         _out_shape = (
-            out_shape.numpy().tolist()
-            if isinstance(out_shape, Variable)
-            else out_shape
+            out_shape.tolist() if isinstance(out_shape, Variable) else out_shape
         )
         return _legacy_C_ops.affine_grid(
             theta,
python/paddle/nn/initializer/assign.py

@@ -211,6 +211,6 @@ class Assign(NumpyArrayInitializer):
         # TODO: value is already a tensor; for efficiency it may not be necessary to convert it to numpy data before initializing.
         if isinstance(value, paddle.static.Variable):
-            value = value.numpy()
+            value = value.numpy(False)
         super().__init__(value)
python/paddle/optimizer/adam.py

@@ -310,12 +310,12 @@ class Adam(Optimizer):
             _beta1 = (
                 self._beta1
                 if not isinstance(self._beta1, Variable)
-                else self._beta1.numpy().item(0)
+                else self._beta1.item(0)
             )
             _beta2 = (
                 self._beta2
                 if not isinstance(self._beta2, Variable)
-                else self._beta2.numpy().item(0)
+                else self._beta2.item(0)
             )
             _, _, _, _, _, _ = _C_ops.adam_(

@@ -623,12 +623,12 @@ class Adam(Optimizer):
             _beta1 = (
                 self._beta1
                 if not isinstance(self._beta1, Variable)
-                else self._beta1.numpy().item(0)
+                else self._beta1.item(0)
             )
             _beta2 = (
                 self._beta2
                 if not isinstance(self._beta2, Variable)
-                else self._beta2.numpy().item(0)
+                else self._beta2.item(0)
             )
             if framework.in_dygraph_mode():
python/paddle/optimizer/adamw.py

@@ -434,12 +434,12 @@ class AdamW(Optimizer):
             _beta1 = (
                 self._beta1
                 if not isinstance(self._beta1, Variable)
-                else self._beta1.numpy().item(0)
+                else self._beta1.item(0)
             )
             _beta2 = (
                 self._beta2
                 if not isinstance(self._beta2, Variable)
-                else self._beta2.numpy().item(0)
+                else self._beta2.item(0)
             )
             _, _, _, _, _, _ = _C_ops.adamw_(
python/paddle/quantization/imperative/ptq_quantizer.py

@@ -24,7 +24,7 @@ from . import utils
 def abs_max_value(tensor):
-    return float(paddle.max(paddle.abs(tensor)).numpy())
+    return float(paddle.max(paddle.abs(tensor)))

 def merge_max_value(old, new):
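float() applied to the 0D result of paddle.max plays the same role as item(); a sketch of the rewritten abs_max_value:

    import paddle

    t = paddle.to_tensor([[-3.0, 2.0], [1.0, -0.5]])
    m = paddle.max(paddle.abs(t))   # 0D Tensor holding 3.0
    print(float(m))                 # 3.0 -- no .numpy() needed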
python/paddle/static/nn/control_flow.py

@@ -466,7 +466,7 @@ def while_loop(cond, body, loop_vars, is_test=False, name=None):
     )
     if _non_static_mode():
-        now_cond = pre_cond.numpy().item()
+        now_cond = pre_cond.item()
         while now_cond:
             output_vars = body(*loop_vars)
             if not isinstance(output_vars, (list, tuple)):

@@ -476,7 +476,7 @@ def while_loop(cond, body, loop_vars, is_test=False, name=None):
                     "body in while_loop should return the same arity "
                     "(length and structure) and types as loop_vars"
                 )
-            now_cond = cond(*output_vars).numpy().item()
+            now_cond = cond(*output_vars).item()
             map_structure(assign_skip_lod_tensor_array, output_vars, loop_vars)
         return loop_vars

@@ -968,7 +968,7 @@ def cond(pred, true_fn=None, false_fn=None, name=None, return_names=None):
     if _non_static_mode():
         assert isinstance(pred, Variable), "The pred in cond must be Variable"
         assert pred.size == 1, "condition input's numel should be 1"
-        pred = pred.numpy().item()
+        pred = pred.item()
         if pred:
             if true_fn is not None:
                 if not callable(true_fn):
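In dygraph mode the loop and branch conditions are 0D boolean Tensors, so .item() yields the Python truth value directly; an illustrative standalone loop (not the Paddle source):

    import paddle

    i = paddle.full([], 0, 'int64')
    while (i < 3).item():       # scalar truth test, replaces .numpy().item()
        i = i + 1
    print(i.item())             # 3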
python/paddle/tensor/manipulation.py

@@ -330,9 +330,7 @@ def slice(input, axes, starts, ends):
         if isinstance(starts, (list, tuple)):
             starts = [
-                item.numpy().item(0)
-                if isinstance(item, tmp_tensor_type)
-                else item
+                item.item(0) if isinstance(item, tmp_tensor_type) else item
                 for item in starts
             ]
         elif isinstance(starts, tmp_tensor_type):

@@ -342,9 +340,7 @@ def slice(input, axes, starts, ends):
         if isinstance(ends, (list, tuple)):
             ends = [
-                item.numpy().item(0)
-                if isinstance(item, tmp_tensor_type)
-                else item
+                item.item(0) if isinstance(item, tmp_tensor_type) else item
                 for item in ends
             ]
         elif isinstance(ends, tmp_tensor_type):

@@ -1069,7 +1065,8 @@ def tolist(x):
         print(expectlist) #[0, 1, 2, 3, 4]
     """
-    return x.numpy().tolist()
+    # TODO(zhouwei): will remove 0D Tensor.numpy() hack
+    return x.numpy(False).tolist()

 def concat(x, axis=0, name=None):

@@ -1118,7 +1115,6 @@ def concat(x, axis=0, name=None):
     input = x
     if in_dygraph_mode():
         if isinstance(axis, Variable):
-            axis = axis.numpy()
-            axis = axis.item(0)
+            axis = axis.item(0)
         if not isinstance(input, Variable):
             input = [t for t in input if t.shape.count(0) == 0]

@@ -1953,7 +1949,6 @@ def split(x, num_or_sections, axis=0, name=None):
     dim = axis
     if in_dygraph_mode():
         if isinstance(dim, Variable):
-            dim = dim.numpy()
-            dim = dim.item(0)
+            dim = dim.item(0)
         assert len(input.shape) + dim >= 0, "(rank(x) + axis) must >= 0"
         dim = (len(input.shape) + dim) if dim < 0 else dim

@@ -1962,9 +1957,7 @@ def split(x, num_or_sections, axis=0, name=None):
         if paddle.utils._contain_var(num_or_sections):
             for index, item in enumerate(num_or_sections):
                 if isinstance(item, Variable):
-                    num_or_sections[index] = num_or_sections[index].numpy()[0]
+                    num_or_sections[index] = num_or_sections[index].item()
         elif not isinstance(num_or_sections, int):
             raise TypeError(
                 "The type of 'num_or_sections' in split must be int, list or tuple in imperative mode, but "

@@ -2594,10 +2587,10 @@ def unsqueeze(x, axis, name=None):
         if isinstance(axes, int):
             axes = [axes]
         elif isinstance(axes, Variable):
-            axes = axes.numpy().tolist()
+            axes = axes.tolist()
         elif isinstance(axes, (list, tuple)):
             axes = [
-                item.numpy().item(0) if isinstance(item, Variable) else item
+                item.item(0) if isinstance(item, Variable) else item
                 for item in axes
             ]
         return _C_ops.unsqueeze(input, axes)

@@ -2660,10 +2653,10 @@ def unsqueeze_(x, axis, name=None):
         if isinstance(axes, int):
             axes = [axes]
         elif isinstance(axes, Variable):
-            axes = axes.numpy().tolist()
+            axes = axes.tolist()
         elif isinstance(axes, (list, tuple)):
             axes = [
-                item.numpy().item(0) if isinstance(item, Variable) else item
+                item.item(0) if isinstance(item, Variable) else item
                 for item in axes
             ]
         return _C_ops.unsqueeze_(input, axes)

@@ -3149,7 +3142,7 @@ def tile(x, repeat_times, name=None):
         assert (
             repeat_times.ndim == 1
         ), "Only support ndim == 1 while repeat_times is a Tensor."
-        repeat_times = repeat_times.numpy().tolist()
+        repeat_times = repeat_times.tolist()
         return _C_ops.tile(x, repeat_times)
     else:

@@ -3649,9 +3642,7 @@ def reshape_(x, shape, name=None):
     tmp_tensor_type = core.eager.Tensor
     if isinstance(shape, (list, tuple)):
         shape = [
-            item.numpy().item(0)
-            if isinstance(item, tmp_tensor_type)
-            else item
+            item.item(0) if isinstance(item, tmp_tensor_type) else item
             for item in shape
         ]
         if shape == x.shape:
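Note that tolist() above now goes through x.numpy(False).tolist(), so a 0D Tensor should come back as a bare Python scalar rather than a one-element list:

    import paddle

    x = paddle.full([], 0.5)
    print(paddle.tolist(x))     # 0.5 -- a scalar, not [0.5], after this fix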
python/paddle/tensor/math.py

@@ -2872,9 +2872,9 @@ def clip(x, min=None, max=None, name=None):
     if in_dygraph_mode():
         if isinstance(min, Variable):
-            min = min.numpy().item(0)
+            min = min.item(0)
         if isinstance(max, Variable):
-            max = max.numpy().item(0)
+            max = max.item(0)
         min = min_ if min is None else min
         max = max_ if max is None else max
         return _C_ops.clip(x, min, max)

@@ -2939,9 +2939,9 @@ def clip_(x, min=None, max=None, name=None):
     fmin = float(np.finfo(np.float32).min)
     fmax = float(np.finfo(np.float32).max)
     if isinstance(min, Variable):
-        min = min.numpy().item(0)
+        min = min.item(0)
     if isinstance(max, Variable):
-        max = max.numpy().item(0)
+        max = max.item(0)
     min = fmin if min is None else min
     max = fmax if max is None else max
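clip accepts Tensor bounds in dygraph mode, and those bounds are now unwrapped with item(0); for example:

    import paddle

    x = paddle.to_tensor([0.1, 0.7, 1.3])
    lo = paddle.full([], 0.2)
    hi = paddle.full([], 1.0)
    print(paddle.clip(x, lo, hi))   # [0.2, 0.7, 1.0]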
python/paddle/tensor/to_string.py

@@ -257,7 +257,7 @@ def to_string(var, prefix='Tensor'):
     if var.dtype == core.VarDesc.VarType.BF16:
         var = var.astype('float32')
-    np_var = var.numpy()
+    np_var = var.numpy(False)
     if len(var.shape) == 0:
         size = 0

@@ -291,7 +291,8 @@ def _format_dense_tensor(tensor, indent):
     if tensor.dtype == core.VarDesc.VarType.BF16:
         tensor = tensor.astype('float32')
-    np_tensor = tensor.numpy()
+    # TODO(zhouwei): will remove 0D Tensor.numpy() hack
+    np_tensor = tensor.numpy(False)
     if len(tensor.shape) == 0:
         size = 0
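Printing now goes through numpy(False), so the repr code sees the true shape of a 0D Tensor (and avoids triggering the hack's warning); e.g.:

    import paddle

    x = paddle.full([], 0.5)
    print(x)    # Tensor(shape=[], ...) -- true 0D shape in the repr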