Unverified commit d7035454, authored by zhouweiwei2014, committed by GitHub

[Zero-Dim] fix Tensor.numpy, control whether to hack process to 1D (#51757)

Parent: e8530a35
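The user-visible change: `Tensor.numpy()` on a 0D Tensor now accepts an optional boolean controlling the legacy 0D-to-1D hack. A minimal sketch of the two call modes, assuming a Paddle build that contains this commit (it mirrors the new unit test further below):

    import paddle

    x = paddle.full([], 0.5)       # a 0D (zero-dimensional) Tensor

    # Default keeps the legacy hack: the 0D Tensor comes back as a
    # 1D numpy array of shape (1,), and a deprecation warning is logged.
    print(x.numpy().shape)         # (1,)

    # Passing False disables the hack and returns a true 0D array.
    print(x.numpy(False).shape)    # ()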
@@ -123,15 +123,29 @@ static PyObject* tensor_method_numpy(TensorObject* self,
   size_t py_rank = tensor_dims.size();
   size_t numel = 1;
   if (py_rank == 0) {
+    Py_ssize_t args_num = PyTuple_Size(args);
+    bool set_to_1d = true;
+    if (args_num == (Py_ssize_t)1) {
+      PyObject* obj = PyTuple_GET_ITEM(args, 0);
+      if (obj == Py_False) {
+        set_to_1d = false;
+      }
+    }
+    if (set_to_1d) {
     // 0D Tensor hack process to 1D numpy, will remove in future
-    VLOG(0) << "Warning:: 0D Tensor cannot be used as Tensor.numpy()[0], Now "
-               "0D will be changed to 1D numpy to avoid this problem, but it's "
-               "not correct and will be removed in future. Please change "
-               "'Tensor.numpy()[0]' to 'float(Tensor)' or "
-               "'Tensor.numpy().item()' as soon as possible.";
+    VLOG(0)
+        << "Warning:: 0D Tensor cannot be used as 'Tensor.numpy()[0]' . In "
+           "order to avoid this problem, "
+           "0D Tensor will be changed to 1D numpy currently, but it's not "
+           "correct and will be "
+           "removed in future. Please modify "
+           " 'Tensor.numpy()[0]' to 'float(Tensor)' as soon as "
+           "possible, "
+           "otherwise 'Tensor.numpy()[0]' will raise error";
     py_rank = 1;
     py_dims[0] = 1;
     py_strides[0] = sizeof_dtype * numel;
+    }
   } else {
     for (int i = tensor_dims.size() - 1; i >= 0; --i) {
       py_dims[i] = static_cast<size_t>(tensor_dims[i]);
...
@@ -143,7 +157,7 @@ static PyObject* tensor_method_numpy(TensorObject* self,
   PyObject* array = api.PyArray_NewFromDescr_(
       api.PyArray_Type_,
       api.PyArray_DescrFromType_(numpy_dtype),
-      tensor_dims.size(),
+      py_rank,
       py_dims,
       py_strides,
       nullptr,
...
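For readers who do not want to trace the CPython API calls above: the added branch disables the 1D hack only when exactly one positional argument is passed and it is the literal `False`. A hypothetical Python mimic of that logic (an illustration, not Paddle code):

    def _set_to_1d(*args):
        # Default to the legacy 0D-to-1D hack; turn it off only for a
        # single positional argument that is exactly Py_False.
        if len(args) == 1 and args[0] is False:
            return False
        return True

    assert _set_to_1d() is True          # x.numpy()
    assert _set_to_1d(False) is False    # x.numpy(False)
    assert _set_to_1d(True) is True      # anything else keeps the hack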
@@ -415,9 +415,9 @@ class PipelineParallel(MetaParallelBase):
            ), "train_batch() in last stage should obtain vaild loss"
            loss = self.total_loss.detach()
            is_fp32 = (
-                paddle.to_tensor(1)
+                paddle.full([], 1, 'int64')
                if loss.dtype == paddle.float32
-                else paddle.to_tensor(0)
+                else paddle.full([], 0, 'int64')
            )
            paddle.distributed.broadcast(
                is_fp32, src=self.global_rank, sync_op=True, group=self.pp_group
@@ -426,7 +426,7 @@ class PipelineParallel(MetaParallelBase):
                loss, src=self.global_rank, sync_op=True, group=self.pp_group
            )
        else:
-            is_fp32 = paddle.to_tensor(1)
+            is_fp32 = paddle.full([], 1, 'int64')
            paddle.distributed.broadcast(
                is_fp32,
                src=self._hcg.get_rank_from_stage(self.num_stages - 1),
@@ -435,7 +435,7 @@ class PipelineParallel(MetaParallelBase):
            )
            loss = (
                paddle.zeros(shape=[1], dtype="float32")
-                if is_fp32.numpy()[0]
+                if is_fp32.item()
                else paddle.zeros(shape=[1], dtype="float16")
            )
            paddle.distributed.broadcast(
...
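The pipeline change swaps the 1-element helper tensors for true 0D tensors (`paddle.full([], ...)`) and reads them back with `.item()` instead of `.numpy()[0]`, which would break once the 1D hack is removed. A small sketch of the pattern (not the pipeline code itself):

    import paddle

    is_fp32 = paddle.full([], 1, 'int64')   # 0D flag tensor
    # .item() returns a Python scalar for a 0D Tensor and does not
    # depend on the deprecated 0D-to-1D numpy hack.
    assert is_fp32.item() == 1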
@@ -764,7 +764,7 @@ def monkey_patch_varbase():
                print(type(x_array)) #<class 'numpy.ndarray'>
                print(x_array.shape) #(2, 2)
        """
-        array = self.numpy()
+        array = self.numpy(False)
        if dtype:
            array = array.astype(dtype)
        return array
...
@@ -773,7 +773,7 @@ def _var_base_to_np(var_base):
        "paddle.fluid.framework._var_base_to_np is deprecated, please use var_base.numpy() instead of _var_base_to_np(var_base)."
    )
-    return var_base.numpy()
+    return var_base.numpy(False)


def _cpu_num():
...
@@ -698,10 +698,10 @@ def unsqueeze(input, axes, name=None):
        if isinstance(axes, int):
            axes = [axes]
        elif isinstance(axes, Variable):
-            axes = axes.numpy().tolist()
+            axes = axes.tolist()
        elif isinstance(axes, (list, tuple)):
            axes = [
-                item.numpy().item(0) if isinstance(item, Variable) else item
+                item.item(0) if isinstance(item, Variable) else item
                for item in axes
            ]
        return _C_ops.unsqueeze(input, axes)
...
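The same mechanical migration repeats throughout the Python hunks below: `t.numpy().tolist()` becomes `t.tolist()` and `t.numpy().item(0)` becomes `t.item(0)`, both direct `Tensor` methods that skip the intermediate numpy conversion. A sketch of the pattern:

    import paddle

    axes = paddle.to_tensor([1, 2])
    assert axes.tolist() == [1, 2]   # replaces axes.numpy().tolist()

    axis = paddle.to_tensor([3])
    assert axis.item(0) == 3         # replaces axis.numpy().item(0)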
@@ -2432,12 +2432,12 @@ class AdamOptimizer(Optimizer):
            _beta1 = (
                self._beta1
                if not isinstance(self._beta1, Variable)
-                else self._beta1.numpy().item(0)
+                else self._beta1.item(0)
            )
            _beta2 = (
                self._beta2
                if not isinstance(self._beta2, Variable)
-                else self._beta2.numpy().item(0)
+                else self._beta2.item(0)
            )
            master_weight = None
            _, _, _, _, _, _ = _legacy_C_ops.adam(
...
@@ -952,7 +952,15 @@ class TestSundryAPI(unittest.TestCase):
    def test_numpy(self):
        x = paddle.full([], 0.5)
-        np.testing.assert_array_equal(x.numpy(), np.array(0.5))
+        # 0D Tensor is hacked to 1D numpy by default, will remove in future
+        x_np = x.numpy()
+        np.testing.assert_array_equal(x_np.shape, (1,))
+        np.testing.assert_array_equal(x_np, np.array([0.5]))
+        # numpy(False) returns the original, correct 0D numpy array
+        x_np = x.numpy(False)
+        np.testing.assert_array_equal(x_np.shape, ())
+        np.testing.assert_array_equal(x_np, np.array(0.5))

    def test_numel(self):
        out = paddle.numel(self.x)
...
@@ -29,7 +29,7 @@ def convert_out_size_to_list(out_size):
    elif isinstance(out_size, (int, np.int32, np.int64)):
        out_size = [out_size]
    else:
-        out_size = [out_size.numpy().astype(int)[0]]
+        out_size = [int(out_size)]
    return out_size
...
@@ -182,7 +182,7 @@ def convert_out_size_to_list(out_size):
    elif isinstance(out_size, (int, np.int32, np.int64)):
        out_size = [out_size]
    else:
-        out_size = [out_size.numpy().astype(int)[0]]
+        out_size = [int(out_size)]
    return out_size
...
@@ -502,7 +502,7 @@ def interpolate(
            for i, dim in enumerate(out_shape):
                if isinstance(dim, Variable):
-                    out_shape[i] = dim.numpy().item()
+                    out_shape[i] = dim.item()
            if not (_is_list_or_turple_(out_shape)):
                raise TypeError("size should be a list or tuple or Variable.")
            # Validate the shape
@@ -1692,7 +1692,7 @@ def pad(x, pad, mode='constant', value=0.0, data_format="NCHW", name=None):
    if in_dygraph_mode():
        if isinstance(pad, Variable):
-            pad = pad.numpy().tolist()
+            pad = pad.tolist()
        out = _C_ops.pad3d(x, pad, mode, value, data_format)
    else:
        attrs = {'mode': mode, 'value': value, 'data_format': data_format}
...
@@ -86,17 +86,13 @@ def affine_grid(theta, out_shape, align_corners=True, name=None):
    if in_dygraph_mode():
        _out_shape = (
-            out_shape.numpy().tolist()
-            if isinstance(out_shape, Variable)
-            else out_shape
+            out_shape.tolist() if isinstance(out_shape, Variable) else out_shape
        )
        theta = theta._use_gpudnn(use_cudnn)
        return _C_ops.affine_grid(theta, _out_shape, align_corners)
    elif in_dynamic_mode():
        _out_shape = (
-            out_shape.numpy().tolist()
-            if isinstance(out_shape, Variable)
-            else out_shape
+            out_shape.tolist() if isinstance(out_shape, Variable) else out_shape
        )
        return _legacy_C_ops.affine_grid(
            theta,
...
@@ -211,6 +211,6 @@ class Assign(NumpyArrayInitializer):
        # TODO: value is already is a tensor, accounting efficiency maybe it does not need to convert tensor to numpy data and then initialized.
        if isinstance(value, paddle.static.Variable):
-            value = value.numpy()
+            value = value.numpy(False)
        super().__init__(value)
@@ -310,12 +310,12 @@ class Adam(Optimizer):
            _beta1 = (
                self._beta1
                if not isinstance(self._beta1, Variable)
-                else self._beta1.numpy().item(0)
+                else self._beta1.item(0)
            )
            _beta2 = (
                self._beta2
                if not isinstance(self._beta2, Variable)
-                else self._beta2.numpy().item(0)
+                else self._beta2.item(0)
            )
            _, _, _, _, _, _ = _C_ops.adam_(
@@ -623,12 +623,12 @@ class Adam(Optimizer):
            _beta1 = (
                self._beta1
                if not isinstance(self._beta1, Variable)
-                else self._beta1.numpy().item(0)
+                else self._beta1.item(0)
            )
            _beta2 = (
                self._beta2
                if not isinstance(self._beta2, Variable)
-                else self._beta2.numpy().item(0)
+                else self._beta2.item(0)
            )
            if framework.in_dygraph_mode():
...
@@ -434,12 +434,12 @@ class AdamW(Optimizer):
            _beta1 = (
                self._beta1
                if not isinstance(self._beta1, Variable)
-                else self._beta1.numpy().item(0)
+                else self._beta1.item(0)
            )
            _beta2 = (
                self._beta2
                if not isinstance(self._beta2, Variable)
-                else self._beta2.numpy().item(0)
+                else self._beta2.item(0)
            )
            _, _, _, _, _, _ = _C_ops.adamw_(
...
@@ -24,7 +24,7 @@ from . import utils


def abs_max_value(tensor):
-    return float(paddle.max(paddle.abs(tensor)).numpy())
+    return float(paddle.max(paddle.abs(tensor)))


def merge_max_value(old, new):
...
@@ -466,7 +466,7 @@ def while_loop(cond, body, loop_vars, is_test=False, name=None):
    )
    if _non_static_mode():
-        now_cond = pre_cond.numpy().item()
+        now_cond = pre_cond.item()
        while now_cond:
            output_vars = body(*loop_vars)
            if not isinstance(output_vars, (list, tuple)):
@@ -476,7 +476,7 @@ def while_loop(cond, body, loop_vars, is_test=False, name=None):
                    "body in while_loop should return the same arity "
                    "(length and structure) and types as loop_vars"
                )
-            now_cond = cond(*output_vars).numpy().item()
+            now_cond = cond(*output_vars).item()
            map_structure(assign_skip_lod_tensor_array, output_vars, loop_vars)
        return loop_vars
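In the dygraph control-flow ops, conditions are likewise read with `.item()`, which yields a plain Python scalar for any 1-element (including 0D) boolean Tensor. A sketch:

    import paddle

    pred = paddle.full([], True, dtype='bool')   # 0D boolean Tensor
    # .item() gives a Python bool, so the branch works the same with
    # or without the 0D-to-1D numpy hack.
    if pred.item():
        print('branch taken')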
@@ -968,7 +968,7 @@ def cond(pred, true_fn=None, false_fn=None, name=None, return_names=None):
    if _non_static_mode():
        assert isinstance(pred, Variable), "The pred in cond must be Variable"
        assert pred.size == 1, "condition input's numel should be 1"
-        pred = pred.numpy().item()
+        pred = pred.item()
        if pred:
            if true_fn is not None:
                if not callable(true_fn):
...
@@ -330,9 +330,7 @@ def slice(input, axes, starts, ends):
            if isinstance(starts, (list, tuple)):
                starts = [
-                    item.numpy().item(0)
-                    if isinstance(item, tmp_tensor_type)
-                    else item
+                    item.item(0) if isinstance(item, tmp_tensor_type) else item
                    for item in starts
                ]
            elif isinstance(starts, tmp_tensor_type):
@@ -342,9 +340,7 @@ def slice(input, axes, starts, ends):
            if isinstance(ends, (list, tuple)):
                ends = [
-                    item.numpy().item(0)
-                    if isinstance(item, tmp_tensor_type)
-                    else item
+                    item.item(0) if isinstance(item, tmp_tensor_type) else item
                    for item in ends
                ]
            elif isinstance(ends, tmp_tensor_type):
@@ -1069,7 +1065,8 @@ def tolist(x):
            print(expectlist)   #[0, 1, 2, 3, 4]
    """
-    return x.numpy().tolist()
+    # TODO(zhouwei): will remove 0D Tensor.numpy() hack
+    return x.numpy(False).tolist()


def concat(x, axis=0, name=None):
@@ -1118,7 +1115,6 @@ def concat(x, axis=0, name=None):
    input = x
    if in_dygraph_mode():
        if isinstance(axis, Variable):
-            axis = axis.numpy()
            axis = axis.item(0)
        if not isinstance(input, Variable):
            input = [t for t in input if t.shape.count(0) == 0]
@@ -1953,7 +1949,6 @@ def split(x, num_or_sections, axis=0, name=None):
    dim = axis
    if in_dygraph_mode():
        if isinstance(dim, Variable):
-            dim = dim.numpy()
            dim = dim.item(0)
        assert len(input.shape) + dim >= 0, "(rank(x) + axis) must >= 0"
        dim = (len(input.shape) + dim) if dim < 0 else dim
@@ -1962,9 +1957,7 @@ def split(x, num_or_sections, axis=0, name=None):
        if paddle.utils._contain_var(num_or_sections):
            for index, item in enumerate(num_or_sections):
                if isinstance(item, Variable):
-                    num_or_sections[index] = num_or_sections[index].numpy()[
-                        0
-                    ]
+                    num_or_sections[index] = num_or_sections[index].item()
        elif not isinstance(num_or_sections, int):
            raise TypeError(
                "The type of 'num_or_sections' in split must be int, list or tuple in imperative mode, but "
@@ -2594,10 +2587,10 @@ def unsqueeze(x, axis, name=None):
        if isinstance(axes, int):
            axes = [axes]
        elif isinstance(axes, Variable):
-            axes = axes.numpy().tolist()
+            axes = axes.tolist()
        elif isinstance(axes, (list, tuple)):
            axes = [
-                item.numpy().item(0) if isinstance(item, Variable) else item
+                item.item(0) if isinstance(item, Variable) else item
                for item in axes
            ]
        return _C_ops.unsqueeze(input, axes)
@@ -2660,10 +2653,10 @@ def unsqueeze_(x, axis, name=None):
        if isinstance(axes, int):
            axes = [axes]
        elif isinstance(axes, Variable):
-            axes = axes.numpy().tolist()
+            axes = axes.tolist()
        elif isinstance(axes, (list, tuple)):
            axes = [
-                item.numpy().item(0) if isinstance(item, Variable) else item
+                item.item(0) if isinstance(item, Variable) else item
                for item in axes
            ]
        return _C_ops.unsqueeze_(input, axes)
@@ -3149,7 +3142,7 @@ def tile(x, repeat_times, name=None):
            assert (
                repeat_times.ndim == 1
            ), "Only support ndim == 1 while repeat_times is a Tensor."
-            repeat_times = repeat_times.numpy().tolist()
+            repeat_times = repeat_times.tolist()
        return _C_ops.tile(x, repeat_times)
    else:
@@ -3649,9 +3642,7 @@ def reshape_(x, shape, name=None):
        tmp_tensor_type = core.eager.Tensor
        if isinstance(shape, (list, tuple)):
            shape = [
-                item.numpy().item(0)
-                if isinstance(item, tmp_tensor_type)
-                else item
+                item.item(0) if isinstance(item, tmp_tensor_type) else item
                for item in shape
            ]
            if shape == x.shape:
...
@@ -2872,9 +2872,9 @@ def clip(x, min=None, max=None, name=None):
    if in_dygraph_mode():
        if isinstance(min, Variable):
-            min = min.numpy().item(0)
+            min = min.item(0)
        if isinstance(max, Variable):
-            max = max.numpy().item(0)
+            max = max.item(0)
        min = min_ if min is None else min
        max = max_ if max is None else max
        return _C_ops.clip(x, min, max)
@@ -2939,9 +2939,9 @@ def clip_(x, min=None, max=None, name=None):
    fmin = float(np.finfo(np.float32).min)
    fmax = float(np.finfo(np.float32).max)
    if isinstance(min, Variable):
-        min = min.numpy().item(0)
+        min = min.item(0)
    if isinstance(max, Variable):
-        max = max.numpy().item(0)
+        max = max.item(0)
    min = fmin if min is None else min
    max = fmax if max is None else max
...
@@ -257,7 +257,7 @@ def to_string(var, prefix='Tensor'):
    if var.dtype == core.VarDesc.VarType.BF16:
        var = var.astype('float32')

-    np_var = var.numpy()
+    np_var = var.numpy(False)

    if len(var.shape) == 0:
        size = 0
@@ -291,7 +291,8 @@ def _format_dense_tensor(tensor, indent):
    if tensor.dtype == core.VarDesc.VarType.BF16:
        tensor = tensor.astype('float32')

-    np_tensor = tensor.numpy()
+    # TODO(zhouwei): will remove 0D Tensor.numpy() hack
+    np_tensor = tensor.numpy(False)

    if len(tensor.shape) == 0:
        size = 0
...