Unverified commit 73df2b1e, authored by zhouweiwei2014, committed by GitHub

[Zero-Dim] change Tensor.numpy() usage to other equivalent usage, avoid hack (#52197)

Parent d966301e
@@ -141,10 +141,11 @@ static PyObject* tensor_method_numpy(TensorObject* self,
         "order to avoid this problem, "
         "0D Tensor will be changed to 1D numpy currently, but it's not "
         "correct and will be "
-        "removed in future. Please modify "
-        " 'Tensor.numpy()[0]' to 'float(Tensor)' as soon as "
+        "removed in future. For Tensor contain only one element, Please "
+        "modify "
+        " 'Tensor.numpy()[0]' to 'Tensor.item()' as soon as "
         "possible, "
-        "otherwise 'Tensor.numpy()[0]' will raise error";
+        "otherwise 'Tensor.numpy()[0]' will raise error in future.";
     py_rank = 1;
     py_dims[0] = 1;
     py_strides[0] = sizeof_dtype * numel;
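For reference, a minimal sketch of the conversion this warning recommends, assuming Paddle's dygraph API (the variable names are illustrative):

    import paddle

    x = paddle.to_tensor(3.14)  # zero-dimensional (0-D) Tensor
    # Deprecated: relies on the temporary 0-D -> 1-D numpy hack
    # val = x.numpy()[0]
    val = x.item()              # Python scalar 3.14, no numpy copy
    val2 = float(x)             # equivalent for float dtypes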
@@ -83,7 +83,7 @@ class SendRecvMeta:
         # recv stop_gradient
         stop_grad = paddle.to_tensor([0])
         paddle.distributed.recv(stop_grad, src=src_rank, group=group)
-        return shape.numpy().tolist(), dtype.item(), stop_grad.item()
+        return shape.tolist(), dtype.item(), stop_grad.item()

     def recv_meta(self, group):
         tensor_type = paddle.to_tensor([0])
@@ -514,7 +514,7 @@ def random_split(dataset, lengths, generator=None):
         )
     # TODO(@Joejiong): support Variable or Tensor type with .tolist class member function.
     # For example var.item() and var.tolist()
-    indices = paddle.randperm(sum(lengths)).numpy().tolist()
+    indices = paddle.randperm(sum(lengths)).tolist()
     return [
         Subset(dataset, indices[offset - length : offset])
         for offset, length in zip(_accumulate(lengths), lengths)
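Both of the preceding hunks rely on Tensor.tolist() converting directly to a plain Python list, making the intermediate .numpy() copy unnecessary. A minimal sketch (values are illustrative):

    import paddle

    perm = paddle.randperm(5)   # 1-D int64 Tensor, e.g. [3, 0, 4, 1, 2]
    indices = perm.tolist()     # plain Python list: [3, 0, 4, 1, 2]
    # same result as perm.numpy().tolist(), with one fewer copy
    assert indices == perm.numpy().tolist()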
@@ -140,21 +140,21 @@ def monkey_patch_math_varbase():
         ), "only one element variable can be converted to float."
         tensor = var.value().get_tensor()
         assert tensor._is_initialized(), "variable's tensor is not initialized"
-        return float(var.numpy().flatten()[0])
+        return float(var.item())

     def _long_(var):
         numel = np.prod(var.shape)
         assert numel == 1, "only one element variable can be converted to long."
         tensor = var.value().get_tensor()
         assert tensor._is_initialized(), "variable's tensor is not initialized"
-        return int(var.numpy().flatten()[0])
+        return int(var.item())

     def _int_(var):
         numel = np.prod(var.shape)
         assert numel == 1, "only one element variable can be converted to int."
         tensor = var.value().get_tensor()
         assert tensor._is_initialized(), "variable's tensor is not initialized"
-        return int(var.numpy().flatten()[0])
+        return int(var.item())

     def _len_(var):
         assert var.ndim > 0, "len() of a 0D tensor is wrong"
@@ -172,7 +172,7 @@ def monkey_patch_math_varbase():
         ), "only one element variable can be converted to python index."
         tensor = var.value().get_tensor()
         assert tensor._is_initialized(), "variable's tensor is not initialized"
-        return int(var.numpy().flatten()[0])
+        return int(var.item())

     @property
     def _ndim_(var):
@@ -379,8 +379,8 @@ def monkey_patch_varbase():
             if self.grad is None:
                 return None
             if self.grad.is_selected_rows():
-                return (np.array(self.grad.numpy()), np.array(self.grad.rows()))
-            return self.grad.numpy()
+                return (np.array(self.grad), np.array(self.grad.rows()))
+            return np.array(self.grad)
         else:
             if self._grad_ivar() is None:
                 return None
@@ -735,11 +735,11 @@ def monkey_patch_varbase():
         ), "When Variable is used as the condition of if/while , Variable can only contain one element."
         if framework.global_var._in_eager_mode_:
             assert self._is_initialized(), "tensor not initialized"
-            return bool(np.all(self.numpy() > 0))
+            return bool(self.item() > 0)
         else:
             tensor = self.value().get_tensor()
             assert tensor._is_initialized(), "tensor not initialized"
-            return bool(np.all(tensor.__array__() > 0))
+            return bool(self.item() > 0)

     def __bool__(self):
         return self.__nonzero__()
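Because the assert above guarantees exactly one element, np.all(x > 0) over the numpy copy and x.item() > 0 agree here; item() simply skips materializing an ndarray. A quick illustrative check:

    import numpy as np
    import paddle

    t = paddle.to_tensor([2.0])  # single-element Tensor
    assert bool(np.all(t.numpy() > 0)) == bool(t.item() > 0)  # both True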
@@ -1150,7 +1150,7 @@ def while_loop(cond, body, loop_vars, is_test=False, name=None):
     )

     if in_dygraph_mode():
-        now_cond = pre_cond.numpy().item()
+        now_cond = pre_cond.item()
         while now_cond:
             output_vars = body(*loop_vars)
             if not isinstance(output_vars, (list, tuple)):
@@ -1160,7 +1160,7 @@ def while_loop(cond, body, loop_vars, is_test=False, name=None):
                     "body in while_loop should return the same arity "
                     "(length and structure) and types as loop_vars"
                 )
-            now_cond = cond(*output_vars).numpy().item()
+            now_cond = cond(*output_vars).item()
             map_structure(assign_skip_lod_tensor_array, output_vars, loop_vars)
         return loop_vars
     else:
@@ -63,7 +63,7 @@ def _build_saved_state_dict(state_dict):
                 raise ValueError(
                     "The saved tensor is not initialized. If you used group sharded, please use save_group_sharded_model."
                 )
-            save_dict[key] = value.numpy()
+            save_dict[key] = np.array(value)
             name_table[key] = value.name
         else:
             save_dict[key] = value
@@ -92,7 +92,7 @@ def _load_state_dict_from_save_inference_model(model_path, config):
     # 3. construct state_dict
     load_param_dict = {}
     for var_name in persistable_var_dict:
-        load_param_dict[var_name] = persistable_var_dict[var_name].numpy()
+        load_param_dict[var_name] = np.array(persistable_var_dict[var_name])

     # if *.info exists, we can recover structured_name
     var_info_filename = str(config.params_filename) + ".info"
@@ -146,7 +146,7 @@ def _load_state_dict_from_save_params(model_path):
     # 3. construct state_dict
     load_param_dict = {}
     for var in load_var_list:
-        load_param_dict[var.name] = var.numpy()
+        load_param_dict[var.name] = np.array(var)

     return load_param_dict
@@ -291,7 +291,7 @@ def _pickle_save(obj, f, protocol):
         )

     def reduce_varbase(self):
-        data = self.numpy()
+        data = np.array(self)
         name = self.name

         return (tuple, ((name, data),))
@@ -180,9 +180,7 @@ def _load_program_scope(main=None, startup=None, scope=None):
 @static_only
 def _legacy_static_save(param_dict, model_path, protocol=2):
     def get_tensor(var):
-        if isinstance(var, (core.VarBase, core.eager.Tensor)):
-            return var.numpy()
-        elif isinstance(var, core.LoDTensor):
+        if isinstance(var, (core.VarBase, core.eager.Tensor, core.LoDTensor)):
             return np.array(var)
         return var
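np.array(tensor) goes through numpy's __array__ protocol, so it handles eager Tensors and LoDTensors uniformly and yields the same data as .numpy(), which is why the two branches above collapse into one. A minimal sketch:

    import numpy as np
    import paddle

    t = paddle.ones([2, 3])     # float32 Tensor
    a = np.array(t)             # invokes t.__array__(), same data as t.numpy()
    assert (a == t.numpy()).all() and a.dtype == np.float32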
@@ -61,7 +61,7 @@ def to_numpy(var):
         var, (Variable, fluid.core.VarBase, fluid.core.eager.Tensor)
     ), "not a variable"
     if isinstance(var, (fluid.core.VarBase, fluid.core.eager.Tensor)):
-        return var.numpy()
+        return np.array(var)
     t = global_scope().find_var(var.name).get_tensor()
     return np.array(t)
@@ -292,7 +292,7 @@ class Accuracy(Metric):
             Tensor: the accuracy of current step.
         """
         if isinstance(correct, (paddle.Tensor, paddle.fluid.core.eager.Tensor)):
-            correct = correct.numpy()
+            correct = np.array(correct)
         num_samples = np.prod(np.array(correct.shape[:-1]))
         accs = []
         for i, k in enumerate(self.topk):
@@ -420,12 +420,12 @@ class Precision(Metric):
                 The data type is 'int32' or 'int64'.
         """
         if isinstance(preds, (paddle.Tensor, paddle.fluid.core.eager.Tensor)):
-            preds = preds.numpy()
+            preds = np.array(preds)
         elif not _is_numpy_(preds):
             raise ValueError("The 'preds' must be a numpy ndarray or Tensor.")

         if isinstance(labels, (paddle.Tensor, paddle.fluid.core.eager.Tensor)):
-            labels = labels.numpy()
+            labels = np.array(labels)
         elif not _is_numpy_(labels):
             raise ValueError("The 'labels' must be a numpy ndarray or Tensor.")
@@ -553,12 +553,12 @@ class Recall(Metric):
                 Shape: [batch_size, 1], Dtype: 'int32' or 'int64'.
         """
         if isinstance(preds, (paddle.Tensor, paddle.fluid.core.eager.Tensor)):
-            preds = preds.numpy()
+            preds = np.array(preds)
         elif not _is_numpy_(preds):
             raise ValueError("The 'preds' must be a numpy ndarray or Tensor.")

         if isinstance(labels, (paddle.Tensor, paddle.fluid.core.eager.Tensor)):
-            labels = labels.numpy()
+            labels = np.array(labels)
         elif not _is_numpy_(labels):
             raise ValueError("The 'labels' must be a numpy ndarray or Tensor.")
@@ -705,12 +705,12 @@ class Auc(Metric):
                 representing the label of the instance i.
         """
         if isinstance(labels, (paddle.Tensor, paddle.fluid.core.eager.Tensor)):
-            labels = labels.numpy()
+            labels = np.array(labels)
         elif not _is_numpy_(labels):
             raise ValueError("The 'labels' must be a numpy ndarray or Tensor.")

         if isinstance(preds, (paddle.Tensor, paddle.fluid.core.eager.Tensor)):
-            preds = preds.numpy()
+            preds = np.array(preds)
         elif not _is_numpy_(preds):
             raise ValueError("The 'preds' must be a numpy ndarray or Tensor.")
@@ -712,7 +712,7 @@ def _dynamic_decode_imperative(
     step_idx = 0
     step_idx_tensor = paddle.full(shape=[1], fill_value=step_idx, dtype="int64")

-    while cond.numpy():
+    while cond.item():
         (step_outputs, next_states, next_inputs, next_finished) = decoder.step(
             step_idx_tensor, inputs, states, **kwargs
         )
@@ -490,7 +490,7 @@ def interpolate(
     else:
         if in_dynamic_mode():
             if isinstance(out_shape, Variable):
-                out_shape = list(out_shape.numpy())
+                out_shape = list(out_shape.numpy(False))
             else:
                 out_shape = list(out_shape)
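The False argument here appears to correspond to the set_to_1d flag that this PR series threads through tensor_method_numpy (see the first hunk): with it, numpy() returns the tensor's true shape rather than promoting 0-D results to 1-D. A hedged sketch of the distinction (the flag's semantics are inferred from the C++ hunk above):

    import paddle

    s = paddle.to_tensor(7)     # 0-D Tensor
    a = s.numpy()               # currently hacked to shape (1,), with a warning
    b = s.numpy(False)          # assumed set_to_1d=False: true shape ()
    # For a genuinely 1-D tensor like out_shape, list(...) behaves
    # identically either way, but without the deprecation warning.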
@@ -706,7 +706,7 @@ def _unpool_output_size(x, kernel_size, stride, padding, output_size):
     else:
         for i, var in enumerate(output_size):
             if isinstance(var, Variable):
-                output_size[i] = var.numpy().item()
+                output_size[i] = var.item()

     if len(output_size) == len(kernel_size) + 2:
         output_size = output_size[2:]
@@ -1609,7 +1609,7 @@ def adaptive_avg_pool2d(x, output_size, data_format='NCHW', name=None):
     if in_dygraph_mode():
         output_size = [
-            item.numpy().item(0) if isinstance(item, Variable) else item
+            item.item(0) if isinstance(item, Variable) else item
             for item in output_size
         ]

         # output_size support Variable in static graph mode
@@ -382,9 +382,9 @@ class Optimizer:
                 load_para = state_dict[var_tmp.name]

                 if isinstance(load_para, Variable):
-                    load_para_np = load_para.numpy()
+                    load_para_np = np.array(load_para)
                 elif isinstance(load_para, core.VarBase):
-                    load_para_np = load_para.numpy()
+                    load_para_np = np.array(load_para)
                 elif isinstance(load_para, np.ndarray):
                     load_para_np = load_para
                 else:
@@ -54,13 +54,13 @@ def combine_abs_max_and_hist(
         return origin_max, origin_hist
     elif origin_max == 0.0:
         new_hist, _ = np.histogram(
-            paddle.abs(tensor).numpy(), range=(0, new_max), bins=bins
+            paddle.abs(tensor).numpy(False), range=(0, new_max), bins=bins
         )
         new_hist = new_hist.astype(np.float32)
         return new_max, new_hist
     elif new_max <= origin_max:
         new_hist, _ = np.histogram(
-            paddle.abs(tensor).numpy(), range=(0, origin_max), bins=bins
+            paddle.abs(tensor).numpy(False), range=(0, origin_max), bins=bins
         )
         new_hist = new_hist.astype(np.float32)
         new_hist += origin_hist
@@ -84,7 +84,7 @@ def combine_abs_max_and_hist(
         sampled_hist = sampled_hist.astype(np.float32)
         new_hist, _ = np.histogram(
-            paddle.abs(tensor).numpy(), range=(0, new_max), bins=bins
+            paddle.abs(tensor).numpy(False), range=(0, new_max), bins=bins
         )
         new_hist = new_hist.astype(np.float32)
         new_hist += sampled_hist
@@ -189,7 +189,7 @@ class BaseHistQuantizer(BaseQuantizer, metaclass=abc.ABCMeta):
                 self.hists.append(None)
             else:
                 hist, _ = np.histogram(
-                    paddle.abs(tensor).numpy(),
+                    paddle.abs(tensor).numpy(False),
                     range=(0.0, abs_max_vals[idx]),
                     bins=self.bins,
                 )
@@ -76,7 +76,7 @@ def accuracy(input, label, k=1, correct=None, total=None):
         if total is None:
             total = _varbase_creator(dtype="int32")

-        _k = k.numpy().item(0) if isinstance(k, Variable) else k
+        _k = k.item(0) if isinstance(k, Variable) else k
         topk_out, topk_indices = _legacy_C_ops.top_k_v2(
             input, 'k', _k, 'sorted', False
         )
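Tensor.item(0) mirrors numpy.ndarray.item(0): it reads the element at flat index 0 as a Python scalar, so the intermediate .numpy() call is redundant. A minimal sketch (values are illustrative):

    import paddle

    k = paddle.to_tensor([5])
    # flat-index access, returns a Python int either way
    assert k.item(0) == k.numpy().item(0) == 5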
@@ -119,7 +119,7 @@ def array_read(array, i):
         assert i.shape == [
             1
         ], "The shape of index 'i' should be [1] in dygraph mode"
-        i = i.numpy().item(0)
+        i = i.item(0)
         return array[i]
     else:
         check_variable_and_dtype(i, 'i', ['int64'], 'array_read')
@@ -179,7 +179,7 @@ def array_write(x, i, array=None):
         assert i.shape == [
             1
         ], "The shape of index 'i' should be [1] in dygraph mode"
-        i = i.numpy().item(0)
+        i = i.item(0)
         if array is None:
             array = create_array(x.dtype)
         assert isinstance(
@@ -334,7 +334,7 @@ def slice(input, axes, starts, ends):
                 for item in starts
             ]
         elif isinstance(starts, tmp_tensor_type):
-            tensor_t = starts.numpy()
+            tensor_t = starts.numpy(False)
             starts = [ele for ele in tensor_t]
             infer_flags = [-1 for i in range(len(axes))]
@@ -344,7 +344,7 @@ def slice(input, axes, starts, ends):
                 for item in ends
             ]
         elif isinstance(ends, tmp_tensor_type):
-            tensor_t = ends.numpy()
+            tensor_t = ends.numpy(False)
             ends = [ele for ele in tensor_t]
             infer_flags = [-1 for i in range(len(axes))]
@@ -456,12 +456,12 @@ def convert_shape_to_list(shape):
     if isinstance(shape, (list, tuple)):
         shape = list(
             map(
-                lambda x: x.numpy().flat[0] if isinstance(x, Variable) else x,
+                lambda x: x.item(0) if isinstance(x, Variable) else x,
                 shape,
             )
         )
     else:
-        shape = shape.numpy().astype(int).tolist()
+        shape = shape.astype(int).tolist()
     return shape