diff --git a/paddle/fluid/pybind/eager_method.cc b/paddle/fluid/pybind/eager_method.cc
index 4b72e9fb81e6575ac628edca80335994097d9fdb..1913973b087c571eafbe9c5f8e4c65e66050d8de 100644
--- a/paddle/fluid/pybind/eager_method.cc
+++ b/paddle/fluid/pybind/eager_method.cc
@@ -141,10 +141,11 @@ static PyObject* tensor_method_numpy(TensorObject* self,
            "order to avoid this problem, "
            "0D Tensor will be changed to 1D numpy currently, but it's not "
            "correct and will be "
-           "removed in future. Please modify "
-           " 'Tensor.numpy()[0]' to 'float(Tensor)' as soon as "
+           "removed in the future. For a Tensor containing only one "
+           "element, please modify "
+           " 'Tensor.numpy()[0]' to 'Tensor.item()' as soon as "
            "possible, "
-           "otherwise 'Tensor.numpy()[0]' will raise error";
+           "otherwise 'Tensor.numpy()[0]' will raise an error in the future.";
     py_rank = 1;
     py_dims[0] = 1;
     py_strides[0] = sizeof_dtype * numel;
diff --git a/python/paddle/distributed/fleet/meta_parallel/pp_utils/p2p_communication.py b/python/paddle/distributed/fleet/meta_parallel/pp_utils/p2p_communication.py
index d00a1e18cdb18b477a621e97b96b957a8415d352..81385f9a0a4a10e9edd6636e6a14b57a49ad12e4 100644
--- a/python/paddle/distributed/fleet/meta_parallel/pp_utils/p2p_communication.py
+++ b/python/paddle/distributed/fleet/meta_parallel/pp_utils/p2p_communication.py
@@ -83,7 +83,7 @@ class SendRecvMeta:
         # recv stop_gradient
         stop_grad = paddle.to_tensor([0])
         paddle.distributed.recv(stop_grad, src=src_rank, group=group)
-        return shape.numpy().tolist(), dtype.item(), stop_grad.item()
+        return shape.tolist(), dtype.item(), stop_grad.item()

     def recv_meta(self, group):
         tensor_type = paddle.to_tensor([0])
diff --git a/python/paddle/fluid/dataloader/dataset.py b/python/paddle/fluid/dataloader/dataset.py
index 6d62cd9fe0a0374261bda5dc0ace1f9b6fd2c21f..3701da0b33ec71efc18bd630ac64b168834d724e 100755
--- a/python/paddle/fluid/dataloader/dataset.py
+++ b/python/paddle/fluid/dataloader/dataset.py
@@ -514,7 +514,7 @@ def random_split(dataset, lengths, generator=None):
     )
     # TODO(@Joejiong): support Variable or Tensor type with .tolist class member function.
     # For example var.item() and var.tolist()
-    indices = paddle.randperm(sum(lengths)).numpy().tolist()
+    indices = paddle.randperm(sum(lengths)).tolist()
     return [
         Subset(dataset, indices[offset - length : offset])
         for offset, length in zip(_accumulate(lengths), lengths)
diff --git a/python/paddle/fluid/dygraph/math_op_patch.py b/python/paddle/fluid/dygraph/math_op_patch.py
index 74a174674f64c028a34b7f6a8c66ffbf6712a86e..a0d99724cbd69d79baaeb0e613a8668bd4b0e127 100644
--- a/python/paddle/fluid/dygraph/math_op_patch.py
+++ b/python/paddle/fluid/dygraph/math_op_patch.py
@@ -140,21 +140,21 @@ def monkey_patch_math_varbase():
         ), "only one element variable can be converted to float."
         tensor = var.value().get_tensor()
         assert tensor._is_initialized(), "variable's tensor is not initialized"
-        return float(var.numpy().flatten()[0])
+        return float(var.item())

     def _long_(var):
         numel = np.prod(var.shape)
         assert numel == 1, "only one element variable can be converted to long."
         tensor = var.value().get_tensor()
         assert tensor._is_initialized(), "variable's tensor is not initialized"
-        return int(var.numpy().flatten()[0])
+        return int(var.item())

     def _int_(var):
         numel = np.prod(var.shape)
         assert numel == 1, "only one element variable can be converted to int."
         tensor = var.value().get_tensor()
         assert tensor._is_initialized(), "variable's tensor is not initialized"
-        return int(var.numpy().flatten()[0])
+        return int(var.item())

     def _len_(var):
         assert var.ndim > 0, "len() of a 0D tensor is wrong"
@@ -172,7 +172,7 @@ def monkey_patch_math_varbase():
         ), "only one element variable can be converted to python index."
         tensor = var.value().get_tensor()
         assert tensor._is_initialized(), "variable's tensor is not initialized"
-        return int(var.numpy().flatten()[0])
+        return int(var.item())

     @property
     def _ndim_(var):
diff --git a/python/paddle/fluid/dygraph/varbase_patch_methods.py b/python/paddle/fluid/dygraph/varbase_patch_methods.py
index 9c245d086b2111e7b6ca836e5964bc2663a35ba7..7d1d5005b22d40235f62e5c83393ef560e17762d 100644
--- a/python/paddle/fluid/dygraph/varbase_patch_methods.py
+++ b/python/paddle/fluid/dygraph/varbase_patch_methods.py
@@ -379,8 +379,8 @@ def monkey_patch_varbase():
             if self.grad is None:
                 return None
             if self.grad.is_selected_rows():
-                return (np.array(self.grad.numpy()), np.array(self.grad.rows()))
-            return self.grad.numpy()
+                return (np.array(self.grad), np.array(self.grad.rows()))
+            return np.array(self.grad)
         else:
             if self._grad_ivar() is None:
                 return None
@@ -735,11 +735,11 @@ def monkey_patch_varbase():
        ), "When Variable is used as the condition of if/while , Variable can only contain one element."
        if framework.global_var._in_eager_mode_:
            assert self._is_initialized(), "tensor not initialized"
-           return bool(np.all(self.numpy() > 0))
+           return bool(self.item() > 0)
        else:
            tensor = self.value().get_tensor()
            assert tensor._is_initialized(), "tensor not initialized"
-           return bool(np.all(tensor.__array__() > 0))
+           return bool(self.item() > 0)

    def __bool__(self):
        return self.__nonzero__()
diff --git a/python/paddle/fluid/layers/control_flow.py b/python/paddle/fluid/layers/control_flow.py
index b1f9a03d0d9231bb55289c60fc298ee3590c5a21..a78a649e781bd2d2bcf5b11043de209a30573c29 100755
--- a/python/paddle/fluid/layers/control_flow.py
+++ b/python/paddle/fluid/layers/control_flow.py
@@ -1150,7 +1150,7 @@ def while_loop(cond, body, loop_vars, is_test=False, name=None):
         )

     if in_dygraph_mode():
-        now_cond = pre_cond.numpy().item()
+        now_cond = pre_cond.item()
         while now_cond:
             output_vars = body(*loop_vars)
             if not isinstance(output_vars, (list, tuple)):
@@ -1160,7 +1160,7 @@ def while_loop(cond, body, loop_vars, is_test=False, name=None):
                     "body in while_loop should return the same arity "
                     "(length and structure) and types as loop_vars"
                 )
-            now_cond = cond(*output_vars).numpy().item()
+            now_cond = cond(*output_vars).item()
             map_structure(assign_skip_lod_tensor_array, output_vars, loop_vars)
         return loop_vars
     else:
diff --git a/python/paddle/framework/io.py b/python/paddle/framework/io.py
index a003d7fcf38d568f4fcd1a71f14ca1a395858212..e5c0380b68cca836528d6d0c4eedfb2aa33b3b69 100644
--- a/python/paddle/framework/io.py
+++ b/python/paddle/framework/io.py
@@ -63,7 +63,7 @@ def _build_saved_state_dict(state_dict):
                 raise ValueError(
                     "The saved tensor is not initialized. If you used group sharded, please use save_group_sharded_model."
                 )
-            save_dict[key] = value.numpy()
+            save_dict[key] = np.array(value)
             name_table[key] = value.name
         else:
             save_dict[key] = value
@@ -92,7 +92,7 @@ def _load_state_dict_from_save_inference_model(model_path, config):
     # 3. construct state_dict
     load_param_dict = {}
     for var_name in persistable_var_dict:
-        load_param_dict[var_name] = persistable_var_dict[var_name].numpy()
+        load_param_dict[var_name] = np.array(persistable_var_dict[var_name])

     # if *.info exists, we can recover structured_name
     var_info_filename = str(config.params_filename) + ".info"
@@ -146,7 +146,7 @@ def _load_state_dict_from_save_params(model_path):
     # 3. construct state_dict
     load_param_dict = {}
     for var in load_var_list:
-        load_param_dict[var.name] = var.numpy()
+        load_param_dict[var.name] = np.array(var)

     return load_param_dict

@@ -291,7 +291,7 @@ def _pickle_save(obj, f, protocol):
     )

     def reduce_varbase(self):
-        data = self.numpy()
+        data = np.array(self)
         name = self.name
         return (tuple, ((name, data),))
diff --git a/python/paddle/framework/io_utils.py b/python/paddle/framework/io_utils.py
index 752b938e7f4cc762ec843926fcaf78178fa592a6..b067a31e339dbee89bb992437a859bef1cdc8d88 100644
--- a/python/paddle/framework/io_utils.py
+++ b/python/paddle/framework/io_utils.py
@@ -180,9 +180,7 @@ def _load_program_scope(main=None, startup=None, scope=None):
 @static_only
 def _legacy_static_save(param_dict, model_path, protocol=2):
     def get_tensor(var):
-        if isinstance(var, (core.VarBase, core.eager.Tensor)):
-            return var.numpy()
-        elif isinstance(var, core.LoDTensor):
+        if isinstance(var, (core.VarBase, core.eager.Tensor, core.LoDTensor)):
             return np.array(var)
         return var

diff --git a/python/paddle/hapi/model.py b/python/paddle/hapi/model.py
index a4537fd62aa7198edc0d990490ea392af2475e83..e25f88584ac37366769b0914c31f8e941e72e0c9 100644
--- a/python/paddle/hapi/model.py
+++ b/python/paddle/hapi/model.py
@@ -61,7 +61,7 @@ def to_numpy(var):
         var, (Variable, fluid.core.VarBase, fluid.core.eager.Tensor)
     ), "not a variable"
     if isinstance(var, (fluid.core.VarBase, fluid.core.eager.Tensor)):
-        return var.numpy()
+        return np.array(var)
     t = global_scope().find_var(var.name).get_tensor()
     return np.array(t)

diff --git a/python/paddle/metric/metrics.py b/python/paddle/metric/metrics.py
index bba9082308d97b82e94241cd36792156ad7efa12..16ef97a3f044b3a4ab86842b0e6df7ef189e6a19 100644
--- a/python/paddle/metric/metrics.py
+++ b/python/paddle/metric/metrics.py
@@ -292,7 +292,7 @@ class Accuracy(Metric):
             Tensor: the accuracy of current step.
         """
         if isinstance(correct, (paddle.Tensor, paddle.fluid.core.eager.Tensor)):
-            correct = correct.numpy()
+            correct = np.array(correct)
         num_samples = np.prod(np.array(correct.shape[:-1]))
         accs = []
         for i, k in enumerate(self.topk):
@@ -420,12 +420,12 @@ class Precision(Metric):
                 The data type is 'int32' or 'int64'.
         """
         if isinstance(preds, (paddle.Tensor, paddle.fluid.core.eager.Tensor)):
-            preds = preds.numpy()
+            preds = np.array(preds)
         elif not _is_numpy_(preds):
             raise ValueError("The 'preds' must be a numpy ndarray or Tensor.")

         if isinstance(labels, (paddle.Tensor, paddle.fluid.core.eager.Tensor)):
-            labels = labels.numpy()
+            labels = np.array(labels)
         elif not _is_numpy_(labels):
             raise ValueError("The 'labels' must be a numpy ndarray or Tensor.")

@@ -553,12 +553,12 @@ class Recall(Metric):
                 Shape: [batch_size, 1], Dtype: 'int32' or 'int64'.
         """
         if isinstance(preds, (paddle.Tensor, paddle.fluid.core.eager.Tensor)):
-            preds = preds.numpy()
+            preds = np.array(preds)
         elif not _is_numpy_(preds):
             raise ValueError("The 'preds' must be a numpy ndarray or Tensor.")

         if isinstance(labels, (paddle.Tensor, paddle.fluid.core.eager.Tensor)):
-            labels = labels.numpy()
+            labels = np.array(labels)
         elif not _is_numpy_(labels):
             raise ValueError("The 'labels' must be a numpy ndarray or Tensor.")

@@ -705,12 +705,12 @@ class Auc(Metric):
                 representing the label of the instance i.
         """
         if isinstance(labels, (paddle.Tensor, paddle.fluid.core.eager.Tensor)):
-            labels = labels.numpy()
+            labels = np.array(labels)
         elif not _is_numpy_(labels):
             raise ValueError("The 'labels' must be a numpy ndarray or Tensor.")

         if isinstance(preds, (paddle.Tensor, paddle.fluid.core.eager.Tensor)):
-            preds = preds.numpy()
+            preds = np.array(preds)
         elif not _is_numpy_(preds):
             raise ValueError("The 'preds' must be a numpy ndarray or Tensor.")

diff --git a/python/paddle/nn/decode.py b/python/paddle/nn/decode.py
index d48ebd68da7ec823bc73a3a825a1ccf8939b02fe..b5e1e287ecf6faa53d28cbdecdaed838dfb2bd42 100644
--- a/python/paddle/nn/decode.py
+++ b/python/paddle/nn/decode.py
@@ -712,7 +712,7 @@ def _dynamic_decode_imperative(
     step_idx = 0
     step_idx_tensor = paddle.full(shape=[1], fill_value=step_idx, dtype="int64")

-    while cond.numpy():
+    while cond.item():
         (step_outputs, next_states, next_inputs, next_finished) = decoder.step(
             step_idx_tensor, inputs, states, **kwargs
         )
diff --git a/python/paddle/nn/functional/common.py b/python/paddle/nn/functional/common.py
index ea51e214826191dae61cf6d880890dc35f56502c..c23c6985079b00fbcef8dc26adb2b521209f8e2c 100644
--- a/python/paddle/nn/functional/common.py
+++ b/python/paddle/nn/functional/common.py
@@ -490,7 +490,7 @@ def interpolate(
         else:
             if in_dynamic_mode():
                 if isinstance(out_shape, Variable):
-                    out_shape = list(out_shape.numpy())
+                    out_shape = list(out_shape.numpy(False))
                 else:
                     out_shape = list(out_shape)

diff --git a/python/paddle/nn/functional/pooling.py b/python/paddle/nn/functional/pooling.py
index e0c127aacb0d333e5ec647967dca2cb2d530f3e1..99d72ddb48086fd5ac28faf992c6ac34466c7bcc 100755
--- a/python/paddle/nn/functional/pooling.py
+++ b/python/paddle/nn/functional/pooling.py
@@ -706,7 +706,7 @@ def _unpool_output_size(x, kernel_size, stride, padding, output_size):
     else:
         for i, var in enumerate(output_size):
             if isinstance(var, Variable):
-                output_size[i] = var.numpy().item()
+                output_size[i] = var.item()

         if len(output_size) == len(kernel_size) + 2:
             output_size = output_size[2:]
@@ -1609,7 +1609,7 @@ def adaptive_avg_pool2d(x, output_size, data_format='NCHW', name=None):

     if in_dygraph_mode():
         output_size = [
-            item.numpy().item(0) if isinstance(item, Variable) else item
+            item.item(0) if isinstance(item, Variable) else item
             for item in output_size
         ]
         # output_size support Variable in static graph mode
diff --git a/python/paddle/optimizer/optimizer.py b/python/paddle/optimizer/optimizer.py
index d7bfc0b22963205fa201efa63529b97128328ab0..7b2f456473ac3182b777e5bfae213c82829900ec 100644
--- a/python/paddle/optimizer/optimizer.py
+++ b/python/paddle/optimizer/optimizer.py
@@ -382,9 +382,9 @@ class Optimizer:
                 load_para = state_dict[var_tmp.name]

                 if isinstance(load_para, Variable):
-                    load_para_np = load_para.numpy()
+                    load_para_np = np.array(load_para)
                 elif isinstance(load_para, core.VarBase):
-                    load_para_np = load_para.numpy()
+                    load_para_np = np.array(load_para)
                 elif isinstance(load_para, np.ndarray):
                     load_para_np = load_para
                 else:
diff --git a/python/paddle/quantization/imperative/ptq_quantizer.py b/python/paddle/quantization/imperative/ptq_quantizer.py
index 97d61ebbf0f34d5b594c770d6bf584bcfbc6570f..00891ffa9f4116c8427ad8e7b0c5a5da8073765c 100644
--- a/python/paddle/quantization/imperative/ptq_quantizer.py
+++ b/python/paddle/quantization/imperative/ptq_quantizer.py
@@ -54,13 +54,13 @@ def combine_abs_max_and_hist(
         return origin_max, origin_hist
     elif origin_max == 0.0:
         new_hist, _ = np.histogram(
-            paddle.abs(tensor).numpy(), range=(0, new_max), bins=bins
+            paddle.abs(tensor).numpy(False), range=(0, new_max), bins=bins
         )
         new_hist = new_hist.astype(np.float32)
         return new_max, new_hist
     elif new_max <= origin_max:
         new_hist, _ = np.histogram(
-            paddle.abs(tensor).numpy(), range=(0, origin_max), bins=bins
+            paddle.abs(tensor).numpy(False), range=(0, origin_max), bins=bins
         )
         new_hist = new_hist.astype(np.float32)
         new_hist += origin_hist
@@ -84,7 +84,7 @@ def combine_abs_max_and_hist(
         sampled_hist = sampled_hist.astype(np.float32)

         new_hist, _ = np.histogram(
-            paddle.abs(tensor).numpy(), range=(0, new_max), bins=bins
+            paddle.abs(tensor).numpy(False), range=(0, new_max), bins=bins
         )
         new_hist = new_hist.astype(np.float32)
         new_hist += sampled_hist
@@ -189,7 +189,7 @@ class BaseHistQuantizer(BaseQuantizer, metaclass=abc.ABCMeta):
                 self.hists.append(None)
             else:
                 hist, _ = np.histogram(
-                    paddle.abs(tensor).numpy(),
+                    paddle.abs(tensor).numpy(False),
                     range=(0.0, abs_max_vals[idx]),
                     bins=self.bins,
                 )
diff --git a/python/paddle/static/nn/metric.py b/python/paddle/static/nn/metric.py
index b2d10abaf87b5513887fd3f2fa2cfc49d1c5cd3e..cec17f0df6eeff35a832d3409266cf1b7af8cb06 100644
--- a/python/paddle/static/nn/metric.py
+++ b/python/paddle/static/nn/metric.py
@@ -76,7 +76,7 @@ def accuracy(input, label, k=1, correct=None, total=None):
         if total is None:
             total = _varbase_creator(dtype="int32")

-        _k = k.numpy().item(0) if isinstance(k, Variable) else k
+        _k = k.item(0) if isinstance(k, Variable) else k
         topk_out, topk_indices = _legacy_C_ops.top_k_v2(
             input, 'k', _k, 'sorted', False
         )
diff --git a/python/paddle/tensor/array.py b/python/paddle/tensor/array.py
index 84fc94b5eec85b91c4d97bc14677ee70473c60c2..f3a9930c7068eb307200f4140ae8317ec1bcd29e 100644
--- a/python/paddle/tensor/array.py
+++ b/python/paddle/tensor/array.py
@@ -119,7 +119,7 @@ def array_read(array, i):
         assert i.shape == [
             1
         ], "The shape of index 'i' should be [1] in dygraph mode"
-        i = i.numpy().item(0)
+        i = i.item(0)
         return array[i]
     else:
         check_variable_and_dtype(i, 'i', ['int64'], 'array_read')
@@ -179,7 +179,7 @@ def array_write(x, i, array=None):
         assert i.shape == [
             1
         ], "The shape of index 'i' should be [1] in dygraph mode"
-        i = i.numpy().item(0)
+        i = i.item(0)
         if array is None:
             array = create_array(x.dtype)
         assert isinstance(
diff --git a/python/paddle/tensor/manipulation.py b/python/paddle/tensor/manipulation.py
index 0b9d998bad72194fcfb4b1eb64205764391143f7..33c094b2a8c94f478282266efccf18a8908cf205 100644
--- a/python/paddle/tensor/manipulation.py
+++ b/python/paddle/tensor/manipulation.py
@@ -334,7 +334,7 @@ def slice(input, axes, starts, ends):
                 for item in starts
             ]
         elif isinstance(starts, tmp_tensor_type):
-            tensor_t = starts.numpy()
+            tensor_t = starts.numpy(False)
             starts = [ele for ele in tensor_t]
             infer_flags = [-1 for i in range(len(axes))]

@@ -344,7 +344,7 @@ def slice(input, axes, starts, ends):
                 for item in ends
             ]
         elif isinstance(ends, tmp_tensor_type):
-            tensor_t = ends.numpy()
+            tensor_t = ends.numpy(False)
             ends = [ele for ele in tensor_t]
             infer_flags = [-1 for i in range(len(axes))]
diff --git a/python/paddle/utils/layers_utils.py b/python/paddle/utils/layers_utils.py
index b1fb8460237b4d0483c1321817bbdaf4103cbd1a..3c728211e4a735584fc689953e0a7e5fcdb41e38 100644
--- a/python/paddle/utils/layers_utils.py
+++ b/python/paddle/utils/layers_utils.py
@@ -456,12 +456,12 @@ def convert_shape_to_list(shape):
     if isinstance(shape, (list, tuple)):
         shape = list(
             map(
-                lambda x: x.numpy().flat[0] if isinstance(x, Variable) else x,
+                lambda x: x.item(0) if isinstance(x, Variable) else x,
                 shape,
             )
         )
     else:
-        shape = shape.numpy().astype(int).tolist()
+        shape = shape.astype(int).tolist()
     return shape
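
Note on the recurring pattern (reviewer context, not part of the patch): every hunk above applies the same migration. Scalar reads that used to detour through NumPy ('Tensor.numpy()[0]', '.numpy().item()', '.numpy().flat[0]') become 'Tensor.item()', and whole-tensor conversions become 'np.array(tensor)' or 'tensor.tolist()'; the '.numpy(False)' call sites keep an ndarray where one is genuinely required, e.g. as input to 'np.histogram' or for list iteration. A minimal before/after sketch of these idioms, assuming only a Paddle build that includes these changes:

    # Illustrative only; mirrors the idioms this patch standardizes on.
    import numpy as np
    import paddle

    t = paddle.to_tensor([3.14])       # a one-element tensor

    # Before: read a scalar through an intermediate numpy array.
    x_old = t.numpy()[0]               # breaks once 0D tensors stop mapping to 1D arrays

    # After: read the scalar directly from the tensor.
    x_new = t.item()                   # -> 3.14, a Python float

    # Whole-tensor conversions drop the explicit .numpy() the same way:
    arr = np.array(t)                  # ndarray copy, as in the io.py and metrics.py hunks
    lst = paddle.randperm(5).tolist()  # Python list, as in the dataset.py hunk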