Unverified · Commit d95eaa17 authored by zhouweiwei2014, committed by GitHub

fix Tensor.item to np.array(Tensor).item (#52483)

Parent commit: ea8aa432
......@@ -143,7 +143,7 @@ static PyObject* tensor_method_numpy(TensorObject* self,
"correct and will be "
"removed in future. For Tensor contain only one element, Please "
"modify "
" 'Tensor.numpy()[0]' to 'Tensor.item()' as soon as "
" 'Tensor.numpy()[0]' to 'float(Tensor)' as soon as "
"possible, "
"otherwise 'Tensor.numpy()[0]' will raise error in future.";
py_rank = 1;
......
......@@ -139,21 +139,21 @@ def monkey_patch_math_varbase():
), "only one element variable can be converted to float."
tensor = var.value().get_tensor()
assert tensor._is_initialized(), "variable's tensor is not initialized"
return float(var.item())
return float(np.array(var).flatten()[0])
def _long_(var):
numel = np.prod(var.shape)
assert numel == 1, "only one element variable can be converted to long."
tensor = var.value().get_tensor()
assert tensor._is_initialized(), "variable's tensor is not initialized"
return int(var.item())
return int(np.array(var).flatten()[0])
def _int_(var):
numel = np.prod(var.shape)
assert numel == 1, "only one element variable can be converted to int."
tensor = var.value().get_tensor()
assert tensor._is_initialized(), "variable's tensor is not initialized"
return int(var.item())
return int(np.array(var).flatten()[0])
def _len_(var):
assert var.ndim > 0, "len() of a 0D tensor is wrong"
......@@ -171,7 +171,7 @@ def monkey_patch_math_varbase():
), "only one element variable can be converted to python index."
tensor = var.value().get_tensor()
assert tensor._is_initialized(), "variable's tensor is not initialized"
return int(var.item())
return int(np.array(var).flatten()[0])
@property
def _ndim_(var):
......
......@@ -718,11 +718,11 @@ def monkey_patch_varbase():
), "When Variable is used as the condition of if/while , Variable can only contain one element."
if framework.global_var._in_eager_mode_:
assert self._is_initialized(), "tensor not initialized"
return bool(self.item() > 0)
return bool(np.array(self) > 0)
else:
tensor = self.value().get_tensor()
assert tensor._is_initialized(), "tensor not initialized"
return bool(self.item() > 0)
return bool(np.array(tensor) > 0)
def __bool__(self):
return self.__nonzero__()
......
......@@ -712,7 +712,7 @@ def _dynamic_decode_imperative(
step_idx = 0
step_idx_tensor = paddle.full(shape=[1], fill_value=step_idx, dtype="int64")
while cond.item():
while np.array(cond).item():
(step_outputs, next_states, next_inputs, next_finished) = decoder.step(
step_idx_tensor, inputs, states, **kwargs
)
......
......@@ -12,6 +12,8 @@
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
from paddle import _C_ops, _legacy_C_ops, in_dynamic_mode
from paddle.fluid.framework import Variable, in_dygraph_mode
......@@ -706,7 +708,7 @@ def _unpool_output_size(x, kernel_size, stride, padding, output_size):
else:
for i, var in enumerate(output_size):
if isinstance(var, Variable):
output_size[i] = var.item()
output_size[i] = np.array(var).item()
if len(output_size) == len(kernel_size) + 2:
output_size = output_size[2:]
......@@ -1609,7 +1611,7 @@ def adaptive_avg_pool2d(x, output_size, data_format='NCHW', name=None):
if in_dygraph_mode():
output_size = [
item.item(0) if isinstance(item, Variable) else item
np.array(item).item(0) if isinstance(item, Variable) else item
for item in output_size
]
# output_size support Variable in static graph mode
......
......@@ -14,6 +14,8 @@
"""
All layers just related to metric.
"""
import numpy as np
import paddle
from paddle import _legacy_C_ops
from paddle.fluid.data_feeder import check_variable_and_dtype
......@@ -76,7 +78,7 @@ def accuracy(input, label, k=1, correct=None, total=None):
if total is None:
total = _varbase_creator(dtype="int32")
_k = k.item(0) if isinstance(k, Variable) else k
_k = np.array(k).item(0) if isinstance(k, Variable) else k
topk_out, topk_indices = _legacy_C_ops.top_k_v2(
input, 'k', _k, 'sorted', False
)
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Register to comment