Unverified commit d95eaa17, authored by zhouweiwei2014, committed by GitHub

fix Tensor.item to np.array(Tensor).item (#52483)

Parent: ea8aa432
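The commit mechanically replaces `Tensor.item()` with a NumPy round-trip wherever a Python scalar is pulled out of a one-element tensor. A minimal sketch of the two extraction patterns used throughout the hunks below (the tensor value is illustrative, not taken from the commit):

```python
import numpy as np
import paddle

t = paddle.to_tensor([3.5])  # a one-element tensor

# Old pattern, removed by this commit:
#     value = t.item()
# New pattern: convert to a NumPy array first, then extract the scalar.
value = np.array(t).item()        # Python scalar via ndarray.item()
first = np.array(t).flatten()[0]  # first element, as in _float_/_int_/_long_ below
print(float(value), int(first))   # 3.5 3
```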
@@ -143,7 +143,7 @@ static PyObject* tensor_method_numpy(TensorObject* self,
         "correct and will be "
         "removed in future. For Tensor contain only one element, Please "
         "modify "
-        " 'Tensor.numpy()[0]' to 'Tensor.item()' as soon as "
+        " 'Tensor.numpy()[0]' to 'float(Tensor)' as soon as "
         "possible, "
         "otherwise 'Tensor.numpy()[0]' will raise error in future.";
     py_rank = 1;
@@ -139,21 +139,21 @@ def monkey_patch_math_varbase():
         ), "only one element variable can be converted to float."
         tensor = var.value().get_tensor()
         assert tensor._is_initialized(), "variable's tensor is not initialized"
-        return float(var.item())
+        return float(np.array(var).flatten()[0])

     def _long_(var):
         numel = np.prod(var.shape)
         assert numel == 1, "only one element variable can be converted to long."
         tensor = var.value().get_tensor()
         assert tensor._is_initialized(), "variable's tensor is not initialized"
-        return int(var.item())
+        return int(np.array(var).flatten()[0])

     def _int_(var):
         numel = np.prod(var.shape)
         assert numel == 1, "only one element variable can be converted to int."
         tensor = var.value().get_tensor()
         assert tensor._is_initialized(), "variable's tensor is not initialized"
-        return int(var.item())
+        return int(np.array(var).flatten()[0])

     def _len_(var):
         assert var.ndim > 0, "len() of a 0D tensor is wrong"

@@ -171,7 +171,7 @@ def monkey_patch_math_varbase():
         ), "only one element variable can be converted to python index."
         tensor = var.value().get_tensor()
         assert tensor._is_initialized(), "variable's tensor is not initialized"
-        return int(var.item())
+        return int(np.array(var).flatten()[0])

     @property
     def _ndim_(var):
@@ -718,11 +718,11 @@ def monkey_patch_varbase():
         ), "When Variable is used as the condition of if/while , Variable can only contain one element."
         if framework.global_var._in_eager_mode_:
             assert self._is_initialized(), "tensor not initialized"
-            return bool(self.item() > 0)
+            return bool(np.array(self) > 0)
         else:
             tensor = self.value().get_tensor()
             assert tensor._is_initialized(), "tensor not initialized"
-            return bool(self.item() > 0)
+            return bool(np.array(tensor) > 0)

     def __bool__(self):
         return self.__nonzero__()
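In `__nonzero__`, the assert above guarantees a single element, so `np.array(self) > 0` produces a one-element boolean array that `bool()` can collapse unambiguously. A short illustration of why this only works for one-element tensors (example values are assumptions, not from the commit):

```python
import numpy as np
import paddle

ok = paddle.to_tensor([2.0])
print(bool(np.array(ok) > 0))  # True: single element, unambiguous truth value

bad = paddle.to_tensor([1.0, -1.0])
try:
    bool(np.array(bad) > 0)    # more than one element: no single truth value
except ValueError as err:
    print(err)  # "The truth value of an array with more than one element is ambiguous..."
```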
@@ -712,7 +712,7 @@ def _dynamic_decode_imperative(
     step_idx = 0
     step_idx_tensor = paddle.full(shape=[1], fill_value=step_idx, dtype="int64")
-    while cond.item():
+    while np.array(cond).item():
         (step_outputs, next_states, next_inputs, next_finished) = decoder.step(
             step_idx_tensor, inputs, states, **kwargs
         )
@@ -12,6 +12,8 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.

+import numpy as np
+
 from paddle import _C_ops, _legacy_C_ops, in_dynamic_mode
 from paddle.fluid.framework import Variable, in_dygraph_mode

@@ -706,7 +708,7 @@ def _unpool_output_size(x, kernel_size, stride, padding, output_size):
     else:
         for i, var in enumerate(output_size):
             if isinstance(var, Variable):
-                output_size[i] = var.item()
+                output_size[i] = np.array(var).item()

     if len(output_size) == len(kernel_size) + 2:
         output_size = output_size[2:]

@@ -1609,7 +1611,7 @@ def adaptive_avg_pool2d(x, output_size, data_format='NCHW', name=None):
     if in_dygraph_mode():
         output_size = [
-            item.item(0) if isinstance(item, Variable) else item
+            np.array(item).item(0) if isinstance(item, Variable) else item
             for item in output_size
         ]
     # output_size support Variable in static graph mode
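`ndarray.item(0)` returns the element at flat index 0 as a Python scalar, so the rewritten list comprehension accepts both 0-D and one-element 1-D tensors. A hedged sketch of the normalization these pooling hunks perform (`to_py_scalar` is an illustrative helper, not part of the commit):

```python
import numpy as np
import paddle

def to_py_scalar(item):
    # Mirror the commit's pattern: tensors become Python scalars via NumPy,
    # while plain Python ints pass through unchanged.
    return np.array(item).item(0) if isinstance(item, paddle.Tensor) else item

output_size = [to_py_scalar(v) for v in (paddle.to_tensor([7]), 5)]
print(output_size)  # [7, 5]
```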
@@ -14,6 +14,8 @@
 """
 All layers just related to metric.
 """
+import numpy as np
+
 import paddle
 from paddle import _legacy_C_ops
 from paddle.fluid.data_feeder import check_variable_and_dtype

@@ -76,7 +78,7 @@ def accuracy(input, label, k=1, correct=None, total=None):
     if total is None:
         total = _varbase_creator(dtype="int32")
-    _k = k.item(0) if isinstance(k, Variable) else k
+    _k = np.array(k).item(0) if isinstance(k, Variable) else k
     topk_out, topk_indices = _legacy_C_ops.top_k_v2(
         input, 'k', _k, 'sorted', False
     )