Unverified commit fe919400, authored by JYChen, committed by GitHub

throw warning at __getitem__, not slice_utils (#53579)

Parent: f3f3d57a
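For context (not part of the commit): the deprecation being warned about concerns indexing a Tensor with one scalar per dimension. Below is a minimal Python sketch of that pattern; it assumes a Paddle build in which FLAGS_set_to_1d is enabled, so the all-scalar index still yields a 1-D result instead of the 0-D result it should eventually produce.

import paddle

x = paddle.to_tensor([1.0, 2.0, 3.0])  # rank-1 Tensor

# One scalar index per dimension: the result should be 0-D, but with
# FLAGS_set_to_1d enabled it is kept 1-D for backward compatibility.
# This is exactly the case __getitem__ now warns about.
y = x[0]
print(y.shape)  # [1] while the compatibility fallback is active

# A slice keeps the dimension explicitly and never relies on the fallback,
# which is the migration the warning suggests ('x[i]' => 'x[i:i+1]').
z = x[0:1]
print(z.shape)  # [1]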
@@ -925,15 +925,29 @@ static PyObject* tensor__getitem_index_not_tensor(TensorObject* self,
   }
   bool set_to_1d = FLAGS_set_to_1d;
-  if (!none_axes.empty()) {
   if (set_to_1d) {
     // NOTE(zoooo0820): When all axes are decreased, the output will be 1-D
     // with FLAGS_set_to_1d=True. In this case, one `None` should be pop out,
     // otherwise the output shape will be not correct.
     if (static_cast<int>(decrease_axis.size()) == tensor->dims().size()) {
+      VLOG(0) << "Warning: In Tensor '__getitem__', if the number of scalar "
+                 "elements in the index is equal to the rank of the Tensor, "
+                 "the output should be 0-D. In order to be consistent with "
+                 "the behavior of previous versions, it will be processed "
+                 "to 1-D. But it is not correct and will be removed in "
+                 "release 2.6. If 1-D is still wanted, please modify the "
+                 "index element from scalar to slice "
+                 "(e.g. 'x[i]' => 'x[i:i+1]'). ";
+      if (!none_axes.empty()) {
         none_axes.pop_back();
       }
     }
+  }
   if (!none_axes.empty()) {
     paddle::Tensor new_out;
     {
@@ -955,7 +969,6 @@ static PyObject* tensor__getitem_index_not_tensor(TensorObject* self,
     }
     return ToPyObject(new_out);
   }
-  }
   // the index is a list
   if (list_select_flag) {
...
@@ -1067,7 +1067,7 @@ void BindImperative(py::module *m_ptr) {
           }
           bool set_to_1d = FLAGS_set_to_1d;
-          if (!none_axes.empty()) {
           if (set_to_1d) {
             // NOTE(zoooo0820): When all axes are decreased, the output
             // will be 1-D with FLAGS_set_to_1d=True. In this case, one
@@ -1075,9 +1075,27 @@ void BindImperative(py::module *m_ptr) {
             // not correct.
             if (static_cast<int>(decrease_axis.size()) ==
                 tensor->dims().size()) {
+              VLOG(0) << "Warning: In Tensor '__getitem__', if the number "
+                         "of scalar elements in the index is equal to the "
+                         "rank of the Tensor, the output should be 0-D. In "
+                         "order to be consistent with the behavior of "
+                         "previous versions, it will be processed to 1-D. "
+                         "But it is not correct and will be removed in "
+                         "release 2.6. If 1-D is still wanted, please "
+                         "modify the index element from scalar to slice "
+                         "(e.g. 'x[i]' => 'x[i:i+1]'). ";
+              if (!none_axes.empty()) {
                 none_axes.pop_back();
               }
             }
+          }
           if (!none_axes.empty()) {
             // Deal with cases that decrease_axes is not empty
             // For example:
@@ -1105,7 +1123,6 @@ void BindImperative(py::module *m_ptr) {
             return new_out;
           }
-          }
           // the index is a list
           if (list_select_flag) {
...
@@ -208,18 +208,6 @@ inline DDim GetDecreasedDims(const DDim slice_dims,
   if (FLAGS_set_to_1d && new_shape.size() == 0) {
     // NOTE(zoooo0820): Hack procssing to 1-D, when axes decrease to 0-D in
     // slice. This will remove in release 2.6.
-    VLOG(0) << "Warning:: In Tensor '__getitem__', if the number of scalar "
-               "elements in the index is equal to the rank of the Tensor, "
-               "the output should be 0-D. In order to be consistent with "
-               "the behavior of previous versions, it will be processed to "
-               "1-D. But it is not correct and will be removed in release "
-               "2.6. If 1-D is still wanted, please modify the index "
-               "element from scalar to slice (e.g. 'x[i]' => 'x[i:i+1]'). ";
     new_shape.push_back(1);
   }
   decreased_dims = phi::make_ddim(new_shape);
...
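As a side note (not part of the diff): the hunk above removes only the duplicated warning from GetDecreasedDims; the 1-D fallback itself stays gated by FLAGS_set_to_1d. A hedged sketch of inspecting or disabling the flag from Python, assuming FLAGS_set_to_1d is exposed to paddle.get_flags/paddle.set_flags like other global flags:

import paddle

# Same lookup the Python-side diff below performs inside _getitem_impl_.
print(paddle.get_flags('FLAGS_set_to_1d')['FLAGS_set_to_1d'])

# Opt in to the future behavior early: with the flag disabled, an
# all-scalar index yields a 0-D Tensor, so neither the fallback nor the
# warning applies.
paddle.set_flags({'FLAGS_set_to_1d': False})
x = paddle.to_tensor([1.0, 2.0, 3.0])
print(x[0].shape)  # expected: [] (0-D)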
@@ -17,6 +17,7 @@ import numpy as np
 from . import unique_name
 from . import core
 import paddle
+import warnings

 MAX_INTEGER = 2**31 - 1
@@ -579,6 +580,9 @@ def _getitem_impl_(var, item):
     # otherwise the output shape will be not correct.
     set_to_1d = paddle.get_flags('FLAGS_set_to_1d')['FLAGS_set_to_1d']
     if set_to_1d and len(decrease_axes) == len(var.shape):
+        warnings.warn(
+            "Warning: In Tensor '__getitem__', if the number of scalar elements in the index is equal to the rank of the Tensor, the output should be 0-D. In order to be consistent with the behavior of previous versions, it will be processed to 1-D. But it is not correct and will be removed in release 2.6. If 1-D is still wanted, please modify the index element from scalar to slice (e.g. 'x[i]' => 'x[i:i+1]')."
+        )
         none_axes = none_axes[1:]
     if len(none_axes) > 0:
...
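Since the Python-side message goes through the standard warnings module (see the warnings.warn call above), users who cannot migrate their indexing code yet can silence just this message with an ordinary filter. A minimal sketch, not part of the commit; the message substring is taken verbatim from the diff:

import warnings

# Suppress only this deprecation message; other warnings still surface.
warnings.filterwarnings(
    "ignore",
    message=".*modify the index element from scalar to slice.*",
)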