Unverified commit c1f5df52 authored by W wanghuancoder, committed by GitHub

optimized transformation from tensor to numpy (#26447)

* optimized transformation from tensor to numpy, test=develop

* optimized transformation from tensor to numpy, pass pre-commit, test=develop

* modify FetchOpHandle zero copy to deep copy in PE & CPU, test=develop

* modify py::array construction, test=develop

* fix _fetch_var to use deep copy, test=develop
Parent c80fcf90
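The gist of the change: when a CPU tensor is converted to numpy, the array can be built directly on top of the tensor's existing buffer, and a deep copy is made only where the caller explicitly asks for one. A minimal plain-numpy sketch of the two conversion modes (illustrative only, not the Paddle API; the bytearray stands in for a tensor's buffer):

    import numpy as np

    buf = bytearray(8)                    # stand-in for a tensor's underlying buffer
    shared = np.asarray(memoryview(buf))  # zero-copy: the array is a view over buf
    copied = np.array(memoryview(buf))    # deep copy: the array owns its own memory

    buf[0] = 7
    print(int(shared[0]))                 # 7 -> the view observes the change
    print(int(copied[0]))                 # 0 -> the copy is unaffected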
@@ -117,7 +117,7 @@ static void TransData(const framework::LoDTensor &src_item,
       TensorCopy(src_item, platform::CPUPlace(), dst_item);
 #endif
     } else {
-      dst_item->ShareDataWith(src_item);
+      TensorCopy(src_item, platform::CPUPlace(), dst_item);
     }
   } else {
     dst_item->clear();
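Because the numpy conversion can now hand out a view of the tensor's buffer, the fetch path stops sharing data with the executor's internal tensor and copies it instead: a shared buffer may be overwritten by a later iteration while the fetched result is still in use. A plain-numpy sketch of that hazard (the reused scratch buffer is hypothetical, not PaddlePaddle code):

    import numpy as np

    scratch = np.zeros(4, dtype=np.float32)  # buffer the executor keeps reusing

    fetched_view = scratch[:]                # "ShareDataWith": a view, no ownership
    fetched_copy = scratch.copy()            # "TensorCopy": an independent copy

    scratch[:] = 123.0                       # a later step overwrites the scratch buffer
    print(fetched_view[0])                   # 123.0 -> the shared result was clobbered
    print(fetched_copy[0])                   # 0.0   -> the copied result stays valid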
@@ -19,6 +19,7 @@ limitations under the License. */
 #include <memory>
 #include <string>
 #include <tuple>
+#include <utility>
 #include <vector>
 #include "paddle/fluid/framework/data_type.h"
 #include "paddle/fluid/framework/lod_tensor.h"
@@ -564,9 +565,9 @@ inline py::array TensorToPyArray(const framework::Tensor &tensor,
   if (!is_gpu_tensor && !is_xpu_tensor) {
     if (!need_deep_copy) {
-      return py::array(py::buffer_info(
-          const_cast<void *>(tensor_buf_ptr), sizeof_dtype, py_dtype_str,
-          static_cast<size_t>(tensor.dims().size()), py_dims, py_strides));
+      auto base = py::cast(std::move(tensor));
+      return py::array(py::dtype(py_dtype_str.c_str()), py_dims, py_strides,
+                       const_cast<void *>(tensor_buf_ptr), base);
     } else {
       py::array py_arr(py::dtype(py_dtype_str.c_str()), py_dims, py_strides);
       PADDLE_ENFORCE_EQ(
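In the zero-copy branch, the tensor is cast to a Python object and passed as the base of the new py::array, so the returned numpy array borrows the tensor's buffer and keeps the tensor alive for as long as the array exists, rather than having the data copied into a fresh array as with the previous py::buffer_info-based construction. The resulting ownership relationship looks like any non-owning numpy view (plain-numpy illustration):

    import numpy as np

    owner = np.arange(6, dtype=np.float32)  # plays the role of the C++ tensor
    view = owner.reshape(2, 3)              # non-owning view over owner's buffer

    print(view.flags.owndata)               # False -> the view does not own its memory
    print(view.base is owner)               # True  -> owner is referenced and kept alive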
@@ -110,7 +110,7 @@ def scope_guard(scope):
         _switch_scope(ex)
 
 
-def as_numpy(tensor):
+def as_numpy(tensor, copy=False):
     """
     Convert a Tensor to a numpy.ndarray, its only support Tensor without LoD information.
     For higher dimensional sequence data, please use LoDTensor directly.
@@ -129,6 +129,7 @@ def as_numpy(tensor):
     Args:
         tensor(Variable): a instance of Tensor
+        copy(bool, optional): Whether to use deep copy.
 
     Returns:
         numpy.ndarray
@@ -145,7 +146,10 @@ def as_numpy(tensor):
            Please set the parameter 'return_numpy' as 'False' to \
            return LoDTensor itself directly.")
     if tensor._is_initialized():
-        return np.array(tensor)
+        if copy:
+            return np.array(tensor)
+        else:
+            return np.asarray(tensor)
     else:
         return None
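With the new flag, as_numpy(tensor) defaults to the cheap np.asarray conversion, while as_numpy(tensor, copy=True) forces an independent np.array copy. A usage sketch, assuming fluid.LoDTensor and as_numpy are importable as shown:

    import numpy as np
    import paddle.fluid as fluid
    from paddle.fluid.executor import as_numpy

    t = fluid.LoDTensor()
    t.set(np.ones((2, 3), dtype='float32'), fluid.CPUPlace())

    arr_view = as_numpy(t)             # copy=False (default): converted via np.asarray
    arr_copy = as_numpy(t, copy=True)  # copy=True: converted via np.array, deep copy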
@@ -350,7 +354,7 @@ def _fetch_var(name, scope=None, return_numpy=True):
             " program.")
     tensor = var.get_tensor()
     if return_numpy:
-        tensor = as_numpy(tensor)
+        tensor = as_numpy(tensor, copy=True)
     return tensor
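_fetch_var asks for copy=True because the returned ndarray may be kept by the caller long after the scope variable it came from has been overwritten or freed, so it has to own its data. A hedged usage sketch (the variable name is hypothetical and must already exist in the global scope):

    import paddle.fluid as fluid
    from paddle.fluid.executor import _fetch_var

    # Hypothetical variable name; a real program must have created it beforehand.
    value = _fetch_var("fc_0.w_0", scope=fluid.global_scope(), return_numpy=True)
    print(value.flags.owndata)              # True -> the fetched copy owns its memory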