Unverified commit b07d239c, authored by wanghuancoder, committed by GitHub

[Eager] Slice (#40587)

* fix some slice bug, test=develop

* eager slice, test=develop

* eager slice, test=develop

* refine, test=develop

* refine, test=develop

* fix bug, test=develop

* refine, test=develop

* rename function name, test=develop
Parent 2f50ae99
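For orientation before the diff: a rough Python-level sketch of the behaviour this change targets (illustrative only, not taken from the commit; the exact dispatch depends on whether eager mode is enabled in the build):

    import paddle

    x = paddle.arange(12, dtype='float32').reshape([3, 4])
    y = x[0:2, 1:3]   # __getitem__ lowers to the slice / strided_slice ops
    x[1:2] = 0.0      # __setitem__ lowers to the set_value op in dygraph/eager mode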
@@ -176,6 +176,20 @@ void Tracer::TraceOp(const std::string& type, const NameVarMap<VarType>& ins,
const std::map<std::string, std::string>& inplace_map,
paddle::framework::AttributeMap* passed_default_attrs_,
bool use_default_attr_map) {
TraceOpImpl<VarType>(type, ins, outs, attrs, place, trace_backward,
inplace_map, passed_default_attrs_,
use_default_attr_map);
}
template <typename VarType>
void Tracer::TraceOpImpl(const std::string& type,
const NameVarMap<VarType>& ins,
const NameVarMap<VarType>& outs,
framework::AttributeMap& attrs,
const platform::Place& place, bool trace_backward,
const std::map<std::string, std::string>& inplace_map,
paddle::framework::AttributeMap* passed_default_attrs_,
bool use_default_attr_map) {
platform::RecordEvent op_type_record_event(
type + " trace_op", platform::TracerEventType::Operator, 1);
platform::ScopedFlushDenormal flush;
@@ -340,25 +354,33 @@ void Tracer::TraceOp(const std::string& type, const NameVarBaseMap& ins,
void Tracer::TraceOp(const std::string& type, const NameTensorMap& ins,
const NameTensorMap& outs,
- paddle::framework::AttributeMap attrs,
+ paddle::framework::AttributeMap& attrs,
const paddle::platform::Place& place,
paddle::framework::AttributeMap* default_attrs,
bool use_default_attr_map,
const std::map<std::string, std::string>& inplace_map) {
VLOG(6) << "Running On Eager TraceOp with use_default_attr_map: "
<< use_default_attr_map;
- TraceOp<egr::EagerVariable>(type, ins, outs, std::move(attrs), place, false,
- inplace_map, default_attrs, use_default_attr_map);
+ TraceOpImpl<egr::EagerVariable>(type, ins, outs, attrs, place, false,
+ inplace_map, default_attrs,
+ use_default_attr_map);
}
void Tracer::TraceOp(const std::string& type, const NameTensorMap& ins,
const NameTensorMap& outs,
paddle::framework::AttributeMap attrs) {
VLOG(6) << "Running On Eager TraceOp(4 agrs): ";
TraceOpImpl<egr::EagerVariable>(type, ins, outs, attrs, expected_place_,
false, {}, nullptr, true);
}
void Tracer::TraceOp(const std::string& type, const NameTensorMap& ins,
const NameTensorMap& outs,
- paddle::framework::AttributeMap attrs,
+ paddle::framework::AttributeMap& attrs,
const std::map<std::string, std::string>& inplace_map) {
VLOG(6) << "Running On Eager TraceOp(less): ";
- TraceOp<egr::EagerVariable>(type, ins, outs, std::move(attrs),
- expected_place_, false, inplace_map, nullptr,
- true);
+ TraceOpImpl<egr::EagerVariable>(type, ins, outs, attrs, expected_place_,
+ false, inplace_map, nullptr, true);
}
void Tracer::SetExpectedPlace(platform::Place place) {
......
@@ -74,16 +74,32 @@ class Tracer {
paddle::framework::AttributeMap* passed_default_attrs_ = nullptr,
bool use_default_attr_map = true);
template <typename VarType>
void TraceOpImpl(
const std::string& type, const NameVarMap<VarType>& ins,
const NameVarMap<VarType>& outs,
framework::AttributeMap& attrs, // NOLINT
const platform::Place& place, bool trace_backward,
const std::map<std::string, std::string>& inplace_map = {},
paddle::framework::AttributeMap* passed_default_attrs_ = nullptr,
bool use_default_attr_map = true);
void TraceOp(const std::string& type, const NameVarBaseMap& ins,
const NameVarBaseMap& outs, framework::AttributeMap attrs,
const std::map<std::string, std::string>& inplace_map = {});
void TraceOp(const std::string& type, const NameTensorMap& ins,
- const NameTensorMap& outs, paddle::framework::AttributeMap attrs,
+ const NameTensorMap& outs,
+ paddle::framework::AttributeMap& attrs,  // NOLINT
const std::map<std::string, std::string>& inplace_map = {});
void TraceOp(const std::string& type, const NameTensorMap& ins,
- const NameTensorMap& outs, paddle::framework::AttributeMap attrs,
+ const NameTensorMap& outs,
+ paddle::framework::AttributeMap attrs);
+ void TraceOp(const std::string& type, const NameTensorMap& ins,
+ const NameTensorMap& outs,
+ paddle::framework::AttributeMap& attrs,  // NOLINT
const paddle::platform::Place& place,
paddle::framework::AttributeMap* default_attrs,
bool use_default_attr_map,
......
@@ -25,6 +25,7 @@ limitations under the License. */
#include "paddle/fluid/eager/hooks.h"
#include "paddle/fluid/eager/utils.h"
#include "paddle/fluid/framework/convert_utils.h"
#include "paddle/fluid/framework/python_headers.h"
#include "paddle/fluid/memory/allocation/allocator.h" #include "paddle/fluid/memory/allocation/allocator.h"
#include "paddle/fluid/memory/memcpy.h" #include "paddle/fluid/memory/memcpy.h"
#include "paddle/fluid/platform/enforce.h" #include "paddle/fluid/platform/enforce.h"
...@@ -32,12 +33,14 @@ limitations under the License. */ ...@@ -32,12 +33,14 @@ limitations under the License. */
#include "paddle/fluid/pybind/eager_utils.h" #include "paddle/fluid/pybind/eager_utils.h"
#include "paddle/fluid/pybind/exception.h" #include "paddle/fluid/pybind/exception.h"
#include "paddle/fluid/pybind/slice_utils.h" #include "paddle/fluid/pybind/slice_utils.h"
#include "paddle/fluid/pybind/tensor_py.h"
#include "paddle/phi/api/include/api.h" #include "paddle/phi/api/include/api.h"
#include "paddle/phi/common/data_type.h" #include "paddle/phi/common/data_type.h"
#include "paddle/phi/core/compat/convert_utils.h" #include "paddle/phi/core/compat/convert_utils.h"
#include "paddle/phi/core/dense_tensor.h" #include "paddle/phi/core/dense_tensor.h"
#include "paddle/phi/core/sparse_coo_tensor.h" #include "paddle/phi/core/sparse_coo_tensor.h"
#include "paddle/phi/core/sparse_csr_tensor.h" #include "paddle/phi/core/sparse_csr_tensor.h"
#include "pybind11/detail/internals.h"
namespace paddle {
namespace pybind {
@@ -150,12 +153,22 @@ bool PyCheckTensor(PyObject* obj) {
static PyObject* tensor_method_numpy(TensorObject* self, PyObject* args,
PyObject* kwargs) {
EAGER_TRY
- PADDLE_ENFORCE_EQ(
- self->tensor.initialized(), true,
- platform::errors::InvalidArgument(
- "Tensor data of %s is Empty that indicates we have null tensor for "
- "now, please check if it has no data and initialize it first.",
- self->tensor.name()));
+ auto& api = pybind11::detail::npy_api::get();
+ if (!self->tensor.impl()) {
+ Py_intptr_t py_dims[paddle::framework::DDim::kMaxRank];
+ Py_intptr_t py_strides[paddle::framework::DDim::kMaxRank];
+ py_dims[0] = 0;
+ py_strides[0] = 0;
+ PyObject* array = api.PyArray_NewFromDescr_(
+ api.PyArray_Type_,
+ api.PyArray_DescrFromType_(pybind11::detail::npy_api::NPY_FLOAT_), 1,
+ py_dims, py_strides, nullptr,
+ pybind11::detail::npy_api::NPY_ARRAY_ALIGNED_ |
+ pybind11::detail::npy_api::NPY_ARRAY_WRITEABLE_,
+ nullptr);
+ return array;
+ }
auto tensor_dims = self->tensor.shape();
auto numpy_dtype = TensorDtype2NumpyDtype(self->tensor.type());
auto sizeof_dtype = paddle::framework::DataTypeSize(self->tensor.type());
@@ -167,7 +180,7 @@ static PyObject* tensor_method_numpy(TensorObject* self, PyObject* args,
py_strides[i] = sizeof_dtype * numel;
numel *= py_dims[i];
}
- auto& api = pybind11::detail::npy_api::get();
PyObject* array = api.PyArray_NewFromDescr_(
api.PyArray_Type_, api.PyArray_DescrFromType_(numpy_dtype),
tensor_dims.size(), py_dims, py_strides, nullptr,
@@ -175,6 +188,10 @@ static PyObject* tensor_method_numpy(TensorObject* self, PyObject* args,
pybind11::detail::npy_api::NPY_ARRAY_WRITEABLE_,
nullptr);
if (!self->tensor.impl()->initialized()) {
return array;
}
if (self->tensor.is_cpu() || self->tensor.is_gpu_pinned()) {
auto dense_tensor =
std::dynamic_pointer_cast<phi::DenseTensor>(self->tensor.impl());
@@ -213,6 +230,20 @@ static PyObject* tensor_method__is_initialized(TensorObject* self,
EAGER_CATCH_AND_THROW_RETURN_NULL
}
static PyObject* tensor_method__is_dense_tensor_hold_allocation(
TensorObject* self, PyObject* args, PyObject* kwargs) {
EAGER_TRY
auto dense_tensor =
std::dynamic_pointer_cast<phi::DenseTensor>(self->tensor.impl());
if (dense_tensor) {
return ToPyObject(dense_tensor->IsInitialized());
} else {
return ToPyObject(false);
}
EAGER_CATCH_AND_THROW_RETURN_NULL
}
static PyObject* tensor_method__copy_to(TensorObject* self, PyObject* args,
PyObject* kwargs) {
EAGER_TRY
@@ -552,10 +583,13 @@ static PyObject* tensor__getitem_index_not_tensor(TensorObject* self,
}
if (op_type == "slice") {
out = slice_dygraph_function(self->tensor, paddle::experimental::Tensor(),
- paddle::experimental::Tensor(),
+ paddle::experimental::Tensor(), {}, {},
std::move(attrs));
} else if (op_type == "strided_slice") {
- out = strided_slice_dygraph_function(self->tensor, attrs);
+ out = strided_slice_dygraph_function(
+ self->tensor, paddle::experimental::Tensor(),
+ paddle::experimental::Tensor(), paddle::experimental::Tensor(), {},
+ {}, {}, attrs);
} else {
PADDLE_THROW(platform::errors::InvalidArgument(
"Slice is only support slice and strided_slice, but we got %s which "
@@ -604,6 +638,7 @@ static PyObject* tensor__getitem_index_not_tensor(TensorObject* self,
auto select_index = paddle::experimental::Tensor(
egr::Controller::Instance().GenerateUniqueName());
auto idx_tensor = std::make_shared<phi::DenseTensor>();
select_index.set_impl(idx_tensor);
auto* dev_ctx = platform::DeviceContextPool::Instance().Get(
egr::Controller::Instance().GetExpectedPlace());
paddle::framework::TensorFromVector(list_select_idxs, *dev_ctx,
@@ -617,6 +652,216 @@ static PyObject* tensor__getitem_index_not_tensor(TensorObject* self,
EAGER_CATCH_AND_THROW_RETURN_NULL
}
static PyObject* tensor_method__setitem_eager_tensor(TensorObject* self,
PyObject* args,
PyObject* kwargs) {
EAGER_TRY
VLOG(4) << "Call __setitem_eager_tensor";
auto self_tensor = static_cast<phi::DenseTensor*>(self->tensor.impl().get());
PyObject* _index = PyTuple_GET_ITEM(args, 0);
PyObject* value_obj = PyTuple_GET_ITEM(args, 1);
// NOTE(zhiqiu): PyTuple_Pack increases refcount while PyTuple_New
// https://github.com/python/cpython/blob/24b63c695ae0a95b06379eaadace66735abac1e2/Objects/tupleobject.c#L251
PyObject* index_ptr =
!PyTuple_Check(_index) ? PyTuple_Pack(1, _index) : _index;
DEFINE_PADDLE_SCOPE_GUARD([index_ptr, &_index]() {
if (!PyTuple_Check(_index)) {
Py_DECREF(index_ptr);
VLOG(4) << "Call Py_DECREF";
}
});
// TODO(pangyoki) add inplace(BumpInplaceVersion) if need
// 1. Check arguments
bool parse_index = true;
// Check whether _index can be parsed.
const int size = PyTuple_GET_SIZE(index_ptr);
for (int dim = 0; dim < size; ++dim) {
PyObject* slice_item = PyTuple_GetItem(index_ptr, dim);
if (!(PyCheckInteger(slice_item) || PySlice_Check(slice_item) ||
slice_item == Py_Ellipsis || slice_item == Py_None)) {
parse_index = false;
break;
}
}
// 2. Call op set_value to speed up if the condition is met,
// otherwise call TensorToPyArray.
// TODO(liym27): Try not to call TensorToPyArray because it always
// copies data to cpu place, which reduces performance.
if (parse_index) {
std::vector<int> axes, starts, ends, steps, decrease_axes, none_axes,
infer_flags, list_select_idxs;
// if index is a list, list_select_flag will be true
bool list_select_flag = false;
ParseIndexingSlice(self_tensor, index_ptr, &axes, &starts, &ends, &steps,
&decrease_axes, &none_axes, &infer_flags,
&list_select_idxs, &list_select_flag);
framework::AttributeMap attrs = {{"axes", axes},
{"starts", starts},
{"ends", ends},
{"steps", steps},
{"decrease_axes", decrease_axes},
{"none_axes", none_axes}};
if (egr::Controller::Instance().HasGrad()) {
PADDLE_ENFORCE_EQ(
egr::egr_utils_api::IsLeafTensor(self->tensor) &&
!egr::EagerUtils::autograd_meta(&self->tensor)->StopGradient(),
false, platform::errors::InvalidArgument(
"Leaf Tensor (%s) that doesn't stop gradient can't use "
"inplace strategy.",
self->tensor.name()));
}
paddle::experimental::Tensor value_tensor;
if (PyCheckTensor(value_obj)) {
value_tensor = reinterpret_cast<TensorObject*>(value_obj)->tensor;
// pass the stop_gradient from value to tensor
if (!egr::EagerUtils::autograd_meta(&value_tensor)->StopGradient() &&
egr::EagerUtils::autograd_meta(&self->tensor)->StopGradient()) {
egr::EagerUtils::autograd_meta(&self->tensor)->SetStopGradient(false);
}
} else if (py::isinstance<py::array>(value_obj)) {
paddle::experimental::Tensor value_tensor_tmp(
std::make_shared<phi::DenseTensor>(),
egr::Controller::Instance().GenerateUniqueName());
py::object value_obj_tmp(py::handle(value_obj), true);
py::object value = value_obj_tmp;
if (self->tensor.dtype() == paddle::experimental::DataType::FLOAT32) {
if (!py::isinstance<py::array_t<float>>(value_obj_tmp)) {
value = pybind11::detail::CastNumpyArray<float>(value_obj_tmp);
}
} else if (self->tensor.dtype() ==
paddle::experimental::DataType::FLOAT64) {
if (!py::isinstance<py::array_t<double>>(value_obj_tmp)) {
value = pybind11::detail::CastNumpyArray<double>(value_obj_tmp);
}
} else if (self->tensor.dtype() ==
paddle::experimental::DataType::INT32) {
if (!py::isinstance<py::array_t<int32_t>>(value_obj_tmp)) {
value = pybind11::detail::CastNumpyArray<int32_t>(value_obj_tmp);
}
} else if (self->tensor.dtype() ==
paddle::experimental::DataType::INT64) {
if (!py::isinstance<py::array_t<int64_t>>(value_obj_tmp)) {
value = pybind11::detail::CastNumpyArray<int64_t>(value_obj_tmp);
}
} else if (self->tensor.dtype() == paddle::experimental::DataType::BOOL) {
if (!py::isinstance<py::array_t<bool>>(value_obj_tmp)) {
value = pybind11::detail::CastNumpyArray<bool>(value_obj_tmp);
}
} else {
PADDLE_THROW(platform::errors::InvalidArgument(
"When assign a numpy.np value to a paddle.Tensor, "
"the data type of the paddle.Tensor must be bool, "
"float32, int32 or int64, "
"please check the type of tensor."));
}
if (value_tensor_tmp.place() == paddle::PlaceType::kUNK) {
#if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
SetTensorFromPyArray(
static_cast<phi::DenseTensor*>(value_tensor_tmp.impl().get()),
value, platform::Place(platform::CUDAPlace(0)), false);
#else
SetTensorFromPyArray(
static_cast<phi::DenseTensor*>(value_tensor_tmp.impl().get()),
value, platform::Place(platform::CPUPlace()), false);
#endif
} else {
SetTensorFromPyArray(
static_cast<phi::DenseTensor*>(value_tensor_tmp.impl().get()),
value, value_tensor_tmp.inner_place(), false);
}
value_tensor = value_tensor_tmp;
} else {
py::object value_obj_tmp(py::handle(value_obj), true);
// convert the value to self data type
if (py::isinstance<py::float_>(value_obj_tmp) ||
py::isinstance<py::int_>(value_obj_tmp) ||
py::isinstance<py::bool_>(value_obj_tmp)) {
if (self->tensor.dtype() == paddle::experimental::DataType::FLOAT32) {
attrs["fp32_values"] =
std::vector<float>{value_obj_tmp.cast<float>()};
} else if (self->tensor.dtype() ==
paddle::experimental::DataType::FLOAT64) {
attrs["fp64_values"] =
std::vector<double>{value_obj_tmp.cast<double>()};
} else if (self->tensor.dtype() ==
paddle::experimental::DataType::INT32) {
attrs["int32_values"] =
std::vector<int32_t>{value_obj_tmp.cast<int32_t>()};
} else if (self->tensor.dtype() ==
paddle::experimental::DataType::INT64) {
attrs["int64_values"] =
std::vector<int64_t>{value_obj_tmp.cast<int64_t>()};
} else if (self->tensor.dtype() ==
paddle::experimental::DataType::BOOL) {
attrs["bool_values"] = std::vector<int>{value_obj_tmp.cast<bool>()};
} else {
PADDLE_THROW(platform::errors::InvalidArgument(
"When assign a value to a paddle.Tensor, "
"the data type of the paddle.Tensor must be bool, "
"float32, int32 or int64, "
"please check the type of tensor."));
}
attrs["shape"] = std::vector<int64_t>{1};
} else {
PADDLE_THROW(platform::errors::InvalidArgument(
"Value type error. The assign value allows "
"numpy.ndarray, integer, float or bool, "
"but received %s.",
Py_TYPE(value_obj)));
}
}
{
// Release gil and do tracing
py::gil_scoped_release release;
self->tensor = set_value_dygraph_function(self->tensor, value_tensor, {},
{}, {}, attrs);
}
} else {
auto self_numpy = TensorToPyArray(*self_tensor);
VLOG(4) << "parse_index is false";
if (PyCheckTensor(_index)) {
VLOG(4) << "index is tensor";
auto index_tensor = static_cast<phi::DenseTensor*>(
reinterpret_cast<TensorObject*>(_index)->tensor.impl().get());
auto index_numpy = TensorToPyArray(*index_tensor);
self_numpy[index_numpy] = py::object(py::handle(value_obj), true);
} else {
VLOG(4) << "index is not tensor";
self_numpy[_index] = py::object(py::handle(value_obj), true);
}
if (self->tensor.place() == paddle::PlaceType::kUNK) {
#if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
SetTensorFromPyArray(self_tensor, self_numpy,
platform::Place(platform::CUDAPlace(0)), false);
#else
SetTensorFromPyArray(self_tensor, self_numpy,
platform::Place(platform::CPUPlace()), false);
#endif
} else {
SetTensorFromPyArray(self_tensor, self_numpy, self->tensor.inner_place(),
false);
}
}
Py_INCREF(Py_None);
return Py_None;
EAGER_CATCH_AND_THROW_RETURN_NULL
}
static PyObject* tensor_register_grad_hook(TensorObject* self, PyObject* args,
PyObject* kwargs) {
EAGER_TRY
@@ -825,6 +1070,10 @@ PyMethodDef variable_methods[] = {
{"_is_initialized",
(PyCFunction)(void (*)(void))tensor_method__is_initialized,
METH_VARARGS | METH_KEYWORDS, NULL},
{"_is_dense_tensor_hold_allocation",
(PyCFunction)(
void (*)(void))tensor_method__is_dense_tensor_hold_allocation,
METH_VARARGS | METH_KEYWORDS, NULL},
{"_copy_to", (PyCFunction)(void (*)(void))tensor_method__copy_to, {"_copy_to", (PyCFunction)(void (*)(void))tensor_method__copy_to,
METH_VARARGS | METH_KEYWORDS, NULL}, METH_VARARGS | METH_KEYWORDS, NULL},
{"copy_", (PyCFunction)(void (*)(void))tensor_method_copy_, {"copy_", (PyCFunction)(void (*)(void))tensor_method_copy_,
...@@ -857,6 +1106,9 @@ PyMethodDef variable_methods[] = { ...@@ -857,6 +1106,9 @@ PyMethodDef variable_methods[] = {
{"_getitem_index_not_tensor", {"_getitem_index_not_tensor",
(PyCFunction)(void (*)(void))tensor__getitem_index_not_tensor, (PyCFunction)(void (*)(void))tensor__getitem_index_not_tensor,
METH_VARARGS | METH_KEYWORDS, NULL}, METH_VARARGS | METH_KEYWORDS, NULL},
{"__setitem_eager_tensor__",
(PyCFunction)(void (*)(void))tensor_method__setitem_eager_tensor,
METH_VARARGS | METH_KEYWORDS, NULL},
{"_register_grad_hook", {"_register_grad_hook",
(PyCFunction)(void (*)(void))tensor_register_grad_hook, (PyCFunction)(void (*)(void))tensor_register_grad_hook,
METH_VARARGS | METH_KEYWORDS, NULL}, METH_VARARGS | METH_KEYWORDS, NULL},
......
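A minimal sketch of what the new __setitem_eager_tensor__ path accepts from Python, assuming eager mode is active in this build (illustrative; the numpy value is cast to the tensor's dtype before the set_value op runs):

    import numpy as np
    import paddle

    x = paddle.zeros([4, 2, 3], dtype='float32')
    x[0] = paddle.ones([2, 3])        # value is a Tensor
    x[1] = np.random.random((2, 3))   # numpy value, cast float64 -> float32
    x[2:3] = 10                       # Python scalar goes through the *_values attributes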
@@ -52,6 +52,12 @@ PyObject* tensor_properties_get_type(TensorObject* self, void* closure) {
EAGER_CATCH_AND_THROW_RETURN_NULL
}
PyObject* tensor_properties_is_leaf(TensorObject* self, void* closure) {
EAGER_TRY
return ToPyObject(egr::egr_utils_api::IsLeafTensor(self->tensor));
EAGER_CATCH_AND_THROW_RETURN_NULL
}
int tensor_properties_set_name(TensorObject* self, PyObject* value,
void* closure) {
EAGER_TRY
@@ -179,6 +185,7 @@ struct PyGetSetDef variable_properties[] = {
nullptr},
{"dtype", (getter)tensor_properties_get_dtype, nullptr, nullptr, nullptr},
{"type", (getter)tensor_properties_get_type, nullptr, nullptr, nullptr},
{"is_leaf", (getter)tensor_properties_is_leaf, nullptr, nullptr, nullptr},
{nullptr, nullptr, nullptr, nullptr, nullptr}};
}  // namespace pybind
......
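A small sketch of the newly exposed is_leaf property (assuming a build that includes this patch):

    import paddle

    x = paddle.ones([2, 2])
    x.stop_gradient = False
    y = x * 2
    print(x.is_leaf)  # True: created directly by the user
    print(y.is_leaf)  # False: produced by an op from a tensor that requires grad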
@@ -386,46 +386,6 @@ GetVarBaseListFromPyHandle(const py::handle &handle) {
return result;
}
// cast numpy type form S to T, this may allocate new memory
template <class T, class S>
static py::array_t<T> CastNumpyType(py::array_t<S> array) {
if (std::is_same<T, S>::value) {
return array;
}
auto dim = array.ndim();
std::vector<py::ssize_t> result_shape(dim);
for (auto i = 0; i < dim; i++) {
result_shape[i] = array.shape(i);
}
py::array_t<T> result(result_shape);
return py::vectorize([](S s) { return static_cast<T>(s); })(array);
}
template <class T>
static py::array_t<T> CastNumpyArray(const py::object &array) {
if (py::isinstance<py::array_t<float>>(array)) {
return CastNumpyType<T>(array.cast<py::array_t<float>>());
} else if (py::isinstance<py::array_t<double>>(array)) {
return CastNumpyType<T>(array.cast<py::array_t<double>>());
} else if (py::isinstance<py::array_t<int32_t>>(array)) {
return CastNumpyType<T>(array.cast<py::array_t<int32_t>>());
} else if (py::isinstance<py::array_t<int64_t>>(array)) {
return CastNumpyType<T>(array.cast<py::array_t<int64_t>>());
} else if (py::isinstance<py::array_t<bool>>(array)) {
return CastNumpyType<T>(array.cast<py::array_t<bool>>());
} else {
PADDLE_THROW(platform::errors::InvalidArgument(
"Value type error. The assign numpy value allows integer, float, "
"double and bool, "
"but received %s.",
Py_TYPE(array.ptr())->tp_name));
}
// can't reach here
return py::array_t<T>();
}
static imperative::NameVarBaseMap ConvertToNameVarBaseMap(
const PyNameVarBaseMap &map) {
imperative::NameVarBaseMap result;
@@ -854,27 +814,29 @@ void BindImperative(py::module *m_ptr) {
py::object value = value_obj;
if (self->DataType() == framework::proto::VarType::FP32) {
if (!py::isinstance<py::array_t<float>>(value_obj)) {
- value = CastNumpyArray<float>(value_obj);
+ value = pybind11::detail::CastNumpyArray<float>(value_obj);
}
} else if (self->DataType() ==
framework::proto::VarType::FP64) {
if (!py::isinstance<py::array_t<double>>(value_obj)) {
- value = CastNumpyArray<double>(value_obj);
+ value = pybind11::detail::CastNumpyArray<double>(value_obj);
}
} else if (self->DataType() ==
framework::proto::VarType::INT32) {
if (!py::isinstance<py::array_t<int32_t>>(value_obj)) {
- value = CastNumpyArray<int32_t>(value_obj);
+ value =
+ pybind11::detail::CastNumpyArray<int32_t>(value_obj);
}
} else if (self->DataType() ==
framework::proto::VarType::INT64) {
if (!py::isinstance<py::array_t<int64_t>>(value_obj)) {
- value = CastNumpyArray<int64_t>(value_obj);
+ value =
+ pybind11::detail::CastNumpyArray<int64_t>(value_obj);
}
} else if (self->DataType() ==
framework::proto::VarType::BOOL) {
if (!py::isinstance<py::array_t<bool>>(value_obj)) {
- value = CastNumpyArray<bool>(value_obj);
+ value = pybind11::detail::CastNumpyArray<bool>(value_obj);
}
} else {
PADDLE_THROW(platform::errors::InvalidArgument(
......
@@ -38,7 +38,15 @@ std::map<std::string, std::set<std::string>> op_ins_map = {
{"assign", {"X"}},
{"reshape2", {"X", "Shape"}},
{"expand", {"X", "ExpandTimes"}},
- {"slice", {"Input", "StartsTensor", "EndsTensor"}},
+ {"slice",
+ {"Input", "StartsTensor", "EndsTensor", "StartsTensorList",
+ "EndsTensorList"}},
+ {"strided_slice",
+ {"Input", "StartsTensor", "EndsTensor", "StridesTensor",
+ "StartsTensorList", "EndsTensorList", "StridesTensorList"}},
+ {"set_value",
+ {"Input", "ValueTensor", "StartsTensorList", "EndsTensorList",
+ "StepsTensorList"}},
{"fake_quantize_dequantize_moving_average_abs_max",
{"X", "InScale", "InAccum", "InState"}},
{"nll_loss", {"X", "Label", "Weight"}},
......
@@ -52,6 +52,46 @@ constexpr int NPY_UINT16_ = 4;
constexpr int NPY_COMPLEX64 = 14;
constexpr int NPY_COMPLEX128 = 15;
// cast numpy type from S to T, this may allocate new memory
template <class T, class S>
static py::array_t<T> CastNumpyType(py::array_t<S> array) {
if (std::is_same<T, S>::value) {
return array;
}
auto dim = array.ndim();
std::vector<py::ssize_t> result_shape(dim);
for (auto i = 0; i < dim; i++) {
result_shape[i] = array.shape(i);
}
py::array_t<T> result(result_shape);
return py::vectorize([](S s) { return static_cast<T>(s); })(array);
}
template <class T>
static py::array_t<T> CastNumpyArray(const py::object &array) {
if (py::isinstance<py::array_t<float>>(array)) {
return CastNumpyType<T>(array.cast<py::array_t<float>>());
} else if (py::isinstance<py::array_t<double>>(array)) {
return CastNumpyType<T>(array.cast<py::array_t<double>>());
} else if (py::isinstance<py::array_t<int32_t>>(array)) {
return CastNumpyType<T>(array.cast<py::array_t<int32_t>>());
} else if (py::isinstance<py::array_t<int64_t>>(array)) {
return CastNumpyType<T>(array.cast<py::array_t<int64_t>>());
} else if (py::isinstance<py::array_t<bool>>(array)) {
return CastNumpyType<T>(array.cast<py::array_t<bool>>());
} else {
PADDLE_THROW(paddle::platform::errors::InvalidArgument(
"Value type error. The assign numpy value allows integer, float, "
"double and bool, "
"but received %s.",
Py_TYPE(array.ptr())->tp_name));
}
// can't reach here
return py::array_t<T>();
}
// Note: Since float16 is not a builtin type in C++, we register
// paddle::platform::float16 as numpy.float16.
// Ref: https://github.com/pybind/pybind11/issues/1776
......
@@ -767,6 +767,9 @@ def monkey_patch_varbase():
# Call _setitem_impl_ when item contains tensor or list.
return _setitem_impl_(self, item, value)
else:
if core._in_eager_mode():
return self.__setitem_eager_tensor__(item, value)
else:
# Call c++ func __setitem_varbase__ to speedup.
return self.__setitem_varbase__(item, value)
......
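The dispatch added above can be exercised like this (a sketch, assuming this Paddle build; `_test_eager_guard` toggles eager mode, so the same assignment goes through `__setitem_eager_tensor__` instead of `__setitem_varbase__`):

    import paddle
    from paddle.fluid.framework import _test_eager_guard

    x = paddle.ones([4, 2, 3])
    x[0] = 2.0                      # legacy dygraph: __setitem_varbase__
    with _test_eager_guard():
        x_eager = paddle.ones([4, 2, 3])
        x_eager[0] = 2.0            # eager mode: __setitem_eager_tensor__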
@@ -11189,8 +11189,8 @@ def slice(input, axes, starts, ends):
ends_tensor.stop_gradient = True
infer_flags = list(-1 for i in range(len(axes)))
- return _C_ops.slice(input, starts_tensor, ends_tensor, 'axes', axes,
- 'infer_flags', infer_flags, *attrs)
+ return _C_ops.slice(input, starts_tensor, ends_tensor, None, None,
+ 'axes', axes, 'infer_flags', infer_flags, *attrs)
if not isinstance(starts, (list, tuple, Variable)):
raise ValueError(
......
@@ -632,7 +632,7 @@ def assign(input, output=None):
dtype = VarDesc.VarType.FP32
if dtype == VarDesc.VarType.BOOL:
value_name = "bool_values"
- values = [bool(v) for v in input.flat]
+ values = [int(v) for v in input.flat]
elif dtype == VarDesc.VarType.FP32:
value_name = "fp32_values"
values = [float(v) for v in input.flat]
......
@@ -71,7 +71,7 @@ class TestAssignValueNPUOp4(TestAssignValueNPUOp):
def init_data(self):
self.value = numpy.random.choice(
a=[False, True], size=(2, 5)).astype(numpy.bool)
- self.attrs["bool_values"] = [bool(v) for v in self.value.flat]
+ self.attrs["bool_values"] = [int(v) for v in self.value.flat]
class TestAssignApi(unittest.TestCase):
......
@@ -58,7 +58,7 @@ class TestAssignValueOp4(TestAssignValueOp):
def init_data(self):
self.value = numpy.random.choice(
a=[False, True], size=(2, 5)).astype(numpy.bool)
- self.attrs["bool_values"] = [bool(v) for v in self.value.flat]
+ self.attrs["bool_values"] = [int(v) for v in self.value.flat]
class TestAssignApi(unittest.TestCase):
......
@@ -22,6 +22,7 @@ import numpy as np
import paddle
from paddle.fluid.layer_helper import LayerHelper
from functools import reduce
from paddle.fluid.framework import _test_eager_guard, _in_eager_mode
class TestSetValueBase(unittest.TestCase):
@@ -69,7 +70,7 @@ class TestSetValueApi(TestSetValueBase):
paddle.enable_static()
return out
- def test_api(self):
+ def func_test_api(self):
static_out = self._run_static()
dynamic_out = self._run_dynamic()
self._get_answer()
@@ -82,6 +83,11 @@ class TestSetValueApi(TestSetValueBase):
(self.data == dynamic_out).all(),
msg=error_msg.format("dynamic", self.data, dynamic_out))
def test_api(self):
with _test_eager_guard():
self.func_test_api()
self.func_test_api()
# 1. Test different type of item: int, Python slice, Paddle Tensor
# 1.1 item is int
@@ -995,9 +1001,9 @@ class TestBackward(unittest.TestCase):
fetch_list=[var.name + "@GRAD", z.name + "@GRAD"])
self.assertTrue((var_grad == z_grad[0, :]).all())
- def test_dynamic(self):
paddle.disable_static()
def func_test_dynamic(self):
model = Model()
x = paddle.ones([1, 12, 3, 3]).astype("float32")
y = paddle.ones([1, 12, 3, 3]).astype("float32")
@@ -1006,11 +1012,18 @@ class TestBackward(unittest.TestCase):
self.assertTrue(var.grad.shape == x.grad[0, :, 0, 0].shape)
#
# TODO(pangyoki) add inplace and delete if
if not _in_eager_mode():
self.assertTrue((0 == x.grad[0, :, 0, 0]).all())
def test_dynamic(self):
with _test_eager_guard():
self.func_test_dynamic()
self.func_test_dynamic()
class TestGradientTruncated(unittest.TestCase):
- def test_consistent_with_competitor(self):
+ def func_test_consistent_with_competitor(self):
paddle.disable_static()
def set_value(t, value):
@@ -1182,6 +1195,11 @@ class TestGradientTruncated(unittest.TestCase):
self.assertTrue(~x.stop_gradient)
self.assertTrue(~x.is_leaf)
def test_consistent_with_competitor(self):
with _test_eager_guard():
self.func_test_consistent_with_competitor()
self.func_test_consistent_with_competitor()
def test_static_graph(self):
paddle.enable_static()
@@ -1328,6 +1346,7 @@ class TestGradientTruncated(unittest.TestCase):
self.assertTrue((numel(out1[0][0:5:3].shape) == out3[0]).all())
array = array[0]
paddle.disable_static()
class TestSetValueInplace(unittest.TestCase):
......
@@ -22,6 +22,7 @@ import copy
import paddle
import paddle.fluid as fluid
import paddle.fluid.core as core
from paddle.fluid.framework import _test_eager_guard, _in_eager_mode
class TestVarBase(unittest.TestCase):
@@ -874,7 +875,7 @@ class TestVarBase(unittest.TestCase):
col = np.array([2, 1, 3])
self.assertTrue(np.array_equal(array[row, col], x[row, col].numpy()))
- def test_slice(self):
+ def func_test_slice(self):
with fluid.dygraph.guard():
self._test_slice()
self._test_slice_for_tensor_attr()
@@ -899,6 +900,11 @@ class TestVarBase(unittest.TestCase):
mask = np.array([1, 0, 1, 0], dtype=bool)
var[paddle.to_tensor([0, 1]), mask]
def test_slice(self):
with _test_eager_guard():
self.func_test_slice()
self.func_test_slice()
def test_var_base_to_np(self):
with fluid.dygraph.guard():
var = fluid.dygraph.to_variable(self.array)
@@ -1125,7 +1131,6 @@ class TestVarBase(unittest.TestCase):
class TestVarBaseSetitem(unittest.TestCase):
def setUp(self):
- paddle.disable_static()
self.set_dtype()
self.tensor_x = paddle.to_tensor(np.ones((4, 2, 3)).astype(self.dtype))
self.np_value = np.random.random((2, 3)).astype(self.dtype)
@@ -1135,11 +1140,12 @@ class TestVarBaseSetitem(unittest.TestCase):
self.dtype = "int32"
def _test(self, value):
- paddle.disable_static()
+ if not _in_eager_mode():
self.assertEqual(self.tensor_x.inplace_version, 0)
id_origin = id(self.tensor_x)
self.tensor_x[0] = value
if not _in_eager_mode():
self.assertEqual(self.tensor_x.inplace_version, 1)
if isinstance(value, (six.integer_types, float)):
@@ -1152,27 +1158,47 @@ class TestVarBaseSetitem(unittest.TestCase):
self.assertEqual(id_origin, id(self.tensor_x))
self.tensor_x[1:2] = value
if not _in_eager_mode():
self.assertEqual(self.tensor_x.inplace_version, 2)
self.assertTrue(np.array_equal(self.tensor_x[1].numpy(), result))
self.assertEqual(id_origin, id(self.tensor_x))
self.tensor_x[...] = value
if not _in_eager_mode():
self.assertEqual(self.tensor_x.inplace_version, 3)
self.assertTrue(np.array_equal(self.tensor_x[3].numpy(), result))
self.assertEqual(id_origin, id(self.tensor_x))
- def test_value_tensor(self):
- paddle.disable_static()
+ def func_test_value_tensor(self):
self._test(self.tensor_value)
- def test_value_numpy(self):
- paddle.disable_static()
+ def test_value_tensor(self):
+ with _test_eager_guard():
self.setUp()
self.func_test_value_tensor()
self.setUp()
self.func_test_value_tensor()
def func_test_value_numpy(self):
self._test(self.np_value)
- def test_value_int(self):
- paddle.disable_static()
+ def test_value_numpy(self):
+ with _test_eager_guard():
self.setUp()
self.func_test_value_numpy()
self.setUp()
self.func_test_value_numpy()
def func_test_value_int(self):
self._test(10)
def test_value_int(self):
with _test_eager_guard():
self.setUp()
self.func_test_value_int()
self.setUp()
self.func_test_value_int()
class TestVarBaseSetitemInt64(TestVarBaseSetitem):
def set_dtype(self):
......
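The test changes above all follow one pattern: the original body moves into a func_test_* method, and the public test_* method runs it twice, once under the eager guard and once in legacy dygraph mode. A self-contained sketch of that pattern (test name and body are illustrative):

    import unittest
    import paddle
    from paddle.fluid.framework import _test_eager_guard

    class TestEagerAndLegacy(unittest.TestCase):
        def func_test_add(self):
            x = paddle.ones([2])
            self.assertEqual((x + x).numpy().tolist(), [2.0, 2.0])

        def test_add(self):
            with _test_eager_guard():
                self.func_test_add()   # eager mode
            self.func_test_add()       # legacy dygraph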
@@ -382,7 +382,7 @@ def _getitem_impl_(var, item):
idx = assign(np.array(slice_item).astype("int32"))
return index_select(var, index=idx, axis=0)
- elif isinstance(slice_item, (Variable)):
+ elif isinstance(slice_item, (Variable, core.eager.Tensor)):
if len(item) == 1:
from ..tensor import index_select, gather_nd
@@ -636,7 +636,7 @@ def _setitem_impl_(var, item, value):
shape = list(value.shape)
if dtype == core.VarDesc.VarType.BOOL:
value_name = "bool_values"
- values = [bool(v) for v in value.flat]
+ values = [int(v) for v in value.flat]
elif dtype == core.VarDesc.VarType.FP32:
value_name = "fp32_values"
values = [float(v) for v in value.flat]
@@ -657,7 +657,7 @@ def _setitem_impl_(var, item, value):
attrs[value_name] = values
attrs["shape"] = shape
- elif isinstance(value, Variable):
+ elif isinstance(value, (Variable, core.eager.Tensor)):
inputs["ValueTensor"] = value
else:
raise TypeError(
@@ -665,7 +665,9 @@ def _setitem_impl_(var, item, value):
"paddle.Tensor to a paddle.Tensor, but received {}".format(
type(value)))
- if paddle.fluid.framework.in_dygraph_mode():
+ if paddle.fluid.framework.in_dygraph_mode(
) and not paddle.fluid.framework._in_eager_mode():
# TODO(pangyoki) add inplace(BumpInplaceVersion) if need
var._bump_inplace_version()
cur_block = default_main_program().current_block()
......
@@ -3789,13 +3789,13 @@ def diff(x, n=1, axis=-1, prepend=None, append=None, name=None):
attrs_1 += ('starts', starts_1)
ends_1 = [dim_len - 1]
attrs_1 += ('ends', ends_1)
- input_front = _C_ops.slice(new_input, None, None, 'axes', axes, \
+ input_front = _C_ops.slice(new_input, None, None, None, None, 'axes', axes, \
'infer_flags', infer_flags, *attrs_1)
starts_2 = [1]
attrs_2 += ('starts', starts_2)
ends_2 = [dim_len]
attrs_2 += ('ends', ends_2)
- input_back = _C_ops.slice(new_input, None, None, 'axes', axes, \
+ input_back = _C_ops.slice(new_input, None, None, None, None, 'axes', axes, \
'infer_flags', infer_flags, *attrs_2)
if x.dtype == paddle.bool:
......
@@ -317,7 +317,7 @@ def tensor_to_string(tensor, prefix='Tensor'):
_template = "{prefix}(shape={shape}, dtype={dtype}, place={place}, stop_gradient={stop_gradient},\n{indent}{data})"
- if not tensor._is_initialized():
+ if not tensor._is_dense_tensor_hold_allocation():
return "Tensor(Not initialized)"
if tensor.is_sparse():
......
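The printing change above only affects tensors whose underlying DenseTensor holds no allocation; a quick sketch of the observable behaviour (assuming this build):

    import paddle

    t = paddle.to_tensor([1.0, 2.0])
    print(t)  # normal repr: the DenseTensor holds an allocation
    # a tensor whose DenseTensor holds no allocation would print "Tensor(Not initialized)"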