diff --git a/paddle/fluid/imperative/tracer.cc b/paddle/fluid/imperative/tracer.cc index d18c8e96c49b6a993fbd0a8d632212ae8d7f8c6d..3d4cfa2df31798e476dbd96945b69d781edfa421 100644 --- a/paddle/fluid/imperative/tracer.cc +++ b/paddle/fluid/imperative/tracer.cc @@ -176,6 +176,20 @@ void Tracer::TraceOp(const std::string& type, const NameVarMap& ins, const std::map& inplace_map, paddle::framework::AttributeMap* passed_default_attrs_, bool use_default_attr_map) { + TraceOpImpl(type, ins, outs, attrs, place, trace_backward, + inplace_map, passed_default_attrs_, + use_default_attr_map); +} + +template +void Tracer::TraceOpImpl(const std::string& type, + const NameVarMap& ins, + const NameVarMap& outs, + framework::AttributeMap& attrs, + const platform::Place& place, bool trace_backward, + const std::map& inplace_map, + paddle::framework::AttributeMap* passed_default_attrs_, + bool use_default_attr_map) { platform::RecordEvent op_type_record_event( type + " trace_op", platform::TracerEventType::Operator, 1); platform::ScopedFlushDenormal flush; @@ -340,25 +354,33 @@ void Tracer::TraceOp(const std::string& type, const NameVarBaseMap& ins, void Tracer::TraceOp(const std::string& type, const NameTensorMap& ins, const NameTensorMap& outs, - paddle::framework::AttributeMap attrs, + paddle::framework::AttributeMap& attrs, const paddle::platform::Place& place, paddle::framework::AttributeMap* default_attrs, bool use_default_attr_map, const std::map& inplace_map) { VLOG(6) << "Running On Eager TraceOp with use_default_attr_map: " << use_default_attr_map; - TraceOp(type, ins, outs, std::move(attrs), place, false, - inplace_map, default_attrs, use_default_attr_map); + TraceOpImpl(type, ins, outs, attrs, place, false, + inplace_map, default_attrs, + use_default_attr_map); +} + +void Tracer::TraceOp(const std::string& type, const NameTensorMap& ins, + const NameTensorMap& outs, + paddle::framework::AttributeMap attrs) { + VLOG(6) << "Running On Eager TraceOp(4 agrs): "; + TraceOpImpl(type, ins, outs, attrs, expected_place_, + false, {}, nullptr, true); } void Tracer::TraceOp(const std::string& type, const NameTensorMap& ins, const NameTensorMap& outs, - paddle::framework::AttributeMap attrs, + paddle::framework::AttributeMap& attrs, const std::map& inplace_map) { VLOG(6) << "Running On Eager TraceOp(less): "; - TraceOp(type, ins, outs, std::move(attrs), - expected_place_, false, inplace_map, nullptr, - true); + TraceOpImpl(type, ins, outs, attrs, expected_place_, + false, inplace_map, nullptr, true); } void Tracer::SetExpectedPlace(platform::Place place) { diff --git a/paddle/fluid/imperative/tracer.h b/paddle/fluid/imperative/tracer.h index f24961885c9b85b03c561f60f375b1a21bf086dd..4e671d52457e203b7d64420490d9420db013b673 100644 --- a/paddle/fluid/imperative/tracer.h +++ b/paddle/fluid/imperative/tracer.h @@ -74,16 +74,32 @@ class Tracer { paddle::framework::AttributeMap* passed_default_attrs_ = nullptr, bool use_default_attr_map = true); + template + void TraceOpImpl( + const std::string& type, const NameVarMap& ins, + const NameVarMap& outs, + framework::AttributeMap& attrs, // NOLINT + const platform::Place& place, bool trace_backward, + const std::map& inplace_map = {}, + paddle::framework::AttributeMap* passed_default_attrs_ = nullptr, + bool use_default_attr_map = true); + void TraceOp(const std::string& type, const NameVarBaseMap& ins, const NameVarBaseMap& outs, framework::AttributeMap attrs, const std::map& inplace_map = {}); void TraceOp(const std::string& type, const NameTensorMap& ins, - const 
NameTensorMap& outs, paddle::framework::AttributeMap attrs, + const NameTensorMap& outs, + paddle::framework::AttributeMap& attrs, // NOLINT const std::map& inplace_map = {}); void TraceOp(const std::string& type, const NameTensorMap& ins, - const NameTensorMap& outs, paddle::framework::AttributeMap attrs, + const NameTensorMap& outs, + paddle::framework::AttributeMap attrs); + + void TraceOp(const std::string& type, const NameTensorMap& ins, + const NameTensorMap& outs, + paddle::framework::AttributeMap& attrs, // NOLINT const paddle::platform::Place& place, paddle::framework::AttributeMap* default_attrs, bool use_default_attr_map, diff --git a/paddle/fluid/pybind/eager_method.cc b/paddle/fluid/pybind/eager_method.cc index cce663e410ddb5d4d987e58d16d578ec85ee6142..52a43c4ebe8d8811ceac406d4d68aa3f1963f7ce 100644 --- a/paddle/fluid/pybind/eager_method.cc +++ b/paddle/fluid/pybind/eager_method.cc @@ -25,6 +25,7 @@ limitations under the License. */ #include "paddle/fluid/eager/hooks.h" #include "paddle/fluid/eager/utils.h" #include "paddle/fluid/framework/convert_utils.h" +#include "paddle/fluid/framework/python_headers.h" #include "paddle/fluid/memory/allocation/allocator.h" #include "paddle/fluid/memory/memcpy.h" #include "paddle/fluid/platform/enforce.h" @@ -32,12 +33,14 @@ limitations under the License. */ #include "paddle/fluid/pybind/eager_utils.h" #include "paddle/fluid/pybind/exception.h" #include "paddle/fluid/pybind/slice_utils.h" +#include "paddle/fluid/pybind/tensor_py.h" #include "paddle/phi/api/include/api.h" #include "paddle/phi/common/data_type.h" #include "paddle/phi/core/compat/convert_utils.h" #include "paddle/phi/core/dense_tensor.h" #include "paddle/phi/core/sparse_coo_tensor.h" #include "paddle/phi/core/sparse_csr_tensor.h" +#include "pybind11/detail/internals.h" namespace paddle { namespace pybind { @@ -150,12 +153,22 @@ bool PyCheckTensor(PyObject* obj) { static PyObject* tensor_method_numpy(TensorObject* self, PyObject* args, PyObject* kwargs) { EAGER_TRY - PADDLE_ENFORCE_EQ( - self->tensor.initialized(), true, - platform::errors::InvalidArgument( - "Tensor data of %s is Empty that indicates we have null tensor for " - "now, please check if it has no data and initialize it first.", - self->tensor.name())); + auto& api = pybind11::detail::npy_api::get(); + if (!self->tensor.impl()) { + Py_intptr_t py_dims[paddle::framework::DDim::kMaxRank]; + Py_intptr_t py_strides[paddle::framework::DDim::kMaxRank]; + py_dims[0] = 0; + py_strides[0] = 0; + + PyObject* array = api.PyArray_NewFromDescr_( + api.PyArray_Type_, + api.PyArray_DescrFromType_(pybind11::detail::npy_api::NPY_FLOAT_), 1, + py_dims, py_strides, nullptr, + pybind11::detail::npy_api::NPY_ARRAY_ALIGNED_ | + pybind11::detail::npy_api::NPY_ARRAY_WRITEABLE_, + nullptr); + return array; + } auto tensor_dims = self->tensor.shape(); auto numpy_dtype = TensorDtype2NumpyDtype(self->tensor.type()); auto sizeof_dtype = paddle::framework::DataTypeSize(self->tensor.type()); @@ -167,7 +180,7 @@ static PyObject* tensor_method_numpy(TensorObject* self, PyObject* args, py_strides[i] = sizeof_dtype * numel; numel *= py_dims[i]; } - auto& api = pybind11::detail::npy_api::get(); + PyObject* array = api.PyArray_NewFromDescr_( api.PyArray_Type_, api.PyArray_DescrFromType_(numpy_dtype), tensor_dims.size(), py_dims, py_strides, nullptr, @@ -175,6 +188,10 @@ static PyObject* tensor_method_numpy(TensorObject* self, PyObject* args, pybind11::detail::npy_api::NPY_ARRAY_WRITEABLE_, nullptr); + if (!self->tensor.impl()->initialized()) { + 
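+ // impl() exists but holds no allocation yet: return the ndarray created above, which has the correct dtype and shape but uninitialized contents, instead of raising.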
return array; + } + if (self->tensor.is_cpu() || self->tensor.is_gpu_pinned()) { auto dense_tensor = std::dynamic_pointer_cast(self->tensor.impl()); @@ -213,6 +230,20 @@ static PyObject* tensor_method__is_initialized(TensorObject* self, EAGER_CATCH_AND_THROW_RETURN_NULL } +static PyObject* tensor_method__is_dense_tensor_hold_allocation( + TensorObject* self, PyObject* args, PyObject* kwargs) { + EAGER_TRY + auto dense_tensor = + std::dynamic_pointer_cast(self->tensor.impl()); + if (dense_tensor) { + return ToPyObject(dense_tensor->IsInitialized()); + } else { + return ToPyObject(false); + } + + EAGER_CATCH_AND_THROW_RETURN_NULL +} + static PyObject* tensor_method__copy_to(TensorObject* self, PyObject* args, PyObject* kwargs) { EAGER_TRY @@ -552,10 +583,13 @@ static PyObject* tensor__getitem_index_not_tensor(TensorObject* self, } if (op_type == "slice") { out = slice_dygraph_function(self->tensor, paddle::experimental::Tensor(), - paddle::experimental::Tensor(), + paddle::experimental::Tensor(), {}, {}, std::move(attrs)); } else if (op_type == "strided_slice") { - out = strided_slice_dygraph_function(self->tensor, attrs); + out = strided_slice_dygraph_function( + self->tensor, paddle::experimental::Tensor(), + paddle::experimental::Tensor(), paddle::experimental::Tensor(), {}, + {}, {}, attrs); } else { PADDLE_THROW(platform::errors::InvalidArgument( "Slice is only support slice and strided_slice, but we got %s which " @@ -604,6 +638,7 @@ static PyObject* tensor__getitem_index_not_tensor(TensorObject* self, auto select_index = paddle::experimental::Tensor( egr::Controller::Instance().GenerateUniqueName()); auto idx_tensor = std::make_shared(); + select_index.set_impl(idx_tensor); auto* dev_ctx = platform::DeviceContextPool::Instance().Get( egr::Controller::Instance().GetExpectedPlace()); paddle::framework::TensorFromVector(list_select_idxs, *dev_ctx, @@ -617,6 +652,216 @@ static PyObject* tensor__getitem_index_not_tensor(TensorObject* self, EAGER_CATCH_AND_THROW_RETURN_NULL } +static PyObject* tensor_method__setitem_eager_tensor(TensorObject* self, + PyObject* args, + PyObject* kwargs) { + EAGER_TRY + VLOG(4) << "Call __setitem_eager_tensor"; + + auto self_tensor = static_cast(self->tensor.impl().get()); + + PyObject* _index = PyTuple_GET_ITEM(args, 0); + PyObject* value_obj = PyTuple_GET_ITEM(args, 1); + // NOTE(zhiqiu): PyTuple_Pack increases refcount while PyTuple_New does not + // https://github.com/python/cpython/blob/24b63c695ae0a95b06379eaadace66735abac1e2/Objects/tupleobject.c#L251 + PyObject* index_ptr = + !PyTuple_Check(_index) ? PyTuple_Pack(1, _index) : _index; + DEFINE_PADDLE_SCOPE_GUARD([index_ptr, &_index]() { + if (!PyTuple_Check(_index)) { + Py_DECREF(index_ptr); + VLOG(4) << "Call Py_DECREF"; + } + }); + + // TODO(pangyoki) add inplace(BumpInplaceVersion) if needed + + // 1. Check arguments + bool parse_index = true; + + // Check whether _index can be parsed. + const int size = PyTuple_GET_SIZE(index_ptr); + for (int dim = 0; dim < size; ++dim) { + PyObject* slice_item = PyTuple_GetItem(index_ptr, dim); + if (!(PyCheckInteger(slice_item) || PySlice_Check(slice_item) || + slice_item == Py_Ellipsis || slice_item == Py_None)) { + parse_index = false; + break; + } + } + + // 2. Call op set_value to speed up if the condition is met, + // otherwise call TensorToPyArray. + // TODO(liym27): Try not to call TensorToPyArray because it always + // copies data to cpu place, which reduces performance.
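+ // Fast path: when every index item is an int, slice, Ellipsis or None, the assignment is lowered to the set_value op; any other index form falls back to the numpy-based assignment in the else branch below.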
+ if (parse_index) { + std::vector axes, starts, ends, steps, decrease_axes, none_axes, + infer_flags, list_select_idxs; + // if index is a list, list_select_flag will be true + bool list_select_flag = false; + ParseIndexingSlice(self_tensor, index_ptr, &axes, &starts, &ends, &steps, + &decrease_axes, &none_axes, &infer_flags, + &list_select_idxs, &list_select_flag); + + framework::AttributeMap attrs = {{"axes", axes}, + {"starts", starts}, + {"ends", ends}, + {"steps", steps}, + {"decrease_axes", decrease_axes}, + {"none_axes", none_axes}}; + + if (egr::Controller::Instance().HasGrad()) { + PADDLE_ENFORCE_EQ( + egr::egr_utils_api::IsLeafTensor(self->tensor) && + !egr::EagerUtils::autograd_meta(&self->tensor)->StopGradient(), + false, platform::errors::InvalidArgument( + "Leaf Tensor (%s) that doesn't stop gradient can't use " + "inplace strategy.", + self->tensor.name())); + } + + paddle::experimental::Tensor value_tensor; + + if (PyCheckTensor(value_obj)) { + value_tensor = reinterpret_cast(value_obj)->tensor; + + // pass the stop_gradient from value to tensor + if (!egr::EagerUtils::autograd_meta(&value_tensor)->StopGradient() && + egr::EagerUtils::autograd_meta(&self->tensor)->StopGradient()) { + egr::EagerUtils::autograd_meta(&self->tensor)->SetStopGradient(false); + } + } else if (py::isinstance(value_obj)) { + paddle::experimental::Tensor value_tensor_tmp( + std::make_shared(), + egr::Controller::Instance().GenerateUniqueName()); + py::object value_obj_tmp(py::handle(value_obj), true); + py::object value = value_obj_tmp; + if (self->tensor.dtype() == paddle::experimental::DataType::FLOAT32) { + if (!py::isinstance>(value_obj_tmp)) { + value = pybind11::detail::CastNumpyArray(value_obj_tmp); + } + } else if (self->tensor.dtype() == + paddle::experimental::DataType::FLOAT64) { + if (!py::isinstance>(value_obj_tmp)) { + value = pybind11::detail::CastNumpyArray(value_obj_tmp); + } + } else if (self->tensor.dtype() == + paddle::experimental::DataType::INT32) { + if (!py::isinstance>(value_obj_tmp)) { + value = pybind11::detail::CastNumpyArray(value_obj_tmp); + } + } else if (self->tensor.dtype() == + paddle::experimental::DataType::INT64) { + if (!py::isinstance>(value_obj_tmp)) { + value = pybind11::detail::CastNumpyArray(value_obj_tmp); + } + } else if (self->tensor.dtype() == paddle::experimental::DataType::BOOL) { + if (!py::isinstance>(value_obj_tmp)) { + value = pybind11::detail::CastNumpyArray(value_obj_tmp); + } + } else { + PADDLE_THROW(platform::errors::InvalidArgument( + "When assigning a numpy value to a paddle.Tensor, " + "the data type of the paddle.Tensor must be bool, " + "float32, float64, int32 or int64, " + "please check the type of tensor.")); + } + + if (value_tensor_tmp.place() == paddle::PlaceType::kUNK) { +#if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP) + SetTensorFromPyArray( + static_cast(value_tensor_tmp.impl().get()), + value, platform::Place(platform::CUDAPlace(0)), false); +#else + SetTensorFromPyArray( + static_cast(value_tensor_tmp.impl().get()), + value, platform::Place(platform::CPUPlace()), false); +#endif + } else { + SetTensorFromPyArray( + static_cast(value_tensor_tmp.impl().get()), + value, value_tensor_tmp.inner_place(), false); + } + + value_tensor = value_tensor_tmp; + } else { + py::object value_obj_tmp(py::handle(value_obj), true); + // convert the value to self data type + if (py::isinstance(value_obj_tmp) || + py::isinstance(value_obj_tmp) || + py::isinstance(value_obj_tmp)) { + if (self->tensor.dtype() ==
paddle::experimental::DataType::FLOAT32) { + attrs["fp32_values"] = + std::vector{value_obj_tmp.cast()}; + } else if (self->tensor.dtype() == + paddle::experimental::DataType::FLOAT64) { + attrs["fp64_values"] = + std::vector{value_obj_tmp.cast()}; + } else if (self->tensor.dtype() == + paddle::experimental::DataType::INT32) { + attrs["int32_values"] = + std::vector{value_obj_tmp.cast()}; + } else if (self->tensor.dtype() == + paddle::experimental::DataType::INT64) { + attrs["int64_values"] = + std::vector{value_obj_tmp.cast()}; + } else if (self->tensor.dtype() == + paddle::experimental::DataType::BOOL) { + attrs["bool_values"] = std::vector{value_obj_tmp.cast()}; + } else { + PADDLE_THROW(platform::errors::InvalidArgument( + "When assigning a value to a paddle.Tensor, " + "the data type of the paddle.Tensor must be bool, " + "float32, float64, int32 or int64, " + "please check the type of tensor.")); + } + attrs["shape"] = std::vector{1}; + + } else { + PADDLE_THROW(platform::errors::InvalidArgument( + "Value type error. The assigned value allows " + "numpy.ndarray, integer, float or bool, " + "but received %s.", + Py_TYPE(value_obj))); + } + } + + { + // Release gil and do tracing + py::gil_scoped_release release; + self->tensor = set_value_dygraph_function(self->tensor, value_tensor, {}, + {}, {}, attrs); + } + } else { + auto self_numpy = TensorToPyArray(*self_tensor); + VLOG(4) << "parse_index is false"; + if (PyCheckTensor(_index)) { + VLOG(4) << "index is tensor"; + auto index_tensor = static_cast( + reinterpret_cast(_index)->tensor.impl().get()); + auto index_numpy = TensorToPyArray(*index_tensor); + self_numpy[index_numpy] = py::object(py::handle(value_obj), true); + } else { + VLOG(4) << "index is not tensor"; + self_numpy[_index] = py::object(py::handle(value_obj), true); + } + if (self->tensor.place() == paddle::PlaceType::kUNK) { +#if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP) + SetTensorFromPyArray(self_tensor, self_numpy, + platform::Place(platform::CUDAPlace(0)), false); +#else + SetTensorFromPyArray(self_tensor, self_numpy, + platform::Place(platform::CPUPlace()), false); +#endif + } else { + SetTensorFromPyArray(self_tensor, self_numpy, self->tensor.inner_place(), + false); + } + } + Py_INCREF(Py_None); + return Py_None; + EAGER_CATCH_AND_THROW_RETURN_NULL +} + static PyObject* tensor_register_grad_hook(TensorObject* self, PyObject* args, PyObject* kwargs) { EAGER_TRY @@ -825,6 +1070,10 @@ PyMethodDef variable_methods[] = { {"_is_initialized", (PyCFunction)(void (*)(void))tensor_method__is_initialized, METH_VARARGS | METH_KEYWORDS, NULL}, + {"_is_dense_tensor_hold_allocation", + (PyCFunction)( + void (*)(void))tensor_method__is_dense_tensor_hold_allocation, + METH_VARARGS | METH_KEYWORDS, NULL}, {"_copy_to", (PyCFunction)(void (*)(void))tensor_method__copy_to, METH_VARARGS | METH_KEYWORDS, NULL}, {"copy_", (PyCFunction)(void (*)(void))tensor_method_copy_, @@ -857,6 +1106,9 @@ PyMethodDef variable_methods[] = { {"_getitem_index_not_tensor", (PyCFunction)(void (*)(void))tensor__getitem_index_not_tensor, METH_VARARGS | METH_KEYWORDS, NULL}, + {"__setitem_eager_tensor__", + (PyCFunction)(void (*)(void))tensor_method__setitem_eager_tensor, + METH_VARARGS | METH_KEYWORDS, NULL}, {"_register_grad_hook", (PyCFunction)(void (*)(void))tensor_register_grad_hook, METH_VARARGS | METH_KEYWORDS, NULL}, diff --git a/paddle/fluid/pybind/eager_properties.cc b/paddle/fluid/pybind/eager_properties.cc index ff8980d727e70a41223878f22f019353f8b71972..a610c31ee8946b9e3e9f3bfdf50c7448d4755c8d
100644 --- a/paddle/fluid/pybind/eager_properties.cc +++ b/paddle/fluid/pybind/eager_properties.cc @@ -52,6 +52,12 @@ PyObject* tensor_properties_get_type(TensorObject* self, void* closure) { EAGER_CATCH_AND_THROW_RETURN_NULL } +PyObject* tensor_properties_is_leaf(TensorObject* self, void* closure) { + EAGER_TRY + return ToPyObject(egr::egr_utils_api::IsLeafTensor(self->tensor)); + EAGER_CATCH_AND_THROW_RETURN_NULL +} + int tensor_properties_set_name(TensorObject* self, PyObject* value, void* closure) { EAGER_TRY @@ -179,6 +185,7 @@ struct PyGetSetDef variable_properties[] = { nullptr}, {"dtype", (getter)tensor_properties_get_dtype, nullptr, nullptr, nullptr}, {"type", (getter)tensor_properties_get_type, nullptr, nullptr, nullptr}, + {"is_leaf", (getter)tensor_properties_is_leaf, nullptr, nullptr, nullptr}, {nullptr, nullptr, nullptr, nullptr, nullptr}}; } // namespace pybind diff --git a/paddle/fluid/pybind/imperative.cc b/paddle/fluid/pybind/imperative.cc index 3a2c93309f34454ae0ce2d3419e3fce474f7c06b..7a00f91da2e36425e42e108176251093a9e9d982 100644 --- a/paddle/fluid/pybind/imperative.cc +++ b/paddle/fluid/pybind/imperative.cc @@ -386,46 +386,6 @@ GetVarBaseListFromPyHandle(const py::handle &handle) { return result; } -// cast numpy type form S to T, this may allocate new memory -template -static py::array_t CastNumpyType(py::array_t array) { - if (std::is_same::value) { - return array; - } - auto dim = array.ndim(); - std::vector result_shape(dim); - for (auto i = 0; i < dim; i++) { - result_shape[i] = array.shape(i); - } - - py::array_t result(result_shape); - - return py::vectorize([](S s) { return static_cast(s); })(array); -} - -template -static py::array_t CastNumpyArray(const py::object &array) { - if (py::isinstance>(array)) { - return CastNumpyType(array.cast>()); - } else if (py::isinstance>(array)) { - return CastNumpyType(array.cast>()); - } else if (py::isinstance>(array)) { - return CastNumpyType(array.cast>()); - } else if (py::isinstance>(array)) { - return CastNumpyType(array.cast>()); - } else if (py::isinstance>(array)) { - return CastNumpyType(array.cast>()); - } else { - PADDLE_THROW(platform::errors::InvalidArgument( - "Value type error. 
The assign numpy value allows integer, float, " - "double and bool, " - "but received %s.", - Py_TYPE(array.ptr())->tp_name)); - } - // can't reach here - return py::array_t(); -} - static imperative::NameVarBaseMap ConvertToNameVarBaseMap( const PyNameVarBaseMap &map) { imperative::NameVarBaseMap result; @@ -854,27 +814,29 @@ void BindImperative(py::module *m_ptr) { py::object value = value_obj; if (self->DataType() == framework::proto::VarType::FP32) { if (!py::isinstance>(value_obj)) { - value = CastNumpyArray(value_obj); + value = pybind11::detail::CastNumpyArray(value_obj); } } else if (self->DataType() == framework::proto::VarType::FP64) { if (!py::isinstance>(value_obj)) { - value = CastNumpyArray(value_obj); + value = pybind11::detail::CastNumpyArray(value_obj); } } else if (self->DataType() == framework::proto::VarType::INT32) { if (!py::isinstance>(value_obj)) { - value = CastNumpyArray(value_obj); + value = + pybind11::detail::CastNumpyArray(value_obj); } } else if (self->DataType() == framework::proto::VarType::INT64) { if (!py::isinstance>(value_obj)) { - value = CastNumpyArray(value_obj); + value = + pybind11::detail::CastNumpyArray(value_obj); } } else if (self->DataType() == framework::proto::VarType::BOOL) { if (!py::isinstance>(value_obj)) { - value = CastNumpyArray(value_obj); + value = pybind11::detail::CastNumpyArray(value_obj); } } else { PADDLE_THROW(platform::errors::InvalidArgument( diff --git a/paddle/fluid/pybind/op_function_generator.h b/paddle/fluid/pybind/op_function_generator.h index 0a389153b0ee4bc5de3309a4184afb02871cbccd..65b5beb865d1c267d50e3251d5a0e711942f737a 100644 --- a/paddle/fluid/pybind/op_function_generator.h +++ b/paddle/fluid/pybind/op_function_generator.h @@ -38,7 +38,15 @@ std::map> op_ins_map = { {"assign", {"X"}}, {"reshape2", {"X", "Shape"}}, {"expand", {"X", "ExpandTimes"}}, - {"slice", {"Input", "StartsTensor", "EndsTensor"}}, + {"slice", + {"Input", "StartsTensor", "EndsTensor", "StartsTensorList", + "EndsTensorList"}}, + {"strided_slice", + {"Input", "StartsTensor", "EndsTensor", "StridesTensor", + "StartsTensorList", "EndsTensorList", "StridesTensorList"}}, + {"set_value", + {"Input", "ValueTensor", "StartsTensorList", "EndsTensorList", + "StepsTensorList"}}, {"fake_quantize_dequantize_moving_average_abs_max", {"X", "InScale", "InAccum", "InState"}}, {"nll_loss", {"X", "Label", "Weight"}}, diff --git a/paddle/fluid/pybind/tensor_py.h b/paddle/fluid/pybind/tensor_py.h index 6849fcb039410f95d829b9bb793a856f1485bd6c..bf459bd46842168a31608e40fadf38721976ca6d 100644 --- a/paddle/fluid/pybind/tensor_py.h +++ b/paddle/fluid/pybind/tensor_py.h @@ -52,6 +52,46 @@ constexpr int NPY_UINT16_ = 4; constexpr int NPY_COMPLEX64 = 14; constexpr int NPY_COMPLEX128 = 15; +// cast numpy type form S to T, this may allocate new memory +template +static py::array_t CastNumpyType(py::array_t array) { + if (std::is_same::value) { + return array; + } + auto dim = array.ndim(); + std::vector result_shape(dim); + for (auto i = 0; i < dim; i++) { + result_shape[i] = array.shape(i); + } + + py::array_t result(result_shape); + + return py::vectorize([](S s) { return static_cast(s); })(array); +} + +template +static py::array_t CastNumpyArray(const py::object &array) { + if (py::isinstance>(array)) { + return CastNumpyType(array.cast>()); + } else if (py::isinstance>(array)) { + return CastNumpyType(array.cast>()); + } else if (py::isinstance>(array)) { + return CastNumpyType(array.cast>()); + } else if (py::isinstance>(array)) { + return 
CastNumpyType(array.cast>()); + } else if (py::isinstance>(array)) { + return CastNumpyType(array.cast>()); + } else { + PADDLE_THROW(paddle::platform::errors::InvalidArgument( + "Value type error. The assign numpy value allows integer, float, " + "double and bool, " + "but received %s.", + Py_TYPE(array.ptr())->tp_name)); + } + // can't reach here + return py::array_t(); +} + // Note: Since float16 is not a builtin type in C++, we register // paddle::platform::float16 as numpy.float16. // Ref: https://github.com/pybind/pybind11/issues/1776 diff --git a/python/paddle/fluid/dygraph/varbase_patch_methods.py b/python/paddle/fluid/dygraph/varbase_patch_methods.py index 24284ca78c1ce98116945d7578e0f0cdf557e89b..2ca923f8634878c7a110dd7fc711459295a42427 100644 --- a/python/paddle/fluid/dygraph/varbase_patch_methods.py +++ b/python/paddle/fluid/dygraph/varbase_patch_methods.py @@ -768,8 +768,11 @@ def monkey_patch_varbase(): return _setitem_impl_(self, item, value) else: - # Call c++ func __setitem_varbase__ to speedup. - return self.__setitem_varbase__(item, value) + if core._in_eager_mode(): + return self.__setitem_eager_tensor__(item, value) + else: + # Call c++ func __setitem_varbase__ to speedup. + return self.__setitem_varbase__(item, value) @framework.dygraph_only def _grad_ivar(self): diff --git a/python/paddle/fluid/layers/nn.py b/python/paddle/fluid/layers/nn.py index c72d51af8ae43d3593b688fb71ab09fde14ec6c2..63a2aeabc2384618acbefdd45b887d7c659b76fb 100755 --- a/python/paddle/fluid/layers/nn.py +++ b/python/paddle/fluid/layers/nn.py @@ -11189,8 +11189,8 @@ def slice(input, axes, starts, ends): ends_tensor.stop_gradient = True infer_flags = list(-1 for i in range(len(axes))) - return _C_ops.slice(input, starts_tensor, ends_tensor, 'axes', axes, - 'infer_flags', infer_flags, *attrs) + return _C_ops.slice(input, starts_tensor, ends_tensor, None, None, + 'axes', axes, 'infer_flags', infer_flags, *attrs) if not isinstance(starts, (list, tuple, Variable)): raise ValueError( diff --git a/python/paddle/fluid/layers/tensor.py b/python/paddle/fluid/layers/tensor.py index 683bf2bc81572364e08f9d123e540477f192a513..c5accd9ada8f7db14a1b32fdacb85c3ef9525482 100644 --- a/python/paddle/fluid/layers/tensor.py +++ b/python/paddle/fluid/layers/tensor.py @@ -632,7 +632,7 @@ def assign(input, output=None): dtype = VarDesc.VarType.FP32 if dtype == VarDesc.VarType.BOOL: value_name = "bool_values" - values = [bool(v) for v in input.flat] + values = [int(v) for v in input.flat] elif dtype == VarDesc.VarType.FP32: value_name = "fp32_values" values = [float(v) for v in input.flat] diff --git a/python/paddle/fluid/tests/unittests/npu/test_assign_value_op_npu.py b/python/paddle/fluid/tests/unittests/npu/test_assign_value_op_npu.py index d51976e1a1962d830a9f08021fcd7c3cafc174bf..71d4b45e61b186f4afcc798458af28e17722b9c1 100644 --- a/python/paddle/fluid/tests/unittests/npu/test_assign_value_op_npu.py +++ b/python/paddle/fluid/tests/unittests/npu/test_assign_value_op_npu.py @@ -71,7 +71,7 @@ class TestAssignValueNPUOp4(TestAssignValueNPUOp): def init_data(self): self.value = numpy.random.choice( a=[False, True], size=(2, 5)).astype(numpy.bool) - self.attrs["bool_values"] = [bool(v) for v in self.value.flat] + self.attrs["bool_values"] = [int(v) for v in self.value.flat] class TestAssignApi(unittest.TestCase): diff --git a/python/paddle/fluid/tests/unittests/test_assign_value_op.py b/python/paddle/fluid/tests/unittests/test_assign_value_op.py index 
adf238c43d21a8a6ca80bf27f57fb6cf96bb0467..2abdbdc5940f72640979716ec5ba09812248a452 100644 --- a/python/paddle/fluid/tests/unittests/test_assign_value_op.py +++ b/python/paddle/fluid/tests/unittests/test_assign_value_op.py @@ -58,7 +58,7 @@ class TestAssignValueOp4(TestAssignValueOp): def init_data(self): self.value = numpy.random.choice( a=[False, True], size=(2, 5)).astype(numpy.bool) - self.attrs["bool_values"] = [bool(v) for v in self.value.flat] + self.attrs["bool_values"] = [int(v) for v in self.value.flat] class TestAssignApi(unittest.TestCase): diff --git a/python/paddle/fluid/tests/unittests/test_set_value_op.py b/python/paddle/fluid/tests/unittests/test_set_value_op.py index 42225468bc41c80bb9f095f3742a24fadfa045f4..f7b145d358ec9d725447d07b133c5b06bb34da56 100644 --- a/python/paddle/fluid/tests/unittests/test_set_value_op.py +++ b/python/paddle/fluid/tests/unittests/test_set_value_op.py @@ -22,6 +22,7 @@ import numpy as np import paddle from paddle.fluid.layer_helper import LayerHelper from functools import reduce +from paddle.fluid.framework import _test_eager_guard, _in_eager_mode class TestSetValueBase(unittest.TestCase): @@ -69,7 +70,7 @@ class TestSetValueApi(TestSetValueBase): paddle.enable_static() return out - def test_api(self): + def func_test_api(self): static_out = self._run_static() dynamic_out = self._run_dynamic() self._get_answer() @@ -82,6 +83,11 @@ class TestSetValueApi(TestSetValueBase): (self.data == dynamic_out).all(), msg=error_msg.format("dynamic", self.data, dynamic_out)) + def test_api(self): + with _test_eager_guard(): + self.func_test_api() + self.func_test_api() + # 1. Test different type of item: int, Python slice, Paddle Tensor # 1.1 item is int @@ -995,9 +1001,9 @@ class TestBackward(unittest.TestCase): fetch_list=[var.name + "@GRAD", z.name + "@GRAD"]) self.assertTrue((var_grad == z_grad[0, :]).all()) - - def test_dynamic(self): paddle.disable_static() + + def func_test_dynamic(self): model = Model() x = paddle.ones([1, 12, 3, 3]).astype("float32") y = paddle.ones([1, 12, 3, 3]).astype("float32") @@ -1006,11 +1012,18 @@ class TestBackward(unittest.TestCase): self.assertTrue(var.grad.shape == x.grad[0, :, 0, 0].shape) # - self.assertTrue((0 == x.grad[0, :, 0, 0]).all()) + # TODO(pangyoki) add inplace and delete if + if not _in_eager_mode(): + self.assertTrue((0 == x.grad[0, :, 0, 0]).all()) + + def test_dynamic(self): + with _test_eager_guard(): + self.func_test_dynamic() + self.func_test_dynamic() class TestGradientTruncated(unittest.TestCase): - def test_consistent_with_competitor(self): + def func_test_consistent_with_competitor(self): paddle.disable_static() def set_value(t, value): @@ -1182,6 +1195,11 @@ class TestGradientTruncated(unittest.TestCase): self.assertTrue(~x.stop_gradient) self.assertTrue(~x.is_leaf) + def test_consistent_with_competitor(self): + with _test_eager_guard(): + self.func_test_consistent_with_competitor() + self.func_test_consistent_with_competitor() + def test_static_graph(self): paddle.enable_static() @@ -1328,6 +1346,7 @@ class TestGradientTruncated(unittest.TestCase): self.assertTrue((numel(out1[0][0:5:3].shape) == out3[0]).all()) array = array[0] + paddle.disable_static() class TestSetValueInplace(unittest.TestCase): diff --git a/python/paddle/fluid/tests/unittests/test_var_base.py b/python/paddle/fluid/tests/unittests/test_var_base.py index 57a7f94bedce9fb3cd9981e6ae21f6d902fd04d9..4b3e935426f9f1d79b91b7301b5cd6725960fb54 100644 --- a/python/paddle/fluid/tests/unittests/test_var_base.py +++ 
b/python/paddle/fluid/tests/unittests/test_var_base.py @@ -22,6 +22,7 @@ import copy import paddle import paddle.fluid as fluid import paddle.fluid.core as core +from paddle.fluid.framework import _test_eager_guard, _in_eager_mode class TestVarBase(unittest.TestCase): @@ -874,7 +875,7 @@ class TestVarBase(unittest.TestCase): col = np.array([2, 1, 3]) self.assertTrue(np.array_equal(array[row, col], x[row, col].numpy())) - def test_slice(self): + def func_test_slice(self): with fluid.dygraph.guard(): self._test_slice() self._test_slice_for_tensor_attr() @@ -899,6 +900,11 @@ class TestVarBase(unittest.TestCase): mask = np.array([1, 0, 1, 0], dtype=bool) var[paddle.to_tensor([0, 1]), mask] + def test_slice(self): + with _test_eager_guard(): + self.func_test_slice() + self.func_test_slice() + def test_var_base_to_np(self): with fluid.dygraph.guard(): var = fluid.dygraph.to_variable(self.array) @@ -1125,7 +1131,6 @@ class TestVarBase(unittest.TestCase): class TestVarBaseSetitem(unittest.TestCase): def setUp(self): - paddle.disable_static() self.set_dtype() self.tensor_x = paddle.to_tensor(np.ones((4, 2, 3)).astype(self.dtype)) self.np_value = np.random.random((2, 3)).astype(self.dtype) @@ -1135,12 +1140,13 @@ class TestVarBaseSetitem(unittest.TestCase): self.dtype = "int32" def _test(self, value): - paddle.disable_static() - self.assertEqual(self.tensor_x.inplace_version, 0) + if not _in_eager_mode(): + self.assertEqual(self.tensor_x.inplace_version, 0) id_origin = id(self.tensor_x) self.tensor_x[0] = value - self.assertEqual(self.tensor_x.inplace_version, 1) + if not _in_eager_mode(): + self.assertEqual(self.tensor_x.inplace_version, 1) if isinstance(value, (six.integer_types, float)): result = np.zeros((2, 3)).astype(self.dtype) + value @@ -1152,27 +1158,47 @@ class TestVarBaseSetitem(unittest.TestCase): self.assertEqual(id_origin, id(self.tensor_x)) self.tensor_x[1:2] = value - self.assertEqual(self.tensor_x.inplace_version, 2) + if not _in_eager_mode(): + self.assertEqual(self.tensor_x.inplace_version, 2) self.assertTrue(np.array_equal(self.tensor_x[1].numpy(), result)) self.assertEqual(id_origin, id(self.tensor_x)) self.tensor_x[...] 
= value - self.assertEqual(self.tensor_x.inplace_version, 3) + if not _in_eager_mode(): + self.assertEqual(self.tensor_x.inplace_version, 3) self.assertTrue(np.array_equal(self.tensor_x[3].numpy(), result)) self.assertEqual(id_origin, id(self.tensor_x)) - def test_value_tensor(self): - paddle.disable_static() + def func_test_value_tensor(self): self._test(self.tensor_value) - def test_value_numpy(self): - paddle.disable_static() + def test_value_tensor(self): + with _test_eager_guard(): + self.setUp() + self.func_test_value_tensor() + self.setUp() + self.func_test_value_tensor() + + def func_test_value_numpy(self): self._test(self.np_value) - def test_value_int(self): - paddle.disable_static() + def test_value_numpy(self): + with _test_eager_guard(): + self.setUp() + self.func_test_value_numpy() + self.setUp() + self.func_test_value_numpy() + + def func_test_value_int(self): self._test(10) + def test_value_int(self): + with _test_eager_guard(): + self.setUp() + self.func_test_value_int() + self.setUp() + self.func_test_value_int() + class TestVarBaseSetitemInt64(TestVarBaseSetitem): def set_dtype(self): diff --git a/python/paddle/fluid/variable_index.py b/python/paddle/fluid/variable_index.py index f3763cb447f39688409b141a8137d89e3a7f236a..1c7e4fb5f1ad04b4216f6c8ded1ebaa9a46e3134 100644 --- a/python/paddle/fluid/variable_index.py +++ b/python/paddle/fluid/variable_index.py @@ -382,7 +382,7 @@ def _getitem_impl_(var, item): idx = assign(np.array(slice_item).astype("int32")) return index_select(var, index=idx, axis=0) - elif isinstance(slice_item, (Variable)): + elif isinstance(slice_item, (Variable, core.eager.Tensor)): if len(item) == 1: from ..tensor import index_select, gather_nd @@ -636,7 +636,7 @@ def _setitem_impl_(var, item, value): shape = list(value.shape) if dtype == core.VarDesc.VarType.BOOL: value_name = "bool_values" - values = [bool(v) for v in value.flat] + values = [int(v) for v in value.flat] elif dtype == core.VarDesc.VarType.FP32: value_name = "fp32_values" values = [float(v) for v in value.flat] @@ -657,7 +657,7 @@ def _setitem_impl_(var, item, value): attrs[value_name] = values attrs["shape"] = shape - elif isinstance(value, Variable): + elif isinstance(value, (Variable, core.eager.Tensor)): inputs["ValueTensor"] = value else: raise TypeError( @@ -665,7 +665,9 @@ def _setitem_impl_(var, item, value): "paddle.Tensor to a paddle.Tensor, but received {}".format( type(value))) - if paddle.fluid.framework.in_dygraph_mode(): + if paddle.fluid.framework.in_dygraph_mode( + ) and not paddle.fluid.framework._in_eager_mode(): + # TODO(pangyoki) add inplace(BumpInplaceVersion) if need var._bump_inplace_version() cur_block = default_main_program().current_block() diff --git a/python/paddle/tensor/math.py b/python/paddle/tensor/math.py index ec68acc5b9f14ac018be1fc98b7b9b72188cde5f..a79fe5172f4be358f6c735acc4614da0549036bb 100755 --- a/python/paddle/tensor/math.py +++ b/python/paddle/tensor/math.py @@ -3789,13 +3789,13 @@ def diff(x, n=1, axis=-1, prepend=None, append=None, name=None): attrs_1 += ('starts', starts_1) ends_1 = [dim_len - 1] attrs_1 += ('ends', ends_1) - input_front = _C_ops.slice(new_input, None, None, 'axes', axes, \ + input_front = _C_ops.slice(new_input, None, None, None, None, 'axes', axes, \ 'infer_flags', infer_flags, *attrs_1) starts_2 = [1] attrs_2 += ('starts', starts_2) ends_2 = [dim_len] attrs_2 += ('ends', ends_2) - input_back = _C_ops.slice(new_input, None, None, 'axes', axes, \ + input_back = _C_ops.slice(new_input, None, None, None, None, 'axes', axes, 
\ 'infer_flags', infer_flags, *attrs_2) if x.dtype == paddle.bool: diff --git a/python/paddle/tensor/to_string.py b/python/paddle/tensor/to_string.py index f164bbc466f18da9b7145533c32369a85d6124df..91e5cfe97c6cdb503fba343a8a8d16a956aaffaf 100644 --- a/python/paddle/tensor/to_string.py +++ b/python/paddle/tensor/to_string.py @@ -317,7 +317,7 @@ def tensor_to_string(tensor, prefix='Tensor'): _template = "{prefix}(shape={shape}, dtype={dtype}, place={place}, stop_gradient={stop_gradient},\n{indent}{data})" - if not tensor._is_initialized(): + if not tensor._is_dense_tensor_hold_allocation(): return "Tensor(Not initialized)" if tensor.is_sparse():
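
Illustrative usage (not part of the patch): a minimal sketch of the eager-mode behavior these changes enable, assuming a Paddle build where eager mode is available and `_test_eager_guard` can be imported from `paddle.fluid.framework` as the updated tests do.

import numpy as np
import paddle
from paddle.fluid.framework import _test_eager_guard

with _test_eager_guard():
    x = paddle.to_tensor(np.ones((4, 2, 3), dtype="float32"))
    x[0] = 10.0                                  # scalar assignment, lowered to the set_value op
    x[1:2] = np.zeros((2, 3), dtype="float64")   # numpy value is cast to the tensor's dtype first
    print(x.is_leaf)                             # new is_leaf property on eager tensors
    print(x[0].numpy())                          # __getitem__ goes through the updated slice path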