diff --git a/paddle/fluid/pybind/CMakeLists.txt b/paddle/fluid/pybind/CMakeLists.txt
index 588caed5a452ef456eff682be123d02296ba5736..41708ef8611e42571ec4a9a932042185b9692425 100644
--- a/paddle/fluid/pybind/CMakeLists.txt
+++ b/paddle/fluid/pybind/CMakeLists.txt
@@ -212,6 +212,12 @@ if(WITH_PYTHON)
   add_custom_target(op_function_generator_cmd ALL DEPENDS ${impl_file})

   list(APPEND PYBIND_DEPS interpretercore standalone_executor)
+
+  cc_library(paddle_eager
+      SRCS eager.cc eager_functions.cc eager_method.cc eager_properties.cc eager_utils.cc
+      DEPS autograd_meta grad_node_info pten global_utils utils eager_api accumulation_node backward python)
+  list(APPEND PYBIND_DEPS paddle_eager)
+
   cc_library(paddle_pybind SHARED
     SRCS ${PYBIND_SRCS}
     DEPS ${PYBIND_DEPS} ${GLOB_OP_LIB} ${GLOB_OPERATOR_DEPS})
diff --git a/paddle/fluid/pybind/eager.cc b/paddle/fluid/pybind/eager.cc
new file mode 100644
index 0000000000000000000000000000000000000000..5be000844bcf17acca270ca8a91739cc9e8c602b
--- /dev/null
+++ b/paddle/fluid/pybind/eager.cc
@@ -0,0 +1,132 @@
+/* Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+http://www.apache.org/licenses/LICENSE-2.0
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License. */
+// disable numpy compile error
+#include <Python.h>
+
+#include <string>
+#include <vector>
+
+#include "paddle/fluid/eager/api/all.h"
+#include "paddle/fluid/eager/autograd_meta.h"
+#include "paddle/fluid/eager/utils.h"
+#include "paddle/fluid/memory/allocation/allocator.h"
+#include "paddle/fluid/memory/memcpy.h"
+#include "paddle/fluid/platform/enforce.h"
+#include "paddle/fluid/pybind/eager.h"
+#include "paddle/fluid/pybind/eager_utils.h"
+#include "paddle/pten/common/data_type.h"
+#include "paddle/pten/core/convert_utils.h"
+#include "paddle/pten/core/dense_tensor.h"
+#include "paddle/pten/include/core.h"
+#pragma GCC diagnostic ignored "-Wmissing-field-initializers"
+
+namespace paddle {
+namespace pybind {
+
+namespace py = ::pybind11;
+
+PyTypeObject* p_eager_tensor_type;
+
+PyObject* eagertensor_new(PyTypeObject* type, PyObject* args,
+                          PyObject* kwargs) {
+  PyObject* obj = type->tp_alloc(type, 0);
+  if (obj) {
+    auto v = reinterpret_cast<EagerTensorObject*>(obj);
+    new (&(v->eagertensor)) egr::EagerTensor();
+  }
+  return obj;
+}
+
+static void eagertensor_dealloc(EagerTensorObject* self) {
+  self->eagertensor.~EagerTensor();
+  Py_TYPE(self)->tp_free(reinterpret_cast<PyObject*>(self));
+}
+
+extern struct PyGetSetDef variable_properties[];
+
+extern PyMethodDef variable_methods[];
+
+PyTypeObject eager_tensor_type = {
+    PyVarObject_HEAD_INIT(NULL, 0) "core_avx.eager.EagerTensor", /* tp_name */
+    sizeof(EagerTensorObject),       /* tp_basicsize */
+    0,                               /* tp_itemsize */
+    (destructor)eagertensor_dealloc, /* tp_dealloc */
+    0,                               /* tp_vectorcall_offset */
+    0,                               /* tp_getattr */
+    0,                               /* tp_setattr */
+    0,                               /* tp_reserved */
+    0,                               /* tp_repr */
+    0,                               /* tp_as_number */
+    0,                               /* tp_as_sequence */
+    0,                               /* tp_as_mapping */
+    0,                               /* tp_hash */
+    0,                               /* tp_call */
+    0,                               /* tp_str */
+    0,                               /* tp_getattro */
+    0,                               /* tp_setattro */
+    0,                               /* tp_as_buffer */
+    Py_TPFLAGS_DEFAULT | Py_TPFLAGS_BASETYPE |
+        Py_TPFLAGS_HEAPTYPE, /* tp_flags */
+    0,                       /* tp_doc */
+    0,                       /* tp_traverse */
+    0,                       /* tp_clear */
+    0,                       /* tp_richcompare */
+    0,                       /* tp_weaklistoffset */
+    0,                       /* tp_iter */
+    0,                       /* tp_iternext */
+    variable_methods,        /* tp_methods */
+    0,                       /* tp_members */
+    variable_properties,     /* tp_getset */
+    0,                       /* tp_base */
+    0,                       /* tp_dict */
+    0,                       /* tp_descr_get */
+    0,                       /* tp_descr_set */
+    0,                       /* tp_dictoffset */
+    0,                       /* tp_init */
+    0,                       /* tp_alloc */
+    eagertensor_new,         /* tp_new */
+    0,                       /* tp_free */
+    0,                       /* tp_is_gc */
+    0,                       /* tp_bases */
+    0,                       /* tp_mro */
+    0,                       /* tp_cache */
+    0,                       /* tp_subclasses */
+    0,                       /* tp_weaklist */
+    0,                       /* tp_del */
+    0,                       /* tp_version_tag */
+    0                        /* tp_finalize */
+};
+
+void BindEager(pybind11::module* module) {
+  auto m = module->def_submodule("eager");
+
+  p_eager_tensor_type = &eager_tensor_type;
+  if (PyType_Ready(&eager_tensor_type) < 0) {
+    PADDLE_THROW(platform::errors::Fatal(
+        "Init Paddle error in BindEager(PyType_Ready)."));
+    return;
+  }
+
+  Py_INCREF(&eager_tensor_type);
+  if (PyModule_AddObject(m.ptr(), "EagerTensor",
+                         reinterpret_cast<PyObject*>(&eager_tensor_type)) < 0) {
+    Py_DECREF(&eager_tensor_type);
+    Py_DECREF(m.ptr());
+    PADDLE_THROW(platform::errors::Fatal(
+        "Init Paddle error in BindEager(PyModule_AddObject)."));
+    return;
+  }
+
+  BindFunctions(m.ptr());
+}
+
+}  // namespace pybind
+}  // namespace paddle
diff --git a/paddle/fluid/pybind/eager.h b/paddle/fluid/pybind/eager.h
new file mode 100644
index 0000000000000000000000000000000000000000..c1a869d9b89faa1ef481efe99e63d6ce57033e54
--- /dev/null
+++ b/paddle/fluid/pybind/eager.h
@@ -0,0 +1,24 @@
+/* Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+    http://www.apache.org/licenses/LICENSE-2.0
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License. */
+#pragma once
+
+#include <Python.h>
+#include "pybind11/pybind11.h"
+#include "pybind11/stl.h"
+
+namespace paddle {
+namespace pybind {
+
+void BindEager(pybind11::module* m);
+void BindFunctions(PyObject* module);
+
+}  // namespace pybind
+}  // namespace paddle
diff --git a/paddle/fluid/pybind/eager_functions.cc b/paddle/fluid/pybind/eager_functions.cc
new file mode 100644
index 0000000000000000000000000000000000000000..8c0f9ddf19f120c8cb8b9a674ddbdea36ecb201f
--- /dev/null
+++ b/paddle/fluid/pybind/eager_functions.cc
@@ -0,0 +1,223 @@
+/* Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+http://www.apache.org/licenses/LICENSE-2.0
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License. */
+// disable numpy compile error
+#include <Python.h>
+
+#include <string>
+#include <vector>
+
+#include "pybind11/numpy.h"
+#include "pybind11/pybind11.h"
+
+#include "paddle/fluid/eager/accumulation/accumulation_node.h"
+#include "paddle/fluid/eager/api/all.h"
+#include "paddle/fluid/eager/autograd_meta.h"
+#include "paddle/fluid/eager/backward.h"
+#include "paddle/fluid/eager/utils.h"
+#include "paddle/fluid/memory/allocation/allocator.h"
+#include "paddle/fluid/memory/memcpy.h"
+#include "paddle/fluid/platform/enforce.h"
+#include "paddle/fluid/pybind/eager.h"
+#include "paddle/fluid/pybind/eager_utils.h"
+#include "paddle/fluid/pybind/exception.h"
+#include "paddle/pten/api/lib/utils/allocator.h"
+#include "paddle/pten/api/lib/utils/storage.h"
+#include "paddle/pten/api/lib/utils/tensor_utils.h"
+#include "paddle/pten/common/data_type.h"
+#include "paddle/pten/core/convert_utils.h"
+#include "paddle/pten/core/dense_tensor.h"
+#include "paddle/pten/include/core.h"
+
+namespace paddle {
+namespace pybind {
+
+namespace py = ::pybind11;
+
+extern PyTypeObject* p_eager_tensor_type;
+
+size_t PyArray_Size_(PyObject* numpy_data) {
+  size_t res = 1;
+  auto dims = pybind11::detail::array_proxy(numpy_data)->dimensions;
+  auto nd = pybind11::detail::array_proxy(numpy_data)->nd;
+  while (nd--) {
+    res *= (*dims++);
+  }
+  return res;
+}
+
+class EagerNumpyAllocation : public paddle::memory::allocation::Allocation {
+ public:
+  explicit EagerNumpyAllocation(PyObject* numpy_data, pten::DataType dtype)
+      : Allocation(
+            static_cast<void*>(pybind11::detail::array_proxy(numpy_data)->data),
+            pten::DataTypeSize(dtype) * PyArray_Size_(numpy_data),
+            paddle::platform::CPUPlace()),
+        arr_(numpy_data) {
+    PADDLE_ENFORCE_NOT_NULL(arr_, platform::errors::InvalidArgument(
+                                      "The underlying PyObject pointer of "
+                                      "numpy array cannot be nullptr"));
+    PADDLE_ENFORCE_NE(
+        arr_, Py_None,
+        platform::errors::PreconditionNotMet(
+            "The underlying PyObject pointer of numpy array cannot be None"));
+    Py_INCREF(arr_);
+  }
+  ~EagerNumpyAllocation() override {
+    py::gil_scoped_acquire gil;
+    Py_DECREF(arr_);
+  }
+
+ private:
+  PyObject* arr_;
+};
+
+static PyObject* eager_api_set_expected_place(PyObject* self, PyObject* args,
+                                              PyObject* kwargs) {
+  EAGER_TRY
+  auto place = CastPyArg2Place(PyTuple_GET_ITEM(args, 0), 0);
+  egr::Controller::Instance().SetExpectedPlace(place);
+
+  Py_INCREF(Py_None);
+  return Py_None;
+  EAGER_CATCH_AND_THROW_RETURN_NULL
+}
+
+static PyObject* eager_api_scale(PyObject* self, PyObject* args,
+                                 PyObject* kwargs) {
+  EAGER_TRY
+  // TODO(jiabin): Sync Tensor and Variable here when we support
+  egr::EagerTensor ret =
+      egr::scale(reinterpret_cast<EagerTensorObject*>(PyTuple_GET_ITEM(args, 0))
+                     ->eagertensor,
+                 CastPyArg2AttrFloat(PyTuple_GET_ITEM(args, 1), 1),
+                 CastPyArg2AttrFloat(PyTuple_GET_ITEM(args, 2), 2),
+                 CastPyArg2AttrBoolean(PyTuple_GET_ITEM(args, 3), 3),
+                 CastPyArg2AttrBoolean(PyTuple_GET_ITEM(args, 4), 4));
+  return ToPyObject(ret);
+  EAGER_CATCH_AND_THROW_RETURN_NULL
+}
+
+static PyObject* eager_api_numpy_to_tensor(PyObject* numpy_data,
+                                           pten::DataType dtype,
+                                           const paddle::platform::Place& place,
+                                           bool stop_gradient) {
+  std::vector<int64_t> vec_dims;
+  auto numpy_shape = pybind11::detail::array_proxy(numpy_data)->dimensions;
+  int rank = pybind11::detail::array_proxy(numpy_data)->nd;
+  for (int i = 0; i < rank; i++) {
+    vec_dims.push_back(static_cast<int64_t>(numpy_shape[i]));
+  }
+  paddle::framework::DDim dims = paddle::framework::make_ddim(vec_dims);
+
+  // TODO(jiabin): Support GPU later
+  auto meta = pten::DenseTensorMeta(dtype, dims);
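+  // Wrap the borrowed numpy buffer instead of copying it: EagerNumpyAllocation
+  // Py_INCREFs the array, so the memory stays valid for the DenseTensor built
+  // below and is released (under the GIL) when the allocation dies.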
+  auto holder = std::make_shared<EagerNumpyAllocation>(numpy_data, dtype);
+  auto shared_storage =
+      pten::make_intrusive<paddle::experimental::SharedStorage>(holder, 0);
+  std::shared_ptr<pten::DenseTensor> densetensor(
+      new pten::DenseTensor(std::move(shared_storage), std::move(meta)));
+
+  PyObject* obj = p_eager_tensor_type->tp_alloc(p_eager_tensor_type, 0);
+  if (obj) {
+    auto v = reinterpret_cast<EagerTensorObject*>(obj);
+    new (&(v->eagertensor)) egr::EagerTensor();
+    v->eagertensor.set_impl(densetensor);
+    v->eagertensor.set_name(egr::Controller::Instance().GenerateUniqueName());
+    auto meta = egr::EagerUtils::autograd_meta(&(v->eagertensor));
+    meta->SetStopGradient(stop_gradient);
+
+    // Created tensor will be a leaf tensor,
+    // so we append an AccumulationNode to it.
+    auto accumulation_node = std::make_shared<egr::GradNodeAccumulation>();
+    meta->SetGradNode(accumulation_node);
+
+    // TODO(jiabin): Shall we increase ref cnt here to make python ref cnt num
+    // correctly?
+  } else {
+    PADDLE_THROW(platform::errors::Fatal(
+        "tp_alloc returned null, cannot allocate a new PyObject."));
+  }
+
+  return obj;
+}
+
+static PyObject* eager_api_to_tensor(PyObject* self, PyObject* args,
+                                     PyObject* kwargs) {
+  EAGER_TRY
+  // TODO(jiabin): Support Kwargs here
+  PyObject* data = PyTuple_GET_ITEM(args, 0);
+  auto str_dtype = CastPyArg2AttrString(PyTuple_GET_ITEM(args, 1), 1);
+  pten::DataType dtype = pten::String2DataType(str_dtype);
+  auto place = CastPyArg2Place(PyTuple_GET_ITEM(args, 2), 2);
+  bool stop_gradient = CastPyArg2AttrBoolean(PyTuple_GET_ITEM(args, 3), 3);
+  // TODO(jiabin): Support this when python given name
+  // auto str_name = CastPyArg2AttrString(PyTuple_GET_ITEM(args, 4), 4);
+
+  if (pybind11::detail::npy_api::get().PyArray_Check_(data)) {
+    return eager_api_numpy_to_tensor(data, dtype, place, stop_gradient);
+  } else {
+    PADDLE_THROW(platform::errors::InvalidArgument(
+        "Eager to_tensor only supports numpy to tensor."));
+    Py_INCREF(Py_None);
+    return Py_None;
+  }
+  EAGER_CATCH_AND_THROW_RETURN_NULL
+}
+
+static PyObject* eager_api_retain_grad_for_tensor(PyObject* self,
+                                                  PyObject* args,
+                                                  PyObject* kwargs) {
+  EAGER_TRY
+  egr::egr_utils_api::RetainGradForTensor(
+      CastPyArg2EagerTensor(PyTuple_GET_ITEM(args, 0), 0));
+  Py_INCREF(Py_None);
+  return Py_None;
+  EAGER_CATCH_AND_THROW_RETURN_NULL
+}
+
+static PyObject* eager_api_run_backward(PyObject* self, PyObject* args,
+                                        PyObject* kwargs) {
+  EAGER_TRY
+  auto tensors = CastPyArg2VectorOfEagerTensor(PyTuple_GET_ITEM(args, 0), 0);
+  auto grad_tensors =
+      CastPyArg2VectorOfEagerTensor(PyTuple_GET_ITEM(args, 1), 1);
+  egr::RunBackward(tensors, grad_tensors,
+                   CastPyArg2AttrBoolean(PyTuple_GET_ITEM(args, 2), 2));
+  Py_INCREF(Py_None);
+  return Py_None;
+  EAGER_CATCH_AND_THROW_RETURN_NULL
+}
+
+PyMethodDef variable_functions[] = {
+    {"to_tensor", (PyCFunction)(void (*)(void))eager_api_to_tensor,
+     METH_VARARGS | METH_KEYWORDS, NULL},
+    {"scale", (PyCFunction)(void (*)(void))eager_api_scale,
+     METH_VARARGS | METH_KEYWORDS, NULL},
+    {"_set_expected_place",
+     (PyCFunction)(void (*)(void))eager_api_set_expected_place,
+     METH_VARARGS | METH_KEYWORDS, NULL},
+    {"retain_grad_for_tensor",
+     (PyCFunction)(void (*)(void))eager_api_retain_grad_for_tensor,
+     METH_VARARGS | METH_KEYWORDS, NULL},
+    {"run_backward", (PyCFunction)(void (*)(void))eager_api_run_backward,
+     METH_VARARGS | METH_KEYWORDS, NULL},
+    {NULL, NULL, 0, NULL}};
+
+void BindFunctions(PyObject* module) {
+  if (PyModule_AddFunctions(module, variable_functions) < 0) {
+    PADDLE_THROW(platform::errors::Fatal(
+        "Init Paddle error in BindFunctions(PyModule_AddFunctions)."));
+    return;
+  }
+}
+
+}  // namespace pybind
+}  // namespace paddle
diff --git a/paddle/fluid/pybind/eager_method.cc b/paddle/fluid/pybind/eager_method.cc
new file mode 100644
index 0000000000000000000000000000000000000000..f040566260c74a1eb91ace4c7ec27ca46d627747
--- /dev/null
+++ b/paddle/fluid/pybind/eager_method.cc
@@ -0,0 +1,109 @@
+/* Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+http://www.apache.org/licenses/LICENSE-2.0
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License. */
+// disable numpy compile error
+#include <Python.h>
+
+#include <string>
+#include <vector>
+
+#include "pybind11/numpy.h"
+#include "pybind11/pybind11.h"
+
+#include "paddle/fluid/eager/api/all.h"
+#include "paddle/fluid/eager/autograd_meta.h"
+#include "paddle/fluid/memory/allocation/allocator.h"
+#include "paddle/fluid/memory/memcpy.h"
+#include "paddle/fluid/platform/enforce.h"
+#include "paddle/fluid/pybind/eager.h"
+#include "paddle/fluid/pybind/eager_utils.h"
+#include "paddle/fluid/pybind/exception.h"
+#include "paddle/pten/common/data_type.h"
+#include "paddle/pten/core/convert_utils.h"
+#include "paddle/pten/core/dense_tensor.h"
+#include "paddle/pten/include/core.h"
+namespace paddle {
+namespace pybind {
+
+extern PyTypeObject* p_eager_tensor_type;
+
+static PyObject* eager_tensor_method_numpy(EagerTensorObject* self,
+                                           PyObject* args, PyObject* kwargs) {
+  EAGER_TRY
+  if (!self->eagertensor.initialized()) {
+    Py_INCREF(Py_None);
+    return Py_None;
+  }
+  auto tensor_dims = self->eagertensor.shape();
+  auto numpy_dtype = pten::TensorDtype2NumpyDtype(self->eagertensor.type());
+  auto sizeof_dtype = pten::DataTypeSize(self->eagertensor.type());
+  Py_intptr_t py_dims[paddle::framework::DDim::kMaxRank];
+  Py_intptr_t py_strides[paddle::framework::DDim::kMaxRank];
+  size_t numel = 1;
+  for (int i = tensor_dims.size() - 1; i >= 0; --i) {
+    py_dims[i] = static_cast<Py_intptr_t>(tensor_dims[i]);
+    py_strides[i] = sizeof_dtype * numel;
+    numel *= py_dims[i];
+  }
+  auto& api = pybind11::detail::npy_api::get();
+  PyObject* array = api.PyArray_NewFromDescr_(
+      api.PyArray_Type_, api.PyArray_DescrFromType_(numpy_dtype),
+      tensor_dims.size(), py_dims, py_strides, nullptr,
+      pybind11::detail::npy_api::NPY_ARRAY_ALIGNED_ |
+          pybind11::detail::npy_api::NPY_ARRAY_WRITEABLE_,
+      nullptr);
+
+  if (self->eagertensor.is_cpu()) {
+    auto dense_tensor =
+        std::dynamic_pointer_cast<pten::DenseTensor>(self->eagertensor.impl());
+    platform::CPUPlace place;
+    // deep copy
+    paddle::memory::Copy(place, reinterpret_cast<void*>(
+                                    pybind11::detail::array_proxy(array)->data),
+                         place, dense_tensor->data(), sizeof_dtype * numel);
+#if defined(PADDLE_WITH_CUDA)
+  } else if (self->eagertensor.is_cuda()) {
+    auto dense_tensor =
+        std::dynamic_pointer_cast<pten::DenseTensor>(self->eagertensor.impl());
+
+    paddle::platform::GpuMemcpySync(
+        pybind11::detail::array_proxy(array)->data, dense_tensor->data(),
+        pten::DataTypeSize(dense_tensor->dtype()) * dense_tensor->numel(),
+        cudaMemcpyDeviceToHost);
+#endif
+  } else {
+    PADDLE_THROW(platform::errors::InvalidArgument(
+        "Tensor.numpy() only supports cpu tensor."));
+    Py_INCREF(Py_None);
+    return Py_None;
+  }
+
+  return array;
+  EAGER_CATCH_AND_THROW_RETURN_NULL
+}
+
+static PyObject* eager_tensor_method_is_initialized(EagerTensorObject* self,
+                                                    PyObject* args,
+                                                    PyObject* kwargs) {
+  EAGER_TRY
+  return ToPyObject(self->eagertensor.initialized());
+  EAGER_CATCH_AND_THROW_RETURN_NULL
+}
+
+PyMethodDef variable_methods[] = {
+    {"numpy", (PyCFunction)(void (*)(void))eager_tensor_method_numpy,
+     METH_VARARGS | METH_KEYWORDS, NULL},
+    {"_is_initialized",
+     (PyCFunction)(void (*)(void))eager_tensor_method_is_initialized,
+     METH_VARARGS | METH_KEYWORDS, NULL},
+    {NULL, NULL, 0, NULL}};
+
+}  // namespace pybind
+}  // namespace paddle
diff --git a/paddle/fluid/pybind/eager_properties.cc b/paddle/fluid/pybind/eager_properties.cc
new file mode 100644
index 0000000000000000000000000000000000000000..a13e4836d141a80eca6fe95d27b8f4ec019400e0
--- /dev/null
+++ b/paddle/fluid/pybind/eager_properties.cc
@@ -0,0 +1,155 @@
+/* Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+http://www.apache.org/licenses/LICENSE-2.0
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License. */
+// disable numpy compile error
+#include <Python.h>
+
+#include <string>
+#include <vector>
+
+#include "paddle/fluid/eager/api/all.h"
+#include "paddle/fluid/eager/autograd_meta.h"
+#include "paddle/fluid/eager/utils.h"
+#include "paddle/fluid/memory/allocation/allocator.h"
+#include "paddle/fluid/memory/memcpy.h"
+#include "paddle/fluid/platform/enforce.h"
+#include "paddle/fluid/pybind/eager.h"
+#include "paddle/fluid/pybind/eager_utils.h"
+#include "paddle/fluid/pybind/exception.h"
+#include "paddle/pten/common/data_type.h"
+#include "paddle/pten/core/convert_utils.h"
+#include "paddle/pten/core/dense_tensor.h"
+#include "paddle/pten/include/core.h"
+#pragma GCC diagnostic ignored "-Wwrite-strings"
+
+namespace paddle {
+namespace pybind {
+
+extern PyTypeObject* p_eager_tensor_type;
+
+PyObject* eager_tensor_properties_get_name(EagerTensorObject* self,
+                                           void* closure) {
+  EAGER_TRY
+  return ToPyObject(self->eagertensor.name());
+  EAGER_CATCH_AND_THROW_RETURN_NULL
+}
+
+int eager_tensor_properties_set_name(EagerTensorObject* self, PyObject* value,
+                                     void* closure) {
+  EAGER_TRY
+  self->eagertensor.set_name(CastPyArg2AttrString(value, 0));
+  return 0;
+  EAGER_CATCH_AND_THROW_RETURN_ZERO
+}
+
+PyObject* eager_tensor_properties_get_stop_gradient(EagerTensorObject* self,
+                                                    void* closure) {
+  EAGER_TRY
+  auto meta = egr::EagerUtils::autograd_meta(&self->eagertensor);
+  return ToPyObject(meta->StopGradient());
+  EAGER_CATCH_AND_THROW_RETURN_NULL
+}
+
+PyObject* eager_tensor_properties_get_grad(EagerTensorObject* self,
+                                           void* closure) {
+  EAGER_TRY
+  auto meta = egr::EagerUtils::unsafe_autograd_meta(self->eagertensor);
+  return ToPyObject(meta->Grad());
+  EAGER_CATCH_AND_THROW_RETURN_NULL
+}
+
+int eager_tensor_properties_set_stop_gradient(EagerTensorObject* self,
+                                              PyObject* value, void* closure) {
+  EAGER_TRY
+  auto meta = egr::EagerUtils::autograd_meta(&self->eagertensor);
+  meta->SetStopGradient(CastPyArg2AttrBoolean(value, 0));
+  return 0;
+  EAGER_CATCH_AND_THROW_RETURN_ZERO
+}
+
+PyObject* eager_tensor_properties_get_persistable(EagerTensorObject* self,
+                                                  void* closure) {
+  EAGER_TRY
+  auto meta = egr::EagerUtils::autograd_meta(&self->eagertensor);
+  return ToPyObject(meta->Persistable());
+  EAGER_CATCH_AND_THROW_RETURN_NULL
+}
+
+int eager_tensor_properties_set_persistable(EagerTensorObject* self,
+                                            PyObject* value, void* closure) {
+  EAGER_TRY
+  auto meta = egr::EagerUtils::autograd_meta(&self->eagertensor);
+  meta->SetPersistable(CastPyArg2AttrBoolean(value, 0));
+  return 0;
+  EAGER_CATCH_AND_THROW_RETURN_ZERO
+}
+
+PyObject* eager_tensor_properties_get_shape(EagerTensorObject* self,
+                                            void* closure) {
+  EAGER_TRY
+  auto ddim = self->eagertensor.shape();
+  std::vector<int64_t> value;
+  size_t rank = static_cast<size_t>(ddim.size());
+  value.resize(rank);
+  for (size_t i = 0; i < rank; i++) {
+    value[i] = ddim[i];
+  }
+
+  return ToPyObject(value);
+  EAGER_CATCH_AND_THROW_RETURN_NULL
+}
+
+PyObject* eager_tensor_properties_get_place(EagerTensorObject* self,
+                                            void* closure) {
+  EAGER_TRY
+  return ToPyObject(self->eagertensor.place());
+  EAGER_CATCH_AND_THROW_RETURN_NULL
+}
+
+PyObject* eager_tensor_properties_get_place_str(EagerTensorObject* self,
+                                                void* closure) {
+  EAGER_TRY
+  std::stringstream ostr;
+  ostr << self->eagertensor.place();
+  return ToPyObject(ostr.str());
+  EAGER_CATCH_AND_THROW_RETURN_NULL
+}
+
+PyObject* eager_tensor_properties_get_dtype(EagerTensorObject* self,
+                                            void* closure) {
+  EAGER_TRY
+  return ToPyObject(pten::DataType2String(self->eagertensor.type()));
+  EAGER_CATCH_AND_THROW_RETURN_NULL
+}
+
+struct PyGetSetDef variable_properties[] = {
+    {"grad", (getter)eager_tensor_properties_get_grad, nullptr, nullptr,
+     nullptr},
+    {"name", (getter)eager_tensor_properties_get_name,
+     (setter)eager_tensor_properties_set_name, nullptr, nullptr},
+    {"stop_gradient", (getter)eager_tensor_properties_get_stop_gradient,
+     (setter)eager_tensor_properties_set_stop_gradient, nullptr, nullptr},
+    {"persistable", (getter)eager_tensor_properties_get_persistable,
+     (setter)eager_tensor_properties_set_persistable, nullptr, nullptr},
+    {"shape", (getter)eager_tensor_properties_get_shape, nullptr, nullptr,
+     nullptr},
+    // {"is_leaf", (getter)eager_tensor_properties_get_is_leaf, nullptr,
+    // nullptr,
+    // nullptr},
+    {"place", (getter)eager_tensor_properties_get_place, nullptr, nullptr,
+     nullptr},
+    {"_place_str", (getter)eager_tensor_properties_get_place_str, nullptr,
+     nullptr, nullptr},
+    {"dtype", (getter)eager_tensor_properties_get_dtype, nullptr, nullptr,
+     nullptr},
+    {nullptr, nullptr, nullptr, nullptr, nullptr}};
+
+}  // namespace pybind
+}  // namespace paddle
diff --git a/paddle/fluid/pybind/eager_utils.cc b/paddle/fluid/pybind/eager_utils.cc
new file mode 100644
index 0000000000000000000000000000000000000000..9268fc8e7b976c8fbfd2c47a64c3bdac3c382835
--- /dev/null
+++ b/paddle/fluid/pybind/eager_utils.cc
@@ -0,0 +1,339 @@
+/* Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+http://www.apache.org/licenses/LICENSE-2.0
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License. */
+
+#include <Python.h>
+
+#include <string>
+#include <vector>
+
+#include "paddle/fluid/eager/api/all.h"
+#include "paddle/fluid/eager/autograd_meta.h"
+#include "paddle/fluid/memory/allocation/allocator.h"
+#include "paddle/fluid/platform/enforce.h"
+#include "paddle/fluid/pybind/eager.h"
+#include "paddle/fluid/pybind/eager_utils.h"
+#include "paddle/pten/common/data_type.h"
+#include "paddle/pten/core/convert_utils.h"
+#include "paddle/pten/core/dense_tensor.h"
+#include "paddle/pten/include/core.h"
+
+namespace paddle {
+namespace pybind {
+
+extern PyTypeObject* p_eager_tensor_type;
+
+extern PyTypeObject* g_place_pytype;
+extern PyTypeObject* g_cudaplace_pytype;
+extern PyTypeObject* g_cpuplace_pytype;
+extern PyTypeObject* g_xpuplace_pytype;
+extern PyTypeObject* g_npuplace_pytype;
+extern PyTypeObject* g_cudapinnedplace_pytype;
+
+bool PyObject_CheckLongOrConvertToLong(PyObject** obj) {
+  if ((PyLong_Check(*obj) && !PyBool_Check(*obj))) {
+    return true;
+  }
+
+  if (std::string((reinterpret_cast<PyTypeObject*>((*obj)->ob_type))->tp_name)
+          .find("numpy") != std::string::npos) {
+    auto to = PyNumber_Long(*obj);
+    if (to) {
+      *obj = to;
+      return true;
+    }
+  }
+
+  return false;
+}
+
+bool PyObject_CheckFloatOrConvertToFloat(PyObject** obj) {
+  // sometimes users provide PyLong or numpy.int64 but attr is float
+  if (PyFloat_Check(*obj) || PyLong_Check(*obj)) {
+    return true;
+  }
+  if (std::string((reinterpret_cast<PyTypeObject*>((*obj)->ob_type))->tp_name)
+          .find("numpy") != std::string::npos) {
+    auto to = PyNumber_Float(*obj);
+    if (to) {
+      *obj = to;
+      return true;
+    }
+  }
+  return false;
+}
+
+bool PyObject_CheckStr(PyObject* obj) { return PyUnicode_Check(obj); }
+
+bool CastPyArg2AttrBoolean(PyObject* obj, ssize_t arg_pos) {
+  if (obj == Py_None) {
+    return false;  // To be compatible with QA integration testing. Some
+                   // test cases pass in None.
+  } else if (obj == Py_True) {
+    return true;
+  } else if (obj == Py_False) {
+    return false;
+  } else {
+    PADDLE_THROW(platform::errors::InvalidArgument(
+        "argument (position %d) must be "
+        "bool, but got %s",
+        arg_pos + 1,
+        (reinterpret_cast<PyTypeObject*>(obj->ob_type))->tp_name));
+  }
+}
+
+int CastPyArg2AttrInt(PyObject* obj, ssize_t arg_pos) {
+  if (PyObject_CheckLongOrConvertToLong(&obj)) {
+    return static_cast<int>(PyLong_AsLong(obj));
+  } else {
+    PADDLE_THROW(platform::errors::InvalidArgument(
+        "argument (position %d) must be "
+        "int, but got %s",
+        arg_pos + 1,
+        (reinterpret_cast<PyTypeObject*>(obj->ob_type))->tp_name));
+  }
+}
+
+int64_t CastPyArg2AttrLong(PyObject* obj, ssize_t arg_pos) {
+  if (PyObject_CheckLongOrConvertToLong(&obj)) {
+    return (int64_t)PyLong_AsLong(obj);  // NOLINT
+  } else {
+    PADDLE_THROW(platform::errors::InvalidArgument(
+        "argument (position %d) must be "
+        "long, but got %s",
+        arg_pos + 1,
+        (reinterpret_cast<PyTypeObject*>(obj->ob_type))->tp_name));
+  }
+}
+
+float CastPyArg2AttrFloat(PyObject* obj, ssize_t arg_pos) {
+  if (PyObject_CheckFloatOrConvertToFloat(&obj)) {
+    return static_cast<float>(PyFloat_AsDouble(obj));
+  } else {
+    PADDLE_THROW(platform::errors::InvalidArgument(
+        "argument (position %d) must be "
+        "float, but got %s",
+        arg_pos + 1,
+        (reinterpret_cast<PyTypeObject*>(obj->ob_type))->tp_name));
+  }
+}
+
+std::string CastPyArg2AttrString(PyObject* obj, ssize_t arg_pos) {
+  if (PyObject_CheckStr(obj)) {
+    Py_ssize_t size;
+    const char* data;
+    data = PyUnicode_AsUTF8AndSize(obj, &size);
+    return std::string(data, static_cast<size_t>(size));
+  } else {
+    PADDLE_THROW(platform::errors::InvalidArgument(
+        "argument (position %d) must be "
+        "str, but got %s",
+        arg_pos + 1,
+        (reinterpret_cast<PyTypeObject*>(obj->ob_type))->tp_name));
+    return "";
+  }
+}
+
+egr::EagerTensor CastPyArg2EagerTensor(PyObject* obj, ssize_t arg_pos) {
+  if (PyObject_IsInstance(obj,
+                          reinterpret_cast<PyObject*>(p_eager_tensor_type))) {
+    return reinterpret_cast<EagerTensorObject*>(obj)->eagertensor;
+  } else {
+    PADDLE_THROW(platform::errors::InvalidArgument(
+        "argument (position %d) must be "
+        "EagerTensor, but got %s",
+        arg_pos + 1, reinterpret_cast<PyTypeObject*>(obj->ob_type)->tp_name));
+  }
+}
+
+std::vector<egr::EagerTensor> CastPyArg2VectorOfEagerTensor(PyObject* obj,
+                                                            ssize_t arg_pos) {
+  std::vector<egr::EagerTensor> result;
+  if (PyList_Check(obj)) {
+    Py_ssize_t len = PyList_Size(obj);
+    PyObject* item = nullptr;
+    for (Py_ssize_t i = 0; i < len; i++) {
+      item = PyList_GetItem(obj, i);
+      if (PyObject_IsInstance(
+              item, reinterpret_cast<PyObject*>(p_eager_tensor_type))) {
+        result.emplace_back(
+            reinterpret_cast<EagerTensorObject*>(item)->eagertensor);
+      } else {
+        PADDLE_THROW(platform::errors::InvalidArgument(
+            "argument (position %d) must be "
+            "list of EagerTensor, but got %s at pos %d",
+            arg_pos + 1,
+            reinterpret_cast<PyTypeObject*>(item->ob_type)->tp_name, i));
+      }
+    }
+  } else if (PyTuple_Check(obj)) {
+    Py_ssize_t len = PyTuple_Size(obj);
+    PyObject* item = nullptr;
+    for (Py_ssize_t i = 0; i < len; i++) {
+      item = PyTuple_GetItem(obj, i);
+      if (PyObject_IsInstance(
+              item, reinterpret_cast<PyObject*>(p_eager_tensor_type))) {
+        result.emplace_back(
+            reinterpret_cast<EagerTensorObject*>(item)->eagertensor);
+      } else {
+        PADDLE_THROW(platform::errors::InvalidArgument(
+            "argument (position %d) must be "
+            "list of EagerTensor, but got %s at pos %d",
+            arg_pos + 1,
+            reinterpret_cast<PyTypeObject*>(item->ob_type)->tp_name, i));
+      }
+    }
+  } else {
+    PADDLE_THROW(platform::errors::InvalidArgument(
+        "argument (position %d) must be "
+        "list or tuple, but got %s",
+        arg_pos + 1, reinterpret_cast<PyTypeObject*>(obj->ob_type)->tp_name));
+  }
+  return result;
+}
+
+platform::Place CastPyArg2Place(PyObject* obj, ssize_t arg_pos) {
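+  // Try each pybind11-registered place type in turn; the g_*_pytype globals
+  // are PyTypeObject handles cached when pybind.cc registers the classes.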
+  platform::Place place;
+  if (PyObject_IsInstance(obj, reinterpret_cast<PyObject*>(g_place_pytype))) {
+    place = ::pybind11::handle(obj).cast<platform::Place>();
+  } else if (PyObject_IsInstance(
+                 obj, reinterpret_cast<PyObject*>(g_cudaplace_pytype))) {
+    place = ::pybind11::handle(obj).cast<platform::CUDAPlace>();
+  } else if (PyObject_IsInstance(
+                 obj, reinterpret_cast<PyObject*>(g_cpuplace_pytype))) {
+    place = ::pybind11::handle(obj).cast<platform::CPUPlace>();
+  } else if (PyObject_IsInstance(
+                 obj, reinterpret_cast<PyObject*>(g_xpuplace_pytype))) {
+    place = ::pybind11::handle(obj).cast<platform::XPUPlace>();
+  } else if (PyObject_IsInstance(
+                 obj, reinterpret_cast<PyObject*>(g_npuplace_pytype))) {
+    place = ::pybind11::handle(obj).cast<platform::NPUPlace>();
+  } else if (PyObject_IsInstance(
+                 obj, reinterpret_cast<PyObject*>(g_cudapinnedplace_pytype))) {
+    place = ::pybind11::handle(obj).cast<platform::CUDAPinnedPlace>();
+  } else {
+    PADDLE_THROW(platform::errors::InvalidArgument(
+        "argument (position %d) must be "
+        "one of (Place, CUDAPlace, CPUPlace, XPUPlace, NPUPlace, "
+        "CUDAPinnedPlace), but got %s",
+        arg_pos + 1, reinterpret_cast<PyTypeObject*>(obj->ob_type)->tp_name));
+  }
+  return place;
+}
+
+PyObject* ToPyObject(bool value) {
+  if (value) {
+    Py_INCREF(Py_True);
+    return Py_True;
+  } else {
+    Py_INCREF(Py_False);
+    return Py_False;
+  }
+}
+
+PyObject* ToPyObject(int value) { return PyLong_FromLong(value); }
+
+PyObject* ToPyObject(int64_t value) { return PyLong_FromLongLong(value); }
+
+PyObject* ToPyObject(float value) { return PyFloat_FromDouble(value); }
+
+PyObject* ToPyObject(double value) { return PyFloat_FromDouble(value); }
+
+PyObject* ToPyObject(const char* value) { return PyUnicode_FromString(value); }
+
+PyObject* ToPyObject(const std::string& value) {
+  return PyUnicode_FromString(value.c_str());
+}
+
+PyObject* ToPyObject(const egr::EagerTensor& value) {
+  PyObject* obj = p_eager_tensor_type->tp_alloc(p_eager_tensor_type, 0);
+  if (obj) {
+    auto v = reinterpret_cast<EagerTensorObject*>(obj);
+    new (&(v->eagertensor)) egr::EagerTensor();
+    v->eagertensor = value;
+  } else {
+    PADDLE_THROW(platform::errors::Fatal(
+        "tp_alloc returned null, cannot allocate a new PyObject."));
+  }
+  return obj;
+}
+
+PyObject* ToPyObject(const std::vector<bool>& value) {
+  PyObject* result = PyList_New((Py_ssize_t)value.size());
+
+  for (size_t i = 0; i < value.size(); i++) {
+    PyList_SET_ITEM(result, static_cast<Py_ssize_t>(i), ToPyObject(value[i]));
+  }
+
+  return result;
+}
+
+PyObject* ToPyObject(const std::vector<int>& value) {
+  PyObject* result = PyList_New((Py_ssize_t)value.size());
+
+  for (size_t i = 0; i < value.size(); i++) {
+    PyList_SET_ITEM(result, static_cast<Py_ssize_t>(i), ToPyObject(value[i]));
+  }
+
+  return result;
+}
+
+PyObject* ToPyObject(const std::vector<int64_t>& value) {
+  PyObject* result = PyList_New((Py_ssize_t)value.size());
+
+  for (size_t i = 0; i < value.size(); i++) {
+    PyList_SET_ITEM(result, (Py_ssize_t)i, ToPyObject(value[i]));
+  }
+
+  return result;
+}
+
+PyObject* ToPyObject(const std::vector<float>& value) {
+  PyObject* result = PyList_New((Py_ssize_t)value.size());
+
+  for (size_t i = 0; i < value.size(); i++) {
+    PyList_SET_ITEM(result, static_cast<Py_ssize_t>(i), ToPyObject(value[i]));
+  }
+
+  return result;
+}
+
+PyObject* ToPyObject(const std::vector<double>& value) {
+  PyObject* result = PyList_New((Py_ssize_t)value.size());
+
+  for (size_t i = 0; i < value.size(); i++) {
+    PyList_SET_ITEM(result, static_cast<Py_ssize_t>(i), ToPyObject(value[i]));
+  }
+
+  return result;
+}
+
+PyObject* ToPyObject(const std::vector<egr::EagerTensor>& value) {
+  PyObject* result = PyList_New((Py_ssize_t)value.size());
+
+  for (size_t i = 0; i < value.size(); i++) {
+    PyObject* obj = p_eager_tensor_type->tp_alloc(p_eager_tensor_type, 0);
+    if (obj) {
+      auto v = reinterpret_cast<EagerTensorObject*>(obj);
+      new (&(v->eagertensor)) egr::EagerTensor();
+      v->eagertensor = value[i];
+    } else {
+      PADDLE_THROW(platform::errors::Fatal(
+          "tp_alloc returned null, cannot allocate a new PyObject."));
+    }
+    PyList_SET_ITEM(result, static_cast<Py_ssize_t>(i), obj);
+  }
+
+  return result;
+}
+
+PyObject* ToPyObject(const platform::Place& value) {
+  auto obj = ::pybind11::cast(value);
+  obj.inc_ref();
+  return obj.ptr();
+}
+
+}  // namespace pybind
+}  // namespace paddle
diff --git a/paddle/fluid/pybind/eager_utils.h b/paddle/fluid/pybind/eager_utils.h
new file mode 100644
index 0000000000000000000000000000000000000000..49f56a61c31f1f9bd6a41c4446fd9b952b9d546d
--- /dev/null
+++ b/paddle/fluid/pybind/eager_utils.h
@@ -0,0 +1,54 @@
+/* Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+    http://www.apache.org/licenses/LICENSE-2.0
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License. */
+#pragma once
+
+#include <Python.h>
+#include "pybind11/pybind11.h"
+#include "pybind11/stl.h"
+
+namespace paddle {
+namespace pybind {
+
+typedef struct {
+  PyObject_HEAD egr::EagerTensor eagertensor;
+} EagerTensorObject;
+
+bool PyObject_CheckLongOrConvertToLong(PyObject** obj);
+bool PyObject_CheckFloatOrConvertToFloat(PyObject** obj);
+bool PyObject_CheckStr(PyObject* obj);
+bool CastPyArg2AttrBoolean(PyObject* obj, ssize_t arg_pos);
+int CastPyArg2AttrInt(PyObject* obj, ssize_t arg_pos);
+int64_t CastPyArg2AttrLong(PyObject* obj, ssize_t arg_pos);
+float CastPyArg2AttrFloat(PyObject* obj, ssize_t arg_pos);
+std::string CastPyArg2AttrString(PyObject* obj, ssize_t arg_pos);
+egr::EagerTensor CastPyArg2EagerTensor(PyObject* obj, ssize_t arg_pos);
+std::vector<egr::EagerTensor> CastPyArg2VectorOfEagerTensor(PyObject* obj,
+                                                            ssize_t arg_pos);
+platform::Place CastPyArg2Place(PyObject* obj, ssize_t arg_pos);
+
+PyObject* ToPyObject(int value);
+PyObject* ToPyObject(bool value);
+PyObject* ToPyObject(int64_t value);
+PyObject* ToPyObject(float value);
+PyObject* ToPyObject(double value);
+PyObject* ToPyObject(const char* value);
+PyObject* ToPyObject(const std::string& value);
+PyObject* ToPyObject(const egr::EagerTensor& value);
+PyObject* ToPyObject(const std::vector<bool>& value);
+PyObject* ToPyObject(const std::vector<int>& value);
+PyObject* ToPyObject(const std::vector<int64_t>& value);
+PyObject* ToPyObject(const std::vector<float>& value);
+PyObject* ToPyObject(const std::vector<double>& value);
+PyObject* ToPyObject(const std::vector<egr::EagerTensor>& value);
+PyObject* ToPyObject(const platform::Place& value);
+
+}  // namespace pybind
+}  // namespace paddle
diff --git a/paddle/fluid/pybind/exception.cc b/paddle/fluid/pybind/exception.cc
index 3d07985ff654e6b7c9020957747ea2149d4b5866..362a3e44fab6254bef591bfd144e071821846271 100644
--- a/paddle/fluid/pybind/exception.cc
+++ b/paddle/fluid/pybind/exception.cc
@@ -81,5 +81,48 @@ void BindException(pybind11::module* m) {
       });
 }
 
+void ThrowExceptionToPython(std::exception_ptr p) {
+  static PyObject* EOFExceptionException =
+      PyErr_NewException("paddle.EOFException", PyExc_Exception, NULL);
+  static PyObject* EnforceNotMetException =
+      PyErr_NewException("paddle.EnforceNotMet", PyExc_Exception, NULL);
+  try {
+    if
(p) std::rethrow_exception(p); + } catch (const platform::EOFException& e) { + PyErr_SetString(EOFExceptionException, e.what()); + } catch (const platform::EnforceNotMet& e) { + switch (e.code()) { + case paddle::platform::error::INVALID_ARGUMENT: + PyErr_SetString(PyExc_ValueError, e.what()); + break; + case paddle::platform::error::NOT_FOUND: + case paddle::platform::error::ALREADY_EXISTS: + case paddle::platform::error::PRECONDITION_NOT_MET: + case paddle::platform::error::PERMISSION_DENIED: + case paddle::platform::error::EXECUTION_TIMEOUT: + case paddle::platform::error::UNAVAILABLE: + PyErr_SetString(PyExc_RuntimeError, e.what()); + break; + case paddle::platform::error::OUT_OF_RANGE: + PyErr_SetString(PyExc_IndexError, e.what()); + break; + case paddle::platform::error::RESOURCE_EXHAUSTED: + PyErr_SetString(PyExc_MemoryError, e.what()); + break; + case paddle::platform::error::UNIMPLEMENTED: + PyErr_SetString(PyExc_NotImplementedError, e.what()); + break; + case paddle::platform::error::FATAL: + PyErr_SetString(PyExc_SystemError, e.what()); + break; + case paddle::platform::error::EXTERNAL: + PyErr_SetString(PyExc_OSError, e.what()); + break; + default: + PyErr_SetString(EnforceNotMetException, e.what()); + break; + } + } +} } // namespace pybind } // namespace paddle diff --git a/paddle/fluid/pybind/exception.h b/paddle/fluid/pybind/exception.h index 5e054267361f2c62b3ad36581be0ad17ce0718de..cf82f464a11f292b8ba09dc4cdba4eb3db6e1d96 100644 --- a/paddle/fluid/pybind/exception.h +++ b/paddle/fluid/pybind/exception.h @@ -18,10 +18,26 @@ limitations under the License. */ #include "paddle/fluid/platform/enforce.h" #include "pybind11/pybind11.h" +#define EAGER_TRY try { +#define EAGER_CATCH_AND_THROW_RETURN_NULL \ + } \ + catch (...) { \ + ThrowExceptionToPython(std::current_exception()); \ + return nullptr; \ + } + +#define EAGER_CATCH_AND_THROW_RETURN_ZERO \ + } \ + catch (...) 
{ \ + ThrowExceptionToPython(std::current_exception()); \ + return 0; \ + } + namespace paddle { namespace pybind { void BindException(pybind11::module* m); +void ThrowExceptionToPython(std::exception_ptr p); } // namespace pybind } // namespace paddle diff --git a/paddle/fluid/pybind/op_function.h b/paddle/fluid/pybind/op_function.h index 324cd4b1b161f57c178492dfa66ba71fc553866d..5535ffd950f37d1e56b5c2520c57a418025424fd 100644 --- a/paddle/fluid/pybind/op_function.h +++ b/paddle/fluid/pybind/op_function.h @@ -29,6 +29,7 @@ #include "paddle/fluid/framework/variable.h" #include "paddle/fluid/imperative/tracer.h" #include "paddle/fluid/imperative/type_defs.h" +#include "paddle/fluid/pybind/exception.h" #include "paddle/fluid/pybind/imperative.h" namespace py = pybind11; @@ -992,50 +993,6 @@ void InitOpsAttrTypeMap() { } } -void ThrowExceptionToPython(std::exception_ptr p) { - static PyObject* EOFExceptionException = - PyErr_NewException("paddle.EOFException", PyExc_Exception, NULL); - static PyObject* EnforceNotMetException = - PyErr_NewException("paddle.EnforceNotMet", PyExc_Exception, NULL); - try { - if (p) std::rethrow_exception(p); - } catch (const platform::EOFException& e) { - PyErr_SetString(EOFExceptionException, e.what()); - } catch (const platform::EnforceNotMet& e) { - switch (e.code()) { - case paddle::platform::error::INVALID_ARGUMENT: - PyErr_SetString(PyExc_ValueError, e.what()); - break; - case paddle::platform::error::NOT_FOUND: - case paddle::platform::error::ALREADY_EXISTS: - case paddle::platform::error::PRECONDITION_NOT_MET: - case paddle::platform::error::PERMISSION_DENIED: - case paddle::platform::error::EXECUTION_TIMEOUT: - case paddle::platform::error::UNAVAILABLE: - PyErr_SetString(PyExc_RuntimeError, e.what()); - break; - case paddle::platform::error::OUT_OF_RANGE: - PyErr_SetString(PyExc_IndexError, e.what()); - break; - case paddle::platform::error::RESOURCE_EXHAUSTED: - PyErr_SetString(PyExc_MemoryError, e.what()); - break; - case paddle::platform::error::UNIMPLEMENTED: - PyErr_SetString(PyExc_NotImplementedError, e.what()); - break; - case paddle::platform::error::FATAL: - PyErr_SetString(PyExc_SystemError, e.what()); - break; - case paddle::platform::error::EXTERNAL: - PyErr_SetString(PyExc_OSError, e.what()); - break; - default: - PyErr_SetString(EnforceNotMetException, e.what()); - break; - } - } -} - } // namespace pybind } // namespace paddle diff --git a/paddle/fluid/pybind/pybind.cc b/paddle/fluid/pybind/pybind.cc index c06d6961c4fd941c2754ca6f0fd48513359f153a..5fc1f27eff36f68f0cd4e1f464ea4c5118b1d7af 100644 --- a/paddle/fluid/pybind/pybind.cc +++ b/paddle/fluid/pybind/pybind.cc @@ -75,6 +75,7 @@ limitations under the License. 
*/
 #include "paddle/fluid/platform/place.h"
 #include "paddle/fluid/platform/profiler.h"
 #include "paddle/fluid/pybind/cuda_streams_py.h"
+#include "paddle/fluid/pybind/eager.h"
 #include "paddle/fluid/pybind/io.h"
 #include "paddle/utils/none.h"
 #ifdef PADDLE_WITH_ASCEND
@@ -150,6 +151,14 @@ PYBIND11_MAKE_OPAQUE(paddle::framework::FetchType);
 
 namespace paddle {
 namespace pybind {
+
+PyTypeObject *g_place_pytype = nullptr;
+PyTypeObject *g_cudaplace_pytype = nullptr;
+PyTypeObject *g_cpuplace_pytype = nullptr;
+PyTypeObject *g_xpuplace_pytype = nullptr;
+PyTypeObject *g_npuplace_pytype = nullptr;
+PyTypeObject *g_cudapinnedplace_pytype = nullptr;
+
 bool IsCompiledWithCUDA() {
 #if !defined(PADDLE_WITH_CUDA) && !defined(PADDLE_WITH_HIP)
   return false;
@@ -524,6 +533,7 @@ PYBIND11_MODULE(core_avx, m) {
 PYBIND11_MODULE(core_noavx, m) {
 #endif
 
+  BindEager(&m);
   BindCudaStream(&m);
 
   // Not used, just make sure cpu_info.cc is linked.
@@ -1599,7 +1609,7 @@ All parameter, weight, gradient are variables in Paddle.
 #if defined(PADDLE_WITH_NCCL) || defined(PADDLE_WITH_RCCL)
   py::class_<platform::Communicator>(m, "Communicator").def(py::init<>());
 #endif
-  py::class_<platform::CUDAPlace>(m, "CUDAPlace", R"DOC(
+  py::class_<platform::CUDAPlace> cudaplace(m, "CUDAPlace", R"DOC(
 
     CUDAPlace is a descriptor of a device.
     It represents a GPU device allocated or to be allocated with Tensor or LoDTensor.
@@ -1622,7 +1632,9 @@ All parameter, weight, gradient are variables in Paddle.
 
         place = paddle.CUDAPlace(0)
 
-  )DOC")
+  )DOC");
+  g_cudaplace_pytype = reinterpret_cast<PyTypeObject *>(cudaplace.ptr());
+  cudaplace
       .def("__init__",
           [](platform::CUDAPlace &self, int dev_id) {
 #if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
@@ -1680,13 +1692,15 @@ All parameter, weight, gradient are variables in Paddle.
       .def("__repr__", string::to_string)
       .def("__str__", string::to_string);
 
-  py::class_<platform::XPUPlace>(m, "XPUPlace", R"DOC(
+  py::class_<platform::XPUPlace> xpuplace(m, "XPUPlace", R"DOC(
    **Note**:
    Examples:
        .. code-block:: python
          import paddle.fluid as fluid
          xpu_place = fluid.XPUPlace(0)
-  )DOC")
+  )DOC");
+  g_xpuplace_pytype = reinterpret_cast<PyTypeObject *>(xpuplace.ptr());
+  xpuplace
       .def("__init__",
           [](platform::XPUPlace &self, int dev_id) {
 #ifdef PADDLE_WITH_XPU
@@ -1756,7 +1770,7 @@ All parameter, weight, gradient are variables in Paddle.
       });
 #endif
 
-  py::class_<platform::CPUPlace>(m, "CPUPlace", R"DOC(
+  py::class_<platform::CPUPlace> cpuplace(m, "CPUPlace", R"DOC(
     CPUPlace is a descriptor of a device.
    It represents a CPU device on which a tensor will be allocated and a model will run.
 
@@ -1766,8 +1780,9 @@ All parameter, weight, gradient are variables in Paddle.
         import paddle
         cpu_place = paddle.CPUPlace()
 
-  )DOC")
-      .def(py::init<>())
+  )DOC");
+  g_cpuplace_pytype = reinterpret_cast<PyTypeObject *>(cpuplace.ptr());
+  cpuplace.def(py::init<>())
      .def("_type", &PlaceIndex<platform::CPUPlace>)
      .def("_equals", &IsSamePlace<platform::CPUPlace, platform::Place>)
      .def("_equals", &IsSamePlace<platform::CPUPlace, platform::CUDAPlace>)
@@ -1779,7 +1794,8 @@ All parameter, weight, gradient are variables in Paddle.
      .def("__repr__", string::to_string)
      .def("__str__", string::to_string);
 
-  py::class_<platform::CUDAPinnedPlace>(m, "CUDAPinnedPlace", R"DOC(
+  py::class_<platform::CUDAPinnedPlace> cudapinnedplace(
+      m, "CUDAPinnedPlace", R"DOC(
    CUDAPinnedPlace is a descriptor of a device.
    It refers to the page locked memory allocated by the CUDA function `cudaHostAlloc()` in the host memory.
    The host operating system will not paging and exchanging the memory.
@@ -1793,7 +1809,10 @@ All parameter, weight, gradient are variables in Paddle.
         import paddle
         place = paddle.CUDAPinnedPlace()
 
-  )DOC")
+  )DOC");
+  g_cudapinnedplace_pytype =
+      reinterpret_cast<PyTypeObject *>(cudapinnedplace.ptr());
+  cudapinnedplace
      .def("__init__",
           [](platform::CUDAPinnedPlace &self) {
 #if !defined(PADDLE_WITH_CUDA) && !defined(PADDLE_WITH_HIP)
@@ -1819,7 +1838,7 @@ All parameter, weight, gradient are variables in Paddle.
      .def("__str__", string::to_string);
 
   // NPUPlace
-  py::class_<platform::NPUPlace>(m, "NPUPlace", R"DOC(
+  py::class_<platform::NPUPlace> npuplace(m, "NPUPlace", R"DOC(
    NPUPlace is a descriptor of a device.
    It represents an NPU device on which a tensor will be allocated and a model will run.
 
@@ -1828,7 +1847,9 @@ All parameter, weight, gradient are variables in Paddle.
        import paddle
        npu_place = paddle.NPUPlace(0)
 
-  )DOC")
+  )DOC");
+  g_npuplace_pytype = reinterpret_cast<PyTypeObject *>(npuplace.ptr());
+  npuplace
      .def("__init__",
           [](platform::NPUPlace &self, int dev_id) {
 #ifdef PADDLE_WITH_ASCEND_CL
@@ -1879,8 +1900,9 @@ All parameter, weight, gradient are variables in Paddle.
           [](const platform::NPUPlace &self) { return self.GetDeviceId(); })
      .def("__str__", string::to_string);
 
-  py::class_<platform::Place>(m, "Place")
-      .def(py::init<>())
+  py::class_<platform::Place> platformplace(m, "Place");
+  g_place_pytype = reinterpret_cast<PyTypeObject *>(platformplace.ptr());
+  platformplace.def(py::init<>())
      .def("_type", &PlaceIndex<platform::Place>)
      .def("_equals", &IsSamePlace<platform::Place, platform::Place>)
      .def("_equals", &IsSamePlace<platform::Place, platform::CUDAPlace>)
diff --git a/paddle/pten/core/CMakeLists.txt b/paddle/pten/core/CMakeLists.txt
index e19d0a490cef39ce6e4f38714d2e0d9ecb73f7d2..0a2504f50327c16a2b0b5333299252d9c63fef67 100644
--- a/paddle/pten/core/CMakeLists.txt
+++ b/paddle/pten/core/CMakeLists.txt
@@ -1,9 +1,9 @@
 if(WITH_GPU)
-  cc_library(convert_utils SRCS convert_utils.cc DEPS data_type place gpu_info)
+  cc_library(convert_utils SRCS convert_utils.cc DEPS data_type place gpu_info python)
 elseif(WITH_ROCM)
-  cc_library(convert_utils SRCS convert_utils.cc DEPS data_type place gpu_info)
+  cc_library(convert_utils SRCS convert_utils.cc DEPS data_type place gpu_info python)
 else()
-  cc_library(convert_utils SRCS convert_utils.cc DEPS data_type place)
+  cc_library(convert_utils SRCS convert_utils.cc DEPS data_type place python)
 endif()
 
 cc_library(kernel_factory SRCS kernel_factory.cc DEPS enforce)
diff --git a/paddle/pten/core/convert_utils.cc b/paddle/pten/core/convert_utils.cc
index 92709647dac00dbcd443e80ee4ecf9a2a5e46208..8b54813eadf327c8400bcf9575eed1f1fe091fd7 100644
--- a/paddle/pten/core/convert_utils.cc
+++ b/paddle/pten/core/convert_utils.cc
@@ -11,8 +11,9 @@ distributed under the License is distributed on an "AS IS" BASIS,
 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License. */
-
 #include "paddle/pten/core/convert_utils.h"
+#include "paddle/fluid/operators/py_func_op.h"
+#include "paddle/fluid/pybind/tensor_py.h"
 
 // See Note [ Why still include the fluid headers? ]
 #include "paddle/fluid/platform/gpu_info.h"
@@ -180,4 +181,127 @@ pten::LoD TransToPtenLoD(const paddle::framework::LoD& lod) {
   return out;
 }
 
+size_t DataTypeSize(DataType dtype) {
+  switch (dtype) {
+    case DataType::UNDEFINED:
+      return 0;
+    case DataType::BOOL:
+      return sizeof(bool);
+    case DataType::INT8:
+      return sizeof(int8_t);
+    case DataType::UINT8:
+      return sizeof(uint8_t);
+    case DataType::INT16:
+      return sizeof(int16_t);
+    case DataType::INT32:
+      return sizeof(int);
+    case DataType::INT64:
+      return sizeof(int64_t);
+    case DataType::FLOAT16:
+      return sizeof(paddle::platform::float16);
+    case DataType::FLOAT32:
+      return sizeof(float);
+    case DataType::FLOAT64:
+      return sizeof(double);
+    case DataType::COMPLEX64:
+      return sizeof(paddle::platform::complex<float>);
+    case DataType::COMPLEX128:
+      return sizeof(paddle::platform::complex<double>);
+    default:
+      return 0;
+  }
+}
+
+DataType String2DataType(const std::string& str) {
+  if (str == "bool") {
+    return DataType::BOOL;
+  } else if (str == "float16") {
+    return DataType::FLOAT16;
+  } else if (str == "float32") {
+    return DataType::FLOAT32;
+  } else if (str == "float64") {
+    return DataType::FLOAT64;
+  } else if (str == "int8") {
+    return DataType::INT8;
+  } else if (str == "int16") {
+    return DataType::INT16;
+  } else if (str == "int32") {
+    return DataType::INT32;
+  } else if (str == "int64") {
+    return DataType::INT64;
+  } else if (str == "uint8") {
+    return DataType::UINT8;
+  } else if (str == "complex64") {
+    return DataType::COMPLEX64;
+  } else if (str == "complex128") {
+    return DataType::COMPLEX128;
+  } else {
+    return DataType::UNDEFINED;
+  }
+}
+
+std::string DataType2String(DataType dtype) {
+  switch (dtype) {
+    case DataType::BOOL:
+      return "bool";
+    case DataType::INT8:
+      return "int8";
+    case DataType::UINT8:
+      return "uint8";
+    case DataType::INT16:
+      return "int16";
+    case DataType::INT32:
+      return "int32";
+    case DataType::INT64:
+      return "int64";
+    case DataType::FLOAT16:
+      return "float16";
+    case DataType::FLOAT32:
+      return "float32";
+    case DataType::FLOAT64:
+      return "float64";
+    case DataType::COMPLEX64:
+      return "complex64";
+    case DataType::COMPLEX128:
+      return "complex128";
+    default:
+      PADDLE_THROW(paddle::platform::errors::InvalidArgument(
+          "Unknown pten::DataType, the int value = %d.",
+          static_cast<int>(dtype)));
+      return "";
+  }
+}
+
+int TensorDtype2NumpyDtype(pten::DataType dtype) {
+  switch (dtype) {
+    case pten::DataType::BOOL:
+      return pybind11::detail::npy_api::NPY_BOOL_;
+    case pten::DataType::INT8:
+      return pybind11::detail::npy_api::NPY_INT8_;
+    case pten::DataType::UINT8:
+      return pybind11::detail::npy_api::NPY_UINT8_;
+    case pten::DataType::INT16:
+      return pybind11::detail::npy_api::NPY_INT16_;
+    case pten::DataType::INT32:
+      return pybind11::detail::npy_api::NPY_INT32_;
+    case pten::DataType::INT64:
+      return pybind11::detail::npy_api::NPY_INT64_;
+    case pten::DataType::FLOAT16:
+      return pybind11::detail::NPY_FLOAT16_;
+    case pten::DataType::FLOAT32:
+      return pybind11::detail::npy_api::NPY_FLOAT_;
+    case pten::DataType::FLOAT64:
+      return pybind11::detail::npy_api::NPY_DOUBLE_;
+    case pten::DataType::COMPLEX64:
+      return pybind11::detail::NPY_COMPLEX64;
+    case pten::DataType::COMPLEX128:
+      return pybind11::detail::NPY_COMPLEX128;
+    default:
+      PADDLE_THROW(paddle::platform::errors::InvalidArgument(
+          "Unknown pten::DataType, the int value = %d.",
+          static_cast<int>(dtype)));
+      return 0;
+  }
+}
+
 } // namespace pten
diff --git a/paddle/pten/core/convert_utils.h b/paddle/pten/core/convert_utils.h
index 0b807c48bc150501c5f69c0e8a5e338bfa13e92a..e5990eb0a89f035edcae7e868720838cb3f78e7c 100644
--- a/paddle/pten/core/convert_utils.h
+++ b/paddle/pten/core/convert_utils.h
@@ -45,4 +45,9 @@ paddle::framework::DataLayout TransToFluidDataLayout(const DataLayout& layout);
 paddle::framework::LoD TransToFluidLoD(const pten::LoD& lod);
 pten::LoD TransToPtenLoD(const paddle::framework::LoD& lod);
 
+size_t DataTypeSize(DataType dtype);
+DataType String2DataType(const std::string& str);
+std::string DataType2String(DataType dtype);
+int TensorDtype2NumpyDtype(pten::DataType dtype);
+
 } // namespace pten
diff --git a/python/paddle/fluid/__init__.py b/python/paddle/fluid/__init__.py
index 5683750c4d8298dbf824f8e366549b7fa81ced65..5482413dbbc5d10c1829514cf2d76c2236ba51a1 100644
--- a/python/paddle/fluid/__init__.py
+++ b/python/paddle/fluid/__init__.py
@@ -55,6 +55,7 @@ from . import initializer
 from .initializer import set_global_initializer
 from . import layers
 from . import dygraph
+from . import eager
 from . import contrib
 from . import nets
 from . import optimizer
@@ -90,6 +91,7 @@ from .dygraph.base import enable_dygraph, disable_dygraph
 from .io import save, load, load_program_state, set_program_state
 from .dygraph.checkpoint import save_dygraph, load_dygraph
 from .dygraph.varbase_patch_methods import monkey_patch_varbase
+from .eager.eager_tensor_patch_methods import monkey_patch_eagertensor
 from . import generator
 from .core import _cuda_synchronize
 from .generator import Generator
@@ -113,6 +115,7 @@ __all__ = framework.__all__ + executor.__all__ + \
     'contrib',
     'data',
     'dygraph',
+    'eager',
     'enable_dygraph',
     'disable_dygraph',
     'enable_imperative',
@@ -211,6 +214,7 @@ def __bootstrap__():
 monkey_patch_variable()
 __bootstrap__()
 monkey_patch_varbase()
+monkey_patch_eagertensor()
 
 # NOTE(zhiqiu): register npu_finalize on the exit of Python,
 # do some clean up manually.
diff --git a/python/paddle/fluid/eager/__init__.py b/python/paddle/fluid/eager/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..1dc82ef69979c16135b4f4c9fbb56f0f5b680fb3
--- /dev/null
+++ b/python/paddle/fluid/eager/__init__.py
@@ -0,0 +1,20 @@
+# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# incubate directory is mainly for internal use
+# after we have tested incubate APIs in industrial application for a period
+# we will move stable functions into fluid
+
+from . import eager_tensor_patch_methods
+
+__all__ = []
diff --git a/python/paddle/fluid/eager/eager_tensor_patch_methods.py b/python/paddle/fluid/eager/eager_tensor_patch_methods.py
new file mode 100644
index 0000000000000000000000000000000000000000..206c5cf23e6dad372535b5bfd3fbf98fbacce1d6
--- /dev/null
+++ b/python/paddle/fluid/eager/eager_tensor_patch_methods.py
@@ -0,0 +1,23 @@
+# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import paddle.fluid.core as core + + +def monkey_patch_eagertensor(): + def __str__(self): + from paddle.tensor.to_string import eager_tensor_to_string + return eager_tensor_to_string(self) + + setattr(core.eager.EagerTensor, "__str__", __str__) diff --git a/python/paddle/fluid/framework.py b/python/paddle/fluid/framework.py index 04042eac953ba2f09a50d10f5b9cf0b2f45c93c3..ee7aa4560364e69f876a9dd6f0336db07377ee2c 100644 --- a/python/paddle/fluid/framework.py +++ b/python/paddle/fluid/framework.py @@ -45,6 +45,8 @@ __all__ = [ 'Program', 'default_startup_program', 'default_main_program', + 'eager_guard', + 'in_eager_mode', 'program_guard', 'name_scope', 'cuda_places', @@ -75,6 +77,21 @@ _current_device = None global_prog_seed = 0 _current_pipeline_stage = None _global_flags_ = core.globals() +_eager_mode_ = False + + +@signature_safe_contextmanager +def eager_guard(): + global _eager_mode_ + _eager_mode_ = True + try: + yield + finally: + _eager_mode_ = False + + +def in_eager_mode(): + return _eager_mode_ def require_version(min_version, max_version=None): @@ -340,7 +357,10 @@ def _set_dygraph_tracer_expected_place(place): def _set_expected_place(place): global _global_expected_place_ _global_expected_place_ = place - _set_dygraph_tracer_expected_place(place) + if in_eager_mode(): + return core.eager._set_expected_place(place) + else: + _set_dygraph_tracer_expected_place(place) # TODO(zhiqiu): remove this function. diff --git a/python/paddle/fluid/tests/unittests/test_egr_python_api.py b/python/paddle/fluid/tests/unittests/test_egr_python_api.py new file mode 100644 index 0000000000000000000000000000000000000000..c497c7f9bd80a901cb18f7fa9dabe2bfff00f0e8 --- /dev/null +++ b/python/paddle/fluid/tests/unittests/test_egr_python_api.py @@ -0,0 +1,103 @@ +# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
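+
+# The cases below exercise the new eager bindings end to end: numpy-to-tensor
+# conversion, scale as a sample forward op, property getters and setters, and
+# retain_grad/run_backward for autograd.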
+import paddle.fluid.core as core
+import paddle.fluid.eager.eager_tensor_patch_methods as eager_tensor_patch_methods
+import paddle
+import numpy as np
+from paddle.fluid import eager_guard
+import unittest
+
+
+class EagerScaleTestCase(unittest.TestCase):
+    def test_scale_base(self):
+        with eager_guard():
+            paddle.set_device("cpu")
+            arr = np.ones([4, 16, 16, 32]).astype('float32')
+            tensor = paddle.to_tensor(arr, 'float32', core.CPUPlace())
+            print(tensor)
+            tensor = core.eager.scale(tensor, 2.0, 0.9, True, False)
+            for i in range(0, 100):
+                tensor = core.eager.scale(tensor, 2.0, 0.9, True, False)
+            print(tensor)
+            self.assertEqual(tensor.shape, [4, 16, 16, 32])
+            self.assertEqual(tensor.stop_gradient, True)
+
+    def test_retain_grad_and_run_backward(self):
+        with eager_guard():
+            paddle.set_device("cpu")
+
+            input_data = np.ones([4, 16, 16, 32]).astype('float32')
+            data_eager = paddle.to_tensor(input_data, 'float32',
+                                          core.CPUPlace(), False)
+
+            grad_data = np.ones([4, 16, 16, 32]).astype('float32')
+            grad_eager = paddle.to_tensor(grad_data, 'float32', core.CPUPlace())
+
+            core.eager.retain_grad_for_tensor(data_eager)
+
+            out_eager = core.eager.scale(data_eager, 1.0, 0.9, True, True)
+            self.assertFalse(data_eager.grad._is_initialized())
+            core.eager.run_backward([out_eager], [grad_eager], False)
+            self.assertTrue(data_eager.grad._is_initialized())
+            self.assertTrue(np.array_equal(data_eager.grad.numpy(), input_data))
+
+
+class EagerDtypeTestCase(unittest.TestCase):
+    def check_to_tensor_and_numpy(self, dtype):
+        with eager_guard():
+            arr = np.random.random([4, 16, 16, 32]).astype(dtype)
+            tensor = paddle.to_tensor(arr, dtype)
+            self.assertEqual(tensor.dtype, dtype)
+            self.assertTrue(np.array_equal(arr, tensor.numpy()))
+
+    def test_dtype_base(self):
+        self.check_to_tensor_and_numpy('bool')
+        self.check_to_tensor_and_numpy('int8')
+        self.check_to_tensor_and_numpy('uint8')
+        self.check_to_tensor_and_numpy('int16')
+        self.check_to_tensor_and_numpy('int32')
+        self.check_to_tensor_and_numpy('int64')
+        self.check_to_tensor_and_numpy('float16')
+        self.check_to_tensor_and_numpy('float32')
+        self.check_to_tensor_and_numpy('float64')
+        self.check_to_tensor_and_numpy('complex64')
+        self.check_to_tensor_and_numpy('complex128')
+
+
+class EagerTensorPropertiesTestCase(unittest.TestCase):
+    def test_properties(self):
+        with eager_guard():
+            paddle.set_device("cpu")
+            arr = np.ones([4, 16, 16, 32]).astype('float32')
+            tensor = paddle.to_tensor(arr, 'float32', core.CPUPlace())
+            self.assertEqual(tensor.shape, [4, 16, 16, 32])
+            tensor.name = 'tensor_name_test'
+            self.assertEqual(tensor.name, 'tensor_name_test')
+            self.assertEqual(tensor.persistable, False)
+            tensor.persistable = True
+            self.assertEqual(tensor.persistable, True)
+            tensor.persistable = False
+            self.assertEqual(tensor.persistable, False)
+            self.assertTrue(tensor.place.is_cpu_place())
+            self.assertEqual(tensor._place_str, 'CPUPlace')
+            self.assertEqual(tensor.stop_gradient, True)
+            tensor.stop_gradient = False
+            self.assertEqual(tensor.stop_gradient, False)
+            tensor.stop_gradient = True
+            self.assertEqual(tensor.stop_gradient, True)
+
+
+if __name__ == "__main__":
+    unittest.main()
diff --git a/python/paddle/tensor/creation.py b/python/paddle/tensor/creation.py
index 72b6bd29fd9e78ec813c418c70216f824edebe76..812c7e8b5ac04def51c369d2d0ed53772d0cb85e 100644
--- a/python/paddle/tensor/creation.py
+++ b/python/paddle/tensor/creation.py
@@ -31,6 +31,7 @@ from ..fluid.framework import convert_np_dtype_to_dtype_, in_dygraph_mode, _varb
 from ..fluid.layers import linspace  # noqa: F401
 import paddle
 from paddle import _C_ops
+from ..fluid.framework import in_eager_mode
 
 __all__ = []
 
@@ -115,6 +116,12 @@ def to_tensor(data, dtype=None, place=None, stop_gradient=True):
             ) != _current_expected_place()._get_device_id():
         place = _current_expected_place()
 
+    if in_eager_mode():
+        if dtype is None:
+            dtype = paddle.get_default_dtype()
+        return core.eager.to_tensor(data,
+                                    convert_dtype(dtype), place, stop_gradient)
+
     if not isinstance(data, np.ndarray):
 
         def _handle_dtype(data, dtype):
diff --git a/python/paddle/tensor/to_string.py b/python/paddle/tensor/to_string.py
index f640882893034d9c5e706629b09bc60c07764e7d..6fd20457fe619a21e7479329a56678326b1357ac 100644
--- a/python/paddle/tensor/to_string.py
+++ b/python/paddle/tensor/to_string.py
@@ -255,3 +255,39 @@ def to_string(var, prefix='Tensor'):
         stop_gradient=var.stop_gradient,
         indent=' ' * indent,
         data=data)
+
+
+def eager_tensor_to_string(tensor, prefix='Tensor'):
+    indent = len(prefix) + 1
+
+    _template = "{prefix}(shape={shape}, dtype={dtype}, place={place}, stop_gradient={stop_gradient},\n{indent}{data})"
+
+    if not tensor._is_initialized():
+        return "Tensor(Not initialized)"
+
+    np_tensor = tensor.numpy()
+
+    if len(tensor.shape) == 0:
+        size = 0
+    else:
+        size = 1
+        for dim in tensor.shape:
+            size *= dim
+
+    summary = False
+    if size > DEFAULT_PRINT_OPTIONS.threshold:
+        summary = True
+
+    max_width, signed = _get_max_width(_to_summary(np_tensor))
+
+    data = _format_tensor(
+        np_tensor, summary, indent=indent, max_width=max_width, signed=signed)
+
+    return _template.format(
+        prefix=prefix,
+        shape=tensor.shape,
+        dtype=tensor.dtype,
+        place=tensor._place_str,
+        stop_gradient=tensor.stop_gradient,
+        indent=' ' * indent,
+        data=data)
diff --git a/python/setup.py.in b/python/setup.py.in
index e01019ed7da778457abec36263bc7df121cc8ca4..5690fccf89dda36809831e882f3609d6b3042a7e 100644
--- a/python/setup.py.in
+++ b/python/setup.py.in
@@ -307,6 +307,7 @@ packages=['paddle',
           'paddle.fluid.dygraph',
           'paddle.fluid.dygraph.dygraph_to_static',
           'paddle.fluid.dygraph.amp',
+          'paddle.fluid.eager',
          'paddle.fluid.proto',
          'paddle.fluid.proto.profiler',
          'paddle.fluid.distributed',
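
A minimal usage sketch of the new eager bindings, mirroring test_egr_python_api.py above. This is illustrative only: it assumes a CPU build, and the trailing booleans passed to core.eager.scale follow the unit test (their names — bias_after_scale and a trace-backward switch — are assumptions, not confirmed by this diff):

    import numpy as np
    import paddle
    import paddle.fluid.core as core
    from paddle.fluid import eager_guard

    with eager_guard():  # routes paddle.to_tensor to core.eager.to_tensor
        paddle.set_device("cpu")
        x = paddle.to_tensor(
            np.ones([2, 2]).astype('float32'), 'float32', core.CPUPlace(), False)
        core.eager.retain_grad_for_tensor(x)  # keep the leaf's grad after backward
        y = core.eager.scale(x, 2.0, 0.9, True, True)  # y = 2*x + 0.9, traced
        grad = paddle.to_tensor(
            np.ones([2, 2]).astype('float32'), 'float32', core.CPUPlace())
        core.eager.run_backward([y], [grad], False)
        print(x.grad.numpy())  # d(2x + 0.9)/dx == 2 everywhere

The same pattern underlies the EagerScaleTestCase above; outside eager_guard(), paddle.to_tensor falls back to the existing dygraph path.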