Unverified commit 07b4fe93 authored by W wanghuancoder, committed by GitHub

[Eager] publish python c api for eager (#37550)

* refine a test case, test=develop

* publish python c api for eager, test=develop

* revert modify about test_allclose_layer.py, test=develop

* refine, test=develop

* refine, test=develop

* refine, test=develop

* refine, test=develop

* refine, test=develop

* refine, test=develop

* delete numpy includes, use pybind11 numpy.h, test=develop

* refine, test=develop

* refine, test=develop

* refine, test=develop

* suport eager error msg, and add grad test case, test=develop

* refine, test=develop

* refine, test=develop
Parent commit: c58c4ede
......@@ -212,6 +212,12 @@ if(WITH_PYTHON)
add_custom_target(op_function_generator_cmd ALL DEPENDS ${impl_file})
list(APPEND PYBIND_DEPS interpretercore standalone_executor)
cc_library(paddle_eager
SRCS eager.cc eager_functions.cc eager_method.cc eager_properties.cc eager_utils.cc
DEPS autograd_meta grad_node_info pten global_utils utils eager_api accumulation_node backward python)
list(APPEND PYBIND_DEPS paddle_eager)
cc_library(paddle_pybind SHARED
SRCS ${PYBIND_SRCS}
DEPS ${PYBIND_DEPS} ${GLOB_OP_LIB} ${GLOB_OPERATOR_DEPS})
......
/* Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
// disable numpy compile error
#include <Python.h>
#include <string>
#include <vector>
#include "paddle/fluid/eager/api/all.h"
#include "paddle/fluid/eager/autograd_meta.h"
#include "paddle/fluid/eager/utils.h"
#include "paddle/fluid/memory/allocation/allocator.h"
#include "paddle/fluid/memory/memcpy.h"
#include "paddle/fluid/platform/enforce.h"
#include "paddle/fluid/pybind/eager.h"
#include "paddle/fluid/pybind/eager_utils.h"
#include "paddle/pten/common/data_type.h"
#include "paddle/pten/core/convert_utils.h"
#include "paddle/pten/core/dense_tensor.h"
#include "paddle/pten/include/core.h"
#pragma GCC diagnostic ignored "-Wmissing-field-initializers"
namespace paddle {
namespace pybind {
namespace py = ::pybind11;
PyTypeObject* p_eager_tensor_type;
// tp_new slot: allocate the Python wrapper and placement-new the embedded
// egr::EagerTensor payload (destroyed explicitly in eagertensor_dealloc).
PyObject* eagertensor_new(PyTypeObject* type, PyObject* args,
                          PyObject* kwargs) {
  PyObject* py_obj = type->tp_alloc(type, 0);
  if (py_obj != nullptr) {
    auto* wrapper = reinterpret_cast<EagerTensorObject*>(py_obj);
    new (&(wrapper->eagertensor)) egr::EagerTensor();
  }
  return py_obj;
}
// tp_dealloc slot: run the C++ destructor by hand (the payload was
// placement-new'd), then release the Python object storage.
static void eagertensor_dealloc(EagerTensorObject* self) {
  self->eagertensor.~EagerTensor();
  PyObject* py_self = reinterpret_cast<PyObject*>(self);
  Py_TYPE(py_self)->tp_free(py_self);
}
extern struct PyGetSetDef variable_properties[];
extern PyMethodDef variable_methods[];
// Python type object backing core_avx.eager.EagerTensor. Unused slots stay 0;
// tp_new/tp_dealloc manage the placement-new'd egr::EagerTensor payload, and
// the method/getset tables are defined in sibling translation units
// (declared extern above).
PyTypeObject eager_tensor_type = {
    PyVarObject_HEAD_INIT(NULL, 0) "core_avx.eager.EagerTensor", /* tp_name */
    sizeof(EagerTensorObject),       /* tp_basicsize */
    0,                               /* tp_itemsize */
    (destructor)eagertensor_dealloc, /* tp_dealloc */
    0,                               /* tp_vectorcall_offset */
    0,                               /* tp_getattr */
    0,                               /* tp_setattr */
    0,                               /* tp_reserved */
    0,                               /* tp_repr */
    0,                               /* tp_as_number */
    0,                               /* tp_as_sequence */
    0,                               /* tp_as_mapping */
    0,                               /* tp_hash */
    0,                               /* tp_call */
    0,                               /* tp_str */
    0,                               /* tp_getattro */
    0,                               /* tp_setattro */
    0,                               /* tp_as_buffer */
    // BASETYPE allows Python-side subclassing; HEAPTYPE is set even though
    // this object is statically allocated — NOTE(review): statically-defined
    // types normally do not set Py_TPFLAGS_HEAPTYPE, confirm this is intended.
    Py_TPFLAGS_DEFAULT | Py_TPFLAGS_BASETYPE |
        Py_TPFLAGS_HEAPTYPE, /* tp_flags */
    0,                       /* tp_doc */
    0,                       /* tp_traverse */
    0,                       /* tp_clear */
    0,                       /* tp_richcompare */
    0,                       /* tp_weaklistoffset */
    0,                       /* tp_iter */
    0,                       /* tp_iternext */
    variable_methods,        /* tp_methods */
    0,                       /* tp_members */
    variable_properties,     /* tp_getset */
    0,                       /* tp_base */
    0,                       /* tp_dict */
    0,                       /* tp_descr_get */
    0,                       /* tp_descr_set */
    0,                       /* tp_dictoffset */
    0,                       /* tp_init */
    0,                       /* tp_alloc */
    eagertensor_new,         /* tp_new */
    0,                       /* tp_free */
    0,                       /* tp_is_gc */
    0,                       /* tp_bases */
    0,                       /* tp_mro */
    0,                       /* tp_cache */
    0,                       /* tp_subclasses */
    0,                       /* tp_weaklist */
    0,                       /* tp_del */
    0,                       /* tp_version_tag */
    0                        /* tp_finalize */
};
// Creates the "eager" submodule on `module`, finalizes the EagerTensor type
// and exposes it plus the module-level eager API functions.
// Throws platform::errors::Fatal if CPython type/module registration fails.
void BindEager(pybind11::module* module) {
  auto m = module->def_submodule("eager");

  p_eager_tensor_type = &eager_tensor_type;
  if (PyType_Ready(&eager_tensor_type) < 0) {
    // Fixed typo in the error message ("erroe" -> "error").
    PADDLE_THROW(platform::errors::Fatal(
        "Init Paddle error in BindEager(PyType_Ready)."));
    return;
  }

  // PyModule_AddObject steals a reference on success, so take one first.
  Py_INCREF(&eager_tensor_type);
  if (PyModule_AddObject(m.ptr(), "EagerTensor",
                         reinterpret_cast<PyObject*>(&eager_tensor_type)) < 0) {
    Py_DECREF(&eager_tensor_type);
    Py_DECREF(m.ptr());
    PADDLE_THROW(platform::errors::Fatal(
        "Init Paddle error in BindEager(PyModule_AddObject)."));
    return;
  }

  BindFunctions(m.ptr());
}
} // namespace pybind
} // namespace paddle
/* Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#pragma once
#include <Python.h>
#include "pybind11/pybind11.h"
#include "pybind11/stl.h"
namespace paddle {
namespace pybind {
void BindEager(pybind11::module* m);
void BindFunctions(PyObject* module);
} // namespace pybind
} // namespace paddle
/* Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
// disable numpy compile error
#include <Python.h>
#include <string>
#include <vector>
#include "pybind11/numpy.h"
#include "pybind11/pybind11.h"
#include "paddle/fluid/eager/accumulation/accumulation_node.h"
#include "paddle/fluid/eager/api/all.h"
#include "paddle/fluid/eager/autograd_meta.h"
#include "paddle/fluid/eager/backward.h"
#include "paddle/fluid/eager/utils.h"
#include "paddle/fluid/memory/allocation/allocator.h"
#include "paddle/fluid/memory/memcpy.h"
#include "paddle/fluid/platform/enforce.h"
#include "paddle/fluid/pybind/eager.h"
#include "paddle/fluid/pybind/eager_utils.h"
#include "paddle/fluid/pybind/exception.h"
#include "paddle/pten/api/lib/utils/allocator.h"
#include "paddle/pten/api/lib/utils/storage.h"
#include "paddle/pten/api/lib/utils/tensor_utils.h"
#include "paddle/pten/common/data_type.h"
#include "paddle/pten/core/convert_utils.h"
#include "paddle/pten/core/dense_tensor.h"
#include "paddle/pten/include/core.h"
namespace paddle {
namespace pybind {
namespace py = ::pybind11;
extern PyTypeObject* p_eager_tensor_type;
// Returns the element count of a numpy array (product of all dimensions).
size_t PyArray_Size_(PyObject* numpy_data) {
  auto* proxy = pybind11::detail::array_proxy(numpy_data);
  size_t numel = 1;
  for (int i = 0; i < proxy->nd; ++i) {
    numel *= static_cast<size_t>(proxy->dimensions[i]);
  }
  return numel;
}
// Allocation that borrows a numpy array's buffer instead of owning memory.
// Holds a strong reference to the ndarray so its buffer stays alive for the
// lifetime of the allocation (zero-copy tensor-from-numpy path).
class EagerNumpyAllocation : public paddle::memory::allocation::Allocation {
 public:
  // `numpy_data` must be a non-null, non-None ndarray; `dtype` is used only
  // to compute the byte size of the borrowed buffer.
  explicit EagerNumpyAllocation(PyObject* numpy_data, pten::DataType dtype)
      : Allocation(
            static_cast<void*>(pybind11::detail::array_proxy(numpy_data)->data),
            pten::DataTypeSize(dtype) * PyArray_Size_(numpy_data),
            paddle::platform::CPUPlace()),
        arr_(numpy_data) {
    PADDLE_ENFORCE_NOT_NULL(arr_, platform::errors::InvalidArgument(
                                      "The underlying PyObject pointer of "
                                      "numpy array cannot be nullptr"));
    PADDLE_ENFORCE_NE(
        arr_, Py_None,
        platform::errors::PreconditionNotMet(
            "The underlying PyObject pointer of numpy array cannot be None"));
    // Keep the ndarray alive while we alias its buffer.
    Py_INCREF(arr_);
  }
  ~EagerNumpyAllocation() override {
    // The destructor may run on a thread that does not hold the GIL, so
    // acquire it before touching the Python refcount.
    py::gil_scoped_acquire gil;
    Py_DECREF(arr_);
  }

 private:
  PyObject* arr_;  // strong reference to the borrowed ndarray
};
// Python binding: _set_expected_place(place) -> None.
// Stores the placement eager ops should run on into the global Controller.
static PyObject* eager_api_set_expected_place(PyObject* self, PyObject* args,
                                              PyObject* kwargs) {
  EAGER_TRY
  PyObject* place_obj = PyTuple_GET_ITEM(args, 0);
  egr::Controller::Instance().SetExpectedPlace(CastPyArg2Place(place_obj, 0));
  Py_RETURN_NONE;
  EAGER_CATCH_AND_THROW_RETURN_NULL
}
// Python binding: scale(tensor, float, float, bool, bool) -> EagerTensor.
// Forwards the positional args straight into egr::scale.
static PyObject* eager_api_scale(PyObject* self, PyObject* args,
                                 PyObject* kwargs) {
  EAGER_TRY
  // TODO(jiabin): Sync Tensor and Variable here when we support
  auto* src_obj =
      reinterpret_cast<EagerTensorObject*>(PyTuple_GET_ITEM(args, 0));
  egr::EagerTensor result =
      egr::scale(src_obj->eagertensor,
                 CastPyArg2AttrFloat(PyTuple_GET_ITEM(args, 1), 1),
                 CastPyArg2AttrFloat(PyTuple_GET_ITEM(args, 2), 2),
                 CastPyArg2AttrBoolean(PyTuple_GET_ITEM(args, 3), 3),
                 CastPyArg2AttrBoolean(PyTuple_GET_ITEM(args, 4), 4));
  return ToPyObject(result);
  EAGER_CATCH_AND_THROW_RETURN_NULL
}
// Wraps a numpy array into a new leaf EagerTensor. Zero-copy: the ndarray's
// buffer is aliased through EagerNumpyAllocation, which keeps the array
// alive. The new tensor gets an AccumulationNode so gradients can accumulate
// into it. Throws Fatal if the Python object cannot be allocated.
static PyObject* eager_api_numpy_to_tensor(PyObject* numpy_data,
                                           pten::DataType dtype,
                                           const paddle::platform::Place& place,
                                           bool stop_gradient) {
  std::vector<int64_t> vec_dims;
  auto numpy_shape = pybind11::detail::array_proxy(numpy_data)->dimensions;
  int rank = pybind11::detail::array_proxy(numpy_data)->nd;
  vec_dims.reserve(rank);
  for (int i = 0; i < rank; i++) {
    vec_dims.push_back(static_cast<int64_t>(numpy_shape[i]));
  }
  paddle::framework::DDim dims = paddle::framework::make_ddim(vec_dims);

  // TODO(jiabin): Support GPU later
  auto meta = pten::DenseTensorMeta(dtype, dims);
  auto holder = std::make_shared<EagerNumpyAllocation>(numpy_data, dtype);
  auto shared_storage =
      pten::make_intrusive<paddle::experimental::SharedStorage>(holder, 0);
  std::shared_ptr<pten::DenseTensor> densetensor(
      new pten::DenseTensor(std::move(shared_storage), std::move(meta)));

  PyObject* obj = p_eager_tensor_type->tp_alloc(p_eager_tensor_type, 0);
  if (obj) {
    auto v = reinterpret_cast<EagerTensorObject*>(obj);
    new (&(v->eagertensor)) egr::EagerTensor();
    v->eagertensor.set_impl(densetensor);
    v->eagertensor.set_name(egr::Controller::Instance().GenerateUniqueName());
    // Renamed from `meta` to stop shadowing the DenseTensorMeta above.
    auto autograd_meta = egr::EagerUtils::autograd_meta(&(v->eagertensor));
    autograd_meta->SetStopGradient(stop_gradient);
    // Created tensor will be leaf tensor
    // So we append AccumulationNode to it.
    auto accumulation_node = std::make_shared<egr::GradNodeAccumulation>();
    autograd_meta->SetGradNode(accumulation_node);
    // TODO(jiabin): Shall we increase ref cnt here to make python ref cnt num
    // correctly?
  } else {
    PADDLE_THROW(platform::errors::Fatal(
        "tp_alloc return null, can not new a PyObject."));
  }
  return obj;
}
// Python binding: to_tensor(data, dtype_str, place, stop_gradient).
// Currently only numpy ndarray input is supported; anything else raises
// InvalidArgument.
static PyObject* eager_api_to_tensor(PyObject* self, PyObject* args,
                                     PyObject* kwargs) {
  EAGER_TRY
  // TODO(jiabin): Support Kwargs here
  PyObject* data = PyTuple_GET_ITEM(args, 0);
  auto str_dtype = CastPyArg2AttrString(PyTuple_GET_ITEM(args, 1), 1);
  pten::DataType dtype = pten::String2DataType(str_dtype);
  auto place = CastPyArg2Place(PyTuple_GET_ITEM(args, 2), 2);
  bool stop_gradient = CastPyArg2AttrBoolean(PyTuple_GET_ITEM(args, 3), 3);
  // TODO(jiabin): Support this when python given name
  // auto str_name = CastPyArg2AttrString(PyTuple_GET_ITEM(args, 4), 4);

  if (pybind11::detail::npy_api::get().PyArray_Check_(data)) {
    return eager_api_numpy_to_tensor(data, dtype, place, stop_gradient);
  } else {
    // Fixed typo in the user-facing message ("Eater" -> "Eager").
    PADDLE_THROW(platform::errors::InvalidArgument(
        "Eager to_tensor only support numpy to tensor."));
    Py_INCREF(Py_None);
    return Py_None;
  }
  EAGER_CATCH_AND_THROW_RETURN_NULL
}
// Python binding: retain_grad_for_tensor(tensor) -> None.
static PyObject* eager_api_retain_grad_for_tensor(PyObject* self,
                                                  PyObject* args,
                                                  PyObject* kwargs) {
  EAGER_TRY
  auto target = CastPyArg2EagerTensor(PyTuple_GET_ITEM(args, 0), 0);
  egr::egr_utils_api::RetainGradForTensor(target);
  Py_RETURN_NONE;
  EAGER_CATCH_AND_THROW_RETURN_NULL
}
// Python binding: run_backward(tensors, grad_tensors, bool) -> None.
// NOTE(review): arg 2 is presumably retain_graph — confirm against RunBackward.
static PyObject* eager_api_run_backward(PyObject* self, PyObject* args,
                                        PyObject* kwargs) {
  EAGER_TRY
  auto tensors = CastPyArg2VectorOfEagerTensor(PyTuple_GET_ITEM(args, 0), 0);
  auto grad_tensors =
      CastPyArg2VectorOfEagerTensor(PyTuple_GET_ITEM(args, 1), 1);
  bool flag = CastPyArg2AttrBoolean(PyTuple_GET_ITEM(args, 2), 2);
  RunBackward(tensors, grad_tensors, flag);
  Py_RETURN_NONE;
  EAGER_CATCH_AND_THROW_RETURN_NULL
}
// Module-level function table registered on the "eager" submodule by
// BindFunctions(); every entry takes (*args, **kwargs).
PyMethodDef variable_functions[] = {
    {"to_tensor", (PyCFunction)(void (*)(void))eager_api_to_tensor,
     METH_VARARGS | METH_KEYWORDS, NULL},
    {"scale", (PyCFunction)(void (*)(void))eager_api_scale,
     METH_VARARGS | METH_KEYWORDS, NULL},
    {"_set_expected_place",
     (PyCFunction)(void (*)(void))eager_api_set_expected_place,
     METH_VARARGS | METH_KEYWORDS, NULL},
    {"retain_grad_for_tensor",
     (PyCFunction)(void (*)(void))eager_api_retain_grad_for_tensor,
     METH_VARARGS | METH_KEYWORDS, NULL},
    {"run_backward", (PyCFunction)(void (*)(void))eager_api_run_backward,
     METH_VARARGS | METH_KEYWORDS, NULL},
    {NULL, NULL, 0, NULL}};  // sentinel
// Registers the module-level eager API functions (variable_functions) on
// `module`. Throws Fatal if CPython registration fails.
void BindFunctions(PyObject* module) {
  if (PyModule_AddFunctions(module, variable_functions) < 0) {
    // Fixed typo in the error message ("erroe" -> "error").
    PADDLE_THROW(platform::errors::Fatal(
        "Init Paddle error in BindFunctions(PyModule_AddFunctions)."));
    return;
  }
}
} // namespace pybind
} // namespace paddle
/* Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
// disable numpy compile error
#include <Python.h>
#include <string>
#include <vector>
#include "pybind11/numpy.h"
#include "pybind11/pybind11.h"
#include "paddle/fluid/eager/api/all.h"
#include "paddle/fluid/eager/autograd_meta.h"
#include "paddle/fluid/memory/allocation/allocator.h"
#include "paddle/fluid/memory/memcpy.h"
#include "paddle/fluid/platform/enforce.h"
#include "paddle/fluid/pybind/eager.h"
#include "paddle/fluid/pybind/eager_utils.h"
#include "paddle/fluid/pybind/exception.h"
#include "paddle/pten/common/data_type.h"
#include "paddle/pten/core/convert_utils.h"
#include "paddle/pten/core/dense_tensor.h"
#include "paddle/pten/include/core.h"
namespace paddle {
namespace pybind {
extern PyTypeObject* pEagerTensorType;
// Python binding: tensor.numpy() -> ndarray (deep copy of the tensor data).
// Uninitialized tensors return None. CPU tensors are memcpy'd; CUDA tensors
// (when compiled in) are copied device-to-host; any other place raises
// InvalidArgument.
static PyObject* eager_tensor_method_numpy(EagerTensorObject* self,
                                           PyObject* args, PyObject* kwargs) {
  EAGER_TRY
  if (!self->eagertensor.initialized()) {
    Py_INCREF(Py_None);
    return Py_None;
  }
  auto tensor_dims = self->eagertensor.shape();
  auto numpy_dtype = pten::TensorDtype2NumpyDtype(self->eagertensor.type());
  auto sizeof_dtype = pten::DataTypeSize(self->eagertensor.type());
  Py_intptr_t py_dims[paddle::framework::DDim::kMaxRank];
  Py_intptr_t py_strides[paddle::framework::DDim::kMaxRank];
  size_t numel = 1;
  // Build C-contiguous byte strides from the innermost dimension outward.
  for (int i = tensor_dims.size() - 1; i >= 0; --i) {
    py_dims[i] = static_cast<size_t>(tensor_dims[i]);
    py_strides[i] = sizeof_dtype * numel;
    numel *= py_dims[i];
  }
  auto& api = pybind11::detail::npy_api::get();
  PyObject* array = api.PyArray_NewFromDescr_(
      api.PyArray_Type_, api.PyArray_DescrFromType_(numpy_dtype),
      tensor_dims.size(), py_dims, py_strides, nullptr,
      pybind11::detail::npy_api::NPY_ARRAY_ALIGNED_ |
          pybind11::detail::npy_api::NPY_ARRAY_WRITEABLE_,
      nullptr);

  if (self->eagertensor.is_cpu()) {
    auto dense_tensor =
        std::dynamic_pointer_cast<pten::DenseTensor>(self->eagertensor.impl());
    platform::CPUPlace place;
    // deep copy
    paddle::memory::Copy(place, reinterpret_cast<void*>(
                                    pybind11::detail::array_proxy(array)->data),
                         place, dense_tensor->data(), sizeof_dtype * numel);
#if defined(PADDLE_WITH_CUDA)
  } else if (self->eagertensor.is_cuda()) {
    auto dense_tensor =
        std::dynamic_pointer_cast<pten::DenseTensor>(self->eagertensor.impl());
    paddle::platform::GpuMemcpySync(
        pybind11::detail::array_proxy(array)->data, dense_tensor->data(),
        pten::DataTypeSize(dense_tensor->dtype()) * dense_tensor->numel(),
        cudaMemcpyDeviceToHost);
#endif
  } else {
    // Release the freshly-created ndarray before raising; previously it
    // leaked on this unsupported-place path.
    Py_XDECREF(array);
    PADDLE_THROW(platform::errors::InvalidArgument(
        "Tensor.numpy() only support cpu tensor."));
    Py_INCREF(Py_None);
    return Py_None;
  }

  return array;
  EAGER_CATCH_AND_THROW_RETURN_NULL
}
// Python binding: tensor._is_initialized() -> bool.
static PyObject* eager_tensor_method_is_initialized(EagerTensorObject* self,
                                                    PyObject* args,
                                                    PyObject* kwargs) {
  EAGER_TRY
  bool inited = self->eagertensor.initialized();
  return ToPyObject(inited);
  EAGER_CATCH_AND_THROW_RETURN_NULL
}
// Instance method table for EagerTensor, installed via tp_methods in
// eager_tensor_type (declared extern in eager.cc).
PyMethodDef variable_methods[] = {
    {"numpy", (PyCFunction)(void (*)(void))eager_tensor_method_numpy,
     METH_VARARGS | METH_KEYWORDS, NULL},
    {"_is_initialized",
     (PyCFunction)(void (*)(void))eager_tensor_method_is_initialized,
     METH_VARARGS | METH_KEYWORDS, NULL},
    {NULL, NULL, 0, NULL}};  // sentinel
} // namespace pybind
} // namespace paddle
/* Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
// disable numpy compile error
#include <Python.h>
#include <string>
#include <vector>
#include "paddle/fluid/eager/api/all.h"
#include "paddle/fluid/eager/autograd_meta.h"
#include "paddle/fluid/eager/utils.h"
#include "paddle/fluid/memory/allocation/allocator.h"
#include "paddle/fluid/memory/memcpy.h"
#include "paddle/fluid/platform/enforce.h"
#include "paddle/fluid/pybind/eager.h"
#include "paddle/fluid/pybind/eager_utils.h"
#include "paddle/fluid/pybind/exception.h"
#include "paddle/pten/common/data_type.h"
#include "paddle/pten/core/convert_utils.h"
#include "paddle/pten/core/dense_tensor.h"
#include "paddle/pten/include/core.h"
#pragma GCC diagnostic ignored "-Wwrite-strings"
namespace paddle {
namespace pybind {
extern PyTypeObject* p_eager_tensor_type;
// Getter for EagerTensor.name.
PyObject* eager_tensor_properties_get_name(EagerTensorObject* self,
                                           void* closure) {
  EAGER_TRY
  const auto& tensor_name = self->eagertensor.name();
  return ToPyObject(tensor_name);
  EAGER_CATCH_AND_THROW_RETURN_NULL
}
// Setter for EagerTensor.name; expects a Python str.
int eager_tensor_properties_set_name(EagerTensorObject* self, PyObject* value,
                                     void* closure) {
  EAGER_TRY
  auto new_name = CastPyArg2AttrString(value, 0);
  self->eagertensor.set_name(new_name);
  return 0;
  EAGER_CATCH_AND_THROW_RETURN_ZERO
}
// Getter for EagerTensor.stop_gradient (read from autograd meta).
PyObject* eager_tensor_properties_get_stop_gradient(EagerTensorObject* self,
                                                    void* closure) {
  EAGER_TRY
  auto autograd_meta = egr::EagerUtils::autograd_meta(&self->eagertensor);
  return ToPyObject(autograd_meta->StopGradient());
  EAGER_CATCH_AND_THROW_RETURN_NULL
}
// Getter for EagerTensor.grad. Uses unsafe_autograd_meta, which — unlike
// autograd_meta — does not create meta on demand.
PyObject* eager_tensor_properties_get_grad(EagerTensorObject* self,
                                           void* closure) {
  EAGER_TRY
  auto autograd_meta = egr::EagerUtils::unsafe_autograd_meta(self->eagertensor);
  return ToPyObject(autograd_meta->Grad());
  EAGER_CATCH_AND_THROW_RETURN_NULL
}
// Setter for EagerTensor.stop_gradient; expects a Python bool.
int eager_tensor_properties_set_stop_gradient(EagerTensorObject* self,
                                              PyObject* value, void* closure) {
  EAGER_TRY
  auto autograd_meta = egr::EagerUtils::autograd_meta(&self->eagertensor);
  autograd_meta->SetStopGradient(CastPyArg2AttrBoolean(value, 0));
  return 0;
  EAGER_CATCH_AND_THROW_RETURN_ZERO
}
// Getter for EagerTensor.persistable (read from autograd meta).
PyObject* eager_tensor_properties_get_persistable(EagerTensorObject* self,
                                                  void* closure) {
  EAGER_TRY
  auto autograd_meta = egr::EagerUtils::autograd_meta(&self->eagertensor);
  return ToPyObject(autograd_meta->Persistable());
  EAGER_CATCH_AND_THROW_RETURN_NULL
}
// Setter for EagerTensor.persistable; expects a Python bool.
int eager_tensor_properties_set_persistable(EagerTensorObject* self,
                                            PyObject* value, void* closure) {
  EAGER_TRY
  auto autograd_meta = egr::EagerUtils::autograd_meta(&self->eagertensor);
  autograd_meta->SetPersistable(CastPyArg2AttrBoolean(value, 0));
  return 0;
  EAGER_CATCH_AND_THROW_RETURN_ZERO
}
// Getter for EagerTensor.shape: converts the DDim into a Python list of ints.
PyObject* eager_tensor_properties_get_shape(EagerTensorObject* self,
                                            void* closure) {
  EAGER_TRY
  auto dims = self->eagertensor.shape();
  std::vector<int64_t> shape_vec;
  shape_vec.reserve(static_cast<size_t>(dims.size()));
  for (int i = 0; i < dims.size(); ++i) {
    shape_vec.push_back(dims[i]);
  }
  return ToPyObject(shape_vec);
  EAGER_CATCH_AND_THROW_RETURN_NULL
}
// Getter for EagerTensor.place.
PyObject* eager_tensor_properties_get_place(EagerTensorObject* self,
                                            void* closure) {
  EAGER_TRY
  const auto& tensor_place = self->eagertensor.place();
  return ToPyObject(tensor_place);
  EAGER_CATCH_AND_THROW_RETURN_NULL
}
// Getter for EagerTensor._place_str: the place rendered via operator<<.
PyObject* eager_tensor_properties_get_place_str(EagerTensorObject* self,
                                                void* closure) {
  EAGER_TRY
  std::ostringstream oss;
  oss << self->eagertensor.place();
  return ToPyObject(oss.str());
  EAGER_CATCH_AND_THROW_RETURN_NULL
}
// Getter for EagerTensor.dtype, returned as its string name.
PyObject* eager_tensor_properties_get_dtype(EagerTensorObject* self,
                                            void* closure) {
  EAGER_TRY
  auto dtype_str = pten::DataType2String(self->eagertensor.type());
  return ToPyObject(dtype_str);
  EAGER_CATCH_AND_THROW_RETURN_NULL
}
// Property (getset) table for EagerTensor, installed via tp_getset in
// eager_tensor_type. Read-only properties have a nullptr setter.
struct PyGetSetDef variable_properties[] = {
    {"grad", (getter)eager_tensor_properties_get_grad, nullptr, nullptr,
     nullptr},
    {"name", (getter)eager_tensor_properties_get_name,
     (setter)eager_tensor_properties_set_name, nullptr, nullptr},
    {"stop_gradient", (getter)eager_tensor_properties_get_stop_gradient,
     (setter)eager_tensor_properties_set_stop_gradient, nullptr, nullptr},
    {"persistable", (getter)eager_tensor_properties_get_persistable,
     (setter)eager_tensor_properties_set_persistable, nullptr, nullptr},
    {"shape", (getter)eager_tensor_properties_get_shape, nullptr, nullptr,
     nullptr},
    // {"is_leaf", (getter)eager_tensor_properties_get_is_leaf, nullptr,
    // nullptr,
    //  nullptr},
    {"place", (getter)eager_tensor_properties_get_place, nullptr, nullptr,
     nullptr},
    {"_place_str", (getter)eager_tensor_properties_get_place_str, nullptr,
     nullptr, nullptr},
    {"dtype", (getter)eager_tensor_properties_get_dtype, nullptr, nullptr,
     nullptr},
    {nullptr, nullptr, nullptr, nullptr, nullptr}};  // sentinel
} // namespace pybind
} // namespace paddle
/* Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include <Python.h>
#include <string>
#include <vector>
#include "paddle/fluid/eager/api/all.h"
#include "paddle/fluid/eager/autograd_meta.h"
#include "paddle/fluid/memory/allocation/allocator.h"
#include "paddle/fluid/platform/enforce.h"
#include "paddle/fluid/pybind/eager.h"
#include "paddle/fluid/pybind/eager_utils.h"
#include "paddle/pten/common/data_type.h"
#include "paddle/pten/core/convert_utils.h"
#include "paddle/pten/core/dense_tensor.h"
#include "paddle/pten/include/core.h"
namespace paddle {
namespace pybind {
extern PyTypeObject* p_eager_tensor_type;
extern PyTypeObject* g_place_pytype;
extern PyTypeObject* g_cudaplace_pytype;
extern PyTypeObject* g_cpuplace_pytype;
extern PyTypeObject* g_xpuplace_pytype;
extern PyTypeObject* g_npuplace_pytype;
extern PyTypeObject* g_cudapinnedplace_pytype;
// True if *obj is a Python int (bool excluded even though it subclasses int).
// numpy integer scalars are accepted by converting them in place via
// PyNumber_Long; on success *obj is replaced with the new Python int.
bool PyObject_CheckLongOrConvertToLong(PyObject** obj) {
  if (PyLong_Check(*obj) && !PyBool_Check(*obj)) {
    return true;
  }

  const char* type_name =
      reinterpret_cast<PyTypeObject*>((*obj)->ob_type)->tp_name;
  if (std::string(type_name).find("numpy") != std::string::npos) {
    PyObject* converted = PyNumber_Long(*obj);
    if (converted != nullptr) {
      *obj = converted;
      return true;
    }
  }
  return false;
}
// True if *obj is a Python float or int (ints are accepted for float attrs).
// numpy scalars are accepted by converting them in place via PyNumber_Float;
// on success *obj is replaced with the new Python float.
bool PyObject_CheckFloatOrConvertToFloat(PyObject** obj) {
  // sometimes users provide PyLong or numpy.int64 but attr is float
  if (PyFloat_Check(*obj) || PyLong_Check(*obj)) {
    return true;
  }

  const char* type_name =
      reinterpret_cast<PyTypeObject*>((*obj)->ob_type)->tp_name;
  if (std::string(type_name).find("numpy") != std::string::npos) {
    PyObject* converted = PyNumber_Float(*obj);
    if (converted != nullptr) {
      *obj = converted;
      return true;
    }
  }
  return false;
}
// True if obj is a Python str.
bool PyObject_CheckStr(PyObject* obj) { return PyUnicode_Check(obj) != 0; }
// Converts a Python object at argument position `arg_pos` into a C++ bool.
// None maps to false; anything other than None/True/False throws.
bool CastPyArg2AttrBoolean(PyObject* obj, ssize_t arg_pos) {
  if (obj == Py_None) {
    // To be compatible with QA integration testing. Some test case pass in
    // None.
    return false;
  }
  if (obj == Py_True) {
    return true;
  }
  if (obj == Py_False) {
    return false;
  }
  PADDLE_THROW(platform::errors::InvalidArgument(
      "argument (position %d) must be "
      "bool, but got %s",
      arg_pos + 1, (reinterpret_cast<PyTypeObject*>(obj->ob_type))->tp_name));
}
// Converts a Python int (or numpy integer scalar) into a C++ int; throws
// InvalidArgument otherwise.
int CastPyArg2AttrInt(PyObject* obj, ssize_t arg_pos) {
  if (!PyObject_CheckLongOrConvertToLong(&obj)) {
    PADDLE_THROW(platform::errors::InvalidArgument(
        "argument (position %d) must be "
        "int, but got %s",
        arg_pos + 1, (reinterpret_cast<PyTypeObject*>(obj->ob_type))->tp_name));
  }
  return static_cast<int>(PyLong_AsLong(obj));
}
// Converts a Python int (or numpy integer scalar) into a C++ int64_t; throws
// InvalidArgument otherwise.
int64_t CastPyArg2AttrLong(PyObject* obj, ssize_t arg_pos) {
  if (!PyObject_CheckLongOrConvertToLong(&obj)) {
    PADDLE_THROW(platform::errors::InvalidArgument(
        "argument (position %d) must be "
        "long, but got %s",
        arg_pos + 1, (reinterpret_cast<PyTypeObject*>(obj->ob_type))->tp_name));
  }
  return (int64_t)PyLong_AsLong(obj);  // NOLINT
}
// Converts a Python float/int (or numpy scalar) into a C++ float; throws
// InvalidArgument otherwise.
float CastPyArg2AttrFloat(PyObject* obj, ssize_t arg_pos) {
  if (!PyObject_CheckFloatOrConvertToFloat(&obj)) {
    PADDLE_THROW(platform::errors::InvalidArgument(
        "argument (position %d) must be "
        "float, but got %s",
        arg_pos + 1, (reinterpret_cast<PyTypeObject*>(obj->ob_type))->tp_name));
  }
  return static_cast<float>(PyFloat_AsDouble(obj));
}
// Converts a Python str into a std::string (UTF-8); throws InvalidArgument
// for any non-str object.
std::string CastPyArg2AttrString(PyObject* obj, ssize_t arg_pos) {
  if (!PyObject_CheckStr(obj)) {
    PADDLE_THROW(platform::errors::InvalidArgument(
        "argument (position %d) must be "
        "str, but got %s",
        arg_pos + 1, (reinterpret_cast<PyTypeObject*>(obj->ob_type))->tp_name));
    return "";  // unreachable after the throw; kept for the return type
  }
  Py_ssize_t size = 0;
  const char* data = PyUnicode_AsUTF8AndSize(obj, &size);
  return std::string(data, static_cast<size_t>(size));
}
// Extracts (copies) the egr::EagerTensor from a Python EagerTensor object;
// throws InvalidArgument for any other type.
egr::EagerTensor CastPyArg2EagerTensor(PyObject* obj, ssize_t arg_pos) {
  if (!PyObject_IsInstance(obj,
                           reinterpret_cast<PyObject*>(p_eager_tensor_type))) {
    PADDLE_THROW(platform::errors::InvalidArgument(
        "argument (position %d) must be "
        "EagerTensor, but got %s",
        arg_pos + 1, reinterpret_cast<PyTypeObject*>(obj->ob_type)->tp_name));
  }
  return reinterpret_cast<EagerTensorObject*>(obj)->eagertensor;
}
// Converts a Python list or tuple of EagerTensor into a std::vector.
// Throws InvalidArgument if `obj` is neither list nor tuple, or if any
// element is not an EagerTensor.
std::vector<egr::EagerTensor> CastPyArg2VectorOfEagerTensor(PyObject* obj,
                                                            ssize_t arg_pos) {
  std::vector<egr::EagerTensor> result;
  if (PyList_Check(obj)) {
    Py_ssize_t len = PyList_Size(obj);
    result.reserve(static_cast<size_t>(len));
    PyObject* item = nullptr;
    for (Py_ssize_t i = 0; i < len; i++) {
      item = PyList_GetItem(obj, i);
      if (PyObject_IsInstance(
              item, reinterpret_cast<PyObject*>(p_eager_tensor_type))) {
        result.emplace_back(
            reinterpret_cast<EagerTensorObject*>(item)->eagertensor);
      } else {
        // Message fixed: this function expects EagerTensor, not bool.
        PADDLE_THROW(platform::errors::InvalidArgument(
            "argument (position %d) must be "
            "list of EagerTensor, but got %s at pos %d",
            arg_pos + 1,
            reinterpret_cast<PyTypeObject*>(item->ob_type)->tp_name, i));
      }
    }
  } else if (PyTuple_Check(obj)) {
    Py_ssize_t len = PyTuple_Size(obj);
    result.reserve(static_cast<size_t>(len));
    PyObject* item = nullptr;
    for (Py_ssize_t i = 0; i < len; i++) {
      item = PyTuple_GetItem(obj, i);
      if (PyObject_IsInstance(
              item, reinterpret_cast<PyObject*>(p_eager_tensor_type))) {
        result.emplace_back(
            reinterpret_cast<EagerTensorObject*>(item)->eagertensor);
      } else {
        // Message fixed: this function expects EagerTensor, not bool.
        PADDLE_THROW(platform::errors::InvalidArgument(
            "argument (position %d) must be "
            "list of EagerTensor, but got %s at pos %d",
            arg_pos + 1,
            reinterpret_cast<PyTypeObject*>(item->ob_type)->tp_name, i));
      }
    }
  } else {
    PADDLE_THROW(platform::errors::InvalidArgument(
        "argument (position %d) must be "
        "list or tuple, but got %s",
        arg_pos + 1, reinterpret_cast<PyTypeObject*>(obj->ob_type)->tp_name));
  }
  return result;
}
// Converts a Python place object into platform::Place. Accepts the generic
// Place type and every concrete place type bound by pybind11 (the g_*_pytype
// globals are defined in the pybind bindings elsewhere); throws
// InvalidArgument for anything else.
platform::Place CastPyArg2Place(PyObject* obj, ssize_t arg_pos) {
  platform::Place place;
  if (PyObject_IsInstance(obj, reinterpret_cast<PyObject*>(g_place_pytype))) {
    place = ::pybind11::handle(obj).cast<platform::Place>();
  } else if (PyObject_IsInstance(
                 obj, reinterpret_cast<PyObject*>(g_cudaplace_pytype))) {
    place = ::pybind11::handle(obj).cast<platform::CUDAPlace>();
  } else if (PyObject_IsInstance(
                 obj, reinterpret_cast<PyObject*>(g_cpuplace_pytype))) {
    place = ::pybind11::handle(obj).cast<platform::CPUPlace>();
  } else if (PyObject_IsInstance(
                 obj, reinterpret_cast<PyObject*>(g_xpuplace_pytype))) {
    place = ::pybind11::handle(obj).cast<platform::XPUPlace>();
  } else if (PyObject_IsInstance(
                 obj, reinterpret_cast<PyObject*>(g_npuplace_pytype))) {
    place = ::pybind11::handle(obj).cast<platform::NPUPlace>();
  } else if (PyObject_IsInstance(
                 obj, reinterpret_cast<PyObject*>(g_cudapinnedplace_pytype))) {
    place = ::pybind11::handle(obj).cast<platform::CUDAPinnedPlace>();
  } else {
    PADDLE_THROW(platform::errors::InvalidArgument(
        "argument (position %d) must be "
        "one of(Place,CUDAPlace,CPUPlace,XPUPlace,NPUPlace,CUDAPinnedPlace), "
        "but got %s",
        arg_pos + 1, reinterpret_cast<PyTypeObject*>(obj->ob_type)->tp_name));
  }
  return place;
}
// bool -> Py_True/Py_False (new reference).
PyObject* ToPyObject(bool value) {
  PyObject* result = value ? Py_True : Py_False;
  Py_INCREF(result);
  return result;
}
// Scalar -> new Python object conversions.
PyObject* ToPyObject(int value) { return PyLong_FromLong(value); }

PyObject* ToPyObject(int64_t value) { return PyLong_FromLongLong(value); }

// Bug fix: these previously called PyLong_FromDouble, which truncates the
// fractional part and returns a Python int instead of a Python float.
PyObject* ToPyObject(float value) { return PyFloat_FromDouble(value); }

PyObject* ToPyObject(double value) { return PyFloat_FromDouble(value); }

PyObject* ToPyObject(const char* value) { return PyUnicode_FromString(value); }

PyObject* ToPyObject(const std::string& value) {
  return PyUnicode_FromString(value.c_str());
}
// egr::EagerTensor -> new Python EagerTensor object (copy-assigns the value
// into a placement-new'd payload). Throws Fatal if allocation fails.
PyObject* ToPyObject(const egr::EagerTensor& value) {
  PyObject* py_obj = p_eager_tensor_type->tp_alloc(p_eager_tensor_type, 0);
  if (py_obj == nullptr) {
    PADDLE_THROW(platform::errors::Fatal(
        "tp_alloc return null, can not new a PyObject."));
  }
  auto* wrapper = reinterpret_cast<EagerTensorObject*>(py_obj);
  new (&(wrapper->eagertensor)) egr::EagerTensor();
  wrapper->eagertensor = value;
  return py_obj;
}
// std::vector<bool> -> new Python list of bools.
PyObject* ToPyObject(const std::vector<bool>& value) {
  const Py_ssize_t n = static_cast<Py_ssize_t>(value.size());
  PyObject* list = PyList_New(n);
  for (Py_ssize_t i = 0; i < n; ++i) {
    PyList_SET_ITEM(list, i, ToPyObject(value[i]));
  }
  return list;
}
// std::vector<int> -> new Python list of ints.
PyObject* ToPyObject(const std::vector<int>& value) {
  const Py_ssize_t n = static_cast<Py_ssize_t>(value.size());
  PyObject* list = PyList_New(n);
  for (Py_ssize_t i = 0; i < n; ++i) {
    PyList_SET_ITEM(list, i, ToPyObject(value[i]));
  }
  return list;
}
// std::vector<int64_t> -> new Python list of ints.
PyObject* ToPyObject(const std::vector<int64_t>& value) {
  const Py_ssize_t n = static_cast<Py_ssize_t>(value.size());
  PyObject* list = PyList_New(n);
  for (Py_ssize_t i = 0; i < n; ++i) {
    PyList_SET_ITEM(list, i, ToPyObject(value[i]));
  }
  return list;
}
// std::vector<float> -> new Python list (elements via the float overload).
PyObject* ToPyObject(const std::vector<float>& value) {
  const Py_ssize_t n = static_cast<Py_ssize_t>(value.size());
  PyObject* list = PyList_New(n);
  for (Py_ssize_t i = 0; i < n; ++i) {
    PyList_SET_ITEM(list, i, ToPyObject(value[i]));
  }
  return list;
}
// std::vector<double> -> new Python list (elements via the double overload).
PyObject* ToPyObject(const std::vector<double>& value) {
  const Py_ssize_t n = static_cast<Py_ssize_t>(value.size());
  PyObject* list = PyList_New(n);
  for (Py_ssize_t i = 0; i < n; ++i) {
    PyList_SET_ITEM(list, i, ToPyObject(value[i]));
  }
  return list;
}
// std::vector<egr::EagerTensor> -> new Python list of EagerTensor objects
// (each element copy-assigned into a placement-new'd payload).
// NOTE(review): if tp_alloc fails mid-loop the partially built list is not
// released before the throw — potential leak; confirm whether PADDLE_THROW
// paths here are treated as fatal.
PyObject* ToPyObject(const std::vector<egr::EagerTensor>& value) {
  PyObject* result = PyList_New((Py_ssize_t)value.size());

  for (size_t i = 0; i < value.size(); i++) {
    PyObject* obj = p_eager_tensor_type->tp_alloc(p_eager_tensor_type, 0);
    if (obj) {
      auto v = reinterpret_cast<EagerTensorObject*>(obj);
      new (&(v->eagertensor)) egr::EagerTensor();
      v->eagertensor = value[i];
    } else {
      PADDLE_THROW(platform::errors::Fatal(
          "tp_alloc return null, can not new a PyObject."));
    }
    // PyList_SET_ITEM steals the reference to obj.
    PyList_SET_ITEM(result, static_cast<Py_ssize_t>(i), obj);
  }

  return result;
}
// platform::Place -> Python object via pybind11. inc_ref() hands the caller
// an owned reference that survives the local pybind11::object's destructor.
PyObject* ToPyObject(const platform::Place& value) {
  auto py_place = ::pybind11::cast(value);
  py_place.inc_ref();
  return py_place.ptr();
}
} // namespace pybind
} // namespace paddle
/* Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#pragma once
#include <Python.h>
#include "pybind11/pybind11.h"
#include "pybind11/stl.h"
namespace paddle {
namespace pybind {
// Python object layout backing core.eager.EagerTensor: a standard
// CPython object header followed by the wrapped C++ tensor.
// NOTE: tp_alloc does not run C++ constructors, so code that creates
// these objects must placement-new `eagertensor` before assigning to it
// (see the ToPyObject(egr::EagerTensor) implementations).
typedef struct {
  PyObject_HEAD egr::EagerTensor eagertensor;
} EagerTensorObject;
bool PyObject_CheckLongOrConvertToLong(PyObject** obj);
bool PyObject_CheckFloatOrConvertToFloat(PyObject** obj);
bool PyObject_CheckStr(PyObject* obj);
bool CastPyArg2AttrBoolean(PyObject* obj, ssize_t arg_pos);
int CastPyArg2AttrInt(PyObject* obj, ssize_t arg_pos);
int64_t CastPyArg2AttrLong(PyObject* obj, ssize_t arg_pos);
float CastPyArg2AttrFloat(PyObject* obj, ssize_t arg_pos);
std::string CastPyArg2AttrString(PyObject* obj, ssize_t arg_pos);
egr::EagerTensor CastPyArg2EagerTensor(PyObject* obj, ssize_t arg_pos);
std::vector<egr::EagerTensor> CastPyArg2VectorOfEagerTensor(PyObject* obj,
ssize_t arg_pos);
platform::Place CastPyArg2Place(PyObject* obj, ssize_t arg_pos);
PyObject* ToPyObject(int value);
PyObject* ToPyObject(bool value);
PyObject* ToPyObject(int64_t value);
PyObject* ToPyObject(float value);
PyObject* ToPyObject(double value);
PyObject* ToPyObject(const char* value);
PyObject* ToPyObject(const std::string& value);
PyObject* ToPyObject(const egr::EagerTensor& value);
PyObject* ToPyObject(const std::vector<bool>& value);
PyObject* ToPyObject(const std::vector<int>& value);
PyObject* ToPyObject(const std::vector<int64_t>& value);
PyObject* ToPyObject(const std::vector<float>& value);
PyObject* ToPyObject(const std::vector<double>& value);
PyObject* ToPyObject(const std::vector<egr::EagerTensor>& value);
PyObject* ToPyObject(const platform::Place& value);
} // namespace pybind
} // namespace paddle
......@@ -81,5 +81,48 @@ void BindException(pybind11::module* m) {
});
}
// Translate the C++ exception held in `p` into a Python exception.
//
// Intended to be called from a catch(...) block at a Python C-API
// boundary (see the EAGER_CATCH_AND_THROW_* macros): it rethrows `p`,
// maps the caught Paddle exception onto a Python exception type via
// PyErr_SetString, and returns with the Python error indicator set.
//
// NOTE(review): only platform::EOFException and platform::EnforceNotMet
// are caught here; any other exception type propagates out of this
// function — confirm all call sites wrap it in catch(...).
void ThrowExceptionToPython(std::exception_ptr p) {
  // Custom exception types, created lazily on first call and cached for
  // the lifetime of the process; surfaced to users as
  // paddle.EOFException and paddle.EnforceNotMet.
  static PyObject* EOFExceptionException =
      PyErr_NewException("paddle.EOFException", PyExc_Exception, NULL);
  static PyObject* EnforceNotMetException =
      PyErr_NewException("paddle.EnforceNotMet", PyExc_Exception, NULL);
  try {
    if (p) std::rethrow_exception(p);
  } catch (const platform::EOFException& e) {
    PyErr_SetString(EOFExceptionException, e.what());
  } catch (const platform::EnforceNotMet& e) {
    // Map Paddle error codes onto the closest builtin Python exception;
    // codes without a natural builtin fall back to paddle.EnforceNotMet.
    switch (e.code()) {
      case paddle::platform::error::INVALID_ARGUMENT:
        PyErr_SetString(PyExc_ValueError, e.what());
        break;
      case paddle::platform::error::NOT_FOUND:
      case paddle::platform::error::ALREADY_EXISTS:
      case paddle::platform::error::PRECONDITION_NOT_MET:
      case paddle::platform::error::PERMISSION_DENIED:
      case paddle::platform::error::EXECUTION_TIMEOUT:
      case paddle::platform::error::UNAVAILABLE:
        PyErr_SetString(PyExc_RuntimeError, e.what());
        break;
      case paddle::platform::error::OUT_OF_RANGE:
        PyErr_SetString(PyExc_IndexError, e.what());
        break;
      case paddle::platform::error::RESOURCE_EXHAUSTED:
        PyErr_SetString(PyExc_MemoryError, e.what());
        break;
      case paddle::platform::error::UNIMPLEMENTED:
        PyErr_SetString(PyExc_NotImplementedError, e.what());
        break;
      case paddle::platform::error::FATAL:
        PyErr_SetString(PyExc_SystemError, e.what());
        break;
      case paddle::platform::error::EXTERNAL:
        PyErr_SetString(PyExc_OSError, e.what());
        break;
      default:
        PyErr_SetString(EnforceNotMetException, e.what());
        break;
    }
  }
}
} // namespace pybind
} // namespace paddle
......@@ -18,10 +18,26 @@ limitations under the License. */
#include "paddle/fluid/platform/enforce.h"
#include "pybind11/pybind11.h"
// Opens a try { ... } region at the top of an eager Python C-API entry
// point; must be closed by one of the EAGER_CATCH_AND_THROW_* macros.
#define EAGER_TRY try {

// Closes an EAGER_TRY region for entry points returning PyObject*:
// converts any in-flight C++ exception into a Python exception via
// ThrowExceptionToPython and returns nullptr to signal the error.
#define EAGER_CATCH_AND_THROW_RETURN_NULL                 \
  }                                                       \
  catch (...) {                                           \
    ThrowExceptionToPython(std::current_exception());     \
    return nullptr;                                       \
  }

// Same as above for entry points returning an integral value.
// NOTE(review): returning 0 signals success for many CPython
// int-returning slots — confirm call sites expect 0 on error.
#define EAGER_CATCH_AND_THROW_RETURN_ZERO                 \
  }                                                       \
  catch (...) {                                           \
    ThrowExceptionToPython(std::current_exception());     \
    return 0;                                             \
  }
namespace paddle {
namespace pybind {
void BindException(pybind11::module* m);
void ThrowExceptionToPython(std::exception_ptr p);
} // namespace pybind
} // namespace paddle
......@@ -29,6 +29,7 @@
#include "paddle/fluid/framework/variable.h"
#include "paddle/fluid/imperative/tracer.h"
#include "paddle/fluid/imperative/type_defs.h"
#include "paddle/fluid/pybind/exception.h"
#include "paddle/fluid/pybind/imperative.h"
namespace py = pybind11;
......@@ -992,50 +993,6 @@ void InitOpsAttrTypeMap() {
}
}
void ThrowExceptionToPython(std::exception_ptr p) {
static PyObject* EOFExceptionException =
PyErr_NewException("paddle.EOFException", PyExc_Exception, NULL);
static PyObject* EnforceNotMetException =
PyErr_NewException("paddle.EnforceNotMet", PyExc_Exception, NULL);
try {
if (p) std::rethrow_exception(p);
} catch (const platform::EOFException& e) {
PyErr_SetString(EOFExceptionException, e.what());
} catch (const platform::EnforceNotMet& e) {
switch (e.code()) {
case paddle::platform::error::INVALID_ARGUMENT:
PyErr_SetString(PyExc_ValueError, e.what());
break;
case paddle::platform::error::NOT_FOUND:
case paddle::platform::error::ALREADY_EXISTS:
case paddle::platform::error::PRECONDITION_NOT_MET:
case paddle::platform::error::PERMISSION_DENIED:
case paddle::platform::error::EXECUTION_TIMEOUT:
case paddle::platform::error::UNAVAILABLE:
PyErr_SetString(PyExc_RuntimeError, e.what());
break;
case paddle::platform::error::OUT_OF_RANGE:
PyErr_SetString(PyExc_IndexError, e.what());
break;
case paddle::platform::error::RESOURCE_EXHAUSTED:
PyErr_SetString(PyExc_MemoryError, e.what());
break;
case paddle::platform::error::UNIMPLEMENTED:
PyErr_SetString(PyExc_NotImplementedError, e.what());
break;
case paddle::platform::error::FATAL:
PyErr_SetString(PyExc_SystemError, e.what());
break;
case paddle::platform::error::EXTERNAL:
PyErr_SetString(PyExc_OSError, e.what());
break;
default:
PyErr_SetString(EnforceNotMetException, e.what());
break;
}
}
}
} // namespace pybind
} // namespace paddle
......
......@@ -75,6 +75,7 @@ limitations under the License. */
#include "paddle/fluid/platform/place.h"
#include "paddle/fluid/platform/profiler.h"
#include "paddle/fluid/pybind/cuda_streams_py.h"
#include "paddle/fluid/pybind/eager.h"
#include "paddle/fluid/pybind/io.h"
#include "paddle/utils/none.h"
#ifdef PADDLE_WITH_ASCEND
......@@ -150,6 +151,14 @@ PYBIND11_MAKE_OPAQUE(paddle::framework::FetchType);
namespace paddle {
namespace pybind {
PyTypeObject *g_place_pytype = nullptr;
PyTypeObject *g_cudaplace_pytype = nullptr;
PyTypeObject *g_cpuplace_pytype = nullptr;
PyTypeObject *g_xpuplace_pytype = nullptr;
PyTypeObject *g_npuplace_pytype = nullptr;
PyTypeObject *g_cudapinnedplace_pytype = nullptr;
bool IsCompiledWithCUDA() {
#if !defined(PADDLE_WITH_CUDA) && !defined(PADDLE_WITH_HIP)
return false;
......@@ -524,6 +533,7 @@ PYBIND11_MODULE(core_avx, m) {
PYBIND11_MODULE(core_noavx, m) {
#endif
BindEager(&m);
BindCudaStream(&m);
// Not used, just make sure cpu_info.cc is linked.
......@@ -1599,7 +1609,7 @@ All parameter, weight, gradient are variables in Paddle.
#if defined(PADDLE_WITH_NCCL) || defined(PADDLE_WITH_RCCL)
py::class_<platform::Communicator>(m, "Communicator").def(py::init<>());
#endif
py::class_<platform::CUDAPlace>(m, "CUDAPlace", R"DOC(
py::class_<platform::CUDAPlace> cudaplace(m, "CUDAPlace", R"DOC(
CUDAPlace is a descriptor of a device.
It represents a GPU device allocated or to be allocated with Tensor or LoDTensor.
......@@ -1622,7 +1632,9 @@ All parameter, weight, gradient are variables in Paddle.
place = paddle.CUDAPlace(0)
)DOC")
)DOC");
g_cudaplace_pytype = reinterpret_cast<PyTypeObject *>(cudaplace.ptr());
cudaplace
.def("__init__",
[](platform::CUDAPlace &self, int dev_id) {
#if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
......@@ -1680,13 +1692,15 @@ All parameter, weight, gradient are variables in Paddle.
.def("__repr__", string::to_string<const platform::CUDAPlace &>)
.def("__str__", string::to_string<const platform::CUDAPlace &>);
py::class_<platform::XPUPlace>(m, "XPUPlace", R"DOC(
py::class_<platform::XPUPlace> xpuplace(m, "XPUPlace", R"DOC(
**Note**:
Examples:
.. code-block:: python
import paddle.fluid as fluid
xpu_place = fluid.XPUPlace(0)
)DOC")
)DOC");
g_xpuplace_pytype = reinterpret_cast<PyTypeObject *>(xpuplace.ptr());
xpuplace
.def("__init__",
[](platform::XPUPlace &self, int dev_id) {
#ifdef PADDLE_WITH_XPU
......@@ -1756,7 +1770,7 @@ All parameter, weight, gradient are variables in Paddle.
});
#endif
py::class_<paddle::platform::CPUPlace>(m, "CPUPlace", R"DOC(
py::class_<paddle::platform::CPUPlace> cpuplace(m, "CPUPlace", R"DOC(
CPUPlace is a descriptor of a device.
It represents a CPU device on which a tensor will be allocated and a model will run.
......@@ -1766,8 +1780,9 @@ All parameter, weight, gradient are variables in Paddle.
import paddle
cpu_place = paddle.CPUPlace()
)DOC")
.def(py::init<>())
)DOC");
g_cpuplace_pytype = reinterpret_cast<PyTypeObject *>(cpuplace.ptr());
cpuplace.def(py::init<>())
.def("_type", &PlaceIndex<platform::CPUPlace>)
.def("_equals", &IsSamePlace<platform::CPUPlace, platform::Place>)
.def("_equals", &IsSamePlace<platform::CPUPlace, platform::XPUPlace>)
......@@ -1779,7 +1794,8 @@ All parameter, weight, gradient are variables in Paddle.
.def("__repr__", string::to_string<const platform::CPUPlace &>)
.def("__str__", string::to_string<const platform::CPUPlace &>);
py::class_<paddle::platform::CUDAPinnedPlace>(m, "CUDAPinnedPlace", R"DOC(
py::class_<paddle::platform::CUDAPinnedPlace> cudapinnedplace(
m, "CUDAPinnedPlace", R"DOC(
CUDAPinnedPlace is a descriptor of a device.
It refers to the page locked memory allocated by the CUDA function `cudaHostAlloc()` in the host memory.
The host operating system will not paging and exchanging the memory.
......@@ -1793,7 +1809,10 @@ All parameter, weight, gradient are variables in Paddle.
import paddle
place = paddle.CUDAPinnedPlace()
)DOC")
)DOC");
g_cudapinnedplace_pytype =
reinterpret_cast<PyTypeObject *>(cudapinnedplace.ptr());
cudapinnedplace
.def("__init__",
[](platform::CUDAPinnedPlace &self) {
#if !defined(PADDLE_WITH_CUDA) && !defined(PADDLE_WITH_HIP)
......@@ -1819,7 +1838,7 @@ All parameter, weight, gradient are variables in Paddle.
.def("__str__", string::to_string<const platform::CUDAPinnedPlace &>);
// NPUPlace
py::class_<platform::NPUPlace>(m, "NPUPlace", R"DOC(
py::class_<platform::NPUPlace> npuplace(m, "NPUPlace", R"DOC(
NPUPlace is a descriptor of a device.
It represents a NPU device on which a tensor will be allocated and a model will run.
......@@ -1828,7 +1847,9 @@ All parameter, weight, gradient are variables in Paddle.
import paddle
npu_place = paddle.NPUPlace(0)
)DOC")
)DOC");
g_npuplace_pytype = reinterpret_cast<PyTypeObject *>(npuplace.ptr());
npuplace
.def("__init__",
[](platform::NPUPlace &self, int dev_id) {
#ifdef PADDLE_WITH_ASCEND_CL
......@@ -1879,8 +1900,9 @@ All parameter, weight, gradient are variables in Paddle.
[](const platform::NPUPlace &self) { return self.GetDeviceId(); })
.def("__str__", string::to_string<const platform::NPUPlace &>);
py::class_<platform::Place>(m, "Place")
.def(py::init<>())
py::class_<platform::Place> platformplace(m, "Place");
g_place_pytype = reinterpret_cast<PyTypeObject *>(platformplace.ptr());
platformplace.def(py::init<>())
.def("_type", &PlaceIndex<platform::Place>)
.def("_equals", &IsSamePlace<platform::Place, platform::Place>)
.def("_equals", &IsSamePlace<platform::Place, platform::CUDAPlace>)
......
if(WITH_GPU)
cc_library(convert_utils SRCS convert_utils.cc DEPS data_type place gpu_info)
cc_library(convert_utils SRCS convert_utils.cc DEPS data_type place gpu_info python)
elseif(WITH_ROCM)
cc_library(convert_utils SRCS convert_utils.cc DEPS data_type place gpu_info)
cc_library(convert_utils SRCS convert_utils.cc DEPS data_type place gpu_info python)
else()
cc_library(convert_utils SRCS convert_utils.cc DEPS data_type place)
cc_library(convert_utils SRCS convert_utils.cc DEPS data_type place python)
endif()
cc_library(kernel_factory SRCS kernel_factory.cc DEPS enforce)
......
......@@ -11,8 +11,9 @@ distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include "paddle/pten/core/convert_utils.h"
#include "paddle/fluid/operators/py_func_op.h"
#include "paddle/fluid/pybind/tensor_py.h"
// See Note [ Why still include the fluid headers? ]
#include "paddle/fluid/platform/gpu_info.h"
......@@ -180,4 +181,127 @@ pten::LoD TransToPtenLoD(const paddle::framework::LoD& lod) {
return out;
}
// Returns the size in bytes of a single element of the given pten
// DataType. Returns 0 for DataType::UNDEFINED and for any enum value
// not handled below, rather than raising an error.
size_t DataTypeSize(DataType dtype) {
  switch (dtype) {
    case DataType::UNDEFINED:
      return 0;
    case DataType::BOOL:
      return sizeof(bool);
    case DataType::INT8:
      return sizeof(int8_t);
    case DataType::UINT8:
      return sizeof(uint8_t);
    case DataType::INT16:
      return sizeof(int16_t);
    case DataType::INT32:
      return sizeof(int);
    case DataType::INT64:
      return sizeof(int64_t);
    case DataType::FLOAT16:
      return sizeof(paddle::platform::float16);
    case DataType::FLOAT32:
      return sizeof(float);
    case DataType::FLOAT64:
      return sizeof(double);
    case DataType::COMPLEX64:
      return sizeof(paddle::platform::complex<float>);
    case DataType::COMPLEX128:
      return sizeof(paddle::platform::complex<double>);
    default:
      return 0;
  }
}
// Map a dtype name as used on the Python side (e.g. "float32") to the
// corresponding pten::DataType. Unrecognized names map to
// DataType::UNDEFINED rather than raising an error.
DataType String2DataType(const std::string& str) {
  if (str == "bool") return DataType::BOOL;
  if (str == "float16") return DataType::FLOAT16;
  if (str == "float32") return DataType::FLOAT32;
  if (str == "float64") return DataType::FLOAT64;
  if (str == "int8") return DataType::INT8;
  if (str == "int16") return DataType::INT16;
  if (str == "int32") return DataType::INT32;
  if (str == "int64") return DataType::INT64;
  if (str == "uint8") return DataType::UINT8;
  if (str == "complex64") return DataType::COMPLEX64;
  if (str == "complex128") return DataType::COMPLEX128;
  return DataType::UNDEFINED;
}
// Map a pten::DataType to the dtype name used on the Python side
// (e.g. DataType::FLOAT32 -> "float32").
//
// Throws platform::errors::InvalidArgument for values with no string
// form (including DataType::UNDEFINED).
std::string DataType2String(DataType dtype) {
  switch (dtype) {
    case DataType::BOOL:
      return "bool";
    case DataType::INT8:
      return "int8";
    case DataType::UINT8:
      return "uint8";
    case DataType::INT16:
      return "int16";
    case DataType::INT32:
      return "int32";
    case DataType::INT64:
      return "int64";
    case DataType::FLOAT16:
      return "float16";
    case DataType::FLOAT32:
      return "float32";
    case DataType::FLOAT64:
      return "float64";
    case DataType::COMPLEX64:
      return "complex64";
    case DataType::COMPLEX128:
      return "complex128";
    default:
      // Fixed typo in the user-facing message: "Unknow" -> "Unknown".
      PADDLE_THROW(paddle::platform::errors::InvalidArgument(
          "Unknown pten::DataType, the int value = %d.",
          static_cast<int>(dtype)));
      return "";  // Unreachable; keeps the compiler's return-path check happy.
  }
}
// Map a pten::DataType to the matching numpy type-number constant
// (pybind11::detail::npy_api::NPY_*), for building numpy arrays from
// eager tensors.
//
// Throws platform::errors::InvalidArgument for values with no numpy
// counterpart (including DataType::UNDEFINED).
int TensorDtype2NumpyDtype(pten::DataType dtype) {
  switch (dtype) {
    case pten::DataType::BOOL:
      return pybind11::detail::npy_api::NPY_BOOL_;
    case pten::DataType::INT8:
      return pybind11::detail::npy_api::NPY_INT8_;
    case pten::DataType::UINT8:
      return pybind11::detail::npy_api::NPY_UINT8_;
    case pten::DataType::INT16:
      return pybind11::detail::npy_api::NPY_INT16_;
    case pten::DataType::INT32:
      return pybind11::detail::npy_api::NPY_INT32_;
    case pten::DataType::INT64:
      return pybind11::detail::npy_api::NPY_INT64_;
    case pten::DataType::FLOAT16:
      return pybind11::detail::NPY_FLOAT16_;
    case pten::DataType::FLOAT32:
      return pybind11::detail::npy_api::NPY_FLOAT_;
    case pten::DataType::FLOAT64:
      return pybind11::detail::npy_api::NPY_DOUBLE_;
    case pten::DataType::COMPLEX64:
      return pybind11::detail::NPY_COMPLEX64;
    case pten::DataType::COMPLEX128:
      return pybind11::detail::NPY_COMPLEX128;
    default:
      // Fixed typo in the user-facing message: "Unknow" -> "Unknown".
      PADDLE_THROW(paddle::platform::errors::InvalidArgument(
          "Unknown pten::DataType, the int value = %d.",
          static_cast<int>(dtype)));
      return 0;  // Unreachable; keeps the compiler's return-path check happy.
  }
}
} // namespace pten
......@@ -45,4 +45,9 @@ paddle::framework::DataLayout TransToFluidDataLayout(const DataLayout& layout);
paddle::framework::LoD TransToFluidLoD(const pten::LoD& lod);
pten::LoD TransToPtenLoD(const paddle::framework::LoD& lod);
size_t DataTypeSize(DataType dtype);
DataType String2DataType(const std::string& str);
std::string DataType2String(DataType dtype);
int TensorDtype2NumpyDtype(pten::DataType dtype);
} // namespace pten
......@@ -55,6 +55,7 @@ from . import initializer
from .initializer import set_global_initializer
from . import layers
from . import dygraph
from . import eager
from . import contrib
from . import nets
from . import optimizer
......@@ -90,6 +91,7 @@ from .dygraph.base import enable_dygraph, disable_dygraph
from .io import save, load, load_program_state, set_program_state
from .dygraph.checkpoint import save_dygraph, load_dygraph
from .dygraph.varbase_patch_methods import monkey_patch_varbase
from .eager.eager_tensor_patch_methods import monkey_patch_eagertensor
from . import generator
from .core import _cuda_synchronize
from .generator import Generator
......@@ -113,6 +115,7 @@ __all__ = framework.__all__ + executor.__all__ + \
'contrib',
'data',
'dygraph',
'eager',
'enable_dygraph',
'disable_dygraph',
'enable_imperative',
......@@ -211,6 +214,7 @@ def __bootstrap__():
monkey_patch_variable()
__bootstrap__()
monkey_patch_varbase()
monkey_patch_eagertensor()
# NOTE(zhiqiu): register npu_finalize on the exit of Python,
# do some clean up manually.
......
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# incubate directory is mainly for internal use
# after we have tested incubate APIs in industrial application for a period
# we will move stable functions into fluid
from . import eager_tensor_patch_methods
__all__ = []
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import paddle.fluid.core as core
def monkey_patch_eagertensor():
    """Attach Python-side helpers to the C++ core.eager.EagerTensor type.

    Currently installs ``__str__`` so printing an eager tensor renders a
    readable summary instead of the default repr.
    """

    def __str__(self):
        # Imported lazily: paddle.tensor imports paddle.fluid, so a
        # module-level import here would be circular.
        from paddle.tensor.to_string import eager_tensor_to_string
        return eager_tensor_to_string(self)

    core.eager.EagerTensor.__str__ = __str__
......@@ -45,6 +45,8 @@ __all__ = [
'Program',
'default_startup_program',
'default_main_program',
'eager_guard',
'in_eager_mode',
'program_guard',
'name_scope',
'cuda_places',
......@@ -75,6 +77,21 @@ _current_device = None
global_prog_seed = 0
_current_pipeline_stage = None
_global_flags_ = core.globals()
_eager_mode_ = False
@signature_safe_contextmanager
def eager_guard():
    """Context manager that enables eager mode for the enclosed block.

    Sets the module-level ``_eager_mode_`` flag (read by
    ``in_eager_mode()``) to True on entry and always restores it to
    False on exit, even if the body raises.
    """
    global _eager_mode_
    _eager_mode_ = True
    try:
        yield
    finally:
        _eager_mode_ = False
def in_eager_mode():
    # True only while execution is inside an active ``eager_guard()``
    # scope; False otherwise.
    return _eager_mode_
def require_version(min_version, max_version=None):
......@@ -340,6 +357,9 @@ def _set_dygraph_tracer_expected_place(place):
def _set_expected_place(place):
    # Record the globally expected device placement, then propagate it
    # to whichever runtime is active: the eager C++ layer when inside an
    # eager_guard() scope, otherwise the legacy dygraph tracer.
    global _global_expected_place_
    _global_expected_place_ = place
    if in_eager_mode():
        return core.eager._set_expected_place(place)
    else:
        _set_dygraph_tracer_expected_place(place)
......
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import paddle.fluid.core as core
import paddle.fluid.eager.eager_tensor_patch_methods as eager_tensor_patch_methods
import paddle
import numpy as np
from paddle.fluid import eager_guard
import unittest
class EagerScaleTestCase(unittest.TestCase):
    """Exercises core.eager.scale and the eager backward pass on CPU."""

    def test_scale_base(self):
        # Repeated scaling must preserve the shape and the default
        # stop_gradient value (True).
        with eager_guard():
            paddle.set_device("cpu")
            arr = np.ones([4, 16, 16, 32]).astype('float32')
            tensor = paddle.to_tensor(arr, 'float32', core.CPUPlace())
            print(tensor)
            tensor = core.eager.scale(tensor, 2.0, 0.9, True, False)
            for i in range(0, 100):
                tensor = core.eager.scale(tensor, 2.0, 0.9, True, False)
            print(tensor)
            self.assertEqual(tensor.shape, [4, 16, 16, 32])
            self.assertEqual(tensor.stop_gradient, True)

    def test_retain_grad_and_run_backward(self):
        # retain_grad_for_tensor must make the input's grad observable:
        # uninitialized before run_backward, initialized after, and equal
        # to the all-ones array the assertion compares against.
        with eager_guard():
            paddle.set_device("cpu")
            input_data = np.ones([4, 16, 16, 32]).astype('float32')
            data_eager = paddle.to_tensor(input_data, 'float32',
                                          core.CPUPlace(), False)
            grad_data = np.ones([4, 16, 16, 32]).astype('float32')
            grad_eager = paddle.to_tensor(grad_data, 'float32', core.CPUPlace())

            core.eager.retain_grad_for_tensor(data_eager)

            out_eager = core.eager.scale(data_eager, 1.0, 0.9, True, True)
            self.assertFalse(data_eager.grad._is_initialized())
            core.eager.run_backward([out_eager], [grad_eager], False)
            self.assertTrue(data_eager.grad._is_initialized())
            self.assertTrue(np.array_equal(data_eager.grad.numpy(), input_data))
class EagerDtypeTestCase(unittest.TestCase):
    """Checks that to_tensor round-trips every supported numpy dtype."""

    def check_to_tensor_and_numpy(self, dtype):
        # Build a random numpy array of `dtype`, convert it to an eager
        # tensor, and verify both dtype and values survive the round
        # trip back through .numpy().
        # (Renamed from the misspelled `check_to_tesnsor_and_numpy`;
        # the helper is only called from within this class.)
        with eager_guard():
            arr = np.random.random([4, 16, 16, 32]).astype(dtype)
            tensor = paddle.to_tensor(arr, dtype)
            self.assertEqual(tensor.dtype, dtype)
            self.assertTrue(np.array_equal(arr, tensor.numpy()))

    def test_dtype_base(self):
        # Loop replaces eleven copy-pasted calls; coverage is unchanged.
        for dtype in ('bool', 'int8', 'uint8', 'int16', 'int32', 'int64',
                      'float16', 'float32', 'float64', 'complex64',
                      'complex128'):
            self.check_to_tensor_and_numpy(dtype)
class EagerTensorPropertiesTestCase(unittest.TestCase):
    """Covers the basic EagerTensor properties exposed from C++."""

    def test_properties(self):
        with eager_guard():
            paddle.set_device("cpu")
            arr = np.ones([4, 16, 16, 32]).astype('float32')
            tensor = paddle.to_tensor(arr, 'float32', core.CPUPlace())
            self.assertEqual(tensor.shape, [4, 16, 16, 32])
            # name is read/write.
            tensor.name = 'tensor_name_test'
            self.assertEqual(tensor.name, 'tensor_name_test')
            # persistable is read/write and starts out False here.
            self.assertEqual(tensor.persistable, False)
            tensor.persistable = True
            self.assertEqual(tensor.persistable, True)
            tensor.persistable = False
            self.assertEqual(tensor.persistable, False)
            # Placement reflects the device selected above.
            self.assertTrue(tensor.place.is_cpu_place())
            self.assertEqual(tensor._place_str, 'CPUPlace')
            # stop_gradient is read/write and starts out True here.
            self.assertEqual(tensor.stop_gradient, True)
            tensor.stop_gradient = False
            self.assertEqual(tensor.stop_gradient, False)
            tensor.stop_gradient = True
            self.assertEqual(tensor.stop_gradient, True)
if __name__ == "__main__":
unittest.main()
......@@ -31,6 +31,7 @@ from ..fluid.framework import convert_np_dtype_to_dtype_, in_dygraph_mode, _varb
from ..fluid.layers import linspace # noqa: F401
import paddle
from paddle import _C_ops
from ..fluid.framework import in_eager_mode
__all__ = []
......@@ -115,6 +116,12 @@ def to_tensor(data, dtype=None, place=None, stop_gradient=True):
) != _current_expected_place()._get_device_id():
place = _current_expected_place()
if in_eager_mode():
if dtype is None:
dtype = paddle.get_default_dtype()
return core.eager.to_tensor(data,
convert_dtype(dtype), place, stop_gradient)
if not isinstance(data, np.ndarray):
def _handle_dtype(data, dtype):
......
......@@ -255,3 +255,39 @@ def to_string(var, prefix='Tensor'):
stop_gradient=var.stop_gradient,
indent=' ' * indent,
data=data)
def eager_tensor_to_string(tensor, prefix='Tensor'):
    """Render an eager tensor as a human-readable string.

    The header line shows shape, dtype, place and stop_gradient; the
    (possibly summarized) numpy data follows, indented to line up under
    the prefix. Uninitialized tensors render as "Tensor(Not initialized)".
    """
    indent = len(prefix) + 1
    _template = "{prefix}(shape={shape}, dtype={dtype}, place={place}, stop_gradient={stop_gradient},\n{indent}{data})"

    if not tensor._is_initialized():
        return "Tensor(Not initialized)"

    np_tensor = tensor.numpy()

    # Element count; an empty shape is treated as size 0 here, matching
    # the original behavior.
    size = 0 if len(tensor.shape) == 0 else 1
    for dim in tensor.shape:
        size *= dim

    # Summarize (elide) the data once it exceeds the print threshold.
    summarize = size > DEFAULT_PRINT_OPTIONS.threshold

    max_width, signed = _get_max_width(_to_summary(np_tensor))
    data = _format_tensor(
        np_tensor, summarize, indent=indent, max_width=max_width,
        signed=signed)

    return _template.format(
        prefix=prefix,
        shape=tensor.shape,
        dtype=tensor.dtype,
        place=tensor._place_str,
        stop_gradient=tensor.stop_gradient,
        indent=' ' * indent,
        data=data)
......@@ -307,6 +307,7 @@ packages=['paddle',
'paddle.fluid.dygraph',
'paddle.fluid.dygraph.dygraph_to_static',
'paddle.fluid.dygraph.amp',
'paddle.fluid.eager',
'paddle.fluid.proto',
'paddle.fluid.proto.profiler',
'paddle.fluid.distributed',
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册