未验证 提交 539293e2 编写于 作者: H HongyuJia 提交者: GitHub

[Extension Operants] Extension supports tensor operants (#50869)

* [Extension Operants] Extension supports tensor operants

* Polish fluid init_tensor_operants
上级 a0c473f4
......@@ -42,7 +42,10 @@ set(PYBIND_DEPS
auto_parallel
jit_layer
jit_property
prim_utils)
prim_utils
operants_manager
phi_tensor_operants
static_tensor_operants)
if(WITH_PSCORE)
set(PYBIND_DEPS ${PYBIND_DEPS} ps_service)
......@@ -498,10 +501,7 @@ if(WITH_PYTHON)
list(APPEND PYBIND_DEPS custom_operator)
list(APPEND PYBIND_DEPS custom_operator_node)
list(APPEND PYBIND_DEPS tensor_api)
list(APPEND PYBIND_DEPS operants_manager)
list(APPEND PYBIND_DEPS eager_tensor_operants)
list(APPEND PYBIND_DEPS static_tensor_operants)
list(APPEND PYBIND_DEPS phi_tensor_operants)
list(APPEND PYBIND_DEPS pybind_util)
endif()
......
......@@ -40,8 +40,6 @@ typedef SSIZE_T ssize_t;
#include "paddle/fluid/platform/device/gpu/gpu_info.h"
#include "paddle/fluid/platform/dynload/dynamic_loader.h"
#include "paddle/fluid/platform/enforce.h"
#include "paddle/fluid/prim/utils/eager/eager_tensor_operants.h"
#include "paddle/fluid/prim/utils/static/static_tensor_operants.h"
#include "paddle/fluid/pybind/eager.h"
#include "paddle/fluid/pybind/eager_utils.h"
#include "paddle/fluid/pybind/exception.h"
......@@ -499,20 +497,6 @@ static PyObject* eager_api_jit_function_call(PyObject* self,
EAGER_CATCH_AND_THROW_RETURN_NULL
}
// Python binding: installs the eager- and static-mode tensor operant
// implementations into the global OperantsManager singleton, so that
// Tensor operator overloads can dispatch in both execution modes.
// Returns Py_None on success; converts C++ exceptions to Python errors
// via the EAGER_TRY / EAGER_CATCH_AND_THROW_RETURN_NULL macros.
static PyObject* eager_api_init_eager_and_static_tensor_operants(
PyObject* self, PyObject* args, PyObject* kwargs) {
EAGER_TRY
// reset() replaces any previously installed operants; the manager's
// smart pointers take ownership of the freshly allocated objects.
paddle::OperantsManager::Instance().eager_operants.reset(
new paddle::prim::EagerTensorOperants());
paddle::OperantsManager::Instance().static_operants.reset(
new paddle::prim::StaticTensorOperants());
VLOG(4) << "Initialize eager and static tensor operants successfully";
RETURN_PY_NONE
EAGER_CATCH_AND_THROW_RETURN_NULL
}
static PyObject* eager_api_run_custom_op(PyObject* self,
PyObject* args,
PyObject* kwargs) {
......@@ -1123,11 +1107,6 @@ PyMethodDef variable_functions[] = {
(PyCFunction)(void (*)(void))eager_api_run_custom_op,
METH_VARARGS | METH_KEYWORDS,
NULL},
{"_init_eager_and_static_tensor_operants",
(PyCFunction)(void (*)(
void))eager_api_init_eager_and_static_tensor_operants,
METH_VARARGS | METH_KEYWORDS,
NULL},
{"tensor_copy",
(PyCFunction)(void (*)(void))eager_api_tensor_copy,
METH_VARARGS | METH_KEYWORDS,
......
......@@ -196,8 +196,12 @@ limitations under the License. */
#include "paddle/fluid/eager/api/utils/global_utils.h"
#include "paddle/fluid/imperative/layout_autotune.h"
#include "paddle/fluid/prim/utils/eager/eager_tensor_operants.h"
#include "paddle/fluid/prim/utils/static/static_tensor_operants.h"
#include "paddle/fluid/pybind/eager_utils.h"
#include "paddle/phi/api/ext/op_meta_info.h"
#include "paddle/phi/api/include/operants_manager.h"
#include "paddle/phi/api/include/tensor_operants.h"
#include "paddle/phi/kernels/autotune/cache.h"
#include "paddle/phi/kernels/autotune/switch_autotune.h"
#include "pybind11/stl.h"
......@@ -1911,6 +1915,15 @@ All parameter, weight, gradient are variables in Paddle.
});
m.def("init_default_kernel_signatures",
[]() { framework::InitDefaultKernelSignatureMap(); });
// Expose init_tensor_operants to Python: installs all three tensor-operant
// backends (eager, static, phi) into the global OperantsManager. Invoked
// during framework bootstrap (see core.init_tensor_operants() in the Python
// __bootstrap__ routine). reset() replaces any previously installed backend
// and the manager's smart pointers take ownership of the new objects.
m.def("init_tensor_operants", []() {
paddle::OperantsManager::Instance().eager_operants.reset(
new paddle::prim::EagerTensorOperants());
paddle::OperantsManager::Instance().static_operants.reset(
new paddle::prim::StaticTensorOperants());
// phi_operants is the backend used by C++ extensions / custom operators.
paddle::OperantsManager::Instance().phi_operants.reset(
new paddle::operants::PhiTensorOperants());
VLOG(4) << "Initialize tensor operants successfully";
});
m.def("is_compiled_with_avx", IsCompiledWithAVX);
m.def("is_compiled_with_cuda", IsCompiledWithCUDA);
m.def("is_compiled_with_ascend", IsCompiledWithAscend);
......
......@@ -17,5 +17,5 @@ if(NOT ((NOT WITH_PYTHON) AND ON_INFER))
cc_library(
pybind_util
SRCS pybind.cc
DEPS phi_tensor_raw)
DEPS phi_tensor_raw flags)
endif()
......@@ -13,8 +13,11 @@
// limitations under the License.
#include "paddle/utils/pybind.h"
#include "gflags/gflags.h"
#include "paddle/phi/core/enforce.h"
DECLARE_string(tensor_operants_mode);
namespace paddle {
namespace pybind {
......@@ -66,5 +69,7 @@ PyObject* ToPyObject(const paddle::experimental::Tensor& value,
return obj;
}
void EnableTensorOperantsToPhiMode() { FLAGS_tensor_operants_mode = "phi"; }
} // namespace pybind
} // namespace paddle
......@@ -46,6 +46,9 @@ paddle::experimental::Tensor CastPyArg2Tensor(PyObject* obj, ssize_t arg_pos);
PyObject* ToPyObject(const paddle::experimental::Tensor& value,
bool return_py_none_if_not_initialize = false);
// Internal use only, switch tensor_operants_mode to phi
void EnableTensorOperantsToPhiMode();
} // namespace pybind
} // namespace paddle
......@@ -59,6 +62,7 @@ struct type_caster<paddle::experimental::Tensor> {
_("paddle::experimental::Tensor"));
bool load(handle src, bool) {
paddle::pybind::EnableTensorOperantsToPhiMode();
PyObject* obj = src.ptr();
if (paddle::pybind::PyCheckTensor(obj)) {
value = paddle::pybind::CastPyArg2Tensor(obj, 0);
......
......@@ -230,7 +230,7 @@ def __bootstrap__():
core.init_glog(sys.argv[0])
# don't init_p2p when in unittest to save time.
core.init_devices()
core.eager._init_eager_and_static_tensor_operants()
core.init_tensor_operants()
core.init_default_kernel_signatures()
core.init_memory_method()
......
......@@ -21,7 +21,7 @@
paddle::Tensor custom_sub(paddle::Tensor x, paddle::Tensor y);
// Custom-operator test kernel: element-wise exp(x) + exp(y).
// Written with Tensor member operants (Tensor::exp, operator+) instead of the
// free-function paddle::exp/paddle::add API, to exercise extension support
// for tensor operants dispatched through OperantsManager.
// NOTE(review): the previous text contained two consecutive return
// statements (a stale diff line); the unreachable first return
// `paddle::add(paddle::exp(x), paddle::exp(y))` was removed.
paddle::Tensor custom_add(const paddle::Tensor& x, const paddle::Tensor& y) {
return x.exp() + y.exp();
}
paddle::Tensor nullable_tensor(bool return_none = false) {
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册