Unverified commit 1543c4cf, authored by Yi Wang, committed by GitHub

Fix cpplint errors of paddle/fluid/pybind and add some tests (#9694)

* cpplint test and add tensor_py_test.cc

* Update

* Update
Parent 6ba26257
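The recurring fix in this commit is cpplint's runtime/references rule: a parameter that gets mutated should be passed by pointer rather than by non-const reference, so the mutation is visible at the call site. A minimal sketch of the pattern, assuming the pre-2.2 pybind11 API used by this code; `BindFoo` and the `example` module are illustrative, not part of the commit:

```cpp
#include "pybind11/pybind11.h"

// Before (flagged by cpplint runtime/references):
//   void BindFoo(pybind11::module& m) { m.def("foo", [] { return 42; }); }

// After: the module is passed by pointer, so the caller writes &m and
// the mutation is explicit.
void BindFoo(pybind11::module* m) {
  m->def("foo", [] { return 42; });
}

// Hypothetical module init, mirroring how pybind.cc calls the Bind*
// functions below (PYBIND11_PLUGIN is the pre-2.2 pybind11 entry macro).
PYBIND11_PLUGIN(example) {
  pybind11::module m("example");
  BindFoo(&m);
  return m.ptr();
}
```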
paddle/fluid/pybind/CMakeLists.txt:

@@ -15,4 +15,6 @@ if(WITH_PYTHON)
       target_link_libraries(paddle_pybind rt)
     endif(NOT APPLE AND NOT ANDROID)
   endif(WITH_AMD_GPU)
+
+  cc_test(tensor_py_test SRCS tensor_py_test.cc DEPS python)
 endif(WITH_PYTHON)
paddle/fluid/pybind/const_value.cc:

@@ -12,17 +12,17 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License. */

-#include "const_value.h"
+#include "paddle/fluid/pybind/const_value.h"
 #include "paddle/fluid/framework/operator.h"

 namespace paddle {
 namespace pybind {

-void BindConstValue(pybind11::module& m) {
-  m.def("kEmptyVarName", [] { return framework::kEmptyVarName; });
-  m.def("kTempVarName", [] { return framework::kTempVarName; });
-  m.def("kGradVarSuffix", [] { return framework::kGradVarSuffix; });
-  m.def("kZeroVarSuffix", [] { return framework::kZeroVarSuffix; });
+void BindConstValue(pybind11::module* m) {
+  m->def("kEmptyVarName", [] { return framework::kEmptyVarName; });
+  m->def("kTempVarName", [] { return framework::kTempVarName; });
+  m->def("kGradVarSuffix", [] { return framework::kGradVarSuffix; });
+  m->def("kZeroVarSuffix", [] { return framework::kZeroVarSuffix; });
 }

 }  // namespace pybind
...
paddle/fluid/pybind/const_value.h:

@@ -11,16 +11,17 @@ distributed under the License is distributed on an "AS IS" BASIS,
 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License. */

 #pragma once

 #include <Python.h>

 #include "paddle/fluid/platform/enforce.h"
 #include "pybind11/pybind11.h"

-namespace py = pybind11;
-
 namespace paddle {
 namespace pybind {

-extern void BindConstValue(pybind11::module& m);
+void BindConstValue(pybind11::module* m);

 }  // namespace pybind
 }  // namespace paddle
paddle/fluid/pybind/exception.cc:

@@ -17,8 +17,8 @@ limitations under the License. */
 namespace paddle {
 namespace pybind {

-void BindException(pybind11::module& m) {
-  static pybind11::exception<platform::EnforceNotMet> exc(m, "EnforceNotMet");
+void BindException(pybind11::module* m) {
+  static pybind11::exception<platform::EnforceNotMet> exc(*m, "EnforceNotMet");
   pybind11::register_exception_translator([](std::exception_ptr p) {
     try {
       if (p) std::rethrow_exception(p);

@@ -27,7 +27,8 @@ void BindException(pybind11::module& m) {
     }
   });

-  m.def("__unittest_throw_exception__", [] { PADDLE_THROW("test exception"); });
+  m->def("__unittest_throw_exception__",
+         [] { PADDLE_THROW("test exception"); });
 }

 }  // namespace pybind
...
paddle/fluid/pybind/exception.h:

@@ -11,14 +11,17 @@ distributed under the License is distributed on an "AS IS" BASIS,
 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License. */

 #pragma once

 #include <Python.h>

 #include "paddle/fluid/platform/enforce.h"
 #include "pybind11/pybind11.h"

 namespace paddle {
 namespace pybind {

-extern void BindException(pybind11::module& m);
+void BindException(pybind11::module* m);

 }  // namespace pybind
 }  // namespace paddle
paddle/fluid/pybind/protobuf.cc:

@@ -11,12 +11,13 @@ distributed under the License is distributed on an "AS IS" BASIS,
 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License. */

 #include "paddle/fluid/pybind/protobuf.h"

 #include <deque>
 #include <iostream>
 #include <string>
 #include <tuple>

 #include "paddle/fluid/framework/backward.h"
 #include "paddle/fluid/framework/block_desc.h"
 #include "paddle/fluid/framework/op_desc.h"

@@ -97,10 +98,11 @@ struct type_caster<boost::variant<Args...>>
 namespace paddle {
 namespace pybind {

-using namespace paddle::framework;  // NOLINT
+namespace pd = paddle::framework;

 template <typename T>
-static py::bytes SerializeMessage(T &self) {  // NOLINT
+static pybind11::bytes SerializeMessage(
+    T &self) {  // NOLINT due to pybind11 convention.
   // Check IsInitialized in Python
   std::string retv;
   PADDLE_ENFORCE(self.Proto()->SerializePartialToString(&retv),

@@ -109,24 +111,24 @@ static py::bytes SerializeMessage(T &self) {  // NOLINT
 }

 // Bind Methods
-void BindProgramDesc(py::module &m) {  // NOLINT
-  py::class_<ProgramDesc>(m, "ProgramDesc", "")
-      .def(py::init<>())
+void BindProgramDesc(pybind11::module *m) {
+  pybind11::class_<pd::ProgramDesc>(*m, "ProgramDesc", "")
+      .def(pybind11::init<>())
       .def("__init__",
-           [](ProgramDesc &self, const ProgramDesc &other) {
-             new (&self) ProgramDesc(other);
+           [](pd::ProgramDesc &self, const pd::ProgramDesc &other) {
+             new (&self) pd::ProgramDesc(other);
           })
       .def("__init__",
-           [](ProgramDesc &self, const py::bytes &binary_str) {
+           [](pd::ProgramDesc &self, const pybind11::bytes &binary_str) {
             std::string str(binary_str);
-             new (&self) ProgramDesc(str);
+             new (&self) pd::ProgramDesc(str);
           })
-      .def("append_block", &ProgramDesc::AppendBlock,
-           py::return_value_policy::reference)
+      .def("append_block", &pd::ProgramDesc::AppendBlock,
+           pybind11::return_value_policy::reference)
       .def("append_backward",
-           [](ProgramDesc &program_desc, const VarDesc &target,
+           [](pd::ProgramDesc &program_desc, const pd::VarDesc &target,
               const std::unordered_set<std::string> &no_grad_vars) {
-             ParamGradInfoMap param_grad_map =
+             pd::ParamGradInfoMap param_grad_map =
                 AppendBackward(program_desc, target, no_grad_vars);
             std::unordered_map<
                 std::string, std::tuple<std::string /* grad_var_name */,

@@ -140,178 +142,184 @@ void BindProgramDesc(py::module &m) {  // NOLINT
            }
            return retv;
          })
-      .def("block", &ProgramDesc::MutableBlock,
-           py::return_value_policy::reference)
-      .def("num_blocks", &ProgramDesc::Size)
-      .def("serialize_to_string", SerializeMessage<ProgramDesc>)
+      .def("block", &pd::ProgramDesc::MutableBlock,
+           pybind11::return_value_policy::reference)
+      .def("num_blocks", &pd::ProgramDesc::Size)
+      .def("serialize_to_string", SerializeMessage<pd::ProgramDesc>)
       .def("parse_from_string",
-           [](ProgramDesc &program_desc, const std::string &data) {
-             proto::ProgramDesc *desc = program_desc.Proto();
+           [](pd::ProgramDesc &program_desc, const std::string &data) {
+             pd::proto::ProgramDesc *desc = program_desc.Proto();
             PADDLE_ENFORCE(desc->ParseFromString(data),
                            "Fail to parse ProgramDesc from string. This could "
                            "be a bug of Paddle.");
           });
 }

-void BindBlockDesc(py::module &m) {  // NOLINT
-  py::class_<BlockDesc>(m, "BlockDesc", "")
-      .def_property_readonly("id", &BlockDesc::ID)
-      .def_property_readonly("parent", &BlockDesc::Parent)
-      .def("get_forward_block_idx", &BlockDesc::ForwardBlockID)
-      .def("set_forward_block_idx", &BlockDesc::SetForwardBlockID)
-      .def("append_op", &BlockDesc::AppendOp,
-           py::return_value_policy::reference)
-      .def("prepend_op", &BlockDesc::PrependOp,
-           py::return_value_policy::reference)
-      .def("insert_op", &BlockDesc::InsertOp,
-           py::return_value_policy::reference)
-      .def("remove_op", &BlockDesc::RemoveOp)
+void BindBlockDesc(pybind11::module *m) {
+  pybind11::class_<pd::BlockDesc>(*m, "BlockDesc", "")
+      .def_property_readonly("id", &pd::BlockDesc::ID)
+      .def_property_readonly("parent", &pd::BlockDesc::Parent)
+      .def("get_forward_block_idx", &pd::BlockDesc::ForwardBlockID)
+      .def("set_forward_block_idx", &pd::BlockDesc::SetForwardBlockID)
+      .def("append_op", &pd::BlockDesc::AppendOp,
+           pybind11::return_value_policy::reference)
+      .def("prepend_op", &pd::BlockDesc::PrependOp,
+           pybind11::return_value_policy::reference)
+      .def("insert_op", &pd::BlockDesc::InsertOp,
+           pybind11::return_value_policy::reference)
+      .def("remove_op", &pd::BlockDesc::RemoveOp)
       .def("var",
-           [](BlockDesc &self, py::bytes byte_name) {
+           [](pd::BlockDesc &self, pybind11::bytes byte_name) {
             std::string name = byte_name;
             return self.Var(name);
           },
-           py::return_value_policy::reference)
+           pybind11::return_value_policy::reference)
       .def("has_var",
-           [](BlockDesc &self, py::bytes byte_name) {
+           [](pd::BlockDesc &self, pybind11::bytes byte_name) {
             std::string name = byte_name;
             return self.HasVar(name);
           },
-           py::return_value_policy::reference)
+           pybind11::return_value_policy::reference)
       .def("rename_var",
-           [](BlockDesc &self, const py::bytes &byte_name,
-              const py::bytes &byte_name_new) {
+           [](pd::BlockDesc &self, const pybind11::bytes &byte_name,
+              const pybind11::bytes &byte_name_new) {
             std::string name = byte_name;
             std::string new_name = byte_name_new;
             self.RenameVar(name, new_name);
           })
       .def("has_var_recursive",
-           [](BlockDesc &self, py::bytes byte_name) {
+           [](pd::BlockDesc &self, pybind11::bytes byte_name) {
             std::string name = byte_name;
             return self.HasVarRecursive(name);
           })
       .def("find_var",
-           [](BlockDesc &self, py::bytes byte_name) {
+           [](pd::BlockDesc &self, pybind11::bytes byte_name) {
             std::string name = byte_name;
             return self.FindVar(name);
           },
-           py::return_value_policy::reference)
+           pybind11::return_value_policy::reference)
       .def("find_var_recursive",
-           [](BlockDesc &self, py::bytes byte_name) {
+           [](pd::BlockDesc &self, pybind11::bytes byte_name) {
             std::string name = byte_name;
             return self.FindVarRecursive(name);
           },
-           py::return_value_policy::reference)
+           pybind11::return_value_policy::reference)
       .def("remove_var",
-           [](BlockDesc &self, py::bytes byte_name) {
+           [](pd::BlockDesc &self, pybind11::bytes byte_name) {
             std::string name = byte_name;
             return self.RemoveVar(name);
           },
-           py::return_value_policy::reference)
-      .def("all_vars", &BlockDesc::AllVars, py::return_value_policy::reference)
-      .def("op_size", &BlockDesc::OpSize)
-      .def("op", &BlockDesc::Op, py::return_value_policy::reference)
-      .def("serialize_to_string", SerializeMessage<BlockDesc>);
+           pybind11::return_value_policy::reference)
+      .def("all_vars", &pd::BlockDesc::AllVars,
+           pybind11::return_value_policy::reference)
+      .def("op_size", &pd::BlockDesc::OpSize)
+      .def("op", &pd::BlockDesc::Op, pybind11::return_value_policy::reference)
+      .def("serialize_to_string", SerializeMessage<pd::BlockDesc>);
 }

-void BindVarDsec(py::module &m) {  // NOLINT
-  py::class_<VarDesc> var_desc(m, "VarDesc", "");
+void BindVarDsec(pybind11::module *m) {
+  pybind11::class_<pd::VarDesc> var_desc(*m, "VarDesc", "");
   var_desc
       .def("name",
-           [](VarDesc &self) {
-             py::bytes name = self.Name();
+           [](pd::VarDesc &self) {
+             pybind11::bytes name = self.Name();
             return name;
           },
-           py::return_value_policy::reference)
-      .def("set_name", &VarDesc::SetName)
-      .def("set_shape", &VarDesc::SetShape)
-      .def("set_shapes", &VarDesc::SetShapes)
-      .def("set_dtype", &VarDesc::SetDataType)
-      .def("set_dtypes", &VarDesc::SetDataTypes)
-      .def("set_capacity", &VarDesc::SetCapacity)
-      .def("shape", &VarDesc::GetShape, py::return_value_policy::reference)
-      .def("shapes", &VarDesc::GetShapes, py::return_value_policy::reference)
-      .def("dtype", &VarDesc::GetDataType, py::return_value_policy::reference)
-      .def("dtypes", &VarDesc::GetDataTypes, py::return_value_policy::reference)
-      .def("lod_level", &VarDesc::GetLoDLevel)
-      .def("lod_levels", &VarDesc::GetLoDLevels,
-           py::return_value_policy::reference)
-      .def("set_lod_level", &VarDesc::SetLoDLevel)
-      .def("set_lod_levels", &VarDesc::SetLoDLevels)
-      .def("type", &VarDesc::GetType)
-      .def("set_type", &VarDesc::SetType)
-      .def("serialize_to_string", SerializeMessage<VarDesc>)
-      .def("persistable", &VarDesc::Persistable)
-      .def("set_persistable", &VarDesc::SetPersistable);
+           pybind11::return_value_policy::reference)
+      .def("set_name", &pd::VarDesc::SetName)
+      .def("set_shape", &pd::VarDesc::SetShape)
+      .def("set_shapes", &pd::VarDesc::SetShapes)
+      .def("set_dtype", &pd::VarDesc::SetDataType)
+      .def("set_dtypes", &pd::VarDesc::SetDataTypes)
+      .def("set_capacity", &pd::VarDesc::SetCapacity)
+      .def("shape", &pd::VarDesc::GetShape,
+           pybind11::return_value_policy::reference)
+      .def("shapes", &pd::VarDesc::GetShapes,
+           pybind11::return_value_policy::reference)
+      .def("dtype", &pd::VarDesc::GetDataType,
+           pybind11::return_value_policy::reference)
+      .def("dtypes", &pd::VarDesc::GetDataTypes,
+           pybind11::return_value_policy::reference)
+      .def("lod_level", &pd::VarDesc::GetLoDLevel)
+      .def("lod_levels", &pd::VarDesc::GetLoDLevels,
+           pybind11::return_value_policy::reference)
+      .def("set_lod_level", &pd::VarDesc::SetLoDLevel)
+      .def("set_lod_levels", &pd::VarDesc::SetLoDLevels)
+      .def("type", &pd::VarDesc::GetType)
+      .def("set_type", &pd::VarDesc::SetType)
+      .def("serialize_to_string", SerializeMessage<pd::VarDesc>)
+      .def("persistable", &pd::VarDesc::Persistable)
+      .def("set_persistable", &pd::VarDesc::SetPersistable);

-  py::enum_<proto::VarType::Type>(var_desc, "VarType", "")
-      .value("BOOL", proto::VarType::BOOL)
-      .value("INT16", proto::VarType::INT16)
-      .value("INT32", proto::VarType::INT32)
-      .value("INT64", proto::VarType::INT64)
-      .value("FP16", proto::VarType::FP16)
-      .value("FP32", proto::VarType::FP32)
-      .value("FP64", proto::VarType::FP64)
-      .value("LOD_TENSOR", proto::VarType::LOD_TENSOR)
-      .value("SELECTED_ROWS", proto::VarType::SELECTED_ROWS)
-      .value("FEED_MINIBATCH", proto::VarType::FEED_MINIBATCH)
-      .value("FETCH_LIST", proto::VarType::FETCH_LIST)
-      .value("STEP_SCOPES", proto::VarType::STEP_SCOPES)
-      .value("LOD_RANK_TABLE", proto::VarType::LOD_RANK_TABLE)
-      .value("LOD_TENSOR_ARRAY", proto::VarType::LOD_TENSOR_ARRAY)
-      .value("CHANNEL", proto::VarType::CHANNEL)
-      .value("PLACE_LIST", proto::VarType::PLACE_LIST)
-      .value("READER", proto::VarType::READER)
-      .value("RAW", proto::VarType::RAW);
+  pybind11::enum_<pd::proto::VarType::Type>(var_desc, "VarType", "")
+      .value("BOOL", pd::proto::VarType::BOOL)
+      .value("INT16", pd::proto::VarType::INT16)
+      .value("INT32", pd::proto::VarType::INT32)
+      .value("INT64", pd::proto::VarType::INT64)
+      .value("FP16", pd::proto::VarType::FP16)
+      .value("FP32", pd::proto::VarType::FP32)
+      .value("FP64", pd::proto::VarType::FP64)
+      .value("LOD_TENSOR", pd::proto::VarType::LOD_TENSOR)
+      .value("SELECTED_ROWS", pd::proto::VarType::SELECTED_ROWS)
+      .value("FEED_MINIBATCH", pd::proto::VarType::FEED_MINIBATCH)
+      .value("FETCH_LIST", pd::proto::VarType::FETCH_LIST)
+      .value("STEP_SCOPES", pd::proto::VarType::STEP_SCOPES)
+      .value("LOD_RANK_TABLE", pd::proto::VarType::LOD_RANK_TABLE)
+      .value("LOD_TENSOR_ARRAY", pd::proto::VarType::LOD_TENSOR_ARRAY)
+      .value("CHANNEL", pd::proto::VarType::CHANNEL)
+      .value("PLACE_LIST", pd::proto::VarType::PLACE_LIST)
+      .value("READER", pd::proto::VarType::READER)
+      .value("RAW", pd::proto::VarType::RAW);
 }

-void BindOpDesc(py::module &m) {  // NOLINT
-  py::enum_<proto::AttrType>(m, "AttrType", "")
-      .value("INT", proto::AttrType::INT)
-      .value("INTS", proto::AttrType::INTS)
-      .value("FLOAT", proto::AttrType::FLOAT)
-      .value("FLOATS", proto::AttrType::FLOATS)
-      .value("STRING", proto::AttrType::STRING)
-      .value("STRINGS", proto::AttrType::STRINGS)
-      .value("BOOL", proto::AttrType::BOOLEAN)
-      .value("BOOLS", proto::AttrType::BOOLEANS)
-      .value("BLOCK", proto::AttrType::BLOCK);
+void BindOpDesc(pybind11::module *m) {
+  pybind11::enum_<pd::proto::AttrType>(*m, "AttrType", "")
+      .value("INT", pd::proto::AttrType::INT)
+      .value("INTS", pd::proto::AttrType::INTS)
+      .value("FLOAT", pd::proto::AttrType::FLOAT)
+      .value("FLOATS", pd::proto::AttrType::FLOATS)
+      .value("STRING", pd::proto::AttrType::STRING)
+      .value("STRINGS", pd::proto::AttrType::STRINGS)
+      .value("BOOL", pd::proto::AttrType::BOOLEAN)
+      .value("BOOLS", pd::proto::AttrType::BOOLEANS)
+      .value("BLOCK", pd::proto::AttrType::BLOCK);

-  py::class_<OpDesc> op_desc(m, "OpDesc", "");
+  pybind11::class_<pd::OpDesc> op_desc(*m, "OpDesc", "");
   op_desc
-      .def("__init__", [](OpDesc &self) { new (&self) OpDesc(); },
-           py::return_value_policy::reference)
-      .def("copy_from", &OpDesc::CopyFrom)
-      .def("type", &OpDesc::Type)
-      .def("set_type", &OpDesc::SetType)
-      .def("input", &OpDesc::Input)
-      .def("input_names", &OpDesc::InputNames)
-      .def("output", &OpDesc::Output)
-      .def("output_names", &OpDesc::OutputNames)
-      .def("set_input", &OpDesc::SetInput)
-      .def("set_output", &OpDesc::SetOutput)
-      .def("input_arg_names", &OpDesc::InputArgumentNames)
-      .def("output_arg_names", &OpDesc::OutputArgumentNames)
-      .def("rename_input", &OpDesc::RenameInput)
-      .def("rename_output", &OpDesc::RenameOutput)
-      .def("has_attr", &OpDesc::HasAttr)
-      .def("attr_type", &OpDesc::GetAttrType)
-      .def("attr_names", &OpDesc::AttrNames)
-      .def("set_attr", &OpDesc::SetAttr)
-      .def("attr", &OpDesc::GetAttr)
-      .def("set_block_attr", &OpDesc::SetBlockAttr)
+      .def("__init__", [](pd::OpDesc &self) { new (&self) pd::OpDesc(); },
+           pybind11::return_value_policy::reference)
+      .def("copy_from", &pd::OpDesc::CopyFrom)
+      .def("type", &pd::OpDesc::Type)
+      .def("set_type", &pd::OpDesc::SetType)
+      .def("input", &pd::OpDesc::Input)
+      .def("input_names", &pd::OpDesc::InputNames)
+      .def("output", &pd::OpDesc::Output)
+      .def("output_names", &pd::OpDesc::OutputNames)
+      .def("set_input", &pd::OpDesc::SetInput)
+      .def("set_output", &pd::OpDesc::SetOutput)
+      .def("input_arg_names", &pd::OpDesc::InputArgumentNames)
+      .def("output_arg_names", &pd::OpDesc::OutputArgumentNames)
+      .def("rename_input", &pd::OpDesc::RenameInput)
+      .def("rename_output", &pd::OpDesc::RenameOutput)
+      .def("has_attr", &pd::OpDesc::HasAttr)
+      .def("attr_type", &pd::OpDesc::GetAttrType)
+      .def("attr_names", &pd::OpDesc::AttrNames)
+      .def("set_attr", &pd::OpDesc::SetAttr)
+      .def("attr", &pd::OpDesc::GetAttr)
+      .def("set_block_attr", &pd::OpDesc::SetBlockAttr)
       .def("set_serialized_attr",
-           [](OpDesc &self, const std::string &name,
-              const py::bytes &seriralized) {
+           [](pd::OpDesc &self, const std::string &name,
+              const pybind11::bytes &seriralized) {
             std::string ser(seriralized);
             self.SetAttr(name, ser);
           })
-      .def("block_attr", &OpDesc::GetBlockAttr)
-      .def("check_attrs", &OpDesc::CheckAttrs)
-      .def("infer_shape", &OpDesc::InferShape)
-      .def("infer_var_type", &OpDesc::InferVarType)
-      .def("serialize_to_string", SerializeMessage<OpDesc>)
-      .def("block", &OpDesc::Block, py::return_value_policy::reference);
+      .def("block_attr", &pd::OpDesc::GetBlockAttr)
+      .def("check_attrs", &pd::OpDesc::CheckAttrs)
+      .def("infer_shape", &pd::OpDesc::InferShape)
+      .def("infer_var_type", &pd::OpDesc::InferVarType)
+      .def("serialize_to_string", SerializeMessage<pd::OpDesc>)
+      .def("block", &pd::OpDesc::Block,
+           pybind11::return_value_policy::reference);
 }

 }  // namespace pybind
...
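The other recurring cpplint fix in protobuf.cc is replacing `using namespace` (flagged by the build/namespaces check) with a namespace alias, as in `namespace pd = paddle::framework;` above. A tiny illustration with hypothetical names:

```cpp
namespace verylongname {
struct Widget {};
}  // namespace verylongname

// Flagged by cpplint (build/namespaces): dumps every name into scope.
// using namespace verylongname;  // NOLINT

// Preferred: an alias keeps call sites short but still qualified, so the
// origin of each name stays visible at the use site.
namespace vl = verylongname;

vl::Widget MakeWidget() { return vl::Widget{}; }
```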
paddle/fluid/pybind/protobuf.h:

@@ -11,25 +11,25 @@ distributed under the License is distributed on an "AS IS" BASIS,
 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License. */

 #pragma once

 #include <Python.h>

 #include <fstream>
 #include <vector>

 #include "paddle/fluid/platform/variant.h"
 #include "pybind11/numpy.h"
 #include "pybind11/pybind11.h"
 #include "pybind11/stl.h"

-namespace py = pybind11;
-
 namespace paddle {
 namespace pybind {

-void BindProgramDesc(py::module& m);
-void BindBlockDesc(py::module& m);
-void BindVarDsec(py::module& m);
-void BindOpDesc(py::module& m);
+void BindProgramDesc(pybind11::module* m);
+void BindBlockDesc(pybind11::module* m);
+void BindVarDsec(pybind11::module* m);
+void BindOpDesc(pybind11::module* m);

 }  // namespace pybind
 }  // namespace paddle
paddle/fluid/pybind/pybind.cc:

@@ -74,7 +74,7 @@ PYBIND11_PLUGIN(core) {
   // not cause namespace pollution.
   using namespace paddle::framework;  // NOLINT

-  BindException(m);
+  BindException(&m);

   py::class_<Tensor>(m, "Tensor", py::buffer_protocol())
       .def_buffer(

@@ -478,11 +478,11 @@ All parameter, weight, gradient are variables in Paddle.
   m.def("set_feed_variable", framework::SetFeedVariable);
   m.def("get_fetch_variable", framework::GetFetchVariable);

-  BindProgramDesc(m);
-  BindBlockDesc(m);
-  BindVarDsec(m);
-  BindOpDesc(m);
-  BindConstValue(m);
+  BindProgramDesc(&m);
+  BindBlockDesc(&m);
+  BindVarDsec(&m);
+  BindOpDesc(&m);
+  BindConstValue(&m);

   py::class_<framework::LoDRankTable>(m, "LodRankTable")
       .def("items", [](framework::LoDRankTable &table) {

@@ -553,7 +553,7 @@ All parameter, weight, gradient are variables in Paddle.
       })
       .def("run", &ParallelExecutor::Run);

-  BindRecordIOWriter(m);
+  BindRecordIOWriter(&m);
   return m.ptr();
 }

 }  // namespace pybind
...
paddle/fluid/pybind/recordio.cc:

@@ -13,13 +13,19 @@
 // limitations under the License.

 #include "paddle/fluid/pybind/recordio.h"

 #include <fstream>
+#include <string>
+#include <vector>

 #include "paddle/fluid/framework/lod_tensor.h"
 #include "paddle/fluid/recordio/writer.h"

 namespace paddle {
 namespace pybind {

+namespace {

 class RecordIOWriter {
  public:
  RecordIOWriter(const std::string& filename, recordio::Compressor compressor,

@@ -49,8 +55,10 @@ class RecordIOWriter {
   recordio::Writer writer_;
 };

-void BindRecordIOWriter(py::module& m) {
-  py::class_<RecordIOWriter> writer(m, "RecordIOWriter", "");
+}  // namespace
+
+void BindRecordIOWriter(py::module* m) {
+  py::class_<RecordIOWriter> writer(*m, "RecordIOWriter", "");
   py::enum_<recordio::Compressor>(writer, "Compressor", "")
       .value("Snappy", recordio::Compressor::kSnappy)
       .value("NoCompress", recordio::Compressor::kNoCompress);
...
paddle/fluid/pybind/recordio.h:

@@ -21,6 +21,7 @@ namespace py = pybind11;
 namespace paddle {
 namespace pybind {

-extern void BindRecordIOWriter(py::module& m);
+void BindRecordIOWriter(py::module* m);

 }  // namespace pybind
 }  // namespace paddle
paddle/fluid/pybind/tensor_py.h:

@@ -23,12 +23,8 @@ limitations under the License. */
 #include "pybind11/numpy.h"
 #include "pybind11/pybind11.h"

-namespace py = pybind11;
-
 namespace paddle {
 namespace pybind {
 namespace details {

 template <bool less, size_t I, typename... ARGS>

@@ -36,16 +32,16 @@ struct CastToPyBufferImpl;
 template <size_t I, typename... ARGS>
 struct CastToPyBufferImpl<false, I, ARGS...> {
-  py::buffer_info operator()(framework::Tensor &tensor) {
+  pybind11::buffer_info operator()(const framework::Tensor &tensor) {
     PADDLE_THROW("This type of tensor cannot be expose to Python");
-    return py::buffer_info();
+    return pybind11::buffer_info();
   }
 };

 template <size_t I, typename... ARGS>
 struct CastToPyBufferImpl<true, I, ARGS...> {
   using CUR_TYPE = typename std::tuple_element<I, std::tuple<ARGS...>>::type;
-  py::buffer_info operator()(framework::Tensor &tensor) {
+  pybind11::buffer_info operator()(const framework::Tensor &tensor) {
     if (std::type_index(typeid(CUR_TYPE)) == tensor.type()) {
       auto dim_vec = framework::vectorize(tensor.dims());
       std::vector<size_t> dims_outside;

@@ -84,15 +80,15 @@ struct CastToPyBufferImpl<true, I, ARGS...> {
       if (std::type_index(typeid(CUR_TYPE)) ==
           std::type_index(typeid(platform::float16))) {
-        return py::buffer_info(dst_tensor.data<CUR_TYPE>(), sizeof(CUR_TYPE),
-                               "e", /* np.dtype('e') == np.float16 */
-                               (size_t)framework::arity(dst_tensor.dims()),
-                               dims_outside, strides);
+        return pybind11::buffer_info(
+            dst_tensor.data<CUR_TYPE>(), sizeof(CUR_TYPE),
+            "e", /* np.dtype('e') == np.float16 */
+            (size_t)framework::arity(dst_tensor.dims()), dims_outside, strides);
       } else {
-        return py::buffer_info(dst_tensor.data<CUR_TYPE>(), sizeof(CUR_TYPE),
-                               py::format_descriptor<CUR_TYPE>::format(),
-                               (size_t)framework::arity(dst_tensor.dims()),
-                               dims_outside, strides);
+        return pybind11::buffer_info(
+            dst_tensor.data<CUR_TYPE>(), sizeof(CUR_TYPE),
+            pybind11::format_descriptor<CUR_TYPE>::format(),
+            (size_t)framework::arity(dst_tensor.dims()), dims_outside, strides);
       }
     } else {
       constexpr bool less = I + 1 < std::tuple_size<std::tuple<ARGS...>>::value;

@@ -103,7 +99,7 @@ struct CastToPyBufferImpl<true, I, ARGS...> {
 }  // namespace details

-inline py::buffer_info CastToPyBuffer(framework::Tensor &tensor) {
+inline pybind11::buffer_info CastToPyBuffer(const framework::Tensor &tensor) {
   auto buffer_info =
       details::CastToPyBufferImpl<true, 0, float, int, double, int64_t, bool,
                                   platform::float16>()(tensor);

@@ -111,7 +107,7 @@ inline py::buffer_info CastToPyBuffer(framework::Tensor &tensor) {
 }

 template <typename T>
-T TensorGetElement(framework::Tensor &self, size_t offset) {
+T TensorGetElement(const framework::Tensor &self, size_t offset) {
   if (platform::is_cpu_place(self.place())) {
     return self.data<T>()[offset];
   } else {

@@ -123,31 +119,32 @@ T TensorGetElement(framework::Tensor &self, size_t offset) {
 // TODO(dzhwinter) : fix the redundent Tensor allocate and free
 template <typename T>
-void TensorSetElement(framework::Tensor &self, size_t offset, T elem) {
-  if (platform::is_gpu_place(self.place())) {
+void TensorSetElement(framework::Tensor *self, size_t offset, T elem) {
+  if (platform::is_gpu_place(self->place())) {
     std::shared_ptr<framework::Tensor> dst(new framework::Tensor);
-    framework::TensorCopy(self, platform::CPUPlace(), dst.get());
+    framework::TensorCopy(*self, platform::CPUPlace(), dst.get());
     dst->data<T>()[offset] = elem;
-    framework::TensorCopy(*dst.get(), self.place(), &self);
-  } else if (platform::is_cpu_place(self.place())) {
-    self.data<T>()[offset] = elem;
+    framework::TensorCopy(*dst.get(), self->place(), self);
+  } else if (platform::is_cpu_place(self->place())) {
+    self->data<T>()[offset] = elem;
   }
 }

 template <typename T>
 void PyCPUTensorSetFromArray(
-    framework::Tensor &self,
-    py::array_t<T, py::array::c_style | py::array::forcecast> array,
-    paddle::platform::CPUPlace &place) {
+    framework::Tensor *self,
+    pybind11::array_t<T, pybind11::array::c_style | pybind11::array::forcecast>
+        array,
+    paddle::platform::CPUPlace place) {
   std::vector<int64_t> dims;
   dims.reserve(array.ndim());
   for (size_t i = 0; i < array.ndim(); ++i) {
     dims.push_back(static_cast<int>(array.shape()[i]));
   }

-  self.Resize(framework::make_ddim(dims));
-  auto *dst = self.mutable_data<T>(place);
+  self->Resize(framework::make_ddim(dims));
+  auto *dst = self->mutable_data<T>(place);
   std::memcpy(dst, array.data(), sizeof(T) * array.size());
 }

@@ -155,34 +152,37 @@ template <>
 // This following specialization maps uint16_t in the parameter type to
 // platform::float16.
 void PyCPUTensorSetFromArray(
-    framework::Tensor &self,
-    py::array_t<uint16_t, py::array::c_style | py::array::forcecast> array,
-    paddle::platform::CPUPlace &place) {
+    framework::Tensor *self,
+    pybind11::array_t<uint16_t,
+                      pybind11::array::c_style | pybind11::array::forcecast>
+        array,
+    paddle::platform::CPUPlace place) {
   std::vector<int64_t> dims;
   dims.reserve(array.ndim());
   for (size_t i = 0; i < array.ndim(); ++i) {
     dims.push_back(static_cast<int>(array.shape()[i]));
   }

-  self.Resize(framework::make_ddim(dims));
-  auto *dst = self.mutable_data<platform::float16>(place);
+  self->Resize(framework::make_ddim(dims));
+  auto *dst = self->mutable_data<platform::float16>(place);
   std::memcpy(dst, array.data(), sizeof(uint16_t) * array.size());
 }

 #ifdef PADDLE_WITH_CUDA
 template <typename T>
 void PyCUDATensorSetFromArray(
-    framework::Tensor &self,
-    py::array_t<T, py::array::c_style | py::array::forcecast> array,
-    paddle::platform::CUDAPlace &place) {
+    framework::Tensor *self,
+    pybind11::array_t<T, pybind11::array::c_style | pybind11::array::forcecast>
+        array,
+    paddle::platform::CUDAPlace place) {
   std::vector<int64_t> dims;
   dims.reserve(array.ndim());
   for (size_t i = 0; i < array.ndim(); ++i) {
     dims.push_back(static_cast<int>(array.shape()[i]));
   }

-  self.Resize(framework::make_ddim(dims));
-  auto *dst = self.mutable_data<T>(place);
+  self->Resize(framework::make_ddim(dims));
+  auto *dst = self->mutable_data<T>(place);
   platform::DeviceContextPool &pool = platform::DeviceContextPool::Instance();
   auto dev_ctx =

@@ -195,17 +195,19 @@ template <>
 // This following specialization maps uint16_t in the parameter type to
 // platform::float16.
 void PyCUDATensorSetFromArray(
-    framework::Tensor &self,
-    py::array_t<uint16_t, py::array::c_style | py::array::forcecast> array,
-    paddle::platform::CUDAPlace &place) {
+    framework::Tensor *self,
+    pybind11::array_t<uint16_t,
+                      pybind11::array::c_style | pybind11::array::forcecast>
+        array,
+    paddle::platform::CUDAPlace place) {
   std::vector<int64_t> dims;
   dims.reserve(array.ndim());
   for (size_t i = 0; i < array.ndim(); ++i) {
     dims.push_back(static_cast<int>(array.shape()[i]));
   }

-  self.Resize(framework::make_ddim(dims));
-  auto *dst = self.mutable_data<platform::float16>(place);
+  self->Resize(framework::make_ddim(dims));
+  auto *dst = self->mutable_data<platform::float16>(place);
   platform::DeviceContextPool &pool = platform::DeviceContextPool::Instance();
   auto dev_ctx =

@@ -217,8 +219,9 @@ void PyCUDATensorSetFromArray(
 template <typename T>
 void PyCUDAPinnedTensorSetFromArray(
-    framework::Tensor &self,
-    py::array_t<T, py::array::c_style | py::array::forcecast> array,
+    framework::Tensor *self,
+    pybind11::array_t<T, pybind11::array::c_style | pybind11::array::forcecast>
+        array,
     const paddle::platform::CUDAPinnedPlace &place) {
   std::vector<int64_t> dims;
   dims.reserve(array.ndim());

@@ -226,8 +229,8 @@ void PyCUDAPinnedTensorSetFromArray(
     dims.push_back(static_cast<int>(array.shape()[i]));
   }

-  self.Resize(framework::make_ddim(dims));
-  auto *dst = self.mutable_data<T>(place);
+  self->Resize(framework::make_ddim(dims));
+  auto *dst = self->mutable_data<T>(place);
   std::memcpy(dst, array.data(), sizeof(T) * array.size());
 }

@@ -235,8 +238,10 @@ template <>
 // This following specialization maps uint16_t in the parameter type to
 // platform::float16.
 void PyCUDAPinnedTensorSetFromArray(
-    framework::Tensor &self,
-    py::array_t<uint16_t, py::array::c_style | py::array::forcecast> array,
+    framework::Tensor *self,
+    pybind11::array_t<uint16_t,
+                      pybind11::array::c_style | pybind11::array::forcecast>
+        array,
     const paddle::platform::CUDAPinnedPlace &place) {
   std::vector<int64_t> dims;
   dims.reserve(array.ndim());

@@ -244,8 +249,8 @@ void PyCUDAPinnedTensorSetFromArray(
     dims.push_back(static_cast<int>(array.shape()[i]));
   }

-  self.Resize(framework::make_ddim(dims));
-  auto *dst = self.mutable_data<platform::float16>(place);
+  self->Resize(framework::make_ddim(dims));
+  auto *dst = self->mutable_data<platform::float16>(place);
   std::memcpy(dst, array.data(), sizeof(uint16_t) * array.size());
 }
 #endif
...

paddle/fluid/pybind/tensor_py_test.cc (new file):
// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "paddle/fluid/pybind/tensor_py.h"
#include <iostream>
#include "gtest/gtest.h"
#include "paddle/fluid/framework/tensor.h"
TEST(TensorPy, CastToPyBufferImpl) {
typedef int ElemType;
paddle::framework::Tensor t;
auto d = paddle::framework::make_ddim({1, 2, 3});
int* p = t.mutable_data<ElemType>(d, paddle::platform::CPUPlace());
for (int i = 0; i < paddle::framework::product(d); ++i) {
p[i] = i;
}
pybind11::buffer_info bi = paddle::pybind::CastToPyBuffer(t);
EXPECT_EQ(bi.itemsize, static_cast<size_t>(sizeof(ElemType)));
EXPECT_EQ(bi.size, static_cast<size_t>(paddle::framework::product(d)));
EXPECT_EQ(bi.ndim, static_cast<size_t>(3)); // 3-dimensional as d.
EXPECT_EQ(bi.shape.size(), 3U); // as Dim d.
EXPECT_EQ(bi.shape[0], static_cast<size_t>(1));
EXPECT_EQ(bi.shape[1], static_cast<size_t>(2));
EXPECT_EQ(bi.shape[2], static_cast<size_t>(3));
EXPECT_EQ(bi.strides.size(), 3U); // 3-dimensional as d.
EXPECT_EQ(bi.strides[2], static_cast<size_t>(sizeof(ElemType)));
EXPECT_EQ(bi.strides[1], static_cast<size_t>(sizeof(ElemType) * 3));
EXPECT_EQ(bi.strides[0], static_cast<size_t>(sizeof(ElemType) * 2 * 3));
}
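The stride assertions above encode the row-major layout contract: each dimension's stride is the element size times the product of all faster-varying (inner) dimensions. A small standalone sketch of that rule, where RowMajorStrides is illustrative and not a Paddle API:

```cpp
#include <cstddef>
#include <vector>

// Compute row-major strides: walk dimensions from innermost to
// outermost, multiplying the running byte count by each inner extent.
std::vector<std::size_t> RowMajorStrides(const std::vector<std::size_t>& dims,
                                         std::size_t item_size) {
  std::vector<std::size_t> strides(dims.size());
  std::size_t running = item_size;  // innermost dimension is contiguous
  for (std::size_t i = dims.size(); i-- > 0;) {
    strides[i] = running;
    running *= dims[i];
  }
  return strides;
}

// For dims {1, 2, 3} and item_size sizeof(int) == 4 this yields
// {24, 12, 4}, matching the three stride EXPECT_EQs in the test above.
```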