From d2d3f2b569b31357bc8a28a39ab4ca628caab812 Mon Sep 17 00:00:00 2001
From: sneaxiy
Date: Mon, 18 Feb 2019 07:52:15 +0000
Subject: [PATCH] fix api arg0

test=release/1.3
---
 paddle/fluid/API.spec         |   6 +-
 paddle/fluid/pybind/pybind.cc | 109 +++++++++++++++++++++++++++++-----
 2 files changed, 96 insertions(+), 19 deletions(-)

diff --git a/paddle/fluid/API.spec b/paddle/fluid/API.spec
index df961be911..8a3c062dba 100644
--- a/paddle/fluid/API.spec
+++ b/paddle/fluid/API.spec
@@ -473,11 +473,11 @@ paddle.fluid.LoDTensor.has_valid_recursive_sequence_lengths has_valid_recursive_
 paddle.fluid.LoDTensor.lod lod(self: paddle.fluid.core.LoDTensor) -> List[List[int]]
 paddle.fluid.LoDTensor.recursive_sequence_lengths recursive_sequence_lengths(self: paddle.fluid.core.LoDTensor) -> List[List[int]]
 paddle.fluid.LoDTensor.set 1. set(self: paddle.fluid.core.Tensor, arg0: numpy.ndarray[float32], arg1: paddle::platform::CPUPlace) -> None 2. set(self: paddle.fluid.core.Tensor, arg0: numpy.ndarray[int32], arg1: paddle::platform::CPUPlace) -> None 3. set(self: paddle.fluid.core.Tensor, arg0: numpy.ndarray[float64], arg1: paddle::platform::CPUPlace) -> None 4. set(self: paddle.fluid.core.Tensor, arg0: numpy.ndarray[int64], arg1: paddle::platform::CPUPlace) -> None 5. set(self: paddle.fluid.core.Tensor, arg0: numpy.ndarray[bool], arg1: paddle::platform::CPUPlace) -> None 6. set(self: paddle.fluid.core.Tensor, arg0: numpy.ndarray[uint16], arg1: paddle::platform::CPUPlace) -> None 7. set(self: paddle.fluid.core.Tensor, arg0: numpy.ndarray[uint8], arg1: paddle::platform::CPUPlace) -> None 8. set(self: paddle.fluid.core.Tensor, arg0: numpy.ndarray[int8], arg1: paddle::platform::CPUPlace) -> None 9. set(self: paddle.fluid.core.Tensor, arg0: numpy.ndarray[float32], arg1: paddle::platform::CUDAPlace) -> None 10. set(self: paddle.fluid.core.Tensor, arg0: numpy.ndarray[int32], arg1: paddle::platform::CUDAPlace) -> None 11. set(self: paddle.fluid.core.Tensor, arg0: numpy.ndarray[float64], arg1: paddle::platform::CUDAPlace) -> None 12. set(self: paddle.fluid.core.Tensor, arg0: numpy.ndarray[int64], arg1: paddle::platform::CUDAPlace) -> None 13. set(self: paddle.fluid.core.Tensor, arg0: numpy.ndarray[bool], arg1: paddle::platform::CUDAPlace) -> None 14. set(self: paddle.fluid.core.Tensor, arg0: numpy.ndarray[uint16], arg1: paddle::platform::CUDAPlace) -> None 15. set(self: paddle.fluid.core.Tensor, arg0: numpy.ndarray[uint8], arg1: paddle::platform::CUDAPlace) -> None 16. set(self: paddle.fluid.core.Tensor, arg0: numpy.ndarray[int8], arg1: paddle::platform::CUDAPlace) -> None 17. set(self: paddle.fluid.core.Tensor, arg0: numpy.ndarray[float32], arg1: paddle::platform::CUDAPinnedPlace) -> None 18. set(self: paddle.fluid.core.Tensor, arg0: numpy.ndarray[int32], arg1: paddle::platform::CUDAPinnedPlace) -> None 19. set(self: paddle.fluid.core.Tensor, arg0: numpy.ndarray[float64], arg1: paddle::platform::CUDAPinnedPlace) -> None 20. set(self: paddle.fluid.core.Tensor, arg0: numpy.ndarray[int64], arg1: paddle::platform::CUDAPinnedPlace) -> None 21. set(self: paddle.fluid.core.Tensor, arg0: numpy.ndarray[bool], arg1: paddle::platform::CUDAPinnedPlace) -> None 22. set(self: paddle.fluid.core.Tensor, arg0: numpy.ndarray[uint16], arg1: paddle::platform::CUDAPinnedPlace) -> None 23. set(self: paddle.fluid.core.Tensor, arg0: numpy.ndarray[uint8], arg1: paddle::platform::CUDAPinnedPlace) -> None 24. set(self: paddle.fluid.core.Tensor, arg0: numpy.ndarray[int8], arg1: paddle::platform::CUDAPinnedPlace) -> None
-paddle.fluid.LoDTensor.set_lod set_lod(self: paddle.fluid.core.LoDTensor, arg0: List[List[int]]) -> None
-paddle.fluid.LoDTensor.set_recursive_sequence_lengths set_recursive_sequence_lengths(self: paddle.fluid.core.LoDTensor, arg0: List[List[int]]) -> None
+paddle.fluid.LoDTensor.set_lod set_lod(self: paddle.fluid.core.LoDTensor, lod: List[List[int]]) -> None
+paddle.fluid.LoDTensor.set_recursive_sequence_lengths set_recursive_sequence_lengths(self: paddle.fluid.core.LoDTensor, recursive_sequence_lengths: List[List[int]]) -> None
 paddle.fluid.LoDTensor.shape shape(self: paddle.fluid.core.Tensor) -> List[int]
 paddle.fluid.LoDTensorArray.__init__ __init__(self: paddle.fluid.core.LoDTensorArray) -> None
-paddle.fluid.LoDTensorArray.append append(self: paddle.fluid.core.LoDTensorArray, arg0: paddle.fluid.core.LoDTensor) -> None
+paddle.fluid.LoDTensorArray.append append(self: paddle.fluid.core.LoDTensorArray, tensor: paddle.fluid.core.LoDTensor) -> None
 paddle.fluid.CPUPlace.__init__ __init__(self: paddle.fluid.core.CPUPlace) -> None
 paddle.fluid.CUDAPlace.__init__ __init__(self: paddle.fluid.core.CUDAPlace, arg0: int) -> None
 paddle.fluid.CUDAPinnedPlace.__init__ __init__(self: paddle.fluid.core.CUDAPinnedPlace) -> None
diff --git a/paddle/fluid/pybind/pybind.cc b/paddle/fluid/pybind/pybind.cc
index 6549229e05..9eec8bfb5d 100644
--- a/paddle/fluid/pybind/pybind.cc
+++ b/paddle/fluid/pybind/pybind.cc
@@ -372,7 +372,13 @@ PYBIND11_MODULE(core, m) {
             PADDLE_ENFORCE(CheckLoD(new_lod, vectorize(self.dims()).front()),
                            "the provided lod info is invalid");
             self.set_lod(new_lod);
-           })
+           },
+           py::arg("lod"), R"DOC(
+           Set LoD of the LoDTensor.
+
+           Args:
+               lod (List[List[int]]): the lod to be set.
+           )DOC")
       .def("set_recursive_sequence_lengths",
            [](LoDTensor &self, const std::vector<std::vector<size_t>>
                                    &recursive_sequence_lengths) {
@@ -388,7 +394,17 @@ PYBIND11_MODULE(core, m) {
                  CheckLoD(new_offset_lod, vectorize(self.dims()).front()),
                  "the provided recursive_sequence_lengths info is invalid");
              self.set_lod(new_offset_lod);
-           })
+           },
+           py::arg("recursive_sequence_lengths"), R"DOC(
+           Set LoD of the LoDTensor according to recursive sequence lengths.
+
+           For example, if recursive_sequence_lengths=[2, 3], meaning that
+           there are two sequences with length 2 and 3 respectively, the
+           corresponding lod would be [0, 2, 2+3], i.e., [0, 2, 5].
+
+           Args:
+               recursive_sequence_lengths (List[List[int]]): sequence lengths.
+           )DOC")
       .def("lod",
            [](LoDTensor &self) -> std::vector<std::vector<size_t>> {
              // output the offset-based lod info
@@ -397,7 +413,13 @@ PYBIND11_MODULE(core, m) {
             new_lod.reserve(lod.size());
             std::copy(lod.begin(), lod.end(), std::back_inserter(new_lod));
             return new_lod;
-           })
+           },
+           R"DOC(
+           Return the LoD of the LoDTensor.
+
+           Returns:
+               out (List[List[int]]): the lod of the LoDTensor.
+           )DOC")
       // Set above comments of set_lod.
       .def("recursive_sequence_lengths",
            [](LoDTensor &self) -> std::vector<std::vector<size_t>> {
@@ -407,12 +429,25 @@ PYBIND11_MODULE(core, m) {
             new_lod.reserve(lod.size());
             std::copy(lod.begin(), lod.end(), std::back_inserter(new_lod));
             return new_lod;
-           })
-      .def("has_valid_recursive_sequence_lengths", [](LoDTensor &self) -> bool {
-        // Check that the lod info is valid and match the outermost
-        // dimension of the LoDTensor data
-        return CheckLoD(self.lod(), vectorize(self.dims()).front());
-      });
+           },
+           R"DOC(
+           Return the sequence lengths of the LoDTensor corresponding to its LoD.
+
+           Returns:
+               out (List[List[int]]): the sequence lengths.
+           )DOC")
+      .def("has_valid_recursive_sequence_lengths",
+           [](LoDTensor &self) -> bool {
+             // Check that the lod info is valid and match the outermost
+             // dimension of the LoDTensor data
+             return CheckLoD(self.lod(), vectorize(self.dims()).front());
+           },
+           R"DOC(
+           Check whether the lod of the LoDTensor is valid.
+
+           Returns:
+               out (bool): whether the lod is valid.
+           )DOC");
 
   py::class_<SelectedRows>(m, "SelectedRows")
       .def("__init__",
@@ -548,11 +583,45 @@ All parameter, weight, gradient are variables in Paddle.
            [](Scope &self, const std::string &name) -> Variable * {
              return self.Var(name);
            },
+           py::arg("name"),
+           R"DOC(
+           Find or create variable named :code:`name` in the current scope.
+
+           If the variable named :code:`name` does not exist in the
+           current scope, the variable would be created. Otherwise,
+           return the existing variable.
+
+           Args:
+               name (str): the variable name.
+
+           Returns:
+               out (core.Variable): the found or created variable.
+           )DOC",
+           py::return_value_policy::reference)
+      .def("find_var", &Scope::FindVar, py::arg("name"),
+           R"DOC(
+           Find variable named :code:`name` in the current scope or
+           its parent scope. Return None if not found.
+
+           Args:
+               name (str): the variable name.
+
+           Returns:
+               out (core.Variable|None): the found variable or None.
+           )DOC",
            py::return_value_policy::reference)
-      .def("find_var", &Scope::FindVar, py::return_value_policy::reference)
       .def("new_scope", [](Scope &self) -> Scope * { return &self.NewScope(); },
+           R"DOC(
+           Create a new sub-scope of the current scope.
+
+           Returns:
+               out (core._Scope): the created sub-scope.
+           )DOC",
            py::return_value_policy::reference)
-      .def("drop_kids", &Scope::DropKids);
+      .def("drop_kids", &Scope::DropKids,
+           R"DOC(
+           Delete all sub-scopes of the current scope.
+           )DOC");
 
   m.def("Scope",
         []() -> Scope * {
@@ -560,6 +629,12 @@ All parameter, weight, gradient are variables in Paddle.
           ScopePool::Instance().Insert(std::unique_ptr<Scope>(s));
           return s;
         },
+        R"DOC(
+        Create a new scope.
+
+        Returns:
+            out (core._Scope): the created scope.
+        )DOC",
         py::return_value_policy::reference);
 
   //! @note: Be careful! PyBind will return std::string as an unicode, not
@@ -782,11 +857,13 @@ All parameter, weight, gradient are variables in Paddle.
           self[i].ShareDataWith(t);
           self[i].set_lod(t.lod());
         })
-      .def("append", [](LoDTensorArray &self, const LoDTensor &t) {
-        self.emplace_back();
-        self.back().ShareDataWith(t);
-        self.back().set_lod(t.lod());
-      });
+      .def("append",
+           [](LoDTensorArray &self, const LoDTensor &t) {
+             self.emplace_back();
+             self.back().ShareDataWith(t);
+             self.back().set_lod(t.lod());
+           },
+           py::arg("tensor"), "Append a LoDTensor to LoDTensorArray.");
 
   m.def("IsInplace",
         [](std::string op) -> bool { return operators::IsInplace(op); });
-- 
GitLab
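
Usage sketch (not part of the patch): with this change, the bindings expose named keyword arguments instead of the auto-generated arg0/arg1 names. The snippet below is a minimal illustration assuming a Paddle release/1.3 build that includes this commit; the tensor values and the variable name "my_var" are made up for the example.

    import numpy as np
    import paddle.fluid as fluid

    t = fluid.LoDTensor()
    # Five rows of data, so a length-based LoD of [[2, 3]] (offsets [0, 2, 5]) is valid.
    t.set(np.arange(5, dtype='float32').reshape(5, 1), fluid.CPUPlace())
    t.set_recursive_sequence_lengths(recursive_sequence_lengths=[[2, 3]])
    assert t.lod() == [[0, 2, 5]]
    assert t.has_valid_recursive_sequence_lengths()

    arr = fluid.LoDTensorArray()
    arr.append(tensor=t)  # keyword "tensor" comes from py::arg("tensor")

    scope = fluid.core.Scope()
    scope.var(name="my_var")  # keyword "name" comes from py::arg("name")
    assert scope.find_var(name="my_var") is not None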