Commit d226daee authored by superjom

Merge branch 'develop' of github.com:PaddlePaddle/Paddle into feature/dynamic_batch

@@ -29,16 +29,19 @@ limitations under the License. */
 namespace paddle {
-namespace framework {
+namespace pybind {
 namespace details {
 template <bool less, size_t i, typename... args>
 struct CastToPyBufferImpl;
 }
+}  // namespace pybind
+
+namespace framework {
 class Tensor {
  public:
   template <bool less, size_t i, typename... args>
-  friend struct details::CastToPyBufferImpl;
+  friend struct pybind::details::CastToPyBufferImpl;
   template <typename T, size_t D, int MajorType, typename IndexType>
   friend struct EigenTensor;
@@ -165,12 +168,6 @@ class Tensor {
   /*! points to dimensions of memory block. */
   DDim dims_;
-  /**
-   * A cache of the number of elements in a tensor.
-   * Would be 0 for an uninitialized tensor.
-   */
-  int64_t numel_;
   /**
    * @brief A PlaceHolder may be shared by more than one tensor.
    *
......
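The hunks above move the CastToPyBufferImpl forward declaration out of framework into a new pybind namespace, then befriend it under its fully qualified name. A minimal standalone sketch of that cross-namespace friend pattern; everything except the CastToPyBufferImpl/Tensor names is illustrative, not Paddle's real code:

#include <cstdint>
#include <cstddef>

namespace paddle {
namespace pybind {
namespace details {
// Forward declaration only; the definition lives with the pybind code.
template <bool less, std::size_t i, typename... args>
struct CastToPyBufferImpl;
}  // namespace details
}  // namespace pybind

namespace framework {
class Tensor {
 public:
  // Befriend the template by its fully qualified name so the pybind
  // machinery can reach Tensor's private members.
  template <bool less, std::size_t i, typename... args>
  friend struct pybind::details::CastToPyBufferImpl;

 private:
  int64_t size_ = 0;  // stand-in for the real private state
};
}  // namespace framework
}  // namespace paddle

int main() {
  paddle::framework::Tensor t;
  (void)t;
  return 0;
}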
@@ -147,13 +147,12 @@ inline Tensor Tensor::Slice(const int& begin_idx, const int& end_idx) const {
 inline Tensor& Tensor::Resize(const DDim& dims) {
   dims_ = dims;
-  numel_ = product(dims_);
   return *this;
 }
 inline const DDim& Tensor::dims() const { return dims_; }
-inline int64_t Tensor::numel() const { return numel_; }
+inline int64_t Tensor::numel() const { return product(dims_); }
 template <typename T>
 inline Tensor ReshapeToMatrix(const Tensor& src, int num_col_dims) {
......
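The effect of the two hunks above: numel() no longer reads a cached numel_ member that Resize() had to keep in sync, but recomputes the element count from dims_ on every call, so the count can never go stale. A rough single-file illustration of the same trade-off; FakeTensor and its members are hypothetical, not Paddle's API:

#include <cstdint>
#include <numeric>
#include <utility>
#include <vector>

struct FakeTensor {
  std::vector<int64_t> dims;

  FakeTensor& Resize(std::vector<int64_t> d) {
    dims = std::move(d);  // no cached count to update, so nothing can go stale
    return *this;
  }

  int64_t numel() const {
    // Equivalent of product(dims_): multiply all dimension sizes.
    return std::accumulate(dims.begin(), dims.end(), int64_t{1},
                           [](int64_t a, int64_t b) { return a * b; });
  }
};

int main() {
  FakeTensor t;
  t.Resize({2, 3, 4});
  return t.numel() == 24 ? 0 : 1;
}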
@@ -12,6 +12,8 @@
 See the License for the specific language governing permissions and
 limitations under the License. */
+#pragma once
+
 #include "paddle/operators/elementwise_op.h"
 namespace paddle {
......
@@ -12,6 +12,8 @@
 See the License for the specific language governing permissions and
 limitations under the License. */
+#pragma once
+
 #include "paddle/operators/elementwise_op.h"
 namespace paddle {
......
@@ -12,6 +12,7 @@
 See the License for the specific language governing permissions and
 limitations under the License. */
+#pragma once
 #include "paddle/operators/elementwise_op.h"
 namespace paddle {
......
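Each of the three operator headers above gains #pragma once, which makes the preprocessor skip repeated inclusions of the same header within one translation unit. A self-contained sketch of the failure it prevents, simulated in one file with the equivalent classic include-guard pattern (all names here are made up):

// The guard macro pattern below is what #pragma once replaces.
#ifndef ELEMENTWISE_ADD_OP_H  // classic include guard, equivalent in effect
#define ELEMENTWISE_ADD_OP_H
struct ElementwiseAdd {};
#endif

#ifndef ELEMENTWISE_ADD_OP_H  // "second inclusion": guard makes it a no-op
#define ELEMENTWISE_ADD_OP_H
struct ElementwiseAdd {};     // would be a redefinition error without guards
#endif

int main() {
  ElementwiseAdd op;
  (void)op;
  return 0;
}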
@@ -34,12 +34,7 @@ limitations under the License. */
 namespace py = pybind11;
 namespace paddle {
-namespace framework {
-
-using Tensor = framework::Tensor;
-using LoDTensor = framework::LoDTensor;
-using LoD = framework::LoD;
-
+namespace pybind {
 static size_t UniqueIntegerGenerator() {
   static std::atomic<size_t> generator;
   return generator.fetch_add(1);
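For context, the unchanged UniqueIntegerGenerator above relies on a function-local static std::atomic<size_t>; fetch_add(1) atomically increments the counter and returns its previous value, so concurrent callers each receive a distinct ID. A small usage sketch of the same pattern; NextId is a hypothetical stand-in:

#include <atomic>
#include <cstdio>
#include <thread>
#include <vector>

// Same pattern as UniqueIntegerGenerator: function-local static atomic.
static size_t NextId() {
  static std::atomic<size_t> counter{0};
  return counter.fetch_add(1);  // returns the value *before* the increment
}

int main() {
  std::vector<std::thread> threads;
  for (int i = 0; i < 4; ++i) {
    threads.emplace_back([] { std::printf("id=%zu\n", NextId()); });
  }
  for (auto& t : threads) t.join();  // each thread printed a distinct id
  return 0;
}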
@@ -56,6 +51,10 @@ bool IsCompileGPU() {
 PYBIND11_PLUGIN(core) {
   py::module m("core", "C++ core of PaddlePaddle");
+  // using framework in this function. Since it is inside a function, it will
+  // not cause namespace pollution.
+  using namespace paddle::framework;  // NOLINT
+
   py::class_<Tensor>(m, "Tensor", py::buffer_protocol())
       .def_buffer(
           [](Tensor &self) -> py::buffer_info { return CastToPyBuffer(self); })
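The added using-directive sits inside the PYBIND11_PLUGIN function body, so framework names resolve unqualified only within that function rather than leaking into every file that includes the translation unit's headers, as the new comment notes. A toy illustration of that scoping rule; the namespaces here are invented:

#include <iostream>

namespace framework_like {
int answer() { return 42; }
}  // namespace framework_like

void inside() {
  // The directive is local to this function body...
  using namespace framework_like;  // NOLINT
  std::cout << answer() << "\n";   // unqualified name resolves here
}

int main() {
  inside();
  // ...but not here: a plain `answer()` would not compile in main,
  // so the qualified form is required outside `inside()`.
  std::cout << framework_like::answer() << "\n";
  return 0;
}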
@@ -107,7 +106,7 @@ PYBIND11_PLUGIN(core) {
 #ifdef PADDLE_ONLY_CPU
           new (&instance) LoDTensor(lod);
 #else
-          paddle::framework::LoD new_lod;
+          LoD new_lod;
           new_lod.reserve(lod.size());
           std::copy(lod.begin(), lod.end(), std::back_inserter(new_lod));
           new (&instance) LoDTensor(new_lod);
@@ -118,7 +117,7 @@ PYBIND11_PLUGIN(core) {
 #ifdef PADDLE_ONLY_CPU
           self.set_lod(lod);
 #else
-          paddle::framework::LoD new_lod;
+          LoD new_lod;
           new_lod.reserve(lod.size());
           std::copy(lod.begin(), lod.end(), std::back_inserter(new_lod));
           self.set_lod(new_lod);
@@ -132,7 +131,7 @@ PYBIND11_PLUGIN(core) {
           std::vector<std::vector<size_t>> new_lod;
           new_lod.reserve(lod.size());
           std::transform(lod.begin(), lod.end(), std::back_inserter(new_lod),
-                         [](paddle::framework::Vector<size_t> item) ->
+                         [](Vector<size_t> item) ->
                              std::vector<size_t> {
                            std::vector<size_t> v;
                            v.reserve(item.size());
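The three LoD hunks above drop the now-redundant paddle::framework:: qualification; the surrounding code copies a level-of-detail structure between container types with std::copy and std::transform. A standalone sketch of that nested conversion, with plain std containers standing in for framework::LoD and framework::Vector<size_t> (an assumption about their copyable, vector-like interfaces):

#include <algorithm>
#include <cstddef>
#include <iterator>
#include <vector>

// Stand-ins for framework::Vector<size_t> and framework::LoD.
using Level = std::vector<std::size_t>;
using Lod = std::vector<Level>;

int main() {
  Lod lod = {{0, 2, 5}, {0, 1, 3, 5}};

  // Outer-level copy, as in the #else branches above.
  Lod new_lod;
  new_lod.reserve(lod.size());
  std::copy(lod.begin(), lod.end(), std::back_inserter(new_lod));

  // Element-wise transform back to plain vectors, as in the last hunk.
  std::vector<std::vector<std::size_t>> plain;
  plain.reserve(new_lod.size());
  std::transform(new_lod.begin(), new_lod.end(), std::back_inserter(plain),
                 [](const Level& item) {
                   std::vector<std::size_t> v;
                   v.reserve(item.size());
                   std::copy(item.begin(), item.end(), std::back_inserter(v));
                   return v;
                 });
  return plain.size() == lod.size() ? 0 : 1;
}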
@@ -317,5 +316,5 @@ All parameter, weight, gradient are variables in Paddle.
   return m.ptr();
 }
-}  // namespace framework
+}  // namespace pybind
 }  // namespace paddle
......
@@ -23,7 +23,7 @@ namespace py = pybind11;
 namespace paddle {
-namespace framework {
+namespace pybind {
 namespace details {
......