diff --git a/paddle/framework/CMakeLists.txt b/paddle/framework/CMakeLists.txt
index 493f5690a12350bb0b0a0b9fd9a743604612722b..b33014210fb1ce4e5e86ae0e52ae6f42acd301cc 100644
--- a/paddle/framework/CMakeLists.txt
+++ b/paddle/framework/CMakeLists.txt
@@ -2,6 +2,7 @@
 cc_library(ddim SRCS ddim.cc)
 cc_test(ddim_test SRCS ddim_test.cc DEPS ddim)
 nv_test(dim_test SRCS dim_test.cu DEPS ddim)
+cc_test(tensor_test SRCS tensor_test.cc DEPS ddim)
 cc_test(variable_test SRCS variable_test.cc)
 cc_test(scope_test SRCS scope_test.cc)
 cc_test(enforce_test SRCS enforce_test.cc)
diff --git a/paddle/framework/tensor.h b/paddle/framework/tensor.h
index 067f2a85264b462e96b65946b60b046172765a1d..ce5d98b04e6b53fcedc4fc4610d9390e64846b2a 100644
--- a/paddle/framework/tensor.h
+++ b/paddle/framework/tensor.h
@@ -14,33 +14,39 @@ limitations under the License. */

 #pragma once

+#include <memory>
+#include <type_traits>
+#include "paddle/framework/ddim.h"
+#include "paddle/framework/enforce.h"
+#include "paddle/memory/memory.h"
+#include "paddle/platform/place.h"
+
 namespace paddle {
 namespace framework {

 class Tensor {
-  using paddle::platform::Place;
-  using paddle::platform::get_place;
-
  public:
   template <typename T>
   const T* data() const {
-    PADDLE_ASSERT(holder_ != nullptr,
-                  "Tensor::data must be called after Tensor::mutable_data");
-    return static_cast<const T*>(holder->Ptr());
+    PADDLE_ENFORCE(holder_ != nullptr,
+                   "Tensor::data must be called after Tensor::mutable_data.");
+    return static_cast<const T*>(holder_->Ptr());
   }

   template <typename T,  // must be POD types
-            typename std::enable_if<std::is_pod<T>::value>::type>
-  T* mutable_data(DDim dims, Place place) {
-    if (holder_ == nullptr || holder_->Place() != place ||
-        holder_->Size() < dims.product() * sizeof(T)) {
-      holder_.reset(new PlaceholderImpl<T>(place, dims.product() * sizeof(T)));
+            typename std::enable_if<std::is_pod<T>::value>::type* = nullptr>
+  T* mutable_data(DDim dims, paddle::platform::Place place) {
+    if (holder_ == nullptr ||
+        !(holder_->Place() ==
+          place) /* some versions of boost::variant don't have operator!= */
+        || holder_->Size() < product(dims) * sizeof(T)) {
+      holder_.reset(new PlaceholderImpl<T>(place, product(dims) * sizeof(T)));
     }
     return static_cast<T*>(holder_->Ptr());
   }

   template <typename T,  // must be POD types
-            typename std::enable_if<std::is_pod<T>::value>::type>
+            typename std::enable_if<std::is_pod<T>::value>::type* = nullptr>
   T* mutable_data(DDim dims) {
     return mutable_data<T>(dims, paddle::platform::get_place());
   }
@@ -51,27 +57,41 @@ class Tensor {
   struct Placeholder {
     virtual ~Placeholder() {}
     virtual void* Ptr() const = 0;
-    virtual Place Place() const = 0;
+    virtual paddle::platform::Place Place() const = 0;
     virtual size_t Size() const = 0;
   };

   template <typename T>
   struct PlaceholderImpl : public Placeholder {
-    PlaceholderImpl(Place pl, size_t size)
-        : ptr_(paddle::memory::Alloc(pl, size), paddle::memory::Deleter(pl)),
-          place_(pl),
-          size_(size) {}
+   private:
+    class Deleter {
+     public:
+      Deleter(platform::Place place) : place_(place) {}
+      void operator()(T* ptr) {
+        paddle::memory::Free(place_, static_cast<void*>(ptr));
+      }
+
+     private:
+      paddle::platform::Place place_;
+    };
+
+   public:
+    PlaceholderImpl(paddle::platform::Place place, size_t size)
+        : ptr_(static_cast<T*>(paddle::memory::Alloc(place, size)),
+               Deleter(place)),
+          place_(place),
+          size_(size) {}

     virtual void* Ptr() const { return static_cast<void*>(ptr_.get()); }
     virtual size_t Size() const { return size_; }
-    virtual Place Place() const { return place_; }
+    virtual paddle::platform::Place Place() const { return place_; }

-    std::unique_ptr<T, paddle::memory::Deleter> ptr_;
-    Place place_;  // record the place of ptr_.
-    size_t size_;  // size of the memory block.
+    std::unique_ptr<T, Deleter> ptr_;
+    paddle::platform::Place place_;  // record the place of ptr_.
+    size_t size_;                    // size of the memory block.
   };

-  std::unique_ptr<Placeholder> holder_;  // holds the memory block if allocated.
+  std::shared_ptr<Placeholder> holder_;  // holds the memory block if allocated.
 };

 }  // namespace framework
diff --git a/paddle/framework/tensor_test.cc b/paddle/framework/tensor_test.cc
new file mode 100644
index 0000000000000000000000000000000000000000..727d81f8d72e39ec564c42a48bf7ff64204adfff
--- /dev/null
+++ b/paddle/framework/tensor_test.cc
@@ -0,0 +1,85 @@
+/*
+  Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
+  Licensed under the Apache License, Version 2.0 (the "License");
+  you may not use this file except in compliance with the License.
+  You may obtain a copy of the License at
+  http://www.apache.org/licenses/LICENSE-2.0
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License.
+*/
+
+#include "paddle/framework/tensor.h"
+#include <gtest/gtest.h>
+#include <string>
+
+TEST(Tensor, ASSERT) {
+  paddle::framework::Tensor cpu_tensor;
+
+  bool caught = false;
+  try {
+    const double* p __attribute__((unused)) = cpu_tensor.data<double>();
+  } catch (paddle::framework::EnforceNotMet err) {
+    caught = true;
+    std::string msg = "Tensor::data must be called after Tensor::mutable_data.";
+    const char* what = err.what();
+    for (size_t i = 0; i < msg.length(); ++i) {
+      ASSERT_EQ(what[i], msg[i]);
+    }
+  }
+  ASSERT_TRUE(caught);
+}
+
+/* mutable_data() is not tested at present
+   because Memory::Alloc() and Memory::Free() are not ready yet.
+
+TEST(Tensor, MutableData) {
+  using namespace paddle::framework;
+  using namespace paddle::platform;
+  {
+    Tensor cpu_tensor;
+    float* p1 = nullptr;
+    float* p2 = nullptr;
+    // initialization
+    p1 = cpu_tensor.mutable_data<float>(make_ddim({1, 2, 3}), CPUPlace());
+    EXPECT_NE(p1, nullptr);
+    // set cpu_tensor to a new dim with larger size
+    // memory is supposed to be re-allocated
+    p2 = cpu_tensor.mutable_data<float>(make_ddim({3, 4}));
+    EXPECT_NE(p2, nullptr);
+    EXPECT_NE(p1, p2);
+    // set cpu_tensor to a new dim with the same size
+    // memory block is supposed to be unchanged
+    p1 = cpu_tensor.mutable_data<float>(make_ddim({2, 2, 3}));
+    EXPECT_EQ(p1, p2);
+    // set cpu_tensor to a new dim with smaller size
+    // memory block is supposed to be unchanged
+    p2 = cpu_tensor.mutable_data<float>(make_ddim({2, 2}));
+    EXPECT_EQ(p1, p2);
+  }
+
+  {
+    Tensor gpu_tensor;
+    float* p1 = nullptr;
+    float* p2 = nullptr;
+    // initialization
+    p1 = gpu_tensor.mutable_data<float>(make_ddim({1, 2, 3}), GPUPlace());
+    EXPECT_NE(p1, nullptr);
+    // set gpu_tensor to a new dim with larger size
+    // memory is supposed to be re-allocated
+    p2 = gpu_tensor.mutable_data<float>(make_ddim({3, 4}));
+    EXPECT_NE(p2, nullptr);
+    EXPECT_NE(p1, p2);
+    // set gpu_tensor to a new dim with the same size
+    // memory block is supposed to be unchanged
+    p1 = gpu_tensor.mutable_data<float>(make_ddim({2, 2, 3}));
+    EXPECT_EQ(p1, p2);
+    // set gpu_tensor to a new dim with smaller size
+    // memory block is supposed to be unchanged
+    p2 = gpu_tensor.mutable_data<float>(make_ddim({2, 2}));
+    EXPECT_EQ(p1, p2);
+  }
+}
+*/
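
Note on the allocation scheme in the patch above: PlaceholderImpl now owns its buffer through a std::unique_ptr whose deleter remembers the Place the buffer was allocated on, so the matching paddle::memory::Free(place, ptr) runs automatically when the placeholder is destroyed, while Tensor keeps the placeholder in a std::shared_ptr so that copies of a Tensor can share one memory block. The standalone sketch below illustrates only that unique_ptr-with-stateful-deleter pattern; FakePlace, FakeAlloc and FakeFree are hypothetical stand-ins, since the real paddle::memory::Alloc/Free are noted in the test as not ready yet.

// Sketch of the place-aware deleter pattern used by PlaceholderImpl.
// FakePlace/FakeAlloc/FakeFree are illustrative stand-ins, not Paddle APIs.
#include <cstdlib>
#include <iostream>
#include <memory>

struct FakePlace {
  int device;  // e.g. -1 for CPU, >= 0 for a GPU id (assumption for this sketch)
};

void* FakeAlloc(FakePlace /*place*/, std::size_t size) { return std::malloc(size); }
void FakeFree(FakePlace /*place*/, void* ptr) { std::free(ptr); }

// Deleter that records where the buffer came from, so the matching
// Free call runs when the unique_ptr releases the buffer.
class PlaceDeleter {
 public:
  explicit PlaceDeleter(FakePlace place) : place_(place) {}
  void operator()(float* ptr) const { FakeFree(place_, static_cast<void*>(ptr)); }

 private:
  FakePlace place_;
};

int main() {
  FakePlace cpu{-1};
  const std::size_t n = 6;
  std::unique_ptr<float, PlaceDeleter> buf(
      static_cast<float*>(FakeAlloc(cpu, n * sizeof(float))), PlaceDeleter(cpu));
  buf.get()[0] = 3.14f;
  std::cout << buf.get()[0] << std::endl;
  return 0;  // buf goes out of scope here and FakeFree(cpu, ...) is invoked
}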