diff --git a/paddle/framework/tensor.h b/paddle/framework/tensor.h
index 7fa662fbb5497a4d72912493589b819d4180403b..73eedd7375ebd5357d565cfbc9b00ab7964253fc 100644
--- a/paddle/framework/tensor.h
+++ b/paddle/framework/tensor.h
@@ -20,7 +20,6 @@ limitations under the License. */
 #include "paddle/framework/ddim.h"
 #include "paddle/framework/enforce.h"
 #include "paddle/memory/memory.h"
-#include "paddle/platform/assert.h"
 #include "paddle/platform/place.h"
 
 namespace paddle {
@@ -63,21 +62,35 @@ class Tensor {
 
   template <typename T>
   struct PlaceholderImpl : public Placeholder {
-    PlaceholderImpl(paddle::platform::Place pl, size_t size)
-        : ptr_(paddle::memory::Alloc(pl, size), paddle::memory::Deleter(pl)),
-          place_(pl),
+   private:
+    class Deleter {
+     public:
+      Deleter(platform::Place place) : place_(place) {}
+      void operator()(T* ptr) {
+        paddle::memory::Free(place_, static_cast<void*>(ptr));
+      }
+
+     private:
+      paddle::platform::Place place_;
+    };
+
+   public:
+    PlaceholderImpl(paddle::platform::Place place, size_t size)
+        : ptr_(static_cast<T*>(paddle::memory::Alloc(place, size)),
+               Deleter(place)),
+          place_(place),
           size_(size) {}
 
     virtual void* Ptr() const { return static_cast<void*>(ptr_.get()); }
     virtual size_t Size() const { return size_; }
     virtual paddle::platform::Place Place() const { return place_; }
 
-    std::unique_ptr<T, paddle::memory::Deleter> ptr_;
+    std::unique_ptr<T, Deleter> ptr_;
     paddle::platform::Place place_;  // record the place of ptr_.
     size_t size_;                    // size of the memory block.
   };
 
-  std::unique_ptr<Placeholder> holder_;  // holds the memory block if allocated.
+  std::shared_ptr<Placeholder> holder_;  // holds the memory block if allocated.
 };
 
 }  // namespace framework
diff --git a/paddle/framework/tensor_test.cc b/paddle/framework/tensor_test.cc
index fa44b24b6455c067cc2ae8c5a158dc425a200eb3..f76a31e921ab2819c0001d4e906c4acf113f2620 100644
--- a/paddle/framework/tensor_test.cc
+++ b/paddle/framework/tensor_test.cc
@@ -13,12 +13,23 @@
 
 #include "paddle/framework/tensor.h"
 #include <gtest/gtest.h>
+#include <string>
 
-TEST(Tensor, Data) {
-  using namespace paddle::framework;
-  using namespace paddle::platform;
+TEST(Tensor, ASSERT) {
+  paddle::framework::Tensor cpu_tensor;
 
-  Tensor cpu_tensor;
+  bool caught = false;
+  try {
+    const double* p __attribute__((unused)) = cpu_tensor.data<double>();
+  } catch (paddle::framework::EnforceNotMet err) {
+    caught = true;
+    std::string msg = "Tensor::data must be called after Tensor::mutable_data";
+    const char* what = err.what();
+    for (size_t i = 0; i < msg.length(); ++i) {
+      ASSERT_EQ(what[i], msg[i]);
+    }
+  }
+  ASSERT_TRUE(caught);
 }
 
 /* mutable_data() is not tested at present
@@ -27,45 +38,48 @@
 TEST(Tensor, MutableData) {
   using namespace paddle::framework;
   using namespace paddle::platform;
+  {
+    Tensor cpu_tensor;
+    float* p1 = nullptr;
+    float* p2 = nullptr;
+    // initialization
+    p1 = cpu_tensor.mutable_data<float>(make_ddim({1, 2, 3}), CPUPlace());
+    EXPECT_NE(p1, nullptr);
+    // set cpu_tensor a new dim with large size
+    // momery is supposed to be re-allocated
+    p2 = cpu_tensor.mutable_data<float>(make_ddim({3, 4}));
+    EXPECT_NE(p2, nullptr);
+    EXPECT_NE(p1, p2);
+    // set cpu_tensor a new dim with same size
+    // momery block is supposed to be unchanged
+    p1 = cpu_tensor.mutable_data<float>(make_ddim({2, 2, 3}));
+    EXPECT_EQ(p1, p2);
+    // set cpu_tensor a new dim with smaller size
+    // momery block is supposed to be unchanged
+    p2 = cpu_tensor.mutable_data<float>(make_ddim({2, 2}));
+    EXPECT_EQ(p1, p2);
+  }
 
-  Tensor cpu_tensor;
-  float* p1 = nullptr;
-  float* p2 = nullptr;
-  // initialization
-  p1 = cpu_tensor.mutable_data<float>(make_ddim({1, 2, 3}), CPUPlace());
-  EXPECT_NE(p1, nullptr);
-  // set cpu_tensor a new dim with large size
-  // momery is supposed to be re-allocated
-  p2 = cpu_tensor.mutable_data<float>(make_ddim({3, 4}));
-  EXPECT_NE(p2, nullptr);
-  EXPECT_NE(p1, p2);
-  // set cpu_tensor a new dim with same size
-  // momery block is supposed to be unchanged
-  p1 = cpu_tensor.mutable_data<float>(make_ddim({2, 2, 3}));
-  EXPECT_EQ(p1, p2);
-  // set cpu_tensor a new dim with smaller size
-  // momery block is supposed to be unchanged
-  p2 = cpu_tensor.mutable_data<float>(make_ddim({2, 2}));
-  EXPECT_EQ(p1, p2);
-
-  Tensor gpu_tensor;
-  float* p1 = nullptr;
-  float* p2 = nullptr;
-  // initialization
-  p1 = gpu_tensor.mutable_data<float>(make_ddim({1, 2, 3}), GPUPlace());
-  EXPECT_NE(p1, nullptr);
-  // set gpu_tensor a new dim with large size
-  // momery is supposed to be re-allocated
-  p2 = gpu_tensor.mutable_data<float>(make_ddim({3, 4}));
-  EXPECT_NE(p2, nullptr);
-  EXPECT_NE(p1, p2);
-  // set gpu_tensor a new dim with same size
-  // momery block is supposed to be unchanged
-  p1 = gpu_tensor.mutable_data<float>(make_ddim({2, 2, 3}));
-  EXPECT_EQ(p1, p2);
-  // set gpu_tensor a new dim with smaller size
-  // momery block is supposed to be unchanged
-  p2 = gpu_tensor.mutable_data<float>(make_ddim({2, 2}));
-  EXPECT_EQ(p1, p2);
+  {
+    Tensor gpu_tensor;
+    float* p1 = nullptr;
+    float* p2 = nullptr;
+    // initialization
+    p1 = gpu_tensor.mutable_data<float>(make_ddim({1, 2, 3}), GPUPlace());
+    EXPECT_NE(p1, nullptr);
+    // set gpu_tensor a new dim with large size
+    // momery is supposed to be re-allocated
+    p2 = gpu_tensor.mutable_data<float>(make_ddim({3, 4}));
+    EXPECT_NE(p2, nullptr);
+    EXPECT_NE(p1, p2);
+    // set gpu_tensor a new dim with same size
+    // momery block is supposed to be unchanged
+    p1 = gpu_tensor.mutable_data<float>(make_ddim({2, 2, 3}));
+    EXPECT_EQ(p1, p2);
+    // set gpu_tensor a new dim with smaller size
+    // momery block is supposed to be unchanged
+    p2 = gpu_tensor.mutable_data<float>(make_ddim({2, 2}));
+    EXPECT_EQ(p1, p2);
+  }
 }
 */
\ No newline at end of file
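Not part of the patch above: a minimal standalone C++ sketch of the ownership pattern the diff introduces in `PlaceholderImpl` — a raw `Alloc`-ed block wrapped in a `std::unique_ptr` with a stateful, place-aware deleter, and the whole holder then shared through a `std::shared_ptr`. `Place`, `Alloc`, `Free`, and `Holder` here are simplified stand-ins built on plain `malloc`/`free`, not the real `paddle::platform`/`paddle::memory` APIs.

```cpp
// Standalone sketch of the "unique_ptr + stateful deleter, shared holder" pattern.
// Place/Alloc/Free/Holder are simplified stand-ins, not PaddlePaddle APIs.
#include <cstdlib>
#include <iostream>
#include <memory>

struct Place {
  int device;  // -1 means CPU in this toy example.
};

void* Alloc(Place /*place*/, size_t size) { return std::malloc(size); }
void Free(Place /*place*/, void* ptr) { std::free(ptr); }

template <typename T>
class Deleter {
 public:
  explicit Deleter(Place place) : place_(place) {}
  // unique_ptr calls this instead of `delete`, so the block is returned
  // through the same place-aware allocator that produced it.
  void operator()(T* ptr) const { Free(place_, static_cast<void*>(ptr)); }

 private:
  Place place_;  // remembers where the block was allocated.
};

template <typename T>
struct Holder {
  Holder(Place place, size_t count)
      : ptr(static_cast<T*>(Alloc(place, count * sizeof(T))),
            Deleter<T>(place)),
        size(count) {}

  std::unique_ptr<T, Deleter<T>> ptr;  // owns the block, frees via Deleter.
  size_t size;                         // element count of the block.
};

int main() {
  Place cpu{-1};
  // shared_ptr on top of the holder: several owners can point at the same
  // block, and it is released exactly once when the last owner goes away.
  auto holder = std::make_shared<Holder<float>>(cpu, 6);
  holder->ptr.get()[0] = 1.0f;
  std::cout << "first element: " << holder->ptr.get()[0] << "\n";
  return 0;
}
```

Carrying the `Place` inside the deleter is what lets the smart pointer hand the block back to the allocator that produced it, and sharing the holder mirrors the switch from `std::unique_ptr<Placeholder>` to `std::shared_ptr<Placeholder>` in `tensor.h`.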