提交 e146fe83 编写于 作者: F fengjiayi

fix compile errors and add assert test

上级 9f408dfb
...@@ -20,7 +20,6 @@ limitations under the License. */ ...@@ -20,7 +20,6 @@ limitations under the License. */
#include "paddle/framework/ddim.h" #include "paddle/framework/ddim.h"
#include "paddle/framework/enforce.h" #include "paddle/framework/enforce.h"
#include "paddle/memory/memory.h" #include "paddle/memory/memory.h"
#include "paddle/platform/assert.h"
#include "paddle/platform/place.h" #include "paddle/platform/place.h"
namespace paddle { namespace paddle {
...@@ -63,21 +62,35 @@ class Tensor { ...@@ -63,21 +62,35 @@ class Tensor {
template <typename T> template <typename T>
struct PlaceholderImpl : public Placeholder { struct PlaceholderImpl : public Placeholder {
PlaceholderImpl(paddle::platform::Place pl, size_t size) private:
: ptr_(paddle::memory::Alloc(pl, size), paddle::memory::Deleter(pl)), class Deleter {
place_(pl), public:
Deleter(platform::Place place) : place_(place) {}
void operator()(T* ptr) {
paddle::memory::Free(place_, static_cast<void*>(ptr));
}
private:
paddle::platform::Place place_;
};
public:
PlaceholderImpl(paddle::platform::Place place, size_t size)
: ptr_(static_cast<T*>(paddle::memory::Alloc(place, size)),
Deleter(place)),
place_(place),
size_(size) {} size_(size) {}
virtual void* Ptr() const { return static_cast<void*>(ptr_.get()); } virtual void* Ptr() const { return static_cast<void*>(ptr_.get()); }
virtual size_t Size() const { return size_; } virtual size_t Size() const { return size_; }
virtual paddle::platform::Place Place() const { return place_; } virtual paddle::platform::Place Place() const { return place_; }
std::unique_ptr<T, memory::Deleter> ptr_; std::unique_ptr<T, Deleter> ptr_;
paddle::platform::Place place_; // record the place of ptr_. paddle::platform::Place place_; // record the place of ptr_.
size_t size_; // size of the memory block. size_t size_; // size of the memory block.
}; };
std::unique_ptr<Placeholder> holder_; // holds the memory block if allocated. std::shared_ptr<Placeholder> holder_; // holds the memory block if allocated.
}; };
} // namespace framework } // namespace framework
......
...@@ -13,12 +13,23 @@ ...@@ -13,12 +13,23 @@
#include "paddle/framework/tensor.h" #include "paddle/framework/tensor.h"
#include <gtest/gtest.h> #include <gtest/gtest.h>
#include <string>
// Verifies that Tensor::data<T>() throws EnforceNotMet when called before
// any memory has been allocated via mutable_data(), and that the error
// message starts with the documented text.
TEST(Tensor, ASSERT) {
  paddle::framework::Tensor cpu_tensor;

  bool caught = false;
  try {
    // Must throw: no mutable_data() call has allocated memory yet.
    const double* p __attribute__((unused)) = cpu_tensor.data<double>();
  } catch (const paddle::framework::EnforceNotMet& err) {
    // Catch by const reference (avoids slicing/copying the exception).
    caught = true;
    std::string msg = "Tensor::data must be called after Tensor::mutable_data";
    const char* what = err.what();
    for (size_t i = 0; i < msg.length(); ++i) {
      ASSERT_EQ(what[i], msg[i]);
    }
  }
  ASSERT_TRUE(caught);
}
/* mutable_data() is not tested at present /* mutable_data() is not tested at present
...@@ -27,45 +38,48 @@ TEST(Tensor, Data) { ...@@ -27,45 +38,48 @@ TEST(Tensor, Data) {
TEST(Tensor, MutableData) {
  using namespace paddle::framework;
  using namespace paddle::platform;
  {
    Tensor cpu_tensor;
    float* p1 = nullptr;
    float* p2 = nullptr;
    // initialization
    p1 = cpu_tensor.mutable_data<float>(make_ddim({1, 2, 3}), CPUPlace());
    EXPECT_NE(p1, nullptr);
    // set cpu_tensor a new dim with large size
    // memory is supposed to be re-allocated
    p2 = cpu_tensor.mutable_data<float>(make_ddim({3, 4}));
    EXPECT_NE(p2, nullptr);
    EXPECT_NE(p1, p2);
    // set cpu_tensor a new dim with same size
    // memory block is supposed to be unchanged
    p1 = cpu_tensor.mutable_data<float>(make_ddim({2, 2, 3}));
    EXPECT_EQ(p1, p2);
    // set cpu_tensor a new dim with smaller size
    // memory block is supposed to be unchanged
    p2 = cpu_tensor.mutable_data<float>(make_ddim({2, 2}));
    EXPECT_EQ(p1, p2);
  }

  {
    Tensor gpu_tensor;
    float* p1 = nullptr;
    float* p2 = nullptr;
    // initialization
    p1 = gpu_tensor.mutable_data<float>(make_ddim({1, 2, 3}), GPUPlace());
    EXPECT_NE(p1, nullptr);
    // set gpu_tensor a new dim with large size
    // memory is supposed to be re-allocated
    p2 = gpu_tensor.mutable_data<float>(make_ddim({3, 4}));
    EXPECT_NE(p2, nullptr);
    EXPECT_NE(p1, p2);
    // set gpu_tensor a new dim with same size
    // memory block is supposed to be unchanged
    p1 = gpu_tensor.mutable_data<float>(make_ddim({2, 2, 3}));
    EXPECT_EQ(p1, p2);
    // set gpu_tensor a new dim with smaller size
    // memory block is supposed to be unchanged
    p2 = gpu_tensor.mutable_data<float>(make_ddim({2, 2}));
    EXPECT_EQ(p1, p2);
  }
}
*/
\ No newline at end of file
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册