未验证 提交 5c358674 编写于 作者: 石晓伟 提交者: GitHub

updates the ctor of tensor, test=develop (#38946)

上级 d13c7799
......@@ -32,15 +32,17 @@ TEST(AccumulationNode, EagerTensor) {
pten::DenseTensorMeta meta = pten::DenseTensorMeta(
pten::DataType::FLOAT16, paddle::framework::make_ddim({1, 1}));
std::shared_ptr<pten::DenseTensor> dt0 = std::make_shared<pten::DenseTensor>(
std::make_shared<paddle::experimental::DefaultAllocator>(
paddle::platform::CPUPlace()),
std::make_unique<paddle::experimental::DefaultAllocator>(
paddle::platform::CPUPlace())
.get(),
meta);
dt0->mutable_data<paddle::platform::float16>()[0] = 10.0;
EagerTensor et0 = EagerTensor(dt0);
std::shared_ptr<pten::DenseTensor> dt1 = std::make_shared<pten::DenseTensor>(
std::make_shared<paddle::experimental::DefaultAllocator>(
paddle::platform::CPUPlace()),
std::make_unique<paddle::experimental::DefaultAllocator>(
paddle::platform::CPUPlace())
.get(),
meta);
dt1->mutable_data<paddle::platform::float16>()[0] = 20.0;
......@@ -48,8 +50,9 @@ TEST(AccumulationNode, EagerTensor) {
std::shared_ptr<pten::DenseTensor> grad_dt =
std::make_shared<pten::DenseTensor>(
std::make_shared<paddle::experimental::DefaultAllocator>(
paddle::platform::CPUPlace()),
std::make_unique<paddle::experimental::DefaultAllocator>(
paddle::platform::CPUPlace())
.get(),
meta);
EagerTensor grad_et = EagerTensor(grad_dt);
......
......@@ -42,8 +42,9 @@ TEST(AutogradMeta, MemberFunction) {
pten::DenseTensorMeta meta = pten::DenseTensorMeta(
pten::DataType::FLOAT32, paddle::framework::make_ddim({1, 2}));
std::shared_ptr<pten::DenseTensor> dt = std::make_shared<pten::DenseTensor>(
std::make_shared<paddle::experimental::DefaultAllocator>(
paddle::platform::CPUPlace()),
std::make_unique<paddle::experimental::DefaultAllocator>(
paddle::platform::CPUPlace())
.get(),
meta);
auto* dt_ptr = dt->mutable_data<float>();
dt_ptr[0] = 5.0f;
......
......@@ -36,8 +36,9 @@ TEST(EagerTensor, Constructor) {
pten::DenseTensorMeta meta = pten::DenseTensorMeta(
pten::DataType::FLOAT32, paddle::framework::make_ddim({1, 2}));
std::shared_ptr<pten::DenseTensor> dt = std::make_shared<pten::DenseTensor>(
std::make_shared<paddle::experimental::DefaultAllocator>(
paddle::platform::CPUPlace()),
std::make_unique<paddle::experimental::DefaultAllocator>(
paddle::platform::CPUPlace())
.get(),
meta);
auto* dt_ptr = dt->mutable_data<float>();
dt_ptr[0] = 5.0f;
......@@ -65,8 +66,9 @@ TEST(EagerTensor, MemberFunction) {
pten::DenseTensorMeta meta = pten::DenseTensorMeta(
pten::DataType::FLOAT32, paddle::framework::make_ddim({1, 2}));
std::shared_ptr<pten::DenseTensor> dt = std::make_shared<pten::DenseTensor>(
std::make_shared<paddle::experimental::DefaultAllocator>(
paddle::platform::CPUPlace()),
std::make_unique<paddle::experimental::DefaultAllocator>(
paddle::platform::CPUPlace())
.get(),
meta);
auto* dt_ptr = dt->mutable_data<float>();
dt_ptr[0] = 5.0f;
......
......@@ -41,8 +41,9 @@ TEST(GradNodeInfo, GradNodeBase) {
pten::DenseTensorMeta meta = pten::DenseTensorMeta(
pten::DataType::FLOAT32, paddle::framework::make_ddim({1, 1}));
std::shared_ptr<pten::DenseTensor> dt = std::make_shared<pten::DenseTensor>(
std::make_shared<paddle::experimental::DefaultAllocator>(
paddle::platform::CPUPlace()),
std::make_unique<paddle::experimental::DefaultAllocator>(
paddle::platform::CPUPlace())
.get(),
meta);
auto* dt_ptr = dt->mutable_data<float>();
dt_ptr[0] = 5.0f;
......@@ -97,8 +98,9 @@ TEST(GradNodeInfo, GradNodeBase) {
pten::DenseTensorMeta meta = pten::DenseTensorMeta(
pten::DataType::FLOAT32, paddle::framework::make_ddim({1, 1}));
std::shared_ptr<pten::DenseTensor> dt = std::make_shared<pten::DenseTensor>(
std::make_shared<paddle::experimental::DefaultAllocator>(
paddle::platform::CPUPlace()),
std::make_unique<paddle::experimental::DefaultAllocator>(
paddle::platform::CPUPlace())
.get(),
meta);
auto* dt_ptr = dt->mutable_data<float>();
dt_ptr[0] = 6.0f;
......
......@@ -37,8 +37,9 @@ class GradTestNode : public egr::GradNodeBase {
pten::DenseTensorMeta meta = pten::DenseTensorMeta(
pten::DataType::FLOAT32, paddle::framework::make_ddim({1, 1}));
std::shared_ptr<pten::DenseTensor> dt = std::make_shared<pten::DenseTensor>(
std::make_shared<paddle::experimental::DefaultAllocator>(
paddle::platform::CPUPlace()),
std::make_unique<paddle::experimental::DefaultAllocator>(
paddle::platform::CPUPlace())
.get(),
meta);
auto* dt_ptr = dt->mutable_data<float>();
dt_ptr[0] = 6.0f;
......
......@@ -36,8 +36,9 @@ TEST(GradTensorHolder, Constructor) {
pten::DenseTensorMeta meta = pten::DenseTensorMeta(
pten::DataType::FLOAT32, paddle::framework::make_ddim({2, 2}));
std::shared_ptr<pten::DenseTensor> dt = std::make_shared<pten::DenseTensor>(
std::make_shared<paddle::experimental::DefaultAllocator>(
paddle::platform::CPUPlace()),
std::make_unique<paddle::experimental::DefaultAllocator>(
paddle::platform::CPUPlace())
.get(),
meta);
EagerTensor et = EagerTensor(dt);
......@@ -52,15 +53,17 @@ TEST(GradTensorHolder, Interfaces) {
pten::DenseTensorMeta meta = pten::DenseTensorMeta(
pten::DataType::FLOAT32, paddle::framework::make_ddim({1, 1}));
std::shared_ptr<pten::DenseTensor> dt0 = std::make_shared<pten::DenseTensor>(
std::make_shared<paddle::experimental::DefaultAllocator>(
paddle::platform::CPUPlace()),
std::make_unique<paddle::experimental::DefaultAllocator>(
paddle::platform::CPUPlace())
.get(),
meta);
dt0->mutable_data<float>()[0] = 10.0;
EagerTensor et0 = EagerTensor(dt0);
std::shared_ptr<pten::DenseTensor> dt1 = std::make_shared<pten::DenseTensor>(
std::make_shared<paddle::experimental::DefaultAllocator>(
paddle::platform::CPUPlace()),
std::make_unique<paddle::experimental::DefaultAllocator>(
paddle::platform::CPUPlace())
.get(),
meta);
dt1->mutable_data<float>()[0] = 20.0;
EagerTensor et1 = EagerTensor(dt1);
......
......@@ -25,8 +25,9 @@ TEST(TensorWrapper, Basic) {
pten::DenseTensorMeta meta = pten::DenseTensorMeta(
pten::DataType::FLOAT32, paddle::framework::make_ddim({1, 2}));
std::shared_ptr<pten::DenseTensor> dt = std::make_shared<pten::DenseTensor>(
std::make_shared<paddle::experimental::DefaultAllocator>(
paddle::platform::CPUPlace()),
std::make_unique<paddle::experimental::DefaultAllocator>(
paddle::platform::CPUPlace())
.get(),
meta);
auto* dt_ptr = dt->mutable_data<float>();
dt_ptr[0] = 5.0f;
......@@ -51,8 +52,9 @@ TEST(TensorWrapper, Basic) {
pten::DenseTensorMeta meta2 = pten::DenseTensorMeta(
pten::DataType::FLOAT32, paddle::framework::make_ddim({1, 2}));
std::shared_ptr<pten::DenseTensor> dt2 = std::make_shared<pten::DenseTensor>(
std::make_shared<paddle::experimental::DefaultAllocator>(
paddle::platform::CPUPlace()),
std::make_unique<paddle::experimental::DefaultAllocator>(
paddle::platform::CPUPlace())
.get(),
meta2);
auto* dt_ptr2 = dt->mutable_data<float>();
dt_ptr2[0] = 6.0f;
......
......@@ -31,15 +31,17 @@ TEST(EagerUtils, AutoGradMeta) {
pten::DenseTensorMeta meta = pten::DenseTensorMeta(
pten::DataType::FLOAT32, paddle::framework::make_ddim({1, 1}));
std::shared_ptr<pten::DenseTensor> dt0 = std::make_shared<pten::DenseTensor>(
std::make_shared<paddle::experimental::DefaultAllocator>(
paddle::platform::CPUPlace()),
std::make_unique<paddle::experimental::DefaultAllocator>(
paddle::platform::CPUPlace())
.get(),
meta);
dt0->mutable_data<float>()[0] = 10.0;
EagerTensor et0 = EagerTensor(dt0);
std::shared_ptr<pten::DenseTensor> dt1 = std::make_shared<pten::DenseTensor>(
std::make_shared<paddle::experimental::DefaultAllocator>(
paddle::platform::CPUPlace()),
std::make_unique<paddle::experimental::DefaultAllocator>(
paddle::platform::CPUPlace())
.get(),
meta);
dt1->mutable_data<float>()[0] = 20.0;
EagerTensor et1 = EagerTensor(dt1);
......@@ -106,8 +108,9 @@ egr::EagerTensor CreateTestCPUTensor(T val,
pten::DenseTensorMeta(pten::DataType::FLOAT32, ddim);
egr::EagerTensor tensor;
std::shared_ptr<pten::DenseTensor> dt = std::make_shared<pten::DenseTensor>(
std::make_shared<paddle::experimental::DefaultAllocator>(
paddle::platform::CPUPlace()),
std::make_unique<paddle::experimental::DefaultAllocator>(
paddle::platform::CPUPlace())
.get(),
meta);
auto* dt_ptr = dt->mutable_data<T>();
for (int64_t i = 0; i < dt->numel(); i++) {
......
......@@ -22,25 +22,15 @@ limitations under the License. */
namespace paddle {
namespace experimental {
class DefaultAllocator : public pten::deprecated::Allocator {
class DefaultAllocator : public pten::Allocator {
public:
using Allocation = pten::deprecated::Allocation;
explicit DefaultAllocator(const paddle::platform::Place& place)
: place_(place) {}
static void Delete(Allocation* allocation) {
paddle::memory::allocation::Allocator::AllocationDeleter(
allocation->CastContextWithoutCheck<paddle::memory::Allocation>());
AllocationPtr Allocate(size_t bytes_size) override {
return memory::Alloc(place_, bytes_size);
}
Allocation Allocate(size_t bytes_size) override {
paddle::memory::AllocationPtr a = memory::Alloc(place_, bytes_size);
void* ptr = a->ptr();
return Allocation(ptr, a.release(), &Delete, place_);
}
const paddle::platform::Place& place() override { return place_; }
private:
paddle::platform::Place place_;
};
......
......@@ -24,13 +24,11 @@ limitations under the License. */
namespace pten {
DenseTensor::DenseTensor(const std::shared_ptr<Allocator>& a,
const DenseTensorMeta& meta)
DenseTensor::DenseTensor(Allocator* a, const DenseTensorMeta& meta)
: meta_(meta),
storage_(make_intrusive<TensorStorage>(a, SizeOf(dtype()) * numel())) {}
DenseTensor::DenseTensor(const std::shared_ptr<Allocator>& a,
DenseTensorMeta&& meta)
DenseTensor::DenseTensor(Allocator* a, DenseTensorMeta&& meta)
: meta_(std::move(meta)),
storage_(make_intrusive<TensorStorage>(a, SizeOf(dtype()) * numel())) {}
......
......@@ -60,17 +60,15 @@ class TensorInplaceVersion {
class DenseTensor : public TensorBase,
public TypeInfoTraits<TensorBase, DenseTensor> {
public:
using Allocator = deprecated::Allocator;
/// \brief Construct a dense tensor and allocate space.
/// \param a The allocator used to allocate space.
/// \param meta The meta data of dense tensor.
DenseTensor(const std::shared_ptr<Allocator>& a, const DenseTensorMeta& meta);
DenseTensor(Allocator* a, const DenseTensorMeta& meta);
/// \brief Construct a dense tensor and allocate space.
/// \param a The allocator used to allocate space.
/// \param meta The meta data of dense tensor.
DenseTensor(const std::shared_ptr<Allocator>& a, DenseTensorMeta&& meta);
DenseTensor(Allocator* a, DenseTensorMeta&& meta);
/// \brief Use existing storage space to create dense tensor. This interface
/// can be used to deliberately create an uninitialized dense tensor.
......
......@@ -18,7 +18,7 @@ namespace pten {
void TensorStorage::Realloc(size_t size) {
this->Clear();
data_ = paddle::memory::AllocShared(alloc_->place(), size);
data_ = alloc_->Allocate(size);
size_ = size;
}
......
......@@ -91,12 +91,11 @@ class Storage : public intrusive_ref_counter<Storage> {
class TensorStorage : public Storage {
public:
using Place = paddle::platform::Place;
using Allocator = deprecated::Allocator;
explicit TensorStorage(const std::shared_ptr<Allocator>& a) : alloc_(a) {}
explicit TensorStorage(Allocator* a) : alloc_(a) {}
TensorStorage(const std::shared_ptr<Allocator>& a, size_t size)
: Storage(paddle::memory::AllocShared(a->place(), size)), alloc_(a) {
TensorStorage(Allocator* a, size_t size)
: Storage(a->Allocate(size)), alloc_(a) {
size_ = data_->size();
}
......@@ -114,24 +113,18 @@ class TensorStorage : public Storage {
size_t size() const noexcept override { return size_; }
const Place& place() const override {
if (!data_ && !alloc_) {
if (!data_) {
PADDLE_THROW(paddle::platform::errors::Unimplemented(
"Unable to visit place: either data_ or alloc_ has to be initialized "
"first."));
}
if (data_) {
return data_->place();
}
return alloc_->place();
return data_->place();
}
bool OwnsMemory() const noexcept override { return true; }
const std::shared_ptr<Allocator>& allocator() const noexcept {
return alloc_;
}
private:
const std::shared_ptr<Allocator> alloc_;
Allocator* alloc_;
int64_t size_{0};
};
......
......@@ -5,8 +5,6 @@ else()
endif()
cc_test(test_pten_exception SRCS test_pten_exception.cc DEPS gtest)
cc_test(test_framework_storage SRCS test_storage.cc DEPS pten_api_utils)
cc_test(test_framework_tensor_utils SRCS test_tensor_utils.cc DEPS pten_api_utils)
cc_test(test_mean_api SRCS test_mean_api.cc DEPS pten_tensor pten_api pten_api_utils)
cc_test(test_dot_api SRCS test_dot_api.cc DEPS pten_tensor pten_api pten_api_utils)
......
......@@ -30,10 +30,10 @@ using DDim = paddle::framework::DDim;
// TODO(chenweihang): Remove this test after the API is used in the dygraph
TEST(API, cast) {
// 1. create tensor
const auto alloc = std::make_shared<paddle::experimental::DefaultAllocator>(
const auto alloc = std::make_unique<paddle::experimental::DefaultAllocator>(
paddle::platform::CPUPlace());
auto dense_x = std::make_shared<pten::DenseTensor>(
alloc,
alloc.get(),
pten::DenseTensorMeta(pten::DataType::FLOAT32,
framework::make_ddim({3, 4}),
pten::DataLayout::NCHW));
......
......@@ -30,10 +30,10 @@ using DDim = paddle::framework::DDim;
// TODO(chenweihang): Remove this test after the API is used in the dygraph
TEST(API, conj) {
// 1. create tensor
const auto alloc = std::make_shared<paddle::experimental::DefaultAllocator>(
const auto alloc = std::make_unique<paddle::experimental::DefaultAllocator>(
paddle::platform::CPUPlace());
auto dense_x = std::make_shared<pten::DenseTensor>(
alloc,
alloc.get(),
pten::DenseTensorMeta(pten::DataType::COMPLEX64,
framework::make_ddim({3, 10}),
pten::DataLayout::NCHW));
......
......@@ -30,17 +30,17 @@ using DDim = paddle::framework::DDim;
// TODO(chenweihang): Remove this test after the API is used in the dygraph
TEST(API, dot) {
// 1. create tensor
const auto alloc = std::make_shared<paddle::experimental::DefaultAllocator>(
const auto alloc = std::make_unique<paddle::experimental::DefaultAllocator>(
paddle::platform::CPUPlace());
auto dense_x = std::make_shared<pten::DenseTensor>(
alloc,
alloc.get(),
pten::DenseTensorMeta(pten::DataType::FLOAT32,
framework::make_ddim({3, 10}),
pten::DataLayout::NCHW));
auto* dense_x_data = dense_x->mutable_data<float>();
auto dense_y = std::make_shared<pten::DenseTensor>(
alloc,
alloc.get(),
pten::DenseTensorMeta(pten::DataType::FLOAT32,
framework::make_ddim({3, 10}),
pten::DataLayout::NCHW));
......
......@@ -30,17 +30,17 @@ using DDim = paddle::framework::DDim;
// TODO(chenweihang): Remove this test after the API is used in the dygraph
TEST(API, add) {
// 1. create tensor
const auto alloc = std::make_shared<paddle::experimental::DefaultAllocator>(
const auto alloc = std::make_unique<paddle::experimental::DefaultAllocator>(
paddle::platform::CPUPlace());
auto dense_x = std::make_shared<pten::DenseTensor>(
alloc,
alloc.get(),
pten::DenseTensorMeta(pten::DataType::FLOAT32,
framework::make_ddim({3, 10}),
pten::DataLayout::NCHW));
auto* dense_x_data = dense_x->mutable_data<float>();
auto dense_y = std::make_shared<pten::DenseTensor>(
alloc,
alloc.get(),
pten::DenseTensorMeta(pten::DataType::FLOAT32,
framework::make_ddim({10}),
pten::DataLayout::NCHW));
......@@ -84,17 +84,17 @@ TEST(API, add) {
// TODO(chenweihang): Remove this test after the API is used in the dygraph
TEST(API, subtract) {
// 1. create tensor
const auto alloc = std::make_shared<paddle::experimental::DefaultAllocator>(
const auto alloc = std::make_unique<paddle::experimental::DefaultAllocator>(
paddle::platform::CPUPlace());
auto dense_x = std::make_shared<pten::DenseTensor>(
alloc,
alloc.get(),
pten::DenseTensorMeta(pten::DataType::FLOAT32,
framework::make_ddim({3, 10}),
pten::DataLayout::NCHW));
auto* dense_x_data = dense_x->mutable_data<float>();
auto dense_y = std::make_shared<pten::DenseTensor>(
alloc,
alloc.get(),
pten::DenseTensorMeta(pten::DataType::FLOAT32,
framework::make_ddim({10}),
pten::DataLayout::NCHW));
......@@ -138,17 +138,17 @@ TEST(API, subtract) {
// TODO(chenweihang): Remove this test after the API is used in the dygraph
TEST(API, divide) {
// 1. create tensor
const auto alloc = std::make_shared<paddle::experimental::DefaultAllocator>(
const auto alloc = std::make_unique<paddle::experimental::DefaultAllocator>(
paddle::platform::CPUPlace());
auto dense_x = std::make_shared<pten::DenseTensor>(
alloc,
alloc.get(),
pten::DenseTensorMeta(pten::DataType::FLOAT32,
framework::make_ddim({3, 10}),
pten::DataLayout::NCHW));
auto* dense_x_data = dense_x->mutable_data<float>();
auto dense_y = std::make_shared<pten::DenseTensor>(
alloc,
alloc.get(),
pten::DenseTensorMeta(pten::DataType::FLOAT32,
framework::make_ddim({10}),
pten::DataLayout::NCHW));
......@@ -192,17 +192,17 @@ TEST(API, divide) {
TEST(API, multiply) {
// 1. create tensor
const auto alloc = std::make_shared<paddle::experimental::DefaultAllocator>(
const auto alloc = std::make_unique<paddle::experimental::DefaultAllocator>(
paddle::platform::CPUPlace());
auto dense_x = std::make_shared<pten::DenseTensor>(
alloc,
alloc.get(),
pten::DenseTensorMeta(pten::DataType::FLOAT32,
framework::make_ddim({3, 10}),
pten::DataLayout::NCHW));
auto* dense_x_data = dense_x->mutable_data<float>();
auto dense_y = std::make_shared<pten::DenseTensor>(
alloc,
alloc.get(),
pten::DenseTensorMeta(pten::DataType::FLOAT32,
framework::make_ddim({10}),
pten::DataLayout::NCHW));
......
......@@ -30,10 +30,10 @@ using DDim = paddle::framework::DDim;
// TODO(chenweihang): Remove this test after the API is used in the dygraph
TEST(API, empty_like) {
// 1. create tensor
const auto alloc = std::make_shared<paddle::experimental::DefaultAllocator>(
const auto alloc = std::make_unique<paddle::experimental::DefaultAllocator>(
paddle::platform::CPUPlace());
auto dense_x = std::make_shared<pten::DenseTensor>(
alloc,
alloc.get(),
pten::DenseTensorMeta(pten::DataType::FLOAT32,
framework::make_ddim({3, 2}),
pten::DataLayout::NCHW));
......@@ -55,11 +55,11 @@ TEST(API, empty_like) {
TEST(API, empty1) {
// 1. create tensor
const auto alloc = std::make_shared<paddle::experimental::DefaultAllocator>(
const auto alloc = std::make_unique<paddle::experimental::DefaultAllocator>(
paddle::platform::CPUPlace());
auto dense_shape = std::make_shared<pten::DenseTensor>(
alloc,
alloc.get(),
pten::DenseTensorMeta(pten::DataType::INT64,
framework::make_ddim({2}),
pten::DataLayout::NCHW));
......@@ -83,11 +83,11 @@ TEST(API, empty1) {
}
TEST(API, empty2) {
const auto alloc = std::make_shared<paddle::experimental::DefaultAllocator>(
const auto alloc = std::make_unique<paddle::experimental::DefaultAllocator>(
paddle::platform::CPUPlace());
auto dense_scalar = std::make_shared<pten::DenseTensor>(
alloc,
alloc.get(),
pten::DenseTensorMeta(pten::DataType::INT32,
framework::make_ddim({1}),
pten::DataLayout::NCHW));
......
......@@ -30,10 +30,10 @@ using DDim = paddle::framework::DDim;
// TODO(chenweihang): Remove this test after the API is used in the dygraph
TEST(API, full_like) {
// 1. create tensor
const auto alloc = std::make_shared<paddle::experimental::DefaultAllocator>(
const auto alloc = std::make_unique<paddle::experimental::DefaultAllocator>(
paddle::platform::CPUPlace());
auto dense_x = std::make_shared<pten::DenseTensor>(
alloc,
alloc.get(),
pten::DenseTensorMeta(pten::DataType::FLOAT32,
framework::make_ddim({3, 2}),
pten::DataLayout::NCHW));
......@@ -65,10 +65,10 @@ TEST(API, full_like) {
TEST(API, zeros_like) {
// 1. create tensor
const auto alloc = std::make_shared<paddle::experimental::DefaultAllocator>(
const auto alloc = std::make_unique<paddle::experimental::DefaultAllocator>(
paddle::platform::CPUPlace());
auto dense_x = std::make_shared<pten::DenseTensor>(
alloc,
alloc.get(),
pten::DenseTensorMeta(pten::DataType::FLOAT32,
framework::make_ddim({3, 2}),
pten::DataLayout::NCHW));
......@@ -98,10 +98,10 @@ TEST(API, zeros_like) {
TEST(API, ones_like) {
// 1. create tensor
const auto alloc = std::make_shared<paddle::experimental::DefaultAllocator>(
const auto alloc = std::make_unique<paddle::experimental::DefaultAllocator>(
paddle::platform::CPUPlace());
auto dense_x = std::make_shared<pten::DenseTensor>(
alloc,
alloc.get(),
pten::DenseTensorMeta(pten::DataType::INT32,
framework::make_ddim({3, 2}),
pten::DataLayout::NCHW));
......@@ -131,11 +131,11 @@ TEST(API, ones_like) {
TEST(API, full1) {
// 1. create tensor
const auto alloc = std::make_shared<paddle::experimental::DefaultAllocator>(
const auto alloc = std::make_unique<paddle::experimental::DefaultAllocator>(
paddle::platform::CPUPlace());
auto dense_shape = std::make_shared<pten::DenseTensor>(
alloc,
alloc.get(),
pten::DenseTensorMeta(pten::DataType::INT64,
framework::make_ddim({2}),
pten::DataLayout::NCHW));
......@@ -144,7 +144,7 @@ TEST(API, full1) {
shape_data[1] = 3;
auto dense_scalar = std::make_shared<pten::DenseTensor>(
alloc,
alloc.get(),
pten::DenseTensorMeta(pten::DataType::FLOAT32,
framework::make_ddim({1}),
pten::DataLayout::NCHW));
......@@ -177,11 +177,11 @@ TEST(API, full1) {
}
TEST(API, full2) {
const auto alloc = std::make_shared<paddle::experimental::DefaultAllocator>(
const auto alloc = std::make_unique<paddle::experimental::DefaultAllocator>(
paddle::platform::CPUPlace());
auto dense_scalar = std::make_shared<pten::DenseTensor>(
alloc,
alloc.get(),
pten::DenseTensorMeta(pten::DataType::INT32,
framework::make_ddim({1}),
pten::DataLayout::NCHW));
......
......@@ -30,10 +30,10 @@ using DDim = paddle::framework::DDim;
// TODO(chenweihang): Remove this test after the API is used in the dygraph
TEST(API, flatten) {
// 1. create tensor
const auto alloc = std::make_shared<paddle::experimental::DefaultAllocator>(
const auto alloc = std::make_unique<paddle::experimental::DefaultAllocator>(
paddle::platform::CPUPlace());
auto dense_x = std::make_shared<pten::DenseTensor>(
alloc,
alloc.get(),
pten::DenseTensorMeta(pten::DataType::FLOAT32,
framework::make_ddim({3, 2, 2, 3}),
pten::DataLayout::NCHW));
......
......@@ -30,10 +30,10 @@ using DDim = paddle::framework::DDim;
TEST(API, matmul_cpu) {
// 1. create tensor
const auto alloc = std::make_shared<paddle::experimental::DefaultAllocator>(
const auto alloc = std::make_unique<paddle::experimental::DefaultAllocator>(
paddle::platform::CPUPlace());
auto dense_x = std::make_shared<pten::DenseTensor>(
alloc,
alloc.get(),
pten::DenseTensorMeta(pten::DataType::FLOAT32,
framework::make_ddim({3, 3}),
pten::DataLayout::NCHW));
......@@ -41,7 +41,7 @@ TEST(API, matmul_cpu) {
auto* dense_x_data = dense_x->mutable_data<float>();
auto dense_y = std::make_shared<pten::DenseTensor>(
alloc,
alloc.get(),
pten::DenseTensorMeta(pten::DataType::FLOAT32,
framework::make_ddim({3, 3}),
pten::DataLayout::NCHW));
......@@ -79,10 +79,10 @@ TEST(API, matmul_cpu) {
TEST(API, matmul_cuda) {
// Prepare CPU Dense Tensor
const auto alloc_cpu =
std::make_shared<paddle::experimental::DefaultAllocator>(
std::make_unique<paddle::experimental::DefaultAllocator>(
paddle::platform::CPUPlace());
auto ref_x = std::make_shared<pten::DenseTensor>(
alloc_cpu,
alloc_cpu.get(),
pten::DenseTensorMeta(pten::DataType::FLOAT32,
framework::make_ddim({3, 3}),
pten::DataLayout::NCHW));
......@@ -90,7 +90,7 @@ TEST(API, matmul_cuda) {
auto* ref_x_data = ref_x->mutable_data<float>();
auto ref_y = std::make_shared<pten::DenseTensor>(
alloc_cpu,
alloc_cpu.get(),
pten::DenseTensorMeta(pten::DataType::FLOAT32,
framework::make_ddim({3, 3}),
pten::DataLayout::NCHW));
......@@ -104,16 +104,16 @@ TEST(API, matmul_cuda) {
// 1. create tensor
const auto alloc_cuda =
std::make_shared<paddle::experimental::DefaultAllocator>(
std::make_unique<paddle::experimental::DefaultAllocator>(
paddle::platform::CUDAPlace());
auto dense_x = std::make_shared<pten::DenseTensor>(
alloc_cuda,
alloc_cuda.get(),
pten::DenseTensorMeta(pten::DataType::FLOAT32,
framework::make_ddim({3, 3}),
pten::DataLayout::NCHW));
auto dense_y = std::make_shared<pten::DenseTensor>(
alloc_cuda,
alloc_cuda.get(),
pten::DenseTensorMeta(pten::DataType::FLOAT32,
framework::make_ddim({3, 3}),
pten::DataLayout::NCHW));
......@@ -143,7 +143,7 @@ TEST(API, matmul_cuda) {
auto dense_out = std::dynamic_pointer_cast<pten::DenseTensor>(out.impl());
auto ref_out = std::make_shared<pten::DenseTensor>(
alloc_cpu,
alloc_cpu.get(),
pten::DenseTensorMeta(
pten::DataType::FLOAT32, out.dims(), pten::DataLayout::NCHW));
......
......@@ -30,10 +30,10 @@ using DDim = paddle::framework::DDim;
// TODO(chenweihang): Remove this test after the API is used in the dygraph
TEST(API, mean) {
// 1. create tensor
const auto alloc = std::make_shared<paddle::experimental::DefaultAllocator>(
const auto alloc = std::make_unique<paddle::experimental::DefaultAllocator>(
paddle::platform::CPUPlace());
auto dense_x = std::make_shared<pten::DenseTensor>(
alloc,
alloc.get(),
pten::DenseTensorMeta(pten::DataType::FLOAT32,
framework::make_ddim({3, 4}),
pten::DataLayout::NCHW));
......
......@@ -30,10 +30,10 @@ using DDim = paddle::framework::DDim;
// TODO(chenweihang): Remove this test after the API is used in the dygraph
TEST(API, reshape) {
// 1. create tensor
const auto alloc = std::make_shared<paddle::experimental::DefaultAllocator>(
const auto alloc = std::make_unique<paddle::experimental::DefaultAllocator>(
paddle::platform::CPUPlace());
auto dense_x = std::make_shared<pten::DenseTensor>(
alloc,
alloc.get(),
pten::DenseTensorMeta(pten::DataType::FLOAT32,
framework::make_ddim({3, 2, 2, 3}),
pten::DataLayout::NCHW));
......
/* Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include <vector>
#include "gtest/gtest.h"
#include "paddle/pten/api/lib/utils/allocator.h"
#include "paddle/pten/api/lib/utils/storage.h"
namespace paddle {
namespace tests {
// Fixed misspelled test name: "external_stroage" -> "external_storage".
TEST(host_storage, external_storage) {
  // Fill a regular TensorStorage, then wrap a sub-range of it in an
  // ExternalStorage view and verify the view's size, place and ownership.
  const size_t size{100};
  const auto a = std::make_shared<experimental::DefaultAllocator>(
      paddle::platform::CPUPlace());
  pten::intrusive_ptr<pten::Storage> in_storage =
      pten::make_intrusive<pten::TensorStorage>(a, size);
  char* data = static_cast<char*>(in_storage->data());
  for (size_t i = 0; i < size; ++i) {
    data[i] = i;
  }
  const size_t delta{1};  // byte offset of the external view into in_storage
  const size_t n{10};     // length of the external view in bytes
  auto ex_storage =
      pten::make_intrusive<experimental::ExternalStorage>(in_storage, delta, n);
  CHECK_EQ(ex_storage->size(), n);
  CHECK(paddle::platform::is_cpu_place(ex_storage->place()));
  // The view must not own the underlying buffer; in_storage keeps ownership.
  CHECK(!ex_storage->OwnsMemory());
  // Creating the view must leave the original bytes untouched.
  for (size_t i = delta; i < delta + n; ++i) {
    CHECK_EQ(data[i], static_cast<char>(i));
  }
}
TEST(host_storage, external_vector) {
  // Wrap caller-owned vector memory in a non-owning ExternalStorage and
  // verify its reported size, place, ownership flag, and that the
  // underlying bytes are left untouched.
  std::vector<char> buffer(100);
  char fill = 0;
  for (auto& byte : buffer) {
    byte = fill++;
  }
  const size_t offset{1};
  const size_t view_len{10};
  auto view = pten::make_intrusive<experimental::ExternalStorage>(
      buffer.data(), view_len, paddle::platform::CPUPlace());
  CHECK_EQ(view->size(), view_len);
  CHECK(paddle::platform::is_cpu_place(view->place()));
  CHECK(!view->OwnsMemory());
  for (size_t i = offset; i < offset + view_len; ++i) {
    CHECK_EQ(buffer[i], static_cast<char>(i));
  }
}
} // namespace tests
} // namespace paddle
......@@ -30,10 +30,10 @@ using DDim = paddle::framework::DDim;
// TODO(chenweihang): Remove this test after the API is used in the dygraph
TEST(API, sum) {
// 1. create tensor
const auto alloc = std::make_shared<paddle::experimental::DefaultAllocator>(
const auto alloc = std::make_unique<paddle::experimental::DefaultAllocator>(
paddle::platform::CPUPlace());
auto dense_x = std::make_shared<pten::DenseTensor>(
alloc,
alloc.get(),
pten::DenseTensorMeta(pten::DataType::FLOAT32,
framework::make_ddim({3, 4}),
pten::DataLayout::NCHW));
......
/* Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include "gtest/gtest.h"
#include "paddle/pten/api/lib/utils/tensor_utils.h"
#include "paddle/pten/core/tensor_meta.h"
namespace paddle {
namespace tests {
using DDim = paddle::framework::DDim;
using DataType = paddle::experimental::DataType;
using DataLayout = paddle::experimental::DataLayout;
using DenseTensor = pten::DenseTensor;
using DenseTensorMeta = pten::DenseTensorMeta;
TEST(tensor_utils, dense_tensor_to_lod_tensor) {
  // Round trip: move a pten::DenseTensor's storage into a
  // framework::LoDTensor, then rebuild a DenseTensor from it, checking
  // that lod, dtype, layout, place and element values survive both ways.
  const DDim shape({2, 1});
  const DataType data_type{DataType::FLOAT32};
  const DataLayout data_layout{DataLayout::NCHW};
  const pten::LoD level_of_detail{{0, 2}};

  DenseTensorMeta tensor_meta(data_type, shape, data_layout, level_of_detail);
  auto allocator =
      std::make_shared<experimental::DefaultAllocator>(platform::CPUPlace());
  DenseTensor source(allocator, tensor_meta);
  float* source_data = source.mutable_data<float>();
  source_data[0] = 1.0f;
  source_data[1] = 2.1f;

  framework::LoDTensor lod_tensor;
  experimental::MovesStorage(&source, &lod_tensor);
  // The meta (including lod) is still readable on the source after the
  // storage has been moved out — this is what the original test asserts.
  CHECK(source.lod().size() == lod_tensor.lod().size());
  CHECK(source.lod()[0] ==
        static_cast<paddle::framework::Vector<size_t>>((lod_tensor.lod()[0])));
  CHECK(source.dtype() == pten::TransToPtenDataType(lod_tensor.type()));
  CHECK(source.layout() == lod_tensor.layout());
  CHECK(platform::is_cpu_place(lod_tensor.place()));
  CHECK(lod_tensor.data<float>()[0] == 1.0f);
  CHECK(lod_tensor.data<float>()[1] == 2.1f);

  auto restored = experimental::MakePtenDenseTensor(lod_tensor);
  CHECK(restored->dims() == shape);
  CHECK(restored->dtype() == data_type);
  CHECK(restored->layout() == data_layout);
  CHECK(restored->lod().size() == level_of_detail.size());
  CHECK(restored->lod()[0] == level_of_detail[0]);
  const float* restored_data = restored->data<float>();
  CHECK(restored_data[0] == 1.0f);
  CHECK(restored_data[1] == 2.1f);
}
TEST(tensor_utils, dense_tensor_to_tensor) {
  // Round trip: move a pten::DenseTensor's storage into a plain
  // framework::Tensor (no lod), then rebuild a DenseTensor and verify
  // dtype, layout, place, dims and element values.
  const DDim shape({2, 1});
  const DataType data_type{DataType::FLOAT32};
  const DataLayout data_layout{DataLayout::NCHW};

  DenseTensorMeta tensor_meta(data_type, shape, data_layout);
  auto allocator =
      std::make_shared<experimental::DefaultAllocator>(platform::CPUPlace());
  DenseTensor source(allocator, tensor_meta);
  float* source_data = source.mutable_data<float>();
  source_data[0] = 1.0f;
  source_data[1] = 2.1f;

  framework::Tensor dst_tensor;
  experimental::MovesStorage(&source, &dst_tensor);
  CHECK(source.dtype() == pten::TransToPtenDataType(dst_tensor.type()));
  CHECK(source.layout() == dst_tensor.layout());
  CHECK(platform::is_cpu_place(dst_tensor.place()));
  CHECK(dst_tensor.data<float>()[0] == 1.0f);
  CHECK(dst_tensor.data<float>()[1] == 2.1f);

  auto restored = experimental::MakePtenDenseTensor(dst_tensor);
  CHECK(restored->dims() == shape);
  CHECK(restored->dtype() == data_type);
  CHECK(restored->layout() == data_layout);
  const float* restored_data = restored->data<float>();
  CHECK(restored_data[0] == 1.0f);
  CHECK(restored_data[1] == 2.1f);
}
TEST(PtenUtils, VarToPtTensor) {
  // Builds a framework::Variable holding a SelectedRows, converts it to a
  // pten tensor via MakePtenTensorBaseFromVar, and checks the result dtype.
  // 1. create Variable
  paddle::framework::Variable v;
  auto selected_rows = v.GetMutable<paddle::framework::SelectedRows>();
  paddle::framework::Tensor* value = selected_rows->mutable_value();
  auto* data = value->mutable_data<int>(paddle::framework::make_ddim({1, 1}),
                                        paddle::platform::CPUPlace());
  data[0] = 123;
  // Target backend follows the build configuration: CUDA/HIP builds request
  // the GPU backend, otherwise CPU.
  pten::Backend expect_backend = pten::Backend::CPU;
#if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
  expect_backend = pten::Backend::GPU;
#endif
  auto tensor_def = pten::TensorArgDef(
      expect_backend, pten::DataLayout::NCHW, pten::DataType::INT32);
  // 2. test API
  auto tensor_x = experimental::MakePtenTensorBaseFromVar(v, tensor_def);
  // 3. check result
  // Only dtype is asserted here; dims/backend of the converted tensor are
  // not checked by this test.
  ASSERT_EQ(tensor_x->dtype(), pten::DataType::INT32);
}
} // namespace tests
} // namespace paddle
......@@ -28,10 +28,10 @@ namespace framework = paddle::framework;
using DDim = paddle::framework::DDim;
paddle::experimental::Tensor CreateInputTensor() {
const auto alloc = std::make_shared<paddle::experimental::DefaultAllocator>(
const auto alloc = std::make_unique<paddle::experimental::DefaultAllocator>(
paddle::platform::CPUPlace());
auto dense_x = std::make_shared<pten::DenseTensor>(
alloc,
alloc.get(),
pten::DenseTensorMeta(pten::DataType::INT64,
framework::make_ddim({3, 4}),
pten::DataLayout::NCHW));
......
# Unit tests for the pten core primitives; DEPS lists the library
# targets each test links against.
cc_test(test_allocator SRCS test_allocator.cc DEPS tensor_base)
cc_test(test_storage SRCS test_storage.cc DEPS tensor_base)
cc_test(test_dense_tensor SRCS test_dense_tensor.cc DEPS dense_tensor)
# These two are header-only checks and need no extra dependencies.
cc_test(test_intrusive_ptr SRCS test_intrusive_ptr.cc)
cc_test(test_type_info SRCS test_type_info.cc)
......
......@@ -21,76 +21,19 @@ limitations under the License. */
namespace pten {
namespace tests {
class HostAllocatorSample : public pten::deprecated::RawAllocator {
class FancyAllocator : public pten::Allocator {
public:
using Place = paddle::platform::Place;
void* Allocate(size_t bytes_size) override {
return ::operator new(bytes_size);
}
void Deallocate(void* ptr, size_t bytes_size) override {
return ::operator delete(ptr);
}
const Place& place() const override { return place_; }
private:
Place place_{paddle::platform::CPUPlace()};
};
class FancyAllocator : public pten::deprecated::Allocator {
public:
using Allocation = pten::deprecated::Allocation;
static void Delete(Allocation* allocation) {
::operator delete(allocation->ptr());
}
Allocation Allocate(size_t bytes_size) override {
AllocationPtr Allocate(size_t bytes_size) override {
void* data = ::operator new(bytes_size);
return Allocation(data, data, &Delete, place());
}
const paddle::platform::Place& place() override { return place_; }
paddle::platform::Place place_ = paddle::platform::CPUPlace();
};
template <typename T>
struct CustomAllocator {
using value_type = T;
using Allocator = pten::deprecated::RawAllocator;
explicit CustomAllocator(const std::shared_ptr<Allocator>& a) noexcept
: alloc_(a) {}
CustomAllocator(const CustomAllocator&) noexcept = default;
T* allocate(std::size_t n) {
return static_cast<T*>(alloc_->Allocate(n * sizeof(T)));
}
void deallocate(T* p, std::size_t n) {
return alloc_->Deallocate(p, sizeof(T) * n);
auto* allocation =
new pten::Allocation(data, bytes_size, paddle::platform::CPUPlace());
return AllocationPtr(allocation, Delete);
}
template <typename R, typename U>
friend bool operator==(const CustomAllocator<R>&,
const CustomAllocator<U>&) noexcept;
template <typename R, typename U>
friend bool operator!=(const CustomAllocator<R>&,
const CustomAllocator<U>&) noexcept;
private:
std::shared_ptr<Allocator> alloc_;
};
template <typename T, typename U>
inline bool operator==(const CustomAllocator<T>& lhs,
const CustomAllocator<U>& rhs) noexcept {
return &lhs.alloc_ == &rhs.alloc_;
}
template <typename T, typename U>
inline bool operator!=(const CustomAllocator<T>& lhs,
const CustomAllocator<U>& rhs) noexcept {
return &lhs.alloc_ != &rhs.alloc_;
}
} // namespace tests
} // namespace pten
/* Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include <algorithm>
#include <vector>
#include "gtest/gtest.h"
#include "paddle/fluid/framework/generator.h"
#include "paddle/pten/tests/core/allocator.h"
#include "paddle/pten/tests/core/random.h"
#include "paddle/pten/tests/core/timer.h"
namespace pten {
namespace tests {
using RawAllocator = pten::deprecated::RawAllocator;
using Allocator = pten::deprecated::Allocator;
using Allocation = pten::deprecated::Allocation;
template <typename T>
bool host_allocator_test(size_t vector_size) {
std::vector<T> src(vector_size);
std::generate(src.begin(), src.end(), make_generator(src));
std::vector<T, CustomAllocator<T>> dst(
src.begin(),
src.end(),
CustomAllocator<T>(std::make_shared<HostAllocatorSample>()));
return std::equal(src.begin(), src.end(), dst.begin());
}
// Runs the host allocator round-trip for several element types.
TEST(raw_allocator, host) {
  constexpr size_t kVectorSize = 1000;
  CHECK(host_allocator_test<float>(kVectorSize));
  CHECK(host_allocator_test<int32_t>(kVectorSize));
  CHECK(host_allocator_test<int64_t>(kVectorSize));
}
// RAII wrapper that allocates `size` bytes from a RawAllocator in the
// constructor and releases them in the destructor.
class StorageRawAlloc {
 public:
  StorageRawAlloc(const std::shared_ptr<RawAllocator>& a, size_t size)
      : size_(size), alloc_(a) {
    data_ = alloc_->Allocate(size_);
  }
  // BUG FIX: the original destructor passed the never-initialized member
  // `size` to Deallocate (undefined behavior); the requested byte count is
  // now captured at construction time and reused here.
  ~StorageRawAlloc() { alloc_->Deallocate(data_, size_); }

 private:
  void* data_;
  size_t size_;
  std::shared_ptr<RawAllocator> alloc_;
};
// RAII wrapper that obtains `size` bytes from the (deprecated) fancy
// Allocator; the held Allocation releases the memory when it is destroyed.
class StorageFancyAlloc {
 public:
  StorageFancyAlloc(const std::shared_ptr<Allocator>& allocator, size_t size)
      : allocator_(allocator), allocation_(allocator->Allocate(size)) {}

 private:
  std::shared_ptr<Allocator> allocator_;
  Allocation allocation_;
};
// Micro-benchmark comparing the allocation cost of the raw allocator against
// the fancy (Allocation-returning) allocator over `cycles` timed rounds.
TEST(benchmark, allocator) {
  // make_shared is the idiomatic way to create the shared owners here.
  std::shared_ptr<RawAllocator> raw_allocator =
      std::make_shared<HostAllocatorSample>();
  std::shared_ptr<Allocator> fancy_allocator = std::make_shared<FancyAllocator>();
  const size_t cycles = 100;
  Timer timer;
  double t1{}, t2{};
  for (size_t i = 0; i < cycles; ++i) {
    timer.tic();
    // FIX: the inner loops previously re-declared `i`, shadowing the outer
    // counter; a distinct name removes the shadowing warning/confusion.
    for (size_t j = 0; j < cycles; ++j) {
      // Construct-and-destroy a temporary to time one alloc/free pair.
      StorageRawAlloc(raw_allocator, j * 100);
    }
    t1 += timer.toc();
    timer.tic();
    for (size_t j = 0; j < cycles; ++j) {
      StorageFancyAlloc(fancy_allocator, j * 100);
    }
    t2 += timer.toc();
  }
  std::cout << "The cost of raw alloc is " << t1 << "ms.\n";
  std::cout << "The cost of fancy alloc with place is " << t2 << "ms.\n";
}
} // namespace tests
} // namespace pten
......@@ -75,7 +75,8 @@ TEST(dense_tensor, ctor) {
const LoD lod{};
DenseTensorMeta meta(dtype, dims, layout, lod);
auto alloc = std::make_shared<FancyAllocator>();
auto fancy_allocator = std::unique_ptr<Allocator>(new FancyAllocator);
auto* alloc = fancy_allocator.get();
auto check_dense_tensor = [](const DenseTensor& t,
const DenseTensorMeta& m) -> bool {
......@@ -95,10 +96,6 @@ TEST(dense_tensor, ctor) {
DenseTensor tensor_1(alloc, DenseTensorMeta(meta));
check_dense_tensor(tensor_0, meta);
DenseTensor tensor_2(make_intrusive<TensorStorage>(alloc), meta);
CHECK_NOTNULL(tensor_2.mutable_data<int8_t>());
check_dense_tensor(tensor_2, meta);
}
TEST(dense_tensor, resize) {
......@@ -108,7 +105,8 @@ TEST(dense_tensor, resize) {
const LoD lod{};
DenseTensorMeta meta(dtype, dims, layout, lod);
auto alloc = std::make_shared<FancyAllocator>();
auto fancy_allocator = std::unique_ptr<Allocator>(new FancyAllocator);
auto* alloc = fancy_allocator.get();
DenseTensor tensor_0(alloc, meta);
CHECK_EQ(tensor_0.capacity(), 2u);
......@@ -125,7 +123,8 @@ TEST(dense_tensor, shallow_copy) {
const LoD lod{};
DenseTensorMeta meta(dtype, dims, layout, lod);
auto alloc = std::make_shared<FancyAllocator>();
auto fancy_allocator = std::unique_ptr<Allocator>(new FancyAllocator);
auto* alloc = fancy_allocator.get();
DenseTensor tensor_0(alloc, meta);
DenseTensor tensor_1(tensor_0);
......
/* Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include <vector>
#include "gtest/gtest.h"
#include "paddle/pten/core/storage.h"
#include "paddle/pten/tests/core/allocator.h"
namespace pten {
namespace tests {
// Verifies TensorStorage construction, meta queries (size/place/ownership/
// allocator), and growth through Realloc on the host.
TEST(host_storage, internal) {
  // TODO(Shixiaowei02): Here we need to consider the case
  // where the size is zero.
  const size_t initial_size{100};
  const size_t grown_size = initial_size + 100;
  const auto allocator = std::make_shared<FancyAllocator>();

  TensorStorage storage(allocator, initial_size);
  CHECK_EQ(storage.size(), initial_size);
  CHECK(paddle::platform::is_cpu_place(storage.place()));
  CHECK(storage.OwnsMemory());
  CHECK(storage.allocator() == allocator);

  storage.Realloc(grown_size);
  CHECK_EQ(storage.size(), grown_size);
}
} // namespace tests
} // namespace pten
......@@ -31,9 +31,9 @@ using DDim = paddle::framework::DDim;
TEST(DEV_API, cast) {
// 1. create tensor
const auto alloc = std::make_shared<paddle::experimental::DefaultAllocator>(
const auto alloc = std::make_unique<paddle::experimental::DefaultAllocator>(
paddle::platform::CPUPlace());
pten::DenseTensor dense_x(alloc,
pten::DenseTensor dense_x(alloc.get(),
pten::DenseTensorMeta(pten::DataType::FLOAT32,
framework::make_ddim({3, 4}),
pten::DataLayout::NCHW));
......
......@@ -29,9 +29,9 @@ using DDim = paddle::framework::DDim;
TEST(DEV_API, conj) {
// 1. create tensor
const auto alloc = std::make_shared<paddle::experimental::DefaultAllocator>(
const auto alloc = std::make_unique<paddle::experimental::DefaultAllocator>(
paddle::platform::CPUPlace());
pten::DenseTensor dense_x(alloc,
pten::DenseTensor dense_x(alloc.get(),
pten::DenseTensorMeta(pten::DataType::COMPLEX64,
framework::make_ddim({3, 4}),
pten::DataLayout::NCHW));
......
......@@ -31,17 +31,17 @@ using DDim = paddle::framework::DDim;
// in 'paddle/api'
TEST(DEV_API, copy) {
// 1. create tensor
const auto alloc = std::make_shared<paddle::experimental::DefaultAllocator>(
const auto alloc = std::make_unique<paddle::experimental::DefaultAllocator>(
paddle::platform::CPUPlace());
auto dense_src = std::make_shared<pten::DenseTensor>(
alloc,
alloc.get(),
pten::DenseTensorMeta(pten::DataType::FLOAT32,
framework::make_ddim({2, 3}),
pten::DataLayout::NCHW));
auto* dense_x_data = dense_src->mutable_data<float>();
auto dense_dst = std::make_shared<pten::DenseTensor>(
alloc,
alloc.get(),
pten::DenseTensorMeta(pten::DataType::FLOAT32,
framework::make_ddim({2, 3}),
pten::DataLayout::NCHW));
......
......@@ -50,9 +50,9 @@ TEST(DEV_API, empty) {
TEST(DEV_API, empty_like) {
// 1. create tensor
const auto alloc = std::make_shared<paddle::experimental::DefaultAllocator>(
const auto alloc = std::make_unique<paddle::experimental::DefaultAllocator>(
paddle::platform::CPUPlace());
pten::DenseTensor dense_x(alloc,
pten::DenseTensor dense_x(alloc.get(),
pten::DenseTensorMeta(pten::DataType::FLOAT32,
framework::make_ddim({3, 2}),
pten::DataLayout::NCHW));
......@@ -105,9 +105,9 @@ TEST(DEV_API, full) {
TEST(DEV_API, full_like) {
// 1. create tensor
const auto alloc = std::make_shared<paddle::experimental::DefaultAllocator>(
const auto alloc = std::make_unique<paddle::experimental::DefaultAllocator>(
paddle::platform::CPUPlace());
pten::DenseTensor dense_x(alloc,
pten::DenseTensor dense_x(alloc.get(),
pten::DenseTensorMeta(pten::DataType::FLOAT32,
framework::make_ddim({3, 2}),
pten::DataLayout::NCHW));
......
......@@ -29,15 +29,15 @@ using DDim = paddle::framework::DDim;
TEST(DEV_API, dot) {
// 1. create tensor
const auto alloc = std::make_shared<paddle::experimental::DefaultAllocator>(
const auto alloc = std::make_unique<paddle::experimental::DefaultAllocator>(
paddle::platform::CPUPlace());
pten::DenseTensor dense_x(alloc,
pten::DenseTensor dense_x(alloc.get(),
pten::DenseTensorMeta(pten::DataType::FLOAT32,
framework::make_ddim({3, 10}),
pten::DataLayout::NCHW));
auto* dense_x_data = dense_x.mutable_data<float>();
pten::DenseTensor dense_y(alloc,
pten::DenseTensor dense_y(alloc.get(),
pten::DenseTensorMeta(pten::DataType::FLOAT32,
framework::make_ddim({3, 10}),
pten::DataLayout::NCHW));
......
......@@ -29,15 +29,15 @@ using DDim = paddle::framework::DDim;
TEST(DEV_API, add) {
// 1. create tensor
const auto alloc = std::make_shared<paddle::experimental::DefaultAllocator>(
const auto alloc = std::make_unique<paddle::experimental::DefaultAllocator>(
paddle::platform::CPUPlace());
pten::DenseTensor dense_x(alloc,
pten::DenseTensor dense_x(alloc.get(),
pten::DenseTensorMeta(pten::DataType::FLOAT32,
framework::make_ddim({3, 10}),
pten::DataLayout::NCHW));
auto* dense_x_data = dense_x.mutable_data<float>();
pten::DenseTensor dense_y(alloc,
pten::DenseTensor dense_y(alloc.get(),
pten::DenseTensorMeta(pten::DataType::FLOAT32,
framework::make_ddim({10}),
pten::DataLayout::NCHW));
......@@ -82,15 +82,15 @@ TEST(DEV_API, add) {
TEST(DEV_API, subtract) {
// 1. create tensor
const auto alloc = std::make_shared<paddle::experimental::DefaultAllocator>(
const auto alloc = std::make_unique<paddle::experimental::DefaultAllocator>(
paddle::platform::CPUPlace());
pten::DenseTensor dense_x(alloc,
pten::DenseTensor dense_x(alloc.get(),
pten::DenseTensorMeta(pten::DataType::FLOAT32,
framework::make_ddim({3, 10}),
pten::DataLayout::NCHW));
auto* dense_x_data = dense_x.mutable_data<float>();
pten::DenseTensor dense_y(alloc,
pten::DenseTensor dense_y(alloc.get(),
pten::DenseTensorMeta(pten::DataType::FLOAT32,
framework::make_ddim({10}),
pten::DataLayout::NCHW));
......@@ -135,15 +135,15 @@ TEST(DEV_API, subtract) {
TEST(DEV_API, divide) {
// 1. create tensor
const auto alloc = std::make_shared<paddle::experimental::DefaultAllocator>(
const auto alloc = std::make_unique<paddle::experimental::DefaultAllocator>(
paddle::platform::CPUPlace());
pten::DenseTensor dense_x(alloc,
pten::DenseTensor dense_x(alloc.get(),
pten::DenseTensorMeta(pten::DataType::FLOAT32,
framework::make_ddim({3, 10}),
pten::DataLayout::NCHW));
auto* dense_x_data = dense_x.mutable_data<float>();
pten::DenseTensor dense_y(alloc,
pten::DenseTensor dense_y(alloc.get(),
pten::DenseTensorMeta(pten::DataType::FLOAT32,
framework::make_ddim({10}),
pten::DataLayout::NCHW));
......@@ -188,15 +188,15 @@ TEST(DEV_API, divide) {
TEST(DEV_API, multiply) {
// 1. create tensor
const auto alloc = std::make_shared<paddle::experimental::DefaultAllocator>(
const auto alloc = std::make_unique<paddle::experimental::DefaultAllocator>(
paddle::platform::CPUPlace());
pten::DenseTensor dense_x(alloc,
pten::DenseTensor dense_x(alloc.get(),
pten::DenseTensorMeta(pten::DataType::FLOAT32,
framework::make_ddim({3, 10}),
pten::DataLayout::NCHW));
auto* dense_x_data = dense_x.mutable_data<float>();
pten::DenseTensor dense_y(alloc,
pten::DenseTensor dense_y(alloc.get(),
pten::DenseTensorMeta(pten::DataType::FLOAT32,
framework::make_ddim({10}),
pten::DataLayout::NCHW));
......
......@@ -39,10 +39,10 @@ using DDim = paddle::framework::DDim;
TEST(DEV_API, flatten) {
// 1. create tensor
const auto alloc = std::make_shared<paddle::experimental::DefaultAllocator>(
const auto alloc = std::make_unique<paddle::experimental::DefaultAllocator>(
paddle::platform::CPUPlace());
pten::DenseTensor dense_x(
alloc,
alloc.get(),
pten::DenseTensorMeta(pten::DataType::FLOAT32,
framework::make_ddim({3, 2, 2, 3}),
pten::DataLayout::NCHW));
......
......@@ -29,16 +29,16 @@ using DDim = paddle::framework::DDim;
TEST(DEV_API, dot) {
// 1. create tensor
const auto alloc = std::make_shared<paddle::experimental::DefaultAllocator>(
const auto alloc = std::make_unique<paddle::experimental::DefaultAllocator>(
paddle::platform::CPUPlace());
DenseTensor dense_x(alloc,
DenseTensor dense_x(alloc.get(),
pten::DenseTensorMeta(pten::DataType::FLOAT32,
framework::make_ddim({3, 3}),
pten::DataLayout::NCHW));
auto* dense_x_data = dense_x.mutable_data<float>();
DenseTensor dense_y(alloc,
DenseTensor dense_y(alloc.get(),
pten::DenseTensorMeta(pten::DataType::FLOAT32,
framework::make_ddim({3, 3}),
pten::DataLayout::NCHW));
......
......@@ -29,9 +29,9 @@ using DDim = paddle::framework::DDim;
TEST(DEV_API, mean) {
// 1. create tensor
const auto alloc = std::make_shared<paddle::experimental::DefaultAllocator>(
const auto alloc = std::make_unique<paddle::experimental::DefaultAllocator>(
paddle::platform::CPUPlace());
pten::DenseTensor dense_x(alloc,
pten::DenseTensor dense_x(alloc.get(),
pten::DenseTensorMeta(pten::DataType::FLOAT32,
framework::make_ddim({3, 4}),
pten::DataLayout::NCHW));
......
......@@ -30,10 +30,10 @@ using DDim = paddle::framework::DDim;
// TODO(chenweihang): Remove this test after the API is used in the dygraph
TEST(DEV_API, reshape) {
// 1. create tensor
const auto alloc = std::make_shared<paddle::experimental::DefaultAllocator>(
const auto alloc = std::make_unique<paddle::experimental::DefaultAllocator>(
paddle::platform::CPUPlace());
pten::DenseTensor dense_x(
alloc,
alloc.get(),
pten::DenseTensorMeta(pten::DataType::FLOAT32,
framework::make_ddim({3, 2, 2, 3}),
pten::DataLayout::NCHW));
......
......@@ -29,9 +29,9 @@ using DDim = paddle::framework::DDim;
TEST(DEV_API, scale) {
// 1. create tensor
const auto alloc = std::make_shared<paddle::experimental::DefaultAllocator>(
const auto alloc = std::make_unique<paddle::experimental::DefaultAllocator>(
paddle::platform::CPUPlace());
pten::DenseTensor dense_x(alloc,
pten::DenseTensor dense_x(alloc.get(),
pten::DenseTensorMeta(pten::DataType::FLOAT32,
framework::make_ddim({3, 4}),
pten::DataLayout::NCHW));
......@@ -69,9 +69,9 @@ TEST(DEV_API, scale) {
TEST(DEV_API, scale_host) {
// 1. create tensor
const auto alloc = std::make_shared<paddle::experimental::DefaultAllocator>(
const auto alloc = std::make_unique<paddle::experimental::DefaultAllocator>(
paddle::platform::CPUPlace());
pten::DenseTensor dense_x(alloc,
pten::DenseTensor dense_x(alloc.get(),
pten::DenseTensorMeta(pten::DataType::FLOAT32,
framework::make_ddim({3, 4}),
pten::DataLayout::NCHW));
......@@ -79,9 +79,8 @@ TEST(DEV_API, scale_host) {
for (size_t i = 0; i < 12; ++i) {
dense_x_data[i] = i * 1.0;
}
const auto alloc2 = std::make_shared<paddle::experimental::DefaultAllocator>(
paddle::platform::CPUPlace());
pten::DenseTensor scale(alloc2,
pten::DenseTensor scale(alloc.get(),
pten::DenseTensorMeta(pten::DataType::FLOAT32,
framework::make_ddim({1}),
pten::DataLayout::NCHW));
......
......@@ -29,9 +29,9 @@ using DDim = paddle::framework::DDim;
TEST(DEV_API, sum) {
// 1. create tensor
const auto alloc = std::make_shared<paddle::experimental::DefaultAllocator>(
const auto alloc = std::make_unique<paddle::experimental::DefaultAllocator>(
paddle::platform::CPUPlace());
pten::DenseTensor dense_x(alloc,
pten::DenseTensor dense_x(alloc.get(),
pten::DenseTensorMeta(pten::DataType::FLOAT32,
framework::make_ddim({3, 4}),
pten::DataLayout::NCHW));
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册