未验证 提交 e5aa145d 编写于 作者: 石晓伟 提交者: GitHub

add unit tests, test=develop (#36910)

上级 b9defb4f
......@@ -54,7 +54,7 @@ struct DenseTensorMeta {
/// marked with `const` are expected to remain unchanged.
const bool is_scalar{false};
DDim dims;
const DataType type{DataType::FLOAT32};
const DataType type{DataType::UNDEFINED};
const DataLayout layout{DataLayout::NCHW};
LoD lod;
};
......
add_subdirectory(core)
add_subdirectory(utils)
cc_test(pten_backend_test SRCS backend_test.cc DEPS gtest)
cc_test(pten_data_layout_test SRCS data_layout_test.cc DEPS gtest)
cc_test(pten_data_type_test SRCS data_type_test.cc DEPS gtest)
......
# Unit tests for the pten core layer; each test links only the target it
# exercises (allocator/storage against tensor_base, tensor test against
# dense_tensor).
cc_test(test_allocator SRCS test_allocator.cc DEPS tensor_base)
cc_test(test_storage SRCS test_storage.cc DEPS tensor_base)
cc_test(test_dense_tensor SRCS test_dense_tensor.cc DEPS dense_tensor)
/* Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#pragma once
#include <memory>
#include "paddle/pten/core/allocator.h"
namespace pten {
namespace tests {
// A minimal RawAllocator for tests: allocates host memory through the
// global operator new/delete and always reports the CPU place.
class HostAllocatorSample : public pten::RawAllocator {
 public:
  using Place = paddle::platform::Place;

  // Hands out `bytes_size` raw bytes from the global heap.
  void* Allocate(size_t bytes_size) override {
    return ::operator new(bytes_size);
  }

  // Returns memory obtained from Allocate; the size hint is not needed
  // by the global deallocation function.
  void Deallocate(void* ptr, size_t bytes_size) override {
    ::operator delete(ptr);
  }

  // The sample allocator only ever serves host (CPU) memory.
  const Place& place() const override { return place_; }

 private:
  Place place_{paddle::platform::CPUPlace()};
};
// A test Allocator that produces self-describing Allocations: each one
// carries the buffer, a deleter, and the CPU place it belongs to.
class FancyAllocator : public pten::Allocator {
 public:
  // Deleter installed into every Allocation handed out below.
  static void Delete(void* data) { ::operator delete(data); }

  // Allocates host memory and wraps it so the Allocation can release
  // itself via Delete when it goes out of scope.
  Allocation Allocate(size_t bytes_size) override {
    void* raw = ::operator new(bytes_size);
    return Allocation(raw, raw, &Delete, paddle::platform::CPUPlace());
  }
};
// Adapts a pten::RawAllocator to the standard Allocator interface so it
// can back std:: containers (used with std::vector in test_allocator.cc).
// NOTE(review): the std Allocator requirements also list a converting
// constructor CustomAllocator(const CustomAllocator<U>&); containers that
// rebind their allocator would need it — confirm before wider use.
template <typename T>
struct CustomAllocator {
  using value_type = T;
  using Allocator = pten::RawAllocator;

  explicit CustomAllocator(const std::shared_ptr<Allocator>& a) noexcept
      : alloc_(a) {}

  CustomAllocator(const CustomAllocator&) noexcept = default;

  // Allocates space for n objects of T through the wrapped RawAllocator.
  T* allocate(std::size_t n) {
    return static_cast<T*>(alloc_->Allocate(n * sizeof(T)));
  }

  // Returns the same number of bytes previously obtained via allocate().
  void deallocate(T* p, std::size_t n) {
    return alloc_->Deallocate(p, sizeof(T) * n);
  }

  // Comparison operators need access to alloc_ (defined below).
  template <typename R, typename U>
  friend bool operator==(const CustomAllocator<R>&,
                         const CustomAllocator<U>&) noexcept;

  template <typename R, typename U>
  friend bool operator!=(const CustomAllocator<R>&,
                         const CustomAllocator<U>&) noexcept;

 private:
  std::shared_ptr<Allocator> alloc_;
};
// Two CustomAllocators compare equal iff they wrap the same underlying
// RawAllocator (i.e. either can free memory the other allocated).
// BUG FIX: this used to compare `&lhs.alloc_ == &rhs.alloc_` — the
// addresses of the shared_ptr members — which is false for any two
// distinct CustomAllocator objects, even ones sharing an allocator.
template <typename T, typename U>
inline bool operator==(const CustomAllocator<T>& lhs,
                       const CustomAllocator<U>& rhs) noexcept {
  return lhs.alloc_ == rhs.alloc_;
}
// Negation of operator==: allocators differ iff they wrap different
// underlying RawAllocator objects.
// BUG FIX: previously compared the addresses of the shared_ptr members
// (`&lhs.alloc_ != &rhs.alloc_`), which is true for every pair of
// distinct CustomAllocator objects regardless of the wrapped allocator.
template <typename T, typename U>
inline bool operator!=(const CustomAllocator<T>& lhs,
                       const CustomAllocator<U>& rhs) noexcept {
  return lhs.alloc_ != rhs.alloc_;
}
} // namespace tests
} // namespace pten
/* Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#pragma once
#include <random>
#include <type_traits>
namespace pten {
namespace tests {
// Functor producing uniformly distributed random values of an arithmetic
// type T: integral T uses uniform_int_distribution, floating-point T uses
// uniform_real_distribution (both over their default ranges). The engine
// is default-seeded, so the sequence is deterministic across runs.
template <typename T,
          typename =
              typename std::enable_if<std::is_arithmetic<T>::value>::type>
class RandomGenerator {
  using distribution_type =
      typename std::conditional<std::is_integral<T>::value,
                                std::uniform_int_distribution<T>,
                                std::uniform_real_distribution<T>>::type;

  std::default_random_engine engine;
  distribution_type distribution;

 public:
  // Returns the next value in the random sequence.
  auto operator()() -> decltype(distribution(engine)) {
    return distribution(engine);
  }
};
// Deduces the element type of `Container` and returns a matching,
// default-constructed RandomGenerator. The container argument is used
// only for type deduction and is never read.
template <typename Container, typename T = typename Container::value_type>
auto make_generator(Container const&) -> decltype(RandomGenerator<T>()) {
  return {};
}
} // namespace tests
} // namespace pten
/* Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include <algorithm>
#include <vector>
#include "gtest/gtest.h"
#include "paddle/fluid/framework/generator.h"
#include "paddle/pten/tests/core/allocator.h"
#include "paddle/pten/tests/core/random.h"
#include "paddle/pten/tests/core/timer.h"
namespace pten {
namespace tests {
template <typename T>
bool host_allocator_test(size_t vector_size) {
std::vector<T> src(vector_size);
std::generate(src.begin(), src.end(), make_generator(src));
std::vector<T, CustomAllocator<T>> dst(
src.begin(),
src.end(),
CustomAllocator<T>(std::make_shared<HostAllocatorSample>()));
return std::equal(src.begin(), src.end(), dst.begin());
}
// Round-trips several element types through the RawAllocator adapter.
TEST(raw_allocator, host) {
  constexpr size_t kLength = 1000;
  CHECK(host_allocator_test<float>(kLength));
  CHECK(host_allocator_test<int32_t>(kLength));
  CHECK(host_allocator_test<int64_t>(kLength));
}
// RAII helper for the benchmark below: allocates `size` bytes from a
// RawAllocator on construction and returns them on destruction.
class StorageRawAlloc {
 public:
  StorageRawAlloc(const std::shared_ptr<RawAllocator>& a, size_t size)
      : alloc_(a), size_(size) {
    data_ = alloc_->Allocate(size);
  }
  // BUG FIX: the byte count is now stored at construction. Previously the
  // member was never initialized, so the destructor passed an
  // indeterminate value to Deallocate (undefined behavior).
  ~StorageRawAlloc() { alloc_->Deallocate(data_, size_); }

 private:
  void* data_;
  size_t size_;
  std::shared_ptr<RawAllocator> alloc_;
};
// RAII helper for the benchmark below: acquires an Allocation from the
// fancy Allocator; the Allocation releases itself through its stored
// deleter when this object is destroyed.
class StorageFancyAlloc {
 public:
  StorageFancyAlloc(const std::shared_ptr<Allocator>& a, size_t size)
      : alloc_(a), allocation_(a->Allocate(size)) {}

 private:
  std::shared_ptr<Allocator> alloc_;
  Allocation allocation_;
};
// Compares the cost of raw allocation (pointer only) against fancy
// allocation (Allocation object carrying deleter + place) over repeated
// allocate/free cycles of growing size.
TEST(benchmark, allocator) {
  std::shared_ptr<RawAllocator> raw_allocator(new HostAllocatorSample);
  std::shared_ptr<Allocator> fancy_allocator(new FancyAllocator);
  const size_t cycles = 100;
  Timer timer;
  double t1{}, t2{};
  // FIX: the inner loops used to reuse `i`, shadowing the outer loop
  // variable; they now use `j` so each scope has a distinct counter.
  for (size_t i = 0; i < cycles; ++i) {
    timer.tic();
    for (size_t j = 0; j < cycles; ++j) {
      // Temporary: allocates and immediately frees j * 100 bytes.
      StorageRawAlloc(raw_allocator, j * 100);
    }
    t1 += timer.toc();
    timer.tic();
    for (size_t j = 0; j < cycles; ++j) {
      StorageFancyAlloc(fancy_allocator, j * 100);
    }
    t2 += timer.toc();
  }
  std::cout << "The cost of raw alloc is " << t1 << "ms.\n";
  std::cout << "The cost of fancy alloc with place is " << t2 << "ms.\n";
}
} // namespace tests
} // namespace pten
/* Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include "gtest/gtest.h"
#include "paddle/pten/core/dense_tensor.h"
#include "paddle/pten/tests/core/allocator.h"
namespace pten {
namespace tests {
// Exercises every DenseTensorMeta constructor: default, (type, dims),
// (type, dims, layout), (type, dims, layout, lod), plus copy and move.
TEST(dense_tensor, meta) {
  const DDim dims({1, 2});
  const DataType dtype{DataType::INT8};
  const DataLayout layout{DataLayout::NHWC};
  // TODO(Shixiaowei02): need to check the lod is valid.
  const std::vector<std::vector<size_t>> lod{};

  // A default-constructed meta must be invalid (its type defaults to
  // DataType::UNDEFINED per the DenseTensorMeta definition).
  DenseTensorMeta meta_0;
  CHECK(!meta_0.valid());

  DenseTensorMeta meta_1(dtype, dims);
  CHECK(meta_1.type == dtype);
  CHECK(meta_1.dims == dims);
  CHECK(meta_1.valid());

  DenseTensorMeta meta_2(dtype, dims, layout);
  CHECK(meta_2.type == dtype);
  CHECK(meta_2.dims == dims);
  CHECK(meta_2.layout == layout);
  CHECK(meta_2.valid());

  DenseTensorMeta meta_3(dtype, dims, layout, lod);
  CHECK(meta_3.type == dtype);
  CHECK(meta_3.dims == dims);
  CHECK(meta_3.layout == layout);
  CHECK(meta_3.lod == lod);
  CHECK(meta_3.valid());

  // Copy construction preserves every field and validity.
  DenseTensorMeta meta_4(meta_3);
  CHECK(meta_4.type == dtype);
  CHECK(meta_4.dims == dims);
  CHECK(meta_4.layout == layout);
  CHECK(meta_4.lod == lod);
  CHECK(meta_4.valid());

  // Move construction yields an equivalent, valid meta.
  DenseTensorMeta meta_5(std::move(meta_4));
  CHECK(meta_5.type == dtype);
  CHECK(meta_5.dims == dims);
  CHECK(meta_5.layout == layout);
  CHECK(meta_5.lod == lod);
  CHECK(meta_5.valid());
}
// A default-constructed DenseTensor has no storage/meta and must report
// itself as invalid.
TEST(dense_tensor, def_ctor) {
  DenseTensor tensor_0;
  CHECK(!tensor_0.valid());
}
// Constructs DenseTensors from (allocator, meta), a moved meta, and an
// explicit TensorStorage, verifying each reflects its metadata.
TEST(dense_tensor, ctor) {
  const DDim dims({1, 2});
  const DataType dtype{DataType::INT8};
  const DataLayout layout{DataLayout::NHWC};
  const std::vector<std::vector<size_t>> lod{};
  DenseTensorMeta meta(dtype, dims, layout, lod);

  auto alloc = std::make_shared<FancyAllocator>();

  // Returns true iff tensor `t` matches metadata `m` and holds
  // initialized CPU storage.
  auto check_dense_tensor = [](const DenseTensor& t,
                               const DenseTensorMeta& m) -> bool {
    bool r{true};
    r = r && (t.numel() == product(m.dims));
    r = r && (t.dims() == m.dims);
    r = r && (t.data_type() == m.type);
    r = r && (t.layout() == m.layout);
    r = r && (t.place() == paddle::platform::CPUPlace());
    r = r && t.initialized();
    r = r && t.IsSharedWith(t);
    return r;
  };

  DenseTensor tensor_0(alloc, meta);
  // BUG FIX: the helper's result was previously discarded, so none of
  // these checks actually asserted anything.
  CHECK(check_dense_tensor(tensor_0, meta));

  DenseTensor tensor_1(alloc, DenseTensorMeta(meta));
  // BUG FIX: this line used to re-check tensor_0 instead of tensor_1.
  CHECK(check_dense_tensor(tensor_1, meta));

  DenseTensor tensor_2(make_intrusive<TensorStorage>(alloc), meta);
  // Storage is empty until mutable_data triggers allocation.
  CHECK(tensor_2.data<int8_t>() == nullptr);
  CHECK_NOTNULL(tensor_2.mutable_data<int8_t>());
  CHECK(check_dense_tensor(tensor_2, meta));
}
// Resize updates only the dims; the buffer is reallocated lazily on the
// next mutable_data call, and release() hands the storage to the caller.
TEST(dense_tensor, resize) {
  const DDim dims({1, 2});
  const DataType dtype{DataType::INT8};
  const DataLayout layout{DataLayout::NHWC};
  const std::vector<std::vector<size_t>> lod{};
  DenseTensorMeta meta(dtype, dims, layout, lod);

  auto alloc = std::make_shared<FancyAllocator>();
  DenseTensor tensor_0(alloc, meta);

  // 1x2 tensor of INT8 -> 2 bytes.
  CHECK_EQ(tensor_0.memory_size(), 2u);
  tensor_0.check_memory_size();
  tensor_0.Resize({1, 2, 3});
  // Still the old buffer: Resize alone does not allocate.
  CHECK_EQ(tensor_0.memory_size(), 2u);
  tensor_0.mutable_data<int8_t>();
  // mutable_data grew the buffer to match the new 1x2x3 shape (6 bytes).
  CHECK_EQ(tensor_0.memory_size(), 6u);
  auto storage = tensor_0.release();
  CHECK_EQ(storage->size(), 6u);
}
} // namespace tests
} // namespace pten
/* Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include <vector>
#include "gtest/gtest.h"
#include "paddle/pten/core/storage.h"
#include "paddle/pten/tests/core/allocator.h"
namespace pten {
namespace tests {
// Checks TensorStorage bookkeeping: size, place, ownership, allocator
// identity, and growth via Realloc.
TEST(host_storage, internal) {
  // TODO(Shixiaowei02): Here we need to consider the case
  // where the size is zero.
  const size_t init_size{100};
  const auto allocator = std::make_shared<FancyAllocator>();

  TensorStorage storage(allocator, init_size);
  CHECK_EQ(storage.size(), init_size);
  CHECK(paddle::platform::is_cpu_place(storage.place()));
  CHECK(storage.OwnsMemory());
  CHECK(storage.allocator() == allocator);

  storage.Realloc(init_size + 100);
  CHECK_EQ(storage.size(), init_size + 100);
}
} // namespace tests
} // namespace pten
/* Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#pragma once
#include <chrono> // NOLINT
namespace pten {
namespace tests {
// Simple stopwatch used by the benchmarks in this directory.
class Timer {
 public:
  std::chrono::high_resolution_clock::time_point start;
  std::chrono::high_resolution_clock::time_point startu;

  // Records the starting instant.
  void tic() { start = std::chrono::high_resolution_clock::now(); }

  // Returns the time elapsed since the last tic(), in milliseconds.
  double toc() {
    startu = std::chrono::high_resolution_clock::now();
    // duration<double> converts implicitly from the clock's native
    // duration, so the duration_cast and static_cast of the original
    // were redundant.
    std::chrono::duration<double> time_span = startu - start;
    return time_span.count() * 1000.0;
  }
};
} // namespace tests
} // namespace pten
# Header-only utility tests; no extra link dependencies required.
cc_test(test_intrusive_ptr SRCS test_intrusive_ptr.cc)
cc_test(test_type_info SRCS test_type_info.cc)
/* Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include <future>
#include <vector>
#include "gtest/gtest.h"
#include "paddle/pten/core/utils/intrusive_ptr.h"
#include "paddle/pten/core/utils/intrusive_ref_counter.h"
namespace pten {
namespace tests {
// Test payload: a plain int wrapped with an intrusive reference count.
struct SharedObject : public intrusive_ref_counter<SharedObject> {
  int i{0};
};
// Hammers the counter from many concurrent tasks; once every
// add_ref/release pair completes, the count must be back to 1
// (the single stack-owned reference).
TEST(intrusive_ref_counter, async) {
  SharedObject obj;
  const size_t num{100};
  std::vector<std::future<void>> results;
  // Each task takes one reference and immediately drops it.
  auto add_ref_and_release = [](const SharedObject* p) {
    intrusive_ptr_add_ref<SharedObject>(p);
    intrusive_ptr_release<SharedObject>(p);
  };
  for (size_t i = 0; i < num; ++i) {
    results.emplace_back(std::async(add_ref_and_release, &obj));
  }
  // Block until every task has finished before inspecting the count.
  for (auto& result : results) {
    result.get();
  }
  CHECK_EQ(obj.use_count(), 1u);
}
// A default-constructed intrusive_ptr compares equal to nullptr.
TEST(intrusive_ptr, default_ctor) {
  intrusive_ptr<SharedObject> p;
  CHECK(p == nullptr);
}
// Moving an intrusive_ptr — including a move-upcast to the
// intrusive_ref_counter base — must keep pointing at the same object.
TEST(intrusive_ptr, private_ctor) {
  auto p = make_intrusive<SharedObject>();
  const auto* ptr0 = p.get();
  auto p1 = std::move(p);
  intrusive_ptr<intrusive_ref_counter<SharedObject>> p2(std::move(p1));
  const auto* ptr1 = p2.get();
  CHECK_EQ(ptr0, ptr1);
}
// reset() onto a stack-owned object: after reset the pointer observes
// the object's state.
// NOTE(review): the second argument presumably means "add a reference"
// so p does not try to free the stack object — confirm against
// intrusive_ptr's declaration.
TEST(intrusive_ptr, reset_with_obj) {
  SharedObject obj;
  obj.i = 1;
  intrusive_ptr<SharedObject> p;
  p.reset(&obj, true);
  CHECK_EQ(p->i, obj.i);
}
// reset() onto a heap object without adding a reference: p adopts the
// object's initial count, so the final reset() both clears p and
// presumably destroys the object — confirm intrusive_ptr::reset
// semantics.
TEST(intrusive_ptr, reset_with_ptr) {
  auto* ptr = new SharedObject;
  ptr->i = 1;
  intrusive_ptr<SharedObject> p;
  p.reset(ptr, false);
  CHECK_EQ((*p).i, ptr->i);
  p.reset();
  CHECK(p == nullptr);
}
// Covers every comparison overload: ptr vs ptr, ptr vs raw pointer
// (both orders), and ptr vs nullptr (both orders).
TEST(intrusive_ptr, op_comp) {
  auto p = make_intrusive<SharedObject>();
  auto copy = copy_intrusive<SharedObject>(p);
  auto null = intrusive_ptr<SharedObject>();
  auto p1 = make_intrusive<SharedObject>();
  // intrusive_ptr vs intrusive_ptr.
  CHECK(p == copy);
  CHECK(p != p1);
  // intrusive_ptr vs raw pointer, and the mirrored order.
  CHECK(p == copy.get());
  CHECK(p != p1.get());
  CHECK(p.get() == copy);
  CHECK(p.get() != p1);
  // Comparisons against nullptr in both orders.
  CHECK(null == nullptr);
  CHECK(nullptr == null);
  CHECK(p != nullptr);
  CHECK(nullptr != p);
}
} // namespace tests
} // namespace pten
/* Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include <memory>

#include "gtest/gtest.h"

#include "paddle/pten/core/utils/type_registry.h"
namespace pten {
namespace tests {
// Root of the test hierarchy, carrying a TypeInfo tag. TypeInfoTraits is
// befriended so it can set type_info_ — presumably during derived-class
// registration; confirm against type_registry.h.
template <typename T>
class Base {
 public:
  TypeInfo<Base<T>> type_info() const { return type_info_; }

 private:
  template <typename T1, typename T2>
  friend class pten::TypeInfoTraits;
  // Defaults to the "Unknown" type until a derived class registers one.
  TypeInfo<Base<T>> type_info_{TypeInfo<Base<T>>::kUnknownType};
};
// First registered subtype; TypeInfoTraits uses name() as its type name.
template <typename T>
class DerivedA : public Base<T>, public TypeInfoTraits<Base<T>, DerivedA<T>> {
 public:
  static const char* name() { return "DerivedA"; }
};
// Second registered subtype, used to verify classof() rejects siblings.
template <typename T>
class DerivedB : public Base<T>, public TypeInfoTraits<Base<T>, DerivedB<T>> {
 public:
  static const char* name() { return "DerivedB"; }
};
// Verifies classof() discrimination plus the registry's id/name
// assignment (ids appear to be handed out in registration order:
// 0 = Unknown, then DerivedA, then DerivedB).
template <typename T>
void check_type_info() {
  std::unique_ptr<Base<T>> base(new Base<T>);
  std::unique_ptr<Base<T>> derived_a(new DerivedA<T>);
  std::unique_ptr<Base<T>> derived_b(new DerivedB<T>);

  // classof must accept the exact type and reject the sibling.
  EXPECT_EQ(DerivedA<T>::classof(derived_a.get()), true);
  EXPECT_EQ(DerivedB<T>::classof(derived_b.get()), true);
  EXPECT_EQ(DerivedB<T>::classof(derived_a.get()), false);
  EXPECT_EQ(DerivedA<T>::classof(derived_b.get()), false);

  EXPECT_EQ(base->type_info().id(), 0);
  EXPECT_EQ(derived_a->type_info().id(), 1);
  EXPECT_EQ(derived_b->type_info().id(), 2);

  // NOTE(review): if TypeInfo::name() returns const char*, EXPECT_EQ
  // compares pointers, not contents — EXPECT_STREQ would be correct.
  // Confirm the return type in type_registry.h.
  EXPECT_EQ(base->type_info().name(), "Unknown");
  EXPECT_EQ(derived_a->type_info().name(), "DerivedA");
  EXPECT_EQ(derived_b->type_info().name(), "DerivedB");
}
// Runs the checks for two independent instantiations of the hierarchy;
// each template instantiation maintains its own registry.
TEST(type_info, base) {
  check_type_info<int>();
  check_type_info<float>();
}
} // namespace tests
} // namespace pten
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册