Unverified commit 9a53477c authored by Chen Weihang and committed by GitHub

[PTen] Organize pten unit tests and directory (#36948)

* organize pten unit tests

* fix detail errors
Parent 85c8c170
@@ -399,7 +399,7 @@ cc_library(save_load_util SRCS save_load_util.cc DEPS tensor scope layer)
cc_test(save_load_util_test SRCS save_load_util_test.cc DEPS save_load_util tensor scope layer)
cc_library(generator SRCS generator.cc DEPS enforce place)
cc_library(pten_utils SRCS pten_utils.cc DEPS lod_tensor selected_rows place pten var_type_traits pten_hapi_utils op_info)
cc_library(pten_utils SRCS pten_utils.cc DEPS lod_tensor selected_rows place pten var_type_traits pten_api_utils op_info)
# Get the current working branch
execute_process(
...
add_subdirectory(lib)
cc_library(pten_hapi SRCS all.cc DEPS linalg_api math_api creation_api)
cc_library(pten_api SRCS all.cc DEPS linalg_api math_api creation_api manipulation_api)
add_subdirectory(tests)
cc_library(pten_api_utils SRCS allocator.cc storage.cc tensor_utils.cc DEPS tensor_base convert_utils dense_tensor lod_tensor selected_rows place var_type_traits)
cc_library(pten_hapi_utils SRCS allocator.cc storage.cc tensor_utils.cc DEPS tensor_base convert_utils
dense_tensor lod_tensor selected_rows place var_type_traits)
cc_test(test_framework_storage SRCS test_storage.cc DEPS pten_hapi_utils)
cc_test(test_framework_tensor_utils SRCS test_tensor_utils.cc DEPS pten_hapi_utils)
add_subdirectory(api)
add_subdirectory(common)
add_subdirectory(core)
add_subdirectory(utils)
add_subdirectory(kernels)
cc_test(pten_backend_test SRCS backend_test.cc DEPS gtest)
cc_test(pten_data_layout_test SRCS data_layout_test.cc DEPS gtest)
cc_test(pten_data_type_test SRCS data_type_test.cc DEPS gtest)
cc_test(dense_tensor_test SRCS dense_tensor_test.cc DEPS dense_tensor)
cc_test(kernel_factory_test SRCS kernel_factory_test.cc DEPS kernel_factory)
cc_test(test_mean_api SRCS test_mean_api.cc DEPS math_api pten_hapi_utils)
cc_test(test_dot_api SRCS test_dot_api.cc DEPS linalg_api pten_hapi_utils)
cc_test(test_matmul_api SRCS test_matmul_api.cc DEPS linalg_api pten_hapi_utils)
cc_test(test_fill_api SRCS test_fill_api.cc DEPS creation_api pten_hapi_utils)
cc_test(test_copy_api SRCS test_copy_api.cc DEPS utils_cpu pten_hapi_utils)
cc_test(test_flatten_api SRCS test_flatten_api.cc DEPS utils_cpu manipulation_api pten_hapi_utils)
cc_test(test_scale_api SRCS test_scale_api.cc DEPS math_api pten_hapi_utils)
cc_test(test_mean_api SRCS test_mean_api.cc DEPS pten_api pten_api_utils)
cc_test(test_dot_api SRCS test_dot_api.cc DEPS pten_api pten_api_utils)
cc_test(test_matmul_api SRCS test_matmul_api.cc DEPS pten_api pten_api_utils)
cc_test(test_fill_api SRCS test_fill_api.cc DEPS pten_api pten_api_utils)
cc_test(test_flatten_api SRCS test_flatten_api.cc DEPS pten_api pten_api_utils)
cc_test(test_framework_storage SRCS test_storage.cc DEPS pten_api_utils)
cc_test(test_framework_tensor_utils SRCS test_tensor_utils.cc DEPS pten_api_utils)
@@ -21,8 +21,6 @@ limitations under the License. */
#include "paddle/pten/core/dense_tensor.h"
#include "paddle/pten/core/kernel_registry.h"
#include "paddle/pten/include/linalg.h"
PT_DECLARE_MODULE(LinalgCPU);
#if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
@@ -84,55 +82,3 @@ TEST(API, dot) {
ASSERT_NEAR(expect_result[1], actual_result1, 1e-6f);
ASSERT_NEAR(expect_result[2], actual_result2, 1e-6f);
}
// TODO(YuanRisheng): This unit test should be moved to another file
// to keep compilation decoupled.
TEST(DEV_API, dot) {
// 1. create tensor
const auto alloc = std::make_shared<paddle::experimental::DefaultAllocator>(
paddle::platform::CPUPlace());
pten::DenseTensor dense_x(alloc,
pten::DenseTensorMeta(pten::DataType::FLOAT32,
framework::make_ddim({3, 10}),
pten::DataLayout::NCHW));
auto* dense_x_data = dense_x.mutable_data<float>();
pten::DenseTensor dense_y(alloc,
pten::DenseTensorMeta(pten::DataType::FLOAT32,
framework::make_ddim({3, 10}),
pten::DataLayout::NCHW));
auto* dense_y_data = dense_y.mutable_data<float>();
float sum[3] = {0.0, 0.0, 0.0};
for (size_t i = 0; i < 3; ++i) {
for (size_t j = 0; j < 10; ++j) {
dense_x_data[i * 10 + j] = (i * 10 + j) * 1.0;
dense_y_data[i * 10 + j] = (i * 10 + j) * 1.0;
sum[i] += (i * 10 + j) * (i * 10 + j) * 1.0;
}
}
paddle::platform::DeviceContextPool& pool =
paddle::platform::DeviceContextPool::Instance();
auto* dev_ctx = pool.Get(paddle::platform::CPUPlace());
// 2. test API
auto out = pten::Dot<float>(
*(static_cast<paddle::platform::CPUDeviceContext*>(dev_ctx)),
dense_x,
dense_y);
// 3. check result
ASSERT_EQ(out.dims().size(), 2);
ASSERT_EQ(out.dims()[0], 3);
ASSERT_EQ(out.meta().type, pten::DataType::FLOAT32);
ASSERT_EQ(out.meta().layout, pten::DataLayout::NCHW);
auto expect_result = sum;
auto actual_result0 = out.data<float>()[0];
auto actual_result1 = out.data<float>()[1];
auto actual_result2 = out.data<float>()[2];
ASSERT_NEAR(expect_result[0], actual_result0, 1e-6f);
ASSERT_NEAR(expect_result[1], actual_result1, 1e-6f);
ASSERT_NEAR(expect_result[2], actual_result2, 1e-6f);
}
@@ -21,8 +21,6 @@ limitations under the License. */
#include "paddle/pten/core/dense_tensor.h"
#include "paddle/pten/core/kernel_registry.h"
#include "paddle/pten/include/creation.h"
PT_DECLARE_MODULE(CreationCPU);
#if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
@@ -133,38 +131,3 @@ TEST(API, ones_like) {
ASSERT_EQ(actual_result[i], 1);
}
}
TEST(DEV_API, fill_any_like) {
// 1. create tensor
const auto alloc = std::make_shared<paddle::experimental::DefaultAllocator>(
paddle::platform::CPUPlace());
pten::DenseTensor dense_x(alloc,
pten::DenseTensorMeta(pten::DataType::FLOAT32,
framework::make_ddim({3, 2}),
pten::DataLayout::NCHW));
auto* dense_x_data = dense_x.mutable_data<float>();
dense_x_data[0] = 0;
float val = 1.0;
paddle::platform::DeviceContextPool& pool =
paddle::platform::DeviceContextPool::Instance();
auto* dev_ctx = pool.Get(paddle::platform::CPUPlace());
// 2. test API
auto out = pten::FillAnyLike<float>(
*(static_cast<paddle::platform::CPUDeviceContext*>(dev_ctx)),
dense_x,
val);
// 3. check result
ASSERT_EQ(out.dims().size(), 2);
ASSERT_EQ(out.dims()[0], 3);
ASSERT_EQ(out.numel(), 6);
ASSERT_EQ(out.meta().type, pten::DataType::FLOAT32);
ASSERT_EQ(out.meta().layout, pten::DataLayout::NCHW);
auto* actual_result = out.data<float>();
for (auto i = 0; i < 6; i++) {
ASSERT_NEAR(actual_result[i], val, 1e-6f);
}
}
@@ -21,8 +21,6 @@ limitations under the License. */
#include "paddle/pten/core/dense_tensor.h"
#include "paddle/pten/core/kernel_registry.h"
#include "paddle/pten/include/manipulation.h"
PT_DECLARE_MODULE(ManipulationCPU);
#if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
@@ -72,47 +70,3 @@ TEST(API, flatten) {
}
ASSERT_EQ(value_equal, true);
}
TEST(DEV_API, flatten) {
// 1. create tensor
const auto alloc = std::make_shared<paddle::experimental::DefaultAllocator>(
paddle::platform::CPUPlace());
pten::DenseTensor dense_x(
alloc,
pten::DenseTensorMeta(pten::DataType::FLOAT32,
framework::make_ddim({3, 2, 2, 3}),
pten::DataLayout::NCHW));
auto* dense_x_data = dense_x.mutable_data<float>();
for (int i = 0; i < dense_x.numel(); i++) {
dense_x_data[i] = i;
}
int start_axis = 1, stop_axis = 2;
paddle::platform::DeviceContextPool& pool =
paddle::platform::DeviceContextPool::Instance();
auto* dev_ctx = pool.Get(paddle::platform::CPUPlace());
// 2. test API
auto out = pten::Flatten<float>(
*(static_cast<paddle::platform::CPUDeviceContext*>(dev_ctx)),
dense_x,
start_axis,
stop_axis);
// 3. check result
std::vector<int> expect_shape = {3, 4, 3};
ASSERT_EQ(out.dims()[0], expect_shape[0]);
ASSERT_EQ(out.dims()[1], expect_shape[1]);
ASSERT_EQ(out.dims()[2], expect_shape[2]);
ASSERT_EQ(out.numel(), 36);
ASSERT_EQ(out.meta().type, pten::DataType::FLOAT32);
ASSERT_EQ(out.meta().layout, pten::DataLayout::NCHW);
bool value_equal = true;
auto* dense_out_data = out.data<float>();
for (int i = 0; i < dense_x.numel(); i++) {
if (std::abs(dense_x_data[i] - dense_out_data[i]) > 1e-6f)
value_equal = false;
}
ASSERT_EQ(value_equal, true);
}
@@ -21,8 +21,6 @@ limitations under the License. */
#include "paddle/pten/core/dense_tensor.h"
#include "paddle/pten/core/kernel_registry.h"
#include "paddle/pten/include/math.h"
PT_DECLARE_MODULE(MathCPU);
#if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
@@ -69,36 +67,3 @@ TEST(API, mean) {
auto actual_result = dense_out->data<float>()[0];
ASSERT_NEAR(expect_result, actual_result, 1e-6f);
}
TEST(DEV_API, mean) {
// 1. create tensor
const auto alloc = std::make_shared<paddle::experimental::DefaultAllocator>(
paddle::platform::CPUPlace());
pten::DenseTensor dense_x(alloc,
pten::DenseTensorMeta(pten::DataType::FLOAT32,
framework::make_ddim({3, 4}),
pten::DataLayout::NCHW));
auto* dense_x_data = dense_x.mutable_data<float>();
float sum = 0.0;
for (size_t i = 0; i < 12; ++i) {
dense_x_data[i] = i * 1.0;
sum += i * 1.0;
}
paddle::platform::DeviceContextPool& pool =
paddle::platform::DeviceContextPool::Instance();
auto* dev_ctx = pool.Get(paddle::platform::CPUPlace());
// 2. test API
auto out = pten::Mean<float>(
*(static_cast<paddle::platform::CPUDeviceContext*>(dev_ctx)), dense_x);
// 3. check result
ASSERT_EQ(out.dims().size(), 1);
ASSERT_EQ(out.numel(), 1);
ASSERT_EQ(out.meta().type, pten::DataType::FLOAT32);
ASSERT_EQ(out.meta().layout, pten::DataLayout::NCHW);
auto expect_result = sum / 12;
auto actual_result = out.data<float>()[0];
ASSERT_NEAR(expect_result, actual_result, 1e-6f);
}
cc_test(pten_test_backend SRCS test_backend.cc DEPS gtest)
cc_test(pten_test_data_layout SRCS test_data_layout.cc DEPS gtest)
cc_test(pten_test_data_type SRCS test_data_type.cc DEPS gtest)
@@ -12,11 +12,11 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include "paddle/pten/common/backend.h"
#include <gtest/gtest.h>
#include <iostream>
#include "paddle/pten/common/backend.h"
TEST(Backend, OStream) {
std::ostringstream oss;
oss << pten::Backend::UNDEFINED;
...
@@ -15,6 +15,7 @@ limitations under the License. */
#include <gtest/gtest.h>
#include <iostream>
#include <sstream>
#include "paddle/pten/common/layout.h"
TEST(DataLayout, OStream) {
...
@@ -12,12 +12,12 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include "paddle/pten/common/data_type.h"
#include <gtest/gtest.h>
#include <iostream>
#include <sstream>
#include "paddle/pten/common/data_type.h"
TEST(DataType, OStream) {
std::ostringstream oss;
oss << pten::DataType::UNDEFINED;
...
cc_test(test_allocator SRCS test_allocator.cc DEPS tensor_base)
cc_test(test_storage SRCS test_storage.cc DEPS tensor_base)
cc_test(test_dense_tensor SRCS test_dense_tensor.cc DEPS dense_tensor)
cc_test(test_intrusive_ptr SRCS test_intrusive_ptr.cc)
cc_test(test_type_info SRCS test_type_info.cc)
cc_test(test_kernel_factory SRCS test_kernel_factory.cc DEPS kernel_factory)
/* Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include "paddle/pten/core/dense_tensor.h"
#include <gtest/gtest.h>
namespace framework = paddle::framework;
using DDim = paddle::framework::DDim;
cc_test(test_copy_dev_api SRCS test_copy_dev_api.cc DEPS pten pten_api_utils)
cc_test(test_dot_dev_api SRCS test_dot_dev_api.cc DEPS pten pten_api_utils)
cc_test(test_fill_dev_api SRCS test_fill_dev_api.cc DEPS pten pten_api_utils)
cc_test(test_flatten_dev_api SRCS test_flatten_dev_api.cc DEPS pten pten_api_utils)
cc_test(test_mean_dev_api SRCS test_mean_dev_api.cc DEPS pten pten_api_utils)
cc_test(test_scale_dev_api SRCS test_scale_dev_api.cc DEPS pten pten_api_utils)
@@ -29,7 +29,7 @@ using DDim = paddle::framework::DDim;
// TODO(YuanRisheng): This TEST file needs to be refactored after 'copy' is realized
// in
// 'paddle/api',
TEST(API, copy) {
TEST(DEV_API, copy) {
// 1. create tensor
const auto alloc = std::make_shared<paddle::experimental::DefaultAllocator>(
paddle::platform::CPUPlace());
...
/* Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include <gtest/gtest.h>
#include <memory>
#include "paddle/pten/include/linalg.h"
#include "paddle/pten/api/lib/utils/allocator.h"
#include "paddle/pten/core/dense_tensor.h"
#include "paddle/pten/core/kernel_registry.h"
PT_DECLARE_MODULE(LinalgCPU);
#if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
PT_DECLARE_MODULE(LinalgCUDA);
#endif
namespace framework = paddle::framework;
using DDim = paddle::framework::DDim;
TEST(DEV_API, dot) {
// 1. create tensor
const auto alloc = std::make_shared<paddle::experimental::DefaultAllocator>(
paddle::platform::CPUPlace());
pten::DenseTensor dense_x(alloc,
pten::DenseTensorMeta(pten::DataType::FLOAT32,
framework::make_ddim({3, 10}),
pten::DataLayout::NCHW));
auto* dense_x_data = dense_x.mutable_data<float>();
pten::DenseTensor dense_y(alloc,
pten::DenseTensorMeta(pten::DataType::FLOAT32,
framework::make_ddim({3, 10}),
pten::DataLayout::NCHW));
auto* dense_y_data = dense_y.mutable_data<float>();
float sum[3] = {0.0, 0.0, 0.0};
for (size_t i = 0; i < 3; ++i) {
for (size_t j = 0; j < 10; ++j) {
dense_x_data[i * 10 + j] = (i * 10 + j) * 1.0;
dense_y_data[i * 10 + j] = (i * 10 + j) * 1.0;
sum[i] += (i * 10 + j) * (i * 10 + j) * 1.0;
}
}
paddle::platform::DeviceContextPool& pool =
paddle::platform::DeviceContextPool::Instance();
auto* dev_ctx = pool.Get(paddle::platform::CPUPlace());
// 2. test API
auto out = pten::Dot<float>(
*(static_cast<paddle::platform::CPUDeviceContext*>(dev_ctx)),
dense_x,
dense_y);
// 3. check result
ASSERT_EQ(out.dims().size(), 2);
ASSERT_EQ(out.dims()[0], 3);
ASSERT_EQ(out.meta().type, pten::DataType::FLOAT32);
ASSERT_EQ(out.meta().layout, pten::DataLayout::NCHW);
auto expect_result = sum;
auto actual_result0 = out.data<float>()[0];
auto actual_result1 = out.data<float>()[1];
auto actual_result2 = out.data<float>()[2];
ASSERT_NEAR(expect_result[0], actual_result0, 1e-6f);
ASSERT_NEAR(expect_result[1], actual_result1, 1e-6f);
ASSERT_NEAR(expect_result[2], actual_result2, 1e-6f);
}
/* Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include <gtest/gtest.h>
#include <memory>
#include "paddle/pten/include/creation.h"
#include "paddle/pten/api/lib/utils/allocator.h"
#include "paddle/pten/core/dense_tensor.h"
#include "paddle/pten/core/kernel_registry.h"
PT_DECLARE_MODULE(CreationCPU);
#if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
PT_DECLARE_MODULE(CreationCUDA);
#endif
namespace framework = paddle::framework;
using DDim = paddle::framework::DDim;
TEST(DEV_API, fill_any_like) {
// 1. create tensor
const auto alloc = std::make_shared<paddle::experimental::DefaultAllocator>(
paddle::platform::CPUPlace());
pten::DenseTensor dense_x(alloc,
pten::DenseTensorMeta(pten::DataType::FLOAT32,
framework::make_ddim({3, 2}),
pten::DataLayout::NCHW));
auto* dense_x_data = dense_x.mutable_data<float>();
dense_x_data[0] = 0;
float val = 1.0;
paddle::platform::DeviceContextPool& pool =
paddle::platform::DeviceContextPool::Instance();
auto* dev_ctx = pool.Get(paddle::platform::CPUPlace());
// 2. test API
auto out = pten::FillAnyLike<float>(
*(static_cast<paddle::platform::CPUDeviceContext*>(dev_ctx)),
dense_x,
val);
// 3. check result
ASSERT_EQ(out.dims().size(), 2);
ASSERT_EQ(out.dims()[0], 3);
ASSERT_EQ(out.numel(), 6);
ASSERT_EQ(out.meta().type, pten::DataType::FLOAT32);
ASSERT_EQ(out.meta().layout, pten::DataLayout::NCHW);
auto* actual_result = out.data<float>();
for (auto i = 0; i < 6; i++) {
ASSERT_NEAR(actual_result[i], val, 1e-6f);
}
}
/* Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include <gtest/gtest.h>
#include <memory>
#include "paddle/pten/include/manipulation.h"
#include "paddle/pten/api/lib/utils/allocator.h"
#include "paddle/pten/core/dense_tensor.h"
#include "paddle/pten/core/kernel_registry.h"
PT_DECLARE_MODULE(ManipulationCPU);
#if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
PT_DECLARE_MODULE(ManipulationCUDA);
#endif
namespace framework = paddle::framework;
using DDim = paddle::framework::DDim;
TEST(DEV_API, flatten) {
// 1. create tensor
const auto alloc = std::make_shared<paddle::experimental::DefaultAllocator>(
paddle::platform::CPUPlace());
pten::DenseTensor dense_x(
alloc,
pten::DenseTensorMeta(pten::DataType::FLOAT32,
framework::make_ddim({3, 2, 2, 3}),
pten::DataLayout::NCHW));
auto* dense_x_data = dense_x.mutable_data<float>();
for (int i = 0; i < dense_x.numel(); i++) {
dense_x_data[i] = i;
}
int start_axis = 1, stop_axis = 2;
paddle::platform::DeviceContextPool& pool =
paddle::platform::DeviceContextPool::Instance();
auto* dev_ctx = pool.Get(paddle::platform::CPUPlace());
// 2. test API
auto out = pten::Flatten<float>(
*(static_cast<paddle::platform::CPUDeviceContext*>(dev_ctx)),
dense_x,
start_axis,
stop_axis);
// 3. check result
std::vector<int> expect_shape = {3, 4, 3};
ASSERT_EQ(out.dims()[0], expect_shape[0]);
ASSERT_EQ(out.dims()[1], expect_shape[1]);
ASSERT_EQ(out.dims()[2], expect_shape[2]);
ASSERT_EQ(out.numel(), 36);
ASSERT_EQ(out.meta().type, pten::DataType::FLOAT32);
ASSERT_EQ(out.meta().layout, pten::DataLayout::NCHW);
bool value_equal = true;
auto* dense_out_data = out.data<float>();
for (int i = 0; i < dense_x.numel(); i++) {
if (std::abs(dense_x_data[i] - dense_out_data[i]) > 1e-6f)
value_equal = false;
}
ASSERT_EQ(value_equal, true);
}
/* Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include <gtest/gtest.h>
#include <memory>
#include "paddle/pten/include/math.h"
#include "paddle/pten/api/lib/utils/allocator.h"
#include "paddle/pten/core/dense_tensor.h"
#include "paddle/pten/core/kernel_registry.h"
PT_DECLARE_MODULE(MathCPU);
#if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
PT_DECLARE_MODULE(MathCUDA);
#endif
namespace framework = paddle::framework;
using DDim = paddle::framework::DDim;
TEST(DEV_API, mean) {
// 1. create tensor
const auto alloc = std::make_shared<paddle::experimental::DefaultAllocator>(
paddle::platform::CPUPlace());
pten::DenseTensor dense_x(alloc,
pten::DenseTensorMeta(pten::DataType::FLOAT32,
framework::make_ddim({3, 4}),
pten::DataLayout::NCHW));
auto* dense_x_data = dense_x.mutable_data<float>();
float sum = 0.0;
for (size_t i = 0; i < 12; ++i) {
dense_x_data[i] = i * 1.0;
sum += i * 1.0;
}
paddle::platform::DeviceContextPool& pool =
paddle::platform::DeviceContextPool::Instance();
auto* dev_ctx = pool.Get(paddle::platform::CPUPlace());
// 2. test API
auto out = pten::Mean<float>(
*(static_cast<paddle::platform::CPUDeviceContext*>(dev_ctx)), dense_x);
// 3. check result
ASSERT_EQ(out.dims().size(), 1);
ASSERT_EQ(out.numel(), 1);
ASSERT_EQ(out.meta().type, pten::DataType::FLOAT32);
ASSERT_EQ(out.meta().layout, pten::DataLayout::NCHW);
auto expect_result = sum / 12;
auto actual_result = out.data<float>()[0];
ASSERT_NEAR(expect_result, actual_result, 1e-6f);
}
@@ -15,14 +15,12 @@ limitations under the License. */
#include <gtest/gtest.h>
#include <memory>
#include "paddle/pten/api/include/math.h"
#include "paddle/pten/include/math.h"
#include "paddle/pten/api/lib/utils/allocator.h"
#include "paddle/pten/core/dense_tensor.h"
#include "paddle/pten/core/kernel_registry.h"
#include "paddle/pten/include/math.h"
PT_DECLARE_MODULE(MathCPU);
#if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
...
cc_test(test_intrusive_ptr SRCS test_intrusive_ptr.cc)
cc_test(test_type_info SRCS test_type_info.cc)