BaiXuePrincess / Paddle (forked from PaddlePaddle / Paddle, in sync with the fork source)
Commit c334405f (unverified), authored on Oct 25, 2022 by Chen Weihang and committed via GitHub on Oct 25, 2022.
clean useless api tests in phi (#47321)
Parent commit: 1cb12ff5

Showing 39 changed files with 0 additions and 6645 deletions (+0 -6645).

paddle/phi/tests/api/CMakeLists.txt  +0 -68
paddle/phi/tests/api/test_add_n_api.cc  +0 -84
paddle/phi/tests/api/test_cast_api.cc  +0 -91
paddle/phi/tests/api/test_concat_api.cc  +0 -89
paddle/phi/tests/api/test_conj_api.cc  +0 -79
paddle/phi/tests/api/test_dot_api.cc  +0 -88
paddle/phi/tests/api/test_elementwise_api.cc  +0 -255
paddle/phi/tests/api/test_embedding_api.cc  +0 -119
paddle/phi/tests/api/test_empty_api.cc  +0 -120
paddle/phi/tests/api/test_fill_api.cc  +0 -241
paddle/phi/tests/api/test_matmul_api.cc  +0 -211
paddle/phi/tests/api/test_mean_api.cc  +0 -73
paddle/phi/tests/api/test_reshape_api.cc  +0 -107
paddle/phi/tests/api/test_scale_api.cc  +0 -86
paddle/phi/tests/api/test_sparse_conv_api.cc  +0 -175
paddle/phi/tests/api/test_sparse_utils_api.cc  +0 -241
paddle/phi/tests/api/test_split_api.cc  +0 -84
paddle/phi/tests/api/test_sum_api.cc  +0 -73
paddle/phi/tests/kernels/CMakeLists.txt  +0 -81
paddle/phi/tests/kernels/test_cast_dev_api.cc  +0 -74
paddle/phi/tests/kernels/test_concat_dev_api.cc  +0 -84
paddle/phi/tests/kernels/test_conj_dev_api.cc  +0 -70
paddle/phi/tests/kernels/test_copy_dev_api.cc  +0 -78
paddle/phi/tests/kernels/test_creation_dev_api.cc  +0 -137
paddle/phi/tests/kernels/test_dot_dev_api.cc  +0 -82
paddle/phi/tests/kernels/test_elementwise_dev_api.cc  +0 -239
paddle/phi/tests/kernels/test_flatten_dev_api.cc  +0 -78
paddle/phi/tests/kernels/test_matmul_dev_api.cc  +0 -78
paddle/phi/tests/kernels/test_mean_dev_api.cc  +0 -68
paddle/phi/tests/kernels/test_reshape_dev_api.cc  +0 -76
paddle/phi/tests/kernels/test_scale_dev_api.cc  +0 -110
paddle/phi/tests/kernels/test_sparse_activation_dev_api.cc  +0 -82
paddle/phi/tests/kernels/test_sparse_conv3d_dev_api.cc  +0 -834
paddle/phi/tests/kernels/test_sparse_elementwise_dev_api.cc  +0 -409
paddle/phi/tests/kernels/test_sparse_pool_dev_api.cc  +0 -443
paddle/phi/tests/kernels/test_sparse_transpose_dev_api.cc  +0 -165
paddle/phi/tests/kernels/test_sparse_utils_dev_api.cc  +0 -980
paddle/phi/tests/kernels/test_split_dev_api.cc  +0 -124
paddle/phi/tests/kernels/test_sum_dev_api.cc  +0 -69

paddle/phi/tests/api/CMakeLists.txt

@@ -21,42 +21,6 @@ cc_test(
  DEPS gtest)

set(COMMON_API_TEST_DEPS phi_tensor phi_api phi_api_utils)

cc_test(
  test_mean_api
  SRCS test_mean_api.cc
  DEPS ${COMMON_API_TEST_DEPS})
cc_test(
  test_dot_api
  SRCS test_dot_api.cc
  DEPS ${COMMON_API_TEST_DEPS})
cc_test(
  test_matmul_api
  SRCS test_matmul_api.cc
  DEPS ${COMMON_API_TEST_DEPS})
cc_test(
  test_empty_api
  SRCS test_empty_api.cc
  DEPS ${COMMON_API_TEST_DEPS})
cc_test(
  test_fill_api
  SRCS test_fill_api.cc
  DEPS ${COMMON_API_TEST_DEPS} api_scalar)
cc_test(
  test_elementwise_api
  SRCS test_elementwise_api.cc
  DEPS ${COMMON_API_TEST_DEPS})
cc_test(
  test_embedding_api
  SRCS test_embedding_api.cc
  DEPS ${COMMON_API_TEST_DEPS})
cc_test(
  test_cast_api
  SRCS test_cast_api.cc
  DEPS ${COMMON_API_TEST_DEPS})
cc_test(
  test_reshape_api
  SRCS test_reshape_api.cc
  DEPS ${COMMON_API_TEST_DEPS})
cc_test(
  test_to_api
  SRCS test_to_api.cc

@@ -65,42 +29,14 @@ cc_test(
  test_slice_api
  SRCS test_slice_api.cc
  DEPS ${COMMON_API_TEST_DEPS})
cc_test(
  test_sum_api
  SRCS test_sum_api.cc
  DEPS ${COMMON_API_TEST_DEPS})
cc_test(
  test_scale_api
  SRCS test_scale_api.cc
  DEPS ${COMMON_API_TEST_DEPS} api_scalar)
cc_test(
  test_scale_benchmark
  SRCS test_scale_benchmark.cc
  DEPS ${COMMON_API_TEST_DEPS})
cc_test(
  test_conj_api
  SRCS test_conj_api.cc
  DEPS ${COMMON_API_TEST_DEPS})
cc_test(
  test_concat_api
  SRCS test_concat_api.cc
  DEPS ${COMMON_API_TEST_DEPS})
cc_test(
  test_split_api
  SRCS test_split_api.cc
  DEPS ${COMMON_API_TEST_DEPS})
cc_test(
  test_data_transform
  SRCS test_data_transform.cc
  DEPS ${COMMON_API_TEST_DEPS})
cc_test(
  test_sparse_utils_api
  SRCS test_sparse_utils_api.cc
  DEPS ${COMMON_API_TEST_DEPS})
cc_test(
  test_sparse_conv_api
  SRCS test_sparse_conv_api.cc
  DEPS ${COMMON_API_TEST_DEPS})
cc_test(
  test_strings_empty_api
  SRCS test_strings_empty_api.cc

@@ -109,7 +45,3 @@ cc_test(
  test_strings_lower_upper_api
  SRCS test_strings_lower_upper_api.cc
  DEPS ${COMMON_API_TEST_DEPS})
cc_test(
  test_add_n_api
  SRCS test_add_n_api.cc
  DEPS ${COMMON_API_TEST_DEPS})

paddle/phi/tests/api/test_add_n_api.cc (deleted, file mode 100644 → 0)

/* Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include <gtest/gtest.h>
#include <memory>
#include "paddle/fluid/memory/allocation/allocator_facade.h"
#include "paddle/phi/api/include/tensor.h"
#include "paddle/phi/api/lib/api_custom_impl.h"
#include "paddle/phi/common/place.h"
#include "paddle/phi/core/dense_tensor.h"
#include "paddle/phi/core/kernel_registry.h"
#include "paddle/phi/core/selected_rows.h"

PD_DECLARE_KERNEL(add_n_sr, CPU, ALL_LAYOUT);

namespace paddle {
namespace tests {

TEST(API, add_n) {
  // 1. create tensor
  std::vector<int64_t> rows = {0, 1, 2, 3, 4, 5, 6};
  int64_t row_numel = 12;

  auto x_sr = std::make_shared<phi::SelectedRows>(rows, 10);
  auto x_meta = phi::DenseTensorMeta(
      phi::DataType::FLOAT32,
      phi::make_ddim({static_cast<int64_t>(rows.size()), row_numel}),
      phi::DataLayout::NCHW);
  x_sr->mutable_value()->set_meta(x_meta);
  x_sr->AllocateFrom(paddle::memory::allocation::AllocatorFacade::Instance()
                         .GetAllocator(paddle::platform::CPUPlace())
                         .get(),
                     phi::DataType::FLOAT32);
  auto* dense_x_data = x_sr->mutable_value()->data<float>();

  auto y_sr = std::make_shared<phi::SelectedRows>(rows, 10);
  y_sr->mutable_value()->set_meta(x_meta);
  y_sr->AllocateFrom(paddle::memory::allocation::AllocatorFacade::Instance()
                         .GetAllocator(paddle::platform::CPUPlace())
                         .get(),
                     phi::DataType::FLOAT32);
  auto* dense_y_data = y_sr->mutable_value()->data<float>();

  float sum[84] = {0.0};
  for (size_t i = 0; i < 7; ++i) {
    for (size_t j = 0; j < 12; ++j) {
      dense_x_data[i * 12 + j] = (i * 4 + j);
      dense_y_data[i * 12 + j] = (i * 4 + j);
      sum[i * 12 + j] += (i * 4 + j) * 2;
    }
  }

  paddle::experimental::Tensor x(x_sr);
  paddle::experimental::Tensor y(y_sr);
  auto out = paddle::experimental::add_n_impl({x, y});

  // check slice result
  ASSERT_EQ(static_cast<int>(
                std::dynamic_pointer_cast<phi::SelectedRows>(out.impl())
                    ->rows()
                    .size()),
            7);
  for (int64_t i = 0; i < 84; ++i) {
    ASSERT_EQ(sum[i],
              std::dynamic_pointer_cast<phi::SelectedRows>(out.impl())
                  ->value()
                  .data<float>()[i]);
  }
}

}  // namespace tests
}  // namespace paddle

paddle/phi/tests/api/test_cast_api.cc (deleted, file mode 100644 → 0)

/* Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include <gtest/gtest.h>
#include <memory>
#include "paddle/phi/api/include/api.h"
#include "paddle/phi/api/lib/utils/allocator.h"
#include "paddle/phi/core/dense_tensor.h"
#include "paddle/phi/core/kernel_registry.h"

PD_DECLARE_KERNEL(full, CPU, ALL_LAYOUT);
PD_DECLARE_KERNEL(cast, CPU, ALL_LAYOUT);

namespace paddle {
namespace tests {

namespace framework = paddle::framework;
using DDim = phi::DDim;

// TODO(chenweihang): Remove this test after the API is used in the dygraph
TEST(API, cast) {
  // 1. create tensor
  const auto alloc = std::make_unique<paddle::experimental::DefaultAllocator>(
      paddle::platform::CPUPlace());
  auto dense_x = std::make_shared<phi::DenseTensor>(
      alloc.get(),
      phi::DenseTensorMeta(phi::DataType::FLOAT32,
                           phi::make_ddim({3, 4}),
                           phi::DataLayout::NCHW));
  auto* dense_x_data =
      dense_x->mutable_data<float>(paddle::platform::CPUPlace());

  for (int i = 0; i < dense_x->numel(); i++) {
    dense_x_data[i] = i;
  }

  paddle::experimental::Tensor x(dense_x);
  phi::DataType out_dtype = phi::DataType::FLOAT64;
  // 2. test API
  auto out = paddle::experimental::cast(x, out_dtype);

  // 3. check result
  std::vector<int> expect_shape = {3, 4};
  ASSERT_EQ(out.shape().size(), size_t(2));
  ASSERT_EQ(out.shape()[0], expect_shape[0]);
  ASSERT_EQ(out.shape()[1], expect_shape[1]);
  ASSERT_EQ(out.numel(), 12);
  ASSERT_EQ(out.is_cpu(), true);
  ASSERT_EQ(out.type(), phi::DataType::FLOAT64);
  ASSERT_EQ(out.layout(), phi::DataLayout::NCHW);
  ASSERT_EQ(out.initialized(), true);

  auto dense_out = std::dynamic_pointer_cast<phi::DenseTensor>(out.impl());
  auto* dense_out_data = dense_out->data<double>();
  for (int i = 0; i < dense_x->numel(); i++) {
    ASSERT_NEAR(dense_out_data[i], static_cast<double>(dense_x_data[i]), 1e-6f);
  }
}

TEST(Tensor, cast) {
  auto x = paddle::experimental::full({3, 4}, 1.0, phi::DataType::FLOAT32);
  auto y = x.cast(phi::DataType::INT32);

  // check slice result
  ASSERT_EQ(y.dims().size(), 2);
  ASSERT_EQ(y.dims()[0], 3);
  ASSERT_EQ(y.dims()[1], 4);
  ASSERT_EQ(y.numel(), 12);
  ASSERT_EQ(y.is_cpu(), true);
  ASSERT_EQ(y.type(), phi::DataType::INT32);
  ASSERT_EQ(y.layout(), phi::DataLayout::NCHW);
  ASSERT_EQ(y.initialized(), true);
  for (int64_t i = 0; i < y.numel(); ++i) {
    ASSERT_EQ(y.mutable_data<int>()[i], 1);
  }
}

}  // namespace tests
}  // namespace paddle

paddle/phi/tests/api/test_concat_api.cc (deleted, file mode 100644 → 0)

/* Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include <gtest/gtest.h>
#include <memory>
#include "paddle/phi/api/include/api.h"
#include "paddle/phi/api/lib/utils/allocator.h"
#include "paddle/phi/core/dense_tensor.h"
#include "paddle/phi/core/kernel_registry.h"

PD_DECLARE_KERNEL(concat, CPU, ALL_LAYOUT);

namespace paddle {
namespace tests {

using DDim = phi::DDim;

// TODO(chentianyu03): Remove this test after the API is used in the dygraph
TEST(API, concat) {
  // 1. create tensor
  const auto alloc = std::make_unique<paddle::experimental::DefaultAllocator>(
      paddle::platform::CPUPlace());
  auto dense_x = std::make_shared<phi::DenseTensor>(
      alloc.get(),
      phi::DenseTensorMeta(phi::DataType::FLOAT32,
                           phi::make_ddim({3, 10}),
                           phi::DataLayout::NCHW));
  auto* dense_x_data =
      dense_x->mutable_data<float>(paddle::platform::CPUPlace());

  auto dense_y = std::make_shared<phi::DenseTensor>(
      alloc.get(),
      phi::DenseTensorMeta(phi::DataType::FLOAT32,
                           phi::make_ddim({3, 10}),
                           phi::DataLayout::NCHW));
  auto* dense_y_data =
      dense_y->mutable_data<float>(paddle::platform::CPUPlace());

  for (size_t i = 0; i < 3; ++i) {
    for (size_t j = 0; j < 10; ++j) {
      dense_x_data[i * 10 + j] = (i * 10 + j) * 1.0;
      dense_y_data[i * 10 + j] = (i * 10 + j) * 1.0;
    }
  }

  paddle::experimental::Tensor x(dense_x);
  paddle::experimental::Tensor y(dense_y);
  std::vector<paddle::experimental::Tensor> inputs{x, y};

  // 2. test API
  auto out = paddle::experimental::concat(inputs, 0);

  // 3. check result
  ASSERT_EQ(out.dims().size(), 2);
  ASSERT_EQ(out.dims()[0], 6);
  ASSERT_EQ(out.dims()[1], 10);
  ASSERT_EQ(out.numel(), 60);
  ASSERT_EQ(out.is_cpu(), true);
  ASSERT_EQ(out.type(), phi::DataType::FLOAT32);
  ASSERT_EQ(out.layout(), phi::DataLayout::NCHW);
  ASSERT_EQ(out.initialized(), true);

  auto dense_out = std::dynamic_pointer_cast<phi::DenseTensor>(out.impl());
  auto out_data = dense_out->data<float>();
  for (size_t i = 0; i < 60; ++i) {
    if (i < 30) {
      ASSERT_NEAR(dense_x_data[i], out_data[i], 1e-6f);
    } else {
      ASSERT_NEAR(dense_y_data[i - 30], out_data[i], 1e-6f);
    }
  }
}

}  // namespace tests
}  // namespace paddle

paddle/phi/tests/api/test_conj_api.cc (deleted, file mode 100644 → 0)

/* Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include <gtest/gtest.h>
#include <memory>
#include "paddle/phi/api/include/api.h"
#include "paddle/phi/api/lib/utils/allocator.h"
#include "paddle/phi/core/dense_tensor.h"
#include "paddle/phi/core/kernel_registry.h"

PD_DECLARE_KERNEL(conj, CPU, ALL_LAYOUT);

namespace paddle {
namespace tests {

namespace framework = paddle::framework;
using DDim = phi::DDim;

// TODO(chenweihang): Remove this test after the API is used in the dygraph
TEST(API, conj) {
  // 1. create tensor
  const auto alloc = std::make_unique<paddle::experimental::DefaultAllocator>(
      paddle::platform::CPUPlace());
  auto dense_x = std::make_shared<phi::DenseTensor>(
      alloc.get(),
      phi::DenseTensorMeta(phi::DataType::COMPLEX64,
                           phi::make_ddim({3, 10}),
                           phi::DataLayout::NCHW));
  auto* dense_x_data =
      dense_x->mutable_data<paddle::complex64>(paddle::platform::CPUPlace());

  for (size_t i = 0; i < 3; ++i) {
    for (size_t j = 0; j < 10; ++j) {
      dense_x_data[i * 10 + j] = paddle::complex64(i * 10 + j, i * 10 + j);
    }
  }

  paddle::experimental::Tensor x(dense_x);

  // 2. test API
  auto out = paddle::experimental::conj(x);

  // 3. check result
  ASSERT_EQ(out.dims().size(), 2);
  ASSERT_EQ(out.dims()[0], 3);
  ASSERT_EQ(out.dims()[1], 10);
  ASSERT_EQ(out.numel(), 30);
  ASSERT_EQ(out.is_cpu(), true);
  ASSERT_EQ(out.type(), phi::DataType::COMPLEX64);
  ASSERT_EQ(out.layout(), phi::DataLayout::NCHW);
  ASSERT_EQ(out.initialized(), true);

  auto dense_out = std::dynamic_pointer_cast<phi::DenseTensor>(out.impl());
  auto actual_result = dense_out->data<paddle::complex64>();

  for (size_t i = 0; i < 3; ++i) {
    for (size_t j = 0; j < 10; ++j) {
      dense_x_data[i * 10 + j] = paddle::complex64(i * 10 + j, i * 10 + j);
      ASSERT_NEAR(actual_result[i * 10 + j].real, 1.0 * (i * 10 + j), 1e-6f);
      ASSERT_NEAR(actual_result[i * 10 + j].imag, -1.0 * (i * 10 + j), 1e-6f);
    }
  }
}

}  // namespace tests
}  // namespace paddle

paddle/phi/tests/api/test_dot_api.cc (deleted, file mode 100644 → 0)

/* Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include <gtest/gtest.h>
#include <memory>
#include "paddle/phi/api/include/api.h"
#include "paddle/phi/api/lib/utils/allocator.h"
#include "paddle/phi/core/dense_tensor.h"
#include "paddle/phi/core/kernel_registry.h"

PD_DECLARE_KERNEL(dot, CPU, ALL_LAYOUT);

namespace paddle {
namespace tests {

namespace framework = paddle::framework;
using DDim = phi::DDim;

// TODO(chenweihang): Remove this test after the API is used in the dygraph
TEST(API, dot) {
  // 1. create tensor
  const auto alloc = std::make_unique<paddle::experimental::DefaultAllocator>(
      paddle::platform::CPUPlace());
  auto dense_x = std::make_shared<phi::DenseTensor>(
      alloc.get(),
      phi::DenseTensorMeta(phi::DataType::FLOAT32,
                           phi::make_ddim({3, 10}),
                           phi::DataLayout::NCHW));
  auto* dense_x_data =
      dense_x->mutable_data<float>(paddle::platform::CPUPlace());

  auto dense_y = std::make_shared<phi::DenseTensor>(
      alloc.get(),
      phi::DenseTensorMeta(phi::DataType::FLOAT32,
                           phi::make_ddim({3, 10}),
                           phi::DataLayout::NCHW));
  auto* dense_y_data =
      dense_y->mutable_data<float>(paddle::platform::CPUPlace());

  float sum[3] = {0.0, 0.0, 0.0};
  for (size_t i = 0; i < 3; ++i) {
    for (size_t j = 0; j < 10; ++j) {
      dense_x_data[i * 10 + j] = (i * 10 + j) * 1.0;
      dense_y_data[i * 10 + j] = (i * 10 + j) * 1.0;
      sum[i] += (i * 10 + j) * (i * 10 + j) * 1.0;
    }
  }

  paddle::experimental::Tensor x(dense_x);
  paddle::experimental::Tensor y(dense_y);

  // 2. test API
  auto out = paddle::experimental::dot(x, y);

  // 3. check result
  ASSERT_EQ(out.dims().size(), 2);
  ASSERT_EQ(out.dims()[0], 3);
  ASSERT_EQ(out.numel(), 3);
  ASSERT_EQ(out.is_cpu(), true);
  ASSERT_EQ(out.type(), phi::DataType::FLOAT32);
  ASSERT_EQ(out.layout(), phi::DataLayout::NCHW);
  ASSERT_EQ(out.initialized(), true);

  auto expect_result = sum;
  auto dense_out = std::dynamic_pointer_cast<phi::DenseTensor>(out.impl());
  auto actual_result0 = dense_out->data<float>()[0];
  auto actual_result1 = dense_out->data<float>()[1];
  auto actual_result2 = dense_out->data<float>()[2];
  ASSERT_NEAR(expect_result[0], actual_result0, 1e-6f);
  ASSERT_NEAR(expect_result[1], actual_result1, 1e-6f);
  ASSERT_NEAR(expect_result[2], actual_result2, 1e-6f);
}

}  // namespace tests
}  // namespace paddle

paddle/phi/tests/api/test_elementwise_api.cc (deleted, file mode 100644 → 0)

/* Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include <gtest/gtest.h>
#include <memory>
#include "paddle/phi/api/include/api.h"
#include "paddle/phi/api/lib/utils/allocator.h"
#include "paddle/phi/core/dense_tensor.h"
#include "paddle/phi/core/kernel_registry.h"

PD_DECLARE_KERNEL(add, CPU, ALL_LAYOUT);
PD_DECLARE_KERNEL(subtract, CPU, ALL_LAYOUT);
PD_DECLARE_KERNEL(multiply, CPU, ALL_LAYOUT);
PD_DECLARE_KERNEL(divide, CPU, ALL_LAYOUT);

namespace paddle {
namespace tests {

namespace framework = paddle::framework;
using DDim = phi::DDim;

// TODO(chenweihang): Remove this test after the API is used in the dygraph
TEST(API, add) {
  // 1. create tensor
  const auto alloc = std::make_unique<paddle::experimental::DefaultAllocator>(
      paddle::platform::CPUPlace());
  auto dense_x = std::make_shared<phi::DenseTensor>(
      alloc.get(),
      phi::DenseTensorMeta(phi::DataType::FLOAT32,
                           phi::make_ddim({3, 10}),
                           phi::DataLayout::NCHW));
  auto* dense_x_data =
      dense_x->mutable_data<float>(paddle::platform::CPUPlace());

  auto dense_y = std::make_shared<phi::DenseTensor>(
      alloc.get(),
      phi::DenseTensorMeta(phi::DataType::FLOAT32,
                           phi::make_ddim({10}),
                           phi::DataLayout::NCHW));
  auto* dense_y_data =
      dense_y->mutable_data<float>(paddle::platform::CPUPlace());

  float sum[3][10] = {0.0};
  for (size_t i = 0; i < 3; ++i) {
    for (size_t j = 0; j < 10; ++j) {
      dense_x_data[i * 10 + j] = (i * 10 + j) * 1.0;
      sum[i][j] = (i * 10 + j) * 1.0 + j * 2.0;
    }
  }
  for (size_t i = 0; i < 10; ++i) {
    dense_y_data[i] = i * 2.0;
  }

  paddle::experimental::Tensor x(dense_x);
  paddle::experimental::Tensor y(dense_y);

  // 2. test API
  auto out = paddle::experimental::add(x, y);

  // 3. check result
  ASSERT_EQ(out.shape().size(), 2UL);
  ASSERT_EQ(out.shape()[0], 3);
  ASSERT_EQ(out.numel(), 30);
  ASSERT_EQ(out.is_cpu(), true);
  ASSERT_EQ(out.type(), phi::DataType::FLOAT32);
  ASSERT_EQ(out.layout(), phi::DataLayout::NCHW);
  ASSERT_EQ(out.initialized(), true);

  auto expect_result = sum;
  auto dense_out = std::dynamic_pointer_cast<phi::DenseTensor>(out.impl());
  auto actual_result0 = dense_out->data<float>()[0];
  auto actual_result1 = dense_out->data<float>()[1];
  auto actual_result2 = dense_out->data<float>()[10];
  ASSERT_NEAR(expect_result[0][0], actual_result0, 1e-6f);
  ASSERT_NEAR(expect_result[0][1], actual_result1, 1e-6f);
  ASSERT_NEAR(expect_result[1][0], actual_result2, 1e-6f);
}

// TODO(chenweihang): Remove this test after the API is used in the dygraph
TEST(API, subtract) {
  // 1. create tensor
  const auto alloc = std::make_unique<paddle::experimental::DefaultAllocator>(
      paddle::platform::CPUPlace());
  auto dense_x = std::make_shared<phi::DenseTensor>(
      alloc.get(),
      phi::DenseTensorMeta(phi::DataType::FLOAT32,
                           phi::make_ddim({3, 10}),
                           phi::DataLayout::NCHW));
  auto* dense_x_data =
      dense_x->mutable_data<float>(paddle::platform::CPUPlace());

  auto dense_y = std::make_shared<phi::DenseTensor>(
      alloc.get(),
      phi::DenseTensorMeta(phi::DataType::FLOAT32,
                           phi::make_ddim({10}),
                           phi::DataLayout::NCHW));
  auto* dense_y_data =
      dense_y->mutable_data<float>(paddle::platform::CPUPlace());

  float sub[3][10] = {0.0};
  for (size_t i = 0; i < 3; ++i) {
    for (size_t j = 0; j < 10; ++j) {
      dense_x_data[i * 10 + j] = (i * 10 + j) * 1.0;
      sub[i][j] = (i * 10 + j) * 1.0 - j * 2.0;
    }
  }
  for (size_t i = 0; i < 10; ++i) {
    dense_y_data[i] = i * 2.0;
  }

  paddle::experimental::Tensor x(dense_x);
  paddle::experimental::Tensor y(dense_y);

  // 2. test API
  auto out = paddle::experimental::subtract(x, y);

  // 3. check result
  ASSERT_EQ(out.shape().size(), 2UL);
  ASSERT_EQ(out.shape()[0], 3);
  ASSERT_EQ(out.numel(), 30);
  ASSERT_EQ(out.is_cpu(), true);
  ASSERT_EQ(out.type(), phi::DataType::FLOAT32);
  ASSERT_EQ(out.layout(), phi::DataLayout::NCHW);
  ASSERT_EQ(out.initialized(), true);

  auto expect_result = sub;
  auto dense_out = std::dynamic_pointer_cast<phi::DenseTensor>(out.impl());
  auto actual_result0 = dense_out->data<float>()[0];
  auto actual_result1 = dense_out->data<float>()[1];
  auto actual_result2 = dense_out->data<float>()[10];
  ASSERT_NEAR(expect_result[0][0], actual_result0, 1e-6f);
  ASSERT_NEAR(expect_result[0][1], actual_result1, 1e-6f);
  ASSERT_NEAR(expect_result[1][0], actual_result2, 1e-6f);
}

// TODO(chenweihang): Remove this test after the API is used in the dygraph
TEST(API, divide) {
  // 1. create tensor
  const auto alloc = std::make_unique<paddle::experimental::DefaultAllocator>(
      paddle::platform::CPUPlace());
  auto dense_x = std::make_shared<phi::DenseTensor>(
      alloc.get(),
      phi::DenseTensorMeta(phi::DataType::FLOAT32,
                           phi::make_ddim({3, 10}),
                           phi::DataLayout::NCHW));
  auto* dense_x_data =
      dense_x->mutable_data<float>(paddle::platform::CPUPlace());

  auto dense_y = std::make_shared<phi::DenseTensor>(
      alloc.get(),
      phi::DenseTensorMeta(phi::DataType::FLOAT32,
                           phi::make_ddim({10}),
                           phi::DataLayout::NCHW));
  auto* dense_y_data =
      dense_y->mutable_data<float>(paddle::platform::CPUPlace());

  float div[3][10] = {0.0};
  for (size_t i = 0; i < 3; ++i) {
    for (size_t j = 0; j < 10; ++j) {
      dense_x_data[i * 10 + j] = (i * 10 + j) * 1.0;
      div[i][j] = (i * 10 + j) * 1.0 / (j * 2.0 + 1);
    }
  }
  for (size_t i = 0; i < 10; ++i) {
    dense_y_data[i] = i * 2.0 + 1;
  }

  paddle::experimental::Tensor x(dense_x);
  paddle::experimental::Tensor y(dense_y);

  // 2. test API
  auto out = paddle::experimental::divide(x, y);

  // 3. check result
  ASSERT_EQ(out.shape().size(), 2UL);
  ASSERT_EQ(out.shape()[0], 3);
  ASSERT_EQ(out.numel(), 30);
  ASSERT_EQ(out.is_cpu(), true);
  ASSERT_EQ(out.type(), phi::DataType::FLOAT32);
  ASSERT_EQ(out.layout(), phi::DataLayout::NCHW);
  ASSERT_EQ(out.initialized(), true);

  auto expect_result = div;
  auto dense_out = std::dynamic_pointer_cast<phi::DenseTensor>(out.impl());
  auto actual_result0 = dense_out->data<float>()[0];
  auto actual_result1 = dense_out->data<float>()[1];
  auto actual_result2 = dense_out->data<float>()[10];
  ASSERT_NEAR(expect_result[0][0], actual_result0, 1e-6f);
  ASSERT_NEAR(expect_result[0][1], actual_result1, 1e-6f);
  ASSERT_NEAR(expect_result[1][0], actual_result2, 1e-6f);
}

TEST(API, multiply) {
  // 1. create tensor
  const auto alloc = std::make_unique<paddle::experimental::DefaultAllocator>(
      paddle::platform::CPUPlace());
  auto dense_x = std::make_shared<phi::DenseTensor>(
      alloc.get(),
      phi::DenseTensorMeta(phi::DataType::FLOAT32,
                           phi::make_ddim({3, 10}),
                           phi::DataLayout::NCHW));
  auto* dense_x_data =
      dense_x->mutable_data<float>(paddle::platform::CPUPlace());

  auto dense_y = std::make_shared<phi::DenseTensor>(
      alloc.get(),
      phi::DenseTensorMeta(phi::DataType::FLOAT32,
                           phi::make_ddim({10}),
                           phi::DataLayout::NCHW));
  auto* dense_y_data =
      dense_y->mutable_data<float>(paddle::platform::CPUPlace());

  float mul[3][10] = {0.0};
  for (size_t i = 0; i < 3; ++i) {
    for (size_t j = 0; j < 10; ++j) {
      dense_x_data[i * 10 + j] = (i * 10 + j) * 1.0;
      mul[i][j] = (i * 10 + j) * 1.0 * j * 2.0;
    }
  }
  for (size_t i = 0; i < 10; ++i) {
    dense_y_data[i] = i * 2.0;
  }

  paddle::experimental::Tensor x(dense_x);
  paddle::experimental::Tensor y(dense_y);

  // 2. test API
  auto out = paddle::experimental::multiply(x, y);

  // 3. check result
  ASSERT_EQ(out.shape().size(), 2UL);
  ASSERT_EQ(out.shape()[0], 3);
  ASSERT_EQ(out.numel(), 30);
  ASSERT_EQ(out.is_cpu(), true);
  ASSERT_EQ(out.type(), phi::DataType::FLOAT32);
  ASSERT_EQ(out.layout(), phi::DataLayout::NCHW);
  ASSERT_EQ(out.initialized(), true);

  auto expect_result = mul;
  auto dense_out = std::dynamic_pointer_cast<phi::DenseTensor>(out.impl());
  auto actual_result0 = dense_out->data<float>()[0];
  auto actual_result1 = dense_out->data<float>()[1];
  auto actual_result2 = dense_out->data<float>()[10];
  ASSERT_NEAR(expect_result[0][0], actual_result0, 1e-6f);
  ASSERT_NEAR(expect_result[0][1], actual_result1, 1e-6f);
  ASSERT_NEAR(expect_result[1][0], actual_result2, 1e-6f);
}

}  // namespace tests
}  // namespace paddle

paddle/phi/tests/api/test_embedding_api.cc (deleted, file mode 100644 → 0)

/* Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include <gtest/gtest.h>
#include <memory>
#include "paddle/phi/api/backward/backward_api.h"
#include "paddle/phi/api/include/api.h"
#include "paddle/phi/core/dense_tensor.h"
#include "paddle/phi/core/kernel_registry.h"

PD_DECLARE_KERNEL(sparse_weight_embedding, CPU, ALL_LAYOUT);
PD_DECLARE_KERNEL(sparse_weight_embedding_grad, CPU, ALL_LAYOUT);
PD_DECLARE_KERNEL(sparse_weight_embedding_sparse_grad, CPU, ALL_LAYOUT);
PD_DECLARE_KERNEL(empty, CPU, ALL_LAYOUT);
PD_DECLARE_KERNEL(full, CPU, ALL_LAYOUT);

namespace paddle {
namespace tests {

TEST(API, sparse_weight_embedding) {
  auto x = paddle::experimental::empty({4}, DataType::INT32);
  auto* x_data = x.data<int32_t>();
  x_data[0] = 0;
  x_data[1] = 4;
  x_data[2] = 3;
  x_data[3] = 1;

  auto weight_sr = std::make_shared<phi::SelectedRows>(
      std::vector<int64_t>{0, 1, 2, 3, 4, 5, 6}, 16);
  *weight_sr->mutable_value() = *static_cast<phi::DenseTensor*>(
      paddle::experimental::full({7, 3}, 2, DataType::FLOAT32).impl().get());
  paddle::experimental::Tensor weight;
  weight.set_impl(weight_sr);

  auto out = paddle::experimental::embedding(x, weight);

  // 3. check result
  ASSERT_EQ(out.dims().size(), 2);
  ASSERT_EQ(out.dims()[0], 4);
  ASSERT_EQ(out.numel(), 12);
  ASSERT_EQ(out.type(), phi::DataType::FLOAT32);
  ASSERT_EQ(out.layout(), phi::DataLayout::NCHW);
}

TEST(API, sparse_weight_embedding_grad) {
  auto x = paddle::experimental::empty({4}, DataType::INT32);
  auto* x_data = x.data<int32_t>();
  x_data[0] = 0;
  x_data[1] = 4;
  x_data[2] = 3;
  x_data[3] = 1;

  auto weight_sr = std::make_shared<phi::SelectedRows>(
      std::vector<int64_t>{0, 1, 2, 3, 4, 5, 6}, 16);
  *weight_sr->mutable_value() = *static_cast<phi::DenseTensor*>(
      paddle::experimental::full({7, 3}, 2, DataType::FLOAT32).impl().get());
  paddle::experimental::Tensor weight;
  weight.set_impl(weight_sr);

  auto out_grad = paddle::experimental::full({4, 3}, 1, DataType::FLOAT32);

  paddle::experimental::Tensor weight_grad;
  paddle::experimental::embedding_grad(
      x, weight, out_grad, -1, false, &weight_grad);

  // 3. check result
  ASSERT_EQ(weight_grad.dims().size(), 2);
  ASSERT_EQ(weight_grad.dims()[0], 16);
  ASSERT_EQ(weight_grad.numel(), 48);
  ASSERT_EQ(weight_grad.type(), phi::DataType::FLOAT32);
  ASSERT_EQ(weight_grad.layout(), phi::DataLayout::NCHW);
}

TEST(API, sparse_weight_embedding_sparse_grad) {
  auto x = paddle::experimental::empty({4}, DataType::INT32);
  auto* x_data = x.data<int32_t>();
  x_data[0] = 0;
  x_data[1] = 4;
  x_data[2] = 3;
  x_data[3] = 1;

  auto weight_sr = std::make_shared<phi::SelectedRows>(
      std::vector<int64_t>{0, 1, 2, 3, 4, 5, 6}, 16);
  *weight_sr->mutable_value() = *static_cast<phi::DenseTensor*>(
      paddle::experimental::full({7, 3}, 2, DataType::FLOAT32).impl().get());
  paddle::experimental::Tensor weight;
  weight.set_impl(weight_sr);

  auto out_grad = paddle::experimental::full({4, 3}, 1, DataType::FLOAT32);

  paddle::experimental::Tensor weight_grad;
  paddle::experimental::embedding_grad(
      x, weight, out_grad, -1, true, &weight_grad);

  // 3. check result
  ASSERT_EQ(weight_grad.dims().size(), 2);
  ASSERT_EQ(weight_grad.dims()[0], 4);
  ASSERT_EQ(weight_grad.numel(), 12);
  ASSERT_EQ(weight_grad.type(), phi::DataType::FLOAT32);
  ASSERT_EQ(weight_grad.layout(), phi::DataLayout::NCHW);
}

}  // namespace tests
}  // namespace paddle

paddle/phi/tests/api/test_empty_api.cc (deleted, file mode 100644 → 0)

/* Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include <gtest/gtest.h>
#include <memory>
#include "paddle/phi/api/include/api.h"
#include "paddle/phi/api/lib/utils/allocator.h"
#include "paddle/phi/core/dense_tensor.h"
#include "paddle/phi/core/kernel_registry.h"

PD_DECLARE_KERNEL(empty, CPU, ALL_LAYOUT);

namespace paddle {
namespace tests {

namespace framework = paddle::framework;
using DDim = phi::DDim;

// TODO(chenweihang): Remove this test after the API is used in the dygraph
TEST(API, empty_like) {
  // 1. create tensor
  const auto alloc = std::make_unique<paddle::experimental::DefaultAllocator>(
      paddle::platform::CPUPlace());
  auto dense_x = std::make_shared<phi::DenseTensor>(
      alloc.get(),
      phi::DenseTensorMeta(phi::DataType::FLOAT32,
                           phi::make_ddim({3, 2}),
                           phi::DataLayout::NCHW));

  paddle::experimental::Tensor x(dense_x);

  // 2. test API
  auto out = paddle::experimental::empty_like(x, phi::DataType::FLOAT32);

  // 3. check result
  ASSERT_EQ(out.dims().size(), 2);
  ASSERT_EQ(out.dims()[0], 3);
  ASSERT_EQ(out.numel(), 6);
  ASSERT_EQ(out.type(), phi::DataType::FLOAT32);
  ASSERT_EQ(out.layout(), phi::DataLayout::NCHW);
}

TEST(API, empty1) {
  // 1. create tensor
  const auto alloc = std::make_unique<paddle::experimental::DefaultAllocator>(
      paddle::platform::CPUPlace());

  auto dense_shape = std::make_shared<phi::DenseTensor>(
      alloc.get(),
      phi::DenseTensorMeta(phi::DataType::INT64,
                           phi::make_ddim({2}),
                           phi::DataLayout::NCHW));
  auto* shape_data =
      dense_shape->mutable_data<int64_t>(paddle::platform::CPUPlace());
  shape_data[0] = 2;
  shape_data[1] = 3;

  paddle::experimental::Tensor tensor_shape(dense_shape);

  // 2. test API
  auto out = paddle::experimental::empty(tensor_shape, phi::DataType::FLOAT32);

  // 3. check result
  ASSERT_EQ(out.shape().size(), 2UL);
  ASSERT_EQ(out.shape()[0], 2);
  ASSERT_EQ(out.numel(), 6);
  ASSERT_EQ(out.type(), phi::DataType::FLOAT32);
  ASSERT_EQ(out.layout(), phi::DataLayout::NCHW);
}

TEST(API, empty2) {
  const auto alloc = std::make_unique<paddle::experimental::DefaultAllocator>(
      paddle::platform::CPUPlace());

  auto dense_scalar = std::make_shared<phi::DenseTensor>(
      alloc.get(),
      phi::DenseTensorMeta(phi::DataType::INT32,
                           phi::make_ddim({1}),
                           phi::DataLayout::NCHW));
  dense_scalar->mutable_data<int32_t>(paddle::platform::CPUPlace())[0] = 2;

  paddle::experimental::Tensor shape_scalar1(dense_scalar);
  paddle::experimental::Tensor shape_scalar2(dense_scalar);
  std::vector<paddle::experimental::Tensor> list_shape{shape_scalar1,
                                                       shape_scalar2};

  auto out = paddle::experimental::empty(list_shape, phi::DataType::FLOAT32);

  ASSERT_EQ(out.shape().size(), 2UL);
  ASSERT_EQ(out.shape()[0], 2);
  ASSERT_EQ(out.numel(), 4);
  ASSERT_EQ(out.type(), phi::DataType::FLOAT32);
  ASSERT_EQ(out.layout(), phi::DataLayout::NCHW);
}

TEST(API, empty3) {
  std::vector<int64_t> vector_shape{2, 3};

  auto out = paddle::experimental::empty(vector_shape, phi::DataType::INT32);

  ASSERT_EQ(out.shape().size(), 2UL);
  ASSERT_EQ(out.shape()[0], 2);
  ASSERT_EQ(out.numel(), 6);
  ASSERT_EQ(out.type(), phi::DataType::INT32);
  ASSERT_EQ(out.layout(), phi::DataLayout::NCHW);
}

}  // namespace tests
}  // namespace paddle

paddle/phi/tests/api/test_fill_api.cc (deleted, file mode 100644 → 0)

/* Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include <gtest/gtest.h>
#include <memory>
#include "paddle/phi/api/include/api.h"
#include "paddle/phi/api/lib/utils/allocator.h"
#include "paddle/phi/core/dense_tensor.h"
#include "paddle/phi/core/kernel_registry.h"

PD_DECLARE_KERNEL(full, CPU, ALL_LAYOUT);

namespace paddle {
namespace tests {

namespace framework = paddle::framework;
using DDim = phi::DDim;

// TODO(chenweihang): Remove this test after the API is used in the dygraph
TEST(API, full_like) {
  // 1. create tensor
  const auto alloc = std::make_unique<paddle::experimental::DefaultAllocator>(
      paddle::platform::CPUPlace());
  auto dense_x = std::make_shared<phi::DenseTensor>(
      alloc.get(),
      phi::DenseTensorMeta(phi::DataType::FLOAT32,
                           phi::make_ddim({3, 2}),
                           phi::DataLayout::NCHW));
  auto* dense_x_data =
      dense_x->mutable_data<float>(paddle::platform::CPUPlace());
  dense_x_data[0] = 0;

  float val = 1.0;

  paddle::experimental::Tensor x(dense_x);

  // 2. test API
  auto out = paddle::experimental::full_like(x, val, phi::DataType::FLOAT32);

  // 3. check result
  ASSERT_EQ(out.dims().size(), 2);
  ASSERT_EQ(out.dims()[0], 3);
  ASSERT_EQ(out.numel(), 6);
  ASSERT_EQ(out.is_cpu(), true);
  ASSERT_EQ(out.type(), phi::DataType::FLOAT32);
  ASSERT_EQ(out.layout(), phi::DataLayout::NCHW);
  ASSERT_EQ(out.initialized(), true);

  auto dense_out = std::dynamic_pointer_cast<phi::DenseTensor>(out.impl());
  auto* actual_result = dense_out->data<float>();
  for (auto i = 0; i < 6; i++) {
    ASSERT_NEAR(actual_result[i], val, 1e-6f);
  }
}

TEST(API, zeros_like) {
  // 1. create tensor
  const auto alloc = std::make_unique<paddle::experimental::DefaultAllocator>(
      paddle::platform::CPUPlace());
  auto dense_x = std::make_shared<phi::DenseTensor>(
      alloc.get(),
      phi::DenseTensorMeta(phi::DataType::FLOAT32,
                           phi::make_ddim({3, 2}),
                           phi::DataLayout::NCHW));
  auto* dense_x_data =
      dense_x->mutable_data<float>(paddle::platform::CPUPlace());
  dense_x_data[0] = 1;

  paddle::experimental::Tensor x(dense_x);

  // 2. test API
  auto out = paddle::experimental::zeros_like(x, phi::DataType::INT32);

  // 3. check result
  ASSERT_EQ(out.dims().size(), 2);
  ASSERT_EQ(out.dims()[0], 3);
  ASSERT_EQ(out.numel(), 6);
  ASSERT_EQ(out.is_cpu(), true);
  ASSERT_EQ(out.type(), phi::DataType::INT32);
  ASSERT_EQ(out.layout(), phi::DataLayout::NCHW);
  ASSERT_EQ(out.initialized(), true);

  auto dense_out = std::dynamic_pointer_cast<phi::DenseTensor>(out.impl());
  auto* actual_result = dense_out->data<int32_t>();
  for (auto i = 0; i < 6; i++) {
    ASSERT_EQ(actual_result[i], 0);
  }
}

TEST(API, ones_like) {
  // 1. create tensor
  const auto alloc = std::make_unique<paddle::experimental::DefaultAllocator>(
      paddle::platform::CPUPlace());
  auto dense_x = std::make_shared<phi::DenseTensor>(
      alloc.get(),
      phi::DenseTensorMeta(phi::DataType::INT32,
                           phi::make_ddim({3, 2}),
                           phi::DataLayout::NCHW));
  auto* dense_x_data =
      dense_x->mutable_data<int32_t>(paddle::platform::CPUPlace());
  dense_x_data[0] = 0;

  paddle::experimental::Tensor x(dense_x);

  // 2. test API
  auto out = paddle::experimental::ones_like(x, phi::DataType::INT32);

  // 3. check result
  ASSERT_EQ(out.dims().size(), 2);
  ASSERT_EQ(out.dims()[0], 3);
  ASSERT_EQ(out.numel(), 6);
  ASSERT_EQ(out.is_cpu(), true);
  ASSERT_EQ(out.type(), phi::DataType::INT32);
  ASSERT_EQ(out.layout(), phi::DataLayout::NCHW);
  ASSERT_EQ(out.initialized(), true);

  auto dense_out = std::dynamic_pointer_cast<phi::DenseTensor>(out.impl());
  auto* actual_result = dense_out->data<int32_t>();
  for (auto i = 0; i < 6; i++) {
    ASSERT_EQ(actual_result[i], 1);
  }
}

TEST(API, full1) {
  // 1. create tensor
  const auto alloc = std::make_unique<paddle::experimental::DefaultAllocator>(
      paddle::platform::CPUPlace());

  auto dense_shape = std::make_shared<phi::DenseTensor>(
      alloc.get(),
      phi::DenseTensorMeta(phi::DataType::INT64,
                           phi::make_ddim({2}),
                           phi::DataLayout::NCHW));
  auto* shape_data =
      dense_shape->mutable_data<int64_t>(paddle::platform::CPUPlace());
  shape_data[0] = 2;
  shape_data[1] = 3;

  auto dense_scalar = std::make_shared<phi::DenseTensor>(
      alloc.get(),
      phi::DenseTensorMeta(phi::DataType::FLOAT32,
                           phi::make_ddim({1}),
                           phi::DataLayout::NCHW));
  dense_scalar->mutable_data<float>(paddle::platform::CPUPlace())[0] = 1.0;

  paddle::experimental::Tensor value(dense_scalar);
  paddle::experimental::Tensor tensor_shape(dense_shape);

  float val = 1.0;

  // 2. test API
  auto out =
      paddle::experimental::full(tensor_shape, value, phi::DataType::FLOAT32);

  // 3. check result
  ASSERT_EQ(out.shape().size(), 2UL);
  ASSERT_EQ(out.shape()[0], 2);
  ASSERT_EQ(out.numel(), 6);
  ASSERT_EQ(out.is_cpu(), true);
  ASSERT_EQ(out.type(), phi::DataType::FLOAT32);
  ASSERT_EQ(out.layout(), phi::DataLayout::NCHW);
  ASSERT_EQ(out.initialized(), true);

  auto dense_out = std::dynamic_pointer_cast<phi::DenseTensor>(out.impl());
  auto* actual_result = dense_out->data<float>();
  for (auto i = 0; i < 6; i++) {
    ASSERT_NEAR(actual_result[i], val, 1e-6f);
  }
}

TEST(API, full2) {
  const auto alloc = std::make_unique<paddle::experimental::DefaultAllocator>(
      paddle::platform::CPUPlace());

  auto dense_scalar = std::make_shared<phi::DenseTensor>(
      alloc.get(),
      phi::DenseTensorMeta(phi::DataType::INT32,
                           phi::make_ddim({1}),
                           phi::DataLayout::NCHW));
  dense_scalar->mutable_data<int>(paddle::platform::CPUPlace())[0] = 2;

  paddle::experimental::Tensor shape_scalar1(dense_scalar);
  paddle::experimental::Tensor shape_scalar2(dense_scalar);
  std::vector<paddle::experimental::Tensor> list_shape{shape_scalar1,
                                                       shape_scalar2};

  float val = 1.0;

  auto out =
      paddle::experimental::full(list_shape, val, phi::DataType::FLOAT32);

  ASSERT_EQ(out.shape().size(), 2UL);
  ASSERT_EQ(out.shape()[0], 2);
  ASSERT_EQ(out.numel(), 4);
  ASSERT_EQ(out.is_cpu(), true);
  ASSERT_EQ(out.type(), phi::DataType::FLOAT32);
  ASSERT_EQ(out.layout(), phi::DataLayout::NCHW);
  ASSERT_EQ(out.initialized(), true);

  auto dense_out = std::dynamic_pointer_cast<phi::DenseTensor>(out.impl());
  auto* actual_result = dense_out->data<float>();
  for (auto i = 0; i < 4; i++) {
    ASSERT_NEAR(actual_result[i], val, 1e-6f);
  }
}

TEST(API, full3) {
  std::vector<int64_t> vector_shape{2, 3};

  float val = 1.0;

  auto out =
      paddle::experimental::full(vector_shape, val, phi::DataType::INT32);

  ASSERT_EQ(out.shape().size(), 2UL);
  ASSERT_EQ(out.shape()[0], 2);
  ASSERT_EQ(out.numel(), 6);
  ASSERT_EQ(out.is_cpu(), true);
  ASSERT_EQ(out.type(), phi::DataType::INT32);
  ASSERT_EQ(out.layout(), phi::DataLayout::NCHW);
  ASSERT_EQ(out.initialized(), true);

  auto dense_out = std::dynamic_pointer_cast<phi::DenseTensor>(out.impl());
  auto* actual_result = dense_out->data<int>();
  for (auto i = 0; i < 6; i++) {
    ASSERT_EQ(actual_result[i], 1);
  }
}

}  // namespace tests
}  // namespace paddle

paddle/phi/tests/api/test_matmul_api.cc (deleted, file mode 100644 → 0)

/* Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include <gtest/gtest.h>
#include <memory>
#include "paddle/phi/api/backward/backward_api.h"
#include "paddle/phi/api/include/api.h"
#include "paddle/phi/api/lib/utils/allocator.h"
#include "paddle/phi/backends/gpu/gpu_context.h"
#include "paddle/phi/core/dense_tensor.h"
#include "paddle/phi/core/kernel_registry.h"
#include "paddle/phi/core/tensor_utils.h"
// See Note [ Why still include the fluid headers? ]
#include "paddle/fluid/platform/device_context.h"

PD_DECLARE_KERNEL(full, CPU, ALL_LAYOUT);
PD_DECLARE_KERNEL(matmul, CPU, ALL_LAYOUT);
PD_DECLARE_KERNEL(matmul_double_grad, CPU, ALL_LAYOUT);

#if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
PD_DECLARE_KERNEL(matmul, GPU, ALL_LAYOUT);
#endif

namespace paddle {
namespace tests {

namespace framework = paddle::framework;
using DDim = phi::DDim;

TEST(API, matmul_cpu) {
  // 1. create tensor
  const auto alloc = std::make_unique<paddle::experimental::DefaultAllocator>(
      paddle::platform::CPUPlace());
  auto dense_x = std::make_shared<phi::DenseTensor>(
      alloc.get(),
      phi::DenseTensorMeta(phi::DataType::FLOAT32,
                           phi::make_ddim({3, 3}),
                           phi::DataLayout::NCHW));
  auto* dense_x_data =
      dense_x->mutable_data<float>(paddle::platform::CPUPlace());

  auto dense_y = std::make_shared<phi::DenseTensor>(
      alloc.get(),
      phi::DenseTensorMeta(phi::DataType::FLOAT32,
                           phi::make_ddim({3, 3}),
                           phi::DataLayout::NCHW));
  auto* dense_y_data =
      dense_y->mutable_data<float>(paddle::platform::CPUPlace());

  for (size_t i = 0; i < 9; ++i) {
    dense_x_data[i] = 1.0;
    dense_y_data[i] = 2.0;
  }
  std::vector<float> sum(9, 6.0);

  paddle::experimental::Tensor x(dense_x);
  paddle::experimental::Tensor y(dense_y);

  // 2. test API
  auto out = paddle::experimental::matmul(x, y, false, false);

  // 3. check result
  ASSERT_EQ(out.dims().size(), 2);
  ASSERT_EQ(out.dims()[0], 3);
  ASSERT_EQ(out.dims()[1], 3);
  ASSERT_EQ(out.numel(), 9);
  ASSERT_EQ(out.type(), phi::DataType::FLOAT32);
  ASSERT_EQ(out.layout(), phi::DataLayout::NCHW);
  ASSERT_EQ(out.initialized(), true);

  auto dense_out = std::dynamic_pointer_cast<phi::DenseTensor>(out.impl());

  for (size_t i = 0; i < 9; i++) {
    ASSERT_NEAR(sum[i], dense_out->data<float>()[i], 1e-6f);
  }
}

#if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
TEST(API, matmul_cuda) {
  // Prepare CPU Dense Tensor
  const auto alloc_cpu =
      std::make_unique<paddle::experimental::DefaultAllocator>(
          paddle::platform::CPUPlace());
  auto ref_x = std::make_shared<phi::DenseTensor>(
      alloc_cpu.get(),
      phi::DenseTensorMeta(phi::DataType::FLOAT32,
                           phi::make_ddim({3, 3}),
                           phi::DataLayout::NCHW));
  auto* ref_x_data = ref_x->mutable_data<float>(paddle::platform::CPUPlace());

  auto ref_y = std::make_shared<phi::DenseTensor>(
      alloc_cpu.get(),
      phi::DenseTensorMeta(phi::DataType::FLOAT32,
                           phi::make_ddim({3, 3}),
                           phi::DataLayout::NCHW));
  auto* ref_y_data = ref_y->mutable_data<float>(paddle::platform::CPUPlace());

  for (size_t i = 0; i < 9; ++i) {
    ref_x_data[i] = 1.0;
    ref_y_data[i] = 2.0;
  }
  std::vector<float> sum(9, 6.0);

  // 1. create tensor
  const auto alloc_cuda =
      std::make_unique<paddle::experimental::DefaultAllocator>(
          paddle::platform::CUDAPlace());
  auto dense_x = std::make_shared<phi::DenseTensor>(
      alloc_cuda.get(),
      phi::DenseTensorMeta(phi::DataType::FLOAT32,
                           phi::make_ddim({3, 3}),
                           phi::DataLayout::NCHW));
  auto dense_y = std::make_shared<phi::DenseTensor>(
      alloc_cuda.get(),
      phi::DenseTensorMeta(phi::DataType::FLOAT32,
                           phi::make_ddim({3, 3}),
                           phi::DataLayout::NCHW));

  auto& pool = paddle::platform::DeviceContextPool::Instance();
  auto place = paddle::platform::CUDAPlace();
  auto* dev_ctx = static_cast<const phi::GPUContext*>(pool.GetByPlace(place));

  phi::Copy(*dev_ctx, *ref_x.get(), phi::GPUPlace(), false, dense_x.get());
  phi::Copy(*dev_ctx, *ref_y.get(), phi::GPUPlace(), false, dense_y.get());

  paddle::experimental::Tensor x(dense_x);
  paddle::experimental::Tensor y(dense_y);

  // 2. test API
  auto out = paddle::experimental::matmul(x, y, false, false);

  // 3. check result
  ASSERT_EQ(out.dims().size(), 2);
  ASSERT_EQ(out.dims()[0], 3);
  ASSERT_EQ(out.dims()[1], 3);
  ASSERT_EQ(out.numel(), 9);
  ASSERT_EQ(out.type(), phi::DataType::FLOAT32);
  ASSERT_EQ(out.layout(), phi::DataLayout::NCHW);
  ASSERT_EQ(out.initialized(), true);

  auto dense_out = std::dynamic_pointer_cast<phi::DenseTensor>(out.impl());
  auto ref_out = std::make_shared<phi::DenseTensor>(
      alloc_cpu.get(),
      phi::DenseTensorMeta(phi::DataType::FLOAT32,
                           out.dims(),
                           phi::DataLayout::NCHW));

  phi::Copy(*dev_ctx, *dense_out.get(), phi::CPUPlace(), false, ref_out.get());

  for (size_t i = 0; i < 9; i++) {
    ASSERT_NEAR(sum[i], ref_out->data<float>()[i], 1e-6f);
  }
}
#endif

TEST(API, matmul_double_grad) {
  // 1. create tensor
  auto x = paddle::experimental::full({3, 3}, 1.0);
  auto y = paddle::experimental::full({3, 3}, 2.0);
  auto out_grad = paddle::experimental::full({3, 3}, 2.0);
  auto dx_grad = paddle::experimental::full({3, 3}, 2.0);

  // 2. test API
  std::vector<std::vector<paddle::experimental::Tensor>> out(
      3, std::vector<paddle::experimental::Tensor>(1));
  paddle::experimental::matmul_double_grad(x,
                                           y,
                                           out_grad,
                                           dx_grad,
                                           {},
                                           false,
                                           false,
                                           &out[0][0],
                                           &out[1][0],
                                           &out[2][0]);

  // 3. check result
  ASSERT_EQ(out.size(), 3UL);
  ASSERT_EQ(out[0].size(), 1UL);
  ASSERT_EQ(out[1].size(), 1UL);
  ASSERT_EQ(out[2].size(), 1UL);
  ASSERT_EQ(out[0][0].dims()[1], 3);
  ASSERT_EQ(out[0][0].numel(), 9);
  ASSERT_EQ(out[1][0].numel(), 9);
  ASSERT_EQ(out[2][0].numel(), 9);
  ASSERT_EQ(out[0][0].type(), phi::DataType::FLOAT32);
  ASSERT_EQ(out[0][0].layout(), phi::DataLayout::NCHW);
  ASSERT_EQ(out[1][0].initialized(), true);
  ASSERT_EQ(out[2][0].initialized(), true);
}

}  // namespace tests
}  // namespace paddle

paddle/phi/tests/api/test_mean_api.cc (deleted, file mode 100644 → 0)

/* Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include <gtest/gtest.h>
#include <memory>
#include "paddle/phi/api/include/api.h"
#include "paddle/phi/api/lib/utils/allocator.h"
#include "paddle/phi/core/dense_tensor.h"
#include "paddle/phi/core/kernel_registry.h"

PD_DECLARE_KERNEL(mean, CPU, ALL_LAYOUT);

namespace paddle {
namespace tests {

namespace framework = paddle::framework;
using DDim = phi::DDim;

// TODO(chenweihang): Remove this test after the API is used in the dygraph
TEST(API, mean) {
  // 1. create tensor
  const auto alloc = std::make_unique<paddle::experimental::DefaultAllocator>(
      paddle::platform::CPUPlace());
  auto dense_x = std::make_shared<phi::DenseTensor>(
      alloc.get(),
      phi::DenseTensorMeta(phi::DataType::FLOAT32,
                           phi::make_ddim({3, 4}),
                           phi::DataLayout::NCHW));
  auto* dense_x_data =
      dense_x->mutable_data<float>(paddle::platform::CPUPlace());

  float sum = 0.0;
  for (size_t i = 0; i < 12; ++i) {
    dense_x_data[i] = i * 1.0;
    sum += i * 1.0;
  }

  paddle::experimental::Tensor x(dense_x);
  std::vector<int64_t> axis = {0, 1};

  // 2. test API
  auto out = paddle::experimental::mean(x, axis, false);

  // 3. check result
  ASSERT_EQ(out.dims().size(), 1);
  ASSERT_EQ(out.dims()[0], 1);
  ASSERT_EQ(out.numel(), 1);
  ASSERT_EQ(out.is_cpu(), true);
  ASSERT_EQ(out.type(), phi::DataType::FLOAT32);
  ASSERT_EQ(out.layout(), phi::DataLayout::NCHW);
  ASSERT_EQ(out.initialized(), true);

  auto expect_result = sum / 12;
  auto dense_out = std::dynamic_pointer_cast<phi::DenseTensor>(out.impl());
  auto actual_result = dense_out->data<float>()[0];
  ASSERT_NEAR(expect_result, actual_result, 1e-6f);
}

}  // namespace tests
}  // namespace paddle
paddle/phi/tests/api/test_reshape_api.cc    deleted    100644 → 0
/* Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include <gtest/gtest.h>
#include <memory>
#include "paddle/phi/api/include/api.h"
#include "paddle/phi/api/lib/utils/allocator.h"
#include "paddle/phi/core/dense_tensor.h"
#include "paddle/phi/core/kernel_registry.h"
PD_DECLARE_KERNEL(full, CPU, ALL_LAYOUT);
PD_DECLARE_KERNEL(reshape, CPU, ALL_LAYOUT);

namespace paddle {
namespace tests {

namespace framework = paddle::framework;
using DDim = phi::DDim;

// TODO(chenweihang): Remove this test after the API is used in the dygraph
TEST(API, reshape) {
  // 1. create tensor
  const auto alloc = std::make_unique<paddle::experimental::DefaultAllocator>(
      paddle::platform::CPUPlace());
  auto dense_x = std::make_shared<phi::DenseTensor>(
      alloc.get(),
      phi::DenseTensorMeta(phi::DataType::FLOAT32, phi::make_ddim({3, 2, 2, 3}), phi::DataLayout::NCHW));
  auto* dense_x_data = dense_x->mutable_data<float>(paddle::platform::CPUPlace());

  for (int i = 0; i < dense_x->numel(); i++) {
    dense_x_data[i] = i;
  }

  paddle::experimental::Tensor x(dense_x);
  std::vector<int64_t> shape{12, 3};

  // 2. test API
  auto out = paddle::experimental::reshape(x, shape);

  // 3. check result
  std::vector<int64_t> expect_shape = {12, 3};
  ASSERT_EQ(out.shape()[0], expect_shape[0]);
  ASSERT_EQ(out.shape()[1], expect_shape[1]);
  ASSERT_EQ(out.numel(), 36);
  ASSERT_EQ(out.is_cpu(), true);
  ASSERT_EQ(out.type(), phi::DataType::FLOAT32);
  ASSERT_EQ(out.layout(), phi::DataLayout::NCHW);
  ASSERT_EQ(out.initialized(), true);

  bool value_equal = true;
  auto dense_out = std::dynamic_pointer_cast<phi::DenseTensor>(out.impl());
  auto* dense_out_data = dense_out->data<float>();
  for (int i = 0; i < dense_x->numel(); i++) {
    if (std::abs(dense_x_data[i] - dense_out_data[i]) > 1e-6f) value_equal = false;
  }
  ASSERT_EQ(value_equal, true);
}

TEST(API, reshape_) {
  // 1. create tensor
  auto x = paddle::experimental::full({3, 2, 2, 3}, 1.0, experimental::DataType::FLOAT32);

  // 2. test API
  paddle::experimental::Tensor out = paddle::experimental::reshape_(x, {12, 3});

  // 3. check result
  std::vector<int64_t> expect_shape = {12, 3};
  ASSERT_EQ(out.shape()[0], expect_shape[0]);
  ASSERT_EQ(out.shape()[1], expect_shape[1]);
  ASSERT_EQ(out.numel(), 36);
  ASSERT_EQ(out.is_cpu(), true);
  ASSERT_EQ(out.type(), phi::DataType::FLOAT32);
  ASSERT_EQ(out.layout(), phi::DataLayout::NCHW);
  ASSERT_EQ(out.initialized(), true);
  ASSERT_EQ(out.data<float>(), x.data<float>());
}

TEST(Tensor, old_reshape) {
  paddle::experimental::Tensor x(paddle::PlaceType::kCPU);
  x.reshape({3, 4});
  x.mutable_data<float>(paddle::PlaceType::kCPU);

  ASSERT_EQ(x.shape()[0], 3);
  ASSERT_EQ(x.shape()[1], 4);
  ASSERT_EQ(x.numel(), 12);
  ASSERT_EQ(x.is_cpu(), true);
  ASSERT_EQ(x.type(), phi::DataType::FLOAT32);
  ASSERT_EQ(x.layout(), phi::DataLayout::NCHW);
  ASSERT_EQ(x.initialized(), true);
}

}  // namespace tests
}  // namespace paddle
paddle/phi/tests/api/test_scale_api.cc    deleted    100644 → 0
/* Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include <gtest/gtest.h>
#include <memory>
#include "paddle/phi/api/include/api.h"
#include "paddle/phi/api/lib/utils/allocator.h"
#include "paddle/phi/core/dense_tensor.h"
#include "paddle/phi/core/kernel_registry.h"
#include "paddle/phi/core/selected_rows.h"
PD_DECLARE_KERNEL(full, CPU, ALL_LAYOUT);
PD_DECLARE_KERNEL(scale, CPU, ALL_LAYOUT);
PD_DECLARE_KERNEL(scale_sr, CPU, ALL_LAYOUT);

namespace paddle {
namespace tests {

namespace framework = paddle::framework;
using DDim = phi::DDim;

void CheckScaleResult(const experimental::Tensor* out) {
  ASSERT_EQ(out->dims().size(), 2);
  ASSERT_EQ(out->dims()[0], 3);
  ASSERT_EQ(out->dims()[1], 4);
  ASSERT_EQ(out->numel(), 12);
  ASSERT_EQ(out->is_cpu(), true);
  ASSERT_EQ(out->type(), phi::DataType::FLOAT32);
  ASSERT_EQ(out->layout(), phi::DataLayout::NCHW);
  ASSERT_EQ(out->initialized(), true);
  for (int64_t i = 0; i < out->numel(); ++i) {
    ASSERT_NEAR(3.0, out->data<float>()[i], 1e-6f);
  }
}

TEST(API, scale) {
  // 1. check `scale` is float value
  auto x = experimental::full({3, 4}, 1.0, phi::DataType::FLOAT32);
  auto out1 = experimental::scale(x, 2.0, 1.0, true);
  CheckScaleResult(&out1);

  // 2. check `scale` is Tensor with shape [1]
  auto scale = experimental::full({1}, 2.0, phi::DataType::FLOAT32);
  auto out2 = experimental::scale(x, scale, 1.0, true);
  CheckScaleResult(&out2);
}

TEST(API, scale_sr) {
  // 1. check `scale` is float value
  std::vector<int64_t> rows{0, 4, 7};
  int64_t height = 10;
  auto selected_rows = std::make_shared<phi::SelectedRows>(rows, height);
  auto dense_tensor = std::dynamic_pointer_cast<phi::DenseTensor>(
      experimental::full({3, 4}, 1.0, phi::DataType::FLOAT32).impl());
  *(selected_rows->mutable_value()) = *dense_tensor;
  experimental::Tensor x(selected_rows);
  auto out = experimental::scale(x, 2.0, 1.0, true);

  ASSERT_EQ(out.dims().size(), 2);
  ASSERT_EQ(out.dims()[0], 3);
  ASSERT_EQ(out.dims()[1], 4);
  ASSERT_EQ(out.numel(), 12);
  ASSERT_EQ(out.is_cpu(), true);
  ASSERT_EQ(out.type(), phi::DataType::FLOAT32);
  ASSERT_EQ(out.layout(), phi::DataLayout::NCHW);
  ASSERT_EQ(out.initialized(), true);
  for (int64_t i = 0; i < out.numel(); ++i) {
    ASSERT_NEAR(3.0, out.data<float>()[i], 1e-6f);
  }
}

}  // namespace tests
}  // namespace paddle
paddle/phi/tests/api/test_sparse_conv_api.cc    deleted    100644 → 0
/* Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See
the License for the specific language governing permissions and
limitations under the License. */
#include <gtest/gtest.h>
#include <memory>
#include "paddle/phi/api/include/api.h"
#include "paddle/phi/api/include/sparse_api.h"
#include "paddle/phi/api/lib/utils/allocator.h"
#include "paddle/phi/core/dense_tensor.h"
#include "paddle/phi/core/kernel_registry.h"
#include "paddle/phi/core/sparse_coo_tensor.h"
PD_DECLARE_KERNEL(conv3d_coo, CPU, ALL_LAYOUT);

template <typename T>
void TestConv3dBase(const std::vector<int>& indices,
                    const std::vector<T>& features,
                    const phi::DDim& x_dims,
                    const std::vector<T>& kernel,
                    const phi::DDim& kernel_dims,
                    const std::vector<int>& correct_out_indices,
                    const std::vector<T>& correct_out_features,
                    const phi::DDim& correct_out_dims,
                    const int non_zero_num,
                    const std::vector<int>& paddings,
                    const std::vector<int>& strides,
                    const std::vector<int>& dilations,
                    const float diff = 1e-3) {
  const auto alloc = std::make_unique<paddle::experimental::DefaultAllocator>(
      paddle::platform::CPUPlace());

  const int in_channels = kernel_dims[3];
  const int out_channels = kernel_dims[4];

  phi::DenseTensor indices_tensor(
      alloc.get(),
      phi::DenseTensorMeta(phi::DataType::INT32, {4, non_zero_num}, phi::DataLayout::NCHW));
  memcpy(indices_tensor.data<int>(), indices.data(), indices.size() * sizeof(int));

  phi::DenseTensor features_tensor(
      alloc.get(),
      phi::DenseTensorMeta(paddle::experimental::CppTypeToDataType<T>::Type(),
                           {non_zero_num, in_channels},
                           phi::DataLayout::NHWC));
  memcpy(features_tensor.data<T>(), features.data(), features.size() * sizeof(T));

  auto x_tensor = std::make_shared<phi::SparseCooTensor>(indices_tensor, features_tensor, x_dims);
  paddle::experimental::Tensor x(x_tensor);

  auto kernel_tensor = std::make_shared<phi::DenseTensor>(
      alloc.get(),
      phi::DenseTensorMeta(paddle::experimental::CppTypeToDataType<T>::Type(),
                           kernel_dims,
                           phi::DataLayout::NHWC));
  paddle::experimental::Tensor weight(kernel_tensor);
  memcpy(kernel_tensor->mutable_data<T>(paddle::platform::CPUPlace()),
         kernel.data(),
         kernel.size() * sizeof(T));

  if (!std::is_same<T, phi::dtype::float16>::value) {
    auto tensor_out = paddle::experimental::sparse::conv3d(
        x, weight, paddings, dilations, strides, 1, false, "Conv3d");

    auto out = std::dynamic_pointer_cast<phi::SparseCooTensor>(tensor_out.impl());
    ASSERT_EQ(correct_out_dims.size(), out->dims().size());
    for (int i = 0; i < correct_out_dims.size(); i++) {
      ASSERT_EQ(correct_out_dims[i], out->dims()[i]);
    }
    ASSERT_EQ((int64_t)correct_out_features.size() / out_channels, out->nnz());

    int cmp_indices = memcmp(correct_out_indices.data(),
                             out->non_zero_indices().data<int>(),
                             correct_out_indices.size() * sizeof(int));
    ASSERT_EQ(cmp_indices, 0);

    for (uint64_t i = 0; i < correct_out_features.size(); i++) {
      float tmp = std::fabs(static_cast<float>(
          correct_out_features[i] - out->non_zero_elements().data<T>()[i]));
      ASSERT_LT(tmp, diff);
    }
  }
}

void TestConv3d(const std::vector<int>& indices,
                const std::vector<float>& features,
                const phi::DDim& x_dims,
                const std::vector<float>& kernel,
                const phi::DDim& kernel_dims,
                const std::vector<int>& correct_out_indices,
                const std::vector<float>& correct_out_features,
                const phi::DDim& correct_out_dims,
                const int non_zero_num,
                const std::vector<int>& paddings,
                const std::vector<int>& strides,
                const std::vector<int>& dilations) {
  // test float
  TestConv3dBase<float>(indices, features, x_dims, kernel, kernel_dims,
                        correct_out_indices, correct_out_features, correct_out_dims,
                        non_zero_num, paddings, strides, dilations);
}

TEST(API, sparse_conv2d) {
  const auto alloc = std::make_shared<paddle::experimental::DefaultAllocator>(
      paddle::platform::CPUPlace());

  const int in_channels = 1;
  const int out_channels = 1;
  phi::DDim x_dims = {1, 1, 5, 5, in_channels};
  phi::DDim kernel_dims = {1, 3, 3, in_channels, out_channels};
  phi::DDim out_dims = {1, 1, 3, 3, out_channels};
  std::vector<int> paddings = {0, 0, 0};
  std::vector<int> strides = {1, 1, 1};
  std::vector<int> dilations = {1, 1, 1};

  const int non_zero_num = 3;
  std::vector<int> indices_flatten = {0, 0, 0, 0, 0, 0, 0, 4, 0, 3, 2, 4};

  std::vector<float> features = {-0.79394531, -0.3125, -0.55029297};
  // 3*3*3=27
  std::vector<float> kernel = {0.65820312, 0.75048828, 0.21411133,
                               0.17370605, 0.85546875, 0.53076172,
                               0.28833008, 0.71044922, 0.00659943};

  std::vector<int> out_indices_flatten = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
                                          0, 0, 2, 2, 2, 1, 2, 0, 1, 2};

  std::vector<float> out_features = {-0.17004, -0.71338, -0.00206, -0.22205, -0.09009};

  TestConv3d(indices_flatten, features, x_dims, kernel, kernel_dims,
             out_indices_flatten, out_features, out_dims,
             non_zero_num, paddings, strides, dilations);
}
paddle/phi/tests/api/test_sparse_utils_api.cc    deleted    100644 → 0
/* Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See
the License for the specific language governing permissions and
limitations under the License. */
#include <gtest/gtest.h>
#include <memory>
#include "paddle/phi/api/include/api.h"
#include "paddle/phi/api/include/sparse_api.h"
#include "paddle/phi/api/lib/utils/allocator.h"
#include "paddle/phi/core/dense_tensor.h"
#include "paddle/phi/core/kernel_registry.h"
#include "paddle/phi/core/sparse_coo_tensor.h"
PD_DECLARE_KERNEL(dense_to_coo, CPU, ALL_LAYOUT);

TEST(API, to_sparse_coo) {
  const auto alloc = std::make_shared<paddle::experimental::DefaultAllocator>(
      paddle::platform::CPUPlace());

  auto dense_x = std::make_shared<phi::DenseTensor>(
      alloc.get(),
      phi::DenseTensorMeta(phi::DataType::FLOAT32, phi::make_ddim({3, 3}), phi::DataLayout::NCHW));

  phi::CPUPlace cpu;
  const int64_t sparse_dim = 2;
  auto* dense_x_data = dense_x->mutable_data<float>(cpu);
  float dense_data[3][3] = {{0.0, 1.0, 0.0}, {2.0, 0.0, 3.0}, {3.2, 0.0, 0.0}};
  std::vector<float> non_zero_data = {1.0, 2.0, 3.0, 3.2};
  std::vector<int64_t> indices_data = {0, 1, 1, 2, 1, 0, 2, 0};
  std::vector<int64_t> cols_data = {1, 0, 2, 0};
  std::vector<int64_t> crows_data = {0, 1, 3, 4};
  const int64_t non_zero_num = 4;

  std::copy(&dense_data[0][0], &dense_data[0][0] + 9, dense_x_data);

  phi::CPUContext dev_ctx_cpu;

  // 1. test dense_to_sparse_coo
  paddle::experimental::Tensor x(dense_x);
  auto out = paddle::experimental::sparse::to_sparse_coo(x, sparse_dim);
  auto coo = std::dynamic_pointer_cast<phi::SparseCooTensor>(out.impl());
  ASSERT_EQ(coo->nnz(), non_zero_num);
  int cmp_indices = memcmp(coo->non_zero_indices().data<int64_t>(),
                           indices_data.data(),
                           indices_data.size() * sizeof(int64_t));
  ASSERT_EQ(cmp_indices, 0);
  int cmp_elements = memcmp(coo->non_zero_elements().data<float>(),
                            non_zero_data.data(),
                            non_zero_data.size() * sizeof(float));
  ASSERT_EQ(cmp_elements, 0);

  // 1. test sparse_csr_to_coo
  auto dense_dims = phi::make_ddim({3, 3});
  phi::DenseTensorMeta crows_meta(phi::DataType::INT64, {dense_dims[0] + 1}, phi::DataLayout::NCHW);
  phi::DenseTensorMeta cols_meta(phi::DataType::INT64, {non_zero_num}, phi::DataLayout::NCHW);
  phi::DenseTensorMeta values_meta(phi::DataType::FLOAT32, {non_zero_num}, phi::DataLayout::NCHW);
  phi::CPUPlace place;
  phi::DenseTensor crows(alloc.get(), crows_meta);
  phi::DenseTensor cols(alloc.get(), cols_meta);
  phi::DenseTensor values(alloc.get(), values_meta);
  memcpy(crows.mutable_data<int64_t>(place),
         crows_data.data(),
         crows_data.size() * sizeof(int64_t));
  memcpy(cols.mutable_data<int64_t>(place),
         cols_data.data(),
         cols_data.size() * sizeof(int64_t));
  memcpy(values.mutable_data<float>(place),
         non_zero_data.data(),
         non_zero_data.size() * sizeof(float));
  auto csr = std::make_shared<phi::SparseCsrTensor>(crows, cols, values, dense_dims);
  paddle::experimental::Tensor csr_x(csr);
  auto out2 = paddle::experimental::sparse::to_sparse_coo(csr_x, sparse_dim);
  auto coo2 = std::dynamic_pointer_cast<phi::SparseCooTensor>(out.impl());
  ASSERT_EQ(coo2->nnz(), non_zero_num);
  int cmp_indices2 = memcmp(coo2->non_zero_indices().data<int64_t>(),
                            indices_data.data(),
                            indices_data.size() * sizeof(int64_t));
  ASSERT_EQ(cmp_indices2, 0);
  int cmp_elements2 = memcmp(coo2->non_zero_elements().data<float>(),
                             non_zero_data.data(),
                             non_zero_data.size() * sizeof(float));
  ASSERT_EQ(cmp_elements2, 0);
}

TEST(API, to_sparse_csr) {
  const auto alloc = std::make_shared<paddle::experimental::DefaultAllocator>(
      paddle::platform::CPUPlace());

  auto dense_x = std::make_shared<phi::DenseTensor>(
      alloc.get(),
      phi::DenseTensorMeta(phi::DataType::FLOAT32, phi::make_ddim({3, 3}), phi::DataLayout::NCHW));

  phi::CPUPlace cpu;
  const int64_t sparse_dim = 2;
  auto* dense_x_data = dense_x->mutable_data<float>(cpu);
  float dense_data[3][3] = {{0.0, 1.0, 0.0}, {2.0, 0.0, 3.0}, {3.2, 0.0, 0.0}};
  std::vector<float> non_zero_data = {1.0, 2.0, 3.0, 3.2};
  std::vector<int64_t> indices_data = {0, 1, 1, 2, 1, 0, 2, 0};
  std::vector<int64_t> cols_data = {1, 0, 2, 0};
  std::vector<int64_t> crows_data = {0, 1, 3, 4};
  const int64_t non_zero_num = 4;

  std::copy(&dense_data[0][0], &dense_data[0][0] + 9, dense_x_data);

  phi::CPUContext dev_ctx_cpu;

  // 1. test dense_to_sparse_csr
  paddle::experimental::Tensor x(dense_x);
  auto out = paddle::experimental::sparse::to_sparse_csr(x);
  auto csr = std::dynamic_pointer_cast<phi::SparseCsrTensor>(out.impl());
  auto check = [&](const phi::SparseCsrTensor& csr) {
    ASSERT_EQ(csr.non_zero_cols().numel(), non_zero_num);
    int cmp_crows = memcmp(csr.non_zero_crows().data<int64_t>(),
                           crows_data.data(),
                           crows_data.size() * sizeof(int64_t));
    ASSERT_EQ(cmp_crows, 0);
    int cmp_cols = memcmp(csr.non_zero_cols().data<int64_t>(),
                          cols_data.data(),
                          cols_data.size() * sizeof(int64_t));
    ASSERT_EQ(cmp_cols, 0);
    int cmp_elements = memcmp(csr.non_zero_elements().data<float>(),
                              non_zero_data.data(),
                              non_zero_data.size() * sizeof(float));
    ASSERT_EQ(cmp_elements, 0);
  };
  check(*csr);

  // 1. test sparse_coo_to_csr
  auto dense_dims = phi::make_ddim({3, 3});
  phi::DenseTensorMeta indices_meta(
      phi::DataType::INT64, {sparse_dim, non_zero_num}, phi::DataLayout::NCHW);
  phi::DenseTensorMeta values_meta(phi::DataType::FLOAT32, {non_zero_num}, phi::DataLayout::NCHW);
  phi::CPUPlace place;
  phi::DenseTensor indices(alloc.get(), indices_meta);
  phi::DenseTensor values(alloc.get(), values_meta);
  memcpy(indices.mutable_data<int64_t>(place),
         indices_data.data(),
         indices_data.size() * sizeof(int64_t));
  memcpy(values.mutable_data<float>(place),
         non_zero_data.data(),
         non_zero_data.size() * sizeof(float));
  auto coo = std::make_shared<phi::SparseCooTensor>(indices, values, dense_dims);
  paddle::experimental::Tensor coo_x(coo);
  auto out2 = paddle::experimental::sparse::to_sparse_csr(coo_x);
  auto csr2 = std::dynamic_pointer_cast<phi::SparseCsrTensor>(out.impl());
  check(*csr2);
}

TEST(API, to_dense) {
  const auto alloc = std::make_shared<paddle::experimental::DefaultAllocator>(
      paddle::platform::CPUPlace());

  phi::CPUPlace cpu;
  const int64_t sparse_dim = 2;
  float dense_data[3][3] = {{0.0, 1.0, 0.0}, {2.0, 0.0, 3.0}, {3.2, 0.0, 0.0}};
  std::vector<float> non_zero_data = {1.0, 2.0, 3.0, 3.2};
  std::vector<int64_t> indices_data = {0, 1, 1, 2, 1, 0, 2, 0};
  std::vector<int64_t> cols_data = {1, 0, 2, 0};
  std::vector<int64_t> crows_data = {0, 1, 3, 4};
  const int64_t non_zero_num = 4;
  auto dense_dims = phi::make_ddim({3, 3});

  phi::CPUContext dev_ctx_cpu;

  // 1. test sparse_coo_to_dense
  phi::DenseTensorMeta indices_meta(
      phi::DataType::INT64, {sparse_dim, non_zero_num}, phi::DataLayout::NCHW);
  phi::DenseTensorMeta values_meta(phi::DataType::FLOAT32, {non_zero_num}, phi::DataLayout::NCHW);
  phi::CPUPlace place;
  phi::DenseTensor indices(alloc.get(), indices_meta);
  phi::DenseTensor values(alloc.get(), values_meta);
  memcpy(indices.mutable_data<int64_t>(place),
         indices_data.data(),
         indices_data.size() * sizeof(int64_t));
  memcpy(values.mutable_data<float>(place),
         non_zero_data.data(),
         non_zero_data.size() * sizeof(float));
  auto coo = std::make_shared<phi::SparseCooTensor>(indices, values, dense_dims);
  paddle::experimental::Tensor coo_x(coo);
  auto out = paddle::experimental::sparse::to_dense(coo_x);
  auto dense_out = std::dynamic_pointer_cast<phi::DenseTensor>(out.impl());
  int cmp1 = memcmp(dense_out->data<float>(), &dense_data[0][0], 9 * sizeof(float));
  ASSERT_EQ(cmp1, 0);

  // 1. test sparse_csr_to_dense
  phi::DenseTensorMeta crows_meta(phi::DataType::INT64, {dense_dims[0] + 1}, phi::DataLayout::NCHW);
  phi::DenseTensorMeta cols_meta(phi::DataType::INT64, {non_zero_num}, phi::DataLayout::NCHW);
  phi::DenseTensor crows(alloc.get(), crows_meta);
  phi::DenseTensor cols(alloc.get(), cols_meta);
  memcpy(crows.mutable_data<int64_t>(place),
         crows_data.data(),
         crows_data.size() * sizeof(int64_t));
  memcpy(cols.mutable_data<int64_t>(place),
         cols_data.data(),
         cols_data.size() * sizeof(int64_t));
  memcpy(values.mutable_data<float>(place),
         non_zero_data.data(),
         non_zero_data.size() * sizeof(float));
  auto csr = std::make_shared<phi::SparseCsrTensor>(crows, cols, values, dense_dims);
  paddle::experimental::Tensor csr_x(csr);
  auto out2 = paddle::experimental::sparse::to_dense(csr_x);
  auto dense_out2 = std::dynamic_pointer_cast<phi::DenseTensor>(out.impl());
  int cmp2 = memcmp(dense_out2->data<float>(), &dense_data[0][0], 9 * sizeof(float));
  ASSERT_EQ(cmp2, 0);
}
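The three sparse-utils tests above all reuse one 3x3 example, so the expected buffers can be checked by hand: the dense matrix {{0, 1, 0}, {2, 0, 3}, {3.2, 0, 0}} has four non-zeros read in row-major order (1, 2, 3, 3.2). In COO form the indices buffer stores all row indices followed by all column indices, i.e. rows {0, 1, 1, 2} then cols {1, 0, 2, 0}, which is exactly indices_data = {0, 1, 1, 2, 1, 0, 2, 0}. In CSR form crows_data = {0, 1, 3, 4} is the running count of non-zeros at the start of each row (row 0 holds one, row 1 two, row 2 one) and cols_data = {1, 0, 2, 0} repeats the column indices.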
paddle/phi/tests/api/test_split_api.cc    deleted    100644 → 0
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <gtest/gtest.h>
#include <memory>
#include "paddle/phi/api/include/api.h"
#include "paddle/phi/api/lib/utils/allocator.h"
#include "paddle/phi/core/dense_tensor.h"
#include "paddle/phi/core/kernel_registry.h"
PD_DECLARE_KERNEL(split, CPU, ALL_LAYOUT);

namespace paddle {
namespace tests {

namespace framework = paddle::framework;
using DDim = phi::DDim;

// TODO(chentianyu03): Remove this test after the API is used in the dygraph
TEST(API, split) {
  // 1. create tensor
  const auto alloc = std::make_unique<paddle::experimental::DefaultAllocator>(
      paddle::platform::CPUPlace());
  auto dense_x = std::make_shared<phi::DenseTensor>(
      alloc.get(),
      phi::DenseTensorMeta(phi::DataType::FLOAT32, phi::make_ddim({4, 10}), phi::DataLayout::NCHW));
  auto* dense_x_data = dense_x->mutable_data<float>(paddle::platform::CPUPlace());

  for (size_t i = 0; i < 4; ++i) {
    for (size_t j = 0; j < 10; ++j) {
      dense_x_data[i * 10 + j] = (i * 10 + j) * 1.0;
    }
  }
  paddle::experimental::Tensor x(dense_x);

  // 2. test API
  auto out = paddle::experimental::split(x, {2, 2}, 0);

  // 3. check result
  ASSERT_EQ(out.size(), static_cast<size_t>(2));
  ASSERT_EQ(out[0].dims().size(), 2);
  ASSERT_EQ(out[0].dims()[0], 2);
  ASSERT_EQ(out[0].dims()[1], 10);
  ASSERT_EQ(out[0].type(), phi::DataType::FLOAT32);
  ASSERT_EQ(out[0].layout(), phi::DataLayout::NCHW);

  ASSERT_EQ(out[1].dims().size(), 2);
  ASSERT_EQ(out[1].dims()[0], 2);
  ASSERT_EQ(out[1].dims()[1], 10);
  ASSERT_EQ(out[1].type(), phi::DataType::FLOAT32);
  ASSERT_EQ(out[1].layout(), phi::DataLayout::NCHW);

  auto out_data_0 = std::dynamic_pointer_cast<phi::DenseTensor>(out[0].impl())->data<float>();
  auto out_data_1 = std::dynamic_pointer_cast<phi::DenseTensor>(out[1].impl())->data<float>();
  for (size_t i = 0; i < 4; ++i) {
    if (i < 20) {
      ASSERT_NEAR(dense_x_data[i], out_data_0[i], 1e-6);
    } else {
      ASSERT_NEAR(dense_x_data[i], out_data_1[i - 20], 1e-6);
    }
  }
}

}  // namespace tests
}  // namespace paddle
paddle/phi/tests/api/test_sum_api.cc    deleted    100644 → 0
/* Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include <gtest/gtest.h>
#include <memory>
#include "paddle/phi/api/include/api.h"
#include "paddle/phi/api/lib/utils/allocator.h"
#include "paddle/phi/core/dense_tensor.h"
#include "paddle/phi/core/kernel_registry.h"
PD_DECLARE_KERNEL(sum, CPU, ALL_LAYOUT);

namespace paddle {
namespace tests {

namespace framework = paddle::framework;
using DDim = phi::DDim;

// TODO(chenweihang): Remove this test after the API is used in the dygraph
TEST(API, sum) {
  // 1. create tensor
  const auto alloc = std::make_unique<paddle::experimental::DefaultAllocator>(
      paddle::platform::CPUPlace());
  auto dense_x = std::make_shared<phi::DenseTensor>(
      alloc.get(),
      phi::DenseTensorMeta(phi::DataType::FLOAT32, phi::make_ddim({3, 4}), phi::DataLayout::NCHW));
  auto* dense_x_data = dense_x->mutable_data<float>(paddle::platform::CPUPlace());

  float sum = 0.0;
  for (size_t i = 0; i < 12; ++i) {
    dense_x_data[i] = i * 1.0;
    sum += i * 1.0;
  }

  paddle::experimental::Tensor x(dense_x);
  std::vector<int64_t> axis = {0, 1};

  // 2. test API
  auto out = paddle::experimental::sum(x, axis, DataType::UNDEFINED, false);

  // 3. check result
  ASSERT_EQ(out.dims().size(), 1);
  ASSERT_EQ(out.dims()[0], 1);
  ASSERT_EQ(out.numel(), 1);
  ASSERT_EQ(out.is_cpu(), true);
  ASSERT_EQ(out.type(), phi::DataType::FLOAT32);
  ASSERT_EQ(out.layout(), phi::DataLayout::NCHW);
  ASSERT_EQ(out.initialized(), true);

  auto expect_result = sum;
  auto dense_out = std::dynamic_pointer_cast<phi::DenseTensor>(out.impl());
  auto actual_result = dense_out->data<float>()[0];
  ASSERT_NEAR(expect_result, actual_result, 1e-6f);
}

}  // namespace tests
}  // namespace paddle
paddle/phi/tests/kernels/CMakeLists.txt    @ c334405f
cc_test(test_copy_dev_api SRCS test_copy_dev_api.cc DEPS phi phi_api_utils)
cc_test(test_dot_dev_api SRCS test_dot_dev_api.cc DEPS phi phi_api_utils)
cc_test(test_creation_dev_api SRCS test_creation_dev_api.cc DEPS phi phi_api_utils)
cc_test(test_flatten_dev_api SRCS test_flatten_dev_api.cc DEPS phi phi_api_utils)
cc_test(test_matmul_dev_api SRCS test_matmul_dev_api.cc DEPS phi phi_api_utils)
cc_test(test_mean_dev_api SRCS test_mean_dev_api.cc DEPS phi phi_api_utils)
cc_test(test_scale_dev_api SRCS test_scale_dev_api.cc DEPS phi phi_api_utils)
cc_test(test_cast_dev_api SRCS test_cast_dev_api.cc DEPS phi phi_api_utils)
cc_test(test_elementwise_dev_api SRCS test_elementwise_dev_api.cc DEPS phi phi_api_utils)
cc_test(test_reshape_dev_api SRCS test_reshape_dev_api.cc DEPS phi phi_api_utils)
cc_test(test_sum_dev_api SRCS test_sum_dev_api.cc DEPS phi phi_api_utils)
cc_test(test_conj_dev_api SRCS test_conj_dev_api.cc DEPS phi phi_api_utils)
cc_test(test_concat_dev_api SRCS test_concat_dev_api.cc DEPS phi phi_api_utils)
cc_test(test_split_dev_api SRCS test_split_dev_api.cc DEPS phi phi_api_utils)
cc_test(test_sparse_utils_dev_api SRCS test_sparse_utils_dev_api.cc DEPS phi phi_api_utils)
cc_test(test_sparse_conv3d_dev_api SRCS test_sparse_conv3d_dev_api.cc DEPS phi phi_api_utils)
cc_test(test_sparse_pool_dev_api SRCS test_sparse_pool_dev_api.cc DEPS phi phi_api_utils)
cc_test(test_sparse_activation_dev_api SRCS test_sparse_activation_dev_api.cc DEPS phi phi_api_utils)
cc_test(test_sparse_elementwise_dev_api SRCS test_sparse_elementwise_dev_api.cc DEPS phi phi_api_utils)
cc_test(test_sparse_transpose_dev_api SRCS test_sparse_transpose_dev_api.cc DEPS phi phi_api_utils)
cc_test(test_math_function SRCS test_math_function.cc
...
...
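Each cc_test entry above registers one kernel-level test binary. Unlike the API-level tests removed earlier, the deleted *_dev_api.cc files below call phi kernels directly, so every test first constructs a phi::CPUContext and wires its allocator to the process-wide AllocatorFacade before invoking the kernel. A minimal sketch of that setup, assuming only what the removed files themselves use (the test name is illustrative):

#include <gtest/gtest.h>
#include "paddle/fluid/memory/allocation/allocator_facade.h"
#include "paddle/phi/backends/cpu/cpu_context.h"
#include "paddle/phi/core/dense_tensor.h"
#include "paddle/phi/kernels/full_kernel.h"

// Illustrative sketch: a CPUContext backed by the global allocator, then one kernel call.
TEST(DEV_API, pattern_sketch) {
  phi::CPUContext dev_ctx;
  dev_ctx.SetAllocator(paddle::memory::allocation::AllocatorFacade::Instance()
                           .GetAllocator(paddle::platform::CPUPlace())
                           .get());

  // same call the deleted full test makes: a 3x2 tensor filled with 1.0
  auto out = phi::Full<float>(dev_ctx, {3, 2}, 1.0f);

  ASSERT_EQ(out.numel(), 6);
  for (int64_t i = 0; i < out.numel(); ++i) {
    ASSERT_NEAR(out.data<float>()[i], 1.0f, 1e-6f);
  }
}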
paddle/phi/tests/kernels/test_cast_dev_api.cc    deleted    100644 → 0
/* Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include <gtest/gtest.h>
#include <memory>
#include "paddle/fluid/memory/allocation/allocator_facade.h"
#include "paddle/phi/api/lib/utils/allocator.h"
#include "paddle/phi/backends/cpu/cpu_context.h"
#include "paddle/phi/common/data_type.h"
#include "paddle/phi/core/dense_tensor.h"
#include "paddle/phi/core/kernel_registry.h"
#include "paddle/phi/kernels/cast_kernel.h"
namespace phi {
namespace tests {

namespace framework = paddle::framework;
using DDim = phi::DDim;

TEST(DEV_API, cast) {
  // 1. create tensor
  const auto alloc = std::make_unique<paddle::experimental::DefaultAllocator>(
      paddle::platform::CPUPlace());
  phi::DenseTensor dense_x(
      alloc.get(),
      phi::DenseTensorMeta(phi::DataType::FLOAT32, phi::make_ddim({3, 4}), phi::DataLayout::NCHW));
  auto* dense_x_data = dense_x.mutable_data<float>(paddle::platform::CPUPlace());

  float sum = 0.0;
  for (size_t i = 0; i < 12; ++i) {
    dense_x_data[i] = i * 1.0;
    sum += i * 1.0;
  }

  phi::CPUContext dev_ctx;
  dev_ctx.SetAllocator(paddle::memory::allocation::AllocatorFacade::Instance()
                           .GetAllocator(paddle::platform::CPUPlace())
                           .get());
  phi::DataType out_dtype = phi::DataType::FLOAT64;

  // 2. test API
  auto out = phi::Cast<float>(dev_ctx, dense_x, out_dtype);

  // 3. check result
  ASSERT_EQ(out.dims().size(), 2);
  ASSERT_EQ(out.dims()[0], 3);
  ASSERT_EQ(out.dims()[1], 4);
  ASSERT_EQ(out.meta().dtype, phi::DataType::FLOAT64);
  ASSERT_EQ(out.meta().layout, phi::DataLayout::NCHW);

  auto actual_result = out.data<double>();
  for (size_t i = 0; i < 12; ++i) {
    ASSERT_NEAR(actual_result[i], static_cast<double>(dense_x_data[i]), 1e-6f);
  }
}

}  // namespace tests
}  // namespace phi
paddle/phi/tests/kernels/test_concat_dev_api.cc    deleted    100644 → 0
/* Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include <gtest/gtest.h>
#include <memory>
#include "paddle/fluid/memory/allocation/allocator_facade.h"
#include "paddle/phi/api/lib/utils/allocator.h"
#include "paddle/phi/core/dense_tensor.h"
#include "paddle/phi/core/kernel_registry.h"
#include "paddle/phi/kernels/concat_kernel.h"
namespace phi {
namespace tests {

namespace framework = paddle::framework;
using DDim = phi::DDim;

TEST(DEV_API, concat) {
  // 1. create tensor
  const auto alloc = std::make_unique<paddle::experimental::DefaultAllocator>(
      paddle::platform::CPUPlace());
  phi::DenseTensor dense_x(
      alloc.get(),
      phi::DenseTensorMeta(phi::DataType::FLOAT32, phi::make_ddim({3, 10}), phi::DataLayout::NCHW));
  auto* dense_x_data = dense_x.mutable_data<float>(paddle::platform::CPUPlace());

  phi::DenseTensor dense_y(
      alloc.get(),
      phi::DenseTensorMeta(phi::DataType::FLOAT32, phi::make_ddim({3, 10}), phi::DataLayout::NCHW));
  auto* dense_y_data = dense_y.mutable_data<float>(paddle::platform::CPUPlace());

  for (size_t i = 0; i < 3; ++i) {
    for (size_t j = 0; j < 10; ++j) {
      dense_x_data[i * 10 + j] = (i * 10 + j) * 1.0;
      dense_y_data[i * 10 + j] = (i * 10 + j) * 1.0;
    }
  }

  std::vector<const phi::DenseTensor*> inputs = {&dense_x, &dense_y};

  // 2. test API
  phi::CPUContext dev_ctx;
  dev_ctx.SetAllocator(paddle::memory::allocation::AllocatorFacade::Instance()
                           .GetAllocator(paddle::platform::CPUPlace())
                           .get());
  auto out = phi::Concat<float>(dev_ctx, inputs, 0);

  // 3. check result
  ASSERT_EQ(out.dims().size(), 2);
  ASSERT_EQ(out.dims()[0], 6);
  ASSERT_EQ(out.dims()[1], 10);
  ASSERT_EQ(out.meta().dtype, phi::DataType::FLOAT32);
  ASSERT_EQ(out.meta().layout, phi::DataLayout::NCHW);

  auto out_data = out.data<float>();
  for (size_t i = 0; i < 60; ++i) {
    if (i < 30) {
      ASSERT_NEAR(dense_x_data[i], out_data[i], 1e-6f);
    } else {
      ASSERT_NEAR(dense_y_data[i - 30], out_data[i], 1e-6f);
    }
  }
}

}  // namespace tests
}  // namespace phi
paddle/phi/tests/kernels/test_conj_dev_api.cc    deleted    100644 → 0
/* Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include <gtest/gtest.h>
#include <memory>
#include "paddle/fluid/memory/allocation/allocator_facade.h"
#include "paddle/phi/api/lib/utils/allocator.h"
#include "paddle/phi/backends/cpu/cpu_context.h"
#include "paddle/phi/core/dense_tensor.h"
#include "paddle/phi/core/kernel_registry.h"
#include "paddle/phi/kernels/complex_kernel.h"
namespace phi {
namespace tests {

namespace framework = paddle::framework;
using DDim = phi::DDim;

TEST(DEV_API, conj) {
  // 1. create tensor
  const auto alloc = std::make_unique<paddle::experimental::DefaultAllocator>(
      paddle::platform::CPUPlace());
  phi::DenseTensor dense_x(
      alloc.get(),
      phi::DenseTensorMeta(phi::DataType::COMPLEX64, phi::make_ddim({3, 4}), phi::DataLayout::NCHW));
  auto* dense_x_data = dense_x.mutable_data<paddle::complex64>(paddle::platform::CPUPlace());
  for (size_t i = 0; i < 12; ++i) {
    dense_x_data[i] = paddle::complex64(i * 1.0, i * 1.0);
  }

  phi::CPUContext dev_ctx;
  dev_ctx.SetAllocator(paddle::memory::allocation::AllocatorFacade::Instance()
                           .GetAllocator(paddle::platform::CPUPlace())
                           .get());

  // 2. test API
  auto out = phi::Conj<paddle::complex64>(dev_ctx, dense_x);

  // 3. check result
  ASSERT_EQ(out.dims().size(), 2);
  ASSERT_EQ(out.numel(), 12);
  ASSERT_EQ(out.meta().dtype, phi::DataType::COMPLEX64);
  ASSERT_EQ(out.meta().layout, phi::DataLayout::NCHW);

  auto actual_result = out.data<paddle::complex64>();
  for (size_t i = 0; i < 12; ++i) {
    ASSERT_NEAR(i * 1.0, actual_result[i].real, 1e-6f);
    ASSERT_NEAR(i * -1.0, actual_result[i].imag, 1e-6f);
  }
}

}  // namespace tests
}  // namespace phi
paddle/phi/tests/kernels/test_copy_dev_api.cc    deleted    100644 → 0
/* Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include <gtest/gtest.h>
#include <memory>
#include "paddle/fluid/memory/allocation/allocator_facade.h"
#include "paddle/phi/api/lib/utils/allocator.h"
#include "paddle/phi/backends/cpu/cpu_context.h"
#include "paddle/phi/core/dense_tensor.h"
#include "paddle/phi/core/kernel_registry.h"
#include "paddle/phi/core/tensor_utils.h"
namespace phi {
namespace tests {

namespace framework = paddle::framework;
using DDim = phi::DDim;

// TODO(YuanRisheng): This TEST file need to be refactored after 'copy' realized
// in 'paddle/api'
TEST(DEV_API, copy) {
  // 1. create tensor
  const auto alloc = std::make_unique<paddle::experimental::DefaultAllocator>(
      paddle::platform::CPUPlace());
  auto dense_src = std::make_shared<phi::DenseTensor>(
      alloc.get(),
      phi::DenseTensorMeta(phi::DataType::FLOAT32, phi::make_ddim({2, 3}), phi::DataLayout::NCHW));
  auto* dense_x_data = dense_src->mutable_data<float>(paddle::platform::CPUPlace());

  auto dense_dst = std::make_shared<phi::DenseTensor>(
      alloc.get(),
      phi::DenseTensorMeta(phi::DataType::FLOAT32, phi::make_ddim({2, 3}), phi::DataLayout::NCHW));

  for (size_t i = 0; i < 2; ++i) {
    for (size_t j = 0; j < 3; ++j) {
      dense_x_data[i * 3 + j] = (i * 3 + j) * 1.0;
    }
  }
  const auto& a = paddle::platform::CPUPlace();
  std::cout << typeid(a).name() << std::endl;

  // 2. test API
  phi::CPUContext dev_ctx;
  dev_ctx.SetAllocator(paddle::memory::allocation::AllocatorFacade::Instance()
                           .GetAllocator(paddle::platform::CPUPlace())
                           .get());
  dev_ctx.SetHostAllocator(paddle::memory::allocation::AllocatorFacade::Instance()
                               .GetAllocator(paddle::platform::CPUPlace())
                               .get());
  phi::Copy(dev_ctx, *(dense_src.get()), phi::CPUPlace(), false, dense_dst.get());

  // 3. check result
  for (int64_t i = 0; i < dense_src->numel(); i++) {
    ASSERT_EQ(dense_src->data<float>()[i], dense_dst->data<float>()[i]);
  }
}

}  // namespace tests
}  // namespace phi
paddle/phi/tests/kernels/test_creation_dev_api.cc    deleted    100644 → 0
/* Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include <gtest/gtest.h>
#include <memory>
#include "paddle/fluid/memory/allocation/allocator_facade.h"
#include "paddle/phi/api/lib/utils/allocator.h"
#include "paddle/phi/backends/cpu/cpu_context.h"
#include "paddle/phi/core/dense_tensor.h"
#include "paddle/phi/core/kernel_registry.h"
#include "paddle/phi/kernels/empty_kernel.h"
#include "paddle/phi/kernels/full_kernel.h"
namespace phi {
namespace tests {

namespace framework = paddle::framework;
using DDim = phi::DDim;

TEST(DEV_API, empty) {
  // 1. create input
  phi::CPUContext dev_ctx;
  dev_ctx.SetAllocator(paddle::memory::allocation::AllocatorFacade::Instance()
                           .GetAllocator(paddle::platform::CPUPlace())
                           .get());

  // 2. test API
  auto out = phi::Empty<int>(dev_ctx, {3, 2});

  // 3. check result
  ASSERT_EQ(out.dims().size(), 2);
  ASSERT_EQ(out.dims()[0], 3);
  ASSERT_EQ(out.numel(), 6);
  ASSERT_EQ(out.meta().dtype, phi::DataType::INT32);
  ASSERT_EQ(out.meta().layout, phi::DataLayout::NCHW);
}

TEST(DEV_API, empty_like) {
  // 1. create tensor
  const auto alloc = std::make_unique<paddle::experimental::DefaultAllocator>(
      paddle::platform::CPUPlace());
  phi::DenseTensor dense_x(
      alloc.get(),
      phi::DenseTensorMeta(phi::DataType::FLOAT32, phi::make_ddim({3, 2}), phi::DataLayout::NCHW));
  auto* dense_x_data = dense_x.mutable_data<float>(paddle::platform::CPUPlace());
  dense_x_data[0] = 0;

  // 2. test API
  phi::CPUContext dev_ctx;
  dev_ctx.SetAllocator(paddle::memory::allocation::AllocatorFacade::Instance()
                           .GetAllocator(paddle::platform::CPUPlace())
                           .get());
  auto out = phi::EmptyLike<float>(dev_ctx, dense_x);

  // 3. check result
  ASSERT_EQ(out.dims().size(), 2);
  ASSERT_EQ(out.dims()[0], 3);
  ASSERT_EQ(out.numel(), 6);
  ASSERT_EQ(out.meta().dtype, phi::DataType::FLOAT32);
  ASSERT_EQ(out.meta().layout, phi::DataLayout::NCHW);
}

TEST(DEV_API, full) {
  // 1. create input
  float val = 1.0;

  // 2. test API
  phi::CPUContext dev_ctx;
  dev_ctx.SetAllocator(paddle::memory::allocation::AllocatorFacade::Instance()
                           .GetAllocator(paddle::platform::CPUPlace())
                           .get());
  auto out = phi::Full<float>(dev_ctx, {3, 2}, val);

  // 3. check result
  ASSERT_EQ(out.dims().size(), 2);
  ASSERT_EQ(out.dims()[0], 3);
  ASSERT_EQ(out.numel(), 6);
  ASSERT_EQ(out.meta().dtype, phi::DataType::FLOAT32);
  ASSERT_EQ(out.meta().layout, phi::DataLayout::NCHW);

  auto* actual_result = out.data<float>();
  for (auto i = 0; i < 6; i++) {
    ASSERT_NEAR(actual_result[i], val, 1e-6f);
  }
}

TEST(DEV_API, full_like) {
  // 1. create tensor
  const auto alloc = std::make_unique<paddle::experimental::DefaultAllocator>(
      paddle::platform::CPUPlace());
  phi::DenseTensor dense_x(
      alloc.get(),
      phi::DenseTensorMeta(phi::DataType::FLOAT32, phi::make_ddim({3, 2}), phi::DataLayout::NCHW));
  auto* dense_x_data = dense_x.mutable_data<float>(paddle::platform::CPUPlace());
  dense_x_data[0] = 0;
  float val = 1.0;

  phi::CPUContext dev_ctx;
  dev_ctx.SetAllocator(paddle::memory::allocation::AllocatorFacade::Instance()
                           .GetAllocator(paddle::platform::CPUPlace())
                           .get());

  // 2. test API
  auto out = phi::FullLike<float>(dev_ctx, dense_x, val);

  // 3. check result
  ASSERT_EQ(out.dims().size(), 2);
  ASSERT_EQ(out.dims()[0], 3);
  ASSERT_EQ(out.numel(), 6);
  ASSERT_EQ(out.meta().dtype, phi::DataType::FLOAT32);
  ASSERT_EQ(out.meta().layout, phi::DataLayout::NCHW);

  auto* actual_result = out.data<float>();
  for (auto i = 0; i < 6; i++) {
    ASSERT_NEAR(actual_result[i], val, 1e-6f);
  }
}

}  // namespace tests
}  // namespace phi
paddle/phi/tests/kernels/test_dot_dev_api.cc    deleted    100644 → 0
/* Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include <gtest/gtest.h>
#include <memory>
#include "paddle/fluid/memory/allocation/allocator_facade.h"
#include "paddle/phi/api/lib/utils/allocator.h"
#include "paddle/phi/backends/cpu/cpu_context.h"
#include "paddle/phi/core/dense_tensor.h"
#include "paddle/phi/core/kernel_registry.h"
#include "paddle/phi/kernels/dot_kernel.h"
namespace phi {
namespace tests {

namespace framework = paddle::framework;
using DDim = phi::DDim;

TEST(DEV_API, dot) {
  // 1. create tensor
  const auto alloc = std::make_unique<paddle::experimental::DefaultAllocator>(
      paddle::platform::CPUPlace());
  phi::DenseTensor dense_x(
      alloc.get(),
      phi::DenseTensorMeta(phi::DataType::FLOAT32, phi::make_ddim({3, 10}), phi::DataLayout::NCHW));
  auto* dense_x_data = dense_x.mutable_data<float>(paddle::platform::CPUPlace());

  phi::DenseTensor dense_y(
      alloc.get(),
      phi::DenseTensorMeta(phi::DataType::FLOAT32, phi::make_ddim({3, 10}), phi::DataLayout::NCHW));
  auto* dense_y_data = dense_y.mutable_data<float>(paddle::platform::CPUPlace());

  float sum[3] = {0.0, 0.0, 0.0};
  for (size_t i = 0; i < 3; ++i) {
    for (size_t j = 0; j < 10; ++j) {
      dense_x_data[i * 10 + j] = (i * 10 + j) * 1.0;
      dense_y_data[i * 10 + j] = (i * 10 + j) * 1.0;
      sum[i] += (i * 10 + j) * (i * 10 + j) * 1.0;
    }
  }

  // 2. test API
  phi::CPUContext dev_ctx;
  dev_ctx.SetAllocator(paddle::memory::allocation::AllocatorFacade::Instance()
                           .GetAllocator(paddle::platform::CPUPlace())
                           .get());
  auto out = phi::Dot<float>(dev_ctx, dense_x, dense_y);

  // 3. check result
  ASSERT_EQ(out.dims().size(), 2);
  ASSERT_EQ(out.dims()[0], 3);
  ASSERT_EQ(out.meta().dtype, phi::DataType::FLOAT32);
  ASSERT_EQ(out.meta().layout, phi::DataLayout::NCHW);

  auto expect_result = sum;
  auto actual_result0 = out.data<float>()[0];
  auto actual_result1 = out.data<float>()[1];
  auto actual_result2 = out.data<float>()[2];
  ASSERT_NEAR(expect_result[0], actual_result0, 1e-6f);
  ASSERT_NEAR(expect_result[1], actual_result1, 1e-6f);
  ASSERT_NEAR(expect_result[2], actual_result2, 1e-6f);
}

}  // namespace tests
}  // namespace phi
paddle/phi/tests/kernels/test_elementwise_dev_api.cc    deleted    100644 → 0
/* Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include <gtest/gtest.h>
#include <memory>
#include "paddle/fluid/memory/allocation/allocator_facade.h"
#include "paddle/phi/api/lib/utils/allocator.h"
#include "paddle/phi/backends/cpu/cpu_context.h"
#include "paddle/phi/core/dense_tensor.h"
#include "paddle/phi/core/kernel_registry.h"
#include "paddle/phi/kernels/elementwise_add_kernel.h"
#include "paddle/phi/kernels/elementwise_divide_kernel.h"
#include "paddle/phi/kernels/elementwise_multiply_kernel.h"
#include "paddle/phi/kernels/elementwise_subtract_kernel.h"
namespace
phi
{
namespace
tests
{
namespace
framework
=
paddle
::
framework
;
using
DDim
=
phi
::
DDim
;
TEST
(
DEV_API
,
add
)
{
// 1. create tensor
const
auto
alloc
=
std
::
make_unique
<
paddle
::
experimental
::
DefaultAllocator
>
(
paddle
::
platform
::
CPUPlace
());
phi
::
DenseTensor
dense_x
(
alloc
.
get
(),
phi
::
DenseTensorMeta
(
phi
::
DataType
::
FLOAT32
,
phi
::
make_ddim
({
3
,
10
}),
phi
::
DataLayout
::
NCHW
));
auto
*
dense_x_data
=
dense_x
.
mutable_data
<
float
>
(
paddle
::
platform
::
CPUPlace
());
phi
::
DenseTensor
dense_y
(
alloc
.
get
(),
phi
::
DenseTensorMeta
(
phi
::
DataType
::
FLOAT32
,
phi
::
make_ddim
({
10
}),
phi
::
DataLayout
::
NCHW
));
auto
*
dense_y_data
=
dense_y
.
mutable_data
<
float
>
(
paddle
::
platform
::
CPUPlace
());
float
sum
[
3
][
10
]
=
{
0.0
};
for
(
size_t
i
=
0
;
i
<
3
;
++
i
)
{
for
(
size_t
j
=
0
;
j
<
10
;
++
j
)
{
dense_x_data
[
i
*
10
+
j
]
=
(
i
*
10
+
j
)
*
1.0
;
sum
[
i
][
j
]
=
(
i
*
10
+
j
)
*
1.0
+
j
*
2.0
;
}
}
for
(
size_t
i
=
0
;
i
<
10
;
++
i
)
{
dense_y_data
[
i
]
=
i
*
2.0
;
}
// 2. test API
phi
::
CPUContext
dev_ctx
;
dev_ctx
.
SetAllocator
(
paddle
::
memory
::
allocation
::
AllocatorFacade
::
Instance
()
.
GetAllocator
(
paddle
::
platform
::
CPUPlace
())
.
get
());
auto
dense_out
=
phi
::
Add
<
float
>
(
dev_ctx
,
dense_x
,
dense_y
);
// 3. check result
ASSERT_EQ
(
dense_out
.
dims
().
size
(),
2
);
ASSERT_EQ
(
dense_out
.
dims
()[
0
],
3
);
ASSERT_EQ
(
dense_out
.
dtype
(),
phi
::
DataType
::
FLOAT32
);
ASSERT_EQ
(
dense_out
.
layout
(),
phi
::
DataLayout
::
NCHW
);
auto
expect_result
=
sum
;
auto
actual_result0
=
dense_out
.
data
<
float
>
()[
0
];
auto
actual_result1
=
dense_out
.
data
<
float
>
()[
1
];
auto
actual_result2
=
dense_out
.
data
<
float
>
()[
10
];
ASSERT_NEAR
(
expect_result
[
0
][
0
],
actual_result0
,
1e-6
f
);
ASSERT_NEAR
(
expect_result
[
0
][
1
],
actual_result1
,
1e-6
f
);
ASSERT_NEAR
(
expect_result
[
1
][
0
],
actual_result2
,
1e-6
f
);
}
TEST
(
DEV_API
,
subtract
)
{
// 1. create tensor
const
auto
alloc
=
std
::
make_unique
<
paddle
::
experimental
::
DefaultAllocator
>
(
paddle
::
platform
::
CPUPlace
());
phi
::
DenseTensor
dense_x
(
alloc
.
get
(),
phi
::
DenseTensorMeta
(
phi
::
DataType
::
FLOAT32
,
phi
::
make_ddim
({
3
,
10
}),
phi
::
DataLayout
::
NCHW
));
auto
*
dense_x_data
=
dense_x
.
mutable_data
<
float
>
(
paddle
::
platform
::
CPUPlace
());
phi
::
DenseTensor
dense_y
(
alloc
.
get
(),
phi
::
DenseTensorMeta
(
phi
::
DataType
::
FLOAT32
,
phi
::
make_ddim
({
10
}),
phi
::
DataLayout
::
NCHW
));
auto
*
dense_y_data
=
dense_y
.
mutable_data
<
float
>
(
paddle
::
platform
::
CPUPlace
());
float
sub
[
3
][
10
]
=
{
0.0
};
for
(
size_t
i
=
0
;
i
<
3
;
++
i
)
{
for
(
size_t
j
=
0
;
j
<
10
;
++
j
)
{
dense_x_data
[
i
*
10
+
j
]
=
(
i
*
10
+
j
)
*
1.0
;
sub
[
i
][
j
]
=
(
i
*
10
+
j
)
*
1.0
-
j
*
2.0
;
}
}
for
(
size_t
i
=
0
;
i
<
10
;
++
i
)
{
dense_y_data
[
i
]
=
i
*
2.0
;
}
// 2. test API
phi
::
CPUContext
dev_ctx
;
dev_ctx
.
SetAllocator
(
paddle
::
memory
::
allocation
::
AllocatorFacade
::
Instance
()
.
GetAllocator
(
paddle
::
platform
::
CPUPlace
())
.
get
());
auto
dense_out
=
phi
::
Subtract
<
float
>
(
dev_ctx
,
dense_x
,
dense_y
);
// 3. check result
ASSERT_EQ
(
dense_out
.
dims
().
size
(),
2
);
ASSERT_EQ
(
dense_out
.
dims
()[
0
],
3
);
ASSERT_EQ
(
dense_out
.
dtype
(),
phi
::
DataType
::
FLOAT32
);
ASSERT_EQ
(
dense_out
.
meta
().
layout
,
phi
::
DataLayout
::
NCHW
);
auto
expect_result
=
sub
;
auto
actual_result0
=
dense_out
.
data
<
float
>
()[
0
];
auto
actual_result1
=
dense_out
.
data
<
float
>
()[
1
];
auto
actual_result2
=
dense_out
.
data
<
float
>
()[
10
];
ASSERT_NEAR
(
expect_result
[
0
][
0
],
actual_result0
,
1e-6
f
);
ASSERT_NEAR
(
expect_result
[
0
][
1
],
actual_result1
,
1e-6
f
);
ASSERT_NEAR
(
expect_result
[
1
][
0
],
actual_result2
,
1e-6
f
);
}
TEST
(
DEV_API
,
divide
)
{
// 1. create tensor
const
auto
alloc
=
std
::
make_unique
<
paddle
::
experimental
::
DefaultAllocator
>
(
paddle
::
platform
::
CPUPlace
());
phi
::
DenseTensor
dense_x
(
alloc
.
get
(),
phi
::
DenseTensorMeta
(
phi
::
DataType
::
FLOAT32
,
phi
::
make_ddim
({
3
,
10
}),
phi
::
DataLayout
::
NCHW
));
auto
*
dense_x_data
=
dense_x
.
mutable_data
<
float
>
(
paddle
::
platform
::
CPUPlace
());
phi
::
DenseTensor
dense_y
(
alloc
.
get
(),
phi
::
DenseTensorMeta
(
phi
::
DataType
::
FLOAT32
,
phi
::
make_ddim
({
10
}),
phi
::
DataLayout
::
NCHW
));
auto
*
dense_y_data
=
dense_y
.
mutable_data
<
float
>
(
paddle
::
platform
::
CPUPlace
());
float
div
[
3
][
10
]
=
{
0.0
};
for
(
size_t
i
=
0
;
i
<
3
;
++
i
)
{
for
(
size_t
j
=
0
;
j
<
10
;
++
j
)
{
dense_x_data
[
i
*
10
+
j
]
=
(
i
*
10
+
j
)
*
1.0
;
div
[
i
][
j
]
=
(
i
*
10
+
j
)
*
1.0
/
(
j
*
2.0
+
1
);
}
}
for
(
size_t
i
=
0
;
i
<
10
;
++
i
)
{
dense_y_data
[
i
]
=
i
*
2.0
+
1
;
}
// 2. test API
phi
::
CPUContext
dev_ctx
;
dev_ctx
.
SetAllocator
(
paddle
::
memory
::
allocation
::
AllocatorFacade
::
Instance
()
.
GetAllocator
(
paddle
::
platform
::
CPUPlace
())
.
get
());
auto
dense_out
=
phi
::
Divide
<
float
>
(
dev_ctx
,
dense_x
,
dense_y
);
// 3. check result
ASSERT_EQ
(
dense_out
.
dims
().
size
(),
2
);
ASSERT_EQ
(
dense_out
.
dims
()[
0
],
3
);
ASSERT_EQ
(
dense_out
.
dtype
(),
phi
::
DataType
::
FLOAT32
);
ASSERT_EQ
(
dense_out
.
layout
(),
phi
::
DataLayout
::
NCHW
);
auto
expect_result
=
div
;
auto
actual_result0
=
dense_out
.
data
<
float
>
()[
0
];
auto
actual_result1
=
dense_out
.
data
<
float
>
()[
1
];
auto
actual_result2
=
dense_out
.
data
<
float
>
()[
10
];
ASSERT_NEAR
(
expect_result
[
0
][
0
],
actual_result0
,
1e-6
f
);
ASSERT_NEAR
(
expect_result
[
0
][
1
],
actual_result1
,
1e-6
f
);
ASSERT_NEAR
(
expect_result
[
1
][
0
],
actual_result2
,
1e-6
f
);
}
TEST(DEV_API, multiply) {
  // 1. create tensor
  const auto alloc = std::make_unique<paddle::experimental::DefaultAllocator>(
      paddle::platform::CPUPlace());
  phi::DenseTensor dense_x(alloc.get(),
                           phi::DenseTensorMeta(phi::DataType::FLOAT32,
                                                phi::make_ddim({3, 10}),
                                                phi::DataLayout::NCHW));
  auto* dense_x_data =
      dense_x.mutable_data<float>(paddle::platform::CPUPlace());

  phi::DenseTensor dense_y(alloc.get(),
                           phi::DenseTensorMeta(phi::DataType::FLOAT32,
                                                phi::make_ddim({10}),
                                                phi::DataLayout::NCHW));
  auto* dense_y_data =
      dense_y.mutable_data<float>(paddle::platform::CPUPlace());

  float mul[3][10] = {0.0};
  for (size_t i = 0; i < 3; ++i) {
    for (size_t j = 0; j < 10; ++j) {
      dense_x_data[i * 10 + j] = (i * 10 + j) * 1.0;
      mul[i][j] = (i * 10 + j) * 1.0 * j * 2.0;
    }
  }
  for (size_t i = 0; i < 10; ++i) {
    dense_y_data[i] = i * 2.0;
  }

  // 2. test API
  phi::CPUContext dev_ctx;
  dev_ctx.SetAllocator(paddle::memory::allocation::AllocatorFacade::Instance()
                           .GetAllocator(paddle::platform::CPUPlace())
                           .get());
  auto dense_out = phi::Multiply<float>(dev_ctx, dense_x, dense_y);

  // 3. check result
  ASSERT_EQ(dense_out.dims().size(), 2);
  ASSERT_EQ(dense_out.dims()[0], 3);
  ASSERT_EQ(dense_out.dtype(), phi::DataType::FLOAT32);
  ASSERT_EQ(dense_out.layout(), phi::DataLayout::NCHW);

  auto expect_result = mul;
  auto actual_result0 = dense_out.data<float>()[0];
  auto actual_result1 = dense_out.data<float>()[1];
  auto actual_result2 = dense_out.data<float>()[10];
  ASSERT_NEAR(expect_result[0][0], actual_result0, 1e-6f);
  ASSERT_NEAR(expect_result[0][1], actual_result1, 1e-6f);
  ASSERT_NEAR(expect_result[1][0], actual_result2, 1e-6f);
}
}  // namespace tests
}  // namespace phi
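The divide and multiply cases above exercise trailing-axis broadcasting: the length-10 dense_y is applied to every row of the {3, 10} dense_x. As a reading aid, here is a minimal standalone sketch (plain C++, no phi dependencies; all names are illustrative) of the reference values the tests build into div and mul:

#include <cassert>
#include <cstddef>

int main() {
  float x[3][10], y_div[10], y_mul[10], div_ref[3][10], mul_ref[3][10];
  for (size_t j = 0; j < 10; ++j) {
    y_div[j] = j * 2.0f + 1.0f;  // dense_y in the divide test
    y_mul[j] = j * 2.0f;         // dense_y in the multiply test
  }
  for (size_t i = 0; i < 3; ++i) {
    for (size_t j = 0; j < 10; ++j) {
      x[i][j] = (i * 10 + j) * 1.0f;
      // Broadcasting: the same y[j] is reused for every row i.
      div_ref[i][j] = x[i][j] / y_div[j];
      mul_ref[i][j] = x[i][j] * y_mul[j];
    }
  }
  // Spot checks matching the elements the tests read back (flat indices 0, 1, 10).
  assert(div_ref[0][0] == 0.0f);
  assert(mul_ref[0][1] == 2.0f);
  assert(div_ref[1][0] == 10.0f);
  return 0;
}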
paddle/phi/tests/kernels/test_flatten_dev_api.cc
deleted 100644 → 0
View file @ 1cb12ff5
/* Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include <gtest/gtest.h>
#include <memory>
#include "paddle/fluid/memory/allocation/allocator_facade.h"
#include "paddle/phi/api/lib/utils/allocator.h"
#include "paddle/phi/backends/cpu/cpu_context.h"
#include "paddle/phi/core/dense_tensor.h"
#include "paddle/phi/core/kernel_registry.h"
#include "paddle/phi/kernels/flatten_kernel.h"
namespace phi {
namespace tests {

namespace framework = paddle::framework;
using DDim = phi::DDim;

TEST(DEV_API, flatten) {
  // 1. create tensor
  const auto alloc = std::make_unique<paddle::experimental::DefaultAllocator>(
      paddle::platform::CPUPlace());
  phi::DenseTensor dense_x(alloc.get(),
                           phi::DenseTensorMeta(phi::DataType::FLOAT32,
                                                phi::make_ddim({3, 2, 2, 3}),
                                                phi::DataLayout::NCHW));
  auto* dense_x_data =
      dense_x.mutable_data<float>(paddle::platform::CPUPlace());

  for (int i = 0; i < dense_x.numel(); i++) {
    dense_x_data[i] = i;
  }
  int start_axis = 1, stop_axis = 2;

  phi::CPUContext dev_ctx;
  dev_ctx.SetAllocator(paddle::memory::allocation::AllocatorFacade::Instance()
                           .GetAllocator(paddle::platform::CPUPlace())
                           .get());
  dev_ctx.SetHostAllocator(
      paddle::memory::allocation::AllocatorFacade::Instance()
          .GetAllocator(paddle::platform::CPUPlace())
          .get());

  // 2. test API
  auto out = phi::Flatten<float>(dev_ctx, dense_x, start_axis, stop_axis);

  // 3. check result
  std::vector<int> expect_shape = {3, 4, 3};
  ASSERT_EQ(out.dims()[0], expect_shape[0]);
  ASSERT_EQ(out.dims()[1], expect_shape[1]);
  ASSERT_EQ(out.dims()[2], expect_shape[2]);
  ASSERT_EQ(out.numel(), 36);
  ASSERT_EQ(out.meta().dtype, phi::DataType::FLOAT32);
  ASSERT_EQ(out.meta().layout, phi::DataLayout::NCHW);

  bool value_equal = true;
  auto* dense_out_data = out.data<float>();
  for (int i = 0; i < dense_x.numel(); i++) {
    if (std::abs(dense_x_data[i] - dense_out_data[i]) > 1e-6f) {
      value_equal = false;
    }
  }
  ASSERT_EQ(value_equal, true);
}

}  // namespace tests
}  // namespace phi
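For reference, flattening axes 1 through 2 of a {3, 2, 2, 3} tensor merges 2 x 2 into 4, giving the {3, 4, 3} shape asserted above, while the row-major buffer is left untouched. A minimal standalone sketch of that shape computation (the helper name FlattenShape is illustrative, not a phi API):

#include <cassert>
#include <cstdint>
#include <vector>

std::vector<int64_t> FlattenShape(const std::vector<int64_t>& dims,
                                  int start_axis, int stop_axis) {
  // Keep dims before start_axis, merge [start_axis, stop_axis], keep the rest.
  std::vector<int64_t> out(dims.begin(), dims.begin() + start_axis);
  int64_t merged = 1;
  for (int i = start_axis; i <= stop_axis; ++i) merged *= dims[i];
  out.push_back(merged);
  out.insert(out.end(), dims.begin() + stop_axis + 1, dims.end());
  return out;
}

int main() {
  // Matches the expectation in the test: {3, 2, 2, 3} with axes [1, 2] -> {3, 4, 3}.
  assert((FlattenShape({3, 2, 2, 3}, 1, 2) == std::vector<int64_t>{3, 4, 3}));
  return 0;
}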
paddle/phi/tests/kernels/test_matmul_dev_api.cc
deleted 100644 → 0
View file @ 1cb12ff5
/* Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include <gtest/gtest.h>
#include <memory>
#include "paddle/fluid/memory/allocation/allocator_facade.h"
#include "paddle/phi/api/lib/utils/allocator.h"
#include "paddle/phi/core/dense_tensor.h"
#include "paddle/phi/core/kernel_registry.h"
#include "paddle/phi/kernels/matmul_kernel.h"
namespace phi {
namespace tests {

namespace framework = paddle::framework;
using DDim = phi::DDim;

TEST(DEV_API, dot) {
  // 1. create tensor
  const auto alloc = std::make_unique<paddle::experimental::DefaultAllocator>(
      paddle::platform::CPUPlace());
  DenseTensor dense_x(alloc.get(),
                      phi::DenseTensorMeta(phi::DataType::FLOAT32,
                                           phi::make_ddim({3, 3}),
                                           phi::DataLayout::NCHW));
  auto* dense_x_data =
      dense_x.mutable_data<float>(paddle::platform::CPUPlace());

  DenseTensor dense_y(alloc.get(),
                      phi::DenseTensorMeta(phi::DataType::FLOAT32,
                                           phi::make_ddim({3, 3}),
                                           phi::DataLayout::NCHW));
  auto* dense_y_data =
      dense_y.mutable_data<float>(paddle::platform::CPUPlace());

  for (size_t i = 0; i < 9; ++i) {
    dense_x_data[i] = 1.0;
    dense_y_data[i] = 2.0;
  }
  std::vector<float> sum(9, 6.0);

  // 2. test API
  phi::CPUContext dev_ctx;
  dev_ctx.SetAllocator(paddle::memory::allocation::AllocatorFacade::Instance()
                           .GetAllocator(paddle::platform::CPUPlace())
                           .get());
  auto out = Matmul<float, CPUContext>(dev_ctx, dense_x, dense_y, false, false);

  // 3. check result
  ASSERT_EQ(out.dims().size(), 2);
  ASSERT_EQ(out.dims()[0], 3);
  ASSERT_EQ(out.dims()[1], 3);
  ASSERT_EQ(out.numel(), 9);
  ASSERT_EQ(out.dtype(), DataType::FLOAT32);
  ASSERT_EQ(out.layout(), DataLayout::NCHW);
  ASSERT_EQ(out.initialized(), true);

  for (size_t i = 0; i < 9; i++) {
    ASSERT_NEAR(sum[i], out.data<float>()[i], 1e-6f);
  }
}

}  // namespace tests
}  // namespace phi
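The expected output above follows directly from the inputs: with every x entry 1.0 and every y entry 2.0, each element of the 3x3 product is a sum of three 1.0 * 2.0 terms, i.e. 6.0. A standalone sketch of that naive reference (illustrative only, not the phi kernel):

#include <cassert>

int main() {
  const int n = 3;
  float x[n][n], y[n][n], out[n][n];
  for (int i = 0; i < n; ++i)
    for (int j = 0; j < n; ++j) {
      x[i][j] = 1.0f;
      y[i][j] = 2.0f;
    }
  for (int i = 0; i < n; ++i)
    for (int j = 0; j < n; ++j) {
      // Naive row-by-column product; every term is 1.0 * 2.0.
      out[i][j] = 0.0f;
      for (int k = 0; k < n; ++k) out[i][j] += x[i][k] * y[k][j];
      assert(out[i][j] == 6.0f);
    }
  return 0;
}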
paddle/phi/tests/kernels/test_mean_dev_api.cc
deleted 100644 → 0
View file @ 1cb12ff5
/* Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include <gtest/gtest.h>
#include <memory>
#include "paddle/fluid/memory/allocation/allocator_facade.h"
#include "paddle/phi/api/lib/utils/allocator.h"
#include "paddle/phi/core/dense_tensor.h"
#include "paddle/phi/core/kernel_registry.h"
#include "paddle/phi/kernels/reduce_mean_kernel.h"
namespace phi {
namespace tests {

namespace framework = paddle::framework;
using DDim = phi::DDim;

TEST(DEV_API, mean) {
  // 1. create tensor
  const auto alloc = std::make_unique<paddle::experimental::DefaultAllocator>(
      paddle::platform::CPUPlace());
  phi::DenseTensor dense_x(alloc.get(),
                           phi::DenseTensorMeta(phi::DataType::FLOAT32,
                                                phi::make_ddim({3, 4}),
                                                phi::DataLayout::NCHW));
  auto* dense_x_data =
      dense_x.mutable_data<float>(paddle::platform::CPUPlace());

  float sum = 0.0;
  for (size_t i = 0; i < 12; ++i) {
    dense_x_data[i] = i * 1.0;
    sum += i * 1.0;
  }
  std::vector<int64_t> dims = {0, 1};

  // 2. test API
  phi::CPUContext dev_ctx;
  dev_ctx.SetAllocator(paddle::memory::allocation::AllocatorFacade::Instance()
                           .GetAllocator(paddle::platform::CPUPlace())
                           .get());
  auto out = phi::Mean<float>(dev_ctx, dense_x, dims, false);

  // 3. check result
  ASSERT_EQ(out.dims().size(), 1);
  ASSERT_EQ(out.numel(), 1);
  ASSERT_EQ(out.meta().dtype, phi::DataType::FLOAT32);
  ASSERT_EQ(out.meta().layout, phi::DataLayout::NCHW);

  auto expect_result = sum / 12;
  auto actual_result = out.data<float>()[0];
  ASSERT_NEAR(expect_result, actual_result, 1e-6f);
}

}  // namespace tests
}  // namespace phi
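The single expected value checked above is just the mean of 0..11 after reducing over both axes. A standalone sketch of the arithmetic (illustrative only):

#include <cassert>
#include <numeric>
#include <vector>

int main() {
  // 3x4 input filled with 0..11, reduced over both axes as in the test.
  std::vector<float> x(12);
  std::iota(x.begin(), x.end(), 0.0f);
  float sum = std::accumulate(x.begin(), x.end(), 0.0f);
  assert(sum == 66.0f);
  assert(sum / x.size() == 5.5f);  // the single element Mean should return
  return 0;
}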
paddle/phi/tests/kernels/test_reshape_dev_api.cc
deleted 100644 → 0
View file @ 1cb12ff5
/* Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include <gtest/gtest.h>
#include <memory>
#include "paddle/fluid/memory/allocation/allocator_facade.h"
#include "paddle/phi/api/lib/utils/allocator.h"
#include "paddle/phi/core/dense_tensor.h"
#include "paddle/phi/core/kernel_registry.h"
#include "paddle/phi/kernels/reshape_kernel.h"
namespace phi {
namespace tests {

namespace framework = paddle::framework;
using DDim = phi::DDim;

// TODO(chenweihang): Remove this test after the API is used in the dygraph
TEST(DEV_API, reshape) {
  // 1. create tensor
  const auto alloc = std::make_unique<paddle::experimental::DefaultAllocator>(
      paddle::platform::CPUPlace());
  phi::DenseTensor dense_x(alloc.get(),
                           phi::DenseTensorMeta(phi::DataType::FLOAT32,
                                                phi::make_ddim({3, 2, 2, 3}),
                                                phi::DataLayout::NCHW));
  auto* dense_x_data =
      dense_x.mutable_data<float>(paddle::platform::CPUPlace());

  for (int i = 0; i < dense_x.numel(); i++) {
    dense_x_data[i] = i;
  }
  std::vector<int64_t> shape{12, 3};

  // 2. test API
  phi::CPUContext dev_ctx;
  dev_ctx.SetAllocator(paddle::memory::allocation::AllocatorFacade::Instance()
                           .GetAllocator(paddle::platform::CPUPlace())
                           .get());
  dev_ctx.SetHostAllocator(
      paddle::memory::allocation::AllocatorFacade::Instance()
          .GetAllocator(paddle::platform::CPUPlace())
          .get());
  auto out = phi::Reshape<float>(dev_ctx, dense_x, shape);

  // 3. check result
  std::vector<int64_t> expect_shape = {12, 3};
  ASSERT_EQ(out.dims()[0], expect_shape[0]);
  ASSERT_EQ(out.dims()[1], expect_shape[1]);
  ASSERT_EQ(out.numel(), 36);
  ASSERT_EQ(out.meta().dtype, phi::DataType::FLOAT32);
  ASSERT_EQ(out.meta().layout, phi::DataLayout::NCHW);

  bool value_equal = true;
  auto* dense_out_data = out.data<float>();
  for (int i = 0; i < dense_x.numel(); i++) {
    if (std::abs(dense_x_data[i] - dense_out_data[i]) > 1e-6f) {
      value_equal = false;
    }
  }
  ASSERT_EQ(value_equal, true);
}

}  // namespace tests
}  // namespace phi
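Reshape only reinterprets the dims: {3, 2, 2, 3} and {12, 3} both hold 36 elements and the row-major buffer is unchanged, which is why the test compares input and output element by element. A standalone sketch of the numel invariant (illustrative only):

#include <cassert>
#include <cstdint>
#include <functional>
#include <numeric>
#include <vector>

int main() {
  std::vector<int64_t> src = {3, 2, 2, 3}, dst = {12, 3};
  auto numel = [](const std::vector<int64_t>& d) {
    // Product of all dims, i.e. the element count of the buffer.
    return std::accumulate(d.begin(), d.end(), int64_t{1},
                           std::multiplies<int64_t>());
  };
  assert(numel(src) == numel(dst));
  assert(numel(dst) == 36);
  return 0;
}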
paddle/phi/tests/kernels/test_scale_dev_api.cc
deleted 100644 → 0
View file @ 1cb12ff5
/* Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include <gtest/gtest.h>
#include <memory>
#include "paddle/fluid/memory/allocation/allocator_facade.h"
#include "paddle/phi/api/lib/utils/allocator.h"
#include "paddle/phi/core/dense_tensor.h"
#include "paddle/phi/core/kernel_registry.h"
#include "paddle/phi/kernels/scale_kernel.h"
namespace phi {
namespace tests {

namespace framework = paddle::framework;
using DDim = phi::DDim;

TEST(DEV_API, scale) {
  // 1. create tensor
  const auto alloc = std::make_unique<paddle::experimental::DefaultAllocator>(
      paddle::platform::CPUPlace());
  phi::DenseTensor dense_x(alloc.get(),
                           phi::DenseTensorMeta(phi::DataType::FLOAT32,
                                                phi::make_ddim({3, 4}),
                                                phi::DataLayout::NCHW));
  auto* dense_x_data =
      dense_x.mutable_data<float>(paddle::platform::CPUPlace());
  for (size_t i = 0; i < 12; ++i) {
    dense_x_data[i] = i * 1.0;
  }
  float scale = 2;
  float bias = 1;
  bool bias_after_scale = true;

  // 2. test API
  phi::CPUContext dev_ctx;
  dev_ctx.SetAllocator(paddle::memory::allocation::AllocatorFacade::Instance()
                           .GetAllocator(paddle::platform::CPUPlace())
                           .get());
  auto out = phi::Scale<float>(dev_ctx, dense_x, scale, bias, bias_after_scale);

  // 3. check result
  ASSERT_EQ(out.dims().size(), 2);
  ASSERT_EQ(out.numel(), 12);
  ASSERT_EQ(out.meta().dtype, phi::DataType::FLOAT32);
  ASSERT_EQ(out.meta().layout, phi::DataLayout::NCHW);

  auto expect_result = 23;
  auto actual_result = out.data<float>()[11];
  ASSERT_NEAR(expect_result, actual_result, 1e-6f);
}
TEST(DEV_API, scale_host) {
  // 1. create tensor
  const auto alloc = std::make_unique<paddle::experimental::DefaultAllocator>(
      paddle::platform::CPUPlace());
  phi::DenseTensor dense_x(alloc.get(),
                           phi::DenseTensorMeta(phi::DataType::FLOAT32,
                                                phi::make_ddim({3, 4}),
                                                phi::DataLayout::NCHW));
  auto* dense_x_data =
      dense_x.mutable_data<float>(paddle::platform::CPUPlace());
  for (size_t i = 0; i < 12; ++i) {
    dense_x_data[i] = i * 1.0;
  }

  phi::DenseTensor scale(alloc.get(),
                         phi::DenseTensorMeta(phi::DataType::FLOAT32,
                                              phi::make_ddim({1}),
                                              phi::DataLayout::NCHW));
  scale.data<float>()[0] = 2;
  float bias = 1;
  bool bias_after_scale = true;

  // 2. test API
  phi::CPUContext dev_ctx;
  dev_ctx.SetAllocator(paddle::memory::allocation::AllocatorFacade::Instance()
                           .GetAllocator(paddle::platform::CPUPlace())
                           .get());
  auto out = phi::Scale<float>(dev_ctx, dense_x, scale, bias, bias_after_scale);

  // 3. check result
  ASSERT_EQ(out.dims().size(), 2);
  ASSERT_EQ(out.numel(), 12);
  ASSERT_EQ(out.meta().dtype, phi::DataType::FLOAT32);
  ASSERT_EQ(out.meta().layout, phi::DataLayout::NCHW);

  auto expect_result = 23;
  auto actual_result = out.data<float>()[11];
  ASSERT_NEAR(expect_result, actual_result, 1e-6f);
}

}  // namespace tests
}  // namespace phi
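Both tests check element 11 against 23, which matches x * scale + bias = 11 * 2 + 1 for bias_after_scale == true. A standalone sketch of that contract as read from these assertions (the bias_after_scale == false branch is an assumption about the op's semantics, not something exercised here):

#include <cassert>

// Assumed scale contract: bias applied after scaling when the flag is set,
// otherwise added before scaling. Only the first branch is covered above.
float ScaleRef(float x, float scale, float bias, bool bias_after_scale) {
  return bias_after_scale ? x * scale + bias : (x + bias) * scale;
}

int main() {
  // The element the tests read back: x = 11, scale = 2, bias = 1 -> 23.
  assert(ScaleRef(11.0f, 2.0f, 1.0f, true) == 23.0f);
  return 0;
}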
paddle/phi/tests/kernels/test_sparse_activation_dev_api.cc
deleted 100644 → 0
View file @ 1cb12ff5
/* Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include <gtest/gtest.h>
#include <memory>
#include "paddle/fluid/memory/allocation/allocator_facade.h"
#include "paddle/phi/api/lib/utils/allocator.h"
#include "paddle/phi/backends/gpu/gpu_context.h"
#include "paddle/phi/common/place.h"
#include "paddle/phi/core/kernel_registry.h"
#include "paddle/phi/kernels/activation_grad_kernel.h"
#include "paddle/phi/kernels/activation_kernel.h"
#include "paddle/phi/kernels/empty_kernel.h"
#include "paddle/phi/kernels/sparse/sparse_utils_kernel.h"
#include "paddle/phi/kernels/sparse/unary_grad_kernel.h"
#include "paddle/phi/kernels/sparse/unary_kernel.h"
namespace phi {
namespace tests {

TEST(DEV_API, sparse_relu) {
  std::vector<float> data = {0, -1, 0, 2, 0, 0, -3, 0, 4, 5, 0, 0};
  phi::CPUContext dev_ctx_cpu;
  dev_ctx_cpu.SetAllocator(
      paddle::memory::allocation::AllocatorFacade::Instance()
          .GetAllocator(paddle::platform::CPUPlace())
          .get());
  dev_ctx_cpu.SetHostAllocator(
      paddle::memory::allocation::AllocatorFacade::Instance()
          .GetAllocator(paddle::platform::CPUPlace())
          .get());

  DenseTensor dense_x =
      phi::Empty(dev_ctx_cpu,
                 DenseTensorMeta(DataType::FLOAT32, {3, 4}, DataLayout::NCHW));
  memcpy(dense_x.data<float>(), data.data(), data.size() * sizeof(float));
  auto sparse_coo = sparse::DenseToCoo<float>(dev_ctx_cpu, dense_x, 2);
  auto sparse_out = sparse::ReluCoo<float>(dev_ctx_cpu, sparse_coo);

  DenseTensor dense_out =
      phi::EmptyLike<float>(dev_ctx_cpu, sparse_out.non_zero_elements());
  ReluKernel<float>(dev_ctx_cpu, sparse_coo.non_zero_elements(), &dense_out);
  int cmp = memcmp(dense_out.data<float>(),
                   sparse_out.non_zero_elements().data<float>(),
                   dense_out.numel() * sizeof(float));
  ASSERT_EQ(cmp, 0);

  // backward
  DenseTensor dense_grad_x = phi::EmptyLike<float>(dev_ctx_cpu, dense_out);
  ReluGradKernel<float>(
      dev_ctx_cpu, sparse_coo.non_zero_elements(), dense_out, &dense_grad_x);
  SparseCooTensor sparse_grad_x(
      phi::EmptyLike<int>(dev_ctx_cpu, sparse_coo.non_zero_indices()),
      phi::EmptyLike<int>(dev_ctx_cpu, sparse_coo.non_zero_elements()),
      {3, 4});

  SparseCooTensor sparse_out_grad(
      sparse_coo.non_zero_indices(), dense_out, {3, 4});
  sparse::ReluCooGradKernel<float>(
      dev_ctx_cpu, sparse_coo, sparse_out_grad, &sparse_grad_x);

  cmp = memcmp(dense_grad_x.data<float>(),
               sparse_grad_x.non_zero_elements().data<float>(),
               dense_grad_x.numel() * sizeof(float));
  ASSERT_EQ(cmp, 0);
}

}  // namespace tests
}  // namespace phi
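Assuming DenseToCoo stores the non-zero values in row-major scan order, the 3x4 input above contributes the values {-1, 2, -3, 4, 5}, and the sparse ReLU should agree with an element-wise ReLU over exactly those stored values, which is what the memcmp against the dense ReluKernel output checks. A standalone sketch of that reference (illustrative only):

#include <algorithm>
#include <cassert>
#include <vector>

int main() {
  // Non-zero values of {0,-1,0,2, 0,0,-3,0, 4,5,0,0} in row-major order.
  std::vector<float> non_zero = {-1, 2, -3, 4, 5};
  std::vector<float> relu(non_zero.size());
  std::transform(non_zero.begin(), non_zero.end(), relu.begin(),
                 [](float v) { return std::max(v, 0.0f); });
  assert((relu == std::vector<float>{0, 2, 0, 4, 5}));
  return 0;
}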
paddle/phi/tests/kernels/test_sparse_conv3d_dev_api.cc
deleted 100644 → 0
View file @ 1cb12ff5
This diff is collapsed.
paddle/phi/tests/kernels/test_sparse_elementwise_dev_api.cc
deleted 100644 → 0
View file @ 1cb12ff5
/* Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include <gtest/gtest.h>
#include <cmath>
#include <memory>
#include "paddle/fluid/memory/allocation/allocator_facade.h"
#include "paddle/phi/api/lib/utils/allocator.h"
#include "paddle/phi/backends/cpu/cpu_context.h"
#include "paddle/phi/core/dense_tensor.h"
#include "paddle/phi/core/kernel_registry.h"
#include "paddle/phi/kernels/elementwise_add_grad_kernel.h"
#include "paddle/phi/kernels/elementwise_add_kernel.h"
#include "paddle/phi/kernels/elementwise_divide_grad_kernel.h"
#include "paddle/phi/kernels/elementwise_divide_kernel.h"
#include "paddle/phi/kernels/elementwise_multiply_grad_kernel.h"
#include "paddle/phi/kernels/elementwise_multiply_kernel.h"
#include "paddle/phi/kernels/elementwise_subtract_grad_kernel.h"
#include "paddle/phi/kernels/elementwise_subtract_kernel.h"
#include "paddle/phi/kernels/sparse/elementwise_grad_kernel.h"
#include "paddle/phi/kernels/sparse/elementwise_kernel.h"
#include "paddle/phi/kernels/sparse/sparse_utils_kernel.h"
namespace phi {
namespace tests {
#define TEST_ELEMENTWISE_OP(name) \
TEST_ELEMENTWISE_OP_WITH_TYPE(name, Csr) \
\
TEST_ELEMENTWISE_OP_WITH_TYPE(name, Coo)
#define TEST_ELEMENTWISE_OP_WITH_TYPE(name, type) \
template <typename T, typename Context> \
void TestElementWise##name##type(const Context& dev_ctx_cpu, \
const Sparse##type##Tensor& x, \
const Sparse##type##Tensor& y, \
const DDim& dense_dims) { \
auto out = sparse::ElementWise##name##type<T>(dev_ctx_cpu, x, y); \
const DenseTensor denseX = sparse::type##ToDense<T>(dev_ctx_cpu, x); \
const DenseTensor denseY = sparse::type##ToDense<T>(dev_ctx_cpu, y); \
const DenseTensor denseOut = sparse::type##ToDense<T>(dev_ctx_cpu, out); \
auto expectResult = name<T>(dev_ctx_cpu, denseX, denseY); \
for (int j = 0; j < denseOut.numel(); ++j) { \
auto actualResultRow = denseOut.template data<T>()[j]; \
auto expectResultRow = expectResult.template data<T>()[j]; \
if (std::is_same<T, float>::value || std::is_same<T, double>::value) { \
if (!std::isnan(expectResultRow)) { \
ASSERT_DOUBLE_EQ(expectResultRow, actualResultRow); \
} \
} else { \
ASSERT_EQ(expectResultRow, actualResultRow); \
} \
} \
}
TEST_ELEMENTWISE_OP(Add)
TEST_ELEMENTWISE_OP(Subtract)
TEST_ELEMENTWISE_OP(Multiply)
TEST_ELEMENTWISE_OP(Divide)
TEST(DEV_API, sparse_elementwise_coo_kernel_double) {
  using T = double;
  using IntT = int64_t;
  for (int epoch = 0; epoch < 100; ++epoch) {
    DDim dense_dims = phi::make_ddim({2, 4, 4});
    IntT sparse_dim = 2;
    // 32els
    std::vector<T> x_dense_data = {0.0, 1.0, 0.0, 0.0, 2.0, 0.0, 3.0, 0.0,
                                   0.0, 0.0, 0.0, 4.0, 0.0, 0.0, 0.0, 0.0,
                                   0.0, 1.0, 0.0, 0.0, 2.0, 0.0, 3.0, 0.0,
                                   0.0, 0.0, 0.0, 4.0, 0.0, 0.0, 0.0, 0.0};
    std::vector<T> y_dense_data = {0.0, 0.0, 0.0, 4.0, 0.0, 0.0, 0.0, 0.0,
                                   0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
                                   0.0, 1.0, 0.0, 0.0, 2.0, 0.0, 3.0, 0.0,
                                   0.0, 0.0, 0.0, 4.0, 0.0, 0.0, 0.0, 0.0};

    const auto alloc =
        std::make_unique<paddle::experimental::DefaultAllocator>(
            paddle::platform::CPUPlace());

    phi::DenseTensor dense_x(
        alloc.get(),
        phi::DenseTensorMeta(DataType::FLOAT32, dense_dims, DataLayout::NCHW));
    auto* dense_x_data = dense_x.mutable_data<T>(paddle::platform::CPUPlace());
    memcpy(dense_x_data, x_dense_data.data(), x_dense_data.size() * sizeof(T));

    phi::DenseTensor dense_y(
        alloc.get(),
        phi::DenseTensorMeta(DataType::FLOAT32, dense_dims, DataLayout::NCHW));
    auto* dense_y_data = dense_y.mutable_data<T>(paddle::platform::CPUPlace());
    memcpy(dense_y_data, y_dense_data.data(), y_dense_data.size() * sizeof(T));

    phi::CPUContext dev_ctx_cpu;
    dev_ctx_cpu.SetAllocator(
        paddle::memory::allocation::AllocatorFacade::Instance()
            .GetAllocator(paddle::platform::CPUPlace())
            .get());

    auto coo_x = sparse::DenseToCoo<T>(dev_ctx_cpu, dense_x, sparse_dim);
    auto coo_y = sparse::DenseToCoo<T>(dev_ctx_cpu, dense_y, sparse_dim);

    TestElementWiseAddCoo<T>(dev_ctx_cpu, coo_x, coo_y, dense_dims);
    TestElementWiseSubtractCoo<T>(dev_ctx_cpu, coo_x, coo_y, dense_dims);
    TestElementWiseMultiplyCoo<T>(dev_ctx_cpu, coo_x, coo_y, dense_dims);
    TestElementWiseDivideCoo<T>(dev_ctx_cpu, coo_x, coo_y, dense_dims);
  }
}
TEST(DEV_API, sparse_elementwise_csr_kernel_float) {
  using T = float;
  DDim dense_dims = phi::make_ddim({6, 4});
  // 24els
  std::vector<T> x_dense_data = {0.0, 0.0, 4.0, 2.0, 6.0, 3.0, 0.2, 0.1,
                                 2.2, 1.1, 4.2, 2.1, 0.4, 0.2, 0.0, 0.0,
                                 4.4, 2.2, 0.6, 0.3, 2.6, 1.3, 0.0, 0.0};
  std::vector<T> y_dense_data = {0.0, 1.0, 0.0, 2.0, 0.0, 3.0, 0.0, 3.5,
                                 0.7, 0.0, 3.5, 0.7, 3.2, 0.1, 0.0, 3.2,
                                 1.0, 0.0, 1.2, 0.5, 0.7, 3.3, 0.0, 9.0};

  const auto alloc = std::make_unique<paddle::experimental::DefaultAllocator>(
      paddle::platform::CPUPlace());

  phi::DenseTensor dense_x(
      alloc.get(),
      phi::DenseTensorMeta(DataType::FLOAT32, dense_dims, DataLayout::NCHW));
  auto* dense_x_data = dense_x.mutable_data<T>(paddle::platform::CPUPlace());
  memcpy(dense_x_data, x_dense_data.data(), x_dense_data.size() * sizeof(T));

  phi::DenseTensor dense_y(
      alloc.get(),
      phi::DenseTensorMeta(DataType::FLOAT32, dense_dims, DataLayout::NCHW));
  auto* dense_y_data = dense_y.mutable_data<T>(paddle::platform::CPUPlace());
  memcpy(dense_y_data, y_dense_data.data(), y_dense_data.size() * sizeof(T));

  phi::CPUContext dev_ctx_cpu;
  dev_ctx_cpu.SetAllocator(
      paddle::memory::allocation::AllocatorFacade::Instance()
          .GetAllocator(paddle::platform::CPUPlace())
          .get());

  auto csr_x = sparse::DenseToCsr<T>(dev_ctx_cpu, dense_x);
  auto csr_y = sparse::DenseToCsr<T>(dev_ctx_cpu, dense_y);

  TestElementWiseAddCsr<T>(dev_ctx_cpu, csr_x, csr_y, dense_dims);
  TestElementWiseSubtractCsr<T>(dev_ctx_cpu, csr_x, csr_y, dense_dims);
  TestElementWiseMultiplyCsr<T>(dev_ctx_cpu, csr_x, csr_y, dense_dims);
  TestElementWiseDivideCsr<T>(dev_ctx_cpu, csr_x, csr_y, dense_dims);
}
#define TEST_ELEMENTWISE_OP_GRAD(name) \
TEST_ELEMENTWISE_OP_GRAD_WITH_TYPE(name, Csr) \
\
TEST_ELEMENTWISE_OP_GRAD_WITH_TYPE(name, Coo)
#define TEST_ELEMENTWISE_OP_GRAD_WITH_TYPE(name, type) \
template <typename T, typename Context> \
void TestElementWise##name##type##Grad(const Context& dev_ctx_cpu, \
const Sparse##type##Tensor& x, \
const Sparse##type##Tensor& y, \
const DDim& dense_dims) { \
auto out = sparse::ElementWise##name##type<T>(dev_ctx_cpu, x, y); \
auto dresult = \
sparse::ElementWise##name##type##Grad<T>(dev_ctx_cpu, x, y, out); \
\
DenseTensor expectdy = phi::Empty( \
dev_ctx_cpu, \
DenseTensorMeta(DataType::FLOAT32, dense_dims, DataLayout::NCHW)); \
DenseTensor expectdx = phi::Empty( \
dev_ctx_cpu, \
DenseTensorMeta(DataType::FLOAT32, dense_dims, DataLayout::NCHW)); \
\
phi::name##GradKernel<T>(dev_ctx_cpu, \
sparse::type##ToDense<T>(dev_ctx_cpu, x), \
sparse::type##ToDense<T>(dev_ctx_cpu, y), \
sparse::type##ToDense<T>(dev_ctx_cpu, out), \
-1, \
&expectdx, \
&expectdy); \
const DenseTensor densedX = \
sparse::type##ToDense<T>(dev_ctx_cpu, dresult[0]); \
const DenseTensor densedY = \
sparse::type##ToDense<T>(dev_ctx_cpu, dresult[1]); \
const DenseTensor denseOut = sparse::type##ToDense<T>(dev_ctx_cpu, out); \
\
for (int j = 0; j < densedX.numel(); ++j) { \
auto actualResultRow = densedX.template data<T>()[j]; \
auto expectResultRow = expectdx.template data<T>()[j]; \
if (std::is_same<T, float>::value || std::is_same<T, double>::value) { \
if (!std::isnan(expectResultRow)) { \
ASSERT_DOUBLE_EQ(expectResultRow, actualResultRow); \
} \
} else { \
ASSERT_EQ(expectResultRow, actualResultRow); \
} \
} \
for (int j = 0; j < densedY.numel(); ++j) { \
auto actualResultRow = densedY.template data<T>()[j]; \
auto expectResultRow = expectdy.template data<T>()[j]; \
if (std::is_same<T, float>::value || std::is_same<T, double>::value) { \
if (!std::isnan(expectResultRow)) { \
ASSERT_DOUBLE_EQ(expectResultRow, actualResultRow); \
} \
} else { \
ASSERT_EQ(expectResultRow, actualResultRow); \
} \
} \
}
TEST_ELEMENTWISE_OP_GRAD(Add)
TEST_ELEMENTWISE_OP_GRAD(Subtract)
TEST_ELEMENTWISE_OP_GRAD(Multiply)
template <typename T, typename Context>
void TestElementWiseDivideCsrGrad(const Context& dev_ctx_cpu,
                                  const SparseCsrTensor& x,
                                  const SparseCsrTensor& y,
                                  const DDim& dense_dims) {
  auto out = sparse::ElementWiseDivideCsr<T>(dev_ctx_cpu, x, y);
  auto dresult =
      sparse::ElementWiseDivideCsrGrad<T>(dev_ctx_cpu, x, y, out, out);

  DenseTensor expectdy = phi::Empty(
      dev_ctx_cpu,
      DenseTensorMeta(DataType::FLOAT32, dense_dims, DataLayout::NCHW));
  DenseTensor expectdx = phi::Empty(
      dev_ctx_cpu,
      DenseTensorMeta(DataType::FLOAT32, dense_dims, DataLayout::NCHW));

  phi::DivideGradKernel<T>(dev_ctx_cpu,
                           sparse::CsrToDense<T>(dev_ctx_cpu, x),
                           sparse::CsrToDense<T>(dev_ctx_cpu, y),
                           sparse::CsrToDense<T>(dev_ctx_cpu, out),
                           sparse::CsrToDense<T>(dev_ctx_cpu, out),
                           -1,
                           &expectdx,
                           &expectdy);

  const DenseTensor densedX = sparse::CsrToDense<T>(dev_ctx_cpu, dresult[0]);
  const DenseTensor densedY = sparse::CsrToDense<T>(dev_ctx_cpu, dresult[1]);
  const DenseTensor denseOut = sparse::CsrToDense<T>(dev_ctx_cpu, out);

  for (int j = 0; j < densedX.numel(); ++j) {
    auto actualResultRow = densedX.template data<T>()[j];
    auto expectResultRow = expectdx.template data<T>()[j];
    if (!std::isnan(expectResultRow)) {
      ASSERT_DOUBLE_EQ(expectResultRow, actualResultRow);
    }
  }
  for (int j = 0; j < densedY.numel(); ++j) {
    auto actualResultRow = densedY.template data<T>()[j];
    auto expectResultRow = expectdy.template data<T>()[j];
    if (!std::isnan(expectResultRow)) {
      ASSERT_DOUBLE_EQ(expectResultRow, actualResultRow);
    }
  }
}
template <typename T, typename Context>
void TestElementWiseDivideCooGrad(const Context& dev_ctx_cpu,
                                  const SparseCooTensor& x,
                                  const SparseCooTensor& y,
                                  const DDim& dense_dims) {
  auto out = sparse::ElementWiseDivideCoo<T>(dev_ctx_cpu, x, y);
  auto dresult =
      sparse::ElementWiseDivideCooGrad<T>(dev_ctx_cpu, x, y, out, out);

  DenseTensor expectdy = phi::Empty(
      dev_ctx_cpu,
      DenseTensorMeta(DataType::FLOAT32, dense_dims, DataLayout::NCHW));
  DenseTensor expectdx = phi::Empty(
      dev_ctx_cpu,
      DenseTensorMeta(DataType::FLOAT32, dense_dims, DataLayout::NCHW));

  phi::DivideGradKernel<T>(dev_ctx_cpu,
                           sparse::CooToDense<T>(dev_ctx_cpu, x),
                           sparse::CooToDense<T>(dev_ctx_cpu, y),
                           sparse::CooToDense<T>(dev_ctx_cpu, out),
                           sparse::CooToDense<T>(dev_ctx_cpu, out),
                           -1,
                           &expectdx,
                           &expectdy);

  const DenseTensor densedX = sparse::CooToDense<T>(dev_ctx_cpu, dresult[0]);
  const DenseTensor densedY = sparse::CooToDense<T>(dev_ctx_cpu, dresult[1]);
  const DenseTensor denseOut = sparse::CooToDense<T>(dev_ctx_cpu, out);

  for (int j = 0; j < densedX.numel(); ++j) {
    auto actualResultRow = densedX.template data<T>()[j];
    auto expectResultRow = expectdx.template data<T>()[j];
    if (!std::isnan(expectResultRow)) {
      ASSERT_DOUBLE_EQ(expectResultRow, actualResultRow);
    }
  }
  for (int j = 0; j < densedY.numel(); ++j) {
    auto actualResultRow = densedY.template data<T>()[j];
    auto expectResultRow = expectdy.template data<T>()[j];
    if (!std::isnan(expectResultRow)) {
      ASSERT_DOUBLE_EQ(expectResultRow, actualResultRow);
    }
  }
}
TEST(DEV_API, sparse_elementwise_csr_grad_kernel_float) {
  using T = float;
  DDim dense_dims = phi::make_ddim({2, 3, 4});

  std::vector<T> x_dense_data = {0.0, 0.0, 4.0, 2.0, 6.0, 3.0, 0.2, 0.1,
                                 2.2, 1.1, 4.2, 2.1, 0.4, 0.2, 0.0, 0.0,
                                 4.4, 2.2, 0.6, 0.3, 2.6, 1.3, 0.0, 0.0};
  std::vector<T> y_dense_data = {0.0, 1.0, 0.0, 2.0, 0.0, 3.0, 0.0, 3.5,
                                 0.7, 0.0, 3.5, 0.7, 3.2, 0.1, 0.0, 3.2,
                                 1.0, 0.0, 1.2, 0.5, 0.7, 3.3, 0.0, 9.0};

  const auto alloc = std::make_unique<paddle::experimental::DefaultAllocator>(
      paddle::platform::CPUPlace());

  phi::DenseTensor dense_x(
      alloc.get(),
      phi::DenseTensorMeta(DataType::FLOAT32, dense_dims, DataLayout::NCHW));
  auto* dense_x_data = dense_x.mutable_data<T>(paddle::platform::CPUPlace());
  memcpy(dense_x_data, x_dense_data.data(), x_dense_data.size() * sizeof(T));

  phi::DenseTensor dense_y(
      alloc.get(),
      phi::DenseTensorMeta(DataType::FLOAT32, dense_dims, DataLayout::NCHW));
  auto* dense_y_data = dense_y.mutable_data<T>(paddle::platform::CPUPlace());
  memcpy(dense_y_data, y_dense_data.data(), y_dense_data.size() * sizeof(T));

  phi::CPUContext dev_ctx_cpu;
  dev_ctx_cpu.SetAllocator(
      paddle::memory::allocation::AllocatorFacade::Instance()
          .GetAllocator(paddle::platform::CPUPlace())
          .get());
  dev_ctx_cpu.SetHostAllocator(
      paddle::memory::allocation::AllocatorFacade::Instance()
          .GetAllocator(paddle::platform::CPUPlace())
          .get());

  auto csr_x = sparse::DenseToCsr<T>(dev_ctx_cpu, dense_x);
  auto csr_y = sparse::DenseToCsr<T>(dev_ctx_cpu, dense_y);

  auto dx = sparse::DenseToCsr<T>(dev_ctx_cpu, dense_y);
  auto dy = sparse::DenseToCsr<T>(dev_ctx_cpu, dense_x);

  TestElementWiseAddCsrGrad<T>(dev_ctx_cpu, csr_x, csr_y, dense_dims);
  TestElementWiseSubtractCsrGrad<T>(dev_ctx_cpu, csr_x, csr_y, dense_dims);
  TestElementWiseMultiplyCsrGrad<T>(dev_ctx_cpu, csr_x, csr_y, dense_dims);
  TestElementWiseDivideCsrGrad<T>(dev_ctx_cpu, csr_x, csr_y, dense_dims);
}
TEST(DEV_API, sparse_elementwise_coo_grad_kernel_double) {
  using T = double;
  int64_t sparse_dim = 2;
  DDim dense_dims = phi::make_ddim({3, 4});

  std::vector<T> x_dense_data = {
      0.0, 1.0, 0.0, 2.0, 0.0, 3.0, 3.2, 0.0, 0.0, 3.2, 0.0, 0.0};
  std::vector<T> y_dense_data = {
      0.0, 1.0, 0.0, 2.0, 0.0, 3.0, 0.0, 3.5, 0.7, 0.0, 3.5, 0.7};

  const auto alloc = std::make_unique<paddle::experimental::DefaultAllocator>(
      paddle::platform::CPUPlace());

  phi::DenseTensor dense_x(
      alloc.get(),
      phi::DenseTensorMeta(DataType::FLOAT32, dense_dims, DataLayout::NCHW));
  auto* dense_x_data = dense_x.mutable_data<T>(paddle::platform::CPUPlace());
  memcpy(dense_x_data, x_dense_data.data(), x_dense_data.size() * sizeof(T));

  phi::DenseTensor dense_y(
      alloc.get(),
      phi::DenseTensorMeta(DataType::FLOAT32, dense_dims, DataLayout::NCHW));
  auto* dense_y_data = dense_y.mutable_data<T>(paddle::platform::CPUPlace());
  memcpy(dense_y_data, y_dense_data.data(), y_dense_data.size() * sizeof(T));

  phi::CPUContext dev_ctx_cpu;
  dev_ctx_cpu.SetAllocator(
      paddle::memory::allocation::AllocatorFacade::Instance()
          .GetAllocator(paddle::platform::CPUPlace())
          .get());
  dev_ctx_cpu.SetHostAllocator(
      paddle::memory::allocation::AllocatorFacade::Instance()
          .GetAllocator(paddle::platform::CPUPlace())
          .get());

  auto csr_x = sparse::DenseToCoo<T>(dev_ctx_cpu, dense_x, sparse_dim);
  auto csr_y = sparse::DenseToCoo<T>(dev_ctx_cpu, dense_y, sparse_dim);

  auto dx = sparse::DenseToCoo<T>(dev_ctx_cpu, dense_y, sparse_dim);
  auto dy = sparse::DenseToCoo<T>(dev_ctx_cpu, dense_x, sparse_dim);

  TestElementWiseAddCooGrad<T>(dev_ctx_cpu, csr_x, csr_y, dense_dims);
  TestElementWiseSubtractCooGrad<T>(dev_ctx_cpu, csr_x, csr_y, dense_dims);
  TestElementWiseMultiplyCooGrad<T>(dev_ctx_cpu, csr_x, csr_y, dense_dims);
  TestElementWiseDivideCooGrad<T>(dev_ctx_cpu, csr_x, csr_y, dense_dims);
}

}  // namespace tests
}  // namespace phi
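The comparison macros above deliberately skip positions where the dense reference is NaN; for the divide cases that happens wherever both operands are an implicit zero, since 0/0 is NaN under IEEE-754, while a non-zero numerator over zero gives an infinity that still compares cleanly. A standalone sketch of why those positions cannot be compared directly (illustrative only):

#include <cassert>
#include <cmath>

int main() {
  float zero = 0.0f, one = 1.0f;
  float q = zero / zero;  // 0 / 0 -> NaN; NaN != NaN, so the macros skip it
  assert(std::isnan(q));
  float r = one / zero;   // non-zero / 0 -> +inf, which is still comparable
  assert(std::isinf(r));
  return 0;
}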
paddle/phi/tests/kernels/test_sparse_pool_dev_api.cc
deleted 100644 → 0
View file @ 1cb12ff5
This diff is collapsed.
paddle/phi/tests/kernels/test_sparse_transpose_dev_api.cc
deleted 100644 → 0
View file @ 1cb12ff5
This diff is collapsed.
paddle/phi/tests/kernels/test_sparse_utils_dev_api.cc
deleted 100644 → 0
View file @ 1cb12ff5
This diff is collapsed.
paddle/phi/tests/kernels/test_split_dev_api.cc
deleted 100644 → 0
View file @ 1cb12ff5
This diff is collapsed.
paddle/phi/tests/kernels/test_sum_dev_api.cc
deleted 100644 → 0
View file @ 1cb12ff5
/* Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include <gtest/gtest.h>
#include <memory>
#include "paddle/fluid/memory/allocation/allocator_facade.h"
#include "paddle/phi/api/lib/utils/allocator.h"
#include "paddle/phi/core/dense_tensor.h"
#include "paddle/phi/core/kernel_registry.h"
#include "paddle/phi/kernels/reduce_sum_kernel.h"
namespace phi {
namespace tests {

namespace framework = paddle::framework;
using DDim = phi::DDim;

TEST(DEV_API, sum) {
  // 1. create tensor
  const auto alloc = std::make_unique<paddle::experimental::DefaultAllocator>(
      paddle::platform::CPUPlace());
  phi::DenseTensor dense_x(alloc.get(),
                           phi::DenseTensorMeta(phi::DataType::FLOAT32,
                                                phi::make_ddim({3, 4}),
                                                phi::DataLayout::NCHW));
  auto* dense_x_data =
      dense_x.mutable_data<float>(paddle::platform::CPUPlace());

  float sum = 0.0;
  for (size_t i = 0; i < 12; ++i) {
    dense_x_data[i] = i * 1.0;
    sum += i * 1.0;
  }
  std::vector<int64_t> axis = {0, 1};
  phi::CPUContext dev_ctx;
  dev_ctx.SetAllocator(paddle::memory::allocation::AllocatorFacade::Instance()
                           .GetAllocator(paddle::platform::CPUPlace())
                           .get());

  // 2. test API
  auto out = phi::Sum<float>(
      dev_ctx, dense_x, phi::IntArray(axis), phi::DataType::FLOAT32, false);

  // 3. check result
  ASSERT_EQ(out.dims().size(), 1);
  ASSERT_EQ(out.numel(), 1);
  ASSERT_EQ(out.meta().dtype, phi::DataType::FLOAT32);
  ASSERT_EQ(out.meta().layout, phi::DataLayout::NCHW);

  auto expect_result = sum;
  auto actual_result = out.data<float>()[0];
  ASSERT_NEAR(expect_result, actual_result, 1e-6f);
}

}  // namespace tests
}  // namespace phi
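The single expected value here is the running sum accumulated in the loop, i.e. 0 + 1 + ... + 11 = 66 after reducing over both axes. A standalone sketch of that reference (illustrative only):

#include <cassert>
#include <numeric>
#include <vector>

int main() {
  // 3x4 input filled with 0..11, reduced over axes {0, 1} as in the test.
  std::vector<float> x(12);
  std::iota(x.begin(), x.end(), 0.0f);
  assert(std::accumulate(x.begin(), x.end(), 0.0f) == 66.0f);
  return 0;
}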