BaiXuePrincess / Paddle (forked from PaddlePaddle / Paddle; in sync with the fork source)
Commit d09962a1
Authored on Feb 06, 2023 by engineer1109; committed via GitHub on Feb 06, 2023.
Parent commit: 1274e738

phi move ReshapeToMatrix & GetValue (#50139)
Showing 18 changed files with 131 additions and 143 deletions (+131 / -143).
paddle/fluid/framework/tensor_test.cc (+6 / -6)
paddle/fluid/framework/tensor_util.h (+0 / -21)
paddle/fluid/operators/bpr_loss_op.h (+4 / -3)
paddle/fluid/operators/cross_entropy_op.h (+4 / -3)
paddle/fluid/operators/fused/multihead_matmul_op.cu (+3 / -2)
paddle/fluid/operators/isfinite_op.h (+3 / -3)
paddle/phi/core/tensor_utils.cc (+56 / -0)
paddle/phi/core/tensor_utils.h (+18 / -0)
paddle/phi/kernels/fusion/onednn/fused_matmul_kernel.cc (+2 / -1)
paddle/phi/kernels/gpu/arange_kernel.cu (+0 / -13)
paddle/phi/kernels/gpu/linspace_kernel.cu (+0 / -13)
paddle/phi/kernels/impl/matmul_grad_kernel_impl.h (+22 / -34)
paddle/phi/kernels/impl/matmul_kernel_impl.h (+2 / -6)
paddle/phi/kernels/onednn/matmul_grad_kernel.cc (+2 / -4)
paddle/phi/kernels/onednn/matmul_kernel.cc (+1 / -1)
paddle/phi/kernels/xpu/arange_kernel.cc (+2 / -19)
paddle/phi/kernels/xpu/matmul_grad_kernel.cc (+4 / -7)
paddle/phi/kernels/xpu/matmul_kernel.cc (+2 / -7)
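This commit moves two small helpers out of the legacy paddle::framework namespace (fluid) and into phi, so phi kernels no longer have to reach back into fluid headers for them: ReshapeToMatrix, which views a tensor of rank >= 2 as a 2-D matrix by folding the first num_col_dims dimensions into rows and the remaining ones into columns, and GetValue<T>, which reads a single scalar out of a tensor, copying it to the host first when it lives on a device. The snippet below is a standalone illustration of the shape arithmetic behind ReshapeToMatrix (it mirrors phi::flatten_to_2d); it is not Paddle code and the helper name is hypothetical.

// Standalone sketch (not Paddle code): the shape arithmetic ReshapeToMatrix
// relies on. dims [d0, d1, ..., d(n-1)] with num_col_dims = k become the 2-D
// shape [d0 * ... * d(k-1), dk * ... * d(n-1)]; the underlying data is shared
// and only the shape changes.
#include <cassert>
#include <cstdint>
#include <functional>
#include <numeric>
#include <vector>

// Hypothetical stand-in for phi::flatten_to_2d.
std::vector<int64_t> FlattenTo2d(const std::vector<int64_t>& dims,
                                 int num_col_dims) {
  assert(dims.size() >= 2);
  assert(num_col_dims > 0 && num_col_dims < static_cast<int>(dims.size()));
  const int64_t rows =
      std::accumulate(dims.begin(), dims.begin() + num_col_dims, int64_t{1},
                      std::multiplies<int64_t>());
  const int64_t cols =
      std::accumulate(dims.begin() + num_col_dims, dims.end(), int64_t{1},
                      std::multiplies<int64_t>());
  return {rows, cols};
}

int main() {
  // Same expectation as TEST(DenseTensor, ReshapeToMatrix) further down:
  // a {2, 3, 4, 9} tensor flattened with num_col_dims = 2 becomes {6, 36}.
  const auto mat = FlattenTo2d({2, 3, 4, 9}, 2);
  assert(mat[0] == 2 * 3 && mat[1] == 4 * 9);
  return 0;
}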
paddle/fluid/framework/tensor_test.cc

@@ -12,8 +12,8 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
-#include "paddle/fluid/framework/tensor.h"
 #include "paddle/fluid/framework/tensor_util.h"
+#include "paddle/phi/core/tensor_utils.h"
 #include <gtest/gtest.h>

@@ -25,7 +25,7 @@ namespace platform = paddle::platform;
 TEST(DenseTensor, Dims) {
   phi::DenseTensor tt;
   tt.Resize({2, 3, 4});
-  framework::DDim dims = tt.dims();
+  phi::DDim dims = tt.dims();
   ASSERT_EQ(arity(dims), 3);
   for (int i = 0; i < 3; ++i) {
     EXPECT_EQ(i + 2, dims[i]);

@@ -225,7 +225,7 @@ TEST(DenseTensor, Slice) {
   src_tensor.mutable_data<int>(phi::make_ddim({5, 3, 4}),
                                platform::CPUPlace());
   phi::DenseTensor slice_tensor = src_tensor.Slice(1, 3);
-  framework::DDim slice_dims = slice_tensor.dims();
+  phi::DDim slice_dims = slice_tensor.dims();
   ASSERT_EQ(arity(slice_dims), 3);
   EXPECT_EQ(slice_dims[0], 2);
   EXPECT_EQ(slice_dims[1], 3);

@@ -251,7 +251,7 @@ TEST(DenseTensor, Slice) {
   src_tensor.mutable_data<double>(phi::make_ddim({6, 9}),
                                   platform::CUDAPlace(0));
   phi::DenseTensor slice_tensor = src_tensor.Slice(2, 6);
-  framework::DDim slice_dims = slice_tensor.dims();
+  phi::DDim slice_dims = slice_tensor.dims();
   ASSERT_EQ(arity(slice_dims), 2);
   EXPECT_EQ(slice_dims[0], 4);
   EXPECT_EQ(slice_dims[1], 9);

@@ -278,7 +278,7 @@ TEST(DenseTensor, Slice) {
   src_tensor.mutable_data<double>(phi::make_ddim({6, 9}),
                                   platform::NPUPlace(0));
   phi::DenseTensor slice_tensor = src_tensor.Slice(2, 6);
-  framework::DDim slice_dims = slice_tensor.dims();
+  phi::DDim slice_dims = slice_tensor.dims();
   ASSERT_EQ(arity(slice_dims), 2);
   EXPECT_EQ(slice_dims[0], 4);
   EXPECT_EQ(slice_dims[1], 9);

@@ -306,7 +306,7 @@ TEST(DenseTensor, ReshapeToMatrix) {
   for (int i = 0; i < 2 * 3 * 4 * 9; ++i) {
     src_ptr[i] = i;
   }
-  phi::DenseTensor res = framework::ReshapeToMatrix(src, 2);
+  phi::DenseTensor res = phi::ReshapeToMatrix(src, 2);
   ASSERT_EQ(res.dims()[0], 2 * 3);
   ASSERT_EQ(res.dims()[1], 4 * 9);
 }
paddle/fluid/framework/tensor_util.h

@@ -560,27 +560,6 @@ inline void TensorToVector(const phi::DenseTensor& src,
 std::ostream& operator<<(std::ostream& os, const LoD& lod);

-inline phi::DenseTensor ReshapeToMatrix(const phi::DenseTensor& src,
-                                        int num_col_dims) {
-  int rank = src.dims().size();
-  PADDLE_ENFORCE_GE(
-      rank,
-      2,
-      platform::errors::InvalidArgument(
-          "'ReshapeToMatrix()' is only used for flatten high rank "
-          "tensors to matrixs. The dimensions of phi::DenseTensor must be "
-          "greater or equal than 2. "
-          "But received dimensions of phi::DenseTensor is %d",
-          rank));
-  if (rank == 2) {
-    return src;
-  }
-  phi::DenseTensor res;
-  res.ShareDataWith(src);
-  res.Resize(phi::flatten_to_2d(src.dims(), num_col_dims));
-  return res;
-}
-
 template <typename T>
 inline T GetValue(const phi::DenseTensor* x) {
   T value = static_cast<T>(0);
paddle/fluid/operators/bpr_loss_op.h

@@ -16,6 +16,7 @@ limitations under the License. */
 #include "paddle/fluid/framework/eigen.h"
 #include "paddle/fluid/framework/op_registry.h"
 #include "paddle/fluid/platform/for_range.h"
+#include "paddle/phi/core/tensor_utils.h"
 #include "paddle/phi/kernels/funcs/math_function.h"

 namespace paddle {

@@ -44,9 +45,9 @@ class BprLossOpKernel : public framework::OpKernel<T> {
     y->mutable_data<T>(ctx.GetPlace());

     int rank = x->dims().size();
-    phi::DenseTensor x_2d = framework::ReshapeToMatrix(*x, rank - 1);
-    phi::DenseTensor labels_2d = framework::ReshapeToMatrix(*label, rank - 1);
-    phi::DenseTensor y_2d = framework::ReshapeToMatrix(*y, rank - 1);
+    phi::DenseTensor x_2d = phi::ReshapeToMatrix(*x, rank - 1);
+    phi::DenseTensor labels_2d = phi::ReshapeToMatrix(*label, rank - 1);
+    phi::DenseTensor y_2d = phi::ReshapeToMatrix(*y, rank - 1);

     const phi::DenseTensor* logits = &x_2d;
     const phi::DenseTensor* labels = &labels_2d;
paddle/fluid/operators/cross_entropy_op.h

@@ -16,6 +16,7 @@ limitations under the License. */
 #include "paddle/fluid/framework/eigen.h"
 #include "paddle/fluid/framework/op_registry.h"
 #include "paddle/fluid/platform/for_range.h"
+#include "paddle/phi/core/tensor_utils.h"
 #include "paddle/phi/kernels/funcs/cross_entropy.h"
 #include "paddle/phi/kernels/funcs/math.h"
 #include "paddle/phi/kernels/funcs/math_function.h"

@@ -34,7 +35,7 @@ class CrossEntropyOpKernel : public framework::OpKernel<T> {
     int rank = x->dims().size();
     auto label_dims = labels->dims();
-    phi::DenseTensor x_2d = framework::ReshapeToMatrix(*x, rank - 1);
+    phi::DenseTensor x_2d = phi::ReshapeToMatrix(*x, rank - 1);
     phi::DenseTensor labels_2d, y_2d;
     if (label_dims.size() < rank) {
       labels_2d.ShareDataWith(*labels);

@@ -44,8 +45,8 @@ class CrossEntropyOpKernel : public framework::OpKernel<T> {
       y_2d.Resize({phi::product(y->dims()), 1});
     } else {
-      labels_2d = framework::ReshapeToMatrix(*labels, rank - 1);
-      y_2d = framework::ReshapeToMatrix(*y, rank - 1);
+      labels_2d = phi::ReshapeToMatrix(*labels, rank - 1);
+      y_2d = phi::ReshapeToMatrix(*y, rank - 1);
     }

     int axis_dim = x->dims()[rank - 1];
paddle/fluid/operators/fused/multihead_matmul_op.cu

@@ -21,6 +21,7 @@
 #include "paddle/fluid/memory/malloc.h"
 #include "paddle/fluid/operators/math/bert_encoder_functor.h"
 #include "paddle/fluid/platform/float16.h"
+#include "paddle/phi/core/tensor_utils.h"
 #include "paddle/phi/kernels/funcs/blas/blas.h"

 namespace paddle {

@@ -343,10 +344,10 @@ class MultiHeadMatMulV2Kernel : public framework::OpKernel<T> {
     // (B*S, hidden)
     const phi::DenseTensor input_matrix =
-        framework::ReshapeToMatrix(*input, 2 /*x_num_col_dims */);
+        phi::ReshapeToMatrix(*input, 2 /*x_num_col_dims */);
     // (hidden, 3 * all_head_size)
     const phi::DenseTensor w_matrix =
-        framework::ReshapeToMatrix(*w, 1 /*y_num_col_dims*/);
+        phi::ReshapeToMatrix(*w, 1 /*y_num_col_dims*/);

     phi::DenseTensor temp_out_tensor;
     auto temp_out_dims =
paddle/fluid/operators/isfinite_op.h

@@ -129,17 +129,17 @@ inline void TensorIsfinite(const phi::DenseTensor& tensor,
 inline bool TensorContainsNAN(const phi::DenseTensor& tensor) {
   phi::DenseTensor out;
   TensorContainsNAN(tensor, &out);
-  return GetValue<bool>(&out);
+  return paddle::framework::GetValue<bool>(&out);
 }
 inline bool TensorContainsInf(const phi::DenseTensor& tensor) {
   phi::DenseTensor out;
   TensorContainsInf(tensor, &out);
-  return GetValue<bool>(&out);
+  return paddle::framework::GetValue<bool>(&out);
 }
 inline bool TensorIsfinite(const phi::DenseTensor& tensor) {
   phi::DenseTensor out;
   TensorIsfinite(tensor, &out);
-  return GetValue<bool>(&out);
+  return paddle::framework::GetValue<bool>(&out);
 }
 }  // namespace framework

 namespace operators {
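The three GetValue calls above gain an explicit paddle::framework:: qualification. A plausible reason (not stated in the commit itself) is that once phi declares its own GetValue(const phi::DenseTensor*), added later in this diff, argument-dependent lookup finds both the framework helper and the phi helper for an unqualified call on a phi::DenseTensor*, which makes the call ambiguous; qualifying the name picks the framework overload explicitly. A standalone sketch of that ambiguity, with stand-in types rather than real Paddle code:

// Standalone illustration (not Paddle code) of the lookup ambiguity that
// explicit qualification avoids.
#include <iostream>

namespace phi {
struct DenseTensor {};                    // stand-in for phi::DenseTensor

template <typename T>
T GetValue(const DenseTensor*) {          // helper living next to the type
  return T{1};
}
}  // namespace phi

namespace paddle {
namespace framework {
template <typename T>
T GetValue(const phi::DenseTensor*) {     // legacy fluid helper
  return T{2};
}

int Demo() {
  phi::DenseTensor t;
  // return GetValue<int>(&t);            // error: ambiguous, because ADL also
  //                                      // pulls in phi::GetValue
  return paddle::framework::GetValue<int>(&t);  // qualified call is unambiguous
}
}  // namespace framework
}  // namespace paddle

int main() { std::cout << paddle::framework::Demo() << "\n"; }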
paddle/phi/core/tensor_utils.cc

@@ -867,4 +867,60 @@ template void TensorToVector(const phi::DenseTensor& src,
 template void TensorToVector(const phi::DenseTensor& src,
                              std::vector<phi::dtype::complex<double>>* dst);

+phi::DenseTensor ReshapeToMatrix(const phi::DenseTensor& src,
+                                 int num_col_dims) {
+  int rank = src.dims().size();
+  PADDLE_ENFORCE_GE(
+      rank,
+      2,
+      phi::errors::InvalidArgument(
+          "'ReshapeToMatrix()' is only used for flatten high rank "
+          "tensors to matrixs. The dimensions of phi::DenseTensor must be "
+          "greater or equal than 2. "
+          "But received dimensions of phi::DenseTensor is %d",
+          rank));
+  if (rank == 2) {
+    return src;
+  }
+  phi::DenseTensor res;
+  res.ShareDataWith(src);
+  res.Resize(phi::flatten_to_2d(src.dims(), num_col_dims));
+  return res;
+}
+
+template <typename T>
+T GetValue(const phi::DenseTensor* x) {
+  T value = static_cast<T>(0);
+  if (!paddle::platform::is_cpu_place(x->place())) {
+    phi::DenseTensor cpu_x{};
+    phi::DeviceContextPool& pool = phi::DeviceContextPool::Instance();
+    phi::DeviceContext* dev_ctx = pool.Get(x->place());
+    phi::Copy(*dev_ctx, *x, phi::CPUPlace(), true, &cpu_x);
+    value = cpu_x.data<T>()[0];
+  } else {
+    value = x->data<T>()[0];
+  }
+  return value;
+}
+
+template bool GetValue(const phi::DenseTensor* x);
+template int16_t GetValue(const phi::DenseTensor* x);
+template int GetValue(const phi::DenseTensor* x);
+template int64_t GetValue(const phi::DenseTensor* x);
+template float GetValue(const phi::DenseTensor* x);
+template double GetValue(const phi::DenseTensor* x);
+template phi::dtype::bfloat16 GetValue(const phi::DenseTensor* x);
+template phi::dtype::float16 GetValue(const phi::DenseTensor* x);
+template phi::dtype::complex<float> GetValue(const phi::DenseTensor* x);
+template phi::dtype::complex<double> GetValue(const phi::DenseTensor* x);
+
 }  // namespace phi
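Two details of the new phi copies are easy to miss. The non-templated ReshapeToMatrix keeps the error message and flatten_to_2d logic of the framework version, with platform::errors swapped for phi::errors. GetValue<T>, however, is now defined in the .cc file, so the block of explicit "template ... GetValue(...)" instantiations at the end is what makes each supported dtype usable from other translation units; the device branch performs a blocking phi::Copy to CPUPlace before reading element 0. A minimal standalone sketch of that explicit-instantiation pattern (hypothetical names, not Paddle code):

// value_utils.h (hypothetical): only the declaration is visible to callers.
//   template <typename T>
//   T FirstElement(const std::vector<T>& v);
//
// value_utils.cc (hypothetical): the definition lives here, so this file must
// explicitly instantiate every element type callers are allowed to use.
#include <cstdint>
#include <vector>

template <typename T>
T FirstElement(const std::vector<T>& v) {
  return v.empty() ? T{} : v.front();
}

// Explicit instantiations: these emit the symbols other .cc files link
// against, mirroring the "template bool GetValue(...);" lines above.
template bool FirstElement(const std::vector<bool>& v);
template int FirstElement(const std::vector<int>& v);
template int64_t FirstElement(const std::vector<int64_t>& v);
template float FirstElement(const std::vector<float>& v);
template double FirstElement(const std::vector<double>& v);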
paddle/phi/core/tensor_utils.h

@@ -126,4 +126,22 @@ void TensorToVector(const phi::DenseTensor& src,
                     const phi::DeviceContext& ctx,
                     std::vector<T>* dst);

+phi::DenseTensor ReshapeToMatrix(const phi::DenseTensor& src,
+                                 int num_col_dims);
+
+template <typename T>
+T GetValue(const phi::DenseTensor* x);
+
+template <typename T, typename Context>
+inline T GetValue(const Context& dev_ctx, const DenseTensor& x) {
+  T value = static_cast<T>(0);
+  if (x.place() != CPUPlace()) {
+    DenseTensor cpu_x;
+    Copy(dev_ctx, x, CPUPlace(), true, &cpu_x);
+    value = cpu_x.data<T>()[0];
+  } else {
+    value = x.data<T>()[0];
+  }
+  return value;
+}
+
 }  // namespace phi
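The header also gains a second, context-aware overload, GetValue<T, Context>(dev_ctx, x). This is the helper that gpu/arange_kernel.cu, gpu/linspace_kernel.cu, and xpu/arange_kernel.cc each carried as a private copy before this commit; the deletions below simply switch them to this shared definition. A hedged sketch of the call pattern, modeled on the arange kernels (assumes the Paddle source tree; the kernel name and arguments are illustrative only):

// Illustrative only: mirrors how ArangeKernel reads its scalar inputs after
// this commit. Requires the Paddle build environment; not standalone.
#include "paddle/phi/core/tensor_utils.h"

namespace phi {

template <typename T, typename Context>
void ScalarDrivenKernelSketch(const Context& dev_ctx,
                              const DenseTensor& start,  // one-element tensor
                              const DenseTensor& step,   // one-element tensor
                              DenseTensor* out) {
  // GetValue performs a blocking copy to the host when start/step live on a
  // device, so the returned values are immediately usable on the CPU side.
  T start_value = GetValue<T, Context>(dev_ctx, start);
  T step_value = GetValue<T, Context>(dev_ctx, step);
  // ... derive the output size from the scalars and fill `out` ...
  (void)start_value;
  (void)step_value;
  (void)out;
}

}  // namespace phi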
paddle/phi/kernels/fusion/onednn/fused_matmul_kernel.cc

@@ -16,13 +16,14 @@
 #include "paddle/phi/backends/onednn/onednn_reuse.h"
 #include "paddle/phi/core/kernel_registry.h"
+#include "paddle/phi/core/tensor_utils.h"

 using dnnl::engine;
 using dnnl::inner_product_forward;
 using dnnl::memory;
 using dnnl::prop_kind;
 using dnnl::stream;
-using paddle::framework::ReshapeToMatrix;
+using phi::ReshapeToMatrix;

 namespace phi {
paddle/phi/kernels/gpu/arange_kernel.cu

@@ -23,19 +23,6 @@
 namespace phi {

-template <typename T, typename Context>
-inline T GetValue(const Context& dev_ctx, const DenseTensor& x) {
-  T value = static_cast<T>(0);
-  if (x.place() != CPUPlace()) {
-    DenseTensor cpu_x;
-    Copy(dev_ctx, x, CPUPlace(), true, &cpu_x);
-    value = cpu_x.data<T>()[0];
-  } else {
-    value = x.data<T>()[0];
-  }
-  return value;
-}
-
 template <typename T>
 __global__ void Range(T start, T step, int64_t size, T* out) {
   CUDA_KERNEL_LOOP(index, size) { out[index] = start + step * index; }
paddle/phi/kernels/gpu/linspace_kernel.cu

@@ -41,19 +41,6 @@ __global__ void LinspaceSpecialKernel(T start, T* out) {
   out[0] = static_cast<T>(start);
 }

-template <typename T, typename Context>
-T GetValue(const Context& ctx, const DenseTensor& x) {
-  T value = static_cast<T>(0);
-  if (x.place() != CPUPlace()) {
-    DenseTensor cpu_x;
-    Copy(ctx, x, CPUPlace(), true, &cpu_x);
-    value = cpu_x.data<T>()[0];
-  } else {
-    value = x.data<T>()[0];
-  }
-  return value;
-}
-
 template <typename T, typename Context>
 T GetValueOfExpectedType(const Context& ctx, const DenseTensor& x) {
   switch (x.dtype()) {
paddle/phi/kernels/impl/matmul_grad_kernel_impl.h

@@ -1872,12 +1872,10 @@ void MatmulWithFlattenGradKernel(const Context& dev_ctx,
                                  int y_num_col_dims,
                                  DenseTensor* x_grad,
                                  DenseTensor* y_grad) {
-  auto x_matrix = x.dims().size() > 2
-                      ? paddle::framework::ReshapeToMatrix(x, x_num_col_dims)
-                      : x;
-  auto y_matrix = y.dims().size() > 2
-                      ? paddle::framework::ReshapeToMatrix(y, y_num_col_dims)
-                      : y;
+  auto x_matrix =
+      x.dims().size() > 2 ? phi::ReshapeToMatrix(x, x_num_col_dims) : x;
+  auto y_matrix =
+      y.dims().size() > 2 ? phi::ReshapeToMatrix(y, y_num_col_dims) : y;
   auto* dout = &out_grad;

   DenseTensor dout_mat(*dout);

@@ -1898,9 +1896,7 @@ void MatmulWithFlattenGradKernel(const Context& dev_ctx,
   if (dx) {
     dev_ctx.template Alloc<T>(dx);
     DenseTensor dx_matrix =
-        dx->dims().size() > 2
-            ? paddle::framework::ReshapeToMatrix(*dx, x_num_col_dims)
-            : *dx;
+        dx->dims().size() > 2 ? phi::ReshapeToMatrix(*dx, x_num_col_dims) : *dx;
     // dx = dout * y'. dx: M x K, dout : M x N, y : K x N
     blas.MatMul(dout_mat, false, y_matrix, true, &dx_matrix);

@@ -1908,9 +1904,7 @@ void MatmulWithFlattenGradKernel(const Context& dev_ctx,
   if (dy) {
     dev_ctx.template Alloc<T>(dy);
     DenseTensor dy_matrix =
-        dy->dims().size() > 2
-            ? paddle::framework::ReshapeToMatrix(*dy, y_num_col_dims)
-            : *dy;
+        dy->dims().size() > 2 ? phi::ReshapeToMatrix(*dy, y_num_col_dims) : *dy;
     // dy = x' * dout. dy K x N, dout : M x N, x : M x K
     blas.MatMul(x_matrix, true, dout_mat, false, &dy_matrix);
   }

@@ -1929,12 +1923,10 @@ void MatmulWithFlattenDoubleGradKernel(
     DenseTensor* x_grad,
     DenseTensor* y_grad,
     DenseTensor* out_grad_grad) {
-  auto x_mat = x.dims().size() > 2
-                   ? paddle::framework::ReshapeToMatrix(x, x_num_col_dims)
-                   : x;
-  auto y_mat = y.dims().size() > 2
-                   ? paddle::framework::ReshapeToMatrix(y, y_num_col_dims)
-                   : y;
+  auto x_mat =
+      x.dims().size() > 2 ? phi::ReshapeToMatrix(x, x_num_col_dims) : x;
+  auto y_mat =
+      y.dims().size() > 2 ? phi::ReshapeToMatrix(y, y_num_col_dims) : y;

   const int m = phi::flatten_to_2d(x.dims(), x_num_col_dims)[0];
   const int n = phi::flatten_to_2d(y.dims(), y_num_col_dims)[1];

@@ -1965,9 +1957,8 @@ void MatmulWithFlattenDoubleGradKernel(
   // true, MatMul beta should be 1 to add result to ddout.
   bool ddout_flag = false;
   if (ddx) {
     auto ddx_mat =
-        ddx->dims().size() > 2
-            ? paddle::framework::ReshapeToMatrix(*ddx, x_num_col_dims)
-            : static_cast<const DenseTensor&>(*ddx);
+        ddx->dims().size() > 2 ? phi::ReshapeToMatrix(*ddx, x_num_col_dims)
+                               : static_cast<const DenseTensor&>(*ddx);
     // dy = ddx' * dout. dy : K x M, ddx' : K x M, dout : M x N

@@ -1975,9 +1966,8 @@ void MatmulWithFlattenDoubleGradKernel(
       dy->set_lod(y.lod());
       // allocate and reshape dy
       dev_ctx.template Alloc<T>(dy);
       DenseTensor dy_mat =
-          dy->dims().size() > 2
-              ? paddle::framework::ReshapeToMatrix(*dy, y_num_col_dims)
-              : *dy;
+          dy->dims().size() > 2 ? phi::ReshapeToMatrix(*dy, y_num_col_dims)
+                                : *dy;
       blas.MatMul(ddx_mat, true, dout_mat, false, &dy_mat);
     }

@@ -1994,18 +1984,16 @@ void MatmulWithFlattenDoubleGradKernel(
     }
   }
   if (ddy) {
     auto ddy_mat =
-        ddy->dims().size() > 2
-            ? paddle::framework::ReshapeToMatrix(*ddy, y_num_col_dims)
-            : static_cast<const DenseTensor&>(*ddy);
+        ddy->dims().size() > 2 ? phi::ReshapeToMatrix(*ddy, y_num_col_dims)
+                               : static_cast<const DenseTensor&>(*ddy);
     // dx = dout * ddy'. dout : M x N, ddy' : N x K, dx : M x K
     if (dx) {
       dx->set_lod(x.lod());
       // allocate and reshape dx
       dev_ctx.template Alloc<T>(dx);
       DenseTensor dx_mat =
-          dx->dims().size() > 2
-              ? paddle::framework::ReshapeToMatrix(*dx, x_num_col_dims)
-              : *dx;
+          dx->dims().size() > 2 ? phi::ReshapeToMatrix(*dx, x_num_col_dims)
+                                : *dx;
       blas.MatMul(dout_mat, false, ddy_mat, true, &dx_mat);
     }
paddle/phi/kernels/impl/matmul_kernel_impl.h

@@ -513,13 +513,9 @@ void MatmulWithFlattenKernel(const Context& dev_ctx,
                              int y_num_col_dims,
                              DenseTensor* out) {
   const DenseTensor x_matrix =
-      x.dims().size() > 2
-          ? paddle::framework::ReshapeToMatrix(x, x_num_col_dims)
-          : x;
+      x.dims().size() > 2 ? phi::ReshapeToMatrix(x, x_num_col_dims) : x;
   const DenseTensor y_matrix =
-      y.dims().size() > 2
-          ? paddle::framework::ReshapeToMatrix(y, y_num_col_dims)
-          : y;
+      y.dims().size() > 2 ? phi::ReshapeToMatrix(y, y_num_col_dims) : y;

   dev_ctx.template Alloc<T>(out);
   auto z_dim = out->dims();
paddle/phi/kernels/onednn/matmul_grad_kernel.cc

@@ -196,10 +196,8 @@ void MatmulWithFlattenGradKernel(const Context &dev_ctx,
                                  int y_num_col_dims,
                                  DenseTensor *x_grad,
                                  DenseTensor *y_grad) {
-  const DenseTensor reshaped_y =
-      paddle::framework::ReshapeToMatrix(y, y_num_col_dims);
-  const DenseTensor reshaped_x =
-      paddle::framework::ReshapeToMatrix(x, x_num_col_dims);
+  const DenseTensor reshaped_y = phi::ReshapeToMatrix(y, y_num_col_dims);
+  const DenseTensor reshaped_x = phi::ReshapeToMatrix(x, x_num_col_dims);

   const DenseTensor x_matrix = x.dims().size() > 2 ? reshaped_x : x;
   const DenseTensor y_matrix = y.dims().size() > 2 ? reshaped_y : y;
paddle/phi/kernels/onednn/matmul_kernel.cc

@@ -24,7 +24,7 @@ using dnnl::inner_product_forward;
 using dnnl::memory;
 using dnnl::prop_kind;
 using dnnl::stream;
-using paddle::framework::ReshapeToMatrix;
+using phi::ReshapeToMatrix;

 namespace phi {
paddle/phi/kernels/xpu/arange_kernel.cc

@@ -14,33 +14,17 @@ limitations under the License. */
 #include "paddle/phi/kernels/arange_kernel.h"

-#include "paddle/fluid/memory/memcpy.h"
 #include "paddle/phi/core/kernel_registry.h"
 #include "paddle/phi/kernels/funcs/range_function.h"

 namespace phi {

-template <typename T, typename Context>
-inline T GetValue(const Context& dev_ctx, const DenseTensor& x) {
-  T value = static_cast<T>(0);
-  if (x.place() != CPUPlace()) {
-    DenseTensor cpu_x;
-    Copy(dev_ctx, x, CPUPlace(), true, &cpu_x);
-    value = cpu_x.data<T>()[0];
-  } else {
-    value = x.data<T>()[0];
-  }
-  return value;
-}
-
 template <typename T, typename Context>
 void ArangeKernel(const Context& dev_ctx,
                   const DenseTensor& start,
                   const DenseTensor& end,
                   const DenseTensor& step,
                   DenseTensor* out) {
-  auto place = dev_ctx.GetPlace();
-  auto cpu_place = phi::CPUPlace();
   T start_value = GetValue<T, Context>(dev_ctx, start);
   T end_value = GetValue<T, Context>(dev_ctx, end);
   T step_value = GetValue<T, Context>(dev_ctx, step);

@@ -48,7 +32,7 @@ void ArangeKernel(const Context& dev_ctx,
   int64_t size = 0;
   phi::funcs::GetSize(start_value, end_value, step_value, &size);
   out->Resize(phi::make_ddim({size}));
-  T* out_data = dev_ctx.template Alloc<T>(out);
+  dev_ctx.template Alloc<T>(out);

   DenseTensor out_cpu;
   out_cpu.Resize({out->numel()});

@@ -60,8 +44,7 @@ void ArangeKernel(const Context& dev_ctx,
     out_cpu_data[i] = value;
     value += step_value;
   }
-  paddle::memory::Copy(
-      place, out_data, cpu_place, out_cpu_data, out->numel() * sizeof(T));
+  phi::Copy(dev_ctx, out_cpu, out->place(), true, out);
 }

 }  // namespace phi
paddle/phi/kernels/xpu/matmul_grad_kernel.cc

@@ -13,7 +13,6 @@
 // limitations under the License.

 #include "paddle/phi/kernels/matmul_grad_kernel.h"

-#include "paddle/fluid/framework/tensor_util.h"
 #include "paddle/phi/backends/xpu/enforce_xpu.h"
 #include "paddle/phi/backends/xpu/xpu_context.h"
 #include "paddle/phi/core/kernel_registry.h"

@@ -110,11 +109,9 @@ void MatmulWithFlattenGradKernel(const Context& dev_ctx,
                                  DenseTensor* y_grad) {
   using XPUType = typename XPUTypeTrait<T>::Type;
-  auto x_matrix = x.dims().size() > 2
-                      ? paddle::framework::ReshapeToMatrix(x, x_num_col_dims)
-                      : static_cast<const DenseTensor&>(x);
-  auto y_matrix = y.dims().size() > 2
-                      ? paddle::framework::ReshapeToMatrix(y, y_num_col_dims)
-                      : static_cast<const DenseTensor&>(y);
+  auto x_matrix = x.dims().size() > 2 ? phi::ReshapeToMatrix(x, x_num_col_dims)
+                                      : static_cast<const DenseTensor&>(x);
+  auto y_matrix = y.dims().size() > 2 ? phi::ReshapeToMatrix(y, y_num_col_dims)
+                                      : static_cast<const DenseTensor&>(y);
   DenseTensor dout_mat;
   dout_mat.Resize({phi::flatten_to_2d(x.dims(), x_num_col_dims)[0],
paddle/phi/kernels/xpu/matmul_kernel.cc

@@ -13,7 +13,6 @@
 // limitations under the License.

 #include "paddle/phi/kernels/matmul_kernel.h"

-#include "paddle/fluid/framework/tensor_util.h"
 #include "paddle/phi/backends/xpu/enforce_xpu.h"
 #include "paddle/phi/backends/xpu/xpu_context.h"
 #include "paddle/phi/core/kernel_registry.h"

@@ -52,13 +51,9 @@ void MatmulWithFlattenKernel(const Context& dev_ctx,
                              DenseTensor* out) {
   using XPUType = typename XPUTypeTrait<T>::Type;
   const DenseTensor x_matrix =
-      x.dims().size() > 2
-          ? paddle::framework::ReshapeToMatrix(x, x_num_col_dims)
-          : x;
+      x.dims().size() > 2 ? phi::ReshapeToMatrix(x, x_num_col_dims) : x;
   const DenseTensor y_matrix =
-      y.dims().size() > 2
-          ? paddle::framework::ReshapeToMatrix(y, y_num_col_dims)
-          : y;
+      y.dims().size() > 2 ? phi::ReshapeToMatrix(y, y_num_col_dims) : y;
   dev_ctx.template Alloc<T>(out);

   const XPUType* x_ptr = reinterpret_cast<const XPUType*>(x_matrix.data<T>());