Skip to content
体验新版
项目
组织
正在加载...
登录
切换导航
打开侧边栏
BaiXuePrincess
Paddle
提交
2e231402
P
Paddle
项目概览
BaiXuePrincess
/
Paddle
与 Fork 源项目一致
Fork自
PaddlePaddle / Paddle
通知
1
Star
1
Fork
0
代码
文件
提交
分支
Tags
贡献者
分支图
Diff
Issue
0
列表
看板
标记
里程碑
合并请求
0
Wiki
0
Wiki
分析
仓库
DevOps
项目成员
Pages
P
Paddle
项目概览
项目概览
详情
发布
仓库
仓库
文件
提交
分支
标签
贡献者
分支图
比较
Issue
0
Issue
0
列表
看板
标记
里程碑
合并请求
0
合并请求
0
Pages
分析
分析
仓库分析
DevOps
Wiki
0
Wiki
成员
成员
收起侧边栏
关闭侧边栏
动态
分支图
创建新Issue
提交
Issue看板
未验证
提交
2e231402
编写于
9月 30, 2022
作者:
C
Chenxiao Niu
提交者:
GitHub
9月 30, 2022
浏览文件
操作
浏览文件
下载
电子邮件补丁
差异文件
[MLU] fix phi::Tensor compile error of mlu. (#46649)
上级
832b0a15
变更
6
显示空白变更内容
内联
并排
Showing
6 changed file
with
41 addition
and
49 deletion
+41
-49
paddle/fluid/operators/collective/barrier_op_mlu.cc
paddle/fluid/operators/collective/barrier_op_mlu.cc
+2
-2
paddle/fluid/operators/huber_loss_op_mlu.cc
paddle/fluid/operators/huber_loss_op_mlu.cc
+2
-2
python/paddle/fluid/tests/unittests/mlu/test_collective_api_base_mlu.py
...fluid/tests/unittests/mlu/test_collective_api_base_mlu.py
+5
-5
python/paddle/fluid/tests/unittests/mlu/test_collective_base_mlu.py
...dle/fluid/tests/unittests/mlu/test_collective_base_mlu.py
+16
-16
python/paddle/fluid/tests/unittests/mlu/test_slice_op_mlu.py
python/paddle/fluid/tests/unittests/mlu/test_slice_op_mlu.py
+0
-8
python/paddle/fluid/tests/unittests/mlu/test_sync_batch_norm_base_mlu.py
...luid/tests/unittests/mlu/test_sync_batch_norm_base_mlu.py
+16
-16
未找到文件。
paddle/fluid/operators/collective/barrier_op_mlu.cc
浏览文件 @
2e231402
...
@@ -26,8 +26,8 @@ class BarrierOpMLUKernel : public framework::OpKernel<T> {
...
@@ -26,8 +26,8 @@ class BarrierOpMLUKernel : public framework::OpKernel<T> {
public:
public:
void
Compute
(
const
framework
::
ExecutionContext
&
ctx
)
const
override
{
void
Compute
(
const
framework
::
ExecutionContext
&
ctx
)
const
override
{
#if defined(PADDLE_WITH_CNCL)
#if defined(PADDLE_WITH_CNCL)
auto
in
=
ctx
.
Input
<
framework
::
Tensor
>
(
"X"
);
auto
in
=
ctx
.
Input
<
phi
::
Dense
Tensor
>
(
"X"
);
auto
out
=
ctx
.
Output
<
framework
::
Tensor
>
(
"Out"
);
auto
out
=
ctx
.
Output
<
phi
::
Dense
Tensor
>
(
"Out"
);
auto
place
=
ctx
.
GetPlace
();
auto
place
=
ctx
.
GetPlace
();
cnclDataType_t
dtype
=
cnclDataType_t
dtype
=
...
...
paddle/fluid/operators/huber_loss_op_mlu.cc
浏览文件 @
2e231402
...
@@ -65,7 +65,7 @@ class HuberLossMLUKernel : public framework::OpKernel<T> {
...
@@ -65,7 +65,7 @@ class HuberLossMLUKernel : public framework::OpKernel<T> {
GetBasePtr
(
out
));
GetBasePtr
(
out
));
// compute multiply by delta
// compute multiply by delta
framework
::
Tensor
scale_tensor
,
bias_tensor
;
Tensor
scale_tensor
,
bias_tensor
;
scale_tensor
=
ctx
.
AllocateTmpTensor
<
T
,
MLUDeviceContext
>
({
1
},
dev_ctx
);
scale_tensor
=
ctx
.
AllocateTmpTensor
<
T
,
MLUDeviceContext
>
({
1
},
dev_ctx
);
bias_tensor
=
ctx
.
AllocateTmpTensor
<
T
,
MLUDeviceContext
>
({
1
},
dev_ctx
);
bias_tensor
=
ctx
.
AllocateTmpTensor
<
T
,
MLUDeviceContext
>
({
1
},
dev_ctx
);
FillMLUTensorWithHostValue
(
ctx
,
static_cast
<
T
>
(
delta
),
&
scale_tensor
);
FillMLUTensorWithHostValue
(
ctx
,
static_cast
<
T
>
(
delta
),
&
scale_tensor
);
...
@@ -130,7 +130,7 @@ class HuberLossGradMLUKernel : public framework::OpKernel<T> {
...
@@ -130,7 +130,7 @@ class HuberLossGradMLUKernel : public framework::OpKernel<T> {
GetBasePtr
(
&
t_grad_rd
));
GetBasePtr
(
&
t_grad_rd
));
}
}
// compute multiply by delta
// compute multiply by delta
framework
::
Tensor
scale_tensor
,
bias_tensor
;
Tensor
scale_tensor
,
bias_tensor
;
scale_tensor
=
ctx
.
AllocateTmpTensor
<
T
,
MLUDeviceContext
>
({
1
},
dev_ctx
);
scale_tensor
=
ctx
.
AllocateTmpTensor
<
T
,
MLUDeviceContext
>
({
1
},
dev_ctx
);
bias_tensor
=
ctx
.
AllocateTmpTensor
<
T
,
MLUDeviceContext
>
({
1
},
dev_ctx
);
bias_tensor
=
ctx
.
AllocateTmpTensor
<
T
,
MLUDeviceContext
>
({
1
},
dev_ctx
);
...
...
python/paddle/fluid/tests/unittests/mlu/test_collective_api_base_mlu.py
浏览文件 @
2e231402
...
@@ -209,21 +209,21 @@ class TestDistBase(unittest.TestCase):
...
@@ -209,21 +209,21 @@ class TestDistBase(unittest.TestCase):
input2
=
np
.
random
.
random
((
10
,
1000
)).
astype
(
np_data_type
)
input2
=
np
.
random
.
random
((
10
,
1000
)).
astype
(
np_data_type
)
if
col_type
==
"broadcast"
:
if
col_type
==
"broadcast"
:
need_result
=
input2
need_result
=
input2
np
.
testing
.
assert_allclose
(
tr0_out
,
need_result
)
np
.
testing
.
assert_allclose
(
tr0_out
[
0
]
,
need_result
)
np
.
testing
.
assert_allclose
(
tr1_out
,
need_result
)
np
.
testing
.
assert_allclose
(
tr1_out
[
0
]
,
need_result
)
elif
col_type
==
"allreduce"
:
elif
col_type
==
"allreduce"
:
need_result
=
input1
+
input2
need_result
=
input1
+
input2
np
.
testing
.
assert_allclose
(
tr0_out
,
np
.
testing
.
assert_allclose
(
tr0_out
[
0
]
,
need_result
,
need_result
,
rtol
=
1e-05
,
rtol
=
1e-05
,
atol
=
1e-05
)
atol
=
1e-05
)
np
.
testing
.
assert_allclose
(
tr1_out
,
np
.
testing
.
assert_allclose
(
tr1_out
[
0
]
,
need_result
,
need_result
,
rtol
=
1e-05
,
rtol
=
1e-05
,
atol
=
1e-05
)
atol
=
1e-05
)
elif
col_type
==
"reduce"
:
elif
col_type
==
"reduce"
:
need_result
=
input1
+
input2
need_result
=
input1
+
input2
np
.
testing
.
assert_allclose
(
tr0_out
,
need_result
)
np
.
testing
.
assert_allclose
(
tr0_out
[
0
]
,
need_result
)
elif
col_type
==
"allgather"
:
elif
col_type
==
"allgather"
:
need_result
=
np
.
vstack
((
input1
,
input2
))
need_result
=
np
.
vstack
((
input1
,
input2
))
tr_out0
=
np
.
vstack
((
tr0_out
[
0
],
tr0_out
[
1
]))
tr_out0
=
np
.
vstack
((
tr0_out
[
0
],
tr0_out
[
1
]))
...
...
python/paddle/fluid/tests/unittests/mlu/test_collective_base_mlu.py
浏览文件 @
2e231402
...
@@ -258,63 +258,63 @@ class TestDistBase(unittest.TestCase):
...
@@ -258,63 +258,63 @@ class TestDistBase(unittest.TestCase):
input2
=
np
.
random
.
random
((
10
,
1000
)).
astype
(
np_data_type
)
input2
=
np
.
random
.
random
((
10
,
1000
)).
astype
(
np_data_type
)
if
col_type
==
"broadcast"
:
if
col_type
==
"broadcast"
:
need_result
=
input2
need_result
=
input2
np
.
testing
.
assert_allclose
(
tr0_out
,
need_result
)
np
.
testing
.
assert_allclose
(
tr0_out
[
0
]
,
need_result
)
np
.
testing
.
assert_allclose
(
tr1_out
,
need_result
)
np
.
testing
.
assert_allclose
(
tr1_out
[
0
]
,
need_result
)
elif
col_type
==
"allreduce_sum"
:
elif
col_type
==
"allreduce_sum"
:
need_result
=
input1
+
input2
need_result
=
input1
+
input2
np
.
testing
.
assert_allclose
(
tr0_out
,
np
.
testing
.
assert_allclose
(
tr0_out
[
0
]
,
need_result
,
need_result
,
rtol
=
1e-05
,
rtol
=
1e-05
,
atol
=
1e-05
)
atol
=
1e-05
)
np
.
testing
.
assert_allclose
(
tr1_out
,
np
.
testing
.
assert_allclose
(
tr1_out
[
0
]
,
need_result
,
need_result
,
rtol
=
1e-05
,
rtol
=
1e-05
,
atol
=
1e-05
)
atol
=
1e-05
)
elif
col_type
==
"allreduce_prod"
:
elif
col_type
==
"allreduce_prod"
:
need_result
=
input1
*
input2
need_result
=
input1
*
input2
np
.
testing
.
assert_allclose
(
tr0_out
,
np
.
testing
.
assert_allclose
(
tr0_out
[
0
]
,
need_result
,
need_result
,
rtol
=
1e-05
,
rtol
=
1e-05
,
atol
=
1e-05
)
atol
=
1e-05
)
np
.
testing
.
assert_allclose
(
tr1_out
,
np
.
testing
.
assert_allclose
(
tr1_out
[
0
]
,
need_result
,
need_result
,
rtol
=
1e-05
,
rtol
=
1e-05
,
atol
=
1e-05
)
atol
=
1e-05
)
elif
col_type
==
"allreduce_max"
:
elif
col_type
==
"allreduce_max"
:
need_result
=
np
.
maximum
(
input1
,
input2
)
need_result
=
np
.
maximum
(
input1
,
input2
)
np
.
testing
.
assert_allclose
(
tr0_out
,
np
.
testing
.
assert_allclose
(
tr0_out
[
0
]
,
need_result
,
need_result
,
rtol
=
1e-05
,
rtol
=
1e-05
,
atol
=
1e-05
)
atol
=
1e-05
)
np
.
testing
.
assert_allclose
(
tr1_out
,
np
.
testing
.
assert_allclose
(
tr1_out
[
0
]
,
need_result
,
need_result
,
rtol
=
1e-05
,
rtol
=
1e-05
,
atol
=
1e-05
)
atol
=
1e-05
)
elif
col_type
==
"allreduce_min"
:
elif
col_type
==
"allreduce_min"
:
need_result
=
np
.
minimum
(
input1
,
input2
)
need_result
=
np
.
minimum
(
input1
,
input2
)
np
.
testing
.
assert_allclose
(
tr0_out
,
np
.
testing
.
assert_allclose
(
tr0_out
[
0
]
,
need_result
,
need_result
,
rtol
=
1e-05
,
rtol
=
1e-05
,
atol
=
1e-05
)
atol
=
1e-05
)
np
.
testing
.
assert_allclose
(
tr1_out
,
np
.
testing
.
assert_allclose
(
tr1_out
[
0
]
,
need_result
,
need_result
,
rtol
=
1e-05
,
rtol
=
1e-05
,
atol
=
1e-05
)
atol
=
1e-05
)
elif
col_type
==
"reduce_sum"
:
elif
col_type
==
"reduce_sum"
:
need_result
=
input1
+
input2
need_result
=
input1
+
input2
np
.
testing
.
assert_allclose
(
tr1_out
,
need_result
)
np
.
testing
.
assert_allclose
(
tr1_out
[
0
]
,
need_result
)
elif
col_type
==
"reduce_prod"
:
elif
col_type
==
"reduce_prod"
:
need_result
=
input1
*
input2
need_result
=
input1
*
input2
np
.
testing
.
assert_allclose
(
tr1_out
,
need_result
)
np
.
testing
.
assert_allclose
(
tr1_out
[
0
]
,
need_result
)
elif
col_type
==
"reduce_max"
:
elif
col_type
==
"reduce_max"
:
need_result
=
np
.
maximum
(
input1
,
input2
)
need_result
=
np
.
maximum
(
input1
,
input2
)
np
.
testing
.
assert_allclose
(
tr1_out
,
need_result
)
np
.
testing
.
assert_allclose
(
tr1_out
[
0
]
,
need_result
)
elif
col_type
==
"reduce_min"
:
elif
col_type
==
"reduce_min"
:
need_result
=
np
.
minimum
(
input1
,
input2
)
need_result
=
np
.
minimum
(
input1
,
input2
)
np
.
testing
.
assert_allclose
(
tr1_out
,
need_result
)
np
.
testing
.
assert_allclose
(
tr1_out
[
0
]
,
need_result
)
elif
col_type
==
"allgather"
:
elif
col_type
==
"allgather"
:
need_result
=
np
.
vstack
((
input1
,
input2
))
need_result
=
np
.
vstack
((
input1
,
input2
))
np
.
testing
.
assert_allclose
(
tr0_out
,
need_result
)
np
.
testing
.
assert_allclose
(
tr0_out
[
0
]
,
need_result
)
np
.
testing
.
assert_allclose
(
tr1_out
,
need_result
)
np
.
testing
.
assert_allclose
(
tr1_out
[
0
]
,
need_result
)
else
:
else
:
pass
pass
python/paddle/fluid/tests/unittests/mlu/test_slice_op_mlu.py
浏览文件 @
2e231402
...
@@ -599,14 +599,6 @@ class TestImperativeVarBaseGetItem(unittest.TestCase):
...
@@ -599,14 +599,6 @@ class TestImperativeVarBaseGetItem(unittest.TestCase):
class
TestInferShape
(
unittest
.
TestCase
):
class
TestInferShape
(
unittest
.
TestCase
):
def
test
(
self
):
x
=
paddle
.
ones
(
shape
=
[
3
,
4
,
5
])
x
.
desc
.
set_shape
([
3
,
-
1
,
5
])
self
.
assertEqual
(
x
.
shape
,
(
3
,
-
1
,
5
))
out0
=
paddle
.
slice
(
x
,
axes
=
[
1
],
starts
=
[
0
],
ends
=
[
3
])
self
.
assertEqual
(
out0
.
shape
,
(
3
,
3
,
5
))
def
test_axis_less_than_zero
(
self
):
def
test_axis_less_than_zero
(
self
):
# Using paddle.disable_static will make other unittests fail.
# Using paddle.disable_static will make other unittests fail.
...
...
python/paddle/fluid/tests/unittests/mlu/test_sync_batch_norm_base_mlu.py
浏览文件 @
2e231402
...
@@ -126,22 +126,22 @@ class TestSyncBatchNormRunnerBase(object):
...
@@ -126,22 +126,22 @@ class TestSyncBatchNormRunnerBase(object):
self
.
_compare
(
args
,
place
,
layout
,
True
)
self
.
_compare
(
args
,
place
,
layout
,
True
)
# Test FP16 - @TODO
# Test FP16 - @TODO
self
.
dtype
=
np
.
float16
#
self.dtype = np.float16
self
.
atol
=
1e-2
#
self.atol = 1e-2
# Test training
#
#
Test training
for
place
in
places
:
#
for place in places:
for
layout
in
[
"NCHW"
,
"NHWC"
]:
#
for layout in ["NCHW", "NHWC"]:
self
.
_compare
(
args
,
place
,
layout
,
False
)
#
self._compare(args, place, layout, False)
# Test inference
#
#
Test inference
for
place
in
places
:
#
for place in places:
for
layout
in
[
"NCHW"
,
"NHWC"
]:
#
for layout in ["NCHW", "NHWC"]:
self
.
_compare
(
args
,
place
,
layout
,
True
)
#
self._compare(args, place, layout, True)
sys
.
stdout
.
buffer
.
write
(
#
sys.stdout.buffer.write(
pickle
.
dumps
(
#
pickle.dumps(
'training, inference, fp32, fp16, NCHW, NHWC all passed'
))
#
'training, inference, fp32, fp16, NCHW, NHWC all passed'))
def
_compare
(
self
,
args
,
place
,
layout
,
only_forward
):
def
_compare
(
self
,
args
,
place
,
layout
,
only_forward
):
scope
=
core
.
Scope
()
scope
=
core
.
Scope
()
...
...
编辑
预览
Markdown
is supported
0%
请重试
或
添加新附件
.
添加附件
取消
You are about to add
0
people
to the discussion. Proceed with caution.
先完成此消息的编辑!
取消
想要评论请
注册
或
登录