BaiXuePrincess / Paddle
Forked from PaddlePaddle / Paddle (in sync with the fork source)

Commit e19d6ca3
Authored Sep 27, 2020 by tianshuo78520a

    notest;add kunlun_test

Parent: 73da8c02

Showing 5 changed files with 1 addition and 1077 deletions (+1, -1077)
paddle/scripts/paddle_build.sh                                        +1   -0
python/paddle/fluid/tests/unittests/xpu/test_activation_op.py         +0   -215
python/paddle/fluid/tests/unittests/xpu/test_elementwise_add_op.py    +0   -346
python/paddle/fluid/tests/unittests/xpu/test_matmul_op.py             +0   -355
python/paddle/fluid/tests/unittests/xpu/test_mul_op.py                +0   -161

paddle/scripts/paddle_build.sh

@@ -1694,6 +1694,7 @@ function main() {
       check_xpu)
           cmake_gen_and_build ${PYTHON_ABI:-""} ${parallel_number}
           parallel_test
+          ;;
       cmake_gen)
           cmake_gen ${PYTHON_ABI:-""}
           ;;
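The single added line appears to be the `;;` terminator for the `check_xpu` case: without it, the following `cmake_gen)` pattern leaves the `case` block in `main()` syntactically invalid, so the new Kunlun/XPU test mode could not be dispatched at all. Presumably (only this hunk of `main()` is shown, so the invocation is an assumption) the mode is selected by passing the case label as an argument, e.g. `bash paddle/scripts/paddle_build.sh check_xpu`, which builds via `cmake_gen_and_build` and then runs `parallel_test`.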

python/paddle/fluid/tests/unittests/xpu/test_activation_op.py
deleted (file mode 100755 → 0)
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from __future__ import print_function

import sys
sys.path.append("..")
import unittest
import numpy as np
import paddle.fluid.core as core
from op_test import OpTest
from scipy.special import expit, erf
import paddle
import paddle.fluid as fluid
import paddle.nn as nn
import paddle.nn.functional as F
from paddle.fluid import compiler, Program, program_guard


@unittest.skipIf(not paddle.is_compiled_with_xpu(),
                 "core is not compiled with XPU")
class TestXPUActivation(OpTest):
    def setUp(self):
        self.op_type = "exp"
        self.init_dtype()
        self.init_kernel_type()

        x = np.random.uniform(0.1, 1, [11, 17]).astype(self.dtype)
        out = np.exp(x)

        self.attrs = {'use_xpu': True}
        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def init_dtype(self):
        self.dtype = np.float32

    def test_check_output(self):
        if paddle.is_compiled_with_xpu():
            place = paddle.XPUPlace(0)
            self.check_output_with_place(place, atol=1e-3)

    def init_kernel_type(self):
        pass


@unittest.skipIf(not paddle.is_compiled_with_xpu(),
                 "core is not compiled with XPU")
class TestXPUSigmoid(TestXPUActivation):
    def setUp(self):
        self.op_type = "sigmoid"
        self.init_dtype()

        x = np.random.uniform(-1, 1, [11, 17]).astype(self.dtype)
        out = 1 / (1 + np.exp(-x))

        self.attrs = {'use_xpu': True}
        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if paddle.is_compiled_with_xpu():
            place = paddle.XPUPlace(0)
            self.check_grad_with_place(
                place, ['X'], 'Out', max_relative_error=0.01)


@unittest.skipIf(not paddle.is_compiled_with_xpu(),
                 "core is not compiled with XPU")
class TestXPUTanh(TestXPUActivation):
    def setUp(self):
        self.op_type = "tanh"
        self.init_dtype()
        x = np.random.uniform(0.1, 1, [11, 17]).astype(self.dtype)
        out = np.tanh(x)

        self.attrs = {'use_xpu': True}
        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}


@unittest.skipIf(not paddle.is_compiled_with_xpu(),
                 "core is not compiled with XPU")
class TestXPUSqrt(TestXPUActivation):
    def setUp(self):
        self.op_type = "sqrt"
        self.init_dtype()
        x = np.random.uniform(0.1, 1, [11, 17]).astype(self.dtype)
        out = np.sqrt(x)

        self.attrs = {'use_xpu': True}
        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}


@unittest.skipIf(not paddle.is_compiled_with_xpu(),
                 "core is not compiled with XPU")
class TestXPUAbs(TestXPUActivation):
    def setUp(self):
        self.op_type = "abs"
        self.init_dtype()
        x = np.random.uniform(-1, 1, [4, 25]).astype(self.dtype)
        # Because we set delta = 0.005 in calculating numeric gradient,
        # if x is too small, such as 0.002, x_neg will be -0.003
        # x_pos will be 0.007, so the numeric gradient is inaccurate.
        # we should avoid this
        x[np.abs(x) < 0.005] = 0.02
        out = np.abs(x)

        self.attrs = {'use_xpu': True}
        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}


@unittest.skipIf(not paddle.is_compiled_with_xpu(),
                 "core is not compiled with XPU")
class TestXPURelu(TestXPUActivation):
    def setUp(self):
        self.op_type = "relu"
        self.init_dtype()
        x = np.random.uniform(-1, 1, [11, 17]).astype(self.dtype)
        # The same reason with TestAbs
        x[np.abs(x) < 0.005] = 0.02
        out = np.maximum(x, 0)

        self.attrs = {'use_xpu': True}
        self.inputs = {'X': x}
        self.outputs = {'Out': out}


@unittest.skipIf(not paddle.is_compiled_with_xpu(),
                 "core is not compiled with XPU")
class TestXPUGelu(TestXPUActivation):
    def setUp(self):
        self.op_type = "gelu"
        self.init_dtype()
        approximate = False
        x = np.random.uniform(-1, 1, [11, 17]).astype(self.dtype)
        out = gelu(x, approximate)

        self.inputs = {'X': x}
        self.outputs = {'Out': out}
        self.attrs = {"approximate": approximate, 'use_xpu': True}


def gelu(x, approximate):
    if approximate:
        y_ref = 0.5 * x * (1.0 + np.tanh(
            np.sqrt(2 / np.pi) * (x + 0.044715 * np.power(x, 3))))
    else:
        y_ref = 0.5 * x * (1 + erf(x / np.sqrt(2)))
    return y_ref.astype(x.dtype)


@unittest.skipIf(not paddle.is_compiled_with_xpu(),
                 "core is not compiled with XPU")
class TestXPULog(TestXPUActivation):
    def setUp(self):
        self.op_type = "log"
        self.init_dtype()
        x = np.random.uniform(0.1, 1, [11, 17]).astype(self.dtype)
        out = np.log(x)

        self.attrs = {'use_xpu': True}
        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}


@unittest.skipIf(not paddle.is_compiled_with_xpu(),
                 "core is not compiled with XPU")
class TestXPUSquare(TestXPUActivation):
    def setUp(self):
        self.op_type = "square"
        self.init_dtype()
        x = np.random.uniform(0.1, 1, [11, 17]).astype(self.dtype)
        out = np.square(x)

        self.attrs = {'use_xpu': True}
        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}


@unittest.skipIf(not paddle.is_compiled_with_xpu(),
                 "core is not compiled with XPU")
class TestXPUPow(TestXPUActivation):
    def setUp(self):
        self.op_type = "pow"
        self.init_dtype()
        x = np.random.uniform(1, 2, [11, 17]).astype(self.dtype)
        out = np.power(x, 3)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.attrs = {'factor': 3.0, 'use_xpu': True}
        self.outputs = {'Out': out}


if __name__ == "__main__":
    unittest.main()
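
The `gelu` helper above encodes both standard GELU formulations. As a quick standalone sanity check of those formulas (plain NumPy/SciPy, no Paddle required; the tolerance shown is an illustrative assumption, not taken from the test):

import numpy as np
from scipy.special import erf

def gelu_exact(x):
    # exact form, as in gelu(x, approximate=False) above
    return 0.5 * x * (1 + erf(x / np.sqrt(2)))

def gelu_tanh(x):
    # tanh approximation, as in gelu(x, approximate=True) above
    return 0.5 * x * (1.0 + np.tanh(
        np.sqrt(2 / np.pi) * (x + 0.044715 * np.power(x, 3))))

x = np.linspace(-3, 3, 601)
# the two variants agree closely but not exactly
assert np.max(np.abs(gelu_exact(x) - gelu_tanh(x))) < 1e-2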

python/paddle/fluid/tests/unittests/xpu/test_elementwise_add_op.py
deleted (file mode 100644 → 0)
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from __future__ import print_function

import sys
sys.path.append("..")
import unittest
import numpy as np
import paddle
import paddle.fluid.core as core
from op_test import OpTest, skip_check_grad_ci
import paddle.fluid as fluid
from paddle.fluid import compiler, Program, program_guard


class TestElementwiseAddOp(OpTest):
    def init_kernel_type(self):
        self.use_mkldnn = False

    def setUp(self):
        self.op_type = "elementwise_add"
        self.init_dtype()
        self.init_input_output()
        self.init_kernel_type()
        self.init_axis()

        self.inputs = {
            'X': OpTest.np_dtype_to_fluid_dtype(self.x),
            'Y': OpTest.np_dtype_to_fluid_dtype(self.y)
        }
        self.attrs = {'axis': self.axis, 'use_mkldnn': self.use_mkldnn}
        self.outputs = {'Out': self.out}

    def test_check_output(self):
        # TODO(wangzhongpu): support mkldnn op in dygraph mode
        self.check_output(check_dygraph=(self.use_mkldnn == False))

    def test_check_grad_normal(self):
        # TODO(wangzhongpu): support mkldnn op in dygraph mode
        if self.dtype == np.float16:
            return
        self.check_grad(
            ['X', 'Y'], 'Out', check_dygraph=(self.use_mkldnn == False))

    def test_check_grad_ingore_x(self):
        # TODO(wangzhongpu): support mkldnn op in dygraph mode
        if self.dtype == np.float16:
            return
        self.check_grad(
            ['Y'], 'Out',
            no_grad_set=set("X"),
            check_dygraph=(self.use_mkldnn == False))

    def test_check_grad_ingore_y(self):
        # TODO(wangzhongpu): support mkldnn op in dygraph mode
        if self.dtype == np.float16:
            return
        self.check_grad(
            ['X'], 'Out',
            no_grad_set=set('Y'),
            check_dygraph=(self.use_mkldnn == False))

    def init_input_output(self):
        self.x = np.random.uniform(0.1, 1, [13, 17]).astype(self.dtype)
        self.y = np.random.uniform(0.1, 1, [13, 17]).astype(self.dtype)
        self.out = np.add(self.x, self.y)

    def init_dtype(self):
        self.dtype = np.float64

    def init_axis(self):
        self.axis = -1


@unittest.skipIf(not paddle.is_compiled_with_xpu(),
                 "core is not compiled with XPU")
class TestXPUElementwiseAddOp(OpTest):
    def setUp(self):
        self.op_type = "elementwise_add"
        self.init_dtype()
        self.init_input_output()
        self.init_axis()

        self.inputs = {'X': self.x, 'Y': self.y}
        self.attrs = {'axis': self.axis, 'use_mkldnn': False, 'use_xpu': True}
        self.outputs = {'Out': self.out}

    def test_check_output(self):
        if self.dtype == np.float32 and paddle.is_compiled_with_xpu():
            place = paddle.XPUPlace(0)
            self.check_output_with_place(place)

    def test_check_grad_normal(self):
        if self.dtype == np.float32 and paddle.is_compiled_with_xpu():
            place = paddle.XPUPlace(0)
            self.check_grad_with_place(place, ['X', 'Y'], 'Out')

    def test_check_grad_ingore_x(self):
        if self.dtype == np.float32 and paddle.is_compiled_with_xpu():
            place = paddle.XPUPlace(0)
            self.check_grad_with_place(place, ['Y'], 'Out')

    def test_check_grad_ingore_y(self):
        if self.dtype == np.float32 and paddle.is_compiled_with_xpu():
            place = paddle.XPUPlace(0)
            self.check_grad_with_place(place, ['X'], 'Out')

    def init_input_output(self):
        self.x = np.random.uniform(0.1, 1, [13, 17]).astype(self.dtype)
        self.y = np.random.uniform(0.1, 1, [13, 17]).astype(self.dtype)
        self.out = np.add(self.x, self.y)

    def init_dtype(self):
        self.dtype = np.float32

    def init_axis(self):
        self.axis = -1


@skip_check_grad_ci(
    reason="[skip shape check] Use y_shape(1) to test broadcast.")
class TestElementwiseAddOp_scalar(TestElementwiseAddOp):
    def init_input_output(self):
        self.x = np.random.rand(2, 3, 4).astype(self.dtype)
        self.y = np.random.rand(1).astype(self.dtype)
        self.out = self.x + self.y


@skip_check_grad_ci(
    reason="[skip shape check] Use y_shape(1,1) to test broadcast.")
class TestElementwiseAddOp_scalar2(TestElementwiseAddOp):
    def init_input_output(self):
        self.x = np.random.rand(2, 3, 4).astype(self.dtype)
        self.y = np.random.rand(1, 1).astype(self.dtype)
        self.out = self.x + self.y


class TestElementwiseAddOp_Vector(TestElementwiseAddOp):
    def init_input_output(self):
        self.x = np.random.random((100, )).astype(self.dtype)
        self.y = np.random.random((100, )).astype(self.dtype)
        self.out = np.add(self.x, self.y)


class TestElementwiseAddOp_broadcast_0(TestElementwiseAddOp):
    def init_input_output(self):
        self.x = np.random.rand(100, 2, 3).astype(self.dtype)
        self.y = np.random.rand(100).astype(self.dtype)
        self.out = self.x + self.y.reshape(100, 1, 1)

    def init_axis(self):
        self.axis = 0


class TestElementwiseAddOp_broadcast_1(TestElementwiseAddOp):
    def init_input_output(self):
        self.x = np.random.rand(2, 100, 3).astype(self.dtype)
        self.y = np.random.rand(100).astype(self.dtype)
        self.out = self.x + self.y.reshape(1, 100, 1)

    def init_axis(self):
        self.axis = 1


class TestElementwiseAddOp_broadcast_2(TestElementwiseAddOp):
    def init_input_output(self):
        self.x = np.random.rand(2, 3, 100).astype(self.dtype)
        self.y = np.random.rand(100).astype(self.dtype)
        self.out = self.x + self.y.reshape(1, 1, 100)


class TestElementwiseAddOp_broadcast_3(TestElementwiseAddOp):
    def init_input_output(self):
        self.x = np.random.rand(2, 10, 12, 3).astype(self.dtype)
        self.y = np.random.rand(10, 12).astype(self.dtype)
        self.out = self.x + self.y.reshape(1, 10, 12, 1)

    def init_axis(self):
        self.axis = 1


class TestElementwiseAddOp_broadcast_4(TestElementwiseAddOp):
    def init_input_output(self):
        self.x = np.random.rand(100, 2, 3, 4).astype(self.dtype)
        self.y = np.random.rand(100, 1).astype(self.dtype)
        self.out = self.x + self.y.reshape(100, 1, 1, 1)

    def init_axis(self):
        self.axis = 0


class TestElementwiseAddOp_broadcast_5(TestElementwiseAddOp):
    def init_input_output(self):
        self.x = np.random.rand(10, 3, 12).astype(self.dtype)
        self.y = np.random.rand(10, 1, 12).astype(self.dtype)
        self.out = self.x + self.y


class TestElementwiseAddOp_broadcast_6(TestElementwiseAddOp):
    def init_input_output(self):
        self.x = np.random.rand(2, 12, 3, 5).astype(self.dtype)
        self.y = np.random.rand(2, 12, 1, 5).astype(self.dtype)
        self.out = self.x + self.y


class TestElementwiseAddOp_broadcast_7(TestElementwiseAddOp):
    def init_input_output(self):
        self.x = np.random.rand(1, 1, 20, 5).astype(self.dtype)
        self.y = np.random.rand(20, 5, 1, 1).astype(self.dtype)
        self.out = self.x + self.y


class TestElementwiseAddOp_rowwise_add_0(TestElementwiseAddOp):
    def init_input_output(self):
        self.x = np.random.rand(2, 10, 12).astype(self.dtype)
        self.y = np.random.rand(10, 12).astype(self.dtype)
        self.out = self.x + self.y.reshape(1, 10, 12)

    def init_axis(self):
        self.axis = 1


@skip_check_grad_ci(
    reason="[skip shape check] Use y_shape(1) to test broadcast.")
class TestElementwiseAddOp_rowwise_add_1(TestElementwiseAddOp):
    def init_input_output(self):
        self.x = np.random.rand(100, 1).astype(self.dtype)
        self.y = np.random.rand(1).astype(self.dtype)
        self.out = self.x + self.y.reshape(1, 1)

    def init_axis(self):
        self.axis = 1


class TestElementwiseAddOp_channelwise_add(TestElementwiseAddOp):
    def init_input_output(self):
        self.x = np.random.rand(100, 2, 3).astype(self.dtype)
        self.y = np.random.rand(100, 1, 1).astype(self.dtype)
        self.out = self.x + self.y

    def init_axis(self):
        self.axis = -1


class TestElementwiseAddOp_commonuse_add1(TestElementwiseAddOp):
    def init_input_output(self):
        self.x = np.random.rand(2, 3, 100).astype(self.dtype)
        self.y = np.random.rand(1, 1, 100).astype(self.dtype)
        self.out = self.x + self.y

    def init_axis(self):
        self.axis = -1


class TestElementwiseAddOp_commonuse_add2(TestElementwiseAddOp):
    def init_input_output(self):
        self.x = np.random.rand(10, 3, 1, 4).astype(self.dtype)
        self.y = np.random.rand(10, 1, 12, 1).astype(self.dtype)
        self.out = self.x + self.y

    def init_axis(self):
        self.axis = -1


class TestElementwiseAddOp_xsize_lessthan_ysize_add(TestElementwiseAddOp):
    def init_input_output(self):
        self.x = np.random.rand(10, 12).astype(self.dtype)
        self.y = np.random.rand(2, 3, 10, 12).astype(self.dtype)
        self.out = self.x + self.y

    def init_axis(self):
        self.axis = 2


class TestElementwiseAddOpError(unittest.TestCase):
    def test_errors(self):
        with program_guard(Program(), Program()):
            # the input of elementwise_add must be Variable.
            x1 = fluid.create_lod_tensor(
                np.array([-1, 3, 5, 5]), [[1, 1, 1, 1]], fluid.CPUPlace())
            y1 = fluid.create_lod_tensor(
                np.array([-1, 3, 5, 5]), [[1, 1, 1, 1]], fluid.CPUPlace())
            self.assertRaises(TypeError, fluid.layers.elementwise_add, x1, y1)

            # the input dtype of elementwise_add must be float16 or float32 or float64 or int32 or int64
            # float16 only can be set on GPU place
            x2 = fluid.layers.data(
                name='x2', shape=[3, 4, 5, 6], dtype="uint8")
            y2 = fluid.layers.data(
                name='y2', shape=[3, 4, 5, 6], dtype="uint8")
            self.assertRaises(TypeError, fluid.layers.elementwise_add, x2, y2)


class TestAddOp(unittest.TestCase):
    def test_name(self):
        with fluid.program_guard(fluid.Program()):
            x = fluid.data(name="x", shape=[2, 3], dtype="float32")
            y = fluid.data(name='y', shape=[2, 3], dtype='float32')

            y_1 = paddle.add(x, y, name='add_res')
            self.assertEqual(('add_res' in y_1.name), True)

    def test_declarative(self):
        with fluid.program_guard(fluid.Program()):

            def gen_data():
                return {
                    "x": np.array([2, 3, 4]).astype('float32'),
                    "y": np.array([1, 5, 2]).astype('float32')
                }

            x = fluid.data(name="x", shape=[3], dtype='float32')
            y = fluid.data(name="y", shape=[3], dtype='float32')
            z = paddle.add(x, y)

            place = fluid.CPUPlace()
            exe = fluid.Executor(place)
            z_value = exe.run(feed=gen_data(), fetch_list=[z.name])
            z_expected = np.array([3., 8., 6.])
            self.assertEqual((z_value == z_expected).all(), True)

    def test_dygraph(self):
        with fluid.dygraph.guard():
            np_x = np.array([2, 3, 4]).astype('float64')
            np_y = np.array([1, 5, 2]).astype('float64')
            x = fluid.dygraph.to_variable(np_x)
            y = fluid.dygraph.to_variable(np_y)
            z = paddle.add(x, y)
            np_z = z.numpy()
            z_expected = np.array([3., 8., 6.])
            self.assertEqual((np_z == z_expected).all(), True)


if __name__ == '__main__':
    unittest.main()
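
All of the broadcast_* cases above reduce to plain NumPy broadcasting once `y` is viewed with size-1 axes inserted according to `axis`. A standalone NumPy sketch of the `broadcast_0` case, mirroring the expected output computed in the test:

import numpy as np

x = np.random.rand(100, 2, 3)
y = np.random.rand(100)           # broadcast along axis=0
out = x + y.reshape(100, 1, 1)    # what TestElementwiseAddOp_broadcast_0 expects
assert out.shape == (100, 2, 3)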

python/paddle/fluid/tests/unittests/xpu/test_matmul_op.py
deleted (file mode 100644 → 0)
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from __future__ import print_function

import sys
sys.path.append("..")
import paddle.fluid.core as core
import unittest
import numpy as np
from op_test import OpTest
import paddle
import paddle.fluid as fluid
from paddle.fluid import Program, program_guard


def generate_compatible_shapes(dim_X, dim_Y, transpose_X, transpose_Y):
    BATCH_SIZE = 2
    M = 3
    N = 4
    K = 5
    if (dim_X == 1 and transpose_X) or (dim_Y == 1 and transpose_Y):
        K = 1
    if dim_X == 1:
        if transpose_X:
            shape_X = [M]
        else:
            shape_X = [K]
    if dim_Y == 1:
        if transpose_Y:
            shape_Y = [N]
        else:
            shape_Y = [K]
    if dim_X >= 2:
        if transpose_X:
            shape_X = [K, M]
        else:
            shape_X = [M, K]
    if dim_X == 3:
        shape_X = [BATCH_SIZE] + shape_X
    if dim_Y >= 2:
        if transpose_Y:
            shape_Y = [N, K]
        else:
            shape_Y = [K, N]
    if dim_Y == 3:
        shape_Y = [BATCH_SIZE] + shape_Y
    return shape_X, shape_Y


def reference_matmul(X, Y, transpose_X=False, transpose_Y=False):
    """Reference forward implementation using np.matmul."""
    # np.matmul does not support the transpose flags, so we manually
    # transpose X and Y appropriately.
    if transpose_X:
        if X.ndim == 1:
            X = X.reshape((X.size, 1))
        elif X.ndim == 2:
            X = X.T
        else:
            dim = [i for i in range(len(X.shape))]
            dim[-1], dim[len(X.shape) - 2] = dim[len(X.shape) - 2], dim[-1]
            X = np.transpose(X, tuple(dim))
    if transpose_Y:
        if Y.ndim == 1:
            Y = Y.reshape((1, Y.size))
        else:
            dim = [i for i in range(len(Y.shape))]
            dim[-1], dim[len(Y.shape) - 2] = dim[len(Y.shape) - 2], dim[-1]
            Y = np.transpose(Y, tuple(dim))

    Out = np.matmul(X, Y)
    if not Out.shape:
        # We do not support 0-dimensional Tensors (scalars). So where
        # np.matmul outputs a scalar, we must convert to a Tensor of
        # shape (1, ) instead.
        # Everywhere else, we are compatible with np.matmul.
        Out = np.array([Out], dtype="float32")
    return Out


class Generator(object):
    def setUp(self):
        self.op_type = "matmul"
        X = np.random.random(self.shape_X).astype("float32")
        Y = np.random.random(self.shape_Y).astype("float32")
        Out = reference_matmul(X, Y, self.transpose_X, self.transpose_Y)
        self.inputs = {'X': X, 'Y': Y}
        self.attrs = {
            'transpose_X': self.transpose_X,
            'transpose_Y': self.transpose_Y
        }
        self.outputs = {'Out': Out}

    def test_check_output(self):
        self.check_output()
        if paddle.is_compiled_with_xpu() and len(
                self.inputs['X'].shape) == len(self.inputs['Y'].shape) and \
                self.inputs['X'].shape[0] == self.inputs['Y'].shape[0]:
            place = paddle.XPUPlace(0)
            self.check_output_with_place(place, atol=1e-3)

    def test_check_grad_normal(self):
        self.check_grad(['X', 'Y'], 'Out', max_relative_error=1e-3)
        if paddle.is_compiled_with_xpu() and len(
                self.inputs['X'].shape) == len(self.inputs['Y'].shape) and \
                self.inputs['X'].shape[0] == self.inputs['Y'].shape[0]:
            place = paddle.XPUPlace(0)
            self.check_grad_with_place(
                place, ['X', 'Y'], 'Out', max_relative_error=5e-2)

    def test_check_grad_ignore_x(self):
        self.check_grad(
            ['Y'], 'Out', max_relative_error=1e-3, no_grad_set=set("X"))
        if paddle.is_compiled_with_xpu() and len(
                self.inputs['X'].shape) == len(self.inputs['Y'].shape) and \
                self.inputs['X'].shape[0] == self.inputs['Y'].shape[0]:
            place = paddle.XPUPlace(0)
            self.check_grad_with_place(
                place, ['Y'], 'Out',
                max_relative_error=5e-2,
                no_grad_set=set("X"))

    def test_check_grad_ignore_y(self):
        self.check_grad(
            ['X'], 'Out', max_relative_error=1e-3, no_grad_set=set('Y'))
        if paddle.is_compiled_with_xpu() and len(
                self.inputs['X'].shape) == len(self.inputs['Y'].shape) and \
                self.inputs['X'].shape[0] == self.inputs['Y'].shape[0]:
            place = paddle.XPUPlace(0)
            self.check_grad_with_place(
                place, ['X'], 'Out',
                max_relative_error=5e-2,
                no_grad_set=set('Y'))


class TestMatmulOpError(unittest.TestCase):
    def test_errors(self):
        with program_guard(Program(), Program()):
            # The inputs type of matmul_op must be Variable.
            input1 = 12
            self.assertRaises(TypeError, fluid.layers.matmul, input1, input1)
            # The inputs dtype of matmul_op must be float32, float64.
            input2 = fluid.layers.data(
                name='input2', shape=[10, 10], dtype="int32")
            self.assertRaises(TypeError, fluid.layers.matmul, input2, input2)
            input3 = fluid.layers.data(
                name='input3', shape=[2, 2], dtype="float16")
            fluid.layers.matmul(input3, input3)


# Negative dimension generation
def generate_negative_dims(in_shape):
    from itertools import combinations
    size = len(in_shape)
    indexs = list()
    shapes = list()
    for i in range(size):
        indexs.extend(list(combinations([j for j in range(size)], i + 1)))
    for idx in indexs:
        shapes.append(
            [in_shape[i] if i not in idx else -1 for i in range(size)])
    return shapes


# Build program with inputs sizes that contain negative numbers
def test_negative_dims_program(obj):
    for shape_x in generate_negative_dims(obj.shape_X):
        for shape_y in generate_negative_dims(obj.shape_Y):
            X = np.random.random(obj.shape_X).astype("float32")
            Y = np.random.random(obj.shape_Y).astype("float32")
            Ref = reference_matmul(X, Y, obj.transpose_X, obj.transpose_Y)
            with program_guard(Program(), Program()):
                x = fluid.data(name='x', shape=shape_x, dtype='float32')
                y = fluid.data(name='y', shape=shape_y, dtype='float32')
                output = fluid.layers.matmul(x, y, obj.transpose_X,
                                             obj.transpose_Y)
                obj.assertEqual(len(Ref.shape), len(output.shape))
                for idx in range(len(Ref.shape)):
                    if output.shape[idx] != -1:
                        obj.assertEqual(Ref.shape[idx], output.shape[idx])
                exe = fluid.Executor(fluid.CPUPlace())
                res, = exe.run(fluid.default_main_program(),
                               feed={'x': X,
                                     'y': Y},
                               fetch_list=[output])
                np.allclose(res, Ref, atol=1e-5)


# Generate program api cases for all negative possibilities
def api_test(dim_x, dim_y, trans_x, trans_y):
    test_name = ('TestMatMulAPI_dimX_{}_dim_Y_{}_transX_{}_transY_{}'.format(
        dim_x, dim_y, trans_x, trans_y))
    shape_x, shape_y = generate_compatible_shapes(dim_x, dim_y, trans_x,
                                                  trans_y)
    globals()[test_name] = type(test_name, (unittest.TestCase, ), {
        'shape_X': shape_x,
        'shape_Y': shape_y,
        'transpose_X': trans_x,
        'transpose_Y': trans_y,
        'test_propram': test_negative_dims_program,
    })


# Generate operators cases for all possibilities
def inject_test(dim_x, dim_y, trans_x, trans_y):
    test_name = ('TestMatMulOp_dimX_{}_dim_Y_{}_transX_{}_transY_{}'.format(
        dim_x, dim_y, trans_x, trans_y))
    shape_x, shape_y = generate_compatible_shapes(dim_x, dim_y, trans_x,
                                                  trans_y)
    globals()[test_name] = type(test_name, (Generator, OpTest), {
        'shape_X': shape_x,
        'shape_Y': shape_y,
        'transpose_X': trans_x,
        'transpose_Y': trans_y,
    })


for dim_X in (1, 2, 3):
    for dim_Y in (1, 2, 3):
        for transose_x in (False, True):
            for transose_y in (False, True):
                inject_test(dim_X, dim_Y, transose_x, transose_y)
                api_test(dim_X, dim_Y, transose_x, transose_y)


# Test case n-dim
def generate_compatible_shapes(dim, transpose_X, transpose_Y):
    M = 2
    N = 4
    K = 3
    shape_X = [2 for _ in range(dim - 2)]
    shape_Y = [2 for _ in range(dim - 2)]

    if transpose_X:
        shape_X += [K, M]
    else:
        shape_X += [M, K]

    if transpose_Y:
        shape_Y += [N, K]
    else:
        shape_Y += [K, N]

    return shape_X, shape_Y


# # Test case n-dim
for dim in [4]:
    for transpose_X in [False, True]:
        for transpose_Y in [False, True]:
            test_name = (
                'TestMatMulOp_dimX_{}_dim_Y_{}_transX_{}_transY_{}'.format(
                    dim, dim, transpose_X, transpose_Y))
            shape_X, shape_Y = generate_compatible_shapes(dim, transpose_X,
                                                          transpose_Y)
            globals()[test_name] = type(test_name, (Generator, OpTest), {
                'shape_X': shape_X,
                'shape_Y': shape_Y,
                'transpose_X': transpose_X,
                'transpose_Y': transpose_Y,
            })


class API_TestMm(unittest.TestCase):
    def test_out(self):
        with fluid.program_guard(fluid.Program()):
            x = fluid.data(name="x", shape=[2], dtype="float64")
            y = fluid.data(name='y', shape=[2], dtype='float64')
            res = fluid.data(name="output", shape=[1], dtype="float64")
            result = paddle.mm(x, y)
            exe = fluid.Executor(fluid.CPUPlace())
            data1 = np.random.rand(2)
            data2 = np.random.rand(2)
            np_res = exe.run(feed={'x': data1, 'y': data2},
                             fetch_list=[result])
            expected_result = np.matmul(
                data1.reshape(1, 2), data2.reshape(2, 1))

            self.assertTrue(
                np.allclose(
                    np_res, expected_result, atol=1e-5),
                "two value is\
{}\n{}, check diff!".format(np_res, expected_result))

    def test_dygraph_without_out(self):
        device = fluid.CPUPlace()
        with fluid.dygraph.guard(device):
            input_array1 = np.random.rand(3, 4).astype("float64")
            input_array2 = np.random.rand(4, 3).astype("float64")
            data1 = fluid.dygraph.to_variable(input_array1)
            data2 = fluid.dygraph.to_variable(input_array2)
            out = paddle.mm(data1, data2)
            expected_result = np.matmul(input_array1, input_array2)
        self.assertTrue(np.allclose(expected_result, out.numpy()))


class Test_API_Matmul(unittest.TestCase):
    def test_dygraph_without_out(self):
        device = fluid.CPUPlace()
        with fluid.dygraph.guard(device):
            input_array1 = np.random.rand(3, 4).astype("float64")
            input_array2 = np.random.rand(4, 3).astype("float64")
            data1 = fluid.dygraph.to_variable(input_array1)
            data2 = fluid.dygraph.to_variable(input_array2)
            out = paddle.matmul(data1, data2)
            expected_result = np.matmul(input_array1, input_array2)
        self.assertTrue(np.allclose(expected_result, out.numpy()))


class API_TestMmError(unittest.TestCase):
    def test_errors(self):
        def test_error1():
            with fluid.program_guard(fluid.Program(), fluid.Program()):
                data1 = fluid.data(
                    name="data1", shape=[10, 2], dtype="float32")
                data2 = fluid.data(
                    name="data2", shape=[3, 10], dtype="float32")
                paddle.mm(data1, data2)

        self.assertRaises(ValueError, test_error1)

        def test_error2():
            with fluid.program_guard(fluid.Program(), fluid.Program()):
                data1 = fluid.data(
                    name="data1", shape=[-1, 10, 2], dtype="float32")
                data2 = fluid.data(
                    name="data2", shape=[-1, 2, 10], dtype="float32")
                paddle.mm(data1, data2)

        test_error2()

        def test_error3():
            with fluid.program_guard(fluid.Program(), fluid.Program()):
                data1 = fluid.data(
                    name="data1", shape=[10, 10, 2], dtype="float32")
                data2 = fluid.data(
                    name="data2", shape=[3, 2, 10], dtype="float32")
                paddle.mm(data1, data2)

        self.assertRaises(ValueError, test_error3)


if __name__ == "__main__":
    unittest.main()
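
For batched inputs, `reference_matmul` above swaps the last two axes before calling `np.matmul`, which is equivalent to `np.swapaxes`. A standalone NumPy sketch using the module's shape constants (M=3, N=4, K=5, batch size 2):

import numpy as np

X = np.random.random((2, 5, 3))   # [batch, K, M], i.e. transpose_X=True
Y = np.random.random((2, 4, 5))   # [batch, N, K], i.e. transpose_Y=True
out = np.matmul(np.swapaxes(X, -1, -2), np.swapaxes(Y, -1, -2))
assert out.shape == (2, 3, 4)     # [batch, M, N]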

python/paddle/fluid/tests/unittests/xpu/test_mul_op.py
deleted (file mode 100644 → 0)
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from __future__ import print_function

import unittest
import numpy as np
import paddle
import paddle.fluid.core as core
import sys
sys.path.append("..")
from op_test import OpTest
import paddle.fluid as fluid
from paddle.fluid import Program, program_guard


class TestMulOp(OpTest):
    def setUp(self):
        self.op_type = "mul"
        self.dtype = np.float64
        self.init_dtype_type()
        self.inputs = {
            'X': np.random.random((20, 5)).astype(self.dtype),
            'Y': np.random.random((5, 21)).astype(self.dtype)
        }
        self.outputs = {'Out': np.dot(self.inputs['X'], self.inputs['Y'])}

    def init_dtype_type(self):
        pass

    def test_check_output(self):
        self.check_output()

    def test_check_grad_normal(self):
        self.check_grad(['X', 'Y'], 'Out')

    def test_check_grad_ingore_x(self):
        self.check_grad(
            ['Y'], 'Out', max_relative_error=0.5, no_grad_set=set("X"))

    def test_check_grad_ingore_y(self):
        self.check_grad(
            ['X'], 'Out', max_relative_error=0.5, no_grad_set=set('Y'))


class TestMulOpError(unittest.TestCase):
    def test_errors(self):
        with program_guard(Program(), Program()):
            # The input type of mul_op must be Variable.
            x1 = fluid.create_lod_tensor(
                np.array([[-1]]), [[1]], fluid.CPUPlace())
            x2 = fluid.create_lod_tensor(
                np.array([[-1]]), [[1]], fluid.CPUPlace())
            self.assertRaises(TypeError, fluid.layers.mul, x1, x2)
            # The input dtype of mul_op must be float32 or float64.
            x3 = fluid.layers.data(name='x3', shape=[4], dtype="int32")
            x4 = fluid.layers.data(name='x4', shape=[4], dtype="int32")
            self.assertRaises(TypeError, fluid.layers.mul, x3, x4)


class TestMulOp2(OpTest):
    def setUp(self):
        self.op_type = "mul"
        self.dtype = np.float64
        self.init_dtype_type()
        self.inputs = {
            'X': np.random.random((3, 4, 2, 9)).astype(self.dtype),
            'Y': np.random.random((3, 6, 1, 2, 3)).astype(self.dtype)
        }
        self.attrs = {
            'x_num_col_dims': 2,
            'y_num_col_dims': 2,
        }
        result = np.dot(self.inputs['X'].reshape(3 * 4, 2 * 9),
                        self.inputs['Y'].reshape(3 * 6, 1 * 2 * 3))
        result = result.reshape(3, 4, 1, 2, 3)
        self.outputs = {'Out': result}

    def init_dtype_type(self):
        pass

    def test_check_output(self):
        self.check_output()

    def test_check_grad_normal(self):
        self.check_grad(['X', 'Y'], 'Out')

    def test_check_grad_ingore_x(self):
        self.check_grad(
            ['Y'], 'Out', max_relative_error=0.5, no_grad_set=set('X'))

    def test_check_grad_ignore_y(self):
        self.check_grad(
            ['X'], 'Out', max_relative_error=0.5, no_grad_set=set('Y'))


@unittest.skipIf(not paddle.is_compiled_with_xpu(),
                 "core is not compiled with XPU")
class TestXPUMulOp1(TestMulOp):
    def init_dtype_type(self):
        self.dtype = np.float32

    def test_check_output(self):
        place = paddle.XPUPlace(0)
        self.check_output_with_place(place, atol=1e-1)

    def test_check_grad_normal(self):
        place = paddle.XPUPlace(0)
        self.check_grad_with_place(
            place, ['X', 'Y'], 'Out', max_relative_error=0.5)

    def test_check_grad_ingore_x(self):
        place = paddle.XPUPlace(0)
        self.check_grad_with_place(
            place, ['Y'], 'Out', max_relative_error=0.5,
            no_grad_set=set("X"))

    def test_check_grad_ingore_y(self):
        place = paddle.XPUPlace(0)
        self.check_grad_with_place(
            place, ['X'], 'Out', max_relative_error=0.5,
            no_grad_set=set('Y'))


@unittest.skipIf(not paddle.is_compiled_with_xpu(),
                 "core is not compiled with XPU")
class TestXPUMulOp2(TestMulOp2):
    def init_dtype_type(self):
        self.dtype = np.float32

    def test_check_output(self):
        place = paddle.XPUPlace(0)
        self.check_output_with_place(place, atol=2e-1)

    def test_check_grad_normal(self):
        place = paddle.XPUPlace(0)
        self.check_grad_with_place(
            place, ['X', 'Y'], 'Out', max_relative_error=0.9)

    def test_check_grad_ingore_x(self):
        place = paddle.XPUPlace(0)
        self.check_grad_with_place(
            place, ['Y'], 'Out', max_relative_error=0.5,
            no_grad_set=set("X"))

    def test_check_grad_ingore_y(self):
        place = paddle.XPUPlace(0)
        self.check_grad_with_place(
            place, ['X'], 'Out', max_relative_error=0.9,
            no_grad_set=set('Y'))


if __name__ == "__main__":
    unittest.main()
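
`TestMulOp2` above pins down the `mul` op's `x_num_col_dims`/`y_num_col_dims` semantics: each input is flattened to 2-D at the given axis, multiplied, and the result reshaped back. The equivalent standalone NumPy computation, transcribed from the expected-output logic in the test:

import numpy as np

x = np.random.random((3, 4, 2, 9))
y = np.random.random((3, 6, 1, 2, 3))
# x_num_col_dims=2 flattens x to (3*4, 2*9); y_num_col_dims=2 flattens y to (3*6, 1*2*3)
out = np.dot(x.reshape(3 * 4, 2 * 9), y.reshape(3 * 6, 1 * 2 * 3))
out = out.reshape(3, 4, 1, 2, 3)  # leading dims of x, trailing dims of y
assert out.shape == (3, 4, 1, 2, 3)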