机器未来 / Paddle (forked from PaddlePaddle / Paddle)
Commit 096a07e8 (unverified)
ipu_commit_tests p4 (#38092)
Authored by jianghaicheng on Dec 14, 2021; committed via GitHub on Dec 14, 2021.
Parent: d12d8389
Showing 7 changed files with 947 additions and 0 deletions (+947 −0)
python/paddle/fluid/tests/unittests/ipu/test_cross_entropy2_op_ipu.py  +133 −0
python/paddle/fluid/tests/unittests/ipu/test_dropout_op_ipu.py  +127 −0
python/paddle/fluid/tests/unittests/ipu/test_elemetwise_x_op_ipu.py  +172 −0
python/paddle/fluid/tests/unittests/ipu/test_equal_op_ipu.py  +126 −0
python/paddle/fluid/tests/unittests/ipu/test_expand_op_ipu.py  +158 −0
python/paddle/fluid/tests/unittests/ipu/test_fill_constant_op_ipu.py  +114 −0
python/paddle/fluid/tests/unittests/ipu/test_gather_op_ipu.py  +117 −0
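Each new file below is a self-contained unittest module, and the @unittest.skipIf guard at the top of each one skips the whole class unless Paddle was built with IPU support. As a hedged illustration (not part of this commit; the directory and file pattern simply follow the listing above), the standard-library test loader can discover and run the whole batch:

    # Sketch only: assumes it is run from the repository root of a Paddle
    # build with IPU support.
    import unittest

    # Discover the IPU op tests added in this commit and run them.
    suite = unittest.defaultTestLoader.discover(
        "python/paddle/fluid/tests/unittests/ipu", pattern="test_*_op_ipu.py")
    unittest.TextTestRunner(verbosity=2).run(suite)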
python/paddle/fluid/tests/unittests/ipu/test_cross_entropy2_op_ipu.py (new file, mode 100644)
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest

import numpy as np
import paddle
import paddle.fluid as fluid
import paddle.fluid.compiler as compiler
import paddle.optimizer
import paddle.static
from paddle.fluid.tests.unittests.ipu.op_test_ipu import (IPUOpTest,
                                                          np_dtype_to_fluid_str)

paddle.enable_static()


@unittest.skipIf(not paddle.is_compiled_with_ipu(),
                 "core is not compiled with IPU")
class TestBase(IPUOpTest):
    def setUp(self):
        self.set_atol()
        self.set_training()
        self.set_feed()
        self.set_feed_attr()
        self.set_attrs()

    def set_feed(self):
        self.feed = {
            "x": np.random.uniform(size=[3, 7]).astype('float32'),
            "label": np.arange(3).reshape([3]).astype(np.int64),
        }

    def set_feed_attr(self):
        self.feed_shape = [x.shape for x in self.feed.values()]
        self.feed_list = list(self.feed.keys())
        self.feed_dtype = [
            np_dtype_to_fluid_str(x.dtype) for x in self.feed.values()
        ]

    def set_attrs(self):
        self.attrs = {
            'soft_label': False,
        }

    def _test_base(self, run_ipu=True):
        scope = fluid.core.Scope()
        main_prog = paddle.static.Program()
        startup_prog = paddle.static.Program()
        SEED = self.SEED
        main_prog.random_seed = SEED
        startup_prog.random_seed = SEED

        with fluid.scope_guard(scope):
            with paddle.static.program_guard(main_prog, startup_prog):
                x = paddle.static.data(
                    name=self.feed_list[0],
                    shape=self.feed_shape[0],
                    dtype=self.feed_dtype[0])
                # [warning] Copying (host) tensor input/1 from INT64 to INT32.
                # Will only warn once
                if run_ipu:
                    label = paddle.static.data(
                        name=self.feed_list[1],
                        shape=self.feed_shape[1],
                        dtype='int32')
                else:
                    label = paddle.static.data(
                        name=self.feed_list[1],
                        shape=self.feed_shape[1],
                        dtype='int64')
                out = fluid.layers.cross_entropy(
                    input=x, label=label, **self.attrs)

                fetch_list = [out.name]

            if run_ipu:
                place = paddle.IPUPlace()
            else:
                place = paddle.CPUPlace()
            exe = paddle.static.Executor(place)
            exe.run(startup_prog)

            if run_ipu:
                feed_list = self.feed_list
                ipu_strategy = compiler.get_ipu_strategy()
                ipu_strategy.is_training = self.is_training
                program = compiler.IPUCompiledProgram(
                    main_prog,
                    ipu_strategy=ipu_strategy).compile(feed_list, fetch_list)
            else:
                program = main_prog

            result = exe.run(program, feed=self.feed, fetch_list=fetch_list)
            return result[0]

    def test_base(self):
        res0 = self._test_base(True)
        res1 = self._test_base(False)

        self.assertTrue(
            np.allclose(
                res0.flatten(), res1.flatten(), atol=self.atol))

        self.assertTrue(res0.shape == res1.shape)


class TestCase1(TestBase):
    def set_attrs(self):
        self.attrs = {
            'soft_label': False,
            'ignore_index': 1,
        }


@unittest.skip("soft_label=True id not supported")
class TestCase2(TestBase):
    def set_attrs(self):
        self.attrs = {
            'soft_label': True,
        }


if __name__ == "__main__":
    unittest.main()
python/paddle/fluid/tests/unittests/ipu/test_dropout_op_ipu.py (new file, mode 100644)
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest

import numpy as np
import paddle
import paddle.fluid as fluid
import paddle.fluid.compiler as compiler
import paddle.optimizer
import paddle.static
from paddle.fluid.tests.unittests.ipu.op_test_ipu import (IPUOpTest,
                                                          np_dtype_to_fluid_str)

paddle.enable_static()


@unittest.skipIf(not paddle.is_compiled_with_ipu(),
                 "core is not compiled with IPU")
class TestBase(IPUOpTest):
    def setUp(self):
        self.set_atol()
        self.set_training()
        self.set_feed()
        self.set_feed_attr()
        self.set_attrs()

    def set_feed(self):
        self.feed = {
            "x": np.random.uniform(size=[1, 3, 10, 10]).astype('float32')
        }

    def set_feed_attr(self):
        self.feed_shape = [x.shape for x in self.feed.values()]
        self.feed_list = list(self.feed.keys())
        self.feed_dtype = [
            np_dtype_to_fluid_str(x.dtype) for x in self.feed.values()
        ]

    def set_attrs(self):
        self.attrs = {
            "dropout_prob": 0.5,
            "is_test": True,
            "dropout_implementation": "downgrade_in_infer"
        }

    def _test_base(self, run_ipu=True):
        scope = fluid.core.Scope()
        main_prog = paddle.static.Program()
        startup_prog = paddle.static.Program()
        SEED = self.SEED
        main_prog.random_seed = SEED
        startup_prog.random_seed = SEED

        with fluid.scope_guard(scope):
            with paddle.static.program_guard(main_prog, startup_prog):
                x = paddle.static.data(
                    name=self.feed_list[0],
                    shape=self.feed_shape[0],
                    dtype=self.feed_dtype[0])
                dropout = paddle.fluid.layers.dropout(x, **self.attrs)
                out = paddle.fluid.layers.elementwise_add(dropout, dropout)

                fetch_list = [out.name]

            if run_ipu:
                place = paddle.IPUPlace()
            else:
                place = paddle.CPUPlace()
            exe = paddle.static.Executor(place)
            exe.run(startup_prog)

            if run_ipu:
                feed_list = self.feed_list
                ipu_strategy = compiler.get_ipu_strategy()
                ipu_strategy.is_training = self.is_training
                program = compiler.IPUCompiledProgram(
                    main_prog,
                    ipu_strategy=ipu_strategy).compile(feed_list, fetch_list)
            else:
                program = main_prog

            result = exe.run(program, feed=self.feed, fetch_list=fetch_list)
            return result[0]

    def test_base(self):
        res0 = self._test_base(True)
        res1 = self._test_base(False)

        self.assertTrue(
            np.allclose(
                res0.flatten(), res1.flatten(), atol=self.atol))

        self.assertTrue(res0.shape == res1.shape)


class TestCase1(TestBase):
    def set_attrs(self):
        self.attrs = {
            "dropout_prob": 0.5,
            "is_test": True,
            "dropout_implementation": "upscale_in_train"
        }


class TestCase2(TestBase):
    def set_attrs(self):
        self.attrs = {
            "dropout_prob": 0.0,
            "is_test": False,
            "dropout_implementation": "upscale_in_train"
        }


if __name__ == "__main__":
    unittest.main()
python/paddle/fluid/tests/unittests/ipu/test_elemetwise_x_op_ipu.py (new file, mode 100644)
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest

import numpy as np
import paddle
import paddle.fluid as fluid
import paddle.fluid.compiler as compiler
import paddle.optimizer
import paddle.static
from paddle.fluid.tests.unittests.ipu.op_test_ipu import (IPUOpTest,
                                                          np_dtype_to_fluid_str)

paddle.enable_static()


@unittest.skipIf(not paddle.is_compiled_with_ipu(),
                 "core is not compiled with IPU")
class TestMul(IPUOpTest):
    def setUp(self):
        self.set_atol()
        self.set_training()
        self.init_op()

    def init_op(self):
        self.op = paddle.fluid.layers.elementwise_mul

    def set_feed_attr(self):
        self.feed_shape = [x.shape for x in self.feed.values()]
        self.feed_list = list(self.feed.keys())
        self.feed_dtype = [
            np_dtype_to_fluid_str(x.dtype) for x in self.feed.values()
        ]

    def _test_base(self, run_ipu=True):
        scope = fluid.core.Scope()
        main_prog = paddle.static.Program()
        startup_prog = paddle.static.Program()
        SEED = self.SEED
        main_prog.random_seed = SEED
        startup_prog.random_seed = SEED

        with fluid.scope_guard(scope):
            with paddle.static.program_guard(main_prog, startup_prog):
                x = paddle.static.data(
                    name=self.feed_list[0],
                    shape=self.feed_shape[0],
                    dtype=self.feed_dtype[0])
                y = paddle.static.data(
                    name=self.feed_list[1],
                    shape=self.feed_shape[1],
                    dtype=self.feed_dtype[1])
                out = self.op(x, y, **self.attrs)

                fetch_list = [out.name]

            if run_ipu:
                place = paddle.IPUPlace()
            else:
                place = paddle.CPUPlace()
            exe = paddle.static.Executor(place)
            exe.run(startup_prog)

            if run_ipu:
                feed_list = self.feed_list
                ipu_strategy = compiler.get_ipu_strategy()
                ipu_strategy.is_training = self.is_training
                program = compiler.IPUCompiledProgram(
                    main_prog,
                    ipu_strategy=ipu_strategy).compile(feed_list, fetch_list)
            else:
                program = main_prog

            result = exe.run(program, feed=self.feed, fetch_list=fetch_list)
            return result[0]

    def run_test_base(self):
        res0 = self._test_base(True)
        res1 = self._test_base(False)

        self.assertTrue(
            np.allclose(
                res0.flatten(), res1.flatten(), atol=self.atol))

        self.assertTrue(res0.shape == res1.shape)

    def test_case0(self):
        self.feed = {
            "x": np.random.uniform(size=(2, 3, 4, 5)).astype('float32'),
            "y": np.random.uniform(size=(2, 3, 4, 5)).astype('float32'),
        }
        self.attrs = {}
        self.set_feed_attr()
        self.run_test_base()

    def test_case1(self):
        self.feed = {
            "x": np.random.uniform(size=(2, 3, 4, 5)).astype('float32'),
            "y": np.random.uniform(size=(3, 4)).astype('float32'),
        }
        self.set_feed_attr()
        self.attrs = {"axis": 1}
        self.run_test_base()

    def test_case2(self):
        self.feed = {
            "x": np.random.uniform(size=(2, 3, 4, 5)).astype('float32'),
            "y": np.random.uniform(size=(5)).astype('float32'),
        }
        self.set_feed_attr()
        self.attrs = {"axis": -1}
        self.run_test_base()

    def test_case3(self):
        self.feed = {
            "x": np.random.uniform(size=(2, 3, 4, 5)).astype('float32'),
            "y": np.random.uniform(size=(2)).astype('float32'),
        }
        self.set_feed_attr()
        self.attrs = {"axis": 0}
        self.run_test_base()


class TestAdd(TestMul):
    def init_op(self):
        self.op = paddle.fluid.layers.elementwise_add


class TestSub(TestMul):
    def init_op(self):
        self.op = paddle.fluid.layers.elementwise_sub


class TestDiv(TestMul):
    def init_op(self):
        self.op = paddle.fluid.layers.elementwise_div


class TestMin(TestMul):
    def init_op(self):
        self.op = paddle.fluid.layers.elementwise_min


class TestMax(TestMul):
    def init_op(self):
        self.op = paddle.fluid.layers.elementwise_max


class TestPow(TestMul):
    def init_op(self):
        self.op = paddle.fluid.layers.elementwise_pow


class TestMod(TestMul):
    def init_op(self):
        self.op = paddle.fluid.layers.elementwise_mod


if __name__ == "__main__":
    unittest.main()
python/paddle/fluid/tests/unittests/ipu/test_equal_op_ipu.py (new file, mode 100644)
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest

import numpy as np
import paddle
import paddle.fluid as fluid
import paddle.fluid.compiler as compiler
import paddle.optimizer
import paddle.static
from paddle.fluid.tests.unittests.ipu.op_test_ipu import (IPUOpTest,
                                                          np_dtype_to_fluid_str)

paddle.enable_static()


@unittest.skipIf(not paddle.is_compiled_with_ipu(),
                 "core is not compiled with IPU")
class TestBase(IPUOpTest):
    def setUp(self):
        self.set_atol()
        self.set_training()
        self.set_feed()
        self.set_feed_attr()
        self.set_attrs()

    def set_feed(self):
        self.feed = {
            "x": np.ones([1, 10]).astype('float32'),
            "y": np.zeros([1, 10]).astype('float32'),
        }

    def set_feed_attr(self):
        self.feed_shape = [x.shape for x in self.feed.values()]
        self.feed_list = list(self.feed.keys())
        self.feed_dtype = [
            np_dtype_to_fluid_str(x.dtype) for x in self.feed.values()
        ]

    def set_attrs(self):
        self.attrs = {}

    def _test_base(self, run_ipu=True):
        scope = fluid.core.Scope()
        main_prog = paddle.static.Program()
        startup_prog = paddle.static.Program()
        SEED = self.SEED
        main_prog.random_seed = SEED
        startup_prog.random_seed = SEED

        with fluid.scope_guard(scope):
            with paddle.static.program_guard(main_prog, startup_prog):
                # XX
                x = paddle.static.data(
                    name=self.feed_list[0],
                    shape=self.feed_shape[0],
                    dtype=self.feed_dtype[0])
                y = paddle.static.data(
                    name=self.feed_list[1],
                    shape=self.feed_shape[1],
                    dtype=self.feed_dtype[1])
                out = paddle.fluid.layers.equal(x, y, **self.attrs)

                fetch_list = [out.name]

            if run_ipu:
                place = paddle.IPUPlace()
            else:
                place = paddle.CPUPlace()
            exe = paddle.static.Executor(place)
            exe.run(startup_prog)

            if run_ipu:
                feed_list = self.feed_list
                ipu_strategy = compiler.get_ipu_strategy()
                ipu_strategy.is_training = self.is_training
                program = compiler.IPUCompiledProgram(
                    main_prog,
                    ipu_strategy=ipu_strategy).compile(feed_list, fetch_list)
            else:
                program = main_prog

            result = exe.run(program, feed=self.feed, fetch_list=fetch_list)
            return result[0]

    def test_base(self):
        res0 = self._test_base(True)
        res1 = self._test_base(False)

        self.assertTrue(
            np.allclose(
                res0.flatten(), res1.flatten(), atol=self.atol))

        self.assertTrue(res0.shape == res1.shape)


class TestCase1(TestBase):
    def set_feed(self):
        self.feed = {
            "x": np.ones([1, 10]).astype('float32'),
            "y": np.ones([1, 10]).astype('float32'),
        }


class TestCase2(TestBase):
    def set_feed(self):
        self.feed = {
            "x": np.ones([1, 10]).astype('float32'),
            "y": np.arange(0, 10).reshape([1, 10]).astype('float32'),
        }


if __name__ == "__main__":
    unittest.main()
python/paddle/fluid/tests/unittests/ipu/test_expand_op_ipu.py (new file, mode 100644)
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest

import numpy as np
import paddle
import paddle.fluid as fluid
import paddle.fluid.compiler as compiler
import paddle.optimizer
import paddle.static
from paddle.fluid.tests.unittests.ipu.op_test_ipu import (IPUOpTest,
                                                          np_dtype_to_fluid_str)

paddle.enable_static()


@unittest.skipIf(not paddle.is_compiled_with_ipu(),
                 "core is not compiled with IPU")
class TestBase(IPUOpTest):
    def setUp(self):
        self.set_atol()
        self.set_training()
        self.set_feed()
        self.set_feed_attr()
        self.set_attrs()

    def set_feed(self):
        self.feed = {"x": np.random.uniform(size=[2, 3, 1]).astype('float32')}

    def set_feed_attr(self):
        self.feed_shape = [x.shape for x in self.feed.values()]
        self.feed_list = list(self.feed.keys())
        self.feed_dtype = [
            np_dtype_to_fluid_str(x.dtype) for x in self.feed.values()
        ]

    def set_attrs(self):
        self.attrs = {"expand_times": [1, 2, 2]}

    def _test_base(self, run_ipu=True):
        scope = fluid.core.Scope()
        main_prog = paddle.static.Program()
        startup_prog = paddle.static.Program()
        SEED = self.SEED
        main_prog.random_seed = SEED
        startup_prog.random_seed = SEED

        with fluid.scope_guard(scope):
            with paddle.static.program_guard(main_prog, startup_prog):
                x = paddle.static.data(
                    name=self.feed_list[0],
                    shape=self.feed_shape[0],
                    dtype=self.feed_dtype[0])
                out = paddle.fluid.layers.expand(x, **self.attrs)

                fetch_list = [out.name]

            if run_ipu:
                place = paddle.IPUPlace()
            else:
                place = paddle.CPUPlace()
            exe = paddle.static.Executor(place)
            exe.run(startup_prog)

            if run_ipu:
                feed_list = self.feed_list
                ipu_strategy = compiler.get_ipu_strategy()
                ipu_strategy.is_training = self.is_training
                program = compiler.IPUCompiledProgram(
                    main_prog,
                    ipu_strategy=ipu_strategy).compile(feed_list, fetch_list)
            else:
                program = main_prog

            result = exe.run(program, feed=self.feed, fetch_list=fetch_list)
            return result[0]

    def test_base(self):
        res0 = self._test_base(False)
        res1 = self._test_base(True)

        self.assertTrue(
            np.allclose(
                res0.flatten(), res1.flatten(), atol=self.atol))

        self.assertTrue(res0.shape == res1.shape)


class TestCase1(TestBase):
    def set_feed(self):
        self.feed = {"x": np.random.uniform(size=[2, 2]).astype('float32')}

    def set_feed_attr(self):
        self.feed_shape = [x.shape for x in self.feed.values()]
        self.feed_list = list(self.feed.keys())
        self.feed_dtype = [
            np_dtype_to_fluid_str(x.dtype) for x in self.feed.values()
        ]

    def set_attrs(self):
        self.attrs = {}

    def _test_base(self, run_ipu=True):
        scope = fluid.core.Scope()
        main_prog = paddle.static.Program()
        startup_prog = paddle.static.Program()
        SEED = self.SEED
        main_prog.random_seed = SEED
        startup_prog.random_seed = SEED

        with fluid.scope_guard(scope):
            with paddle.static.program_guard(main_prog, startup_prog):
                x = paddle.static.data(
                    name=self.feed_list[0],
                    shape=self.feed_shape[0],
                    dtype=self.feed_dtype[0])
                expand_times = fluid.layers.fill_constant(
                    shape=[len(self.feed_shape[0])], dtype="int32", value=2)
                out = paddle.fluid.layers.expand(
                    x, expand_times=expand_times, **self.attrs)

                fetch_list = [out.name]

            if run_ipu:
                place = paddle.IPUPlace()
            else:
                place = paddle.CPUPlace()
            exe = paddle.static.Executor(place)
            exe.run(startup_prog)

            if run_ipu:
                feed_list = self.feed_list
                ipu_strategy = compiler.get_ipu_strategy()
                ipu_strategy.is_training = self.is_training
                program = compiler.IPUCompiledProgram(
                    main_prog,
                    ipu_strategy=ipu_strategy).compile(feed_list, fetch_list)
            else:
                program = main_prog

            result = exe.run(program, feed=self.feed, fetch_list=fetch_list)
            return result[0]


if __name__ == "__main__":
    unittest.main()
python/paddle/fluid/tests/unittests/ipu/test_fill_constant_op_ipu.py (new file, mode 100644)
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest

import numpy as np
import paddle
import paddle.fluid as fluid
import paddle.fluid.compiler as compiler
import paddle.optimizer
import paddle.static
from paddle.fluid.tests.unittests.ipu.op_test_ipu import (IPUOpTest,
                                                          np_dtype_to_fluid_str)

paddle.enable_static()


@unittest.skipIf(not paddle.is_compiled_with_ipu(),
                 "core is not compiled with IPU")
class TestBase(IPUOpTest):
    def setUp(self):
        self.set_atol()
        self.set_training()
        self.set_feed()
        self.set_feed_attr()
        self.set_attrs()

    def set_feed(self):
        self.feed = {}

    def set_feed_attr(self):
        self.feed_shape = [x.shape for x in self.feed.values()]
        self.feed_list = list(self.feed.keys())
        self.feed_dtype = [
            np_dtype_to_fluid_str(x.dtype) for x in self.feed.values()
        ]

    def set_attrs(self):
        self.attrs = {
            'name': 'x',
            'shape': [1, 3, 3, 3],
            'dtype': 'float32',
            'value': 0.3,
        }

    def _test_base(self, run_ipu=True):
        scope = fluid.core.Scope()
        main_prog = paddle.static.Program()
        startup_prog = paddle.static.Program()
        SEED = self.SEED
        main_prog.random_seed = SEED
        startup_prog.random_seed = SEED

        with fluid.scope_guard(scope):
            with paddle.static.program_guard(main_prog, startup_prog):
                x = paddle.fluid.layers.fill_constant(**self.attrs)
                out = paddle.fluid.layers.elementwise_add(x, x)

                fetch_list = [out.name]

            if run_ipu:
                place = paddle.IPUPlace()
            else:
                place = paddle.CPUPlace()
            exe = paddle.static.Executor(place)
            exe.run(startup_prog)

            if run_ipu:
                feed_list = self.feed_list
                ipu_strategy = compiler.get_ipu_strategy()
                ipu_strategy.is_training = self.is_training
                program = compiler.IPUCompiledProgram(
                    main_prog,
                    ipu_strategy=ipu_strategy).compile(feed_list, fetch_list)
            else:
                program = main_prog

            result = exe.run(program, feed=self.feed, fetch_list=fetch_list)
            return result[0]

    def test_base(self):
        res0 = self._test_base(False)
        res1 = self._test_base(True)

        self.assertTrue(
            np.allclose(
                res0.flatten(), res1.flatten(), atol=self.atol))

        self.assertTrue(res0.shape == res1.shape)


class TestCase1(TestBase):
    def set_attrs(self):
        self.attrs = {
            'name': 'x',
            'shape': [1, 3, 3, 3],
            'dtype': 'int32',
            'value': 3.0,
        }


if __name__ == "__main__":
    unittest.main()
python/paddle/fluid/tests/unittests/ipu/test_gather_op_ipu.py (new file, mode 100644)
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest

import numpy as np
import paddle
import paddle.fluid as fluid
import paddle.fluid.compiler as compiler
import paddle.optimizer
import paddle.static
from paddle.fluid.tests.unittests.ipu.op_test_ipu import (IPUOpTest,
                                                          np_dtype_to_fluid_str)

paddle.enable_static()


@unittest.skipIf(not paddle.is_compiled_with_ipu(),
                 "core is not compiled with IPU")
class TestBase(IPUOpTest):
    def setUp(self):
        self.set_atol()
        self.set_training()
        self.set_feed()
        self.set_feed_attr()
        self.set_attrs()

    def set_feed(self):
        self.feed = {
            "x": np.random.uniform(size=[10, 20]).astype('float32'),
            "y": np.array([1, 3, 5]).astype('int32'),
        }

    def set_feed_attr(self):
        self.feed_shape = [x.shape for x in self.feed.values()]
        self.feed_list = list(self.feed.keys())
        self.feed_dtype = [
            np_dtype_to_fluid_str(x.dtype) for x in self.feed.values()
        ]

    def set_attrs(self):
        self.attrs = {}

    def _test_base(self, run_ipu=True):
        scope = fluid.core.Scope()
        main_prog = paddle.static.Program()
        startup_prog = paddle.static.Program()
        SEED = self.SEED
        main_prog.random_seed = SEED
        startup_prog.random_seed = SEED

        with fluid.scope_guard(scope):
            with paddle.static.program_guard(main_prog, startup_prog):
                x = paddle.static.data(
                    name=self.feed_list[0],
                    shape=self.feed_shape[0],
                    dtype=self.feed_dtype[0])
                y = paddle.static.data(
                    name=self.feed_list[1],
                    shape=self.feed_shape[1],
                    dtype=self.feed_dtype[1])
                out = paddle.fluid.layers.gather(x, index=y, **self.attrs)

                fetch_list = [out.name]

            if run_ipu:
                place = paddle.IPUPlace()
            else:
                place = paddle.CPUPlace()
            exe = paddle.static.Executor(place)
            exe.run(startup_prog)

            if run_ipu:
                feed_list = self.feed_list
                ipu_strategy = compiler.get_ipu_strategy()
                ipu_strategy.is_training = self.is_training
                program = compiler.IPUCompiledProgram(
                    main_prog,
                    ipu_strategy=ipu_strategy).compile(feed_list, fetch_list)
            else:
                program = main_prog

            result = exe.run(program, feed=self.feed, fetch_list=fetch_list)
            return result[0]

    def test_base(self):
        res0 = self._test_base(False)
        res1 = self._test_base(True)

        self.assertTrue(
            np.allclose(
                res0.flatten(), res1.flatten(), atol=self.atol))

        self.assertTrue(res0.shape == res1.shape)


class TestCase1(TestBase):
    def set_feed(self):
        self.feed = {
            "x": np.random.uniform(size=[100]).astype('float32'),
            "y": np.array([1, 3, 5]).astype('int32'),
        }


if __name__ == "__main__":
    unittest.main()