机器未来 / Paddle (forked from PaddlePaddle / Paddle)
Commit 94acf7c8 (unverified)
Authored May 06, 2022 by Allen Guo; committed by GitHub on May 06, 2022
Parent: 832e58d6

update UTs 3 (#42519)
Showing 10 changed files with 315 additions and 603 deletions.
python/paddle/fluid/tests/unittests/ipu/test_softmax_op_ipu.py                      +17   -58
python/paddle/fluid/tests/unittests/ipu/test_softmax_with_cross_entropy_op_ipu.py   +102  -0
python/paddle/fluid/tests/unittests/ipu/test_split_op_ipu.py                        +19   -61
python/paddle/fluid/tests/unittests/ipu/test_squeeze_op_ipu.py                      +17   -58
python/paddle/fluid/tests/unittests/ipu/test_stack_op_ipu.py                        +21   -66
python/paddle/fluid/tests/unittests/ipu/test_sum_op_ipu.py                          +30   -116
python/paddle/fluid/tests/unittests/ipu/test_topk_op_ipu.py                         +28   -66
python/paddle/fluid/tests/unittests/ipu/test_transpose_op_ipu.py                    +15   -56
python/paddle/fluid/tests/unittests/ipu/test_unsqueeze_op_ipu.py                    +17   -58
python/paddle/fluid/tests/unittests/ipu/test_weight_sharing_ipu.py                  +49   -64
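Every diff below applies the same refactor: the per-test _test_base harness, which built its own Scope/Program pair, picked CPUPlace or IPUPlace, and compiled an IpuCompiledProgram for each execution mode, is replaced by build_model / run_model / test hooks driven by the shared IPUOpTest helpers. The following is only a minimal sketch of the resulting skeleton, assuming the IPUOpTest base class in op_test_ipu provides static_graph, ExecutionMode, skip_mode, run_op_test, and check as they are used in these diffs; softmax is used here purely as an example operator.

# Minimal sketch of the refactored IPU op-test skeleton (assumption: the
# IPUOpTest helpers behave as used in the diffs of this commit).
import unittest

import numpy as np
import paddle
import paddle.static
from paddle.fluid.tests.unittests.ipu.op_test_ipu import IPUOpTest


@unittest.skipIf(not paddle.is_compiled_with_ipu(),
                 "core is not compiled with IPU")
class TestExampleOp(IPUOpTest):
    def setUp(self):
        self.set_atol()
        self.set_training()
        self.set_data_feed()
        self.set_feed_attr()
        self.set_op_attrs()

    def set_data_feed(self):
        data = np.random.uniform(size=[1, 3, 2, 20])
        self.feed_fp32 = {"in_0": data.astype(np.float32)}
        self.feed_fp16 = {"in_0": data.astype(np.float16)}

    def set_feed_attr(self):
        self.feed_shape = [x.shape for x in self.feed_fp32.values()]
        self.feed_list = list(self.feed_fp32.keys())

    def set_op_attrs(self):
        self.attrs = {"axis": -1}

    @IPUOpTest.static_graph
    def build_model(self):
        # The static_graph decorator presumably supplies the Scope/Program
        # bookkeeping that the removed _test_base methods set up by hand.
        x = paddle.static.data(
            name=self.feed_list[0], shape=self.feed_shape[0], dtype='float32')
        out = paddle.fluid.layers.softmax(x, **self.attrs)
        self.fetch_list = [out.name]

    def run_model(self, exec_mode):
        # run_op_test takes over place selection, IPU compilation and result
        # collection for the given execution mode (see the removed bodies below).
        self.run_op_test(exec_mode)

    def test(self):
        for m in IPUOpTest.ExecutionMode:
            if not self.skip_mode(m):
                self.build_model()
                self.run_model(m)
        self.check()


if __name__ == "__main__":
    unittest.main()

Tests that fetch multiple outputs (split, topk) additionally post-process self.output_dict before calling check(), as their diffs show.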
python/paddle/fluid/tests/unittests/ipu/test_softmax_op_ipu.py (view file @ 94acf7c8)

...
@@ -17,7 +17,7 @@ import unittest
 import numpy as np
 import paddle
 import paddle.static
-from paddle.fluid.tests.unittests.ipu.op_test_ipu import IPUOpTest, ExecutionMode
+from paddle.fluid.tests.unittests.ipu.op_test_ipu import IPUOpTest


 @unittest.skipIf(not paddle.is_compiled_with_ipu(),
...
@@ -30,10 +30,6 @@ class TestBase(IPUOpTest):
         self.set_feed_attr()
         self.set_op_attrs()

-    @property
-    def fp16_enabled(self):
-        return True
-
     def set_data_feed(self):
         data = np.random.uniform(size=[1, 3, 2, 20])
         self.feed_fp32 = {"in_0": data.astype(np.float32)}
...
@@ -47,59 +43,22 @@ class TestBase(IPUOpTest):
     def set_op_attrs(self):
         self.attrs = {"axis": -1}

-    def _test_base(self, exec_mode):
-        scope = paddle.static.Scope()
-        main_prog = paddle.static.Program()
-        startup_prog = paddle.static.Program()
-        main_prog.random_seed = self.SEED
-        startup_prog.random_seed = self.SEED
-
-        with paddle.static.scope_guard(scope):
-            with paddle.static.program_guard(main_prog, startup_prog):
-                x = paddle.static.data(
-                    name=self.feed_list[0],
-                    shape=self.feed_shape[0],
-                    dtype='float32')
-                out = paddle.fluid.layers.softmax(x, **self.attrs)
-                fetch_list = [out.name]
-
-            if exec_mode == ExecutionMode.CPU_FP32:
-                place = paddle.CPUPlace()
-            else:
-                place = paddle.IPUPlace()
-
-            exe = paddle.static.Executor(place)
-            exe.run(startup_prog)
-
-            if exec_mode != ExecutionMode.CPU_FP32:
-                feed_list = self.feed_list
-                ipu_strategy = paddle.static.IpuStrategy()
-                ipu_strategy.set_graph_config(is_training=self.is_training)
-                if exec_mode == ExecutionMode.IPU_POPART_FP16:
-                    ipu_strategy.set_precision_config(enable_fp16=True)
-                program = paddle.static.IpuCompiledProgram(
-                    main_prog,
-                    ipu_strategy=ipu_strategy).compile(feed_list, fetch_list)
-            else:
-                program = main_prog
-
-            feed = self.feed_fp32
-            if exec_mode > ExecutionMode.IPU_FP32:
-                feed = self.feed_fp16
-
-            result = exe.run(program, feed=feed, fetch_list=fetch_list)
-            return result[0]
-
-    def test_base(self):
-        output_dict = {}
-        for mode in ExecutionMode:
-            if mode > ExecutionMode.IPU_FP32 and not self.fp16_enabled:
-                break
-            output_dict[mode] = self._test_base(mode).flatten()
-        self.check(output_dict)
+    @IPUOpTest.static_graph
+    def build_model(self):
+        x = paddle.static.data(
+            name=self.feed_list[0], shape=self.feed_shape[0], dtype='float32')
+        out = paddle.fluid.layers.softmax(x, **self.attrs)
+        self.fetch_list = [out.name]
+
+    def run_model(self, exec_mode):
+        self.run_op_test(exec_mode)
+
+    def test(self):
+        for m in IPUOpTest.ExecutionMode:
+            if not self.skip_mode(m):
+                self.build_model()
+                self.run_model(m)
+        self.check()


 class TestCase1(TestBase):
...
python/paddle/fluid/tests/unittests/ipu/test_softmax_with_cross_entropy_op_ipu.py (new file, 0 → 100644, view file @ 94acf7c8)

# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import unittest

import numpy as np
import paddle
import paddle.static
from paddle.fluid.tests.unittests.ipu.op_test_ipu import IPUOpTest
import paddle.nn.functional as F


@unittest.skipIf(not paddle.is_compiled_with_ipu(),
                 "core is not compiled with IPU")
class TestBase(IPUOpTest):
    def setUp(self):
        self.set_atol()
        self.set_training()
        self.set_data_feed()
        self.set_feed_attr()
        self.set_op_attrs()

    def set_data_feed(self):
        x = np.random.uniform(size=[3, 7])
        label = np.arange(3).reshape([3, 1])
        self.feed_fp32 = {
            "x": x.astype(np.float32),
            "label": label.astype(np.int64)
        }
        self.feed_fp16 = {
            "x": x.astype(np.float16),
            "label": label.astype(np.int32)
        }

    def set_feed_attr(self):
        self.feed_shape = [x.shape for x in self.feed_fp32.values()]
        self.feed_list = list(self.feed_fp32.keys())

    def set_op_attrs(self):
        self.attrs = {
            'soft_label': False,
        }

    @IPUOpTest.static_graph
    def build_model(self, on_ipu):
        x = paddle.static.data(
            name=self.feed_list[0], shape=self.feed_shape[0], dtype="float32")
        if on_ipu:
            label = paddle.static.data(
                name=self.feed_list[1], shape=self.feed_shape[1], dtype='int32')
        else:
            label = paddle.static.data(
                name=self.feed_list[1], shape=self.feed_shape[1], dtype='int64')
        out = F.softmax_with_cross_entropy(x, label, **self.attrs)
        self.fetch_list = [out.name]

    def run_model(self, exec_mode):
        if self.is_ipu_mode(exec_mode):
            self.feed_fp32['label'] = self.feed_fp32['label'].astype(np.int32)
        self.run_op_test(exec_mode)

    def test(self):
        for m in IPUOpTest.ExecutionMode:
            if not self.skip_mode(m):
                self.build_model(self.is_ipu_mode(m))
                self.run_model(m)
        self.check()


class TestCase1(TestBase):
    def set_op_attrs(self):
        self.attrs = {
            'soft_label': False,
            'ignore_index': 1,
        }


class TestCase2(TestBase):
    def set_data_feed(self):
        x = np.random.uniform(size=[30, 70])
        label = np.arange(30).reshape([30, 1])
        self.feed_fp32 = {
            "x": x.astype(np.float32),
            "label": label.astype(np.int64)
        }
        self.feed_fp16 = {
            "x": x.astype(np.float16),
            "label": label.astype(np.int32)
        }


if __name__ == "__main__":
    unittest.main()
python/paddle/fluid/tests/unittests/ipu/test_split_op_ipu.py (view file @ 94acf7c8)

...
@@ -17,7 +17,7 @@ import unittest
 import numpy as np
 import paddle
 import paddle.static
-from paddle.fluid.tests.unittests.ipu.op_test_ipu import IPUOpTest, ExecutionMode
+from paddle.fluid.tests.unittests.ipu.op_test_ipu import IPUOpTest


 @unittest.skipIf(not paddle.is_compiled_with_ipu(),
...
@@ -30,13 +30,8 @@ class TestBase(IPUOpTest):
         self.set_feed_attr()
         self.set_op_attrs()

-    @property
-    def fp16_enabled(self):
-        return True
-
     def set_data_feed(self):
         data1 = np.random.uniform(size=[1, 3, 10, 10])
         self.feed_fp32 = {'x': data1.astype(np.float32)}
         self.feed_fp16 = {'x': data1.astype(np.float16)}
...
@@ -47,61 +42,24 @@ class TestBase(IPUOpTest):
     def set_op_attrs(self):
         self.attrs = {"num_or_sections": [1, 1, 1], "axis": 1}

-    def _test_base(self, exec_mode):
-        scope = paddle.static.Scope()
-        main_prog = paddle.static.Program()
-        startup_prog = paddle.static.Program()
-        main_prog.random_seed = self.SEED
-        startup_prog.random_seed = self.SEED
-
-        with paddle.static.scope_guard(scope):
-            with paddle.static.program_guard(main_prog, startup_prog):
-                x = paddle.static.data(
-                    name=self.feed_list[0],
-                    shape=self.feed_shape[0],
-                    dtype='float32')
-                out = paddle.split(x, **self.attrs)
-                fetch_list = [fetch.name for fetch in out]
-
-            if exec_mode == ExecutionMode.CPU_FP32:
-                place = paddle.CPUPlace()
-            else:
-                place = paddle.IPUPlace()
-
-            exe = paddle.static.Executor(place)
-            exe.run(startup_prog)
-
-            if exec_mode != ExecutionMode.CPU_FP32:
-                feed_list = self.feed_list
-                ipu_strategy = paddle.static.IpuStrategy()
-                ipu_strategy.set_graph_config(is_training=self.is_training)
-                if exec_mode == ExecutionMode.IPU_POPART_FP16:
-                    ipu_strategy.set_precision_config(enable_fp16=True)
-                program = paddle.static.IpuCompiledProgram(
-                    main_prog,
-                    ipu_strategy=ipu_strategy).compile(feed_list, fetch_list)
-            else:
-                program = main_prog
-
-            feed = self.feed_fp32
-            if exec_mode > ExecutionMode.IPU_FP32:
-                feed = self.feed_fp16
-
-            result = exe.run(program, feed=feed, fetch_list=fetch_list)
-            return result[0]
-
-    def test_base(self):
-        output_dict = {}
-        for mode in ExecutionMode:
-            if (mode > ExecutionMode.IPU_FP32 and not self.fp16_enabled
-                    ) or mode == ExecutionMode.IPU_POPART_FP16:
-                break
-            output_dict[mode] = self._test_base(mode).flatten()
-        self.check(output_dict)
+    @IPUOpTest.static_graph
+    def build_model(self):
+        x = paddle.static.data(
+            name=self.feed_list[0], shape=self.feed_shape[0], dtype='float32')
+        out = paddle.split(x, **self.attrs)
+        self.fetch_list = [fetch.name for fetch in out]
+
+    def run_model(self, exec_mode):
+        self.run_op_test(exec_mode)
+
+    def test(self):
+        for m in IPUOpTest.ExecutionMode:
+            if not self.skip_mode(m):
+                self.build_model()
+                self.run_model(m)
+        for k, v in self.output_dict.items():
+            self.output_dict[k] = np.concatenate([vv.flatten() for vv in v])
+        self.check()


 class TestCase1(TestBase):
...
python/paddle/fluid/tests/unittests/ipu/test_squeeze_op_ipu.py (view file @ 94acf7c8)

...
@@ -17,7 +17,7 @@ import unittest
 import numpy as np
 import paddle
 import paddle.static
-from paddle.fluid.tests.unittests.ipu.op_test_ipu import IPUOpTest, ExecutionMode
+from paddle.fluid.tests.unittests.ipu.op_test_ipu import IPUOpTest


 @unittest.skipIf(not paddle.is_compiled_with_ipu(),
...
@@ -30,10 +30,6 @@ class TestBase(IPUOpTest):
         self.set_feed_attr()
         self.set_op_attrs()

-    @property
-    def fp16_enabled(self):
-        return True
-
     def set_data_feed(self):
         data = np.random.uniform(size=[1, 3, 1, 5])
         self.feed_fp32 = {"in_0": data.astype(np.float32)}
...
@@ -47,59 +43,22 @@ class TestBase(IPUOpTest):
     def set_op_attrs(self):
         self.attrs = {"axes": [0]}

-    def _test_base(self, exec_mode):
-        scope = paddle.static.Scope()
-        main_prog = paddle.static.Program()
-        startup_prog = paddle.static.Program()
-        main_prog.random_seed = self.SEED
-        startup_prog.random_seed = self.SEED
-
-        with paddle.static.scope_guard(scope):
-            with paddle.static.program_guard(main_prog, startup_prog):
-                x = paddle.static.data(
-                    name=self.feed_list[0],
-                    shape=self.feed_shape[0],
-                    dtype='float32')
-                out = paddle.fluid.layers.squeeze(x, **self.attrs)
-                fetch_list = [out.name]
-
-            if exec_mode == ExecutionMode.CPU_FP32:
-                place = paddle.CPUPlace()
-            else:
-                place = paddle.IPUPlace()
-
-            exe = paddle.static.Executor(place)
-            exe.run(startup_prog)
-
-            if exec_mode != ExecutionMode.CPU_FP32:
-                feed_list = self.feed_list
-                ipu_strategy = paddle.static.IpuStrategy()
-                ipu_strategy.set_graph_config(is_training=self.is_training)
-                if exec_mode == ExecutionMode.IPU_POPART_FP16:
-                    ipu_strategy.set_precision_config(enable_fp16=True)
-                program = paddle.static.IpuCompiledProgram(
-                    main_prog,
-                    ipu_strategy=ipu_strategy).compile(feed_list, fetch_list)
-            else:
-                program = main_prog
-
-            feed = self.feed_fp32
-            if exec_mode > ExecutionMode.IPU_FP32:
-                feed = self.feed_fp16
-
-            result = exe.run(program, feed=feed, fetch_list=fetch_list)
-            return result[0]
-
-    def test_base(self):
-        output_dict = {}
-        for mode in ExecutionMode:
-            if mode > ExecutionMode.IPU_FP32 and not self.fp16_enabled:
-                break
-            output_dict[mode] = self._test_base(mode)
-        self.check(output_dict, check_shape=True)
+    @IPUOpTest.static_graph
+    def build_model(self):
+        x = paddle.static.data(
+            name=self.feed_list[0], shape=self.feed_shape[0], dtype='float32')
+        out = paddle.fluid.layers.squeeze(x, **self.attrs)
+        self.fetch_list = [out.name]
+
+    def run_model(self, exec_mode):
+        self.run_op_test(exec_mode)
+
+    def test(self):
+        for m in IPUOpTest.ExecutionMode:
+            if not self.skip_mode(m):
+                self.build_model()
+                self.run_model(m)
+        self.check()


 class TestCase1(TestBase):
...
python/paddle/fluid/tests/unittests/ipu/test_stack_op_ipu.py (view file @ 94acf7c8)

...
@@ -17,7 +17,7 @@ import unittest
 import numpy as np
 import paddle
 import paddle.static
-from paddle.fluid.tests.unittests.ipu.op_test_ipu import IPUOpTest, ExecutionMode
+from paddle.fluid.tests.unittests.ipu.op_test_ipu import IPUOpTest


 @unittest.skipIf(not paddle.is_compiled_with_ipu(),
...
@@ -30,10 +30,6 @@ class TestBase(IPUOpTest):
         self.set_feed_attr()
         self.set_op_attrs()

-    @property
-    def fp16_enabled(self):
-        return True
-
     def set_data_feed(self):
         x = np.random.uniform(size=[1, 2])
         y = np.random.uniform(size=[1, 2])
...
@@ -57,67 +53,26 @@ class TestBase(IPUOpTest):
     def set_op_attrs(self):
         self.attrs = {"axis": 0}

-    def _test_base(self, exec_mode):
-        scope = paddle.static.Scope()
-        main_prog = paddle.static.Program()
-        startup_prog = paddle.static.Program()
-        main_prog.random_seed = self.SEED
-        startup_prog.random_seed = self.SEED
-
-        with paddle.static.scope_guard(scope):
-            with paddle.static.program_guard(main_prog, startup_prog):
-                x = paddle.static.data(
-                    name=self.feed_list[0],
-                    shape=self.feed_shape[0],
-                    dtype='float32')
-                y = paddle.static.data(
-                    name=self.feed_list[1],
-                    shape=self.feed_shape[1],
-                    dtype='float32')
-                z = paddle.static.data(
-                    name=self.feed_list[2],
-                    shape=self.feed_shape[2],
-                    dtype='float32')
-                out = paddle.fluid.layers.stack([x, y, z], **self.attrs)
-                fetch_list = [out.name]
-
-            if exec_mode == ExecutionMode.CPU_FP32:
-                place = paddle.CPUPlace()
-            else:
-                place = paddle.IPUPlace()
-
-            exe = paddle.static.Executor(place)
-            exe.run(startup_prog)
-
-            if exec_mode != ExecutionMode.CPU_FP32:
-                feed_list = self.feed_list
-                ipu_strategy = paddle.static.IpuStrategy()
-                ipu_strategy.set_graph_config(is_training=self.is_training)
-                if exec_mode == ExecutionMode.IPU_POPART_FP16:
-                    ipu_strategy.set_precision_config(enable_fp16=True)
-                program = paddle.static.IpuCompiledProgram(
-                    main_prog,
-                    ipu_strategy=ipu_strategy).compile(feed_list, fetch_list)
-            else:
-                program = main_prog
-
-            feed = self.feed_fp32
-            if exec_mode > ExecutionMode.IPU_FP32:
-                feed = self.feed_fp16
-
-            result = exe.run(program, feed=feed, fetch_list=fetch_list)
-            return result[0]
-
-    def test_base(self):
-        output_dict = {}
-        for mode in ExecutionMode:
-            if mode > ExecutionMode.IPU_FP32 and not self.fp16_enabled:
-                break
-            output_dict[mode] = self._test_base(mode)
-        self.check(output_dict, check_shape=True)
+    @IPUOpTest.static_graph
+    def build_model(self):
+        x = paddle.static.data(
+            name=self.feed_list[0], shape=self.feed_shape[0], dtype='float32')
+        y = paddle.static.data(
+            name=self.feed_list[1], shape=self.feed_shape[1], dtype='float32')
+        z = paddle.static.data(
+            name=self.feed_list[2], shape=self.feed_shape[2], dtype='float32')
+        out = paddle.fluid.layers.stack([x, y, z], **self.attrs)
+        self.fetch_list = [out.name]
+
+    def run_model(self, exec_mode):
+        self.run_op_test(exec_mode)
+
+    def test(self):
+        for m in IPUOpTest.ExecutionMode:
+            if not self.skip_mode(m):
+                self.build_model()
+                self.run_model(m)
+        self.check()


 class TestCase1(TestBase):
...
python/paddle/fluid/tests/unittests/ipu/test_sum_op_ipu.py (view file @ 94acf7c8)

...
@@ -17,7 +17,7 @@ import unittest
 import numpy as np
 import paddle
 import paddle.static
-from paddle.fluid.tests.unittests.ipu.op_test_ipu import IPUOpTest, ExecutionMode
+from paddle.fluid.tests.unittests.ipu.op_test_ipu import IPUOpTest


 @unittest.skipIf(not paddle.is_compiled_with_ipu(),
...
@@ -30,10 +30,6 @@ class TestBase(IPUOpTest):
         self.set_feed_attr()
         self.set_op_attrs()

-    @property
-    def fp16_enabled(self):
-        return True
-
     def set_data_feed(self):
         x = np.random.uniform(size=[1, 3, 2, 2])
         y = np.random.uniform(size=[1, 3, 2, 2])
...
@@ -48,134 +44,52 @@ class TestBase(IPUOpTest):
     def set_op_attrs(self):
         self.attrs = {}

-    def _test_base(self, exec_mode):
-        scope = paddle.static.Scope()
-        main_prog = paddle.static.Program()
-        startup_prog = paddle.static.Program()
-        main_prog.random_seed = self.SEED
-        startup_prog.random_seed = self.SEED
-
-        with paddle.static.scope_guard(scope):
-            with paddle.static.program_guard(main_prog, startup_prog):
-                x = paddle.static.data(
-                    name=self.feed_list[0],
-                    shape=self.feed_shape[0],
-                    dtype='float32')
-                y = paddle.static.data(
-                    name=self.feed_list[1],
-                    shape=self.feed_shape[1],
-                    dtype='float32')
-                out = paddle.fluid.layers.sum([x, y], **self.attrs)
-                fetch_list = [out.name]
-
-            if exec_mode == ExecutionMode.CPU_FP32:
-                place = paddle.CPUPlace()
-            else:
-                place = paddle.IPUPlace()
-
-            exe = paddle.static.Executor(place)
-            exe.run(startup_prog)
-
-            if exec_mode != ExecutionMode.CPU_FP32:
-                feed_list = self.feed_list
-                ipu_strategy = paddle.static.IpuStrategy()
-                ipu_strategy.set_graph_config(is_training=self.is_training)
-                if exec_mode == ExecutionMode.IPU_POPART_FP16:
-                    ipu_strategy.set_precision_config(enable_fp16=True)
-                program = paddle.static.IpuCompiledProgram(
-                    main_prog,
-                    ipu_strategy=ipu_strategy).compile(feed_list, fetch_list)
-            else:
-                program = main_prog
-
-            feed = self.feed_fp32
-            if exec_mode > ExecutionMode.IPU_FP32:
-                feed = self.feed_fp16
-
-            result = exe.run(program, feed=feed, fetch_list=fetch_list)
-            return result[0]
+    @IPUOpTest.static_graph
+    def build_model(self):
+        x = paddle.static.data(
+            name=self.feed_list[0], shape=self.feed_shape[0], dtype='float32')
+        y = paddle.static.data(
+            name=self.feed_list[1], shape=self.feed_shape[1], dtype='float32')
+        out = paddle.fluid.layers.sum([x, y], **self.attrs)
+        self.fetch_list = [out.name]

-    def test_base(self):
-        output_dict = {}
-        for mode in ExecutionMode:
-            if mode > ExecutionMode.IPU_FP32 and not self.fp16_enabled:
-                break
-            output_dict[mode] = self._test_base(mode)
+    def run_model(self, exec_mode):
+        self.run_op_test(exec_mode)

-        self.check(output_dict, check_shape=True)
+    def test(self):
+        for m in IPUOpTest.ExecutionMode:
+            if not self.skip_mode(m):
+                self.build_model()
+                self.run_model(m)
+        self.check()


 @unittest.skip('')
 class TestCase1(TestBase):
-    def set_feed(self):
+    def set_data_feed(self):
         x = np.random.uniform(size=[1, 3, 2, 2])
         y = np.random.uniform(size=[1, 3, 2, 2])
         z = np.random.uniform(size=[1, 3, 2, 2])
         self.feed_fp32 = {
             "x": x.astype(np.float32),
             "y": y.astype(np.float32),
-            "z": y.astype(np.float32)
+            "z": z.astype(np.float32)
         }
         self.feed_fp16 = {
             "x": x.astype(np.float16),
             "y": y.astype(np.float16),
-            "z": y.astype(np.float16)
+            "z": z.astype(np.float16)
         }

-    def _test_base(self, exec_mode):
-        scope = paddle.static.Scope()
-        main_prog = paddle.static.Program()
-        startup_prog = paddle.static.Program()
-        main_prog.random_seed = self.SEED
-        startup_prog.random_seed = self.SEED
-
-        with paddle.static.scope_guard(scope):
-            with paddle.static.program_guard(main_prog, startup_prog):
-                x = paddle.static.data(
-                    name=self.feed_list[0],
-                    shape=self.feed_shape[0],
-                    dtype='float32')
-                y = paddle.static.data(
-                    name=self.feed_list[1],
-                    shape=self.feed_shape[1],
-                    dtype='float32')
-                z = paddle.static.data(
-                    name=self.feed_list[2],
-                    shape=self.feed_shape[2],
-                    dtype='float32')
-                out = paddle.fluid.layers.sum([x, y, z], **self.attrs)
-                fetch_list = [out.name]
-
-            if exec_mode == ExecutionMode.CPU_FP32:
-                place = paddle.CPUPlace()
-            else:
-                place = paddle.IPUPlace()
-
-            exe = paddle.static.Executor(place)
-            exe.run(startup_prog)
-
-            if exec_mode != ExecutionMode.CPU_FP32:
-                feed_list = self.feed_list
-                ipu_strategy = paddle.static.IpuStrategy()
-                ipu_strategy.set_graph_config(is_training=self.is_training)
-                if exec_mode == ExecutionMode.IPU_POPART_FP16:
-                    ipu_strategy.set_precision_config(enable_fp16=True)
-                program = paddle.static.IpuCompiledProgram(
-                    main_prog,
-                    iipu_strategy=ipu_strategy).compile(feed_list, fetch_list)
-            else:
-                program = main_prog
-
-            feed = self.feed_fp32
-            if exec_mode > ExecutionMode.IPU_FP32:
-                feed = self.feed_fp16
-
-            result = exe.run(program, feed=self.feed, fetch_list=fetch_list)
-            return result[0]
+    @IPUOpTest.static_graph
+    def build_model(self):
+        x = paddle.static.data(
+            name=self.feed_list[0], shape=self.feed_shape[0], dtype='float32')
+        y = paddle.static.data(
+            name=self.feed_list[1], shape=self.feed_shape[1], dtype='float32')
+        z = paddle.static.data(
+            name=self.feed_list[2], shape=self.feed_shape[2], dtype='float32')
+        out = paddle.fluid.layers.sum([x, y, z], **self.attrs)
+        self.fetch_list = [out.name]


 if __name__ == "__main__":
...
python/paddle/fluid/tests/unittests/ipu/test_topk_op_ipu.py (view file @ 94acf7c8)

...
@@ -17,7 +17,7 @@ import unittest
 import numpy as np
 import paddle
 import paddle.static
-from paddle.fluid.tests.unittests.ipu.op_test_ipu import IPUOpTest, ExecutionMode
+from paddle.fluid.tests.unittests.ipu.op_test_ipu import IPUOpTest


 @unittest.skipIf(not paddle.is_compiled_with_ipu(),
...
@@ -31,10 +31,6 @@ class TestTopKOp(IPUOpTest):
         self.set_test_op()
         self.set_op_attrs()

-    @property
-    def fp16_enabled(self):
-        return True
-
     def set_test_op(self):
         self.op = paddle.fluid.layers.topk
...
@@ -53,69 +49,35 @@ class TestTopKOp(IPUOpTest):
         if not self.use_k_as_const_variable:
             self.attrs["k"] = 3

-    def _test_base(self, exec_mode):
-        scope = paddle.static.Scope()
-        main_prog = paddle.static.Program()
-        startup_prog = paddle.static.Program()
-        main_prog.random_seed = self.SEED
-        startup_prog.random_seed = self.SEED
-
-        with paddle.static.scope_guard(scope):
-            with paddle.static.program_guard(main_prog, startup_prog):
-                x = paddle.static.data(
-                    name=self.feed_list[0],
-                    shape=self.feed_shape[0],
-                    dtype='float32')
-                if not self.use_k_as_const_variable:
-                    topk_values, topk_indices = self.op(x, **self.attrs)
-                else:
-                    # !important, popart cannot accept non const tensor
-                    K_t = paddle.fluid.layers.fill_constant(
-                        shape=[1], dtype='int32', value=self.k, name="in_2")
-                    topk_values, topk_indices = self.op(x, K_t, **self.attrs)
-                fetch_list = [topk_values.name, topk_indices.name]
-
-            if exec_mode == ExecutionMode.CPU_FP32:
-                place = paddle.CPUPlace()
-            else:
-                place = paddle.IPUPlace()
-
-            exe = paddle.static.Executor(place)
-            exe.run(startup_prog)
-
-            if exec_mode != ExecutionMode.CPU_FP32:
-                feed_list = self.feed_list
-                ipu_strategy = paddle.static.IpuStrategy()
-                ipu_strategy.set_graph_config(is_training=self.is_training)
-                if exec_mode == ExecutionMode.IPU_POPART_FP16:
-                    ipu_strategy.set_precision_config(enable_fp16=True)
-                program = paddle.static.IpuCompiledProgram(
-                    main_prog,
-                    ipu_strategy=ipu_strategy).compile(feed_list, fetch_list)
-            else:
-                program = main_prog
-
-            feed = self.feed_fp32
-            if exec_mode > ExecutionMode.IPU_FP32:
-                feed = self.feed_fp16
-
-            result = exe.run(program, feed=feed, fetch_list=fetch_list)
-            return result
-
-    def test_base(self):
+    @IPUOpTest.static_graph
+    def build_model(self):
+        x = paddle.static.data(
+            name=self.feed_list[0], shape=self.feed_shape[0], dtype='float32')
+        if not self.use_k_as_const_variable:
+            topk_values, topk_indices = self.op(x, **self.attrs)
+        else:
+            # !important, popart cannot accept non const tensor
+            K_t = paddle.fluid.layers.fill_constant(
+                shape=[1], dtype='int32', value=self.k, name="in_2")
+            topk_values, topk_indices = self.op(x, K_t, **self.attrs)
+        self.fetch_list = [topk_values.name, topk_indices.name]
+
+    def run_model(self, exec_mode):
+        self.run_op_test(exec_mode)
+
+    def test(self):
+        for m in IPUOpTest.ExecutionMode:
+            if not self.skip_mode(m):
+                self.build_model()
+                self.run_model(m)
         value_dict = {}
         index_dict = {}
-        for mode in ExecutionMode:
-            if mode > ExecutionMode.IPU_FP32 and not self.fp16_enabled:
-                break
-            value, index = self._test_base(mode)
-            value_dict[mode] = value
-            index_dict[mode] = index
-        self.check(value_dict)
-        self.check(index_dict)
+        for k, v in self.output_dict.items():
+            value_dict[k] = v[0]
+            index_dict[k] = v[1]
+        self.check(output_dict=value_dict)
+        self.check(output_dict=index_dict)


 class TestCase2(TestTopKOp):
...
python/paddle/fluid/tests/unittests/ipu/test_transpose_op_ipu.py (view file @ 94acf7c8)

...
@@ -17,7 +17,7 @@ import unittest
 import numpy as np
 import paddle
 import paddle.static
-from paddle.fluid.tests.unittests.ipu.op_test_ipu import IPUOpTest, ExecutionMode
+from paddle.fluid.tests.unittests.ipu.op_test_ipu import IPUOpTest


 @unittest.skipIf(not paddle.is_compiled_with_ipu(),
...
@@ -30,10 +30,6 @@ class TestBase(IPUOpTest):
         self.set_feed_attr()
         self.set_op_attrs()

-    @property
-    def fp16_enabled(self):
-        return True
-
     def set_data_feed(self):
         data = np.random.uniform(size=[1, 3, 10, 10])
         self.feed_fp32 = {"x": data.astype(np.float32)}
...
@@ -47,59 +43,22 @@ class TestBase(IPUOpTest):
     def set_op_attrs(self):
         self.attrs = {"perm": [0, 2, 3, 1]}

-    def _test_base(self, exec_mode):
-        scope = paddle.static.Scope()
-        main_prog = paddle.static.Program()
-        startup_prog = paddle.static.Program()
-        main_prog.random_seed = self.SEED
-        startup_prog.random_seed = self.SEED
-
-        with paddle.static.scope_guard(scope):
-            with paddle.static.program_guard(main_prog, startup_prog):
-                x = paddle.static.data(
-                    name=self.feed_list[0],
-                    shape=self.feed_shape[0],
-                    dtype='float32')
-                out = paddle.fluid.layers.transpose(x, **self.attrs)
-                fetch_list = [out.name]
-
-            if exec_mode == ExecutionMode.CPU_FP32:
-                place = paddle.CPUPlace()
-            else:
-                place = paddle.IPUPlace()
-
-            exe = paddle.static.Executor(place)
-            exe.run(startup_prog)
-
-            if exec_mode != ExecutionMode.CPU_FP32:
-                feed_list = self.feed_list
-                ipu_strategy = paddle.static.IpuStrategy()
-                ipu_strategy.set_graph_config(is_training=self.is_training)
-                if exec_mode == ExecutionMode.IPU_POPART_FP16:
-                    ipu_strategy.set_precision_config(enable_fp16=True)
-                program = paddle.static.IpuCompiledProgram(
-                    main_prog,
-                    ipu_strategy=ipu_strategy).compile(feed_list, fetch_list)
-            else:
-                program = main_prog
-
-            feed = self.feed_fp32
-            if exec_mode > ExecutionMode.IPU_FP32:
-                feed = self.feed_fp16
-
-            result = exe.run(program, feed=feed, fetch_list=fetch_list)
-            return result[0]
+    @IPUOpTest.static_graph
+    def build_model(self):
+        x = paddle.static.data(
+            name=self.feed_list[0], shape=self.feed_shape[0], dtype='float32')
+        out = paddle.fluid.layers.transpose(x, **self.attrs)
+        self.fetch_list = [out.name]

-    def test(self):
-        output_dict = {}
-        for mode in ExecutionMode:
-            if mode > ExecutionMode.IPU_FP32 and not self.fp16_enabled:
-                break
-            output_dict[mode] = self._test_base(mode).flatten()
+    def run_model(self, exec_mode):
+        self.run_op_test(exec_mode)

-        self.check(output_dict, check_shape=True)
+    def test(self):
+        for m in IPUOpTest.ExecutionMode:
+            if not self.skip_mode(m):
+                self.build_model()
+                self.run_model(m)
+        self.check(check_shape=True)


 class TestCase1(TestBase):
...
python/paddle/fluid/tests/unittests/ipu/test_unsqueeze_op_ipu.py (view file @ 94acf7c8)

...
@@ -17,7 +17,7 @@ import unittest
 import numpy as np
 import paddle
 import paddle.static
-from paddle.fluid.tests.unittests.ipu.op_test_ipu import IPUOpTest, ExecutionMode
+from paddle.fluid.tests.unittests.ipu.op_test_ipu import IPUOpTest


 @unittest.skipIf(not paddle.is_compiled_with_ipu(),
...
@@ -30,10 +30,6 @@ class TestBase(IPUOpTest):
         self.set_feed_attr()
         self.set_op_attrs()

-    @property
-    def fp16_enabled(self):
-        return True
-
     def set_data_feed(self):
         data = np.random.uniform(size=[1, 2, 3])
         self.feed_fp32 = {"x": data.astype(np.float32)}
...
@@ -47,59 +43,22 @@ class TestBase(IPUOpTest):
     def set_op_attrs(self):
         self.attrs = {"axes": 0}

-    def _test_base(self, exec_mode):
-        scope = paddle.static.Scope()
-        main_prog = paddle.static.Program()
-        startup_prog = paddle.static.Program()
-        main_prog.random_seed = self.SEED
-        startup_prog.random_seed = self.SEED
-
-        with paddle.static.scope_guard(scope):
-            with paddle.static.program_guard(main_prog, startup_prog):
-                x = paddle.static.data(
-                    name=self.feed_list[0],
-                    shape=self.feed_shape[0],
-                    dtype='float32')
-                out = paddle.fluid.layers.unsqueeze(x, **self.attrs)
-                fetch_list = [out.name]
-
-            if exec_mode == ExecutionMode.CPU_FP32:
-                place = paddle.CPUPlace()
-            else:
-                place = paddle.IPUPlace()
-
-            exe = paddle.static.Executor(place)
-            exe.run(startup_prog)
-
-            if exec_mode != ExecutionMode.CPU_FP32:
-                feed_list = self.feed_list
-                ipu_strategy = paddle.static.IpuStrategy()
-                ipu_strategy.set_graph_config(is_training=self.is_training)
-                if exec_mode == ExecutionMode.IPU_POPART_FP16:
-                    ipu_strategy.set_precision_config(enable_fp16=True)
-                program = paddle.static.IpuCompiledProgram(
-                    main_prog,
-                    ipu_strategy=ipu_strategy).compile(feed_list, fetch_list)
-            else:
-                program = main_prog
-
-            feed = self.feed_fp32
-            if exec_mode > ExecutionMode.IPU_FP32:
-                feed = self.feed_fp16
-
-            result = exe.run(program, feed=feed, fetch_list=fetch_list)
-            return result[0]
-
-    def test_base(self):
-        output_dict = {}
-        for mode in ExecutionMode:
-            if mode > ExecutionMode.IPU_FP32 and not self.fp16_enabled:
-                break
-            output_dict[mode] = self._test_base(mode).flatten()
-        self.check(output_dict, check_shape=True)
+    @IPUOpTest.static_graph
+    def build_model(self):
+        x = paddle.static.data(
+            name=self.feed_list[0], shape=self.feed_shape[0], dtype='float32')
+        out = paddle.fluid.layers.unsqueeze(x, **self.attrs)
+        self.fetch_list = [out.name]
+
+    def run_model(self, exec_mode):
+        self.run_op_test(exec_mode)
+
+    def test(self):
+        for m in IPUOpTest.ExecutionMode:
+            if not self.skip_mode(m):
+                self.build_model()
+                self.run_model(m)
+        self.check(check_shape=True)


 class TestCase1(TestBase):
...
python/paddle/fluid/tests/unittests/ipu/test_weight_sharing_ipu.py (view file @ 94acf7c8)

...
@@ -50,72 +50,57 @@ class TestWeightSharing(IPUOpTest):
     def set_op_attrs(self):
         self.attrs = {}

-    def _test_base(self, run_ipu=True):
-        scope = paddle.static.Scope()
-        main_prog = paddle.static.Program()
-        startup_prog = paddle.static.Program()
-        main_prog.random_seed = self.SEED
-        startup_prog.random_seed = self.SEED
-
-        with paddle.static.scope_guard(scope):
-            with paddle.static.program_guard(main_prog, startup_prog):
-                x = paddle.static.data(
-                    name=self.feed_list[0],
-                    shape=self.feed_shape[0],
-                    dtype='int64')
-                with paddle.static.ipu_shard_guard(index=0, stage=0):
-                    y = paddle.fluid.layers.embedding(
-                        input=x,
-                        size=[768, 768],
-                        dtype='float32',
-                        param_attr=paddle.fluid.ParamAttr(name='word_embedding'),
-                        is_sparse=False)
-                with paddle.static.ipu_shard_guard(index=1, stage=1):
-                    z = paddle.fluid.layers.fc(
-                        input=y, size=768,
-                        param_attr=paddle.fluid.ParamAttr(name="fc"))
-                with paddle.static.ipu_shard_guard(index=0, stage=2):
-                    out = paddle.fluid.layers.matmul(
-                        x=z,
-                        y=main_prog.global_block().var('word_embedding'),
-                        transpose_y=True)
-                fetch_list = [out.name]
-
-            if run_ipu:
-                place = paddle.IPUPlace()
-            else:
-                place = paddle.CPUPlace()
-
-            exe = paddle.static.Executor(place)
-            exe.run(startup_prog)
-
-            if run_ipu:
-                feed_list = self.feed_list
-                ipu_strategy = paddle.static.IpuStrategy()
-                ipu_strategy.set_graph_config(
-                    num_ipus=2,
-                    is_training=self.is_training,
-                    enable_manual_shard=True)
-                ipu_strategy.set_pipelining_config(
-                    enable_pipelining=True, batches_per_step=3)
-                program = paddle.static.IpuCompiledProgram(
-                    main_prog,
-                    ipu_strategy=ipu_strategy).compile(feed_list, fetch_list)
-            else:
-                program = main_prog
-
-            feed = self.feed_ipu if run_ipu else self.feed_cpu
-            result = exe.run(program, feed=feed, fetch_list=fetch_list)
-            return result[0]
+    @IPUOpTest.static_graph
+    def build_model(self):
+        x = paddle.static.data(
+            name=self.feed_list[0], shape=self.feed_shape[0], dtype='int64')
+        with paddle.static.ipu_shard_guard(index=0, stage=0):
+            y = paddle.fluid.layers.embedding(
+                input=x,
+                size=[768, 768],
+                dtype='float32',
+                param_attr=paddle.fluid.ParamAttr(name='word_embedding'),
+                is_sparse=False)
+        with paddle.static.ipu_shard_guard(index=1, stage=1):
+            z = paddle.fluid.layers.fc(
+                input=y, size=768,
+                param_attr=paddle.fluid.ParamAttr(name="fc"))
+        with paddle.static.ipu_shard_guard(index=0, stage=2):
+            out = paddle.fluid.layers.matmul(
+                x=z,
+                y=self.main_prog.global_block().var('word_embedding'),
+                transpose_y=True)
+        self.feed_list = [x.name]
+        self.fetch_list = [out.name]
+
+    def run_model(self, run_ipu):
+        self.build_model()
+        if run_ipu:
+            place = paddle.IPUPlace()
+        else:
+            place = paddle.CPUPlace()
+        exe = paddle.static.Executor(place)
+        exe.run(self.startup_prog)
+        if run_ipu:
+            ipu_strategy = paddle.static.IpuStrategy()
+            ipu_strategy.set_graph_config(
+                num_ipus=2,
+                is_training=self.is_training,
+                enable_manual_shard=True)
+            ipu_strategy.set_pipelining_config(
+                enable_pipelining=True, batches_per_step=3)
+            program = paddle.static.IpuCompiledProgram(
+                self.main_prog,
+                ipu_strategy=ipu_strategy).compile(self.feed_list,
+                                                   self.fetch_list)
+        else:
+            program = self.main_prog
+        feed = self.feed_ipu if run_ipu else self.feed_cpu
+        result = exe.run(program, feed=feed, fetch_list=self.fetch_list)
+        return result[0]

     def test_base(self):
-        res0 = self._test_base(False)
-        res1 = self._test_base(True)
+        res0 = self.run_model(False)
+        res1 = self.run_model(True)

         self.assertTrue(np.allclose(
...