Crayon鑫 / Paddle · Commit a9dbdab5
Forked from PaddlePaddle / Paddle (in sync with the fork source)
Commit a9dbdab5 (unverified)

Merge pull request #7396 from reyoung/feature/parallel_for_unittest

Feature/parallel for unittest

Authored by Yu Yang on Jan 11, 2018; committed via GitHub on Jan 11, 2018.
Parents: 95c0c126, 83c72536
Showing 2 changed files with 155 additions and 41 deletions:

paddle/framework/init.cc  (+4, -1)
python/paddle/v2/fluid/tests/test_parallel_op.py  (+151, -40)
paddle/framework/init.cc

@@ -11,6 +11,7 @@ distributed under the License is distributed on an "AS IS" BASIS,
 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License. */
+#include <string.h>  // for strdup
 #include <algorithm>
 #include <string>
@@ -60,7 +61,9 @@ void InitDevices() {
 }
 
 void InitGLOG(const std::string &prog_name) {
-  google::InitGoogleLogging(prog_name.c_str());
+  // glog will not hold the ARGV[0] inside.
+  // Use strdup to alloc a new string.
+  google::InitGoogleLogging(strdup(prog_name.c_str()));
   google::InstallFailureSignalHandler();
 }
python/paddle/v2/fluid/tests/test_parallel_op.py

 import unittest
 
-import paddle.v2.fluid.layers as layers
 import paddle.v2.fluid as fluid
-from paddle.v2.fluid.framework import Program
-from paddle.v2.fluid.executor import Executor
-from paddle.v2.fluid.backward import append_backward
-import numpy as np
-import paddle.v2.fluid.core as core
+import numpy
 
 
+class BaseParallelForTest(unittest.TestCase):
+    def run_test(self, callback, feed, fetch):
+        """
+        Run the unittest for parallel.for
+        Args:
+            callback(callable): A callable function returns a generator. There
+                are two yields in the generator function. The first yield
+                returns the data layers, and the second yield returns the loss.
+                The modified data variables will be sent back during the first
+                yield.
+            feed(dict): The executor feeding dictionary.
+            fetch(list|basestr): The fetch name lists.
+        Returns:
+            None
+        Raises:
+            AssertionError when the computation of cpu, parallel.for in cpu,
+                gpu, parallel.for in gpu are different.
+        """
+        cpu = fluid.CPUPlace()
+        result_cpu = self._run_test_impl_(
+            callback=callback,
+            feed=feed,
+            fetch=fetch,
+            place=cpu,
+            use_parallel=False)
+        result_cpu_parallel = self._run_test_impl_(
+            callback=callback,
+            feed=feed,
+            fetch=fetch,
+            place=cpu,
+            use_parallel=True)
+        if fluid.core.is_compile_gpu():
+            gpu = fluid.CUDAPlace(0)
+            result_gpu = self._run_test_impl_(
+                callback=callback,
+                feed=feed,
+                fetch=fetch,
+                place=gpu,
+                use_parallel=False)
+            result_gpu_parallel = self._run_test_impl_(
+                callback=callback,
+                feed=feed,
+                fetch=fetch,
+                place=gpu,
+                use_parallel=True)
+            self._assert_same_(fetch, result_cpu, result_cpu_parallel,
+                               result_gpu, result_gpu_parallel)
+        else:
+            self._assert_same_(fetch, result_cpu, result_cpu_parallel)
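The callback contract described in the docstring above is a two-step coroutine: run_test pulls the data variables out of the callback with next() and pushes the (possibly ParallelDo-wrapped) inputs back in with send(), receiving the loss. A minimal pure-Python sketch of that handshake, independent of Fluid (the function name and strings below are illustrative only):

def network():
    x = 'data-layer'           # stands in for fluid.layers.data(...)
    x = yield x                # first yield: hand the data vars to the driver,
                               # which may send back patched inputs
    yield 'loss(%s)' % x       # second yield: hand back the loss

gen = network()
data = next(gen)               # driver receives the data variables
loss = gen.send('patched-' + data)  # driver sends wrapped inputs, gets loss
print(loss)                    # -> loss(patched-data-layer)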
-class ParallelOpTest(unittest.TestCase):
-    def setUp(self):
-        x = layers.data(
-            shape=[-1, 30, 40],
-            dtype='float32',
-            name='x',
-            append_batch_size=False,
-            stop_gradient=False)
-
-        places = layers.get_places(device_count=4)
-        pd = layers.ParallelDo(places=places)
-
-        with pd.do():
-            data = pd.read_input(x)
-            hidden = layers.fc(input=data, size=7)
-            pd.write_output(hidden)
-        data = pd()
-
-        loss = layers.mean(x=data)
-        sgd_optimizer = fluid.optimizer.SGD(learning_rate=0.001)
-        sgd_optimizer.minimize(loss)
-
-        exe = fluid.Executor(fluid.CPUPlace())
-        exe.run(fluid.default_startup_program())
-        exe.run(fluid.default_main_program(),
-                feed={
-                    x.name: np.random.uniform(0.1, 0.6,
-                                              (20, 30, 40)).astype("float32")
-                })
-
-    def test_forward(self):
-        pass
+    def _run_test_impl_(self, callback, feed, fetch, place,
+                        use_parallel=False):
+        """
+        Run a single test, returns the fetch values
+        Args:
+            place(Place): the computation place.
+            use_parallel(bool): Whether use parallel.for or not.
+        Returns:
+            Fetched numpy arrays.
+        """
+        if isinstance(fetch, basestring):
+            fetch = [fetch]
+        main = fluid.Program()
+        startup = fluid.Program()
+        # Fix seed
+        main.random_seed = 10
+        startup.random_seed = 10
+
+        with fluid.program_guard(main, startup):
+            generator = callback()
+            # Automatically insert parallel do if use_parallel = True
+            if use_parallel:
+                places = fluid.layers.get_places()
+                pd = fluid.layers.ParallelDo(places)
+
+                data = next(generator)
+
+                if isinstance(data, fluid.Variable):
+                    data = [data]
+
+                with pd.do():
+                    ins = map(pd.read_input, data)
+                    if len(ins) == 1:
+                        ins = ins[0]
+                    loss = generator.send(ins)  # patch input
+                    pd.write_output(loss)
+
+                loss = pd()
+            else:
+                data = next(generator)
+                loss = generator.send(data)
+            self.assertIsNotNone(loss)
+            avg_loss = fluid.layers.mean(x=loss)
+            fluid.backward.append_backward(loss=avg_loss)
+
+        exe = fluid.Executor(place)
+        exe.run(startup)
+        return exe.run(main, feed=feed, fetch_list=fetch)
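Why should the serial and ParallelDo paths agree, up to the loose tolerance checked below? ParallelDo shards the batch across places, runs the same sub-block on each shard, and merges the outputs, which for a row-wise computation is an identity. A pure-NumPy sketch of that data-parallel identity (the shard count and the fc stand-in are illustrative, not Fluid API):

import numpy as np

def fc(x, w):
    # the same per-row computation runs on every "place"
    return x.dot(w)

rng = np.random.RandomState(10)
x = rng.uniform(0.1, 0.6, (128 * 3, 784)).astype('float32')
w = rng.uniform(-0.1, 0.1, (784, 200)).astype('float32')

serial = fc(x, w)
# ParallelDo-style: shard the batch across 4 places, run, then merge
parallel = np.concatenate([fc(shard, w) for shard in np.split(x, 4)])

assert np.allclose(serial, parallel, rtol=0.1)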
+    def _assert_same_(self, fetch, *args):
+        """
+        Assert the return values of `run_test` are same.
+        Args:
+            fetch: Fetch list. Used for print error message
+            *args: The fetch result lists of each situations.
+        Returns:
+            None
+        Raises:
+            AssertionError
+        """
+
+        def _impl_(a, b, fetch_id, item_id):
+            item_str = ['CPU', 'ParallelCPU', 'GPU', 'ParallelGPU']
+            flag = numpy.allclose(a, b, rtol=0.1)
+            self.assertTrue(flag, "The {0} are different in {1}".format(
+                fetch[fetch_id], item_str[item_id]))
+
+        for i, items in enumerate(zip(*args)):
+            self.assertGreater(len(items), 0)
+            for j in range(1, len(items)):
+                _impl_(items[0], items[j], fetch_id=i, item_id=j)
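_assert_same_ receives one fetch-result list per configuration (CPU, ParallelCPU, GPU, ParallelGPU), and zip(*args) transposes them so that each fetched variable is compared against its plain-CPU baseline in every configuration. In miniature, with made-up numbers and a plain-assert stand-in for the rtol=0.1 check:

result_cpu = [1.00, 5.0]           # one entry per fetched variable
result_cpu_parallel = [1.01, 5.1]
result_gpu = [0.99, 4.9]

# zip(*...) groups the i-th fetch from every configuration together
for i, items in enumerate(zip(result_cpu, result_cpu_parallel, result_gpu)):
    for j in range(1, len(items)):
        assert abs(items[j] - items[0]) <= 0.1 * abs(items[0]), \
            'fetch %d differs in configuration %d' % (i, j)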
+class ParallelOpTest(BaseParallelForTest):
+    def test_simple_fc(self):
+        def __network__():
+            x = fluid.layers.data(shape=[784], dtype='float32', name='img')
+            # FIXME: This is a bug of parallel.do
+            x.stop_gradient = False
+            x = yield x
+            hidden = fluid.layers.fc(input=x, size=200, param_attr='fc1.w')
+            loss = fluid.layers.mean(x=hidden)
+            yield loss
+
+        self.run_test(
+            callback=__network__,
+            feed={
+                'img': numpy.random.random(size=(128 * 3,
+                                                 784)).astype('float32')
+            },
+            fetch='fc1.w@GRAD')
 
 
 if __name__ == '__main__':
 ...
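Because all of the plumbing lives in BaseParallelForTest, adding coverage is a one-callback affair. A hypothetical extra case in the same style (the two-layer network and the fc2.w parameter name are illustrative, not part of this commit):

class ParallelOpTestTwoLayer(BaseParallelForTest):
    def test_two_layer_fc(self):
        def __network__():
            x = fluid.layers.data(shape=[784], dtype='float32', name='img')
            x.stop_gradient = False   # same parallel.do workaround as above
            x = yield x               # first yield: expose the data layer
            h1 = fluid.layers.fc(input=x, size=64, param_attr='fc1.w')
            h2 = fluid.layers.fc(input=h1, size=64, param_attr='fc2.w')
            yield fluid.layers.mean(x=h2)  # second yield: the loss

        self.run_test(
            callback=__network__,
            feed={
                'img': numpy.random.random(size=(128 * 3,
                                                 784)).astype('float32')
            },
            fetch=['fc1.w@GRAD', 'fc2.w@GRAD'])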