Paddle commit 495368c2: ADD CPU_NUM
Authored on Jun 10, 2018 by chengduoZH
Parent commit: d09fd1f6

Showing 10 changed files with 28 additions and 7 deletions (+28, -7)
Changed files:

  paddle/fluid/framework/details/all_reduce_op_handle.cc                          +1  -1
  python/paddle/dataset/flowers.py                                                +2  -1
  python/paddle/fluid/data_feeder.py                                              +4  -1
  python/paddle/fluid/parallel_executor.py                                        +7  -3
  python/paddle/fluid/tests/unittests/test_parallel_executor_fetch_feed.py        +3  -0
  python/paddle/fluid/tests/unittests/test_parallel_executor_mnist.py             +2  -0
  python/paddle/fluid/tests/unittests/test_parallel_executor_seresnext.py         +3  -0
  python/paddle/fluid/tests/unittests/test_parallel_executor_test_while_train.py  +2  -0
  python/paddle/fluid/tests/unittests/test_parallel_executor_transformer.py       +2  -0
  python/paddle/v2/dataset/flowers.py                                             +2  -1
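Taken together, the commit replaces the hard-coded min(4, cpu_count()) CPU parallelism with a CPU_NUM environment variable. As an illustrative sketch (not part of the commit; the helper name resolve_cpu_num is hypothetical), this is the fallback logic every touched file now shares:

import os
import multiprocessing

def resolve_cpu_num():
    # Hypothetical helper mirroring the pattern introduced by this commit:
    # honour CPU_NUM when it is set, otherwise fall back to the core count.
    return int(os.environ.get('CPU_NUM', multiprocessing.cpu_count()))

# Setting the variable before constructing readers or executors pins the
# CPU device count, e.g. to 4:
os.environ['CPU_NUM'] = str(4)
print(resolve_cpu_num())  # prints 4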
paddle/fluid/framework/details/all_reduce_op_handle.cc

@@ -131,7 +131,7 @@ void AllReduceOpHandle::RunImpl() {
   }
 }
 
-std::string AllReduceOpHandle::Name() const { return "nccl_all_reduce"; }
+std::string AllReduceOpHandle::Name() const { return "all_reduce"; }
 
 }  // namespace details
 }  // namespace framework
 }  // namespace paddle
python/paddle/dataset/flowers.py

@@ -119,7 +119,8 @@ def reader_creator(data_file,
             yield sample, int(label) - 1
 
     if use_xmap:
-        return xmap_readers(mapper, reader, min(4, cpu_count()), buffered_size)
+        cpu_num = int(os.environ.get('CPU_NUM', cpu_count()))
+        return xmap_readers(mapper, reader, cpu_num, buffered_size)
     else:
         return map_readers(mapper, reader)
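With this change the number of xmap_readers worker threads for the flowers dataset is no longer capped at 4. A small, illustrative comparison of the old and new worker counts (the numbers depend on the machine and are not taken from the commit):

import os
from multiprocessing import cpu_count

old_workers = min(4, cpu_count())                          # previous behaviour: at most 4
os.environ['CPU_NUM'] = '8'                                # suppose the user asks for 8
new_workers = int(os.environ.get('CPU_NUM', cpu_count()))  # new behaviour: 8

print(old_workers, new_workers)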
python/paddle/fluid/data_feeder.py

@@ -15,6 +15,7 @@
 from __future__ import print_function
 import core
 import numpy
+import os
 import six.moves as six
 import multiprocessing

@@ -150,7 +151,9 @@ class DataFeeder(object):
         elif isinstance(self.place, core.CUDAPlace):
             return core.get_cuda_device_count()
         else:
-            return min(4, multiprocessing.cpu_count())
+            cpu_num = int(
+                os.environ.get('CPU_NUM', multiprocessing.cpu_count()))
+            return cpu_num
 
     def decorate_reader(self,
                         reader,
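In DataFeeder the same pattern is applied to the device count used when splitting a mini-batch: CUDA places keep using the CUDA device count, while CPU places now honour CPU_NUM. A simplified, hypothetical restatement of that branch (the place check is reduced to a boolean for illustration):

import os
import multiprocessing

def device_count(use_cuda, cuda_device_count):
    # Hypothetical stand-in for DataFeeder's internal device-count logic.
    if use_cuda:
        return cuda_device_count  # unchanged: number of visible GPUs
    return int(os.environ.get('CPU_NUM', multiprocessing.cpu_count()))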
python/paddle/fluid/parallel_executor.py

@@ -18,6 +18,7 @@ import framework
 import executor
 import warnings
 import sys
+import os
 
 __all__ = ['ParallelExecutor', 'ExecutionStrategy', 'BuildStrategy']

@@ -101,7 +102,9 @@ class ParallelExecutor(object):
                 p.set_place(self._act_places[-1])
                 self._places.append(p)
         else:
-            for i in xrange(min(4, multiprocessing.cpu_count())):
+            cpu_num = int(
+                os.environ.get('CPU_NUM', multiprocessing.cpu_count()))
+            for i in xrange(cpu_num):
                 p = core.Place()
                 self._act_places.append(core.CPUPlace())
                 p.set_place(self._act_places[-1])

@@ -118,8 +121,9 @@ class ParallelExecutor(object):
                 # performance. Worth tunning for other models in the future.
                 exec_strategy.num_threads = len(self._places) * 2
             else:
-                exec_strategy.num_threads = min(
-                    len(self._places) * 2, multiprocessing.cpu_count())
+                cpu_num = int(
+                    os.environ.get('CPU_NUM', multiprocessing.cpu_count()))
+                exec_strategy.num_threads = min(len(self._places) * 2, cpu_num)
 
         if build_strategy is None:
             build_strategy = BuildStrategy()
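Combined, the CPU path of ParallelExecutor now derives both the place list and the default thread count from CPU_NUM: one CPUPlace per requested CPU, and num_threads capped by the same value. A sketch of that arithmetic (the function name is illustrative, not from the commit):

import os
import multiprocessing

def cpu_execution_plan():
    # Mirrors the two updated hunks: places come from CPU_NUM, and the
    # default thread count is min(2 * places, CPU_NUM).
    cpu_num = int(os.environ.get('CPU_NUM', multiprocessing.cpu_count()))
    num_places = cpu_num
    num_threads = min(num_places * 2, cpu_num)
    return num_places, num_threads

os.environ['CPU_NUM'] = '4'
print(cpu_execution_plan())  # (4, 4)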
python/paddle/fluid/tests/unittests/test_parallel_executor_fetch_feed.py

@@ -18,6 +18,7 @@ import paddle.fluid as fluid
 import unittest
 import numpy as np
 import paddle
+import os
 
 
 def Lenet(data, class_dim):

@@ -89,6 +90,7 @@ class TestFetchOp(unittest.TestCase):
             train_inputs.append(tst_reader_iter.next())
 
         self.parallel_exe(train_inputs, seed=1, use_cuda=True)
+        os.environ['CPU_NUM'] = str(4)
         self.parallel_exe(train_inputs, seed=1, use_cuda=False)

@@ -133,6 +135,7 @@ class TestFeedParallel(unittest.TestCase):
     def test_feed_op(self):
         self.parallel_exe(use_cuda=True, seed=1)
+        os.environ['CPU_NUM'] = str(4)
         self.parallel_exe(use_cuda=False, seed=1)
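This and the remaining test files follow one convention: set CPU_NUM to 4 before any CPU run so that CI machines with many cores create the same number of places. A minimal, hypothetical unittest showing the pattern (the class and method names are not from the commit):

import os
import unittest

class TestCpuNumConvention(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        # Same convention as the updated Paddle tests: pin CPU parallelism
        # to four devices before anything reads the variable.
        os.environ['CPU_NUM'] = str(4)

    def test_env_is_visible(self):
        self.assertEqual(int(os.environ['CPU_NUM']), 4)

if __name__ == '__main__':
    unittest.main()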
python/paddle/fluid/tests/unittests/test_parallel_executor_mnist.py

@@ -18,6 +18,7 @@ import numpy as np
 import paddle
 import paddle.dataset.mnist as mnist
 import unittest
+import os
 
 MNIST_RECORDIO_FILE = "./mnist_test_pe.recordio"

@@ -85,6 +86,7 @@ def fc_with_batchnorm(use_feed):
 class TestMNIST(TestParallelExecutorBase):
     @classmethod
     def setUpClass(cls):
+        os.environ['CPU_NUM'] = str(4)
         # Convert mnist to recordio file
         with fluid.program_guard(fluid.Program(), fluid.Program()):
             reader = paddle.batch(mnist.train(), batch_size=4)
python/paddle/fluid/tests/unittests/test_parallel_executor_seresnext.py

@@ -15,6 +15,7 @@
 import paddle.fluid as fluid
 from parallel_executor_test_base import TestParallelExecutorBase
 import unittest
+import os
 
 
 def squeeze_excitation(input, num_channels, reduction_ratio):

@@ -145,10 +146,12 @@ class TestResnet(TestParallelExecutorBase):
         )
 
     def test_resnet(self):
+        # os.environ['CPU_NUM'] = str(4)
         self.check_resnet_convergence(False, use_cuda=True)
         # self.check_resnet_convergence(False,use_cuda=False)
 
     def test_resnet_with_new_strategy(self):
+        os.environ['CPU_NUM'] = str(4)
         self.check_resnet_convergence(True, use_cuda=True)
         self.check_resnet_convergence(True, use_cuda=False)
python/paddle/fluid/tests/unittests/test_parallel_executor_test_while_train.py

@@ -15,6 +15,7 @@
 import paddle.fluid as fluid
 import numpy as np
 import unittest
+import os
 
 
 def simple_fc_net():

@@ -36,6 +37,7 @@ def simple_fc_net():
 class ParallelExecutorTestingDuringTraining(unittest.TestCase):
     def check_network_convergence(self, use_cuda, build_strategy=None):
+        os.environ['CPU_NUM'] = str(4)
         main = fluid.Program()
         startup = fluid.Program()
         with fluid.program_guard(main, startup):
python/paddle/fluid/tests/unittests/test_parallel_executor_transformer.py

@@ -19,6 +19,7 @@ from parallel_executor_test_base import TestParallelExecutorBase
 import unittest
 import paddle
 import paddle.dataset.wmt16 as wmt16
+import os
 
 WMT16_RECORDIO_FILE = "./wmt16_test_pe.recordio"

@@ -149,6 +150,7 @@ def transformer(use_feed):
 class TestTransformer(TestParallelExecutorBase):
     @classmethod
     def setUpClass(cls):
+        os.environ['CPU_NUM'] = str(4)
         reader = paddle.batch(
             wmt16.train(ModelHyperParams.src_vocab_size,
                         ModelHyperParams.trg_vocab_size),
python/paddle/v2/dataset/flowers.py

@@ -119,7 +119,8 @@ def reader_creator(data_file,
             yield sample, int(label) - 1
 
     if use_xmap:
-        return xmap_readers(mapper, reader, min(4, cpu_count()), buffered_size)
+        cpu_num = int(os.environ.get('CPU_NUM', cpu_count()))
+        return xmap_readers(mapper, reader, cpu_num, buffered_size)
     else:
         return map_readers(mapper, reader)