Commit ade93374
Authored on March 29, 2019 by dongdaxiang

fix API.spec
test=develop

Parent: 720647e1
Showing 5 changed files with 8 additions and 20 deletions (+8, -20)
p2p_role_maker.py (+0, -17)
paddle/fluid/API.spec (+1, -1)
paddle/fluid/framework/executor.h (+0, -1)
python/paddle/fluid/async_executor.py (+1, -1)
python/paddle/fluid/executor.py (+6, -0)
p2p_role_maker.py (deleted, file mode 100644 → 0)
class P2PRoleMakers(object):
    def __init__(self):
        from mpi4py import MPI
        self.comm = MPI.COMM_WORLD
        self.MPI = MPI

    def get_endpoints(self, port_start):
        rank = self.comm.Get_rank()
        size = self.comm.Get_size()
        import socket
        local_ip = socket.gethostbyname(socket.gethostname())
        hostname = socket.gethostname()
        all_ips = self.comm.allgather(local_ip)
        all_ports = [str(port_start + rank) for ip in all_ips]
        return all_ports
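
For context, the deleted helper gathers one IP address per MPI rank via allgather and derives a port string per gathered IP. A minimal, hypothetical driver is sketched below; it assumes mpi4py is installed, the class above is importable, and the script is launched under an MPI launcher (the port value 6170 is illustrative, not from the commit):

    # hypothetical launch: mpirun -np 4 python p2p_demo.py
    role_maker = P2PRoleMakers()
    endpoints = role_maker.get_endpoints(port_start=6170)
    # each rank gets a list with one port string per gathered IP (i.e. per rank)
    print(endpoints)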
paddle/fluid/API.spec
@@ -497,7 +497,7 @@ paddle.fluid.LoDTensor.__init__ 1. __init__(self: paddle.fluid.core.LoDTensor, a
 paddle.fluid.LoDTensor.has_valid_recursive_sequence_lengths has_valid_recursive_sequence_lengths(self: paddle.fluid.core.LoDTensor) -> bool
 paddle.fluid.LoDTensor.lod lod(self: paddle.fluid.core.LoDTensor) -> List[List[int]]
 paddle.fluid.LoDTensor.recursive_sequence_lengths recursive_sequence_lengths(self: paddle.fluid.core.LoDTensor) -> List[List[int]]
-paddle.fluid.LoDTensor.set 1. set(self: paddle.fluid.core.Tensor, arg0: numpy.ndarray[float32], arg1: paddle::platform::CPUPlace) -> None 2. set(self: paddle.fluid.core.Tensor, arg0: numpy.ndarray[int32], arg1: paddle::platform::CPUPlace) -> None 3. set(self: paddle.fluid.core.Tensor, arg0: numpy.ndarray[float64], arg1: paddle::platform::CPUPlace) -> None 4. set(self: paddle.fluid.core.Tensor, arg0: numpy.ndarray[int64], arg1: paddle::platform::CPUPlace) -> None 5. set(self: paddle.fluid.core.Tensor, arg0: numpy.ndarray[bool], arg1: paddle::platform::CPUPlace) -> None 6. set(self: paddle.fluid.core.Tensor, arg0: numpy.ndarray[uint16], arg1: paddle::platform::CPUPlace) -> None 7. set(self: paddle.fluid.core.Tensor, arg0: numpy.ndarray[uint8], arg1: paddle::platform::CPUPlace) -> None 8. set(self: paddle.fluid.core.Tensor, arg0: numpy.ndarray[int8], arg1: paddle::platform::CPUPlace) -> None
+paddle.fluid.LoDTensor.set 1. set(self: paddle.fluid.core.Tensor, arg0: numpy.ndarray[float32], arg1: paddle::platform::CPUPlace) -> None 2. set(self: paddle.fluid.core.Tensor, arg0: numpy.ndarray[int32], arg1: paddle::platform::CPUPlace) -> None 3. set(self: paddle.fluid.core.Tensor, arg0: numpy.ndarray[float64], arg1: paddle::platform::CPUPlace) -> None 4. set(self: paddle.fluid.core.Tensor, arg0: numpy.ndarray[int64], arg1: paddle::platform::CPUPlace) -> None 5. set(self: paddle.fluid.core.Tensor, arg0: numpy.ndarray[bool], arg1: paddle::platform::CPUPlace) -> None 6. set(self: paddle.fluid.core.Tensor, arg0: numpy.ndarray[uint16], arg1: paddle::platform::CPUPlace) -> None 7. set(self: paddle.fluid.core.Tensor, arg0: numpy.ndarray[uint8], arg1: paddle::platform::CPUPlace) -> None 8. set(self: paddle.fluid.core.Tensor, arg0: numpy.ndarray[int8], arg1: paddle::platform::CPUPlace) -> None 9. set(self: paddle.fluid.core.Tensor, arg0: numpy.ndarray[float32], arg1: paddle::platform::CUDAPlace) -> None 10. set(self: paddle.fluid.core.Tensor, arg0: numpy.ndarray[int32], arg1: paddle::platform::CUDAPlace) -> None 11. set(self: paddle.fluid.core.Tensor, arg0: numpy.ndarray[float64], arg1: paddle::platform::CUDAPlace) -> None 12. set(self: paddle.fluid.core.Tensor, arg0: numpy.ndarray[int64], arg1: paddle::platform::CUDAPlace) -> None 13. set(self: paddle.fluid.core.Tensor, arg0: numpy.ndarray[bool], arg1: paddle::platform::CUDAPlace) -> None 14. set(self: paddle.fluid.core.Tensor, arg0: numpy.ndarray[uint16], arg1: paddle::platform::CUDAPlace) -> None 15. set(self: paddle.fluid.core.Tensor, arg0: numpy.ndarray[uint8], arg1: paddle::platform::CUDAPlace) -> None 16. set(self: paddle.fluid.core.Tensor, arg0: numpy.ndarray[int8], arg1: paddle::platform::CUDAPlace) -> None 17. set(self: paddle.fluid.core.Tensor, arg0: numpy.ndarray[float32], arg1: paddle::platform::CUDAPinnedPlace) -> None 18. set(self: paddle.fluid.core.Tensor, arg0: numpy.ndarray[int32], arg1: paddle::platform::CUDAPinnedPlace) -> None 19. set(self: paddle.fluid.core.Tensor, arg0: numpy.ndarray[float64], arg1: paddle::platform::CUDAPinnedPlace) -> None 20. set(self: paddle.fluid.core.Tensor, arg0: numpy.ndarray[int64], arg1: paddle::platform::CUDAPinnedPlace) -> None 21. set(self: paddle.fluid.core.Tensor, arg0: numpy.ndarray[bool], arg1: paddle::platform::CUDAPinnedPlace) -> None 22. set(self: paddle.fluid.core.Tensor, arg0: numpy.ndarray[uint16], arg1: paddle::platform::CUDAPinnedPlace) -> None 23. set(self: paddle.fluid.core.Tensor, arg0: numpy.ndarray[uint8], arg1: paddle::platform::CUDAPinnedPlace) -> None 24. set(self: paddle.fluid.core.Tensor, arg0: numpy.ndarray[int8], arg1: paddle::platform::CUDAPinnedPlace) -> None
 paddle.fluid.LoDTensor.set_lod set_lod(self: paddle.fluid.core.LoDTensor, lod: List[List[int]]) -> None
 paddle.fluid.LoDTensor.set_recursive_sequence_lengths set_recursive_sequence_lengths(self: paddle.fluid.core.LoDTensor, recursive_sequence_lengths: List[List[int]]) -> None
 paddle.fluid.LoDTensor.shape shape(self: paddle.fluid.core.Tensor) -> List[int]
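
The changed spec line above records the pybind overloads of LoDTensor.set for CPUPlace, CUDAPlace, and CUDAPinnedPlace. A minimal sketch of exercising the CPU overload and the neighbouring LoD methods from Python, assuming the Fluid 1.x API (paddle.fluid and numpy installed):

    import numpy as np
    import paddle.fluid as fluid

    t = fluid.LoDTensor()
    # overload 1 in the spec: float32 ndarray onto a CPUPlace
    t.set(np.random.rand(3, 4).astype("float32"), fluid.CPUPlace())
    # sequence-length metadata and shape, per the spec entries above
    t.set_recursive_sequence_lengths([[2, 1]])
    print(t.shape())                        # [3, 4]
    print(t.recursive_sequence_lengths())   # [[2, 1]]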
paddle/fluid/framework/executor.h
@@ -56,7 +56,6 @@ class Executor {
   explicit Executor(const platform::Place& place);
-  explicit Executor(Scope* scope, const platform::Place& place);
   /*
    * Close this Executor.
    * Calling this method will send complete messages to all pserver instances.
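
The retained comment describes the Executor shutdown path: Close() sends complete messages to all pserver instances. On the Python side this corresponds to constructing fluid.Executor(place) and calling close() when distributed training finishes; a minimal sketch, assuming the Fluid 1.x API and a plain CPU place:

    import paddle.fluid as fluid

    place = fluid.CPUPlace()
    exe = fluid.Executor(place)              # backed by the C++ Executor(place) constructor
    exe.run(fluid.default_startup_program())
    # ... run training ...
    exe.close()                              # in distributed runs, tells pserver instances this trainer is done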
python/paddle/fluid/async_executor.py
@@ -86,7 +86,7 @@ class AsyncExecutor(object):
         >>> async_executor = fluid.AsyncExecutor(place)

         Args:
-            place(Place): CPUPlace or GPUPlace.
+            place(Place): CPUPlace only
             run_mode(str): default is empty string.
         """
         if place is None:
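
With the docstring now restricted to CPUPlace, constructing the AsyncExecutor follows the pattern in its own example line; a minimal sketch, assuming the Fluid 1.x API where AsyncExecutor lives under paddle.fluid:

    import paddle.fluid as fluid

    place = fluid.CPUPlace()                     # per the updated docstring: CPUPlace only
    async_executor = fluid.AsyncExecutor(place)  # run_mode defaults to an empty string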
python/paddle/fluid/executor.py
@@ -703,6 +703,9 @@ class Executor(object):
             exe.infer_from_dataset(program=fluid.default_main_program(),
                                    dataset=dataset)
         """
+        if self.place == paddle.fluid.CUDAPlace():
+            raise RuntimeError("infer_from_dataset is verified on CPUPlace"
+                               "We will open CUDAPlace in the future")
         scope, trainer = self._prepare_trainer(program=program,

@@ -776,6 +779,9 @@
                                   dataset=dataset)
         """
+        if self.place == paddle.fluid.CUDAPlace():
+            raise RuntimeError("train_from_dataset is verified on CPUPlace"
+                               "We will open CUDAPlace in the future")
         scope, trainer = self._prepare_trainer(program=program,
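
The two added guards make infer_from_dataset and train_from_dataset refuse a CUDAPlace, so both entry points are CPU-only at this commit. A minimal end-to-end sketch of the intended CPU usage, assuming the Fluid 1.x dataset API (fluid.DatasetFactory, fluid.layers.data) and illustrative data file names:

    import paddle.fluid as fluid

    x = fluid.layers.data(name="x", shape=[1], dtype="int64", lod_level=1)
    y = fluid.layers.data(name="y", shape=[1], dtype="int64", lod_level=1)

    dataset = fluid.DatasetFactory().create_dataset("InMemoryDataset")
    dataset.set_use_var([x, y])
    dataset.set_filelist(["data_a.txt", "data_b.txt"])   # illustrative file names
    dataset.load_into_memory()

    place = fluid.CPUPlace()   # a CUDAPlace here would trigger the new RuntimeError guard
    exe = fluid.Executor(place)
    exe.run(fluid.default_startup_program())
    exe.train_from_dataset(program=fluid.default_main_program(),
                           dataset=dataset)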