Skip to content
体验新版
项目
组织
正在加载...
登录
切换导航
打开侧边栏
wmsofts
Paddle
提交
efd8346f
P
Paddle
项目概览
wmsofts
/
Paddle
与 Fork 源项目一致
Fork自
PaddlePaddle / Paddle
通知
1
Star
0
Fork
0
代码
文件
提交
分支
Tags
贡献者
分支图
Diff
Issue
0
列表
看板
标记
里程碑
合并请求
0
Wiki
0
Wiki
分析
仓库
DevOps
项目成员
Pages
P
Paddle
项目概览
项目概览
详情
发布
仓库
仓库
文件
提交
分支
标签
贡献者
分支图
比较
Issue
0
Issue
0
列表
看板
标记
里程碑
合并请求
0
合并请求
0
Pages
分析
分析
仓库分析
DevOps
Wiki
0
Wiki
成员
成员
收起侧边栏
关闭侧边栏
动态
分支图
创建新Issue
提交
Issue看板
体验新版 GitCode,发现更多精彩内容 >>
未验证
提交
efd8346f
编写于
3月 06, 2023
作者:
K
kangguangli
提交者:
GitHub
3月 06, 2023
浏览文件
操作
浏览文件
下载
电子邮件补丁
差异文件
remove parameter_server_graph_optimizer (#51030)
上级
c9a39758
变更
3
隐藏空白更改
内联
并排
Showing
3 changed files
with
0 additions
and
91 deletions
+0
-91
python/paddle/distributed/fleet/fleet.py
python/paddle/distributed/fleet/fleet.py
+0
-12
python/paddle/distributed/fleet/meta_optimizers/__init__.py
python/paddle/distributed/fleet/meta_optimizers/__init__.py
+0
-1
python/paddle/distributed/fleet/meta_optimizers/parameter_server_graph_optimizer.py
...fleet/meta_optimizers/parameter_server_graph_optimizer.py
+0
-78
未找到文件。
python/paddle/distributed/fleet/fleet.py
浏览文件 @
efd8346f
...
...
@@ -1367,18 +1367,6 @@ class Fleet:
copy_user_defined_strategy
,
)
can_not_apply_optimizer_list
.
append
(
meta_optimizer
)
from
.meta_optimizers
import
ParameterServerGraphOptimizer
graph_optimizer
=
ParameterServerGraphOptimizer
(
self
.
user_defined_optimizer
)
graph_optimizer
.
_set_basic_info
(
loss
,
self
.
_role_maker
,
self
.
user_defined_optimizer
,
copy_user_defined_strategy
,
)
can_not_apply_optimizer_list
.
append
(
graph_optimizer
)
else
:
# compile time
distributed_optimizer_list
=
(
...
...
python/paddle/distributed/fleet/meta_optimizers/__init__.py
浏览文件 @
efd8346f
...
...
@@ -22,7 +22,6 @@ from .pipeline_optimizer import PipelineOptimizer
from
.localsgd_optimizer
import
LocalSGDOptimizer
from
.localsgd_optimizer
import
AdaptiveLocalSGDOptimizer
from
.lars_optimizer
import
LarsOptimizer
from
.parameter_server_graph_optimizer
import
ParameterServerGraphOptimizer
from
.dgc_optimizer
import
DGCOptimizer
from
.dgc_optimizer
import
DGCMomentumOptimizer
from
.lamb_optimizer
import
LambOptimizer
...
...
python/paddle/distributed/fleet/meta_optimizers/parameter_server_graph_optimizer.py
已删除
100644 → 0
浏览文件 @
c9a39758
# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
import
paddle
from
.parameter_server_optimizer
import
ParameterServerOptimizer
__all__
=
[]
class ParameterServerGraphOptimizer(ParameterServerOptimizer):
    """Meta optimizer that compiles the parameter-server training program
    into a ``paddle.static.CompiledProgram`` with data parallelism.

    Behaves exactly like its parent for strategy bookkeeping; it only adds
    the graph-compilation step in :meth:`minimize`.
    """

    def __init__(self, optimizer):
        super().__init__(optimizer)
        self.inner_opt = optimizer
        # Meta optimizers may not currently be nested inside this one.
        self.meta_optimizers_white_list = []

    def _can_apply(self):
        """Return True only for async PS workers (non-collective, non-heter)."""
        if self.role_maker._is_collective:
            return False
        if self.user_defined_strategy.a_sync_configs["k_steps"] < 0:
            return False
        if self.role_maker._is_server():
            return False
        return not self.role_maker._is_heter_parameter_server_mode

    def _disable_strategy(self, dist_strategy):
        # Nothing to turn off for this optimizer.
        return

    def _enable_strategy(self, dist_strategy, context):
        # Only open up the async mode for auto-parallel; nothing to do here.
        return

    def _is_graph_out(self):
        # Signals that minimize() produces a compiled graph, not ops.
        return True

    def _try_to_compile(self, main_program, loss):
        """Compile *main_program* with the strategies derived from the
        distributed configuration and cache the result on the instance."""
        strategy = self._get_distributed_strategy()
        compiled = paddle.static.CompiledProgram(main_program)
        compiled.with_data_parallel(
            loss_name=loss.name,
            build_strategy=strategy.get_build_strategy(),
            exec_strategy=strategy.get_execute_strategy(),
            share_vars_from=None,
        )
        self._compiled_program = compiled
        return compiled

    def minimize(
        self, loss, startup_program=None, parameter_list=None, no_grad_set=None
    ):
        """Attach the compiled graph to the loss's program.

        Returns ``(None, None)`` instead of the usual
        ``(optimize_ops, params_grads)`` pair, since the graph itself is
        the output of this optimizer.
        """
        train_program = loss.block.program
        train_program._graph = self._try_to_compile(train_program, loss)
        # Just return placeholders for optimizer_ops and param_grads.
        return None, None
编辑
预览
Markdown
is supported
0%
请重试
或
添加新附件
.
添加附件
取消
You are about to add
0
people
to the discussion. Proceed with caution.
先完成此消息的编辑!
取消
想要评论请
注册
或
登录