From 79ba19ce7a48c443bb0f34077772536d6f47787c Mon Sep 17 00:00:00 2001
From: Dong Daxiang <35550832+guru4elephant@users.noreply.github.com>
Date: Tue, 30 Jul 2019 08:15:45 +0800
Subject: [PATCH] Update fleet_api_howto_cn.rst

test=document_preview
---
 .../howto/training/fleet_api_howto_cn.rst    | 24 ------------------------
 1 file changed, 24 deletions(-)

diff --git a/doc/fluid/user_guides/howto/training/fleet_api_howto_cn.rst b/doc/fluid/user_guides/howto/training/fleet_api_howto_cn.rst
index a92d9f1ab..5ceef7b11 100644
--- a/doc/fluid/user_guides/howto/training/fleet_api_howto_cn.rst
+++ b/doc/fluid/user_guides/howto/training/fleet_api_howto_cn.rst
@@ -144,27 +144,10 @@ training is typically used for multi-node multi-GPU training, generally when training complex models
             print("worker_index: %d, step%d cost = %f" %
                   (fleet.worker_index(), i, cost_val[0]))
 
-More usage examples
--------------------
-
-`Click-through rate prediction <>`__
-
-`Semantic matching <>`__
-
-`Vector learning <>`__
-
-`Image classification based on Resnet50 <>`__
-
-`Machine translation based on Transformer <>`__
-
-`Semantic representation learning based on Bert <>`__
 
 Fleet API interface reference
 -----------------------------
 
-Fleet API interfaces
-~~~~~~~~~~~~~~~~~~~~
-
 -  init(role\_maker=None)
    -  Initializes fleet; it must be called before any other fleet interface and defines the multi-node environment configuration
 -  is\_worker()
@@ -186,8 +169,6 @@ Fleet API interfaces
 
 -  distributed\_optimizer(optimizer, strategy=None)
    -  Decorator for distributed optimization algorithms; the user passes in a single-node optimizer, configures the distributed training strategy, and gets back a distributed optimizer
 
-RoleMaker
-~~~~~~~~~
 
 -  MPISymetricRoleMaker
@@ -264,17 +245,12 @@ RoleMaker
             server_endpoints=pserver_endpoints)
         fleet.init(role)
 
-Strategy
-~~~~~~~~
-
 -  Parameter Server Training
    -  Sync\_mode
 
 -  Collective Training
    -  LocalSGD
    -  ReduceGrad
 
-Fleet Mode
-~~~~~~~~~~
 
 -  Parameter Server Training
-- 
GitLab
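
For orientation, the interfaces this patch touches (``init``, ``is_worker``, ``distributed_optimizer``, the role makers, and the parameter-server strategy) combine roughly as in the sketch below. This is a minimal illustration only: the module paths, ``PaddleCloudRoleMaker``, and the server/worker helpers (``init_server``, ``run_server``, ``init_worker``, ``stop_worker``) are assumptions based on the 2019-era Fleet API and are not defined by this patch.

.. code:: python

    import paddle.fluid as fluid
    # Assumed 2019-era Fleet entry points (not confirmed by this patch).
    from paddle.fluid.incubate.fleet.base import role_maker
    from paddle.fluid.incubate.fleet.parameter_server.distribute_transpiler import fleet

    # A trivial network standing in for the model trained in the guide.
    x = fluid.layers.data(name="x", shape=[13], dtype="float32")
    y = fluid.layers.data(name="y", shape=[1], dtype="float32")
    y_pred = fluid.layers.fc(input=x, size=1)
    avg_cost = fluid.layers.mean(
        fluid.layers.square_error_cost(input=y_pred, label=y))

    # init() must be called before any other fleet interface; the role maker
    # defines the multi-node environment (here read from environment variables).
    fleet.init(role_maker.PaddleCloudRoleMaker())

    # distributed_optimizer() wraps a single-node optimizer with a distributed
    # training strategy and returns a distributed optimizer.
    optimizer = fleet.distributed_optimizer(fluid.optimizer.SGD(learning_rate=0.01))
    optimizer.minimize(avg_cost)

    if fleet.is_server():
        fleet.init_server()
        fleet.run_server()
    elif fleet.is_worker():
        fleet.init_worker()
        exe = fluid.Executor(fluid.CPUPlace())
        exe.run(fluid.default_startup_program())
        # ... run the training loop, reporting cost as in the guide ...
        fleet.stop_worker()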