#   Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import paddle
from paddle import fluid
import paddle.distributed.passes
from .meta_optimizer_base import MetaOptimizerBase
from paddle.fluid import core
import subprocess
import re
import os
import platform
from paddle.distributed.ps.utils.public import *
from paddle.distributed.passes import PassContext
from paddle.distributed.ps.utils.ps_factory import PsProgramBuilderFactory


class ParameterServerOptimizer(MetaOptimizerBase):
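    """Meta optimizer for parameter-server training: runs the wrapped
    optimizer first, then rewrites the resulting programs into trainer
    and server programs through the ps pass pipeline."""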

    def __init__(self, optimizer):
        super(ParameterServerOptimizer, self).__init__(optimizer)
        self.inner_opt = optimizer
        # we do not allow meta optimizer to be inner optimizer currently
        self.meta_optimizers_white_list = []

    def _set_basic_info(self, loss, role_maker, user_defined_optimizer,
                        user_defined_strategy):
        super(ParameterServerOptimizer,
              self)._set_basic_info(loss, role_maker, user_defined_optimizer,
                                    user_defined_strategy)

    def _set_origin_programs(self, losses):
        self.origin_main_programs = []
        for loss in losses:
            self.origin_main_programs.append(loss.block.program)

    def _init_ps_pass_context(self, loss, startup_program):
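        # Gather everything the ps passes will need -- programs, role info,
        # strategy switches -- into a single attrs dict on the pass context.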
        self.pass_ctx = PassContext()
        attrs = {}
        # trainer
        attrs["env"] = get_dist_env()

        attrs['loss'] = loss
        attrs['min_block_size'] = 81920
        attrs['origin_main_program'] = loss.block.program
        attrs['origin_startup_program'] = startup_program

        attrs['origin_main_programs'] = self.origin_main_programs

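        # Clone both origin programs so that later pass rewrites never
        # mutate the user-built programs.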
        attrs['cloned_main'] = attrs['origin_main_program'].clone()
        attrs['cloned_startup'] = attrs['origin_startup_program'].clone()

        attrs['user_defined_strategy'] = self.user_defined_strategy
        attrs['valid_strategy'] = self.user_defined_strategy
        attrs['trainer'] = TrainerRuntimeConfig(self.user_defined_strategy)
        attrs['ps_mode'] = attrs['trainer'].mode
        logger.info("ps_mode: {}".format(attrs['ps_mode']))
        attrs['role_maker'] = self.role_maker
        attrs[
            'is_heter_ps_mode'] = self.role_maker._is_heter_parameter_server_mode
        attrs['is_worker'] = self.role_maker._is_worker()
        attrs['is_server'] = self.role_maker._is_server()
        attrs['is_heter_worker'] = self.role_maker._is_heter_worker()
        logger.info("this process is heter? {}".format(
            attrs['is_heter_worker']))
        attrs['use_ps_gpu'] = self.user_defined_strategy.a_sync_configs[
            "use_ps_gpu"]
        attrs['lr_decay_steps'] = self.user_defined_strategy.a_sync_configs[
            "lr_decay_steps"]
        # FL (federated learning) sparse-table configs
        attrs['local_sparse'] = attrs[
            "user_defined_strategy"].trainer_desc_configs["local_sparse"]
        attrs['remote_sparse'] = attrs[
            "user_defined_strategy"].trainer_desc_configs["remote_sparse"]
        attrs['is_fl_ps_mode'] = self.user_defined_strategy.is_fl_ps_mode
        attrs[
            'with_coordinator'] = self.user_defined_strategy.is_with_coordinator

        attrs['k_steps'] = self.user_defined_strategy.a_sync_configs["k_steps"]
        attrs['launch_barrier'] = self.user_defined_strategy.a_sync_configs[
            "launch_barrier"]

        attrs['launch_barrier_flag'] = int(
            os.getenv("FLAGS_LAUNCH_BARRIER", "1"))

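        # Populate attrs with the variable-distribution info (parameter /
        # gradient placement) that the ps passes consume.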
        build_var_distributed(attrs)

        # server
        attrs['_main_server'] = fluid.Program()
        attrs['_startup_server'] = fluid.Program()
        attrs['tensor_table'] = {}

        self.pass_ctx._attrs = attrs

    def _is_graph_out(self):
        return False

    def _can_apply(self):
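        # Never applies in collective mode; otherwise applies whenever
        # a_sync k_steps has been configured (>= 0).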
        if self.role_maker._is_collective:
            return False

        k_steps = self.user_defined_strategy.a_sync_configs["k_steps"]
        return k_steps >= 0

    def minimize_impl(self,
                      loss,
                      startup_program=None,
                      parameter_list=None,
                      no_grad_set=None):
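        # Let the wrapped optimizer insert backward and optimizer ops first;
        # the ps program builder below then rewrites the resulting programs.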
        self.inner_opt.minimize(loss, startup_program, parameter_list,
                                no_grad_set)
        if startup_program is None:
            startup_program = paddle.static.default_startup_program()

        # print("program after inner optimizer minimize:",
        #       str(loss.block.program))
        self._set_origin_programs([loss])
        self._init_ps_pass_context(loss, startup_program)
        ps_builder = PsProgramBuilderFactory()._create_ps_program_builder(
            self.pass_ctx)
        ps_builder._build_programs()
        return None, None

    def minimize_losses_impl(self,
                             losses,
                             startup_program=None,
                             parameter_list=None,
                             no_grad_set=None):
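        # Multi-loss variant (e.g. the FL ps mode): each loss gets its own
        # startup program and its own pass-context rewrite.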
        if parameter_list is None:
            parameter_list = [None] * len(losses)
        for idx, loss in enumerate(losses):
            startup_prog = startup_program[idx]
            parameters = parameter_list[idx]
            self.inner_opt.minimize(loss, startup_prog, parameters, no_grad_set)
        self._set_origin_programs(losses)
        for idx, loss in enumerate(losses):
            print("ps_optimizer idx loss:", idx, loss)
            startup_prog = startup_program[idx]
            self._init_ps_pass_context(loss, startup_prog)
            ps_builder = PsProgramBuilderFactory()._create_ps_program_builder(
                self.pass_ctx)
            ps_builder._build_programs()
            startup_program[idx] = self.pass_ctx._attrs['cloned_startup']
        return None, None

    def _can_apply_geo(self, program):

        def get_sys_free_mem():
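            # Return free physical memory in bytes; only macOS (vm_stat)
            # and Linux (/proc/meminfo) are supported.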
            plat = platform.system()
            if plat == "Darwin":
                vm = subprocess.Popen(['vm_stat'],
                                      stdout=subprocess.PIPE).communicate()[0]
                # Parse vm_stat output: each row is "name: page count";
                # pages are 4096 bytes. Decode the raw bytes first.
                vmLines = vm.decode('utf-8').split('\n')
                sep = re.compile(r':[\s]+')
                vmStats = {}
                for row in range(1, len(vmLines) - 2):
                    rowText = vmLines[row].strip()
                    rowElements = sep.split(rowText)
                    vmStats[rowElements[0]] = int(
                        rowElements[1].strip('.')) * 4096
                return vmStats["Pages free"]
            elif plat == "Linux":
                # Parse /proc/meminfo; values are reported in kB.
                mems = {}
                with open('/proc/meminfo', 'rb') as f:
                    for line in f:
                        fields = line.split()
                        mems[fields[0]] = int(fields[1]) * 1024
                free = mems[b'MemFree:']
                return free
            else:
                raise ValueError(
                    "%s platform is unsupported in the parameter server optimizer"
                    % plat)

        # GEO mode currently supports only the plain SGD inner optimizer.
        if not isinstance(self.inner_opt, fluid.optimizer.SGDOptimizer):
            return False

        free = get_sys_free_mem()
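        # Estimate current memory use: sum the sizes of all persistable
        # LoD tensors (the parameters) in the program.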
        processed_var_names = set(["@EMPTY@"])
        param_memory_size = 0
        for varname in program.global_block().vars:
            var = program.global_block().vars[varname]
            if not var.persistable or var.desc.type(
            ) != core.VarDesc.VarType.LOD_TENSOR:
                continue
            param_memory_size += get_var_mem_size(var)
            processed_var_names.add(varname)

        # Heuristic upper bound: optimizer states and gradient buffers are
        # assumed to need at most 5x the raw parameter memory.
        upper_mem_use = param_memory_size * 5.0

        program_tmp_vars = dict()
        # Assumed batch size used to size vars with a dynamic (-1) dimension.
        eval_batch_size = 1024
        for op in program.global_block().ops:
            for var_name in op.output_arg_names:
                if var_name in processed_var_names:
                    continue
                processed_var_names.add(var_name)
                var = program.global_block().vars[var_name]

                if var.desc.type() != core.VarDesc.VarType.LOD_TENSOR:
                    continue

                data_count = 1
                neg_dim_count = 0
                for x in var.shape:
                    if x < 0:
                        if neg_dim_count >= 1:
                            raise ValueError(
                                "Var %s has more than one negative dim." %
                                (var_name))
                        neg_dim_count += 1
                        data_count *= (-x)
                    else:
                        data_count *= x
                program_tmp_vars[var_name] = (data_count, neg_dim_count,
                                              dtype_to_size[var.dtype])

        for varname in program_tmp_vars:
            data_count, neg_dim_count, type_size = program_tmp_vars[varname]
            if neg_dim_count == 1:
                data_count *= eval_batch_size
            var_memory = data_count * type_size
            upper_mem_use += var_memory

        # GEO applies only if the estimated upper bound fits in free memory.
        return upper_mem_use < free

    def _enable_strategy(self, dist_strategy, context):
        if dist_strategy.a_sync_configs["k_steps"] >= 0:
            return
        dist_strategy.a_sync = True
        a_sync_configs = dist_strategy.a_sync_configs

        is_geo = self._can_apply_geo(context["origin_main_program"])

        a_sync_configs["k_steps"] = 800 if is_geo else 0
        dist_strategy.a_sync_configs = a_sync_configs

    def _disable_strategy(self, dist_strategy):
        dist_strategy.a_sync = False
        a_sync_configs = dist_strategy.a_sync_configs
        # Modify the local copy, then write it back below (the same
        # get-modify-set pattern used in _enable_strategy).
        a_sync_configs["k_steps"] = -1
        dist_strategy.a_sync_configs = a_sync_configs