# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.


def create_graph(optimizer_list):
    nsize = len(optimizer_list)

    edge = [[0] * nsize for _ in range(nsize)]  # adjacency matrix
    indegree = [0] * nsize
    for i, opt in enumerate(optimizer_list):
        for j, opt_inner in enumerate(optimizer_list):
            if opt._can_update(opt_inner):
                edge[i][j] = 1  # weight
                indegree[j] += 1

    return edge, indegree
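
# A minimal sketch of the structure create_graph returns, assuming two
# hypothetical meta optimizers A and B where only A._can_update(B) holds:
#     edge     == [[0, 1],
#                  [0, 0]]  # A -> B with weight 1
#     indegree == [0, 1]    # B has one incoming edge, A has none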


def topo_sort(edge, indegree):
    nsize = len(indegree)

    topo = [-1] * nsize
    for i in range(nsize):
        j = 0
        while j < nsize and indegree[j] != 0:
            j += 1
        assert j < nsize, 'The combination of meta optimizers contains a cycle'

        topo[i] = j
        indegree[j] = -1
        for k in range(nsize):
            if edge[j][k] != 0:
                indegree[k] -= 1

    return topo
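
# Continuing the create_graph sketch above, topo_sort repeatedly picks a
# node whose indegree is zero, so A (no incoming edges) is ordered before B:
#     topo_sort([[0, 1], [0, 0]], [0, 1]) == [0, 1]
# Note that indegree is consumed in place (visited entries are set to -1),
# so pass a copy if the caller still needs it afterwards.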


def floyd(edge):
    nsize = len(edge)
    max_len = -1
    max_edge = [-1, -1]

    max_path = [[[] for _ in range(nsize)] for _ in range(nsize)]
    for i in range(nsize):
        for j in range(nsize):
            if edge[i][j] > 0:
                max_path[i][j] = [j]

                if edge[i][j] > max_len:
                    max_len = edge[i][j]
                    max_edge = [i, j]

    # use floyd algorithm to find max_path
    for k in range(nsize):
        for i in range(nsize):
            for j in range(nsize):
                # if a-->b-->c but not a-->c, only a-->b or b-->c can be
                # applied; if a-->b-->c and a-->c both hold, the whole
                # chain a-->b-->c can be applied
                if edge[i][j] == 0:
                    continue

                if edge[i][k] == 0 or edge[k][j] == 0:
                    continue

                if edge[i][j] < edge[i][k] + edge[k][j]:
                    edge[i][j] = edge[i][k] + edge[k][j]
                    max_path[i][j] = max_path[i][k] + max_path[k][j]

                    max_len = edge[i][j]
                    max_edge = [i, j]

    if max_len == -1:
        return [0]

    return [max_edge[0]] + max_path[max_edge[0]][max_edge[1]]
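
# A hedged example of floyd on a three-node chain where the shortcut edge
# 0 -> 2 also exists, so the whole chain is applicable:
#     edge = [[0, 1, 1],
#             [0, 0, 1],
#             [0, 0, 0]]
#     floyd(edge) == [0, 1, 2]  # longest applicable chain of optimizers
# Note that floyd rewrites `edge` in place with accumulated path lengths.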


def maximum_path_len_algo(optimizer_list):
    if len(optimizer_list) == 0:
        return None

    edge, indegree = create_graph(optimizer_list)
    # topo_sort is run only for its cycle assertion; the order it returns
    # is not used here
    topo_sort(edge, indegree)
    max_path = floyd(edge)

    candidate = [optimizer_list[idx] for idx in max_path]

    # chain the selected optimizers so that each one wraps the next
    for idx, opt in enumerate(candidate[:-1]):
        opt._update_inner_optimizer(candidate[idx + 1])

    return candidate
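
# A self-contained sketch of the whole selection step, using hypothetical
# stub optimizers (the names and the _can_update rule below are assumptions
# for illustration, not real fleet meta optimizers):
#
#     class _Stub(object):
#         def __init__(self, updatable):
#             self._updatable = updatable  # class names this stub can wrap
#             self.inner_opt = None
#         def _can_update(self, other):
#             return type(other).__name__ in self._updatable
#         def _update_inner_optimizer(self, other):
#             self.inner_opt = other
#
#     class Outer(_Stub): pass
#     class Inner(_Stub): pass
#
#     outer, inner = Outer(["Inner"]), Inner([])
#     assert maximum_path_len_algo([outer, inner]) == [outer, inner]
#     assert outer.inner_opt is inner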


class StrategyCompilerBase(object):
    def __init__(self):
        pass


class StrategyCompiler(StrategyCompilerBase):
    """
    StrategyCompiler is responsible for meta optimizers combination
    Generally, a user can define serveral distributed strategies that
    can generate serveral meta optimizer. The combination of these 
    meta optimizers should have the right order to apply the optimizers'
    minimize function.
    This class is responsible for the executable distributed optimizer
    generation.
    """

    def __init__(self):
        super(StrategyCompiler, self).__init__()
        self._meta_optimizers = []
        self._graph_optimizers = []
        self._valid_optimizer_list = None
        self._user_defined_strategy = None
        self._meta_optimizer_candidates = []
        self._graph_optimizer_candidates = []

    def _get_applied_meta_list(self):
        return [type(opt).__name__ for opt in self._meta_optimizers]

    def _get_applied_graph_list(self):
        return [type(opt).__name__ for opt in self._graph_optimizers]

    def _get_valid_strategy(self, dist_strategy, can_not_apply_optimizer_list):
        import copy
        valid_strategy = copy.deepcopy(dist_strategy)
        invalid_optimizers = []
        for candidate in self._meta_optimizer_candidates:
            is_valid = False
            for valid in self._meta_optimizers:
                if candidate.__class__.__name__ == valid.__class__.__name__:
                    is_valid = True
                    break
            if not is_valid:
                invalid_optimizers.append(candidate)
        for opt in invalid_optimizers:
            opt._disable_strategy(valid_strategy)
        for opt in can_not_apply_optimizer_list:
            opt._disable_strategy(valid_strategy)
        return valid_strategy
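
    # For example (hedged): if RecomputeOptimizer was a candidate but did not
    # make it into self._meta_optimizers, its _disable_strategy hook resets
    # the matching fields of valid_strategy (e.g. the recompute flag) so that
    # the returned strategy reflects only the optimizers actually applied.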

    """
    Meta Optimizer Type A: rewrite forward, backward. e.g. recompute, async, sync, pipeline.
                           results will be splitted in async, sync, pipeline
    Meta Optimizer Type B: rewrite forward, 
                           e.g. AMP and the corresponding backward is generated by rewritten forward
    Meta Opitmizer Type B: rewrite backward. e.g. gradient fusion
    Meta Optimizer Type D: rewrite optimize. e.g. lars, lamb, localsgd, gradient merge, dgc
    Meta Optimizer Type E: only transpile to Graph structure for runtime,
                           currently, grad fusion and kernel fusion, sync batch-norm included.
                           we will remove grad fusion and sync batch-norm
    """

    def generate_optimizer(self, loss, role_maker, optimizer,
                           user_defined_strategy, meta_optimizer_list,
                           graph_optimizer_list):
        self._user_defined_strategy = user_defined_strategy
        self._meta_optimizer_candidates = meta_optimizer_list
        self._graph_optimizer_candidates = graph_optimizer_list

        if len(meta_optimizer_list) == 0 and len(graph_optimizer_list) == 0:
            return optimizer, None
        else:
            # currently, we use heuristic algorithm to select
            # meta optimizers combinations
            meta_optimizers = maximum_path_len_algo(meta_optimizer_list)
            graph_optimizers = maximum_path_len_algo(graph_optimizer_list)
            # TODO: design a distributed strategy update interface.
            # once the combination of meta_optimizer and graph_optimizer
            # is finally decided, the corresponding distributed strategy
            # should be updated accordingly

            self._meta_optimizers = [] if meta_optimizers is None else meta_optimizers
            self._graph_optimizers = [] if graph_optimizers is None else graph_optimizers

            return_meta = None if meta_optimizers is None else meta_optimizers[0]
            return_graph = None if graph_optimizers is None else graph_optimizers[0]

            if meta_optimizers is None or graph_optimizers is None:
                return return_meta, return_graph

            # heuristic filter: if any selected graph optimizer is listed in
            # the black list of any selected meta optimizer, set return_graph
            # to None
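            # e.g. a graph optimizer class name (typically the assumed
            # GraphExecutionOptimizer) appearing in a meta optimizer's
            # meta_optimizers_black_list disables graph-level optimization
            # for this run entirely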
            need_graph_opt = True
            for graph_opt in graph_optimizers:
                for program_opt in meta_optimizers:
                    if graph_opt.__class__.__name__ in program_opt.meta_optimizers_black_list:
                        need_graph_opt = False
            if not need_graph_opt:
                return_graph = None

            return return_meta, return_graph