#   Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from collections import defaultdict

from .. import core
from ..framework import Program, default_main_program, Parameter, Variable
from ..backward import _rename_arg_

# Size in bytes of one element for each supported VarDesc dtype; intended
# for comparing memory footprints of candidate reuse pairs.
dtype_to_size = {
    core.VarDesc.VarType.FP16: 2,
    core.VarDesc.VarType.FP32: 4,
    core.VarDesc.VarType.FP64: 8,
    core.VarDesc.VarType.INT16: 2,
    core.VarDesc.VarType.INT32: 4,
    core.VarDesc.VarType.INT64: 8,
    core.VarDesc.VarType.BOOL: 1
}
29

30
SUB_BLOCK_OPS = [
31 32 33
    "while", "while_grad", "parallel_do", "parallel_do_grad",
    "conditional_block", "conditional_block_grad"
]
34

35 36 37
SUB_BLOCK_PAIR = [("while", "while_grad"), ("parallel_do", "parallel_do_grad"),
                  ("conditional_block", "conditional_block_grad")]

Q
qiaolongfei 已提交
38 39
PRINT_LOG = False

40 41

class ControlFlowGraph(object):
    """Liveness analysis over a linear op sequence.

    Treats the op list as a straight-line graph (op i -> op i+1) and
    computes live-in/live-out variable name sets.  Those sets drive two
    optimizations: early deletion of dead vars (release_memory) and reuse
    of a dead var's memory for a newly defined var of compatible
    shape/dtype (memory_optimize).
    """

    def __init__(self, program, ops, forward_num, skip_opt):
        # Program that owns these ops; needed to rewrite var descs on reuse.
        self._program = program
        self._ops = ops
        # ops[0:forward_num] belong to the forward pass; the rest backward.
        self._forward_num = forward_num
        self._successors = defaultdict(set)
        self._presuccessors = defaultdict(set)
        self._uses = defaultdict(set)
        self._defs = defaultdict(set)
        self._live_in = defaultdict(set)
        self._live_out = defaultdict(set)
        # Var names that must never be reused or deleted.
        self._skip_opt = skip_opt

    def _add_connections(self, connections):
        """Populates _successors and _presuccessors for two neighbor nodes."""
        for node1, node2 in connections:
            self._add(node1, node2)

    def _add(self, node1, node2):
        self._successors[node1].add(node2)
        self._presuccessors[node2].add(node1)

    # TODO(panyx0718): We need to have a unified way of building intermediate
    # representation.
    def _build_graph(self):
        """Build a graph based on op sequence.
        """
        self.op_size = len(self._ops)
        op_node_connections = [(i, i + 1) for i in range(self.op_size - 1)]
        self._add_connections(op_node_connections)
        for i in range(self.op_size):
            self._uses[i].update(self._ops[i].input_arg_names())
            self._defs[i].update(self._ops[i].output_arg_names())

    def _update_graph(self, old_name, new_name, begin_idx=0):
        """Renames old_name to new_name in uses/defs/liveness from begin_idx on."""
        for i in range(begin_idx, self.op_size):
            if old_name in self._uses[i]:
                self._uses[i].remove(old_name)
                self._uses[i].add(new_name)
            if old_name in self._defs[i]:
                self._defs[i].remove(old_name)
                self._defs[i].add(new_name)
            if old_name in self._live_in[i]:
                self._live_in[i].remove(old_name)
                # Bug fix: the renamed var must stay in live_in; it was
                # previously added to _live_out by mistake, dropping it
                # from the live-in set entirely.
                self._live_in[i].add(new_name)
            if old_name in self._live_out[i]:
                self._live_out[i].remove(old_name)
                self._live_out[i].add(new_name)

    def _reach_fixed_point(self, live_in, live_out):
        """Check if the liveness set has stablized."""
        if len(live_in) != len(self._live_in):
            return False
        if len(live_out) != len(self._live_out):
            return False
        for i in range(self.op_size):
            if (live_in[i] != self._live_in[i] or
                    live_out[i] != self._live_out[i]):
                return False
        return True

    def _dataflow_analyze(self):
        """Backward liveness analysis iterated to a fixed point."""
        self._build_graph()
        live_in = defaultdict(set)
        live_out = defaultdict(set)
        # Repeatedly apply liveness updates until the algorithm stablize
        # on a complete set live input vars and live output vars.
        while True:
            # Bug fix: iterate op_size-1 .. 0 inclusive.  The previous
            # range(self.op_size, 0, -1) skipped op 0 and touched the
            # nonexistent index op_size, so op 0's liveness was never
            # computed.
            for i in range(self.op_size - 1, -1, -1):
                live_in[i] = set(self._live_in[i])
                live_out[i] = set(self._live_out[i])
                for s in self._successors[i]:
                    self._live_out[i] |= self._live_in[s]
                self._live_in[i] = self._uses[i] | (
                    self._live_out[i] - self._defs[i])
            if self._reach_fixed_point(live_in, live_out):
                break

    def _get_diff(self, a, b):
        """Returns (a - a&b, b - a&b): the elements unique to each set."""
        u = a & b
        return a - u, b - u

    def _has_var(self, block_desc, var_name, is_forward):
        if is_forward:
            return block_desc.has_var(str(var_name))
        else:
            # Backward blocks may refer to vars of ancestor blocks.
            return block_desc.has_var_recursive(str(var_name))

    def _find_var(self, block_desc, var_name, is_forward):
        if is_forward:
            return block_desc.find_var(str(var_name))
        else:
            return block_desc.find_var_recursive(str(var_name))

    def _check_var_validity(self, block_desc, x, is_forward):
        """A var is optimizable iff it exists in block_desc and is a
        non-persistable, non-skipped LOD_TENSOR with a known shape."""
        if str(x) == "@EMPTY@":
            return False
        if not self._has_var(block_desc, x, is_forward):
            return False
        if self._find_var(block_desc, x, is_forward).persistable():
            return False
        if self._find_var(block_desc, x,
                          is_forward).type() != core.VarDesc.VarType.LOD_TENSOR:
            return False
        if x in self._skip_opt:
            return False
        if not self._find_var(block_desc, x, is_forward).shape():
            return False
        return True

    # TODO(panyx0718): This needs to be less hacky. It seems memory optimization
    # doesn't consider vars copied between cpu and gpu.
    def _update_skip_opt_set(self):
        for i in range(self.op_size):
            op = self._ops[i]
            # Outputs of force_cpu fill_constant ops must not be reused.
            if op.type() == "fill_constant" and op.attr("force_cpu") == True:
                self._skip_opt.update(op.output_arg_names())

    def release_memory(self):
        """Inserts delete_var ops right after the last use of dead vars."""
        self._dataflow_analyze()
        self._update_skip_opt_set()
        fwd_id = 0
        bwd_id = 0
        for i in range(self.op_size):
            op = self._ops[i]
            if op.type() in SUB_BLOCK_OPS:
                continue
            block_desc = op.block()
            is_forward = i < self._forward_num
            in_diff, out_diff = self._get_diff(self._live_in[i],
                                               self._live_out[i])
            # Bug fix (py3 compat): use a list comprehension instead of
            # filter().  A filter object is always truthy, so the emptiness
            # check below would wrongly pass on Python 3 and insert
            # delete_var ops with empty inputs.
            can_optimize = [
                x for x in in_diff
                if self._check_var_validity(block_desc, x, is_forward)
            ]
            if can_optimize:
                # fwd_id/bwd_id count delete ops already inserted, which
                # shift every later insertion index by one.
                index = i + fwd_id + 1 if is_forward else i - self._forward_num + bwd_id + 1
                delete_op = block_desc.insert_op(index)
                delete_op.set_type("delete_var")
                delete_op.set_input("X", can_optimize)
                if is_forward:
                    fwd_id += 1
                else:
                    bwd_id += 1

    def memory_optimize(self, level=0):
        """Reuses the memory of dead vars for newly defined vars.

        :param level: 0 requires exactly equal shapes to reuse; 1 allows
            reuse when the cached var is at least as large as the new var.
        """

        def compare_shape(x_shape, cache_shape, opt_level):
            if opt_level == 0:
                return x_shape == cache_shape
            elif opt_level == 1:
                # py3 compat: reduce is no longer a builtin.
                from functools import reduce
                # A -1 (batch) leading dim must appear on both sides or
                # neither; otherwise sizes are not comparable.
                if (x_shape[0] == -1) ^ (cache_shape[0] == -1):
                    return False
                x_size = abs(reduce(lambda x, y: x * y, x_shape))
                cache_size = abs(reduce(lambda x, y: x * y, cache_shape))
                if x_size <= cache_size:
                    return True
            else:
                raise ValueError("only support opt_level 0 or 1.")
            return False

        self._dataflow_analyze()
        self._update_skip_opt_set()
        # Pool of (var_name, shape) pairs whose memory is free for reuse.
        self.pool = []
        for i in range(self.op_size):
            op = self._ops[i]
            if op.type() in SUB_BLOCK_OPS:
                continue
            block_desc = op.block()
            is_forward = i < self._forward_num
            if self.pool:
                # Vars defined by this op that could adopt a cached var's
                # memory (list comprehension for py3 compat, see
                # release_memory).
                defs_can_optimize = [
                    x for x in self._defs[i]
                    if self._check_var_validity(block_desc, x, is_forward)
                ]
                out_pair = [
                    (x, self._find_var(block_desc, x, is_forward).shape())
                    for x in defs_can_optimize
                ]
                for x, x_shape in out_pair:
                    # If x is both in uses and defs, it can not be optimized!
                    if x in self._uses[i]:
                        continue
                    for index, cache_pair in enumerate(self.pool):
                        cache_var = cache_pair[0]
                        cache_shape = cache_pair[1]
                        if not compare_shape(x_shape, cache_shape, level):
                            continue

                        if not self._has_var(block_desc, cache_var, is_forward):
                            continue

                        x_dtype = self._find_var(block_desc, x,
                                                 is_forward).dtype()
                        cache_dtype = self._find_var(block_desc, cache_var,
                                                     is_forward).dtype()
                        # TODO(qijun): actually, we should compare
                        # dtype_to_size[x_dtype] and dtype_to_size[cache_dtype]
                        if x_dtype != cache_dtype:
                            continue

                        if PRINT_LOG:
                            print(("Hit Cache !!!! cache pool index "
                                   "is %d, var name is %s, "
                                   "cached var name is %s, "
                                   "var shape is %s ") % (index, x, cache_var,
                                                          str(cache_shape)))
                        self.pool.pop(index)
                        if x == cache_var:
                            break
                        # Rename the var to the cache var already with
                        # memory allocated in order to reuse the memory.
                        _rename_arg_(self._ops, x, cache_var, begin_idx=i)
                        self._program.block(block_desc.id).var(str(
                            x)).desc = self._find_var(block_desc, cache_var,
                                                      is_forward)
                        self._update_graph(x, cache_var, begin_idx=i)
                        break

            # Vars that die at this op are returned to the pool for reuse.
            in_diff, _ = self._get_diff(self._live_in[i], self._live_out[i])
            can_optimize = [
                x for x in in_diff
                if self._check_var_validity(block_desc, x, is_forward)
            ]
            if can_optimize:
                for var_name in can_optimize:
                    self.pool.append((var_name, self._find_var(
                        block_desc, var_name, is_forward).shape()))


267
def _process_sub_block_pair(pdesc, sub_block_pair):
268 269 270 271 272 273 274 275 276 277 278 279 280
    """Creates a list of tuple each of which tracks info of a subblock.

      Note: this function doesn't handle nested subblocks yet.
      TODO(panyx0718): assert if case nested subblocks happen.

    :param pdesc: ProgramDesc.
    :param sub_block_pair: A list op pairs. Each op pair is the forward
        op and backward op. The ops in the list are special that they contain
        a subblock of ops.
    :return: A list of tuples, each tuple is (all ops in a subblock pair
        including forward and backward, number of forward ops,
        all output args names of the ops in the subblock pairs).
    """
281 282 283
    ops_list = []
    block_desc = pdesc.block(0)
    op_size = block_desc.op_size()
284 285 286 287 288 289 290 291 292 293 294 295 296
    for fwd_op, bwd_op in sub_block_pair:
        sub_block_ids = []
        grad_sub_block_ids = []
        sub_block_id_pair = []
        sub_op_dict = {}
        for i in range(op_size):
            op = block_desc.op(i)
            if op.type() == fwd_op:
                sub_block_ids.append(op.attr("sub_block").id)
                sub_op_dict[op.attr("sub_block").id] = op
            elif op.type() == bwd_op:
                grad_sub_block_ids.append(op.attr("sub_block").id)
                sub_op_dict[op.attr("sub_block").id] = op
297

298 299
        # Find fwd_op/bwd_op block pair
        for grad_id in grad_sub_block_ids:
Q
qijun 已提交
300 301 302 303
            fwd_id = pdesc.block(grad_id).get_forward_block_idx()
            if fwd_id in sub_block_ids:
                sub_block_id_pair.append((fwd_id, grad_id))
                sub_block_ids.remove(fwd_id)
304

305
        # Get fwd_op/bwd_op block ops
Q
qijun 已提交
306
        for fwd_id, grad_id in sub_block_id_pair:
307
            sub_block_ops = []
Q
qijun 已提交
308
            sub_block = pdesc.block(fwd_id)
309 310 311
            block_op_size = sub_block.op_size()
            for i in range(block_op_size):
                sub_block_ops.append(sub_block.op(i))
312

313 314 315 316
            grad_sub_block = pdesc.block(grad_id)
            grad_sub_block_op_size = grad_sub_block.op_size()
            for i in range(grad_sub_block_op_size):
                sub_block_ops.append(grad_sub_block.op(i))
317

318
            sub_op_output = set()
Q
qijun 已提交
319
            sub_op_output.update(sub_op_dict[fwd_id].output_arg_names())
320 321
            sub_op_output.update(sub_op_dict[grad_id].output_arg_names())
            ops_list.append((sub_block_ops, block_op_size, sub_op_output))
322

323
        # Process rest fwd_op block ops
Q
qijun 已提交
324
        for fwd_id in sub_block_ids:
325
            sub_block_ops = []
Q
qijun 已提交
326
            sub_block = pdesc.block(fwd_id)
327 328 329 330
            sub_block_op_size = sub_block.op_size()
            for i in range(sub_block_op_size):
                sub_block_ops.append(sub_block.op(i))
            sub_op_output = set()
Q
qijun 已提交
331
            sub_op_output.update(sub_op_dict[fwd_id].output_arg_names())
332 333
            ops_list.append((sub_block_ops, sub_block_op_size, sub_op_output))
    return ops_list
334

335

336
def _get_cfgs(input_program):
    """Process each block and create ControlFlowGraph for each of them.

    :param input_program: Program object.
    :return: A list of ControlFlowGraph, each corresponds to a block.
    """
    pdesc = input_program.get_desc()
    global_block = pdesc.block(0)
    num_ops = global_block.op_size()

    # The global block comes first: all its ops, with no skip set.
    ops_list = [([global_block.op(i) for i in range(num_ops)], num_ops, set())]

    # Only process one level of nested subblock.
    for entry in _process_sub_block_pair(pdesc, SUB_BLOCK_PAIR):
        ops_list.append(entry)

    return [
        ControlFlowGraph(input_program, ops, forward_num, skip_opt)
        for ops, forward_num, skip_opt in ops_list
    ]
358 359


360
def memory_optimize(input_program, print_log=False, level=0):
    """Optimize memory by reusing var memory.

      Note: it doesn't support subblock nested in subblock.

    :param input_program: Input Program.
    :param print_log: whether to print debug log.
    :param level: 0 reuses only when shapes are exactly equal; 1 also
        reuses when the cached var is at least as large as the new var.
    :return: None
    """
    if level not in (0, 1):
        raise ValueError("only support opt_level 0 or 1.")
    global PRINT_LOG
    PRINT_LOG = print_log
    for graph in _get_cfgs(input_program):
        graph.memory_optimize(level)


def release_memory(input_program):
    """Insert delete_var ops so variables are freed as soon as they die.

    :param input_program: Input Program.
    :return: None
    """
    for graph in _get_cfgs(input_program):
        graph.release_memory()