memory_optimization_transpiler.py
#   Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from collections import defaultdict
import framework
from framework import Program, default_main_program, Parameter, Variable
import backward
from backward import _rename_arg_
from . import core

dtype_to_size = {
    core.VarDesc.VarType.FP16: 2,
    core.VarDesc.VarType.FP32: 4,
    core.VarDesc.VarType.FP64: 8,
    core.VarDesc.VarType.INT16: 2,
    core.VarDesc.VarType.INT32: 4,
    core.VarDesc.VarType.INT64: 8,
    core.VarDesc.VarType.BOOL: 1
}

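# Ops whose real bodies live in a sub-block; the per-op passes below skip
# them, and _process_sub_block_pair collects their sub-block ops separately.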
SUB_BLOCK_OPS = [
    "while", "while_grad", "parallel_do", "parallel_do_grad",
    "conditional_block", "conditional_block_grad"
]

SUB_BLOCK_PAIR = [("while", "while_grad"), ("parallel_do", "parallel_do_grad"),
                  ("conditional_block", "conditional_block_grad")]

PRINT_LOG = False


class ControlFlowGraph(object):
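    """Per-block liveness analysis over a flat list of ops.

    The graph is the simple chain of ops in program order plus per-op use/def
    sets. memory_optimize() renames an op's output vars onto dead vars kept
    in a reuse pool (matching shape and dtype), while release_memory()
    inserts delete_var ops right after each var's last use.
    """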
    def __init__(self, program, ops, forward_num, skip_opt):
        self._program = program
        self._ops = ops
        self._forward_num = forward_num
        self._successors = defaultdict(set)
        self._presuccessors = defaultdict(set)
        self._uses = defaultdict(set)
        self._defs = defaultdict(set)
        self._live_in = defaultdict(set)
        self._live_out = defaultdict(set)
        self._skip_opt = skip_opt

    def _add_connections(self, connections):
        """Populates _successors and _presuccessors for two neighbor nodes."""
        for node1, node2 in connections:
            self._add(node1, node2)

    def _add(self, node1, node2):
        self._successors[node1].add(node2)
        self._presuccessors[node2].add(node1)

    # TODO(panyx0718): We need to have a unified way of building intermediate
    # representation.
    def _build_graph(self):
        """Build a graph based on op sequence.

        Each op is a node; consecutive ops are chained together, and every
        node's use/def sets are filled from the op's input/output arg names.
        """
        self.op_size = len(self._ops)
        op_node_connections = [(i, i + 1) for i in range(self.op_size - 1)]
        self._add_connections(op_node_connections)
        for i in range(self.op_size):
            self._uses[i].update(self._ops[i].input_arg_names())
            self._defs[i].update(self._ops[i].output_arg_names())

    def _update_graph(self, old_name, new_name, begin_idx=0):
        for i in range(begin_idx, self.op_size):
            if old_name in self._uses[i]:
                self._uses[i].remove(old_name)
                self._uses[i].add(new_name)
            if old_name in self._defs[i]:
                self._defs[i].remove(old_name)
                self._defs[i].add(new_name)
            if old_name in self._live_in[i]:
                self._live_in[i].remove(old_name)
                self._live_in[i].add(new_name)
            if old_name in self._live_out[i]:
                self._live_out[i].remove(old_name)
                self._live_out[i].add(new_name)

    def _reach_fixed_point(self, live_in, live_out):
        """Check if the liveness set has stabilized."""
        if len(live_in) != len(self._live_in):
            return False
        if len(live_out) != len(self._live_out):
            return False
        for i in range(self.op_size):
            if (live_in[i] != self._live_in[i] or
                    live_out[i] != self._live_out[i]):
                return False
        return True

    def _dataflow_analyze(self):
        self._build_graph()
        live_in = defaultdict(set)
        live_out = defaultdict(set)
        # Repeatedly apply liveness updates until the algorithm stabilizes
        # on a complete set of live input vars and live output vars.
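        # In equation form, the fixed point being computed is:
        #   live_out[i] = union of live_in[s] for every successor s of op i
        #   live_in[i]  = uses[i] | (live_out[i] - defs[i])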
        while True:
            for i in reversed(range(self.op_size)):
                live_in[i] = set(self._live_in[i])
                live_out[i] = set(self._live_out[i])
                for s in self._successors[i]:
                    self._live_out[i] |= self._live_in[s]
                self._live_in[i] = self._uses[i] | (
                    self._live_out[i] - self._defs[i])
            if self._reach_fixed_point(live_in, live_out):
                break

    def _get_diff(self, a, b):
        u = a & b
        return a - u, b - u

    def _has_var(self, block_desc, var_name, is_forward):
        if is_forward:
            return block_desc.has_var(str(var_name))
        else:
            return block_desc.has_var_recursive(str(var_name))

    def _find_var(self, block_desc, var_name, is_forward):
        if is_forward:
            return block_desc.find_var(str(var_name))
        else:
            return block_desc.find_var_recursive(str(var_name))

    def _check_var_validity(self, block_desc, x, is_forward):
        if str(x) == "@EMPTY@":
            return False
        if not self._has_var(block_desc, x, is_forward):
            return False
        if self._find_var(block_desc, x, is_forward).persistable():
            return False
        if self._find_var(block_desc, x,
                          is_forward).type() != core.VarDesc.VarType.LOD_TENSOR:
            return False
        if x in self._skip_opt:
            return False
        if not self._find_var(block_desc, x, is_forward).shape():
            return False
        return True

    # TODO(panyx0718): This needs to be less hacky. It seems memory optimization
    # doesn't consider vars copied between cpu and gpu.
    def _update_skip_opt_set(self):
        for i in range(self.op_size):
            op = self._ops[i]
            if op.type() == "fill_constant" and op.attr("force_cpu") == True:
                self._skip_opt.update(op.output_arg_names())

    def release_memory(self):
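        """Insert a `delete_var` op right after the last op that uses each
        non-persistable LOD_TENSOR var, so the runtime can free the var as
        soon as liveness analysis shows it is dead.
        """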
        self._dataflow_analyze()
        self._update_skip_opt_set()
        fwd_id = 0
        bwd_id = 0
        for i in range(self.op_size):
            op = self._ops[i]
            if op.type() in SUB_BLOCK_OPS:
                continue
            block_desc = op.block()
            is_forward = i < self._forward_num
            in_diff, out_diff = self._get_diff(self._live_in[i],
                                               self._live_out[i])
            can_optimize = filter(
                lambda x: self._check_var_validity(block_desc, x, is_forward),
                in_diff)
            if can_optimize:
                index = i + fwd_id + 1 if is_forward else i - self._forward_num + bwd_id + 1
                delete_op = block_desc.insert_op(index)
                delete_op.set_type("delete_var")
                delete_op.set_input("X", can_optimize)
                if is_forward:
                    fwd_id += 1
                else:
                    bwd_id += 1

    def memory_optimize(self, level=0):
        def compare_shape(x_shape, cache_shape, opt_level):
            if opt_level == 0:
                return x_shape == cache_shape
            elif opt_level == 1:
                if (x_shape[0] == -1) ^ (cache_shape[0] == -1):
                    return False
                x_size = abs(reduce(lambda x, y: x * y, x_shape))
                cache_size = abs(reduce(lambda x, y: x * y, cache_shape))
                if x_size <= cache_size:
                    return True
            else:
                raise ValueError("only support opt_level 0 or 1.")
            return False
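        # For example (a sketch of the intent): with level=0 a var shaped
        # [32, 128] only reuses a cached var shaped exactly [32, 128]; with
        # level=1 it may also reuse a cached [32, 256], because 32 * 128 <=
        # 32 * 256 and both shapes agree on whether dim 0 is -1.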

        self._dataflow_analyze()
        self._update_skip_opt_set()
        self.pool = []
        for i in range(self.op_size):
            op = self._ops[i]
            if op.type() in SUB_BLOCK_OPS:
                continue
            block_desc = op.block()
            is_forward = i < self._forward_num
            if self.pool:
                defs_can_optimize = filter(
                    lambda x: self._check_var_validity(block_desc, x, is_forward),
                    self._defs[i])
                out_pair = [
                    (x, self._find_var(block_desc, x, is_forward).shape())
                    for x in defs_can_optimize
                ]
                for x, x_shape in out_pair:
                    # If x is both in uses and defs, it can not be optimized!
                    if x in self._uses[i]:
                        continue
                    for index, cache_pair in enumerate(self.pool):
                        cache_var = cache_pair[0]
                        cache_shape = cache_pair[1]
                        if not compare_shape(x_shape, cache_shape, level):
                            continue

                        if not self._has_var(block_desc, cache_var, is_forward):
                            continue

                        x_dtype = self._find_var(block_desc, x,
                                                 is_forward).dtype()
                        cache_dtype = self._find_var(block_desc, cache_var,
                                                     is_forward).dtype()
                        # TODO(qijun): actually, we should compare
                        # dtype_to_size[x_dtype] and dtype_to_size[cache_dtype]
                        if x_dtype != cache_dtype:
                            continue

                        if PRINT_LOG:
                            print(("Hit Cache !!!! cache pool index "
                                   "is %d, var name is %s, "
                                   "cached var name is %s, "
                                   "var shape is %s ") % (index, x, cache_var,
                                                          str(cache_shape)))
                        self.pool.pop(index)
                        if x == cache_var:
                            break
                        # Rename the var to the cache var already with
                        # memory allocated in order to reuse the memory.
                        _rename_arg_(self._ops, x, cache_var, begin_idx=i)
                        self._program.block(block_desc.id).var(str(
                            x)).desc = self._find_var(block_desc, cache_var,
                                                      is_forward)
                        self._update_graph(x, cache_var, begin_idx=i)
                        break

            in_diff, _ = self._get_diff(self._live_in[i], self._live_out[i])
            can_optimize = filter(
                lambda x: self._check_var_validity(block_desc, x, is_forward),
                in_diff)
            if can_optimize:
                for var_name in can_optimize:
                    self.pool.append((var_name, self._find_var(
                        block_desc, var_name, is_forward).shape()))


def _process_sub_block_pair(pdesc, sub_block_pair):
    """Creates a list of tuples, each of which tracks info of a subblock.

      Note: this function doesn't handle nested subblocks yet.
      TODO(panyx0718): assert if nested subblocks happen.

    :param pdesc: ProgramDesc.
    :param sub_block_pair: A list of op pairs. Each op pair is a forward
        op and its backward op. The ops in the list are special in that they
        contain a subblock of ops.
    :return: A list of tuples; each tuple is (all ops in a subblock pair,
        including forward and backward, number of forward ops,
        all output arg names of the ops in the subblock pair).
    """
    ops_list = []
    block_desc = pdesc.block(0)
    op_size = block_desc.op_size()
    for fwd_op, bwd_op in sub_block_pair:
        sub_block_ids = []
        grad_sub_block_ids = []
        sub_block_id_pair = []
        sub_op_dict = {}
        for i in range(op_size):
            op = block_desc.op(i)
            if op.type() == fwd_op:
                sub_block_ids.append(op.attr("sub_block").id)
                sub_op_dict[op.attr("sub_block").id] = op
            elif op.type() == bwd_op:
                grad_sub_block_ids.append(op.attr("sub_block").id)
                sub_op_dict[op.attr("sub_block").id] = op

        # Find fwd_op/bwd_op block pair
        for grad_id in grad_sub_block_ids:
            fwd_id = pdesc.block(grad_id).get_forward_block_idx()
            if fwd_id in sub_block_ids:
                sub_block_id_pair.append((fwd_id, grad_id))
                sub_block_ids.remove(fwd_id)

        # Get fwd_op/bwd_op block ops
        for fwd_id, grad_id in sub_block_id_pair:
            sub_block_ops = []
            sub_block = pdesc.block(fwd_id)
            block_op_size = sub_block.op_size()
            for i in range(block_op_size):
                sub_block_ops.append(sub_block.op(i))

            grad_sub_block = pdesc.block(grad_id)
            grad_sub_block_op_size = grad_sub_block.op_size()
            for i in range(grad_sub_block_op_size):
                sub_block_ops.append(grad_sub_block.op(i))

            sub_op_output = set()
            sub_op_output.update(sub_op_dict[fwd_id].output_arg_names())
            sub_op_output.update(sub_op_dict[grad_id].output_arg_names())
            ops_list.append((sub_block_ops, block_op_size, sub_op_output))

        # Process the remaining fwd_op blocks that have no matching bwd_op
        for fwd_id in sub_block_ids:
            sub_block_ops = []
            sub_block = pdesc.block(fwd_id)
            sub_block_op_size = sub_block.op_size()
            for i in range(sub_block_op_size):
                sub_block_ops.append(sub_block.op(i))
            sub_op_output = set()
            sub_op_output.update(sub_op_dict[fwd_id].output_arg_names())
            ops_list.append((sub_block_ops, sub_block_op_size, sub_op_output))
    return ops_list


def _get_cfgs(input_program):
    """Process each block and create a ControlFlowGraph for each of them.

    :param input_program: Program object.
    :return: A list of ControlFlowGraph, each corresponding to a block.
    """
    ops_list = []
    pdesc = input_program.get_desc()
    block_desc = pdesc.block(0)
    op_size = block_desc.op_size()
    # Get global block ops
    ops_list.append(
        ([block_desc.op(i) for i in range(op_size)], op_size, set()))

    # Only process one level of nested subblock.
    ops_list.extend(_process_sub_block_pair(pdesc, SUB_BLOCK_PAIR))

    cfgs = [
        ControlFlowGraph(input_program, ops, forward_num, skip_opt)
        for ops, forward_num, skip_opt in ops_list
    ]
    return cfgs


def memory_optimize(input_program, print_log=False, level=0):
    """Optimize memory by reusing var memory.

      Note: it doesn't support a subblock nested inside another subblock.

    :param input_program: Input Program
    :param print_log: whether to print debug log.
    :param level: If level=0, reuse a cached var only when the shapes are
        exactly equal; if level=1, also reuse when the new var's total size
        is no larger than the cached var's and both shapes agree on whether
        the first dimension is dynamic (-1).
    :return:
    """
    if level != 0 and level != 1:
        raise ValueError("only support opt_level 0 or 1.")
    global PRINT_LOG
    PRINT_LOG = print_log
    cfgs = _get_cfgs(input_program)
    for cfg in cfgs:
        cfg.memory_optimize(level)


def release_memory(input_program):
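    """Insert release-memory (delete_var) ops into the program without
    renaming or reusing any vars; the global block and one level of
    subblocks are processed, and input_program is modified in place.

    :param input_program: Input Program
    :return:
    """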
    cfgs = _get_cfgs(input_program)
    for cfg in cfgs:
        cfg.release_memory()
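

# A minimal usage sketch (the surrounding fluid program setup and the import
# path are assumptions, not defined in this file): append the backward pass,
# run memory_optimize on the main program, then build the executor as usual.
#
#   import paddle.fluid as fluid
#   ...
#   optimizer.minimize(avg_cost)
#   fluid.memory_optimize(fluid.default_main_program(), print_log=False,
#                         level=0)
#   exe = fluid.Executor(fluid.CPUPlace())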