#   Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
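
"""Memory optimization transpiler.

Runs a liveness (dataflow) analysis over the op sequence of a Program.
``memory_optimize`` reuses the memory of variables whose liveness has ended by
renaming newly defined variables to dead, shape- and dtype-compatible cached
variables, while ``release_memory`` inserts ``delete_var`` ops so that dead
variables are freed as early as possible.
"""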

from collections import defaultdict
# functools.reduce is needed under Python 3 (it is a builtin in Python 2)
from functools import reduce

from .. import core
from ..framework import Program, default_main_program, Parameter, Variable
from ..backward import _rename_arg_

dtype_to_size = {
    core.VarDesc.VarType.FP16: 2,
    core.VarDesc.VarType.FP32: 4,
    core.VarDesc.VarType.FP64: 8,
    core.VarDesc.VarType.INT16: 2,
    core.VarDesc.VarType.INT32: 4,
    core.VarDesc.VarType.INT64: 8,
    core.VarDesc.VarType.BOOL: 1,
    core.VarDesc.VarType.UINT8: 1,
}

SUB_BLOCK_OPS = [
    "while", "while_grad", "parallel_do", "parallel_do_grad",
    "conditional_block", "conditional_block_grad"
]

SUB_BLOCK_PAIR = [("while", "while_grad"), ("parallel_do", "parallel_do_grad"),
                  ("conditional_block", "conditional_block_grad")]

PRINT_LOG = False


class ControlFlowGraph(object):
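    """Control-flow graph over a sequence of ops.

    Ops are nodes linked in execution order; ``_dataflow_analyze`` runs a
    standard backward liveness analysis over this graph, so that for every op
    we know which variables are live right before and right after it.
    """
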
    def __init__(self, program, ops, forward_num, skip_opt):
        self._program = program
        self._ops = ops
        self._forward_num = forward_num
        self._successors = defaultdict(set)
        self._presuccessors = defaultdict(set)
        self._uses = defaultdict(set)
        self._defs = defaultdict(set)
        self._live_in = defaultdict(set)
        self._live_out = defaultdict(set)
        self._skip_opt = skip_opt

    def _add_connections(self, connections):
        """Populates _successors and _presuccessors for two neighbor nodes."""
        for node1, node2 in connections:
            self._add(node1, node2)

    def _add(self, node1, node2):
        self._successors[node1].add(node2)
        self._presuccessors[node2].add(node1)

    # TODO(panyx0718): We need to have a unified way of building intermediate
    # representation.
    def _build_graph(self):
        """Build a graph based on op sequence.
        """
        self.op_size = len(self._ops)
        op_node_connections = [(i, i + 1) for i in range(self.op_size - 1)]
        self._add_connections(op_node_connections)
        for i in range(self.op_size):
            self._uses[i].update(self._ops[i].input_arg_names())
            self._defs[i].update(self._ops[i].output_arg_names())

    def _update_graph(self, old_name, new_name, begin_idx=0):
        for i in range(begin_idx, self.op_size):
            if old_name in self._uses[i]:
                self._uses[i].remove(old_name)
                self._uses[i].add(new_name)
            if old_name in self._defs[i]:
                self._defs[i].remove(old_name)
                self._defs[i].add(new_name)
            if old_name in self._live_in[i]:
                self._live_in[i].remove(old_name)
                self._live_in[i].add(new_name)
            if old_name in self._live_out[i]:
                self._live_out[i].remove(old_name)
                self._live_out[i].add(new_name)

    def _reach_fixed_point(self, live_in, live_out):
        """Check if the liveness set has stablized."""
        if len(live_in) != len(self._live_in):
            return False
        if len(live_out) != len(self._live_out):
            return False
        for i in range(self.op_size):
            if (live_in[i] != self._live_in[i] or
                    live_out[i] != self._live_out[i]):
                return False
        return True

    def _dataflow_analyze(self):
        self._build_graph()
        live_in = defaultdict(set)
        live_out = defaultdict(set)
        # Repeatedly apply liveness updates until the algorithm stabilizes
        # on a complete set of live input and live output variables.
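        # Standard backward liveness equations computed by the sweep below:
        #   live_out[i] = union of live_in[s] for every successor s of op i
        #   live_in[i]  = use[i] | (live_out[i] - def[i])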
        while True:
            for i in range(self.op_size - 1, -1, -1):
                live_in[i] = set(self._live_in[i])
                live_out[i] = set(self._live_out[i])
                for s in self._successors[i]:
                    self._live_out[i] |= self._live_in[s]
                self._live_in[i] = self._uses[i] | (
                    self._live_out[i] - self._defs[i])
            if self._reach_fixed_point(live_in, live_out):
                break

    def _get_diff(self, a, b):
        u = a & b
        return a - u, b - u

    def _has_var(self, block_desc, var_name, is_forward):
        if is_forward:
            return block_desc.has_var(str(var_name))
        else:
            return block_desc.has_var_recursive(str(var_name))

    def _find_var(self, block_desc, var_name, is_forward):
        if is_forward:
            return block_desc.find_var(str(var_name))
        else:
            return block_desc.find_var_recursive(str(var_name))

    def _check_var_validity(self, block_desc, x, is_forward):
        if str(x) == "@EMPTY@":
            return False
        if not self._has_var(block_desc, x, is_forward):
            return False
        if self._find_var(block_desc, x, is_forward).persistable():
            return False
        if self._find_var(block_desc, x,
                          is_forward).type() != core.VarDesc.VarType.LOD_TENSOR:
            return False
        if x in self._skip_opt:
            return False
        if not self._find_var(block_desc, x, is_forward).shape():
            return False
        return True

    # TODO(panyx0718): This needs to be less hacky. It seems memory optimization
    # doesn't consider vars copied between cpu and gpu.
    def _update_skip_opt_set(self):
        for i in range(self.op_size):
            op = self._ops[i]
            if op.type() == "fill_constant" and op.attr("force_cpu") == True:
                self._skip_opt.update(op.output_arg_names())

    def release_memory(self):
        self._dataflow_analyze()
        self._update_skip_opt_set()
        fwd_id = 0
        bwd_id = 0
        for i in range(self.op_size):
            op = self._ops[i]
            if op.type() in SUB_BLOCK_OPS:
                continue
            block_desc = op.block()
            is_forward = i < self._forward_num
            in_diff, out_diff = self._get_diff(self._live_in[i],
                                               self._live_out[i])
            can_optimize = [
                x for x in in_diff
                if self._check_var_validity(block_desc, x, is_forward)
            ]
            if can_optimize:
                index = i + fwd_id + 1 if is_forward else i - self._forward_num + bwd_id + 1
                delete_op = block_desc.insert_op(index)
                delete_op.set_type("delete_var")
                delete_op.set_input("X", can_optimize)
                if is_forward:
                    fwd_id += 1
                else:
                    bwd_id += 1

    def memory_optimize(self, level=0):
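        # Greedy in-place reuse: ``self.pool`` caches (name, shape) pairs of
        # variables whose liveness has ended.  Each newly defined output is
        # matched against the pool by shape (according to ``level``) and
        # dtype; on a hit it is renamed to the cached variable so both share
        # a single allocation.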
        def compare_shape(x_shape, cache_shape, opt_level):
            if opt_level == 0:
                return x_shape == cache_shape
            elif opt_level == 1:
                if (x_shape[0] == -1) ^ (cache_shape[0] == -1):
                    return False
                x_size = abs(reduce(lambda x, y: x * y, x_shape))
                cache_size = abs(reduce(lambda x, y: x * y, cache_shape))
                if x_size <= cache_size:
                    return True
            else:
                raise ValueError("only support opt_level 0 or 1.")
            return False

        self._dataflow_analyze()
        self._update_skip_opt_set()
        self.pool = []
        for i in range(self.op_size):
            op = self._ops[i]
            if op.type() in SUB_BLOCK_OPS:
                continue
            block_desc = op.block()
            is_forward = i < self._forward_num
            if self.pool:
                defs_can_optimize = [
                    x for x in self._defs[i]
                    if self._check_var_validity(block_desc, x, is_forward)
                ]
                out_pair = [
                    (x, self._find_var(block_desc, x, is_forward).shape())
                    for x in defs_can_optimize
                ]
                for x, x_shape in out_pair:
                    # If x is both in uses and defs, it can not be optimized!
                    if x in self._uses[i]:
                        continue
                    for index, cache_pair in enumerate(self.pool):
                        cache_var = cache_pair[0]
                        cache_shape = cache_pair[1]
                        if not compare_shape(x_shape, cache_shape, level):
                            continue

                        if not self._has_var(block_desc, cache_var, is_forward):
                            continue

                        x_dtype = self._find_var(block_desc, x,
                                                 is_forward).dtype()
                        cache_dtype = self._find_var(block_desc, cache_var,
                                                     is_forward).dtype()
                        # TODO(qijun): actually, we should compare
                        # dtype_to_size[x_dtype] and dtype_to_size[cache_dtype]
                        if x_dtype != cache_dtype:
                            continue

                        if PRINT_LOG:
                            print(("Hit Cache !!!! cache pool index "
                                   "is %d, var name is %s, "
                                   "cached var name is %s, "
                                   "var shape is %s ") % (index, x, cache_var,
                                                          str(cache_shape)))
                        self.pool.pop(index)
                        if x == cache_var:
                            break
                        # Rename the var to the cache var, which already has
                        # memory allocated, so that the memory is reused.
                        _rename_arg_(self._ops, x, cache_var, begin_idx=i)
                        self._program.block(block_desc.id).var(str(
                            x)).desc = self._find_var(block_desc, cache_var,
                                                      is_forward)
                        self._update_graph(x, cache_var, begin_idx=i)
                        break

            in_diff, _ = self._get_diff(self._live_in[i], self._live_out[i])
            can_optimize = [
                x for x in in_diff
                if self._check_var_validity(block_desc, x, is_forward)
            ]
            if can_optimize:
                for var_name in can_optimize:
                    self.pool.append((var_name, self._find_var(
                        block_desc, var_name, is_forward).shape()))


def _process_sub_block_pair(pdesc, sub_block_pair):
    """Creates a list of tuple each of which tracks info of a subblock.

      Note: this function doesn't handle nested subblocks yet.
      TODO(panyx0718): assert if case nested subblocks happen.

    :param pdesc: ProgramDesc.
    :param sub_block_pair: A list op pairs. Each op pair is the forward
        op and backward op. The ops in the list are special that they contain
        a subblock of ops.
    :return: A list of tuples, each tuple is (all ops in a subblock pair
        including forward and backward, number of forward ops,
        all output args names of the ops in the subblock pairs).
    """
    ops_list = []
    block_desc = pdesc.block(0)
    op_size = block_desc.op_size()
    for fwd_op, bwd_op in sub_block_pair:
        sub_block_ids = []
        grad_sub_block_ids = []
        sub_block_id_pair = []
        sub_op_dict = {}
        for i in range(op_size):
            op = block_desc.op(i)
            if op.type() == fwd_op:
                sub_block_ids.append(op.attr("sub_block").id)
                sub_op_dict[op.attr("sub_block").id] = op
            elif op.type() == bwd_op:
                grad_sub_block_ids.append(op.attr("sub_block").id)
                sub_op_dict[op.attr("sub_block").id] = op

        # Find fwd_op/bwd_op block pair
        for grad_id in grad_sub_block_ids:
            fwd_id = pdesc.block(grad_id).get_forward_block_idx()
            if fwd_id in sub_block_ids:
                sub_block_id_pair.append((fwd_id, grad_id))
                sub_block_ids.remove(fwd_id)

        # Get fwd_op/bwd_op block ops
        for fwd_id, grad_id in sub_block_id_pair:
            sub_block_ops = []
            sub_block = pdesc.block(fwd_id)
            block_op_size = sub_block.op_size()
            for i in range(block_op_size):
                sub_block_ops.append(sub_block.op(i))

            grad_sub_block = pdesc.block(grad_id)
            grad_sub_block_op_size = grad_sub_block.op_size()
            for i in range(grad_sub_block_op_size):
                sub_block_ops.append(grad_sub_block.op(i))

            sub_op_output = set()
            sub_op_output.update(sub_op_dict[fwd_id].output_arg_names())
            sub_op_output.update(sub_op_dict[grad_id].output_arg_names())
            ops_list.append((sub_block_ops, block_op_size, sub_op_output))

        # Process the remaining fwd_op blocks that have no matching bwd_op block
        for fwd_id in sub_block_ids:
            sub_block_ops = []
            sub_block = pdesc.block(fwd_id)
            sub_block_op_size = sub_block.op_size()
            for i in range(sub_block_op_size):
                sub_block_ops.append(sub_block.op(i))
            sub_op_output = set()
            sub_op_output.update(sub_op_dict[fwd_id].output_arg_names())
            ops_list.append((sub_block_ops, sub_block_op_size, sub_op_output))
    return ops_list


def _get_cfgs(input_program):
    """Process each block and create ControlFlowGraph for each of them.

    :param input_program: Program object.
    :return: A list of ControlFlowGraph, each corresponds to a block.
    """
    ops_list = []
    pdesc = input_program.get_desc()
    block_desc = pdesc.block(0)
    op_size = block_desc.op_size()
    # Get global block ops
    ops_list.append(
        ([block_desc.op(i) for i in range(op_size)], op_size, set()))

    # Only process one level of nested subblock.
    ops_list.extend(_process_sub_block_pair(pdesc, SUB_BLOCK_PAIR))

    cfgs = [
        ControlFlowGraph(input_program, ops, forward_num, skip_opt)
        for ops, forward_num, skip_opt in ops_list
    ]
    return cfgs


def memory_optimize(input_program, print_log=False, level=0):
    """Optimize memory by reusing var memory.

      Note: it doesn't not support subblock nested in subblock.

    :param input_program: Input Program
    :param print_log: whether to print debug log.
    :param level: If level=0, reuse if the shape is completely equal, o
    :return:
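
    Examples:
        A minimal usage sketch, assuming this function is exported as
        ``fluid.memory_optimize`` and that the backward and optimizer ops have
        already been appended (network and loss names are illustrative)::

            import paddle.fluid as fluid

            # ... build the network and obtain avg_loss ...
            optimizer = fluid.optimizer.SGD(learning_rate=0.01)
            optimizer.minimize(avg_loss)

            # Rewrite the main program in place so variables reuse memory.
            fluid.memory_optimize(fluid.default_main_program(),
                                  print_log=True,
                                  level=0)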
    """
    if level != 0 and level != 1:
        raise ValueError("only support opt_level 0 or 1.")
    global PRINT_LOG
    PRINT_LOG = print_log
    cfgs = _get_cfgs(input_program)
    for cfg in cfgs:
        cfg.memory_optimize(level)


def release_memory(input_program):
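    """Free the memory of dead variables as early as possible.

      Note: unlike ``memory_optimize`` this pass does not rename or reuse
      variables; it inserts ``delete_var`` ops right after the op at which a
      variable's liveness ends, so its memory is released before the program
      finishes running.

    :param input_program: Input Program.
    :return: None.
    """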
    cfgs = _get_cfgs(input_program)
    for cfg in cfgs:
        cfg.release_memory()