#   Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from collections import defaultdict
import framework
from framework import Program, default_main_program, Parameter, Variable
import backward
from backward import _rename_arg_
from . import core

# Bytes per element for each supported tensor dtype.
# NOTE(review): currently referenced only by a TODO inside
# ControlFlowGraph.memory_optimize (dtype sizes are not yet compared).
dtype_to_size = {
    core.VarDesc.VarType.FP16: 2,
    core.VarDesc.VarType.FP32: 4,
    core.VarDesc.VarType.FP64: 8,
    core.VarDesc.VarType.INT16: 2,
    core.VarDesc.VarType.INT32: 4,
    core.VarDesc.VarType.INT64: 8,
    core.VarDesc.VarType.BOOL: 1
}

# Op types that own a sub-block.  The per-op optimization passes skip
# them; their sub-blocks are handled separately by
# _process_sub_block_pair / _get_cfgs.
sub_block_ops = [
    "while", "while_grad", "parallel_do", "parallel_do_grad",
    "conditional_block", "conditional_block_grad"
]

# Module-level switch set by memory_optimize(print_log=...); when True,
# every cache hit during variable reuse is printed.
PRINT_LOG = False

39 40

class ControlFlowGraph(object):
    """Liveness analysis over a straight-line list of operator descs.

    The ops are chained sequentially (op i -> op i + 1) and the classic
    backward dataflow equations are iterated to a fixed point to compute
    per-op live-in / live-out variable sets.  The result drives two
    transpiler passes:

      * ``memory_optimize`` -- rename a freshly defined variable to a
        dead cached variable of compatible shape/dtype so they share
        memory.
      * ``release_memory`` -- insert ``delete_var`` ops right after the
        point where a variable dies.
    """

    def __init__(self, Program, ops, forward_num, skip_opt):
        # NOTE(review): the parameter name `Program` shadows the imported
        # class; kept as-is for interface compatibility.
        self._program = Program  # the Program being transformed in place
        self._ops = ops  # flat list of op descs to analyze
        self._forward_num = forward_num  # ops[0:forward_num] are forward ops
        self._successors = defaultdict(set)  # op index -> following op indices
        self._presuccessors = defaultdict(set)  # op index -> preceding op indices
        self._uses = defaultdict(set)  # op index -> input var names
        self._defs = defaultdict(set)  # op index -> output var names
        self._live_in = defaultdict(set)  # op index -> vars live before the op
        self._live_out = defaultdict(set)  # op index -> vars live after the op
        self._skip_opt = skip_opt  # var names that must never be optimized

    def _add_connections(self, connections):
        # Add a batch of (from, to) edges to the op graph.
        for node1, node2 in connections:
            self._add(node1, node2)

    def _add(self, node1, node2):
        # Record a directed edge node1 -> node2.
        self._successors[node1].add(node2)
        self._presuccessors[node2].add(node1)

    def _build_graph(self):
        # Ops execute sequentially, so the CFG is a simple chain, and
        # use/def sets come straight from each op's inputs/outputs.
        self.op_size = len(self._ops)
        op_node_connections = [(i, i + 1) for i in range(self.op_size - 1)]
        self._add_connections(op_node_connections)
        for i in range(self.op_size):
            self._uses[i].update(self._ops[i].input_arg_names())
            self._defs[i].update(self._ops[i].output_arg_names())

    def _update_graph(self, old_name, new_name, begin_idx=0):
        # After renaming a variable, rewrite every analysis set from
        # `begin_idx` onward so later iterations see the new name.
        for i in range(begin_idx, self.op_size):
            if old_name in self._uses[i]:
                self._uses[i].remove(old_name)
                self._uses[i].add(new_name)
            if old_name in self._defs[i]:
                self._defs[i].remove(old_name)
                self._defs[i].add(new_name)
            if old_name in self._live_in[i]:
                self._live_in[i].remove(old_name)
                # BUG FIX: the old code added new_name to _live_out here,
                # leaving _live_in without the renamed variable.
                self._live_in[i].add(new_name)
            if old_name in self._live_out[i]:
                self._live_out[i].remove(old_name)
                self._live_out[i].add(new_name)

    def _reach_fixed_point(self, live_in, live_out):
        # True when the snapshots taken before the last sweep equal the
        # current live sets, i.e. the dataflow iteration has converged.
        if len(live_in) != len(self._live_in):
            return False
        if len(live_out) != len(self._live_out):
            return False
        for i in range(self.op_size):
            if live_in[i] != self._live_in[i]:
                return False
        for i in range(self.op_size):
            if live_out[i] != self._live_out[i]:
                return False
        return True

    def _dataflow_analyze(self):
        # Standard backward liveness:
        #   live_out[i] = union of live_in over successors of i
        #   live_in[i]  = uses[i] | (live_out[i] - defs[i])
        self._build_graph()
        live_in = defaultdict(set)
        live_out = defaultdict(set)
        while True:
            # BUG FIX: iterate op_size-1 .. 0.  The old
            # range(self.op_size, 0, -1) skipped op 0 entirely and
            # touched the nonexistent index op_size.
            for i in range(self.op_size - 1, -1, -1):
                live_in[i] = set(self._live_in[i])
                live_out[i] = set(self._live_out[i])
                for s in self._successors[i]:
                    self._live_out[i] |= self._live_in[s]
                self._live_in[i] = self._uses[i] | (
                    self._live_out[i] - self._defs[i])
            if self._reach_fixed_point(live_in, live_out):
                break

    def _get_diff(self, a, b):
        # Return the symmetric difference of two sets as a pair:
        # (elements only in a, elements only in b).
        u = a & b
        return a - u, b - u

    def _has_var(self, block_desc, var_name, is_forward):
        # Backward ops may reference vars defined in ancestor blocks,
        # hence the recursive lookup in the non-forward case.
        if is_forward:
            return block_desc.has_var(str(var_name))
        else:
            return block_desc.has_var_recursive(str(var_name))

    def _find_var(self, block_desc, var_name, is_forward):
        # Same forward/backward distinction as _has_var.
        if is_forward:
            return block_desc.find_var(str(var_name))
        else:
            return block_desc.find_var_recursive(str(var_name))

    def _check_var_validity(self, block_desc, x, is_forward):
        # A var is a reuse/release candidate only if it is a real,
        # non-persistable LoD tensor with a known shape and is not in
        # the explicit skip set.
        if str(x) == "@EMPTY@":
            return False
        if not self._has_var(block_desc, x, is_forward):
            return False
        if self._find_var(block_desc, x, is_forward).persistable():
            return False
        if self._find_var(block_desc, x,
                          is_forward).type() != core.VarDesc.VarType.LOD_TENSOR:
            return False
        if x in self._skip_opt:
            return False
        if not self._find_var(block_desc, x, is_forward).shape():
            return False
        return True

    def _update_skip_opt_set(self):
        # Outputs of force-cpu fill_constant live on the host and must
        # not take over (or donate) GPU memory.
        for i in range(self.op_size):
            op = self._ops[i]
            if op.type() == "fill_constant" and op.attr("force_cpu") == True:
                self._skip_opt.update(op.output_arg_names())

    def release_memory(self):
        """Insert a `delete_var` op right after the op where vars die."""
        self._dataflow_analyze()
        self._update_skip_opt_set()
        fwd_id = 0
        bwd_id = 0
        for i in range(self.op_size):
            op = self._ops[i]
            if op.type() in sub_block_ops:
                continue
            block_desc = op.block()
            is_forward = i < self._forward_num
            in_diff, out_diff = self._get_diff(self._live_in[i],
                                               self._live_out[i])
            # in_diff holds vars live before op i but dead after it.
            # NOTE: list comprehension (not Py2 filter) so the truth test
            # and set_input below behave identically on Python 3.
            can_optimize = [
                x for x in in_diff
                if self._check_var_validity(block_desc, x, is_forward)
            ]
            if can_optimize:
                # Account for delete ops already inserted before index i.
                index = i + fwd_id + 1 if is_forward else i - self._forward_num + bwd_id + 1
                delete_op = block_desc.insert_op(index)
                delete_op.set_type("delete_var")
                delete_op.set_input("X", can_optimize)
                if is_forward:
                    fwd_id += 1
                else:
                    bwd_id += 1

    def memory_optimize(self, level=0):
        """Reuse dead vars' memory for newly defined compatible vars.

        level 0 requires an exact shape match with the cached var;
        level 1 also accepts a cached var at least as large, provided
        both (or neither) have a dynamic batch dimension (-1).
        """

        def compare_shape(x_shape, cache_shape, opt_level):
            if opt_level == 0:
                return x_shape == cache_shape
            if opt_level == 1:
                # A dynamic (-1) batch dim can only reuse another
                # dynamic one, and vice versa.
                if (x_shape[0] == -1) ^ (cache_shape[0] == -1):
                    return False
                # Element-count products (abs() neutralizes the -1 dim).
                # Computed with plain loops instead of Py2-only reduce().
                x_size = 1
                for dim in x_shape:
                    x_size *= dim
                cache_size = 1
                for dim in cache_shape:
                    cache_size *= dim
                if abs(x_size) <= abs(cache_size):
                    return True
            return False

        self._dataflow_analyze()
        self._update_skip_opt_set()
        self.pool = []  # list of (var_name, shape) available for reuse
        for i in range(self.op_size):
            op = self._ops[i]
            if op.type() in sub_block_ops:
                continue
            block_desc = op.block()
            self.current_block_desc = block_desc
            is_forward = i < self._forward_num
            if self.pool:
                defs_can_optimize = [
                    x for x in self._defs[i]
                    if self._check_var_validity(block_desc, x, is_forward)
                ]
                out_pair = [
                    (x, self._find_var(block_desc, x, is_forward).shape())
                    for x in defs_can_optimize
                ]
                for x, x_shape in out_pair:
                    # If x is both in uses and defs, it can not be optimized!
                    if x in self._uses[i]:
                        continue
                    for index, cache_pair in enumerate(self.pool):
                        cache_var = cache_pair[0]
                        cache_shape = cache_pair[1]
                        if compare_shape(x_shape, cache_shape, level):
                            if self._has_var(block_desc, cache_var, is_forward):
                                x_dtype = self._find_var(block_desc, x,
                                                         is_forward).dtype()
                                cache_dtype = self._find_var(
                                    block_desc, cache_var, is_forward).dtype()
                                # TODO(qijun): actually, we should compare dtype_to_size[x_dtype]
                                # and dtype_to_size[cache_dtype]
                                if x_dtype == cache_dtype:
                                    if PRINT_LOG:
                                        print(
                                            ("Hit Cache !!!! cache pool index "
                                             "is %d, var name is %s, "
                                             "cached var name is %s, "
                                             "var shape is %s ") %
                                            (index, x, cache_var,
                                             str(cache_shape)))
                                    self.pool.pop(index)
                                    if x == cache_var:
                                        break
                                    # Rename x -> cache_var in all later ops
                                    # and keep the analysis sets in sync.
                                    _rename_arg_(
                                        self._ops, x, cache_var, begin_idx=i)
                                    self._program.block(block_desc.id).var(
                                        str(x)).desc = self._find_var(
                                            block_desc, cache_var, is_forward)
                                    self._update_graph(
                                        x, cache_var, begin_idx=i)
                                    break

            # Vars that die at op i join the pool for later reuse.
            in_diff, out_diff = self._get_diff(self._live_in[i],
                                               self._live_out[i])
            can_optimize = [
                x for x in in_diff
                if self._check_var_validity(block_desc, x, is_forward)
            ]
            if can_optimize:
                for var_name in can_optimize:
                    self.pool.append((var_name, self._find_var(
                        block_desc, var_name, is_forward).shape()))


254
def _process_sub_block_pair(pdesc, sub_block_pair):
255 256 257
    ops_list = []
    block_desc = pdesc.block(0)
    op_size = block_desc.op_size()
258 259 260 261 262 263 264 265 266 267 268 269 270
    for fwd_op, bwd_op in sub_block_pair:
        sub_block_ids = []
        grad_sub_block_ids = []
        sub_block_id_pair = []
        sub_op_dict = {}
        for i in range(op_size):
            op = block_desc.op(i)
            if op.type() == fwd_op:
                sub_block_ids.append(op.attr("sub_block").id)
                sub_op_dict[op.attr("sub_block").id] = op
            elif op.type() == bwd_op:
                grad_sub_block_ids.append(op.attr("sub_block").id)
                sub_op_dict[op.attr("sub_block").id] = op
271

272 273
        # Find fwd_op/bwd_op block pair
        for grad_id in grad_sub_block_ids:
Q
qijun 已提交
274 275 276 277
            fwd_id = pdesc.block(grad_id).get_forward_block_idx()
            if fwd_id in sub_block_ids:
                sub_block_id_pair.append((fwd_id, grad_id))
                sub_block_ids.remove(fwd_id)
278

279
        # Get fwd_op/bwd_op block ops
Q
qijun 已提交
280
        for fwd_id, grad_id in sub_block_id_pair:
281
            sub_block_ops = []
Q
qijun 已提交
282
            sub_block = pdesc.block(fwd_id)
283 284 285
            block_op_size = sub_block.op_size()
            for i in range(block_op_size):
                sub_block_ops.append(sub_block.op(i))
286

287 288 289 290
            grad_sub_block = pdesc.block(grad_id)
            grad_sub_block_op_size = grad_sub_block.op_size()
            for i in range(grad_sub_block_op_size):
                sub_block_ops.append(grad_sub_block.op(i))
291

292
            sub_op_output = set()
Q
qijun 已提交
293
            sub_op_output.update(sub_op_dict[fwd_id].output_arg_names())
294 295
            sub_op_output.update(sub_op_dict[grad_id].output_arg_names())
            ops_list.append((sub_block_ops, block_op_size, sub_op_output))
296

297
        # Process rest fwd_op block ops
Q
qijun 已提交
298
        for fwd_id in sub_block_ids:
299
            sub_block_ops = []
Q
qijun 已提交
300
            sub_block = pdesc.block(fwd_id)
301 302 303 304
            sub_block_op_size = sub_block.op_size()
            for i in range(sub_block_op_size):
                sub_block_ops.append(sub_block.op(i))
            sub_op_output = set()
Q
qijun 已提交
305
            sub_op_output.update(sub_op_dict[fwd_id].output_arg_names())
306 307
            ops_list.append((sub_block_ops, sub_block_op_size, sub_op_output))
    return ops_list
308

309

310 311 312 313 314 315 316 317
def _get_cfgs(input_program):
    """Build one ControlFlowGraph per analyzable op list of a program.

    The global block always contributes one graph; every sub-block of a
    while/parallel_do/conditional_block (and its grad pair) contributes
    another via _process_sub_block_pair.
    """
    pdesc = input_program.get_desc()
    global_block = pdesc.block(0)
    num_ops = global_block.op_size()

    # Global block ops first: all of them are "forward", nothing skipped.
    ops_list = [([global_block.op(idx) for idx in range(num_ops)], num_ops,
                 set())]

    fwd_bwd_pairs = [("while", "while_grad"),
                     ("parallel_do", "parallel_do_grad"),
                     ("conditional_block", "conditional_block_grad")]
    ops_list.extend(_process_sub_block_pair(pdesc, fwd_bwd_pairs))

    return [
        ControlFlowGraph(input_program, ops, forward_num, skip_opt)
        for ops, forward_num, skip_opt in ops_list
    ]
330 331


332
def memory_optimize(input_program, print_log=False, level=0):
    """Optimize `input_program` in place by reusing dead vars' memory.

    Args:
        input_program: the Program to transform.
        print_log: if True, print a message for every cache hit.
        level: 0 -> reuse only on an exact shape match;
               1 -> also reuse a cached var with at least as many
               elements (dynamic batch dims must match in kind).

    Raises:
        ValueError: if level is not 0 or 1.  Previously an invalid
            level silently disabled all reuse.
    """
    if level != 0 and level != 1:
        raise ValueError("only support opt_level 0 or 1.")
    global PRINT_LOG
    PRINT_LOG = print_log
    cfgs = _get_cfgs(input_program)
    for cfg in cfgs:
        cfg.memory_optimize(level)


def release_memory(input_program):
    """Insert `delete_var` ops into `input_program` so that each
    non-persistable variable is freed right after its last use."""
    for graph in _get_cfgs(input_program):
        graph.release_memory()