Unverified · Commit 6ecbf083 authored by QI JUN, committed by GitHub

add memory optimization transpiler (#7356)

Parent 45e77154
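In brief, the new transpiler runs a liveness analysis over a Program's operators and lets variables whose live ranges have ended donate their buffers to later, shape-matched outputs. A minimal usage sketch (the network construction elided here is whatever your program already builds; see the unit test at the end of this diff):

from paddle.v2.fluid.framework import Program
from paddle.v2.fluid.memory_optimization_transpiler import memory_optimize

program = Program()
# ... build layers, loss, and an optimizer step on `program` here ...
# memory_optimize rewrites the program's ops in place and returns it.
optimized_program = memory_optimize(program)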
...@@ -19,12 +19,13 @@ from data_feeder import DataFeeder
from core import LoDTensor, CPUPlace, CUDAPlace
from distribute_transpiler import DistributeTranspiler
import clip
from memory_optimization_transpiler import memory_optimize

Tensor = LoDTensor

__all__ = framework.__all__ + executor.__all__ + [
    'io', 'initializer', 'layers', 'nets', 'optimizer', 'backward',
    'regularizer', 'LoDTensor', 'CPUPlace', 'CUDAPlace', 'Tensor', 'ParamAttr',
    'DataFeeder', 'clip', 'DistributeTranspiler', 'memory_optimize'
]
...
...@@ -773,6 +773,9 @@ class Program(object):
        proto = framework_pb2.ProgramDesc.FromString(str(protostr))
        return _debug_string_(proto, throw_on_error)

    def get_desc(self):
        return self.desc

    def clone(self):
        p = Program()
        p.desc = core.ProgramDesc(self.desc)
...
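The get_desc hook added above is how the transpiler reaches the underlying C++ ProgramDesc rather than the Python-side wrappers. A sketch of that access pattern, using only the desc methods the new file relies on (`program` is an existing fluid Program; the print loop is illustrative, not part of the commit):

desc = program.get_desc()
block = desc.block(0)  # global block only; if/while sub-blocks are a TODO
for i in range(block.op_size()):
    op = block.op(i)
    # input_arg_names/output_arg_names are the use/def sets that the
    # liveness analysis in the new file is built from.
    print(op.input_arg_names(), op.output_arg_names())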
from collections import defaultdict

import framework
from framework import Program, default_main_program, Parameter, Variable
import backward
from backward import _rename_arg_


class ControlFlowGraph(object):
    def __init__(self, program):
        self._program = program
        self._successors = defaultdict(set)
        self._predecessors = defaultdict(set)
        self._uses = defaultdict(set)
        self._defs = defaultdict(set)
        self._live_in = defaultdict(set)
        self._live_out = defaultdict(set)

    def _add_connections(self, connections):
        for node1, node2 in connections:
            self._add(node1, node2)

    def _add(self, node1, node2):
        self._successors[node1].add(node2)
        self._predecessors[node2].add(node1)

    def _build_graph(self):
        program_desc = self._program.get_desc()
        block_size = program_desc.num_blocks()
        # TODO(qijun) handle Program with if/while operators
        self.global_block = program_desc.block(0)
        self.op_size = self.global_block.op_size()
        # Ops in the global block run sequentially, so the control flow
        # graph is a simple chain: op i feeds op i + 1.
        op_node_connections = [(i, i + 1) for i in range(self.op_size - 1)]
        self._add_connections(op_node_connections)
        self.ops = [self.global_block.op(i) for i in range(self.op_size)]
        for i in range(self.op_size):
            self._uses[i].update(self.ops[i].input_arg_names())
            self._defs[i].update(self.ops[i].output_arg_names())

    def _reach_fixed_point(self, live_in, live_out):
        if len(live_in) != len(self._live_in):
            return False
        if len(live_out) != len(self._live_out):
            return False
        for i in range(self.op_size):
            if live_in[i] != self._live_in[i]:
                return False
        for i in range(self.op_size):
            if live_out[i] != self._live_out[i]:
                return False
        return True

    def _dataflow_analyze(self):
        self._build_graph()
        live_in = defaultdict(set)
        live_out = defaultdict(set)
        # Standard backward liveness analysis, iterated to a fixed point:
        #   live_in[i]  = use[i] | (live_out[i] - def[i])
        #   live_out[i] = union of live_in[s] over successors s of op i
        while True:
            for i in range(self.op_size):
                live_in[i] = set(self._live_in[i])
                live_out[i] = set(self._live_out[i])
                self._live_in[i] = self._uses[i] | (
                    self._live_out[i] - self._defs[i])
                for s in self._successors[i]:
                    self._live_out[i] |= self._live_in[s]
            if self._reach_fixed_point(live_in, live_out):
                break

    def _get_diff(self, a, b):
        u = a & b
        return a - u, b - u

    def memory_optimize(self):
        self._build_graph()
        self._dataflow_analyze()
        self.pool = []
        for i in range(self.op_size):
            if self.pool:
                out_pair = [(x, self.global_block.var(str(x)).shape())
                            for x in self._defs[i]]
                for x, x_shape in out_pair:
                    for index, cache_pair in enumerate(self.pool):
                        cache_var = cache_pair[0]
                        cache_shape = cache_pair[1]
                        if x_shape == cache_shape:
                            print("Hit cache: pool index is %d, var name is "
                                  "%s, cached var name is %s, var shape is %s"
                                  % (index, x, cache_var, str(cache_shape)))
                            self.pool.pop(index)
                            # Reuse the cached variable's memory: rename x to
                            # cache_var in every op from position i onwards.
                            _rename_arg_(self.ops, x, cache_var, begin_idx=i)
                            self._dataflow_analyze()
                            break
            # Variables live into op i but not out of it are dead afterwards;
            # their (non-persistable) buffers can be recycled later.
            in_diff, out_diff = self._get_diff(self._live_in[i],
                                               self._live_out[i])
            can_optimize = [
                x for x in in_diff
                if not self.global_block.var(str(x)).persistable()
            ]
            for var_name in can_optimize:
                self.pool.append(
                    (var_name, self.global_block.var(str(var_name)).shape()))

    def get_program(self):
        return self._program


def memory_optimize(input_program):
    graph = ControlFlowGraph(input_program)
    graph.memory_optimize()
    return graph.get_program()
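The pass above is a standard backward liveness analysis: live_in[i] = use[i] | (live_out[i] - def[i]), and live_out[i] is the union of live_in[s] over the successors s of op i, iterated until nothing changes. A self-contained toy version on an invented three-op chain, to make the fixed point concrete:

from collections import defaultdict

# Each op is (defs, uses); the chain 0 -> 1 -> 2 mirrors the sequential
# global block. All variable names here are made up for illustration.
ops = [
    ({'a'}, {'x'}),       # op 0: a = f(x)
    ({'b'}, {'a'}),       # op 1: b = g(a)
    ({'c'}, {'x', 'b'}),  # op 2: c = h(x, b)
]
succ = {0: {1}, 1: {2}, 2: set()}

live_in, live_out = defaultdict(set), defaultdict(set)
changed = True
while changed:
    changed = False
    for i in reversed(range(len(ops))):
        defs, uses = ops[i]
        out = set().union(*[live_in[s] for s in succ[i]])
        inn = uses | (out - defs)
        if (out, inn) != (live_out[i], live_in[i]):
            live_out[i], live_in[i] = out, inn
            changed = True

print([(sorted(live_in[i]), sorted(live_out[i])) for i in range(3)])
# 'a' is live into op 1 but not out of it, so after op 1 its buffer is
# free; if a later output matches its shape, memory_optimize's pool
# would rename that output to 'a' and reuse the allocation.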
from __future__ import print_function

import unittest

import paddle.v2.fluid.layers as layers
import paddle.v2.fluid.optimizer as optimizer
from paddle.v2.fluid.framework import Program, program_guard
from paddle.v2.fluid.memory_optimization_transpiler import memory_optimize


class TestControlFlowGraph(unittest.TestCase):
    def setUp(self):
        program = Program()
        with program_guard(program, startup_program=Program()):
            x = layers.data(name='x', shape=[13], dtype='float32')
            y_predict = layers.fc(input=x, size=1, act=None)
            y = layers.data(name='y', shape=[1], dtype='float32')
            cost = layers.square_error_cost(input=y_predict, label=y)
            avg_cost = layers.mean(x=cost)
            opt = optimizer.SGD(learning_rate=0.001)
            opt = opt.minimize(avg_cost)
        self.program = program

    def test_control_flow_graph(self):
        print("before optimization")
        print(str(self.program))
        result_program = memory_optimize(self.program)
        print("after optimization")
        print(str(result_program))


if __name__ == "__main__":
    unittest.main()