Unverified commit af2fa429 authored by LoneRanger, committed by GitHub

remove net_drawer.py, memory_analysis.py (#51869)

* remove net_drawer.py

* remove memory_analysis.py

* remove test_memory_analysis.py
Parent 3065fa2c
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from . import core
import numpy as np
def get_var_and_memory_size(block, var_name, batch_size=None):
var = block._find_var_recursive(var_name)
assert var is not None, "Variable {} cannot be found".format(var_name)
assert (
var.type == core.VarDesc.VarType.LOD_TENSOR
), "Variable {} is not Tensor".format(var_name)
shape = list(var.shape)
if not shape:
return var, 0
has_none = False
for i, s in enumerate(shape):
if s is None or s < 0:
assert not has_none
shape[i] = batch_size
has_none = True
assert all([s >= 0 for s in shape]), "shape {} is not deterministic".format(
shape
)
mem_size = int(np.prod(shape)) * core.size_of_dtype(var.dtype)
return var, mem_size
def pre_allocate_memory(size, place):
t = core.LoDTensor()
t._set_dims([size])
t._mutable_data(place, core.VarDesc.VarType.INT8)
del t
# NOTE: does not consider inplace yet.
def get_max_memory_info(program, batch_size=None):
    assert (
        program.num_blocks == 1
    ), "only supports analyzing a program with a single block"
cur_tmp_mem = 0
max_tmp_mem = 0
max_persistable_mem = 0
visited_vars = set()
alived_vars = []
block = program.global_block()
gc_vars = core._get_eager_deletion_vars(program.desc, [])[0]
for i, op in enumerate(block.ops):
var_names = op.input_arg_names + op.output_arg_names
for var_name in var_names:
if var_name in visited_vars:
continue
visited_vars.add(var_name)
var, mem_size = get_var_and_memory_size(block, var_name, batch_size)
if var.persistable:
max_persistable_mem += mem_size
else:
cur_tmp_mem += mem_size
max_tmp_mem = max(max_tmp_mem, cur_tmp_mem)
cur_gc_vars = gc_vars[i]
for var_name in var_names:
if var_name not in cur_gc_vars:
continue
_, mem_size = get_var_and_memory_size(block, var_name, batch_size)
cur_tmp_mem -= mem_size
return max_tmp_mem, max_persistable_mem
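The two helpers above are the whole public surface of the removed memory_analysis.py module. As a rough, hypothetical usage sketch (the one-layer fc program, tensor names, and batch size below are illustrative assumptions, not part of the original module), estimating peak memory for a static program and pre-reserving it could look like:

import paddle
from paddle.fluid.memory_analysis import (
    get_max_memory_info,
    pre_allocate_memory,
)

paddle.enable_static()

# Build any single-block static program; one fc layer is only a placeholder workload.
x = paddle.static.data(name='x', shape=[None, 128], dtype='float32')
loss = paddle.mean(paddle.static.nn.fc(x, size=10))
main_prog = paddle.static.default_main_program()

# Peak temporary and persistable memory (in bytes) for a concrete batch size.
max_tmp_mem, max_persistable_mem = get_max_memory_info(main_prog, batch_size=32)

# Optionally warm up the allocator by reserving roughly that much memory up front.
pre_allocate_memory(max_tmp_mem + max_persistable_mem, paddle.CPUPlace())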
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import json
import logging
from collections import defaultdict
import paddle.fluid.core as core
import paddle.fluid.proto.framework_pb2 as framework_pb2
from paddle.fluid.log_helper import get_logger
logger = get_logger(__name__, logging.INFO)
try:
from .graphviz import Graph
except ImportError:
logger.info(
'Cannot import graphviz, which is required for drawing a network. This '
'can usually be installed in python with "pip install graphviz". Also, '
'pydot requires graphviz to convert dot files to pdf: in ubuntu, this '
'can usually be installed with "sudo apt-get install graphviz".'
)
print(
'net_drawer will not run correctly. Please install the correct '
'dependencies.'
)
exit(0)
OP_STYLE = {
'shape': 'oval',
'color': '#0F9D58',
'style': 'filled',
'fontcolor': '#FFFFFF',
}
VAR_STYLE = {}
GRAPH_STYLE = {
"rankdir": "TB",
}
GRAPH_ID = 0
def unique_id():
    # Return a new unique graph id; the counter is module-level state.
    global GRAPH_ID
    GRAPH_ID += 1
    return GRAPH_ID
def draw_node(op):
    # Work on a copy so repeated calls do not mutate the module-level style dict.
    node = OP_STYLE.copy()
    node["name"] = op.type
    node["label"] = op.type
    return node
def draw_edge(var_parent, op, var, arg):
    # Work on a copy so repeated calls do not mutate the module-level style dict.
    edge = VAR_STYLE.copy()
    edge["label"] = "%s(%s)" % (var.parameter, arg)
    edge["head_name"] = op.type
    edge["tail_name"] = var_parent[arg]
    return edge
def parse_graph(program, graph, var_dict, **kwargs):
# fill the known variables
for block in program.blocks:
for var in block.vars:
if var not in var_dict:
var_dict[var] = "Feed"
temp_id = 0
proto = framework_pb2.ProgramDesc.FromString(
program.desc.serialize_to_string()
)
for block in proto.blocks:
for op in block.ops:
op.type = op.type + "_" + str(temp_id)
temp_id += 1
graph.node(**draw_node(op))
for o in op.outputs:
for arg in o.arguments:
var_dict[arg] = op.type
for e in op.inputs:
for arg in e.arguments:
if arg in var_dict:
graph.edge(**draw_edge(var_dict, op, e, arg))
break # only plot the first block
def draw_graph(startup_program, main_program, **kwargs):
    # Pop style/filename overrides so they are not passed to Graph() twice below.
    if "graph_attr" in kwargs:
        GRAPH_STYLE.update(kwargs.pop("graph_attr"))
    if "node_attr" in kwargs:
        OP_STYLE.update(kwargs.pop("node_attr"))
    if "edge_attr" in kwargs:
        VAR_STYLE.update(kwargs.pop("edge_attr"))
    graph_id = unique_id()
    filename = kwargs.pop("filename", None)
    if filename is None:
        filename = str(graph_id) + ".gv"
g = Graph(
name=str(graph_id),
filename=filename,
graph_attr=GRAPH_STYLE,
node_attr=OP_STYLE,
edge_attr=VAR_STYLE,
**kwargs
)
var_dict = {}
parse_graph(startup_program, g, var_dict)
parse_graph(main_program, g, var_dict)
if filename is not None:
g.save()
return g
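net_drawer.py, removed above, exposed draw_graph(startup_program, main_program, **kwargs) as its entry point: it walks the first block of each program, adds one node per op and one edge per input variable, and saves a Graphviz .gv source file. A minimal, hypothetical sketch of how it was meant to be invoked (assuming the Graph class it imports behaves like graphviz.Graph; the tiny fc program below is only an illustration):

import paddle
from paddle.fluid.net_drawer import draw_graph

paddle.enable_static()

# Any static-graph program can be drawn; a single fc layer keeps the drawing small.
x = paddle.static.data(name='x', shape=[None, 8], dtype='float32')
paddle.static.nn.fc(x, size=4)

# Nodes are ops, edges are the variables flowing between them; the .gv source
# is saved via Graph.save() before the Graph object is returned.
g = draw_graph(
    paddle.static.default_startup_program(),
    paddle.static.default_main_program(),
)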
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from simple_nets import simple_fc_net
import paddle
from paddle.fluid.memory_analysis import (
get_max_memory_info,
pre_allocate_memory,
)
class TestMemoryAnalysis(unittest.TestCase):
def setUp(self):
paddle.enable_static()
def test_get_memory_info(self):
loss = simple_fc_net()
optimizer = paddle.optimizer.Adam(learning_rate=1e-3)
optimizer.minimize(loss)
main_prog = paddle.static.default_main_program()
        max_tmp_mem_1, max_persistable_mem_1 = get_max_memory_info(
            main_prog, batch_size=32
        )
        self.assertGreater(max_tmp_mem_1, 0)
        self.assertGreater(max_persistable_mem_1, 0)
        max_tmp_mem_2, max_persistable_mem_2 = get_max_memory_info(
            main_prog, batch_size=64
        )
        self.assertEqual(max_persistable_mem_1, max_persistable_mem_2)
self.assertLess(max_tmp_mem_1, max_tmp_mem_2)
class TestPreAllocateMemory(unittest.TestCase):
def setUp(self):
paddle.enable_static()
def test_pre_allocate(self):
size = 32 * 1024 * 1024
pre_allocate_memory(size, paddle.CPUPlace())
if paddle.is_compiled_with_cuda():
pre_allocate_memory(size, paddle.CUDAPlace(0))
if __name__ == "__main__":
unittest.main()