提交 5944588c 编写于 作者: 李滨

Merge branch 'debug' into 'master'

Add MACE visualization

See merge request !945
......@@ -96,6 +96,8 @@ Debug model conversion
After model is converted to MACE model, a literal model graph is generated in directory `mace/codegen/models/your_model`.
You can refer to it when debugging model conversion.
MACE also provides a model visualization HTML page in the `builds` directory, which is generated after model conversion.
Debug engine using log
--------------------------
......
......@@ -289,9 +289,11 @@ class Tensor {
// Reinterpret the tensor's logical shape in place without reallocating
// the underlying buffer; only checks that the new shape still fits.
// NOTE(review): this span is merge-diff residue — both the old
// (message-less) and new (with message) MACE_CHECK lines appear; in the
// merged source only the checks with explanatory messages remain.
inline void Reshape(const std::vector<index_t> &shape) {
shape_ = shape;
if (has_opencl_image()) {
// OpenCL image buffers pack 4 values per pixel, hence the factor 4.
MACE_CHECK(raw_size() <= 4 * buffer_->size());
MACE_CHECK(raw_size() <= 4 * buffer_->size(),
"Must satisfy: ", raw_size(), " <= ", 4 * buffer_->size());
} else {
MACE_CHECK(raw_size() <= buffer_->size());
MACE_CHECK(raw_size() <= buffer_->size(),
"Must satisfy: ", raw_size(), " <= ", buffer_->size());
}
}
......
......@@ -45,6 +45,7 @@ py_binary(
deps = [
":converter_lib",
":model_saver_lib",
"//mace/python/tools/visualization:visualization_lib",
"@six_archive//:six",
],
)
......
......@@ -25,6 +25,7 @@ from mace.python.tools import model_saver
from mace.python.tools.converter_tool import base_converter as cvt
from mace.python.tools.converter_tool import transformer
from mace.python.tools.convert_util import mace_check
from mace.python.tools.visualization import visualize_model
# ./bazel-bin/mace/python/tools/tf_converter --model_file quantized_test.pb \
# --output quantized_test_dsp.pb \
......@@ -201,6 +202,13 @@ def main(unused_args):
option, output_graph_def, quantize_activation_info)
output_graph_def = converter.run()
try:
visualizer = visualize_model.ModelVisualizer(FLAGS.model_tag,
output_graph_def)
visualizer.save_html()
except: # noqa
print("Failed to visualize model:", sys.exc_info()[0])
model_saver.save_model(
option, output_graph_def, model_checksum, weight_checksum,
FLAGS.template_dir, FLAGS.obfuscate, FLAGS.model_tag,
......
......@@ -272,6 +272,7 @@ class TensorflowConverter(base_converter.ConverterInterface):
print("Run transform_graph: %s" % TFTransformGraphOptions[
option.device])
try:
print ("output keys: ", option.output_nodes.keys())
transformed_graph_def = TransformGraph(tf_graph_def,
option.input_nodes.keys(),
option.output_nodes.keys(),
......
# Bazel library bundling the MACE model-visualization tooling: the Python
# renderer plus the HTML template it loads at runtime.
py_library(
name = "visualization_lib",
srcs = [
"visualize_model.py",
],
# Runtime data dependency: visualize_model.py reads this template file.
data = [
"index.html",
],
srcs_version = "PY2AND3",
visibility = ["//visibility:public"],
deps = [
# Generated protobuf bindings for the MACE model proto.
"//mace/proto:mace_py",
],
)
<!DOCTYPE html>
<html lang="en">
<style>
/* Edge (link) styling for the force-directed graph. */
line {
/* NOTE(review): this #999 stroke is dead — it is overridden by the
`stroke: black` declaration two lines below. */
stroke: #999;
stroke-width: 1.5px;
stroke: black;
fill: transparent;
opacity: 0.5;
}
/* Node (vertex) styling for the force-directed graph. */
ellipse {
stroke-width: 1.5px;
stroke: black;
opacity: 0.5;
}
</style>
<head>
<meta charset="UTF-8">
<title>MACE Model</title>
</head>
<body>
<h1>MACE Visualization</h1>
<h2>Ops</h2>
<div id="op_table"></div>
<h2>Tensors</h2>
<div id="tensor_table"></div>
<h2>Graph</h2>
Click node to see details at bottom of this page.
<div class='graph'>
<svg id='graph' width='1000' height='1600' viewBox='0 0 1000 1600'></svg>
</div>
<div>
<pre id='info'></pre>
</div>
</body>
<script src="https://d3js.org/d3.v5.min.js"></script>
<script>
/*
var graph = {
"nodes": [
{
"name": "A_",
"id": "A",
},
{
"name": "B_",
"id": "B",
}
],
"links": [
{
"source": "A",
"target": "B"
}
]
}
*/
var graph =%s;
// Render `data` (an array of row objects) as an HTML table appended to
// the element selected by `divid`, with one column per entry of
// `columns` (each row cell shows row[column]). Returns the d3 table
// selection so callers could style it further.
function create_table(divid, data, columns) {
    var tableSel = d3.select(divid).append("table");
    var headSel = tableSel.append("thead");
    var bodySel = tableSel.append("tbody");

    // Header row: one <th> per column name.
    headSel.append("tr")
        .selectAll("th")
        .data(columns)
        .enter()
        .append("th")
        .text(function (name) { return name; });

    // One <tr> per data row.
    var rowSel = bodySel.selectAll("tr")
        .data(data)
        .enter()
        .append("tr");

    // One <td> per (row, column) pair.
    rowSel.selectAll("td")
        .data(function (row) {
            return columns.map(function (name) {
                return {column: name, value: row[name]};
            });
        })
        .enter()
        .append("td")
        .text(function (cell) { return cell.value; });

    return tableSel;
}
// Walk the graph nodes once and split them into two flat row arrays for
// the summary tables: one for ops, one for tensors.
// NOTE(review): `nodes`, `op_data` and `tensor_data` are declared
// without `var`, so they become implicit globals.
nodes = graph["nodes"];
op_data = [];
tensor_data = [];
for (var i = 0; i < nodes.length; i++) {
var node = nodes[i];
if (node.node_type === "op") {
// Collect each output shape as a comma-joined dims string.
var output_shapes = [];
if (typeof node["outputShape"] !== "undefined") {
for (var j = 0; j < node["outputShape"].length; j++) {
var output_shape = node["outputShape"][j].dims.join(",");
output_shapes.push(output_shape);
}
}
// Summarize per-output quantization parameters, if present.
var quantize_infos = [];
if (typeof node["quantizeInfo"] !== "undefined") {
for (var j = 0; j < node["quantizeInfo"].length; j++) {
quantize_infos.push(
"scale=" + node["quantizeInfo"][j]["scale"] +
" zero=" + node["quantizeInfo"][j]["zeroPoint"] +
" min=" + node["quantizeInfo"][j]["minval"] +
" max=" + node["quantizeInfo"][j]["maxval"]);
}
}
op_data.push({
"idx": op_data.length,
"name": node["name"],
"type": node["type"],
"output_shape": output_shapes.join("; "),
"quantize_info": quantize_infos.join("; ")
});
} else if (node.node_type === "tensor") {
// Tensors carry at most one set of quantization parameters.
var quantize_info = "";
if (typeof node["scale"] !== "undefined") {
quantize_info = "scale=" + node["scale"] +
" zero=" + node["zeroPoint"] +
" min=" + node["minval"] +
" max=" + node["maxval"];
}
tensor_data.push({
"idx": tensor_data.length,
"name": node["name"],
"data_type": node["dataType"],
"dims": node["dims"].join(","),
"quantize_info": quantize_info
})
}
}
// Render the two summary tables into their placeholder <div>s.
create_table("#op_table", op_data, ["idx", "name", "type",
"output_shape", "quantize_info"]);
create_table("#tensor_table", tensor_data, ["idx", "name", "data_type",
"dims", "quantize_info"]);
// --- SVG canvas setup: arrowhead marker, drag-to-pan, force layout ---
var svg = d3.select("#graph");
// Define the arrowhead used on directed edges (referenced as url(#arrow)).
svg.append("svg:defs").append("svg:marker")
.attr("id", "arrow")
.attr("viewBox", "0 -5 10 10")
.attr('refX', 30)
.attr("markerWidth", 5)
.attr("markerHeight", 5)
.attr("orient", "auto")
.append("svg:path")
.attr("d", "M0,-5L10,0L0,5");
var width = svg.attr("width");
var height = svg.attr("height");
// Drag-to-pan state: mouse-down position and current viewBox origin.
var is_mouse_down = false;
var mouse_x = 0;
var mouse_y = 0;
var view_box_x = 0;
var view_box_y = 0;
svg.on("mousedown", function () {
is_mouse_down = true;
mouse_x = d3.mouse(this)[0];
mouse_y = d3.mouse(this)[1];
});
// On release, shift the viewBox by the drag delta (inverted: dragging
// right moves the view left).
svg.on("mouseup", function () {
is_mouse_down = false;
view_box_x = view_box_x - d3.mouse(this)[0] + mouse_x;
view_box_y = view_box_y - d3.mouse(this)[1] + mouse_y;
svg.attr("viewBox", view_box_x + " " + view_box_y + " "
+ width + " " + height);
});
// Live panning while the button is held.
svg.on("mousemove", function () {
if (is_mouse_down) {
view_box_x = view_box_x - d3.mouse(this)[0] + mouse_x;
view_box_y = view_box_y - d3.mouse(this)[1] + mouse_y;
svg.attr("viewBox", view_box_x + " " + view_box_y + " "
+ width + " " + height);
}
});
// Force-directed layout: links keyed by node id, mutual repulsion, and
// centering in the middle of the canvas.
var simulation = d3.forceSimulation()
.force("link", d3.forceLink().id(function (d) {
return d.id;
}).distance(50))
.force("gravity", d3.forceManyBody().strength(-50))
.force("center", d3.forceCenter(0.5 * width, 0.5 * height));
// One <g> group per graph node, draggable; clicking a node dumps its
// JSON into the #info <pre> at the bottom of the page.
var node = svg.selectAll(".nodes")
.data(graph.nodes)
.enter().append("g")
.attr("class", "nodes")
.call(d3.drag()
.on("start", function (d) {
if (!d3.event.active) simulation.alphaTarget(1.0).restart();
d.fx = d.x;
d.fy = d.y;
})
.on("drag", function (d) {
d.fx = d3.event.x;
d.fy = d3.event.y;
})
.on("end", function (d) {
// Release the fixed position so the simulation resumes control.
if (!d3.event.active) simulation.alphaTarget(0);
d.fx = d.fy = null;
}))
.on("click", function (d) {
d3.select('#info').text(JSON.stringify(d, null, 2))
})
// Ellipse radii [rx, ry] for a node, chosen by its category:
// ops are widest, graph inputs medium, tensors smallest.
function shape_size(node_type) {
    switch (node_type) {
        case "op":
            return ["60px", "20px"];
        case "input":
            return ["15px", "15px"];
        default:
            return ["5px", "5px"];
    }
}
// Fill color for a node by category: blue for ops, orange for graph
// inputs, red for everything else (tensors).
function color(node_type) {
    switch (node_type) {
        case "op":
            return "#3366cc";
        case "input":
            return "#ff9900";
        default:
            return "#dc3912";
    }
}
// Label font size by node category: larger for ops and graph inputs,
// small for tensors and anything else.
function font_size(node_type) {
    var isLarge = (node_type === "op") || (node_type === "input");
    return isLarge ? "14px" : "8px";
}
// --- Node rendering: ellipse shape, name label, and in-shape type label ---
node.append("ellipse")
.attr("rx", function (d) {
return shape_size(d.node_type)[0];
})
.attr("ry", function (d) {
return shape_size(d.node_type)[1];
})
.attr("fill", function (d) {
return color(d.node_type);
})
// Node name, drawn to the right of the shape.
node.append("text")
.attr("font-size", function (d) {
return font_size(d.node_type);
})
.attr("dx", 60).attr("dy", 5).text(function (d) {
return d.name;
});
// Op type, drawn in white inside the ellipse (empty for non-op nodes).
node.append("text")
.attr("font-size", function (d) {
return font_size(d.node_type);
})
.attr("dx", -25).attr("dy", 5).text(function (d) {
if (d.node_type === "op") return d.type;
else return "";
}).style("fill", "white");
// Directed edges with the arrowhead marker defined above.
var link = svg.append("g").attr("class", "links").selectAll("line")
.data(graph.links).enter().append("line")
.attr("marker-end", "url(#arrow)");
// Wire nodes and links into the force simulation; `tick` repositions
// every element on each simulation step.
simulation.nodes(graph.nodes).on("tick", tick);
simulation.force("link").links(graph.links);
// Per-step callback of the force simulation: sync the SVG line
// endpoints and node group transforms with the simulated x/y positions.
// Reads the `link` and `node` d3 selections defined above.
function tick() {
link.attr("x1", function (d) {
return d.source.x;
})
.attr("y1", function (d) {
return d.source.y;
})
.attr("x2", function (d) {
return d.target.x;
})
.attr("y2", function (d) {
return d.target.y;
});
node.attr("transform", function (d) {
return "translate(" + d.x + "," + d.y + ")";
});
}
</script>
</html>
\ No newline at end of file
import json
import numpy as np
from google.protobuf.json_format import _Printer
THREASHOLD = 16
class NPEncoder(json.JSONEncoder):
    """JSON encoder that serializes numpy scalar and array types."""

    def default(self, obj):
        """Convert numpy objects to their plain-Python equivalents.

        Falls back to the base class (which raises ``TypeError``) for
        anything that is not a numpy type.
        """
        if isinstance(obj, np.integer):
            return int(obj)
        elif isinstance(obj, np.floating):
            return float(obj)
        elif isinstance(obj, np.ndarray):
            return obj.tolist()
        else:
            # Bug fix: the original referenced the undefined name
            # `MyEncoder` here, raising NameError for unsupported types
            # instead of the intended TypeError from JSONEncoder.default.
            return super(NPEncoder, self).default(obj)
class ModelVisualizer(object):
    """Renders a MACE model proto as an interactive HTML page.

    The page is produced by substituting the model graph (as JSON) into
    the ``index.html`` template and written to
    ``builds/<model_name>_index.html``.
    """

    def __init__(self, model_name, proto):
        """
        Args:
            model_name: tag used to name the output HTML file.
            proto: the converted MACE model proto (with ``op`` and
                ``tensors`` fields).
        """
        # Paths are relative to the working directory; the caller is
        # expected to run from the repository root.
        self._output_file = "builds/%s_index.html" % model_name
        self._proto = proto

    def render_html(self):
        """Build the graph JSON and substitute it into the HTML template.

        Returns:
            The complete HTML document as a string.
        """
        json_obj = {
            "nodes": [],
            "links": []
        }
        # NOTE(review): _Printer is a protobuf-internal class; relying on
        # its _MessageToJsonObject is a non-public API that may break
        # across protobuf upgrades.
        json_printer = _Printer()
        for op in self._proto.op:
            op_json = json_printer._MessageToJsonObject(op)
            op_json["id"] = op_json["name"]
            op_json["node_type"] = "op"
            json_obj["nodes"].append(op_json)
        for tensor in self._proto.tensors:
            tensor_json = json_printer._MessageToJsonObject(tensor)
            tensor_json["id"] = tensor_json["name"]
            # Strip large constant data so the generated HTML stays small.
            if "floatData" in tensor_json and \
                    len(tensor_json["floatData"]) > THREASHOLD:
                del tensor_json["floatData"]
            if "int32Data" in tensor_json and \
                    len(tensor_json["int32Data"]) > THREASHOLD:
                del tensor_json["int32Data"]
            tensor_json["node_type"] = "tensor"
            json_obj["nodes"].append(tensor_json)
        # Set for O(1) membership tests (the original used a list).
        node_ids = set(node["id"] for node in json_obj["nodes"])
        # Map each intermediate tensor name to the op that produces it.
        tensor_to_op = {}
        for op in self._proto.op:
            for output_name in op.output:
                tensor_to_op[output_name] = op.name
        # Iterate over a snapshot: the loop body appends synthesized
        # input nodes to json_obj["nodes"] (the original mutated the
        # list it was iterating).
        for op_json in list(json_obj["nodes"]):
            if "input" not in op_json:
                continue
            for input_name in op_json["input"]:
                if input_name in node_ids and op_json["name"] in node_ids:
                    # Edge from a weight tensor node.
                    json_obj["links"].append(
                        {"source": input_name, "target": op_json["name"]})
                elif input_name in tensor_to_op and \
                        tensor_to_op[input_name] in node_ids:
                    # Edge from the op that produces this intermediate
                    # tensor.
                    json_obj["links"].append(
                        {"source": tensor_to_op[input_name],
                         "target": op_json["name"]})
                else:
                    # Model input: synthesize a node for it.
                    # Bug fix: record the new id so an input consumed by
                    # several ops is added as a node only once (the
                    # original appended a duplicate per consumer).
                    json_obj["nodes"].append({
                        "id": input_name,
                        "name": input_name,
                        "node_type": "input"
                    })
                    node_ids.add(input_name)
                    json_obj["links"].append(
                        {"source": input_name, "target": op_json["name"]})
        json_msg = json.dumps(json_obj, cls=NPEncoder)
        with open("mace/python/tools/visualization/index.html") as f:
            html = f.read()
        # The template contains a single %s placeholder for the graph JSON.
        return html % json_msg

    def save_html(self):
        """Render the model and write the HTML file to disk."""
        html = self.render_html()
        # Bug fix: open in text mode — render_html returns str, and
        # writing str to a binary-mode file raises TypeError on Python 3.
        with open(self._output_file, "w") as f:
            f.write(html)
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册