提交 8924b03b 编写于 作者: B Bin Li

Support specifying layers to validate

上级 740d2dc1
......@@ -147,7 +147,7 @@ python_tools_tests:
python tools/converter.py convert --config=${CONF_FILE} --model_graph_format=file --model_data_format=file || exit 1;
python tools/converter.py run --config=${CONF_FILE} --device_yml=${DEVICE_CONF_FILE} --round=1 --target_abis=armeabi-v7a,armhf --validate --model_graph_format=file --model_data_format=file || exit 1;
python tools/converter.py run --config=${CONF_FILE} --device_yml=${DEVICE_CONF_FILE} --example --target_abis=armeabi-v7a,armhf --round=1 --validate --model_graph_format=file --model_data_format=file || exit 1;
python tools/converter.py run --config=${CONF_FILE} --device_yml=${DEVICE_CONF_FILE} --example --target_abis=armeabi-v7a,armhf --round=1 --validate_all_layers --model_graph_format=file --model_data_format=file || exit 1;
python tools/converter.py run --config=${CONF_FILE} --device_yml=${DEVICE_CONF_FILE} --example --target_abis=armeabi-v7a,armhf --round=1 --validate --layers=0 --model_graph_format=file --model_data_format=file || exit 1;
model_tests:
stage: model_tests
......@@ -190,7 +190,7 @@ quantization_tests:
python tools/converter.py convert --config=${CONF_FILE} --model_graph_format=file --model_data_format=file || exit 1;
python tools/converter.py run --config=${CONF_FILE} --device_yml=${DEVICE_CONF_FILE} --round=1 --validate --model_graph_format=file --model_data_format=file || exit 1;
python tools/converter.py run --config=${CONF_FILE} --device_yml=${DEVICE_CONF_FILE} --example --round=1 --validate --model_graph_format=file --model_data_format=file || exit 1;
python tools/converter.py run --config=${CONF_FILE} --device_yml=${DEVICE_CONF_FILE} --example --round=1 --validate_all_layers --model_graph_format=file --model_data_format=file || exit 1;
python tools/converter.py run --config=${CONF_FILE} --device_yml=${DEVICE_CONF_FILE} --example --round=1 --validate --layers=0 --model_graph_format=file --model_data_format=file || exit 1;
done
- rm -rf mace-models
......
......@@ -71,7 +71,7 @@ If you want to specify input data to use, you can add an option in yaml config u
If model's output is suspected to be incorrect, it might be useful to debug your model layer by layer by specifying an intermediate layer as output,
or use binary search method until suspicious layer is found.
You can also specify `--validate_all_layers` to validate all the layers of the model(excluding some layers changed by MACE, e.g., BatchToSpaceND),
You can also specify `--layers` after `--validate` to validate all or some of the layers of the model (excluding some layers changed by MACE, e.g., BatchToSpaceND),
it only supports TensorFlow now. You can find validation results in `builds/your_model/model/runtime_in_yaml/log.csv`.
For quantized model, if you want to check one layer, you can add `check_tensors` and `check_shapes` like in the yaml above. You can only specify
......
......@@ -365,9 +365,17 @@ class HexagonConverter(base_converter.ConverterInterface):
node_id_map[tensor_op] = tensor.node_id
print("Hexagon op:")
index = 0
for op in self._model.op:
op.node_id = node_id_counter
print('Op: %s (%s, %d)' % (op.name, op.type, op.node_id))
if op.type not in [HexagonOp.QuantizeINPUT_f_to_8,
HexagonOp.DequantizeOUTPUT_8tof.name]:
index_str = str(index)
index += 1
else:
index_str = ''
print('Op: %s (%s, node_id:%d, index:%s)' %
(op.name, op.type, op.node_id, index_str))
node_id_counter += 1
node_id_map[op.name] = op.node_id
for ipt in op.input:
......
......@@ -1370,8 +1370,14 @@ class Transformer(base_converter.ConverterInterface):
net.op.extend(sorted_nodes)
print("Final ops:")
index = 0
for op in net.op:
print("%s (%s): %s" % (op.name, op.type, [
if op.type not in [MaceOp.Quantize.name, MaceOp.Dequantize.name]:
index_str = str(index)
index += 1
else:
index_str = ''
print("%s (%s, index:%s): %s" % (op.name, op.type, index_str, [
out_shape.dims for out_shape in op.output_shape]))
return False
......
......@@ -31,6 +31,27 @@ def normalize_op_name(name):
return name.replace('/', '_').replace(':', '_')
def handle_index(start, end, layers):
    """Resolve the user-supplied ``--layers`` spec into absolute op indices.

    Args:
        start: absolute index of the first candidate layer in the net.
        end: absolute index one past the last candidate layer.
        layers: user spec, either a single index ("3", "-1") or a
            Python-slice-like range "start_layer:end_layer" (":5", "2:", "1:4").
            Indices are relative to ``start``; negative values count from the
            end, as in Python slicing.

    Returns:
        (start_index, end_index): absolute indices clamped into
        [start, end - 1] and [start + 1, end] respectively.
    """
    # NOTE(review): num_layers includes one extra slot (end - start + 1);
    # the caller decrements end before calling to omit the original output,
    # so this appears to restore the full count — confirm against caller.
    num_layers = end - start + 1
    if ':' in layers:
        start_str, end_str = layers.split(':')
        start_index = int(start_str) if start_str else 0
        end_index = int(end_str) if end_str else num_layers - 1
        # Normalize negative (from-the-end) indices for both bounds.
        if start_index < 0:
            start_index += num_layers
        if end_index < 0:
            end_index += num_layers
    else:
        start_index = int(layers)
        # Bug fix: normalize the negative index BEFORE deriving end_index.
        # Previously "--layers=-1" produced end_index = 0, which skipped the
        # negative-index fixup and yielded an empty range (nothing validated).
        if start_index < 0:
            start_index += num_layers
        end_index = start_index + 1
    # Shift relative indices to absolute positions in the net.
    start_index += start
    end_index += start
    # Clamp so the range stays non-degenerate and inside [start, end].
    start_index = max(start, min(end - 1, start_index))
    end_index = max(start + 1, min(end, end_index))
    return start_index, end_index
def main(unused_args):
mace_check(os.path.isfile(FLAGS.model_file),
"Input graph file '" + FLAGS.model_file + "' does not exist!")
......@@ -68,6 +89,8 @@ def main(unused_args):
# omit original output
end_index -= 1
index, end_index = handle_index(index, end_index, FLAGS.layers)
data_format = net_def.output_info[0].data_format
output_configs = {"subgraphs": []}
while index < end_index:
......@@ -82,7 +105,7 @@ def main(unused_args):
net = copy.deepcopy(net_def)
if hexagon_flag:
# reuse dequantize op and it's min/max tensor's node_id
del net.op[index+1:end_index+1]
del net.op[index+1:-1]
else:
del net.op[index+1:]
del net.output_info[:]
......@@ -163,6 +186,12 @@ def parse_args():
type=str,
default="",
help="Directory to save the output graph to.")
parser.add_argument(
"--layers",
type=str,
default="-1",
help="'start_layer:end_layer' or 'layer', similar to python slice."
" Use with --validate flag.")
return parser.parse_known_args()
......
......@@ -1194,10 +1194,11 @@ def parse_args():
help="whether to verify the results are consistent with "
"the frameworks.")
run.add_argument(
"--validate_all_layers",
action="store_true",
help="whether to verify the results of all layers are "
"consistent with the frameworks.")
"--layers",
type=str,
default="-1",
help="'start_layer:end_layer' or 'layer', similar to python slice."
" Use with --validate flag.")
run.add_argument(
"--caffe_env",
type=str_to_caffe_env_type,
......
......@@ -416,7 +416,7 @@ class DeviceWrapper:
six.print_('Tuning done! \n')
@staticmethod
def get_layers(model_dir, model_name):
def get_layers(model_dir, model_name, layers):
sh_commands.bazel_build_common("//mace/python/tools:layers_validate")
model_file = "%s/%s.pb" % (model_dir, model_name)
......@@ -428,6 +428,7 @@ class DeviceWrapper:
"-u",
"--model_file=%s" % model_file,
"--output_dir=%s" % output_dir,
"--layers=%s" % layers,
_fg=True)
output_configs_path = output_dir + "outputs.yml"
......@@ -553,14 +554,15 @@ class DeviceWrapper:
output_shapes = subgraphs[0][YAMLKeyword.check_shapes]
output_configs = []
log_file = ""
if flags.validate_all_layers:
if flags.layers != "-1":
mace_check(configs[YAMLKeyword.model_graph_format] ==
ModelFormat.file and
configs[YAMLKeyword.model_data_format] ==
ModelFormat.file, "Device",
"'--validate_all_layers' only supports model format 'file'.") # noqa
output_configs = \
self.get_layers(mace_model_dir, model_name)
"'--layers' only supports model format 'file'.")
output_configs = self.get_layers(mace_model_dir,
model_name,
flags.layers)
log_dir = mace_model_dir + "/" + runtime
if os.path.exists(log_dir):
sh.rm('-rf', log_dir)
......@@ -611,7 +613,7 @@ class DeviceWrapper:
layers_validate_file=output_config[
YAMLKeyword.model_file_path]
)
if flags.validate or flags.validate_all_layers:
if flags.validate:
model_file_path, weight_file_path = get_model_files(
model_config[YAMLKeyword.model_file_path],
model_config[YAMLKeyword.model_sha256_checksum],
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册