未验证 提交 36e51fd0 编写于 作者: T teng 提交者: GitHub

[fix] fix typo (#1222)

* [fix] fix typo

* [fix] fix typo
上级 d5077ee7
...@@ -133,7 +133,7 @@ class AlignOnnx(): ...@@ -133,7 +133,7 @@ class AlignOnnx():
save results of onnx save results of onnx
:param outputs: list of outputs name :param outputs: list of outputs name
:param result: onnx inference result :param result: onnx inference result
:param const_name: onnx const name that shuold not contain in output node :param const_name: onnx const name that should not contain in output node
:return: :return:
""" """
check_make_folder(self.export_path) check_make_folder(self.export_path)
...@@ -150,7 +150,7 @@ class AlignOnnx(): ...@@ -150,7 +150,7 @@ class AlignOnnx():
def _write_data(self, text_path, data): def _write_data(self, text_path, data):
""" """
too dificult to write, I don't want to do this again. :( too difficult to write, I don't want to do this again. :(
If anyone has a better way to realize this, please tell me or open a PR. If anyone has a better way to realize this, please tell me or open a PR.
""" """
shape_str = '{' + ' '.join([str(i) for i in data.shape]) + '}' shape_str = '{' + ' '.join([str(i) for i in data.shape]) + '}'
...@@ -280,7 +280,7 @@ class AlignOnnx(): ...@@ -280,7 +280,7 @@ class AlignOnnx():
try: try:
sf_ndarray = result[outputs.index(name[0])] sf_ndarray = result[outputs.index(name[0])]
except: except:
print(f"load onnx output fiaeld {name}") print(f"load onnx output failed {name}")
return return
# calculate distance # calculate distance
...@@ -327,7 +327,7 @@ def main(): ...@@ -327,7 +327,7 @@ def main():
print("is save result : %s" % (args.s)) print("is save result : %s" % (args.s))
print("save text path : %s" % (args.sp)) print("save text path : %s" % (args.sp))
print("is align by layer : %s" % (args.a)) print("is align by layer : %s" % (args.a))
print("tengine ouput path : %s" % (args.to)) print("tengine output path : %s" % (args.to))
print("tengine model : %s" % (args.tm)) print("tengine model : %s" % (args.tm))
align = AlignOnnx(args.m, args.s, args.sp, args.a, args.to, args.tm) align = AlignOnnx(args.m, args.s, args.sp, args.a, args.to, args.tm)
......
...@@ -44,7 +44,7 @@ input onnx model : mnist_sim.onnx ...@@ -44,7 +44,7 @@ input onnx model : mnist_sim.onnx
is save result : True is save result : True
save text path : ./output_onnx save text path : ./output_onnx
is align by layer : True is align by layer : True
tengine ouput path : None tengine output path : None
tengine model : mnist.tmfile tengine model : mnist.tmfile
onnx inference over onnx inference over
------------------------------------------------- -------------------------------------------------
......
...@@ -1369,5 +1369,5 @@ void caffe_serializer::register_op_load() ...@@ -1369,5 +1369,5 @@ void caffe_serializer::register_op_load()
op_load_map["Reduction"] = std::pair<int, op_load_t>(OP_REDUCTION, load_reduction); op_load_map["Reduction"] = std::pair<int, op_load_t>(OP_REDUCTION, load_reduction);
} }
/* /*
* OPERAOTR REGISTER FUNCTION DEFINE FOR CAFFE SERIALIZER END * OPERATOR REGISTER FUNCTION DEFINE FOR CAFFE SERIALIZER END
*/ */
...@@ -1502,5 +1502,5 @@ void ncnn_serializer::register_op_load() ...@@ -1502,5 +1502,5 @@ void ncnn_serializer::register_op_load()
op_load_map["DeconvolutionDepthWise"] = std::pair<int, op_load_t>(OP_DECONV, load_deconv); op_load_map["DeconvolutionDepthWise"] = std::pair<int, op_load_t>(OP_DECONV, load_deconv);
} }
/* /*
* OPERAOTR REGISTER FUNCTION DEFINE FOR NCNN SERIALIZER END * OPERATOR REGISTER FUNCTION DEFINE FOR NCNN SERIALIZER END
*/ */
...@@ -2306,5 +2306,5 @@ void onnx_serializer::register_op_load() ...@@ -2306,5 +2306,5 @@ void onnx_serializer::register_op_load()
op_load_map["Gelu"] = std::pair<int, op_load_t>(OP_GELU, load_no_param); op_load_map["Gelu"] = std::pair<int, op_load_t>(OP_GELU, load_no_param);
} }
/* /*
* OPERAOTR REGISTER FUNCTION DEFINE FOR ONNX SERIALIZER END * OPERATOR REGISTER FUNCTION DEFINE FOR ONNX SERIALIZER END
*/ */
...@@ -784,7 +784,7 @@ int save_graph_u8_perchannel(const char* model_file, const char* scale_file, con ...@@ -784,7 +784,7 @@ int save_graph_u8_perchannel(const char* model_file, const char* scale_file, con
fprintf(stderr, "[Quant Tools Info]: Step 4, quantize activation tensor done.\n"); fprintf(stderr, "[Quant Tools Info]: Step 4, quantize activation tensor done.\n");
/* Set the params of acitvation ir_tensor */ /* Set the params of activation ir_tensor */
for (int i = 0; i < ir_graph->tensor_num; i++) for (int i = 0; i < ir_graph->tensor_num; i++)
{ {
struct tensor* ir_tensor = ir_graph->tensor_list[i]; struct tensor* ir_tensor = ir_graph->tensor_list[i];
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册