提交 800eb69a · 作者：李寅

Merge branch 'feature_wuch' into 'master'

revert dsp convert tool to support both nn_graph & nnlib

See merge request !158
......@@ -17,6 +17,7 @@ static NetDef BuildNetDef() {
input_op->set_type("INPUT");
input_op->set_node_id(0);
input_op->set_padding(0);
input_op->add_out_max_byte_size(1000);
// add op
OperatorDef *add_op = net.add_op();
......@@ -58,6 +59,10 @@ static NetDef BuildNetDef() {
input_node_input->set_node_id(16);
input_node_input->set_output_port(0);
add_op->add_out_max_byte_size(1000);
add_op->add_out_max_byte_size(1000);
add_op->add_out_max_byte_size(1000);
// output op
OperatorDef *output_op = net.add_op();
output_op->set_name("__output__");
......
......@@ -24,6 +24,7 @@ static NetDef BuildNetDef(const vector<index_t> &input_shape,
input_op->set_type("INPUT");
input_op->set_node_id(0);
input_op->set_padding(0);
input_op->add_out_max_byte_size(1000);
// maxpool op
OperatorDef *maxpool_op = net.add_op();
......@@ -57,6 +58,9 @@ static NetDef BuildNetDef(const vector<index_t> &input_shape,
input_node_input = maxpool_op->add_node_input();
input_node_input->set_node_id(13);
input_node_input->set_output_port(0);
maxpool_op->add_out_max_byte_size(1000);
maxpool_op->add_out_max_byte_size(1000);
maxpool_op->add_out_max_byte_size(1000);
// output op
OperatorDef *output_op = net.add_op();
......
......@@ -16,6 +16,7 @@ static NetDef BuildNetDef() {
input_op->set_type("INPUT");
input_op->set_node_id(0);
input_op->set_padding(0);
input_op->add_out_max_byte_size(1000);
// relu op
OperatorDef *relu_op = net.add_op();
......@@ -38,6 +39,9 @@ static NetDef BuildNetDef() {
input_node_input = relu_op->add_node_input();
input_node_input->set_node_id(11);
input_node_input->set_output_port(0);
relu_op->add_out_max_byte_size(1000);
relu_op->add_out_max_byte_size(1000);
relu_op->add_out_max_byte_size(1000);
// output op
OperatorDef *output_op = net.add_op();
......
......@@ -18,6 +18,7 @@ static NetDef BuildNetDef() {
input_op->set_type("INPUT");
input_op->set_node_id(0);
input_op->set_padding(0);
input_op->add_out_max_byte_size(1200);
// relu op
OperatorDef *resize_bilinear_op = net.add_op();
......@@ -45,6 +46,9 @@ static NetDef BuildNetDef() {
input_node_input = resize_bilinear_op->add_node_input();
input_node_input->set_node_id(12);
input_node_input->set_output_port(0);
resize_bilinear_op->add_out_max_byte_size(1200);
resize_bilinear_op->add_out_max_byte_size(1000);
resize_bilinear_op->add_out_max_byte_size(1000);
// output op
OperatorDef *output_op = net.add_op();
......
......@@ -17,6 +17,7 @@ static NetDef BuildNetDef() {
input_op->set_type("INPUT");
input_op->set_node_id(0);
input_op->set_padding(0);
input_op->add_out_max_byte_size(1000);
// add op
OperatorDef *supernode_op = net.add_op();
......@@ -76,6 +77,10 @@ static NetDef BuildNetDef() {
input_node_input->set_node_id(20);
input_node_input->set_output_port(0);
supernode_op->add_out_max_byte_size(1000);
supernode_op->add_out_max_byte_size(1000);
supernode_op->add_out_max_byte_size(1000);
// output op
OperatorDef *output_op = net.add_op();
output_op->set_name("__output__");
......
......@@ -93,6 +93,7 @@ message OperatorDef {
optional uint32 op_id = 101;
optional uint32 padding = 102;
repeated NodeInput node_input = 103;
repeated int32 out_max_byte_size = 104; // only support 32-bit len
}
// for memory optimization
......
......@@ -132,6 +132,7 @@ def convert_ops(unresolved_ops, resolved_ops, net_def, output_node, dsp_ops):
op_def.input.append(input_tensor.name)
op_def.input.extend([t.name for t in s2b_op.inputs[1:]])
op_def.input.extend([min_tensor.name, max_tensor.name])
op_def.out_max_byte_size.extend([max_elem_size(out) for out in quantize_op.outputs])
convert_op_outputs(op_def, quantize_op)
elif has_padding_and_strides(first_op):
op_def.padding = padding_mode[first_op.get_attr('padding')]
......@@ -143,6 +144,7 @@ def convert_ops(unresolved_ops, resolved_ops, net_def, output_node, dsp_ops):
strides = first_op.get_attr('strides')
strides_tensor = add_shape_const_node(net_def, first_op, strides, 'strides')
op_def.input.extend([strides_tensor])
op_def.out_max_byte_size.extend([max_elem_size(out) for out in first_op.outputs])
convert_op_outputs(op_def, first_op)
elif is_node_flatten_reshape(first_op):
op_def.type = 'Flatten'
......@@ -150,6 +152,7 @@ def convert_ops(unresolved_ops, resolved_ops, net_def, output_node, dsp_ops):
convert_op_outputs(op_def, first_op)
elif dsp_ops.has_op(first_op.type):
op_def.input.extend([t.name for t in first_op.inputs])
op_def.out_max_byte_size.extend([max_elem_size(out) for out in first_op.outputs])
convert_op_outputs(op_def, first_op)
else:
raise Exception('Unsupported op: ', first_op)
......@@ -201,9 +204,11 @@ def reverse_batch_to_space_and_biasadd(net_def):
new_biasadd_op.input[0] = get_tensor_name_from_op(conv_requantize_op.name, 0)
new_biasadd_op.input[2] = get_tensor_name_from_op(conv_requantize_op.name, 1)
new_biasadd_op.input[3] = get_tensor_name_from_op(conv_requantize_op.name, 2)
new_biasadd_op.out_max_byte_size[0] = conv_requantize_op.out_max_byte_size[0] * 4
new_biasadd_requantize_op = mace_pb2.OperatorDef()
new_biasadd_requantize_op.CopyFrom(biasadd_requantize_op)
new_biasadd_requantize_op.out_max_byte_size[0] = new_biasadd_op.out_max_byte_size[0] / 4
new_b2s_op = mace_pb2.OperatorDef()
new_b2s_op.CopyFrom(b2s_op)
......@@ -320,6 +325,7 @@ def strip_input_quantize_and_output_dequantize(net_def, input_node, output_node)
new_input_op.name = input_op.name
new_input_op.type = input_op.type
new_input_op.padding = input_op.padding
new_input_op.out_max_byte_size.extend([input_op.out_max_byte_size[0]/4, 4, 4])
new_ops.append(new_input_op)
new_input_op.output_shape.extend([input_op.output_shape[0],
minf_op.output_shape[0],
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册