提交 97e2d5d4 编写于 作者: 李滨

Merge branch 'gpu_buffer' into 'master'

Don't use non-zero quantization for DSP

See merge request !933
......@@ -116,7 +116,7 @@ class HexagonConverter(base_converter.ConverterInterface):
for op in self._model.op:
if not self._hexagon_ops.has_op(op.type):
raise Exception('Unsupported op: ', op)
print('Op: ', op.name, op.type)
print('Op: %s (%s)' % (op.name, op.type))
for i in range(len(op.input)):
if ':' not in op.input[i]:
node_name = op.input[i]
......
......@@ -1482,7 +1482,9 @@ class Transformer(base_converter.ConverterInterface):
mace_check(False, "wrong device.")
tensor.data_type = mace_pb2.DT_INT32
else:
quantized_tensor = quantize_util.quantize(tensor.float_data)
non_zero = self._option.device == DeviceType.CPU.value
quantized_tensor = quantize_util.quantize(tensor.float_data,
non_zero)
tensor.data_type = mace_pb2.DT_UINT8
del tensor.float_data[:]
......@@ -1718,6 +1720,11 @@ class Transformer(base_converter.ConverterInterface):
and op.type != MaceOp.Dequantize.name): # noqa
mace_check(len(op.output) == len(op.quantize_info),
"missing quantize info: %s" % op)
for i in six.moves.range(len(op.quantize_info)):
print("Op output %s range: [%f, %f]" % (
op.output[i],
op.quantize_info[i].minval,
op.quantize_info[i].maxval))
def add_opencl_informations(self):
print("Add OpenCL informations")
......
......@@ -108,11 +108,12 @@ def quantize_with_scale_and_zero(data, scale, zero):
return quantized_data
def quantize(data):
def quantize(data, non_zero):
np_data = np.array(data).astype(float)
in_min = np_data.min()
in_max = np_data.max()
scale, zero, out_min, out_max = adjust_range(in_min, in_max, non_zero=True)
scale, zero, out_min, out_max = adjust_range(in_min, in_max,
non_zero=non_zero)
output = np.clip((np.round(zero + data / scale).astype(int)), 0, 255)
quantized_data = QuantizedData()
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册