提交 0c993bad · 作者: 李寅

Merge branch 'mace_tensor_ownership' into 'master'

Add documents to pass c buffer to MaceTensor

See merge request !932
......@@ -262,7 +262,12 @@ class MACE_API MaceTensor {
public:
// shape - the shape of the tensor, with size n
// data - the buffer of the tensor, must not be null with size equals
// shape[0] * shape[1] * ... * shape[n-1]
// shape[0] * shape[1] * ... * shape[n-1].
// If you want to pass a buffer for which the default
// shared_ptr deleter is unsuitable (for example, a buffer that was
// not dynamically allocated by C++, e.g. a C buffer), you can supply
// a customized deleter to the shared_ptr and manage the buffer's
// lifetime yourself.
// For example, std::shared_ptr<float>(raw_buffer, [](float *){});
MaceTensor(const std::vector<int64_t> &shape,
std::shared_ptr<float> data,
const DataFormat format = DataFormat::NHWC);
......
......@@ -116,7 +116,7 @@ class HexagonConverter(base_converter.ConverterInterface):
for op in self._model.op:
if not self._hexagon_ops.has_op(op.type):
raise Exception('Unsupported op: ', op)
print('Op: ', op.name, op.type)
print('Op: %s (%s)' % (op.name, op.type))
for i in range(len(op.input)):
if ':' not in op.input[i]:
node_name = op.input[i]
......
......@@ -1482,7 +1482,9 @@ class Transformer(base_converter.ConverterInterface):
mace_check(False, "wrong device.")
tensor.data_type = mace_pb2.DT_INT32
else:
quantized_tensor = quantize_util.quantize(tensor.float_data)
non_zero = self._option.device == DeviceType.CPU.value
quantized_tensor = quantize_util.quantize(tensor.float_data,
non_zero)
tensor.data_type = mace_pb2.DT_UINT8
del tensor.float_data[:]
......@@ -1718,6 +1720,11 @@ class Transformer(base_converter.ConverterInterface):
and op.type != MaceOp.Dequantize.name): # noqa
mace_check(len(op.output) == len(op.quantize_info),
"missing quantize info: %s" % op)
for i in six.moves.range(len(op.quantize_info)):
print("Op output %s range: [%f, %f]" % (
op.output[i],
op.quantize_info[i].minval,
op.quantize_info[i].maxval))
def add_opencl_informations(self):
print("Add OpenCL informations")
......
......@@ -108,11 +108,12 @@ def quantize_with_scale_and_zero(data, scale, zero):
return quantized_data
def quantize(data):
def quantize(data, non_zero):
np_data = np.array(data).astype(float)
in_min = np_data.min()
in_max = np_data.max()
scale, zero, out_min, out_max = adjust_range(in_min, in_max, non_zero=True)
scale, zero, out_min, out_max = adjust_range(in_min, in_max,
non_zero=non_zero)
output = np.clip((np.round(zero + data / scale).astype(int)), 0, 255)
quantized_data = QuantizedData()
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册