Skip to content
体验新版
项目
组织
正在加载...
登录
切换导航
打开侧边栏
OpenCV
opencv_extra
提交
e2bb990e
O
opencv_extra
项目概览
OpenCV
/
opencv_extra
10 个月 前同步成功
通知
119
Star
882
Fork
1584
代码
文件
提交
分支
Tags
贡献者
分支图
Diff
Issue
0
列表
看板
标记
里程碑
合并请求
0
Wiki
0
Wiki
分析
仓库
DevOps
项目成员
Pages
O
opencv_extra
项目概览
项目概览
详情
发布
仓库
仓库
文件
提交
分支
标签
贡献者
分支图
比较
Issue
0
Issue
0
列表
看板
标记
里程碑
合并请求
0
合并请求
0
Pages
分析
分析
仓库分析
DevOps
Wiki
0
Wiki
成员
成员
收起侧边栏
关闭侧边栏
动态
分支图
创建新Issue
提交
Issue看板
前往新版Gitcode,体验更适合开发者的 AI 搜索 >>
提交
e2bb990e
编写于
10月 19, 2022
作者:
Z
Zihao Mu
浏览文件
操作
浏览文件
下载
电子邮件补丁
差异文件
add QDQ format onnx model.
上级
bf616317
变更
10
隐藏空白更改
内联
并排
Showing
10 changed files
with
11 additions
and
5 deletions
+11
-5
testdata/dnn/onnx/data/input_quantized_conv_int8_weights_qdq.npy
...a/dnn/onnx/data/input_quantized_conv_int8_weights_qdq.npy
+0
-0
testdata/dnn/onnx/data/input_quantized_conv_per_channel_weights_qdq.npy
...nnx/data/input_quantized_conv_per_channel_weights_qdq.npy
+0
-0
testdata/dnn/onnx/data/input_quantized_conv_uint8_weights_qdq.npy
.../dnn/onnx/data/input_quantized_conv_uint8_weights_qdq.npy
+0
-0
testdata/dnn/onnx/data/output_quantized_conv_int8_weights_qdq.npy
.../dnn/onnx/data/output_quantized_conv_int8_weights_qdq.npy
+0
-0
testdata/dnn/onnx/data/output_quantized_conv_per_channel_weights_qdq.npy
...nx/data/output_quantized_conv_per_channel_weights_qdq.npy
+0
-0
testdata/dnn/onnx/data/output_quantized_conv_uint8_weights_qdq.npy
...dnn/onnx/data/output_quantized_conv_uint8_weights_qdq.npy
+0
-0
testdata/dnn/onnx/generate_quantized_onnx_models.py
testdata/dnn/onnx/generate_quantized_onnx_models.py
+11
-5
testdata/dnn/onnx/models/quantized_conv_int8_weights_qdq.onnx
...data/dnn/onnx/models/quantized_conv_int8_weights_qdq.onnx
+0
-0
testdata/dnn/onnx/models/quantized_conv_per_channel_weights_qdq.onnx
...n/onnx/models/quantized_conv_per_channel_weights_qdq.onnx
+0
-0
testdata/dnn/onnx/models/quantized_conv_uint8_weights_qdq.onnx
...ata/dnn/onnx/models/quantized_conv_uint8_weights_qdq.onnx
+0
-0
未找到文件。
testdata/dnn/onnx/data/input_quantized_conv_int8_weights_qdq.npy
0 → 100644
浏览文件 @
e2bb990e
文件已添加
testdata/dnn/onnx/data/input_quantized_conv_per_channel_weights_qdq.npy
0 → 100644
浏览文件 @
e2bb990e
文件已添加
testdata/dnn/onnx/data/input_quantized_conv_uint8_weights_qdq.npy
0 → 100644
浏览文件 @
e2bb990e
文件已添加
testdata/dnn/onnx/data/output_quantized_conv_int8_weights_qdq.npy
0 → 100644
浏览文件 @
e2bb990e
文件已添加
testdata/dnn/onnx/data/output_quantized_conv_per_channel_weights_qdq.npy
0 → 100644
浏览文件 @
e2bb990e
文件已添加
testdata/dnn/onnx/data/output_quantized_conv_uint8_weights_qdq.npy
0 → 100644
浏览文件 @
e2bb990e
文件已添加
testdata/dnn/onnx/generate_quantized_onnx_models.py
浏览文件 @
e2bb990e
...
...
@@ -5,9 +5,9 @@ import torch.nn as nn
import
torch.nn.functional
as
F
import
numpy
as
np
import
os
import
onnx
import
onnx
# version >= 1.12.0
import
onnxruntime
as
rt
from
onnxruntime.quantization
import
quantize_static
,
CalibrationDataReader
,
QuantType
from
onnxruntime.quantization
import
quantize_static
,
CalibrationDataReader
,
QuantType
,
QuantFormat
class
DataReader
(
CalibrationDataReader
):
def
__init__
(
self
,
model_path
,
batchsize
=
5
):
...
...
@@ -20,16 +20,16 @@ class DataReader(CalibrationDataReader):
def
get_next
(
self
):
return
next
(
self
.
enum_data_dicts
,
None
)
def
quantize_and_save_model
(
name
,
input
,
model
,
act_type
=
"uint8"
,
wt_type
=
"uint8"
,
per_channel
=
False
):
def
quantize_and_save_model
(
name
,
input
,
model
,
act_type
=
"uint8"
,
wt_type
=
"uint8"
,
per_channel
=
False
,
ops_version
=
13
,
quanFormat
=
QuantFormat
.
QOperator
):
float_model_path
=
os
.
path
.
join
(
"models"
,
"dummy.onnx"
)
quantized_model_path
=
os
.
path
.
join
(
"models"
,
name
+
".onnx"
)
type_dict
=
{
"uint8"
:
QuantType
.
QUInt8
,
"int8"
:
QuantType
.
QInt8
}
model
.
eval
()
torch
.
onnx
.
export
(
model
,
input
,
float_model_path
,
export_params
=
True
,
opset_version
=
12
)
torch
.
onnx
.
export
(
model
,
input
,
float_model_path
,
export_params
=
True
,
opset_version
=
ops_version
)
dr
=
DataReader
(
float_model_path
)
quantize_static
(
float_model_path
,
quantized_model_path
,
dr
,
per_channel
=
per_channel
,
quantize_static
(
float_model_path
,
quantized_model_path
,
dr
,
quant_format
=
quanFormat
,
per_channel
=
per_channel
,
activation_type
=
type_dict
[
act_type
],
weight_type
=
type_dict
[
wt_type
])
os
.
remove
(
float_model_path
)
...
...
@@ -53,10 +53,16 @@ np.random.seed(0)
input
=
Variable
(
torch
.
randn
(
1
,
3
,
10
,
10
))
conv
=
nn
.
Conv2d
(
3
,
5
,
kernel_size
=
3
,
stride
=
2
,
padding
=
1
)
# generate QOperator quantized model
quantize_and_save_model
(
"quantized_conv_uint8_weights"
,
input
,
conv
)
quantize_and_save_model
(
"quantized_conv_int8_weights"
,
input
,
conv
,
wt_type
=
"int8"
)
quantize_and_save_model
(
"quantized_conv_per_channel_weights"
,
input
,
conv
,
per_channel
=
True
)
# generate QDQ quantized model
quantize_and_save_model
(
"quantized_conv_uint8_weights_qdq"
,
input
,
conv
,
quanFormat
=
QuantFormat
.
QDQ
)
quantize_and_save_model
(
"quantized_conv_int8_weights_qdq"
,
input
,
conv
,
wt_type
=
"int8"
,
quanFormat
=
QuantFormat
.
QDQ
)
quantize_and_save_model
(
"quantized_conv_per_channel_weights_qdq"
,
input
,
conv
,
per_channel
=
True
,
quanFormat
=
QuantFormat
.
QDQ
)
input
=
Variable
(
torch
.
randn
(
1
,
3
))
linear
=
nn
.
Linear
(
3
,
4
,
bias
=
True
)
quantize_and_save_model
(
"quantized_matmul_uint8_weights"
,
input
,
linear
)
...
...
testdata/dnn/onnx/models/quantized_conv_int8_weights_qdq.onnx
0 → 100644
浏览文件 @
e2bb990e
文件已添加
testdata/dnn/onnx/models/quantized_conv_per_channel_weights_qdq.onnx
0 → 100644
浏览文件 @
e2bb990e
文件已添加
testdata/dnn/onnx/models/quantized_conv_uint8_weights_qdq.onnx
0 → 100644
浏览文件 @
e2bb990e
文件已添加
编辑
预览
Markdown
is supported
0%
请重试
或
添加新附件
.
添加附件
取消
You are about to add
0
people
to the discussion. Proceed with caution.
先完成此消息的编辑!
取消
想要评论请
注册
或
登录