Benchmark is done with the latest `opencv-python==4.7.0.72` and `opencv-contrib-python==4.7.0.72` on the following platforms. Some models are excluded because of support issues.
# Draw the figure once so matplotlib finalizes the table's cell geometry;
# get_height()/get_width() below return meaningful values only after a draw.
fig.canvas.draw()

# Accumulate the table's total height (sum of the first column's cell
# heights) and total width (sum of the first row's cell widths), in the
# table's axes coordinates.
table_height = 0
table_width = 0
for row in range(len(table_texts)):
    cell = table.get_celld()[(row, 0)]
    table_height += cell.get_height()
for col in range(len(table_texts[0])):
    cell = table.get_celld()[(0, col)]
    # NOTE(review): a duplicated variant of this line added `+ 0.1` padding
    # per column — confirm whether that padding is wanted before relying on
    # table_width downstream.
    table_width += cell.get_width()

# Footnotes rendered below the table. Positions are expressed relative to
# axs[1]'s axes coordinates, offset downward past the table's full height.
# NOTE(review): the pasted source contained conflicting y-offsets (-0.8 vs
# -1/-2); the -1/-2 pair is kept so the two notes don't overlap — confirm.
axs[2].text(0, -table_height - 1, "Units: All data in milliseconds (ms).", va='bottom', ha='left', fontsize=11, transform=axs[1].transAxes)
axs[2].text(0, -table_height - 2, "\\*: Models are quantized in per-channel mode, which run slower than per-tensor quantized models on NPU.", va='bottom', ha='left', fontsize=11, transform=axs[1].transAxes)