Unverified · Commit a7b93e04 · authored by ceci3, committed by GitHub

fix bugs (#110)

Parent ea24d105
@@ -16,13 +16,6 @@ import imagenet_reader
 _logger = get_logger(__name__, level=logging.INFO)
-reduce_rate = 0.85
-init_temperature = 10.24
-max_flops = 321208544
-server_address = ""
-port = 8979
-retain_epoch = 5
 def create_data_loader(image_shape):
     data_shape = [None] + image_shape
@@ -71,17 +64,13 @@ def search_mobilenetv2_block(config, args, image_size):
     if args.is_server:
         sa_nas = SANAS(
             config,
-            server_addr=("", port),
-            init_temperature=init_temperature,
-            reduce_rate=reduce_rate,
+            server_addr=(args.server_address, args.port),
             search_steps=args.search_steps,
             is_server=True)
     else:
         sa_nas = SANAS(
             config,
-            server_addr=(server_address, port),
-            init_temperature=init_temperature,
-            reduce_rate=reduce_rate,
+            server_addr=(args.server_address, args.port),
             search_steps=args.search_steps,
             is_server=False)
@@ -140,7 +129,7 @@ def search_mobilenetv2_block(config, args, image_size):
         current_flops = flops(train_program)
         print('step: {}, current_flops: {}'.format(step, current_flops))
-        if current_flops > max_flops:
+        if current_flops > int(321208544):
             continue
         place = fluid.CUDAPlace(0) if args.use_gpu else fluid.CPUPlace()
@@ -178,7 +167,7 @@ def search_mobilenetv2_block(config, args, image_size):
         train_compiled_program = fluid.CompiledProgram(
             train_program).with_data_parallel(
                 loss_name=avg_cost.name, build_strategy=build_strategy)
-        for epoch_id in range(retain_epoch):
+        for epoch_id in range(args.retain_epoch):
             for batch_id, data in enumerate(train_loader()):
                 fetches = [avg_cost.name]
                 s_time = time.time()
@@ -243,6 +232,11 @@ if __name__ == '__main__':
         type=int,
         default=100,
         help='controller server number.')
+    parser.add_argument(
+        '--server_address', type=str, default="", help='server ip.')
+    parser.add_argument('--port', type=int, default=8881, help='server port')
+    parser.add_argument(
+        '--retain_epoch', type=int, default=5, help='epoch for each token.')
     parser.add_argument('--lr', type=float, default=0.1, help='learning rate.')
     args = parser.parse_args()
     print(args)
@@ -257,7 +251,7 @@ if __name__ == '__main__':
                 args.data))
     # the length of block_mask sets the number of blocks; 1 means the block downsamples, 0 means the feature map size does not change after the block
-    config_info = {'block_mask': [0, 1, 1, 1, 1, 0, 1, 0]}
+    config_info = {'block_mask': [0, 1, 1, 1, 0]}
     config = [('MobileNetV2BlockSpace', config_info)]
     search_mobilenetv2_block(config, args, image_size)
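The hunks above replace the module-level `reduce_rate`, `init_temperature`, `max_flops`, `server_address`, `port`, and `retain_epoch` globals with command-line flags and drop the temperature arguments from the `SANAS` call. Below is a minimal, hedged sketch of how the new flags feed the controller; the flag names and defaults follow the argparse lines added in this diff, while the `--is_server` handling and the standalone wiring are assumptions for illustration only.

```python
# Sketch only: how the new CLI flags reach SANAS (not taken verbatim from the repo).
import argparse

from paddleslim.nas import SANAS

parser = argparse.ArgumentParser()
parser.add_argument('--is_server', action='store_true', help='run the controller server in this process.')
parser.add_argument('--server_address', type=str, default="", help='server ip.')
parser.add_argument('--port', type=int, default=8881, help='server port')
parser.add_argument('--retain_epoch', type=int, default=5, help='epoch for each token.')
parser.add_argument('--search_steps', type=int, default=100, help='controller server number.')
args = parser.parse_args()

# Same block-level search space as the script above.
config = [('MobileNetV2BlockSpace', {'block_mask': [0, 1, 1, 1, 0]})]

# The server process can pass an empty address so SANAS binds locally; client
# processes point --server_address/--port at the machine running the server.
sa_nas = SANAS(
    config,
    server_addr=(args.server_address, args.port),
    search_steps=args.search_steps,
    is_server=args.is_server)
```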
@@ -18,13 +18,6 @@ import imagenet_reader
 _logger = get_logger(__name__, level=logging.INFO)
-reduce_rate = 0.85
-init_temperature = 10.24
-max_flops = 321208544
-server_address = ""
-port = 8989
-retain_epoch = 5
 def create_data_loader(image_shape):
     data_shape = [None] + image_shape
@@ -66,18 +59,14 @@ def search_mobilenetv2(config, args, image_size, is_server=True):
         ### start a server and a client
         sa_nas = SANAS(
             config,
-            server_addr=("", port),
-            init_temperature=init_temperature,
-            reduce_rate=reduce_rate,
+            server_addr=(args.server_address, args.port),
             search_steps=args.search_steps,
             is_server=True)
     else:
         ### start a client
         sa_nas = SANAS(
             config,
-            server_addr=(server_address, port),
-            init_temperature=init_temperature,
-            reduce_rate=reduce_rate,
+            server_addr=(args.server_address, args.port),
             search_steps=args.search_steps,
             is_server=False)
@@ -93,7 +82,7 @@ def search_mobilenetv2(config, args, image_size, is_server=True):
         current_flops = flops(train_program)
         print('step: {}, current_flops: {}'.format(step, current_flops))
-        if current_flops > max_flops:
+        if current_flops > int(321208544):
             continue
         test_loader, test_avg_cost, test_acc_top1, test_acc_top5 = build_program(
@@ -139,7 +128,7 @@ def search_mobilenetv2(config, args, image_size, is_server=True):
         train_compiled_program = fluid.CompiledProgram(
             train_program).with_data_parallel(
                 loss_name=avg_cost.name, build_strategy=build_strategy)
-        for epoch_id in range(retain_epoch):
+        for epoch_id in range(args.retain_epoch):
             for batch_id, data in enumerate(train_loader()):
                 fetches = [avg_cost.name]
                 s_time = time.time()
@@ -179,7 +168,7 @@ def search_mobilenetv2(config, args, image_size, is_server=True):
 def test_search_result(tokens, image_size, args, config):
     sa_nas = SANAS(
         config,
-        server_addr=("", 8887),
+        server_addr=(args.server_address, args.port),
         init_temperature=args.init_temperature,
         reduce_rate=args.reduce_rate,
         search_steps=args.search_steps,
@@ -234,7 +223,7 @@ def test_search_result(tokens, image_size, args, config):
     train_compiled_program = fluid.CompiledProgram(
         train_program).with_data_parallel(
             loss_name=avg_cost.name, build_strategy=build_strategy)
-    for epoch_id in range(retain_epoch):
+    for epoch_id in range(args.retain_epoch):
         for batch_id, data in enumerate(train_loader()):
             fetches = [avg_cost.name]
             s_time = time.time()
@@ -298,6 +287,11 @@ if __name__ == '__main__':
         type=int,
         default=100,
         help='controller server number.')
+    parser.add_argument(
+        '--server_address', type=str, default="", help='server ip.')
+    parser.add_argument('--port', type=int, default=8881, help='server port')
+    parser.add_argument(
+        '--retain_epoch', type=int, default=5, help='epoch for each token.')
     parser.add_argument('--lr', type=float, default=0.1, help='learning rate.')
     args = parser.parse_args()
     print(args)
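For orientation, the FLOPs guard and the `retain_epoch` loop patched above live inside the per-step search loop of these scripts. Here is a rough sketch of that loop under the new flags; `build_program`, `train_one_epoch`, and `evaluate_top1` stand in for helpers defined in the scripts and are assumptions in this sketch.

```python
# Sketch of the SA-NAS search loop this diff touches; `args` and `sa_nas` as set up above.
from paddleslim.analysis import flops

MAX_FLOPS = 321208544  # FLOPs budget that the diff now inlines as int(321208544)

for step in range(args.search_steps):
    archs = sa_nas.next_archs()                     # candidate blocks from the controller
    train_program, avg_cost = build_program(archs)  # assumed helper: builds the train program
    current_flops = flops(train_program)
    print('step: {}, current_flops: {}'.format(step, current_flops))
    if current_flops > MAX_FLOPS:
        continue                                    # skip candidates over the FLOPs budget
    for epoch_id in range(args.retain_epoch):       # brief training per sampled architecture
        train_one_epoch(train_program)              # assumed helper
    score = evaluate_top1(train_program)            # assumed helper: top-1 accuracy on eval data
    sa_nas.reward(float(score))                     # feed the score back to the annealing controller
```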
......
@@ -16,7 +16,7 @@ Dataset: ImageNet1000
 | MobileNetV2 | quant_aware |72.05%/90.63% (-0.1%/-0.02%)| 4.0 | - | [model](https://paddlemodels.bj.bcebos.com/PaddleSlim/MobileNetV2_quant_aware.tar) |
 |ResNet50|-|76.50%/93.00%| 99 | 2.71 | [model](http://paddle-imagenet-models-name.bj.bcebos.com/ResNet50_pretrained.tar) |
 |ResNet50|quant_post|76.33%/93.02% (-0.17%/+0.02%)| 25.1| 1.19 | [model](https://paddlemodels.bj.bcebos.com/PaddleSlim/ResNet50_quant_post.tar) |
 |ResNet50|quant_aware| 76.48%/93.11% (-0.02%/+0.11%)| 25.1 | 1.17 | [model](https://paddlemodels.bj.bcebos.com/PaddleSlim/ResNet50_quant_awre.tar) |
 PaddleLite latency(ms)
@@ -90,6 +90,13 @@ PaddleLite latency(ms)
 <a name="trans1">[1]</a>: The `_vd` suffix indicates that the pre-trained model uses Mixup. Please refer to the detailed introduction: [mixup: Beyond Empirical Risk Minimization](https://arxiv.org/abs/1710.09412)
+### 1.4 NAS
+| Model | Method | Top-1/Top-5 Acc | Volume(MB) | GFLOPs | Download |
+|:--:|:---:|:--:|:--:|:--:|:--:|
+| MobileNetV2 | - | 72.15%/90.65% | 15 | 0.59 | [model](https://paddle-imagenet-models-name.bj.bcebos.com/MobileNetV2_pretrained.tar) |
+| MobileNetV2 | SANAS | 71.518%/90.208% (-0.632%/-0.442%) | 14 | 0.295 | [model](https://paddlemodels.cdn.bcebos.com/PaddleSlim/MobileNetV2_sanas.tar) |
 ## 2. Object Detection
 ### 2.1 Quantization
@@ -99,8 +106,8 @@ Dataset: COCO 2017
 | Model | Method | Dataset | Image/GPU | Input 608 Box AP | Input 416 Box AP | Input 320 Box AP | Model Size(MB) | TensorRT latency(V100, ms) | Download |
 | :----------------------------: | :---------: | :----: | :-------: | :------------: | :------------: | :------------: | :------------: | :----------: |:----------: |
 | MobileNet-V1-YOLOv3 | - | COCO | 8 | 29.3 | 29.3 | 27.1 | 95 | - | [model](https://paddlemodels.bj.bcebos.com/object_detection/yolov3_mobilenet_v1.tar) |
 | MobileNet-V1-YOLOv3 | quant_post | COCO | 8 | 27.9 (-1.4)| 28.0 (-1.3) | 26.0 (-1.0) | 25 | - | [model](https://paddlemodels.bj.bcebos.com/PaddleSlim/yolov3_mobilenetv1_coco_quant_post.tar) |
 | MobileNet-V1-YOLOv3 | quant_aware | COCO | 8 | 28.1 (-1.2)| 28.2 (-1.1) | 25.8 (-1.2) | 26.3 | - | [model](https://paddlemodels.bj.bcebos.com/PaddleSlim/yolov3_mobilenet_coco_quant_aware.tar) |
 | R34-YOLOv3 | - | COCO | 8 | 36.2 | 34.3 | 31.4 | 162 | - | [model](https://paddlemodels.bj.bcebos.com/object_detection/yolov3_r34.tar) |
 | R34-YOLOv3 | quant_post | COCO | 8 | 35.7 (-0.5) | - | - | 42.7 | - | [model](https://paddlemodels.bj.bcebos.com/PaddleSlim/yolov3_r34_coco_quant_post.tar) |
 | R34-YOLOv3 | quant_aware | COCO | 8 | 35.2 (-1.0) | 33.3 (-1.0) | 30.3 (-1.1)| 44 | - | [model](https://paddlemodels.bj.bcebos.com/PaddleSlim/yolov3_r34_coco_quant_aware.tar) |
@@ -157,8 +164,22 @@ Dataset: Pascal VOC & COCO 2017
 | MobileNet-V1-YOLOv3 | ResNet34-YOLOv3 distill | COCO | 8 | 31.4 (+2.1) | 30.0 (+0.7) | 27.1 (+0.1) | 95 | [model](https://paddlemodels.bj.bcebos.com/PaddleSlim/yolov3_mobilenetv1_coco_distilled.tar) |
+### 2.4 NAS
+Dataset: WIDER-FACE
+| Model | Method | Image/GPU | Input size | Easy/Medium/Hard | Volume(KB) | Latency(ms) | Download |
+| :------------: | :---------: | :-------: | :------: | :-----------------------------: | :------------: | :------------: | :----------------------------------------------------------: |
+| BlazeFace | - | 8 | 640 | 91.5/89.2/79.7 | 815 | 71.862 | [model](https://paddlemodels.bj.bcebos.com/object_detection/blazeface_original.tar) |
+| BlazeFace-NAS | - | 8 | 640 | 83.7/80.7/65.8 | 244 | 21.117 | [model](https://paddlemodels.bj.bcebos.com/object_detection/blazeface_nas.tar) |
+| BlazeFace-NAS1 | SANAS | 8 | 640 | 87.0/83.7/68.5 | 389 | 22.558 | [model](https://paddlemodels.bj.bcebos.com/object_detection/blazeface_nas2.tar) |
+!!! note "Note"
+    <a name="trans1">[1]</a>: Latency is based on latency_855.txt, which was measured on the 855 chip with PaddleLite.
 ## 3. Image Segmentation
 Dataset: Cityscapes
 ### 3.1 Quantization
......
@@ -128,7 +128,7 @@ SANAS (Simulated Annealing Neural Architecture Search) is based on simulated annealing
 - **tokens(list):** - a list of tokens. The length and value range of the tokens depend on the search space.
 **Returns:**
-A network architecture instance generated from the given tokens.
+A list of network architecture instances generated from the given tokens.
 **Example:**
@@ -153,8 +153,10 @@ SANAS (Simulated Annealing Neural Architecture Search) is based on simulated annealing
 **Example:**
 .. code-block:: python
     import paddle.fluid as fluid
     from paddleslim.nas import SANAS
     config = [('MobileNetV2Space')]
     sanas = SANAS(configs=config)
     print(sanas.current_info())
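The first doc hunk above records that `tokens2arch` now returns a list of architecture functions rather than a single instance. A small usage sketch under that reading follows; the token values are placeholders, not a real search result.

```python
# Sketch: consuming the list returned by tokens2arch (token values are hypothetical).
import paddle.fluid as fluid
from paddleslim.nas import SANAS

config = [('MobileNetV2Space')]
sanas = SANAS(configs=config)

tokens = [0] * 45                    # hypothetical token list; real length/values depend on the search space
archs = sanas.tokens2arch(tokens)    # a list of callables, one per searched block

image = fluid.data(name='image', shape=[None, 3, 224, 224], dtype='float32')
out = image
for arch in archs:                   # apply each block in sequence to build the network
    out = arch(out)
```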
@@ -16,7 +16,7 @@
 | MobileNetV2 | quant_aware |72.05%/90.63% (-0.1%/-0.02%)| 4.0 | - | [download](https://paddlemodels.bj.bcebos.com/PaddleSlim/MobileNetV2_quant_aware.tar) |
 |ResNet50|-|76.50%/93.00%| 99 | 2.71 | [download](http://paddle-imagenet-models-name.bj.bcebos.com/ResNet50_pretrained.tar) |
 |ResNet50|quant_post|76.33%/93.02% (-0.17%/+0.02%)| 25.1| 1.19 | [download](https://paddlemodels.bj.bcebos.com/PaddleSlim/ResNet50_quant_post.tar) |
 |ResNet50|quant_aware| 76.48%/93.11% (-0.02%/+0.11%)| 25.1 | 1.17 | [download](https://paddlemodels.bj.bcebos.com/PaddleSlim/ResNet50_quant_awre.tar) |
 Lite latency of classification models (ms)
@@ -89,6 +89,12 @@
 <a name="trans1">[1]</a>: The `_vd` suffix indicates that the pre-trained model uses Mixup; see [mixup: Beyond Empirical Risk Minimization](https://arxiv.org/abs/1710.09412) for details.
+### 1.4 NAS
+| Model | Method | Top-1/Top-5 Acc | Model Size(MB) | GFLOPs | Download |
+|:--:|:---:|:--:|:--:|:--:|:--:|
+| MobileNetV2 | - | 72.15%/90.65% | 15 | 0.59 | [download](https://paddle-imagenet-models-name.bj.bcebos.com/MobileNetV2_pretrained.tar) |
+| MobileNetV2 | SANAS | 71.518%/90.208% (-0.632%/-0.442%) | 14 | 0.295 | [download](https://paddlemodels.cdn.bcebos.com/PaddleSlim/MobileNetV2_sanas.tar) |
 ## 2. Object Detection
@@ -99,8 +105,8 @@
 | Model | Method | Dataset | Image/GPU | Input 608 Box AP | Input 416 Box AP | Input 320 Box AP | Model Size(MB) | TensorRT latency(V100, ms) | Download |
 | :----------------------------: | :---------: | :----: | :-------: | :------------: | :------------: | :------------: | :------------: | :----------: |:----------: |
 | MobileNet-V1-YOLOv3 | - | COCO | 8 | 29.3 | 29.3 | 27.1 | 95 | - | [download](https://paddlemodels.bj.bcebos.com/object_detection/yolov3_mobilenet_v1.tar) |
 | MobileNet-V1-YOLOv3 | quant_post | COCO | 8 | 27.9 (-1.4)| 28.0 (-1.3) | 26.0 (-1.0) | 25 | - | [download](https://paddlemodels.bj.bcebos.com/PaddleSlim/yolov3_mobilenetv1_coco_quant_post.tar) |
 | MobileNet-V1-YOLOv3 | quant_aware | COCO | 8 | 28.1 (-1.2)| 28.2 (-1.1) | 25.8 (-1.2) | 26.3 | - | [download](https://paddlemodels.bj.bcebos.com/PaddleSlim/yolov3_mobilenet_coco_quant_aware.tar) |
 | R34-YOLOv3 | - | COCO | 8 | 36.2 | 34.3 | 31.4 | 162 | - | [download](https://paddlemodels.bj.bcebos.com/object_detection/yolov3_r34.tar) |
 | R34-YOLOv3 | quant_post | COCO | 8 | 35.7 (-0.5) | - | - | 42.7 | - | [download](https://paddlemodels.bj.bcebos.com/PaddleSlim/yolov3_r34_coco_quant_post.tar) |
 | R34-YOLOv3 | quant_aware | COCO | 8 | 35.2 (-1.0) | 33.3 (-1.0) | 30.3 (-1.1)| 44 | - | [download](https://paddlemodels.bj.bcebos.com/PaddleSlim/yolov3_r34_coco_quant_aware.tar) |
@@ -157,6 +163,20 @@
 | MobileNet-V1-YOLOv3 | ResNet34-YOLOv3 distill | COCO | 8 | 31.4 (+2.1) | 30.0 (+0.7) | 27.1 (+0.1) | 95 | [download](https://paddlemodels.bj.bcebos.com/PaddleSlim/yolov3_mobilenetv1_coco_distilled.tar) |
+### 2.4 NAS
+Dataset: WIDER-FACE
+| Model | Method | Image/GPU | Input size | Easy/Medium/Hard | Model Size(KB) | Hardware latency(ms) | Download |
+| :------------: | :---------: | :-------: | :------: | :-----------------------------: | :------------: | :------------: | :----------------------------------------------------------: |
+| BlazeFace | - | 8 | 640 | 91.5/89.2/79.7 | 815 | 71.862 | [download](https://paddlemodels.bj.bcebos.com/object_detection/blazeface_original.tar) |
+| BlazeFace-NAS | - | 8 | 640 | 83.7/80.7/65.8 | 244 | 21.117 | [download](https://paddlemodels.bj.bcebos.com/object_detection/blazeface_nas.tar) |
+| BlazeFace-NAS1 | SANAS | 8 | 640 | 87.0/83.7/68.5 | 389 | 22.558 | [download](https://paddlemodels.bj.bcebos.com/object_detection/blazeface_nas2.tar) |
+!!! note "Note"
+    <a name="trans1">[1]</a>: The hardware latency is derived from the provided latency table, which was measured on the 855 chip with PaddleLite.
 ## 3. Image Segmentation
 Dataset: Cityscapes
......
@@ -190,7 +190,10 @@ class SANAS(object):
         self._iter = 0
     def _get_host_ip(self):
-        return socket.gethostbyname(socket.gethostname())
+        if os.name == 'posix':
+            return socket.gethostbyname('localhost')
+        else:
+            return socket.gethostbyname(socket.gethostname())
     def tokens2arch(self, tokens):
         """
......
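The final hunk makes host-IP resolution branch on the operating system. Presumably this works around POSIX machines whose hostname does not resolve (where `socket.gethostbyname(socket.gethostname())` raises `socket.gaierror`); a standalone sketch of the new behaviour, for reference only:

```python
# Sketch of the patched helper; the rationale in the comments is my reading of the diff.
import os
import socket

def _get_host_ip():
    # On POSIX, resolve 'localhost' (typically 127.0.0.1) rather than the machine
    # hostname, which may be missing from /etc/hosts or DNS on some setups.
    if os.name == 'posix':
        return socket.gethostbyname('localhost')
    else:
        return socket.gethostbyname(socket.gethostname())

print(_get_host_ip())
```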