Unverified commit b9786424, authored by cuicheng01, committed by GitHub

Merge pull request #819 from cuicheng01/develop_reg

Update configs
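
Review note: across every config touched here the pattern is the same — quoted strings become plain YAML scalars, `num_workers` drops from 6 to 4, and the NormalizeImage `scale` literal `0.00392157` becomes the expression string `1.0/255.0`. A minimal sketch of why the two scales are interchangeable; evaluating the string with `eval` is an assumption about how the transform consumes it, not a quote of the PaddleClas operator.

```python
# Minimal sketch: the new `scale: 1.0/255.0` and the old `scale: 0.00392157`
# denote the same normalization factor. Evaluating the string with eval() is
# an assumption about how the transform consumes it, not PaddleClas code.
def parse_scale(scale):
    """Accept either a float or an arithmetic expression string."""
    return float(eval(scale)) if isinstance(scale, str) else float(scale)

assert abs(parse_scale("1.0/255.0") - parse_scale(0.00392157)) < 1e-6
print(parse_scale("1.0/255.0"))  # 0.00392156862745098
```

Writing the factor as `1.0/255.0` keeps the intent (undo 8-bit quantization) obvious instead of hiding it behind a rounded literal.
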
@@ -2,8 +2,8 @@
Global:
checkpoints: null
pretrained_model: null
output_dir: "./output/"
device: "gpu"
output_dir: ./output/
device: gpu
class_num: 1000
save_interval: 1
eval_during_train: True
@@ -13,11 +13,11 @@ Global:
use_visualdl: False
# used for static mode and model export
image_shape: [3, 224, 224]
save_inference_dir: "./inference"
save_inference_dir: ./inference
# model architecture
Arch:
name: "HRNet_W18_C"
name: HRNet_W18_C
# loss function config for training/eval process
Loss:
@@ -46,80 +46,80 @@ Optimizer:
DataLoader:
Train:
dataset:
name: ImageNetDataset
image_root: "./dataset/ILSVRC2012/"
cls_label_path: "./dataset/ILSVRC2012/train_list.txt"
transform_ops:
- RandCropImage:
size: 224
- RandFlipImage:
flip_code: 1
- NormalizeImage:
scale: 0.00392157
mean: [0.485, 0.456, 0.406]
std: [0.229, 0.224, 0.225]
order: ''
name: ImageNetDataset
image_root: ./dataset/ILSVRC2012/
cls_label_path: ./dataset/ILSVRC2012/train_list.txt
transform_ops:
- RandCropImage:
size: 224
- RandFlipImage:
flip_code: 1
- NormalizeImage:
scale: 1.0/255.0
mean: [0.485, 0.456, 0.406]
std: [0.229, 0.224, 0.225]
order: ''
sampler:
name: DistributedBatchSampler
batch_size: 64
drop_last: False
shuffle: True
name: DistributedBatchSampler
batch_size: 64
drop_last: False
shuffle: True
loader:
num_workers: 6
use_shared_memory: True
num_workers: 4
use_shared_memory: True
Eval:
# TODO: modify to the latest trainer
dataset:
name: ImageNetDataset
image_root: "./dataset/ILSVRC2012/"
cls_label_path: "./dataset/ILSVRC2012/val_list.txt"
transform_ops:
- ResizeImage:
resize_short: 256
- CropImage:
size: 224
- NormalizeImage:
scale: 0.00392157
mean: [0.485, 0.456, 0.406]
std: [0.229, 0.224, 0.225]
order: ''
name: ImageNetDataset
image_root: ./dataset/ILSVRC2012/
cls_label_path: ./dataset/ILSVRC2012/val_list.txt
transform_ops:
- ResizeImage:
resize_short: 256
- CropImage:
size: 224
- NormalizeImage:
scale: 1.0/255.0
mean: [0.485, 0.456, 0.406]
std: [0.229, 0.224, 0.225]
order: ''
sampler:
name: DistributedBatchSampler
batch_size: 64
drop_last: False
shuffle: False
name: DistributedBatchSampler
batch_size: 64
drop_last: False
shuffle: False
loader:
num_workers: 6
use_shared_memory: True
num_workers: 4
use_shared_memory: True
Infer:
infer_imgs: "docs/images/whl/demo.jpg"
infer_imgs: docs/images/whl/demo.jpg
batch_size: 10
transforms:
- DecodeImage:
to_rgb: True
channel_first: False
- ResizeImage:
resize_short: 256
- CropImage:
size: 224
- NormalizeImage:
scale: 1.0/255.0
mean: [0.485, 0.456, 0.406]
std: [0.229, 0.224, 0.225]
order: ''
- ToCHWImage:
- DecodeImage:
to_rgb: True
channel_first: False
- ResizeImage:
resize_short: 256
- CropImage:
size: 224
- NormalizeImage:
scale: 1.0/255.0
mean: [0.485, 0.456, 0.406]
std: [0.229, 0.224, 0.225]
order: ''
- ToCHWImage:
PostProcess:
name: Topk
topk: 5
class_id_map_file: "ppcls/utils/imagenet1k_label_list.txt"
class_id_map_file: ppcls/utils/imagenet1k_label_list.txt
Metric:
Train:
Train:
- TopkAcc:
topk: [1, 5]
Eval:
Eval:
- TopkAcc:
topk: [1, 5]
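
Review note: the Eval/Infer pipelines in these configs are the standard ImageNet recipe — resize the short side to 256, center-crop to 224, scale by 1/255, normalize with the listed mean/std, then transpose HWC to CHW. Below is a minimal re-implementation of what those ops describe (PIL + NumPy), offered as a sketch rather than the project's own operator implementations.

```python
# Minimal re-implementation of the Eval/Infer chain above:
# ResizeImage(resize_short=256) -> CropImage(size=224)
# -> NormalizeImage(scale=1/255, mean, std) -> ToCHWImage.
import numpy as np
from PIL import Image

MEAN = np.array([0.485, 0.456, 0.406], dtype=np.float32)
STD = np.array([0.229, 0.224, 0.225], dtype=np.float32)

def preprocess(path, resize_short=256, crop=224):
    img = Image.open(path).convert("RGB")
    w, h = img.size
    ratio = resize_short / min(w, h)                       # short side -> 256
    img = img.resize((round(w * ratio), round(h * ratio)), Image.BILINEAR)
    w, h = img.size
    left, top = (w - crop) // 2, (h - crop) // 2           # center crop 224x224
    img = img.crop((left, top, left + crop, top + crop))
    x = np.asarray(img, dtype=np.float32) * (1.0 / 255.0)  # scale to [0, 1]
    x = (x - MEAN) / STD                                   # per-channel normalize
    return x.transpose(2, 0, 1)                            # HWC -> CHW

# chw = preprocess("docs/images/whl/demo.jpg")             # -> shape (3, 224, 224)
```
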
@@ -2,8 +2,8 @@
Global:
checkpoints: null
pretrained_model: null
output_dir: "./output/"
device: "gpu"
output_dir: ./output/
device: gpu
class_num: 1000
save_interval: 1
eval_during_train: True
@@ -13,11 +13,11 @@ Global:
use_visualdl: False
# used for static mode and model export
image_shape: [3, 224, 224]
save_inference_dir: "./inference"
save_inference_dir: ./inference
# model architecture
Arch:
name: "HRNet_W30_C"
name: HRNet_W30_C
# loss function config for training/eval process
Loss:
@@ -46,80 +46,80 @@ Optimizer:
DataLoader:
Train:
dataset:
name: ImageNetDataset
image_root: "./dataset/ILSVRC2012/"
cls_label_path: "./dataset/ILSVRC2012/train_list.txt"
transform_ops:
- RandCropImage:
size: 224
- RandFlipImage:
flip_code: 1
- NormalizeImage:
scale: 0.00392157
mean: [0.485, 0.456, 0.406]
std: [0.229, 0.224, 0.225]
order: ''
name: ImageNetDataset
image_root: ./dataset/ILSVRC2012/
cls_label_path: ./dataset/ILSVRC2012/train_list.txt
transform_ops:
- RandCropImage:
size: 224
- RandFlipImage:
flip_code: 1
- NormalizeImage:
scale: 1.0/255.0
mean: [0.485, 0.456, 0.406]
std: [0.229, 0.224, 0.225]
order: ''
sampler:
name: DistributedBatchSampler
batch_size: 64
drop_last: False
shuffle: True
name: DistributedBatchSampler
batch_size: 64
drop_last: False
shuffle: True
loader:
num_workers: 6
use_shared_memory: True
num_workers: 4
use_shared_memory: True
Eval:
# TODO: modify to the latest trainer
dataset:
name: ImageNetDataset
image_root: "./dataset/ILSVRC2012/"
cls_label_path: "./dataset/ILSVRC2012/val_list.txt"
transform_ops:
- ResizeImage:
resize_short: 256
- CropImage:
size: 224
- NormalizeImage:
scale: 0.00392157
mean: [0.485, 0.456, 0.406]
std: [0.229, 0.224, 0.225]
order: ''
name: ImageNetDataset
image_root: ./dataset/ILSVRC2012/
cls_label_path: ./dataset/ILSVRC2012/val_list.txt
transform_ops:
- ResizeImage:
resize_short: 256
- CropImage:
size: 224
- NormalizeImage:
scale: 1.0/255.0
mean: [0.485, 0.456, 0.406]
std: [0.229, 0.224, 0.225]
order: ''
sampler:
name: DistributedBatchSampler
batch_size: 64
drop_last: False
shuffle: False
name: DistributedBatchSampler
batch_size: 64
drop_last: False
shuffle: False
loader:
num_workers: 6
use_shared_memory: True
num_workers: 4
use_shared_memory: True
Infer:
infer_imgs: "docs/images/whl/demo.jpg"
infer_imgs: docs/images/whl/demo.jpg
batch_size: 10
transforms:
- DecodeImage:
to_rgb: True
channel_first: False
- ResizeImage:
resize_short: 256
- CropImage:
size: 224
- NormalizeImage:
scale: 1.0/255.0
mean: [0.485, 0.456, 0.406]
std: [0.229, 0.224, 0.225]
order: ''
- ToCHWImage:
- DecodeImage:
to_rgb: True
channel_first: False
- ResizeImage:
resize_short: 256
- CropImage:
size: 224
- NormalizeImage:
scale: 1.0/255.0
mean: [0.485, 0.456, 0.406]
std: [0.229, 0.224, 0.225]
order: ''
- ToCHWImage:
PostProcess:
name: Topk
topk: 5
class_id_map_file: "ppcls/utils/imagenet1k_label_list.txt"
class_id_map_file: ppcls/utils/imagenet1k_label_list.txt
Metric:
Train:
Train:
- TopkAcc:
topk: [1, 5]
Eval:
Eval:
- TopkAcc:
topk: [1, 5]
@@ -2,8 +2,8 @@
Global:
checkpoints: null
pretrained_model: null
output_dir: "./output/"
device: "gpu"
output_dir: ./output/
device: gpu
class_num: 1000
save_interval: 1
eval_during_train: True
@@ -13,11 +13,11 @@ Global:
use_visualdl: False
# used for static mode and model export
image_shape: [3, 224, 224]
save_inference_dir: "./inference"
save_inference_dir: ./inference
# model architecture
Arch:
name: "HRNet_W32_C"
name: HRNet_W32_C
# loss function config for training/eval process
Loss:
@@ -46,80 +46,80 @@ Optimizer:
DataLoader:
Train:
dataset:
name: ImageNetDataset
image_root: "./dataset/ILSVRC2012/"
cls_label_path: "./dataset/ILSVRC2012/train_list.txt"
transform_ops:
- RandCropImage:
size: 224
- RandFlipImage:
flip_code: 1
- NormalizeImage:
scale: 0.00392157
mean: [0.485, 0.456, 0.406]
std: [0.229, 0.224, 0.225]
order: ''
name: ImageNetDataset
image_root: ./dataset/ILSVRC2012/
cls_label_path: ./dataset/ILSVRC2012/train_list.txt
transform_ops:
- RandCropImage:
size: 224
- RandFlipImage:
flip_code: 1
- NormalizeImage:
scale: 1.0/255.0
mean: [0.485, 0.456, 0.406]
std: [0.229, 0.224, 0.225]
order: ''
sampler:
name: DistributedBatchSampler
batch_size: 64
drop_last: False
shuffle: True
name: DistributedBatchSampler
batch_size: 64
drop_last: False
shuffle: True
loader:
num_workers: 6
use_shared_memory: True
num_workers: 4
use_shared_memory: True
Eval:
# TODO: modify to the latest trainer
dataset:
name: ImageNetDataset
image_root: "./dataset/ILSVRC2012/"
cls_label_path: "./dataset/ILSVRC2012/val_list.txt"
transform_ops:
- ResizeImage:
resize_short: 256
- CropImage:
size: 224
- NormalizeImage:
scale: 0.00392157
mean: [0.485, 0.456, 0.406]
std: [0.229, 0.224, 0.225]
order: ''
name: ImageNetDataset
image_root: ./dataset/ILSVRC2012/
cls_label_path: ./dataset/ILSVRC2012/val_list.txt
transform_ops:
- ResizeImage:
resize_short: 256
- CropImage:
size: 224
- NormalizeImage:
scale: 1.0/255.0
mean: [0.485, 0.456, 0.406]
std: [0.229, 0.224, 0.225]
order: ''
sampler:
name: DistributedBatchSampler
batch_size: 64
drop_last: False
shuffle: False
name: DistributedBatchSampler
batch_size: 64
drop_last: False
shuffle: False
loader:
num_workers: 6
use_shared_memory: True
num_workers: 4
use_shared_memory: True
Infer:
infer_imgs: "docs/images/whl/demo.jpg"
infer_imgs: docs/images/whl/demo.jpg
batch_size: 10
transforms:
- DecodeImage:
to_rgb: True
channel_first: False
- ResizeImage:
resize_short: 256
- CropImage:
size: 224
- NormalizeImage:
scale: 1.0/255.0
mean: [0.485, 0.456, 0.406]
std: [0.229, 0.224, 0.225]
order: ''
- ToCHWImage:
- DecodeImage:
to_rgb: True
channel_first: False
- ResizeImage:
resize_short: 256
- CropImage:
size: 224
- NormalizeImage:
scale: 1.0/255.0
mean: [0.485, 0.456, 0.406]
std: [0.229, 0.224, 0.225]
order: ''
- ToCHWImage:
PostProcess:
name: Topk
topk: 5
class_id_map_file: "ppcls/utils/imagenet1k_label_list.txt"
class_id_map_file: ppcls/utils/imagenet1k_label_list.txt
Metric:
Train:
Train:
- TopkAcc:
topk: [1, 5]
Eval:
Eval:
- TopkAcc:
topk: [1, 5]
@@ -2,8 +2,8 @@
Global:
checkpoints: null
pretrained_model: null
output_dir: "./output/"
device: "gpu"
output_dir: ./output/
device: gpu
class_num: 1000
save_interval: 1
eval_during_train: True
@@ -13,11 +13,11 @@ Global:
use_visualdl: False
# used for static mode and model export
image_shape: [3, 224, 224]
save_inference_dir: "./inference"
save_inference_dir: ./inference
# model architecture
Arch:
name: "HRNet_W40_C"
name: HRNet_W40_C
# loss function config for training/eval process
Loss:
@@ -46,80 +46,80 @@ Optimizer:
DataLoader:
Train:
dataset:
name: ImageNetDataset
image_root: "./dataset/ILSVRC2012/"
cls_label_path: "./dataset/ILSVRC2012/train_list.txt"
transform_ops:
- RandCropImage:
size: 224
- RandFlipImage:
flip_code: 1
- NormalizeImage:
scale: 0.00392157
mean: [0.485, 0.456, 0.406]
std: [0.229, 0.224, 0.225]
order: ''
name: ImageNetDataset
image_root: ./dataset/ILSVRC2012/
cls_label_path: ./dataset/ILSVRC2012/train_list.txt
transform_ops:
- RandCropImage:
size: 224
- RandFlipImage:
flip_code: 1
- NormalizeImage:
scale: 1.0/255.0
mean: [0.485, 0.456, 0.406]
std: [0.229, 0.224, 0.225]
order: ''
sampler:
name: DistributedBatchSampler
batch_size: 64
drop_last: False
shuffle: True
name: DistributedBatchSampler
batch_size: 64
drop_last: False
shuffle: True
loader:
num_workers: 6
use_shared_memory: True
num_workers: 4
use_shared_memory: True
Eval:
# TODO: modify to the latest trainer
dataset:
name: ImageNetDataset
image_root: "./dataset/ILSVRC2012/"
cls_label_path: "./dataset/ILSVRC2012/val_list.txt"
transform_ops:
- ResizeImage:
resize_short: 256
- CropImage:
size: 224
- NormalizeImage:
scale: 0.00392157
mean: [0.485, 0.456, 0.406]
std: [0.229, 0.224, 0.225]
order: ''
name: ImageNetDataset
image_root: ./dataset/ILSVRC2012/
cls_label_path: ./dataset/ILSVRC2012/val_list.txt
transform_ops:
- ResizeImage:
resize_short: 256
- CropImage:
size: 224
- NormalizeImage:
scale: 1.0/255.0
mean: [0.485, 0.456, 0.406]
std: [0.229, 0.224, 0.225]
order: ''
sampler:
name: DistributedBatchSampler
batch_size: 64
drop_last: False
shuffle: False
name: DistributedBatchSampler
batch_size: 64
drop_last: False
shuffle: False
loader:
num_workers: 6
use_shared_memory: True
num_workers: 4
use_shared_memory: True
Infer:
infer_imgs: "docs/images/whl/demo.jpg"
infer_imgs: docs/images/whl/demo.jpg
batch_size: 10
transforms:
- DecodeImage:
to_rgb: True
channel_first: False
- ResizeImage:
resize_short: 256
- CropImage:
size: 224
- NormalizeImage:
scale: 1.0/255.0
mean: [0.485, 0.456, 0.406]
std: [0.229, 0.224, 0.225]
order: ''
- ToCHWImage:
- DecodeImage:
to_rgb: True
channel_first: False
- ResizeImage:
resize_short: 256
- CropImage:
size: 224
- NormalizeImage:
scale: 1.0/255.0
mean: [0.485, 0.456, 0.406]
std: [0.229, 0.224, 0.225]
order: ''
- ToCHWImage:
PostProcess:
name: Topk
topk: 5
class_id_map_file: "ppcls/utils/imagenet1k_label_list.txt"
class_id_map_file: ppcls/utils/imagenet1k_label_list.txt
Metric:
Train:
Train:
- TopkAcc:
topk: [1, 5]
Eval:
Eval:
- TopkAcc:
topk: [1, 5]
@@ -2,8 +2,8 @@
Global:
checkpoints: null
pretrained_model: null
output_dir: "./output/"
device: "gpu"
output_dir: ./output/
device: gpu
class_num: 1000
save_interval: 1
eval_during_train: True
@@ -13,11 +13,11 @@ Global:
use_visualdl: False
# used for static mode and model export
image_shape: [3, 224, 224]
save_inference_dir: "./inference"
save_inference_dir: ./inference
# model architecture
Arch:
name: "HRNet_W44_C"
name: HRNet_W44_C
# loss function config for training/eval process
Loss:
@@ -46,78 +46,80 @@ Optimizer:
DataLoader:
Train:
dataset:
name: ImageNetDataset
image_root: "./dataset/ILSVRC2012/"
cls_label_path: "./dataset/ILSVRC2012/train_list.txt"
transform_ops:
- RandCropImage:
size: 224
- RandFlipImage:
flip_code: 1
- NormalizeImage:
scale: 0.00392157
mean: [0.485, 0.456, 0.406]
std: [0.229, 0.224, 0.225]
order: ''
name: ImageNetDataset
image_root: ./dataset/ILSVRC2012/
cls_label_path: ./dataset/ILSVRC2012/train_list.txt
transform_ops:
- RandCropImage:
size: 224
- RandFlipImage:
flip_code: 1
- NormalizeImage:
scale: 1.0/255.0
mean: [0.485, 0.456, 0.406]
std: [0.229, 0.224, 0.225]
order: ''
sampler:
name: DistributedBatchSampler
batch_size: 64
drop_last: False
shuffle: True
name: DistributedBatchSampler
batch_size: 64
drop_last: False
shuffle: True
loader:
num_workers: 6
use_shared_memory: True
num_workers: 4
use_shared_memory: True
Eval:
# TODO: modify to the latest trainer
dataset:
name: ImageNetDataset
image_root: "./dataset/ILSVRC2012/"
cls_label_path: "./dataset/ILSVRC2012/val_list.txt"
transform_ops:
- ResizeImage:
size: 224
- NormalizeImage:
scale: 0.00392157
mean: [0.485, 0.456, 0.406]
std: [0.229, 0.224, 0.225]
order: ''
name: ImageNetDataset
image_root: ./dataset/ILSVRC2012/
cls_label_path: ./dataset/ILSVRC2012/val_list.txt
transform_ops:
- ResizeImage:
resize_short: 256
- CropImage:
size: 224
- NormalizeImage:
scale: 1.0/255.0
mean: [0.485, 0.456, 0.406]
std: [0.229, 0.224, 0.225]
order: ''
sampler:
name: DistributedBatchSampler
batch_size: 64
drop_last: False
shuffle: False
name: DistributedBatchSampler
batch_size: 64
drop_last: False
shuffle: False
loader:
num_workers: 6
use_shared_memory: True
num_workers: 4
use_shared_memory: True
Infer:
infer_imgs: "docs/images/whl/demo.jpg"
infer_imgs: docs/images/whl/demo.jpg
batch_size: 10
transforms:
- DecodeImage:
to_rgb: True
channel_first: False
- ResizeImage:
resize_short: 256
- CropImage:
size: 224
- NormalizeImage:
scale: 1.0/255.0
mean: [0.485, 0.456, 0.406]
std: [0.229, 0.224, 0.225]
order: ''
- ToCHWImage:
- DecodeImage:
to_rgb: True
channel_first: False
- ResizeImage:
resize_short: 256
- CropImage:
size: 224
- NormalizeImage:
scale: 1.0/255.0
mean: [0.485, 0.456, 0.406]
std: [0.229, 0.224, 0.225]
order: ''
- ToCHWImage:
PostProcess:
name: Topk
topk: 5
class_id_map_file: "ppcls/utils/imagenet1k_label_list.txt"
class_id_map_file: ppcls/utils/imagenet1k_label_list.txt
Metric:
Train:
Train:
- TopkAcc:
topk: [1, 5]
Eval:
Eval:
- TopkAcc:
topk: [1, 5]
@@ -2,8 +2,8 @@
Global:
checkpoints: null
pretrained_model: null
output_dir: "./output/"
device: "gpu"
output_dir: ./output/
device: gpu
class_num: 1000
save_interval: 1
eval_during_train: True
@@ -13,11 +13,11 @@ Global:
use_visualdl: False
# used for static mode and model export
image_shape: [3, 224, 224]
save_inference_dir: "./inference"
save_inference_dir: ./inference
# model architecture
Arch:
name: "HRNet_W48_C"
name: HRNet_W48_C
# loss function config for training/eval process
Loss:
@@ -46,80 +46,80 @@ Optimizer:
DataLoader:
Train:
dataset:
name: ImageNetDataset
image_root: "./dataset/ILSVRC2012/"
cls_label_path: "./dataset/ILSVRC2012/train_list.txt"
transform_ops:
- RandCropImage:
size: 224
- RandFlipImage:
flip_code: 1
- NormalizeImage:
scale: 0.00392157
mean: [0.485, 0.456, 0.406]
std: [0.229, 0.224, 0.225]
order: ''
name: ImageNetDataset
image_root: ./dataset/ILSVRC2012/
cls_label_path: ./dataset/ILSVRC2012/train_list.txt
transform_ops:
- RandCropImage:
size: 224
- RandFlipImage:
flip_code: 1
- NormalizeImage:
scale: 1.0/255.0
mean: [0.485, 0.456, 0.406]
std: [0.229, 0.224, 0.225]
order: ''
sampler:
name: DistributedBatchSampler
batch_size: 64
drop_last: False
shuffle: True
name: DistributedBatchSampler
batch_size: 64
drop_last: False
shuffle: True
loader:
num_workers: 6
use_shared_memory: True
num_workers: 4
use_shared_memory: True
Eval:
# TODO: modify to the latest trainer
dataset:
name: ImageNetDataset
image_root: "./dataset/ILSVRC2012/"
cls_label_path: "./dataset/ILSVRC2012/val_list.txt"
transform_ops:
- ResizeImage:
resize_short: 256
- CropImage:
size: 224
- NormalizeImage:
scale: 0.00392157
mean: [0.485, 0.456, 0.406]
std: [0.229, 0.224, 0.225]
order: ''
name: ImageNetDataset
image_root: ./dataset/ILSVRC2012/
cls_label_path: ./dataset/ILSVRC2012/val_list.txt
transform_ops:
- ResizeImage:
resize_short: 256
- CropImage:
size: 224
- NormalizeImage:
scale: 1.0/255.0
mean: [0.485, 0.456, 0.406]
std: [0.229, 0.224, 0.225]
order: ''
sampler:
name: DistributedBatchSampler
batch_size: 64
drop_last: False
shuffle: False
name: DistributedBatchSampler
batch_size: 64
drop_last: False
shuffle: False
loader:
num_workers: 6
use_shared_memory: True
num_workers: 4
use_shared_memory: True
Infer:
infer_imgs: "docs/images/whl/demo.jpg"
infer_imgs: docs/images/whl/demo.jpg
batch_size: 10
transforms:
- DecodeImage:
to_rgb: True
channel_first: False
- ResizeImage:
resize_short: 256
- CropImage:
size: 224
- NormalizeImage:
scale: 1.0/255.0
mean: [0.485, 0.456, 0.406]
std: [0.229, 0.224, 0.225]
order: ''
- ToCHWImage:
- DecodeImage:
to_rgb: True
channel_first: False
- ResizeImage:
resize_short: 256
- CropImage:
size: 224
- NormalizeImage:
scale: 1.0/255.0
mean: [0.485, 0.456, 0.406]
std: [0.229, 0.224, 0.225]
order: ''
- ToCHWImage:
PostProcess:
name: Topk
topk: 5
class_id_map_file: "ppcls/utils/imagenet1k_label_list.txt"
class_id_map_file: ppcls/utils/imagenet1k_label_list.txt
Metric:
Train:
Train:
- TopkAcc:
topk: [1, 5]
Eval:
Eval:
- TopkAcc:
topk: [1, 5]
@@ -2,8 +2,8 @@
Global:
checkpoints: null
pretrained_model: null
output_dir: "./output/"
device: "gpu"
output_dir: ./output/
device: gpu
class_num: 1000
save_interval: 1
eval_during_train: True
@@ -13,11 +13,11 @@ Global:
use_visualdl: False
# used for static mode and model export
image_shape: [3, 224, 224]
save_inference_dir: "./inference"
save_inference_dir: ./inference
# model architecture
Arch:
name: "HRNet_W64_C"
name: HRNet_W64_C
# loss function config for training/eval process
Loss:
@@ -46,80 +46,80 @@ Optimizer:
DataLoader:
Train:
dataset:
name: ImageNetDataset
image_root: "./dataset/ILSVRC2012/"
cls_label_path: "./dataset/ILSVRC2012/train_list.txt"
transform_ops:
- RandCropImage:
size: 224
- RandFlipImage:
flip_code: 1
- NormalizeImage:
scale: 0.00392157
mean: [0.485, 0.456, 0.406]
std: [0.229, 0.224, 0.225]
order: ''
name: ImageNetDataset
image_root: ./dataset/ILSVRC2012/
cls_label_path: ./dataset/ILSVRC2012/train_list.txt
transform_ops:
- RandCropImage:
size: 224
- RandFlipImage:
flip_code: 1
- NormalizeImage:
scale: 1.0/255.0
mean: [0.485, 0.456, 0.406]
std: [0.229, 0.224, 0.225]
order: ''
sampler:
name: DistributedBatchSampler
batch_size: 64
drop_last: False
shuffle: True
name: DistributedBatchSampler
batch_size: 64
drop_last: False
shuffle: True
loader:
num_workers: 6
use_shared_memory: True
num_workers: 4
use_shared_memory: True
Eval:
# TODO: modify to the latest trainer
dataset:
name: ImageNetDataset
image_root: "./dataset/ILSVRC2012/"
cls_label_path: "./dataset/ILSVRC2012/val_list.txt"
transform_ops:
- ResizeImage:
resize_short: 256
- CropImage:
size: 224
- NormalizeImage:
scale: 0.00392157
mean: [0.485, 0.456, 0.406]
std: [0.229, 0.224, 0.225]
order: ''
name: ImageNetDataset
image_root: ./dataset/ILSVRC2012/
cls_label_path: ./dataset/ILSVRC2012/val_list.txt
transform_ops:
- ResizeImage:
resize_short: 256
- CropImage:
size: 224
- NormalizeImage:
scale: 1.0/255.0
mean: [0.485, 0.456, 0.406]
std: [0.229, 0.224, 0.225]
order: ''
sampler:
name: DistributedBatchSampler
batch_size: 64
drop_last: False
shuffle: False
name: DistributedBatchSampler
batch_size: 64
drop_last: False
shuffle: False
loader:
num_workers: 6
use_shared_memory: True
num_workers: 4
use_shared_memory: True
Infer:
infer_imgs: "docs/images/whl/demo.jpg"
infer_imgs: docs/images/whl/demo.jpg
batch_size: 10
transforms:
- DecodeImage:
to_rgb: True
channel_first: False
- ResizeImage:
resize_short: 256
- CropImage:
size: 224
- NormalizeImage:
scale: 1.0/255.0
mean: [0.485, 0.456, 0.406]
std: [0.229, 0.224, 0.225]
order: ''
- ToCHWImage:
- DecodeImage:
to_rgb: True
channel_first: False
- ResizeImage:
resize_short: 256
- CropImage:
size: 224
- NormalizeImage:
scale: 1.0/255.0
mean: [0.485, 0.456, 0.406]
std: [0.229, 0.224, 0.225]
order: ''
- ToCHWImage:
PostProcess:
name: Topk
topk: 5
class_id_map_file: "ppcls/utils/imagenet1k_label_list.txt"
class_id_map_file: ppcls/utils/imagenet1k_label_list.txt
Metric:
Train:
Train:
- TopkAcc:
topk: [1, 5]
Eval:
Eval:
- TopkAcc:
topk: [1, 5]
@@ -2,8 +2,8 @@
Global:
checkpoints: null
pretrained_model: null
output_dir: "./output/"
device: "gpu"
output_dir: ./output/
device: gpu
class_num: 1000
save_interval: 1
eval_during_train: True
@@ -12,18 +12,19 @@ Global:
print_batch_step: 10
use_visualdl: False
# used for static mode and model export
image_shape: [3, 224, 224]
save_inference_dir: "./inference"
image_shape: [3, 299, 299]
save_inference_dir: ./inference
# model architecture
Arch:
name: "InceptionV3"
name: InceptionV3
# loss function config for training/eval process
Loss:
Train:
- CELoss:
weight: 1.0
epsilon: 0.1
Eval:
- CELoss:
weight: 1.0
@@ -35,8 +36,6 @@ Optimizer:
lr:
name: Cosine
learning_rate: 0.045
decay_epochs: [30, 60, 90]
values: [0.1, 0.01, 0.001, 0.0001]
regularizer:
name: 'L2'
coeff: 0.0001
@@ -46,80 +45,80 @@ Optimizer:
DataLoader:
Train:
dataset:
name: ImageNetDataset
image_root: "./dataset/ILSVRC2012/"
cls_label_path: "./dataset/ILSVRC2012/train_list.txt"
transform_ops:
- RandCropImage:
size: 299
- RandFlipImage:
flip_code: 1
- NormalizeImage:
scale: 0.00392157
mean: [0.485, 0.456, 0.406]
std: [0.229, 0.224, 0.225]
order: ''
name: ImageNetDataset
image_root: ./dataset/ILSVRC2012/
cls_label_path: ./dataset/ILSVRC2012/train_list.txt
transform_ops:
- RandCropImage:
size: 299
- RandFlipImage:
flip_code: 1
- NormalizeImage:
scale: 1.0/255.0
mean: [0.485, 0.456, 0.406]
std: [0.229, 0.224, 0.225]
order: ''
sampler:
name: DistributedBatchSampler
batch_size: 64
drop_last: False
shuffle: True
name: DistributedBatchSampler
batch_size: 64
drop_last: False
shuffle: True
loader:
num_workers: 6
use_shared_memory: True
num_workers: 4
use_shared_memory: True
Eval:
# TODO: modify to the latest trainer
dataset:
name: ImageNetDataset
image_root: "./dataset/ILSVRC2012/"
cls_label_path: "./dataset/ILSVRC2012/val_list.txt"
transform_ops:
- ResizeImage:
resize_short: 320
- CropImage:
size: 299
- NormalizeImage:
scale: 0.00392157
mean: [0.485, 0.456, 0.406]
std: [0.229, 0.224, 0.225]
order: ''
name: ImageNetDataset
image_root: ./dataset/ILSVRC2012/
cls_label_path: ./dataset/ILSVRC2012/val_list.txt
transform_ops:
- ResizeImage:
resize_short: 320
- CropImage:
size: 299
- NormalizeImage:
scale: 1.0/255.0
mean: [0.485, 0.456, 0.406]
std: [0.229, 0.224, 0.225]
order: ''
sampler:
name: DistributedBatchSampler
batch_size: 64
drop_last: False
shuffle: False
name: DistributedBatchSampler
batch_size: 64
drop_last: False
shuffle: False
loader:
num_workers: 6
use_shared_memory: True
num_workers: 4
use_shared_memory: True
Infer:
infer_imgs: "docs/images/whl/demo.jpg"
infer_imgs: docs/images/whl/demo.jpg
batch_size: 10
transforms:
- DecodeImage:
to_rgb: True
channel_first: False
- ResizeImage:
resize_short: 320
- CropImage:
size: 299
- NormalizeImage:
scale: 1.0/255.0
mean: [0.485, 0.456, 0.406]
std: [0.229, 0.224, 0.225]
order: ''
- ToCHWImage:
- DecodeImage:
to_rgb: True
channel_first: False
- ResizeImage:
resize_short: 320
- CropImage:
size: 299
- NormalizeImage:
scale: 1.0/255.0
mean: [0.485, 0.456, 0.406]
std: [0.229, 0.224, 0.225]
order: ''
- ToCHWImage:
PostProcess:
name: Topk
topk: 5
class_id_map_file: "ppcls/utils/imagenet1k_label_list.txt"
class_id_map_file: ppcls/utils/imagenet1k_label_list.txt
Metric:
Train:
Train:
- TopkAcc:
topk: [1, 5]
Eval:
Eval:
- TopkAcc:
topk: [1, 5]
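
Review note: the InceptionV3 config differs from the rest — 299x299 inputs, label smoothing (`epsilon: 0.1`) on the training CELoss, and the piecewise `decay_epochs`/`values` lines replaced by a `Cosine` schedule at a base rate of 0.045. A sketch of that cosine schedule, assuming it decays from the base rate to zero over the run; warmup and the trainer's step/epoch granularity are not visible in this diff.

```python
# Cosine schedule as configured for InceptionV3 (name: Cosine, learning_rate: 0.045).
# Assumes decay from the base rate to zero over `total_steps`.
import math

def cosine_lr(step, total_steps, base_lr=0.045):
    return 0.5 * base_lr * (1.0 + math.cos(math.pi * step / total_steps))

print(cosine_lr(0, 120))    # 0.045 at the start
print(cosine_lr(60, 120))   # 0.0225 at the midpoint
print(cosine_lr(120, 120))  # 0.0 at the end
```
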
@@ -2,8 +2,8 @@
Global:
checkpoints: null
pretrained_model: null
output_dir: "./output/"
device: "gpu"
output_dir: ./output/
device: gpu
class_num: 1000
save_interval: 1
eval_during_train: True
@@ -13,11 +13,11 @@ Global:
use_visualdl: False
# used for static mode and model export
image_shape: [3, 224, 224]
save_inference_dir: "./inference"
save_inference_dir: ./inference
# model architecture
Arch:
name: "MobileNetV1"
name: MobileNetV1
# loss function config for training/eval process
Loss:
@@ -39,87 +39,87 @@ Optimizer:
values: [0.1, 0.01, 0.001, 0.0001]
regularizer:
name: 'L2'
coeff: 0.00003
coeff: 0.0003
# data loader for train and eval
DataLoader:
Train:
dataset:
name: ImageNetDataset
image_root: "./dataset/ILSVRC2012/"
cls_label_path: "./dataset/ILSVRC2012/train_list.txt"
transform_ops:
- RandCropImage:
size: 224
- RandFlipImage:
flip_code: 1
- NormalizeImage:
scale: 0.00392157
mean: [0.485, 0.456, 0.406]
std: [0.229, 0.224, 0.225]
order: ''
name: ImageNetDataset
image_root: ./dataset/ILSVRC2012/
cls_label_path: ./dataset/ILSVRC2012/train_list.txt
transform_ops:
- RandCropImage:
size: 224
- RandFlipImage:
flip_code: 1
- NormalizeImage:
scale: 1.0/255.0
mean: [0.485, 0.456, 0.406]
std: [0.229, 0.224, 0.225]
order: ''
sampler:
name: DistributedBatchSampler
batch_size: 64
drop_last: False
shuffle: True
name: DistributedBatchSampler
batch_size: 64
drop_last: False
shuffle: True
loader:
num_workers: 6
use_shared_memory: True
num_workers: 4
use_shared_memory: True
Eval:
# TODO: modify to the latest trainer
dataset:
name: ImageNetDataset
image_root: "./dataset/ILSVRC2012/"
cls_label_path: "./dataset/ILSVRC2012/val_list.txt"
transform_ops:
- ResizeImage:
resize_short: 256
- CropImage:
size: 224
- NormalizeImage:
scale: 0.00392157
mean: [0.485, 0.456, 0.406]
std: [0.229, 0.224, 0.225]
order: ''
name: ImageNetDataset
image_root: ./dataset/ILSVRC2012/
cls_label_path: ./dataset/ILSVRC2012/val_list.txt
transform_ops:
- ResizeImage:
resize_short: 256
- CropImage:
size: 224
- NormalizeImage:
scale: 1.0/255.0
mean: [0.485, 0.456, 0.406]
std: [0.229, 0.224, 0.225]
order: ''
sampler:
name: DistributedBatchSampler
batch_size: 64
drop_last: False
shuffle: False
name: DistributedBatchSampler
batch_size: 64
drop_last: False
shuffle: False
loader:
num_workers: 6
use_shared_memory: True
num_workers: 4
use_shared_memory: True
Infer:
infer_imgs: "docs/images/whl/demo.jpg"
infer_imgs: docs/images/whl/demo.jpg
batch_size: 10
transforms:
- DecodeImage:
to_rgb: True
channel_first: False
- ResizeImage:
resize_short: 256
- CropImage:
size: 224
- NormalizeImage:
scale: 1.0/255.0
mean: [0.485, 0.456, 0.406]
std: [0.229, 0.224, 0.225]
order: ''
- ToCHWImage:
- DecodeImage:
to_rgb: True
channel_first: False
- ResizeImage:
resize_short: 256
- CropImage:
size: 224
- NormalizeImage:
scale: 1.0/255.0
mean: [0.485, 0.456, 0.406]
std: [0.229, 0.224, 0.225]
order: ''
- ToCHWImage:
PostProcess:
name: Topk
topk: 5
class_id_map_file: "ppcls/utils/imagenet1k_label_list.txt"
class_id_map_file: ppcls/utils/imagenet1k_label_list.txt
Metric:
Train:
Train:
- TopkAcc:
topk: [1, 5]
Eval:
Eval:
- TopkAcc:
topk: [1, 5]
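
Review note: the MobileNetV1 configs raise the L2 coefficient from 0.00003 to 0.0003 while keeping the piecewise LR `values`. For reference, an L2 regularizer simply adds `coeff * w` to each weight's gradient — the sketch below shows the math only, not the Paddle optimizer implementation.

```python
# How `regularizer: {name: 'L2', coeff: 0.0003}` enters a plain SGD step:
# the regularizer contributes coeff * w to the gradient (weight decay).
import numpy as np

def sgd_step(w, grad, lr=0.1, l2_coeff=3e-4):
    return w - lr * (grad + l2_coeff * w)

w = np.array([0.5, -1.2])
g = np.array([0.01, 0.03])
print(sgd_step(w, g))  # weights shrink slightly more than the raw gradient would move them
```
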
@@ -2,8 +2,8 @@
Global:
checkpoints: null
pretrained_model: null
output_dir: "./output/"
device: "gpu"
output_dir: ./output/
device: gpu
class_num: 1000
save_interval: 1
eval_during_train: True
@@ -13,11 +13,11 @@ Global:
use_visualdl: False
# used for static mode and model export
image_shape: [3, 224, 224]
save_inference_dir: "./inference"
save_inference_dir: ./inference
# model architecture
Arch:
name: "MobileNetV1_x0_25"
name: MobileNetV1_x0_25
# loss function config for training/eval process
Loss:
@@ -39,87 +39,87 @@ Optimizer:
values: [0.1, 0.01, 0.001, 0.0001]
regularizer:
name: 'L2'
coeff: 0.00003
coeff: 0.0003
# data loader for train and eval
DataLoader:
Train:
dataset:
name: ImageNetDataset
image_root: "./dataset/ILSVRC2012/"
cls_label_path: "./dataset/ILSVRC2012/train_list.txt"
transform_ops:
- RandCropImage:
size: 224
- RandFlipImage:
flip_code: 1
- NormalizeImage:
scale: 0.00392157
mean: [0.485, 0.456, 0.406]
std: [0.229, 0.224, 0.225]
order: ''
name: ImageNetDataset
image_root: ./dataset/ILSVRC2012/
cls_label_path: ./dataset/ILSVRC2012/train_list.txt
transform_ops:
- RandCropImage:
size: 224
- RandFlipImage:
flip_code: 1
- NormalizeImage:
scale: 1.0/255.0
mean: [0.485, 0.456, 0.406]
std: [0.229, 0.224, 0.225]
order: ''
sampler:
name: DistributedBatchSampler
batch_size: 64
drop_last: False
shuffle: True
name: DistributedBatchSampler
batch_size: 64
drop_last: False
shuffle: True
loader:
num_workers: 6
use_shared_memory: True
num_workers: 4
use_shared_memory: True
Eval:
# TODO: modify to the latest trainer
dataset:
name: ImageNetDataset
image_root: "./dataset/ILSVRC2012/"
cls_label_path: "./dataset/ILSVRC2012/val_list.txt"
transform_ops:
- ResizeImage:
resize_short: 256
- CropImage:
size: 224
- NormalizeImage:
scale: 0.00392157
mean: [0.485, 0.456, 0.406]
std: [0.229, 0.224, 0.225]
order: ''
name: ImageNetDataset
image_root: ./dataset/ILSVRC2012/
cls_label_path: ./dataset/ILSVRC2012/val_list.txt
transform_ops:
- ResizeImage:
resize_short: 256
- CropImage:
size: 224
- NormalizeImage:
scale: 1.0/255.0
mean: [0.485, 0.456, 0.406]
std: [0.229, 0.224, 0.225]
order: ''
sampler:
name: DistributedBatchSampler
batch_size: 64
drop_last: False
shuffle: False
name: DistributedBatchSampler
batch_size: 64
drop_last: False
shuffle: False
loader:
num_workers: 6
use_shared_memory: True
num_workers: 4
use_shared_memory: True
Infer:
infer_imgs: "docs/images/whl/demo.jpg"
infer_imgs: docs/images/whl/demo.jpg
batch_size: 10
transforms:
- DecodeImage:
to_rgb: True
channel_first: False
- ResizeImage:
resize_short: 256
- CropImage:
size: 224
- NormalizeImage:
scale: 1.0/255.0
mean: [0.485, 0.456, 0.406]
std: [0.229, 0.224, 0.225]
order: ''
- ToCHWImage:
- DecodeImage:
to_rgb: True
channel_first: False
- ResizeImage:
resize_short: 256
- CropImage:
size: 224
- NormalizeImage:
scale: 1.0/255.0
mean: [0.485, 0.456, 0.406]
std: [0.229, 0.224, 0.225]
order: ''
- ToCHWImage:
PostProcess:
name: Topk
topk: 5
class_id_map_file: "ppcls/utils/imagenet1k_label_list.txt"
class_id_map_file: ppcls/utils/imagenet1k_label_list.txt
Metric:
Train:
Train:
- TopkAcc:
topk: [1, 5]
Eval:
Eval:
- TopkAcc:
topk: [1, 5]
@@ -2,8 +2,8 @@
Global:
checkpoints: null
pretrained_model: null
output_dir: "./output/"
device: "gpu"
output_dir: ./output/
device: gpu
class_num: 1000
save_interval: 1
eval_during_train: True
@@ -13,11 +13,11 @@ Global:
use_visualdl: False
# used for static mode and model export
image_shape: [3, 224, 224]
save_inference_dir: "./inference"
save_inference_dir: ./inference
# model architecture
Arch:
name: "MobileNetV1_x0_5"
name: MobileNetV1_x0_5
# loss function config for training/eval process
Loss:
@@ -39,87 +39,87 @@ Optimizer:
values: [0.1, 0.01, 0.001, 0.0001]
regularizer:
name: 'L2'
coeff: 0.00003
coeff: 0.0003
# data loader for train and eval
DataLoader:
Train:
dataset:
name: ImageNetDataset
image_root: "./dataset/ILSVRC2012/"
cls_label_path: "./dataset/ILSVRC2012/train_list.txt"
transform_ops:
- RandCropImage:
size: 224
- RandFlipImage:
flip_code: 1
- NormalizeImage:
scale: 0.00392157
mean: [0.485, 0.456, 0.406]
std: [0.229, 0.224, 0.225]
order: ''
name: ImageNetDataset
image_root: ./dataset/ILSVRC2012/
cls_label_path: ./dataset/ILSVRC2012/train_list.txt
transform_ops:
- RandCropImage:
size: 224
- RandFlipImage:
flip_code: 1
- NormalizeImage:
scale: 1.0/255.0
mean: [0.485, 0.456, 0.406]
std: [0.229, 0.224, 0.225]
order: ''
sampler:
name: DistributedBatchSampler
batch_size: 64
drop_last: False
shuffle: True
name: DistributedBatchSampler
batch_size: 64
drop_last: False
shuffle: True
loader:
num_workers: 6
use_shared_memory: True
num_workers: 4
use_shared_memory: True
Eval:
# TODO: modify to the latest trainer
dataset:
name: ImageNetDataset
image_root: "./dataset/ILSVRC2012/"
cls_label_path: "./dataset/ILSVRC2012/val_list.txt"
transform_ops:
- ResizeImage:
resize_short: 256
- CropImage:
size: 224
- NormalizeImage:
scale: 0.00392157
mean: [0.485, 0.456, 0.406]
std: [0.229, 0.224, 0.225]
order: ''
name: ImageNetDataset
image_root: ./dataset/ILSVRC2012/
cls_label_path: ./dataset/ILSVRC2012/val_list.txt
transform_ops:
- ResizeImage:
resize_short: 256
- CropImage:
size: 224
- NormalizeImage:
scale: 1.0/255.0
mean: [0.485, 0.456, 0.406]
std: [0.229, 0.224, 0.225]
order: ''
sampler:
name: DistributedBatchSampler
batch_size: 64
drop_last: False
shuffle: False
name: DistributedBatchSampler
batch_size: 64
drop_last: False
shuffle: False
loader:
num_workers: 6
use_shared_memory: True
num_workers: 4
use_shared_memory: True
Infer:
infer_imgs: "docs/images/whl/demo.jpg"
infer_imgs: docs/images/whl/demo.jpg
batch_size: 10
transforms:
- DecodeImage:
to_rgb: True
channel_first: False
- ResizeImage:
resize_short: 256
- CropImage:
size: 224
- NormalizeImage:
scale: 1.0/255.0
mean: [0.485, 0.456, 0.406]
std: [0.229, 0.224, 0.225]
order: ''
- ToCHWImage:
- DecodeImage:
to_rgb: True
channel_first: False
- ResizeImage:
resize_short: 256
- CropImage:
size: 224
- NormalizeImage:
scale: 1.0/255.0
mean: [0.485, 0.456, 0.406]
std: [0.229, 0.224, 0.225]
order: ''
- ToCHWImage:
PostProcess:
name: Topk
topk: 5
class_id_map_file: "ppcls/utils/imagenet1k_label_list.txt"
class_id_map_file: ppcls/utils/imagenet1k_label_list.txt
Metric:
Train:
Train:
- TopkAcc:
topk: [1, 5]
Eval:
Eval:
- TopkAcc:
topk: [1, 5]
@@ -2,8 +2,8 @@
Global:
checkpoints: null
pretrained_model: null
output_dir: "./output/"
device: "gpu"
output_dir: ./output/
device: gpu
class_num: 1000
save_interval: 1
eval_during_train: True
@@ -13,11 +13,11 @@ Global:
use_visualdl: False
# used for static mode and model export
image_shape: [3, 224, 224]
save_inference_dir: "./inference"
save_inference_dir: ./inference
# model architecture
Arch:
name: "MobileNetV1_x0_75"
name: MobileNetV1_x0_75
# loss function config for training/eval process
Loss:
@@ -39,87 +39,87 @@ Optimizer:
values: [0.1, 0.01, 0.001, 0.0001]
regularizer:
name: 'L2'
coeff: 0.00003
coeff: 0.0003
# data loader for train and eval
DataLoader:
Train:
dataset:
name: ImageNetDataset
image_root: "./dataset/ILSVRC2012/"
cls_label_path: "./dataset/ILSVRC2012/train_list.txt"
transform_ops:
- RandCropImage:
size: 224
- RandFlipImage:
flip_code: 1
- NormalizeImage:
scale: 0.00392157
mean: [0.485, 0.456, 0.406]
std: [0.229, 0.224, 0.225]
order: ''
name: ImageNetDataset
image_root: ./dataset/ILSVRC2012/
cls_label_path: ./dataset/ILSVRC2012/train_list.txt
transform_ops:
- RandCropImage:
size: 224
- RandFlipImage:
flip_code: 1
- NormalizeImage:
scale: 1.0/255.0
mean: [0.485, 0.456, 0.406]
std: [0.229, 0.224, 0.225]
order: ''
sampler:
name: DistributedBatchSampler
batch_size: 64
drop_last: False
shuffle: True
name: DistributedBatchSampler
batch_size: 64
drop_last: False
shuffle: True
loader:
num_workers: 6
use_shared_memory: True
num_workers: 4
use_shared_memory: True
Eval:
# TODO: modify to the latest trainer
dataset:
name: ImageNetDataset
image_root: "./dataset/ILSVRC2012/"
cls_label_path: "./dataset/ILSVRC2012/val_list.txt"
transform_ops:
- ResizeImage:
resize_short: 256
- CropImage:
size: 224
- NormalizeImage:
scale: 0.00392157
mean: [0.485, 0.456, 0.406]
std: [0.229, 0.224, 0.225]
order: ''
name: ImageNetDataset
image_root: ./dataset/ILSVRC2012/
cls_label_path: ./dataset/ILSVRC2012/val_list.txt
transform_ops:
- ResizeImage:
resize_short: 256
- CropImage:
size: 224
- NormalizeImage:
scale: 1.0/255.0
mean: [0.485, 0.456, 0.406]
std: [0.229, 0.224, 0.225]
order: ''
sampler:
name: DistributedBatchSampler
batch_size: 64
drop_last: False
shuffle: False
name: DistributedBatchSampler
batch_size: 64
drop_last: False
shuffle: False
loader:
num_workers: 6
use_shared_memory: True
num_workers: 4
use_shared_memory: True
Infer:
infer_imgs: "docs/images/whl/demo.jpg"
infer_imgs: docs/images/whl/demo.jpg
batch_size: 10
transforms:
- DecodeImage:
to_rgb: True
channel_first: False
- ResizeImage:
resize_short: 256
- CropImage:
size: 224
- NormalizeImage:
scale: 1.0/255.0
mean: [0.485, 0.456, 0.406]
std: [0.229, 0.224, 0.225]
order: ''
- ToCHWImage:
- DecodeImage:
to_rgb: True
channel_first: False
- ResizeImage:
resize_short: 256
- CropImage:
size: 224
- NormalizeImage:
scale: 1.0/255.0
mean: [0.485, 0.456, 0.406]
std: [0.229, 0.224, 0.225]
order: ''
- ToCHWImage:
PostProcess:
name: Topk
topk: 5
class_id_map_file: "ppcls/utils/imagenet1k_label_list.txt"
class_id_map_file: ppcls/utils/imagenet1k_label_list.txt
Metric:
Train:
Train:
- TopkAcc:
topk: [1, 5]
Eval:
Eval:
- TopkAcc:
topk: [1, 5]
@@ -2,8 +2,8 @@
Global:
checkpoints: null
pretrained_model: null
output_dir: "./output/"
device: "gpu"
output_dir: ./output/
device: gpu
class_num: 1000
save_interval: 1
eval_during_train: True
@@ -13,11 +13,11 @@ Global:
use_visualdl: False
# used for static mode and model export
image_shape: [3, 224, 224]
save_inference_dir: "./inference"
save_inference_dir: ./inference
# model architecture
Arch:
name: "MobileNetV3_large_x0_35"
name: MobileNetV3_large_x0_35
# loss function config for training/eval process
Loss:
@@ -45,80 +45,80 @@ Optimizer:
DataLoader:
Train:
dataset:
name: ImageNetDataset
image_root: "./dataset/ILSVRC2012/"
cls_label_path: "./dataset/ILSVRC2012/train_list.txt"
transform_ops:
- RandCropImage:
size: 224
- RandFlipImage:
flip_code: 1
- NormalizeImage:
scale: 0.00392157
mean: [0.485, 0.456, 0.406]
std: [0.229, 0.224, 0.225]
order: ''
name: ImageNetDataset
image_root: ./dataset/ILSVRC2012/
cls_label_path: ./dataset/ILSVRC2012/train_list.txt
transform_ops:
- RandCropImage:
size: 224
- RandFlipImage:
flip_code: 1
- NormalizeImage:
scale: 1.0/255.0
mean: [0.485, 0.456, 0.406]
std: [0.229, 0.224, 0.225]
order: ''
sampler:
name: DistributedBatchSampler
batch_size: 512
drop_last: False
shuffle: True
name: DistributedBatchSampler
batch_size: 512
drop_last: False
shuffle: True
loader:
num_workers: 6
use_shared_memory: True
num_workers: 4
use_shared_memory: True
Eval:
# TODO: modify to the latest trainer
dataset:
name: ImageNetDataset
image_root: "./dataset/ILSVRC2012/"
cls_label_path: "./dataset/ILSVRC2012/val_list.txt"
transform_ops:
- ResizeImage:
resize_short: 256
- CropImage:
size: 224
- NormalizeImage:
scale: 0.00392157
mean: [0.485, 0.456, 0.406]
std: [0.229, 0.224, 0.225]
order: ''
name: ImageNetDataset
image_root: ./dataset/ILSVRC2012/
cls_label_path: ./dataset/ILSVRC2012/val_list.txt
transform_ops:
- ResizeImage:
resize_short: 256
- CropImage:
size: 224
- NormalizeImage:
scale: 1.0/255.0
mean: [0.485, 0.456, 0.406]
std: [0.229, 0.224, 0.225]
order: ''
sampler:
name: DistributedBatchSampler
batch_size: 64
drop_last: False
shuffle: False
name: DistributedBatchSampler
batch_size: 64
drop_last: False
shuffle: False
loader:
num_workers: 6
use_shared_memory: True
num_workers: 4
use_shared_memory: True
Infer:
infer_imgs: "docs/images/whl/demo.jpg"
infer_imgs: docs/images/whl/demo.jpg
batch_size: 10
transforms:
- DecodeImage:
to_rgb: True
channel_first: False
- ResizeImage:
resize_short: 256
- CropImage:
size: 224
- NormalizeImage:
scale: 1.0/255.0
mean: [0.485, 0.456, 0.406]
std: [0.229, 0.224, 0.225]
order: ''
- ToCHWImage:
- DecodeImage:
to_rgb: True
channel_first: False
- ResizeImage:
resize_short: 256
- CropImage:
size: 224
- NormalizeImage:
scale: 1.0/255.0
mean: [0.485, 0.456, 0.406]
std: [0.229, 0.224, 0.225]
order: ''
- ToCHWImage:
PostProcess:
name: Topk
topk: 5
class_id_map_file: "ppcls/utils/imagenet1k_label_list.txt"
class_id_map_file: ppcls/utils/imagenet1k_label_list.txt
Metric:
Train:
Train:
- TopkAcc:
topk: [1, 5]
Eval:
Eval:
- TopkAcc:
topk: [1, 5]
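
Review note: every config post-processes with `Topk` (topk: 5) against `ppcls/utils/imagenet1k_label_list.txt`. The sketch below shows what that amounts to; the `<id> <name>` per-line format of the label file is an assumption, since the file itself is not part of this diff.

```python
# Sketch of the `Topk` post-process: softmax the logits, keep the five
# highest-scoring classes, and map ids to names via the label-list file.
# The "<id> <name>" per-line file format is an assumption.
import numpy as np

def load_label_map(path):
    with open(path, encoding="utf-8") as f:
        return dict(line.strip().split(" ", 1) for line in f if line.strip())

def topk(logits, label_map, k=5):
    probs = np.exp(logits - logits.max())
    probs /= probs.sum()
    ids = np.argsort(probs)[::-1][:k]
    return [(int(i), float(probs[i]), label_map.get(str(i), str(i))) for i in ids]

# label_map = load_label_map("ppcls/utils/imagenet1k_label_list.txt")
# print(topk(np.random.randn(1000), label_map))
```
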
@@ -2,8 +2,8 @@
Global:
checkpoints: null
pretrained_model: null
output_dir: "./output/"
device: "gpu"
output_dir: ./output/
device: gpu
class_num: 1000
save_interval: 1
eval_during_train: True
@@ -13,11 +13,11 @@ Global:
use_visualdl: False
# used for static mode and model export
image_shape: [3, 224, 224]
save_inference_dir: "./inference"
save_inference_dir: ./inference
# model architecture
Arch:
name: "MobileNetV3_large_x0_5"
name: MobileNetV3_large_x0_5
# loss function config for training/eval process
Loss:
@@ -45,80 +45,80 @@ Optimizer:
DataLoader:
Train:
dataset:
name: ImageNetDataset
image_root: "./dataset/ILSVRC2012/"
cls_label_path: "./dataset/ILSVRC2012/train_list.txt"
transform_ops:
- RandCropImage:
size: 224
- RandFlipImage:
flip_code: 1
- NormalizeImage:
scale: 0.00392157
mean: [0.485, 0.456, 0.406]
std: [0.229, 0.224, 0.225]
order: ''
name: ImageNetDataset
image_root: ./dataset/ILSVRC2012/
cls_label_path: ./dataset/ILSVRC2012/train_list.txt
transform_ops:
- RandCropImage:
size: 224
- RandFlipImage:
flip_code: 1
- NormalizeImage:
scale: 1.0/255.0
mean: [0.485, 0.456, 0.406]
std: [0.229, 0.224, 0.225]
order: ''
sampler:
name: DistributedBatchSampler
batch_size: 512
drop_last: False
shuffle: True
name: DistributedBatchSampler
batch_size: 512
drop_last: False
shuffle: True
loader:
num_workers: 6
use_shared_memory: True
num_workers: 4
use_shared_memory: True
Eval:
# TODO: modify to the latest trainer
dataset:
name: ImageNetDataset
image_root: "./dataset/ILSVRC2012/"
cls_label_path: "./dataset/ILSVRC2012/val_list.txt"
transform_ops:
- ResizeImage:
resize_short: 256
- CropImage:
size: 224
- NormalizeImage:
scale: 0.00392157
mean: [0.485, 0.456, 0.406]
std: [0.229, 0.224, 0.225]
order: ''
name: ImageNetDataset
image_root: ./dataset/ILSVRC2012/
cls_label_path: ./dataset/ILSVRC2012/val_list.txt
transform_ops:
- ResizeImage:
resize_short: 256
- CropImage:
size: 224
- NormalizeImage:
scale: 1.0/255.0
mean: [0.485, 0.456, 0.406]
std: [0.229, 0.224, 0.225]
order: ''
sampler:
name: DistributedBatchSampler
batch_size: 64
drop_last: False
shuffle: False
name: DistributedBatchSampler
batch_size: 64
drop_last: False
shuffle: False
loader:
num_workers: 6
use_shared_memory: True
num_workers: 4
use_shared_memory: True
Infer:
infer_imgs: "docs/images/whl/demo.jpg"
infer_imgs: docs/images/whl/demo.jpg
batch_size: 10
transforms:
- DecodeImage:
to_rgb: True
channel_first: False
- ResizeImage:
resize_short: 256
- CropImage:
size: 224
- NormalizeImage:
scale: 1.0/255.0
mean: [0.485, 0.456, 0.406]
std: [0.229, 0.224, 0.225]
order: ''
- ToCHWImage:
- DecodeImage:
to_rgb: True
channel_first: False
- ResizeImage:
resize_short: 256
- CropImage:
size: 224
- NormalizeImage:
scale: 1.0/255.0
mean: [0.485, 0.456, 0.406]
std: [0.229, 0.224, 0.225]
order: ''
- ToCHWImage:
PostProcess:
name: Topk
topk: 5
class_id_map_file: "ppcls/utils/imagenet1k_label_list.txt"
class_id_map_file: ppcls/utils/imagenet1k_label_list.txt
Metric:
Train:
Train:
- TopkAcc:
topk: [1, 5]
Eval:
Eval:
- TopkAcc:
topk: [1, 5]
@@ -2,8 +2,8 @@
Global:
checkpoints: null
pretrained_model: null
output_dir: "./output/"
device: "gpu"
output_dir: ./output/
device: gpu
class_num: 1000
save_interval: 1
eval_during_train: True
@@ -13,11 +13,11 @@ Global:
use_visualdl: False
# used for static mode and model export
image_shape: [3, 224, 224]
save_inference_dir: "./inference"
save_inference_dir: ./inference
# model architecture
Arch:
name: "MobileNetV3_large_x0_75"
name: MobileNetV3_large_x0_75
# loss function config for training/eval process
Loss:
@@ -45,80 +45,80 @@ Optimizer:
DataLoader:
Train:
dataset:
name: ImageNetDataset
image_root: "./dataset/ILSVRC2012/"
cls_label_path: "./dataset/ILSVRC2012/train_list.txt"
transform_ops:
- RandCropImage:
size: 224
- RandFlipImage:
flip_code: 1
- NormalizeImage:
scale: 0.00392157
mean: [0.485, 0.456, 0.406]
std: [0.229, 0.224, 0.225]
order: ''
name: ImageNetDataset
image_root: ./dataset/ILSVRC2012/
cls_label_path: ./dataset/ILSVRC2012/train_list.txt
transform_ops:
- RandCropImage:
size: 224
- RandFlipImage:
flip_code: 1
- NormalizeImage:
scale: 1.0/255.0
mean: [0.485, 0.456, 0.406]
std: [0.229, 0.224, 0.225]
order: ''
sampler:
name: DistributedBatchSampler
batch_size: 512
drop_last: False
shuffle: True
name: DistributedBatchSampler
batch_size: 512
drop_last: False
shuffle: True
loader:
num_workers: 6
use_shared_memory: True
num_workers: 4
use_shared_memory: True
Eval:
# TODO: modify to the latest trainer
dataset:
name: ImageNetDataset
image_root: "./dataset/ILSVRC2012/"
cls_label_path: "./dataset/ILSVRC2012/val_list.txt"
transform_ops:
- ResizeImage:
resize_short: 256
- CropImage:
size: 224
- NormalizeImage:
scale: 0.00392157
mean: [0.485, 0.456, 0.406]
std: [0.229, 0.224, 0.225]
order: ''
name: ImageNetDataset
image_root: ./dataset/ILSVRC2012/
cls_label_path: ./dataset/ILSVRC2012/val_list.txt
transform_ops:
- ResizeImage:
resize_short: 256
- CropImage:
size: 224
- NormalizeImage:
scale: 1.0/255.0
mean: [0.485, 0.456, 0.406]
std: [0.229, 0.224, 0.225]
order: ''
sampler:
name: DistributedBatchSampler
batch_size: 64
drop_last: False
shuffle: False
name: DistributedBatchSampler
batch_size: 64
drop_last: False
shuffle: False
loader:
num_workers: 6
use_shared_memory: True
num_workers: 4
use_shared_memory: True
Infer:
infer_imgs: "docs/images/whl/demo.jpg"
infer_imgs: docs/images/whl/demo.jpg
batch_size: 10
transforms:
- DecodeImage:
to_rgb: True
channel_first: False
- ResizeImage:
resize_short: 256
- CropImage:
size: 224
- NormalizeImage:
scale: 1.0/255.0
mean: [0.485, 0.456, 0.406]
std: [0.229, 0.224, 0.225]
order: ''
- ToCHWImage:
- DecodeImage:
to_rgb: True
channel_first: False
- ResizeImage:
resize_short: 256
- CropImage:
size: 224
- NormalizeImage:
scale: 1.0/255.0
mean: [0.485, 0.456, 0.406]
std: [0.229, 0.224, 0.225]
order: ''
- ToCHWImage:
PostProcess:
name: Topk
topk: 5
class_id_map_file: "ppcls/utils/imagenet1k_label_list.txt"
class_id_map_file: ppcls/utils/imagenet1k_label_list.txt
Metric:
Train:
Train:
- TopkAcc:
topk: [1, 5]
Eval:
Eval:
- TopkAcc:
topk: [1, 5]
@@ -2,8 +2,8 @@
Global:
checkpoints: null
pretrained_model: null
output_dir: "./output/"
device: "gpu"
output_dir: ./output/
device: gpu
class_num: 1000
save_interval: 1
eval_during_train: True
@@ -13,11 +13,11 @@ Global:
use_visualdl: False
# used for static mode and model export
image_shape: [3, 224, 224]
save_inference_dir: "./inference"
save_inference_dir: ./inference
# model architecture
Arch:
name: "MobileNetV3_large_x1_0"
name: MobileNetV3_large_x1_0
# loss function config for training/eval process
Loss:
@@ -45,81 +45,81 @@ Optimizer:
DataLoader:
Train:
dataset:
name: ImageNetDataset
image_root: "./dataset/ILSVRC2012/"
cls_label_path: "./dataset/ILSVRC2012/train_list.txt"
transform_ops:
- RandCropImage:
size: 224
- RandFlipImage:
flip_code: 1
- AutoAugment:
- NormalizeImage:
scale: 0.00392157
mean: [0.485, 0.456, 0.406]
std: [0.229, 0.224, 0.225]
order: ''
name: ImageNetDataset
image_root: ./dataset/ILSVRC2012/
cls_label_path: ./dataset/ILSVRC2012/train_list.txt
transform_ops:
- RandCropImage:
size: 224
- RandFlipImage:
flip_code: 1
- AutoAugment:
- NormalizeImage:
scale: 1.0/255.0
mean: [0.485, 0.456, 0.406]
std: [0.229, 0.224, 0.225]
order: ''
sampler:
name: DistributedBatchSampler
batch_size: 512
drop_last: False
shuffle: True
name: DistributedBatchSampler
batch_size: 512
drop_last: False
shuffle: True
loader:
num_workers: 6
use_shared_memory: True
num_workers: 4
use_shared_memory: True
Eval:
# TODO: modify to the latest trainer
dataset:
name: ImageNetDataset
image_root: "./dataset/ILSVRC2012/"
cls_label_path: "./dataset/ILSVRC2012/val_list.txt"
transform_ops:
- ResizeImage:
resize_short: 256
- CropImage:
size: 224
- NormalizeImage:
scale: 0.00392157
mean: [0.485, 0.456, 0.406]
std: [0.229, 0.224, 0.225]
order: ''
name: ImageNetDataset
image_root: ./dataset/ILSVRC2012/
cls_label_path: ./dataset/ILSVRC2012/val_list.txt
transform_ops:
- ResizeImage:
resize_short: 256
- CropImage:
size: 224
- NormalizeImage:
scale: 1.0/255.0
mean: [0.485, 0.456, 0.406]
std: [0.229, 0.224, 0.225]
order: ''
sampler:
name: DistributedBatchSampler
batch_size: 64
drop_last: False
shuffle: False
name: DistributedBatchSampler
batch_size: 64
drop_last: False
shuffle: False
loader:
num_workers: 6
use_shared_memory: True
num_workers: 4
use_shared_memory: True
Infer:
infer_imgs: "docs/images/whl/demo.jpg"
infer_imgs: docs/images/whl/demo.jpg
batch_size: 10
transforms:
- DecodeImage:
to_rgb: True
channel_first: False
- ResizeImage:
resize_short: 256
- CropImage:
size: 224
- NormalizeImage:
scale: 1.0/255.0
mean: [0.485, 0.456, 0.406]
std: [0.229, 0.224, 0.225]
order: ''
- ToCHWImage:
- DecodeImage:
to_rgb: True
channel_first: False
- ResizeImage:
resize_short: 256
- CropImage:
size: 224
- NormalizeImage:
scale: 1.0/255.0
mean: [0.485, 0.456, 0.406]
std: [0.229, 0.224, 0.225]
order: ''
- ToCHWImage:
PostProcess:
name: Topk
topk: 5
class_id_map_file: "ppcls/utils/imagenet1k_label_list.txt"
class_id_map_file: ppcls/utils/imagenet1k_label_list.txt
Metric:
Train:
Train:
- TopkAcc:
topk: [1, 5]
Eval:
Eval:
- TopkAcc:
topk: [1, 5]
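
Review note: both metric sections in these configs use `TopkAcc` with `topk: [1, 5]`. A minimal sketch of that computation over a batch of scores and integer labels:

```python
# Sketch of the `TopkAcc` metric with topk: [1, 5]: the fraction of samples
# whose ground-truth label is among the k highest-scoring classes.
import numpy as np

def topk_acc(scores, labels, ks=(1, 5)):
    order = np.argsort(scores, axis=1)[:, ::-1]   # class ids sorted by score, descending
    return {k: float((order[:, :k] == labels[:, None]).any(axis=1).mean()) for k in ks}

scores = np.random.rand(8, 1000)                  # (batch, num_classes)
labels = np.random.randint(0, 1000, size=8)       # (batch,)
print(topk_acc(scores, labels))                   # e.g. {1: 0.0, 5: 0.125}
```
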
@@ -2,8 +2,8 @@
Global:
checkpoints: null
pretrained_model: null
output_dir: "./output/"
device: "gpu"
output_dir: ./output/
device: gpu
class_num: 1000
save_interval: 1
eval_during_train: True
@@ -13,11 +13,11 @@ Global:
use_visualdl: False
# used for static mode and model export
image_shape: [3, 224, 224]
save_inference_dir: "./inference"
save_inference_dir: ./inference
# model architecture
Arch:
name: "MobileNetV3_large_x1_25"
name: MobileNetV3_large_x1_25
# loss function config for training/eval process
Loss:
@@ -38,87 +38,87 @@ Optimizer:
learning_rate: 1.3
regularizer:
name: 'L2'
coeff: 0.00002
coeff: 0.00004
# data loader for train and eval
DataLoader:
Train:
dataset:
name: ImageNetDataset
image_root: "./dataset/ILSVRC2012/"
cls_label_path: "./dataset/ILSVRC2012/train_list.txt"
transform_ops:
- RandCropImage:
size: 224
- RandFlipImage:
flip_code: 1
- NormalizeImage:
scale: 0.00392157
mean: [0.485, 0.456, 0.406]
std: [0.229, 0.224, 0.225]
order: ''
name: ImageNetDataset
image_root: ./dataset/ILSVRC2012/
cls_label_path: ./dataset/ILSVRC2012/train_list.txt
transform_ops:
- RandCropImage:
size: 224
- RandFlipImage:
flip_code: 1
- NormalizeImage:
scale: 1.0/255.0
mean: [0.485, 0.456, 0.406]
std: [0.229, 0.224, 0.225]
order: ''
sampler:
name: DistributedBatchSampler
batch_size: 512
drop_last: False
shuffle: True
name: DistributedBatchSampler
batch_size: 512
drop_last: False
shuffle: True
loader:
num_workers: 6
use_shared_memory: True
num_workers: 4
use_shared_memory: True
Eval:
# TODO: modify to the latest trainer
dataset:
name: ImageNetDataset
image_root: "./dataset/ILSVRC2012/"
cls_label_path: "./dataset/ILSVRC2012/val_list.txt"
transform_ops:
- ResizeImage:
resize_short: 256
- CropImage:
size: 224
- NormalizeImage:
scale: 0.00392157
mean: [0.485, 0.456, 0.406]
std: [0.229, 0.224, 0.225]
order: ''
name: ImageNetDataset
image_root: ./dataset/ILSVRC2012/
cls_label_path: ./dataset/ILSVRC2012/val_list.txt
transform_ops:
- ResizeImage:
resize_short: 256
- CropImage:
size: 224
- NormalizeImage:
scale: 1.0/255.0
mean: [0.485, 0.456, 0.406]
std: [0.229, 0.224, 0.225]
order: ''
sampler:
name: DistributedBatchSampler
batch_size: 64
drop_last: False
shuffle: False
name: DistributedBatchSampler
batch_size: 64
drop_last: False
shuffle: False
loader:
num_workers: 6
use_shared_memory: True
num_workers: 4
use_shared_memory: True
Infer:
infer_imgs: "docs/images/whl/demo.jpg"
infer_imgs: docs/images/whl/demo.jpg
batch_size: 10
transforms:
- DecodeImage:
to_rgb: True
channel_first: False
- ResizeImage:
resize_short: 256
- CropImage:
size: 224
- NormalizeImage:
scale: 1.0/255.0
mean: [0.485, 0.456, 0.406]
std: [0.229, 0.224, 0.225]
order: ''
- ToCHWImage:
- DecodeImage:
to_rgb: True
channel_first: False
- ResizeImage:
resize_short: 256
- CropImage:
size: 224
- NormalizeImage:
scale: 1.0/255.0
mean: [0.485, 0.456, 0.406]
std: [0.229, 0.224, 0.225]
order: ''
- ToCHWImage:
PostProcess:
name: Topk
topk: 5
class_id_map_file: "ppcls/utils/imagenet1k_label_list.txt"
class_id_map_file: ppcls/utils/imagenet1k_label_list.txt
Metric:
Train:
Train:
- TopkAcc:
topk: [1, 5]
Eval:
Eval:
- TopkAcc:
topk: [1, 5]
@@ -2,8 +2,8 @@
Global:
checkpoints: null
pretrained_model: null
output_dir: "./output/"
device: "gpu"
output_dir: ./output/
device: gpu
class_num: 1000
save_interval: 1
eval_during_train: True
@@ -13,11 +13,11 @@ Global:
use_visualdl: False
# used for static mode and model export
image_shape: [3, 224, 224]
save_inference_dir: "./inference"
save_inference_dir: ./inference
# model architecture
Arch:
name: "MobileNetV3_small_x0_35"
name: MobileNetV3_small_x0_35
# loss function config for training/eval process
Loss:
@@ -38,87 +38,87 @@ Optimizer:
learning_rate: 1.3
regularizer:
name: 'L2'
coeff: 0.00002
coeff: 0.00001
# data loader for train and eval
DataLoader:
Train:
dataset:
name: ImageNetDataset
image_root: "./dataset/ILSVRC2012/"
cls_label_path: "./dataset/ILSVRC2012/train_list.txt"
transform_ops:
- RandCropImage:
size: 224
- RandFlipImage:
flip_code: 1
- NormalizeImage:
scale: 0.00392157
mean: [0.485, 0.456, 0.406]
std: [0.229, 0.224, 0.225]
order: ''
name: ImageNetDataset
image_root: ./dataset/ILSVRC2012/
cls_label_path: ./dataset/ILSVRC2012/train_list.txt
transform_ops:
- RandCropImage:
size: 224
- RandFlipImage:
flip_code: 1
- NormalizeImage:
scale: 1.0/255.0
mean: [0.485, 0.456, 0.406]
std: [0.229, 0.224, 0.225]
order: ''
sampler:
name: DistributedBatchSampler
batch_size: 512
drop_last: False
shuffle: True
name: DistributedBatchSampler
batch_size: 512
drop_last: False
shuffle: True
loader:
num_workers: 6
use_shared_memory: True
num_workers: 4
use_shared_memory: True
Eval:
# TODO: modify to the latest trainer
dataset:
name: ImageNetDataset
image_root: "./dataset/ILSVRC2012/"
cls_label_path: "./dataset/ILSVRC2012/val_list.txt"
transform_ops:
- ResizeImage:
resize_short: 256
- CropImage:
size: 224
- NormalizeImage:
scale: 0.00392157
mean: [0.485, 0.456, 0.406]
std: [0.229, 0.224, 0.225]
order: ''
name: ImageNetDataset
image_root: ./dataset/ILSVRC2012/
cls_label_path: ./dataset/ILSVRC2012/val_list.txt
transform_ops:
- ResizeImage:
resize_short: 256
- CropImage:
size: 224
- NormalizeImage:
scale: 1.0/255.0
mean: [0.485, 0.456, 0.406]
std: [0.229, 0.224, 0.225]
order: ''
sampler:
name: DistributedBatchSampler
batch_size: 64
drop_last: False
shuffle: False
name: DistributedBatchSampler
batch_size: 64
drop_last: False
shuffle: False
loader:
num_workers: 6
use_shared_memory: True
num_workers: 4
use_shared_memory: True
Infer:
infer_imgs: "docs/images/whl/demo.jpg"
infer_imgs: docs/images/whl/demo.jpg
batch_size: 10
transforms:
- DecodeImage:
to_rgb: True
channel_first: False
- ResizeImage:
resize_short: 256
- CropImage:
size: 224
- NormalizeImage:
scale: 1.0/255.0
mean: [0.485, 0.456, 0.406]
std: [0.229, 0.224, 0.225]
order: ''
- ToCHWImage:
- DecodeImage:
to_rgb: True
channel_first: False
- ResizeImage:
resize_short: 256
- CropImage:
size: 224
- NormalizeImage:
scale: 1.0/255.0
mean: [0.485, 0.456, 0.406]
std: [0.229, 0.224, 0.225]
order: ''
- ToCHWImage:
PostProcess:
name: Topk
topk: 5
class_id_map_file: "ppcls/utils/imagenet1k_label_list.txt"
class_id_map_file: ppcls/utils/imagenet1k_label_list.txt
Metric:
Train:
Train:
- TopkAcc:
topk: [1, 5]
Eval:
Eval:
- TopkAcc:
topk: [1, 5]
@@ -2,8 +2,8 @@
Global:
checkpoints: null
pretrained_model: null
output_dir: "./output/"
device: "gpu"
output_dir: ./output/
device: gpu
class_num: 1000
save_interval: 1
eval_during_train: True
@@ -13,11 +13,11 @@ Global:
use_visualdl: False
# used for static mode and model export
image_shape: [3, 224, 224]
save_inference_dir: "./inference"
save_inference_dir: ./inference
# model architecture
Arch:
name: "MobileNetV3_small_x0_5"
name: MobileNetV3_small_x0_5
# loss function config for training/eval process
Loss:
@@ -38,87 +38,87 @@ Optimizer:
learning_rate: 1.3
regularizer:
name: 'L2'
coeff: 0.00002
coeff: 0.00001
# data loader for train and eval
DataLoader:
Train:
dataset:
name: ImageNetDataset
image_root: "./dataset/ILSVRC2012/"
cls_label_path: "./dataset/ILSVRC2012/train_list.txt"
transform_ops:
- RandCropImage:
size: 224
- RandFlipImage:
flip_code: 1
- NormalizeImage:
scale: 0.00392157
mean: [0.485, 0.456, 0.406]
std: [0.229, 0.224, 0.225]
order: ''
name: ImageNetDataset
image_root: ./dataset/ILSVRC2012/
cls_label_path: ./dataset/ILSVRC2012/train_list.txt
transform_ops:
- RandCropImage:
size: 224
- RandFlipImage:
flip_code: 1
- NormalizeImage:
scale: 1.0/255.0
mean: [0.485, 0.456, 0.406]
std: [0.229, 0.224, 0.225]
order: ''
sampler:
name: DistributedBatchSampler
batch_size: 512
drop_last: False
shuffle: True
name: DistributedBatchSampler
batch_size: 512
drop_last: False
shuffle: True
loader:
num_workers: 6
use_shared_memory: True
num_workers: 4
use_shared_memory: True
Eval:
# TODO: modify to the latest trainer
dataset:
name: ImageNetDataset
image_root: "./dataset/ILSVRC2012/"
cls_label_path: "./dataset/ILSVRC2012/val_list.txt"
transform_ops:
- ResizeImage:
resize_short: 256
- CropImage:
size: 224
- NormalizeImage:
scale: 0.00392157
mean: [0.485, 0.456, 0.406]
std: [0.229, 0.224, 0.225]
order: ''
name: ImageNetDataset
image_root: ./dataset/ILSVRC2012/
cls_label_path: ./dataset/ILSVRC2012/val_list.txt
transform_ops:
- ResizeImage:
resize_short: 256
- CropImage:
size: 224
- NormalizeImage:
scale: 1.0/255.0
mean: [0.485, 0.456, 0.406]
std: [0.229, 0.224, 0.225]
order: ''
sampler:
name: DistributedBatchSampler
batch_size: 64
drop_last: False
shuffle: False
name: DistributedBatchSampler
batch_size: 64
drop_last: False
shuffle: False
loader:
num_workers: 6
use_shared_memory: True
num_workers: 4
use_shared_memory: True
Infer:
infer_imgs: "docs/images/whl/demo.jpg"
infer_imgs: docs/images/whl/demo.jpg
batch_size: 10
transforms:
- DecodeImage:
to_rgb: True
channel_first: False
- ResizeImage:
resize_short: 256
- CropImage:
size: 224
- NormalizeImage:
scale: 1.0/255.0
mean: [0.485, 0.456, 0.406]
std: [0.229, 0.224, 0.225]
order: ''
- ToCHWImage:
- DecodeImage:
to_rgb: True
channel_first: False
- ResizeImage:
resize_short: 256
- CropImage:
size: 224
- NormalizeImage:
scale: 1.0/255.0
mean: [0.485, 0.456, 0.406]
std: [0.229, 0.224, 0.225]
order: ''
- ToCHWImage:
PostProcess:
name: Topk
topk: 5
class_id_map_file: "ppcls/utils/imagenet1k_label_list.txt"
class_id_map_file: ppcls/utils/imagenet1k_label_list.txt
Metric:
Train:
Train:
- TopkAcc:
topk: [1, 5]
Eval:
Eval:
- TopkAcc:
topk: [1, 5]
......@@ -2,8 +2,8 @@
Global:
checkpoints: null
pretrained_model: null
output_dir: "./output/"
device: "gpu"
output_dir: ./output/
device: gpu
class_num: 1000
save_interval: 1
eval_during_train: True
......@@ -13,11 +13,11 @@ Global:
use_visualdl: False
# used for static mode and model export
image_shape: [3, 224, 224]
save_inference_dir: "./inference"
save_inference_dir: ./inference
# model architecture
Arch:
name: "MobileNetV3_small_x0_75"
name: MobileNetV3_small_x0_75
# loss function config for training/eval process
Loss:
......@@ -45,80 +45,80 @@ Optimizer:
DataLoader:
Train:
dataset:
name: ImageNetDataset
image_root: "./dataset/ILSVRC2012/"
cls_label_path: "./dataset/ILSVRC2012/train_list.txt"
transform_ops:
- RandCropImage:
size: 224
- RandFlipImage:
flip_code: 1
- NormalizeImage:
scale: 0.00392157
mean: [0.485, 0.456, 0.406]
std: [0.229, 0.224, 0.225]
order: ''
name: ImageNetDataset
image_root: ./dataset/ILSVRC2012/
cls_label_path: ./dataset/ILSVRC2012/train_list.txt
transform_ops:
- RandCropImage:
size: 224
- RandFlipImage:
flip_code: 1
- NormalizeImage:
scale: 1.0/255.0
mean: [0.485, 0.456, 0.406]
std: [0.229, 0.224, 0.225]
order: ''
sampler:
name: DistributedBatchSampler
batch_size: 512
drop_last: False
shuffle: True
name: DistributedBatchSampler
batch_size: 512
drop_last: False
shuffle: True
loader:
num_workers: 6
use_shared_memory: True
num_workers: 4
use_shared_memory: True
Eval:
# TODO: modify to the latest trainer
dataset:
name: ImageNetDataset
image_root: "./dataset/ILSVRC2012/"
cls_label_path: "./dataset/ILSVRC2012/val_list.txt"
transform_ops:
- ResizeImage:
resize_short: 256
- CropImage:
size: 224
- NormalizeImage:
scale: 0.00392157
mean: [0.485, 0.456, 0.406]
std: [0.229, 0.224, 0.225]
order: ''
name: ImageNetDataset
image_root: ./dataset/ILSVRC2012/
cls_label_path: ./dataset/ILSVRC2012/val_list.txt
transform_ops:
- ResizeImage:
resize_short: 256
- CropImage:
size: 224
- NormalizeImage:
scale: 1.0/255.0
mean: [0.485, 0.456, 0.406]
std: [0.229, 0.224, 0.225]
order: ''
sampler:
name: DistributedBatchSampler
batch_size: 64
drop_last: False
shuffle: False
name: DistributedBatchSampler
batch_size: 64
drop_last: False
shuffle: False
loader:
num_workers: 6
use_shared_memory: True
num_workers: 4
use_shared_memory: True
Infer:
infer_imgs: "docs/images/whl/demo.jpg"
infer_imgs: docs/images/whl/demo.jpg
batch_size: 10
transforms:
- DecodeImage:
to_rgb: True
channel_first: False
- ResizeImage:
resize_short: 256
- CropImage:
size: 224
- NormalizeImage:
scale: 1.0/255.0
mean: [0.485, 0.456, 0.406]
std: [0.229, 0.224, 0.225]
order: ''
- ToCHWImage:
- DecodeImage:
to_rgb: True
channel_first: False
- ResizeImage:
resize_short: 256
- CropImage:
size: 224
- NormalizeImage:
scale: 1.0/255.0
mean: [0.485, 0.456, 0.406]
std: [0.229, 0.224, 0.225]
order: ''
- ToCHWImage:
PostProcess:
name: Topk
topk: 5
class_id_map_file: "ppcls/utils/imagenet1k_label_list.txt"
class_id_map_file: ppcls/utils/imagenet1k_label_list.txt
Metric:
Train:
Train:
- TopkAcc:
topk: [1, 5]
Eval:
Eval:
- TopkAcc:
topk: [1, 5]
......@@ -2,8 +2,8 @@
Global:
checkpoints: null
pretrained_model: null
output_dir: "./output/"
device: "gpu"
output_dir: ./output/
device: gpu
class_num: 1000
save_interval: 1
eval_during_train: True
......@@ -13,11 +13,11 @@ Global:
use_visualdl: False
# used for static mode and model export
image_shape: [3, 224, 224]
save_inference_dir: "./inference"
save_inference_dir: ./inference
# model architecture
Arch:
name: "MobileNetV3_small_x1_0"
name: MobileNetV3_small_x1_0
# loss function config for training/eval process
Loss:
......@@ -45,80 +45,80 @@ Optimizer:
DataLoader:
Train:
dataset:
name: ImageNetDataset
image_root: "./dataset/ILSVRC2012/"
cls_label_path: "./dataset/ILSVRC2012/train_list.txt"
transform_ops:
- RandCropImage:
size: 224
- RandFlipImage:
flip_code: 1
- NormalizeImage:
scale: 0.00392157
mean: [0.485, 0.456, 0.406]
std: [0.229, 0.224, 0.225]
order: ''
name: ImageNetDataset
image_root: ./dataset/ILSVRC2012/
cls_label_path: ./dataset/ILSVRC2012/train_list.txt
transform_ops:
- RandCropImage:
size: 224
- RandFlipImage:
flip_code: 1
- NormalizeImage:
scale: 1.0/255.0
mean: [0.485, 0.456, 0.406]
std: [0.229, 0.224, 0.225]
order: ''
sampler:
name: DistributedBatchSampler
batch_size: 512
drop_last: False
shuffle: True
name: DistributedBatchSampler
batch_size: 512
drop_last: False
shuffle: True
loader:
num_workers: 6
use_shared_memory: True
num_workers: 4
use_shared_memory: True
Eval:
# TODO: modify to the latest trainer
dataset:
name: ImageNetDataset
image_root: "./dataset/ILSVRC2012/"
cls_label_path: "./dataset/ILSVRC2012/val_list.txt"
transform_ops:
- ResizeImage:
resize_short: 256
- CropImage:
size: 224
- NormalizeImage:
scale: 0.00392157
mean: [0.485, 0.456, 0.406]
std: [0.229, 0.224, 0.225]
order: ''
name: ImageNetDataset
image_root: ./dataset/ILSVRC2012/
cls_label_path: ./dataset/ILSVRC2012/val_list.txt
transform_ops:
- ResizeImage:
resize_short: 256
- CropImage:
size: 224
- NormalizeImage:
scale: 1.0/255.0
mean: [0.485, 0.456, 0.406]
std: [0.229, 0.224, 0.225]
order: ''
sampler:
name: DistributedBatchSampler
batch_size: 64
drop_last: False
shuffle: False
name: DistributedBatchSampler
batch_size: 64
drop_last: False
shuffle: False
loader:
num_workers: 6
use_shared_memory: True
num_workers: 4
use_shared_memory: True
Infer:
infer_imgs: "docs/images/whl/demo.jpg"
infer_imgs: docs/images/whl/demo.jpg
batch_size: 10
transforms:
- DecodeImage:
to_rgb: True
channel_first: False
- ResizeImage:
resize_short: 256
- CropImage:
size: 224
- NormalizeImage:
scale: 1.0/255.0
mean: [0.485, 0.456, 0.406]
std: [0.229, 0.224, 0.225]
order: ''
- ToCHWImage:
- DecodeImage:
to_rgb: True
channel_first: False
- ResizeImage:
resize_short: 256
- CropImage:
size: 224
- NormalizeImage:
scale: 1.0/255.0
mean: [0.485, 0.456, 0.406]
std: [0.229, 0.224, 0.225]
order: ''
- ToCHWImage:
PostProcess:
name: Topk
topk: 5
class_id_map_file: "ppcls/utils/imagenet1k_label_list.txt"
class_id_map_file: ppcls/utils/imagenet1k_label_list.txt
Metric:
Train:
Train:
- TopkAcc:
topk: [1, 5]
Eval:
Eval:
- TopkAcc:
topk: [1, 5]
......@@ -2,8 +2,8 @@
Global:
checkpoints: null
pretrained_model: null
output_dir: "./output/"
device: "gpu"
output_dir: ./output/
device: gpu
class_num: 1000
save_interval: 1
eval_during_train: True
......@@ -13,11 +13,11 @@ Global:
use_visualdl: False
# used for static mode and model export
image_shape: [3, 224, 224]
save_inference_dir: "./inference"
save_inference_dir: ./inference
# model architecture
Arch:
name: "MobileNetV3_small_x1_25"
name: MobileNetV3_small_x1_25
# loss function config for training/eval process
Loss:
......@@ -45,80 +45,80 @@ Optimizer:
DataLoader:
Train:
dataset:
name: ImageNetDataset
image_root: "./dataset/ILSVRC2012/"
cls_label_path: "./dataset/ILSVRC2012/train_list.txt"
transform_ops:
- RandCropImage:
size: 224
- RandFlipImage:
flip_code: 1
- NormalizeImage:
scale: 0.00392157
mean: [0.485, 0.456, 0.406]
std: [0.229, 0.224, 0.225]
order: ''
name: ImageNetDataset
image_root: ./dataset/ILSVRC2012/
cls_label_path: ./dataset/ILSVRC2012/train_list.txt
transform_ops:
- RandCropImage:
size: 224
- RandFlipImage:
flip_code: 1
- NormalizeImage:
scale: 1.0/255.0
mean: [0.485, 0.456, 0.406]
std: [0.229, 0.224, 0.225]
order: ''
sampler:
name: DistributedBatchSampler
batch_size: 512
drop_last: False
shuffle: True
name: DistributedBatchSampler
batch_size: 512
drop_last: False
shuffle: True
loader:
num_workers: 6
use_shared_memory: True
num_workers: 4
use_shared_memory: True
Eval:
# TODO: modify to the latest trainer
dataset:
name: ImageNetDataset
image_root: "./dataset/ILSVRC2012/"
cls_label_path: "./dataset/ILSVRC2012/val_list.txt"
transform_ops:
- ResizeImage:
resize_short: 256
- CropImage:
size: 224
- NormalizeImage:
scale: 0.00392157
mean: [0.485, 0.456, 0.406]
std: [0.229, 0.224, 0.225]
order: ''
name: ImageNetDataset
image_root: ./dataset/ILSVRC2012/
cls_label_path: ./dataset/ILSVRC2012/val_list.txt
transform_ops:
- ResizeImage:
resize_short: 256
- CropImage:
size: 224
- NormalizeImage:
scale: 1.0/255.0
mean: [0.485, 0.456, 0.406]
std: [0.229, 0.224, 0.225]
order: ''
sampler:
name: DistributedBatchSampler
batch_size: 64
drop_last: False
shuffle: False
name: DistributedBatchSampler
batch_size: 64
drop_last: False
shuffle: False
loader:
num_workers: 6
use_shared_memory: True
num_workers: 4
use_shared_memory: True
Infer:
infer_imgs: "docs/images/whl/demo.jpg"
infer_imgs: docs/images/whl/demo.jpg
batch_size: 10
transforms:
- DecodeImage:
to_rgb: True
channel_first: False
- ResizeImage:
resize_short: 256
- CropImage:
size: 224
- NormalizeImage:
scale: 1.0/255.0
mean: [0.485, 0.456, 0.406]
std: [0.229, 0.224, 0.225]
order: ''
- ToCHWImage:
- DecodeImage:
to_rgb: True
channel_first: False
- ResizeImage:
resize_short: 256
- CropImage:
size: 224
- NormalizeImage:
scale: 1.0/255.0
mean: [0.485, 0.456, 0.406]
std: [0.229, 0.224, 0.225]
order: ''
- ToCHWImage:
PostProcess:
name: Topk
topk: 5
class_id_map_file: "ppcls/utils/imagenet1k_label_list.txt"
class_id_map_file: ppcls/utils/imagenet1k_label_list.txt
Metric:
Train:
Train:
- TopkAcc:
topk: [1, 5]
Eval:
Eval:
- TopkAcc:
topk: [1, 5]
......@@ -2,8 +2,8 @@
Global:
checkpoints: null
pretrained_model: null
output_dir: "./output/"
device: "gpu"
output_dir: ./output/
device: gpu
class_num: 1000
save_interval: 1
eval_during_train: True
......@@ -13,11 +13,11 @@ Global:
use_visualdl: False
# used for static mode and model export
image_shape: [3, 224, 224]
save_inference_dir: "./inference"
save_inference_dir: ./inference
# model architecture
Arch:
name: "ResNet101"
name: ResNet101
# loss function config for training/eval process
Loss:
......@@ -30,10 +30,10 @@ Loss:
Optimizer:
name: "Momentum"
name: Momentum
momentum: 0.9
lr:
name: "Piecewise"
name: Piecewise
learning_rate: 0.1
decay_epochs: [30, 60, 90]
values: [0.1, 0.01, 0.001, 0.0001]
......@@ -46,9 +46,9 @@ Optimizer:
DataLoader:
Train:
dataset:
name: "ImageNetDataset"
image_root: "./dataset/ILSVRC2012/"
cls_label_path: "./dataset/ILSVRC2012/train_list.txt"
name: ImageNetDataset
image_root: ./dataset/ILSVRC2012/
cls_label_path: ./dataset/ILSVRC2012/train_list.txt
transform_ops:
- RandCropImage:
size: 224
......@@ -61,20 +61,20 @@ DataLoader:
order: ''
sampler:
name: "DistributedBatchSampler"
name: DistributedBatchSampler
batch_size: 64
drop_last: False
shuffle: True
loader:
num_workers: 6
num_workers: 4
use_shared_memory: True
Eval:
# TODO: modify to the latest trainer
dataset:
name: "ImageNetDataset"
image_root: "./dataset/ILSVRC2012/"
cls_label_path: "./dataset/ILSVRC2012/val_list.txt"
name: ImageNetDataset
image_root: ./dataset/ILSVRC2012/
cls_label_path: ./dataset/ILSVRC2012/val_list.txt
transform_ops:
- ResizeImage:
resize_short: 256
......@@ -86,16 +86,16 @@ DataLoader:
std: [0.229, 0.224, 0.225]
order: ''
sampler:
name: "DistributedBatchSampler"
name: DistributedBatchSampler
batch_size: 64
drop_last: False
shuffle: False
loader:
num_workers: 6
num_workers: 4
use_shared_memory: True
Infer:
infer_imgs: "docs/images/whl/demo.jpg"
infer_imgs: docs/images/whl/demo.jpg
batch_size: 10
transforms:
- DecodeImage:
......@@ -112,9 +112,9 @@ Infer:
order: ''
- ToCHWImage:
PostProcess:
name: "Topk"
name: Topk
topk: 5
class_id_map_file: "ppcls/utils/imagenet1k_label_list.txt"
class_id_map_file: ppcls/utils/imagenet1k_label_list.txt
Metric:
Train:
......
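The ResNet configs in this part of the diff keep the `Piecewise` schedule (`learning_rate: 0.1`, `decay_epochs: [30, 60, 90]`, `values: [0.1, 0.01, 0.001, 0.0001]`). As a quick sketch of the step decay those numbers encode, assuming the usual boundaries-and-values semantics rather than the framework's own scheduler class:

```python
# Step decay implied by decay_epochs [30, 60, 90] and values [0.1, 0.01, 0.001, 0.0001].
def piecewise_lr(epoch, decay_epochs=(30, 60, 90),
                 values=(0.1, 0.01, 0.001, 0.0001)):
    """Return the learning rate in effect at the given epoch."""
    for boundary, lr in zip(decay_epochs, values):
        if epoch < boundary:
            return lr
    return values[-1]

assert piecewise_lr(0) == 0.1      # epochs 0-29
assert piecewise_lr(45) == 0.01    # epochs 30-59
assert piecewise_lr(95) == 0.0001  # epoch 90 onwards
```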
......@@ -2,8 +2,8 @@
Global:
checkpoints: null
pretrained_model: null
output_dir: "./output/"
device: "gpu"
output_dir: ./output/
device: gpu
class_num: 1000
save_interval: 1
eval_during_train: True
......@@ -13,11 +13,11 @@ Global:
use_visualdl: False
# used for static mode and model export
image_shape: [3, 224, 224]
save_inference_dir: "./inference"
save_inference_dir: ./inference
# model architecture
Arch:
name: "ResNet101_vd"
name: ResNet101_vd
# loss function config for training/eval process
Loss:
......@@ -31,10 +31,10 @@ Loss:
Optimizer:
name: "Momentum"
name: Momentum
momentum: 0.9
lr:
name: "Cosine"
name: Cosine
learning_rate: 0.1
regularizer:
name: 'L2'
......@@ -45,9 +45,9 @@ Optimizer:
DataLoader:
Train:
dataset:
name: "ImageNetDataset"
image_root: "./dataset/ILSVRC2012/"
cls_label_path: "./dataset/ILSVRC2012/train_list.txt"
name: ImageNetDataset
image_root: ./dataset/ILSVRC2012/
cls_label_path: ./dataset/ILSVRC2012/train_list.txt
transform_ops:
- RandCropImage:
size: 224
......@@ -63,20 +63,20 @@ DataLoader:
alpha: 0.2
sampler:
name: "DistributedBatchSampler"
name: DistributedBatchSampler
batch_size: 64
drop_last: False
shuffle: True
loader:
num_workers: 6
num_workers: 4
use_shared_memory: True
Eval:
# TODO: modify to the latest trainer
dataset:
name: "ImageNetDataset"
image_root: "./dataset/ILSVRC2012/"
cls_label_path: "./dataset/ILSVRC2012/val_list.txt"
name: ImageNetDataset
image_root: ./dataset/ILSVRC2012/
cls_label_path: ./dataset/ILSVRC2012/val_list.txt
transform_ops:
- ResizeImage:
resize_short: 256
......@@ -88,16 +88,16 @@ DataLoader:
std: [0.229, 0.224, 0.225]
order: ''
sampler:
name: "DistributedBatchSampler"
name: DistributedBatchSampler
batch_size: 64
drop_last: False
shuffle: False
loader:
num_workers: 6
num_workers: 4
use_shared_memory: True
Infer:
infer_imgs: "docs/images/whl/demo.jpg"
infer_imgs: docs/images/whl/demo.jpg
batch_size: 10
transforms:
- DecodeImage:
......@@ -114,9 +114,9 @@ Infer:
order: ''
- ToCHWImage:
PostProcess:
name: "Topk"
name: Topk
topk: 5
class_id_map_file: "ppcls/utils/imagenet1k_label_list.txt"
class_id_map_file: ppcls/utils/imagenet1k_label_list.txt
Metric:
Train:
......
......@@ -2,8 +2,8 @@
Global:
checkpoints: null
pretrained_model: null
output_dir: "./output/"
device: "gpu"
output_dir: ./output/
device: gpu
class_num: 1000
save_interval: 1
eval_during_train: True
......@@ -13,11 +13,11 @@ Global:
use_visualdl: False
# used for static mode and model export
image_shape: [3, 224, 224]
save_inference_dir: "./inference"
save_inference_dir: ./inference
# model architecture
Arch:
name: "ResNet152"
name: ResNet152
# loss function config for training/eval process
Loss:
......@@ -30,10 +30,10 @@ Loss:
Optimizer:
name: "Momentum"
name: Momentum
momentum: 0.9
lr:
name: "Piecewise"
name: Piecewise
learning_rate: 0.1
decay_epochs: [30, 60, 90]
values: [0.1, 0.01, 0.001, 0.0001]
......@@ -46,9 +46,9 @@ Optimizer:
DataLoader:
Train:
dataset:
name: "ImageNetDataset"
image_root: "./dataset/ILSVRC2012/"
cls_label_path: "./dataset/ILSVRC2012/train_list.txt"
name: ImageNetDataset
image_root: ./dataset/ILSVRC2012/
cls_label_path: ./dataset/ILSVRC2012/train_list.txt
transform_ops:
- RandCropImage:
size: 224
......@@ -61,20 +61,20 @@ DataLoader:
order: ''
sampler:
name: "DistributedBatchSampler"
name: DistributedBatchSampler
batch_size: 64
drop_last: False
shuffle: True
loader:
num_workers: 6
num_workers: 4
use_shared_memory: True
Eval:
# TODO: modify to the latest trainer
dataset:
name: "ImageNetDataset"
image_root: "./dataset/ILSVRC2012/"
cls_label_path: "./dataset/ILSVRC2012/val_list.txt"
name: ImageNetDataset
image_root: ./dataset/ILSVRC2012/
cls_label_path: ./dataset/ILSVRC2012/val_list.txt
transform_ops:
- ResizeImage:
resize_short: 256
......@@ -86,16 +86,16 @@ DataLoader:
std: [0.229, 0.224, 0.225]
order: ''
sampler:
name: "DistributedBatchSampler"
name: DistributedBatchSampler
batch_size: 64
drop_last: False
shuffle: False
loader:
num_workers: 6
num_workers: 4
use_shared_memory: True
Infer:
infer_imgs: "docs/images/whl/demo.jpg"
infer_imgs: docs/images/whl/demo.jpg
batch_size: 10
transforms:
- DecodeImage:
......@@ -112,9 +112,9 @@ Infer:
order: ''
- ToCHWImage:
PostProcess:
name: "Topk"
name: Topk
topk: 5
class_id_map_file: "ppcls/utils/imagenet1k_label_list.txt"
class_id_map_file: ppcls/utils/imagenet1k_label_list.txt
Metric:
Train:
......
......@@ -2,8 +2,8 @@
Global:
checkpoints: null
pretrained_model: null
output_dir: "./output/"
device: "gpu"
output_dir: ./output/
device: gpu
class_num: 1000
save_interval: 1
eval_during_train: True
......@@ -13,11 +13,11 @@ Global:
use_visualdl: False
# used for static mode and model export
image_shape: [3, 224, 224]
save_inference_dir: "./inference"
save_inference_dir: ./inference
# model architecture
Arch:
name: "ResNet152_vd"
name: ResNet152_vd
# loss function config for training/eval process
Loss:
......@@ -31,10 +31,10 @@ Loss:
Optimizer:
name: "Momentum"
name: Momentum
momentum: 0.9
lr:
name: "Cosine"
name: Cosine
learning_rate: 0.1
regularizer:
name: 'L2'
......@@ -45,9 +45,9 @@ Optimizer:
DataLoader:
Train:
dataset:
name: "ImageNetDataset"
image_root: "./dataset/ILSVRC2012/"
cls_label_path: "./dataset/ILSVRC2012/train_list.txt"
name: ImageNetDataset
image_root: ./dataset/ILSVRC2012/
cls_label_path: ./dataset/ILSVRC2012/train_list.txt
transform_ops:
- RandCropImage:
size: 224
......@@ -63,20 +63,20 @@ DataLoader:
alpha: 0.2
sampler:
name: "DistributedBatchSampler"
name: DistributedBatchSampler
batch_size: 64
drop_last: False
shuffle: True
loader:
num_workers: 6
num_workers: 4
use_shared_memory: True
Eval:
# TODO: modify to the latest trainer
dataset:
name: "ImageNetDataset"
image_root: "./dataset/ILSVRC2012/"
cls_label_path: "./dataset/ILSVRC2012/val_list.txt"
name: ImageNetDataset
image_root: ./dataset/ILSVRC2012/
cls_label_path: ./dataset/ILSVRC2012/val_list.txt
transform_ops:
- ResizeImage:
resize_short: 256
......@@ -88,16 +88,16 @@ DataLoader:
std: [0.229, 0.224, 0.225]
order: ''
sampler:
name: "DistributedBatchSampler"
name: DistributedBatchSampler
batch_size: 64
drop_last: False
shuffle: False
loader:
num_workers: 6
num_workers: 4
use_shared_memory: True
Infer:
infer_imgs: "docs/images/whl/demo.jpg"
infer_imgs: docs/images/whl/demo.jpg
batch_size: 10
transforms:
- DecodeImage:
......@@ -114,9 +114,9 @@ Infer:
order: ''
- ToCHWImage:
PostProcess:
name: "Topk"
name: Topk
topk: 5
class_id_map_file: "ppcls/utils/imagenet1k_label_list.txt"
class_id_map_file: ppcls/utils/imagenet1k_label_list.txt
Metric:
Train:
......
......@@ -2,8 +2,8 @@
Global:
checkpoints: null
pretrained_model: null
output_dir: "./output/"
device: "gpu"
output_dir: ./output/
device: gpu
class_num: 1000
save_interval: 1
eval_during_train: True
......@@ -13,11 +13,11 @@ Global:
use_visualdl: False
# used for static mode and model export
image_shape: [3, 224, 224]
save_inference_dir: "./inference"
save_inference_dir: ./inference
# model architecture
Arch:
name: "ResNet18"
name: ResNet18
# loss function config for training/eval process
Loss:
......@@ -30,10 +30,10 @@ Loss:
Optimizer:
name: "Momentum"
name: Momentum
momentum: 0.9
lr:
name: "Piecewise"
name: Piecewise
learning_rate: 0.1
decay_epochs: [30, 60, 90]
values: [0.1, 0.01, 0.001, 0.0001]
......@@ -46,9 +46,9 @@ Optimizer:
DataLoader:
Train:
dataset:
name: "ImageNetDataset"
image_root: "./dataset/ILSVRC2012/"
cls_label_path: "./dataset/ILSVRC2012/train_list.txt"
name: ImageNetDataset
image_root: ./dataset/ILSVRC2012/
cls_label_path: ./dataset/ILSVRC2012/train_list.txt
transform_ops:
- RandCropImage:
size: 224
......@@ -61,20 +61,20 @@ DataLoader:
order: ''
sampler:
name: "DistributedBatchSampler"
name: DistributedBatchSampler
batch_size: 64
drop_last: False
shuffle: True
loader:
num_workers: 6
num_workers: 4
use_shared_memory: True
Eval:
# TODO: modify to the latest trainer
dataset:
name: "ImageNetDataset"
image_root: "./dataset/ILSVRC2012/"
cls_label_path: "./dataset/ILSVRC2012/val_list.txt"
name: ImageNetDataset
image_root: ./dataset/ILSVRC2012/
cls_label_path: ./dataset/ILSVRC2012/val_list.txt
transform_ops:
- ResizeImage:
resize_short: 256
......@@ -86,16 +86,16 @@ DataLoader:
std: [0.229, 0.224, 0.225]
order: ''
sampler:
name: "DistributedBatchSampler"
name: DistributedBatchSampler
batch_size: 64
drop_last: False
shuffle: False
loader:
num_workers: 6
num_workers: 4
use_shared_memory: True
Infer:
infer_imgs: "docs/images/whl/demo.jpg"
infer_imgs: docs/images/whl/demo.jpg
batch_size: 10
transforms:
- DecodeImage:
......@@ -112,9 +112,9 @@ Infer:
order: ''
- ToCHWImage:
PostProcess:
name: "Topk"
name: Topk
topk: 5
class_id_map_file: "ppcls/utils/imagenet1k_label_list.txt"
class_id_map_file: ppcls/utils/imagenet1k_label_list.txt
Metric:
Train:
......
......@@ -2,8 +2,8 @@
Global:
checkpoints: null
pretrained_model: null
output_dir: "./output/"
device: "gpu"
output_dir: ./output/
device: gpu
class_num: 1000
save_interval: 1
eval_during_train: True
......@@ -13,11 +13,11 @@ Global:
use_visualdl: False
# used for static mode and model export
image_shape: [3, 224, 224]
save_inference_dir: "./inference"
save_inference_dir: ./inference
# model architecture
Arch:
name: "ResNet18_vd"
name: ResNet18_vd
# loss function config for training/eval process
Loss:
......@@ -31,10 +31,10 @@ Loss:
Optimizer:
name: "Momentum"
name: Momentum
momentum: 0.9
lr:
name: "Cosine"
name: Cosine
learning_rate: 0.1
regularizer:
name: 'L2'
......@@ -45,9 +45,9 @@ Optimizer:
DataLoader:
Train:
dataset:
name: "ImageNetDataset"
image_root: "./dataset/ILSVRC2012/"
cls_label_path: "./dataset/ILSVRC2012/train_list.txt"
name: ImageNetDataset
image_root: ./dataset/ILSVRC2012/
cls_label_path: ./dataset/ILSVRC2012/train_list.txt
transform_ops:
- RandCropImage:
size: 224
......@@ -63,20 +63,20 @@ DataLoader:
alpha: 0.2
sampler:
name: "DistributedBatchSampler"
name: DistributedBatchSampler
batch_size: 64
drop_last: False
shuffle: True
loader:
num_workers: 6
num_workers: 4
use_shared_memory: True
Eval:
# TODO: modify to the latest trainer
dataset:
name: "ImageNetDataset"
image_root: "./dataset/ILSVRC2012/"
cls_label_path: "./dataset/ILSVRC2012/val_list.txt"
name: ImageNetDataset
image_root: ./dataset/ILSVRC2012/
cls_label_path: ./dataset/ILSVRC2012/val_list.txt
transform_ops:
- ResizeImage:
resize_short: 256
......@@ -88,16 +88,16 @@ DataLoader:
std: [0.229, 0.224, 0.225]
order: ''
sampler:
name: "DistributedBatchSampler"
name: DistributedBatchSampler
batch_size: 64
drop_last: False
shuffle: False
loader:
num_workers: 6
num_workers: 4
use_shared_memory: True
Infer:
infer_imgs: "docs/images/whl/demo.jpg"
infer_imgs: docs/images/whl/demo.jpg
batch_size: 10
transforms:
- DecodeImage:
......@@ -114,9 +114,9 @@ Infer:
order: ''
- ToCHWImage:
PostProcess:
name: "Topk"
name: Topk
topk: 5
class_id_map_file: "ppcls/utils/imagenet1k_label_list.txt"
class_id_map_file: ppcls/utils/imagenet1k_label_list.txt
Metric:
Train:
......
......@@ -2,8 +2,8 @@
Global:
checkpoints: null
pretrained_model: null
output_dir: "./output/"
device: "gpu"
output_dir: ./output/
device: gpu
class_num: 1000
save_interval: 1
eval_during_train: True
......@@ -13,11 +13,11 @@ Global:
use_visualdl: False
# used for static mode and model export
image_shape: [3, 224, 224]
save_inference_dir: "./inference"
save_inference_dir: ./inference
# model architecture
Arch:
name: "ResNet200_vd"
name: ResNet200_vd
# loss function config for training/eval process
Loss:
......@@ -31,10 +31,10 @@ Loss:
Optimizer:
name: "Momentum"
name: Momentum
momentum: 0.9
lr:
name: "Cosine"
name: Cosine
learning_rate: 0.1
regularizer:
name: 'L2'
......@@ -45,9 +45,9 @@ Optimizer:
DataLoader:
Train:
dataset:
name: "ImageNetDataset"
image_root: "./dataset/ILSVRC2012/"
cls_label_path: "./dataset/ILSVRC2012/train_list.txt"
name: ImageNetDataset
image_root: ./dataset/ILSVRC2012/
cls_label_path: ./dataset/ILSVRC2012/train_list.txt
transform_ops:
- RandCropImage:
size: 224
......@@ -63,20 +63,20 @@ DataLoader:
alpha: 0.2
sampler:
name: "DistributedBatchSampler"
name: DistributedBatchSampler
batch_size: 64
drop_last: False
shuffle: True
loader:
num_workers: 6
num_workers: 4
use_shared_memory: True
Eval:
# TODO: modify to the latest trainer
dataset:
name: "ImageNetDataset"
image_root: "./dataset/ILSVRC2012/"
cls_label_path: "./dataset/ILSVRC2012/val_list.txt"
name: ImageNetDataset
image_root: ./dataset/ILSVRC2012/
cls_label_path: ./dataset/ILSVRC2012/val_list.txt
transform_ops:
- ResizeImage:
resize_short: 256
......@@ -88,16 +88,16 @@ DataLoader:
std: [0.229, 0.224, 0.225]
order: ''
sampler:
name: "DistributedBatchSampler"
name: DistributedBatchSampler
batch_size: 64
drop_last: False
shuffle: False
loader:
num_workers: 6
num_workers: 4
use_shared_memory: True
Infer:
infer_imgs: "docs/images/whl/demo.jpg"
infer_imgs: docs/images/whl/demo.jpg
batch_size: 10
transforms:
- DecodeImage:
......@@ -114,9 +114,9 @@ Infer:
order: ''
- ToCHWImage:
PostProcess:
name: "Topk"
name: Topk
topk: 5
class_id_map_file: "ppcls/utils/imagenet1k_label_list.txt"
class_id_map_file: ppcls/utils/imagenet1k_label_list.txt
Metric:
Train:
......
......@@ -2,8 +2,8 @@
Global:
checkpoints: null
pretrained_model: null
output_dir: "./output/"
device: "gpu"
output_dir: ./output/
device: gpu
class_num: 1000
save_interval: 1
eval_during_train: True
......@@ -13,11 +13,11 @@ Global:
use_visualdl: False
# used for static mode and model export
image_shape: [3, 224, 224]
save_inference_dir: "./inference"
save_inference_dir: ./inference
# model architecture
Arch:
name: "ResNet34"
name: ResNet34
# loss function config for training/eval process
Loss:
......@@ -30,10 +30,10 @@ Loss:
Optimizer:
name: "Momentum"
name: Momentum
momentum: 0.9
lr:
name: "Piecewise"
name: Piecewise
learning_rate: 0.1
decay_epochs: [30, 60, 90]
values: [0.1, 0.01, 0.001, 0.0001]
......@@ -46,9 +46,9 @@ Optimizer:
DataLoader:
Train:
dataset:
name: "ImageNetDataset"
image_root: "./dataset/ILSVRC2012/"
cls_label_path: "./dataset/ILSVRC2012/train_list.txt"
name: ImageNetDataset
image_root: ./dataset/ILSVRC2012/
cls_label_path: ./dataset/ILSVRC2012/train_list.txt
transform_ops:
- RandCropImage:
size: 224
......@@ -61,20 +61,20 @@ DataLoader:
order: ''
sampler:
name: "DistributedBatchSampler"
name: DistributedBatchSampler
batch_size: 64
drop_last: False
shuffle: True
loader:
num_workers: 6
num_workers: 4
use_shared_memory: True
Eval:
# TODO: modify to the latest trainer
dataset:
name: "ImageNetDataset"
image_root: "./dataset/ILSVRC2012/"
cls_label_path: "./dataset/ILSVRC2012/val_list.txt"
name: ImageNetDataset
image_root: ./dataset/ILSVRC2012/
cls_label_path: ./dataset/ILSVRC2012/val_list.txt
transform_ops:
- ResizeImage:
resize_short: 256
......@@ -86,16 +86,16 @@ DataLoader:
std: [0.229, 0.224, 0.225]
order: ''
sampler:
name: "DistributedBatchSampler"
name: DistributedBatchSampler
batch_size: 64
drop_last: False
shuffle: False
loader:
num_workers: 6
num_workers: 4
use_shared_memory: True
Infer:
infer_imgs: "docs/images/whl/demo.jpg"
infer_imgs: docs/images/whl/demo.jpg
batch_size: 10
transforms:
- DecodeImage:
......@@ -112,9 +112,9 @@ Infer:
order: ''
- ToCHWImage:
PostProcess:
name: "Topk"
name: Topk
topk: 5
class_id_map_file: "ppcls/utils/imagenet1k_label_list.txt"
class_id_map_file: ppcls/utils/imagenet1k_label_list.txt
Metric:
Train:
......
......@@ -2,8 +2,8 @@
Global:
checkpoints: null
pretrained_model: null
output_dir: "./output/"
device: "gpu"
output_dir: ./output/
device: gpu
class_num: 1000
save_interval: 1
eval_during_train: True
......@@ -13,11 +13,11 @@ Global:
use_visualdl: False
# used for static mode and model export
image_shape: [3, 224, 224]
save_inference_dir: "./inference"
save_inference_dir: ./inference
# model architecture
Arch:
name: "ResNet34_vd"
name: ResNet34_vd
# loss function config for training/eval process
Loss:
......@@ -31,10 +31,10 @@ Loss:
Optimizer:
name: "Momentum"
name: Momentum
momentum: 0.9
lr:
name: "Cosine"
name: Cosine
learning_rate: 0.1
regularizer:
name: 'L2'
......@@ -45,9 +45,9 @@ Optimizer:
DataLoader:
Train:
dataset:
name: "ImageNetDataset"
image_root: "./dataset/ILSVRC2012/"
cls_label_path: "./dataset/ILSVRC2012/train_list.txt"
name: ImageNetDataset
image_root: ./dataset/ILSVRC2012/
cls_label_path: ./dataset/ILSVRC2012/train_list.txt
transform_ops:
- RandCropImage:
size: 224
......@@ -63,20 +63,20 @@ DataLoader:
alpha: 0.2
sampler:
name: "DistributedBatchSampler"
name: DistributedBatchSampler
batch_size: 64
drop_last: False
shuffle: True
loader:
num_workers: 6
num_workers: 4
use_shared_memory: True
Eval:
# TODO: modify to the latest trainer
dataset:
name: "ImageNetDataset"
image_root: "./dataset/ILSVRC2012/"
cls_label_path: "./dataset/ILSVRC2012/val_list.txt"
name: ImageNetDataset
image_root: ./dataset/ILSVRC2012/
cls_label_path: ./dataset/ILSVRC2012/val_list.txt
transform_ops:
- ResizeImage:
resize_short: 256
......@@ -88,16 +88,16 @@ DataLoader:
std: [0.229, 0.224, 0.225]
order: ''
sampler:
name: "DistributedBatchSampler"
name: DistributedBatchSampler
batch_size: 64
drop_last: False
shuffle: False
loader:
num_workers: 6
num_workers: 4
use_shared_memory: True
Infer:
infer_imgs: "docs/images/whl/demo.jpg"
infer_imgs: docs/images/whl/demo.jpg
batch_size: 10
transforms:
- DecodeImage:
......@@ -114,9 +114,9 @@ Infer:
order: ''
- ToCHWImage:
PostProcess:
name: "Topk"
name: Topk
topk: 5
class_id_map_file: "ppcls/utils/imagenet1k_label_list.txt"
class_id_map_file: ppcls/utils/imagenet1k_label_list.txt
Metric:
Train:
......
......@@ -2,8 +2,8 @@
Global:
checkpoints: null
pretrained_model: null
output_dir: "./output/"
device: "gpu"
output_dir: ./output/
device: gpu
class_num: 1000
save_interval: 1
eval_during_train: True
......@@ -13,11 +13,11 @@ Global:
use_visualdl: False
# used for static mode and model export
image_shape: [3, 224, 224]
save_inference_dir: "./inference"
save_inference_dir: ./inference
# model architecture
Arch:
name: "ResNet50"
name: ResNet50
# loss function config for training/eval process
Loss:
......@@ -30,10 +30,10 @@ Loss:
Optimizer:
name: "Momentum"
name: Momentum
momentum: 0.9
lr:
name: "Piecewise"
name: Piecewise
learning_rate: 0.1
decay_epochs: [30, 60, 90]
values: [0.1, 0.01, 0.001, 0.0001]
......@@ -46,9 +46,9 @@ Optimizer:
DataLoader:
Train:
dataset:
name: "ImageNetDataset"
image_root: "./dataset/ILSVRC2012/"
cls_label_path: "./dataset/ILSVRC2012/train_list.txt"
name: ImageNetDataset
image_root: ./dataset/ILSVRC2012/
cls_label_path: ./dataset/ILSVRC2012/train_list.txt
transform_ops:
- RandCropImage:
size: 224
......@@ -61,20 +61,20 @@ DataLoader:
order: ''
sampler:
name: "DistributedBatchSampler"
name: DistributedBatchSampler
batch_size: 64
drop_last: False
shuffle: True
loader:
num_workers: 6
num_workers: 4
use_shared_memory: True
Eval:
# TODO: modify to the latest trainer
dataset:
name: "ImageNetDataset"
image_root: "./dataset/ILSVRC2012/"
cls_label_path: "./dataset/ILSVRC2012/val_list.txt"
name: ImageNetDataset
image_root: ./dataset/ILSVRC2012/
cls_label_path: ./dataset/ILSVRC2012/val_list.txt
transform_ops:
- ResizeImage:
resize_short: 256
......@@ -86,16 +86,16 @@ DataLoader:
std: [0.229, 0.224, 0.225]
order: ''
sampler:
name: "DistributedBatchSampler"
name: DistributedBatchSampler
batch_size: 64
drop_last: False
shuffle: False
loader:
num_workers: 6
num_workers: 4
use_shared_memory: True
Infer:
infer_imgs: "docs/images/whl/demo.jpg"
infer_imgs: docs/images/whl/demo.jpg
batch_size: 10
transforms:
- DecodeImage:
......@@ -112,9 +112,9 @@ Infer:
order: ''
- ToCHWImage:
PostProcess:
name: "Topk"
name: Topk
topk: 5
class_id_map_file: "ppcls/utils/imagenet1k_label_list.txt"
class_id_map_file: ppcls/utils/imagenet1k_label_list.txt
Metric:
Train:
......
......@@ -2,8 +2,8 @@
Global:
checkpoints: null
pretrained_model: null
output_dir: "./output/"
device: "gpu"
output_dir: ./output/
device: gpu
class_num: 1000
save_interval: 1
eval_during_train: True
......@@ -13,11 +13,11 @@ Global:
use_visualdl: False
# used for static mode and model export
image_shape: [3, 224, 224]
save_inference_dir: "./inference"
save_inference_dir: ./inference
# model architecture
Arch:
name: "ResNet50_vd"
name: ResNet50_vd
# loss function config for training/eval process
Loss:
......@@ -31,10 +31,10 @@ Loss:
Optimizer:
name: "Momentum"
name: Momentum
momentum: 0.9
lr:
name: "Cosine"
name: Cosine
learning_rate: 0.1
regularizer:
name: 'L2'
......@@ -45,9 +45,9 @@ Optimizer:
DataLoader:
Train:
dataset:
name: "ImageNetDataset"
image_root: "./dataset/ILSVRC2012/"
cls_label_path: "./dataset/ILSVRC2012/train_list.txt"
name: ImageNetDataset
image_root: ./dataset/ILSVRC2012/
cls_label_path: ./dataset/ILSVRC2012/train_list.txt
transform_ops:
- RandCropImage:
size: 224
......@@ -63,20 +63,20 @@ DataLoader:
alpha: 0.2
sampler:
name: "DistributedBatchSampler"
name: DistributedBatchSampler
batch_size: 64
drop_last: False
shuffle: True
loader:
num_workers: 6
num_workers: 4
use_shared_memory: True
Eval:
# TODO: modify to the latest trainer
dataset:
name: "ImageNetDataset"
image_root: "./dataset/ILSVRC2012/"
cls_label_path: "./dataset/ILSVRC2012/val_list.txt"
name: ImageNetDataset
image_root: ./dataset/ILSVRC2012/
cls_label_path: ./dataset/ILSVRC2012/val_list.txt
transform_ops:
- ResizeImage:
resize_short: 256
......@@ -88,16 +88,16 @@ DataLoader:
std: [0.229, 0.224, 0.225]
order: ''
sampler:
name: "DistributedBatchSampler"
name: DistributedBatchSampler
batch_size: 64
drop_last: False
shuffle: False
loader:
num_workers: 6
num_workers: 4
use_shared_memory: True
Infer:
infer_imgs: "docs/images/whl/demo.jpg"
infer_imgs: docs/images/whl/demo.jpg
batch_size: 10
transforms:
- DecodeImage:
......@@ -114,9 +114,9 @@ Infer:
order: ''
- ToCHWImage:
PostProcess:
name: "Topk"
name: Topk
topk: 5
class_id_map_file: "ppcls/utils/imagenet1k_label_list.txt"
class_id_map_file: ppcls/utils/imagenet1k_label_list.txt
Metric:
Train:
......
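The `_vd` variants above switch the schedule to `Cosine` with `learning_rate: 0.1`. The total epoch count sits outside the visible hunks, so the sketch below takes it as a parameter and assumes the common half-cosine decay from the base rate down to zero; it is an approximation of the intent, not the scheduler implementation.

```python
import math

# Hedged sketch of cosine decay for `name: Cosine, learning_rate: 0.1`.
# total_epochs is an assumption; the real value is not shown in these hunks.
def cosine_lr(epoch, base_lr=0.1, total_epochs=200):
    return 0.5 * base_lr * (1.0 + math.cos(math.pi * epoch / total_epochs))

print(cosine_lr(0))    # 0.1 at the start
print(cosine_lr(100))  # 0.05 halfway through (with total_epochs=200)
```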
......@@ -2,8 +2,8 @@
Global:
checkpoints: null
pretrained_model: null
output_dir: "./output/"
device: "gpu"
output_dir: ./output/
device: gpu
class_num: 1000
save_interval: 1
eval_during_train: True
......@@ -13,11 +13,11 @@ Global:
use_visualdl: False
# used for static mode and model export
image_shape: [3, 224, 224]
save_inference_dir: "./inference"
save_inference_dir: ./inference
# model architecture
Arch:
name: "VGG11"
name: VGG11
# loss function config for training/eval process
Loss:
......@@ -44,80 +44,80 @@ Optimizer:
DataLoader:
Train:
dataset:
name: ImageNetDataset
image_root: "./dataset/ILSVRC2012/"
cls_label_path: "./dataset/ILSVRC2012/train_list.txt"
transform_ops:
- RandCropImage:
size: 224
- RandFlipImage:
flip_code: 1
- NormalizeImage:
scale: 0.00392157
mean: [0.485, 0.456, 0.406]
std: [0.229, 0.224, 0.225]
order: ''
name: ImageNetDataset
image_root: ./dataset/ILSVRC2012/
cls_label_path: ./dataset/ILSVRC2012/train_list.txt
transform_ops:
- RandCropImage:
size: 224
- RandFlipImage:
flip_code: 1
- NormalizeImage:
scale: 1.0/255.0
mean: [0.485, 0.456, 0.406]
std: [0.229, 0.224, 0.225]
order: ''
sampler:
name: DistributedBatchSampler
batch_size: 64
drop_last: False
shuffle: True
name: DistributedBatchSampler
batch_size: 128
drop_last: False
shuffle: True
loader:
num_workers: 6
use_shared_memory: True
num_workers: 4
use_shared_memory: True
Eval:
# TODO: modify to the latest trainer
dataset:
name: ImageNetDataset
image_root: "./dataset/ILSVRC2012/"
cls_label_path: "./dataset/ILSVRC2012/val_list.txt"
transform_ops:
- ResizeImage:
resize_short: 256
- CropImage:
size: 224
- NormalizeImage:
scale: 0.00392157
mean: [0.485, 0.456, 0.406]
std: [0.229, 0.224, 0.225]
order: ''
name: ImageNetDataset
image_root: ./dataset/ILSVRC2012/
cls_label_path: ./dataset/ILSVRC2012/val_list.txt
transform_ops:
- ResizeImage:
resize_short: 256
- CropImage:
size: 224
- NormalizeImage:
scale: 1.0/255.0
mean: [0.485, 0.456, 0.406]
std: [0.229, 0.224, 0.225]
order: ''
sampler:
name: DistributedBatchSampler
batch_size: 64
drop_last: False
shuffle: False
name: DistributedBatchSampler
batch_size: 64
drop_last: False
shuffle: False
loader:
num_workers: 6
use_shared_memory: True
num_workers: 4
use_shared_memory: True
Infer:
infer_imgs: "docs/images/whl/demo.jpg"
infer_imgs: docs/images/whl/demo.jpg
batch_size: 10
transforms:
- DecodeImage:
to_rgb: True
channel_first: False
- ResizeImage:
resize_short: 256
- CropImage:
size: 224
- NormalizeImage:
scale: 1.0/255.0
mean: [0.485, 0.456, 0.406]
std: [0.229, 0.224, 0.225]
order: ''
- ToCHWImage:
- DecodeImage:
to_rgb: True
channel_first: False
- ResizeImage:
resize_short: 256
- CropImage:
size: 224
- NormalizeImage:
scale: 1.0/255.0
mean: [0.485, 0.456, 0.406]
std: [0.229, 0.224, 0.225]
order: ''
- ToCHWImage:
PostProcess:
name: Topk
topk: 5
class_id_map_file: "ppcls/utils/imagenet1k_label_list.txt"
class_id_map_file: ppcls/utils/imagenet1k_label_list.txt
Metric:
Train:
Train:
- TopkAcc:
topk: [1, 5]
Eval:
Eval:
- TopkAcc:
topk: [1, 5]
......@@ -2,8 +2,8 @@
Global:
checkpoints: null
pretrained_model: null
output_dir: "./output/"
device: "gpu"
output_dir: ./output/
device: gpu
class_num: 1000
save_interval: 1
eval_during_train: True
......@@ -13,11 +13,11 @@ Global:
use_visualdl: False
# used for static mode and model export
image_shape: [3, 224, 224]
save_inference_dir: "./inference"
save_inference_dir: ./inference
# model architecture
Arch:
name: "VGG13"
name: VGG13
# loss function config for traing/eval process
Loss:
......@@ -44,80 +44,80 @@ Optimizer:
DataLoader:
Train:
dataset:
name: ImageNetDataset
image_root: "./dataset/ILSVRC2012/"
cls_label_path: "./dataset/ILSVRC2012/train_list.txt"
transform_ops:
- RandCropImage:
size: 224
- RandFlipImage:
flip_code: 1
- NormalizeImage:
scale: 0.00392157
mean: [0.485, 0.456, 0.406]
std: [0.229, 0.224, 0.225]
order: ''
name: ImageNetDataset
image_root: ./dataset/ILSVRC2012/
cls_label_path: ./dataset/ILSVRC2012/train_list.txt
transform_ops:
- RandCropImage:
size: 224
- RandFlipImage:
flip_code: 1
- NormalizeImage:
scale: 1.0/255.0
mean: [0.485, 0.456, 0.406]
std: [0.229, 0.224, 0.225]
order: ''
sampler:
name: DistributedBatchSampler
batch_size: 64
drop_last: False
shuffle: True
name: DistributedBatchSampler
batch_size: 64
drop_last: False
shuffle: True
loader:
num_workers: 6
use_shared_memory: True
num_workers: 4
use_shared_memory: True
Eval:
# TODO: modify to the latest trainer
dataset:
name: ImageNetDataset
image_root: "./dataset/ILSVRC2012/"
cls_label_path: "./dataset/ILSVRC2012/val_list.txt"
transform_ops:
- ResizeImage:
resize_short: 256
- CropImage:
size: 224
- NormalizeImage:
scale: 0.00392157
mean: [0.485, 0.456, 0.406]
std: [0.229, 0.224, 0.225]
order: ''
name: ImageNetDataset
image_root: ./dataset/ILSVRC2012/
cls_label_path: ./dataset/ILSVRC2012/val_list.txt
transform_ops:
- ResizeImage:
resize_short: 256
- CropImage:
size: 224
- NormalizeImage:
scale: 1.0/255.0
mean: [0.485, 0.456, 0.406]
std: [0.229, 0.224, 0.225]
order: ''
sampler:
name: DistributedBatchSampler
batch_size: 64
drop_last: False
shuffle: False
name: DistributedBatchSampler
batch_size: 64
drop_last: False
shuffle: False
loader:
num_workers: 6
use_shared_memory: True
num_workers: 4
use_shared_memory: True
Infer:
infer_imgs: "docs/images/whl/demo.jpg"
infer_imgs: docs/images/whl/demo.jpg
batch_size: 10
transforms:
- DecodeImage:
to_rgb: True
channel_first: False
- ResizeImage:
resize_short: 256
- CropImage:
size: 224
- NormalizeImage:
scale: 1.0/255.0
mean: [0.485, 0.456, 0.406]
std: [0.229, 0.224, 0.225]
order: ''
- ToCHWImage:
- DecodeImage:
to_rgb: True
channel_first: False
- ResizeImage:
resize_short: 256
- CropImage:
size: 224
- NormalizeImage:
scale: 1.0/255.0
mean: [0.485, 0.456, 0.406]
std: [0.229, 0.224, 0.225]
order: ''
- ToCHWImage:
PostProcess:
name: Topk
topk: 5
class_id_map_file: "ppcls/utils/imagenet1k_label_list.txt"
class_id_map_file: ppcls/utils/imagenet1k_label_list.txt
Metric:
Train:
Train:
- TopkAcc:
topk: [1, 5]
Eval:
Eval:
- TopkAcc:
topk: [1, 5]
......@@ -2,8 +2,8 @@
Global:
checkpoints: null
pretrained_model: null
output_dir: "./output/"
device: "gpu"
output_dir: ./output/
device: gpu
class_num: 1000
save_interval: 1
eval_during_train: True
......@@ -13,11 +13,11 @@ Global:
use_visualdl: False
# used for static mode and model export
image_shape: [3, 224, 224]
save_inference_dir: "./inference"
save_inference_dir: ./inference
# model architecture
Arch:
name: "VGG16"
name: VGG16
# loss function config for traing/eval process
Loss:
......@@ -44,80 +44,80 @@ Optimizer:
DataLoader:
Train:
dataset:
name: ImageNetDataset
image_root: "./dataset/ILSVRC2012/"
cls_label_path: "./dataset/ILSVRC2012/train_list.txt"
transform_ops:
- RandCropImage:
size: 224
- RandFlipImage:
flip_code: 1
- NormalizeImage:
scale: 0.00392157
mean: [0.485, 0.456, 0.406]
std: [0.229, 0.224, 0.225]
order: ''
name: ImageNetDataset
image_root: ./dataset/ILSVRC2012/
cls_label_path: ./dataset/ILSVRC2012/train_list.txt
transform_ops:
- RandCropImage:
size: 224
- RandFlipImage:
flip_code: 1
- NormalizeImage:
scale: 1.0/255.0
mean: [0.485, 0.456, 0.406]
std: [0.229, 0.224, 0.225]
order: ''
sampler:
name: DistributedBatchSampler
batch_size: 64
drop_last: False
shuffle: True
name: DistributedBatchSampler
batch_size: 64
drop_last: False
shuffle: True
loader:
num_workers: 6
use_shared_memory: True
num_workers: 4
use_shared_memory: True
Eval:
# TODO: modify to the latest trainer
dataset:
name: ImageNetDataset
image_root: "./dataset/ILSVRC2012/"
cls_label_path: "./dataset/ILSVRC2012/val_list.txt"
transform_ops:
- ResizeImage:
resize_short: 256
- CropImage:
size: 224
- NormalizeImage:
scale: 0.00392157
mean: [0.485, 0.456, 0.406]
std: [0.229, 0.224, 0.225]
order: ''
name: ImageNetDataset
image_root: ./dataset/ILSVRC2012/
cls_label_path: ./dataset/ILSVRC2012/val_list.txt
transform_ops:
- ResizeImage:
resize_short: 256
- CropImage:
size: 224
- NormalizeImage:
scale: 1.0/255.0
mean: [0.485, 0.456, 0.406]
std: [0.229, 0.224, 0.225]
order: ''
sampler:
name: DistributedBatchSampler
batch_size: 64
drop_last: False
shuffle: False
name: DistributedBatchSampler
batch_size: 64
drop_last: False
shuffle: False
loader:
num_workers: 6
use_shared_memory: True
num_workers: 4
use_shared_memory: True
Infer:
infer_imgs: "docs/images/whl/demo.jpg"
infer_imgs: docs/images/whl/demo.jpg
batch_size: 10
transforms:
- DecodeImage:
to_rgb: True
channel_first: False
- ResizeImage:
resize_short: 256
- CropImage:
size: 224
- NormalizeImage:
scale: 1.0/255.0
mean: [0.485, 0.456, 0.406]
std: [0.229, 0.224, 0.225]
order: ''
- ToCHWImage:
- DecodeImage:
to_rgb: True
channel_first: False
- ResizeImage:
resize_short: 256
- CropImage:
size: 224
- NormalizeImage:
scale: 1.0/255.0
mean: [0.485, 0.456, 0.406]
std: [0.229, 0.224, 0.225]
order: ''
- ToCHWImage:
PostProcess:
name: Topk
topk: 5
class_id_map_file: "ppcls/utils/imagenet1k_label_list.txt"
class_id_map_file: ppcls/utils/imagenet1k_label_list.txt
Metric:
Train:
Train:
- TopkAcc:
topk: [1, 5]
Eval:
Eval:
- TopkAcc:
topk: [1, 5]
......@@ -2,8 +2,8 @@
Global:
checkpoints: null
pretrained_model: null
output_dir: "./output/"
device: "gpu"
output_dir: ./output/
device: gpu
class_num: 1000
save_interval: 1
eval_during_train: True
......@@ -13,11 +13,11 @@ Global:
use_visualdl: False
# used for static mode and model export
image_shape: [3, 224, 224]
save_inference_dir: "./inference"
save_inference_dir: ./inference
# model architecture
Arch:
name: "VGG19"
name: VGG19
# loss function config for traing/eval process
Loss:
......@@ -44,80 +44,80 @@ Optimizer:
DataLoader:
Train:
dataset:
name: ImageNetDataset
image_root: "./dataset/ILSVRC2012/"
cls_label_path: "./dataset/ILSVRC2012/train_list.txt"
transform_ops:
- RandCropImage:
size: 224
- RandFlipImage:
flip_code: 1
- NormalizeImage:
scale: 0.00392157
mean: [0.485, 0.456, 0.406]
std: [0.229, 0.224, 0.225]
order: ''
name: ImageNetDataset
image_root: ./dataset/ILSVRC2012/
cls_label_path: ./dataset/ILSVRC2012/train_list.txt
transform_ops:
- RandCropImage:
size: 224
- RandFlipImage:
flip_code: 1
- NormalizeImage:
scale: 1.0/255.0
mean: [0.485, 0.456, 0.406]
std: [0.229, 0.224, 0.225]
order: ''
sampler:
name: DistributedBatchSampler
batch_size: 64
drop_last: False
shuffle: True
name: DistributedBatchSampler
batch_size: 64
drop_last: False
shuffle: True
loader:
num_workers: 6
use_shared_memory: True
num_workers: 4
use_shared_memory: True
Eval:
# TODO: modify to the latest trainer
dataset:
name: ImageNetDataset
image_root: "./dataset/ILSVRC2012/"
cls_label_path: "./dataset/ILSVRC2012/val_list.txt"
transform_ops:
- ResizeImage:
resize_short: 256
- CropImage:
size: 224
- NormalizeImage:
scale: 0.00392157
mean: [0.485, 0.456, 0.406]
std: [0.229, 0.224, 0.225]
order: ''
name: ImageNetDataset
image_root: ./dataset/ILSVRC2012/
cls_label_path: ./dataset/ILSVRC2012/val_list.txt
transform_ops:
- ResizeImage:
resize_short: 256
- CropImage:
size: 224
- NormalizeImage:
scale: 1.0/255.0
mean: [0.485, 0.456, 0.406]
std: [0.229, 0.224, 0.225]
order: ''
sampler:
name: DistributedBatchSampler
batch_size: 64
drop_last: False
shuffle: False
name: DistributedBatchSampler
batch_size: 64
drop_last: False
shuffle: False
loader:
num_workers: 6
use_shared_memory: True
num_workers: 4
use_shared_memory: True
Infer:
infer_imgs: "docs/images/whl/demo.jpg"
infer_imgs: docs/images/whl/demo.jpg
batch_size: 10
transforms:
- DecodeImage:
to_rgb: True
channel_first: False
- ResizeImage:
resize_short: 256
- CropImage:
size: 224
- NormalizeImage:
scale: 1.0/255.0
mean: [0.485, 0.456, 0.406]
std: [0.229, 0.224, 0.225]
order: ''
- ToCHWImage:
- DecodeImage:
to_rgb: True
channel_first: False
- ResizeImage:
resize_short: 256
- CropImage:
size: 224
- NormalizeImage:
scale: 1.0/255.0
mean: [0.485, 0.456, 0.406]
std: [0.229, 0.224, 0.225]
order: ''
- ToCHWImage:
PostProcess:
name: Topk
topk: 5
class_id_map_file: "ppcls/utils/imagenet1k_label_list.txt"
class_id_map_file: ppcls/utils/imagenet1k_label_list.txt
Metric:
Train:
Train:
- TopkAcc:
topk: [1, 5]
Eval:
Eval:
- TopkAcc:
topk: [1, 5]
......@@ -2,8 +2,8 @@
Global:
checkpoints: null
pretrained_model: null
output_dir: "./output/"
device: "gpu"
output_dir: ./output/
device: gpu
class_num: 50030
save_interval: 1
eval_during_train: True
......@@ -13,23 +13,23 @@ Global:
use_visualdl: False
# used for static mode and model export
image_shape: [3, 224, 224]
save_inference_dir: "./inference"
eval_mode: "classification"
save_inference_dir: ./inference
eval_mode: classification
# model architecture
Arch:
name: "RecModel"
name: RecModel
Backbone:
name: "ResNet50_vd"
pretrained: False
name: ResNet50_vd
pretrained: True
BackboneStopLayer:
name: "flatten_0"
name: flatten_0
Neck:
name: "FC"
name: FC
embedding_size: 2048
class_num: 512
Head:
name: "FC"
name: FC
embedding_size: 512
class_num: 50030
......@@ -56,52 +56,52 @@ Optimizer:
DataLoader:
Train:
dataset:
name: "ImageNetDataset"
image_root: "./dataset/Aliproduct/"
cls_label_path: "./dataset/Aliproduct/train_list.txt"
transform_ops:
- ResizeImage:
size: 224
- RandFlipImage:
flip_code: 1
- NormalizeImage:
scale: 0.00392157
mean: [0.485, 0.456, 0.406]
std: [0.229, 0.224, 0.225]
order: ''
name: ImageNetDataset
image_root: ./dataset/Aliproduct/
cls_label_path: ./dataset/Aliproduct/train_list.txt
transform_ops:
- ResizeImage:
size: 224
- RandFlipImage:
flip_code: 1
- NormalizeImage:
scale: 0.00392157
mean: [0.485, 0.456, 0.406]
std: [0.229, 0.224, 0.225]
order: ''
sampler:
name: DistributedBatchSampler
batch_size: 64
drop_last: False
shuffle: True
name: DistributedBatchSampler
batch_size: 64
drop_last: False
shuffle: True
loader:
num_workers: 6
use_shared_memory: True
num_workers: 4
use_shared_memory: True
Eval:
# TODO: modify to the latest trainer
dataset:
name: "ImageNetDataset"
image_root: "./dataset/Aliproduct/"
cls_label_path: "./dataset/Aliproduct/val_list.txt"
transform_ops:
- ResizeImage:
resize_short: 256
- CropImage:
size: 224
- NormalizeImage:
scale: 0.00392157
mean: [0.485, 0.456, 0.406]
std: [0.229, 0.224, 0.225]
order: ''
name: ImageNetDataset
image_root: ./dataset/Aliproduct/
cls_label_path: ./dataset/Aliproduct/val_list.txt
transform_ops:
- ResizeImage:
resize_short: 256
- CropImage:
size: 224
- NormalizeImage:
scale: 0.00392157
mean: [0.485, 0.456, 0.406]
std: [0.229, 0.224, 0.225]
order: ''
sampler:
name: DistributedBatchSampler
batch_size: 64
drop_last: False
shuffle: False
name: DistributedBatchSampler
batch_size: 64
drop_last: False
shuffle: False
loader:
num_workers: 6
use_shared_memory: True
num_workers: 4
use_shared_memory: True
Metric:
Train:
- TopkAcc:
......@@ -111,17 +111,17 @@ Metric:
topk: [1, 5]
Infer:
infer_imgs: "docs/images/whl/demo.jpg"
infer_imgs: docs/images/whl/demo.jpg
batch_size: 10
transforms:
- DecodeImage:
to_rgb: True
channel_first: False
- ResizeImage:
resize_short: 224
- NormalizeImage:
scale: 1.0/255.0
mean: [0.485, 0.456, 0.406]
std: [0.229, 0.224, 0.225]
order: ''
- ToCHWImage:
- DecodeImage:
to_rgb: True
channel_first: False
- ResizeImage:
resize_short: 224
- NormalizeImage:
scale: 1.0/255.0
mean: [0.485, 0.456, 0.406]
std: [0.229, 0.224, 0.225]
order: ''
- ToCHWImage:
# global configs
Global:
checkpoints: null
pretrained_model: null
output_dir: "./output/"
device: "gpu"
# please download pretrained model via this link:
# https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/rec/models/pretrain/product_ResNet50_vd_Aliproduct_v1.0_pretrained.pdparams
pretrained_model: product_ResNet50_vd_Aliproduct_v1.0_pretrained
output_dir: ./output/
device: gpu
class_num: 3997
save_interval: 10
eval_during_train: True
......@@ -13,29 +15,30 @@ Global:
use_visualdl: False
# used for static mode and model export
image_shape: [3, 224, 224]
save_inference_dir: "./inference"
eval_mode: "retrieval"
save_inference_dir: ./inference
eval_mode: retrieval
# model architecture
Arch:
name: "RecModel"
name: RecModel
infer_output_key: features
infer_add_softmax: False
Backbone:
name: "ResNet50_vd"
name: ResNet50_vd
pretrained: False
BackboneStopLayer:
name: "flatten_0"
name: flatten_0
Neck:
name: "FC"
name: FC
embedding_size: 2048
class_num: 512
Head:
name: "ArcMargin"
name: ArcMargin
embedding_size: 512
class_num: 3997
margin: 0.15
scale: 30
infer_output_key: "features"
infer_add_softmax: False
# loss function config for training/eval process
Loss:
......@@ -67,42 +70,41 @@ Optimizer:
DataLoader:
Train:
dataset:
name: "ImageNetDataset"
image_root: "./dataset/Inshop/"
cls_label_path: "./dataset/Inshop/train_list.txt"
transform_ops:
- ResizeImage:
size: 224
- RandFlipImage:
flip_code: 1
- NormalizeImage:
scale: 0.00392157
mean: [0.485, 0.456, 0.406]
std: [0.229, 0.224, 0.225]
order: ''
- RandomErasing:
EPSILON: 0.5
sl: 0.02
sh: 0.4
r1: 0.3
mean: [0., 0., 0.]
name: ImageNetDataset
image_root: ./dataset/Inshop/
cls_label_path: ./dataset/Inshop/train_list.txt
transform_ops:
- ResizeImage:
size: 224
- RandFlipImage:
flip_code: 1
- NormalizeImage:
scale: 0.00392157
mean: [0.485, 0.456, 0.406]
std: [0.229, 0.224, 0.225]
order: ''
- RandomErasing:
EPSILON: 0.5
sl: 0.02
sh: 0.4
r1: 0.3
mean: [0., 0., 0.]
sampler:
name: DistributedRandomIdentitySampler
batch_size: 64
num_instances: 2
drop_last: False
shuffle: True
name: DistributedRandomIdentitySampler
batch_size: 64
num_instances: 2
drop_last: False
shuffle: True
loader:
num_workers: 6
use_shared_memory: True
num_workers: 4
use_shared_memory: True
Eval:
Query:
# TODO: modify to the latest trainer
dataset:
name: "ImageNetDataset"
image_root: "./dataset/Inshop/"
cls_label_path: "./dataset/Inshop/query_list.txt"
name: ImageNetDataset
image_root: ./dataset/Inshop/
cls_label_path: ./dataset/Inshop/query_list.txt
transform_ops:
- ResizeImage:
size: 224
......@@ -117,20 +119,19 @@ DataLoader:
drop_last: False
shuffle: False
loader:
num_workers: 6
num_workers: 4
use_shared_memory: True
Gallery:
# TODO: modify to the latest trainer
dataset:
name: "ImageNetDataset"
image_root: "./dataset/Inshop/"
cls_label_path: "./dataset/Inshop/gallery_list.txt"
name: ImageNetDataset
image_root: ./dataset/Inshop/
cls_label_path: ./dataset/Inshop/gallery_list.txt
transform_ops:
- ResizeImage:
size: 224
- NormalizeImage:
scale: 0.00392157
scale: 1.0/255.0
mean: [0.485, 0.456, 0.406]
std: [0.229, 0.224, 0.225]
order: ''
......@@ -140,7 +141,7 @@ DataLoader:
drop_last: False
shuffle: False
loader:
num_workers: 6
num_workers: 4
use_shared_memory: True
Metric:
......@@ -149,17 +150,17 @@ Metric:
topk: [1, 5]
Infer:
infer_imgs: "docs/images/whl/demo.jpg"
infer_imgs: docs/images/whl/demo.jpg
batch_size: 10
transforms:
- DecodeImage:
to_rgb: True
channel_first: False
- ResizeImage:
resize_short: 224
- NormalizeImage:
scale: 1.0/255.0
mean: [0.485, 0.456, 0.406]
std: [0.229, 0.224, 0.225]
order: ''
- ToCHWImage:
- DecodeImage:
to_rgb: True
channel_first: False
- ResizeImage:
resize_short: 224
- NormalizeImage:
scale: 1.0/255.0
mean: [0.485, 0.456, 0.406]
std: [0.229, 0.224, 0.225]
order: ''
- ToCHWImage:
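The retrieval configs here replace a plain `FC` head with `ArcMargin` (`embedding_size: 512`, `margin: 0.15`, `scale: 30`). The snippet below is a rough, single-sample NumPy illustration of what an additive-angular-margin head of this kind computes at training time; it is not the PaddleClas layer, and the weight matrix and function name are placeholders for the example.

```python
import numpy as np

# Single-sample illustration of an ArcMargin-style head (margin=0.15, scale=30):
# add an angular margin to the target-class cosine, then scale all logits.
def arc_margin_logits(embedding, weight, label, margin=0.15, scale=30.0):
    """embedding: (D,), weight: (C, D), label: int -> (C,) logits."""
    e = embedding / np.linalg.norm(embedding)
    w = weight / np.linalg.norm(weight, axis=1, keepdims=True)
    cos = w @ e                                    # per-class cosine similarity
    theta = np.arccos(np.clip(cos, -1.0, 1.0))
    logits = cos.copy()
    logits[label] = np.cos(theta[label] + margin)  # margin on the target class only
    return scale * logits
```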
# global configs
Global:
checkpoints: null
pretrained_model: null
output_dir: "./output/"
device: "gpu"
# please download pretrained model via this link:
# https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/rec/models/pretrain/product_ResNet50_vd_Aliproduct_v1.0_pretrained.pdparams
pretrained_model: product_ResNet50_vd_Aliproduct_v1.0_pretrained
output_dir: ./output/
device: gpu
class_num: 11319
save_interval: 10
eval_during_train: True
......@@ -13,28 +15,28 @@ Global:
use_visualdl: False
# used for static mode and model export
image_shape: [3, 224, 224]
save_inference_dir: "./inference"
eval_mode: "retrieval"
save_inference_dir: ./inference
eval_mode: retrieval
# model architecture
Arch:
name: "RecModel"
name: RecModel
Backbone:
name: "ResNet50_vd"
name: ResNet50_vd
pretrained: False
BackboneStopLayer:
name: "flatten_0"
name: flatten_0
Neck:
name: "FC"
name: FC
embedding_size: 2048
class_num: 512
Head:
name: "ArcMargin"
name: ArcMargin
embedding_size: 512
class_num: 11319
margin: 0.15
scale: 30
infer_output_key: "features"
infer_output_key: features
infer_add_softmax: False
# loss function config for training/eval process
......@@ -67,42 +69,41 @@ Optimizer:
DataLoader:
Train:
dataset:
name: "ImageNetDataset"
image_root: "./dataset/Stanford_Online_Products/"
cls_label_path: "./dataset/Stanford_Online_Products/train_list.txt"
transform_ops:
- ResizeImage:
size: 224
- RandFlipImage:
flip_code: 1
- NormalizeImage:
scale: 0.00392157
mean: [0.485, 0.456, 0.406]
std: [0.229, 0.224, 0.225]
order: ''
- RandomErasing:
EPSILON: 0.5
sl: 0.02
sh: 0.4
r1: 0.3
mean: [0., 0., 0.]
name: ImageNetDataset
image_root: ./dataset/Stanford_Online_Products/
cls_label_path: ./dataset/Stanford_Online_Products/train_list.txt
transform_ops:
- ResizeImage:
size: 224
- RandFlipImage:
flip_code: 1
- NormalizeImage:
scale: 0.00392157
mean: [0.485, 0.456, 0.406]
std: [0.229, 0.224, 0.225]
order: ''
- RandomErasing:
EPSILON: 0.5
sl: 0.02
sh: 0.4
r1: 0.3
mean: [0., 0., 0.]
sampler:
name: DistributedRandomIdentitySampler
batch_size: 64
num_instances: 2
drop_last: False
shuffle: True
name: DistributedRandomIdentitySampler
batch_size: 64
num_instances: 2
drop_last: False
shuffle: True
loader:
num_workers: 6
use_shared_memory: True
num_workers: 6
use_shared_memory: True
Eval:
Query:
# TODO: modify to the latest trainer
dataset:
name: "ImageNetDataset"
image_root: "./dataset/Stanford_Online_Products/"
cls_label_path: "./dataset/Stanford_Online_Products/test_list.txt"
name: ImageNetDataset
image_root: ./dataset/Stanford_Online_Products/
cls_label_path: ./dataset/Stanford_Online_Products/test_list.txt
transform_ops:
- ResizeImage:
size: 224
......@@ -117,15 +118,14 @@ DataLoader:
drop_last: False
shuffle: False
loader:
num_workers: 6
num_workers: 4
use_shared_memory: True
Gallery:
# TODO: modify to the latest trainer
dataset:
name: "ImageNetDataset"
image_root: "./dataset/Stanford_Online_Products/"
cls_label_path: "./dataset/Stanford_Online_Products/test_list.txt"
name: ImageNetDataset
image_root: ./dataset/Stanford_Online_Products/
cls_label_path: ./dataset/Stanford_Online_Products/test_list.txt
transform_ops:
- ResizeImage:
size: 224
......@@ -140,7 +140,7 @@ DataLoader:
drop_last: False
shuffle: False
loader:
num_workers: 6
num_workers: 4
use_shared_memory: True
Metric:
......@@ -149,17 +149,17 @@ Metric:
topk: [1, 5]
Infer:
infer_imgs: "docs/images/whl/demo.jpg"
infer_imgs: docs/images/whl/demo.jpg
batch_size: 10
transforms:
- DecodeImage:
to_rgb: True
channel_first: False
- ResizeImage:
resize_short: 224
- NormalizeImage:
scale: 1.0/255.0
mean: [0.485, 0.456, 0.406]
std: [0.229, 0.224, 0.225]
order: ''
- ToCHWImage:
- DecodeImage:
to_rgb: True
channel_first: False
- ResizeImage:
resize_short: 224
- NormalizeImage:
scale: 1.0/255.0
mean: [0.485, 0.456, 0.406]
std: [0.229, 0.224, 0.225]
order: ''
- ToCHWImage: