Unverified Commit b9786424 authored by cuicheng01, committed by GitHub

Merge pull request #819 from cuicheng01/develop_reg

Update configs
......@@ -2,8 +2,8 @@
Global:
checkpoints: null
pretrained_model: null
output_dir: "./output/"
device: "gpu"
output_dir: ./output/
device: gpu
class_num: 1000
save_interval: 1
eval_during_train: True
......@@ -13,11 +13,11 @@ Global:
use_visualdl: False
# used for static mode and model export
image_shape: [3, 224, 224]
save_inference_dir: "./inference"
save_inference_dir: ./inference
# model architecture
Arch:
name: "HRNet_W18_C"
name: HRNet_W18_C
# loss function config for training/eval process
Loss:
......@@ -46,80 +46,80 @@ Optimizer:
DataLoader:
Train:
dataset:
name: ImageNetDataset
image_root: "./dataset/ILSVRC2012/"
cls_label_path: "./dataset/ILSVRC2012/train_list.txt"
transform_ops:
- RandCropImage:
size: 224
- RandFlipImage:
flip_code: 1
- NormalizeImage:
scale: 0.00392157
mean: [0.485, 0.456, 0.406]
std: [0.229, 0.224, 0.225]
order: ''
name: ImageNetDataset
image_root: ./dataset/ILSVRC2012/
cls_label_path: ./dataset/ILSVRC2012/train_list.txt
transform_ops:
- RandCropImage:
size: 224
- RandFlipImage:
flip_code: 1
- NormalizeImage:
scale: 1.0/255.0
mean: [0.485, 0.456, 0.406]
std: [0.229, 0.224, 0.225]
order: ''
sampler:
name: DistributedBatchSampler
batch_size: 64
drop_last: False
shuffle: True
name: DistributedBatchSampler
batch_size: 64
drop_last: False
shuffle: True
loader:
num_workers: 6
use_shared_memory: True
num_workers: 4
use_shared_memory: True
Eval:
# TODO: modify to the latest trainer
dataset:
name: ImageNetDataset
image_root: "./dataset/ILSVRC2012/"
cls_label_path: "./dataset/ILSVRC2012/val_list.txt"
transform_ops:
- ResizeImage:
resize_short: 256
- CropImage:
size: 224
- NormalizeImage:
scale: 0.00392157
mean: [0.485, 0.456, 0.406]
std: [0.229, 0.224, 0.225]
order: ''
name: ImageNetDataset
image_root: ./dataset/ILSVRC2012/
cls_label_path: ./dataset/ILSVRC2012/val_list.txt
transform_ops:
- ResizeImage:
resize_short: 256
- CropImage:
size: 224
- NormalizeImage:
scale: 1.0/255.0
mean: [0.485, 0.456, 0.406]
std: [0.229, 0.224, 0.225]
order: ''
sampler:
name: DistributedBatchSampler
batch_size: 64
drop_last: False
shuffle: False
name: DistributedBatchSampler
batch_size: 64
drop_last: False
shuffle: False
loader:
num_workers: 6
use_shared_memory: True
num_workers: 4
use_shared_memory: True
Infer:
infer_imgs: "docs/images/whl/demo.jpg"
infer_imgs: docs/images/whl/demo.jpg
batch_size: 10
transforms:
- DecodeImage:
to_rgb: True
channel_first: False
- ResizeImage:
resize_short: 256
- CropImage:
size: 224
- NormalizeImage:
scale: 1.0/255.0
mean: [0.485, 0.456, 0.406]
std: [0.229, 0.224, 0.225]
order: ''
- ToCHWImage:
- DecodeImage:
to_rgb: True
channel_first: False
- ResizeImage:
resize_short: 256
- CropImage:
size: 224
- NormalizeImage:
scale: 1.0/255.0
mean: [0.485, 0.456, 0.406]
std: [0.229, 0.224, 0.225]
order: ''
- ToCHWImage:
PostProcess:
name: Topk
topk: 5
class_id_map_file: "ppcls/utils/imagenet1k_label_list.txt"
class_id_map_file: ppcls/utils/imagenet1k_label_list.txt
Metric:
Train:
Train:
- TopkAcc:
topk: [1, 5]
Eval:
Eval:
- TopkAcc:
topk: [1, 5]
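
Throughout these configs the NormalizeImage `scale` changes from the rounded literal `0.00392157` to the expression string `1.0/255.0`; both resolve to the same multiplier. Below is a minimal numpy sketch of how such an op could accept either form. The eval-style string parsing is an assumption for illustration, not necessarily PaddleClas's exact implementation.

```python
import numpy as np

class NormalizeImage:
    """Scale pixel values, then normalize with per-channel mean/std (HWC layout assumed)."""

    def __init__(self, scale, mean, std, order=''):
        # Accept either a numeric scale (0.00392157) or an expression string ("1.0/255.0").
        self.scale = eval(scale) if isinstance(scale, str) else float(scale)
        self.mean = np.asarray(mean, dtype=np.float32).reshape(1, 1, 3)
        self.std = np.asarray(std, dtype=np.float32).reshape(1, 1, 3)

    def __call__(self, img):
        img = np.asarray(img, dtype=np.float32)
        return (img * self.scale - self.mean) / self.std

norm = NormalizeImage(scale="1.0/255.0",
                      mean=[0.485, 0.456, 0.406],
                      std=[0.229, 0.224, 0.225])
dummy = (np.random.rand(224, 224, 3) * 255).astype(np.uint8)
print(norm(dummy).shape)  # (224, 224, 3); 1.0/255.0 ~= 0.00392157
```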
......@@ -2,8 +2,8 @@
Global:
checkpoints: null
pretrained_model: null
output_dir: "./output/"
device: "gpu"
output_dir: ./output/
device: gpu
class_num: 1000
save_interval: 1
eval_during_train: True
......@@ -13,11 +13,11 @@ Global:
use_visualdl: False
# used for static mode and model export
image_shape: [3, 224, 224]
save_inference_dir: "./inference"
save_inference_dir: ./inference
# model architecture
Arch:
name: "HRNet_W30_C"
name: HRNet_W30_C
# loss function config for training/eval process
Loss:
......@@ -46,80 +46,80 @@ Optimizer:
DataLoader:
Train:
dataset:
name: ImageNetDataset
image_root: "./dataset/ILSVRC2012/"
cls_label_path: "./dataset/ILSVRC2012/train_list.txt"
transform_ops:
- RandCropImage:
size: 224
- RandFlipImage:
flip_code: 1
- NormalizeImage:
scale: 0.00392157
mean: [0.485, 0.456, 0.406]
std: [0.229, 0.224, 0.225]
order: ''
name: ImageNetDataset
image_root: ./dataset/ILSVRC2012/
cls_label_path: ./dataset/ILSVRC2012/train_list.txt
transform_ops:
- RandCropImage:
size: 224
- RandFlipImage:
flip_code: 1
- NormalizeImage:
scale: 1.0/255.0
mean: [0.485, 0.456, 0.406]
std: [0.229, 0.224, 0.225]
order: ''
sampler:
name: DistributedBatchSampler
batch_size: 64
drop_last: False
shuffle: True
name: DistributedBatchSampler
batch_size: 64
drop_last: False
shuffle: True
loader:
num_workers: 6
use_shared_memory: True
num_workers: 4
use_shared_memory: True
Eval:
# TODO: modify to the latest trainer
dataset:
name: ImageNetDataset
image_root: "./dataset/ILSVRC2012/"
cls_label_path: "./dataset/ILSVRC2012/val_list.txt"
transform_ops:
- ResizeImage:
resize_short: 256
- CropImage:
size: 224
- NormalizeImage:
scale: 0.00392157
mean: [0.485, 0.456, 0.406]
std: [0.229, 0.224, 0.225]
order: ''
name: ImageNetDataset
image_root: ./dataset/ILSVRC2012/
cls_label_path: ./dataset/ILSVRC2012/val_list.txt
transform_ops:
- ResizeImage:
resize_short: 256
- CropImage:
size: 224
- NormalizeImage:
scale: 1.0/255.0
mean: [0.485, 0.456, 0.406]
std: [0.229, 0.224, 0.225]
order: ''
sampler:
name: DistributedBatchSampler
batch_size: 64
drop_last: False
shuffle: False
name: DistributedBatchSampler
batch_size: 64
drop_last: False
shuffle: False
loader:
num_workers: 6
use_shared_memory: True
num_workers: 4
use_shared_memory: True
Infer:
infer_imgs: "docs/images/whl/demo.jpg"
infer_imgs: docs/images/whl/demo.jpg
batch_size: 10
transforms:
- DecodeImage:
to_rgb: True
channel_first: False
- ResizeImage:
resize_short: 256
- CropImage:
size: 224
- NormalizeImage:
scale: 1.0/255.0
mean: [0.485, 0.456, 0.406]
std: [0.229, 0.224, 0.225]
order: ''
- ToCHWImage:
- DecodeImage:
to_rgb: True
channel_first: False
- ResizeImage:
resize_short: 256
- CropImage:
size: 224
- NormalizeImage:
scale: 1.0/255.0
mean: [0.485, 0.456, 0.406]
std: [0.229, 0.224, 0.225]
order: ''
- ToCHWImage:
PostProcess:
name: Topk
topk: 5
class_id_map_file: "ppcls/utils/imagenet1k_label_list.txt"
class_id_map_file: ppcls/utils/imagenet1k_label_list.txt
Metric:
Train:
Train:
- TopkAcc:
topk: [1, 5]
Eval:
Eval:
- TopkAcc:
topk: [1, 5]
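
Each `transform_ops` entry is a single-key mapping (op name to keyword arguments), so the training pipeline above amounts to: random crop to 224, random horizontal flip, then normalization, applied in order. A hedged sketch of that chain; the helpers are simplified stand-ins (for example, the real RandCropImage also samples scale and aspect ratio).

```python
import random
import numpy as np
from PIL import Image

def rand_crop(img, size=224):
    # Simplified stand-in for RandCropImage (the real op also samples scale/aspect ratio).
    w, h = img.size
    if w < size or h < size:
        return img.resize((size, size))
    x, y = random.randint(0, w - size), random.randint(0, h - size)
    return img.crop((x, y, x + size, y + size))

def rand_flip(img, flip_code=1):
    # flip_code 1 -> horizontal flip, applied with probability 0.5.
    if flip_code == 1 and random.random() < 0.5:
        return img.transpose(Image.FLIP_LEFT_RIGHT)
    return img

def normalize(img, scale=1.0 / 255.0,
              mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225)):
    arr = np.asarray(img, dtype=np.float32) * scale
    return (arr - np.array(mean)) / np.array(std)

def train_pipeline(img):
    # Same order as transform_ops: RandCropImage -> RandFlipImage -> NormalizeImage.
    return normalize(rand_flip(rand_crop(img)))

print(train_pipeline(Image.new("RGB", (256, 256))).shape)  # (224, 224, 3)
```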
......@@ -2,8 +2,8 @@
Global:
checkpoints: null
pretrained_model: null
output_dir: "./output/"
device: "gpu"
output_dir: ./output/
device: gpu
class_num: 1000
save_interval: 1
eval_during_train: True
......@@ -13,11 +13,11 @@ Global:
use_visualdl: False
# used for static mode and model export
image_shape: [3, 224, 224]
save_inference_dir: "./inference"
save_inference_dir: ./inference
# model architecture
Arch:
name: "HRNet_W32_C"
name: HRNet_W32_C
# loss function config for training/eval process
Loss:
......@@ -46,80 +46,80 @@ Optimizer:
DataLoader:
Train:
dataset:
name: ImageNetDataset
image_root: "./dataset/ILSVRC2012/"
cls_label_path: "./dataset/ILSVRC2012/train_list.txt"
transform_ops:
- RandCropImage:
size: 224
- RandFlipImage:
flip_code: 1
- NormalizeImage:
scale: 0.00392157
mean: [0.485, 0.456, 0.406]
std: [0.229, 0.224, 0.225]
order: ''
name: ImageNetDataset
image_root: ./dataset/ILSVRC2012/
cls_label_path: ./dataset/ILSVRC2012/train_list.txt
transform_ops:
- RandCropImage:
size: 224
- RandFlipImage:
flip_code: 1
- NormalizeImage:
scale: 1.0/255.0
mean: [0.485, 0.456, 0.406]
std: [0.229, 0.224, 0.225]
order: ''
sampler:
name: DistributedBatchSampler
batch_size: 64
drop_last: False
shuffle: True
name: DistributedBatchSampler
batch_size: 64
drop_last: False
shuffle: True
loader:
num_workers: 6
use_shared_memory: True
num_workers: 4
use_shared_memory: True
Eval:
# TODO: modify to the latest trainer
dataset:
name: ImageNetDataset
image_root: "./dataset/ILSVRC2012/"
cls_label_path: "./dataset/ILSVRC2012/val_list.txt"
transform_ops:
- ResizeImage:
resize_short: 256
- CropImage:
size: 224
- NormalizeImage:
scale: 0.00392157
mean: [0.485, 0.456, 0.406]
std: [0.229, 0.224, 0.225]
order: ''
name: ImageNetDataset
image_root: ./dataset/ILSVRC2012/
cls_label_path: ./dataset/ILSVRC2012/val_list.txt
transform_ops:
- ResizeImage:
resize_short: 256
- CropImage:
size: 224
- NormalizeImage:
scale: 1.0/255.0
mean: [0.485, 0.456, 0.406]
std: [0.229, 0.224, 0.225]
order: ''
sampler:
name: DistributedBatchSampler
batch_size: 64
drop_last: False
shuffle: False
name: DistributedBatchSampler
batch_size: 64
drop_last: False
shuffle: False
loader:
num_workers: 6
use_shared_memory: True
num_workers: 4
use_shared_memory: True
Infer:
infer_imgs: "docs/images/whl/demo.jpg"
infer_imgs: docs/images/whl/demo.jpg
batch_size: 10
transforms:
- DecodeImage:
to_rgb: True
channel_first: False
- ResizeImage:
resize_short: 256
- CropImage:
size: 224
- NormalizeImage:
scale: 1.0/255.0
mean: [0.485, 0.456, 0.406]
std: [0.229, 0.224, 0.225]
order: ''
- ToCHWImage:
- DecodeImage:
to_rgb: True
channel_first: False
- ResizeImage:
resize_short: 256
- CropImage:
size: 224
- NormalizeImage:
scale: 1.0/255.0
mean: [0.485, 0.456, 0.406]
std: [0.229, 0.224, 0.225]
order: ''
- ToCHWImage:
PostProcess:
name: Topk
topk: 5
class_id_map_file: "ppcls/utils/imagenet1k_label_list.txt"
class_id_map_file: ppcls/utils/imagenet1k_label_list.txt
Metric:
Train:
Train:
- TopkAcc:
topk: [1, 5]
Eval:
Eval:
- TopkAcc:
topk: [1, 5]
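
For evaluation the pipeline resizes the short side to 256 and center-crops to 224. A small Pillow sketch of those two steps (function names are illustrative, not the library's):

```python
from PIL import Image

def resize_short(img, target=256):
    # Scale so the shorter side equals `target`, keeping aspect ratio.
    w, h = img.size
    ratio = target / min(w, h)
    return img.resize((round(w * ratio), round(h * ratio)))

def center_crop(img, size=224):
    w, h = img.size
    left, top = (w - size) // 2, (h - size) // 2
    return img.crop((left, top, left + size, top + size))

img = Image.new("RGB", (640, 480))
print(center_crop(resize_short(img)).size)  # (224, 224)
```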
......@@ -2,8 +2,8 @@
Global:
checkpoints: null
pretrained_model: null
output_dir: "./output/"
device: "gpu"
output_dir: ./output/
device: gpu
class_num: 1000
save_interval: 1
eval_during_train: True
......@@ -13,11 +13,11 @@ Global:
use_visualdl: False
# used for static mode and model export
image_shape: [3, 224, 224]
save_inference_dir: "./inference"
save_inference_dir: ./inference
# model architecture
Arch:
name: "HRNet_W40_C"
name: HRNet_W40_C
# loss function config for training/eval process
Loss:
......@@ -46,80 +46,80 @@ Optimizer:
DataLoader:
Train:
dataset:
name: ImageNetDataset
image_root: "./dataset/ILSVRC2012/"
cls_label_path: "./dataset/ILSVRC2012/train_list.txt"
transform_ops:
- RandCropImage:
size: 224
- RandFlipImage:
flip_code: 1
- NormalizeImage:
scale: 0.00392157
mean: [0.485, 0.456, 0.406]
std: [0.229, 0.224, 0.225]
order: ''
name: ImageNetDataset
image_root: ./dataset/ILSVRC2012/
cls_label_path: ./dataset/ILSVRC2012/train_list.txt
transform_ops:
- RandCropImage:
size: 224
- RandFlipImage:
flip_code: 1
- NormalizeImage:
scale: 1.0/255.0
mean: [0.485, 0.456, 0.406]
std: [0.229, 0.224, 0.225]
order: ''
sampler:
name: DistributedBatchSampler
batch_size: 64
drop_last: False
shuffle: True
name: DistributedBatchSampler
batch_size: 64
drop_last: False
shuffle: True
loader:
num_workers: 6
use_shared_memory: True
num_workers: 4
use_shared_memory: True
Eval:
# TODO: modify to the latest trainer
dataset:
name: ImageNetDataset
image_root: "./dataset/ILSVRC2012/"
cls_label_path: "./dataset/ILSVRC2012/val_list.txt"
transform_ops:
- ResizeImage:
resize_short: 256
- CropImage:
size: 224
- NormalizeImage:
scale: 0.00392157
mean: [0.485, 0.456, 0.406]
std: [0.229, 0.224, 0.225]
order: ''
name: ImageNetDataset
image_root: ./dataset/ILSVRC2012/
cls_label_path: ./dataset/ILSVRC2012/val_list.txt
transform_ops:
- ResizeImage:
resize_short: 256
- CropImage:
size: 224
- NormalizeImage:
scale: 1.0/255.0
mean: [0.485, 0.456, 0.406]
std: [0.229, 0.224, 0.225]
order: ''
sampler:
name: DistributedBatchSampler
batch_size: 64
drop_last: False
shuffle: False
name: DistributedBatchSampler
batch_size: 64
drop_last: False
shuffle: False
loader:
num_workers: 6
use_shared_memory: True
num_workers: 4
use_shared_memory: True
Infer:
infer_imgs: "docs/images/whl/demo.jpg"
infer_imgs: docs/images/whl/demo.jpg
batch_size: 10
transforms:
- DecodeImage:
to_rgb: True
channel_first: False
- ResizeImage:
resize_short: 256
- CropImage:
size: 224
- NormalizeImage:
scale: 1.0/255.0
mean: [0.485, 0.456, 0.406]
std: [0.229, 0.224, 0.225]
order: ''
- ToCHWImage:
- DecodeImage:
to_rgb: True
channel_first: False
- ResizeImage:
resize_short: 256
- CropImage:
size: 224
- NormalizeImage:
scale: 1.0/255.0
mean: [0.485, 0.456, 0.406]
std: [0.229, 0.224, 0.225]
order: ''
- ToCHWImage:
PostProcess:
name: Topk
topk: 5
class_id_map_file: "ppcls/utils/imagenet1k_label_list.txt"
class_id_map_file: ppcls/utils/imagenet1k_label_list.txt
Metric:
Train:
Train:
- TopkAcc:
topk: [1, 5]
Eval:
Eval:
- TopkAcc:
topk: [1, 5]
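
The `sampler` and `loader` blocks map fairly directly onto `paddle.io` objects: a DistributedBatchSampler with batch_size 64 and a DataLoader with `num_workers: 4` and shared memory. A rough sketch with a toy in-memory dataset standing in for ImageNetDataset; this wiring is an assumption for illustration, not the trainer's actual code.

```python
import numpy as np
from paddle.io import Dataset, DataLoader, DistributedBatchSampler

class ToyDataset(Dataset):
    # Stands in for ImageNetDataset, which reads image_root/cls_label_path.
    def __len__(self):
        return 256

    def __getitem__(self, idx):
        return np.zeros([3, 224, 224], dtype="float32"), np.array([idx % 1000])

dataset = ToyDataset()
sampler = DistributedBatchSampler(dataset, batch_size=64, shuffle=True, drop_last=False)
loader = DataLoader(dataset, batch_sampler=sampler, num_workers=4, use_shared_memory=True)

for images, labels in loader:
    print(images.shape, labels.shape)  # [64, 3, 224, 224] [64, 1]
    break
```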
......@@ -2,8 +2,8 @@
Global:
checkpoints: null
pretrained_model: null
output_dir: "./output/"
device: "gpu"
output_dir: ./output/
device: gpu
class_num: 1000
save_interval: 1
eval_during_train: True
......@@ -13,11 +13,11 @@ Global:
use_visualdl: False
# used for static mode and model export
image_shape: [3, 224, 224]
save_inference_dir: "./inference"
save_inference_dir: ./inference
# model architecture
Arch:
name: "HRNet_W44_C"
name: HRNet_W44_C
# loss function config for training/eval process
Loss:
......@@ -46,78 +46,80 @@ Optimizer:
DataLoader:
Train:
dataset:
name: ImageNetDataset
image_root: "./dataset/ILSVRC2012/"
cls_label_path: "./dataset/ILSVRC2012/train_list.txt"
transform_ops:
- RandCropImage:
size: 224
- RandFlipImage:
flip_code: 1
- NormalizeImage:
scale: 0.00392157
mean: [0.485, 0.456, 0.406]
std: [0.229, 0.224, 0.225]
order: ''
name: ImageNetDataset
image_root: ./dataset/ILSVRC2012/
cls_label_path: ./dataset/ILSVRC2012/train_list.txt
transform_ops:
- RandCropImage:
size: 224
- RandFlipImage:
flip_code: 1
- NormalizeImage:
scale: 1.0/255.0
mean: [0.485, 0.456, 0.406]
std: [0.229, 0.224, 0.225]
order: ''
sampler:
name: DistributedBatchSampler
batch_size: 64
drop_last: False
shuffle: True
name: DistributedBatchSampler
batch_size: 64
drop_last: False
shuffle: True
loader:
num_workers: 6
use_shared_memory: True
num_workers: 4
use_shared_memory: True
Eval:
# TODO: modify to the latest trainer
dataset:
name: ImageNetDataset
image_root: "./dataset/ILSVRC2012/"
cls_label_path: "./dataset/ILSVRC2012/val_list.txt"
transform_ops:
- ResizeImage:
size: 224
- NormalizeImage:
scale: 0.00392157
mean: [0.485, 0.456, 0.406]
std: [0.229, 0.224, 0.225]
order: ''
name: ImageNetDataset
image_root: ./dataset/ILSVRC2012/
cls_label_path: ./dataset/ILSVRC2012/val_list.txt
transform_ops:
- ResizeImage:
resize_short: 256
- CropImage:
size: 224
- NormalizeImage:
scale: 1.0/255.0
mean: [0.485, 0.456, 0.406]
std: [0.229, 0.224, 0.225]
order: ''
sampler:
name: DistributedBatchSampler
batch_size: 64
drop_last: False
shuffle: False
name: DistributedBatchSampler
batch_size: 64
drop_last: False
shuffle: False
loader:
num_workers: 6
use_shared_memory: True
num_workers: 4
use_shared_memory: True
Infer:
infer_imgs: "docs/images/whl/demo.jpg"
infer_imgs: docs/images/whl/demo.jpg
batch_size: 10
transforms:
- DecodeImage:
to_rgb: True
channel_first: False
- ResizeImage:
resize_short: 256
- CropImage:
size: 224
- NormalizeImage:
scale: 1.0/255.0
mean: [0.485, 0.456, 0.406]
std: [0.229, 0.224, 0.225]
order: ''
- ToCHWImage:
- DecodeImage:
to_rgb: True
channel_first: False
- ResizeImage:
resize_short: 256
- CropImage:
size: 224
- NormalizeImage:
scale: 1.0/255.0
mean: [0.485, 0.456, 0.406]
std: [0.229, 0.224, 0.225]
order: ''
- ToCHWImage:
PostProcess:
name: Topk
topk: 5
class_id_map_file: "ppcls/utils/imagenet1k_label_list.txt"
class_id_map_file: ppcls/utils/imagenet1k_label_list.txt
Metric:
Train:
Train:
- TopkAcc:
topk: [1, 5]
Eval:
Eval:
- TopkAcc:
topk: [1, 5]
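
The `PostProcess` block turns raw model outputs into the five most probable classes, optionally mapping indices to names via `ppcls/utils/imagenet1k_label_list.txt`. A numpy sketch of that top-k step; the optional `class_id_map` dict is a hypothetical stand-in for the parsed label file.

```python
import numpy as np

def topk_postprocess(logits, k=5, class_id_map=None):
    # Softmax over the class axis, then take the k highest-probability indices.
    exp = np.exp(logits - logits.max(axis=-1, keepdims=True))
    probs = exp / exp.sum(axis=-1, keepdims=True)
    idx = np.argsort(-probs, axis=-1)[:, :k]
    results = []
    for row, ids in zip(probs, idx):
        entry = {"class_ids": ids.tolist(), "scores": row[ids].round(4).tolist()}
        if class_id_map:
            entry["label_names"] = [class_id_map[i] for i in ids]
        results.append(entry)
    return results

logits = np.random.randn(2, 1000)            # a batch of 2 predictions
print(topk_postprocess(logits, k=5)[0]["class_ids"])
```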
......@@ -2,8 +2,8 @@
Global:
checkpoints: null
pretrained_model: null
output_dir: "./output/"
device: "gpu"
output_dir: ./output/
device: gpu
class_num: 1000
save_interval: 1
eval_during_train: True
......@@ -13,11 +13,11 @@ Global:
use_visualdl: False
# used for static mode and model export
image_shape: [3, 224, 224]
save_inference_dir: "./inference"
save_inference_dir: ./inference
# model architecture
Arch:
name: "HRNet_W48_C"
name: HRNet_W48_C
# loss function config for training/eval process
Loss:
......@@ -46,80 +46,80 @@ Optimizer:
DataLoader:
Train:
dataset:
name: ImageNetDataset
image_root: "./dataset/ILSVRC2012/"
cls_label_path: "./dataset/ILSVRC2012/train_list.txt"
transform_ops:
- RandCropImage:
size: 224
- RandFlipImage:
flip_code: 1
- NormalizeImage:
scale: 0.00392157
mean: [0.485, 0.456, 0.406]
std: [0.229, 0.224, 0.225]
order: ''
name: ImageNetDataset
image_root: ./dataset/ILSVRC2012/
cls_label_path: ./dataset/ILSVRC2012/train_list.txt
transform_ops:
- RandCropImage:
size: 224
- RandFlipImage:
flip_code: 1
- NormalizeImage:
scale: 1.0/255.0
mean: [0.485, 0.456, 0.406]
std: [0.229, 0.224, 0.225]
order: ''
sampler:
name: DistributedBatchSampler
batch_size: 64
drop_last: False
shuffle: True
name: DistributedBatchSampler
batch_size: 64
drop_last: False
shuffle: True
loader:
num_workers: 6
use_shared_memory: True
num_workers: 4
use_shared_memory: True
Eval:
# TODO: modify to the latest trainer
dataset:
name: ImageNetDataset
image_root: "./dataset/ILSVRC2012/"
cls_label_path: "./dataset/ILSVRC2012/val_list.txt"
transform_ops:
- ResizeImage:
resize_short: 256
- CropImage:
size: 224
- NormalizeImage:
scale: 0.00392157
mean: [0.485, 0.456, 0.406]
std: [0.229, 0.224, 0.225]
order: ''
name: ImageNetDataset
image_root: ./dataset/ILSVRC2012/
cls_label_path: ./dataset/ILSVRC2012/val_list.txt
transform_ops:
- ResizeImage:
resize_short: 256
- CropImage:
size: 224
- NormalizeImage:
scale: 1.0/255.0
mean: [0.485, 0.456, 0.406]
std: [0.229, 0.224, 0.225]
order: ''
sampler:
name: DistributedBatchSampler
batch_size: 64
drop_last: False
shuffle: False
name: DistributedBatchSampler
batch_size: 64
drop_last: False
shuffle: False
loader:
num_workers: 6
use_shared_memory: True
num_workers: 4
use_shared_memory: True
Infer:
infer_imgs: "docs/images/whl/demo.jpg"
infer_imgs: docs/images/whl/demo.jpg
batch_size: 10
transforms:
- DecodeImage:
to_rgb: True
channel_first: False
- ResizeImage:
resize_short: 256
- CropImage:
size: 224
- NormalizeImage:
scale: 1.0/255.0
mean: [0.485, 0.456, 0.406]
std: [0.229, 0.224, 0.225]
order: ''
- ToCHWImage:
- DecodeImage:
to_rgb: True
channel_first: False
- ResizeImage:
resize_short: 256
- CropImage:
size: 224
- NormalizeImage:
scale: 1.0/255.0
mean: [0.485, 0.456, 0.406]
std: [0.229, 0.224, 0.225]
order: ''
- ToCHWImage:
PostProcess:
name: Topk
topk: 5
class_id_map_file: "ppcls/utils/imagenet1k_label_list.txt"
class_id_map_file: ppcls/utils/imagenet1k_label_list.txt
Metric:
Train:
Train:
- TopkAcc:
topk: [1, 5]
Eval:
Eval:
- TopkAcc:
topk: [1, 5]
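
The `Metric` block reports TopkAcc for k = 1 and 5 in both phases: a sample counts as correct for a given k if the true label appears among the k highest-scoring classes. A compact, illustrative numpy version:

```python
import numpy as np

def topk_acc(logits, labels, ks=(1, 5)):
    # Sort classes by descending score, then check membership of the true label.
    order = np.argsort(-logits, axis=1)
    return {f"top{k}": float(np.mean([l in row[:k] for row, l in zip(order, labels)]))
            for k in ks}

logits = np.random.randn(8, 1000)
labels = np.random.randint(0, 1000, size=8)
print(topk_acc(logits, labels))  # e.g. {'top1': 0.0, 'top5': 0.0}
```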
......@@ -2,8 +2,8 @@
Global:
checkpoints: null
pretrained_model: null
output_dir: "./output/"
device: "gpu"
output_dir: ./output/
device: gpu
class_num: 1000
save_interval: 1
eval_during_train: True
......@@ -13,11 +13,11 @@ Global:
use_visualdl: False
# used for static mode and model export
image_shape: [3, 224, 224]
save_inference_dir: "./inference"
save_inference_dir: ./inference
# model architecture
Arch:
name: "HRNet_W64_C"
name: HRNet_W64_C
# loss function config for training/eval process
Loss:
......@@ -46,80 +46,80 @@ Optimizer:
DataLoader:
Train:
dataset:
name: ImageNetDataset
image_root: "./dataset/ILSVRC2012/"
cls_label_path: "./dataset/ILSVRC2012/train_list.txt"
transform_ops:
- RandCropImage:
size: 224
- RandFlipImage:
flip_code: 1
- NormalizeImage:
scale: 0.00392157
mean: [0.485, 0.456, 0.406]
std: [0.229, 0.224, 0.225]
order: ''
name: ImageNetDataset
image_root: ./dataset/ILSVRC2012/
cls_label_path: ./dataset/ILSVRC2012/train_list.txt
transform_ops:
- RandCropImage:
size: 224
- RandFlipImage:
flip_code: 1
- NormalizeImage:
scale: 1.0/255.0
mean: [0.485, 0.456, 0.406]
std: [0.229, 0.224, 0.225]
order: ''
sampler:
name: DistributedBatchSampler
batch_size: 64
drop_last: False
shuffle: True
name: DistributedBatchSampler
batch_size: 64
drop_last: False
shuffle: True
loader:
num_workers: 6
use_shared_memory: True
num_workers: 4
use_shared_memory: True
Eval:
# TODO: modify to the latest trainer
dataset:
name: ImageNetDataset
image_root: "./dataset/ILSVRC2012/"
cls_label_path: "./dataset/ILSVRC2012/val_list.txt"
transform_ops:
- ResizeImage:
resize_short: 256
- CropImage:
size: 224
- NormalizeImage:
scale: 0.00392157
mean: [0.485, 0.456, 0.406]
std: [0.229, 0.224, 0.225]
order: ''
name: ImageNetDataset
image_root: ./dataset/ILSVRC2012/
cls_label_path: ./dataset/ILSVRC2012/val_list.txt
transform_ops:
- ResizeImage:
resize_short: 256
- CropImage:
size: 224
- NormalizeImage:
scale: 1.0/255.0
mean: [0.485, 0.456, 0.406]
std: [0.229, 0.224, 0.225]
order: ''
sampler:
name: DistributedBatchSampler
batch_size: 64
drop_last: False
shuffle: False
name: DistributedBatchSampler
batch_size: 64
drop_last: False
shuffle: False
loader:
num_workers: 6
use_shared_memory: True
num_workers: 4
use_shared_memory: True
Infer:
infer_imgs: "docs/images/whl/demo.jpg"
infer_imgs: docs/images/whl/demo.jpg
batch_size: 10
transforms:
- DecodeImage:
to_rgb: True
channel_first: False
- ResizeImage:
resize_short: 256
- CropImage:
size: 224
- NormalizeImage:
scale: 1.0/255.0
mean: [0.485, 0.456, 0.406]
std: [0.229, 0.224, 0.225]
order: ''
- ToCHWImage:
- DecodeImage:
to_rgb: True
channel_first: False
- ResizeImage:
resize_short: 256
- CropImage:
size: 224
- NormalizeImage:
scale: 1.0/255.0
mean: [0.485, 0.456, 0.406]
std: [0.229, 0.224, 0.225]
order: ''
- ToCHWImage:
PostProcess:
name: Topk
topk: 5
class_id_map_file: "ppcls/utils/imagenet1k_label_list.txt"
class_id_map_file: ppcls/utils/imagenet1k_label_list.txt
Metric:
Train:
Train:
- TopkAcc:
topk: [1, 5]
Eval:
Eval:
- TopkAcc:
topk: [1, 5]
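
The `Infer` block strings the whole preprocessing chain together for a batch of 10 images: decode to RGB, resize the short side to 256, center-crop to 224, normalize, convert HWC to CHW, and stack. A self-contained sketch of that chain, using a generated placeholder image instead of `docs/images/whl/demo.jpg`:

```python
import numpy as np
from PIL import Image

def infer_preprocess(paths, size=224, short=256,
                     mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225)):
    batch = []
    for path in paths:
        img = Image.open(path).convert("RGB")               # DecodeImage, to_rgb: True
        w, h = img.size
        r = short / min(w, h)                                # ResizeImage, resize_short: 256
        img = img.resize((round(w * r), round(h * r)))
        w, h = img.size
        left, top = (w - size) // 2, (h - size) // 2         # CropImage, size: 224
        img = img.crop((left, top, left + size, top + size))
        arr = np.asarray(img, dtype=np.float32) * (1.0 / 255.0)  # NormalizeImage
        arr = (arr - np.array(mean)) / np.array(std)
        batch.append(arr.transpose(2, 0, 1))                 # ToCHWImage
    return np.stack(batch)

Image.new("RGB", (480, 360), "white").save("demo_tmp.jpg")   # placeholder input
print(infer_preprocess(["demo_tmp.jpg"] * 10).shape)         # (10, 3, 224, 224)
```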
......@@ -2,8 +2,8 @@
Global:
checkpoints: null
pretrained_model: null
output_dir: "./output/"
device: "gpu"
output_dir: ./output/
device: gpu
class_num: 1000
save_interval: 1
eval_during_train: True
......@@ -12,18 +12,19 @@ Global:
print_batch_step: 10
use_visualdl: False
# used for static mode and model export
image_shape: [3, 224, 224]
save_inference_dir: "./inference"
image_shape: [3, 299, 299]
save_inference_dir: ./inference
# model architecture
Arch:
name: "InceptionV3"
name: InceptionV3
# loss function config for training/eval process
Loss:
Train:
- CELoss:
weight: 1.0
epsilon: 0.1
Eval:
- CELoss:
weight: 1.0
......@@ -35,8 +36,6 @@ Optimizer:
lr:
name: Cosine
learning_rate: 0.045
decay_epochs: [30, 60, 90]
values: [0.1, 0.01, 0.001, 0.0001]
regularizer:
name: 'L2'
coeff: 0.0001
......@@ -46,80 +45,80 @@ Optimizer:
DataLoader:
Train:
dataset:
name: ImageNetDataset
image_root: "./dataset/ILSVRC2012/"
cls_label_path: "./dataset/ILSVRC2012/train_list.txt"
transform_ops:
- RandCropImage:
size: 299
- RandFlipImage:
flip_code: 1
- NormalizeImage:
scale: 0.00392157
mean: [0.485, 0.456, 0.406]
std: [0.229, 0.224, 0.225]
order: ''
name: ImageNetDataset
image_root: ./dataset/ILSVRC2012/
cls_label_path: ./dataset/ILSVRC2012/train_list.txt
transform_ops:
- RandCropImage:
size: 299
- RandFlipImage:
flip_code: 1
- NormalizeImage:
scale: 1.0/255.0
mean: [0.485, 0.456, 0.406]
std: [0.229, 0.224, 0.225]
order: ''
sampler:
name: DistributedBatchSampler
batch_size: 64
drop_last: False
shuffle: True
name: DistributedBatchSampler
batch_size: 64
drop_last: False
shuffle: True
loader:
num_workers: 6
use_shared_memory: True
num_workers: 4
use_shared_memory: True
Eval:
# TODO: modify to the latest trainer
dataset:
name: ImageNetDataset
image_root: "./dataset/ILSVRC2012/"
cls_label_path: "./dataset/ILSVRC2012/val_list.txt"
transform_ops:
- ResizeImage:
resize_short: 320
- CropImage:
size: 299
- NormalizeImage:
scale: 0.00392157
mean: [0.485, 0.456, 0.406]
std: [0.229, 0.224, 0.225]
order: ''
name: ImageNetDataset
image_root: ./dataset/ILSVRC2012/
cls_label_path: ./dataset/ILSVRC2012/val_list.txt
transform_ops:
- ResizeImage:
resize_short: 320
- CropImage:
size: 299
- NormalizeImage:
scale: 1.0/255.0
mean: [0.485, 0.456, 0.406]
std: [0.229, 0.224, 0.225]
order: ''
sampler:
name: DistributedBatchSampler
batch_size: 64
drop_last: False
shuffle: False
name: DistributedBatchSampler
batch_size: 64
drop_last: False
shuffle: False
loader:
num_workers: 6
use_shared_memory: True
num_workers: 4
use_shared_memory: True
Infer:
infer_imgs: "docs/images/whl/demo.jpg"
infer_imgs: docs/images/whl/demo.jpg
batch_size: 10
transforms:
- DecodeImage:
to_rgb: True
channel_first: False
- ResizeImage:
resize_short: 320
- CropImage:
size: 299
- NormalizeImage:
scale: 1.0/255.0
mean: [0.485, 0.456, 0.406]
std: [0.229, 0.224, 0.225]
order: ''
- ToCHWImage:
- DecodeImage:
to_rgb: True
channel_first: False
- ResizeImage:
resize_short: 320
- CropImage:
size: 299
- NormalizeImage:
scale: 1.0/255.0
mean: [0.485, 0.456, 0.406]
std: [0.229, 0.224, 0.225]
order: ''
- ToCHWImage:
PostProcess:
name: Topk
topk: 5
class_id_map_file: "ppcls/utils/imagenet1k_label_list.txt"
class_id_map_file: ppcls/utils/imagenet1k_label_list.txt
Metric:
Train:
Train:
- TopkAcc:
topk: [1, 5]
Eval:
Eval:
- TopkAcc:
topk: [1, 5]
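
The InceptionV3 config differs from the HRNet ones: 299x299 inputs, a cosine schedule at 0.045 (the piecewise `decay_epochs`/`values` lines are dropped), and `epsilon: 0.1` on CELoss, i.e. label smoothing. A hedged numpy sketch of one common label-smoothing formulation (other variants distribute epsilon over the non-target classes only):

```python
import numpy as np

def smoothed_ce(logits, labels, epsilon=0.1):
    # Soft targets: (1 - epsilon) extra mass on the true class, epsilon spread over all classes.
    n, c = logits.shape
    log_probs = logits - logits.max(axis=1, keepdims=True)
    log_probs = log_probs - np.log(np.exp(log_probs).sum(axis=1, keepdims=True))
    targets = np.full((n, c), epsilon / c)
    targets[np.arange(n), labels] += 1.0 - epsilon
    return float(-(targets * log_probs).sum(axis=1).mean())

logits = np.random.randn(4, 1000)
labels = np.array([3, 17, 999, 0])
print(smoothed_ce(logits, labels, epsilon=0.1))
```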
......@@ -2,8 +2,8 @@
Global:
checkpoints: null
pretrained_model: null
output_dir: "./output/"
device: "gpu"
output_dir: ./output/
device: gpu
class_num: 1000
save_interval: 1
eval_during_train: True
......@@ -13,11 +13,11 @@ Global:
use_visualdl: False
# used for static mode and model export
image_shape: [3, 224, 224]
save_inference_dir: "./inference"
save_inference_dir: ./inference
# model architecture
Arch:
name: "MobileNetV1"
name: MobileNetV1
# loss function config for training/eval process
Loss:
......@@ -39,87 +39,87 @@ Optimizer:
values: [0.1, 0.01, 0.001, 0.0001]
regularizer:
name: 'L2'
coeff: 0.00003
coeff: 0.0003
# data loader for train and eval
DataLoader:
Train:
dataset:
name: ImageNetDataset
image_root: "./dataset/ILSVRC2012/"
cls_label_path: "./dataset/ILSVRC2012/train_list.txt"
transform_ops:
- RandCropImage:
size: 224
- RandFlipImage:
flip_code: 1
- NormalizeImage:
scale: 0.00392157
mean: [0.485, 0.456, 0.406]
std: [0.229, 0.224, 0.225]
order: ''
name: ImageNetDataset
image_root: ./dataset/ILSVRC2012/
cls_label_path: ./dataset/ILSVRC2012/train_list.txt
transform_ops:
- RandCropImage:
size: 224
- RandFlipImage:
flip_code: 1
- NormalizeImage:
scale: 1.0/255.0
mean: [0.485, 0.456, 0.406]
std: [0.229, 0.224, 0.225]
order: ''
sampler:
name: DistributedBatchSampler
batch_size: 64
drop_last: False
shuffle: True
name: DistributedBatchSampler
batch_size: 64
drop_last: False
shuffle: True
loader:
num_workers: 6
use_shared_memory: True
num_workers: 4
use_shared_memory: True
Eval:
# TODO: modify to the latest trainer
dataset:
name: ImageNetDataset
image_root: "./dataset/ILSVRC2012/"
cls_label_path: "./dataset/ILSVRC2012/val_list.txt"
transform_ops:
- ResizeImage:
resize_short: 256
- CropImage:
size: 224
- NormalizeImage:
scale: 0.00392157
mean: [0.485, 0.456, 0.406]
std: [0.229, 0.224, 0.225]
order: ''
name: ImageNetDataset
image_root: ./dataset/ILSVRC2012/
cls_label_path: ./dataset/ILSVRC2012/val_list.txt
transform_ops:
- ResizeImage:
resize_short: 256
- CropImage:
size: 224
- NormalizeImage:
scale: 1.0/255.0
mean: [0.485, 0.456, 0.406]
std: [0.229, 0.224, 0.225]
order: ''
sampler:
name: DistributedBatchSampler
batch_size: 64
drop_last: False
shuffle: False
name: DistributedBatchSampler
batch_size: 64
drop_last: False
shuffle: False
loader:
num_workers: 6
use_shared_memory: True
num_workers: 4
use_shared_memory: True
Infer:
infer_imgs: "docs/images/whl/demo.jpg"
infer_imgs: docs/images/whl/demo.jpg
batch_size: 10
transforms:
- DecodeImage:
to_rgb: True
channel_first: False
- ResizeImage:
resize_short: 256
- CropImage:
size: 224
- NormalizeImage:
scale: 1.0/255.0
mean: [0.485, 0.456, 0.406]
std: [0.229, 0.224, 0.225]
order: ''
- ToCHWImage:
- DecodeImage:
to_rgb: True
channel_first: False
- ResizeImage:
resize_short: 256
- CropImage:
size: 224
- NormalizeImage:
scale: 1.0/255.0
mean: [0.485, 0.456, 0.406]
std: [0.229, 0.224, 0.225]
order: ''
- ToCHWImage:
PostProcess:
name: Topk
topk: 5
class_id_map_file: "ppcls/utils/imagenet1k_label_list.txt"
class_id_map_file: ppcls/utils/imagenet1k_label_list.txt
Metric:
Train:
Train:
- TopkAcc:
topk: [1, 5]
Eval:
Eval:
- TopkAcc:
topk: [1, 5]
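
The MobileNetV1 configs keep a piecewise schedule with `values: [0.1, 0.01, 0.001, 0.0001]`, stepping at boundaries such as the `[30, 60, 90]` shown in the InceptionV3 hunk above. A tiny lookup sketch; the boundary convention (switching exactly at the listed epoch) is an assumption.

```python
import bisect

def piecewise_lr(epoch, decay_epochs=(30, 60, 90),
                 values=(0.1, 0.01, 0.001, 0.0001)):
    # values[i] applies until epoch reaches decay_epochs[i]; len(values) == len(decay_epochs) + 1.
    return values[bisect.bisect_right(decay_epochs, epoch)]

print([piecewise_lr(e) for e in (0, 29, 30, 59, 60, 90, 100)])
# [0.1, 0.1, 0.01, 0.01, 0.001, 0.0001, 0.0001]
```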
......@@ -2,8 +2,8 @@
Global:
checkpoints: null
pretrained_model: null
output_dir: "./output/"
device: "gpu"
output_dir: ./output/
device: gpu
class_num: 1000
save_interval: 1
eval_during_train: True
......@@ -13,11 +13,11 @@ Global:
use_visualdl: False
# used for static mode and model export
image_shape: [3, 224, 224]
save_inference_dir: "./inference"
save_inference_dir: ./inference
# model architecture
Arch:
name: "MobileNetV1_x0_25"
name: MobileNetV1_x0_25
# loss function config for training/eval process
Loss:
......@@ -39,87 +39,87 @@ Optimizer:
values: [0.1, 0.01, 0.001, 0.0001]
regularizer:
name: 'L2'
coeff: 0.00003
coeff: 0.0003
# data loader for train and eval
DataLoader:
Train:
dataset:
name: ImageNetDataset
image_root: "./dataset/ILSVRC2012/"
cls_label_path: "./dataset/ILSVRC2012/train_list.txt"
transform_ops:
- RandCropImage:
size: 224
- RandFlipImage:
flip_code: 1
- NormalizeImage:
scale: 0.00392157
mean: [0.485, 0.456, 0.406]
std: [0.229, 0.224, 0.225]
order: ''
name: ImageNetDataset
image_root: ./dataset/ILSVRC2012/
cls_label_path: ./dataset/ILSVRC2012/train_list.txt
transform_ops:
- RandCropImage:
size: 224
- RandFlipImage:
flip_code: 1
- NormalizeImage:
scale: 1.0/255.0
mean: [0.485, 0.456, 0.406]
std: [0.229, 0.224, 0.225]
order: ''
sampler:
name: DistributedBatchSampler
batch_size: 64
drop_last: False
shuffle: True
name: DistributedBatchSampler
batch_size: 64
drop_last: False
shuffle: True
loader:
num_workers: 6
use_shared_memory: True
num_workers: 4
use_shared_memory: True
Eval:
# TODO: modify to the latest trainer
dataset:
name: ImageNetDataset
image_root: "./dataset/ILSVRC2012/"
cls_label_path: "./dataset/ILSVRC2012/val_list.txt"
transform_ops:
- ResizeImage:
resize_short: 256
- CropImage:
size: 224
- NormalizeImage:
scale: 0.00392157
mean: [0.485, 0.456, 0.406]
std: [0.229, 0.224, 0.225]
order: ''
name: ImageNetDataset
image_root: ./dataset/ILSVRC2012/
cls_label_path: ./dataset/ILSVRC2012/val_list.txt
transform_ops:
- ResizeImage:
resize_short: 256
- CropImage:
size: 224
- NormalizeImage:
scale: 1.0/255.0
mean: [0.485, 0.456, 0.406]
std: [0.229, 0.224, 0.225]
order: ''
sampler:
name: DistributedBatchSampler
batch_size: 64
drop_last: False
shuffle: False
name: DistributedBatchSampler
batch_size: 64
drop_last: False
shuffle: False
loader:
num_workers: 6
use_shared_memory: True
num_workers: 4
use_shared_memory: True
Infer:
infer_imgs: "docs/images/whl/demo.jpg"
infer_imgs: docs/images/whl/demo.jpg
batch_size: 10
transforms:
- DecodeImage:
to_rgb: True
channel_first: False
- ResizeImage:
resize_short: 256
- CropImage:
size: 224
- NormalizeImage:
scale: 1.0/255.0
mean: [0.485, 0.456, 0.406]
std: [0.229, 0.224, 0.225]
order: ''
- ToCHWImage:
- DecodeImage:
to_rgb: True
channel_first: False
- ResizeImage:
resize_short: 256
- CropImage:
size: 224
- NormalizeImage:
scale: 1.0/255.0
mean: [0.485, 0.456, 0.406]
std: [0.229, 0.224, 0.225]
order: ''
- ToCHWImage:
PostProcess:
name: Topk
topk: 5
class_id_map_file: "ppcls/utils/imagenet1k_label_list.txt"
class_id_map_file: ppcls/utils/imagenet1k_label_list.txt
Metric:
Train:
Train:
- TopkAcc:
topk: [1, 5]
Eval:
Eval:
- TopkAcc:
topk: [1, 5]
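
These MobileNetV1 variants also raise the L2 regularizer `coeff` from 0.00003 to 0.0003. As a reminder of what that coefficient does, here is a textbook-style SGD-with-momentum step where the L2 term is added to the gradient; a sketch under that standard formulation, not the optimizer's actual code.

```python
import numpy as np

def sgd_step_with_l2(param, grad, lr=0.1, momentum=0.9, coeff=0.0003, velocity=None):
    # L2 regularization contributes coeff * param to the gradient before the momentum update.
    velocity = np.zeros_like(param) if velocity is None else velocity
    g = grad + coeff * param
    velocity = momentum * velocity + g
    return param - lr * velocity, velocity

w = np.ones(4)
g = np.full(4, 0.01)
w, v = sgd_step_with_l2(w, g)
print(w)  # each weight shrinks slightly more than it would with coeff=0.00003
```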
......@@ -2,8 +2,8 @@
Global:
checkpoints: null
pretrained_model: null
output_dir: "./output/"
device: "gpu"
output_dir: ./output/
device: gpu
class_num: 1000
save_interval: 1
eval_during_train: True
......@@ -13,11 +13,11 @@ Global:
use_visualdl: False
# used for static mode and model export
image_shape: [3, 224, 224]
save_inference_dir: "./inference"
save_inference_dir: ./inference
# model architecture
Arch:
name: "MobileNetV1_x0_5"
name: MobileNetV1_x0_5
# loss function config for training/eval process
Loss:
......@@ -39,87 +39,87 @@ Optimizer:
values: [0.1, 0.01, 0.001, 0.0001]
regularizer:
name: 'L2'
coeff: 0.00003
coeff: 0.0003
# data loader for train and eval
DataLoader:
Train:
dataset:
name: ImageNetDataset
image_root: "./dataset/ILSVRC2012/"
cls_label_path: "./dataset/ILSVRC2012/train_list.txt"
transform_ops:
- RandCropImage:
size: 224
- RandFlipImage:
flip_code: 1
- NormalizeImage:
scale: 0.00392157
mean: [0.485, 0.456, 0.406]
std: [0.229, 0.224, 0.225]
order: ''
name: ImageNetDataset
image_root: ./dataset/ILSVRC2012/
cls_label_path: ./dataset/ILSVRC2012/train_list.txt
transform_ops:
- RandCropImage:
size: 224
- RandFlipImage:
flip_code: 1
- NormalizeImage:
scale: 1.0/255.0
mean: [0.485, 0.456, 0.406]
std: [0.229, 0.224, 0.225]
order: ''
sampler:
name: DistributedBatchSampler
batch_size: 64
drop_last: False
shuffle: True
name: DistributedBatchSampler
batch_size: 64
drop_last: False
shuffle: True
loader:
num_workers: 6
use_shared_memory: True
num_workers: 4
use_shared_memory: True
Eval:
# TODO: modify to the latest trainer
dataset:
name: ImageNetDataset
image_root: "./dataset/ILSVRC2012/"
cls_label_path: "./dataset/ILSVRC2012/val_list.txt"
transform_ops:
- ResizeImage:
resize_short: 256
- CropImage:
size: 224
- NormalizeImage:
scale: 0.00392157
mean: [0.485, 0.456, 0.406]
std: [0.229, 0.224, 0.225]
order: ''
name: ImageNetDataset
image_root: ./dataset/ILSVRC2012/
cls_label_path: ./dataset/ILSVRC2012/val_list.txt
transform_ops:
- ResizeImage:
resize_short: 256
- CropImage:
size: 224
- NormalizeImage:
scale: 1.0/255.0
mean: [0.485, 0.456, 0.406]
std: [0.229, 0.224, 0.225]
order: ''
sampler:
name: DistributedBatchSampler
batch_size: 64
drop_last: False
shuffle: False
name: DistributedBatchSampler
batch_size: 64
drop_last: False
shuffle: False
loader:
num_workers: 6
use_shared_memory: True
num_workers: 4
use_shared_memory: True
Infer:
infer_imgs: "docs/images/whl/demo.jpg"
infer_imgs: docs/images/whl/demo.jpg
batch_size: 10
transforms:
- DecodeImage:
to_rgb: True
channel_first: False
- ResizeImage:
resize_short: 256
- CropImage:
size: 224
- NormalizeImage:
scale: 1.0/255.0
mean: [0.485, 0.456, 0.406]
std: [0.229, 0.224, 0.225]
order: ''
- ToCHWImage:
- DecodeImage:
to_rgb: True
channel_first: False
- ResizeImage:
resize_short: 256
- CropImage:
size: 224
- NormalizeImage:
scale: 1.0/255.0
mean: [0.485, 0.456, 0.406]
std: [0.229, 0.224, 0.225]
order: ''
- ToCHWImage:
PostProcess:
name: Topk
topk: 5
class_id_map_file: "ppcls/utils/imagenet1k_label_list.txt"
class_id_map_file: ppcls/utils/imagenet1k_label_list.txt
Metric:
Train:
Train:
- TopkAcc:
topk: [1, 5]
Eval:
Eval:
- TopkAcc:
topk: [1, 5]
......@@ -2,8 +2,8 @@
Global:
checkpoints: null
pretrained_model: null
output_dir: "./output/"
device: "gpu"
output_dir: ./output/
device: gpu
class_num: 1000
save_interval: 1
eval_during_train: True
......@@ -13,11 +13,11 @@ Global:
use_visualdl: False
# used for static mode and model export
image_shape: [3, 224, 224]
save_inference_dir: "./inference"
save_inference_dir: ./inference
# model architecture
Arch:
name: "MobileNetV1_x0_75"
name: MobileNetV1_x0_75
# loss function config for training/eval process
Loss:
......@@ -39,87 +39,87 @@ Optimizer:
values: [0.1, 0.01, 0.001, 0.0001]
regularizer:
name: 'L2'
coeff: 0.00003
coeff: 0.0003
# data loader for train and eval
DataLoader:
Train:
dataset:
name: ImageNetDataset
image_root: "./dataset/ILSVRC2012/"
cls_label_path: "./dataset/ILSVRC2012/train_list.txt"
transform_ops:
- RandCropImage:
size: 224
- RandFlipImage:
flip_code: 1
- NormalizeImage:
scale: 0.00392157
mean: [0.485, 0.456, 0.406]
std: [0.229, 0.224, 0.225]
order: ''
name: ImageNetDataset
image_root: ./dataset/ILSVRC2012/
cls_label_path: ./dataset/ILSVRC2012/train_list.txt
transform_ops:
- RandCropImage:
size: 224
- RandFlipImage:
flip_code: 1
- NormalizeImage:
scale: 1.0/255.0
mean: [0.485, 0.456, 0.406]
std: [0.229, 0.224, 0.225]
order: ''
sampler:
name: DistributedBatchSampler
batch_size: 64
drop_last: False
shuffle: True
name: DistributedBatchSampler
batch_size: 64
drop_last: False
shuffle: True
loader:
num_workers: 6
use_shared_memory: True
num_workers: 4
use_shared_memory: True
Eval:
# TODO: modify to the latest trainer
dataset:
name: ImageNetDataset
image_root: "./dataset/ILSVRC2012/"
cls_label_path: "./dataset/ILSVRC2012/val_list.txt"
transform_ops:
- ResizeImage:
resize_short: 256
- CropImage:
size: 224
- NormalizeImage:
scale: 0.00392157
mean: [0.485, 0.456, 0.406]
std: [0.229, 0.224, 0.225]
order: ''
name: ImageNetDataset
image_root: ./dataset/ILSVRC2012/
cls_label_path: ./dataset/ILSVRC2012/val_list.txt
transform_ops:
- ResizeImage:
resize_short: 256
- CropImage:
size: 224
- NormalizeImage:
scale: 1.0/255.0
mean: [0.485, 0.456, 0.406]
std: [0.229, 0.224, 0.225]
order: ''
sampler:
name: DistributedBatchSampler
batch_size: 64
drop_last: False
shuffle: False
name: DistributedBatchSampler
batch_size: 64
drop_last: False
shuffle: False
loader:
num_workers: 6
use_shared_memory: True
num_workers: 4
use_shared_memory: True
Infer:
infer_imgs: "docs/images/whl/demo.jpg"
infer_imgs: docs/images/whl/demo.jpg
batch_size: 10
transforms:
- DecodeImage:
to_rgb: True
channel_first: False
- ResizeImage:
resize_short: 256
- CropImage:
size: 224
- NormalizeImage:
scale: 1.0/255.0
mean: [0.485, 0.456, 0.406]
std: [0.229, 0.224, 0.225]
order: ''
- ToCHWImage:
- DecodeImage:
to_rgb: True
channel_first: False
- ResizeImage:
resize_short: 256
- CropImage:
size: 224
- NormalizeImage:
scale: 1.0/255.0
mean: [0.485, 0.456, 0.406]
std: [0.229, 0.224, 0.225]
order: ''
- ToCHWImage:
PostProcess:
name: Topk
topk: 5
class_id_map_file: "ppcls/utils/imagenet1k_label_list.txt"
class_id_map_file: ppcls/utils/imagenet1k_label_list.txt
Metric:
Train:
Train:
- TopkAcc:
topk: [1, 5]
Eval:
Eval:
- TopkAcc:
topk: [1, 5]
......@@ -2,8 +2,8 @@
Global:
checkpoints: null
pretrained_model: null
output_dir: "./output/"
device: "gpu"
output_dir: ./output/
device: gpu
class_num: 1000
save_interval: 1
eval_during_train: True
......@@ -13,11 +13,11 @@ Global:
use_visualdl: False
# used for static mode and model export
image_shape: [3, 224, 224]
save_inference_dir: "./inference"
save_inference_dir: ./inference
# model architecture
Arch:
name: "MobileNetV3_large_x0_35"
name: MobileNetV3_large_x0_35
# loss function config for training/eval process
Loss:
......@@ -45,80 +45,80 @@ Optimizer:
DataLoader:
Train:
dataset:
name: ImageNetDataset
image_root: "./dataset/ILSVRC2012/"
cls_label_path: "./dataset/ILSVRC2012/train_list.txt"
transform_ops:
- RandCropImage:
size: 224
- RandFlipImage:
flip_code: 1
- NormalizeImage:
scale: 0.00392157
mean: [0.485, 0.456, 0.406]
std: [0.229, 0.224, 0.225]
order: ''
name: ImageNetDataset
image_root: ./dataset/ILSVRC2012/
cls_label_path: ./dataset/ILSVRC2012/train_list.txt
transform_ops:
- RandCropImage:
size: 224
- RandFlipImage:
flip_code: 1
- NormalizeImage:
scale: 1.0/255.0
mean: [0.485, 0.456, 0.406]
std: [0.229, 0.224, 0.225]
order: ''
sampler:
name: DistributedBatchSampler
batch_size: 512
drop_last: False
shuffle: True
name: DistributedBatchSampler
batch_size: 512
drop_last: False
shuffle: True
loader:
num_workers: 6
use_shared_memory: True
num_workers: 4
use_shared_memory: True
Eval:
# TODO: modify to the latest trainer
dataset:
name: ImageNetDataset
image_root: "./dataset/ILSVRC2012/"
cls_label_path: "./dataset/ILSVRC2012/val_list.txt"
transform_ops:
- ResizeImage:
resize_short: 256
- CropImage:
size: 224
- NormalizeImage:
scale: 0.00392157
mean: [0.485, 0.456, 0.406]
std: [0.229, 0.224, 0.225]
order: ''
name: ImageNetDataset
image_root: ./dataset/ILSVRC2012/
cls_label_path: ./dataset/ILSVRC2012/val_list.txt
transform_ops:
- ResizeImage:
resize_short: 256
- CropImage:
size: 224
- NormalizeImage:
scale: 1.0/255.0
mean: [0.485, 0.456, 0.406]
std: [0.229, 0.224, 0.225]
order: ''
sampler:
name: DistributedBatchSampler
batch_size: 64
drop_last: False
shuffle: False
name: DistributedBatchSampler
batch_size: 64
drop_last: False
shuffle: False
loader:
num_workers: 6
use_shared_memory: True
num_workers: 4
use_shared_memory: True
Infer:
infer_imgs: "docs/images/whl/demo.jpg"
infer_imgs: docs/images/whl/demo.jpg
batch_size: 10
transforms:
- DecodeImage:
to_rgb: True
channel_first: False
- ResizeImage:
resize_short: 256
- CropImage:
size: 224
- NormalizeImage:
scale: 1.0/255.0
mean: [0.485, 0.456, 0.406]
std: [0.229, 0.224, 0.225]
order: ''
- ToCHWImage:
- DecodeImage:
to_rgb: True
channel_first: False
- ResizeImage:
resize_short: 256
- CropImage:
size: 224
- NormalizeImage:
scale: 1.0/255.0
mean: [0.485, 0.456, 0.406]
std: [0.229, 0.224, 0.225]
order: ''
- ToCHWImage:
PostProcess:
name: Topk
topk: 5
class_id_map_file: "ppcls/utils/imagenet1k_label_list.txt"
class_id_map_file: ppcls/utils/imagenet1k_label_list.txt
Metric:
Train:
Train:
- TopkAcc:
topk: [1, 5]
Eval:
Eval:
- TopkAcc:
topk: [1, 5]
......@@ -2,8 +2,8 @@
Global:
checkpoints: null
pretrained_model: null
output_dir: "./output/"
device: "gpu"
output_dir: ./output/
device: gpu
class_num: 1000
save_interval: 1
eval_during_train: True
......@@ -13,11 +13,11 @@ Global:
use_visualdl: False
# used for static mode and model export
image_shape: [3, 224, 224]
save_inference_dir: "./inference"
save_inference_dir: ./inference
# model architecture
Arch:
name: "MobileNetV3_large_x0_5"
name: MobileNetV3_large_x0_5
# loss function config for training/eval process
Loss:
......@@ -45,80 +45,80 @@ Optimizer:
DataLoader:
Train:
dataset:
name: ImageNetDataset
image_root: "./dataset/ILSVRC2012/"
cls_label_path: "./dataset/ILSVRC2012/train_list.txt"
transform_ops:
- RandCropImage:
size: 224
- RandFlipImage:
flip_code: 1
- NormalizeImage:
scale: 0.00392157
mean: [0.485, 0.456, 0.406]
std: [0.229, 0.224, 0.225]
order: ''
name: ImageNetDataset
image_root: ./dataset/ILSVRC2012/
cls_label_path: ./dataset/ILSVRC2012/train_list.txt
transform_ops:
- RandCropImage:
size: 224
- RandFlipImage:
flip_code: 1
- NormalizeImage:
scale: 1.0/255.0
mean: [0.485, 0.456, 0.406]
std: [0.229, 0.224, 0.225]
order: ''
sampler:
name: DistributedBatchSampler
batch_size: 512
drop_last: False
shuffle: True
name: DistributedBatchSampler
batch_size: 512
drop_last: False
shuffle: True
loader:
num_workers: 6
use_shared_memory: True
num_workers: 4
use_shared_memory: True
Eval:
# TODO: modify to the latest trainer
dataset:
name: ImageNetDataset
image_root: "./dataset/ILSVRC2012/"
cls_label_path: "./dataset/ILSVRC2012/val_list.txt"
transform_ops:
- ResizeImage:
resize_short: 256
- CropImage:
size: 224
- NormalizeImage:
scale: 0.00392157
mean: [0.485, 0.456, 0.406]
std: [0.229, 0.224, 0.225]
order: ''
name: ImageNetDataset
image_root: ./dataset/ILSVRC2012/
cls_label_path: ./dataset/ILSVRC2012/val_list.txt
transform_ops:
- ResizeImage:
resize_short: 256
- CropImage:
size: 224
- NormalizeImage:
scale: 1.0/255.0
mean: [0.485, 0.456, 0.406]
std: [0.229, 0.224, 0.225]
order: ''
sampler:
name: DistributedBatchSampler
batch_size: 64
drop_last: False
shuffle: False
name: DistributedBatchSampler
batch_size: 64
drop_last: False
shuffle: False
loader:
num_workers: 6
use_shared_memory: True
num_workers: 4
use_shared_memory: True
Infer:
infer_imgs: "docs/images/whl/demo.jpg"
infer_imgs: docs/images/whl/demo.jpg
batch_size: 10
transforms:
- DecodeImage:
to_rgb: True
channel_first: False
- ResizeImage:
resize_short: 256
- CropImage:
size: 224
- NormalizeImage:
scale: 1.0/255.0
mean: [0.485, 0.456, 0.406]
std: [0.229, 0.224, 0.225]
order: ''
- ToCHWImage:
- DecodeImage:
to_rgb: True
channel_first: False
- ResizeImage:
resize_short: 256
- CropImage:
size: 224
- NormalizeImage:
scale: 1.0/255.0
mean: [0.485, 0.456, 0.406]
std: [0.229, 0.224, 0.225]
order: ''
- ToCHWImage:
PostProcess:
name: Topk
topk: 5
class_id_map_file: "ppcls/utils/imagenet1k_label_list.txt"
class_id_map_file: ppcls/utils/imagenet1k_label_list.txt
Metric:
Train:
Train:
- TopkAcc:
topk: [1, 5]
Eval:
Eval:
- TopkAcc:
topk: [1, 5]
......@@ -2,8 +2,8 @@
Global:
checkpoints: null
pretrained_model: null
output_dir: "./output/"
device: "gpu"
output_dir: ./output/
device: gpu
class_num: 1000
save_interval: 1
eval_during_train: True
......@@ -13,11 +13,11 @@ Global:
use_visualdl: False
# used for static mode and model export
image_shape: [3, 224, 224]
save_inference_dir: "./inference"
save_inference_dir: ./inference
# model architecture
Arch:
name: "MobileNetV3_large_x0_75"
name: MobileNetV3_large_x0_75
# loss function config for training/eval process
Loss:
......@@ -45,80 +45,80 @@ Optimizer:
DataLoader:
Train:
dataset:
name: ImageNetDataset
image_root: "./dataset/ILSVRC2012/"
cls_label_path: "./dataset/ILSVRC2012/train_list.txt"
transform_ops:
- RandCropImage:
size: 224
- RandFlipImage:
flip_code: 1
- NormalizeImage:
scale: 0.00392157
mean: [0.485, 0.456, 0.406]
std: [0.229, 0.224, 0.225]
order: ''
name: ImageNetDataset
image_root: ./dataset/ILSVRC2012/
cls_label_path: ./dataset/ILSVRC2012/train_list.txt
transform_ops:
- RandCropImage:
size: 224
- RandFlipImage:
flip_code: 1
- NormalizeImage:
scale: 1.0/255.0
mean: [0.485, 0.456, 0.406]
std: [0.229, 0.224, 0.225]
order: ''
sampler:
name: DistributedBatchSampler
batch_size: 512
drop_last: False
shuffle: True
name: DistributedBatchSampler
batch_size: 512
drop_last: False
shuffle: True
loader:
num_workers: 6
use_shared_memory: True
num_workers: 4
use_shared_memory: True
Eval:
# TODO: modify to the latest trainer
dataset:
name: ImageNetDataset
image_root: "./dataset/ILSVRC2012/"
cls_label_path: "./dataset/ILSVRC2012/val_list.txt"
transform_ops:
- ResizeImage:
resize_short: 256
- CropImage:
size: 224
- NormalizeImage:
scale: 0.00392157
mean: [0.485, 0.456, 0.406]
std: [0.229, 0.224, 0.225]
order: ''
name: ImageNetDataset
image_root: ./dataset/ILSVRC2012/
cls_label_path: ./dataset/ILSVRC2012/val_list.txt
transform_ops:
- ResizeImage:
resize_short: 256
- CropImage:
size: 224
- NormalizeImage:
scale: 1.0/255.0
mean: [0.485, 0.456, 0.406]
std: [0.229, 0.224, 0.225]
order: ''
sampler:
name: DistributedBatchSampler
batch_size: 64
drop_last: False
shuffle: False
name: DistributedBatchSampler
batch_size: 64
drop_last: False
shuffle: False
loader:
num_workers: 6
use_shared_memory: True
num_workers: 4
use_shared_memory: True
Infer:
infer_imgs: "docs/images/whl/demo.jpg"
infer_imgs: docs/images/whl/demo.jpg
batch_size: 10
transforms:
- DecodeImage:
to_rgb: True
channel_first: False
- ResizeImage:
resize_short: 256
- CropImage:
size: 224
- NormalizeImage:
scale: 1.0/255.0
mean: [0.485, 0.456, 0.406]
std: [0.229, 0.224, 0.225]
order: ''
- ToCHWImage:
- DecodeImage:
to_rgb: True
channel_first: False
- ResizeImage:
resize_short: 256
- CropImage:
size: 224
- NormalizeImage:
scale: 1.0/255.0
mean: [0.485, 0.456, 0.406]
std: [0.229, 0.224, 0.225]
order: ''
- ToCHWImage:
PostProcess:
name: Topk
topk: 5
class_id_map_file: "ppcls/utils/imagenet1k_label_list.txt"
class_id_map_file: ppcls/utils/imagenet1k_label_list.txt
Metric:
Train:
Train:
- TopkAcc:
topk: [1, 5]
Eval:
Eval:
- TopkAcc:
topk: [1, 5]
......@@ -2,8 +2,8 @@
Global:
checkpoints: null
pretrained_model: null
output_dir: "./output/"
device: "gpu"
output_dir: ./output/
device: gpu
class_num: 1000
save_interval: 1
eval_during_train: True
......@@ -13,11 +13,11 @@ Global:
use_visualdl: False
# used for static mode and model export
image_shape: [3, 224, 224]
save_inference_dir: "./inference"
save_inference_dir: ./inference
# model architecture
Arch:
name: "MobileNetV3_large_x1_0"
name: MobileNetV3_large_x1_0
# loss function config for training/eval process
Loss:
......@@ -45,81 +45,81 @@ Optimizer:
DataLoader:
Train:
dataset:
name: ImageNetDataset
image_root: "./dataset/ILSVRC2012/"
cls_label_path: "./dataset/ILSVRC2012/train_list.txt"
transform_ops:
- RandCropImage:
size: 224
- RandFlipImage:
flip_code: 1
- AutoAugment:
- NormalizeImage:
scale: 0.00392157
mean: [0.485, 0.456, 0.406]
std: [0.229, 0.224, 0.225]
order: ''
name: ImageNetDataset
image_root: ./dataset/ILSVRC2012/
cls_label_path: ./dataset/ILSVRC2012/train_list.txt
transform_ops:
- RandCropImage:
size: 224
- RandFlipImage:
flip_code: 1
- AutoAugment:
- NormalizeImage:
scale: 1.0/255.0
mean: [0.485, 0.456, 0.406]
std: [0.229, 0.224, 0.225]
order: ''
sampler:
name: DistributedBatchSampler
batch_size: 512
drop_last: False
shuffle: True
name: DistributedBatchSampler
batch_size: 512
drop_last: False
shuffle: True
loader:
num_workers: 6
use_shared_memory: True
num_workers: 4
use_shared_memory: True
Eval:
# TODO: modify to the latest trainer
dataset:
name: ImageNetDataset
image_root: "./dataset/ILSVRC2012/"
cls_label_path: "./dataset/ILSVRC2012/val_list.txt"
transform_ops:
- ResizeImage:
resize_short: 256
- CropImage:
size: 224
- NormalizeImage:
scale: 0.00392157
mean: [0.485, 0.456, 0.406]
std: [0.229, 0.224, 0.225]
order: ''
name: ImageNetDataset
image_root: ./dataset/ILSVRC2012/
cls_label_path: ./dataset/ILSVRC2012/val_list.txt
transform_ops:
- ResizeImage:
resize_short: 256
- CropImage:
size: 224
- NormalizeImage:
scale: 1.0/255.0
mean: [0.485, 0.456, 0.406]
std: [0.229, 0.224, 0.225]
order: ''
sampler:
name: DistributedBatchSampler
batch_size: 64
drop_last: False
shuffle: False
name: DistributedBatchSampler
batch_size: 64
drop_last: False
shuffle: False
loader:
num_workers: 6
use_shared_memory: True
num_workers: 4
use_shared_memory: True
Infer:
infer_imgs: "docs/images/whl/demo.jpg"
infer_imgs: docs/images/whl/demo.jpg
batch_size: 10
transforms:
- DecodeImage:
to_rgb: True
channel_first: False
- ResizeImage:
resize_short: 256
- CropImage:
size: 224
- NormalizeImage:
scale: 1.0/255.0
mean: [0.485, 0.456, 0.406]
std: [0.229, 0.224, 0.225]
order: ''
- ToCHWImage:
- DecodeImage:
to_rgb: True
channel_first: False
- ResizeImage:
resize_short: 256
- CropImage:
size: 224
- NormalizeImage:
scale: 1.0/255.0
mean: [0.485, 0.456, 0.406]
std: [0.229, 0.224, 0.225]
order: ''
- ToCHWImage:
PostProcess:
name: Topk
topk: 5
class_id_map_file: "ppcls/utils/imagenet1k_label_list.txt"
class_id_map_file: ppcls/utils/imagenet1k_label_list.txt
Metric:
Train:
Train:
- TopkAcc:
topk: [1, 5]
Eval:
Eval:
- TopkAcc:
topk: [1, 5]
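
The MobileNetV3_large_x1_0 config additionally lists `AutoAugment:` between the flip and normalization ops, so the policy runs on the raw RGB image before scaling. The toy stand-in below only picks one simple Pillow op at random; the real AutoAugment applies learned (operation, probability, magnitude) sub-policies.

```python
import random
from PIL import Image, ImageEnhance, ImageOps

def toy_auto_augment(img):
    # Stand-in for AutoAugment: apply one randomly chosen, fixed-strength op.
    policy = random.choice([
        lambda im: ImageOps.posterize(im, 4),
        lambda im: ImageEnhance.Contrast(im).enhance(1.5),
        lambda im: im.rotate(10),
    ])
    return policy(img)

img = Image.new("RGB", (224, 224), "gray")
print(toy_auto_augment(img).size)  # still (224, 224); NormalizeImage runs afterwards
```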
......@@ -2,8 +2,8 @@
Global:
checkpoints: null
pretrained_model: null
output_dir: "./output/"
device: "gpu"
output_dir: ./output/
device: gpu
class_num: 1000
save_interval: 1
eval_during_train: True
......@@ -13,11 +13,11 @@ Global:
use_visualdl: False
# used for static mode and model export
image_shape: [3, 224, 224]
save_inference_dir: "./inference"
save_inference_dir: ./inference
# model architecture
Arch:
name: "MobileNetV3_large_x1_25"
name: MobileNetV3_large_x1_25
# loss function config for training/eval process
Loss:
......@@ -38,87 +38,87 @@ Optimizer:
learning_rate: 1.3
regularizer:
name: 'L2'
coeff: 0.00002
coeff: 0.00004
# data loader for train and eval
DataLoader:
Train:
dataset:
name: ImageNetDataset
image_root: "./dataset/ILSVRC2012/"
cls_label_path: "./dataset/ILSVRC2012/train_list.txt"
transform_ops:
- RandCropImage:
size: 224
- RandFlipImage:
flip_code: 1
- NormalizeImage:
scale: 0.00392157
mean: [0.485, 0.456, 0.406]
std: [0.229, 0.224, 0.225]
order: ''
name: ImageNetDataset
image_root: ./dataset/ILSVRC2012/
cls_label_path: ./dataset/ILSVRC2012/train_list.txt
transform_ops:
- RandCropImage:
size: 224
- RandFlipImage:
flip_code: 1
- NormalizeImage:
scale: 1.0/255.0
mean: [0.485, 0.456, 0.406]
std: [0.229, 0.224, 0.225]
order: ''
sampler:
name: DistributedBatchSampler
batch_size: 512
drop_last: False
shuffle: True
name: DistributedBatchSampler
batch_size: 512
drop_last: False
shuffle: True
loader:
num_workers: 6
use_shared_memory: True
num_workers: 4
use_shared_memory: True
Eval:
# TODO: modify to the latest trainer
dataset:
name: ImageNetDataset
image_root: "./dataset/ILSVRC2012/"
cls_label_path: "./dataset/ILSVRC2012/val_list.txt"
transform_ops:
- ResizeImage:
resize_short: 256
- CropImage:
size: 224
- NormalizeImage:
scale: 0.00392157
mean: [0.485, 0.456, 0.406]
std: [0.229, 0.224, 0.225]
order: ''
name: ImageNetDataset
image_root: ./dataset/ILSVRC2012/
cls_label_path: ./dataset/ILSVRC2012/val_list.txt
transform_ops:
- ResizeImage:
resize_short: 256
- CropImage:
size: 224
- NormalizeImage:
scale: 1.0/255.0
mean: [0.485, 0.456, 0.406]
std: [0.229, 0.224, 0.225]
order: ''
sampler:
name: DistributedBatchSampler
batch_size: 64
drop_last: False
shuffle: False
name: DistributedBatchSampler
batch_size: 64
drop_last: False
shuffle: False
loader:
num_workers: 6
use_shared_memory: True
num_workers: 4
use_shared_memory: True
Infer:
infer_imgs: "docs/images/whl/demo.jpg"
infer_imgs: docs/images/whl/demo.jpg
batch_size: 10
transforms:
- DecodeImage:
to_rgb: True
channel_first: False
- ResizeImage:
resize_short: 256
- CropImage:
size: 224
- NormalizeImage:
scale: 1.0/255.0
mean: [0.485, 0.456, 0.406]
std: [0.229, 0.224, 0.225]
order: ''
- ToCHWImage:
- DecodeImage:
to_rgb: True
channel_first: False
- ResizeImage:
resize_short: 256
- CropImage:
size: 224
- NormalizeImage:
scale: 1.0/255.0
mean: [0.485, 0.456, 0.406]
std: [0.229, 0.224, 0.225]
order: ''
- ToCHWImage:
PostProcess:
name: Topk
topk: 5
class_id_map_file: "ppcls/utils/imagenet1k_label_list.txt"
class_id_map_file: ppcls/utils/imagenet1k_label_list.txt
Metric:
Train:
Train:
- TopkAcc:
topk: [1, 5]
Eval:
Eval:
- TopkAcc:
topk: [1, 5]
......@@ -2,8 +2,8 @@
Global:
checkpoints: null
pretrained_model: null
output_dir: "./output/"
device: "gpu"
output_dir: ./output/
device: gpu
class_num: 1000
save_interval: 1
eval_during_train: True
......@@ -13,11 +13,11 @@ Global:
use_visualdl: False
# used for static mode and model export
image_shape: [3, 224, 224]
save_inference_dir: "./inference"
save_inference_dir: ./inference
# model architecture
Arch:
name: "MobileNetV3_small_x0_35"
name: MobileNetV3_small_x0_35
# loss function config for training/eval process
Loss:
......@@ -38,87 +38,87 @@ Optimizer:
learning_rate: 1.3
regularizer:
name: 'L2'
coeff: 0.00002
coeff: 0.00001
# data loader for train and eval
DataLoader:
Train:
dataset:
name: ImageNetDataset
image_root: "./dataset/ILSVRC2012/"
cls_label_path: "./dataset/ILSVRC2012/train_list.txt"
transform_ops:
- RandCropImage:
size: 224
- RandFlipImage:
flip_code: 1
- NormalizeImage:
scale: 0.00392157
mean: [0.485, 0.456, 0.406]
std: [0.229, 0.224, 0.225]
order: ''
name: ImageNetDataset
image_root: ./dataset/ILSVRC2012/
cls_label_path: ./dataset/ILSVRC2012/train_list.txt
transform_ops:
- RandCropImage:
size: 224
- RandFlipImage:
flip_code: 1
- NormalizeImage:
scale: 1.0/255.0
mean: [0.485, 0.456, 0.406]
std: [0.229, 0.224, 0.225]
order: ''
sampler:
name: DistributedBatchSampler
batch_size: 512
drop_last: False
shuffle: True
name: DistributedBatchSampler
batch_size: 512
drop_last: False
shuffle: True
loader:
num_workers: 6
use_shared_memory: True
num_workers: 4
use_shared_memory: True
Eval:
# TODO: modify to the latest trainer
dataset:
name: ImageNetDataset
image_root: "./dataset/ILSVRC2012/"
cls_label_path: "./dataset/ILSVRC2012/val_list.txt"
transform_ops:
- ResizeImage:
resize_short: 256
- CropImage:
size: 224
- NormalizeImage:
scale: 0.00392157
mean: [0.485, 0.456, 0.406]
std: [0.229, 0.224, 0.225]
order: ''
name: ImageNetDataset
image_root: ./dataset/ILSVRC2012/
cls_label_path: ./dataset/ILSVRC2012/val_list.txt
transform_ops:
- ResizeImage:
resize_short: 256
- CropImage:
size: 224
- NormalizeImage:
scale: 1.0/255.0
mean: [0.485, 0.456, 0.406]
std: [0.229, 0.224, 0.225]
order: ''
sampler:
name: DistributedBatchSampler
batch_size: 64
drop_last: False
shuffle: False
name: DistributedBatchSampler
batch_size: 64
drop_last: False
shuffle: False
loader:
num_workers: 6
use_shared_memory: True
num_workers: 4
use_shared_memory: True
Infer:
infer_imgs: "docs/images/whl/demo.jpg"
infer_imgs: docs/images/whl/demo.jpg
batch_size: 10
transforms:
- DecodeImage:
to_rgb: True
channel_first: False
- ResizeImage:
resize_short: 256
- CropImage:
size: 224
- NormalizeImage:
scale: 1.0/255.0
mean: [0.485, 0.456, 0.406]
std: [0.229, 0.224, 0.225]
order: ''
- ToCHWImage:
- DecodeImage:
to_rgb: True
channel_first: False
- ResizeImage:
resize_short: 256
- CropImage:
size: 224
- NormalizeImage:
scale: 1.0/255.0
mean: [0.485, 0.456, 0.406]
std: [0.229, 0.224, 0.225]
order: ''
- ToCHWImage:
PostProcess:
name: Topk
topk: 5
class_id_map_file: "ppcls/utils/imagenet1k_label_list.txt"
class_id_map_file: ppcls/utils/imagenet1k_label_list.txt
Metric:
Train:
Train:
- TopkAcc:
topk: [1, 5]
Eval:
Eval:
- TopkAcc:
topk: [1, 5]
......@@ -2,8 +2,8 @@
Global:
checkpoints: null
pretrained_model: null
output_dir: "./output/"
device: "gpu"
output_dir: ./output/
device: gpu
class_num: 1000
save_interval: 1
eval_during_train: True
......@@ -13,11 +13,11 @@ Global:
use_visualdl: False
# used for static mode and model export
image_shape: [3, 224, 224]
save_inference_dir: "./inference"
save_inference_dir: ./inference
# model architecture
Arch:
name: "MobileNetV3_small_x0_5"
name: MobileNetV3_small_x0_5
# loss function config for training/eval process
Loss:
......@@ -38,87 +38,87 @@ Optimizer:
learning_rate: 1.3
regularizer:
name: 'L2'
coeff: 0.00002
coeff: 0.00001
# data loader for train and eval
DataLoader:
Train:
dataset:
name: ImageNetDataset
image_root: "./dataset/ILSVRC2012/"
cls_label_path: "./dataset/ILSVRC2012/train_list.txt"
transform_ops:
- RandCropImage:
size: 224
- RandFlipImage:
flip_code: 1
- NormalizeImage:
scale: 0.00392157
mean: [0.485, 0.456, 0.406]
std: [0.229, 0.224, 0.225]
order: ''
name: ImageNetDataset
image_root: ./dataset/ILSVRC2012/
cls_label_path: ./dataset/ILSVRC2012/train_list.txt
transform_ops:
- RandCropImage:
size: 224
- RandFlipImage:
flip_code: 1
- NormalizeImage:
scale: 1.0/255.0
mean: [0.485, 0.456, 0.406]
std: [0.229, 0.224, 0.225]
order: ''
sampler:
name: DistributedBatchSampler
batch_size: 512
drop_last: False
shuffle: True
name: DistributedBatchSampler
batch_size: 512
drop_last: False
shuffle: True
loader:
num_workers: 6
use_shared_memory: True
num_workers: 4
use_shared_memory: True
Eval:
    # TODO: modify to the latest trainer
dataset:
name: ImageNetDataset
image_root: "./dataset/ILSVRC2012/"
cls_label_path: "./dataset/ILSVRC2012/val_list.txt"
transform_ops:
- ResizeImage:
resize_short: 256
- CropImage:
size: 224
- NormalizeImage:
scale: 0.00392157
mean: [0.485, 0.456, 0.406]
std: [0.229, 0.224, 0.225]
order: ''
name: ImageNetDataset
image_root: ./dataset/ILSVRC2012/
cls_label_path: ./dataset/ILSVRC2012/val_list.txt
transform_ops:
- ResizeImage:
resize_short: 256
- CropImage:
size: 224
- NormalizeImage:
scale: 1.0/255.0
mean: [0.485, 0.456, 0.406]
std: [0.229, 0.224, 0.225]
order: ''
sampler:
name: DistributedBatchSampler
batch_size: 64
drop_last: False
shuffle: False
name: DistributedBatchSampler
batch_size: 64
drop_last: False
shuffle: False
loader:
num_workers: 6
use_shared_memory: True
num_workers: 4
use_shared_memory: True
Infer:
infer_imgs: "docs/images/whl/demo.jpg"
infer_imgs: docs/images/whl/demo.jpg
batch_size: 10
transforms:
- DecodeImage:
to_rgb: True
channel_first: False
- ResizeImage:
resize_short: 256
- CropImage:
size: 224
- NormalizeImage:
scale: 1.0/255.0
mean: [0.485, 0.456, 0.406]
std: [0.229, 0.224, 0.225]
order: ''
- ToCHWImage:
- DecodeImage:
to_rgb: True
channel_first: False
- ResizeImage:
resize_short: 256
- CropImage:
size: 224
- NormalizeImage:
scale: 1.0/255.0
mean: [0.485, 0.456, 0.406]
std: [0.229, 0.224, 0.225]
order: ''
- ToCHWImage:
PostProcess:
name: Topk
topk: 5
class_id_map_file: "ppcls/utils/imagenet1k_label_list.txt"
class_id_map_file: ppcls/utils/imagenet1k_label_list.txt
Metric:
Train:
Train:
- TopkAcc:
topk: [1, 5]
Eval:
Eval:
- TopkAcc:
topk: [1, 5]
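
Besides the scale rewrite, the MobileNetV3_small_x0_5 file halves the L2 regularization coefficient from 0.00002 to 0.00001. A minimal sketch of what an L2 regularizer with that coefficient contributes, assuming it is applied as a penalty added to the classification loss (frameworks may instead fold the same term into the weight update as decay):

import numpy as np

def l2_penalty(param_tensors, coeff=1e-5):          # 1e-5 is the new coeff 0.00001
    """coeff * sum of squared weights over all parameter tensors."""
    return coeff * sum(float(np.sum(w * w)) for w in param_tensors)

params = [np.ones((16, 16), np.float32), np.full((32,), 0.5, np.float32)]
ce_loss = 1.234                                     # stand-in for the cross-entropy term
print(ce_loss + l2_penalty(params))                 # regularized objective actually minimized
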
......@@ -2,8 +2,8 @@
Global:
checkpoints: null
pretrained_model: null
output_dir: "./output/"
device: "gpu"
output_dir: ./output/
device: gpu
class_num: 1000
save_interval: 1
eval_during_train: True
......@@ -13,11 +13,11 @@ Global:
use_visualdl: False
# used for static mode and model export
image_shape: [3, 224, 224]
save_inference_dir: "./inference"
save_inference_dir: ./inference
# model architecture
Arch:
name: "MobileNetV3_small_x0_75"
name: MobileNetV3_small_x0_75
# loss function config for training/eval process
Loss:
......@@ -45,80 +45,80 @@ Optimizer:
DataLoader:
Train:
dataset:
name: ImageNetDataset
image_root: "./dataset/ILSVRC2012/"
cls_label_path: "./dataset/ILSVRC2012/train_list.txt"
transform_ops:
- RandCropImage:
size: 224
- RandFlipImage:
flip_code: 1
- NormalizeImage:
scale: 0.00392157
mean: [0.485, 0.456, 0.406]
std: [0.229, 0.224, 0.225]
order: ''
name: ImageNetDataset
image_root: ./dataset/ILSVRC2012/
cls_label_path: ./dataset/ILSVRC2012/train_list.txt
transform_ops:
- RandCropImage:
size: 224
- RandFlipImage:
flip_code: 1
- NormalizeImage:
scale: 1.0/255.0
mean: [0.485, 0.456, 0.406]
std: [0.229, 0.224, 0.225]
order: ''
sampler:
name: DistributedBatchSampler
batch_size: 512
drop_last: False
shuffle: True
name: DistributedBatchSampler
batch_size: 512
drop_last: False
shuffle: True
loader:
num_workers: 6
use_shared_memory: True
num_workers: 4
use_shared_memory: True
Eval:
    # TODO: modify to the latest trainer
dataset:
name: ImageNetDataset
image_root: "./dataset/ILSVRC2012/"
cls_label_path: "./dataset/ILSVRC2012/val_list.txt"
transform_ops:
- ResizeImage:
resize_short: 256
- CropImage:
size: 224
- NormalizeImage:
scale: 0.00392157
mean: [0.485, 0.456, 0.406]
std: [0.229, 0.224, 0.225]
order: ''
name: ImageNetDataset
image_root: ./dataset/ILSVRC2012/
cls_label_path: ./dataset/ILSVRC2012/val_list.txt
transform_ops:
- ResizeImage:
resize_short: 256
- CropImage:
size: 224
- NormalizeImage:
scale: 1.0/255.0
mean: [0.485, 0.456, 0.406]
std: [0.229, 0.224, 0.225]
order: ''
sampler:
name: DistributedBatchSampler
batch_size: 64
drop_last: False
shuffle: False
name: DistributedBatchSampler
batch_size: 64
drop_last: False
shuffle: False
loader:
num_workers: 6
use_shared_memory: True
num_workers: 4
use_shared_memory: True
Infer:
infer_imgs: "docs/images/whl/demo.jpg"
infer_imgs: docs/images/whl/demo.jpg
batch_size: 10
transforms:
- DecodeImage:
to_rgb: True
channel_first: False
- ResizeImage:
resize_short: 256
- CropImage:
size: 224
- NormalizeImage:
scale: 1.0/255.0
mean: [0.485, 0.456, 0.406]
std: [0.229, 0.224, 0.225]
order: ''
- ToCHWImage:
- DecodeImage:
to_rgb: True
channel_first: False
- ResizeImage:
resize_short: 256
- CropImage:
size: 224
- NormalizeImage:
scale: 1.0/255.0
mean: [0.485, 0.456, 0.406]
std: [0.229, 0.224, 0.225]
order: ''
- ToCHWImage:
PostProcess:
name: Topk
topk: 5
class_id_map_file: "ppcls/utils/imagenet1k_label_list.txt"
class_id_map_file: ppcls/utils/imagenet1k_label_list.txt
Metric:
Train:
Train:
- TopkAcc:
topk: [1, 5]
Eval:
Eval:
- TopkAcc:
topk: [1, 5]
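
Every sampler in these files is a DistributedBatchSampler with a per-card batch_size (512 for the MobileNetV3 train loaders, 64 elsewhere), shuffling only during training. A minimal sketch of how such a sampler typically shards indices across trainers and chunks them into batches; the even round-robin partitioning is an assumption, and the Paddle implementation may pad or order differently:

import random

def distributed_batches(num_samples, batch_size, rank, world_size,
                        shuffle=True, drop_last=False, seed=0):
    """Yield this rank's batches of dataset indices: shuffle, shard by rank, then chunk."""
    indices = list(range(num_samples))
    if shuffle:
        random.Random(seed).shuffle(indices)        # same seed on every rank keeps shards consistent
    shard = indices[rank::world_size]               # round-robin partition across trainers
    for start in range(0, len(shard), batch_size):
        batch = shard[start:start + batch_size]
        if drop_last and len(batch) < batch_size:
            break
        yield batch

# number of per-card batches for one ImageNet epoch at batch_size 512 on 4 cards
print(sum(1 for _ in distributed_batches(1281167, 512, rank=0, world_size=4)))
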
......@@ -2,8 +2,8 @@
Global:
checkpoints: null
pretrained_model: null
output_dir: "./output/"
device: "gpu"
output_dir: ./output/
device: gpu
class_num: 1000
save_interval: 1
eval_during_train: True
......@@ -13,11 +13,11 @@ Global:
use_visualdl: False
# used for static mode and model export
image_shape: [3, 224, 224]
save_inference_dir: "./inference"
save_inference_dir: ./inference
# model architecture
Arch:
name: "MobileNetV3_small_x1_0"
name: MobileNetV3_small_x1_0
# loss function config for training/eval process
Loss:
......@@ -45,80 +45,80 @@ Optimizer:
DataLoader:
Train:
dataset:
name: ImageNetDataset
image_root: "./dataset/ILSVRC2012/"
cls_label_path: "./dataset/ILSVRC2012/train_list.txt"
transform_ops:
- RandCropImage:
size: 224
- RandFlipImage:
flip_code: 1
- NormalizeImage:
scale: 0.00392157
mean: [0.485, 0.456, 0.406]
std: [0.229, 0.224, 0.225]
order: ''
name: ImageNetDataset
image_root: ./dataset/ILSVRC2012/
cls_label_path: ./dataset/ILSVRC2012/train_list.txt
transform_ops:
- RandCropImage:
size: 224
- RandFlipImage:
flip_code: 1
- NormalizeImage:
scale: 1.0/255.0
mean: [0.485, 0.456, 0.406]
std: [0.229, 0.224, 0.225]
order: ''
sampler:
name: DistributedBatchSampler
batch_size: 512
drop_last: False
shuffle: True
name: DistributedBatchSampler
batch_size: 512
drop_last: False
shuffle: True
loader:
num_workers: 6
use_shared_memory: True
num_workers: 4
use_shared_memory: True
Eval:
    # TODO: modify to the latest trainer
dataset:
name: ImageNetDataset
image_root: "./dataset/ILSVRC2012/"
cls_label_path: "./dataset/ILSVRC2012/val_list.txt"
transform_ops:
- ResizeImage:
resize_short: 256
- CropImage:
size: 224
- NormalizeImage:
scale: 0.00392157
mean: [0.485, 0.456, 0.406]
std: [0.229, 0.224, 0.225]
order: ''
name: ImageNetDataset
image_root: ./dataset/ILSVRC2012/
cls_label_path: ./dataset/ILSVRC2012/val_list.txt
transform_ops:
- ResizeImage:
resize_short: 256
- CropImage:
size: 224
- NormalizeImage:
scale: 1.0/255.0
mean: [0.485, 0.456, 0.406]
std: [0.229, 0.224, 0.225]
order: ''
sampler:
name: DistributedBatchSampler
batch_size: 64
drop_last: False
shuffle: False
name: DistributedBatchSampler
batch_size: 64
drop_last: False
shuffle: False
loader:
num_workers: 6
use_shared_memory: True
num_workers: 4
use_shared_memory: True
Infer:
infer_imgs: "docs/images/whl/demo.jpg"
infer_imgs: docs/images/whl/demo.jpg
batch_size: 10
transforms:
- DecodeImage:
to_rgb: True
channel_first: False
- ResizeImage:
resize_short: 256
- CropImage:
size: 224
- NormalizeImage:
scale: 1.0/255.0
mean: [0.485, 0.456, 0.406]
std: [0.229, 0.224, 0.225]
order: ''
- ToCHWImage:
- DecodeImage:
to_rgb: True
channel_first: False
- ResizeImage:
resize_short: 256
- CropImage:
size: 224
- NormalizeImage:
scale: 1.0/255.0
mean: [0.485, 0.456, 0.406]
std: [0.229, 0.224, 0.225]
order: ''
- ToCHWImage:
PostProcess:
name: Topk
topk: 5
class_id_map_file: "ppcls/utils/imagenet1k_label_list.txt"
class_id_map_file: ppcls/utils/imagenet1k_label_list.txt
Metric:
Train:
Train:
- TopkAcc:
topk: [1, 5]
Eval:
Eval:
- TopkAcc:
topk: [1, 5]
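
Each Infer section ends with a Topk post-process (topk: 5) that turns the model's class scores into the five most likely ImageNet labels via ppcls/utils/imagenet1k_label_list.txt. A minimal sketch of that selection step, assuming an id-to-name mapping has already been loaded; the real file format and PostProcess class may differ:

import numpy as np

def topk_postprocess(scores, k=5, id_to_name=None):
    """Return the k best class ids with scores, optionally attaching label names."""
    ids = np.argsort(scores)[::-1][:k]              # class indices sorted by descending score
    results = [{"class_id": int(i), "score": float(scores[i])} for i in ids]
    if id_to_name is not None:
        for r in results:
            r["label"] = id_to_name.get(r["class_id"], "unknown")
    return results

scores = np.random.rand(1000)                       # stand-in for a 1000-class prediction
print(topk_postprocess(scores, k=5))
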
......@@ -2,8 +2,8 @@
Global:
checkpoints: null
pretrained_model: null
output_dir: "./output/"
device: "gpu"
output_dir: ./output/
device: gpu
class_num: 1000
save_interval: 1
eval_during_train: True
......@@ -13,11 +13,11 @@ Global:
use_visualdl: False
# used for static mode and model export
image_shape: [3, 224, 224]
save_inference_dir: "./inference"
save_inference_dir: ./inference
# model architecture
Arch:
name: "MobileNetV3_small_x1_25"
name: MobileNetV3_small_x1_25
# loss function config for training/eval process
Loss:
......@@ -45,80 +45,80 @@ Optimizer:
DataLoader:
Train:
dataset:
name: ImageNetDataset
image_root: "./dataset/ILSVRC2012/"
cls_label_path: "./dataset/ILSVRC2012/train_list.txt"
transform_ops:
- RandCropImage:
size: 224
- RandFlipImage:
flip_code: 1
- NormalizeImage:
scale: 0.00392157
mean: [0.485, 0.456, 0.406]
std: [0.229, 0.224, 0.225]
order: ''
name: ImageNetDataset
image_root: ./dataset/ILSVRC2012/
cls_label_path: ./dataset/ILSVRC2012/train_list.txt
transform_ops:
- RandCropImage:
size: 224
- RandFlipImage:
flip_code: 1
- NormalizeImage:
scale: 1.0/255.0
mean: [0.485, 0.456, 0.406]
std: [0.229, 0.224, 0.225]
order: ''
sampler:
name: DistributedBatchSampler
batch_size: 512
drop_last: False
shuffle: True
name: DistributedBatchSampler
batch_size: 512
drop_last: False
shuffle: True
loader:
num_workers: 6
use_shared_memory: True
num_workers: 4
use_shared_memory: True
Eval:
    # TODO: modify to the latest trainer
dataset:
name: ImageNetDataset
image_root: "./dataset/ILSVRC2012/"
cls_label_path: "./dataset/ILSVRC2012/val_list.txt"
transform_ops:
- ResizeImage:
resize_short: 256
- CropImage:
size: 224
- NormalizeImage:
scale: 0.00392157
mean: [0.485, 0.456, 0.406]
std: [0.229, 0.224, 0.225]
order: ''
name: ImageNetDataset
image_root: ./dataset/ILSVRC2012/
cls_label_path: ./dataset/ILSVRC2012/val_list.txt
transform_ops:
- ResizeImage:
resize_short: 256
- CropImage:
size: 224
- NormalizeImage:
scale: 1.0/255.0
mean: [0.485, 0.456, 0.406]
std: [0.229, 0.224, 0.225]
order: ''
sampler:
name: DistributedBatchSampler
batch_size: 64
drop_last: False
shuffle: False
name: DistributedBatchSampler
batch_size: 64
drop_last: False
shuffle: False
loader:
num_workers: 6
use_shared_memory: True
num_workers: 4
use_shared_memory: True
Infer:
infer_imgs: "docs/images/whl/demo.jpg"
infer_imgs: docs/images/whl/demo.jpg
batch_size: 10
transforms:
- DecodeImage:
to_rgb: True
channel_first: False
- ResizeImage:
resize_short: 256
- CropImage:
size: 224
- NormalizeImage:
scale: 1.0/255.0
mean: [0.485, 0.456, 0.406]
std: [0.229, 0.224, 0.225]
order: ''
- ToCHWImage:
- DecodeImage:
to_rgb: True
channel_first: False
- ResizeImage:
resize_short: 256
- CropImage:
size: 224
- NormalizeImage:
scale: 1.0/255.0
mean: [0.485, 0.456, 0.406]
std: [0.229, 0.224, 0.225]
order: ''
- ToCHWImage:
PostProcess:
name: Topk
topk: 5
class_id_map_file: "ppcls/utils/imagenet1k_label_list.txt"
class_id_map_file: ppcls/utils/imagenet1k_label_list.txt
Metric:
Train:
Train:
- TopkAcc:
topk: [1, 5]
Eval:
Eval:
- TopkAcc:
topk: [1, 5]
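
The Eval pipelines are identical across files: resize the shorter side to 256, take a centered 224 x 224 crop, then normalize. A minimal PIL sketch of the first two steps, assuming the usual resize-short and center-crop semantics:

from PIL import Image

def resize_short(img, target=256):
    """Resize so the shorter side equals target, keeping aspect ratio (ResizeImage: resize_short)."""
    w, h = img.size
    scale = target / min(w, h)
    return img.resize((round(w * scale), round(h * scale)), Image.BILINEAR)

def center_crop(img, size=224):
    """Take a centered size x size crop (CropImage: size)."""
    w, h = img.size
    left, top = (w - size) // 2, (h - size) // 2
    return img.crop((left, top, left + size, top + size))

img = Image.new("RGB", (500, 375))                  # stand-in for a decoded evaluation image
print(center_crop(resize_short(img)).size)          # (224, 224)
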
......@@ -2,8 +2,8 @@
Global:
checkpoints: null
pretrained_model: null
output_dir: "./output/"
device: "gpu"
output_dir: ./output/
device: gpu
class_num: 1000
save_interval: 1
eval_during_train: True
......@@ -13,11 +13,11 @@ Global:
use_visualdl: False
# used for static mode and model export
image_shape: [3, 224, 224]
save_inference_dir: "./inference"
save_inference_dir: ./inference
# model architecture
Arch:
name: "ResNet101"
name: ResNet101
# loss function config for training/eval process
Loss:
......@@ -30,10 +30,10 @@ Loss:
Optimizer:
name: "Momentum"
name: Momentum
momentum: 0.9
lr:
name: "Piecewise"
name: Piecewise
learning_rate: 0.1
decay_epochs: [30, 60, 90]
values: [0.1, 0.01, 0.001, 0.0001]
......@@ -46,9 +46,9 @@ Optimizer:
DataLoader:
Train:
dataset:
name: "ImageNetDataset"
image_root: "./dataset/ILSVRC2012/"
cls_label_path: "./dataset/ILSVRC2012/train_list.txt"
name: ImageNetDataset
image_root: ./dataset/ILSVRC2012/
cls_label_path: ./dataset/ILSVRC2012/train_list.txt
transform_ops:
- RandCropImage:
size: 224
......@@ -61,20 +61,20 @@ DataLoader:
order: ''
sampler:
name: "DistributedBatchSampler"
name: DistributedBatchSampler
batch_size: 64
drop_last: False
shuffle: True
loader:
num_workers: 6
num_workers: 4
use_shared_memory: True
Eval:
    # TODO: modify to the latest trainer
dataset:
name: "ImageNetDataset"
image_root: "./dataset/ILSVRC2012/"
cls_label_path: "./dataset/ILSVRC2012/val_list.txt"
name: ImageNetDataset
image_root: ./dataset/ILSVRC2012/
cls_label_path: ./dataset/ILSVRC2012/val_list.txt
transform_ops:
- ResizeImage:
resize_short: 256
......@@ -86,16 +86,16 @@ DataLoader:
std: [0.229, 0.224, 0.225]
order: ''
sampler:
name: "DistributedBatchSampler"
name: DistributedBatchSampler
batch_size: 64
drop_last: False
shuffle: False
loader:
num_workers: 6
num_workers: 4
use_shared_memory: True
Infer:
infer_imgs: "docs/images/whl/demo.jpg"
infer_imgs: docs/images/whl/demo.jpg
batch_size: 10
transforms:
- DecodeImage:
......@@ -112,9 +112,9 @@ Infer:
order: ''
- ToCHWImage:
PostProcess:
name: "Topk"
name: Topk
topk: 5
class_id_map_file: "ppcls/utils/imagenet1k_label_list.txt"
class_id_map_file: ppcls/utils/imagenet1k_label_list.txt
Metric:
Train:
......
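
The ResNet101 optimizer keeps a Piecewise schedule: decay_epochs [30, 60, 90] with values [0.1, 0.01, 0.001, 0.0001], so the learning rate drops by 10x at each listed epoch. A minimal sketch of that lookup:

import bisect

def piecewise_lr(epoch, decay_epochs=(30, 60, 90),
                 values=(0.1, 0.01, 0.001, 0.0001)):
    """Pick the learning rate for the interval the current epoch falls into."""
    return values[bisect.bisect_right(decay_epochs, epoch)]

print([piecewise_lr(e) for e in (0, 29, 30, 59, 60, 89, 90, 119)])
# [0.1, 0.1, 0.01, 0.01, 0.001, 0.001, 0.0001, 0.0001]
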
......@@ -2,8 +2,8 @@
Global:
checkpoints: null
pretrained_model: null
output_dir: "./output/"
device: "gpu"
output_dir: ./output/
device: gpu
class_num: 1000
save_interval: 1
eval_during_train: True
......@@ -13,11 +13,11 @@ Global:
use_visualdl: False
# used for static mode and model export
image_shape: [3, 224, 224]
save_inference_dir: "./inference"
save_inference_dir: ./inference
# model architecture
Arch:
name: "ResNet101_vd"
name: ResNet101_vd
# loss function config for training/eval process
Loss:
......@@ -31,10 +31,10 @@ Loss:
Optimizer:
name: "Momentum"
name: Momentum
momentum: 0.9
lr:
name: "Cosine"
name: Cosine
learning_rate: 0.1
regularizer:
name: 'L2'
......@@ -45,9 +45,9 @@ Optimizer:
DataLoader:
Train:
dataset:
name: "ImageNetDataset"
image_root: "./dataset/ILSVRC2012/"
cls_label_path: "./dataset/ILSVRC2012/train_list.txt"
name: ImageNetDataset
image_root: ./dataset/ILSVRC2012/
cls_label_path: ./dataset/ILSVRC2012/train_list.txt
transform_ops:
- RandCropImage:
size: 224
......@@ -63,20 +63,20 @@ DataLoader:
alpha: 0.2
sampler:
name: "DistributedBatchSampler"
name: DistributedBatchSampler
batch_size: 64
drop_last: False
shuffle: True
loader:
num_workers: 6
num_workers: 4
use_shared_memory: True
Eval:
    # TODO: modify to the latest trainer
dataset:
name: "ImageNetDataset"
image_root: "./dataset/ILSVRC2012/"
cls_label_path: "./dataset/ILSVRC2012/val_list.txt"
name: ImageNetDataset
image_root: ./dataset/ILSVRC2012/
cls_label_path: ./dataset/ILSVRC2012/val_list.txt
transform_ops:
- ResizeImage:
resize_short: 256
......@@ -88,16 +88,16 @@ DataLoader:
std: [0.229, 0.224, 0.225]
order: ''
sampler:
name: "DistributedBatchSampler"
name: DistributedBatchSampler
batch_size: 64
drop_last: False
shuffle: False
loader:
num_workers: 6
num_workers: 4
use_shared_memory: True
Infer:
infer_imgs: "docs/images/whl/demo.jpg"
infer_imgs: docs/images/whl/demo.jpg
batch_size: 10
transforms:
- DecodeImage:
......@@ -114,9 +114,9 @@ Infer:
order: ''
- ToCHWImage:
PostProcess:
name: "Topk"
name: Topk
topk: 5
class_id_map_file: "ppcls/utils/imagenet1k_label_list.txt"
class_id_map_file: ppcls/utils/imagenet1k_label_list.txt
Metric:
Train:
......
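
ResNet101_vd uses a Cosine schedule starting at 0.1 instead. A minimal sketch, assuming the rate anneals from the base value toward zero over the whole run; the total epoch count is set elsewhere in the config and is only assumed here:

import math

def cosine_lr(epoch, base_lr=0.1, total_epochs=200):    # total_epochs is an assumption
    """Half-cosine decay from base_lr at epoch 0 down to ~0 at the final epoch."""
    return 0.5 * base_lr * (1.0 + math.cos(math.pi * epoch / total_epochs))

print([round(cosine_lr(e), 4) for e in (0, 50, 100, 150, 199)])
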
......@@ -2,8 +2,8 @@
Global:
checkpoints: null
pretrained_model: null
output_dir: "./output/"
device: "gpu"
output_dir: ./output/
device: gpu
class_num: 1000
save_interval: 1
eval_during_train: True
......@@ -13,11 +13,11 @@ Global:
use_visualdl: False
# used for static mode and model export
image_shape: [3, 224, 224]
save_inference_dir: "./inference"
save_inference_dir: ./inference
# model architecture
Arch:
name: "ResNet152"
name: ResNet152
# loss function config for training/eval process
Loss:
......@@ -30,10 +30,10 @@ Loss:
Optimizer:
name: "Momentum"
name: Momentum
momentum: 0.9
lr:
name: "Piecewise"
name: Piecewise
learning_rate: 0.1
decay_epochs: [30, 60, 90]
values: [0.1, 0.01, 0.001, 0.0001]
......@@ -46,9 +46,9 @@ Optimizer:
DataLoader:
Train:
dataset:
name: "ImageNetDataset"
image_root: "./dataset/ILSVRC2012/"
cls_label_path: "./dataset/ILSVRC2012/train_list.txt"
name: ImageNetDataset
image_root: ./dataset/ILSVRC2012/
cls_label_path: ./dataset/ILSVRC2012/train_list.txt
transform_ops:
- RandCropImage:
size: 224
......@@ -61,20 +61,20 @@ DataLoader:
order: ''
sampler:
name: "DistributedBatchSampler"
name: DistributedBatchSampler
batch_size: 64
drop_last: False
shuffle: True
loader:
num_workers: 6
num_workers: 4
use_shared_memory: True
Eval:
    # TODO: modify to the latest trainer
dataset:
name: "ImageNetDataset"
image_root: "./dataset/ILSVRC2012/"
cls_label_path: "./dataset/ILSVRC2012/val_list.txt"
name: ImageNetDataset
image_root: ./dataset/ILSVRC2012/
cls_label_path: ./dataset/ILSVRC2012/val_list.txt
transform_ops:
- ResizeImage:
resize_short: 256
......@@ -86,16 +86,16 @@ DataLoader:
std: [0.229, 0.224, 0.225]
order: ''
sampler:
name: "DistributedBatchSampler"
name: DistributedBatchSampler
batch_size: 64
drop_last: False
shuffle: False
loader:
num_workers: 6
num_workers: 4
use_shared_memory: True
Infer:
infer_imgs: "docs/images/whl/demo.jpg"
infer_imgs: docs/images/whl/demo.jpg
batch_size: 10
transforms:
- DecodeImage:
......@@ -112,9 +112,9 @@ Infer:
order: ''
- ToCHWImage:
PostProcess:
name: "Topk"
name: Topk
topk: 5
class_id_map_file: "ppcls/utils/imagenet1k_label_list.txt"
class_id_map_file: ppcls/utils/imagenet1k_label_list.txt
Metric:
Train:
......
......@@ -2,8 +2,8 @@
Global:
checkpoints: null
pretrained_model: null
output_dir: "./output/"
device: "gpu"
output_dir: ./output/
device: gpu
class_num: 1000
save_interval: 1
eval_during_train: True
......@@ -13,11 +13,11 @@ Global:
use_visualdl: False
# used for static mode and model export
image_shape: [3, 224, 224]
save_inference_dir: "./inference"
save_inference_dir: ./inference
# model architecture
Arch:
name: "ResNet152_vd"
name: ResNet152_vd
# loss function config for training/eval process
Loss:
......@@ -31,10 +31,10 @@ Loss:
Optimizer:
name: "Momentum"
name: Momentum
momentum: 0.9
lr:
name: "Cosine"
name: Cosine
learning_rate: 0.1
regularizer:
name: 'L2'
......@@ -45,9 +45,9 @@ Optimizer:
DataLoader:
Train:
dataset:
name: "ImageNetDataset"
image_root: "./dataset/ILSVRC2012/"
cls_label_path: "./dataset/ILSVRC2012/train_list.txt"
name: ImageNetDataset
image_root: ./dataset/ILSVRC2012/
cls_label_path: ./dataset/ILSVRC2012/train_list.txt
transform_ops:
- RandCropImage:
size: 224
......@@ -63,20 +63,20 @@ DataLoader:
alpha: 0.2
sampler:
name: "DistributedBatchSampler"
name: DistributedBatchSampler
batch_size: 64
drop_last: False
shuffle: True
loader:
num_workers: 6
num_workers: 4
use_shared_memory: True
Eval:
    # TODO: modify to the latest trainer
dataset:
name: "ImageNetDataset"
image_root: "./dataset/ILSVRC2012/"
cls_label_path: "./dataset/ILSVRC2012/val_list.txt"
name: ImageNetDataset
image_root: ./dataset/ILSVRC2012/
cls_label_path: ./dataset/ILSVRC2012/val_list.txt
transform_ops:
- ResizeImage:
resize_short: 256
......@@ -88,16 +88,16 @@ DataLoader:
std: [0.229, 0.224, 0.225]
order: ''
sampler:
name: "DistributedBatchSampler"
name: DistributedBatchSampler
batch_size: 64
drop_last: False
shuffle: False
loader:
num_workers: 6
num_workers: 4
use_shared_memory: True
Infer:
infer_imgs: "docs/images/whl/demo.jpg"
infer_imgs: docs/images/whl/demo.jpg
batch_size: 10
transforms:
- DecodeImage:
......@@ -114,9 +114,9 @@ Infer:
order: ''
- ToCHWImage:
PostProcess:
name: "Topk"
name: Topk
topk: 5
class_id_map_file: "ppcls/utils/imagenet1k_label_list.txt"
class_id_map_file: ppcls/utils/imagenet1k_label_list.txt
Metric:
Train:
......
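
The truncated training hunks of the _vd configs end with alpha: 0.2, which in these configs typically belongs to a batch-level Mixup operator. A minimal numpy sketch of mixup with that alpha, assuming one-hot labels; the PaddleClas operator itself is not shown in this diff:

import numpy as np

def mixup(images, labels_onehot, alpha=0.2, rng=np.random.default_rng(0)):
    """Blend each sample with a shuffled partner; the mixing weight is drawn from Beta(alpha, alpha)."""
    lam = rng.beta(alpha, alpha)
    idx = rng.permutation(len(images))
    mixed_x = lam * images + (1 - lam) * images[idx]
    mixed_y = lam * labels_onehot + (1 - lam) * labels_onehot[idx]
    return mixed_x, mixed_y

x = np.random.rand(4, 3, 224, 224).astype(np.float32)
y = np.eye(1000, dtype=np.float32)[np.random.randint(0, 1000, 4)]
mx, my = mixup(x, y)
print(mx.shape, my.shape)                           # (4, 3, 224, 224) (4, 1000)
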
......@@ -2,8 +2,8 @@
Global:
checkpoints: null
pretrained_model: null
output_dir: "./output/"
device: "gpu"
output_dir: ./output/
device: gpu
class_num: 1000
save_interval: 1
eval_during_train: True
......@@ -13,11 +13,11 @@ Global:
use_visualdl: False
# used for static mode and model export
image_shape: [3, 224, 224]
save_inference_dir: "./inference"
save_inference_dir: ./inference
# model architecture
Arch:
name: "ResNet18"
name: ResNet18
# loss function config for training/eval process
Loss:
......@@ -30,10 +30,10 @@ Loss:
Optimizer:
name: "Momentum"
name: Momentum
momentum: 0.9
lr:
name: "Piecewise"
name: Piecewise
learning_rate: 0.1
decay_epochs: [30, 60, 90]
values: [0.1, 0.01, 0.001, 0.0001]
......@@ -46,9 +46,9 @@ Optimizer:
DataLoader:
Train:
dataset:
name: "ImageNetDataset"
image_root: "./dataset/ILSVRC2012/"
cls_label_path: "./dataset/ILSVRC2012/train_list.txt"
name: ImageNetDataset
image_root: ./dataset/ILSVRC2012/
cls_label_path: ./dataset/ILSVRC2012/train_list.txt
transform_ops:
- RandCropImage:
size: 224
......@@ -61,20 +61,20 @@ DataLoader:
order: ''
sampler:
name: "DistributedBatchSampler"
name: DistributedBatchSampler
batch_size: 64
drop_last: False
shuffle: True
loader:
num_workers: 6
num_workers: 4
use_shared_memory: True
Eval:
    # TODO: modify to the latest trainer
dataset:
name: "ImageNetDataset"
image_root: "./dataset/ILSVRC2012/"
cls_label_path: "./dataset/ILSVRC2012/val_list.txt"
name: ImageNetDataset
image_root: ./dataset/ILSVRC2012/
cls_label_path: ./dataset/ILSVRC2012/val_list.txt
transform_ops:
- ResizeImage:
resize_short: 256
......@@ -86,16 +86,16 @@ DataLoader:
std: [0.229, 0.224, 0.225]
order: ''
sampler:
name: "DistributedBatchSampler"
name: DistributedBatchSampler
batch_size: 64
drop_last: False
shuffle: False
loader:
num_workers: 6
num_workers: 4
use_shared_memory: True
Infer:
infer_imgs: "docs/images/whl/demo.jpg"
infer_imgs: docs/images/whl/demo.jpg
batch_size: 10
transforms:
- DecodeImage:
......@@ -112,9 +112,9 @@ Infer:
order: ''
- ToCHWImage:
PostProcess:
name: "Topk"
name: Topk
topk: 5
class_id_map_file: "ppcls/utils/imagenet1k_label_list.txt"
class_id_map_file: ppcls/utils/imagenet1k_label_list.txt
Metric:
Train:
......
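
All of these configs train with the Momentum optimizer at momentum 0.9 on top of the schedules above. A minimal sketch of the classic SGD-with-momentum update, assuming the plain velocity formulation:

import numpy as np

def momentum_step(w, grad, velocity, lr, mu=0.9):
    """v <- mu * v + grad;  w <- w - lr * v  (classic momentum update)."""
    velocity = mu * velocity + grad
    return w - lr * velocity, velocity

w, v = np.zeros(4), np.zeros(4)
for _ in range(3):
    grad = np.ones(4)                               # stand-in gradient
    w, v = momentum_step(w, grad, v, lr=0.1)
print(w)                                            # parameters accelerate along the persistent gradient
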
......@@ -2,8 +2,8 @@
Global:
checkpoints: null
pretrained_model: null
output_dir: "./output/"
device: "gpu"
output_dir: ./output/
device: gpu
class_num: 1000
save_interval: 1
eval_during_train: True
......@@ -13,11 +13,11 @@ Global:
use_visualdl: False
# used for static mode and model export
image_shape: [3, 224, 224]
save_inference_dir: "./inference"
save_inference_dir: ./inference
# model architecture
Arch:
name: "ResNet18_vd"
name: ResNet18_vd
# loss function config for training/eval process
Loss:
......@@ -31,10 +31,10 @@ Loss:
Optimizer:
name: "Momentum"
name: Momentum
momentum: 0.9
lr:
name: "Cosine"
name: Cosine
learning_rate: 0.1
regularizer:
name: 'L2'
......@@ -45,9 +45,9 @@ Optimizer:
DataLoader:
Train:
dataset:
name: "ImageNetDataset"
image_root: "./dataset/ILSVRC2012/"
cls_label_path: "./dataset/ILSVRC2012/train_list.txt"
name: ImageNetDataset
image_root: ./dataset/ILSVRC2012/
cls_label_path: ./dataset/ILSVRC2012/train_list.txt
transform_ops:
- RandCropImage:
size: 224
......@@ -63,20 +63,20 @@ DataLoader:
alpha: 0.2
sampler:
name: "DistributedBatchSampler"
name: DistributedBatchSampler
batch_size: 64
drop_last: False
shuffle: True
loader:
num_workers: 6
num_workers: 4
use_shared_memory: True
Eval:
    # TODO: modify to the latest trainer
dataset:
name: "ImageNetDataset"
image_root: "./dataset/ILSVRC2012/"
cls_label_path: "./dataset/ILSVRC2012/val_list.txt"
name: ImageNetDataset
image_root: ./dataset/ILSVRC2012/
cls_label_path: ./dataset/ILSVRC2012/val_list.txt
transform_ops:
- ResizeImage:
resize_short: 256
......@@ -88,16 +88,16 @@ DataLoader:
std: [0.229, 0.224, 0.225]
order: ''
sampler:
name: "DistributedBatchSampler"
name: DistributedBatchSampler
batch_size: 64
drop_last: False
shuffle: False
loader:
num_workers: 6
num_workers: 4
use_shared_memory: True
Infer:
infer_imgs: "docs/images/whl/demo.jpg"
infer_imgs: docs/images/whl/demo.jpg
batch_size: 10
transforms:
- DecodeImage:
......@@ -114,9 +114,9 @@ Infer:
order: ''
- ToCHWImage:
PostProcess:
name: "Topk"
name: Topk
topk: 5
class_id_map_file: "ppcls/utils/imagenet1k_label_list.txt"
class_id_map_file: ppcls/utils/imagenet1k_label_list.txt
Metric:
Train:
......
......@@ -2,8 +2,8 @@
Global:
checkpoints: null
pretrained_model: null
output_dir: "./output/"
device: "gpu"
output_dir: ./output/
device: gpu
class_num: 1000
save_interval: 1
eval_during_train: True
......@@ -13,11 +13,11 @@ Global:
use_visualdl: False
# used for static mode and model export
image_shape: [3, 224, 224]
save_inference_dir: "./inference"
save_inference_dir: ./inference
# model architecture
Arch:
name: "ResNet200_vd"
name: ResNet200_vd
# loss function config for training/eval process
Loss:
......@@ -31,10 +31,10 @@ Loss:
Optimizer:
name: "Momentum"
name: Momentum
momentum: 0.9
lr:
name: "Cosine"
name: Cosine
learning_rate: 0.1
regularizer:
name: 'L2'
......@@ -45,9 +45,9 @@ Optimizer:
DataLoader:
Train:
dataset:
name: "ImageNetDataset"
image_root: "./dataset/ILSVRC2012/"
cls_label_path: "./dataset/ILSVRC2012/train_list.txt"
name: ImageNetDataset
image_root: ./dataset/ILSVRC2012/
cls_label_path: ./dataset/ILSVRC2012/train_list.txt
transform_ops:
- RandCropImage:
size: 224
......@@ -63,20 +63,20 @@ DataLoader:
alpha: 0.2
sampler:
name: "DistributedBatchSampler"
name: DistributedBatchSampler
batch_size: 64
drop_last: False
shuffle: True
loader:
num_workers: 6
num_workers: 4
use_shared_memory: True
Eval:
    # TODO: modify to the latest trainer
dataset:
name: "ImageNetDataset"
image_root: "./dataset/ILSVRC2012/"
cls_label_path: "./dataset/ILSVRC2012/val_list.txt"
name: ImageNetDataset
image_root: ./dataset/ILSVRC2012/
cls_label_path: ./dataset/ILSVRC2012/val_list.txt
transform_ops:
- ResizeImage:
resize_short: 256
......@@ -88,16 +88,16 @@ DataLoader:
std: [0.229, 0.224, 0.225]
order: ''
sampler:
name: "DistributedBatchSampler"
name: DistributedBatchSampler
batch_size: 64
drop_last: False
shuffle: False
loader:
num_workers: 6
num_workers: 4
use_shared_memory: True
Infer:
infer_imgs: "docs/images/whl/demo.jpg"
infer_imgs: docs/images/whl/demo.jpg
batch_size: 10
transforms:
- DecodeImage:
......@@ -114,9 +114,9 @@ Infer:
order: ''
- ToCHWImage:
PostProcess:
name: "Topk"
name: Topk
topk: 5
class_id_map_file: "ppcls/utils/imagenet1k_label_list.txt"
class_id_map_file: ppcls/utils/imagenet1k_label_list.txt
Metric:
Train:
......
......@@ -2,8 +2,8 @@
Global:
checkpoints: null
pretrained_model: null
output_dir: "./output/"
device: "gpu"
output_dir: ./output/
device: gpu
class_num: 1000
save_interval: 1
eval_during_train: True
......@@ -13,11 +13,11 @@ Global:
use_visualdl: False
# used for static mode and model export
image_shape: [3, 224, 224]
save_inference_dir: "./inference"
save_inference_dir: ./inference
# model architecture
Arch:
name: "ResNet34"
name: ResNet34
# loss function config for training/eval process
Loss:
......@@ -30,10 +30,10 @@ Loss:
Optimizer:
name: "Momentum"
name: Momentum
momentum: 0.9
lr:
name: "Piecewise"
name: Piecewise
learning_rate: 0.1
decay_epochs: [30, 60, 90]
values: [0.1, 0.01, 0.001, 0.0001]
......@@ -46,9 +46,9 @@ Optimizer:
DataLoader:
Train:
dataset:
name: "ImageNetDataset"
image_root: "./dataset/ILSVRC2012/"
cls_label_path: "./dataset/ILSVRC2012/train_list.txt"
name: ImageNetDataset
image_root: ./dataset/ILSVRC2012/
cls_label_path: ./dataset/ILSVRC2012/train_list.txt
transform_ops:
- RandCropImage:
size: 224
......@@ -61,20 +61,20 @@ DataLoader:
order: ''
sampler:
name: "DistributedBatchSampler"
name: DistributedBatchSampler
batch_size: 64
drop_last: False
shuffle: True
loader:
num_workers: 6
num_workers: 4
use_shared_memory: True
Eval:
    # TODO: modify to the latest trainer
dataset:
name: "ImageNetDataset"
image_root: "./dataset/ILSVRC2012/"
cls_label_path: "./dataset/ILSVRC2012/val_list.txt"
name: ImageNetDataset
image_root: ./dataset/ILSVRC2012/
cls_label_path: ./dataset/ILSVRC2012/val_list.txt
transform_ops:
- ResizeImage:
resize_short: 256
......@@ -86,16 +86,16 @@ DataLoader:
std: [0.229, 0.224, 0.225]
order: ''
sampler:
name: "DistributedBatchSampler"
name: DistributedBatchSampler
batch_size: 64
drop_last: False
shuffle: False
loader:
num_workers: 6
num_workers: 4
use_shared_memory: True
Infer:
infer_imgs: "docs/images/whl/demo.jpg"
infer_imgs: docs/images/whl/demo.jpg
batch_size: 10
transforms:
- DecodeImage:
......@@ -112,9 +112,9 @@ Infer:
order: ''
- ToCHWImage:
PostProcess:
name: "Topk"
name: Topk
topk: 5
class_id_map_file: "ppcls/utils/imagenet1k_label_list.txt"
class_id_map_file: ppcls/utils/imagenet1k_label_list.txt
Metric:
Train:
......
......@@ -2,8 +2,8 @@
Global:
checkpoints: null
pretrained_model: null
output_dir: "./output/"
device: "gpu"
output_dir: ./output/
device: gpu
class_num: 1000
save_interval: 1
eval_during_train: True
......@@ -13,11 +13,11 @@ Global:
use_visualdl: False
# used for static mode and model export
image_shape: [3, 224, 224]
save_inference_dir: "./inference"
save_inference_dir: ./inference
# model architecture
Arch:
name: "ResNet34_vd"
name: ResNet34_vd
# loss function config for training/eval process
Loss:
......@@ -31,10 +31,10 @@ Loss:
Optimizer:
name: "Momentum"
name: Momentum
momentum: 0.9
lr:
name: "Cosine"
name: Cosine
learning_rate: 0.1
regularizer:
name: 'L2'
......@@ -45,9 +45,9 @@ Optimizer:
DataLoader:
Train:
dataset:
name: "ImageNetDataset"
image_root: "./dataset/ILSVRC2012/"
cls_label_path: "./dataset/ILSVRC2012/train_list.txt"
name: ImageNetDataset
image_root: ./dataset/ILSVRC2012/
cls_label_path: ./dataset/ILSVRC2012/train_list.txt
transform_ops:
- RandCropImage:
size: 224
......@@ -63,20 +63,20 @@ DataLoader:
alpha: 0.2
sampler:
name: "DistributedBatchSampler"
name: DistributedBatchSampler
batch_size: 64
drop_last: False
shuffle: True
loader:
num_workers: 6
num_workers: 4
use_shared_memory: True
Eval:
    # TODO: modify to the latest trainer
dataset:
name: "ImageNetDataset"
image_root: "./dataset/ILSVRC2012/"
cls_label_path: "./dataset/ILSVRC2012/val_list.txt"
name: ImageNetDataset
image_root: ./dataset/ILSVRC2012/
cls_label_path: ./dataset/ILSVRC2012/val_list.txt
transform_ops:
- ResizeImage:
resize_short: 256
......@@ -88,16 +88,16 @@ DataLoader:
std: [0.229, 0.224, 0.225]
order: ''
sampler:
name: "DistributedBatchSampler"
name: DistributedBatchSampler
batch_size: 64
drop_last: False
shuffle: False
loader:
num_workers: 6
num_workers: 4
use_shared_memory: True
Infer:
infer_imgs: "docs/images/whl/demo.jpg"
infer_imgs: docs/images/whl/demo.jpg
batch_size: 10
transforms:
- DecodeImage:
......@@ -114,9 +114,9 @@ Infer:
order: ''
- ToCHWImage:
PostProcess:
name: "Topk"
name: Topk
topk: 5
class_id_map_file: "ppcls/utils/imagenet1k_label_list.txt"
class_id_map_file: ppcls/utils/imagenet1k_label_list.txt
Metric:
Train:
......
......@@ -2,8 +2,8 @@
Global:
checkpoints: null
pretrained_model: null
output_dir: "./output/"
device: "gpu"
output_dir: ./output/
device: gpu
class_num: 1000
save_interval: 1
eval_during_train: True
......@@ -13,11 +13,11 @@ Global:
use_visualdl: False
# used for static mode and model export
image_shape: [3, 224, 224]
save_inference_dir: "./inference"
save_inference_dir: ./inference
# model architecture
Arch:
name: "ResNet50"
name: ResNet50
# loss function config for training/eval process
Loss:
......@@ -30,10 +30,10 @@ Loss:
Optimizer:
name: "Momentum"
name: Momentum
momentum: 0.9
lr:
name: "Piecewise"
name: Piecewise
learning_rate: 0.1
decay_epochs: [30, 60, 90]
values: [0.1, 0.01, 0.001, 0.0001]
......@@ -46,9 +46,9 @@ Optimizer:
DataLoader:
Train:
dataset:
name: "ImageNetDataset"
image_root: "./dataset/ILSVRC2012/"
cls_label_path: "./dataset/ILSVRC2012/train_list.txt"
name: ImageNetDataset
image_root: ./dataset/ILSVRC2012/
cls_label_path: ./dataset/ILSVRC2012/train_list.txt
transform_ops:
- RandCropImage:
size: 224
......@@ -61,20 +61,20 @@ DataLoader:
order: ''
sampler:
name: "DistributedBatchSampler"
name: DistributedBatchSampler
batch_size: 64
drop_last: False
shuffle: True
loader:
num_workers: 6
num_workers: 4
use_shared_memory: True
Eval:
    # TODO: modify to the latest trainer
dataset:
name: "ImageNetDataset"
image_root: "./dataset/ILSVRC2012/"
cls_label_path: "./dataset/ILSVRC2012/val_list.txt"
name: ImageNetDataset
image_root: ./dataset/ILSVRC2012/
cls_label_path: ./dataset/ILSVRC2012/val_list.txt
transform_ops:
- ResizeImage:
resize_short: 256
......@@ -86,16 +86,16 @@ DataLoader:
std: [0.229, 0.224, 0.225]
order: ''
sampler:
name: "DistributedBatchSampler"
name: DistributedBatchSampler
batch_size: 64
drop_last: False
shuffle: False
loader:
num_workers: 6
num_workers: 4
use_shared_memory: True
Infer:
infer_imgs: "docs/images/whl/demo.jpg"
infer_imgs: docs/images/whl/demo.jpg
batch_size: 10
transforms:
- DecodeImage:
......@@ -112,9 +112,9 @@ Infer:
order: ''
- ToCHWImage:
PostProcess:
name: "Topk"
name: Topk
topk: 5
class_id_map_file: "ppcls/utils/imagenet1k_label_list.txt"
class_id_map_file: ppcls/utils/imagenet1k_label_list.txt
Metric:
Train:
......
......@@ -2,8 +2,8 @@
Global:
checkpoints: null
pretrained_model: null
output_dir: "./output/"
device: "gpu"
output_dir: ./output/
device: gpu
class_num: 1000
save_interval: 1
eval_during_train: True
......@@ -13,11 +13,11 @@ Global:
use_visualdl: False
# used for static mode and model export
image_shape: [3, 224, 224]
save_inference_dir: "./inference"
save_inference_dir: ./inference
# model architecture
Arch:
name: "ResNet50_vd"
name: ResNet50_vd
# loss function config for training/eval process
Loss:
......@@ -31,10 +31,10 @@ Loss:
Optimizer:
name: "Momentum"
name: Momentum
momentum: 0.9
lr:
name: "Cosine"
name: Cosine
learning_rate: 0.1
regularizer:
name: 'L2'
......@@ -45,9 +45,9 @@ Optimizer:
DataLoader:
Train:
dataset:
name: "ImageNetDataset"
image_root: "./dataset/ILSVRC2012/"
cls_label_path: "./dataset/ILSVRC2012/train_list.txt"
name: ImageNetDataset
image_root: ./dataset/ILSVRC2012/
cls_label_path: ./dataset/ILSVRC2012/train_list.txt
transform_ops:
- RandCropImage:
size: 224
......@@ -63,20 +63,20 @@ DataLoader:
alpha: 0.2
sampler:
name: "DistributedBatchSampler"
name: DistributedBatchSampler
batch_size: 64
drop_last: False
shuffle: True
loader:
num_workers: 6
num_workers: 4
use_shared_memory: True
Eval:
    # TODO: modify to the latest trainer
dataset:
name: "ImageNetDataset"
image_root: "./dataset/ILSVRC2012/"
cls_label_path: "./dataset/ILSVRC2012/val_list.txt"
name: ImageNetDataset
image_root: ./dataset/ILSVRC2012/
cls_label_path: ./dataset/ILSVRC2012/val_list.txt
transform_ops:
- ResizeImage:
resize_short: 256
......@@ -88,16 +88,16 @@ DataLoader:
std: [0.229, 0.224, 0.225]
order: ''
sampler:
name: "DistributedBatchSampler"
name: DistributedBatchSampler
batch_size: 64
drop_last: False
shuffle: False
loader:
num_workers: 6
num_workers: 4
use_shared_memory: True
Infer:
infer_imgs: "docs/images/whl/demo.jpg"
infer_imgs: docs/images/whl/demo.jpg
batch_size: 10
transforms:
- DecodeImage:
......@@ -114,9 +114,9 @@ Infer:
order: ''
- ToCHWImage:
PostProcess:
name: "Topk"
name: Topk
topk: 5
class_id_map_file: "ppcls/utils/imagenet1k_label_list.txt"
class_id_map_file: ppcls/utils/imagenet1k_label_list.txt
Metric:
Train:
......
......@@ -2,8 +2,8 @@
Global:
checkpoints: null
pretrained_model: null
output_dir: "./output/"
device: "gpu"
output_dir: ./output/
device: gpu
class_num: 1000
save_interval: 1
eval_during_train: True
......@@ -13,11 +13,11 @@ Global:
use_visualdl: False
# used for static mode and model export
image_shape: [3, 224, 224]
save_inference_dir: "./inference"
save_inference_dir: ./inference
# model architecture
Arch:
name: "VGG11"
name: VGG11
# loss function config for training/eval process
Loss:
......@@ -44,80 +44,80 @@ Optimizer:
DataLoader:
Train:
dataset:
name: ImageNetDataset
image_root: "./dataset/ILSVRC2012/"
cls_label_path: "./dataset/ILSVRC2012/train_list.txt"
transform_ops:
- RandCropImage:
size: 224
- RandFlipImage:
flip_code: 1
- NormalizeImage:
scale: 0.00392157
mean: [0.485, 0.456, 0.406]
std: [0.229, 0.224, 0.225]
order: ''
name: ImageNetDataset
image_root: ./dataset/ILSVRC2012/
cls_label_path: ./dataset/ILSVRC2012/train_list.txt
transform_ops:
- RandCropImage:
size: 224
- RandFlipImage:
flip_code: 1
- NormalizeImage:
scale: 1.0/255.0
mean: [0.485, 0.456, 0.406]
std: [0.229, 0.224, 0.225]
order: ''
sampler:
name: DistributedBatchSampler
batch_size: 64
drop_last: False
shuffle: True
name: DistributedBatchSampler
batch_size: 128
drop_last: False
shuffle: True
loader:
num_workers: 6
use_shared_memory: True
num_workers: 4
use_shared_memory: True
Eval:
    # TODO: modify to the latest trainer
dataset:
name: ImageNetDataset
image_root: "./dataset/ILSVRC2012/"
cls_label_path: "./dataset/ILSVRC2012/val_list.txt"
transform_ops:
- ResizeImage:
resize_short: 256
- CropImage:
size: 224
- NormalizeImage:
scale: 0.00392157
mean: [0.485, 0.456, 0.406]
std: [0.229, 0.224, 0.225]
order: ''
name: ImageNetDataset
image_root: ./dataset/ILSVRC2012/
cls_label_path: ./dataset/ILSVRC2012/val_list.txt
transform_ops:
- ResizeImage:
resize_short: 256
- CropImage:
size: 224
- NormalizeImage:
scale: 1.0/255.0
mean: [0.485, 0.456, 0.406]
std: [0.229, 0.224, 0.225]
order: ''
sampler:
name: DistributedBatchSampler
batch_size: 64
drop_last: False
shuffle: False
name: DistributedBatchSampler
batch_size: 64
drop_last: False
shuffle: False
loader:
num_workers: 6
use_shared_memory: True
num_workers: 4
use_shared_memory: True
Infer:
infer_imgs: "docs/images/whl/demo.jpg"
infer_imgs: docs/images/whl/demo.jpg
batch_size: 10
transforms:
- DecodeImage:
to_rgb: True
channel_first: False
- ResizeImage:
resize_short: 256
- CropImage:
size: 224
- NormalizeImage:
scale: 1.0/255.0
mean: [0.485, 0.456, 0.406]
std: [0.229, 0.224, 0.225]
order: ''
- ToCHWImage:
- DecodeImage:
to_rgb: True
channel_first: False
- ResizeImage:
resize_short: 256
- CropImage:
size: 224
- NormalizeImage:
scale: 1.0/255.0
mean: [0.485, 0.456, 0.406]
std: [0.229, 0.224, 0.225]
order: ''
- ToCHWImage:
PostProcess:
name: Topk
topk: 5
class_id_map_file: "ppcls/utils/imagenet1k_label_list.txt"
class_id_map_file: ppcls/utils/imagenet1k_label_list.txt
Metric:
Train:
Train:
- TopkAcc:
topk: [1, 5]
Eval:
Eval:
- TopkAcc:
topk: [1, 5]
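
The training pipelines share the same two augmentations throughout: RandCropImage with size 224 (a random-resized crop) and RandFlipImage with flip_code 1 (a horizontal flip, following the OpenCV flip-code convention). A minimal PIL sketch of both steps; the scale and ratio ranges are conventional defaults assumed here, not values from the config:

import random
from PIL import Image

def rand_crop(img, size=224, scale=(0.08, 1.0), ratio=(3 / 4, 4 / 3)):
    """Random-resized crop: sample an area and aspect ratio, crop, then resize to size x size."""
    w, h = img.size
    for _ in range(10):
        area = random.uniform(*scale) * w * h
        ar = random.uniform(*ratio)
        cw, ch = int(round((area * ar) ** 0.5)), int(round((area / ar) ** 0.5))
        if 0 < cw <= w and 0 < ch <= h:
            x, y = random.randint(0, w - cw), random.randint(0, h - ch)
            return img.crop((x, y, x + cw, y + ch)).resize((size, size), Image.BILINEAR)
    return img.resize((size, size), Image.BILINEAR)      # fallback if sampling keeps failing

def rand_flip(img, flip_code=1, p=0.5):
    """flip_code 1 means horizontal flip (OpenCV convention), applied with probability p."""
    return img.transpose(Image.FLIP_LEFT_RIGHT) if flip_code == 1 and random.random() < p else img

img = Image.new("RGB", (640, 480))
print(rand_flip(rand_crop(img)).size)                    # (224, 224)
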
......@@ -2,8 +2,8 @@
Global:
checkpoints: null
pretrained_model: null
output_dir: "./output/"
device: "gpu"
output_dir: ./output/
device: gpu
class_num: 1000
save_interval: 1
eval_during_train: True
......@@ -13,11 +13,11 @@ Global:
use_visualdl: False
# used for static mode and model export
image_shape: [3, 224, 224]
save_inference_dir: "./inference"
save_inference_dir: ./inference
# model architecture
Arch:
name: "VGG13"
name: VGG13
# loss function config for training/eval process
Loss:
......@@ -44,80 +44,80 @@ Optimizer:
DataLoader:
Train:
dataset:
name: ImageNetDataset
image_root: "./dataset/ILSVRC2012/"
cls_label_path: "./dataset/ILSVRC2012/train_list.txt"
transform_ops:
- RandCropImage:
size: 224
- RandFlipImage:
flip_code: 1
- NormalizeImage:
scale: 0.00392157
mean: [0.485, 0.456, 0.406]
std: [0.229, 0.224, 0.225]
order: ''
name: ImageNetDataset
image_root: ./dataset/ILSVRC2012/
cls_label_path: ./dataset/ILSVRC2012/train_list.txt
transform_ops:
- RandCropImage:
size: 224
- RandFlipImage:
flip_code: 1
- NormalizeImage:
scale: 1.0/255.0
mean: [0.485, 0.456, 0.406]
std: [0.229, 0.224, 0.225]
order: ''
sampler:
name: DistributedBatchSampler
batch_size: 64
drop_last: False
shuffle: True
name: DistributedBatchSampler
batch_size: 64
drop_last: False
shuffle: True
loader:
num_workers: 6
use_shared_memory: True
num_workers: 4
use_shared_memory: True
Eval:
    # TODO: modify to the latest trainer
dataset:
name: ImageNetDataset
image_root: "./dataset/ILSVRC2012/"
cls_label_path: "./dataset/ILSVRC2012/val_list.txt"
transform_ops:
- ResizeImage:
resize_short: 256
- CropImage:
size: 224
- NormalizeImage:
scale: 0.00392157
mean: [0.485, 0.456, 0.406]
std: [0.229, 0.224, 0.225]
order: ''
name: ImageNetDataset
image_root: ./dataset/ILSVRC2012/
cls_label_path: ./dataset/ILSVRC2012/val_list.txt
transform_ops:
- ResizeImage:
resize_short: 256
- CropImage:
size: 224
- NormalizeImage:
scale: 1.0/255.0
mean: [0.485, 0.456, 0.406]
std: [0.229, 0.224, 0.225]
order: ''
sampler:
name: DistributedBatchSampler
batch_size: 64
drop_last: False
shuffle: False
name: DistributedBatchSampler
batch_size: 64
drop_last: False
shuffle: False
loader:
num_workers: 6
use_shared_memory: True
num_workers: 4
use_shared_memory: True
Infer:
infer_imgs: "docs/images/whl/demo.jpg"
infer_imgs: docs/images/whl/demo.jpg
batch_size: 10
transforms:
- DecodeImage:
to_rgb: True
channel_first: False
- ResizeImage:
resize_short: 256
- CropImage:
size: 224
- NormalizeImage:
scale: 1.0/255.0
mean: [0.485, 0.456, 0.406]
std: [0.229, 0.224, 0.225]
order: ''
- ToCHWImage:
- DecodeImage:
to_rgb: True
channel_first: False
- ResizeImage:
resize_short: 256
- CropImage:
size: 224
- NormalizeImage:
scale: 1.0/255.0
mean: [0.485, 0.456, 0.406]
std: [0.229, 0.224, 0.225]
order: ''
- ToCHWImage:
PostProcess:
name: Topk
topk: 5
class_id_map_file: "ppcls/utils/imagenet1k_label_list.txt"
class_id_map_file: ppcls/utils/imagenet1k_label_list.txt
Metric:
Train:
Train:
- TopkAcc:
topk: [1, 5]
Eval:
Eval:
- TopkAcc:
topk: [1, 5]
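
Finally, every Metric block reports TopkAcc for k in [1, 5]. A minimal sketch of that computation over a batch of logits and integer labels:

import numpy as np

def topk_acc(logits, labels, ks=(1, 5)):
    """Fraction of samples whose true label is among the top-k predictions, for each k."""
    order = np.argsort(logits, axis=1)[:, ::-1]     # classes sorted by score, best first
    return {f"top{k}": float(np.mean([labels[i] in order[i, :k]
                                      for i in range(len(labels))])) for k in ks}

logits = np.random.rand(8, 1000)
labels = np.random.randint(0, 1000, 8)
print(topk_acc(logits, labels))
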