未验证 提交 c13fe196 编写于 作者: F Felix 提交者: GitHub

Merge branch 'develop_reg' into patch-6

# global configs
Global:
  checkpoints: null
  pretrained_model: null
  output_dir: "./output/"
  device: "gpu"
  class_num: 5013
  save_interval: 1
  eval_mode: "retrieval"
  eval_during_train: True
  eval_interval: 1
  epochs: 120
  print_batch_step: 10
  use_visualdl: False
  image_shape: [3, 224, 224]
  # path(s) of images for prediction; left empty here, set on the CLI
  infer_imgs: null
  save_inference_dir: "./output"
  feature_normalize: True
# model architecture: ResNet50 backbone truncated at flatten_0,
# followed by an FC head for classification-style training
Arch:
  name: "RecModel"
  Backbone:
    name: "ResNet50"
    pretrained: True
  BackboneStopLayer:
    name: "flatten_0"
    output_dim: 2048
  Head:
    name: "FC"
    class_num: 5013
    embedding_size: 2048
    # margin: 0.5
    # scale: 80
  infer_output_key: "features"
  # boolean, not the string "false" — a quoted "false" is truthy in Python
  infer_add_softmax: False
Loss:
  Train:
    - CELoss:
        weight: 1.0
    # - TripletLoss:
    #     margin: 0.1
    #     weight: 0.1
  Eval:
    - CELoss:
        weight: 1.0
Optimizer:
  name: Momentum
  momentum: 0.9
  lr:
    # piecewise schedule: 0.1 until epoch 30, then /10 at 30, 60, 90
    name: Piecewise
    learning_rate: 0.1
    decay_epochs: [30, 60, 90]
    values: [0.1, 0.01, 0.001, 0.0001]
  regularizer:
    name: 'L2'
    coeff: 0.0001
DataLoader:
  Train:
    dataset:
      name: ICartoonDataset
      image_root: "./dataset/iCartoonFace"
      cls_label_path: "./dataset/iCartoonFace/train_list.txt"
      transform_ops:
        - RandCropImage:
            size: 224
        - RandFlipImage:
            flip_code: 1
        - NormalizeImage:
            scale: 0.00392157
            mean: [0.485, 0.456, 0.406]
            std: [0.229, 0.224, 0.225]
            order: ''
    sampler:
      name: DistributedBatchSampler
      # num_instances: 2
      batch_size: 256
      drop_last: False
      shuffle: True
    loader:
      num_workers: 6
      use_shared_memory: False

  # retrieval-style evaluation: Query set is matched against Gallery set
  Eval:
    Query:
      dataset:
        name: ICartoonDataset
        image_root: "./dataset/iCartoonFace"
        cls_label_path: "./dataset/iCartoonFace/query.txt"
        transform_ops:
          - ResizeImage:
              resize_short: 256
          - CropImage:
              size: 224
          - NormalizeImage:
              scale: 0.00392157
              mean: [0.485, 0.456, 0.406]
              std: [0.229, 0.224, 0.225]
              order: ''
      sampler:
        name: DistributedBatchSampler
        batch_size: 64
        drop_last: False
        shuffle: False
      loader:
        num_workers: 6
        use_shared_memory: False

    Gallery:
      dataset:
        name: ICartoonDataset
        image_root: "./dataset/iCartoonFace"
        cls_label_path: "./dataset/iCartoonFace/gallery.txt"
        transform_ops:
          - ResizeImage:
              resize_short: 256
          - CropImage:
              size: 224
          - NormalizeImage:
              scale: 0.00392157
              mean: [0.485, 0.456, 0.406]
              std: [0.229, 0.224, 0.225]
              order: ''
      sampler:
        name: DistributedBatchSampler
        batch_size: 64
        drop_last: False
        shuffle: False
      loader:
        num_workers: 6
        use_shared_memory: False
Metric:
  Train:
    - TopkAcc:
        topk: [1, 5]
  Eval:
    - Recallk:
        topk: 1
......@@ -26,6 +26,8 @@ from ppcls.data.dataloader.multilabel_dataset import MultiLabelDataset
from ppcls.data.dataloader.common_dataset import create_operators
from ppcls.data.dataloader.vehicle_dataset import CompCars, VeriWild
from ppcls.data.dataloader.logo_dataset import LogoDataset
from ppcls.data.dataloader.icartoon_dataset import ICartoonDataset
# sampler
from ppcls.data.dataloader.DistributedRandomIdentitySampler import DistributedRandomIdentitySampler
......
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import numpy as np
import os
from .common_dataset import CommonDataset
class ICartoonDataset(CommonDataset):
    """Dataset for the iCartoonFace recognition task.

    Parses a tab-separated annotation file (one ``<path>\\t<label>`` record
    per line) and populates ``self.images`` / ``self.labels`` for the
    parent ``CommonDataset`` machinery.
    """

    def _load_anno(self, seed=None):
        """Load (and optionally shuffle) the annotation list.

        Args:
            seed: optional int; when given, shuffling is reproducible via a
                dedicated ``numpy`` RNG, otherwise the global RNG is used.
        """
        # Fail fast if the annotation file or the image root is missing.
        assert os.path.exists(self._cls_path)
        assert os.path.exists(self._img_root)

        self.images = []
        self.labels = []

        with open(self._cls_path) as anno_file:
            anno_lines = anno_file.readlines()
            if seed is not None:
                np.random.RandomState(seed).shuffle(anno_lines)
            else:
                np.random.shuffle(anno_lines)
            for record in anno_lines:
                fields = record.strip().split("\t")
                # fields[0][2:] drops the first two characters of the path —
                # presumably a leading "./" in the list file; TODO confirm.
                image_path = os.path.join(self._img_root, fields[0][2:])
                self.images.append(image_path)
                self.labels.append(int(fields[1]))
                # Every referenced image must actually exist on disk.
                assert os.path.exists(self.images[-1])
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册