提交 97e8abc3 编写于 作者: H HydrogenSulfate

refine code and yaml

上级 661a1909
......@@ -17,22 +17,32 @@ from __future__ import absolute_import, division, print_function
import paddle
import paddle.nn as nn
from ppcls.arch.utils import get_param_attr_dict
class BNNeck(nn.Layer):
def __init__(self, num_features):
def __init__(self, num_features, **kwargs):
super().__init__()
weight_attr = paddle.ParamAttr(
initializer=paddle.nn.initializer.Constant(value=1.0))
bias_attr = paddle.ParamAttr(
initializer=paddle.nn.initializer.Constant(value=0.0),
trainable=False)
if 'weight_attr' in kwargs:
weight_attr = get_param_attr_dict(kwargs['weight_attr'])
bias_attr = None
if 'bias_attr' in kwargs:
bias_attr = get_param_attr_dict(kwargs['bias_attr'])
self.feat_bn = nn.BatchNorm1D(
num_features,
momentum=0.9,
epsilon=1e-05,
weight_attr=weight_attr,
bias_attr=bias_attr)
# TODO: set bnneck.bias learnable=False
self.flatten = nn.Flatten()
def forward(self, x):
......
......@@ -2,8 +2,6 @@
Global:
checkpoints: null
pretrained_model: null
# pretrained_model: "./pd_model_trace/ISE/ISE_M_model" # pretrained ISE model for Market1501
# pretrained_model: "./pd_model_trace/ISE/ISE_MS_model" # pretrained ISE model for MSMT17
output_dir: "./output/"
device: "gpu"
save_interval: 40
......@@ -12,10 +10,12 @@ Global:
epochs: 120
print_batch_step: 20
use_visualdl: False
eval_mode: "retrieval"
re_ranking: False
feat_from: "backbone" # 'backbone' or 'neck'
# used for static mode and model export
image_shape: [3, 256, 128]
save_inference_dir: "./inference"
eval_mode: "retrieval"
# model architecture
Arch:
......@@ -32,6 +32,12 @@ Arch:
name: "FC"
embedding_size: 2048
class_num: 751
weight_attr:
initializer:
name: Normal
std: 0.001
bias_attr: False
# loss function config for traing/eval process
Loss:
Train:
......@@ -63,9 +69,9 @@ Optimizer:
DataLoader:
Train:
dataset:
name: "VeriWild"
image_root: "./dataset/market1501"
cls_label_path: "./dataset/market1501/bounding_box_train.txt"
name: "Market1501"
image_root: "./dataset/"
cls_label_path: "bounding_box_train"
transform_ops:
- ResizeImage:
size: [128, 256]
......@@ -88,14 +94,14 @@ DataLoader:
drop_last: True
shuffle: True
loader:
num_workers: 6
num_workers: 4
use_shared_memory: True
Eval:
Query:
dataset:
name: "VeriWild"
image_root: "./dataset/market1501"
cls_label_path: "./dataset/market1501/query.txt"
name: "Market1501"
image_root: "./dataset/"
cls_label_path: "query"
transform_ops:
- ResizeImage:
size: [128, 256]
......@@ -109,14 +115,14 @@ DataLoader:
drop_last: False
shuffle: False
loader:
num_workers: 6
num_workers: 4
use_shared_memory: True
Gallery:
dataset:
name: "VeriWild"
image_root: "./dataset/market1501"
cls_label_path: "./dataset/market1501/bounding_box_test.txt"
name: "Market1501"
image_root: "./dataset/"
cls_label_path: "bounding_box_test"
transform_ops:
- ResizeImage:
size: [128, 256]
......@@ -130,7 +136,7 @@ DataLoader:
drop_last: False
shuffle: False
loader:
num_workers: 6
num_workers: 4
use_shared_memory: True
Metric:
......
......@@ -32,11 +32,19 @@ Arch:
Neck:
name: BNNeck
num_features: &feat_dim 2048
# trainable: False # TODO: freeze bn.bias
weight_attr:
initializer:
name: Constant
value: 1.0
bias_attr:
initializer:
name: Constant
value: 0.0
learning_rate: 1.0e-20 # NOTE: Temporarily set lr small enough to freeze the bias
Head:
name: "FC"
embedding_size: *feat_dim
class_num: &class_num 751
class_num: 751
weight_attr:
initializer:
name: Normal
......@@ -73,9 +81,9 @@ Optimizer:
DataLoader:
Train:
dataset:
name: "VeriWild"
image_root: "./dataset/market1501"
cls_label_path: "./dataset/market1501/bounding_box_train.txt"
name: "Market1501"
image_root: "./dataset/"
cls_label_path: "bounding_box_train"
transform_ops:
- DecodeImage:
to_rgb: True
......@@ -108,14 +116,14 @@ DataLoader:
drop_last: True
shuffle: True
loader:
num_workers: 6
num_workers: 4
use_shared_memory: True
Eval:
Query:
dataset:
name: "VeriWild"
image_root: "./dataset/market1501"
cls_label_path: "./dataset/market1501/query.txt"
name: "Market1501"
image_root: "./dataset/"
cls_label_path: "query"
transform_ops:
- DecodeImage:
to_rgb: True
......@@ -133,14 +141,14 @@ DataLoader:
drop_last: False
shuffle: False
loader:
num_workers: 6
num_workers: 4
use_shared_memory: True
Gallery:
dataset:
name: "VeriWild"
image_root: "./dataset/market1501"
cls_label_path: "./dataset/market1501/bounding_box_test.txt"
name: "Market1501"
image_root: "./dataset/"
cls_label_path: "bounding_box_test"
transform_ops:
- DecodeImage:
to_rgb: True
......@@ -158,7 +166,7 @@ DataLoader:
drop_last: False
shuffle: False
loader:
num_workers: 6
num_workers: 4
use_shared_memory: True
Metric:
......
......@@ -25,14 +25,22 @@ Arch:
infer_add_softmax: False
Backbone:
name: "ResNet50_last_stage_stride1"
pretrained: True
pretrained: "./dataset/resnet50-19c8e357_for_strong_baseline"
stem_act: null
BackboneStopLayer:
name: "flatten"
Neck:
name: BNNeck
num_features: &feat_dim 2048
# trainable: False # TODO: freeze bn.bias
weight_attr:
initializer:
name: Constant
value: 1.0
bias_attr:
initializer:
name: Constant
value: 0.0
learning_rate: 1.0e-20 # TODO: Temporarily set lr small enough to freeze the bias
Head:
name: "FC"
embedding_size: *feat_dim
......@@ -78,7 +86,7 @@ Optimizer:
scope: CenterLoss
lr:
name: Constant
learning_rate: 1000.0 # set to ori_lr*(1/centerloss_weight) to void manually scaling centers' gradidents.
learning_rate: 1000.0 # NOTE: set to ori_lr*(1/centerloss_weight) to avoid manually scaling centers' gradients.
# data loader for train and eval
DataLoader:
......
......@@ -6,9 +6,9 @@ Global:
# pretrained_model: "./pd_model_trace/ISE/ISE_MS_model" # pretrained ISE model for MSMT17
output_dir: "./output/"
device: "gpu"
save_interval: 10
save_interval: 1
eval_during_train: True
eval_interval: 10
eval_interval: 1
epochs: 120
print_batch_step: 10
use_visualdl: False
......@@ -25,7 +25,6 @@ Arch:
Backbone:
name: "ResNet50_last_stage_stride1"
pretrained: True
stem_act: null
BackboneStopLayer:
name: "avg_pool"
Neck:
......@@ -115,7 +114,7 @@ DataLoader:
order: ''
sampler:
name: DistributedBatchSampler
batch_size: 128
batch_size: 64
drop_last: False
shuffle: False
loader:
......@@ -138,7 +137,7 @@ DataLoader:
order: ''
sampler:
name: DistributedBatchSampler
batch_size: 128
batch_size: 64
drop_last: False
shuffle: False
loader:
......
......@@ -299,11 +299,12 @@ class Engine(object):
self.max_iter = len(self.train_dataloader) - 1 if platform.system(
) == "Windows" else len(self.train_dataloader)
# step lr once before the first epoch when Global.warmup_by_epoch=True
if self.config["Global"].get("warmup_by_epoch", False):
for i in range(len(self.lr_sch)):
self.lr_sch[i].step()
logger.info(
"lr_sch step once before first epoch, when Global.warmup_by_epoch=True"
"lr_sch step once before the first epoch, when Global.warmup_by_epoch=True"
)
for epoch_id in range(best_metric["epoch"] + 1,
......@@ -312,6 +313,7 @@ class Engine(object):
# for one epoch train
self.train_epoch_func(self, epoch_id, print_batch_step)
# lr step when Global.warmup_by_epoch=True
if self.config["Global"].get("warmup_by_epoch", False):
for i in range(len(self.lr_sch)):
self.lr_sch[i].step()
......
......@@ -53,20 +53,14 @@ def train_epoch(engine, epoch_id, print_batch_step):
out = forward(engine, batch)
loss_dict = engine.train_loss_func(out, batch[1])
# step opt
# backward & step opt
if engine.amp:
scaled = engine.scaler.scale(loss_dict["loss"])
scaled.backward()
# set BNneck.bias grad to zero
engine.model.neck.feat_bn.bias.grad.set_value(
paddle.zeros_like(engine.model.neck.feat_bn.bias.grad))
for i in range(len(engine.optimizer)):
engine.scaler.minimize(engine.optimizer[i], scaled)
else:
loss_dict["loss"].backward()
# set BNneck.bias grad to zero
engine.model.neck.feat_bn.bias.grad.set_value(
paddle.zeros_like(engine.model.neck.feat_bn.bias.grad))
for i in range(len(engine.optimizer)):
engine.optimizer[i].step()
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册