提交 36406212 编写于 作者: D dongshuilong

Merge branch 'develop' of https://github.com/PaddlePaddle/PaddleClas into whole_chain

......@@ -41,11 +41,11 @@ def main():
'inference.pdmodel')) and os.path.exists(
os.path.join(config["Global"]["save_inference_dir"],
'inference.pdiparams'))
config["DataLoader"]["Train"]["sampler"]["batch_size"] = 1
config["DataLoader"]["Train"]["loader"]["num_workers"] = 0
config["DataLoader"]["Eval"]["sampler"]["batch_size"] = 1
config["DataLoader"]["Eval"]["loader"]["num_workers"] = 0
init_logger()
device = paddle.set_device("cpu")
train_dataloader = build_dataloader(config["DataLoader"], "Train", device,
train_dataloader = build_dataloader(config["DataLoader"], "Eval", device,
False)
def sample_generator(loader):
......
# global configs
# pretrained_model: https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/rec/models/pretrain/vehicle_reid_PPLCNet2.5x_VERIWild_v1.0_pretrained.pdparams
# VeriWild v1 small: recall1: 0.93736, recall5: 0.98427, mAP: 0.82125
Global:
checkpoints: null
pretrained_model: null
output_dir: "./output_reid/"
device: "gpu"
save_interval: 1
eval_during_train: True
eval_interval: 1
epochs: 160
print_batch_step: 10
use_visualdl: False
# used for static mode and model export
image_shape: [3, 224, 224]
save_inference_dir: "./inference"
eval_mode: "retrieval"
# model architecture
Arch:
name: "RecModel"
infer_output_key: "features"
infer_add_softmax: False
Backbone:
name: "PPLCNet_x2_5"
pretrained: True
use_ssld: True
BackboneStopLayer:
name: "flatten_0"
Neck:
name: "FC"
embedding_size: 1280
class_num: 512
Head:
name: "ArcMargin"
embedding_size: 512
class_num: 30671
margin: 0.15
scale: 32
# loss function config for training/eval process
Loss:
Train:
- CELoss:
weight: 1.0
- SupConLoss:
weight: 1.0
views: 2
Eval:
- CELoss:
weight: 1.0
Optimizer:
name: Momentum
momentum: 0.9
lr:
name: Cosine
learning_rate: 0.04
regularizer:
name: 'L2'
coeff: 0.0005
# data loader for train and eval
DataLoader:
Train:
dataset:
name: "VeriWild"
image_root: "./dataset/VeRI-Wild/images/"
cls_label_path: "./dataset/VeRI-Wild/train_test_split/train_list_start0.txt"
transform_ops:
- DecodeImage:
to_rgb: True
channel_first: False
- ResizeImage:
size: 224
- RandFlipImage:
flip_code: 1
- AugMix:
prob: 0.5
- NormalizeImage:
scale: 0.00392157
mean: [0.485, 0.456, 0.406]
std: [0.229, 0.224, 0.225]
order: ''
- RandomErasing:
EPSILON: 0.5
sl: 0.02
sh: 0.4
r1: 0.3
mean: [0., 0., 0.]
sampler:
name: PKSampler
batch_size: 128
sample_per_id: 2
drop_last: True
shuffle: True
loader:
num_workers: 6
use_shared_memory: True
Eval:
Query:
dataset:
name: "VeriWild"
image_root: "./dataset/VeRI-Wild/images"
cls_label_path: "./dataset/VeRI-Wild/train_test_split/test_3000_id_query.txt"
transform_ops:
- DecodeImage:
to_rgb: True
channel_first: False
- ResizeImage:
size: 224
- NormalizeImage:
scale: 0.00392157
mean: [0.485, 0.456, 0.406]
std: [0.229, 0.224, 0.225]
order: ''
sampler:
name: DistributedBatchSampler
batch_size: 64
drop_last: False
shuffle: False
loader:
num_workers: 6
use_shared_memory: True
Gallery:
dataset:
name: "VeriWild"
image_root: "./dataset/VeRI-Wild/images"
cls_label_path: "./dataset/VeRI-Wild/train_test_split/test_3000_id.txt"
transform_ops:
- DecodeImage:
to_rgb: True
channel_first: False
- ResizeImage:
size: 224
- NormalizeImage:
scale: 0.00392157
mean: [0.485, 0.456, 0.406]
std: [0.229, 0.224, 0.225]
order: ''
sampler:
name: DistributedBatchSampler
batch_size: 64
drop_last: False
shuffle: False
loader:
num_workers: 6
use_shared_memory: True
Metric:
Eval:
- Recallk:
topk: [1, 5]
- mAP: {}
......@@ -44,12 +44,18 @@ class CombinedLoss(nn.Layer):
def __call__(self, input, batch):
loss_dict = {}
for idx, loss_func in enumerate(self.loss_func):
loss = loss_func(input, batch)
weight = self.loss_weight[idx]
loss = {key: loss[key] * weight for key in loss}
# just to accelerate classification training speed
if len(self.loss_func) == 1:
loss = self.loss_func[0](input, batch)
loss_dict.update(loss)
loss_dict["loss"] = paddle.add_n(list(loss_dict.values()))
loss_dict["loss"] = list(loss.values())[0]
else:
for idx, loss_func in enumerate(self.loss_func):
loss = loss_func(input, batch)
weight = self.loss_weight[idx]
loss = {key: loss[key] * weight for key in loss}
loss_dict.update(loss)
loss_dict["loss"] = paddle.add_n(list(loss_dict.values()))
return loss_dict
......
......@@ -51,7 +51,7 @@ if [ ${MODE} = "lite_train_lite_infer" ] || [ ${MODE} = "lite_train_whole_infer"
cp -r train/* val/
fi
cd ../../
elif [ ${MODE} = "whole_infer" ] || [ ${MODE} = "cpp_infer" ];then
elif [ ${MODE} = "whole_infer" ] || [ ${MODE} = "klquant_whole_infer" ];then
# download data
cd dataset
rm -rf ILSVRC2012
......
......@@ -156,7 +156,7 @@ function func_inference(){
done
}
if [ ${MODE} = "whole_infer" ] || [${MODE} = "klquant_whole_infer" ]; then
if [ ${MODE} = "whole_infer" ] || [ ${MODE} = "klquant_whole_infer" ]; then
IFS="|"
infer_export_flag=(${infer_export_flag})
if [ ${infer_export_flag} != "null" ] && [ ${infer_export_flag} != "False" ]; then
......@@ -199,7 +199,7 @@ elif [ ${MODE} = "klquant_whole_infer" ]; then
ln -s __params__ inference.pdiparams
cd ../../deploy
is_quant=True
func_inference "${python}" "${inference_py}" "${infer_model}/quant_post_static_model" "../${LOG_PATH}" "${infer_img_dir}" ${is_quant}
func_inference "${python}" "${inference_py}" "${infer_model_dir_list}/quant_post_static_model" "../${LOG_PATH}" "${infer_img_dir}" ${is_quant}
cd ..
fi
else
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册