Commit daf7eea2 authored by HydrogenSulfate

fix code: fix 'initiliazer' typo in get_param_attr_dict, rename warmup_epoch_by_epoch to warmup_by_epoch, simplify the re_ranking call, remove leftover timing code, and rework BNneck bias-grad zeroing

Parent dfd77498
@@ -70,14 +70,14 @@ def get_param_attr_dict(ParamAttr_config: Union[None, bool, Dict[str, Dict]]
     if isinstance(ParamAttr_config, bool):
         return ParamAttr_config
     ParamAttr_dict = {}
-    if 'initiliazer' in ParamAttr_config:
-        initiliazer_cfg = ParamAttr_config.get('initiliazer')
-        if 'name' in initiliazer_cfg:
-            initiliazer_name = initiliazer_cfg.pop('name')
-            ParamAttr_dict['initiliazer'] = getattr(
-                paddle.nn.initializer, initiliazer_name)(**initiliazer_cfg)
+    if 'initializer' in ParamAttr_config:
+        initializer_cfg = ParamAttr_config.get('initializer')
+        if 'name' in initializer_cfg:
+            initializer_name = initializer_cfg.pop('name')
+            ParamAttr_dict['initializer'] = getattr(
+                paddle.nn.initializer, initializer_name)(**initializer_cfg)
         else:
-            raise ValueError(f"'name' must specified in initiliazer_cfg")
+            raise ValueError("'name' must be specified in initializer_cfg")
     if 'learning_rate' in ParamAttr_config:
         # NOTE: only a single value is supported now
         learning_rate_value = ParamAttr_config.get('learning_rate')
......
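For context, a minimal sketch of how the corrected `initializer` key is resolved: the `name` entry selects a class from `paddle.nn.initializer` and the remaining keys become its constructor arguments. The config values below are illustrative assumptions, not an excerpt from the repository.

```python
import paddle

# Hypothetical ParamAttr config in the shape get_param_attr_dict expects:
# 'name' picks the initializer class, the rest are its keyword arguments.
ParamAttr_config = {
    "initializer": {"name": "KaimingNormal", "fan_in": 100},
    "learning_rate": 0.1,
}

initializer_cfg = dict(ParamAttr_config["initializer"])  # copy before pop
initializer_name = initializer_cfg.pop("name")
ParamAttr_dict = {
    "initializer": getattr(paddle.nn.initializer,
                           initializer_name)(**initializer_cfg),
    "learning_rate": ParamAttr_config["learning_rate"],
}

# The resulting dict is what paddle.ParamAttr ultimately receives.
param_attr = paddle.ParamAttr(**ParamAttr_dict)
```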
@@ -10,7 +10,7 @@ Global:
   epochs: 120
   print_batch_step: 20
   use_visualdl: False
-  warmup_epoch_by_epoch: True
+  warmup_by_epoch: True
   eval_mode: "retrieval"
   re_ranking: True
   # used for static mode and model export
@@ -68,7 +68,7 @@ Optimizer:
     values: [0.00035, 0.000035, 0.0000035]
     warmup_epoch: 10
     warmup_start_lr: 0.0000035
-    warmup_epoch_by_epoch: True
+    warmup_by_epoch: True
   regularizer:
     name: 'L2'
     coeff: 0.0005
......
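One consequence of this rename worth noting: the Engine reads the flag with a `False` default (see the Engine hunk below), so a config that still carries the old `warmup_epoch_by_epoch` key is silently treated as `False`. A quick sanity check, with an assumed in-memory config dict:

```python
# Stale config that was not migrated after the rename.
config = {"Global": {"warmup_epoch_by_epoch": True}}

# This is how the Engine reads the flag; the stale key is silently ignored.
warmup_by_epoch = config["Global"].get("warmup_by_epoch", False)
assert warmup_by_epoch is False

if "warmup_epoch_by_epoch" in config["Global"]:
    print("Found old key 'warmup_epoch_by_epoch'; rename it to 'warmup_by_epoch'.")
```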
@@ -299,11 +299,11 @@ class Engine(object):
         self.max_iter = len(self.train_dataloader) - 1 if platform.system(
         ) == "Windows" else len(self.train_dataloader)
-        if self.config["Global"].get("warmup_epoch_by_epoch", False):
+        if self.config["Global"].get("warmup_by_epoch", False):
             for i in range(len(self.lr_sch)):
                 self.lr_sch[i].step()
             logger.info(
-                "lr_sch step once before first epoch, when Global.warmup_epoch_by_epoch=True"
+                "lr_sch stepped once before the first epoch, since Global.warmup_by_epoch=True"
             )
         for epoch_id in range(best_metric["epoch"] + 1,
@@ -312,7 +312,7 @@ class Engine(object):
             # for one epoch train
             self.train_epoch_func(self, epoch_id, print_batch_step)
-            if self.config["Global"].get("warmup_epoch_by_epoch", False):
+            if self.config["Global"].get("warmup_by_epoch", False):
                 for i in range(len(self.lr_sch)):
                     self.lr_sch[i].step()
......
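Taken together with the train-loop hunk further below, the semantics appear to be: with `Global.warmup_by_epoch: True` the LR schedulers are stepped once per epoch (plus once before the first epoch), instead of once per iteration. A standalone sketch of that stepping pattern, reusing the warmup values from the config above; the piecewise boundaries are assumptions for illustration.

```python
import paddle

# Warmup over 10 epochs into a piecewise schedule.
base_sched = paddle.optimizer.lr.PiecewiseDecay(
    boundaries=[40, 70], values=[0.00035, 0.000035, 0.0000035])
sched = paddle.optimizer.lr.LinearWarmup(
    learning_rate=base_sched,
    warmup_steps=10,          # counts epochs under warmup_by_epoch
    start_lr=0.0000035,
    end_lr=0.00035)

sched.step()  # stepped once before the first epoch, as the Engine now does
for epoch_id in range(1, 121):
    # ... one epoch of training, with no per-iteration sched.step() ...
    sched.step()  # stepped once at the end of every epoch
```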
@@ -83,20 +83,16 @@ def retrieval_eval(engine, epoch_id=0):
                 0] / len(query_feas)
         else:
             distmat = re_ranking(
-                query_feas,
-                gallery_feas,
-                query_img_id,
-                query_query_id,
-                gallery_img_id,
-                gallery_unique_id,
-                k1=20,
-                k2=6,
-                lambda_value=0.3)
+                query_feas, gallery_feas, k1=20, k2=6, lambda_value=0.3)
         cmc, mAP = eval_func(distmat,
                              np.squeeze(query_img_id.numpy()),
                              np.squeeze(gallery_img_id.numpy()),
                              np.squeeze(query_query_id.numpy()),
                              np.squeeze(gallery_unique_id.numpy()))
         metric_dict["recall1(RK)"] = cmc[0]
         metric_dict["recall5(RK)"] = cmc[4]
         metric_dict["mAP(RK)"] = mAP
     for key in metric_tmp:
         metric_dict[key] = metric_tmp[key] * block_fea.shape[0] / len(
             query_feas)
@@ -176,7 +172,6 @@ def re_ranking(queFea,
         k_reciprocal_expansion_index = np.unique(k_reciprocal_expansion_index)
         weight = np.exp(-original_dist[i, k_reciprocal_expansion_index])
         V[i, k_reciprocal_expansion_index] = weight / np.sum(weight)
-    all_num_cost = time.time() - t
     original_dist = original_dist[:query_num, ]
     if k2 != 1:
         V_qe = np.zeros_like(V, dtype=np.float16)
@@ -190,7 +185,6 @@ def re_ranking(queFea,
         invIndex.append(np.where(V[:, i] != 0)[0])
     jaccard_dist = np.zeros_like(original_dist, dtype=np.float16)
-    gallery_num_cost = time.time() - t
     for i in range(query_num):
         temp_min = np.zeros(shape=[1, gallery_num], dtype=np.float16)
         indNonZero = np.where(V[i, :] != 0)[0]
@@ -206,7 +200,6 @@ def re_ranking(queFea,
     del V
     del jaccard_dist
     final_dist = final_dist[:query_num, query_num:]
-    query_num_cost = time.time() - t
     return final_dist
......
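A usage sketch of the simplified flow: `re_ranking` now takes only the two feature blocks plus the k-reciprocal hyperparameters `k1`, `k2`, and `lambda_value`, while the id tensors are consumed solely by `eval_func`. This sketch relies on `re_ranking` and `eval_func` from the file above; the shapes and id ranges are illustrative assumptions.

```python
import numpy as np
import paddle

num_q, num_g, dim = 8, 32, 256  # assumed sizes for illustration
query_feas = paddle.rand([num_q, dim])
gallery_feas = paddle.rand([num_g, dim])
query_img_id = paddle.randint(0, 10, [num_q, 1])
gallery_img_id = paddle.randint(0, 10, [num_g, 1])
query_query_id = paddle.randint(0, 4, [num_q, 1])
gallery_unique_id = paddle.randint(0, 4, [num_g, 1])

# Features plus hyperparameters only; no id tensors threaded through.
distmat = re_ranking(query_feas, gallery_feas, k1=20, k2=6, lambda_value=0.3)

# The ids are needed only for metric computation, as 1-D numpy arrays.
cmc, mAP = eval_func(distmat,
                     np.squeeze(query_img_id.numpy()),
                     np.squeeze(gallery_img_id.numpy()),
                     np.squeeze(query_query_id.numpy()),
                     np.squeeze(gallery_unique_id.numpy()))
print(f"recall@1 (re-ranked): {cmc[0]:.4f}, mAP: {mAP:.4f}")
```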
@@ -57,24 +57,27 @@ def train_epoch(engine, epoch_id, print_batch_step):
         if engine.amp:
             scaled = engine.scaler.scale(loss_dict["loss"])
             scaled.backward()
-            # set BNneck.bias grad to zero
-            engine.model.neck.feat_bn.bias.grad.set_value(
-                paddle.zeros_like(engine.model.neck.feat_bn.bias.grad))
             for i in range(len(engine.optimizer)):
                 engine.scaler.minimize(engine.optimizer[i], scaled)
         else:
             loss_dict["loss"].backward()
-            # set BNneck.bias grad to zero
-            engine.model.neck.feat_bn.bias.grad.set_value(
-                paddle.zeros_like(engine.model.neck.feat_bn.bias.grad))
             for i in range(len(engine.optimizer)):
                 engine.optimizer[i].step()
+        # set BNneck bias grad to zero, guarded for necks without a bn layer
+        if hasattr(engine.model.neck, 'bn'):
+            engine.model.neck.bn.bias.grad.set_value(
+                paddle.zeros_like(engine.model.neck.bn.bias.grad))
         # clear grad
         for i in range(len(engine.optimizer)):
             engine.optimizer[i].clear_grad()
-        # step lr
-        for i in range(len(engine.lr_sch)):
-            engine.lr_sch[i].step()
+        # step lr per iteration unless warmup is stepped epoch-wise
+        if engine.config["Global"].get("warmup_by_epoch", False) is False:
+            for i in range(len(engine.lr_sch)):
+                engine.lr_sch[i].step()
         # below code just for logging
         # update metric_for_logger
......
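The removed and added blocks both implement the BNneck trick from ReID, where the batch-norm bias is kept frozen by zeroing its gradient; the commit moves the zeroing out of the amp/non-amp branches, guards it with `hasattr`, and renames the neck layer from `feat_bn` to `bn`. A minimal standalone sketch of the pattern, with toy layer sizes assumed; the zeroing here happens before the optimizer step, which is the order that actually freezes the bias.

```python
import paddle
import paddle.nn as nn

class Neck(nn.Layer):
    """Toy BNneck: a batch-norm layer whose bias should never move."""

    def __init__(self):
        super().__init__()
        self.bn = nn.BatchNorm1D(4)

    def forward(self, x):
        return self.bn(x)

neck = Neck()
opt = paddle.optimizer.Momentum(
    learning_rate=0.1, parameters=neck.parameters())

loss = neck(paddle.rand([2, 4])).mean()
loss.backward()

# Guarded grad-zeroing, mirroring the hasattr check added by this commit.
if hasattr(neck, 'bn') and neck.bn.bias.grad is not None:
    neck.bn.bias.grad.set_value(paddle.zeros_like(neck.bn.bias.grad))

opt.step()        # bias stays at its initial value
opt.clear_grad()
```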