Commit 7d9f4dcb authored by H HydrogenSulfate

Change Tensor.numpy()[0] to float(Tensor) for the 0-D tensor case
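For context, a minimal sketch of the failure mode being fixed (not part of the commit; assumes a Paddle release where reductions such as paddle.mean return 0-D tensors):

```python
import paddle

# A reduction over a tensor yields a 0-D (scalar) tensor in recent Paddle releases.
x = paddle.mean(paddle.to_tensor([1.0, 2.0, 3.0]))
print(x.shape)  # [] -- no axes, so there is nothing to index

# Old pattern: .numpy() returns a 0-d ndarray, and indexing it with [0] raises
# "IndexError: too many indices for array".
# x.numpy()[0]

# New pattern: float() extracts the scalar and also works for shape-[1] tensors.
print(float(x))  # 2.0
```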

Parent fe24d676
@@ -324,8 +324,7 @@ class PyramidVisionTransformer(nn.Layer):
         self.pos_drops.append(nn.Dropout(p=drop_rate))
         dpr = [
-            x.numpy()[0]
-            for x in paddle.linspace(0, drop_path_rate, sum(depths))
+            float(x) for x in paddle.linspace(0, drop_path_rate, sum(depths))
         ]  # stochastic depth decay rule
         cur = 0
@@ -551,8 +550,7 @@ class ALTGVT(PCPVT):
         self.wss = wss
         # transformer encoder
         dpr = [
-            x.numpy()[0]
-            for x in paddle.linspace(0, drop_path_rate, sum(depths))
+            float(x) for x in paddle.linspace(0, drop_path_rate, sum(depths))
         ]  # stochastic depth decay rule
         cur = 0
         self.blocks = nn.LayerList()
...
@@ -126,8 +126,7 @@ def classification_eval(engine, epoch_id=0):
         for key in loss_dict:
             if key not in output_info:
                 output_info[key] = AverageMeter(key, '7.5f')
-            output_info[key].update(loss_dict[key].numpy()[0],
-                                    current_samples)
+            output_info[key].update(float(loss_dict[key]), current_samples)

         # calc metric
         if engine.eval_metric_func is not None:
...
@@ -20,6 +20,7 @@ from typing import Optional
 import numpy as np
 import paddle

+from ppcls.engine.train.utils import type_name
 from ppcls.utils import logger
@@ -65,7 +66,7 @@ def retrieval_eval(engine, epoch_id=0):
                 engine.eval_metric_func.metric_func_list[
                     i].descending = False
                 logger.warning(
-                    f"re_ranking=True,{engine.eval_metric_func.metric_func_list[i].__class__.__name__}.descending has been set to False"
+                    f"re_ranking=True,{type_name(engine.eval_metric_func.metric_func_list[i])}.descending has been set to False"
                 )
     # compute distance matrix (the smaller the value, the more similar)
...
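The newly imported type_name helper replaces the repeated __class__.__name__ pattern throughout this commit. Its definition is not shown in the diff; judging by the call sites it is presumably a thin wrapper along these lines (a sketch, not the actual source):

```python
def type_name(obj) -> str:
    # Return the class name of an object (e.g. an nn.Layer or an LR scheduler),
    # so log messages read "Recallk" instead of a full repr.
    return obj.__class__.__name__
```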
@@ -25,8 +25,8 @@ def update_metric(trainer, out, batch, batch_size):
     for key in metric_dict:
         if key not in trainer.output_info:
             trainer.output_info[key] = AverageMeter(key, '7.5f')
-        trainer.output_info[key].update(metric_dict[key].numpy()[0],
-                                        batch_size)
+        trainer.output_info[key].update(
+            float(metric_dict[key]), batch_size)


 def update_loss(trainer, loss_dict, batch_size):
@@ -34,12 +34,12 @@ def update_loss(trainer, loss_dict, batch_size):
     for key in loss_dict:
         if key not in trainer.output_info:
             trainer.output_info[key] = AverageMeter(key, '7.5f')
-        trainer.output_info[key].update(loss_dict[key].numpy()[0], batch_size)
+        trainer.output_info[key].update(float(loss_dict[key]), batch_size)


 def log_info(trainer, batch_size, epoch_id, iter_id):
     lr_msg = ", ".join([
-        "lr({}): {:.8f}".format(lr.__class__.__name__, lr.get_lr())
+        "lr({}): {:.8f}".format(type_name(lr), lr.get_lr())
         for i, lr in enumerate(trainer.lr_sch)
     ])
     metric_msg = ", ".join([
@@ -54,17 +54,17 @@ def log_info(trainer, batch_size, epoch_id, iter_id):
     ips_msg = "ips: {:.5f} samples/s".format(
         batch_size / trainer.time_info["batch_cost"].avg)
-    eta_sec = ((trainer.config["Global"]["epochs"] - epoch_id + 1) *
-               trainer.iter_per_epoch - iter_id) * trainer.time_info["batch_cost"].avg
+    eta_sec = (
+        (trainer.config["Global"]["epochs"] - epoch_id + 1) *
+        trainer.iter_per_epoch - iter_id) * trainer.time_info["batch_cost"].avg
     eta_msg = "eta: {:s}".format(str(datetime.timedelta(seconds=int(eta_sec))))
     logger.info("[Train][Epoch {}/{}][Iter: {}/{}]{}, {}, {}, {}, {}".format(
-        epoch_id, trainer.config["Global"]["epochs"], iter_id, trainer.iter_per_epoch,
-        lr_msg, metric_msg, time_msg, ips_msg, eta_msg))
+        epoch_id, trainer.config["Global"]["epochs"], iter_id, trainer.
+        iter_per_epoch, lr_msg, metric_msg, time_msg, ips_msg, eta_msg))
     for i, lr in enumerate(trainer.lr_sch):
         logger.scaler(
-            name="lr({})".format(lr.__class__.__name__),
+            name="lr({})".format(type_name(lr)),
             value=lr.get_lr(),
             step=trainer.global_step,
             writer=trainer.vdl_writer)
...
@@ -113,7 +113,7 @@ class mAP(nn.Layer):
         precision_mask = paddle.multiply(equal_flag, precision)
         ap = paddle.sum(precision_mask, axis=1) / paddle.sum(equal_flag,
                                                              axis=1)
-        metric_dict["mAP"] = paddle.mean(ap).numpy()[0]
+        metric_dict["mAP"] = float(paddle.mean(ap))
         return metric_dict
@@ -157,7 +157,7 @@ class mINP(nn.Layer):
         hard_index = paddle.argmax(auxilary, axis=1).astype("float32")
         all_INP = paddle.divide(paddle.sum(equal_flag, axis=1), hard_index)
         mINP = paddle.mean(all_INP)
-        metric_dict["mINP"] = mINP.numpy()[0]
+        metric_dict["mINP"] = float(mINP)
         return metric_dict
@@ -360,7 +360,7 @@ class HammingDistance(MultiLabelMetric):
         metric_dict["HammingDistance"] = paddle.to_tensor(
             hamming_loss(target, preds))
         self.avg_meters["HammingDistance"].update(
-            metric_dict["HammingDistance"].numpy()[0], output.shape[0])
+            float(metric_dict["HammingDistance"]), output.shape[0])
         return metric_dict
@@ -400,7 +400,7 @@ class AccuracyScore(MultiLabelMetric):
             sum(tps) + sum(tns) + sum(fns) + sum(fps))
         metric_dict["AccuracyScore"] = paddle.to_tensor(accuracy)
         self.avg_meters["AccuracyScore"].update(
-            metric_dict["AccuracyScore"].numpy()[0], output.shape[0])
+            float(metric_dict["AccuracyScore"]), output.shape[0])
         return metric_dict
...
@@ -20,6 +20,7 @@ import copy
 import paddle
 from typing import Dict, List

+from ppcls.engine.train.utils import type_name
 from ppcls.utils import logger

 from . import optimizer
@@ -111,11 +112,11 @@ def build_optimizer(config, epochs, step_each_epoch, model_list=None):
         if optim_scope.endswith("Loss"):
             # optimizer for loss
             for m in model_list[i].sublayers(True):
-                if m.__class__.__name__ == optim_scope:
+                if type_name(m) == optim_scope:
                     optim_model.append(m)
         else:
             # optimizer for module in model, such as backbone, neck, head...
-            if optim_scope == model_list[i].__class__.__name__:
+            if optim_scope == type_name(model_list[i]):
                 optim_model.append(model_list[i])
             elif hasattr(model_list[i], optim_scope):
                 optim_model.append(getattr(model_list[i], optim_scope))
...
@@ -47,7 +47,7 @@ class AverageMeter(object):
     @property
     def avg_info(self):
         if isinstance(self.avg, paddle.Tensor):
-            self.avg = self.avg.numpy()[0]
+            self.avg = float(self.avg)
         return "{}: {:.5f}".format(self.name, self.avg)

     @property
...
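Taken together, metric and loss scalars now enter the meters as plain Python floats. A hedged usage sketch of the updated AverageMeter (import path and constructor signature assumed from the call sites above):

```python
import paddle
from ppcls.utils.misc import AverageMeter  # import path assumed

meter = AverageMeter("CELoss", '7.5f')
loss = paddle.to_tensor(0.25)  # stands in for a 0-D loss tensor
meter.update(float(loss), 32)  # store a Python float, not a Tensor
print(meter.avg_info)          # "CELoss: 0.25000"
```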