Commit 9de22673 authored by: W weishengyu

dbg

Parent 7f0b7a04
@@ -8,8 +8,8 @@ class BNNeck(paddle.nn.Layer):
         self.bn = paddle.nn.BatchNorm1D(
             self.num_filters)
-        if not trainable:
-            self.bn.bias.trainable = False
+        # if not trainable:
+        #     self.bn.bias.trainable = False

     def forward(self, input, label=None):
         out = self.bn(input)
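Note: this hunk disables the freezing of the BatchNorm bias, so the neck's bias stays trainable regardless of the `trainable` flag. A minimal sketch of the layer as it behaves after this change; only the lines shown above are confirmed by the diff, the constructor signature around `num_filters` is an assumption:

    import paddle

    class BNNeck(paddle.nn.Layer):
        # Sketch only: constructor arguments other than num_filters are assumed.
        def __init__(self, num_filters):
            super().__init__()
            self.num_filters = num_filters
            self.bn = paddle.nn.BatchNorm1D(self.num_filters)
            # Freezing of the BN bias is commented out by this commit:
            # if not trainable:
            #     self.bn.bias.trainable = False

        def forward(self, input, label=None):
            return self.bn(input)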
@@ -21,14 +21,14 @@ import paddle.nn as nn

 class FC(nn.Layer):
-    def __init__(self, embedding_size, class_num):
+    def __init__(self, embedding_size, class_num, bias_attr=None):
         super(FC, self).__init__()
         self.embedding_size = embedding_size
         self.class_num = class_num
         weight_attr = paddle.ParamAttr(
             initializer=paddle.nn.initializer.XavierNormal())
         self.fc = paddle.nn.Linear(
-            self.embedding_size, self.class_num, weight_attr=weight_attr)
+            self.embedding_size, self.class_num, weight_attr=weight_attr, bias_attr=bias_attr)

     def forward(self, input, label=None):
         out = self.fc(input)
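The FC head now accepts a `bias_attr` argument and passes it straight to `paddle.nn.Linear`, which lets the config below turn the classifier bias off. A small usage sketch, assuming the head is constructed directly (in the repo it is built from the YAML config):

    import paddle

    # bias_attr=False tells paddle.nn.Linear to create no bias parameter.
    fc = FC(embedding_size=2048, class_num=751, bias_attr=False)
    print(fc.fc.bias)        # expected to be None when the bias is disabled

    feat = paddle.randn([8, 2048])
    logits = fc(feat)        # classifier output; the return statement is outside this hunk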
@@ -26,14 +26,14 @@ Arch:
     stem_act: null
   BackboneStopLayer:
     name: "flatten"
-  #Neck:
-  #  name: BNNeck
-  #  num_filters: 2048
-  #  trainable: false
+  Neck:
+    name: BNNeck
+    num_filters: 2048
   Head:
     name: "FC"
     embedding_size: 2048
     class_num: 751
+    bias_attr: false

 # loss function config for traing/eval process
 Loss:
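With this config the architecture becomes backbone -> flatten -> BNNeck(2048) -> FC(2048, 751) with no classifier bias, and dropping the `trainable` option from the Neck matches the code change above. A hedged illustration of the resulting module stack, built by hand rather than from the YAML:

    import paddle

    # Illustration only: the repo instantiates these gears from the YAML config.
    neck = BNNeck(num_filters=2048)
    head = FC(embedding_size=2048, class_num=751, bias_attr=False)

    feat = paddle.randn([8, 2048])       # flattened backbone output
    logits = head(neck(feat))            # [8, 751] class scores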
@@ -125,7 +125,7 @@ def cal_feature(engine, name='gallery'):
         out = engine.model(batch[0], batch[1])
         if "Student" in out:
             out = out["Student"]
-        batch_feas = out["features"]
+        batch_feas = out["backbone"]
         # do norm
         if engine.config["Global"].get("feature_normalize", True):
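Retrieval features are now taken from the "backbone" key of the model's output dict instead of "features", i.e. the raw backbone embedding before the neck/head. The `# do norm` branch that follows is guarded by `Global.feature_normalize`; a sketch of the L2 normalization it presumably applies (the actual lines are outside this hunk):

    import paddle

    def l2_normalize(batch_feas):
        # Divide each row by its L2 norm; assumed to match the "do norm" branch.
        feas_norm = paddle.sqrt(paddle.sum(paddle.square(batch_feas), axis=1, keepdim=True))
        return paddle.divide(batch_feas, feas_norm)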
@@ -24,7 +24,7 @@ class TripletLossV2(nn.Layer):
             inputs: feature matrix with shape (batch_size, feat_dim)
             target: ground truth labels with shape (num_classes)
         """
-        inputs = input["features"]
+        inputs = input["backbone"]
         if self.normalize_feature:
             inputs = 1. * inputs / (paddle.expand_as(
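TripletLossV2 now reads its features from the same "backbone" key, keeping the loss consistent with cal_feature above; the model output is treated as a dict keyed by stage. A hypothetical call, with the constructor arguments assumed since they are not shown in this hunk:

    import paddle

    loss_fn = TripletLossV2(margin=0.3, normalize_feature=True)    # assumed signature
    out = {"backbone": paddle.randn([8, 2048])}                     # model output dict
    labels = paddle.to_tensor([0, 0, 1, 1, 2, 2, 3, 3])             # PK-style batch: 4 ids x 2 samples
    loss = loss_fn(out, labels)                                     # return format not shown in the hunk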