Commit 1effa5f3 authored by tink2123

rm anno

Parent 3c906d41
@@ -369,7 +369,7 @@ class SEEDLabelEncode(BaseRecLabelEncode):
             return None
         if len(text) >= self.max_text_len:
             return None
-        data['length'] = np.array(len(text)) + 1  # conclue eos
+        data['length'] = np.array(len(text)) + 1  # conclude eos
         text = text + [len(self.character) - 1] * (self.max_text_len - len(text)
                                                    )
         data['label'] = np.array(text)
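For context, the changed line only fixes the spelling of the comment ("conclude eos"); the encoding itself is unchanged: the stored length counts the text plus one EOS symbol, and the label is padded to max_text_len with the last index of the character list. A minimal numpy sketch of that step (the character list and max length below are made-up values, not the repository's config):

```python
import numpy as np

# Hypothetical values, only for illustration.
character = ['a', 'b', 'c', '<pad/eos>']    # last index doubles as the padding/eos id
max_text_len = 6

text = [0, 2, 1]                            # indices of "acb"
data = {}
data['length'] = np.array(len(text)) + 1    # +1 so the length includes the eos symbol
text = text + [len(character) - 1] * (max_text_len - len(text))
data['label'] = np.array(text)

print(data['length'])   # 4
print(data['label'])    # [0 2 1 3 3 3]
```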
@@ -138,10 +138,3 @@ class ResNet_ASTER(nn.Layer):
             return rnn_feat
         else:
             return cnn_feat
-
-
-if __name__ == "__main__":
-    x = paddle.randn([3, 3, 32, 100])
-    net = ResNet_ASTER()
-    encoder_feat = net(x)
-    print(encoder_feat.shape)
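The removed block was an inline smoke test. If the same shape check is still wanted, it can live in a standalone script instead; a minimal sketch, assuming the backbone is importable (the import path below is a guess, adjust it to the actual module layout):

```python
import paddle
# Hypothetical import path; use whatever module actually defines ResNet_ASTER.
from ppocr.modeling.backbones.rec_resnet_aster import ResNet_ASTER

x = paddle.randn([3, 3, 32, 100])   # NCHW batch of three 3-channel 32x100 crops
net = ResNet_ASTER()
encoder_feat = net(x)
print(encoder_feat.shape)
```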
@@ -150,7 +150,6 @@ class AttentionRecognitionHead(nn.Layer):
         # https://github.com/IBM/pytorch-seq2seq/blob/fede87655ddce6c94b38886089e05321dc9802af/seq2seq/models/TopKDecoder.py
         batch_size, l, d = x.shape
-        # inflated_encoder_feats = _inflate(encoder_feats, beam_width, 0) # ABC --> AABBCC -/-> ABCABC
         x = paddle.tile(
             paddle.transpose(
                 x.unsqueeze(1), perm=[1, 0, 2, 3]), [beam_width, 1, 1, 1])
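The deleted comment contrasted two inflation orderings for beam search: repeating the whole batch (ABC -> ABCABC) versus repeating each sample in place (ABC -> AABBCC). A small numpy illustration of the difference (a plain numpy stand-in for the paddle tile/transpose calls, not the repository's code):

```python
import numpy as np

beam_width = 2
x = np.array([[[1.]], [[2.]], [[3.]]])      # shape [B=3, L=1, D=1], batch "A, B, C"

# Tile over a new leading axis, then flatten: beam-major ordering -> A B C A B C
tiled = np.tile(x[None, ...], (beam_width, 1, 1, 1)).reshape(-1, 1, 1)
print(tiled.squeeze())                      # [1. 2. 3. 1. 2. 3.]

# Repeating each sample in place gives sample-major ordering -> A A B B C C
repeated = np.repeat(x, beam_width, axis=0)
print(repeated.squeeze())                   # [1. 1. 2. 2. 3. 3.]
```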
@@ -63,8 +63,6 @@ def build_output_control_points(num_control_points, margins):
     ctrl_pts_y_bottom = np.ones(num_ctrl_pts_per_side) * (1.0 - margin_y)
     ctrl_pts_top = np.stack([ctrl_pts_x, ctrl_pts_y_top], axis=1)
     ctrl_pts_bottom = np.stack([ctrl_pts_x, ctrl_pts_y_bottom], axis=1)
-    # ctrl_pts_top = ctrl_pts_top[1:-1,:]
-    # ctrl_pts_bottom = ctrl_pts_bottom[1:-1,:]
     output_ctrl_pts_arr = np.concatenate(
         [ctrl_pts_top, ctrl_pts_bottom], axis=0)
     output_ctrl_pts = paddle.to_tensor(output_ctrl_pts_arr)
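The two deleted lines were leftover experiments that would have dropped the end control points. The surviving code builds 2*K output control points: K x-positions paired with a constant y on the top edge, the same K x-positions paired with a constant y on the bottom edge, concatenated top-first. A small numpy sketch with made-up margins (the linspace for ctrl_pts_x is an assumption based on the surrounding function, it is not shown in this hunk):

```python
import numpy as np

num_control_points = 6                       # 3 per side in this toy example
margin_x, margin_y = 0.05, 0.1
num_ctrl_pts_per_side = num_control_points // 2

# Assumed construction of the x positions; only the y rows appear in the diff.
ctrl_pts_x = np.linspace(margin_x, 1.0 - margin_x, num_ctrl_pts_per_side)
ctrl_pts_y_top = np.ones(num_ctrl_pts_per_side) * margin_y
ctrl_pts_y_bottom = np.ones(num_ctrl_pts_per_side) * (1.0 - margin_y)

ctrl_pts_top = np.stack([ctrl_pts_x, ctrl_pts_y_top], axis=1)        # K x 2
ctrl_pts_bottom = np.stack([ctrl_pts_x, ctrl_pts_y_bottom], axis=1)  # K x 2
output_ctrl_pts_arr = np.concatenate([ctrl_pts_top, ctrl_pts_bottom], axis=0)
print(output_ctrl_pts_arr.shape)             # (6, 2): top edge first, then bottom edge
```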
@@ -85,7 +83,6 @@ class TPSSpatialTransformer(nn.Layer):
         target_control_points = build_output_control_points(num_control_points,
                                                             margins)
         N = num_control_points
-        # N = N - 4

         # create padded kernel matrix
         forward_kernel = paddle.zeros(shape=[N + 3, N + 3])
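The N + 3 size comes from the standard thin-plate-spline system: N radial-basis terms plus a three-term affine part (1, x, y). A hedged numpy sketch of what such a padded kernel looks like (this is the textbook TPS formulation, not a copy of the class's actual fill order):

```python
import numpy as np

def tps_u(r2, eps=1e-6):
    # Radial basis U(r) = r^2 * log(r^2), with a small eps to avoid log(0).
    return r2 * np.log(r2 + eps)

pts = np.array([[0.1, 0.1], [0.9, 0.1], [0.1, 0.9], [0.9, 0.9]])  # N=4 toy control points
N = pts.shape[0]

r2 = np.sum((pts[:, None, :] - pts[None, :, :]) ** 2, axis=-1)    # pairwise squared distances
K = tps_u(r2)                                                     # N x N radial part
P = np.concatenate([np.ones((N, 1)), pts], axis=1)                # N x 3 affine part (1, x, y)

kernel = np.zeros((N + 3, N + 3))
kernel[:N, :N] = K
kernel[:N, N:] = P
kernel[N:, :N] = P.T
print(kernel.shape)                                               # (7, 7)
```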
@@ -112,7 +109,6 @@ class TPSSpatialTransformer(nn.Layer):
         target_coordinate = paddle.to_tensor(target_coordinate)  # HW x 2
         Y, X = paddle.split(
             target_coordinate, target_coordinate.shape[1], axis=1)
-        #Y, X = target_coordinate.split(1, dim = 1)
         Y = Y / (self.target_height - 1)
         X = X / (self.target_width - 1)
         target_coordinate = paddle.concat(
@@ -136,7 +132,6 @@ class TPSSpatialTransformer(nn.Layer):
         assert source_control_points.ndimension() == 3
         assert source_control_points.shape[1] == self.num_control_points
         assert source_control_points.shape[2] == 2
-        #batch_size = source_control_points.shape[0]
         batch_size = paddle.shape(source_control_points)[0]
         self.padding_matrix = paddle.expand(