Commit 4e9f9514 authored by Hongkun Yu, committed by A. Unique TensorFlower

Internal change

PiperOrigin-RevId: 464934071
Parent a81f8590
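The hunks below all apply the same mechanical cleanup: two-argument super(ClassName, self) calls, kept for Python 2 compatibility, are rewritten to the zero-argument super() form that Python 3 resolves from the enclosing class automatically. A minimal sketch of the equivalence (the class names here are illustrative, not from the repository):

class Base:
  def __init__(self, width):
    self.width = width


class Child(Base):
  def __init__(self, width, depth):
    # Old spelling, required under Python 2:
    #   super(Child, self).__init__(width)
    # Zero-argument spelling used throughout this commit; identical
    # behavior under Python 3.
    super().__init__(width)
    self.depth = depth


assert Child(width=4, depth=2).width == 4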
@@ -237,7 +237,7 @@ class TransformerScaffold(tf.keras.layers.Layer):
self._output_layer_norm = tf.keras.layers.LayerNormalization(
name="output_layer_norm", axis=-1, epsilon=1e-12, dtype=tf.float32)
-super(TransformerScaffold, self).build(input_shape)
+super().build(input_shape)
logging.info("%s configs: %s", self.__class__.__name__, self.get_config())
def get_config(self):
@@ -173,7 +173,7 @@ class AlbertEncoder(tf.keras.Model):
# created using the Functional API. Once super().__init__ is called, we
# can assign attributes to `self` - note that all `self` assignments are
# below this line.
-super(AlbertEncoder, self).__init__(
+super().__init__(
inputs=[word_ids, mask, type_ids], outputs=outputs, **kwargs)
config_dict = {
'vocab_size': vocab_size,
@@ -74,7 +74,7 @@ class Classification(tf.keras.Model):
('Unknown `output` value "%s". `output` can be either "logits" or '
'"predictions"') % output)
-super(Classification, self).__init__(
+super().__init__(
inputs=[cls_output], outputs=output_tensors, **kwargs)
# b/164516224
@@ -271,7 +271,7 @@ class EncoderScaffold(tf.keras.Model):
# created using the Functional API. Once super().__init__ is called, we
# can assign attributes to `self` - note that all `self` assignments are
# below this line.
-super(EncoderScaffold, self).__init__(
+super().__init__(
inputs=inputs, outputs=outputs, **kwargs)
self._hidden_cls = hidden_cls
@@ -163,7 +163,7 @@ class MobileBERTEncoder(tf.keras.Model):
encoder_outputs=all_layer_outputs,
attention_scores=all_attention_scores)
-super(MobileBERTEncoder, self).__init__(
+super().__init__(
inputs=self.inputs, outputs=outputs, **kwargs)
def get_embedding_table(self):
@@ -143,7 +143,7 @@ class PackedSequenceEmbedding(tf.keras.Model):
[attention_mask, sub_seq_mask])
outputs = [embeddings, attention_mask]
-super(PackedSequenceEmbedding, self).__init__(
+super().__init__(
inputs=inputs, outputs=outputs, **kwargs)
# TF does not track immutable attrs which do not contain Trackables,
# so by creating a config namedtuple instead of a dict we avoid tracking it.
@@ -221,7 +221,7 @@ class PositionEmbeddingWithSubSeqMask(tf.keras.layers.Layer):
if 'dtype' not in kwargs:
kwargs['dtype'] = 'float32'
-super(PositionEmbeddingWithSubSeqMask, self).__init__(**kwargs)
+super().__init__(**kwargs)
if use_dynamic_slicing and max_sequence_length is None:
raise ValueError(
'If `use_dynamic_slicing` is True, `max_sequence_length` must be set.'
@@ -236,7 +236,7 @@ class PositionEmbeddingWithSubSeqMask(tf.keras.layers.Layer):
'initializer': tf.keras.initializers.serialize(self._initializer),
'use_dynamic_slicing': self._use_dynamic_slicing,
}
-base_config = super(PositionEmbeddingWithSubSeqMask, self).get_config()
+base_config = super().get_config()
return dict(list(base_config.items()) + list(config.items()))
def build(self, input_shape):
@@ -273,7 +273,7 @@ class PositionEmbeddingWithSubSeqMask(tf.keras.layers.Layer):
shape=[weight_sequence_length, width],
initializer=self._initializer)
-super(PositionEmbeddingWithSubSeqMask, self).build(input_shape)
+super().build(input_shape)
def call(self, inputs, position_ids=None, sub_sequence_mask=None):
"""Implements call() for the layer.
@@ -81,7 +81,7 @@ class SpanLabeling(tf.keras.Model):
# created using the Functional API. Once super().__init__ is called, we
# can assign attributes to `self` - note that all `self` assignments are
# below this line.
-super(SpanLabeling, self).__init__(
+super().__init__(
inputs=[sequence_data], outputs=output_tensors, **kwargs)
config_dict = {
'input_width': input_width,
@@ -384,7 +384,7 @@ class RelativePositionEncoding(tf.keras.layers.Layer):
"""
def __init__(self, hidden_size, **kwargs):
-super(RelativePositionEncoding, self).__init__(**kwargs)
+super().__init__(**kwargs)
self._hidden_size = hidden_size
self._inv_freq = 1.0 / (10000.0**(
tf.range(0, self._hidden_size, 2.0) / self._hidden_size))
@@ -476,7 +476,7 @@ class XLNetBase(tf.keras.layers.Layer):
use_cls_mask=False,
embedding_width=None,
**kwargs):
-super(XLNetBase, self).__init__(**kwargs)
+super().__init__(**kwargs)
self._vocab_size = vocab_size
self._initializer = initializer
@@ -574,7 +574,7 @@ class XLNetBase(tf.keras.layers.Layer):
"embedding_width":
self._embedding_width,
}
-base_config = super(XLNetBase, self).get_config()
+base_config = super().get_config()
return dict(list(base_config.items()) + list(config.items()))
def get_embedding_lookup_table(self):
@@ -601,7 +601,7 @@ class XLNetBase(tf.keras.layers.Layer):
"target_mapping": target_mapping,
"masked_tokens": masked_tokens
}
-return super(XLNetBase, self).__call__(inputs, **kwargs)
+return super().__call__(inputs, **kwargs)
def call(self, inputs):
"""Implements call() for the layer."""
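For reference, the same zero-argument form appears in this commit at each of a layer's base-class call sites: the constructor, build(), and get_config(). A minimal self-contained sketch combining those call sites (an illustrative layer, not code from this repository):

import tensorflow as tf


class ScaledDense(tf.keras.layers.Layer):
  """Illustrative layer using the zero-argument super() pattern throughout."""

  def __init__(self, units, scale=1.0, **kwargs):
    super().__init__(**kwargs)  # was: super(ScaledDense, self).__init__(**kwargs)
    self._units = units
    self._scale = scale

  def build(self, input_shape):
    self._kernel = self.add_weight(
        name="kernel",
        shape=[int(input_shape[-1]), self._units],
        initializer="glorot_uniform")
    super().build(input_shape)  # was: super(ScaledDense, self).build(input_shape)

  def call(self, inputs):
    return self._scale * tf.matmul(inputs, self._kernel)

  def get_config(self):
    config = {"units": self._units, "scale": self._scale}
    base_config = super().get_config()  # was: super(ScaledDense, self).get_config()
    return dict(list(base_config.items()) + list(config.items()))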