Unverified commit 993b6d37, authored by qq_22305325, committed by GitHub

remove deprecated call (#3246)

Co-authored-by: Li Xinqi <lixinqi2010@gmail.com>
Parent 3f2ed3f2
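
The change is mechanical: every `flow.deprecated.variable_scope(name)` context manager in the diff below is replaced by `flow.scope.namespace(name)`; the ops and variables inside each block are untouched. A minimal sketch of the pattern, modeled on `variable_scope_test_job_1` from this diff — the `dtype` and `initializer` arguments are illustrative assumptions, since those lines are collapsed here:

```python
import oneflow as flow

@flow.global_function
def namespace_job(a=flow.FixedTensorDef((1, 3, 6, 6))):
    # Before this commit:
    #     with flow.deprecated.variable_scope("job1_scope1"):
    # After this commit:
    with flow.scope.namespace("job1_scope1"):
        # Variables and ops created here are prefixed with the namespace,
        # just as with the old variable_scope; only the entry point changes.
        convw = flow.get_variable(
            "conv_weight",
            shape=(5, 3, 3, 3),
            dtype=flow.float,                               # assumed; collapsed in the diff
            initializer=flow.random_uniform_initializer(),  # assumed; collapsed in the diff
        )
        return flow.nn.conv2d(a, convw, 1, "SAME", "NCHW", name="conv")
```
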
......@@ -25,8 +25,8 @@ class BertBackbone(object):
initializer_range=0.02,
):
with flow.deprecated.variable_scope("bert"):
with flow.deprecated.variable_scope("embeddings"):
with flow.scope.namespace("bert"):
with flow.scope.namespace("embeddings"):
(self.embedding_output_, self.embedding_table_) = _EmbeddingLookup(
input_ids_blob=input_ids_blob,
vocab_size=vocab_size,
......@@ -48,7 +48,7 @@ class BertBackbone(object):
max_position_embeddings=max_position_embeddings,
dropout_prob=hidden_dropout_prob,
)
with flow.deprecated.variable_scope("encoder"):
with flow.scope.namespace("encoder"):
attention_mask_blob = _CreateAttentionMaskFromInputMask(
input_mask_blob,
from_seq_length=seq_length,
......@@ -112,10 +112,10 @@ def _TransformerModel(
prev_output_blob = flow.reshape(input_blob, (-1, input_width))
all_layer_output_blobs = []
for layer_idx in range(num_hidden_layers):
with flow.deprecated.variable_scope("layer_%d" % layer_idx):
with flow.scope.namespace("layer_%d" % layer_idx):
layer_input_blob = prev_output_blob
with flow.deprecated.variable_scope("attention"):
with flow.deprecated.variable_scope("self"):
with flow.scope.namespace("attention"):
with flow.scope.namespace("self"):
attention_output_blob = _AttentionLayer(
from_blob=layer_input_blob,
to_blob=layer_input_blob,
......@@ -128,7 +128,7 @@ def _TransformerModel(
from_seq_length=seq_length,
to_seq_length=seq_length,
)
with flow.deprecated.variable_scope("output"):
with flow.scope.namespace("output"):
attention_output_blob = _FullyConnected(
attention_output_blob,
input_size=num_attention_heads * attention_head_size,
......@@ -143,7 +143,7 @@ def _TransformerModel(
attention_output_blob = _LayerNorm(
attention_output_blob, hidden_size
)
with flow.deprecated.variable_scope("intermediate"):
with flow.scope.namespace("intermediate"):
if callable(intermediate_act_fn):
act_fn = op_conf_util.kNone
else:
......@@ -160,7 +160,7 @@ def _TransformerModel(
intermediate_output_blob = intermediate_act_fn(
intermediate_output_blob
)
with flow.deprecated.variable_scope("output"):
with flow.scope.namespace("output"):
layer_output_blob = _FullyConnected(
intermediate_output_blob,
input_size=intermediate_size,
......
......@@ -65,13 +65,13 @@ def PreTrain(
hidden_size=hidden_size,
initializer_range=initializer_range,
)
with flow.deprecated.variable_scope("cls-loss"):
with flow.scope.namespace("cls-loss"):
total_loss = lm_loss + ns_loss
return total_loss, lm_loss, ns_loss
def PooledOutput(sequence_output, hidden_size, initializer_range):
with flow.deprecated.variable_scope("bert-pooler"):
with flow.scope.namespace("bert-pooler"):
first_token_tensor = flow.slice(sequence_output, [None, 0, 0], [None, 1, -1])
first_token_tensor = flow.reshape(first_token_tensor, [-1, hidden_size])
pooled_output = bert_util._FullyConnected(
......@@ -98,15 +98,15 @@ def _AddMaskedLanguageModelLoss(
hidden_act,
initializer_range,
):
with flow.deprecated.variable_scope("other"):
with flow.scope.namespace("other"):
sum_label_weight_blob = flow.math.reduce_sum(label_weight_blob, axis=[-1])
ones = sum_label_weight_blob * 0.0 + 1.0
sum_label_weight_blob = flow.math.reduce_sum(sum_label_weight_blob)
batch_size = flow.math.reduce_sum(ones)
sum_label_weight_blob = sum_label_weight_blob / batch_size
with flow.deprecated.variable_scope("cls-predictions"):
with flow.scope.namespace("cls-predictions"):
input_blob = _GatherIndexes(input_blob, positions_blob, seq_length, hidden_size)
with flow.deprecated.variable_scope("transform"):
with flow.scope.namespace("transform"):
if callable(hidden_act):
act_fn = op_conf_util.kNone
else:
......@@ -136,7 +136,7 @@ def _AddMaskedLanguageModelLoss(
)
pre_example_loss = flow.reshape(pre_example_loss, [-1, max_predictions_per_seq])
numerator = pre_example_loss * label_weight_blob
with flow.deprecated.variable_scope("loss"):
with flow.scope.namespace("loss"):
numerator = flow.math.reduce_sum(numerator, axis=[-1])
denominator = sum_label_weight_blob + 1e-5
loss = numerator / denominator
......@@ -152,7 +152,7 @@ def _GatherIndexes(sequence_blob, positions_blob, seq_length, hidden_size):
def _AddNextSentenceOutput(input_blob, label_blob, hidden_size, initializer_range):
with flow.deprecated.variable_scope("cls-seq_relationship"):
with flow.scope.namespace("cls-seq_relationship"):
output_weight_blob = flow.get_variable(
name="output_weights",
shape=[2, hidden_size],
......
......@@ -54,12 +54,12 @@ def _conv2d_layer(
def InceptionA(in_blob, index):
with flow.deprecated.variable_scope("mixed_{}".format(index)):
with flow.deprecated.variable_scope("branch1x1"):
with flow.scope.namespace("mixed_{}".format(index)):
with flow.scope.namespace("branch1x1"):
branch1x1 = _conv2d_layer(
"conv0", in_blob, filters=64, kernel_size=1, strides=1, padding="SAME"
)
with flow.deprecated.variable_scope("branch5x5"):
with flow.scope.namespace("branch5x5"):
branch5x5_1 = _conv2d_layer(
"conv0", in_blob, filters=48, kernel_size=1, strides=1, padding="SAME"
)
......@@ -71,7 +71,7 @@ def InceptionA(in_blob, index):
strides=1,
padding="SAME",
)
with flow.deprecated.variable_scope("branch3x3dbl"):
with flow.scope.namespace("branch3x3dbl"):
branch3x3dbl_1 = _conv2d_layer(
"conv0", in_blob, filters=64, kernel_size=1, strides=1, padding="SAME"
)
......@@ -91,7 +91,7 @@ def InceptionA(in_blob, index):
strides=1,
padding="SAME",
)
with flow.deprecated.variable_scope("branch_pool"):
with flow.scope.namespace("branch_pool"):
branch_pool_1 = flow.nn.avg_pool2d(
in_blob,
ksize=3,
......@@ -121,12 +121,12 @@ def InceptionA(in_blob, index):
def InceptionB(in_blob, index):
with flow.deprecated.variable_scope("mixed_{}".format(index)):
with flow.deprecated.variable_scope("branch3x3"):
with flow.scope.namespace("mixed_{}".format(index)):
with flow.scope.namespace("branch3x3"):
branch3x3 = _conv2d_layer(
"conv0", in_blob, filters=384, kernel_size=3, strides=2, padding="VALID"
)
with flow.deprecated.variable_scope("branch3x3dbl"):
with flow.scope.namespace("branch3x3dbl"):
branch3x3dbl_1 = _conv2d_layer(
"conv0", in_blob, filters=64, kernel_size=1, strides=1, padding="SAME"
)
......@@ -146,7 +146,7 @@ def InceptionB(in_blob, index):
strides=2,
padding="VALID",
)
with flow.deprecated.variable_scope("branch_pool"):
with flow.scope.namespace("branch_pool"):
branch_pool = flow.nn.max_pool2d(
in_blob,
ksize=3,
......@@ -166,12 +166,12 @@ def InceptionB(in_blob, index):
def InceptionC(in_blob, index, filters):
with flow.deprecated.variable_scope("mixed_{}".format(index)):
with flow.deprecated.variable_scope("branch1x1"):
with flow.scope.namespace("mixed_{}".format(index)):
with flow.scope.namespace("branch1x1"):
branch1x1 = _conv2d_layer(
"conv0", in_blob, filters=192, kernel_size=1, strides=1, padding="SAME"
)
with flow.deprecated.variable_scope("branch7x7"):
with flow.scope.namespace("branch7x7"):
branch7x7_1 = _conv2d_layer(
"conv0",
in_blob,
......@@ -196,7 +196,7 @@ def InceptionC(in_blob, index, filters):
strides=[1, 1],
padding="SAME",
)
with flow.deprecated.variable_scope("branch7x7dbl"):
with flow.scope.namespace("branch7x7dbl"):
branch7x7dbl_1 = _conv2d_layer(
"conv0",
in_blob,
......@@ -237,7 +237,7 @@ def InceptionC(in_blob, index, filters):
strides=1,
padding="SAME",
)
with flow.deprecated.variable_scope("branch_pool"):
with flow.scope.namespace("branch_pool"):
branch_pool_1 = flow.nn.avg_pool2d(
in_blob,
ksize=3,
......@@ -266,8 +266,8 @@ def InceptionC(in_blob, index, filters):
def InceptionD(in_blob, index):
with flow.deprecated.variable_scope("mixed_{}".format(index)):
with flow.deprecated.variable_scope("branch3x3"):
with flow.scope.namespace("mixed_{}".format(index)):
with flow.scope.namespace("branch3x3"):
branch3x3_1 = _conv2d_layer(
"conv0", in_blob, filters=192, kernel_size=1, strides=1, padding="SAME"
)
......@@ -279,7 +279,7 @@ def InceptionD(in_blob, index):
strides=2,
padding="VALID",
)
with flow.deprecated.variable_scope("branch7x7x3"):
with flow.scope.namespace("branch7x7x3"):
branch7x7x3_1 = _conv2d_layer(
"conv0", in_blob, filters=192, kernel_size=1, strides=1, padding="SAME"
)
......@@ -307,7 +307,7 @@ def InceptionD(in_blob, index):
strides=2,
padding="VALID",
)
with flow.deprecated.variable_scope("branch_pool"):
with flow.scope.namespace("branch_pool"):
branch_pool = flow.nn.max_pool2d(
in_blob,
ksize=3,
......@@ -328,12 +328,12 @@ def InceptionD(in_blob, index):
def InceptionE(in_blob, index):
with flow.deprecated.variable_scope("mixed_{}".format(index)):
with flow.deprecated.variable_scope("branch1x1"):
with flow.scope.namespace("mixed_{}".format(index)):
with flow.scope.namespace("branch1x1"):
branch1x1 = _conv2d_layer(
"conv0", in_blob, filters=320, kernel_size=1, strides=1, padding="SAME"
)
with flow.deprecated.variable_scope("branch3x3"):
with flow.scope.namespace("branch3x3"):
branch3x3_1 = _conv2d_layer(
"conv0", in_blob, filters=384, kernel_size=1, strides=1, padding="SAME"
)
......@@ -359,7 +359,7 @@ def InceptionE(in_blob, index):
concat_branch3x3 = flow.concat(
values=inceptionE_1_bn, axis=1, name="concat"
)
with flow.deprecated.variable_scope("branch3x3dbl"):
with flow.scope.namespace("branch3x3dbl"):
branch3x3dbl_1 = _conv2d_layer(
"conv0", in_blob, filters=448, kernel_size=1, strides=1, padding="SAME"
)
......@@ -393,7 +393,7 @@ def InceptionE(in_blob, index):
concat_branch3x3dbl = flow.concat(
values=inceptionE_2_bn, axis=1, name="concat"
)
with flow.deprecated.variable_scope("branch_pool"):
with flow.scope.namespace("branch_pool"):
branch_pool_1 = flow.nn.avg_pool2d(
in_blob,
ksize=3,
......@@ -473,7 +473,7 @@ def inceptionv3(images, labels, trainable=True):
mixed_10, ksize=8, strides=1, padding="VALID", data_format="NCHW", name="pool3"
)
with flow.deprecated.variable_scope("logits"):
with flow.scope.namespace("logits"):
pool3 = flow.reshape(pool3, [pool3.shape[0], -1])
# TODO: Need to transpose weight when converting model from TF to OF if
# you want to use layers.dense interface.
......
......@@ -122,7 +122,7 @@ def resnet50(images, trainable=True):
images = flow.transpose(images, name="transpose", perm=[0, 3, 1, 2])
with flow.deprecated.variable_scope("Resnet"):
with flow.scope.namespace("Resnet"):
stem = resnet_stem(images)
body = resnet_conv_x_body(stem, lambda x: x)
pool5 = flow.nn.avg_pool2d(
......
......@@ -4,7 +4,7 @@ import oneflow as of
@flow.global_function
def variable_scope_test_job_1(a=of.FixedTensorDef((1, 3, 6, 6))):
with of.deprecated.variable_scope("job1_scope1"):
with of.scope.namespace("job1_scope1"):
convw = of.get_variable(
"conv_weight",
shape=(5, 3, 3, 3),
......@@ -14,7 +14,7 @@ def variable_scope_test_job_1(a=of.FixedTensorDef((1, 3, 6, 6))):
)
conv = of.nn.conv2d(a, convw, 1, "SAME", "NCHW", name="conv")
with of.deprecated.variable_scope("job1_scope2"):
with of.scope.namespace("job1_scope2"):
fcw = of.get_variable(
"fc_weight",
shape=(180, 10),
......@@ -54,7 +54,7 @@ def variable_scope_test_job_1(a=of.FixedTensorDef((1, 3, 6, 6))):
@flow.global_function
def variable_scope_test_job_2(a=of.FixedTensorDef((2, 5))):
with of.deprecated.variable_scope("job2_scope1"):
with of.scope.namespace("job2_scope1"):
indices = of.get_variable(
"gather_inds",
shape=(2,),
......
......@@ -25,8 +25,8 @@ class BertBackbone(object):
initializer_range=0.02,
):
with flow.deprecated.variable_scope("bert"):
with flow.deprecated.variable_scope("embeddings"):
with flow.scope.namespace("bert"):
with flow.scope.namespace("embeddings"):
(self.embedding_output_, self.embedding_table_) = _EmbeddingLookup(
input_ids_blob=input_ids_blob,
vocab_size=vocab_size,
......@@ -48,7 +48,7 @@ class BertBackbone(object):
max_position_embeddings=max_position_embeddings,
dropout_prob=hidden_dropout_prob,
)
with flow.deprecated.variable_scope("encoder"):
with flow.scope.namespace("encoder"):
addr_blob = _CreateAttentionMaskFromInputMask(
input_mask_blob,
from_seq_length=seq_length,
......@@ -112,10 +112,10 @@ def _TransformerModel(
prev_output_blob = flow.reshape(input_blob, (-1, input_width))
all_layer_output_blobs = []
for layer_idx in range(num_hidden_layers):
with flow.deprecated.variable_scope("layer_%d" % layer_idx):
with flow.scope.namespace("layer_%d" % layer_idx):
layer_input_blob = prev_output_blob
with flow.deprecated.variable_scope("attention"):
with flow.deprecated.variable_scope("self"):
with flow.scope.namespace("attention"):
with flow.scope.namespace("self"):
attention_output_blob = _AttentionLayer(
from_blob=layer_input_blob,
to_blob=layer_input_blob,
......@@ -128,7 +128,7 @@ def _TransformerModel(
from_seq_length=seq_length,
to_seq_length=seq_length,
)
with flow.deprecated.variable_scope("output"):
with flow.scope.namespace("output"):
attention_output_blob = _FullyConnected(
attention_output_blob,
input_size=num_attention_heads * attention_head_size,
......@@ -143,7 +143,7 @@ def _TransformerModel(
attention_output_blob = _LayerNorm(
attention_output_blob, hidden_size
)
with flow.deprecated.variable_scope("intermediate"):
with flow.scope.namespace("intermediate"):
if callable(intermediate_act_fn):
act_fn = op_conf_util.kNone
else:
......@@ -160,7 +160,7 @@ def _TransformerModel(
intermediate_output_blob = intermediate_act_fn(
intermediate_output_blob
)
with flow.deprecated.variable_scope("output"):
with flow.scope.namespace("output"):
layer_output_blob = _FullyConnected(
intermediate_output_blob,
input_size=intermediate_size,
......
......@@ -133,12 +133,12 @@ def _data_load_layer(args, data_dir):
def InceptionA(in_blob, index):
with flow.deprecated.variable_scope("mixed_{}".format(index)):
with flow.deprecated.variable_scope("branch1x1"):
with flow.scope.namespace("mixed_{}".format(index)):
with flow.scope.namespace("branch1x1"):
branch1x1 = _conv2d_layer(
"conv0", in_blob, filters=64, kernel_size=1, strides=1, padding="SAME"
)
with flow.deprecated.variable_scope("branch5x5"):
with flow.scope.namespace("branch5x5"):
branch5x5_1 = _conv2d_layer(
"conv0", in_blob, filters=48, kernel_size=1, strides=1, padding="SAME"
)
......@@ -150,7 +150,7 @@ def InceptionA(in_blob, index):
strides=1,
padding="SAME",
)
with flow.deprecated.variable_scope("branch3x3dbl"):
with flow.scope.namespace("branch3x3dbl"):
branch3x3dbl_1 = _conv2d_layer(
"conv0", in_blob, filters=64, kernel_size=1, strides=1, padding="SAME"
)
......@@ -170,7 +170,7 @@ def InceptionA(in_blob, index):
strides=1,
padding="SAME",
)
with flow.deprecated.variable_scope("branch_pool"):
with flow.scope.namespace("branch_pool"):
branch_pool_1 = flow.nn.avg_pool2d(
in_blob,
ksize=3,
......@@ -200,12 +200,12 @@ def InceptionA(in_blob, index):
def InceptionB(in_blob, index):
with flow.deprecated.variable_scope("mixed_{}".format(index)):
with flow.deprecated.variable_scope("branch3x3"):
with flow.scope.namespace("mixed_{}".format(index)):
with flow.scope.namespace("branch3x3"):
branch3x3 = _conv2d_layer(
"conv0", in_blob, filters=384, kernel_size=3, strides=2, padding="VALID"
)
with flow.deprecated.variable_scope("branch3x3dbl"):
with flow.scope.namespace("branch3x3dbl"):
branch3x3dbl_1 = _conv2d_layer(
"conv0", in_blob, filters=64, kernel_size=1, strides=1, padding="SAME"
)
......@@ -225,7 +225,7 @@ def InceptionB(in_blob, index):
strides=2,
padding="VALID",
)
with flow.deprecated.variable_scope("branch_pool"):
with flow.scope.namespace("branch_pool"):
branch_pool = flow.nn.max_pool2d(
in_blob,
ksize=3,
......@@ -245,12 +245,12 @@ def InceptionB(in_blob, index):
def InceptionC(in_blob, index, filters):
with flow.deprecated.variable_scope("mixed_{}".format(index)):
with flow.deprecated.variable_scope("branch1x1"):
with flow.scope.namespace("mixed_{}".format(index)):
with flow.scope.namespace("branch1x1"):
branch1x1 = _conv2d_layer(
"conv0", in_blob, filters=192, kernel_size=1, strides=1, padding="SAME"
)
with flow.deprecated.variable_scope("branch7x7"):
with flow.scope.namespace("branch7x7"):
branch7x7_1 = _conv2d_layer(
"conv0",
in_blob,
......@@ -275,7 +275,7 @@ def InceptionC(in_blob, index, filters):
strides=[1, 1],
padding="SAME",
)
with flow.deprecated.variable_scope("branch7x7dbl"):
with flow.scope.namespace("branch7x7dbl"):
branch7x7dbl_1 = _conv2d_layer(
"conv0",
in_blob,
......@@ -316,7 +316,7 @@ def InceptionC(in_blob, index, filters):
strides=1,
padding="SAME",
)
with flow.deprecated.variable_scope("branch_pool"):
with flow.scope.namespace("branch_pool"):
branch_pool_1 = flow.nn.avg_pool2d(
in_blob,
ksize=3,
......@@ -345,8 +345,8 @@ def InceptionC(in_blob, index, filters):
def InceptionD(in_blob, index):
with flow.deprecated.variable_scope("mixed_{}".format(index)):
with flow.deprecated.variable_scope("branch3x3"):
with flow.scope.namespace("mixed_{}".format(index)):
with flow.scope.namespace("branch3x3"):
branch3x3_1 = _conv2d_layer(
"conv0", in_blob, filters=192, kernel_size=1, strides=1, padding="SAME"
)
......@@ -358,7 +358,7 @@ def InceptionD(in_blob, index):
strides=2,
padding="VALID",
)
with flow.deprecated.variable_scope("branch7x7x3"):
with flow.scope.namespace("branch7x7x3"):
branch7x7x3_1 = _conv2d_layer(
"conv0", in_blob, filters=192, kernel_size=1, strides=1, padding="SAME"
)
......@@ -386,7 +386,7 @@ def InceptionD(in_blob, index):
strides=2,
padding="VALID",
)
with flow.deprecated.variable_scope("branch_pool"):
with flow.scope.namespace("branch_pool"):
branch_pool = flow.nn.max_pool2d(
in_blob,
ksize=3,
......@@ -407,12 +407,12 @@ def InceptionD(in_blob, index):
def InceptionE(in_blob, index):
with flow.deprecated.variable_scope("mixed_{}".format(index)):
with flow.deprecated.variable_scope("branch1x1"):
with flow.scope.namespace("mixed_{}".format(index)):
with flow.scope.namespace("branch1x1"):
branch1x1 = _conv2d_layer(
"conv0", in_blob, filters=320, kernel_size=1, strides=1, padding="SAME"
)
with flow.deprecated.variable_scope("branch3x3"):
with flow.scope.namespace("branch3x3"):
branch3x3_1 = _conv2d_layer(
"conv0", in_blob, filters=384, kernel_size=1, strides=1, padding="SAME"
)
......@@ -438,7 +438,7 @@ def InceptionE(in_blob, index):
concat_branch3x3 = flow.concat(
values=inceptionE_1_bn, axis=1, name="concat"
)
with flow.deprecated.variable_scope("branch3x3dbl"):
with flow.scope.namespace("branch3x3dbl"):
branch3x3dbl_1 = _conv2d_layer(
"conv0", in_blob, filters=448, kernel_size=1, strides=1, padding="SAME"
)
......@@ -472,7 +472,7 @@ def InceptionE(in_blob, index):
concat_branch3x3dbl = flow.concat(
values=inceptionE_2_bn, axis=1, name="concat"
)
with flow.deprecated.variable_scope("branch_pool"):
with flow.scope.namespace("branch_pool"):
branch_pool_1 = flow.nn.avg_pool2d(
in_blob,
ksize=3,
......@@ -552,7 +552,7 @@ def InceptionV3(images, labels, trainable=True):
mixed_10, ksize=8, strides=1, padding="VALID", data_format="NCHW", name="pool3"
)
with flow.deprecated.variable_scope("logits"):
with flow.scope.namespace("logits"):
pool3 = flow.reshape(pool3, [pool3.shape[0], -1])
# TODO: Need to transpose weight when converting model from TF to OF if
# you want to use layers.dense interface.
......
......@@ -65,13 +65,13 @@ def PreTrain(
hidden_size=hidden_size,
initializer_range=initializer_range,
)
with flow.deprecated.variable_scope("cls-loss"):
with flow.scope.namespace("cls-loss"):
total_loss = lm_loss + ns_loss
return total_loss
def PooledOutput(sequence_output, hidden_size, initializer_range):
with flow.deprecated.variable_scope("bert-pooler"):
with flow.scope.namespace("bert-pooler"):
first_token_tensor = flow.slice(sequence_output, [None, 0, 0], [None, 1, -1])
first_token_tensor = flow.reshape(first_token_tensor, [-1, hidden_size])
pooled_output = bert_util._FullyConnected(
......@@ -99,15 +99,15 @@ def _AddMaskedLanguageModelLoss(
initializer_range,
):
with flow.deprecated.variable_scope("other"):
with flow.scope.namespace("other"):
sum_label_weight_blob = flow.math.reduce_sum(label_weight_blob, axis=[-1])
ones = sum_label_weight_blob * 0.0 + 1.0
sum_label_weight_blob = flow.math.reduce_sum(sum_label_weight_blob)
batch_size = flow.math.reduce_sum(ones)
sum_label_weight_blob = sum_label_weight_blob / batch_size
with flow.deprecated.variable_scope("cls-predictions"):
with flow.scope.namespace("cls-predictions"):
input_blob = _GatherIndexes(input_blob, positions_blob, seq_length, hidden_size)
with flow.deprecated.variable_scope("transform"):
with flow.scope.namespace("transform"):
if callable(hidden_act):
act_fn = op_conf_util.kNone
else:
......@@ -137,7 +137,7 @@ def _AddMaskedLanguageModelLoss(
)
pre_example_loss = flow.reshape(pre_example_loss, [-1, max_predictions_per_seq])
numerator = pre_example_loss * label_weight_blob
with flow.deprecated.variable_scope("loss"):
with flow.scope.namespace("loss"):
numerator = flow.math.reduce_sum(numerator, axis=[-1])
denominator = sum_label_weight_blob + 1e-5
loss = numerator / denominator
......@@ -153,7 +153,7 @@ def _GatherIndexes(sequence_blob, positions_blob, seq_length, hidden_size):
def _AddNextSentenceOutput(input_blob, label_blob, hidden_size, initializer_range):
with flow.deprecated.variable_scope("cls-seq_relationship"):
with flow.scope.namespace("cls-seq_relationship"):
output_weight_blob = flow.get_variable(
name="output_weights",
shape=[2, hidden_size],
......
......@@ -231,7 +231,7 @@ def resnet50(args, data_dir):
g_output_key.append("input_img")
g_output.append(images)
with flow.deprecated.variable_scope("Resnet"):
with flow.scope.namespace("Resnet"):
stem = resnet_stem(images)
body = resnet_conv_x_body(stem, lambda x: x)
pool5 = flow.nn.avg_pool2d(
......