Unverified · Commit 0b583235 authored by Tao Luo, committed by GitHub

Revert "Solve the conflict of ops with the same name. (#23199)" (#23494)

This reverts commit abe3e690.
test=develop
Parent 6577f91b
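The reverted PR addressed the case where two ops built with the same `name` argument end up sharing a single output variable. A minimal sketch of that conflict, adapted from the regression test this commit deletes (old fluid 1.x API; after the revert the pre-#23199 behavior applies):

```python
import numpy as np
import paddle.fluid as fluid

main, startup = fluid.Program(), fluid.Program()
with fluid.program_guard(main, startup):
    x = fluid.data(name="x", shape=[1], dtype="float32")
    y = fluid.data(name="y", shape=[1], dtype="float32")
    z = fluid.data(name="z", shape=[1], dtype="float32")
    # Both ops request an output variable literally named "add".
    m = fluid.layers.elementwise_add(x, y, name="add")
    n = fluid.layers.elementwise_add(y, z, name="add")

exe = fluid.Executor(fluid.CPUPlace())
m_v, n_v = exe.run(main,
                   feed={"x": np.ones((1, ), "float32") * 2,
                         "y": np.ones((1, ), "float32") * 3,
                         "z": np.ones((1, ), "float32") * 5},
                   fetch_list=[m, n])
# The deleted test (with #23199 applied) asserted m_v == 5.0 and
# n_v == 8.0; after this revert both fetches read the one shared
# variable named "add".
```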
@@ -150,13 +150,8 @@ void FastThreadedSSAGraphExecutor::InsertFetchOps(
"Possible reasons are:\n"
" 1. The variable to be fetched is not defined in main program.\n"
" 2. The variable to be fetched is not an input or output of any "
"operator.\n"
" 3. Confirm that you have used the fetch `Variable` format "
"instead of the string literal('%s') in `fetch_list` parameter "
"when using `executor.run` method. In other words, the format of "
"`executor.run(fetch_list=[fetch_var])`(fetch_var is a Variable) "
"is recommended.",
var_name, var_name));
"operator.",
var_name));
auto &vars = fetched_var_it->second;
......
@@ -186,13 +186,8 @@ void ThreadedSSAGraphExecutor::InsertFetchOps(
"Possible reasons are:\n"
" 1. The variable to be fetched is not defined in main program.\n"
" 2. The variable to be fetched is not an input or output of any "
"operator.\n"
" 3. Confirm that you have used the fetch `Variable` format "
"instead of the string literal('%s') in `fetch_list` parameter "
"when using `executor.run` method. In other words, the format of "
"`executor.run(fetch_list=[fetch_var])`(fetch_var is a Variable) "
"is recommended.",
var_name, var_name));
"operator.",
var_name));
auto &vars = fetched_var_it->second;
......
@@ -34,19 +34,13 @@ class FetchOp : public framework::OperatorBase {
auto fetch_var_name = Input("X");
auto *fetch_var = scope.FindVar(fetch_var_name);
PADDLE_ENFORCE(fetch_var != nullptr,
"Cannot find the fetch variable(%s) in scope.\n"
"Confirm that you have used the fetch `Variable` format "
"instead of the string literal('%s') in `fetch_list` "
"parameter when using `executor.run` method. In other "
"words, the format of "
"`executor.run(fetch_list=[fetch_var])`(fetch_var is a "
"Variable) is recommended.",
fetch_var_name, fetch_var_name);
"Cannot find fetch variable in scope, fetch_var_name is %s",
fetch_var_name);
auto out_name = this->Output("Out");
auto *out_var = scope.FindVar(out_name);
PADDLE_ENFORCE(out_var != nullptr,
"Cannot find out_var in scope, out_var_name is %s.",
"Cannot find out_var in scope, out_var_name is %s",
out_name);
auto col = static_cast<size_t>(Attr<int>("col"));
......
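The hint text removed in the three C++ hunks above recommended fetching by `Variable` object rather than by string literal. A brief usage sketch of the two `fetch_list` forms (fluid 1.x API assumed):

```python
import numpy as np
import paddle.fluid as fluid

x = fluid.data(name="x", shape=[1], dtype="float32")
out = fluid.layers.scale(x, scale=2.0)

exe = fluid.Executor(fluid.CPUPlace())
feed = {"x": np.ones((1, ), "float32")}

# Recommended form: pass the Variable object itself.
res, = exe.run(fetch_list=[out], feed=feed)

# A bare string only works when a variable with exactly that name
# exists in the scope; otherwise the PADDLE_ENFORCE above fires with
# "Cannot find fetch variable in scope".
res_by_name, = exe.run(fetch_list=[out.name], feed=feed)
```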
@@ -691,7 +691,11 @@ def iou_similarity(x, y, box_normalized=True, name=None):
# [0. ]] with shape: [2, 1]
"""
helper = LayerHelper("iou_similarity", **locals())
out = helper.create_variable_for_type_inference(dtype=x.dtype)
if name is None:
out = helper.create_variable_for_type_inference(dtype=x.dtype)
else:
out = helper.create_variable(
name=name, dtype=x.dtype, persistable=False)
helper.append_op(
type="iou_similarity",
@@ -823,8 +827,12 @@ def box_coder(prior_box,
"""
helper = LayerHelper("box_coder", **locals())
output_box = helper.create_variable_for_type_inference(
dtype=prior_box.dtype)
if name is None:
output_box = helper.create_variable_for_type_inference(
dtype=prior_box.dtype)
else:
output_box = helper.create_variable(
name=name, dtype=prior_box.dtype, persistable=False)
inputs = {"PriorBox": prior_box, "TargetBox": target_box}
attrs = {
@@ -868,7 +876,11 @@ def polygon_box_transform(input, name=None):
out = fluid.layers.polygon_box_transform(input)
"""
helper = LayerHelper("polygon_box_transform", **locals())
output = helper.create_variable_for_type_inference(dtype=input.dtype)
if name is None:
output = helper.create_variable_for_type_inference(dtype=input.dtype)
else:
output = helper.create_variable(
            name=name, dtype=input.dtype, persistable=False)
helper.append_op(
type="polygon_box_transform",
@@ -967,7 +979,11 @@ def yolov3_loss(x,
raise TypeError(
"Attr use_label_smooth of yolov3_loss must be a bool value")
loss = helper.create_variable_for_type_inference(dtype=x.dtype)
if name is None:
loss = helper.create_variable_for_type_inference(dtype=x.dtype)
else:
loss = helper.create_variable(
name=name, dtype=x.dtype, persistable=False)
objectness_mask = helper.create_variable_for_type_inference(dtype='int32')
gt_match_mask = helper.create_variable_for_type_inference(dtype='int32')
......
@@ -1427,7 +1427,11 @@ def sigmoid_cross_entropy_with_logits(x,
helper = LayerHelper("sigmoid_cross_entropy_with_logits", **locals())
out = helper.create_variable_for_type_inference(dtype=x.dtype)
if name is None:
out = helper.create_variable_for_type_inference(dtype=x.dtype)
else:
out = helper.create_variable(
name=name, dtype=x.dtype, persistable=False)
helper.append_op(
type="sigmoid_cross_entropy_with_logits",
......
@@ -7885,7 +7885,11 @@ def gather_nd(input, index, name=None):
"""
helper = LayerHelper('gather_nd', **locals())
dtype = helper.input_dtype()
output = helper.create_variable_for_type_inference(dtype)
if name is None:
output = helper.create_variable_for_type_inference(dtype)
else:
output = helper.create_variable(
name=name, dtype=dtype, persistable=False)
helper.append_op(
type="gather_nd",
inputs={"X": input,
@@ -8048,7 +8052,11 @@ def scatter_nd_add(ref, index, updates, name=None):
helper = LayerHelper('scatter_nd_add', **locals())
dtype = helper.input_dtype(input_param_name='ref')
output = helper.create_variable_for_type_inference(dtype)
if name is None:
output = helper.create_variable_for_type_inference(dtype)
else:
output = helper.create_variable(
name=name, dtype=dtype, persistable=False)
helper.append_op(
type="scatter_nd_add",
inputs={"X": ref,
@@ -10631,7 +10639,11 @@ def _elementwise_op(helper):
axis = helper.kwargs.get('axis', -1)
use_mkldnn = helper.kwargs.get('use_mkldnn', False)
name = helper.kwargs.get('name', None)
out = helper.create_variable_for_type_inference(dtype=x.dtype)
if name is None:
out = helper.create_variable_for_type_inference(dtype=x.dtype)
else:
out = helper.create_variable(
name=name, dtype=x.dtype, persistable=False)
helper.append_op(
type=op_type,
@@ -10726,7 +10738,11 @@ def scale(x, scale=1.0, bias=0.0, bias_after_scale=True, act=None, name=None):
else:
attrs['scale'] = float(scale)
helper = LayerHelper('scale', **locals())
out = helper.create_variable_for_type_inference(dtype=x.dtype)
if name is None:
out = helper.create_variable_for_type_inference(dtype=x.dtype)
else:
out = helper.create_variable(
name=name, dtype=x.dtype, persistable=False)
helper.append_op(
type='scale', inputs=inputs, outputs={'Out': out}, attrs=attrs)
@@ -11362,7 +11378,11 @@ def _logical_op(op_name, x, y, out=None, name=None, binary_op=True):
assert x.dtype == y.dtype
if out is None:
out = helper.create_variable_for_type_inference(dtype=x.dtype)
if name is None:
out = helper.create_variable_for_type_inference(dtype=x.dtype)
else:
out = helper.create_variable(
name=name, dtype=x.dtype, persistable=False)
if binary_op:
helper.append_op(
@@ -11684,7 +11704,11 @@ def mean(x, name=None):
helper = LayerHelper("mean", **locals())
check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'], 'mean')
out = helper.create_variable_for_type_inference(dtype=x.dtype)
if name is None:
out = helper.create_variable_for_type_inference(dtype=x.dtype)
else:
out = helper.create_variable(
name=name, dtype=x.dtype, persistable=False)
helper.append_op(
type="mean", inputs={"X": x}, attrs={}, outputs={"Out": out})
@@ -11767,7 +11791,11 @@ def mul(x, y, x_num_col_dims=1, y_num_col_dims=1, name=None):
helper = LayerHelper("mul", **locals())
check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'], 'mul')
check_variable_and_dtype(y, 'y', ['float16', 'float32', 'float64'], 'mul')
out = helper.create_variable_for_type_inference(dtype=x.dtype)
if name is None:
out = helper.create_variable_for_type_inference(dtype=x.dtype)
else:
out = helper.create_variable(
name=name, dtype=x.dtype, persistable=False)
helper.append_op(
type="mul", inputs={"X": x,
@@ -11813,7 +11841,11 @@ def maxout(x, groups, name=None, axis=1):
if axis == -1:
axis = 3
out = helper.create_variable_for_type_inference(dtype=x.dtype)
if name is None:
out = helper.create_variable_for_type_inference(dtype=x.dtype)
else:
out = helper.create_variable(
name=name, dtype=x.dtype, persistable=False)
helper.append_op(
type="maxout",
@@ -11913,7 +11945,12 @@ def space_to_depth(x, blocksize, name=None):
if not (isinstance(blocksize, int)):
raise ValueError("blocksize must be a python Int")
out = helper.create_variable_for_type_inference(dtype=x.dtype)
if name is None:
out = helper.create_variable_for_type_inference(
dtype=x.dtype) #fix create
else:
out = helper.create_variable(
name=name, dtype=x.dtype, persistable=False)
helper.append_op(
type="space_to_depth",
@@ -11986,7 +12023,12 @@ def affine_channel(x,
"""
helper = LayerHelper("affine_channel", **locals())
out = helper.create_variable_for_type_inference(dtype=x.dtype)
if name is None:
out = helper.create_variable_for_type_inference(dtype=x.dtype)
else:
out = helper.create_variable(
name=name, dtype=x.dtype, persistable=False)
helper.append_op(
type="affine_channel",
@@ -12100,7 +12142,11 @@ def similarity_focus(input, axis, indexes, name=None):
if len(indexes) == 0:
raise ValueError("indexes can not be empty.")
out = helper.create_variable_for_type_inference(dtype=input.dtype)
if name is None:
out = helper.create_variable_for_type_inference(dtype=input.dtype)
else:
out = helper.create_variable(
name=name, dtype=input.dtype, persistable=False)
helper.append_op(
type='similarity_focus',
inputs={'X': input},
@@ -12302,7 +12348,11 @@ def log_loss(input, label, epsilon=1e-4, name=None):
"""
helper = LayerHelper('log_loss', **locals())
loss = helper.create_variable_for_type_inference(dtype=input.dtype)
if name is None:
loss = helper.create_variable_for_type_inference(dtype=input.dtype)
else:
loss = helper.create_variable(
name=name, dtype=input.dtype, persistable=False)
helper.append_op(
type='log_loss',
@@ -12366,7 +12416,10 @@ def add_position_encoding(input, alpha, beta, name=None):
helper = LayerHelper('add_position_encoding', **locals())
dtype = helper.input_dtype()
out = helper.create_variable_for_type_inference(dtype=dtype)
if name is None:
out = helper.create_variable_for_type_inference(dtype=dtype)
else:
out = helper.create_variable(name=name, dtype=dtype, persistable=False)
helper.append_op(
type="add_position_encoding",
@@ -12433,7 +12486,11 @@ def bilinear_tensor_product(x,
w = helper.create_parameter(
attr=helper.param_attr, shape=param_shape, dtype=dtype, is_bias=False)
out = helper.create_variable_for_type_inference(dtype=dtype)
if name is None:
out = helper.create_variable_for_type_inference(dtype=dtype)
else:
out = helper.create_variable(name=name, dtype=dtype, persistable=False)
inputs = {"X": x, "Y": y, "Weight": w}
if helper.bias_attr:
......
@@ -1269,7 +1269,10 @@ def sequence_mask(x, maxlen=None, dtype='int64', name=None):
"""
helper = LayerHelper('sequence_mask', **locals())
out = helper.create_variable_for_type_inference(dtype=dtype)
if name is None:
out = helper.create_variable_for_type_inference(dtype=dtype)
else:
out = helper.create_variable_for_type_inference(dtype=dtype, name=name)
inputs = {'X': [x]}
attrs = {'out_dtype': out.dtype}
@@ -1334,7 +1337,11 @@ def sequence_reverse(x, name=None):
assert not in_dygraph_mode(), (
"sequence layer is not supported in dygraph mode yet.")
helper = LayerHelper("sequence_reverse", **locals())
out = helper.create_variable_for_type_inference(dtype=x.dtype)
if name is None:
out = helper.create_variable_for_type_inference(dtype=x.dtype)
else:
out = helper.create_variable(
name=name, dtype=x.dtype, persistable=False)
helper.append_op(
type="sequence_reverse",
......
@@ -78,7 +78,7 @@ class TestCacheProgramWithOptimizer(unittest.TestCase):
# set optimizer
# TODO: Need a better interfaces to set optimizer.
program_translator = ProgramTranslator()
program_translator.set_optimizer(adam, 'avg_loss.tmp_1')
program_translator.set_optimizer(adam, 'avg_loss')
for batch_id in range(self.batch_num):
pred, avg_loss = static_net(self.data)
......
@@ -51,8 +51,9 @@ class TestDyToStaticSaveInferenceModel(unittest.TestCase):
layer = SimpleFcLayer(fc_size)
program_translator = ProgramTranslator.get_instance()
program_cache = ProgramTranslator().get_program_cache
adam = fluid.optimizer.SGD(learning_rate=0.001)
program_translator.set_optimizer(adam, 'mean.tmp_0')
program_translator.set_optimizer(adam, 'mean')
for i in range(5):
out = layer(x)
......
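With the revert, a layer given a user name such as 'avg_loss' or 'mean' once again produces a variable carrying that literal name, so the two dygraph-to-static tests above go back to referencing the plain names instead of the '.tmp_N' temporaries. The revert also deletes the regression test that #23199 introduced, reproduced in full below: its `test_conflict` case asserted that two `elementwise_add` ops sharing `name="add"` yield distinct fetched values (5.0, 8.0, 13.0), and `test_layers` exercised several of the layers touched above: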
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import paddle.fluid as fluid
import numpy as np
import unittest


class TestOpNameConflict(unittest.TestCase):
def test_conflict(self):
main = fluid.Program()
startup = fluid.Program()
with fluid.unique_name.guard():
with fluid.program_guard(main, startup):
x = fluid.data(name="x", shape=[1], dtype='float32')
y = fluid.data(name="y", shape=[1], dtype='float32')
z = fluid.data(name="z", shape=[1], dtype='float32')
m = fluid.layers.elementwise_add(x, y, name="add")
n = fluid.layers.elementwise_add(y, z, name="add")
p = m + n
place = fluid.CPUPlace()
exe = fluid.Executor(place)
m_v, n_v, p_v = exe.run(feed={
"x": np.ones((1), "float32") * 2,
"y": np.ones((1), "float32") * 3,
"z": np.ones((1), "float32") * 5
},
fetch_list=[m, n, p])
self.assertEqual(m_v[0], 5.0)
self.assertEqual(n_v[0], 8.0)
                self.assertEqual(p_v[0], 13.0)

    def test_layers(self):
main = fluid.Program()
startup = fluid.Program()
with fluid.unique_name.guard():
with fluid.program_guard(main, startup):
place = fluid.CUDAPlace(0) if fluid.core.is_compiled_with_cuda(
) else fluid.CPUPlace()
exe = fluid.Executor(place)
data = fluid.data(
name='data', shape=[None, 1, 2, 2], dtype='float32')
tensor = fluid.data(
name='tensor', shape=[None, 32, 64], dtype='float32')
x = fluid.data(
name='x', shape=[None, 1], dtype='float32', lod_level=1)
input_scale = fluid.layers.create_parameter(
shape=[1],
dtype="float32",
default_initializer=fluid.initializer.Constant(2.0))
input_bias = fluid.layers.create_parameter(
shape=[1],
dtype="float32",
default_initializer=fluid.initializer.Constant(0.5))
out_affine = fluid.layers.affine_channel(
data, scale=input_scale, bias=input_bias)
out_similarity = fluid.layers.similarity_focus(
input=data, axis=1, indexes=[0])
position_tensor = fluid.layers.add_position_encoding(
input=tensor, alpha=1.0, beta=1.0)
x_reversed = fluid.layers.sequence_reverse(x)
exe.run(fluid.default_startup_program())
test_program = fluid.default_main_program().clone(for_test=True)
x_d = fluid.create_lod_tensor(
np.array([[1.1], [2.2], [3.3], [4.4]]).astype('float32'),
[[1, 3]], place)
outs = exe.run(
test_program,
fetch_list=[
out_affine, out_similarity, position_tensor, x_reversed
],
feed={
data.name: np.ones([1, 1, 2, 2]).astype('float32'),
tensor.name: np.ones([1, 32, 64]).astype('float32'),
x.name: x_d
},
                    return_numpy=False)


if __name__ == '__main__':
unittest.main()