Unverified  Commit 84cd45f6 authored by Zhen Wang, committed by GitHub

Solve the conflict of ops with the same name, test for CI. (#23573)

* solve the conflict of ops with the same name. test=develop
Parent 795a0a9a
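
For context, a minimal sketch of the failure this commit addresses, adapted from the new unit test added at the bottom of this diff: two layers are given the same user-supplied `name`, and before this change the second op's output variable overwrote the first's. The fix is to fetch by `Variable` and to always auto-generate output variable names.

    import numpy as np
    import paddle.fluid as fluid

    # Two elementwise_add ops share the user-supplied name "add". Fetching by
    # Variable (fetch_list=[m, n]) stays unambiguous; fetching by the string
    # "add" would be ambiguous, which the improved error messages now explain.
    x = fluid.data(name="x", shape=[1], dtype="float32")
    y = fluid.data(name="y", shape=[1], dtype="float32")
    z = fluid.data(name="z", shape=[1], dtype="float32")
    m = fluid.layers.elementwise_add(x, y, name="add")
    n = fluid.layers.elementwise_add(y, z, name="add")

    exe = fluid.Executor(fluid.CPUPlace())
    m_v, n_v = exe.run(feed={"x": np.ones((1), "float32") * 2,
                             "y": np.ones((1), "float32") * 3,
                             "z": np.ones((1), "float32") * 5},
                       fetch_list=[m, n])
    print(m_v[0], n_v[0])  # expected: 5.0 8.0
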
@@ -150,8 +150,13 @@ void FastThreadedSSAGraphExecutor::InsertFetchOps(
             "Possible reasons are:\n"
             " 1. The variable to be fetched is not defined in main program.\n"
             " 2. The variable to be fetched is not an input or output of any "
-            "operator.",
-            var_name));
+            "operator.\n"
+            " 3. Confirm that you have used the fetch `Variable` format "
+            "instead of the string literal('%s') in `fetch_list` parameter "
+            "when using `executor.run` method. In other words, the format of "
+            "`executor.run(fetch_list=[fetch_var])`(fetch_var is a Variable) "
+            "is recommended.",
+            var_name, var_name));
     auto &vars = fetched_var_it->second;
......
@@ -186,8 +186,13 @@ void ThreadedSSAGraphExecutor::InsertFetchOps(
             "Possible reasons are:\n"
             " 1. The variable to be fetched is not defined in main program.\n"
             " 2. The variable to be fetched is not an input or output of any "
-            "operator.",
-            var_name));
+            "operator.\n"
+            " 3. Confirm that you have used the fetch `Variable` format "
+            "instead of the string literal('%s') in `fetch_list` parameter "
+            "when using `executor.run` method. In other words, the format of "
+            "`executor.run(fetch_list=[fetch_var])`(fetch_var is a Variable) "
+            "is recommended.",
+            var_name, var_name));
     auto &vars = fetched_var_it->second;
......
......@@ -39,8 +39,14 @@ class FetchOp : public framework::OperatorBase {
PADDLE_ENFORCE_NOT_NULL(
fetch_var,
platform::errors::NotFound(
"Input variable(%s) cannot be found in scope for operator 'Fetch'.",
fetch_var_name));
"Input variable(%s) cannot be found in scope for operator 'Fetch'."
"Confirm that you have used the fetch `Variable` format "
"instead of the string literal('%s') in `fetch_list` "
"parameter when using `executor.run` method. In other "
"words, the format of "
"`executor.run(fetch_list=[fetch_var])`(fetch_var is a "
"Variable) is recommended.",
fetch_var_name, fetch_var_name));
auto out_name = Output("Out");
auto *out_var = scope.FindVar(out_name);
......
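
The three C++ hunks above extend the fetch error message. A hedged sketch of the situation it warns about, with an illustrative layer name (`my_scale` is hypothetical, not from this commit): a string in `fetch_list` that does not match a variable in scope raises the NotFound error, while passing the `Variable` itself always works.

    import numpy as np
    import paddle.fluid as fluid

    x = fluid.data(name="x", shape=[1], dtype="float32")
    out = fluid.layers.scale(x, scale=2.0, name="my_scale")

    exe = fluid.Executor(fluid.CPUPlace())
    exe.run(fluid.default_startup_program())
    feed = {"x": np.ones((1), "float32")}

    # Recommended: fetch by Variable; works regardless of how the output
    # variable was named internally.
    res, = exe.run(feed=feed, fetch_list=[out])

    # Risky: after this commit, `scale` no longer names its output variable
    # "my_scale" (the name is auto-generated), so fetching the string literal
    # raises the NotFound error whose message the hunks above extend.
    # res, = exe.run(feed=feed, fetch_list=["my_scale"])
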
@@ -692,11 +692,7 @@ def iou_similarity(x, y, box_normalized=True, name=None):
             # [0. ]] with shape: [2, 1]
     """
     helper = LayerHelper("iou_similarity", **locals())
-    if name is None:
-        out = helper.create_variable_for_type_inference(dtype=x.dtype)
-    else:
-        out = helper.create_variable(
-            name=name, dtype=x.dtype, persistable=False)
+    out = helper.create_variable_for_type_inference(dtype=x.dtype)
     helper.append_op(
         type="iou_similarity",
@@ -828,12 +824,8 @@ def box_coder(prior_box,
     """
     helper = LayerHelper("box_coder", **locals())
-    if name is None:
-        output_box = helper.create_variable_for_type_inference(
-            dtype=prior_box.dtype)
-    else:
-        output_box = helper.create_variable(
-            name=name, dtype=prior_box.dtype, persistable=False)
+    output_box = helper.create_variable_for_type_inference(
+        dtype=prior_box.dtype)
     inputs = {"PriorBox": prior_box, "TargetBox": target_box}
     attrs = {
@@ -877,11 +869,7 @@ def polygon_box_transform(input, name=None):
             out = fluid.layers.polygon_box_transform(input)
     """
     helper = LayerHelper("polygon_box_transform", **locals())
-    if name is None:
-        output = helper.create_variable_for_type_inference(dtype=input.dtype)
-    else:
-        output = helper.create_variable(
-            name=name, dtype=prior_box.input, persistable=False)
+    output = helper.create_variable_for_type_inference(dtype=input.dtype)
     helper.append_op(
         type="polygon_box_transform",
@@ -980,11 +968,7 @@ def yolov3_loss(x,
         raise TypeError(
             "Attr use_label_smooth of yolov3_loss must be a bool value")
-    if name is None:
-        loss = helper.create_variable_for_type_inference(dtype=x.dtype)
-    else:
-        loss = helper.create_variable(
-            name=name, dtype=x.dtype, persistable=False)
+    loss = helper.create_variable_for_type_inference(dtype=x.dtype)
     objectness_mask = helper.create_variable_for_type_inference(dtype='int32')
     gt_match_mask = helper.create_variable_for_type_inference(dtype='int32')
......
@@ -1427,11 +1427,7 @@ def sigmoid_cross_entropy_with_logits(x,
     helper = LayerHelper("sigmoid_cross_entropy_with_logits", **locals())
-    if name is None:
-        out = helper.create_variable_for_type_inference(dtype=x.dtype)
-    else:
-        out = helper.create_variable(
-            name=name, dtype=x.dtype, persistable=False)
+    out = helper.create_variable_for_type_inference(dtype=x.dtype)
     helper.append_op(
         type="sigmoid_cross_entropy_with_logits",
......
@@ -7859,11 +7859,7 @@ def gather_nd(input, index, name=None):
     """
     helper = LayerHelper('gather_nd', **locals())
    dtype = helper.input_dtype()
-    if name is None:
-        output = helper.create_variable_for_type_inference(dtype)
-    else:
-        output = helper.create_variable(
-            name=name, dtype=dtype, persistable=False)
+    output = helper.create_variable_for_type_inference(dtype)
     helper.append_op(
         type="gather_nd",
         inputs={"X": input,
@@ -8026,11 +8022,7 @@ def scatter_nd_add(ref, index, updates, name=None):
     helper = LayerHelper('scatter_nd_add', **locals())
     dtype = helper.input_dtype(input_param_name='ref')
-    if name is None:
-        output = helper.create_variable_for_type_inference(dtype)
-    else:
-        output = helper.create_variable(
-            name=name, dtype=dtype, persistable=False)
+    output = helper.create_variable_for_type_inference(dtype)
     helper.append_op(
         type="scatter_nd_add",
         inputs={"X": ref,
@@ -10606,11 +10598,7 @@ def _elementwise_op(helper):
     axis = helper.kwargs.get('axis', -1)
     use_mkldnn = helper.kwargs.get('use_mkldnn', False)
     name = helper.kwargs.get('name', None)
-    if name is None:
-        out = helper.create_variable_for_type_inference(dtype=x.dtype)
-    else:
-        out = helper.create_variable(
-            name=name, dtype=x.dtype, persistable=False)
+    out = helper.create_variable_for_type_inference(dtype=x.dtype)
     helper.append_op(
         type=op_type,
@@ -10705,11 +10693,7 @@ def scale(x, scale=1.0, bias=0.0, bias_after_scale=True, act=None, name=None):
     else:
         attrs['scale'] = float(scale)
     helper = LayerHelper('scale', **locals())
-    if name is None:
-        out = helper.create_variable_for_type_inference(dtype=x.dtype)
-    else:
-        out = helper.create_variable(
-            name=name, dtype=x.dtype, persistable=False)
+    out = helper.create_variable_for_type_inference(dtype=x.dtype)
     helper.append_op(
         type='scale', inputs=inputs, outputs={'Out': out}, attrs=attrs)
@@ -11345,11 +11329,7 @@ def _logical_op(op_name, x, y, out=None, name=None, binary_op=True):
         assert x.dtype == y.dtype
     if out is None:
-        if name is None:
-            out = helper.create_variable_for_type_inference(dtype=x.dtype)
-        else:
-            out = helper.create_variable(
-                name=name, dtype=x.dtype, persistable=False)
+        out = helper.create_variable_for_type_inference(dtype=x.dtype)
     if binary_op:
         helper.append_op(
@@ -11671,11 +11651,7 @@ def mean(x, name=None):
     helper = LayerHelper("mean", **locals())
     check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'], 'mean')
-    if name is None:
-        out = helper.create_variable_for_type_inference(dtype=x.dtype)
-    else:
-        out = helper.create_variable(
-            name=name, dtype=x.dtype, persistable=False)
+    out = helper.create_variable_for_type_inference(dtype=x.dtype)
     helper.append_op(
         type="mean", inputs={"X": x}, attrs={}, outputs={"Out": out})
@@ -11758,11 +11734,7 @@ def mul(x, y, x_num_col_dims=1, y_num_col_dims=1, name=None):
     helper = LayerHelper("mul", **locals())
     check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'], 'mul')
     check_variable_and_dtype(y, 'y', ['float16', 'float32', 'float64'], 'mul')
-    if name is None:
-        out = helper.create_variable_for_type_inference(dtype=x.dtype)
-    else:
-        out = helper.create_variable(
-            name=name, dtype=x.dtype, persistable=False)
+    out = helper.create_variable_for_type_inference(dtype=x.dtype)
     helper.append_op(
         type="mul", inputs={"X": x,
@@ -11808,11 +11780,7 @@ def maxout(x, groups, name=None, axis=1):
     if axis == -1:
         axis = 3
-    if name is None:
-        out = helper.create_variable_for_type_inference(dtype=x.dtype)
-    else:
-        out = helper.create_variable(
-            name=name, dtype=x.dtype, persistable=False)
+    out = helper.create_variable_for_type_inference(dtype=x.dtype)
     helper.append_op(
         type="maxout",
@@ -11912,12 +11880,7 @@ def space_to_depth(x, blocksize, name=None):
     if not (isinstance(blocksize, int)):
         raise ValueError("blocksize must be a python Int")
-    if name is None:
-        out = helper.create_variable_for_type_inference(
-            dtype=x.dtype)  #fix create
-    else:
-        out = helper.create_variable(
-            name=name, dtype=x.dtype, persistable=False)
+    out = helper.create_variable_for_type_inference(dtype=x.dtype)
     helper.append_op(
         type="space_to_depth",
@@ -11990,12 +11953,7 @@ def affine_channel(x,
     """
     helper = LayerHelper("affine_channel", **locals())
-    if name is None:
-        out = helper.create_variable_for_type_inference(dtype=x.dtype)
-    else:
-        out = helper.create_variable(
-            name=name, dtype=x.dtype, persistable=False)
+    out = helper.create_variable_for_type_inference(dtype=x.dtype)
     helper.append_op(
         type="affine_channel",
@@ -12109,11 +12067,7 @@ def similarity_focus(input, axis, indexes, name=None):
     if len(indexes) == 0:
         raise ValueError("indexes can not be empty.")
-    if name is None:
-        out = helper.create_variable_for_type_inference(dtype=input.dtype)
-    else:
-        out = helper.create_variable(
-            name=name, dtype=input.dtype, persistable=False)
+    out = helper.create_variable_for_type_inference(dtype=input.dtype)
     helper.append_op(
         type='similarity_focus',
         inputs={'X': input},
@@ -12318,11 +12272,7 @@ def log_loss(input, label, epsilon=1e-4, name=None):
     """
     helper = LayerHelper('log_loss', **locals())
-    if name is None:
-        loss = helper.create_variable_for_type_inference(dtype=input.dtype)
-    else:
-        loss = helper.create_variable(
-            name=name, dtype=input.dtype, persistable=False)
+    loss = helper.create_variable_for_type_inference(dtype=input.dtype)
     helper.append_op(
         type='log_loss',
@@ -12386,10 +12336,7 @@ def add_position_encoding(input, alpha, beta, name=None):
     helper = LayerHelper('add_position_encoding', **locals())
     dtype = helper.input_dtype()
-    if name is None:
-        out = helper.create_variable_for_type_inference(dtype=dtype)
-    else:
-        out = helper.create_variable(name=name, dtype=dtype, persistable=False)
+    out = helper.create_variable_for_type_inference(dtype=dtype)
     helper.append_op(
         type="add_position_encoding",
@@ -12456,11 +12403,7 @@ def bilinear_tensor_product(x,
     w = helper.create_parameter(
         attr=helper.param_attr, shape=param_shape, dtype=dtype, is_bias=False)
-    if name is None:
-        out = helper.create_variable_for_type_inference(dtype=dtype)
-    else:
-        out = helper.create_variable(name=name, dtype=dtype, persistable=False)
+    out = helper.create_variable_for_type_inference(dtype=dtype)
     inputs = {"X": x, "Y": y, "Weight": w}
     if helper.bias_attr:
......
@@ -1269,10 +1269,7 @@ def sequence_mask(x, maxlen=None, dtype='int64', name=None):
     """
     helper = LayerHelper('sequence_mask', **locals())
-    if name is None:
-        out = helper.create_variable_for_type_inference(dtype=dtype)
-    else:
-        out = helper.create_variable_for_type_inference(dtype=dtype, name=name)
+    out = helper.create_variable_for_type_inference(dtype=dtype)
     inputs = {'X': [x]}
     attrs = {'out_dtype': out.dtype}
@@ -1337,11 +1334,7 @@ def sequence_reverse(x, name=None):
     assert not in_dygraph_mode(), (
         "sequence layer is not supported in dygraph mode yet.")
     helper = LayerHelper("sequence_reverse", **locals())
-    if name is None:
-        out = helper.create_variable_for_type_inference(dtype=x.dtype)
-    else:
-        out = helper.create_variable(
-            name=name, dtype=x.dtype, persistable=False)
+    out = helper.create_variable_for_type_inference(dtype=x.dtype)
     helper.append_op(
         type="sequence_reverse",
......
@@ -51,7 +51,6 @@ class TestDyToStaticSaveInferenceModel(unittest.TestCase):
         layer = SimpleFcLayer(fc_size)
         program_translator = ProgramTranslator.get_instance()
-        program_cache = ProgramTranslator().get_program_cache
         adam = fluid.optimizer.SGD(learning_rate=0.001)
         program_translator.set_optimizer(adam, index_of_loss=0)
......
@@ -75,10 +75,12 @@ class TestImperativeStaticModelRunnerMnist(unittest.TestCase):
         return _reader_impl

     def train_and_save_model(self):
         with new_program_scope():
             startup_program = fluid.default_startup_program()
             main_program = fluid.default_main_program()
-            img = fluid.data(name='img', shape=[None, 1, 28, 28], dtype='float32')
+            img = fluid.data(
+                name='img', shape=[None, 1, 28, 28], dtype='float32')
             label = fluid.data(name='label', shape=[None, 1], dtype='int64')
             prediction, avg_loss = static_train_net(img, label)
@@ -248,11 +250,12 @@ class TestImperativeStaticModelRunnerMnist(unittest.TestCase):
             key += core.loaded_var_suffix()
             self.assertTrue(np.array_equal(value, dy_param_init_value[key]))
-        self.assertTrue(np.allclose(static_out, dy_out))
+        # np.testing.assert_array_almost_equal(static_out, dy_out)
+        self.assertTrue(np.allclose(static_out, dy_out, atol=1e-04))
         for key, value in six.iteritems(static_param_value):
             key += core.loaded_var_suffix()
-            self.assertTrue(np.allclose(value, dy_param_value[key], atol=1e-5))
+            self.assertTrue(np.allclose(value, dy_param_value[key], atol=1e-4))

     def test_mnist_with_params_filename(self):
         self.save_dirname = "mnist.inference.model"
@@ -275,11 +278,12 @@ class TestImperativeStaticModelRunnerMnist(unittest.TestCase):
             key += core.loaded_var_suffix()
             self.assertTrue(np.array_equal(value, dy_param_init_value[key]))
-        self.assertTrue(np.allclose(static_out, dy_out))
+        # np.testing.assert_array_almost_equal(static_out, dy_out)
+        self.assertTrue(np.allclose(static_out, dy_out, atol=1e-04))
         for key, value in six.iteritems(static_param_value):
             key += core.loaded_var_suffix()
-            self.assertTrue(np.allclose(value, dy_param_value[key], atol=1e-5))
+            self.assertTrue(np.allclose(value, dy_param_value[key], atol=1e-4))

 if __name__ == '__main__':
......
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import paddle.fluid as fluid
import numpy as np
import unittest


class TestOpNameConflict(unittest.TestCase):
    def test_conflict(self):
        main = fluid.Program()
        startup = fluid.Program()
        with fluid.unique_name.guard():
            with fluid.program_guard(main, startup):
                x = fluid.data(name="x", shape=[1], dtype='float32')
                y = fluid.data(name="y", shape=[1], dtype='float32')
                z = fluid.data(name="z", shape=[1], dtype='float32')

                m = fluid.layers.elementwise_add(x, y, name="add")
                n = fluid.layers.elementwise_add(y, z, name="add")
                p = m + n

                place = fluid.CPUPlace()
                exe = fluid.Executor(place)
                m_v, n_v, p_v = exe.run(
                    feed={
                        "x": np.ones((1), "float32") * 2,
                        "y": np.ones((1), "float32") * 3,
                        "z": np.ones((1), "float32") * 5
                    },
                    fetch_list=[m, n, p])

                self.assertEqual(m_v[0], 5.0)
                self.assertEqual(n_v[0], 8.0)
                self.assertEqual(p_v[0], 13.0)

    def test_layers(self):
        main = fluid.Program()
        startup = fluid.Program()
        with fluid.unique_name.guard():
            with fluid.program_guard(main, startup):
                place = fluid.CUDAPlace(0) if fluid.core.is_compiled_with_cuda(
                ) else fluid.CPUPlace()
                exe = fluid.Executor(place)

                data = fluid.data(
                    name='data', shape=[None, 1, 2, 2], dtype='float32')
                tensor = fluid.data(
                    name='tensor', shape=[None, 32, 64], dtype='float32')
                x = fluid.data(
                    name='x', shape=[None, 1], dtype='float32', lod_level=1)

                input_scale = fluid.layers.create_parameter(
                    shape=[1],
                    dtype="float32",
                    default_initializer=fluid.initializer.Constant(2.0))
                input_bias = fluid.layers.create_parameter(
                    shape=[1],
                    dtype="float32",
                    default_initializer=fluid.initializer.Constant(0.5))
                out_affine = fluid.layers.affine_channel(
                    data, scale=input_scale, bias=input_bias)
                out_similarity = fluid.layers.similarity_focus(
                    input=data, axis=1, indexes=[0])
                position_tensor = fluid.layers.add_position_encoding(
                    input=tensor, alpha=1.0, beta=1.0)
                x_reversed = fluid.layers.sequence_reverse(x)

                exe.run(fluid.default_startup_program())
                test_program = fluid.default_main_program().clone(
                    for_test=True)

                x_d = fluid.create_lod_tensor(
                    np.array([[1.1], [2.2], [3.3], [4.4]]).astype('float32'),
                    [[1, 3]], place)
                outs = exe.run(
                    test_program,
                    fetch_list=[
                        out_affine, out_similarity, position_tensor, x_reversed
                    ],
                    feed={
                        data.name: np.ones([1, 1, 2, 2]).astype('float32'),
                        tensor.name: np.ones([1, 32, 64]).astype('float32'),
                        x.name: x_d
                    },
                    return_numpy=False)


if __name__ == '__main__':
    unittest.main()