Unverified commit 84cd45f6, authored by Zhen Wang, committed by GitHub

Solve the conflict of ops with the same name, test for CI. (#23573)

* solve the conflict of ops with the same name. test=develop
Parent 795a0a9a
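Note: a minimal reproduction of the symptom this commit fixes, using the fluid 1.x API and mirroring the new unit test added at the end of this diff. Before the change, giving two ops the same `name` could make them share one output variable, so fetched results were wrong; the concrete values below are only illustrative.

# Two ops configured with the same user-facing name "add".
import numpy as np
import paddle.fluid as fluid

x = fluid.data(name="x", shape=[1], dtype='float32')
y = fluid.data(name="y", shape=[1], dtype='float32')
z = fluid.data(name="z", shape=[1], dtype='float32')
m = fluid.layers.elementwise_add(x, y, name="add")
n = fluid.layers.elementwise_add(y, z, name="add")  # same `name` as above

exe = fluid.Executor(fluid.CPUPlace())
m_v, n_v = exe.run(feed={"x": np.ones((1), "float32") * 2,
                         "y": np.ones((1), "float32") * 3,
                         "z": np.ones((1), "float32") * 5},
                   fetch_list=[m, n])
print(m_v[0], n_v[0])  # with the fix: 5.0 8.0; before, both ops could
                       # write to the single variable named "add"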
@@ -150,8 +150,13 @@ void FastThreadedSSAGraphExecutor::InsertFetchOps(
             "Possible reasons are:\n"
             " 1. The variable to be fetched is not defined in main program.\n"
             " 2. The variable to be fetched is not an input or output of any "
-            "operator.",
-            var_name));
+            "operator.\n"
+            " 3. Confirm that you have used the fetch `Variable` format "
+            "instead of the string literal('%s') in `fetch_list` parameter "
+            "when using `executor.run` method. In other words, the format of "
+            "`executor.run(fetch_list=[fetch_var])`(fetch_var is a Variable) "
+            "is recommended.",
+            var_name, var_name));
     auto &vars = fetched_var_it->second;
@@ -186,8 +186,13 @@ void ThreadedSSAGraphExecutor::InsertFetchOps(
             "Possible reasons are:\n"
             " 1. The variable to be fetched is not defined in main program.\n"
             " 2. The variable to be fetched is not an input or output of any "
-            "operator.",
-            var_name));
+            "operator.\n"
+            " 3. Confirm that you have used the fetch `Variable` format "
+            "instead of the string literal('%s') in `fetch_list` parameter "
+            "when using `executor.run` method. In other words, the format of "
+            "`executor.run(fetch_list=[fetch_var])`(fetch_var is a Variable) "
+            "is recommended.",
+            var_name, var_name));
     auto &vars = fetched_var_it->second;
@@ -39,8 +39,14 @@ class FetchOp : public framework::OperatorBase {
     PADDLE_ENFORCE_NOT_NULL(
         fetch_var,
         platform::errors::NotFound(
-            "Input variable(%s) cannot be found in scope for operator 'Fetch'.",
-            fetch_var_name));
+            "Input variable(%s) cannot be found in scope for operator 'Fetch'."
+            "Confirm that you have used the fetch `Variable` format "
+            "instead of the string literal('%s') in `fetch_list` "
+            "parameter when using `executor.run` method. In other "
+            "words, the format of "
+            "`executor.run(fetch_list=[fetch_var])`(fetch_var is a "
+            "Variable) is recommended.",
+            fetch_var_name, fetch_var_name));
     auto out_name = Output("Out");
     auto *out_var = scope.FindVar(out_name);
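The messages added above nudge users toward passing `Variable` objects in `fetch_list`. A short sketch of the two styles (fluid 1.x; the toy program and names are illustrative):

import numpy as np
import paddle.fluid as fluid

x = fluid.data(name="x", shape=[1], dtype='float32')
out = fluid.layers.scale(x, scale=2.0)

exe = fluid.Executor(fluid.CPUPlace())

# Recommended: pass the Variable itself; its generated name is resolved safely.
res, = exe.run(feed={"x": np.ones((1), "float32")}, fetch_list=[out])

# Fragile: a raw string must exactly match a variable that exists in the
# program; a typo such as "scale_out" triggers the NotFound error above.
res2, = exe.run(feed={"x": np.ones((1), "float32")},
                fetch_list=[out.name])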
@@ -692,11 +692,7 @@ def iou_similarity(x, y, box_normalized=True, name=None):
             # [0. ]] with shape: [2, 1]
     """
     helper = LayerHelper("iou_similarity", **locals())
-    if name is None:
-        out = helper.create_variable_for_type_inference(dtype=x.dtype)
-    else:
-        out = helper.create_variable(
-            name=name, dtype=x.dtype, persistable=False)
+    out = helper.create_variable_for_type_inference(dtype=x.dtype)
     helper.append_op(
         type="iou_similarity",
@@ -828,12 +824,8 @@ def box_coder(prior_box,
     """
     helper = LayerHelper("box_coder", **locals())
-    if name is None:
-        output_box = helper.create_variable_for_type_inference(
-            dtype=prior_box.dtype)
-    else:
-        output_box = helper.create_variable(
-            name=name, dtype=prior_box.dtype, persistable=False)
+    output_box = helper.create_variable_for_type_inference(
+        dtype=prior_box.dtype)

     inputs = {"PriorBox": prior_box, "TargetBox": target_box}
     attrs = {
@@ -877,11 +869,7 @@ def polygon_box_transform(input, name=None):
             out = fluid.layers.polygon_box_transform(input)
     """
     helper = LayerHelper("polygon_box_transform", **locals())
-    if name is None:
-        output = helper.create_variable_for_type_inference(dtype=input.dtype)
-    else:
-        output = helper.create_variable(
-            name=name, dtype=prior_box.input, persistable=False)
+    output = helper.create_variable_for_type_inference(dtype=input.dtype)
     helper.append_op(
         type="polygon_box_transform",
@@ -980,11 +968,7 @@ def yolov3_loss(x,
         raise TypeError(
             "Attr use_label_smooth of yolov3_loss must be a bool value")
-    if name is None:
-        loss = helper.create_variable_for_type_inference(dtype=x.dtype)
-    else:
-        loss = helper.create_variable(
-            name=name, dtype=x.dtype, persistable=False)
+    loss = helper.create_variable_for_type_inference(dtype=x.dtype)
     objectness_mask = helper.create_variable_for_type_inference(dtype='int32')
     gt_match_mask = helper.create_variable_for_type_inference(dtype='int32')
@@ -1427,11 +1427,7 @@ def sigmoid_cross_entropy_with_logits(x,
     helper = LayerHelper("sigmoid_cross_entropy_with_logits", **locals())
-    if name is None:
-        out = helper.create_variable_for_type_inference(dtype=x.dtype)
-    else:
-        out = helper.create_variable(
-            name=name, dtype=x.dtype, persistable=False)
+    out = helper.create_variable_for_type_inference(dtype=x.dtype)
     helper.append_op(
         type="sigmoid_cross_entropy_with_logits",
@@ -7859,11 +7859,7 @@ def gather_nd(input, index, name=None):
     """
     helper = LayerHelper('gather_nd', **locals())
     dtype = helper.input_dtype()
-    if name is None:
-        output = helper.create_variable_for_type_inference(dtype)
-    else:
-        output = helper.create_variable(
-            name=name, dtype=dtype, persistable=False)
+    output = helper.create_variable_for_type_inference(dtype)
     helper.append_op(
         type="gather_nd",
         inputs={"X": input,
@@ -8026,11 +8022,7 @@ def scatter_nd_add(ref, index, updates, name=None):
     helper = LayerHelper('scatter_nd_add', **locals())
     dtype = helper.input_dtype(input_param_name='ref')
-    if name is None:
-        output = helper.create_variable_for_type_inference(dtype)
-    else:
-        output = helper.create_variable(
-            name=name, dtype=dtype, persistable=False)
+    output = helper.create_variable_for_type_inference(dtype)
     helper.append_op(
         type="scatter_nd_add",
         inputs={"X": ref,
@@ -10606,11 +10598,7 @@ def _elementwise_op(helper):
     axis = helper.kwargs.get('axis', -1)
     use_mkldnn = helper.kwargs.get('use_mkldnn', False)
     name = helper.kwargs.get('name', None)
-    if name is None:
-        out = helper.create_variable_for_type_inference(dtype=x.dtype)
-    else:
-        out = helper.create_variable(
-            name=name, dtype=x.dtype, persistable=False)
+    out = helper.create_variable_for_type_inference(dtype=x.dtype)
     helper.append_op(
         type=op_type,
@@ -10705,11 +10693,7 @@ def scale(x, scale=1.0, bias=0.0, bias_after_scale=True, act=None, name=None):
     else:
         attrs['scale'] = float(scale)
     helper = LayerHelper('scale', **locals())
-    if name is None:
-        out = helper.create_variable_for_type_inference(dtype=x.dtype)
-    else:
-        out = helper.create_variable(
-            name=name, dtype=x.dtype, persistable=False)
+    out = helper.create_variable_for_type_inference(dtype=x.dtype)
     helper.append_op(
         type='scale', inputs=inputs, outputs={'Out': out}, attrs=attrs)
@@ -11345,11 +11329,7 @@ def _logical_op(op_name, x, y, out=None, name=None, binary_op=True):
         assert x.dtype == y.dtype
     if out is None:
-        if name is None:
-            out = helper.create_variable_for_type_inference(dtype=x.dtype)
-        else:
-            out = helper.create_variable(
-                name=name, dtype=x.dtype, persistable=False)
+        out = helper.create_variable_for_type_inference(dtype=x.dtype)

     if binary_op:
         helper.append_op(
@@ -11671,11 +11651,7 @@ def mean(x, name=None):
     helper = LayerHelper("mean", **locals())
     check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'], 'mean')
-    if name is None:
-        out = helper.create_variable_for_type_inference(dtype=x.dtype)
-    else:
-        out = helper.create_variable(
-            name=name, dtype=x.dtype, persistable=False)
+    out = helper.create_variable_for_type_inference(dtype=x.dtype)
     helper.append_op(
         type="mean", inputs={"X": x}, attrs={}, outputs={"Out": out})
@@ -11758,11 +11734,7 @@ def mul(x, y, x_num_col_dims=1, y_num_col_dims=1, name=None):
     helper = LayerHelper("mul", **locals())
     check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'], 'mul')
     check_variable_and_dtype(y, 'y', ['float16', 'float32', 'float64'], 'mul')
-    if name is None:
-        out = helper.create_variable_for_type_inference(dtype=x.dtype)
-    else:
-        out = helper.create_variable(
-            name=name, dtype=x.dtype, persistable=False)
+    out = helper.create_variable_for_type_inference(dtype=x.dtype)
     helper.append_op(
         type="mul", inputs={"X": x,
@@ -11808,11 +11780,7 @@ def maxout(x, groups, name=None, axis=1):
     if axis == -1:
         axis = 3
-    if name is None:
-        out = helper.create_variable_for_type_inference(dtype=x.dtype)
-    else:
-        out = helper.create_variable(
-            name=name, dtype=x.dtype, persistable=False)
+    out = helper.create_variable_for_type_inference(dtype=x.dtype)
     helper.append_op(
         type="maxout",
@@ -11912,12 +11880,7 @@ def space_to_depth(x, blocksize, name=None):
     if not (isinstance(blocksize, int)):
         raise ValueError("blocksize must be a python Int")
-    if name is None:
-        out = helper.create_variable_for_type_inference(
-            dtype=x.dtype)  #fix create
-    else:
-        out = helper.create_variable(
-            name=name, dtype=x.dtype, persistable=False)
+    out = helper.create_variable_for_type_inference(dtype=x.dtype)
     helper.append_op(
         type="space_to_depth",
@@ -11990,12 +11953,7 @@ def affine_channel(x,
     """
     helper = LayerHelper("affine_channel", **locals())
-
-    if name is None:
-        out = helper.create_variable_for_type_inference(dtype=x.dtype)
-    else:
-        out = helper.create_variable(
-            name=name, dtype=x.dtype, persistable=False)
+    out = helper.create_variable_for_type_inference(dtype=x.dtype)
     helper.append_op(
         type="affine_channel",
@@ -12109,11 +12067,7 @@ def similarity_focus(input, axis, indexes, name=None):
     if len(indexes) == 0:
         raise ValueError("indexes can not be empty.")
-    if name is None:
-        out = helper.create_variable_for_type_inference(dtype=input.dtype)
-    else:
-        out = helper.create_variable(
-            name=name, dtype=input.dtype, persistable=False)
+    out = helper.create_variable_for_type_inference(dtype=input.dtype)
     helper.append_op(
         type='similarity_focus',
         inputs={'X': input},
@@ -12318,11 +12272,7 @@ def log_loss(input, label, epsilon=1e-4, name=None):
     """
     helper = LayerHelper('log_loss', **locals())
-    if name is None:
-        loss = helper.create_variable_for_type_inference(dtype=input.dtype)
-    else:
-        loss = helper.create_variable(
-            name=name, dtype=input.dtype, persistable=False)
+    loss = helper.create_variable_for_type_inference(dtype=input.dtype)
     helper.append_op(
         type='log_loss',
@@ -12386,10 +12336,7 @@ def add_position_encoding(input, alpha, beta, name=None):
     helper = LayerHelper('add_position_encoding', **locals())
     dtype = helper.input_dtype()
-    if name is None:
-        out = helper.create_variable_for_type_inference(dtype=dtype)
-    else:
-        out = helper.create_variable(name=name, dtype=dtype, persistable=False)
+    out = helper.create_variable_for_type_inference(dtype=dtype)
     helper.append_op(
         type="add_position_encoding",
@@ -12456,11 +12403,7 @@ def bilinear_tensor_product(x,
     w = helper.create_parameter(
         attr=helper.param_attr, shape=param_shape, dtype=dtype, is_bias=False)
-
-    if name is None:
-        out = helper.create_variable_for_type_inference(dtype=dtype)
-    else:
-        out = helper.create_variable(name=name, dtype=dtype, persistable=False)
+    out = helper.create_variable_for_type_inference(dtype=dtype)
     inputs = {"X": x, "Y": y, "Weight": w}
     if helper.bias_attr:
@@ -1269,10 +1269,7 @@ def sequence_mask(x, maxlen=None, dtype='int64', name=None):
     """
     helper = LayerHelper('sequence_mask', **locals())
-    if name is None:
-        out = helper.create_variable_for_type_inference(dtype=dtype)
-    else:
-        out = helper.create_variable_for_type_inference(dtype=dtype, name=name)
+    out = helper.create_variable_for_type_inference(dtype=dtype)

     inputs = {'X': [x]}
     attrs = {'out_dtype': out.dtype}
@@ -1337,11 +1334,7 @@ def sequence_reverse(x, name=None):
     assert not in_dygraph_mode(), (
         "sequence layer is not supported in dygraph mode yet.")
     helper = LayerHelper("sequence_reverse", **locals())
-    if name is None:
-        out = helper.create_variable_for_type_inference(dtype=x.dtype)
-    else:
-        out = helper.create_variable(
-            name=name, dtype=x.dtype, persistable=False)
+    out = helper.create_variable_for_type_inference(dtype=x.dtype)
     helper.append_op(
         type="sequence_reverse",
@@ -51,7 +51,6 @@ class TestDyToStaticSaveInferenceModel(unittest.TestCase):
         layer = SimpleFcLayer(fc_size)
         program_translator = ProgramTranslator.get_instance()
-        program_cache = ProgramTranslator().get_program_cache
         adam = fluid.optimizer.SGD(learning_rate=0.001)
         program_translator.set_optimizer(adam, index_of_loss=0)
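The deleted line was dead code: referencing a method without parentheses only binds it, nothing runs. A generic Python illustration (class `C` is hypothetical, not from the repo):

class C:
    def get(self):
        return 42

bound = C().get     # a bound method object; nothing is executed
value = C().get()   # 42 -- what an actual call looks like
print(bound, value)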
@@ -75,41 +75,43 @@ class TestImperativeStaticModelRunnerMnist(unittest.TestCase):
         return _reader_impl

     def train_and_save_model(self):
-        startup_program = fluid.default_startup_program()
-        main_program = fluid.default_main_program()
-
-        img = fluid.data(name='img', shape=[None, 1, 28, 28], dtype='float32')
-        label = fluid.data(name='label', shape=[None, 1], dtype='int64')
-
-        prediction, avg_loss = static_train_net(img, label)
-
-        place = fluid.CUDAPlace(0) if core.is_compiled_with_cuda(
-        ) else fluid.CPUPlace()
-        exe = fluid.Executor(place)
-
-        feeder = fluid.DataFeeder(feed_list=[img, label], place=place)
-        exe.run(startup_program)
-
-        train_reader = paddle.batch(
-            paddle.reader.shuffle(
-                paddle.dataset.mnist.train(), buf_size=100),
-            batch_size=self.batch_size)
-
-        for _ in range(0, self.epoch_num):
-            for batch_id, data in enumerate(train_reader()):
-                exe.run(main_program,
-                        feed=feeder.feed(data),
-                        fetch_list=[avg_loss])
-
-                if batch_id > self.batch_num:
-                    break
-
-        fluid.io.save_inference_model(
-            self.save_dirname, ["img"], [prediction],
-            exe,
-            model_filename=self.model_filename,
-            params_filename=self.params_filename)
+        with new_program_scope():
+            startup_program = fluid.default_startup_program()
+            main_program = fluid.default_main_program()
+
+            img = fluid.data(
+                name='img', shape=[None, 1, 28, 28], dtype='float32')
+            label = fluid.data(name='label', shape=[None, 1], dtype='int64')
+
+            prediction, avg_loss = static_train_net(img, label)
+
+            place = fluid.CUDAPlace(0) if core.is_compiled_with_cuda(
+            ) else fluid.CPUPlace()
+            exe = fluid.Executor(place)
+
+            feeder = fluid.DataFeeder(feed_list=[img, label], place=place)
+            exe.run(startup_program)
+
+            train_reader = paddle.batch(
+                paddle.reader.shuffle(
+                    paddle.dataset.mnist.train(), buf_size=100),
+                batch_size=self.batch_size)
+
+            for _ in range(0, self.epoch_num):
+                for batch_id, data in enumerate(train_reader()):
+                    exe.run(main_program,
+                            feed=feeder.feed(data),
+                            fetch_list=[avg_loss])
+
+                    if batch_id > self.batch_num:
+                        break
+
+            fluid.io.save_inference_model(
+                self.save_dirname, ["img"], [prediction],
+                exe,
+                model_filename=self.model_filename,
+                params_filename=self.params_filename)

     def load_and_train_dygraph(self):
         place = fluid.CUDAPlace(0) if core.is_compiled_with_cuda(
@@ -248,11 +250,12 @@ class TestImperativeStaticModelRunnerMnist(unittest.TestCase):
             key += core.loaded_var_suffix()
             self.assertTrue(np.array_equal(value, dy_param_init_value[key]))

-        self.assertTrue(np.allclose(static_out, dy_out))
+        # np.testing.assert_array_almost_equal(static_out, dy_out)
+        self.assertTrue(np.allclose(static_out, dy_out, atol=1e-04))

         for key, value in six.iteritems(static_param_value):
             key += core.loaded_var_suffix()
-            self.assertTrue(np.allclose(value, dy_param_value[key], atol=1e-5))
+            self.assertTrue(np.allclose(value, dy_param_value[key], atol=1e-4))

     def test_mnist_with_params_filename(self):
         self.save_dirname = "mnist.inference.model"
@@ -275,11 +278,12 @@ class TestImperativeStaticModelRunnerMnist(unittest.TestCase):
             key += core.loaded_var_suffix()
             self.assertTrue(np.array_equal(value, dy_param_init_value[key]))

-        self.assertTrue(np.allclose(static_out, dy_out))
+        # np.testing.assert_array_almost_equal(static_out, dy_out)
+        self.assertTrue(np.allclose(static_out, dy_out, atol=1e-04))

         for key, value in six.iteritems(static_param_value):
             key += core.loaded_var_suffix()
-            self.assertTrue(np.allclose(value, dy_param_value[key], atol=1e-5))
+            self.assertTrue(np.allclose(value, dy_param_value[key], atol=1e-4))

 if __name__ == '__main__':
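The two assertion hunks above loosen the comparisons: the output check gains an explicit `atol=1e-04` over `np.allclose`'s default of `1e-8`, and the parameter check moves from `atol=1e-5` to `1e-4`. A quick arithmetic check of what the looser tolerance admits (values are illustrative, not from the test run):

import numpy as np

a, b = np.float32(1.00005), np.float32(1.0)   # |a - b| is about 5e-5
# allclose passes when |a - b| <= atol + rtol * |b| (rtol defaults to 1e-5)
print(np.allclose(a, b, atol=1e-5))  # False: 5e-5 >  1e-5 + 1e-5
print(np.allclose(a, b, atol=1e-4))  # True:  5e-5 <= 1e-4 + 1e-5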
New test file added by this commit (file path not shown in this view):

# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import paddle.fluid as fluid
import numpy as np
import unittest


class TestOpNameConflict(unittest.TestCase):
    def test_conflict(self):
        main = fluid.Program()
        startup = fluid.Program()
        with fluid.unique_name.guard():
            with fluid.program_guard(main, startup):
                x = fluid.data(name="x", shape=[1], dtype='float32')
                y = fluid.data(name="y", shape=[1], dtype='float32')
                z = fluid.data(name="z", shape=[1], dtype='float32')

                m = fluid.layers.elementwise_add(x, y, name="add")
                n = fluid.layers.elementwise_add(y, z, name="add")
                p = m + n

                place = fluid.CPUPlace()
                exe = fluid.Executor(place)
                m_v, n_v, p_v = exe.run(
                    feed={
                        "x": np.ones((1), "float32") * 2,
                        "y": np.ones((1), "float32") * 3,
                        "z": np.ones((1), "float32") * 5
                    },
                    fetch_list=[m, n, p])

                self.assertEqual(m_v[0], 5.0)
                self.assertEqual(n_v[0], 8.0)
                self.assertEqual(p_v[0], 13.0)

    def test_layers(self):
        main = fluid.Program()
        startup = fluid.Program()
        with fluid.unique_name.guard():
            with fluid.program_guard(main, startup):
                place = fluid.CUDAPlace(0) if fluid.core.is_compiled_with_cuda(
                ) else fluid.CPUPlace()
                exe = fluid.Executor(place)

                data = fluid.data(
                    name='data', shape=[None, 1, 2, 2], dtype='float32')
                tensor = fluid.data(
                    name='tensor', shape=[None, 32, 64], dtype='float32')
                x = fluid.data(
                    name='x', shape=[None, 1], dtype='float32', lod_level=1)

                input_scale = fluid.layers.create_parameter(
                    shape=[1],
                    dtype="float32",
                    default_initializer=fluid.initializer.Constant(2.0))
                input_bias = fluid.layers.create_parameter(
                    shape=[1],
                    dtype="float32",
                    default_initializer=fluid.initializer.Constant(0.5))
                out_affine = fluid.layers.affine_channel(
                    data, scale=input_scale, bias=input_bias)
                out_similarity = fluid.layers.similarity_focus(
                    input=data, axis=1, indexes=[0])
                position_tensor = fluid.layers.add_position_encoding(
                    input=tensor, alpha=1.0, beta=1.0)
                x_reversed = fluid.layers.sequence_reverse(x)

                exe.run(fluid.default_startup_program())
                test_program = fluid.default_main_program().clone(
                    for_test=True)
                x_d = fluid.create_lod_tensor(
                    np.array([[1.1], [2.2], [3.3], [4.4]]).astype('float32'),
                    [[1, 3]], place)
                outs = exe.run(
                    test_program,
                    fetch_list=[
                        out_affine, out_similarity, position_tensor, x_reversed
                    ],
                    feed={
                        data.name: np.ones([1, 1, 2, 2]).astype('float32'),
                        tensor.name: np.ones([1, 32, 64]).astype('float32'),
                        x.name: x_d
                    },
                    return_numpy=False)


if __name__ == '__main__':
    unittest.main()