BaiXuePrincess / Paddle (forked from PaddlePaddle / Paddle)
Unverified commit 84cd45f6
Authored Apr 10, 2020 by Zhen Wang; committed by GitHub on Apr 10, 2020
Solve the conflict of ops with the same name, test for CI. (#23573)
* solve the conflict of ops with the same name. test=develop
Parent: 795a0a9a
Showing 10 changed files with 177 additions and 143 deletions (+177 -143)
paddle/fluid/framework/details/fast_threaded_ssa_graph_executor.cc  +7 -2
paddle/fluid/framework/details/threaded_ssa_graph_executor.cc  +7 -2
paddle/fluid/operators/controlflow/fetch_op.cc  +8 -2
python/paddle/fluid/layers/detection.py  +5 -21
python/paddle/fluid/layers/loss.py  +1 -5
python/paddle/fluid/layers/nn.py  +14 -71
python/paddle/fluid/layers/sequence_lod.py  +2 -9
python/paddle/fluid/tests/unittests/dygraph_to_static/test_save_inference_model.py  +0 -1
python/paddle/fluid/tests/unittests/test_imperative_static_runner_mnist.py  +34 -30
python/paddle/fluid/tests/unittests/test_op_name_conflict.py  +99 -0
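Before the diffs, a minimal sketch of the symptom being fixed (it mirrors the new test_op_name_conflict.py added below; the values are arbitrary): when two ops received the same `name` argument, both resolved their output to the same variable, so one result silently overwrote the other and fetching by string name was ambiguous.

import paddle.fluid as fluid
import numpy as np

main, startup = fluid.Program(), fluid.Program()
with fluid.unique_name.guard():
    with fluid.program_guard(main, startup):
        x = fluid.data(name="x", shape=[1], dtype='float32')
        y = fluid.data(name="y", shape=[1], dtype='float32')
        # Prior to this commit both outputs mapped to one variable "add";
        # after it, each op gets a unique auto-generated output name.
        m = fluid.layers.elementwise_add(x, y, name="add")
        n = fluid.layers.elementwise_add(y, x, name="add")
        exe = fluid.Executor(fluid.CPUPlace())
        m_v, n_v = exe.run(feed={"x": np.ones((1), "float32") * 2,
                                 "y": np.ones((1), "float32") * 3},
                           fetch_list=[m, n])  # fetch Variables, not "add"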
paddle/fluid/framework/details/fast_threaded_ssa_graph_executor.cc

@@ -150,8 +150,13 @@ void FastThreadedSSAGraphExecutor::InsertFetchOps(
           "Possible reasons are:\n"
           " 1. The variable to be fetched is not defined in main program.\n"
           " 2. The variable to be fetched is not an input or output of any "
-          "operator.",
-          var_name));
+          "operator.\n"
+          " 3. Confirm that you have used the fetch `Variable` format "
+          "instead of the string literal('%s') in `fetch_list` parameter "
+          "when using `executor.run` method. In other words, the format of "
+          "`executor.run(fetch_list=[fetch_var])`(fetch_var is a Variable) "
+          "is recommended.",
+          var_name, var_name));
     auto &vars = fetched_var_it->second;
paddle/fluid/framework/details/threaded_ssa_graph_executor.cc

@@ -186,8 +186,13 @@ void ThreadedSSAGraphExecutor::InsertFetchOps(
           "Possible reasons are:\n"
           " 1. The variable to be fetched is not defined in main program.\n"
           " 2. The variable to be fetched is not an input or output of any "
-          "operator.",
-          var_name));
+          "operator.\n"
+          " 3. Confirm that you have used the fetch `Variable` format "
+          "instead of the string literal('%s') in `fetch_list` parameter "
+          "when using `executor.run` method. In other words, the format of "
+          "`executor.run(fetch_list=[fetch_var])`(fetch_var is a Variable) "
+          "is recommended.",
+          var_name, var_name));
     auto &vars = fetched_var_it->second;
paddle/fluid/operators/controlflow/fetch_op.cc

@@ -39,8 +39,14 @@ class FetchOp : public framework::OperatorBase {
     PADDLE_ENFORCE_NOT_NULL(
         fetch_var,
         platform::errors::NotFound(
-            "Input variable(%s) cannot be found in scope for operator 'Fetch'.",
-            fetch_var_name));
+            "Input variable(%s) cannot be found in scope for operator 'Fetch'."
+            "Confirm that you have used the fetch `Variable` format "
+            "instead of the string literal('%s') in `fetch_list` "
+            "parameter when using `executor.run` method. In other "
+            "words, the format of "
+            "`executor.run(fetch_list=[fetch_var])`(fetch_var is a "
+            "Variable) is recommended.",
+            fetch_var_name, fetch_var_name));
     auto out_name = Output("Out");
     auto *out_var = scope.FindVar(out_name);
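The three C++ changes above all extend the fetch diagnostics with the same recommendation. As a hedged illustration of the two call styles the message distinguishes (not part of the diff; `scale` is just a convenient op):

import paddle.fluid as fluid
import numpy as np

x = fluid.data(name="x", shape=[1], dtype='float32')
loss = fluid.layers.scale(x, scale=2.0)
exe = fluid.Executor(fluid.CPUPlace())
exe.run(fluid.default_startup_program())
# Recommended: pass the Variable object itself to fetch_list.
out, = exe.run(feed={"x": np.ones((1), "float32")}, fetch_list=[loss])
# A string literal (e.g. fetch_list=["loss"]) only resolves if a variable
# with exactly that name exists, which is fragile once output names are
# auto-generated; that is what reason 3 above warns about.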
python/paddle/fluid/layers/detection.py

@@ -692,11 +692,7 @@ def iou_similarity(x, y, box_normalized=True, name=None):
             # [0. ]] with shape: [2, 1]
     """
     helper = LayerHelper("iou_similarity", **locals())
-    if name is None:
-        out = helper.create_variable_for_type_inference(dtype=x.dtype)
-    else:
-        out = helper.create_variable(
-            name=name, dtype=x.dtype, persistable=False)
+    out = helper.create_variable_for_type_inference(dtype=x.dtype)
     helper.append_op(
         type="iou_similarity",

@@ -828,12 +824,8 @@ def box_coder(prior_box,
     """
     helper = LayerHelper("box_coder", **locals())
-    if name is None:
-        output_box = helper.create_variable_for_type_inference(
-            dtype=prior_box.dtype)
-    else:
-        output_box = helper.create_variable(
-            name=name, dtype=prior_box.dtype, persistable=False)
+    output_box = helper.create_variable_for_type_inference(
+        dtype=prior_box.dtype)
     inputs = {"PriorBox": prior_box, "TargetBox": target_box}
     attrs = {

@@ -877,11 +869,7 @@ def polygon_box_transform(input, name=None):
             out = fluid.layers.polygon_box_transform(input)
     """
     helper = LayerHelper("polygon_box_transform", **locals())
-    if name is None:
-        output = helper.create_variable_for_type_inference(dtype=input.dtype)
-    else:
-        output = helper.create_variable(
-            name=name, dtype=prior_box.input, persistable=False)
+    output = helper.create_variable_for_type_inference(dtype=input.dtype)
     helper.append_op(
         type="polygon_box_transform",

@@ -980,11 +968,7 @@ def yolov3_loss(x,
         raise TypeError(
             "Attr use_label_smooth of yolov3_loss must be a bool value")
-    if name is None:
-        loss = helper.create_variable_for_type_inference(dtype=x.dtype)
-    else:
-        loss = helper.create_variable(
-            name=name, dtype=x.dtype, persistable=False)
+    loss = helper.create_variable_for_type_inference(dtype=x.dtype)
     objectness_mask = helper.create_variable_for_type_inference(dtype='int32')
     gt_match_mask = helper.create_variable_for_type_inference(dtype='int32')
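All of the Python-side hunks, here and in the files that follow, apply one pattern: drop the `if name is None` branch that allocated the output via `helper.create_variable(name=name, ...)`, and always allocate it with `create_variable_for_type_inference`. A minimal sketch of why that removes the conflict (LayerHelper is used directly here, purely for illustration):

import paddle.fluid as fluid
from paddle.fluid.layer_helper import LayerHelper

with fluid.program_guard(fluid.Program(), fluid.Program()):
    helper = LayerHelper("scale")
    # Each call derives a fresh name via fluid.unique_name, so two layers
    # that were handed the same user-facing `name` can no longer collide.
    a = helper.create_variable_for_type_inference(dtype='float32')
    b = helper.create_variable_for_type_inference(dtype='float32')
    assert a.name != b.name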
python/paddle/fluid/layers/loss.py

@@ -1427,11 +1427,7 @@ def sigmoid_cross_entropy_with_logits(x,
     helper = LayerHelper("sigmoid_cross_entropy_with_logits", **locals())
-    if name is None:
-        out = helper.create_variable_for_type_inference(dtype=x.dtype)
-    else:
-        out = helper.create_variable(
-            name=name, dtype=x.dtype, persistable=False)
+    out = helper.create_variable_for_type_inference(dtype=x.dtype)
     helper.append_op(
         type="sigmoid_cross_entropy_with_logits",
python/paddle/fluid/layers/nn.py

@@ -7859,11 +7859,7 @@ def gather_nd(input, index, name=None):
     """
     helper = LayerHelper('gather_nd', **locals())
     dtype = helper.input_dtype()
-    if name is None:
-        output = helper.create_variable_for_type_inference(dtype)
-    else:
-        output = helper.create_variable(
-            name=name, dtype=dtype, persistable=False)
+    output = helper.create_variable_for_type_inference(dtype)
     helper.append_op(
         type="gather_nd",
         inputs={"X": input,

@@ -8026,11 +8022,7 @@ def scatter_nd_add(ref, index, updates, name=None):
     helper = LayerHelper('scatter_nd_add', **locals())
     dtype = helper.input_dtype(input_param_name='ref')
-    if name is None:
-        output = helper.create_variable_for_type_inference(dtype)
-    else:
-        output = helper.create_variable(
-            name=name, dtype=dtype, persistable=False)
+    output = helper.create_variable_for_type_inference(dtype)
     helper.append_op(
         type="scatter_nd_add",
         inputs={"X": ref,

@@ -10606,11 +10598,7 @@ def _elementwise_op(helper):
     axis = helper.kwargs.get('axis', -1)
     use_mkldnn = helper.kwargs.get('use_mkldnn', False)
     name = helper.kwargs.get('name', None)
-    if name is None:
-        out = helper.create_variable_for_type_inference(dtype=x.dtype)
-    else:
-        out = helper.create_variable(
-            name=name, dtype=x.dtype, persistable=False)
+    out = helper.create_variable_for_type_inference(dtype=x.dtype)
     helper.append_op(
         type=op_type,

@@ -10705,11 +10693,7 @@ def scale(x, scale=1.0, bias=0.0, bias_after_scale=True, act=None, name=None):
     else:
         attrs['scale'] = float(scale)
     helper = LayerHelper('scale', **locals())
-    if name is None:
-        out = helper.create_variable_for_type_inference(dtype=x.dtype)
-    else:
-        out = helper.create_variable(
-            name=name, dtype=x.dtype, persistable=False)
+    out = helper.create_variable_for_type_inference(dtype=x.dtype)
     helper.append_op(
         type='scale', inputs=inputs, outputs={'Out': out}, attrs=attrs)

@@ -11345,11 +11329,7 @@ def _logical_op(op_name, x, y, out=None, name=None, binary_op=True):
     assert x.dtype == y.dtype
     if out is None:
-        if name is None:
-            out = helper.create_variable_for_type_inference(dtype=x.dtype)
-        else:
-            out = helper.create_variable(
-                name=name, dtype=x.dtype, persistable=False)
+        out = helper.create_variable_for_type_inference(dtype=x.dtype)
     if binary_op:
         helper.append_op(

@@ -11671,11 +11651,7 @@ def mean(x, name=None):
     helper = LayerHelper("mean", **locals())
     check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'], 'mean')
-    if name is None:
-        out = helper.create_variable_for_type_inference(dtype=x.dtype)
-    else:
-        out = helper.create_variable(
-            name=name, dtype=x.dtype, persistable=False)
+    out = helper.create_variable_for_type_inference(dtype=x.dtype)
     helper.append_op(
         type="mean", inputs={"X": x}, attrs={}, outputs={"Out": out})

@@ -11758,11 +11734,7 @@ def mul(x, y, x_num_col_dims=1, y_num_col_dims=1, name=None):
     helper = LayerHelper("mul", **locals())
     check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'], 'mul')
     check_variable_and_dtype(y, 'y', ['float16', 'float32', 'float64'], 'mul')
-    if name is None:
-        out = helper.create_variable_for_type_inference(dtype=x.dtype)
-    else:
-        out = helper.create_variable(
-            name=name, dtype=x.dtype, persistable=False)
+    out = helper.create_variable_for_type_inference(dtype=x.dtype)
     helper.append_op(
         type="mul", inputs={"X": x,

@@ -11808,11 +11780,7 @@ def maxout(x, groups, name=None, axis=1):
     if axis == -1:
         axis = 3
-    if name is None:
-        out = helper.create_variable_for_type_inference(dtype=x.dtype)
-    else:
-        out = helper.create_variable(
-            name=name, dtype=x.dtype, persistable=False)
+    out = helper.create_variable_for_type_inference(dtype=x.dtype)
     helper.append_op(
         type="maxout",

@@ -11912,12 +11880,7 @@ def space_to_depth(x, blocksize, name=None):
     if not (isinstance(blocksize, int)):
         raise ValueError("blocksize must be a python Int")
-    if name is None:
-        out = helper.create_variable_for_type_inference(
-            dtype=x.dtype) #fix create
-    else:
-        out = helper.create_variable(
-            name=name, dtype=x.dtype, persistable=False)
+    out = helper.create_variable_for_type_inference(dtype=x.dtype)
     helper.append_op(
         type="space_to_depth",

@@ -11990,12 +11953,7 @@ def affine_channel(x,
     """
     helper = LayerHelper("affine_channel", **locals())
-    if name is None:
-        out = helper.create_variable_for_type_inference(dtype=x.dtype)
-    else:
-        out = helper.create_variable(
-            name=name, dtype=x.dtype, persistable=False)
+    out = helper.create_variable_for_type_inference(dtype=x.dtype)
     helper.append_op(
         type="affine_channel",

@@ -12109,11 +12067,7 @@ def similarity_focus(input, axis, indexes, name=None):
     if len(indexes) == 0:
         raise ValueError("indexes can not be empty.")
-    if name is None:
-        out = helper.create_variable_for_type_inference(dtype=input.dtype)
-    else:
-        out = helper.create_variable(
-            name=name, dtype=input.dtype, persistable=False)
+    out = helper.create_variable_for_type_inference(dtype=input.dtype)
     helper.append_op(
         type='similarity_focus',
         inputs={'X': input},

@@ -12318,11 +12272,7 @@ def log_loss(input, label, epsilon=1e-4, name=None):
     """
     helper = LayerHelper('log_loss', **locals())
-    if name is None:
-        loss = helper.create_variable_for_type_inference(dtype=input.dtype)
-    else:
-        loss = helper.create_variable(
-            name=name, dtype=input.dtype, persistable=False)
+    loss = helper.create_variable_for_type_inference(dtype=input.dtype)
     helper.append_op(
         type='log_loss',

@@ -12386,10 +12336,7 @@ def add_position_encoding(input, alpha, beta, name=None):
     helper = LayerHelper('add_position_encoding', **locals())
     dtype = helper.input_dtype()
-    if name is None:
-        out = helper.create_variable_for_type_inference(dtype=dtype)
-    else:
-        out = helper.create_variable(name=name, dtype=dtype, persistable=False)
+    out = helper.create_variable_for_type_inference(dtype=dtype)
     helper.append_op(
         type="add_position_encoding",

@@ -12456,11 +12403,7 @@ def bilinear_tensor_product(x,
     w = helper.create_parameter(
         attr=helper.param_attr, shape=param_shape, dtype=dtype, is_bias=False)
-    if name is None:
-        out = helper.create_variable_for_type_inference(dtype=dtype)
-    else:
-        out = helper.create_variable(name=name, dtype=dtype, persistable=False)
+    out = helper.create_variable_for_type_inference(dtype=dtype)
     inputs = {"X": x, "Y": y, "Weight": w}
     if helper.bias_attr:
python/paddle/fluid/layers/sequence_lod.py

@@ -1269,10 +1269,7 @@ def sequence_mask(x, maxlen=None, dtype='int64', name=None):
     """
     helper = LayerHelper('sequence_mask', **locals())
-    if name is None:
-        out = helper.create_variable_for_type_inference(dtype=dtype)
-    else:
-        out = helper.create_variable_for_type_inference(dtype=dtype, name=name)
+    out = helper.create_variable_for_type_inference(dtype=dtype)
     inputs = {'X': [x]}
     attrs = {'out_dtype': out.dtype}

@@ -1337,11 +1334,7 @@ def sequence_reverse(x, name=None):
     assert not in_dygraph_mode(), (
         "sequence layer is not supported in dygraph mode yet.")
     helper = LayerHelper("sequence_reverse", **locals())
-    if name is None:
-        out = helper.create_variable_for_type_inference(dtype=x.dtype)
-    else:
-        out = helper.create_variable(
-            name=name, dtype=x.dtype, persistable=False)
+    out = helper.create_variable_for_type_inference(dtype=x.dtype)
     helper.append_op(
         type="sequence_reverse",
python/paddle/fluid/tests/unittests/dygraph_to_static/test_save_inference_model.py

@@ -51,7 +51,6 @@ class TestDyToStaticSaveInferenceModel(unittest.TestCase):
         layer = SimpleFcLayer(fc_size)
         program_translator = ProgramTranslator.get_instance()
-        program_cache = ProgramTranslator().get_program_cache
         adam = fluid.optimizer.SGD(learning_rate=0.001)
         program_translator.set_optimizer(adam, index_of_loss=0)
python/paddle/fluid/tests/unittests/test_imperative_static_runner_mnist.py

@@ -75,41 +75,43 @@ class TestImperativeStaticModelRunnerMnist(unittest.TestCase):
         return _reader_impl

     def train_and_save_model(self):
-        startup_program = fluid.default_startup_program()
-        main_program = fluid.default_main_program()
+        with new_program_scope():
+            startup_program = fluid.default_startup_program()
+            main_program = fluid.default_main_program()

-        img = fluid.data(
-            name='img', shape=[None, 1, 28, 28], dtype='float32')
-        label = fluid.data(name='label', shape=[None, 1], dtype='int64')
+            img = fluid.data(
+                name='img', shape=[None, 1, 28, 28], dtype='float32')
+            label = fluid.data(name='label', shape=[None, 1], dtype='int64')

-        prediction, avg_loss = static_train_net(img, label)
+            prediction, avg_loss = static_train_net(img, label)

-        place = fluid.CUDAPlace(0) if core.is_compiled_with_cuda(
-        ) else fluid.CPUPlace()
+            place = fluid.CUDAPlace(0) if core.is_compiled_with_cuda(
+            ) else fluid.CPUPlace()

-        exe = fluid.Executor(place)
+            exe = fluid.Executor(place)

-        feeder = fluid.DataFeeder(feed_list=[img, label], place=place)
-        exe.run(startup_program)
+            feeder = fluid.DataFeeder(feed_list=[img, label], place=place)
+            exe.run(startup_program)

-        train_reader = paddle.batch(
-            paddle.reader.shuffle(
-                paddle.dataset.mnist.train(), buf_size=100),
-            batch_size=self.batch_size)
+            train_reader = paddle.batch(
+                paddle.reader.shuffle(
+                    paddle.dataset.mnist.train(), buf_size=100),
+                batch_size=self.batch_size)

-        for _ in range(0, self.epoch_num):
-            for batch_id, data in enumerate(train_reader()):
-                exe.run(main_program,
-                        feed=feeder.feed(data),
-                        fetch_list=[avg_loss])
+            for _ in range(0, self.epoch_num):
+                for batch_id, data in enumerate(train_reader()):
+                    exe.run(main_program,
+                            feed=feeder.feed(data),
+                            fetch_list=[avg_loss])

-                if batch_id > self.batch_num:
-                    break
+                    if batch_id > self.batch_num:
+                        break

-        fluid.io.save_inference_model(
-            self.save_dirname, ["img"], [prediction],
-            exe,
-            model_filename=self.model_filename,
-            params_filename=self.params_filename)
+            fluid.io.save_inference_model(
+                self.save_dirname, ["img"], [prediction],
+                exe,
+                model_filename=self.model_filename,
+                params_filename=self.params_filename)

     def load_and_train_dygraph(self):
         place = fluid.CUDAPlace(0) if core.is_compiled_with_cuda(

@@ -248,11 +250,12 @@ class TestImperativeStaticModelRunnerMnist(unittest.TestCase):
             key += core.loaded_var_suffix()
             self.assertTrue(np.array_equal(value, dy_param_init_value[key]))

-        self.assertTrue(np.allclose(static_out, dy_out))
+        # np.testing.assert_array_almost_equal(static_out, dy_out)
+        self.assertTrue(np.allclose(static_out, dy_out, atol=1e-04))

         for key, value in six.iteritems(static_param_value):
             key += core.loaded_var_suffix()
-            self.assertTrue(np.allclose(value, dy_param_value[key], atol=1e-5))
+            self.assertTrue(np.allclose(value, dy_param_value[key], atol=1e-4))

     def test_mnist_with_params_filename(self):
         self.save_dirname = "mnist.inference.model"

@@ -275,11 +278,12 @@ class TestImperativeStaticModelRunnerMnist(unittest.TestCase):
             key += core.loaded_var_suffix()
             self.assertTrue(np.array_equal(value, dy_param_init_value[key]))

-        self.assertTrue(np.allclose(static_out, dy_out))
+        # np.testing.assert_array_almost_equal(static_out, dy_out)
+        self.assertTrue(np.allclose(static_out, dy_out, atol=1e-04))

         for key, value in six.iteritems(static_param_value):
             key += core.loaded_var_suffix()
-            self.assertTrue(np.allclose(value, dy_param_value[key], atol=1e-5))
+            self.assertTrue(np.allclose(value, dy_param_value[key], atol=1e-4))

 if __name__ == '__main__':
python/paddle/fluid/tests/unittests/test_op_name_conflict.py (new file, mode 100644)
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import paddle.fluid as fluid
import numpy as np
import unittest


class TestOpNameConflict(unittest.TestCase):
    def test_conflict(self):
        main = fluid.Program()
        startup = fluid.Program()
        with fluid.unique_name.guard():
            with fluid.program_guard(main, startup):
                x = fluid.data(name="x", shape=[1], dtype='float32')
                y = fluid.data(name="y", shape=[1], dtype='float32')
                z = fluid.data(name="z", shape=[1], dtype='float32')

                m = fluid.layers.elementwise_add(x, y, name="add")
                n = fluid.layers.elementwise_add(y, z, name="add")
                p = m + n

                place = fluid.CPUPlace()
                exe = fluid.Executor(place)
                m_v, n_v, p_v = exe.run(
                    feed={
                        "x": np.ones((1), "float32") * 2,
                        "y": np.ones((1), "float32") * 3,
                        "z": np.ones((1), "float32") * 5
                    },
                    fetch_list=[m, n, p])

                self.assertEqual(m_v[0], 5.0)
                self.assertEqual(n_v[0], 8.0)
                self.assertEqual(p_v[0], 13.0)

    def test_layers(self):
        main = fluid.Program()
        startup = fluid.Program()
        with fluid.unique_name.guard():
            with fluid.program_guard(main, startup):
                place = fluid.CUDAPlace(0) if fluid.core.is_compiled_with_cuda(
                ) else fluid.CPUPlace()
                exe = fluid.Executor(place)

                data = fluid.data(
                    name='data', shape=[None, 1, 2, 2], dtype='float32')
                tensor = fluid.data(
                    name='tensor', shape=[None, 32, 64], dtype='float32')
                x = fluid.data(
                    name='x', shape=[None, 1], dtype='float32', lod_level=1)

                input_scale = fluid.layers.create_parameter(
                    shape=[1],
                    dtype="float32",
                    default_initializer=fluid.initializer.Constant(2.0))
                input_bias = fluid.layers.create_parameter(
                    shape=[1],
                    dtype="float32",
                    default_initializer=fluid.initializer.Constant(0.5))
                out_affine = fluid.layers.affine_channel(
                    data, scale=input_scale, bias=input_bias)
                out_similarity = fluid.layers.similarity_focus(
                    input=data, axis=1, indexes=[0])
                position_tensor = fluid.layers.add_position_encoding(
                    input=tensor, alpha=1.0, beta=1.0)
                x_reversed = fluid.layers.sequence_reverse(x)

                exe.run(fluid.default_startup_program())
                test_program = fluid.default_main_program().clone(
                    for_test=True)

                x_d = fluid.create_lod_tensor(
                    np.array([[1.1], [2.2], [3.3], [4.4]]).astype('float32'),
                    [[1, 3]], place)
                outs = exe.run(
                    test_program,
                    fetch_list=[
                        out_affine, out_similarity, position_tensor, x_reversed
                    ],
                    feed={
                        data.name: np.ones([1, 1, 2, 2]).astype('float32'),
                        tensor.name: np.ones([1, 32, 64]).astype('float32'),
                        x.name: x_d
                    },
                    return_numpy=False)


if __name__ == '__main__':
    unittest.main()
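The new test can be run standalone, e.g. python python/paddle/fluid/tests/unittests/test_op_name_conflict.py (in a build containing this commit); both cases create duplicate-named ops and fetch their outputs as Variable objects, which is exactly the usage the new fetch error message recommends.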