Unverified commit 72c711bb, authored by GGBond8488, committed by GitHub

【fluid clean】remove fluid.data (#50699)

* remove fluid.data

* fix typo

* fix some unittest errors

* fix conflicts

* fix sample code error

* fix sample code error

* fix sample code error

* fix sample code error

* fix xpu test error

* fix xpu test error

* Delete ps_pb2.py

* fix test error

* fix typo

* fix sample code error

* fix comments

* fix test norm op data

* fix sample code error

* fix conflicts
Parent 457b9fb1
......@@ -92,8 +92,6 @@ class ReaderBase {
std::vector<proto::VarType::Type> var_types_;
// Whether to check the shape and dtype of fed variables.
// For Backward compatibility, variables created by old API fluid.layers.data
// doesn't check shape but fluid.data checks.
std::vector<bool> need_check_feed_;
private:
......
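For context on the `need_check_feed_` flag whose comment changes above: variables created through `paddle.static.data` are marked as needing feed checks, so the executor validates the shape and dtype of fed arrays at run time. A minimal sketch of that behavior, assuming Paddle 2.x static mode (the exact exception type may vary by version):
.. code-block:: python

    import numpy as np
    import paddle

    paddle.enable_static()

    # paddle.static.data creates a placeholder with need_check_feed=True.
    x = paddle.static.data(name='x', shape=[3, 2], dtype='float32')
    y = x * 2

    exe = paddle.static.Executor(paddle.CPUPlace())

    # Feeding an array of the declared shape and dtype succeeds.
    out, = exe.run(paddle.static.default_main_program(),
                   feed={'x': np.ones([3, 2], dtype='float32')},
                   fetch_list=[y])

    # Feeding a mismatched shape should be rejected by the feed check.
    try:
        exe.run(paddle.static.default_main_program(),
                feed={'x': np.ones([4, 2], dtype='float32')},
                fetch_list=[y])
    except Exception as err:
        print('feed check failed:', type(err).__name__)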
......@@ -46,8 +46,6 @@ from .data_feed_desc import *
from . import dataset
from .dataset import *
from .data import *
from . import trainer_desc
from . import io
......@@ -117,7 +115,6 @@ __all__ = (
'initializer',
'layers',
'contrib',
'data',
'dygraph',
'enable_dygraph',
'disable_dygraph',
......
......@@ -567,8 +567,9 @@ def partial_concat(input, start_index=0, length=-1):
Examples:
.. code-block:: python
import paddle.fluid as fluid
x = fluid.data(name="x", shape=[None,3], dtype="float32")
y = fluid.data(name="y", shape=[None,3], dtype="float32")
import paddle
x = paddle.randn(name="x", shape=[1,3], dtype="float32")
y = paddle.randn(name="y", shape=[1,3], dtype="float32")
concat = fluid.contrib.layers.partial_concat(
[x, y], start_index=0, length=2)
"""
......@@ -629,9 +630,12 @@ def partial_sum(input, start_index=0, length=-1):
import paddle.fluid.layers as layers
import paddle.fluid as fluid
import numpy as np
x = fluid.data(name="x", shape=[None, 3], dtype="float32")
y = fluid.data(name="y", shape=[None, 3], dtype="float32")
sum = layers.partial_sum([x,y], start_index=0, length=2)
import paddle
paddle.enable_static()
x = paddle.static.data(name="x", shape=[2, 3], dtype="float32")
y = paddle.static.data(name="y", shape=[2, 3], dtype="float32")
sum = fluid.contrib.layers.partial_sum([x,y], start_index=0, length=2)
place = fluid.CPUPlace()
exe = fluid.Executor(place)
xx = np.array([1,2,3,4,5,6]).reshape((2,3)).astype("float32")
......@@ -898,7 +902,7 @@ def tdm_child(x, node_nums, child_nums, param_attr=None, dtype='int32'):
import paddle.fluid as fluid
import numpy as np
paddle.enable_static()
x = fluid.data(name="x", shape=[None, 1], dtype="int32", lod_level=1)
x = paddle.static.data(name="x", shape=[None, 1], dtype="int32", lod_level=1)
tree_info = [[0,0,0,1,2],
[0,1,0,3,4],[0,1,0,5,6],
[0,2,1,0,0],[1,2,1,0,0],[2,2,2,0,0],[3,2,2,0,0]]
......@@ -1007,7 +1011,7 @@ def tdm_sampler(
import paddle.fluid as fluid
import numpy as np
paddle.enable_static()
x = fluid.data(name="x", shape=[None, 1], dtype="int32", lod_level=1)
x = paddle.static.data(name="x", shape=[None, 1], dtype="int32", lod_level=1)
travel_list = [[1, 3], [1, 4], [2, 5], [2, 6]] # leaf node's travel path, shape(leaf_node_num, layer_num)
layer_list_flat = [[1], [2], [3], [4], [5], [6]] # shape(node_nums, 1)
......@@ -1197,18 +1201,17 @@ def rank_attention(
Examples:
.. code-block:: python
import paddle.fluid as fluid
import numpy as np
import paddle
paddle.enable_static()
input = fluid.data(name="input", shape=[None, 2], dtype="float32")
rank_offset = fluid.data(name="rank_offset", shape=[None, 7], dtype="int32")
input = paddle.static.data(name="input", shape=[None, 2], dtype="float32")
rank_offset = paddle.static.data(name="rank_offset", shape=[None, 7], dtype="int32")
out = fluid.contrib.layers.rank_attention(input=input,
rank_offset=rank_offset,
rank_param_shape=[18,3],
rank_param_attr=
fluid.ParamAttr(learning_rate=1.0,
name="ubm_rank_param.w_0",
initializer=
fluid.initializer.Xavier(uniform=False)),
paddle.ParamAttr(learning_rate=1.0,
name="ubm_rank_param.w_0"),
max_rank=3,
max_size=0)
"""
......@@ -1259,21 +1262,20 @@ def batch_fc(input, param_size, param_attr, bias_size, bias_attr, act=None):
Examples:
.. code-block:: python
import paddle.fluid as fluid
import paddle
paddle.enable_static()
input = fluid.data(name="input", shape=[16, 2, 3], dtype="float32")
input = paddle.static.data(name="input", shape=[16, 2, 3], dtype="float32")
out = fluid.contrib.layers.batch_fc(input=input,
param_size=[16, 3, 10],
param_attr=
fluid.ParamAttr(learning_rate=1.0,
name="w_0",
initializer=
fluid.initializer.Xavier(uniform=False)),
paddle.ParamAttr(learning_rate=1.0,
name="w_0"),
bias_size=[16, 10],
bias_attr=
fluid.ParamAttr(learning_rate=1.0,
name="b_0",
initializer=
fluid.initializer.Xavier(uniform=False)),
paddle.ParamAttr(learning_rate=1.0,
name="b_0"),
act="relu")
"""
......@@ -1380,10 +1382,12 @@ def bilateral_slice(x, guide, grid, has_offset, name=None):
.. code-block:: python
import paddle.fluid as fluid
import paddle
paddle.enable_static()
x = fluid.data(name='x', shape=[None, 3, 101, 60], dtype='float32')
guide = fluid.data(name='guide', shape=[None, 101, 60], dtype='float32')
grid = fluid.data(name='grid', shape=[None, 12, 8, 10, 6], dtype='float32')
x = paddle.randn(name='x', shape=[1, 3, 101, 60], dtype='float32')
guide = paddle.randn(name='guide', shape=[1, 101, 60], dtype='float32')
grid = paddle.randn(name='grid', shape=[1, 12, 8, 10, 6], dtype='float32')
# without offset
output = fluid.contrib.bilateral_slice(x, guide, grid, has_offset=False)
......
# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
from paddle.fluid import core
from paddle.fluid.layer_helper import LayerHelper
from paddle.fluid.data_feeder import check_dtype, check_type
from ..utils import deprecated
from paddle.fluid.framework import static_only
__all__ = ['data']
@static_only
@deprecated(since="2.0.0", update_to="paddle.static.data")
def data(name, shape, dtype='float32', lod_level=0):
"""
**Data Layer**
This function creates a variable on the global block. The global variable
can be accessed by all the operators that follow it in the graph. The
variable is a placeholder that can be fed with input data; for example,
the Executor can feed input data into it.
Note:
`paddle.fluid.layers.data` is deprecated and will be removed in a
future version. Please use this `paddle.fluid.data` instead.
`paddle.fluid.layers.data` sets the shape and dtype at compile time but
does NOT check the shape or the dtype of fed data, while this
`paddle.fluid.data` checks the shape and the dtype of data fed by
Executor or ParallelExecutor during run time.
To feed variable-size inputs, users can set None or -1 on the variable
dimension when using :code:`paddle.fluid.data`, or feed variable-size
inputs directly to :code:`paddle.fluid.layers.data` and PaddlePaddle
will fit the size accordingly.
The default :code:`stop_gradient` attribute of the Variable created by
this API is True, which means the gradient won't be passed backward
through the data Variable. Set :code:`var.stop_gradient = False` if you
would like gradients to be passed backward through it.
Args:
name (str): The name/alias of the variable, see :ref:`api_guide_Name`
for more details.
shape (list|tuple): List|Tuple of integers declaring the shape. You can
set "None" or -1 at a dimension to indicate the dimension can be of any
size. For example, it is useful to set changeable batch size as "None" or -1.
dtype (np.dtype|VarType|str, optional): The type of the data. Supported
dtype: bool, float16, float32, float64, int8, int16, int32, int64,
uint8. Default: float32.
lod_level (int, optional): The LoD level of the LoDTensor. Usually users
don't have to set this value. For more details about when and how to
use LoD level, see :ref:`user_guide_lod_tensor` . Default: 0.
Returns:
Variable: The global variable that gives access to the data.
Examples:
.. code-block:: python
import paddle
import paddle.fluid as fluid
import numpy as np
paddle.enable_static()
# Creates a variable with fixed size [3, 2, 1]
# User can only feed data of the same shape to x
x = fluid.data(name='x', shape=[3, 2, 1], dtype='float32')
# Creates a variable with changeable batch size -1.
# Users can feed data of any batch size into y,
# but size of each data sample has to be [2, 1]
y = fluid.data(name='y', shape=[-1, 2, 1], dtype='float32')
z = x + y
# In this example, we will feed x and y with np-ndarray "1"
# and fetch z, like implementing "1 + 1 = 2" in PaddlePaddle
feed_data = np.ones(shape=[3, 2, 1], dtype=np.float32)
exe = fluid.Executor(fluid.CPUPlace())
out = exe.run(fluid.default_main_program(),
feed={
'x': feed_data,
'y': feed_data
},
fetch_list=[z.name])
# np-ndarray of shape=[3, 2, 1], dtype=float32, whose elements are 2
print(out)
"""
helper = LayerHelper('data', **locals())
check_type(name, 'name', (bytes, str), 'data')
check_type(shape, 'shape', (list, tuple), 'data')
shape = list(shape)
for i in range(len(shape)):
if shape[i] is None:
shape[i] = -1
return helper.create_global_variable(
name=name,
shape=shape,
dtype=dtype,
type=core.VarDesc.VarType.LOD_TENSOR,
stop_gradient=True,
lod_level=lod_level,
is_data=True,
need_check_feed=True,
)
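With this module removed, callers migrate to `paddle.static.data`, which keeps the same `name`/`shape`/`dtype`/`lod_level` signature and the same run-time feed checking. A minimal migration sketch of the docstring example above, assuming Paddle 2.x:
.. code-block:: python

    import numpy as np
    import paddle

    paddle.enable_static()

    # Previously: fluid.data(name='x', shape=[3, 2, 1], dtype='float32')
    x = paddle.static.data(name='x', shape=[3, 2, 1], dtype='float32')
    y = paddle.static.data(name='y', shape=[-1, 2, 1], dtype='float32')
    z = x + y

    feed_data = np.ones(shape=[3, 2, 1], dtype=np.float32)
    exe = paddle.static.Executor(paddle.CPUPlace())
    out = exe.run(paddle.static.default_main_program(),
                  feed={'x': feed_data, 'y': feed_data},
                  fetch_list=[z])
    print(out)  # one ndarray of shape [3, 2, 1] whose elements are all 2.0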
......@@ -347,8 +347,8 @@ class DataFeeder:
startup_program = fluid.Program()
with fluid.program_guard(main_program, startup_program):
data_1 = fluid.data(name='data_1', shape=[None, 2, 2], dtype='float32')
data_2 = fluid.data(name='data_2', shape=[None, 1, 3], dtype='float32')
data_1 = paddle.static.data(name='data_1', shape=[None, 2, 2], dtype='float32')
data_2 = paddle.static.data(name='data_2', shape=[None, 1, 3], dtype='float32')
out = paddle.static.nn.fc(x=[data_1, data_2], size=2)
# ...
feeder = fluid.DataFeeder([data_1, data_2], place)
......@@ -414,9 +414,9 @@ class DataFeeder:
for i in range(1, limit + 1):
yield np.ones([6]).astype('float32') * i , np.ones([1]).astype('int64') * i, np.random.random([9]).astype('float32')
data_1 = fluid.data(name='data_1', shape=[None, 2, 1, 3])
data_2 = fluid.data(name='data_2', shape=[None, 1], dtype='int64')
data_3 = fluid.data(name='data_3', shape=[None, 3, 3], dtype='float32')
data_1 = paddle.static.data(name='data_1', shape=[None, 2, 1, 3])
data_2 = paddle.static.data(name='data_2', shape=[None, 1], dtype='int64')
data_3 = paddle.static.data(name='data_3', shape=[None, 3, 3], dtype='float32')
feeder = fluid.DataFeeder(['data_1','data_2', 'data_3'], fluid.CPUPlace())
......@@ -482,8 +482,8 @@ class DataFeeder:
yield np.ones([4]) * factor + base, np.ones([4]) * factor + base + 5
return _reader()
x = fluid.data(name='x', shape=[None, 2, 2])
y = fluid.data(name='y', shape=[None, 2, 2], dtype='float32')
x = paddle.static.data(name='x', shape=[None, 2, 2])
y = paddle.static.data(name='y', shape=[None, 2, 2], dtype='float32')
z = paddle.add(x, y)
......@@ -582,8 +582,8 @@ class DataFeeder:
places = [fluid.CPUPlace() for _ in range(place_num)]
# a simple network sample
data = fluid.data(name='data', shape=[None, 4, 4], dtype='float32')
label = fluid.data(name='label', shape=[None, 1], dtype='int64')
data = paddle.static.data(name='data', shape=[None, 4, 4], dtype='float32')
label = paddle.static.data(name='label', shape=[None, 1], dtype='int64')
hidden = paddle.static.nn.fc(x=data, size=10)
feeder = fluid.DataFeeder(place=places[0], feed_list=[data, label])
......
......@@ -1687,7 +1687,7 @@ class Executor:
compiled = isinstance(program, compiler.CompiledProgram)
# Check if fluid.data() variable no feed data
# Check if any paddle.static.data() variable is missing feed data
if use_prune:
if compiled:
global_block = program._program.global_block()
......
......@@ -2072,9 +2072,9 @@ class Variable(metaclass=VariableMetaClass):
Examples:
.. code-block:: python
import paddle.fluid as fluid
import paddle
x = fluid.data(name="x", shape=[-1, 23, 48], dtype='float32')
x = paddle.static.data(name="x", shape=[-1, 23, 48], dtype='float32')
print(x.grad_name) # output is ``x@GRAD``
"""
......
......@@ -190,8 +190,8 @@ def save_inference_model(
path = "./infer_model"
# User defined network, here a softmax regression example
image = fluid.data(name='img', shape=[None, 28, 28], dtype='float32')
label = fluid.data(name='label', shape=[None, 1], dtype='int64')
image = paddle.static.data(name='img', shape=[None, 28, 28], dtype='float32')
label = paddle.static.data(name='label', shape=[None, 1], dtype='int64')
feeder = fluid.DataFeeder(feed_list=[image, label], place=fluid.CPUPlace())
predict = paddle.static.nn.fc(x=image, size=10, activation='softmax')
......
......@@ -335,7 +335,7 @@ class StaticRNN:
vocab_size, hidden_size = 10000, 200
paddle.enable_static()
x = fluid.data(name="x", shape=[None, 1, 1], dtype='int64')
x = paddle.static.data(name="x", shape=[None, 1, 1], dtype='int64')
# create word sequence
x_emb = layers.embedding(
input=x,
......@@ -426,7 +426,7 @@ class StaticRNN:
vocab_size, hidden_size = 10000, 200
paddle.enable_static()
x = fluid.data(name="x", shape=[None, 1, 1], dtype='int64')
x = paddle.static.data(name="x", shape=[None, 1, 1], dtype='int64')
# create word sequence
x_emb = layers.embedding(
input=x,
......@@ -455,7 +455,7 @@ class StaticRNN:
import paddle.fluid.layers as layers
vocab_size, hidden_size = 10000, 200
paddle.enable_static()
x = fluid.data(name="x", shape=[None, 1, 1], dtype='int64')
x = paddle.static.data(name="x", shape=[None, 1, 1], dtype='int64')
# create word sequence
x_emb = layers.embedding(
input=x,
......@@ -558,7 +558,7 @@ class StaticRNN:
vocab_size, hidden_size = 10000, 200
paddle.enable_static()
x = fluid.data(name="x", shape=[None, 1, 1], dtype='int64')
x = paddle.static.data(name="x", shape=[None, 1, 1], dtype='int64')
# create word sequence
x_emb = layers.embedding(
input=x,
......@@ -611,7 +611,7 @@ class StaticRNN:
vocab_size, hidden_size = 10000, 200
paddle.enable_static()
x = fluid.data(name="x", shape=[None, 1, 1], dtype='int64')
x = paddle.static.data(name="x", shape=[None, 1, 1], dtype='int64')
# create word sequence
x_emb = layers.embedding(
input=x,
......@@ -673,7 +673,7 @@ class StaticRNN:
vocab_size, hidden_size = 10000, 200
paddle.enable_static()
x = fluid.data(name="x", shape=[None, 1, 1], dtype='int64')
x = paddle.static.data(name="x", shape=[None, 1, 1], dtype='int64')
# create word sequence
x_emb = layers.embedding(
input=x,
......@@ -955,7 +955,7 @@ class While:
i = paddle.full(shape=[1], dtype='int64', fill_value=0)
loop_len = paddle.full(shape=[1], dtype='int64', fill_value=10)
one = paddle.full(shape=[1], dtype='float32', fill_value=1)
data = fluid.data(name='data', shape=[1], dtype='float32')
data = paddle.static.data(name='data', shape=[1], dtype='float32')
sums = paddle.full(shape=[1], dtype='float32', fill_value=0) # Define the variable to be obtained outside of the While; its name should be different from that of the variable obtained inside the While
cond = paddle.less_than(x=i, y=loop_len)
......
......@@ -183,13 +183,13 @@ def monkey_patch_variable():
In Static Graph Mode:
.. code-block:: python
import paddle
import paddle.fluid as fluid
paddle.enable_static()
startup_prog = fluid.Program()
main_prog = fluid.Program()
with fluid.program_guard(main_prog, startup_prog):
original_variable = fluid.data(name = "new_variable", shape=[2,2], dtype='float32')
original_variable = paddle.static.data(name = "new_variable", shape=[2,2], dtype='float32')
new_variable = original_variable.astype('int64')
print("new var's dtype is: {}".format(new_variable.dtype))
......
......@@ -206,7 +206,7 @@ def embedding(
import paddle
paddle.enable_static()
data = fluid.data(name='x', shape=[None, 1], dtype='int64')
data = paddle.static.data(name='x', shape=[None, 1], dtype='int64')
# example 1
emb_1 = paddle.static.nn.embedding(input=data, size=[128, 64])
......@@ -572,7 +572,7 @@ def reduce_sum(input, dim=None, keep_dim=False, name=None):
# [[0.2, 0.3, 0.5, 0.9]
# [0.1, 0.2, 0.6, 0.7]]
# Each example is followed by the corresponding output tensor.
x = fluid.data(name='x', shape=[2, 4], dtype='float32')
x = paddle.static.data(name='x', shape=[2, 4], dtype='float32')
fluid.layers.nn.reduce_sum(x) # [3.5]
fluid.layers.nn.reduce_sum(x, dim=0) # [0.3, 0.5, 1.1, 1.6]
fluid.layers.nn.reduce_sum(x, dim=-1) # [1.9, 1.6]
......@@ -582,7 +582,7 @@ def reduce_sum(input, dim=None, keep_dim=False, name=None):
# [[[1, 2], [3, 4]],
# [[5, 6], [7, 8]]]
# Each example is followed by the corresponding output tensor.
y = fluid.data(name='y', shape=[2, 2, 2], dtype='float32')
y = paddle.static.data(name='y', shape=[2, 2, 2], dtype='float32')
fluid.layers.nn.reduce_sum(y, dim=[1, 2]) # [10, 26]
fluid.layers.nn.reduce_sum(y, dim=[0, 1]) # [16, 20]
......
......@@ -111,7 +111,7 @@ def simple_img_conv_pool(
import paddle.fluid as fluid
import paddle
paddle.enable_static()
img = fluid.data(name='img', shape=[100, 1, 28, 28], dtype='float32')
img = paddle.static.data(name='img', shape=[100, 1, 28, 28], dtype='float32')
conv_pool = fluid.nets.simple_img_conv_pool(input=img,
filter_size=5,
num_filters=20,
......@@ -214,7 +214,7 @@ def img_conv_group(
import paddle
paddle.enable_static()
img = fluid.data(name='img', shape=[None, 1, 28, 28], dtype='float32')
img = paddle.static.data(name='img', shape=[None, 1, 28, 28], dtype='float32')
conv_pool = fluid.nets.img_conv_group(input=img,
conv_padding=1,
conv_num_filter=[3, 3],
......@@ -331,7 +331,7 @@ def sequence_conv_pool(
input_dim = 100 #len(word_dict)
emb_dim = 128
hid_dim = 512
data = fluid.data(name="words", shape=[None, 1], dtype="int64", lod_level=1)
data = paddle.static.data(name="words", shape=[None, 1], dtype="int64", lod_level=1)
emb = fluid.layers.embedding(input=data, size=[input_dim, emb_dim], is_sparse=True)
seq_conv = fluid.nets.sequence_conv_pool(input=emb,
num_filters=hid_dim,
......@@ -391,7 +391,7 @@ def glu(input, dim=-1):
import paddle
paddle.enable_static()
data = fluid.data(
data = paddle.static.data(
name="words", shape=[-1, 6, 3, 9], dtype="float32")
# shape of output: [-1, 3, 3, 9]
output = fluid.nets.glu(input=data, dim=1)
......@@ -472,9 +472,9 @@ def scaled_dot_product_attention(
import paddle
paddle.enable_static()
queries = fluid.data(name="queries", shape=[3, 5, 9], dtype="float32")
keys = fluid.data(name="keys", shape=[3, 6, 9], dtype="float32")
values = fluid.data(name="values", shape=[3, 6, 10], dtype="float32")
queries = paddle.static.data(name="queries", shape=[3, 5, 9], dtype="float32")
keys = paddle.static.data(name="keys", shape=[3, 6, 9], dtype="float32")
values = paddle.static.data(name="values", shape=[3, 6, 10], dtype="float32")
contexts = fluid.nets.scaled_dot_product_attention(queries, keys, values)
contexts.shape # [3, 5, 10]
"""
......
......@@ -2036,7 +2036,7 @@ class AdagradOptimizer(Optimizer):
paddle.enable_static()
np_inp = np.array([[1.0, 2.0], [3.0, 4.0]], dtype=np.float32)
inp = fluid.data(name="inp", shape=[2, 2])
inp = paddle.static.data(name="inp", shape=[2, 2], dtype="float32")
out = paddle.static.nn.fc(inp, size=3)
out = paddle.sum(out)
optimizer = fluid.optimizer.AdagradOptimizer(learning_rate=0.2)
......@@ -2228,8 +2228,8 @@ class AdamOptimizer(Optimizer):
place = fluid.CPUPlace()
main = fluid.Program()
with fluid.program_guard(main):
x = fluid.data(name='x', shape=[None, 13], dtype='float32')
y = fluid.data(name='y', shape=[None, 1], dtype='float32')
x = paddle.static.data(name='x', shape=[None, 13], dtype='float32')
y = paddle.static.data(name='y', shape=[None, 1], dtype='float32')
y_predict = paddle.static.nn.fc(x, size=1, activation=None)
cost = paddle.nn.functional.square_error_cost(input=y_predict, label=y)
avg_cost = paddle.mean(cost)
......@@ -2257,8 +2257,8 @@ class AdamOptimizer(Optimizer):
place = fluid.CPUPlace()
main = fluid.Program()
with fluid.program_guard(main):
x = fluid.data(name='x', shape=[None, 13], dtype='float32')
y = fluid.data(name='y', shape=[None, 1], dtype='float32')
x = paddle.static.data(name='x', shape=[None, 13], dtype='float32')
y = paddle.static.data(name='y', shape=[None, 1], dtype='float32')
y_predict = paddle.static.nn.fc(x, size=1, activation=None)
cost = paddle.nn.functional.square_error_cost(input=y_predict, label=y)
avg_cost = paddle.mean(cost)
......@@ -2292,8 +2292,8 @@ class AdamOptimizer(Optimizer):
div_res = global_step / decay_steps
decayed_beta1 = beta1_init * (decay_rate**div_res)
decayed_beta2 = beta2_init * (decay_rate**div_res)
fluid.layers.assign(decayed_beta1, beta1)
fluid.layers.assign(decayed_beta2, beta2)
paddle.assign(decayed_beta1, beta1)
paddle.assign(decayed_beta2, beta2)
return beta1, beta2, epsilon
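The snippet above is only the body of a user-supplied callable whose returned betas are handed to the Adam optimizer; a self-contained version might look like the following sketch (the function name and the `create_global_var` scaffolding are illustrative, mirroring the surrounding docstring example):
.. code-block:: python

    import paddle

    def get_decayed_betas(beta1_init, beta2_init, decay_steps, decay_rate, epsilon):
        # beta1/beta2 are persistable tensors that are re-assigned each step
        global_step = paddle.static.create_global_var(
            shape=[1], value=0.0, dtype='float32', persistable=True)
        beta1 = paddle.static.create_global_var(
            shape=[1], value=float(beta1_init), dtype='float32', persistable=True)
        beta2 = paddle.static.create_global_var(
            shape=[1], value=float(beta2_init), dtype='float32', persistable=True)

        div_res = global_step / decay_steps
        decayed_beta1 = beta1_init * (decay_rate**div_res)
        decayed_beta2 = beta2_init * (decay_rate**div_res)
        paddle.assign(decayed_beta1, beta1)
        paddle.assign(decayed_beta2, beta2)
        return beta1, beta2, epsilon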
......@@ -2651,7 +2651,7 @@ class AdamaxOptimizer(Optimizer):
train_program = fluid.Program()
startup_program = fluid.Program()
with fluid.program_guard(train_program, startup_program):
data = fluid.data(name='X', shape=[None, 1], dtype='float32')
data = paddle.static.data(name='X', shape=[None, 1], dtype='float32')
hidden = paddle.static.nn.fc(x=data, size=10)
loss = paddle.mean(hidden)
adam = fluid.optimizer.AdamaxOptimizer(learning_rate=0.2)
......@@ -2994,7 +2994,7 @@ class DecayedAdagradOptimizer(Optimizer):
import paddle.fluid as fluid
paddle.enable_static()
x = fluid.data(name='x', shape=[None, 10], dtype='float32')
x = paddle.static.data(name='x', shape=[None, 10], dtype='float32')
trans = paddle.static.nn.fc(x, 100)
cost = paddle.mean(trans)
optimizer = fluid.optimizer.DecayedAdagradOptimizer(learning_rate=0.2)
......@@ -3118,7 +3118,7 @@ class AdadeltaOptimizer(Optimizer):
import paddle.fluid as fluid
paddle.enable_static()
image = fluid.data(name='image', shape=[None, 28], dtype='float32')
image = paddle.static.data(name='image', shape=[None, 28], dtype='float32')
fc = paddle.static.nn.fc(image, size=10)
cost = paddle.mean(fc)
optimizer = fluid.optimizer.Adadelta(
......@@ -3747,7 +3747,7 @@ class LambOptimizer(AdamOptimizer):
import paddle.fluid as fluid
paddle.enable_static()
data = fluid.data(name='x', shape=[-1, 5], dtype='float32')
data = paddle.static.data(name='x', shape=[-1, 5], dtype='float32')
hidden = paddle.static.nn.fc(x=data, size=10)
cost = paddle.mean(hidden)
......@@ -3964,7 +3964,7 @@ class ModelAverage(Optimizer):
startup_program = fluid.Program()
with fluid.program_guard(train_program, startup_program):
# build net
data = fluid.data(name='X', shape=[None, 1], dtype='float32')
data = paddle.static.data(name='X', shape=[None, 1], dtype='float32')
hidden = paddle.static.nn.fc(x=data, size=10)
loss = paddle.mean(hidden)
optimizer = fluid.optimizer.Momentum(learning_rate=0.2, momentum=0.1)
......@@ -4143,7 +4143,7 @@ class ModelAverage(Optimizer):
startup_program = fluid.Program()
with fluid.program_guard(train_program, startup_program):
# build net
data = fluid.data(name='X', shape=[None, 1], dtype='float32')
data = paddle.static.data(name='X', shape=[None, 1], dtype='float32')
hidden = paddle.static.nn.fc(x=data, size=10)
loss = paddle.mean(hidden)
optimizer = fluid.optimizer.Momentum(learning_rate=0.2, momentum=0.1)
......@@ -4199,7 +4199,7 @@ class ModelAverage(Optimizer):
startup_program = fluid.Program()
with fluid.program_guard(train_program, startup_program):
# build net
data = fluid.data(name='X', shape=[None, 1], dtype='float32')
data = paddle.static.data(name='X', shape=[None, 1], dtype='float32')
hidden = paddle.static.nn.fc(x=data, size=10)
loss = paddle.mean(hidden)
optimizer = fluid.optimizer.Momentum(learning_rate=0.2, momentum=0.1)
......
......@@ -84,10 +84,11 @@ def npu_profiler(output_file, config=None):
import paddle.fluid as fluid
import paddle.fluid.profiler as profiler
import numpy as np
import paddle
epoc = 8
dshape = [4, 3, 28, 28]
data = fluid.data(name='data', shape=[None, 3, 28, 28], dtype='float32')
data = paddle.static.data(name='data', shape=[None, 3, 28, 28], dtype='float32')
conv = paddle.static.nn.conv2d(data, 20, 3, stride=[1, 1], padding=[1, 1])
place = fluid.NPUPlace(0)
......@@ -337,7 +338,7 @@ def profiler(
epoc = 8
dshape = [4, 3, 28, 28]
data = fluid.data(name='data', shape=[None, 3, 28, 28], dtype='float32')
data = paddle.static.data(name='data', shape=[None, 3, 28, 28], dtype='float32')
conv = paddle.static.nn.conv2d(data, 20, 3, stride=[1, 1], padding=[1, 1])
place = fluid.CPUPlace()
......
......@@ -655,7 +655,7 @@ class DataLoader:
Args:
feed_list (list(Tensor)|tuple(Tensor)): feed Tensor list.
The Tensors should be created by :code:`fluid.data()`.
The Tensors should be created by :code:`paddle.static.data()`.
capacity (int): capacity of the queue maintained in DataLoader.
The unit is batch number. Set larger capacity if your reader
is fast.
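The `feed_list` note above now points at `paddle.static.data`; the surrounding `from_generator` usage is otherwise unchanged. A minimal sketch, assuming Paddle 2.x static mode:
.. code-block:: python

    import numpy as np
    import paddle

    paddle.enable_static()

    image = paddle.static.data(name='image', shape=[None, 784], dtype='float32')
    label = paddle.static.data(name='label', shape=[None, 1], dtype='int64')
    out = paddle.mean(image)

    loader = paddle.io.DataLoader.from_generator(
        feed_list=[image, label], capacity=4, iterable=True)

    def batch_generator():
        for _ in range(5):
            yield [np.random.rand(2, 784).astype('float32'),
                   np.random.randint(0, 10, (2, 1)).astype('int64')]

    loader.set_batch_generator(batch_generator, places=paddle.CPUPlace())

    exe = paddle.static.Executor(paddle.CPUPlace())
    for data in loader():
        res = exe.run(paddle.static.default_main_program(),
                      feed=data, fetch_list=[out])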
......@@ -1651,8 +1651,8 @@ class PyReader(DataLoaderBase):
yield fake_image, fake_label
return reader
image = fluid.data(name='image', shape=[None, 784, 784], dtype='float32')
label = fluid.data(name='label', shape=[None, 1], dtype='int64')
image = paddle.static.data(name='image', shape=[None, 784, 784], dtype='float32')
label = paddle.static.data(name='label', shape=[None, 1], dtype='int64')
reader = fluid.io.PyReader(feed_list=[image, label],
capacity=4,
......@@ -1708,8 +1708,8 @@ class PyReader(DataLoaderBase):
yield fake_image, fake_label
return reader
image = fluid.data(name='image', shape=[None, 784, 784], dtype='float32')
label = fluid.data(name='label', shape=[None, 1], dtype='int64')
image = paddle.static.data(name='image', shape=[None, 784, 784], dtype='float32')
label = paddle.static.data(name='label', shape=[None, 1], dtype='int64')
reader = fluid.io.PyReader(feed_list=[image, label], capacity=4, iterable=True, return_list=False)
user_defined_reader = reader_creator_random_image(784, 784)
......@@ -1800,7 +1800,7 @@ class PyReader(DataLoaderBase):
for i in range(5):
yield np.random.uniform(low=0, high=255, size=[784, 784]),
image = fluid.data(name='image', shape=[None, 784, 784], dtype='float32')
image = paddle.static.data(name='image', shape=[None, 784, 784], dtype='float32')
reader = fluid.io.PyReader(feed_list=[image], capacity=4, iterable=False)
reader.decorate_sample_list_generator(
paddle.batch(generator, batch_size=BATCH_SIZE))
......@@ -1837,7 +1837,7 @@ class PyReader(DataLoaderBase):
for i in range(5):
yield np.random.uniform(low=0, high=255, size=[784, 784]),
image = fluid.data(name='image', shape=[None, 784, 784], dtype='float32')
image = paddle.static.data(name='image', shape=[None, 784, 784], dtype='float32')
reader = fluid.io.PyReader(feed_list=[image], capacity=4, iterable=False)
reader.decorate_sample_list_generator(
paddle.batch(generator, batch_size=BATCH_SIZE))
......@@ -1908,8 +1908,8 @@ class PyReader(DataLoaderBase):
yield fake_image, fake_label
return generator
image = fluid.data(name='image', shape=[None, 784, 784], dtype='float32')
label = fluid.data(name='label', shape=[None, 1], dtype='int64')
image = paddle.static.data(name='image', shape=[None, 784, 784], dtype='float32')
label = paddle.static.data(name='label', shape=[None, 1], dtype='int64')
reader = fluid.io.PyReader(feed_list=[image, label], capacity=4, iterable=True)
user_defined_generator = random_image_and_label_generator(784, 784)
......@@ -1975,8 +1975,8 @@ class PyReader(DataLoaderBase):
yield fake_image, fake_label
return generator
image = fluid.data(name='image', shape=[None, 784, 784], dtype='float32')
label = fluid.data(name='label', shape=[None, 1], dtype='int64')
image = paddle.static.data(name='image', shape=[None, 784, 784], dtype='float32')
label = paddle.static.data(name='label', shape=[None, 1], dtype='int64')
reader = fluid.io.PyReader(feed_list=[image, label], capacity=4, iterable=True)
user_defined_generator = random_image_and_label_generator(784, 784)
......@@ -2043,8 +2043,8 @@ class PyReader(DataLoaderBase):
yield batch_image, batch_label
return generator
image = fluid.data(name='image', shape=[None, 784, 784], dtype='float32')
label = fluid.data(name='label', shape=[None, 1], dtype='int64')
image = paddle.static.data(name='image', shape=[None, 784, 784], dtype='float32')
label = paddle.static.data(name='label', shape=[None, 1], dtype='int64')
reader = fluid.io.PyReader(feed_list=[image, label], capacity=4, iterable=True)
user_defined_generator = random_image_and_label_generator(784, 784)
......
......@@ -85,17 +85,19 @@ class TestGenerateProposals(LayerTest):
variances_np = np.ones((4, 4, 3, 4)).astype('float32')
with self.static_graph():
scores = fluid.data(
scores = paddle.static.data(
name='scores', shape=[2, 3, 4, 4], dtype='float32'
)
bbox_deltas = fluid.data(
bbox_deltas = paddle.static.data(
name='bbox_deltas', shape=[2, 12, 4, 4], dtype='float32'
)
im_info = fluid.data(name='im_info', shape=[2, 3], dtype='float32')
anchors = fluid.data(
im_info = paddle.static.data(
name='im_info', shape=[2, 3], dtype='float32'
)
anchors = paddle.static.data(
name='anchors', shape=[4, 4, 3, 4], dtype='float32'
)
variances = fluid.data(
variances = paddle.static.data(
name='var', shape=[4, 4, 3, 4], dtype='float32'
)
rois, roi_probs, rois_num = paddle.vision.ops.generate_proposals(
......@@ -175,8 +177,12 @@ class TestDistributeFpnProposals(LayerTest):
rois_np = np.random.rand(10, 4).astype('float32')
rois_num_np = np.array([4, 6]).astype('int32')
with self.static_graph():
rois = fluid.data(name='rois', shape=[10, 4], dtype='float32')
rois_num = fluid.data(name='rois_num', shape=[None], dtype='int32')
rois = paddle.static.data(
name='rois', shape=[10, 4], dtype='float32'
)
rois_num = paddle.static.data(
name='rois_num', shape=[None], dtype='int32'
)
(
multi_rois,
restore_ind,
......@@ -230,7 +236,7 @@ class TestDistributeFpnProposals(LayerTest):
def test_distribute_fpn_proposals_error(self):
program = Program()
with program_guard(program):
fpn_rois = fluid.data(
fpn_rois = paddle.static.data(
name='data_error', shape=[10, 4], dtype='int32', lod_level=1
)
self.assertRaises(
......
......@@ -31,10 +31,12 @@ class TestASPHelperPruningBase(unittest.TestCase):
self.startup_program = fluid.Program()
def build_model():
img = fluid.data(
img = paddle.static.data(
name='img', shape=[None, 3, 32, 32], dtype='float32'
)
label = fluid.data(name='label', shape=[None, 1], dtype='int64')
label = paddle.static.data(
name='label', shape=[None, 1], dtype='int64'
)
hidden = paddle.static.nn.conv2d(
input=img, num_filters=4, filter_size=3, padding=2, act="relu"
)
......
......@@ -196,10 +196,12 @@ class TestASPStaticCustomerizedPruneFunc(unittest.TestCase):
self.customer_prefix = "customer_layer"
def build_model():
img = fluid.data(
img = paddle.static.data(
name='img', shape=[None, 3, 32, 32], dtype='float32'
)
label = fluid.data(name='label', shape=[None, 1], dtype='int64')
label = paddle.static.data(
name='label', shape=[None, 1], dtype='int64'
)
hidden = paddle.static.nn.conv2d(
input=img, num_filters=4, filter_size=3, padding=2, act="relu"
)
......
......@@ -31,10 +31,12 @@ class TestASPStaticOptimize(unittest.TestCase):
self.startup_program = fluid.Program()
def build_model():
img = fluid.data(
img = paddle.static.data(
name='img', shape=[None, 3, 24, 24], dtype='float32'
)
label = fluid.data(name='label', shape=[None, 1], dtype='int64')
label = paddle.static.data(
name='label', shape=[None, 1], dtype='int64'
)
hidden = paddle.static.nn.conv2d(
input=img, num_filters=4, filter_size=3, padding=2, act="relu"
)
......
......@@ -31,10 +31,12 @@ class TestASPStaticPruningBase(unittest.TestCase):
self.startup_program = fluid.Program()
def build_model():
img = fluid.data(
img = paddle.static.data(
name='img', shape=[None, 3, 24, 24], dtype='float32'
)
label = fluid.data(name='label', shape=[None, 1], dtype='int64')
label = paddle.static.data(
name='label', shape=[None, 1], dtype='int64'
)
hidden = paddle.static.nn.conv2d(
input=img, num_filters=2, filter_size=3, padding=2, act="relu"
)
......
......@@ -128,10 +128,12 @@ class TestASPStaticOptimize(unittest.TestCase):
self.startup_program = fluid.Program()
def build_model():
img = fluid.data(
img = paddle.static.data(
name='img', shape=[None, 3, 32, 32], dtype='float32'
)
label = fluid.data(name='label', shape=[None, 1], dtype='int64')
label = paddle.static.data(
name='label', shape=[None, 1], dtype='int64'
)
hidden = paddle.static.nn.conv2d(
input=img, num_filters=4, filter_size=3, padding=2, act="relu"
)
......
......@@ -65,8 +65,12 @@ class AutoCheckpointBase(unittest.TestCase):
self, exe, main_prog, startup_prog, minimize=True, iterable=True
):
def simple_net():
image = fluid.data(name='image', shape=[-1, 4, 4], dtype='float32')
label = fluid.data(name='label', shape=[-1, 1], dtype='int64')
image = paddle.static.data(
name='image', shape=[-1, 4, 4], dtype='float32'
)
label = paddle.static.data(
name='label', shape=[-1, 1], dtype='int64'
)
fc_tmp = paddle.static.nn.fc(image, size=CLASS_NUM)
cross_entropy = paddle.nn.functional.softmax_with_cross_entropy(
......
......@@ -71,7 +71,7 @@ def create_model(data, rank):
class TestModelParallel(TestDistRunnerBase):
def get_model(self, batch_size=2, use_dgc=False, dist_strategy=None):
# Input data
data_in = fluid.data(
data_in = paddle.static.data(
name='data_in', shape=[batch_size, IN_SIZE], dtype=DTYPE
)
......
......@@ -75,7 +75,7 @@ def create_model(data, rank):
class TestModelParallel(TestDistRunnerBase):
def get_model(self, batch_size=2, use_dgc=False, dist_strategy=None):
# Input data
data_in = fluid.data(
data_in = paddle.static.data(
name='data_in', shape=[batch_size, IN_SIZE], dtype=DTYPE
)
......
......@@ -65,7 +65,7 @@ def create_model(data, rank):
class TestModelParallel(TestDistRunnerBase):
def get_model(self, batch_size=2, use_dgc=False, dist_strategy=None):
# Input data
data_in = fluid.data(
data_in = paddle.static.data(
name='data_in', shape=[batch_size, IN_SIZE], dtype=DTYPE
)
......
......@@ -35,8 +35,10 @@ class FleetTest(unittest.TestCase):
role = role_maker.PaddleCloudRoleMaker(is_collective=True)
fleet.init(role)
image = fluid.data(name='img', shape=[None, 28, 28], dtype='float32')
label = fluid.data(name='label', shape=[None, 1], dtype='int64')
image = paddle.static.data(
name='img', shape=[None, 28, 28], dtype='float32'
)
label = paddle.static.data(name='label', shape=[None, 1], dtype='int64')
feeder = fluid.DataFeeder(
feed_list=[image, label], place=fluid.CPUPlace()
)
......
......@@ -103,13 +103,13 @@ class CategoricalTest(unittest.TestCase):
def init_static_data(self, batch_size, dims):
with fluid.program_guard(self.test_program):
self.logits_static = fluid.data(
self.logits_static = paddle.static.data(
name='logits', shape=self.logits_shape, dtype='float32'
)
self.other_logits_static = fluid.data(
self.other_logits_static = paddle.static.data(
name='other_logits', shape=self.logits_shape, dtype='float32'
)
self.value_static = fluid.data(
self.value_static = paddle.static.data(
name='value', shape=self.value_shape, dtype='int64'
)
......@@ -211,13 +211,13 @@ class CategoricalTest2(CategoricalTest):
def init_static_data(self, batch_size, dims):
with fluid.program_guard(self.test_program):
self.logits_static = fluid.data(
self.logits_static = paddle.static.data(
name='logits', shape=self.logits_shape, dtype='float64'
)
self.other_logits_static = fluid.data(
self.other_logits_static = paddle.static.data(
name='other_logits', shape=self.logits_shape, dtype='float64'
)
self.value_static = fluid.data(
self.value_static = paddle.static.data(
name='value', shape=self.value_shape, dtype='int64'
)
......@@ -234,7 +234,7 @@ class CategoricalTest3(CategoricalTest):
with fluid.program_guard(self.test_program):
self.logits_static = self.logits_np
self.other_logits_static = self.other_logits_np
self.value_static = fluid.data(
self.value_static = paddle.static.data(
name='value', shape=self.value_shape, dtype='int64'
)
......@@ -263,7 +263,7 @@ class CategoricalTest4(CategoricalTest):
with fluid.program_guard(self.test_program):
self.logits_static = self.logits_np
self.other_logits_static = self.other_logits_np
self.value_static = fluid.data(
self.value_static = paddle.static.data(
name='value', shape=self.value_shape, dtype='int64'
)
......@@ -344,7 +344,7 @@ class CategoricalTest8(CategoricalTest):
with fluid.program_guard(self.test_program):
self.logits_static = self.logits_np.tolist()
self.other_logits_static = self.other_logits_np.tolist()
self.value_static = fluid.data(
self.value_static = paddle.static.data(
name='value', shape=self.value_shape, dtype='int64'
)
......@@ -361,7 +361,7 @@ class CategoricalTest9(CategoricalTest):
with fluid.program_guard(self.test_program):
self.logits_static = tuple(self.logits_np.tolist())
self.other_logits_static = tuple(self.other_logits_np.tolist())
self.value_static = fluid.data(
self.value_static = paddle.static.data(
name='value', shape=self.value_shape, dtype='int64'
)
......
......@@ -108,7 +108,7 @@ def func_to_test5():
a = inner_int_func()
b = inner_bool_float_func(3)
c = inner_unknown_func(None)
d = paddle.fluid.data('x', [1, 2])
d = paddle.static.data('x', [1, 2])
result_var_type5 = {
......
......@@ -69,7 +69,7 @@ class TestCase1(TestBase):
class TestError(TestBase):
@IPUOpTest.static_graph
def build_model(self):
x = paddle.fluid.data('x', [-1, 3, 13], 'float32')
x = paddle.static.data('x', [-1, 3, 13], 'float32')
x_fill = paddle.full_like(x, **self.attrs)
out = paddle.add(x_fill, x_fill)
self.fetch_list = [out.name]
......
......@@ -26,7 +26,7 @@ class TestMKLDNNCpuBfloat16Pass(InferencePassTest):
def setUp(self):
self.init_data()
with fluid.program_guard(self.main_program, self.startup_program):
x = fluid.data(
x = paddle.static.data(
name='x', shape=[-1] + self.shape_x, dtype=self.d_type
)
......
......@@ -31,10 +31,10 @@ class ElementwiseActivationMkldnnFusePassTest(InferencePassTest):
def setUp(self):
self.set_params()
with fluid.program_guard(self.main_program, self.startup_program):
data_A = fluid.data(
data_A = paddle.static.data(
name="data_A", shape=[-1, 3, 100, 100], dtype="float32"
)
data_B = fluid.data(
data_B = paddle.static.data(
name="data_B", shape=[-1, 3, 100, 100], dtype="float32"
)
elt_out = self.operand(data_A, data_B)
......
......@@ -32,10 +32,10 @@ class TestMKLDNNMatmulFuseOp(InferencePassTest):
def make_network(self):
with fluid.program_guard(self.main_program, self.startup_program):
x = fluid.data(
x = paddle.static.data(
name='x', shape=[-1] + self.shape_x, dtype=self.d_type
)
y = fluid.data(
y = paddle.static.data(
name='y', shape=[-1] + self.shape_y, dtype=self.d_type
)
out = paddle.matmul(x, y)
......@@ -74,10 +74,10 @@ class TestMKLDNNMatmulOtherDimsFuseOp(TestMKLDNNMatmulFuseOp):
class TestMKLDNNMatmulOpNotFusedWrongTransposeAxis(TestMKLDNNMatmulFuseOp):
def make_network(self):
with fluid.program_guard(self.main_program, self.startup_program):
x = fluid.data(
x = paddle.static.data(
name='x', shape=[-1] + self.shape_x, dtype=self.d_type
)
y = fluid.data(
y = paddle.static.data(
name='y', shape=[-1] + self.shape_y, dtype=self.d_type
)
out = paddle.matmul(x, y)
......@@ -97,10 +97,10 @@ class TestMKLDNNMatmulOpNotFusedBreakPattern(TestMKLDNNMatmulFuseOp):
def make_network(self):
with fluid.program_guard(self.main_program, self.startup_program):
x = fluid.data(
x = paddle.static.data(
name='x', shape=[-1] + self.shape_x, dtype=self.d_type
)
y = fluid.data(
y = paddle.static.data(
name='y', shape=[-1] + self.shape_y, dtype=self.d_type
)
out = paddle.matmul(x, y)
......
......@@ -29,7 +29,7 @@ class TestReshapeTransposeMatmulV2OneDNNFusePass(InferencePassTest):
self.pass_name = 'reshape_transpose_matmul_mkldnn_fuse_pass'
with fluid.program_guard(self.main_program, self.startup_program):
data = fluid.data(
data = paddle.static.data(
name="data", shape=self.data_shape, dtype="float32"
)
weight = paddle.create_parameter(
......
......@@ -37,7 +37,7 @@ class TensorRTSubgraphPassActivationTest(InferencePassTest):
def setUp(self):
self.setUpTensorRTParam()
with fluid.program_guard(self.main_program, self.startup_program):
data = fluid.data(
data = paddle.static.data(
name="data", shape=[-1, 6, 32, 32], dtype="float32"
)
act_out = self.append_act(data)
......
......@@ -28,7 +28,7 @@ class TensorRTSubgraphPassConv3dTest(InferencePassTest):
self.init_params()
self.set_params()
with fluid.program_guard(self.main_program, self.startup_program):
data = fluid.data(
data = paddle.static.data(
name="data", shape=[-1, 3, 6, 32, 32], dtype="float32"
)
conv_out = paddle.static.nn.conv3d(
......@@ -112,7 +112,7 @@ class DynamicShapeTensorRTSubgraphPassConv3dTest(InferencePassTest):
def setUp(self):
self.set_params()
with fluid.program_guard(self.main_program, self.startup_program):
data = fluid.data(
data = paddle.static.data(
name="data", shape=[-1, 6, -1, -1, -1], dtype="float32"
)
conv_out = paddle.static.nn.conv3d(
......
......@@ -27,7 +27,7 @@ class TensorRTSubgraphPassConv3dTransposeTest(InferencePassTest):
def setUp(self):
self.set_params()
with fluid.program_guard(self.main_program, self.startup_program):
data = fluid.data(
data = paddle.static.data(
name="data", shape=[-1, 4, 4, 32, 32], dtype="float32"
)
conv_out = paddle.static.nn.conv3d_transpose(
......@@ -94,7 +94,7 @@ class DynamicShapeTensorRTSubgraphPassConv3dTransposeTest(InferencePassTest):
def setUp(self):
self.set_params()
with fluid.program_guard(self.main_program, self.startup_program):
data = fluid.data(
data = paddle.static.data(
name="data", shape=[-1, 6, -1, -1, -1], dtype="float32"
)
conv_out = paddle.static.nn.conv3d_transpose(
......
......@@ -30,7 +30,7 @@ class TensorRTSubgraphPassConvTest(InferencePassTest):
def setUp(self):
self.set_params()
with fluid.program_guard(self.main_program, self.startup_program):
data = fluid.data(
data = paddle.static.data(
name="data", shape=[-1, 6, 64, 64], dtype="float32"
)
conv_out = paddle.static.nn.conv2d(
......@@ -108,7 +108,7 @@ class TensorRTSubgraphPassConvTransposeTest(InferencePassTest):
def setUp(self):
self.set_params()
with fluid.program_guard(self.main_program, self.startup_program):
data = fluid.data(
data = paddle.static.data(
name="data", shape=[-1, 6, 64, 64], dtype="float32"
)
conv_out = paddle.static.nn.conv2d_transpose(
......@@ -207,7 +207,7 @@ class DynamicShapeTensorRTSubgraphPassConvTest(InferencePassTest):
def setUp(self):
self.set_params()
with fluid.program_guard(self.main_program, self.startup_program):
data = fluid.data(
data = paddle.static.data(
name="data", shape=[-1, 6, -1, -1], dtype="float32"
)
conv_out = paddle.static.nn.conv2d(
......
......@@ -29,11 +29,13 @@ class QuantDequantTensorRTSubgraphPassConvTest(QuantDequantTest):
self.set_params()
def network():
self.data = fluid.data(
self.data = paddle.static.data(
name='data', shape=[1, 28, 28], dtype='float32'
)
data_reshape = paddle.reshape(self.data, shape=[1, 4, 14, 14])
self.label = fluid.data(name='label', shape=[1, 1], dtype='int64')
self.label = paddle.static.data(
name='label', shape=[1, 1], dtype='int64'
)
label_shape = paddle.reshape(self.label, shape=[1, 1, 1])
conv_out = paddle.static.nn.conv2d(
input=data_reshape,
......@@ -144,11 +146,13 @@ class DynamicShapeQuantDequantTensorRTSubgraphPassConvTest(QuantDequantTest):
self.set_params()
def network():
self.data = fluid.data(
self.data = paddle.static.data(
name='data', shape=[1, 28, 28], dtype='float32'
)
data_reshape = paddle.reshape(self.data, shape=[1, 4, 14, 14])
self.label = fluid.data(name='label', shape=[1, 1], dtype='int64')
self.label = paddle.static.data(
name='label', shape=[1, 1], dtype='int64'
)
label_shape = paddle.reshape(self.label, shape=[1, 1, 1])
conv_out = paddle.static.nn.conv2d(
input=data_reshape,
......@@ -243,11 +247,13 @@ class QuantDequantTensorRTSubgraphPassConvTransposeTest(QuantDequantTest):
self.set_params()
def network():
self.data = fluid.data(
self.data = paddle.static.data(
name='data', shape=[1, 28, 28], dtype='float32'
)
data_reshape = paddle.reshape(self.data, shape=[1, 4, 14, 14])
self.label = fluid.data(name='label', shape=[1, 1], dtype='int64')
self.label = paddle.static.data(
name='label', shape=[1, 1], dtype='int64'
)
label_shape = paddle.reshape(self.label, shape=[1, 1, 1])
conv_out = paddle.static.nn.conv2d_transpose(
input=data_reshape,
......
......@@ -30,13 +30,13 @@ class TRTDeformableConvTest(InferencePassTest):
def setUp(self):
self.set_params()
with fluid.program_guard(self.main_program, self.startup_program):
input = fluid.data(
input = paddle.static.data(
name='input', shape=self.input_size, dtype=self.dtype
)
offset = fluid.data(
offset = paddle.static.data(
name='offset', shape=self.offset_size, dtype=self.dtype
)
mask = fluid.data(
mask = paddle.static.data(
name='mask', shape=self.mask_size, dtype=self.dtype
)
......
......@@ -26,7 +26,7 @@ from paddle.fluid.core import AnalysisConfig
class TRTDynamicShapeTest(InferencePassTest):
def setUp(self):
with fluid.program_guard(self.main_program, self.startup_program):
data = fluid.data(
data = paddle.static.data(
name="data", shape=[-1, 3, 16, 16], dtype="float32"
)
out = paddle.static.nn.conv2d(
......
......@@ -29,10 +29,10 @@ from paddle.fluid.core import AnalysisConfig, PassVersionChecker
class TensorRTSubgraphPassElementwiseBroadcastTest(InferencePassTest):
def setUp(self):
with fluid.program_guard(self.main_program, self.startup_program):
data1 = fluid.data(
data1 = paddle.static.data(
name="data1", shape=[-1, 3, 64, 64], dtype="float32"
)
data2 = fluid.data(
data2 = paddle.static.data(
name="data2", shape=[-1, 3, 64, 1], dtype="float32"
)
eltwise_out = self.append_eltwise(data1, data2)
......
......@@ -26,7 +26,7 @@ from paddle.fluid.core import AnalysisConfig
class FCFusePassTRTTest(InferencePassTest):
def setUp(self):
with fluid.program_guard(self.main_program, self.startup_program):
data = fluid.data(
data = paddle.static.data(
name="data", shape=[32, 128, 2, 2], dtype="float32"
)
fc_out1 = paddle.static.nn.fc(
......@@ -56,7 +56,7 @@ class FCFusePassTRTTest(InferencePassTest):
class FCFusePassTRTStaticDims4Cols1Test(InferencePassTest):
def setUp(self):
with fluid.program_guard(self.main_program, self.startup_program):
data = fluid.data(
data = paddle.static.data(
name="data", shape=[32, 128, 32, 8], dtype="float32"
)
fc_out1 = paddle.static.nn.fc(
......@@ -84,7 +84,7 @@ class FCFusePassTRTStaticDims4Cols1Test(InferencePassTest):
class FCFusePassTRTStaticDims4Cols2Test(InferencePassTest):
def setUp(self):
with fluid.program_guard(self.main_program, self.startup_program):
data = fluid.data(
data = paddle.static.data(
name="data", shape=[3, 24, 16, 16], dtype="float32"
)
fc_out1 = paddle.static.nn.fc(
......@@ -112,7 +112,9 @@ class FCFusePassTRTStaticDims4Cols2Test(InferencePassTest):
class FCFusePassTRTDynamicDims2Test(InferencePassTest):
def setUp(self):
with fluid.program_guard(self.main_program, self.startup_program):
data = fluid.data(name="data", shape=[32, 128], dtype="float32")
data = paddle.static.data(
name="data", shape=[32, 128], dtype="float32"
)
fc_out1 = paddle.static.nn.fc(
x=data, size=64, num_flatten_dims=1, activation="relu"
)
......@@ -144,7 +146,9 @@ class FCFusePassTRTDynamicDims2Test(InferencePassTest):
class FCFusePassTRTDynamicDims3Cols1Test(InferencePassTest):
def setUp(self):
with fluid.program_guard(self.main_program, self.startup_program):
data = fluid.data(name="data", shape=[32, 128, 32], dtype="float32")
data = paddle.static.data(
name="data", shape=[32, 128, 32], dtype="float32"
)
fc_out1 = paddle.static.nn.fc(
x=data, size=64, num_flatten_dims=1, activation="relu"
)
......@@ -176,7 +180,9 @@ class FCFusePassTRTDynamicDims3Cols1Test(InferencePassTest):
class FCFusePassTRTDynamicDims3Cols2Test(InferencePassTest):
def setUp(self):
with fluid.program_guard(self.main_program, self.startup_program):
data = fluid.data(name="data", shape=[32, 128, 32], dtype="float32")
data = paddle.static.data(
name="data", shape=[32, 128, 32], dtype="float32"
)
fc_out1 = paddle.static.nn.fc(
x=data, size=64, num_flatten_dims=2, activation="relu"
)
......@@ -208,7 +214,7 @@ class FCFusePassTRTDynamicDims3Cols2Test(InferencePassTest):
class FCFusePassTRTDynamicDims4Cols1Test(InferencePassTest):
def setUp(self):
with fluid.program_guard(self.main_program, self.startup_program):
data = fluid.data(
data = paddle.static.data(
name="data", shape=[32, 12, 4, 6], dtype="float32"
)
fc_out1 = paddle.static.nn.fc(
......@@ -244,7 +250,7 @@ class FCFusePassTRTDynamicDims4Cols1Test(InferencePassTest):
class FCFusePassTRTDynamicDims4Cols2Test(InferencePassTest):
def setUp(self):
with fluid.program_guard(self.main_program, self.startup_program):
data = fluid.data(
data = paddle.static.data(
name="data", shape=[32, 128, 32, 32], dtype="float32"
)
fc_out1 = paddle.static.nn.fc(
......@@ -280,7 +286,7 @@ class FCFusePassTRTDynamicDims4Cols2Test(InferencePassTest):
class FCFusePassTRTDynamicDims4Cols3Test(InferencePassTest):
def setUp(self):
with fluid.program_guard(self.main_program, self.startup_program):
data = fluid.data(
data = paddle.static.data(
name="data", shape=[32, 128, 32, 32], dtype="float32"
)
fc_out1 = paddle.static.nn.fc(
......
......@@ -27,10 +27,12 @@ from paddle.fluid.core import AnalysisConfig, PassVersionChecker
class FCQuantDequantFusePassTRTDims3Cols1Test(QuantDequantTest):
def setUp(self):
def network():
self.data = fluid.data(
self.data = paddle.static.data(
name='data', shape=[1, 28, 28], dtype='float32'
)
self.label = fluid.data(name='label', shape=[1, 1], dtype='int64')
self.label = paddle.static.data(
name='label', shape=[1, 1], dtype='int64'
)
fc_out = paddle.static.nn.fc(
x=self.data,
size=10,
......@@ -98,10 +100,12 @@ class FCQuantDequantFusePassTRTDims3Cols1Test(QuantDequantTest):
class FCQuantDequantFusePassTRTDims3Cols2Test(QuantDequantTest):
def setUp(self):
def network():
self.data = fluid.data(
self.data = paddle.static.data(
name='data', shape=[1, 28, 28], dtype='float32'
)
self.label = fluid.data(name='label', shape=[1, 1], dtype='int64')
self.label = paddle.static.data(
name='label', shape=[1, 1], dtype='int64'
)
fc_out = paddle.static.nn.fc(
x=self.data,
size=28,
......@@ -170,10 +174,12 @@ class FCQuantDequantFusePassTRTDims3Cols2Test(QuantDequantTest):
class FCQuantDequantFusePassTRTDims3Cols3Test(QuantDequantTest):
def setUp(self):
def network():
self.data = fluid.data(
self.data = paddle.static.data(
name='data', shape=[1, 28, 28], dtype='float32'
)
self.label = fluid.data(name='label', shape=[1, 1], dtype='int64')
self.label = paddle.static.data(
name='label', shape=[1, 1], dtype='int64'
)
label_shape = paddle.reshape(self.label, shape=[1, 1, 1])
reshape_out = paddle.reshape(self.data, shape=[1, 14, 14, 4])
fc_out = paddle.static.nn.fc(
......
......@@ -27,7 +27,7 @@ from paddle.fluid.core import AnalysisConfig, PassVersionChecker
class TRTFlattenTest(InferencePassTest):
def setUp(self):
with fluid.program_guard(self.main_program, self.startup_program):
data = fluid.data(
data = paddle.static.data(
name="data", shape=[-1, 6, 64, 64], dtype="float32"
)
flatten_out = self.append_flatten(data)
......@@ -56,7 +56,7 @@ class TRTFlattenTest(InferencePassTest):
class TRTFlattenDynamicTest(InferencePassTest):
def setUp(self):
with fluid.program_guard(self.main_program, self.startup_program):
data = fluid.data(
data = paddle.static.data(
name="data", shape=[-1, 6, 64, 64], dtype="float32"
)
flatten_out = self.append_flatten(data)
......
......@@ -27,8 +27,12 @@ from paddle.fluid.core import AnalysisConfig, PassVersionChecker
class TRTGatherNdTest(InferencePassTest):
def setUp(self):
with fluid.program_guard(self.main_program, self.startup_program):
data = fluid.data(name="data", shape=[-1, 3, 4], dtype="float32")
index = fluid.data(name="index", shape=[-1, 2, 2], dtype="int32")
data = paddle.static.data(
name="data", shape=[-1, 3, 4], dtype="float32"
)
index = paddle.static.data(
name="index", shape=[-1, 2, 2], dtype="int32"
)
gather_nd = paddle.gather_nd(data, index)
out = nn.batch_norm(gather_nd, is_test=True)
......@@ -62,10 +66,12 @@ class TRTGatherNdTest(InferencePassTest):
class TRTGatherNdFp16Test(InferencePassTest):
def setUp(self):
with fluid.program_guard(self.main_program, self.startup_program):
data = fluid.data(
data = paddle.static.data(
name="data", shape=[-1, 1280, 192], dtype="float32"
)
index = fluid.data(name="index", shape=[-1, 1028, 2], dtype="int32")
index = paddle.static.data(
name="index", shape=[-1, 1028, 2], dtype="int32"
)
gather_nd = paddle.gather_nd(data, index)
out = nn.batch_norm(gather_nd, is_test=True)
......
......@@ -27,8 +27,12 @@ class TRTGatherTest1(InferencePassTest):
def setUp(self):
self.set_params()
with fluid.program_guard(self.main_program, self.startup_program):
data = fluid.data(name='data', shape=[-1, 128], dtype='float32')
index = fluid.data(name='index', shape=[-1, 1], dtype='int32')
data = paddle.static.data(
name='data', shape=[-1, 128], dtype='float32'
)
index = paddle.static.data(
name='index', shape=[-1, 1], dtype='int32'
)
scale_out = paddle.gather(data, index=index)
out = paddle.nn.functional.softmax(scale_out)
......@@ -66,8 +70,10 @@ class TRTGatherTest2(InferencePassTest):
def setUp(self):
self.set_params()
with fluid.program_guard(self.main_program, self.startup_program):
data = fluid.data(name='data', shape=[16, 64], dtype='float32')
index = fluid.data(name='index', shape=[2], dtype='int32')
data = paddle.static.data(
name='data', shape=[16, 64], dtype='float32'
)
index = paddle.static.data(name='index', shape=[2], dtype='int32')
scale_out = paddle.gather(data, index=index)
out = paddle.nn.functional.softmax(scale_out)
......
......@@ -29,7 +29,9 @@ class TensorRTInspectorTest(InferencePassTest):
def setUp(self):
self.set_params()
with fluid.program_guard(self.main_program, self.startup_program):
data = fluid.data(name="data", shape=[1, 16, 16], dtype="float32")
data = paddle.static.data(
name="data", shape=[1, 16, 16], dtype="float32"
)
matmul_out = paddle.matmul(
x=data,
y=data,
......
......@@ -20,6 +20,7 @@ import unittest
import numpy as np
from inference_pass_test import InferencePassTest
import paddle
import paddle.fluid as fluid
import paddle.fluid.core as core
import paddle.static.nn as nn
......@@ -43,7 +44,7 @@ class TRTInstanceNormTest(InferencePassTest):
with fluid.program_guard(self.main_program, self.startup_program):
shape = [-1, self.channel, self.height, self.width]
data = fluid.data(name='in', shape=shape, dtype='float32')
data = paddle.static.data(name='in', shape=shape, dtype='float32')
instance_norm_out = nn.instance_norm(data)
out = nn.batch_norm(instance_norm_out, is_test=True)
......
......@@ -28,7 +28,9 @@ class TensorRTMatMulDims2Test(InferencePassTest):
def setUp(self):
self.set_params()
with fluid.program_guard(self.main_program, self.startup_program):
data = fluid.data(name="data", shape=[24, 24], dtype="float32")
data = paddle.static.data(
name="data", shape=[24, 24], dtype="float32"
)
matmul_out = paddle.matmul(
x=data,
y=data,
......@@ -65,7 +67,7 @@ class TensorRTMatMulTest(InferencePassTest):
def setUp(self):
self.set_params()
with fluid.program_guard(self.main_program, self.startup_program):
data = fluid.data(
data = paddle.static.data(
name="data", shape=[-1, 6, 24, 24], dtype="float32"
)
matmul_out = paddle.matmul(
......@@ -126,10 +128,12 @@ class TensorRTMatMulBroadcastTest(InferencePassTest):
self.set_params()
place = fluid.CPUPlace()
with fluid.program_guard(self.main_program, self.startup_program):
data_x = fluid.data(
data_x = paddle.static.data(
name="data_x", shape=[-1, 6, 24], dtype="float32"
)
data_y = fluid.data(name="data_y", shape=[24, 16], dtype="float32")
data_y = paddle.static.data(
name="data_y", shape=[24, 16], dtype="float32"
)
matmul_out = paddle.matmul(
x=data_x,
y=data_y,
......
......@@ -29,10 +29,12 @@ class TensorRTMatMulQuantDequantDims3Test(QuantDequantTest):
self.set_params()
def network():
self.data = fluid.data(
self.data = paddle.static.data(
name='data', shape=[1, 28, 28], dtype='float32'
)
self.label = fluid.data(name='label', shape=[1, 1], dtype='int64')
self.label = paddle.static.data(
name='label', shape=[1, 1], dtype='int64'
)
matmul_out = paddle.matmul(
x=self.data,
y=self.data,
......@@ -129,10 +131,12 @@ class TensorRTMatMulQuantDequantDims4Test(QuantDequantTest):
self.set_params()
def network():
self.data = fluid.data(
self.data = paddle.static.data(
name='data', shape=[1, 28, 28], dtype='float32'
)
self.label = fluid.data(name='label', shape=[1, 1], dtype='int64')
self.label = paddle.static.data(
name='label', shape=[1, 1], dtype='int64'
)
reshape_out = paddle.reshape(self.data, shape=[1, 4, 14, 14])
matmul_out = paddle.matmul(
x=reshape_out,
......@@ -231,10 +235,12 @@ class TensorRTMatMulQuantDequantDims3DynamicTest(QuantDequantTest):
self.set_params()
def network():
self.data = fluid.data(
self.data = paddle.static.data(
name='data', shape=[-1, 28, 28], dtype='float32'
)
self.label = fluid.data(name='label', shape=[1, 1], dtype='int64')
self.label = paddle.static.data(
name='label', shape=[1, 1], dtype='int64'
)
matmul_out = paddle.matmul(
x=self.data,
y=self.data,
......
......@@ -218,10 +218,10 @@ class TensorRTMultiClassNMS3Test(InferencePassTest):
def build(self):
with fluid.program_guard(self.main_program, self.startup_program):
boxes = fluid.data(
boxes = paddle.static.data(
name='bboxes', shape=[-1, self.num_boxes, 4], dtype='float32'
)
scores = fluid.data(
scores = paddle.static.data(
name='scores',
shape=[-1, self.num_classes, self.num_boxes],
dtype='float32',
......
......@@ -43,7 +43,7 @@ class TRTNearestInterpTest(InferencePassTest):
self.origin_shape[1],
self.channels,
]
data = fluid.data(name='data', shape=shape, dtype='float32')
data = paddle.static.data(name='data', shape=shape, dtype='float32')
resize_out = self.append_nearest_interp(data)
out = nn.batch_norm(resize_out, is_test=True)
......
......@@ -17,6 +17,7 @@ import unittest
import numpy as np
from inference_pass_test import InferencePassTest
import paddle
import paddle.fluid.core as core
import paddle.nn.functional as F
import paddle.static.nn as nn
......@@ -43,7 +44,7 @@ class TRTNearestInterpTest(InferencePassTest):
self.origin_shape[1],
self.channels,
]
data = fluid.data(name='data', shape=shape, dtype='float32')
data = paddle.static.data(name='data', shape=shape, dtype='float32')
resize_out = self.append_nearest_interp(data)
out = nn.batch_norm(resize_out, is_test=True)
......
......@@ -27,7 +27,7 @@ from paddle.fluid.core import AnalysisConfig
class PadOpTRTTest(InferencePassTest):
def setUp(self):
with fluid.program_guard(self.main_program, self.startup_program):
data = fluid.data(
data = paddle.static.data(
name="data", shape=[1, 3, 128, 128], dtype="float32"
)
pad_out = paddle.nn.functional.pad(
......
......@@ -58,7 +58,7 @@ class TensorRTPool3dTest(InferencePassTest):
)
with fluid.program_guard(self.main_program, self.startup_program):
data = fluid.data(
data = paddle.static.data(
name='data',
shape=[-1, self.channel, self.depth, self.height, self.width],
dtype='float32',
......@@ -190,7 +190,7 @@ class TensorRTAdaptiveAvgPool3DTest(InferencePassTest):
)
with fluid.program_guard(self.main_program, self.startup_program):
data = fluid.data(
data = paddle.static.data(
name='data',
shape=[-1, self.channel, self.depth, self.height, self.width],
dtype='float32',
......@@ -290,7 +290,7 @@ class TensorRTAdaptiveMaxPool3DTest(InferencePassTest):
)
with fluid.program_guard(self.main_program, self.startup_program):
data = fluid.data(
data = paddle.static.data(
name='data',
shape=[-1, self.channel, self.depth, self.height, self.width],
dtype='float32',
......
......@@ -59,7 +59,7 @@ class TensorRTPoolTest(InferencePassTest):
)
with fluid.program_guard(self.main_program, self.startup_program):
data = fluid.data(
data = paddle.static.data(
name='data',
shape=[-1, self.channel, self.height, self.width],
dtype='float32',
......
......@@ -27,7 +27,7 @@ from paddle.fluid.core import AnalysisConfig, PassVersionChecker
class TRTReduceSumTest(InferencePassTest):
def setUp(self):
with fluid.program_guard(self.main_program, self.startup_program):
data = fluid.data(
data = paddle.static.data(
name="data", shape=[-1, 3, 10, 192], dtype="float32"
)
reduce_sum = paddle.sum(data, axis=[2, -1], keepdim=True)
......@@ -60,7 +60,7 @@ class TRTReduceSumTest(InferencePassTest):
class TRTReduceSumAllTest(InferencePassTest):
def setUp(self):
with fluid.program_guard(self.main_program, self.startup_program):
data = fluid.data(
data = paddle.static.data(
name="data", shape=[-1, 3, 10, 192], dtype="float32"
)
reduce_sum = paddle.sum(data, keepdim=True)
......
......@@ -36,7 +36,7 @@ class TRTReshapeTest(InferencePassTest):
self.input_shape[2],
]
with fluid.program_guard(self.main_program, self.startup_program):
data = fluid.data(
data = paddle.static.data(
name='data', shape=self.data_shape, dtype='float32'
)
reshape_out = self.append_reshape(data, self.reshape)
......@@ -74,7 +74,7 @@ class TRTReshapeTest1(TRTReshapeTest):
self.input_shape[2],
]
with fluid.program_guard(self.main_program, self.startup_program):
data = fluid.data(
data = paddle.static.data(
name='data', shape=self.data_shape, dtype='float32'
)
reshape_out = self.append_reshape(data, self.reshape)
......@@ -101,7 +101,7 @@ class TRTReshapeTest2(TRTReshapeTest):
self.input_shape[2],
]
with fluid.program_guard(self.main_program, self.startup_program):
data = fluid.data(
data = paddle.static.data(
name='data', shape=self.data_shape, dtype='float32'
)
reshape_out = paddle.reshape(x=data, shape=self.reshape)
......@@ -128,7 +128,7 @@ class TRTReshapeTest3(TRTReshapeTest):
self.input_shape[2],
]
with fluid.program_guard(self.main_program, self.startup_program):
data = fluid.data(
data = paddle.static.data(
name='data', shape=self.data_shape, dtype='float32'
)
bn_out = nn.batch_norm(data, is_test=True)
......
......@@ -27,7 +27,9 @@ from paddle.fluid.core import AnalysisConfig, PassVersionChecker
class TRTScaleTest(InferencePassTest):
def setUp(self):
with fluid.program_guard(self.main_program, self.startup_program):
data = fluid.data(name="data", shape=[-1, 512], dtype="float32")
data = paddle.static.data(
name="data", shape=[-1, 512], dtype="float32"
)
scale_out = self.append_scale(data)
out = nn.batch_norm(scale_out, is_test=True)
......@@ -57,7 +59,7 @@ class TRTScaleTest(InferencePassTest):
class TRTScaleShape2Test(InferencePassTest):
def setUp(self):
with fluid.program_guard(self.main_program, self.startup_program):
data = fluid.data(
data = paddle.static.data(
name="data", shape=[-1, 512, 512], dtype="float32"
)
scale_out = self.append_scale(data)
......
......@@ -26,7 +26,7 @@ from paddle.fluid.core import AnalysisConfig, PassVersionChecker
class ShuffleChannelFuseTRTPassTest(InferencePassTest):
def setUp(self):
with fluid.program_guard(self.main_program, self.startup_program):
data = fluid.data(
data = paddle.static.data(
name="data", shape=[-1, 6, 64, 64], dtype="float32"
)
reshape1 = paddle.reshape(x=data, shape=[-1, 2, 3, 64, 64])
......
......@@ -46,7 +46,9 @@ class SlicePluginTRTDynamicTest(InferencePassTest):
self.setUpSliceParams()
self.setUpTensorRTParams()
with fluid.program_guard(self.main_program, self.startup_program):
data = fluid.data(name="data", shape=[3, 3, 3, 3], dtype="float32")
data = paddle.static.data(
name="data", shape=[3, 3, 3, 3], dtype="float32"
)
axes = self.params_axes
starts = self.params_starts
ends = self.params_ends
......
......@@ -41,7 +41,9 @@ class SlicePluginTRTTest(InferencePassTest):
self.setUpSliceParams()
self.setUpTensorRTParams()
with fluid.program_guard(self.main_program, self.startup_program):
data = fluid.data(name="data", shape=[3, 3, 3, 3], dtype="float32")
data = paddle.static.data(
name="data", shape=[3, 3, 3, 3], dtype="float32"
)
axes = self.params_axes
starts = self.params_starts
ends = self.params_ends
......@@ -110,7 +112,9 @@ class SlicePluginTRTTestInt32(SlicePluginTRTTest):
self.setUpSliceParams()
self.setUpTensorRTParams()
with fluid.program_guard(self.main_program, self.startup_program):
data = fluid.data(name="data", shape=[3, 3, 3, 3], dtype="int32")
data = paddle.static.data(
name="data", shape=[3, 3, 3, 3], dtype="int32"
)
axes = self.params_axes
starts = self.params_starts
ends = self.params_ends
......@@ -135,7 +139,9 @@ class StaticSlicePluginTRTTestInt32(SlicePluginTRTTest):
self.setUpSliceParams()
self.setUpTensorRTParams()
with fluid.program_guard(self.main_program, self.startup_program):
data = fluid.data(name="data", shape=[3, 3, 3, 3], dtype="int32")
data = paddle.static.data(
name="data", shape=[3, 3, 3, 3], dtype="int32"
)
axes = self.params_axes
starts = self.params_starts
ends = self.params_ends
......
......@@ -28,7 +28,7 @@ from paddle.fluid.core import AnalysisConfig, PassVersionChecker
class TensorRTSubgraphPassFcTest(InferencePassTest):
def setUp(self):
with fluid.program_guard(self.main_program, self.startup_program):
data = fluid.data(
data = paddle.static.data(
name="data", shape=[-1, 6, 64, 64], dtype="float32"
)
fc_out = paddle.static.nn.fc(x=[data], activation=None, size=1000)
......@@ -55,10 +55,10 @@ class TensorRTSubgraphPassFcTest(InferencePassTest):
class TensorRTSubgraphPassConcatTest(InferencePassTest):
def setUp(self):
with fluid.program_guard(self.main_program, self.startup_program):
data1 = fluid.data(
data1 = paddle.static.data(
name="data1", shape=[-1, 3, 64, 64], dtype="float32"
)
data2 = fluid.data(
data2 = paddle.static.data(
name="data2", shape=[-1, 3, 64, 64], dtype="float32"
)
concat_out = paddle.concat([data1, data2], axis=2)
......@@ -85,7 +85,7 @@ class TensorRTSubgraphPassConcatTest(InferencePassTest):
class TensorRTSubgraphPassSplitTest(InferencePassTest):
def setUp(self):
with fluid.program_guard(self.main_program, self.startup_program):
data = fluid.data(
data = paddle.static.data(
name="data", shape=[-1, 3, 64, 64], dtype="float32"
)
split_out = paddle.split(data, axis=-1, num_or_sections=2)
......@@ -111,7 +111,7 @@ class TensorRTSubgraphPassSplitTest(InferencePassTest):
class TensorRTSubgraphPassSplitSerializeTest(InferencePassTest):
def setUp(self):
with fluid.program_guard(self.main_program, self.startup_program):
data = fluid.data(
data = paddle.static.data(
name="data", shape=[-1, 3, 64, 64], dtype="float32"
)
split_out = paddle.split(data, axis=-1, num_or_sections=2)
......@@ -139,7 +139,7 @@ class TensorRTSubgraphPassSplitSerializeTest(InferencePassTest):
class TensorRTSubgraphPassDynamicSplitFp16SerializeTest(InferencePassTest):
def setUp(self):
with fluid.program_guard(self.main_program, self.startup_program):
data = fluid.data(
data = paddle.static.data(
name="data", shape=[-1, 3, 64, 64], dtype="float32"
)
split_out = paddle.split(data, axis=-1, num_or_sections=2)
......@@ -175,7 +175,7 @@ class TensorRTSubgraphPassDynamicSplitFp16SerializeTest(InferencePassTest):
class TensorRTSubgraphPassInstanceNormTest(InferencePassTest):
def setUp(self):
with fluid.program_guard(self.main_program, self.startup_program):
data = fluid.data(
data = paddle.static.data(
name="data", shape=[-1, 3, 64, 64], dtype="float32"
)
param_attr = fluid.ParamAttr(
......@@ -212,7 +212,7 @@ class TensorRTSubgraphPassInstanceNormTest(InferencePassTest):
class TensorRTSubgraphPassTransposeTest(InferencePassTest):
def setUp(self):
with fluid.program_guard(self.main_program, self.startup_program):
data = fluid.data(
data = paddle.static.data(
name="data", shape=[-1, 6, 64, 64], dtype="float32"
)
transpose_out = self.append_transpose(data)
......@@ -242,7 +242,7 @@ class TensorRTSubgraphPassLayerNormTest(InferencePassTest):
def setUp(self):
self.set_params()
with fluid.program_guard(self.main_program, self.startup_program):
data = fluid.data(
data = paddle.static.data(
name="data", shape=[-1, 3, 64, 64], dtype="float32"
)
out = paddle.static.nn.layer_norm(
......@@ -273,7 +273,7 @@ class TensorRTSubgraphPassLayerNormDynamicTest(InferencePassTest):
def setUp(self):
self.set_params()
with fluid.program_guard(self.main_program, self.startup_program):
data = fluid.data(
data = paddle.static.data(
name="data", shape=[-1, 3, 64, 64], dtype="float32"
)
out = paddle.static.nn.layer_norm(
......@@ -359,10 +359,10 @@ class TensorRTSubgraphPassLayerNormBeginNormAxis3Test(
class TensorRTSubgraphPassElementwiseTest(InferencePassTest):
def setUp(self):
with fluid.program_guard(self.main_program, self.startup_program):
data1 = fluid.data(
data1 = paddle.static.data(
name="data1", shape=[-1, 3, 64, 64], dtype="float32"
)
data2 = fluid.data(
data2 = paddle.static.data(
name="data2", shape=[-1, 3, 64, 64], dtype="float32"
)
eltwise_out = self.append_eltwise(data1, data2)
......@@ -414,10 +414,12 @@ class TensorRTSubgraphPassElementwiseSerializeTest(
class TensorRTSubgraphPassElementwiseBroadcastDynamicTest(InferencePassTest):
def setUp(self):
with fluid.program_guard(self.main_program, self.startup_program):
data1 = fluid.data(
data1 = paddle.static.data(
name="data1", shape=[-1, 3, 64, 64], dtype="float32"
)
data2 = fluid.data(name="data2", shape=[64, 64], dtype="float32")
data2 = paddle.static.data(
name="data2", shape=[64, 64], dtype="float32"
)
eltwise_out = self.append_eltwise(data1, data2)
out = nn.batch_norm(eltwise_out, is_test=True)
self.feeds = {
......
......@@ -26,7 +26,7 @@ from paddle.fluid.core import AnalysisConfig, PassVersionChecker
class TRTTileTest(InferencePassTest):
def setUp(self):
with fluid.program_guard(self.main_program, self.startup_program):
data = fluid.data(
data = paddle.static.data(
name="data", shape=[4, 3, 224, 256], dtype="float32"
)
tile_out = paddle.tile(x=data, repeat_times=[1, 1, 1, 1])
......@@ -53,7 +53,9 @@ class TRTTileTest(InferencePassTest):
class TRTTileExpandTest(InferencePassTest):
def setUp(self):
with fluid.program_guard(self.main_program, self.startup_program):
data = fluid.data(name="data", shape=[1, 1, 1, 1], dtype="float32")
data = paddle.static.data(
name="data", shape=[1, 1, 1, 1], dtype="float32"
)
tile_out = paddle.tile(x=data, repeat_times=[1, 4, 1080, 1920])
out = paddle.static.nn.batch_norm(tile_out, is_test=True)
......@@ -78,7 +80,9 @@ class TRTTileExpandTest(InferencePassTest):
class TRTTileExpandStaticTest(InferencePassTest):
def setUp(self):
with fluid.program_guard(self.main_program, self.startup_program):
data = fluid.data(name="data", shape=[1, 1, 1, 1], dtype="float32")
data = paddle.static.data(
name="data", shape=[1, 1, 1, 1], dtype="float32"
)
tile_out = paddle.tile(x=data, repeat_times=[1, 4, 1080, 1920])
out = paddle.static.nn.batch_norm(tile_out, is_test=True)
......@@ -103,7 +107,9 @@ class TRTTileExpandStaticTest(InferencePassTest):
class TRTTileExpandHalfTest(InferencePassTest):
def setUp(self):
with fluid.program_guard(self.main_program, self.startup_program):
data = fluid.data(name="data", shape=[1, 1, 1, 1], dtype="float32")
data = paddle.static.data(
name="data", shape=[1, 1, 1, 1], dtype="float32"
)
tile_out = paddle.tile(x=data, repeat_times=[1, 4, 1080, 1920])
out = paddle.static.nn.batch_norm(tile_out, is_test=True)
......
......@@ -26,10 +26,10 @@ from paddle.fluid.core import AnalysisConfig
class TransposeFlattenConcatFusePassTRTTest(InferencePassTest):
def setUp(self):
with fluid.program_guard(self.main_program, self.startup_program):
data1 = fluid.data(
data1 = paddle.static.data(
name="data1", shape=[8, 32, 128], dtype="float32"
)
data2 = fluid.data(
data2 = paddle.static.data(
name="data2", shape=[8, 32, 128], dtype="float32"
)
......
......@@ -31,7 +31,7 @@ class TRTTunedDynamicShapeTest(unittest.TestCase):
main_program = fluid.Program()
startup_program = fluid.Program()
with fluid.program_guard(main_program, startup_program):
data = fluid.data(
data = paddle.static.data(
name="data", shape=[-1, 6, 64, 64], dtype="float32"
)
conv_out = paddle.static.nn.conv2d(
......
......@@ -27,8 +27,10 @@ class TRTYoloBoxTest(InferencePassTest):
self.set_params()
with fluid.program_guard(self.main_program, self.startup_program):
image_shape = [self.bs, self.channel, self.height, self.width]
image = fluid.data(name='image', shape=image_shape, dtype='float32')
image_size = fluid.data(
image = paddle.static.data(
name='image', shape=image_shape, dtype='float32'
)
image_size = paddle.static.data(
name='image_size', shape=[self.bs, 2], dtype='int32'
)
boxes, scores = self.append_yolobox(image, image_size)
......@@ -79,8 +81,10 @@ class TRTYoloBoxFP16Test(InferencePassTest):
self.set_params()
with fluid.program_guard(self.main_program, self.startup_program):
image_shape = [self.bs, self.channel, self.height, self.width]
image = fluid.data(name='image', shape=image_shape, dtype='float32')
image_size = fluid.data(
image = paddle.static.data(
name='image', shape=image_shape, dtype='float32'
)
image_size = paddle.static.data(
name='image_size', shape=[self.bs, 2], dtype='int32'
)
boxes, scores = self.append_yolobox(image, image_size)
......@@ -129,8 +133,10 @@ class TRTYoloBoxIoUAwareTest(InferencePassTest):
self.set_params()
with fluid.program_guard(self.main_program, self.startup_program):
image_shape = [self.bs, self.channel, self.height, self.width]
image = fluid.data(name='image', shape=image_shape, dtype='float32')
image_size = fluid.data(
image = paddle.static.data(
name='image', shape=image_shape, dtype='float32'
)
image_size = paddle.static.data(
name='image_size', shape=[self.bs, 2], dtype='int32'
)
boxes, scores = self.append_yolobox(image, image_size)
......
......@@ -25,7 +25,7 @@ import paddle.fluid.core as core
class FCFusePassTest(PassTest):
def setUp(self):
with fluid.program_guard(self.main_program, self.startup_program):
data = fluid.data(
data = paddle.static.data(
name="data", shape=[32, 128], dtype="float32", lod_level=0
)
tmp_0 = paddle.static.nn.fc(
......
......@@ -27,7 +27,7 @@ class FusionGroupPassTest(PassTest):
with fluid.program_guard(self.main_program, self.startup_program):
self.feed_vars = self._prepare_feed_vars([32, 128], dtype, 2)
self.feed_vars.append(
fluid.data(name="data2", shape=[128, 128], dtype=dtype)
paddle.static.data(name="data2", shape=[128, 128], dtype=dtype)
)
# subgraph with only 1 op node
......@@ -51,7 +51,9 @@ class FusionGroupPassTest(PassTest):
def _prepare_feed_vars(self, shape, dtype, num_data, stop_gradient=True):
feed_vars = []
for i in range(num_data):
var = fluid.data(name=("data" + str(i)), shape=shape, dtype=dtype)
var = paddle.static.data(
name=("data" + str(i)), shape=shape, dtype=dtype
)
var.stop_gradient = stop_gradient
feed_vars.append(var)
return feed_vars
......@@ -108,7 +110,7 @@ class FusionGroupPassInplaceTest(FusionGroupPassTest):
with fluid.program_guard(self.main_program, self.startup_program):
self.feed_vars = self._prepare_feed_vars([32, 128], dtype, 3)
self.feed_vars.append(
fluid.data(name="data3", shape=[128, 32], dtype=dtype)
paddle.static.data(name="data3", shape=[128, 32], dtype=dtype)
)
# subgraph with 3 op node
......@@ -134,7 +136,7 @@ class FusionGroupPassTestCastAndFP16(FusionGroupPassTest):
with fluid.program_guard(self.main_program, self.startup_program):
self.feed_vars = self._prepare_feed_vars([32, 128], dtype, 2)
self.feed_vars.append(
fluid.data(name="data2", shape=[128, 128], dtype=dtype)
paddle.static.data(name="data2", shape=[128, 128], dtype=dtype)
)
# subgraph with 2 op nodes
......@@ -165,7 +167,7 @@ class FusionGroupPassSumTest(FusionGroupPassTest):
with fluid.program_guard(self.main_program, self.startup_program):
self.feed_vars = self._prepare_feed_vars([32, 128], dtype, 3)
self.feed_vars.append(
fluid.data(name="data3", shape=[128, 128], dtype=dtype)
paddle.static.data(name="data3", shape=[128, 128], dtype=dtype)
)
# subgraph with 2 op nodes
......
......@@ -25,10 +25,10 @@ class SkipLayerNormFusePassTest(PassTest):
def setUp(self):
paddle.enable_static()
with fluid.program_guard(self.main_program, self.startup_program):
x = fluid.data(
x = paddle.static.data(
name="x", shape=[128, 768], dtype="float32", lod_level=0
)
y = fluid.data(
y = paddle.static.data(
name="y", shape=[128, 768], dtype="float32", lod_level=0
)
elementwise_out = paddle.add(x=x, y=y)
......
......@@ -803,7 +803,7 @@ class TestDygraphBatchNormTrainableStats(unittest.TestCase):
is_test=is_test,
trainable_statistics=trainable_statistics,
)
x = fluid.data(name='x', shape=x_np.shape, dtype=x_np.dtype)
x = paddle.static.data(name='x', shape=x_np.shape, dtype=x_np.dtype)
y = bn(x)
exe.run(fluid.default_startup_program())
r = exe.run(feed={'x': x_np}, fetch_list=[y])[0]
......@@ -820,7 +820,7 @@ class TestDygraphBatchNormOpenReserveSpace(unittest.TestCase):
with program_guard(Program(), Program()):
paddle.enable_static()
x = np.random.random(size=(3, 10, 3, 7)).astype('float32')
x = fluid.data(name='x', shape=x.shape, dtype=x.dtype)
x = paddle.static.data(name='x', shape=x.shape, dtype=x.dtype)
            # With this FLAG set, the BatchNorm API passes the "reserve_space" argument into the batch_norm op.
os.environ['FLAGS_cudnn_batchnorm_spatial_persistent'] = '1'
batch_norm = paddle.nn.BatchNorm(7, data_layout="NHWC")
......
......@@ -157,7 +157,7 @@ class TestBatchNorm(unittest.TestCase):
is_test=is_test,
trainable_statistics=trainable_statistics,
)
x = fluid.data(name='x', shape=x_np.shape, dtype=x_np.dtype)
x = paddle.static.data(name='x', shape=x_np.shape, dtype=x_np.dtype)
y = bn(x)
exe.run(fluid.default_startup_program())
r = exe.run(feed={'x': x_np}, fetch_list=[y])[0]
......@@ -166,7 +166,7 @@ class TestBatchNorm(unittest.TestCase):
def compute_v2(x_np):
with program_guard(Program(), Program()):
bn = paddle.nn.BatchNorm2D(shape[1])
x = fluid.data(name='x', shape=x_np.shape, dtype=x_np.dtype)
x = paddle.static.data(name='x', shape=x_np.shape, dtype=x_np.dtype)
y = bn(x)
exe.run(fluid.default_startup_program())
r = exe.run(feed={'x': x_np}, fetch_list=[y])[0]
......
......@@ -30,14 +30,14 @@ def test_static_layer(
prog = paddle.static.Program()
startup_prog = paddle.static.Program()
with paddle.static.program_guard(prog, startup_prog):
input = paddle.fluid.data(
input = paddle.static.data(
name='input', shape=input_np.shape, dtype='float32'
)
label = paddle.fluid.data(
label = paddle.static.data(
name='label', shape=label_np.shape, dtype='float32'
)
if weight_np is not None:
weight = paddle.fluid.data(
weight = paddle.static.data(
name='weight', shape=weight_np.shape, dtype='float32'
)
bce_loss = paddle.nn.loss.BCELoss(
......@@ -63,14 +63,14 @@ def test_static_functional(
prog = paddle.static.Program()
startup_prog = paddle.static.Program()
with paddle.static.program_guard(prog, startup_prog):
input = paddle.fluid.data(
input = paddle.static.data(
name='input', shape=input_np.shape, dtype='float32'
)
label = paddle.fluid.data(
label = paddle.static.data(
name='label', shape=label_np.shape, dtype='float32'
)
if weight_np is not None:
weight = paddle.fluid.data(
weight = paddle.static.data(
name='weight', shape=weight_np.shape, dtype='float32'
)
res = paddle.nn.functional.binary_cross_entropy(
......
......@@ -41,10 +41,10 @@ def test_static(
prog = paddle.static.Program()
startup_prog = paddle.static.Program()
with paddle.static.program_guard(prog, startup_prog):
logit = paddle.fluid.data(
logit = paddle.static.data(
name='logit', shape=logit_np.shape, dtype='float32'
)
label = paddle.fluid.data(
label = paddle.static.data(
name='label', shape=label_np.shape, dtype='float32'
)
feed_dict = {"logit": logit_np, "label": label_np}
......@@ -52,12 +52,12 @@ def test_static(
pos_weight = None
weight = None
if pos_weight_np is not None:
pos_weight = paddle.fluid.data(
pos_weight = paddle.static.data(
name='pos_weight', shape=pos_weight_np.shape, dtype='float32'
)
feed_dict["pos_weight"] = pos_weight_np
if weight_np is not None:
weight = paddle.fluid.data(
weight = paddle.static.data(
name='weight', shape=weight_np.shape, dtype='float32'
)
feed_dict["weight"] = weight_np
......
......@@ -224,7 +224,7 @@ class TestDropoutAPI(unittest.TestCase):
def check_static_result(self, place):
with fluid.program_guard(fluid.Program(), fluid.Program()):
input = fluid.data(name="input", shape=[40, 40], dtype="float32")
input = paddle.static.data(name="input", shape=[40, 40], dtype="float32")
res1 = paddle.nn.functional.dropout(
x=input, p=0.0, training=False, mode='upscale_in_train'
)
......
......@@ -402,8 +402,8 @@ class TestAddApi(unittest.TestCase):
def test_name(self):
with fluid.program_guard(fluid.Program()):
x = fluid.data(name="x", shape=[2, 3], dtype="float32")
y = fluid.data(name='y', shape=[2, 3], dtype='float32')
x = paddle.static.data(name="x", shape=[2, 3], dtype="float32")
y = paddle.static.data(name='y', shape=[2, 3], dtype='float32')
y_1 = self._executed_api(x, y, name='add_res')
self.assertEqual(('add_res' in y_1.name), True)
......@@ -417,8 +417,8 @@ class TestAddApi(unittest.TestCase):
"y": np.array([1, 5, 2]).astype('float32'),
}
x = fluid.data(name="x", shape=[3], dtype='float32')
y = fluid.data(name="y", shape=[3], dtype='float32')
x = paddle.static.data(name="x", shape=[3], dtype='float32')
y = paddle.static.data(name="y", shape=[3], dtype='float32')
z = self._executed_api(x, y)
place = fluid.MLUPlace(0)
......
......@@ -271,10 +271,10 @@ class TestFillConstantAPI(unittest.TestCase):
positive_2_int32 = paddle.tensor.fill_constant([1], "int32", 2)
positive_2_int64 = paddle.tensor.fill_constant([1], "int64", 2)
shape_tensor_int32 = fluid.data(
shape_tensor_int32 = paddle.static.data(
name="shape_tensor_int32", shape=[2], dtype="int32"
)
shape_tensor_int64 = fluid.data(
shape_tensor_int64 = paddle.static.data(
name="shape_tensor_int64", shape=[2], dtype="int64"
)
......@@ -446,7 +446,7 @@ class TestFillConstantOpError(unittest.TestCase):
        # The dtype of fill_constant_op's shape tensor must be int32 or int64.
def test_shape_tensor_dtype():
shape = fluid.data(
shape = paddle.static.data(
name="shape_tensor", shape=[2], dtype="float32"
)
paddle.tensor.fill_constant(
......@@ -456,7 +456,7 @@ class TestFillConstantOpError(unittest.TestCase):
self.assertRaises(TypeError, test_shape_tensor_dtype)
def test_shape_tensor_list_dtype():
shape = fluid.data(
shape = paddle.static.data(
name="shape_tensor_list", shape=[1], dtype="bool"
)
paddle.tensor.fill_constant(
......
......@@ -129,10 +129,10 @@ class TestGathertError(unittest.TestCase):
):
shape = [8, 9, 6]
x = paddle.fluid.data(shape=shape, dtype='int8', name='x')
axis = paddle.fluid.data(shape=[1], dtype='float32', name='axis')
index = paddle.fluid.data(shape=shape, dtype='int32', name='index')
index_float = paddle.fluid.data(
x = paddle.static.data(shape=shape, dtype='int8', name='x')
axis = paddle.static.data(shape=[1], dtype='float32', name='axis')
index = paddle.static.data(shape=shape, dtype='int32', name='index')
index_float = paddle.static.data(
shape=shape, dtype='float32', name='index_float'
)
......@@ -160,9 +160,9 @@ class TestGathertError(unittest.TestCase):
with fluid.program_guard(fluid.Program(), fluid.Program()):
shape = [8, 9, 6]
x = fluid.data(shape=shape, dtype='int8', name='x')
index = fluid.data(shape=shape, dtype='int32', name='mask')
index_float = fluid.data(
x = paddle.static.data(shape=shape, dtype='int8', name='x')
index = paddle.static.data(shape=shape, dtype='int32', name='mask')
index_float = paddle.static.data(
shape=shape, dtype='float32', name='index_float'
)
......
......@@ -161,7 +161,7 @@ class TestHardsigmoidAPI(unittest.TestCase):
def test_fluid_api(self):
paddle.enable_static()
with fluid.program_guard(fluid.Program()):
x = fluid.data('X', self.x_np.shape, self.x_np.dtype)
x = paddle.static.data('X', self.x_np.shape, self.x_np.dtype)
out = paddle.nn.functional.hardsigmoid(x)
exe = fluid.Executor(self.place)
res = exe.run(feed={'X': self.x_np}, fetch_list=[out])
......@@ -179,12 +179,12 @@ class TestHardsigmoidAPI(unittest.TestCase):
# The input type must be Variable.
self.assertRaises(TypeError, F.hardsigmoid, 1)
# The input dtype must be float16, float32, float64.
x_int32 = paddle.fluid.data(
x_int32 = paddle.static.data(
name='x_int32', shape=[12, 10], dtype='int32'
)
self.assertRaises(TypeError, F.hardsigmoid, x_int32)
            # the input dtype float16 is also supported
x_fp16 = paddle.fluid.data(
x_fp16 = paddle.static.data(
name='x_fp16', shape=[12, 10], dtype='float16'
)
F.hardsigmoid(x_fp16)
......
......@@ -140,7 +140,7 @@ class TestNNLogSoftmaxAPI(unittest.TestCase):
paddle.enable_static()
# test static api
with paddle.static.program_guard(paddle.static.Program()):
x = paddle.fluid.data(name='x', shape=self.x_shape)
x = paddle.static.data(name='x', shape=self.x_shape)
y = logsoftmax(x)
exe = paddle.static.Executor(self.place)
out = exe.run(feed={'x': self.x}, fetch_list=[y])
......@@ -174,7 +174,7 @@ class TestNNFunctionalLogSoftmaxAPI(unittest.TestCase):
x = x.astype(dtype)
ref_out = np.apply_along_axis(ref_log_softmax, axis, x)
with paddle.static.program_guard(paddle.static.Program()):
x = paddle.fluid.data(name='x', shape=self.x_shape)
x = paddle.static.data(name='x', shape=self.x_shape)
y = F.log_softmax(x, axis, dtype)
exe = paddle.static.Executor(self.place)
out = exe.run(feed={'x': self.x}, fetch_list=[y])
......@@ -194,10 +194,10 @@ class TestNNFunctionalLogSoftmaxAPI(unittest.TestCase):
def test_errors(self):
paddle.enable_static()
with paddle.static.program_guard(paddle.static.Program()):
x = paddle.fluid.data(name='X1', shape=[100], dtype='int32')
x = paddle.static.data(name='X1', shape=[100], dtype='int32')
self.assertRaises(TypeError, F.log_softmax, x)
x = paddle.fluid.data(name='X2', shape=[100], dtype='float32')
x = paddle.static.data(name='X2', shape=[100], dtype='float32')
self.assertRaises(TypeError, F.log_softmax, x, dtype='int32')
paddle.disable_static()
......
......@@ -107,8 +107,8 @@ class TestMaskedSelectAPI(unittest.TestCase):
def test_static_mode(self):
shape = [8, 9, 6]
x = paddle.fluid.data(shape=shape, dtype='float32', name='x')
mask = paddle.fluid.data(shape=shape, dtype='bool', name='mask')
x = paddle.static.data(shape=shape, dtype='float32', name='x')
mask = paddle.static.data(shape=shape, dtype='bool', name='mask')
np_x = np.random.random(shape).astype('float32')
np_mask = np.array(np.random.randint(2, size=shape, dtype=bool))
......@@ -132,9 +132,9 @@ class TestMaskedSelectError(unittest.TestCase):
):
shape = [8, 9, 6]
x = paddle.fluid.data(shape=shape, dtype='float32', name='x')
mask = paddle.fluid.data(shape=shape, dtype='bool', name='mask')
mask_float = paddle.fluid.data(
x = paddle.static.data(shape=shape, dtype='float32', name='x')
mask = paddle.static.data(shape=shape, dtype='bool', name='mask')
mask_float = paddle.static.data(
shape=shape, dtype='float32', name='mask_float'
)
np_x = np.random.random(shape).astype('float32')
......
......@@ -350,8 +350,8 @@ class TestMatMulV2API(unittest.TestCase):
def check_static_result(self, place):
with fluid.program_guard(fluid.Program(), fluid.Program()):
input_x = fluid.data(name="input_x", shape=[4, 3], dtype="float32")
input_y = fluid.data(name="input_y", shape=[3, 4], dtype="float32")
input_x = paddle.static.data(name="input_x", shape=[4, 3], dtype="float32")
input_y = paddle.static.data(name="input_y", shape=[3, 4], dtype="float32")
result = paddle.matmul(input_x, input_y)
......
......@@ -68,8 +68,8 @@ class TestMeshgridOp2(TestMeshgridOp):
class TestMeshgridOp3(unittest.TestCase):
def test_api(self):
x = fluid.data(shape=[100], dtype='int32', name='x')
y = fluid.data(shape=[200], dtype='int32', name='y')
x = paddle.static.data(shape=[100], dtype='int32', name='x')
y = paddle.static.data(shape=[200], dtype='int32', name='y')
input_1 = np.random.randint(
0,
......@@ -104,8 +104,8 @@ class TestMeshgridOp3(unittest.TestCase):
class TestMeshgridOp4(unittest.TestCase):
def test_list_input(self):
x = fluid.data(shape=[100], dtype='int32', name='x')
y = fluid.data(shape=[200], dtype='int32', name='y')
x = paddle.static.data(shape=[100], dtype='int32', name='x')
y = paddle.static.data(shape=[200], dtype='int32', name='y')
input_1 = np.random.randint(
0,
......@@ -141,8 +141,8 @@ class TestMeshgridOp4(unittest.TestCase):
class TestMeshgridOp5(unittest.TestCase):
def test_tuple_input(self):
x = fluid.data(shape=[100], dtype='int32', name='x')
y = fluid.data(shape=[200], dtype='int32', name='y')
x = paddle.static.data(shape=[100], dtype='int32', name='x')
y = paddle.static.data(shape=[200], dtype='int32', name='y')
input_1 = np.random.randint(
0,
......
......@@ -127,9 +127,9 @@ class TestScatterAPI(unittest.TestCase):
def check_static_result(self, place):
with fluid.program_guard(fluid.Program(), fluid.Program()):
input = fluid.data(name="input", shape=[3, 2], dtype="float32")
index = fluid.data(name="index", shape=[4], dtype="int64")
updates = fluid.data(name="updates", shape=[4, 2], dtype="float32")
input = paddle.static.data(name="input", shape=[3, 2], dtype="float32")
index = paddle.static.data(name="index", shape=[4], dtype="int64")
updates = paddle.static.data(name="updates", shape=[4, 2], dtype="float32")
result = self.scatter(input, index, updates, False)
input_data = np.array([[1, 1], [2, 2], [3, 3]]).astype(np.float32)
......
......@@ -67,8 +67,8 @@ class TestSizeAPI(unittest.TestCase):
with fluid.program_guard(main_program, startup_program):
shape1 = [2, 1, 4, 5]
shape2 = [1, 4, 5]
x_1 = paddle.fluid.data(shape=shape1, dtype='int32', name='x_1')
x_2 = paddle.fluid.data(shape=shape2, dtype='int32', name='x_2')
x_1 = paddle.static.data(shape=shape1, dtype='int32', name='x_1')
x_2 = paddle.static.data(shape=shape2, dtype='int32', name='x_2')
input_1 = np.random.random(shape1).astype("int32")
input_2 = np.random.random(shape2).astype("int32")
out_1 = paddle.numel(x_1)
......
......@@ -132,7 +132,7 @@ class TestSoftmaxAPI(unittest.TestCase):
def test_static_check(self):
with paddle.static.program_guard(paddle.static.Program()):
x = paddle.fluid.data('X', self.x_np.shape, 'float32')
x = paddle.static.data('X', self.x_np.shape, 'float32')
out1 = self.softmax(x)
m = paddle.nn.Softmax()
out2 = m(x)
......@@ -173,12 +173,12 @@ class TestSoftmaxAPI(unittest.TestCase):
# The input type must be Variable.
self.assertRaises(TypeError, self.softmax, 1)
# The input dtype must be float16, float32
x_int32 = paddle.fluid.data(
x_int32 = paddle.static.data(
name='x_int32', shape=[2, 3], dtype='int32'
)
self.assertRaises(TypeError, self.softmax, x_int32)
            # the input dtype float16 is also supported
x_fp16 = paddle.fluid.data(
x_fp16 = paddle.static.data(
name='x_fp16', shape=[2, 3], dtype='float16'
)
self.softmax(x_fp16)
......
......@@ -256,7 +256,7 @@ class TestTransposeApi(unittest.TestCase):
class TestTAPI(unittest.TestCase):
def test_out(self):
with fluid.program_guard(fluid.Program()):
data = fluid.data(shape=[10], dtype="float32", name="data")
data = paddle.static.data(shape=[10], dtype="float32", name="data")
data_t = paddle.t(data)
place = fluid.MLUPlace(0)
exe = fluid.Executor(place)
......@@ -266,7 +266,7 @@ class TestTAPI(unittest.TestCase):
self.assertEqual((result == expected_result).all(), True)
with fluid.program_guard(fluid.Program()):
data = fluid.data(shape=[10, 5], dtype="float32", name="data")
data = paddle.static.data(shape=[10, 5], dtype="float32", name="data")
data_t = paddle.t(data)
place = fluid.MLUPlace(0)
exe = fluid.Executor(place)
......@@ -276,7 +276,7 @@ class TestTAPI(unittest.TestCase):
self.assertEqual((result == expected_result).all(), True)
with fluid.program_guard(fluid.Program()):
data = fluid.data(shape=[1, 5], dtype="float32", name="data")
data = paddle.static.data(shape=[1, 5], dtype="float32", name="data")
data_t = paddle.t(data)
place = fluid.MLUPlace(0)
exe = fluid.Executor(place)
......@@ -311,7 +311,7 @@ class TestTAPI(unittest.TestCase):
def test_errors(self):
with fluid.program_guard(fluid.Program()):
x = fluid.data(name='x', shape=[10, 5, 3], dtype='float32')
x = paddle.static.data(name='x', shape=[10, 5, 3], dtype='float32')
def test_x_dimension_check():
paddle.t(x)
......
......@@ -81,7 +81,7 @@ def case_generator(op_type, Xshape, diagonal, expected):
def test_failure(self):
paddle.enable_static()
data = fluid.data(shape=Xshape, dtype='float64', name=cls_name)
data = paddle.static.data(shape=Xshape, dtype='float64', name=cls_name)
with self.assertRaisesRegex(
eval(expected.split(':')[-1]), errmsg[expected]
):
......@@ -146,7 +146,7 @@ class TestTrilTriuOpAPI(unittest.TestCase):
startup_prog = Program()
with program_guard(prog, startup_prog):
data = np.random.random([1, 9, 9, 4]).astype(dtype)
x = fluid.data(shape=[1, 9, -1, 4], dtype=dtype, name='x')
x = paddle.static.data(shape=[1, 9, -1, 4], dtype=dtype, name='x')
tril_out, triu_out = tensor.tril(x), tensor.triu(x)
place = fluid.MLUPlace(0)
......@@ -183,7 +183,7 @@ class TestTrilTriuOpAPI(unittest.TestCase):
startup_prog = Program()
with program_guard(prog, startup_prog):
data = np.random.random([1, 9, 9, 4]).astype(dtype)
x = fluid.data(shape=[1, 9, -1, 4], dtype=dtype, name='x')
x = paddle.static.data(shape=[1, 9, -1, 4], dtype=dtype, name='x')
triu_out = paddle.triu(x)
place = fluid.MLUPlace(0)
......
......@@ -588,7 +588,7 @@ class TestDygraphBatchNormTrainableStats(unittest.TestCase):
is_test=is_test,
trainable_statistics=trainable_statistics,
)
x = fluid.data(name='x', shape=x_np.shape, dtype=x_np.dtype)
x = paddle.static.data(name='x', shape=x_np.shape, dtype=x_np.dtype)
y = bn(x)
exe.run(fluid.default_startup_program())
r = exe.run(feed={'x': x_np}, fetch_list=[y])[0]
......
......@@ -30,14 +30,14 @@ def test_static_layer(
prog = paddle.static.Program()
startup_prog = paddle.static.Program()
with paddle.static.program_guard(prog, startup_prog):
input = paddle.fluid.data(
input = paddle.static.data(
name='input', shape=input_np.shape, dtype='float32'
)
label = paddle.fluid.data(
label = paddle.static.data(
name='label', shape=label_np.shape, dtype='float32'
)
if weight_np is not None:
weight = paddle.fluid.data(
weight = paddle.static.data(
name='weight', shape=weight_np.shape, dtype='float32'
)
bce_loss = paddle.nn.loss.BCELoss(
......@@ -63,14 +63,14 @@ def test_static_functional(
prog = paddle.static.Program()
startup_prog = paddle.static.Program()
with paddle.static.program_guard(prog, startup_prog):
input = paddle.fluid.data(
input = paddle.static.data(
name='input', shape=input_np.shape, dtype='float32'
)
label = paddle.fluid.data(
label = paddle.static.data(
name='label', shape=label_np.shape, dtype='float32'
)
if weight_np is not None:
weight = paddle.fluid.data(
weight = paddle.static.data(
name='weight', shape=weight_np.shape, dtype='float32'
)
res = paddle.nn.functional.binary_cross_entropy(
......
......@@ -137,9 +137,9 @@ class TestClipAPI(unittest.TestCase):
paddle.enable_static()
data_shape = [1, 9, 9, 4]
data = np.random.random(data_shape).astype('float32')
images = fluid.data(name='image', shape=data_shape, dtype='float32')
min = fluid.data(name='min', shape=[1], dtype='float32')
max = fluid.data(name='max', shape=[1], dtype='float32')
images = paddle.static.data(name='image', shape=data_shape, dtype='float32')
min = paddle.static.data(name='min', shape=[1], dtype='float32')
max = paddle.static.data(name='max', shape=[1], dtype='float32')
place = (
fluid.NPUPlace(0)
......@@ -203,8 +203,8 @@ class TestClipAPI(unittest.TestCase):
def test_errors(self):
paddle.enable_static()
x1 = fluid.data(name='x1', shape=[1], dtype="int16")
x2 = fluid.data(name='x2', shape=[1], dtype="int8")
x1 = paddle.static.data(name='x1', shape=[1], dtype="int16")
x2 = paddle.static.data(name='x2', shape=[1], dtype="int8")
self.assertRaises(TypeError, paddle.clip, x=x1, min=0.2, max=0.8)
self.assertRaises(TypeError, paddle.clip, x=x2, min=0.2, max=0.8)
paddle.disable_static()
......
......@@ -215,7 +215,7 @@ class TestDropoutAPI(unittest.TestCase):
def check_static_result(self, place):
with fluid.program_guard(fluid.Program(), fluid.Program()):
input = fluid.data(name="input", shape=[40, 40], dtype="float32")
input = paddle.static.data(name="input", shape=[40, 40], dtype="float32")
res1 = paddle.nn.functional.dropout(
x=input, p=0.0, training=False, mode='upscale_in_train'
)
......
......@@ -510,8 +510,8 @@ class TestAddApi(unittest.TestCase):
def test_name(self):
with fluid.program_guard(fluid.Program()):
x = fluid.data(name="x", shape=[2, 3], dtype="float32")
y = fluid.data(name='y', shape=[2, 3], dtype='float32')
x = paddle.static.data(name="x", shape=[2, 3], dtype="float32")
y = paddle.static.data(name='y', shape=[2, 3], dtype='float32')
y_1 = self._executed_api(x, y, name='add_res')
self.assertEqual(('add_res' in y_1.name), True)
......@@ -525,8 +525,8 @@ class TestAddApi(unittest.TestCase):
"y": np.array([1, 5, 2]).astype('float32'),
}
x = fluid.data(name="x", shape=[3], dtype='float32')
y = fluid.data(name="y", shape=[3], dtype='float32')
x = paddle.static.data(name="x", shape=[3], dtype='float32')
y = paddle.static.data(name="y", shape=[3], dtype='float32')
z = self._executed_api(x, y)
place = fluid.NPUPlace(0)
......
......@@ -144,8 +144,8 @@ class TestRemainderOp(unittest.TestCase):
def test_name(self):
paddle.set_device('npu:0')
with fluid.program_guard(fluid.Program()):
x = fluid.data(name="x", shape=[2, 3], dtype="int64")
y = fluid.data(name='y', shape=[2, 3], dtype='int64')
x = paddle.static.data(name="x", shape=[2, 3], dtype="int64")
y = paddle.static.data(name='y', shape=[2, 3], dtype='int64')
y_1 = paddle.remainder(x, y, name='div_res')
self.assertEqual(('div_res' in y_1.name), True)
......
......@@ -101,8 +101,8 @@ class API_TestGather(unittest.TestCase):
with paddle.static.program_guard(
paddle.static.Program(), paddle.static.Program()
):
x = paddle.fluid.data('x', shape=[-1, 2], dtype='float32')
index = paddle.fluid.data('index', shape=[-1, 1], dtype='int32')
x = paddle.static.data('x', shape=[-1, 2], dtype='float32')
index = paddle.static.data('index', shape=[-1, 1], dtype='int32')
out = paddle.gather(x, index)
place = paddle.NPUPlace(0)
exe = paddle.static.Executor(place)
......
......@@ -216,7 +216,7 @@ class TestGroupNormOpFP16_With_NHWC(TestGroupNormOp):
class TestGroupNormException(unittest.TestCase):
# data_layout is not NHWC or NCHW
def test_exception(self):
data = fluid.data(name='data', shape=[None, 3, 3, 4], dtype="float64")
data = paddle.static.data(name='data', shape=[None, 3, 3, 4], dtype="float64")
def attr_data_format():
out = paddle.static.nn.group_norm(
......
......@@ -122,7 +122,7 @@ class TestHardsigmoidAPI(unittest.TestCase):
def test_fluid_api(self):
with fluid.program_guard(fluid.Program()):
x = fluid.data('X', self.x_np.shape, self.x_np.dtype)
x = paddle.static.data('X', self.x_np.shape, self.x_np.dtype)
out = paddle.nn.functional.hardsigmoid(x)
exe = fluid.Executor(self.place)
res = exe.run(feed={'X': self.x_np}, fetch_list=[out])
......@@ -140,12 +140,12 @@ class TestHardsigmoidAPI(unittest.TestCase):
# The input type must be Variable.
self.assertRaises(TypeError, F.hardsigmoid, 1)
# The input dtype must be float16, float32, float64.
x_int32 = paddle.fluid.data(
x_int32 = paddle.static.data(
name='x_int32', shape=[12, 10], dtype='int32'
)
self.assertRaises(TypeError, F.hardsigmoid, x_int32)
            # the input dtype float16 is also supported
x_fp16 = paddle.fluid.data(
x_fp16 = paddle.static.data(
name='x_fp16', shape=[12, 10], dtype='float16'
)
F.hardsigmoid(x_fp16)
......
......@@ -160,8 +160,8 @@ class TestIndexSampleShape(unittest.TestCase):
low=0, high=x_shape[1], size=index_shape
).astype(index_type)
x = fluid.data(name='x', shape=[-1, 5], dtype='float32')
index = fluid.data(name='index', shape=[-1, 3], dtype='int32')
x = paddle.static.data(name='x', shape=[-1, 5], dtype='float32')
index = paddle.static.data(name='index', shape=[-1, 3], dtype='int32')
output = paddle.index_sample(x=x, index=index)
place = fluid.NPUPlace(0)
......
......@@ -61,7 +61,7 @@ class TestInstanceNorm(unittest.TestCase):
def compute_v1(x_np):
with program_guard(Program(), Program()):
ins = paddle.nn.InstanceNorm(shape[1])
x = fluid.data(name='x', shape=x_np.shape, dtype=x_np.dtype)
x = paddle.static.data(name='x', shape=x_np.shape, dtype=x_np.dtype)
y = ins(x)
exe.run(fluid.default_startup_program())
r = exe.run(feed={'x': x_np}, fetch_list=[y])[0]
......@@ -70,7 +70,7 @@ class TestInstanceNorm(unittest.TestCase):
def compute_v2(x_np):
with program_guard(Program(), Program()):
ins = paddle.nn.InstanceNorm2D(shape[1])
x = fluid.data(name='x', shape=x_np.shape, dtype=x_np.dtype)
x = paddle.static.data(name='x', shape=x_np.shape, dtype=x_np.dtype)
y = ins(x)
exe.run(fluid.default_startup_program())
r = exe.run(feed={'x': x_np}, fetch_list=[y])[0]
......
......@@ -137,8 +137,8 @@ class TestKLDivLossDygraph(unittest.TestCase):
self.run_kl_loss('none')
def test_kl_loss_static_api(self):
input = paddle.fluid.data(name='input', shape=[5, 20])
label = paddle.fluid.data(name='label', shape=[5, 20])
input = paddle.static.data(name='input', shape=[5, 20])
label = paddle.static.data(name='label', shape=[5, 20])
pred_loss = paddle.nn.functional.kl_div(input, label)
......