diff --git a/paddle/fluid/framework/reader.h b/paddle/fluid/framework/reader.h
index b2c48c5877dc71192ce2494d94e963295a94ca29..9215d701510a1070e48d3ce291ec8779dbfdf34f 100644
--- a/paddle/fluid/framework/reader.h
+++ b/paddle/fluid/framework/reader.h
@@ -92,8 +92,6 @@ class ReaderBase {
   std::vector<proto::VarType::Type> var_types_;

   // Whether to check the shape and dtype of fed variables.
-  // For Backward compatibility, variables created by old API fluid.layers.data
-  // doesn't check shape but fluid.data checks.
   std::vector<bool> need_check_feed_;

  private:
diff --git a/python/paddle/fluid/__init__.py b/python/paddle/fluid/__init__.py
index db3fefcbaceb63152c596bf2c4da025e28279f30..51ed68d6cf0aa4febcc542c93d77c9a1fa0b6abf 100644
--- a/python/paddle/fluid/__init__.py
+++ b/python/paddle/fluid/__init__.py
@@ -46,8 +46,6 @@ from .data_feed_desc import *
 from . import dataset
 from .dataset import *

-from .data import *
-
 from . import trainer_desc
 from . import io
@@ -117,7 +115,6 @@ __all__ = (
     'initializer',
     'layers',
     'contrib',
-    'data',
     'dygraph',
     'enable_dygraph',
     'disable_dygraph',
diff --git a/python/paddle/fluid/contrib/layers/nn.py b/python/paddle/fluid/contrib/layers/nn.py
index 5763b895becaf05dbd73613d6a5c4a66d9636325..92c95f8eac23af1391c9eabe52095ff39ceab2e7 100644
--- a/python/paddle/fluid/contrib/layers/nn.py
+++ b/python/paddle/fluid/contrib/layers/nn.py
@@ -567,8 +567,9 @@ def partial_concat(input, start_index=0, length=-1):
     Examples:
         .. code-block:: python
             import paddle.fluid as fluid
-            x = fluid.data(name="x", shape=[None,3], dtype="float32")
-            y = fluid.data(name="y", shape=[None,3], dtype="float32")
+            import paddle
+            x = paddle.randn(name="x", shape=[1,3], dtype="float32")
+            y = paddle.randn(name="y", shape=[1,3], dtype="float32")
             concat = fluid.contrib.layers.partial_concat(
                 [x, y], start_index=0, length=2)
     """
@@ -629,9 +630,12 @@ def partial_sum(input, start_index=0, length=-1):
         import paddle.fluid.layers as layers
         import paddle.fluid as fluid
         import numpy as np
-        x = fluid.data(name="x", shape=[None, 3], dtype="float32")
-        y = fluid.data(name="y", shape=[None, 3], dtype="float32")
-        sum = layers.partial_sum([x,y], start_index=0, length=2)
+        import paddle
+        paddle.enable_static()
+
+        x = paddle.static.data(name="x", shape=[2, 3], dtype="float32")
+        y = paddle.static.data(name="y", shape=[2, 3], dtype="float32")
+        sum = fluid.contrib.layers.partial_sum([x,y], start_index=0, length=2)
         place = fluid.CPUPlace()
         exe = fluid.Executor(place)
         xx = np.array([1,2,3,4,5,6]).reshape((2,3)).astype("float32")
@@ -898,7 +902,7 @@ def tdm_child(x, node_nums, child_nums, param_attr=None, dtype='int32'):
         import paddle.fluid as fluid
         import numpy as np
         paddle.enable_static()
-        x = fluid.data(name="x", shape=[None, 1], dtype="int32", lod_level=1)
+        x = paddle.static.data(name="x", shape=[None, 1], dtype="int32", lod_level=1)
         tree_info = [[0,0,0,1,2],
                      [0,1,0,3,4],[0,1,0,5,6],
                      [0,2,1,0,0],[1,2,1,0,0],[2,2,2,0,0],[3,2,2,0,0]]
@@ -1007,7 +1011,7 @@ def tdm_sampler(
         import paddle.fluid as fluid
         import numpy as np
         paddle.enable_static()
-        x = fluid.data(name="x", shape=[None, 1], dtype="int32", lod_level=1)
+        x = paddle.static.data(name="x", shape=[None, 1], dtype="int32", lod_level=1)
         travel_list = [[1, 3], [1, 4], [2, 5], [2, 6]] # leaf node's travel path, shape(leaf_node_num, layer_num)
         layer_list_flat = [[1], [2], [3], [4], [5], [6]] # shape(node_nums, 1)
@@ -1197,18 +1201,17 @@ def rank_attention(
     Examples:
        .. code-block:: python
            import paddle.fluid as fluid
-           import numpy as np
+           import paddle
+           paddle.enable_static()

-           input = fluid.data(name="input", shape=[None, 2], dtype="float32")
-           rank_offset = fluid.data(name="rank_offset", shape=[None, 7], dtype="int32")
+           input = paddle.static.data(name="input", shape=[None, 2], dtype="float32")
+           rank_offset = paddle.static.data(name="rank_offset", shape=[None, 7], dtype="int32")
            out = fluid.contrib.layers.rank_attention(input=input,
                                                      rank_offset=rank_offset,
                                                      rank_param_shape=[18,3],
                                                      rank_param_attr=
-                                                     fluid.ParamAttr(learning_rate=1.0,
-                                                                     name="ubm_rank_param.w_0",
-                                                                     initializer=
-                                                                     fluid.initializer.Xavier(uniform=False)),
+                                                     paddle.ParamAttr(learning_rate=1.0,
+                                                                      name="ubm_rank_param.w_0"),
                                                      max_rank=3,
                                                      max_size=0)
     """
@@ -1259,22 +1262,21 @@ def batch_fc(input, param_size, param_attr, bias_size, bias_attr, act=None):
     Examples:
         .. code-block:: python
            import paddle.fluid as fluid
+           import paddle

-           input = fluid.data(name="input", shape=[16, 2, 3], dtype="float32")
+           paddle.enable_static()
+
+           input = paddle.static.data(name="input", shape=[16, 2, 3], dtype="float32")
            out = fluid.contrib.layers.batch_fc(input=input,
                                                param_size=[16, 3, 10],
                                                param_attr=
-                                               fluid.ParamAttr(learning_rate=1.0,
-                                                               name="w_0",
-                                                               initializer=
-                                                               fluid.initializer.Xavier(uniform=False)),
+                                               paddle.ParamAttr(learning_rate=1.0,
+                                                                name="w_0"),
                                                bias_size=[16, 10],
                                                bias_attr=
-                                               fluid.ParamAttr(learning_rate=1.0,
-                                                               name="b_0",
-                                                               initializer=
-                                                               fluid.initializer.Xavier(uniform=False)),
-                                               act="relu")
+                                               paddle.ParamAttr(learning_rate=1.0,
+                                                                name="b_0"),
+                                               act="relu")
     """
@@ -1380,10 +1382,12 @@ def bilateral_slice(x, guide, grid, has_offset, name=None):
        .. code-block:: python

            import paddle.fluid as fluid
+           import paddle
+           paddle.enable_static()

-           x = fluid.data(name='x', shape=[None, 3, 101, 60], dtype='float32')
-           guide = fluid.data(name='guide', shape=[None, 101, 60], dtype='float32')
-           grid = fluid.data(name='grid', shape=[None, 12, 8, 10, 6], dtype='float32')
+           x = paddle.randn(name='x', shape=[1, 3, 101, 60], dtype='float32')
+           guide = paddle.randn(name='guide', shape=[1, 101, 60], dtype='float32')
+           grid = paddle.randn(name='grid', shape=[1, 12, 8, 10, 6], dtype='float32')

            # without offset
            output = fluid.contrib.bilateral_slice(x, guide, grid, has_offset=False)
diff --git a/python/paddle/fluid/data.py b/python/paddle/fluid/data.py
deleted file mode 100644
index 00173a29c28ac873992b06b578f947e80d33182f..0000000000000000000000000000000000000000
--- a/python/paddle/fluid/data.py
+++ /dev/null
@@ -1,126 +0,0 @@
-# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import numpy as np
-
-from paddle.fluid import core
-from paddle.fluid.layer_helper import LayerHelper
-from paddle.fluid.data_feeder import check_dtype, check_type
-from ..utils import deprecated
-from paddle.fluid.framework import static_only
-
-__all__ = ['data']
-
-
-@static_only
-@deprecated(since="2.0.0", update_to="paddle.static.data")
-def data(name, shape, dtype='float32', lod_level=0):
-    """
-    **Data Layer**
-
-    This function creates a variable on the global block. The global variable
-    can be accessed by all the following operators in the graph. The variable
-    is a placeholder that could be fed with input, such as Executor can feed
-    input into the variable.
-
-    Note:
-        `paddle.fluid.layers.data` is deprecated. It will be removed in a
-        future version. Please use this `paddle.fluid.data`.
-
-        The `paddle.fluid.layers.data` set shape and dtype at compile time but
-        does NOT check the shape or the dtype of fed data, this
-        `paddle.fluid.data` checks the shape and the dtype of data fed by
-        Executor or ParallelExecutor during run time.
-
-        To feed variable size inputs, users can set None or -1 on the variable
-        dimension when using :code:`paddle.fluid.data`, or feed variable size
-        inputs directly to :code:`paddle.fluid.layers.data` and PaddlePaddle
-        will fit the size accordingly.
-
-        The default :code:`stop_gradient` attribute of the Variable created by
-        this API is true, which means the gradient won't be passed backward
-        through the data Variable. Set :code:`var.stop_gradient = False` If
-        user would like to pass backward gradient.
-
-    Args:
-        name (str): The name/alias of the variable, see :ref:`api_guide_Name`
-            for more details.
-        shape (list|tuple): List|Tuple of integers declaring the shape. You can
-            set "None" or -1 at a dimension to indicate the dimension can be of any
-            size. For example, it is useful to set changeable batch size as "None" or -1.
-        dtype (np.dtype|VarType|str, optional): The type of the data. Supported
-            dtype: bool, float16, float32, float64, int8, int16, int32, int64,
-            uint8. Default: float32.
-        lod_level (int, optional): The LoD level of the LoDTensor. Usually users
-            don't have to set this value. For more details about when and how to
-            use LoD level, see :ref:`user_guide_lod_tensor` . Default: 0.
-
-    Returns:
-        Variable: The global variable that gives access to the data.
-
-    Examples:
-        .. code-block:: python
-
-            import paddle
-            import paddle.fluid as fluid
-            import numpy as np
-            paddle.enable_static()
-
-            # Creates a variable with fixed size [3, 2, 1]
-            # User can only feed data of the same shape to x
-            x = fluid.data(name='x', shape=[3, 2, 1], dtype='float32')
-
-            # Creates a variable with changeable batch size -1.
-            # Users can feed data of any batch size into y,
-            # but size of each data sample has to be [2, 1]
-            y = fluid.data(name='y', shape=[-1, 2, 1], dtype='float32')
-
-            z = x + y
-
-            # In this example, we will feed x and y with np-ndarray "1"
-            # and fetch z, like implementing "1 + 1 = 2" in PaddlePaddle
-            feed_data = np.ones(shape=[3, 2, 1], dtype=np.float32)
-
-            exe = fluid.Executor(fluid.CPUPlace())
-            out = exe.run(fluid.default_main_program(),
-                          feed={
-                              'x': feed_data,
-                              'y': feed_data
-                          },
-                          fetch_list=[z.name])
-
-            # np-ndarray of shape=[3, 2, 1], dtype=float32, whose elements are 2
-            print(out)
-
-    """
-    helper = LayerHelper('data', **locals())
-
-    check_type(name, 'name', (bytes, str), 'data')
-    check_type(shape, 'shape', (list, tuple), 'data')
-
-    shape = list(shape)
-    for i in range(len(shape)):
-        if shape[i] is None:
-            shape[i] = -1
-
-    return helper.create_global_variable(
-        name=name,
-        shape=shape,
-        dtype=dtype,
-        type=core.VarDesc.VarType.LOD_TENSOR,
-        stop_gradient=True,
-        lod_level=lod_level,
-        is_data=True,
-        need_check_feed=True,
-    )
diff --git a/python/paddle/fluid/data_feeder.py b/python/paddle/fluid/data_feeder.py
index 7fee96e6477d40764d8e11467c26083138a2f3ad..e74e336ab59a100a9634190340954686948a0af4 100644
--- a/python/paddle/fluid/data_feeder.py
+++ b/python/paddle/fluid/data_feeder.py
@@ -347,8 +347,8 @@ class DataFeeder:
             startup_program = fluid.Program()
             with fluid.program_guard(main_program, startup_program):
-                data_1 = fluid.data(name='data_1', shape=[None, 2, 2], dtype='float32')
-                data_2 = fluid.data(name='data_2', shape=[None, 1, 3], dtype='float32')
+                data_1 = paddle.static.data(name='data_1', shape=[None, 2, 2], dtype='float32')
+                data_2 = paddle.static.data(name='data_2', shape=[None, 1, 3], dtype='float32')
                 out = paddle.static.nn.fc(x=[data_1, data_2], size=2)
                 # ...
                feeder = fluid.DataFeeder([data_1, data_2], place)
@@ -414,9 +414,9 @@ class DataFeeder:
                 for i in range(1, limit + 1):
                     yield np.ones([6]).astype('float32') * i , np.ones([1]).astype('int64') * i, np.random.random([9]).astype('float32')

-            data_1 = fluid.data(name='data_1', shape=[None, 2, 1, 3])
-            data_2 = fluid.data(name='data_2', shape=[None, 1], dtype='int64')
-            data_3 = fluid.data(name='data_3', shape=[None, 3, 3], dtype='float32')
+            data_1 = paddle.static.data(name='data_1', shape=[None, 2, 1, 3])
+            data_2 = paddle.static.data(name='data_2', shape=[None, 1], dtype='int64')
+            data_3 = paddle.static.data(name='data_3', shape=[None, 3, 3], dtype='float32')
             feeder = fluid.DataFeeder(['data_1','data_2', 'data_3'], fluid.CPUPlace())
@@ -482,8 +482,8 @@ class DataFeeder:
                     yield np.ones([4]) * factor + base, np.ones([4]) * factor + base + 5
                 return _reader()

-            x = fluid.data(name='x', shape=[None, 2, 2])
-            y = fluid.data(name='y', shape=[None, 2, 2], dtype='float32')
+            x = paddle.static.data(name='x', shape=[None, 2, 2])
+            y = paddle.static.data(name='y', shape=[None, 2, 2], dtype='float32')

             z = paddle.add(x, y)
@@ -582,8 +582,8 @@ class DataFeeder:
             places = [fluid.CPUPlace() for _ in range(place_num)]

             # a simple network sample
-            data = fluid.data(name='data', shape=[None, 4, 4], dtype='float32')
-            label = fluid.data(name='label', shape=[None, 1], dtype='int64')
+            data = paddle.static.data(name='data', shape=[None, 4, 4], dtype='float32')
+            label = paddle.static.data(name='label', shape=[None, 1], dtype='int64')
             hidden = paddle.static.nn.fc(x=data, size=10)

             feeder = fluid.DataFeeder(place=places[0], feed_list=[data, label])
diff --git a/python/paddle/fluid/executor.py b/python/paddle/fluid/executor.py
index 2c5713e37615a220ed10bad4c600013d6e0d1f6f..3b018f30c64938588bc3b8bc60529dbc19e8dccc 100755
--- a/python/paddle/fluid/executor.py
+++ b/python/paddle/fluid/executor.py
@@ -1687,7 +1687,7 @@ class Executor:

         compiled = isinstance(program, compiler.CompiledProgram)

-        # Check if fluid.data() variable no feed data
+        # Check if paddle.static.data() variable no feed data
         if use_prune:
             if compiled:
                 global_block = program._program.global_block()
diff --git a/python/paddle/fluid/framework.py b/python/paddle/fluid/framework.py
index a838f06dbe8cb5f1522c6ddcae1852c17ad3264d..43c23eb003e2ee2b86df2a61522315344e39a810 100644
--- a/python/paddle/fluid/framework.py
+++ b/python/paddle/fluid/framework.py
@@ -2072,9 +2072,9 @@ class Variable(metaclass=VariableMetaClass):
         Examples:
            .. code-block:: python

-               import paddle.fluid as fluid
+               import paddle

-               x = fluid.data(name="x", shape=[-1, 23, 48], dtype='float32')
+               x = paddle.static.data(name="x", shape=[-1, 23, 48], dtype='float32')
                print(x.grad_name) # output is ``x@GRAD``

        """
diff --git a/python/paddle/fluid/io.py b/python/paddle/fluid/io.py
index f61b4bbe2339ec7c3d4de9d8395da4eaf5f8b481..9a3e3c9b8352367ce37653c51484ddbb769cc0eb 100644
--- a/python/paddle/fluid/io.py
+++ b/python/paddle/fluid/io.py
@@ -190,8 +190,8 @@ def save_inference_model(
             path = "./infer_model"

             # User defined network, here a softmax regession example
-            image = fluid.data(name='img', shape=[None, 28, 28], dtype='float32')
-            label = fluid.data(name='label', shape=[None, 1], dtype='int64')
+            image = paddle.static.data(name='img', shape=[None, 28, 28], dtype='float32')
+            label = paddle.static.data(name='label', shape=[None, 1], dtype='int64')
             feeder = fluid.DataFeeder(feed_list=[image, label], place=fluid.CPUPlace())
             predict = paddle.static.nn.fc(x=image, size=10, activation='softmax')
diff --git a/python/paddle/fluid/layers/control_flow.py b/python/paddle/fluid/layers/control_flow.py
index 0697b53914f17dc74161918ceefa2334c3ef1039..b1f9a03d0d9231bb55289c60fc298ee3590c5a21 100755
--- a/python/paddle/fluid/layers/control_flow.py
+++ b/python/paddle/fluid/layers/control_flow.py
@@ -335,7 +335,7 @@ class StaticRNN:
                 vocab_size, hidden_size=10000, 200
                 paddle.enable_static()
-                x = fluid.data(name="x", shape=[None, 1, 1], dtype='int64')
+                x = paddle.static.data(name="x", shape=[None, 1, 1], dtype='int64')
                 # create word sequence
                 x_emb = layers.embedding(
                     input=x,
@@ -426,7 +426,7 @@ class StaticRNN:
                 vocab_size, hidden_size=10000, 200
                 paddle.enable_static()
-                x = fluid.data(name="x", shape=[None, 1, 1], dtype='int64')
+                x = paddle.static.data(name="x", shape=[None, 1, 1], dtype='int64')
                 # create word sequence
                 x_emb = layers.embedding(
                     input=x,
@@ -455,7 +455,7 @@ class StaticRNN:
                 import paddle.fluid.layers as layers
                 vocab_size, hidden_size=10000, 200
                 paddle.enable_static()
-                x = fluid.data(name="x", shape=[None, 1, 1], dtype='int64')
+                x = paddle.static.data(name="x", shape=[None, 1, 1], dtype='int64')
                 # create word sequence
                 x_emb = layers.embedding(
                     input=x,
@@ -558,7 +558,7 @@ class StaticRNN:
                 vocab_size, hidden_size=10000, 200
                 paddle.enable_static()
-                x = fluid.data(name="x", shape=[None, 1, 1], dtype='int64')
+                x = paddle.static.data(name="x", shape=[None, 1, 1], dtype='int64')
                 # create word sequence
                 x_emb = layers.embedding(
                     input=x,
@@ -611,7 +611,7 @@ class StaticRNN:
                 vocab_size, hidden_size=10000, 200
                 paddle.enable_static()
-                x = fluid.data(name="x", shape=[None, 1, 1], dtype='int64')
+                x = paddle.static.data(name="x", shape=[None, 1, 1], dtype='int64')
                 # create word sequence
                 x_emb = layers.embedding(
                     input=x,
@@ -673,7 +673,7 @@ class StaticRNN:
                 vocab_size, hidden_size=10000, 200
                 paddle.enable_static()
-                x = fluid.data(name="x", shape=[None, 1, 1], dtype='int64')
+                x = paddle.static.data(name="x", shape=[None, 1, 1], dtype='int64')
                 # create word sequence
                 x_emb = layers.embedding(
                     input=x,
@@ -955,7 +955,7 @@ class While:
            i = paddle.full(shape=[1], dtype='int64', fill_value=0)
            loop_len = paddle.full(shape=[1], dtype='int64', fill_value=10)
            one = paddle.full(shape=[1], dtype='float32', fill_value=1)
-           data = fluid.data(name='data', shape=[1], dtype='float32')
+           data = paddle.static.data(name='data', shape=[1], dtype='float32')
            sums = paddle.full(shape=[1], dtype='float32', fill_value=0)  # Define the variable to be obtained ouside of While, which name should be different from the variable inside the While to be obtained
            cond = paddle.less_than(x=i, y=loop_len)
diff --git a/python/paddle/fluid/layers/math_op_patch.py b/python/paddle/fluid/layers/math_op_patch.py
index 136670bf27c9c1d24185c13258dd9502cd6e80b6..01426a0c7928282a91626734dacc031b4906c811 100644
--- a/python/paddle/fluid/layers/math_op_patch.py
+++ b/python/paddle/fluid/layers/math_op_patch.py
@@ -183,13 +183,13 @@ def monkey_patch_variable():
             In Static Graph Mode:

             .. code-block:: python
-
+                import paddle
                 import paddle.fluid as fluid
-
+                paddle.enable_static()
                 startup_prog = fluid.Program()
                 main_prog = fluid.Program()
                 with fluid.program_guard(startup_prog, main_prog):
-                    original_variable = fluid.data(name = "new_variable", shape=[2,2], dtype='float32')
+                    original_variable = paddle.static.data(name = "new_variable", shape=[2,2], dtype='float32')
                     new_variable = original_variable.astype('int64')
                     print("new var's dtype is: {}".format(new_variable.dtype))
diff --git a/python/paddle/fluid/layers/nn.py b/python/paddle/fluid/layers/nn.py
index 441c737c0644be8efc1f1fef1ae3526ecafd0387..a2d962d11d1c6862e8a9ca3c86dd654b72e6f913 100644
--- a/python/paddle/fluid/layers/nn.py
+++ b/python/paddle/fluid/layers/nn.py
@@ -206,7 +206,7 @@ def embedding(
           import paddle
           paddle.enable_static()

-          data = fluid.data(name='x', shape=[None, 1], dtype='int64')
+          data = paddle.static.data(name='x', shape=[None, 1], dtype='int64')

           # example 1
           emb_1 = paddle.static.nn.embedding(input=data, size=[128, 64])
@@ -572,7 +572,7 @@ def reduce_sum(input, dim=None, keep_dim=False, name=None):
            # [[0.2, 0.3, 0.5, 0.9]
            #  [0.1, 0.2, 0.6, 0.7]]
            # Each example is followed by the corresponding output tensor.
-           x = fluid.data(name='x', shape=[2, 4], dtype='float32')
+           x = paddle.static.data(name='x', shape=[2, 4], dtype='float32')
            fluid.layers.nn.reduce_sum(x)  # [3.5]
            fluid.layers.nn.reduce_sum(x, dim=0)  # [0.3, 0.5, 1.1, 1.6]
            fluid.layers.nn.reduce_sum(x, dim=-1)  # [1.9, 1.6]
@@ -582,7 +582,7 @@ def reduce_sum(input, dim=None, keep_dim=False, name=None):
            # [[[1, 2], [3, 4]],
            #  [[5, 6], [7, 8]]]
            # Each example is followed by the corresponding output tensor.
-           y = fluid.data(name='y', shape=[2, 2, 2], dtype='float32')
+           y = paddle.static.data(name='y', shape=[2, 2, 2], dtype='float32')
            fluid.layers.nn.reduce_sum(y, dim=[1, 2])  # [10, 26]
            fluid.layers.nn.reduce_sum(y, dim=[0, 1])  # [16, 20]
diff --git a/python/paddle/fluid/nets.py b/python/paddle/fluid/nets.py
index caacc658a3105b16747d43c067e16d87979df747..324f6227983aaa9e897573d94c3f8cdb7a1738be 100644
--- a/python/paddle/fluid/nets.py
+++ b/python/paddle/fluid/nets.py
@@ -111,7 +111,7 @@ def simple_img_conv_pool(
            import paddle.fluid as fluid
            import paddle
            paddle.enable_static()
-           img = fluid.data(name='img', shape=[100, 1, 28, 28], dtype='float32')
+           img = paddle.static.data(name='img', shape=[100, 1, 28, 28], dtype='float32')
            conv_pool = fluid.nets.simple_img_conv_pool(input=img,
                                                        filter_size=5,
                                                        num_filters=20,
@@ -214,7 +214,7 @@ def img_conv_group(
            import paddle
            paddle.enable_static()

-           img = fluid.data(name='img', shape=[None, 1, 28, 28], dtype='float32')
+           img = paddle.static.data(name='img', shape=[None, 1, 28, 28], dtype='float32')
            conv_pool = fluid.nets.img_conv_group(input=img,
                                                  conv_padding=1,
                                                  conv_num_filter=[3, 3],
@@ -331,7 +331,7 @@ def sequence_conv_pool(
            input_dim = 100 #len(word_dict)
            emb_dim = 128
            hid_dim = 512
-           data = fluid.data(name="words", shape=[None, 1], dtype="int64", lod_level=1)
+           data = paddle.static.data(name="words", shape=[None, 1], dtype="int64", lod_level=1)
            emb = fluid.layers.embedding(input=data, size=[input_dim, emb_dim], is_sparse=True)
            seq_conv = fluid.nets.sequence_conv_pool(input=emb,
                                                     num_filters=hid_dim,
@@ -391,7 +391,7 @@ def glu(input, dim=-1):
            import paddle
            paddle.enable_static()

-           data = fluid.data(
+           data = paddle.static.data(
                name="words", shape=[-1, 6, 3, 9], dtype="float32")
            # shape of output: [-1, 3, 3, 9]
            output = fluid.nets.glu(input=data, dim=1)
@@ -472,9 +472,9 @@ def scaled_dot_product_attention(
            import paddle
            paddle.enable_static()

-           queries = fluid.data(name="queries", shape=[3, 5, 9], dtype="float32")
-           keys = fluid.data(name="keys", shape=[3, 6, 9], dtype="float32")
-           values = fluid.data(name="values", shape=[3, 6, 10], dtype="float32")
+           queries = paddle.static.data(name="queries", shape=[3, 5, 9], dtype="float32")
+           keys = paddle.static.data(name="keys", shape=[3, 6, 9], dtype="float32")
+           values = paddle.static.data(name="values", shape=[3, 6, 10], dtype="float32")
            contexts = fluid.nets.scaled_dot_product_attention(queries, keys, values)
            contexts.shape  # [3, 5, 10]
    """
diff --git a/python/paddle/fluid/optimizer.py b/python/paddle/fluid/optimizer.py
index 3e5b4babc69137b6cedcb1b368615076b3fcc3f4..d835a3fbfcf8ba6aff16c7f3638d13634a5f27e9 100755
--- a/python/paddle/fluid/optimizer.py
+++ b/python/paddle/fluid/optimizer.py
@@ -2036,7 +2036,7 @@ class AdagradOptimizer(Optimizer):
            paddle.enable_static()

            np_inp = np.array([[1.0, 2.0], [3.0, 4.0]], dtype=np.float32)
-           inp = fluid.data(name="inp", shape=[2, 2])
+           inp = paddle.static.data(name="inp", shape=[2, 2], dtype="float32")
            out = paddle.static.nn.fc(inp, size=3)
            out = paddle.sum(out)
            optimizer = fluid.optimizer.AdagradOptimizer(learning_rate=0.2)
@@ -2228,8 +2228,8 @@ class AdamOptimizer(Optimizer):
            place = fluid.CPUPlace()
            main = fluid.Program()
            with fluid.program_guard(main):
-               x = fluid.data(name='x', shape=[None, 13], dtype='float32')
-               y = fluid.data(name='y', shape=[None, 1], dtype='float32')
+               x = paddle.static.data(name='x', shape=[None, 13], dtype='float32')
+               y = paddle.static.data(name='y', shape=[None, 1], dtype='float32')
                y_predict = paddle.static.nn.fc(x, size=1,
                                                activation=None)
                cost = paddle.nn.functional.square_error_cost(input=y_predict, label=y)
                avg_cost = paddle.mean(cost)
@@ -2257,8 +2257,8 @@ class AdamOptimizer(Optimizer):
            place = fluid.CPUPlace()
            main = fluid.Program()
            with fluid.program_guard(main):
-               x = fluid.data(name='x', shape=[None, 13], dtype='float32')
-               y = fluid.data(name='y', shape=[None, 1], dtype='float32')
+               x = paddle.static.data(name='x', shape=[None, 13], dtype='float32')
+               y = paddle.static.data(name='y', shape=[None, 1], dtype='float32')
                y_predict = paddle.static.nn.fc(x, size=1, activation=None)
                cost = paddle.nn.functional.square_error_cost(input=y_predict, label=y)
                avg_cost = paddle.mean(cost)
@@ -2292,8 +2292,8 @@ class AdamOptimizer(Optimizer):
                    div_res = global_step / decay_steps
                    decayed_beta1 = beta1_init * (decay_rate**div_res)
                    decayed_beta2 = beta2_init * (decay_rate**div_res)
-                   fluid.layers.assign(decayed_beta1, beta1)
-                   fluid.layers.assign(decayed_beta2, beta2)
+                   paddle.assign(decayed_beta1, beta1)
+                   paddle.assign(decayed_beta2, beta2)

                    return beta1, beta2, epsilon
@@ -2651,7 +2651,7 @@ class AdamaxOptimizer(Optimizer):
            train_program = fluid.Program()
            startup_program = fluid.Program()
            with fluid.program_guard(train_program, startup_program):
-               data = fluid.data(name='X', shape=[None, 1], dtype='float32')
+               data = paddle.static.data(name='X', shape=[None, 1], dtype='float32')
                hidden = paddle.static.nn.fc(x=data, size=10)
                loss = paddle.mean(hidden)
                adam = fluid.optimizer.AdamaxOptimizer(learning_rate=0.2)
@@ -2994,7 +2994,7 @@ class DecayedAdagradOptimizer(Optimizer):
            import paddle.fluid as fluid
            paddle.enable_static()

-           x = fluid.data(name='x', shape=[None, 10], dtype='float32')
+           x = paddle.static.data(name='x', shape=[None, 10], dtype='float32')
            trans = paddle.static.nn.fc(x, 100)
            cost = paddle.mean(trans)
            optimizer = fluid.optimizer.DecayedAdagradOptimizer(learning_rate=0.2)
@@ -3118,7 +3118,7 @@ class AdadeltaOptimizer(Optimizer):
            import paddle.fluid as fluid
            paddle.enable_static()

-           image = fluid.data(name='image', shape=[None, 28], dtype='float32')
+           image = paddle.static.data(name='image', shape=[None, 28], dtype='float32')
            fc = paddle.static.nn.fc(image, size=10)
            cost = paddle.mean(fc)
            optimizer = fluid.optimizer.Adadelta(
@@ -3747,7 +3747,7 @@ class LambOptimizer(AdamOptimizer):
            import paddle.fluid as fluid
            paddle.enable_static()

-           data = fluid.data(name='x', shape=[-1, 5], dtype='float32')
+           data = paddle.static.data(name='x', shape=[-1, 5], dtype='float32')
            hidden = paddle.static.nn.fc(x=data, size=10)
            cost = paddle.mean(hidden)
@@ -3964,7 +3964,7 @@ class ModelAverage(Optimizer):
            startup_program = fluid.Program()
            with fluid.program_guard(train_program, startup_program):
                # build net
-               data = fluid.data(name='X', shape=[None, 1], dtype='float32')
+               data = paddle.static.data(name='X', shape=[None, 1], dtype='float32')
                hidden = paddle.static.nn.fc(x=data, size=10)
                loss = paddle.mean(hidden)
                optimizer = fluid.optimizer.Momentum(learning_rate=0.2, momentum=0.1)
@@ -4143,7 +4143,7 @@ class ModelAverage(Optimizer):
            startup_program = fluid.Program()
            with fluid.program_guard(train_program, startup_program):
                # build net
-               data = fluid.data(name='X', shape=[None, 1], dtype='float32')
+               data = paddle.static.data(name='X', shape=[None, 1], dtype='float32')
                hidden = paddle.static.nn.fc(x=data, size=10)
                loss = paddle.mean(hidden)
                optimizer = fluid.optimizer.Momentum(learning_rate=0.2, momentum=0.1)
@@ -4199,7 +4199,7 @@ class ModelAverage(Optimizer):
            startup_program = fluid.Program()
            with fluid.program_guard(train_program, startup_program):
                # build net
-               data = fluid.data(name='X', shape=[None, 1], dtype='float32')
+               data = paddle.static.data(name='X', shape=[None, 1], dtype='float32')
                hidden = paddle.static.nn.fc(x=data, size=10)
                loss = paddle.mean(hidden)
                optimizer = fluid.optimizer.Momentum(learning_rate=0.2, momentum=0.1)
diff --git a/python/paddle/fluid/profiler.py b/python/paddle/fluid/profiler.py
index 9b29b01fd60ccc35fe096667dc9426c13ce6ce2d..750ea5d8e13f14cd180d97dd66df4fbc3bf88bc9 100644
--- a/python/paddle/fluid/profiler.py
+++ b/python/paddle/fluid/profiler.py
@@ -84,10 +84,11 @@ def npu_profiler(output_file, config=None):
            import paddle.fluid as fluid
            import paddle.fluid.profiler as profiler
            import numpy as np
+           import paddle

            epoc = 8
            dshape = [4, 3, 28, 28]
-           data = fluid.data(name='data', shape=[None, 3, 28, 28], dtype='float32')
+           data = paddle.static.data(name='data', shape=[None, 3, 28, 28], dtype='float32')
            conv = paddle.static.nn.conv2d(data, 20, 3, stride=[1, 1], padding=[1, 1])

            place = fluid.NPUPlace(0)
@@ -337,7 +338,7 @@ def profiler(
            epoc = 8
            dshape = [4, 3, 28, 28]
-           data = fluid.data(name='data', shape=[None, 3, 28, 28], dtype='float32')
+           data = paddle.static.data(name='data', shape=[None, 3, 28, 28], dtype='float32')
            conv = paddle.static.nn.conv2d(data, 20, 3, stride=[1, 1], padding=[1, 1])

            place = fluid.CPUPlace()
diff --git a/python/paddle/fluid/reader.py b/python/paddle/fluid/reader.py
index b9ed17304c8910e53590bfa410d4e08436935ed5..d36542da09b75e2082df912264117a86aecf6cee 100644
--- a/python/paddle/fluid/reader.py
+++ b/python/paddle/fluid/reader.py
@@ -655,7 +655,7 @@ class DataLoader:
        Args:
            feed_list (list(Tensor)|tuple(Tensor)): feed Tensor list.
-               The Tensors should be created by :code:`fluid.data()`.
+               The Tensors should be created by :code:`paddle.static.data()`.
            capacity (int): capacity of the queue maintained in DataLoader.
                The unit is batch number. Set larger capacity if your reader
                is fast.
@@ -1651,8 +1651,8 @@ class PyReader(DataLoaderBase):
                        yield fake_image, fake_label
                return reader

-           image = fluid.data(name='image', shape=[None, 784, 784], dtype='float32')
-           label = fluid.data(name='label', shape=[None, 1], dtype='int64')
+           image = paddle.static.data(name='image', shape=[None, 784, 784], dtype='float32')
+           label = paddle.static.data(name='label', shape=[None, 1], dtype='int64')
            reader = fluid.io.PyReader(feed_list=[image, label],
                                       capacity=4,
@@ -1708,8 +1708,8 @@ class PyReader(DataLoaderBase):
                        yield fake_image, fake_label
                return reader

-           image = fluid.data(name='image', shape=[None, 784, 784], dtype='float32')
-           label = fluid.data(name='label', shape=[None, 1], dtype='int64')
+           image = paddle.static.data(name='image', shape=[None, 784, 784], dtype='float32')
+           label = paddle.static.data(name='label', shape=[None, 1], dtype='int64')
            reader = fluid.io.PyReader(feed_list=[image, label], capacity=4, iterable=True, return_list=False)

            user_defined_reader = reader_creator_random_image(784, 784)
@@ -1800,7 +1800,7 @@ class PyReader(DataLoaderBase):
                for i in range(5):
                    yield np.random.uniform(low=0, high=255, size=[784, 784]),

-           image = fluid.data(name='image', shape=[None, 784, 784], dtype='float32')
+           image = paddle.static.data(name='image', shape=[None, 784, 784], dtype='float32')
            reader = fluid.io.PyReader(feed_list=[image], capacity=4, iterable=False)
            reader.decorate_sample_list_generator(
                paddle.batch(generator, batch_size=BATCH_SIZE))
@@ -1837,7 +1837,7 @@ class PyReader(DataLoaderBase):
                for i in range(5):
                    yield np.random.uniform(low=0, high=255, size=[784, 784]),

-           image = fluid.data(name='image', shape=[None, 784, 784], dtype='float32')
+           image = paddle.static.data(name='image', shape=[None, 784, 784], dtype='float32')
            reader = fluid.io.PyReader(feed_list=[image], capacity=4, iterable=False)
            reader.decorate_sample_list_generator(
                paddle.batch(generator, batch_size=BATCH_SIZE))
@@ -1908,8 +1908,8 @@ class PyReader(DataLoaderBase):
                        yield fake_image, fake_label
                return generator

-           image = fluid.data(name='image', shape=[None, 784, 784], dtype='float32')
-           label = fluid.data(name='label', shape=[None, 1], dtype='int64')
+           image = paddle.static.data(name='image', shape=[None, 784, 784], dtype='float32')
+           label = paddle.static.data(name='label', shape=[None, 1], dtype='int64')
            reader = fluid.io.PyReader(feed_list=[image, label], capacity=4, iterable=True)

            user_defined_generator = random_image_and_label_generator(784, 784)
@@ -1975,8 +1975,8 @@ class PyReader(DataLoaderBase):
                        yield fake_image, fake_label
                return generator

-           image = fluid.data(name='image', shape=[None, 784, 784], dtype='float32')
-           label = fluid.data(name='label', shape=[None, 1], dtype='int64')
+           image = paddle.static.data(name='image', shape=[None, 784, 784], dtype='float32')
+           label = paddle.static.data(name='label', shape=[None, 1], dtype='int64')
            reader = fluid.io.PyReader(feed_list=[image, label], capacity=4, iterable=True)

            user_defined_generator = random_image_and_label_generator(784, 784)
@@ -2043,8 +2043,8 @@ class PyReader(DataLoaderBase):
                        yield batch_image, batch_label
                return generator

-           image = fluid.data(name='image', shape=[None, 784, 784], dtype='float32')
-           label = fluid.data(name='label', shape=[None, 1], dtype='int64')
+           image = paddle.static.data(name='image', shape=[None, 784, 784], dtype='float32')
+           label = paddle.static.data(name='label', shape=[None, 1], dtype='int64')
            reader = fluid.io.PyReader(feed_list=[image, label], capacity=4, iterable=True)
            user_defined_generator = random_image_and_label_generator(784, 784)
diff --git a/python/paddle/fluid/tests/test_detection.py b/python/paddle/fluid/tests/test_detection.py
index 39b17cd0634be23bd81342e178087bb1815dbfe6..9bc9b39dbe99cb0a9a3fc4603052681a0e4252ed 100644
--- a/python/paddle/fluid/tests/test_detection.py
+++ b/python/paddle/fluid/tests/test_detection.py
@@ -85,17 +85,19 @@ class TestGenerateProposals(LayerTest):
         variances_np = np.ones((4, 4, 3, 4)).astype('float32')

         with self.static_graph():
-            scores = fluid.data(
+            scores = paddle.static.data(
                 name='scores', shape=[2, 3, 4, 4], dtype='float32'
             )
-            bbox_deltas = fluid.data(
+            bbox_deltas = paddle.static.data(
                 name='bbox_deltas', shape=[2, 12, 4, 4], dtype='float32'
             )
-            im_info = fluid.data(name='im_info', shape=[2, 3], dtype='float32')
-            anchors = fluid.data(
+            im_info = paddle.static.data(
+                name='im_info', shape=[2, 3], dtype='float32'
+            )
+            anchors = paddle.static.data(
                 name='anchors', shape=[4, 4, 3, 4], dtype='float32'
             )
-            variances = fluid.data(
+            variances = paddle.static.data(
                 name='var', shape=[4, 4, 3, 4], dtype='float32'
             )
             rois, roi_probs, rois_num = paddle.vision.ops.generate_proposals(
@@ -175,8 +177,12 @@ class TestDistributeFpnProposals(LayerTest):
         rois_np = np.random.rand(10, 4).astype('float32')
         rois_num_np = np.array([4, 6]).astype('int32')
         with self.static_graph():
-            rois = fluid.data(name='rois', shape=[10, 4], dtype='float32')
-            rois_num = fluid.data(name='rois_num', shape=[None], dtype='int32')
+            rois = paddle.static.data(
+                name='rois', shape=[10, 4], dtype='float32'
+            )
+            rois_num = paddle.static.data(
+                name='rois_num', shape=[None], dtype='int32'
+            )
             (
                 multi_rois,
                 restore_ind,
@@ -230,7 +236,7 @@ class TestDistributeFpnProposals(LayerTest):
     def test_distribute_fpn_proposals_error(self):
         program = Program()
         with program_guard(program):
-            fpn_rois = fluid.data(
+            fpn_rois = paddle.static.data(
                 name='data_error', shape=[10, 4], dtype='int32', lod_level=1
             )
             self.assertRaises(
diff --git a/python/paddle/fluid/tests/unittests/asp/asp_pruning_base.py b/python/paddle/fluid/tests/unittests/asp/asp_pruning_base.py
index 50fb039974cd0f6eb321adf4039bfb8aac9454fc..bac539a437f34e8df681f4b89fafb1aa2b9c3efd 100644
--- a/python/paddle/fluid/tests/unittests/asp/asp_pruning_base.py
+++ b/python/paddle/fluid/tests/unittests/asp/asp_pruning_base.py
@@ -31,10 +31,12 @@ class TestASPHelperPruningBase(unittest.TestCase):
         self.startup_program = fluid.Program()

         def build_model():
-            img = fluid.data(
+            img = paddle.static.data(
                 name='img', shape=[None, 3, 32, 32], dtype='float32'
             )
-            label = fluid.data(name='label', shape=[None, 1], dtype='int64')
+            label = paddle.static.data(
+                name='label', shape=[None, 1], dtype='int64'
+            )
             hidden = paddle.static.nn.conv2d(
                 input=img, num_filters=4, filter_size=3, padding=2, act="relu"
             )
diff --git a/python/paddle/fluid/tests/unittests/asp/test_asp_customized_pruning.py b/python/paddle/fluid/tests/unittests/asp/test_asp_customized_pruning.py
index 2379e9e7029b6bc88400b1528f342b657d9193be..38fce44d1ec8c2710cf1af97ee003e63c0923b36 100644
--- a/python/paddle/fluid/tests/unittests/asp/test_asp_customized_pruning.py
+++ b/python/paddle/fluid/tests/unittests/asp/test_asp_customized_pruning.py
@@ -196,10 +196,12 @@ class TestASPStaticCustomerizedPruneFunc(unittest.TestCase):
         self.customer_prefix = "customer_layer"

         def build_model():
-            img = fluid.data(
+            img = paddle.static.data(
                 name='img', shape=[None, 3, 32, 32], dtype='float32'
             )
-            label = fluid.data(name='label', shape=[None, 1], dtype='int64')
+            label = paddle.static.data(
+                name='label', shape=[None, 1], dtype='int64'
+            )
             hidden = paddle.static.nn.conv2d(
                 input=img, num_filters=4, filter_size=3, padding=2, act="relu"
             )
diff --git a/python/paddle/fluid/tests/unittests/asp/test_asp_optimize_static.py b/python/paddle/fluid/tests/unittests/asp/test_asp_optimize_static.py
index 67b14cc549c9f932bed8b8ca459a34cbf8d61a36..4eca7d6adee40a8f6217715b2d35768b19ef284c 100644
--- a/python/paddle/fluid/tests/unittests/asp/test_asp_optimize_static.py
+++ b/python/paddle/fluid/tests/unittests/asp/test_asp_optimize_static.py
@@ -31,10 +31,12 @@ class TestASPStaticOptimize(unittest.TestCase):
         self.startup_program = fluid.Program()

         def build_model():
-            img = fluid.data(
+            img = paddle.static.data(
                 name='img', shape=[None, 3, 24, 24], dtype='float32'
             )
-            label = fluid.data(name='label', shape=[None, 1], dtype='int64')
+            label = paddle.static.data(
+                name='label', shape=[None, 1], dtype='int64'
+            )
             hidden = paddle.static.nn.conv2d(
                 input=img, num_filters=4, filter_size=3, padding=2, act="relu"
             )
diff --git a/python/paddle/fluid/tests/unittests/asp/test_asp_pruning_static.py b/python/paddle/fluid/tests/unittests/asp/test_asp_pruning_static.py
index cf011874ea89730029145e0a371f16e9b23f9d99..669dc9b09397ca3f498b10693c2bca72c5b87247 100644
--- a/python/paddle/fluid/tests/unittests/asp/test_asp_pruning_static.py
+++ b/python/paddle/fluid/tests/unittests/asp/test_asp_pruning_static.py
@@ -31,10 +31,12 @@ class TestASPStaticPruningBase(unittest.TestCase):
         self.startup_program = fluid.Program()

         def build_model():
-            img = fluid.data(
+            img = paddle.static.data(
                 name='img', shape=[None, 3, 24, 24], dtype='float32'
             )
-            label = fluid.data(name='label', shape=[None, 1], dtype='int64')
+            label = paddle.static.data(
+                name='label', shape=[None, 1], dtype='int64'
+            )
             hidden = paddle.static.nn.conv2d(
                 input=img, num_filters=2, filter_size=3, padding=2, act="relu"
             )
diff --git a/python/paddle/fluid/tests/unittests/asp/test_asp_save_load.py b/python/paddle/fluid/tests/unittests/asp/test_asp_save_load.py
index b4876bdce53478e21f2e94e72dfd0b24742648d1..5cd3f4b8e3e7f9d865d16dbb3f9930289196ed21 100644
--- a/python/paddle/fluid/tests/unittests/asp/test_asp_save_load.py
+++ b/python/paddle/fluid/tests/unittests/asp/test_asp_save_load.py
@@ -128,10 +128,12 @@ class TestASPStaticOptimize(unittest.TestCase):
         self.startup_program = fluid.Program()

         def build_model():
-            img = fluid.data(
+            img = paddle.static.data(
                 name='img', shape=[None, 3, 32, 32], dtype='float32'
             )
-            label = fluid.data(name='label', shape=[None, 1], dtype='int64')
+            label = paddle.static.data(
+                name='label', shape=[None, 1], dtype='int64'
+            )
             hidden = paddle.static.nn.conv2d(
                 input=img, num_filters=4, filter_size=3, padding=2, act="relu"
             )
diff --git a/python/paddle/fluid/tests/unittests/auto_checkpoint_utils.py b/python/paddle/fluid/tests/unittests/auto_checkpoint_utils.py
index d42c9f0ffe5fd1e7ab08c11b3d9043119aad8595..29ec0c67cde2d3fe99adc548ba75aba89e6f08ab 100644
--- a/python/paddle/fluid/tests/unittests/auto_checkpoint_utils.py
+++ b/python/paddle/fluid/tests/unittests/auto_checkpoint_utils.py
@@ -65,8 +65,12 @@ class AutoCheckpointBase(unittest.TestCase):
         self, exe, main_prog, startup_prog, minimize=True, iterable=True
     ):
         def simple_net():
-            image = fluid.data(name='image', shape=[-1, 4, 4], dtype='float32')
-            label = fluid.data(name='label', shape=[-1, 1], dtype='int64')
+            image = paddle.static.data(
+                name='image', shape=[-1, 4, 4], dtype='float32'
+            )
+            label = paddle.static.data(
+                name='label', shape=[-1, 1], dtype='int64'
+            )

             fc_tmp = paddle.static.nn.fc(image, size=CLASS_NUM)
             cross_entropy = paddle.nn.functional.softmax_with_cross_entropy(
diff --git a/python/paddle/fluid/tests/unittests/collective/fleet/static_model_parallel_by_col.py b/python/paddle/fluid/tests/unittests/collective/fleet/static_model_parallel_by_col.py
index 035a174775bd533737a284fb35139899275377ec..81c05805aca1c3cbd2f4a1273941d0ab095ee5a9 100644
--- a/python/paddle/fluid/tests/unittests/collective/fleet/static_model_parallel_by_col.py
+++ b/python/paddle/fluid/tests/unittests/collective/fleet/static_model_parallel_by_col.py
@@ -71,7 +71,7 @@ def create_model(data, rank):
 class TestModelParallel(TestDistRunnerBase):
     def get_model(self, batch_size=2, use_dgc=False, dist_strategy=None):
         # Input data
-        data_in = fluid.data(
+        data_in = paddle.static.data(
             name='data_in', shape=[batch_size, IN_SIZE], dtype=DTYPE
         )
diff --git a/python/paddle/fluid/tests/unittests/collective/fleet/static_model_parallel_by_row.py b/python/paddle/fluid/tests/unittests/collective/fleet/static_model_parallel_by_row.py
index a480993e8ec50236ace10696ca56026314da83c5..162ef1c7b61d06840a2eb02e294d3e6780a4a194 100644
--- a/python/paddle/fluid/tests/unittests/collective/fleet/static_model_parallel_by_row.py
+++ b/python/paddle/fluid/tests/unittests/collective/fleet/static_model_parallel_by_row.py
@@ -75,7 +75,7 @@ def create_model(data, rank):
 class TestModelParallel(TestDistRunnerBase):
     def get_model(self, batch_size=2, use_dgc=False, dist_strategy=None):
         # Input data
-        data_in = fluid.data(
+        data_in = paddle.static.data(
             name='data_in', shape=[batch_size, IN_SIZE], dtype=DTYPE
         )
diff --git a/python/paddle/fluid/tests/unittests/collective/fleet/static_model_parallel_embedding.py b/python/paddle/fluid/tests/unittests/collective/fleet/static_model_parallel_embedding.py
index 689b068f025f27a59dfbb8920cc0d1dd5f9e2c43..209af435c13e8ffd542f1e014352a4e68e13e998 100644
--- a/python/paddle/fluid/tests/unittests/collective/fleet/static_model_parallel_embedding.py
+++ b/python/paddle/fluid/tests/unittests/collective/fleet/static_model_parallel_embedding.py
@@ -65,7 +65,7 @@ def create_model(data, rank):
 class TestModelParallel(TestDistRunnerBase):
     def get_model(self, batch_size=2, use_dgc=False, dist_strategy=None):
         # Input data
-        data_in = fluid.data(
+        data_in = paddle.static.data(
             name='data_in', shape=[batch_size, IN_SIZE], dtype=DTYPE
         )
diff --git a/python/paddle/fluid/tests/unittests/collective/fleet/test_fleet_checkpoint.py b/python/paddle/fluid/tests/unittests/collective/fleet/test_fleet_checkpoint.py
index 089a06c9ef1a0a2be5a4145612c7bb37175f0e65..1e80af9f104c1c9e7b12cc8315283b29467c031a 100644
--- a/python/paddle/fluid/tests/unittests/collective/fleet/test_fleet_checkpoint.py
+++ b/python/paddle/fluid/tests/unittests/collective/fleet/test_fleet_checkpoint.py
@@ -35,8 +35,10 @@ class FleetTest(unittest.TestCase):
         role = role_maker.PaddleCloudRoleMaker(is_collective=True)
         fleet.init(role)

-        image = fluid.data(name='img', shape=[None, 28, 28], dtype='float32')
-        label = fluid.data(name='label', shape=[None, 1], dtype='int64')
+        image = paddle.static.data(
+            name='img', shape=[None, 28, 28], dtype='float32'
+        )
+        label = paddle.static.data(name='label', shape=[None, 1], dtype='int64')
         feeder = fluid.DataFeeder(
             feed_list=[image, label], place=fluid.CPUPlace()
         )
diff --git a/python/paddle/fluid/tests/unittests/distribution/test_distribution_categorical.py b/python/paddle/fluid/tests/unittests/distribution/test_distribution_categorical.py
index 91e5b2257674330abf2b2fc07dc8534fa54b20ae..446321872684dd58545079dfae23c76a43e31709 100644
--- a/python/paddle/fluid/tests/unittests/distribution/test_distribution_categorical.py
+++ b/python/paddle/fluid/tests/unittests/distribution/test_distribution_categorical.py
@@ -103,13 +103,13 @@ class CategoricalTest(unittest.TestCase):

     def init_static_data(self, batch_size, dims):
         with fluid.program_guard(self.test_program):
-            self.logits_static = fluid.data(
+            self.logits_static = paddle.static.data(
                 name='logits', shape=self.logits_shape, dtype='float32'
             )
-            self.other_logits_static = fluid.data(
+            self.other_logits_static = paddle.static.data(
                 name='other_logits', shape=self.logits_shape, dtype='float32'
             )
-            self.value_static = fluid.data(
+            self.value_static = paddle.static.data(
                 name='value', shape=self.value_shape, dtype='int64'
             )
@@ -211,13 +211,13 @@ class CategoricalTest2(CategoricalTest):

     def init_static_data(self, batch_size, dims):
         with fluid.program_guard(self.test_program):
-            self.logits_static = fluid.data(
+            self.logits_static = paddle.static.data(
                 name='logits', shape=self.logits_shape, dtype='float64'
             )
-            self.other_logits_static = fluid.data(
+            self.other_logits_static = paddle.static.data(
                 name='other_logits', shape=self.logits_shape, dtype='float64'
             )
-            self.value_static = fluid.data(
+            self.value_static = paddle.static.data(
                 name='value', shape=self.value_shape, dtype='int64'
             )
@@ -234,7 +234,7 @@ class CategoricalTest3(CategoricalTest):
         with fluid.program_guard(self.test_program):
             self.logits_static = self.logits_np
             self.other_logits_static = self.other_logits_np
-            self.value_static = fluid.data(
+            self.value_static = paddle.static.data(
                 name='value', shape=self.value_shape, dtype='int64'
             )
@@ -263,7 +263,7 @@ class CategoricalTest4(CategoricalTest):
         with fluid.program_guard(self.test_program):
             self.logits_static = self.logits_np
             self.other_logits_static = self.other_logits_np
-            self.value_static = fluid.data(
+            self.value_static = paddle.static.data(
                 name='value', shape=self.value_shape, dtype='int64'
             )
@@ -344,7 +344,7 @@ class CategoricalTest8(CategoricalTest):
         with fluid.program_guard(self.test_program):
             self.logits_static = self.logits_np.tolist()
             self.other_logits_static = self.other_logits_np.tolist()
-            self.value_static = fluid.data(
+            self.value_static = paddle.static.data(
                 name='value', shape=self.value_shape, dtype='int64'
             )
@@ -361,7 +361,7 @@ class CategoricalTest9(CategoricalTest):
         with fluid.program_guard(self.test_program):
             self.logits_static = tuple(self.logits_np.tolist())
             self.other_logits_static = tuple(self.other_logits_np.tolist())
-            self.value_static = fluid.data(
+            self.value_static = paddle.static.data(
                 name='value', shape=self.value_shape, dtype='int64'
             )
diff --git a/python/paddle/fluid/tests/unittests/dygraph_to_static/test_static_analysis.py b/python/paddle/fluid/tests/unittests/dygraph_to_static/test_static_analysis.py
index 23eb6964c3e3d7b3c311de80188d33193e0d4837..8799126c6293419a424e7883446b174cc1b63d4a 100644
--- a/python/paddle/fluid/tests/unittests/dygraph_to_static/test_static_analysis.py
+++ b/python/paddle/fluid/tests/unittests/dygraph_to_static/test_static_analysis.py
@@ -108,7 +108,7 @@ def func_to_test5():
     a = inner_int_func()
     b = inner_bool_float_func(3)
     c = inner_unknown_func(None)
-    d = paddle.fluid.data('x', [1, 2])
+    d = paddle.static.data('x', [1, 2])


 result_var_type5 = {
diff --git a/python/paddle/fluid/tests/unittests/ipu/test_fill_any_like_op_ipu.py b/python/paddle/fluid/tests/unittests/ipu/test_fill_any_like_op_ipu.py
index 14a8d69a8e5218ee5ce364cf6d85f18e9b550178..6779cfe81e108ab91f4dd40915902bcd6fddeefc 100644
--- a/python/paddle/fluid/tests/unittests/ipu/test_fill_any_like_op_ipu.py
+++ b/python/paddle/fluid/tests/unittests/ipu/test_fill_any_like_op_ipu.py
@@ -69,7 +69,7 @@ class TestCase1(TestBase):
 class TestError(TestBase):
     @IPUOpTest.static_graph
     def build_model(self):
-        x = paddle.fluid.data('x', [-1, 3, 13], 'float32')
+        x = paddle.static.data('x', [-1, 3, 13], 'float32')
         x_fill = paddle.full_like(x, **self.attrs)
         out = paddle.add(x_fill, x_fill)
         self.fetch_list = [out.name]
diff --git a/python/paddle/fluid/tests/unittests/ir/inference/test_mkldnn_cpu_bfloat16_pass.py b/python/paddle/fluid/tests/unittests/ir/inference/test_mkldnn_cpu_bfloat16_pass.py
index 342b897d003acd9d95e6374caae54f2e637826cd..331c5f493857e8a1193362915c83f6aba5cd8c95 100644
--- a/python/paddle/fluid/tests/unittests/ir/inference/test_mkldnn_cpu_bfloat16_pass.py
+++ b/python/paddle/fluid/tests/unittests/ir/inference/test_mkldnn_cpu_bfloat16_pass.py
@@ -26,7 +26,7 @@ class TestMKLDNNCpuBfloat16Pass(InferencePassTest):
     def setUp(self):
         self.init_data()
         with fluid.program_guard(self.main_program, self.startup_program):
-            x = fluid.data(
+            x = paddle.static.data(
                 name='x', shape=[-1] + self.shape_x, dtype=self.d_type
             )
diff --git a/python/paddle/fluid/tests/unittests/ir/inference/test_mkldnn_elt_act_fuse_pass.py b/python/paddle/fluid/tests/unittests/ir/inference/test_mkldnn_elt_act_fuse_pass.py
index 24a63751cfec431d4335baa793543da3ba48d83d..6fad65569f73288f6d330d7b9c981cb4554cda99 100644
--- a/python/paddle/fluid/tests/unittests/ir/inference/test_mkldnn_elt_act_fuse_pass.py
+++ b/python/paddle/fluid/tests/unittests/ir/inference/test_mkldnn_elt_act_fuse_pass.py
@@ -31,10 +31,10 @@ class ElementwiseActivationMkldnnFusePassTest(InferencePassTest):
     def setUp(self):
         self.set_params()
         with fluid.program_guard(self.main_program, self.startup_program):
-            data_A = fluid.data(
+            data_A = paddle.static.data(
                 name="data_A", shape=[-1, 3, 100, 100], dtype="float32"
             )
-            data_B = fluid.data(
+            data_B = paddle.static.data(
                 name="data_B", shape=[-1, 3, 100, 100], dtype="float32"
             )
             elt_out = self.operand(data_A, data_B)
diff --git a/python/paddle/fluid/tests/unittests/ir/inference/test_mkldnn_matmul_op_output_fuse_pass.py b/python/paddle/fluid/tests/unittests/ir/inference/test_mkldnn_matmul_op_output_fuse_pass.py
index e344c873ee263709e63d59c0ead1b8c5a4c020fc..302adcae3ba56abda6c59d5cecb11ed6b5ca686e 100644
--- a/python/paddle/fluid/tests/unittests/ir/inference/test_mkldnn_matmul_op_output_fuse_pass.py
+++ b/python/paddle/fluid/tests/unittests/ir/inference/test_mkldnn_matmul_op_output_fuse_pass.py
@@ -32,10 +32,10 @@ class TestMKLDNNMatmulFuseOp(InferencePassTest):
     def make_network(self):
         with fluid.program_guard(self.main_program, self.startup_program):
-            x = fluid.data(
+            x = paddle.static.data(
                 name='x', shape=[-1] + self.shape_x, dtype=self.d_type
             )
-            y = fluid.data(
+            y = paddle.static.data(
                 name='y', shape=[-1] + self.shape_y, dtype=self.d_type
             )
             out = paddle.matmul(x, y)
@@ -74,10 +74,10 @@ class TestMKLDNNMatmulOtherDimsFuseOp(TestMKLDNNMatmulFuseOp):
     def make_network(self):
         with fluid.program_guard(self.main_program, self.startup_program):
-            x = fluid.data(
+            x = paddle.static.data(
                 name='x', shape=[-1] + self.shape_x, dtype=self.d_type
             )
-            y = fluid.data(
+            y = paddle.static.data(
                 name='y', shape=[-1] + self.shape_y, dtype=self.d_type
             )
             out = paddle.matmul(x, y)
@@ -97,10 +97,10 @@ class TestMKLDNNMatmulOpNotFusedBreakPattern(TestMKLDNNMatmulFuseOp):
     def make_network(self):
         with fluid.program_guard(self.main_program, self.startup_program):
-            x = fluid.data(
+            x = paddle.static.data(
                 name='x', shape=[-1] + self.shape_x, dtype=self.d_type
             )
-            y = fluid.data(
+            y = paddle.static.data(
                 name='y', shape=[-1] + self.shape_y, dtype=self.d_type
             )
             out = paddle.matmul(x, y)
diff --git a/python/paddle/fluid/tests/unittests/ir/inference/test_mkldnn_reshape_transpose_matmul_v2_fuse_pass.py b/python/paddle/fluid/tests/unittests/ir/inference/test_mkldnn_reshape_transpose_matmul_v2_fuse_pass.py
index ad2f5777f203b1613b5ecc6f3c768d2583bd73bc..0fc9cabcf7ea3a59b93fe3998270e14cbd3ec017 100644
--- a/python/paddle/fluid/tests/unittests/ir/inference/test_mkldnn_reshape_transpose_matmul_v2_fuse_pass.py
+++ b/python/paddle/fluid/tests/unittests/ir/inference/test_mkldnn_reshape_transpose_matmul_v2_fuse_pass.py
@@ -29,7 +29,7 @@ class TestReshapeTransposeMatmulV2OneDNNFusePass(InferencePassTest):
         self.pass_name = 'reshape_transpose_matmul_mkldnn_fuse_pass'

         with fluid.program_guard(self.main_program, self.startup_program):
-            data = fluid.data(
+            data = paddle.static.data(
                 name="data", shape=self.data_shape, dtype="float32"
             )
             weight = paddle.create_parameter(
diff --git a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_activation_pass.py b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_activation_pass.py
index 0c205fbee7c87079035221e457663c24b0234ced..11a81dd689930a5828d2c7a27383cb6550101c30 100644
--- a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_activation_pass.py
+++ b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_activation_pass.py
@@ -37,7 +37,7 @@ class TensorRTSubgraphPassActivationTest(InferencePassTest):
     def setUp(self):
         self.setUpTensorRTParam()
         with fluid.program_guard(self.main_program, self.startup_program):
-            data = fluid.data(
+            data = paddle.static.data(
                 name="data", shape=[-1, 6, 32, 32], dtype="float32"
             )
             act_out = self.append_act(data)
diff --git a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_conv3d_op.py b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_conv3d_op.py
index 592dede838eaeb5c032460250fe3b05d31407a0f..63de86f9b302d61050fb58d21aee259da1d7413c 100644
--- a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_conv3d_op.py
+++ b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_conv3d_op.py
@@ -28,7 +28,7 @@ class TensorRTSubgraphPassConv3dTest(InferencePassTest):
         self.init_params()
         self.set_params()
         with fluid.program_guard(self.main_program, self.startup_program):
-            data = fluid.data(
+            data = paddle.static.data(
                 name="data", shape=[-1, 3, 6, 32, 32], dtype="float32"
             )
             conv_out = paddle.static.nn.conv3d(
@@ -112,7 +112,7 @@ class DynamicShapeTensorRTSubgraphPassConv3dTest(InferencePassTest):
     def setUp(self):
         self.set_params()
         with fluid.program_guard(self.main_program, self.startup_program):
-            data = fluid.data(
+            data = paddle.static.data(
                 name="data", shape=[-1, 6, -1, -1, -1], dtype="float32"
             )
             conv_out = paddle.static.nn.conv3d(
diff --git a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_conv3d_transpose_op.py b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_conv3d_transpose_op.py
index b45b8dc17dce0a6e8a21380d445d363fd5644830..6f350df963f227b5e6e0f8ad278a78c043b744b8 100644
--- a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_conv3d_transpose_op.py
+++ b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_conv3d_transpose_op.py
@@ -27,7 +27,7 @@ class TensorRTSubgraphPassConv3dTransposeTest(InferencePassTest):
     def setUp(self):
         self.set_params()
         with fluid.program_guard(self.main_program, self.startup_program):
-            data = fluid.data(
+            data = paddle.static.data(
                 name="data", shape=[-1, 4, 4, 32, 32], dtype="float32"
             )
             conv_out = paddle.static.nn.conv3d_transpose(
@@ -94,7 +94,7 @@ class DynamicShapeTensorRTSubgraphPassConv3dTransposeTest(InferencePassTest):
     def setUp(self):
         self.set_params()
         with fluid.program_guard(self.main_program, self.startup_program):
-            data = fluid.data(
+            data = paddle.static.data(
                 name="data", shape=[-1, 6, -1, -1, -1], dtype="float32"
             )
             conv_out = paddle.static.nn.conv3d_transpose(
diff --git a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_conv_pass.py b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_conv_pass.py
index 9b6ab8287f6c9eb36c0f4136c6e6e44fba052deb..45588525cf4ee0be92ce755db38a9b9f5f841188 100644
--- a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_conv_pass.py
+++ b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_conv_pass.py
@@ -30,7 +30,7 @@ class TensorRTSubgraphPassConvTest(InferencePassTest):
     def setUp(self):
         self.set_params()
         with fluid.program_guard(self.main_program, self.startup_program):
-            data = fluid.data(
+            data = paddle.static.data(
                 name="data", shape=[-1, 6, 64, 64], dtype="float32"
             )
             conv_out = paddle.static.nn.conv2d(
@@ -108,7 +108,7 @@ class TensorRTSubgraphPassConvTransposeTest(InferencePassTest):
     def setUp(self):
         self.set_params()
         with fluid.program_guard(self.main_program, self.startup_program):
-            data = fluid.data(
+            data = paddle.static.data(
                 name="data", shape=[-1, 6, 64, 64], dtype="float32"
             )
             conv_out = paddle.static.nn.conv2d_transpose(
@@ -207,7 +207,7 @@ class DynamicShapeTensorRTSubgraphPassConvTest(InferencePassTest):
     def setUp(self):
         self.set_params()
         with fluid.program_guard(self.main_program, self.startup_program):
-            data = fluid.data(
+            data = paddle.static.data(
                 name="data", shape=[-1, 6, -1, -1], dtype="float32"
             )
             conv_out = paddle.static.nn.conv2d(
diff --git a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_conv_quant_dequant_pass.py b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_conv_quant_dequant_pass.py
index b34e128ddf52782aec74d6cab293ac5b52e9d006..b3b99aa438de6d5120cc627a681823363c8a95c4 100644
--- a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_conv_quant_dequant_pass.py
+++ b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_conv_quant_dequant_pass.py
@@ -29,11 +29,13 @@ class QuantDequantTensorRTSubgraphPassConvTest(QuantDequantTest):
         self.set_params()

         def network():
-            self.data = fluid.data(
+            self.data = paddle.static.data(
                 name='data', shape=[1, 28, 28], dtype='float32'
             )
             data_reshape = paddle.reshape(self.data, shape=[1, 4, 14, 14])
-            self.label = fluid.data(name='label', shape=[1, 1], dtype='int64')
+            self.label = paddle.static.data(
+                name='label', shape=[1, 1], dtype='int64'
+            )
             label_shape = paddle.reshape(self.label, shape=[1, 1, 1])
             conv_out = paddle.static.nn.conv2d(
                 input=data_reshape,
@@ -144,11 +146,13 @@ class DynamicShapeQuantDequantTensorRTSubgraphPassConvTest(QuantDequantTest):
         self.set_params()

         def network():
-            self.data = fluid.data(
+            self.data = paddle.static.data(
                 name='data', shape=[1, 28, 28], dtype='float32'
             )
             data_reshape = paddle.reshape(self.data, shape=[1, 4, 14, 14])
-            self.label = fluid.data(name='label', shape=[1, 1], dtype='int64')
+            self.label = paddle.static.data(
+                name='label', shape=[1, 1], dtype='int64'
+            )
             label_shape = paddle.reshape(self.label, shape=[1, 1, 1])
             conv_out = paddle.static.nn.conv2d(
                 input=data_reshape,
@@ -243,11 +247,13 @@ class QuantDequantTensorRTSubgraphPassConvTransposeTest(QuantDequantTest):
         self.set_params()

         def network():
-            self.data = fluid.data(
+            self.data = paddle.static.data(
                 name='data', shape=[1, 28, 28], dtype='float32'
             )
             data_reshape = paddle.reshape(self.data, shape=[1, 4, 14, 14])
-            self.label = fluid.data(name='label', shape=[1, 1], dtype='int64')
+            self.label = paddle.static.data(
+                name='label', shape=[1, 1], dtype='int64'
+            )
             label_shape = paddle.reshape(self.label, shape=[1, 1, 1])
             conv_out = paddle.static.nn.conv2d_transpose(
                 input=data_reshape,
diff --git a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_deformable_conv.py b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_deformable_conv.py
index 253a2b5eb8a0f820d9e3eca3f687d51abb22e072..3178226491b4a4c89f4368738798b24e71b99f5e 100644
--- a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_deformable_conv.py
+++ b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_deformable_conv.py
@@ -30,13 +30,13 @@ class TRTDeformableConvTest(InferencePassTest):
     def setUp(self):
         self.set_params()
         with fluid.program_guard(self.main_program, self.startup_program):
-            input = fluid.data(
+            input = paddle.static.data(
                 name='input', shape=self.input_size, dtype=self.dtype
             )
-            offset = fluid.data(
+            offset = paddle.static.data(
                 name='offset', shape=self.offset_size, dtype=self.dtype
             )
-            mask = fluid.data(
+            mask = paddle.static.data(
                 name='mask', shape=self.mask_size, dtype=self.dtype
             )
diff --git a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_dynamic_shape.py b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_dynamic_shape.py
index 98ca955ee94167282e532874402cd597480bbf55..acd602609482ccd34014dbeab8a5ade0c3174608 100644
--- a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_dynamic_shape.py
+++ b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_dynamic_shape.py
@@ -26,7 +26,7 @@ from paddle.fluid.core import AnalysisConfig
 class TRTDynamicShapeTest(InferencePassTest):
     def setUp(self):
         with fluid.program_guard(self.main_program, self.startup_program):
-            data = fluid.data(
+            data = paddle.static.data(
                 name="data", shape=[-1, 3, 16, 16], dtype="float32"
             )
             out = paddle.static.nn.conv2d(
diff --git a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_elementwise_op.py b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_elementwise_op.py
index 950550406068617f427e72e088ebf228e9315db1..87bfb350337f89fb269d1ff815b571e9e9454121 100644
--- a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_elementwise_op.py
+++ b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_elementwise_op.py
@@ -29,10 +29,10 @@ from paddle.fluid.core import AnalysisConfig, PassVersionChecker
 class TensorRTSubgraphPassElementwiseBroadcastTest(InferencePassTest):
     def setUp(self):
         with fluid.program_guard(self.main_program, self.startup_program):
-            data1 = fluid.data(
+            data1 = paddle.static.data(
                 name="data1", shape=[-1, 3, 64, 64], dtype="float32"
             )
-            data2 = fluid.data(
+            data2 = paddle.static.data(
                 name="data2", shape=[-1, 3, 64, 1], dtype="float32"
             )
             eltwise_out = self.append_eltwise(data1, data2)
b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_fc_fuse_pass.py index 252ea329edb26989f8f62dc11f28c24d096035a1..676fbdda0996aacc7a0834911b85d3fdda495102 100644 --- a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_fc_fuse_pass.py +++ b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_fc_fuse_pass.py @@ -26,7 +26,7 @@ from paddle.fluid.core import AnalysisConfig class FCFusePassTRTTest(InferencePassTest): def setUp(self): with fluid.program_guard(self.main_program, self.startup_program): - data = fluid.data( + data = paddle.static.data( name="data", shape=[32, 128, 2, 2], dtype="float32" ) fc_out1 = paddle.static.nn.fc( @@ -56,7 +56,7 @@ class FCFusePassTRTTest(InferencePassTest): class FCFusePassTRTStaticDims4Cols1Test(InferencePassTest): def setUp(self): with fluid.program_guard(self.main_program, self.startup_program): - data = fluid.data( + data = paddle.static.data( name="data", shape=[32, 128, 32, 8], dtype="float32" ) fc_out1 = paddle.static.nn.fc( @@ -84,7 +84,7 @@ class FCFusePassTRTStaticDims4Cols1Test(InferencePassTest): class FCFusePassTRTStaticDims4Cols2Test(InferencePassTest): def setUp(self): with fluid.program_guard(self.main_program, self.startup_program): - data = fluid.data( + data = paddle.static.data( name="data", shape=[3, 24, 16, 16], dtype="float32" ) fc_out1 = paddle.static.nn.fc( @@ -112,7 +112,9 @@ class FCFusePassTRTStaticDims4Cols2Test(InferencePassTest): class FCFusePassTRTDynamicDims2Test(InferencePassTest): def setUp(self): with fluid.program_guard(self.main_program, self.startup_program): - data = fluid.data(name="data", shape=[32, 128], dtype="float32") + data = paddle.static.data( + name="data", shape=[32, 128], dtype="float32" + ) fc_out1 = paddle.static.nn.fc( x=data, size=64, num_flatten_dims=1, activation="relu" ) @@ -144,7 +146,9 @@ class FCFusePassTRTDynamicDims2Test(InferencePassTest): class FCFusePassTRTDynamicDims3Cols1Test(InferencePassTest): def setUp(self): with fluid.program_guard(self.main_program, self.startup_program): - data = fluid.data(name="data", shape=[32, 128, 32], dtype="float32") + data = paddle.static.data( + name="data", shape=[32, 128, 32], dtype="float32" + ) fc_out1 = paddle.static.nn.fc( x=data, size=64, num_flatten_dims=1, activation="relu" ) @@ -176,7 +180,9 @@ class FCFusePassTRTDynamicDims3Cols1Test(InferencePassTest): class FCFusePassTRTDynamicDims3Cols2Test(InferencePassTest): def setUp(self): with fluid.program_guard(self.main_program, self.startup_program): - data = fluid.data(name="data", shape=[32, 128, 32], dtype="float32") + data = paddle.static.data( + name="data", shape=[32, 128, 32], dtype="float32" + ) fc_out1 = paddle.static.nn.fc( x=data, size=64, num_flatten_dims=2, activation="relu" ) @@ -208,7 +214,7 @@ class FCFusePassTRTDynamicDims3Cols2Test(InferencePassTest): class FCFusePassTRTDynamicDims4Cols1Test(InferencePassTest): def setUp(self): with fluid.program_guard(self.main_program, self.startup_program): - data = fluid.data( + data = paddle.static.data( name="data", shape=[32, 12, 4, 6], dtype="float32" ) fc_out1 = paddle.static.nn.fc( @@ -244,7 +250,7 @@ class FCFusePassTRTDynamicDims4Cols1Test(InferencePassTest): class FCFusePassTRTDynamicDims4Cols2Test(InferencePassTest): def setUp(self): with fluid.program_guard(self.main_program, self.startup_program): - data = fluid.data( + data = paddle.static.data( name="data", shape=[32, 128, 32, 32], dtype="float32" ) fc_out1 = paddle.static.nn.fc( @@ -280,7 +286,7 @@ class FCFusePassTRTDynamicDims4Cols2Test(InferencePassTest): 
class FCFusePassTRTDynamicDims4Cols3Test(InferencePassTest): def setUp(self): with fluid.program_guard(self.main_program, self.startup_program): - data = fluid.data( + data = paddle.static.data( name="data", shape=[32, 128, 32, 32], dtype="float32" ) fc_out1 = paddle.static.nn.fc( diff --git a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_fc_fuse_quant_dequant_pass.py b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_fc_fuse_quant_dequant_pass.py index 5179d0330d6ace9f6538e562c79dd15125e7f9d5..313bc5e2b8bd3575a52cbb864d03d125a3d73eae 100644 --- a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_fc_fuse_quant_dequant_pass.py +++ b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_fc_fuse_quant_dequant_pass.py @@ -27,10 +27,12 @@ from paddle.fluid.core import AnalysisConfig, PassVersionChecker class FCQuantDequantFusePassTRTDims3Cols1Test(QuantDequantTest): def setUp(self): def network(): - self.data = fluid.data( + self.data = paddle.static.data( name='data', shape=[1, 28, 28], dtype='float32' ) - self.label = fluid.data(name='label', shape=[1, 1], dtype='int64') + self.label = paddle.static.data( + name='label', shape=[1, 1], dtype='int64' + ) fc_out = paddle.static.nn.fc( x=self.data, size=10, @@ -98,10 +100,12 @@ class FCQuantDequantFusePassTRTDims3Cols1Test(QuantDequantTest): class FCQuantDequantFusePassTRTDims3Cols2Test(QuantDequantTest): def setUp(self): def network(): - self.data = fluid.data( + self.data = paddle.static.data( name='data', shape=[1, 28, 28], dtype='float32' ) - self.label = fluid.data(name='label', shape=[1, 1], dtype='int64') + self.label = paddle.static.data( + name='label', shape=[1, 1], dtype='int64' + ) fc_out = paddle.static.nn.fc( x=self.data, size=28, @@ -170,10 +174,12 @@ class FCQuantDequantFusePassTRTDims3Cols2Test(QuantDequantTest): class FCQuantDequantFusePassTRTDims3Cols3Test(QuantDequantTest): def setUp(self): def network(): - self.data = fluid.data( + self.data = paddle.static.data( name='data', shape=[1, 28, 28], dtype='float32' ) - self.label = fluid.data(name='label', shape=[1, 1], dtype='int64') + self.label = paddle.static.data( + name='label', shape=[1, 1], dtype='int64' + ) label_shape = paddle.reshape(self.label, shape=[1, 1, 1]) reshape_out = paddle.reshape(self.data, shape=[1, 14, 14, 4]) fc_out = paddle.static.nn.fc( diff --git a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_flatten_op.py b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_flatten_op.py index eec26fefec2d1bc1bded5e16acbbea408d562336..28ac16d8259ee42c31f4006b1da1af9b9843c779 100644 --- a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_flatten_op.py +++ b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_flatten_op.py @@ -27,7 +27,7 @@ from paddle.fluid.core import AnalysisConfig, PassVersionChecker class TRTFlattenTest(InferencePassTest): def setUp(self): with fluid.program_guard(self.main_program, self.startup_program): - data = fluid.data( + data = paddle.static.data( name="data", shape=[-1, 6, 64, 64], dtype="float32" ) flatten_out = self.append_flatten(data) @@ -56,7 +56,7 @@ class TRTFlattenTest(InferencePassTest): class TRTFlattenDynamicTest(InferencePassTest): def setUp(self): with fluid.program_guard(self.main_program, self.startup_program): - data = fluid.data( + data = paddle.static.data( name="data", shape=[-1, 6, 64, 64], dtype="float32" ) flatten_out = self.append_flatten(data) diff --git a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_gather_nd_op.py 
b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_gather_nd_op.py index 161a3142d5210350c7f48518bb9b9a0bee02cf62..d6706b6f0612f6ed66f2cb63199d7e2a36c9768d 100644 --- a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_gather_nd_op.py +++ b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_gather_nd_op.py @@ -27,8 +27,12 @@ from paddle.fluid.core import AnalysisConfig, PassVersionChecker class TRTGatherNdTest(InferencePassTest): def setUp(self): with fluid.program_guard(self.main_program, self.startup_program): - data = fluid.data(name="data", shape=[-1, 3, 4], dtype="float32") - index = fluid.data(name="index", shape=[-1, 2, 2], dtype="int32") + data = paddle.static.data( + name="data", shape=[-1, 3, 4], dtype="float32" + ) + index = paddle.static.data( + name="index", shape=[-1, 2, 2], dtype="int32" + ) gather_nd = paddle.gather_nd(data, index) out = nn.batch_norm(gather_nd, is_test=True) @@ -62,10 +66,12 @@ class TRTGatherNdTest(InferencePassTest): class TRTGatherNdFp16Test(InferencePassTest): def setUp(self): with fluid.program_guard(self.main_program, self.startup_program): - data = fluid.data( + data = paddle.static.data( name="data", shape=[-1, 1280, 192], dtype="float32" ) - index = fluid.data(name="index", shape=[-1, 1028, 2], dtype="int32") + index = paddle.static.data( + name="index", shape=[-1, 1028, 2], dtype="int32" + ) gather_nd = paddle.gather_nd(data, index) out = nn.batch_norm(gather_nd, is_test=True) diff --git a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_gather_op.py b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_gather_op.py index 3b73ae07441c7ecc521e1bdccaa1ce7650ee16f2..90b3baab683c6e1edc6c9bc0cbfdd16a2b84c6cd 100644 --- a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_gather_op.py +++ b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_gather_op.py @@ -27,8 +27,12 @@ class TRTGatherTest1(InferencePassTest): def setUp(self): self.set_params() with fluid.program_guard(self.main_program, self.startup_program): - data = fluid.data(name='data', shape=[-1, 128], dtype='float32') - index = fluid.data(name='index', shape=[-1, 1], dtype='int32') + data = paddle.static.data( + name='data', shape=[-1, 128], dtype='float32' + ) + index = paddle.static.data( + name='index', shape=[-1, 1], dtype='int32' + ) scale_out = paddle.gather(data, index=index) out = paddle.nn.functional.softmax(scale_out) @@ -66,8 +70,10 @@ class TRTGatherTest2(InferencePassTest): def setUp(self): self.set_params() with fluid.program_guard(self.main_program, self.startup_program): - data = fluid.data(name='data', shape=[16, 64], dtype='float32') - index = fluid.data(name='index', shape=[2], dtype='int32') + data = paddle.static.data( + name='data', shape=[16, 64], dtype='float32' + ) + index = paddle.static.data(name='index', shape=[2], dtype='int32') scale_out = paddle.gather(data, index=index) out = paddle.nn.functional.softmax(scale_out) diff --git a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_inspector.py b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_inspector.py index 379c3872242f0ea297db5e8120c6d8f467e23c4d..923fa74701ac5e599a2da6e5a465de6ef5783f8a 100644 --- a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_inspector.py +++ b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_inspector.py @@ -29,7 +29,9 @@ class TensorRTInspectorTest(InferencePassTest): def setUp(self): self.set_params() with fluid.program_guard(self.main_program, self.startup_program): - data = 
fluid.data(name="data", shape=[1, 16, 16], dtype="float32") + data = paddle.static.data( + name="data", shape=[1, 16, 16], dtype="float32" + ) matmul_out = paddle.matmul( x=data, y=data, diff --git a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_instance_norm_op.py b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_instance_norm_op.py index 4d98c8cb3f382d62d4802689cd357e523df84409..f9eeb2f935d74703a70dc599ad760aecb39781b9 100644 --- a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_instance_norm_op.py +++ b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_instance_norm_op.py @@ -20,6 +20,7 @@ import unittest import numpy as np from inference_pass_test import InferencePassTest +import paddle import paddle.fluid as fluid import paddle.fluid.core as core import paddle.static.nn as nn @@ -43,7 +44,7 @@ class TRTInstanceNormTest(InferencePassTest): with fluid.program_guard(self.main_program, self.startup_program): shape = [-1, self.channel, self.height, self.width] - data = fluid.data(name='in', shape=shape, dtype='float32') + data = paddle.static.data(name='in', shape=shape, dtype='float32') instance_norm_out = nn.instance_norm(data) out = nn.batch_norm(instance_norm_out, is_test=True) diff --git a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_matmul.py b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_matmul.py index 0d10acae95c3f3e9bb28d43785a05f7f2ed2527c..eb27f2e0afe0c0527eaf9b9ccafb1756d72eb757 100644 --- a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_matmul.py +++ b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_matmul.py @@ -28,7 +28,9 @@ class TensorRTMatMulDims2Test(InferencePassTest): def setUp(self): self.set_params() with fluid.program_guard(self.main_program, self.startup_program): - data = fluid.data(name="data", shape=[24, 24], dtype="float32") + data = paddle.static.data( + name="data", shape=[24, 24], dtype="float32" + ) matmul_out = paddle.matmul( x=data, y=data, @@ -65,7 +67,7 @@ class TensorRTMatMulTest(InferencePassTest): def setUp(self): self.set_params() with fluid.program_guard(self.main_program, self.startup_program): - data = fluid.data( + data = paddle.static.data( name="data", shape=[-1, 6, 24, 24], dtype="float32" ) matmul_out = paddle.matmul( @@ -126,10 +128,12 @@ class TensorRTMatMulBroadcastTest(InferencePassTest): self.set_params() place = fluid.CPUPlace() with fluid.program_guard(self.main_program, self.startup_program): - data_x = fluid.data( + data_x = paddle.static.data( name="data_x", shape=[-1, 6, 24], dtype="float32" ) - data_y = fluid.data(name="data_y", shape=[24, 16], dtype="float32") + data_y = paddle.static.data( + name="data_y", shape=[24, 16], dtype="float32" + ) matmul_out = paddle.matmul( x=data_x, y=data_y, diff --git a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_matmul_quant_dequant.py b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_matmul_quant_dequant.py index 413002d9885517cfeb457b265910d099a69432d3..2cdb13cd278f8671e2a34f0b473da2ecf1a7e338 100644 --- a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_matmul_quant_dequant.py +++ b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_matmul_quant_dequant.py @@ -29,10 +29,12 @@ class TensorRTMatMulQuantDequantDims3Test(QuantDequantTest): self.set_params() def network(): - self.data = fluid.data( + self.data = paddle.static.data( name='data', shape=[1, 28, 28], dtype='float32' ) - self.label = fluid.data(name='label', shape=[1, 1], dtype='int64') + self.label = 
paddle.static.data( + name='label', shape=[1, 1], dtype='int64' + ) matmul_out = paddle.matmul( x=self.data, y=self.data, @@ -129,10 +131,12 @@ class TensorRTMatMulQuantDequantDims4Test(QuantDequantTest): self.set_params() def network(): - self.data = fluid.data( + self.data = paddle.static.data( name='data', shape=[1, 28, 28], dtype='float32' ) - self.label = fluid.data(name='label', shape=[1, 1], dtype='int64') + self.label = paddle.static.data( + name='label', shape=[1, 1], dtype='int64' + ) reshape_out = paddle.reshape(self.data, shape=[1, 4, 14, 14]) matmul_out = paddle.matmul( x=reshape_out, @@ -231,10 +235,12 @@ class TensorRTMatMulQuantDequantDims3DynamicTest(QuantDequantTest): self.set_params() def network(): - self.data = fluid.data( + self.data = paddle.static.data( name='data', shape=[-1, 28, 28], dtype='float32' ) - self.label = fluid.data(name='label', shape=[1, 1], dtype='int64') + self.label = paddle.static.data( + name='label', shape=[1, 1], dtype='int64' + ) matmul_out = paddle.matmul( x=self.data, y=self.data, diff --git a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_multiclass_nms3_op.py b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_multiclass_nms3_op.py index ac4e399f011ba009ba3da280113462edf4249240..5f8257d669625dd373d895e10eeea7f4cac4da16 100644 --- a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_multiclass_nms3_op.py +++ b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_multiclass_nms3_op.py @@ -218,10 +218,10 @@ class TensorRTMultiClassNMS3Test(InferencePassTest): def build(self): with fluid.program_guard(self.main_program, self.startup_program): - boxes = fluid.data( + boxes = paddle.static.data( name='bboxes', shape=[-1, self.num_boxes, 4], dtype='float32' ) - scores = fluid.data( + scores = paddle.static.data( name='scores', shape=[-1, self.num_classes, self.num_boxes], dtype='float32', diff --git a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_nearest_interp_op.py b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_nearest_interp_op.py index f335bd8f82399b28bcb05e7c2e6349ee016be19d..d026a563324f4c3509f48cba06e4c3d0eed7965e 100644 --- a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_nearest_interp_op.py +++ b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_nearest_interp_op.py @@ -43,7 +43,7 @@ class TRTNearestInterpTest(InferencePassTest): self.origin_shape[1], self.channels, ] - data = fluid.data(name='data', shape=shape, dtype='float32') + data = paddle.static.data(name='data', shape=shape, dtype='float32') resize_out = self.append_nearest_interp(data) out = nn.batch_norm(resize_out, is_test=True) diff --git a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_nearest_interp_v2_op.py b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_nearest_interp_v2_op.py index 056e5b6e292123e6d667127b8782bdbca1304ea5..6bccf5572a5db2ea469a84af5ccdab431ebc9852 100644 --- a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_nearest_interp_v2_op.py +++ b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_nearest_interp_v2_op.py @@ -17,6 +17,7 @@ import unittest import numpy as np from inference_pass_test import InferencePassTest +import paddle import paddle.fluid.core as core import paddle.nn.functional as F import paddle.static.nn as nn @@ -43,7 +44,7 @@ class TRTNearestInterpTest(InferencePassTest): self.origin_shape[1], self.channels, ] - data = fluid.data(name='data', shape=shape, dtype='float32') + data = paddle.static.data(name='data', shape=shape, 
dtype='float32') resize_out = self.append_nearest_interp(data) out = nn.batch_norm(resize_out, is_test=True) diff --git a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_pad_op.py b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_pad_op.py index 4b7dc7c9cb689899373bb2aaee09100c2c7d9eda..8070f072aee71fc3dcba05f206be1f55225caee5 100644 --- a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_pad_op.py +++ b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_pad_op.py @@ -27,7 +27,7 @@ from paddle.fluid.core import AnalysisConfig class PadOpTRTTest(InferencePassTest): def setUp(self): with fluid.program_guard(self.main_program, self.startup_program): - data = fluid.data( + data = paddle.static.data( name="data", shape=[1, 3, 128, 128], dtype="float32" ) pad_out = paddle.nn.functional.pad( diff --git a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_pool3d_op.py b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_pool3d_op.py index f8abf50dd10ffe46578fd5637d69ab73473ba474..83a80479ed46b22edf392b42cf7d7689dd875fb1 100644 --- a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_pool3d_op.py +++ b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_pool3d_op.py @@ -58,7 +58,7 @@ class TensorRTPool3dTest(InferencePassTest): ) with fluid.program_guard(self.main_program, self.startup_program): - data = fluid.data( + data = paddle.static.data( name='data', shape=[-1, self.channel, self.depth, self.height, self.width], dtype='float32', @@ -190,7 +190,7 @@ class TensorRTAdaptiveAvgPool3DTest(InferencePassTest): ) with fluid.program_guard(self.main_program, self.startup_program): - data = fluid.data( + data = paddle.static.data( name='data', shape=[-1, self.channel, self.depth, self.height, self.width], dtype='float32', @@ -290,7 +290,7 @@ class TensorRTAdaptiveMaxPool3DTest(InferencePassTest): ) with fluid.program_guard(self.main_program, self.startup_program): - data = fluid.data( + data = paddle.static.data( name='data', shape=[-1, self.channel, self.depth, self.height, self.width], dtype='float32', diff --git a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_pool_op.py b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_pool_op.py index e91f183b146dec02b8298a2ec7fa6951341e54ca..5f9f4c2100969b3a70d8bf2931d298daed246dc8 100644 --- a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_pool_op.py +++ b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_pool_op.py @@ -59,7 +59,7 @@ class TensorRTPoolTest(InferencePassTest): ) with fluid.program_guard(self.main_program, self.startup_program): - data = fluid.data( + data = paddle.static.data( name='data', shape=[-1, self.channel, self.height, self.width], dtype='float32', diff --git a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_reduce_sum_op.py b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_reduce_sum_op.py index cd66cb1e914b86948b809d22ff808c3074bf8c7b..6872542ffd4c4f2440616ae7b0fbaf5f49cd330b 100644 --- a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_reduce_sum_op.py +++ b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_reduce_sum_op.py @@ -27,7 +27,7 @@ from paddle.fluid.core import AnalysisConfig, PassVersionChecker class TRTReduceSumTest(InferencePassTest): def setUp(self): with fluid.program_guard(self.main_program, self.startup_program): - data = fluid.data( + data = paddle.static.data( name="data", shape=[-1, 3, 10, 192], dtype="float32" ) reduce_sum = paddle.sum(data, axis=[2, -1], keepdim=True) @@ -60,7 
+60,7 @@ class TRTReduceSumTest(InferencePassTest): class TRTReduceSumAllTest(InferencePassTest): def setUp(self): with fluid.program_guard(self.main_program, self.startup_program): - data = fluid.data( + data = paddle.static.data( name="data", shape=[-1, 3, 10, 192], dtype="float32" ) reduce_sum = paddle.sum(data, keepdim=True) diff --git a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_reshape_op.py b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_reshape_op.py index 8edd7cafcbe4d64999c1b12dd6762d786886852b..c0b31088bfc4505550a82dadf8e9042ba9029d2d 100644 --- a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_reshape_op.py +++ b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_reshape_op.py @@ -36,7 +36,7 @@ class TRTReshapeTest(InferencePassTest): self.input_shape[2], ] with fluid.program_guard(self.main_program, self.startup_program): - data = fluid.data( + data = paddle.static.data( name='data', shape=self.data_shape, dtype='float32' ) reshape_out = self.append_reshape(data, self.reshape) @@ -74,7 +74,7 @@ class TRTReshapeTest1(TRTReshapeTest): self.input_shape[2], ] with fluid.program_guard(self.main_program, self.startup_program): - data = fluid.data( + data = paddle.static.data( name='data', shape=self.data_shape, dtype='float32' ) reshape_out = self.append_reshape(data, self.reshape) @@ -101,7 +101,7 @@ class TRTReshapeTest2(TRTReshapeTest): self.input_shape[2], ] with fluid.program_guard(self.main_program, self.startup_program): - data = fluid.data( + data = paddle.static.data( name='data', shape=self.data_shape, dtype='float32' ) reshape_out = paddle.reshape(x=data, shape=self.reshape) @@ -128,7 +128,7 @@ class TRTReshapeTest3(TRTReshapeTest): self.input_shape[2], ] with fluid.program_guard(self.main_program, self.startup_program): - data = fluid.data( + data = paddle.static.data( name='data', shape=self.data_shape, dtype='float32' ) bn_out = nn.batch_norm(data, is_test=True) diff --git a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_scale_op.py b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_scale_op.py index 3bca0dbf18482a64cfdc8c53dd553e2147104815..4ec5295261e79ff2b2c8919f741b6121f7161636 100644 --- a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_scale_op.py +++ b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_scale_op.py @@ -27,7 +27,9 @@ from paddle.fluid.core import AnalysisConfig, PassVersionChecker class TRTScaleTest(InferencePassTest): def setUp(self): with fluid.program_guard(self.main_program, self.startup_program): - data = fluid.data(name="data", shape=[-1, 512], dtype="float32") + data = paddle.static.data( + name="data", shape=[-1, 512], dtype="float32" + ) scale_out = self.append_scale(data) out = nn.batch_norm(scale_out, is_test=True) @@ -57,7 +59,7 @@ class TRTScaleTest(InferencePassTest): class TRTScaleShape2Test(InferencePassTest): def setUp(self): with fluid.program_guard(self.main_program, self.startup_program): - data = fluid.data( + data = paddle.static.data( name="data", shape=[-1, 512, 512], dtype="float32" ) scale_out = self.append_scale(data) diff --git a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_shuffle_channel_detect_pass.py b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_shuffle_channel_detect_pass.py index fc3b066556d6eb33dc006160b98324fb3fc81b2e..ef21aecb34da88c39868b01a9dbe5907b6119525 100644 --- a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_shuffle_channel_detect_pass.py +++ 
b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_shuffle_channel_detect_pass.py @@ -26,7 +26,7 @@ from paddle.fluid.core import AnalysisConfig, PassVersionChecker class ShuffleChannelFuseTRTPassTest(InferencePassTest): def setUp(self): with fluid.program_guard(self.main_program, self.startup_program): - data = fluid.data( + data = paddle.static.data( name="data", shape=[-1, 6, 64, 64], dtype="float32" ) reshape1 = paddle.reshape(x=data, shape=[-1, 2, 3, 64, 64]) diff --git a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_slice_dynamic_plugin.py b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_slice_dynamic_plugin.py index d3c242c8d838342aa5fd3bd704ccc4243e71fb22..31f294b9cbb6a3da3aba45c478884018ccd9fe18 100644 --- a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_slice_dynamic_plugin.py +++ b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_slice_dynamic_plugin.py @@ -46,7 +46,9 @@ class SlicePluginTRTDynamicTest(InferencePassTest): self.setUpSliceParams() self.setUpTensorRTParams() with fluid.program_guard(self.main_program, self.startup_program): - data = fluid.data(name="data", shape=[3, 3, 3, 3], dtype="float32") + data = paddle.static.data( + name="data", shape=[3, 3, 3, 3], dtype="float32" + ) axes = self.params_axes starts = self.params_starts ends = self.params_ends diff --git a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_slice_plugin.py b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_slice_plugin.py index aeea57e3888413832350b1cb3a3acfc7f5798270..6075add2c93feee08f35959525d8a86ad627144d 100644 --- a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_slice_plugin.py +++ b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_slice_plugin.py @@ -41,7 +41,9 @@ class SlicePluginTRTTest(InferencePassTest): self.setUpSliceParams() self.setUpTensorRTParams() with fluid.program_guard(self.main_program, self.startup_program): - data = fluid.data(name="data", shape=[3, 3, 3, 3], dtype="float32") + data = paddle.static.data( + name="data", shape=[3, 3, 3, 3], dtype="float32" + ) axes = self.params_axes starts = self.params_starts ends = self.params_ends @@ -110,7 +112,9 @@ class SlicePluginTRTTestInt32(SlicePluginTRTTest): self.setUpSliceParams() self.setUpTensorRTParams() with fluid.program_guard(self.main_program, self.startup_program): - data = fluid.data(name="data", shape=[3, 3, 3, 3], dtype="int32") + data = paddle.static.data( + name="data", shape=[3, 3, 3, 3], dtype="int32" + ) axes = self.params_axes starts = self.params_starts ends = self.params_ends @@ -135,7 +139,9 @@ class StaticSlicePluginTRTTestInt32(SlicePluginTRTTest): self.setUpSliceParams() self.setUpTensorRTParams() with fluid.program_guard(self.main_program, self.startup_program): - data = fluid.data(name="data", shape=[3, 3, 3, 3], dtype="int32") + data = paddle.static.data( + name="data", shape=[3, 3, 3, 3], dtype="int32" + ) axes = self.params_axes starts = self.params_starts ends = self.params_ends diff --git a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_subgraph_pass.py b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_subgraph_pass.py index 55347875152c5c0a97c1ad2642a2d72c63f4eee5..e567a329fbc7df6a5db44ace13dcc3e05ce1bd53 100644 --- a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_subgraph_pass.py +++ b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_subgraph_pass.py @@ -28,7 +28,7 @@ from paddle.fluid.core import AnalysisConfig, PassVersionChecker class 
TensorRTSubgraphPassFcTest(InferencePassTest): def setUp(self): with fluid.program_guard(self.main_program, self.startup_program): - data = fluid.data( + data = paddle.static.data( name="data", shape=[-1, 6, 64, 64], dtype="float32" ) fc_out = paddle.static.nn.fc(x=[data], activation=None, size=1000) @@ -55,10 +55,10 @@ class TensorRTSubgraphPassFcTest(InferencePassTest): class TensorRTSubgraphPassConcatTest(InferencePassTest): def setUp(self): with fluid.program_guard(self.main_program, self.startup_program): - data1 = fluid.data( + data1 = paddle.static.data( name="data1", shape=[-1, 3, 64, 64], dtype="float32" ) - data2 = fluid.data( + data2 = paddle.static.data( name="data2", shape=[-1, 3, 64, 64], dtype="float32" ) concat_out = paddle.concat([data1, data2], axis=2) @@ -85,7 +85,7 @@ class TensorRTSubgraphPassConcatTest(InferencePassTest): class TensorRTSubgraphPassSplitTest(InferencePassTest): def setUp(self): with fluid.program_guard(self.main_program, self.startup_program): - data = fluid.data( + data = paddle.static.data( name="data", shape=[-1, 3, 64, 64], dtype="float32" ) split_out = paddle.split(data, axis=-1, num_or_sections=2) @@ -111,7 +111,7 @@ class TensorRTSubgraphPassSplitTest(InferencePassTest): class TensorRTSubgraphPassSplitSerializeTest(InferencePassTest): def setUp(self): with fluid.program_guard(self.main_program, self.startup_program): - data = fluid.data( + data = paddle.static.data( name="data", shape=[-1, 3, 64, 64], dtype="float32" ) split_out = paddle.split(data, axis=-1, num_or_sections=2) @@ -139,7 +139,7 @@ class TensorRTSubgraphPassSplitSerializeTest(InferencePassTest): class TensorRTSubgraphPassDynamicSplitFp16SerializeTest(InferencePassTest): def setUp(self): with fluid.program_guard(self.main_program, self.startup_program): - data = fluid.data( + data = paddle.static.data( name="data", shape=[-1, 3, 64, 64], dtype="float32" ) split_out = paddle.split(data, axis=-1, num_or_sections=2) @@ -175,7 +175,7 @@ class TensorRTSubgraphPassDynamicSplitFp16SerializeTest(InferencePassTest): class TensorRTSubgraphPassInstanceNormTest(InferencePassTest): def setUp(self): with fluid.program_guard(self.main_program, self.startup_program): - data = fluid.data( + data = paddle.static.data( name="data", shape=[-1, 3, 64, 64], dtype="float32" ) param_attr = fluid.ParamAttr( @@ -212,7 +212,7 @@ class TensorRTSubgraphPassInstanceNormTest(InferencePassTest): class TensorRTSubgraphPassTransposeTest(InferencePassTest): def setUp(self): with fluid.program_guard(self.main_program, self.startup_program): - data = fluid.data( + data = paddle.static.data( name="data", shape=[-1, 6, 64, 64], dtype="float32" ) transpose_out = self.append_transpose(data) @@ -242,7 +242,7 @@ class TensorRTSubgraphPassLayerNormTest(InferencePassTest): def setUp(self): self.set_params() with fluid.program_guard(self.main_program, self.startup_program): - data = fluid.data( + data = paddle.static.data( name="data", shape=[-1, 3, 64, 64], dtype="float32" ) out = paddle.static.nn.layer_norm( @@ -273,7 +273,7 @@ class TensorRTSubgraphPassLayerNormDynamicTest(InferencePassTest): def setUp(self): self.set_params() with fluid.program_guard(self.main_program, self.startup_program): - data = fluid.data( + data = paddle.static.data( name="data", shape=[-1, 3, 64, 64], dtype="float32" ) out = paddle.static.nn.layer_norm( @@ -359,10 +359,10 @@ class TensorRTSubgraphPassLayerNormBeginNormAxis3Test( class TensorRTSubgraphPassElementwiseTest(InferencePassTest): def setUp(self): with 
fluid.program_guard(self.main_program, self.startup_program): - data1 = fluid.data( + data1 = paddle.static.data( name="data1", shape=[-1, 3, 64, 64], dtype="float32" ) - data2 = fluid.data( + data2 = paddle.static.data( name="data2", shape=[-1, 3, 64, 64], dtype="float32" ) eltwise_out = self.append_eltwise(data1, data2) @@ -414,10 +414,12 @@ class TensorRTSubgraphPassElementwiseSerializeTest( class TensorRTSubgraphPassElementwiseBroadcastDynamicTest(InferencePassTest): def setUp(self): with fluid.program_guard(self.main_program, self.startup_program): - data1 = fluid.data( + data1 = paddle.static.data( name="data1", shape=[-1, 3, 64, 64], dtype="float32" ) - data2 = fluid.data(name="data2", shape=[64, 64], dtype="float32") + data2 = paddle.static.data( + name="data2", shape=[64, 64], dtype="float32" + ) eltwise_out = self.append_eltwise(data1, data2) out = nn.batch_norm(eltwise_out, is_test=True) self.feeds = { diff --git a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_tile_op.py b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_tile_op.py index 9557f8c71c904fd47917e1b077e170706a098473..ad163c1100edf37ade54a38027a7b03d7542de38 100644 --- a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_tile_op.py +++ b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_tile_op.py @@ -26,7 +26,7 @@ from paddle.fluid.core import AnalysisConfig, PassVersionChecker class TRTTileTest(InferencePassTest): def setUp(self): with fluid.program_guard(self.main_program, self.startup_program): - data = fluid.data( + data = paddle.static.data( name="data", shape=[4, 3, 224, 256], dtype="float32" ) tile_out = paddle.tile(x=data, repeat_times=[1, 1, 1, 1]) @@ -53,7 +53,9 @@ class TRTTileTest(InferencePassTest): class TRTTileExpandTest(InferencePassTest): def setUp(self): with fluid.program_guard(self.main_program, self.startup_program): - data = fluid.data(name="data", shape=[1, 1, 1, 1], dtype="float32") + data = paddle.static.data( + name="data", shape=[1, 1, 1, 1], dtype="float32" + ) tile_out = paddle.tile(x=data, repeat_times=[1, 4, 1080, 1920]) out = paddle.static.nn.batch_norm(tile_out, is_test=True) @@ -78,7 +80,9 @@ class TRTTileExpandTest(InferencePassTest): class TRTTileExpandStaticTest(InferencePassTest): def setUp(self): with fluid.program_guard(self.main_program, self.startup_program): - data = fluid.data(name="data", shape=[1, 1, 1, 1], dtype="float32") + data = paddle.static.data( + name="data", shape=[1, 1, 1, 1], dtype="float32" + ) tile_out = paddle.tile(x=data, repeat_times=[1, 4, 1080, 1920]) out = paddle.static.nn.batch_norm(tile_out, is_test=True) @@ -103,7 +107,9 @@ class TRTTileExpandStaticTest(InferencePassTest): class TRTTileExpandHalfTest(InferencePassTest): def setUp(self): with fluid.program_guard(self.main_program, self.startup_program): - data = fluid.data(name="data", shape=[1, 1, 1, 1], dtype="float32") + data = paddle.static.data( + name="data", shape=[1, 1, 1, 1], dtype="float32" + ) tile_out = paddle.tile(x=data, repeat_times=[1, 4, 1080, 1920]) out = paddle.static.nn.batch_norm(tile_out, is_test=True) diff --git a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_transpose_flatten_concat_fuse_pass.py b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_transpose_flatten_concat_fuse_pass.py index 5d9b36429993a9c6a3d4956554bc03e2dc49fcf7..ad342e6d4796b7da9c5175a649dfdafea9d37bbf 100644 --- a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_transpose_flatten_concat_fuse_pass.py +++ 
b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_transpose_flatten_concat_fuse_pass.py @@ -26,10 +26,10 @@ from paddle.fluid.core import AnalysisConfig class TransposeFlattenConcatFusePassTRTTest(InferencePassTest): def setUp(self): with fluid.program_guard(self.main_program, self.startup_program): - data1 = fluid.data( + data1 = paddle.static.data( name="data1", shape=[8, 32, 128], dtype="float32" ) - data2 = fluid.data( + data2 = paddle.static.data( name="data2", shape=[8, 32, 128], dtype="float32" ) diff --git a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_tuned_dynamic_shape.py b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_tuned_dynamic_shape.py index cb587a8a8069938e7573815ea2922df7d05b6a97..c6c90186f2640125943df00d2bdd91f072a6fb07 100644 --- a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_tuned_dynamic_shape.py +++ b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_tuned_dynamic_shape.py @@ -31,7 +31,7 @@ class TRTTunedDynamicShapeTest(unittest.TestCase): main_program = fluid.Program() startup_program = fluid.Program() with fluid.program_guard(main_program, startup_program): - data = fluid.data( + data = paddle.static.data( name="data", shape=[-1, 6, 64, 64], dtype="float32" ) conv_out = paddle.static.nn.conv2d( diff --git a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_yolo_box_op.py b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_yolo_box_op.py index a578c5216f3e8abd94996778ab49f7218c573432..f51cfe685dc388201d33863b04e3f4bcf1161fc7 100644 --- a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_yolo_box_op.py +++ b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_yolo_box_op.py @@ -27,8 +27,10 @@ class TRTYoloBoxTest(InferencePassTest): self.set_params() with fluid.program_guard(self.main_program, self.startup_program): image_shape = [self.bs, self.channel, self.height, self.width] - image = fluid.data(name='image', shape=image_shape, dtype='float32') - image_size = fluid.data( + image = paddle.static.data( + name='image', shape=image_shape, dtype='float32' + ) + image_size = paddle.static.data( name='image_size', shape=[self.bs, 2], dtype='int32' ) boxes, scores = self.append_yolobox(image, image_size) @@ -79,8 +81,10 @@ class TRTYoloBoxFP16Test(InferencePassTest): self.set_params() with fluid.program_guard(self.main_program, self.startup_program): image_shape = [self.bs, self.channel, self.height, self.width] - image = fluid.data(name='image', shape=image_shape, dtype='float32') - image_size = fluid.data( + image = paddle.static.data( + name='image', shape=image_shape, dtype='float32' + ) + image_size = paddle.static.data( name='image_size', shape=[self.bs, 2], dtype='int32' ) boxes, scores = self.append_yolobox(image, image_size) @@ -129,8 +133,10 @@ class TRTYoloBoxIoUAwareTest(InferencePassTest): self.set_params() with fluid.program_guard(self.main_program, self.startup_program): image_shape = [self.bs, self.channel, self.height, self.width] - image = fluid.data(name='image', shape=image_shape, dtype='float32') - image_size = fluid.data( + image = paddle.static.data( + name='image', shape=image_shape, dtype='float32' + ) + image_size = paddle.static.data( name='image_size', shape=[self.bs, 2], dtype='int32' ) boxes, scores = self.append_yolobox(image, image_size) diff --git a/python/paddle/fluid/tests/unittests/ir/test_ir_fc_fuse_pass.py b/python/paddle/fluid/tests/unittests/ir/test_ir_fc_fuse_pass.py index 
403729786d418d0df2832fcfa3c5add5558ca3ae..e490f1e23c69f79df72e6b058ca61684878fd447 100644 --- a/python/paddle/fluid/tests/unittests/ir/test_ir_fc_fuse_pass.py +++ b/python/paddle/fluid/tests/unittests/ir/test_ir_fc_fuse_pass.py @@ -25,7 +25,7 @@ import paddle.fluid.core as core class FCFusePassTest(PassTest): def setUp(self): with fluid.program_guard(self.main_program, self.startup_program): - data = fluid.data( + data = paddle.static.data( name="data", shape=[32, 128], dtype="float32", lod_level=0 ) tmp_0 = paddle.static.nn.fc( diff --git a/python/paddle/fluid/tests/unittests/ir/test_ir_fusion_group_pass.py b/python/paddle/fluid/tests/unittests/ir/test_ir_fusion_group_pass.py index f0a121e5fcbeb22adcb79ef83122ec8ae6dc880a..fbac04175f48c1da944a0c2acaf60d69a7ab5d5f 100644 --- a/python/paddle/fluid/tests/unittests/ir/test_ir_fusion_group_pass.py +++ b/python/paddle/fluid/tests/unittests/ir/test_ir_fusion_group_pass.py @@ -27,7 +27,7 @@ class FusionGroupPassTest(PassTest): with fluid.program_guard(self.main_program, self.startup_program): self.feed_vars = self._prepare_feed_vars([32, 128], dtype, 2) self.feed_vars.append( - fluid.data(name="data2", shape=[128, 128], dtype=dtype) + paddle.static.data(name="data2", shape=[128, 128], dtype=dtype) ) # subgraph with only 1 op node @@ -51,7 +51,9 @@ class FusionGroupPassTest(PassTest): def _prepare_feed_vars(self, shape, dtype, num_data, stop_gradient=True): feed_vars = [] for i in range(num_data): - var = fluid.data(name=("data" + str(i)), shape=shape, dtype=dtype) + var = paddle.static.data( + name=("data" + str(i)), shape=shape, dtype=dtype + ) var.stop_gradient = stop_gradient feed_vars.append(var) return feed_vars @@ -108,7 +110,7 @@ class FusionGroupPassInplaceTest(FusionGroupPassTest): with fluid.program_guard(self.main_program, self.startup_program): self.feed_vars = self._prepare_feed_vars([32, 128], dtype, 3) self.feed_vars.append( - fluid.data(name="data3", shape=[128, 32], dtype=dtype) + paddle.static.data(name="data3", shape=[128, 32], dtype=dtype) ) # subgraph with 3 op node @@ -134,7 +136,7 @@ class FusionGroupPassTestCastAndFP16(FusionGroupPassTest): with fluid.program_guard(self.main_program, self.startup_program): self.feed_vars = self._prepare_feed_vars([32, 128], dtype, 2) self.feed_vars.append( - fluid.data(name="data2", shape=[128, 128], dtype=dtype) + paddle.static.data(name="data2", shape=[128, 128], dtype=dtype) ) # subgraph with 2 op nodes @@ -165,7 +167,7 @@ class FusionGroupPassSumTest(FusionGroupPassTest): with fluid.program_guard(self.main_program, self.startup_program): self.feed_vars = self._prepare_feed_vars([32, 128], dtype, 3) self.feed_vars.append( - fluid.data(name="data3", shape=[128, 128], dtype=dtype) + paddle.static.data(name="data3", shape=[128, 128], dtype=dtype) ) # subgraph with 2 op nodes diff --git a/python/paddle/fluid/tests/unittests/ir/test_ir_skip_layernorm_pass.py b/python/paddle/fluid/tests/unittests/ir/test_ir_skip_layernorm_pass.py index c79266711c86faee67fd01b17f2c07b4366b0b3c..e0cb1e759508666500ddc88890879546b15a4aae 100644 --- a/python/paddle/fluid/tests/unittests/ir/test_ir_skip_layernorm_pass.py +++ b/python/paddle/fluid/tests/unittests/ir/test_ir_skip_layernorm_pass.py @@ -25,10 +25,10 @@ class SkipLayerNormFusePassTest(PassTest): def setUp(self): paddle.enable_static() with fluid.program_guard(self.main_program, self.startup_program): - x = fluid.data( + x = paddle.static.data( name="x", shape=[128, 768], dtype="float32", lod_level=0 ) - y = fluid.data( + y = paddle.static.data( 
name="y", shape=[128, 768], dtype="float32", lod_level=0 ) elementwise_out = paddle.add(x=x, y=y) diff --git a/python/paddle/fluid/tests/unittests/mlu/test_batch_norm_op_mlu.py b/python/paddle/fluid/tests/unittests/mlu/test_batch_norm_op_mlu.py index abd86efcf84d08017172a5af16844b2985c2e379..69143234a9d3a7845825b1c81dd2c2f06ce01fd1 100644 --- a/python/paddle/fluid/tests/unittests/mlu/test_batch_norm_op_mlu.py +++ b/python/paddle/fluid/tests/unittests/mlu/test_batch_norm_op_mlu.py @@ -803,7 +803,7 @@ class TestDygraphBatchNormTrainableStats(unittest.TestCase): is_test=is_test, trainable_statistics=trainable_statistics, ) - x = fluid.data(name='x', shape=x_np.shape, dtype=x_np.dtype) + x = paddle.static.data(name='x', shape=x_np.shape, dtype=x_np.dtype) y = bn(x) exe.run(fluid.default_startup_program()) r = exe.run(feed={'x': x_np}, fetch_list=[y])[0] @@ -820,7 +820,7 @@ class TestDygraphBatchNormOpenReserveSpace(unittest.TestCase): with program_guard(Program(), Program()): paddle.enable_static() x = np.random.random(size=(3, 10, 3, 7)).astype('float32') - x = fluid.data(name='x', shape=x.shape, dtype=x.dtype) + x = paddle.static.data(name='x', shape=x.shape, dtype=x.dtype) # Set this FLAG, the BatchNorm API will pass "reserve_space" argument into batch_norm op. os.environ['FLAGS_cudnn_batchnorm_spatial_persistent'] = '1' batch_norm = paddle.nn.BatchNorm(7, data_layout="NHWC") diff --git a/python/paddle/fluid/tests/unittests/mlu/test_batch_norm_op_mlu_v2.py b/python/paddle/fluid/tests/unittests/mlu/test_batch_norm_op_mlu_v2.py index 590ebbf63efa5fd986c93d7ad0cdc0ebf636ab0e..55a45b655178699489cf3fcfed787f8a018c43db 100644 --- a/python/paddle/fluid/tests/unittests/mlu/test_batch_norm_op_mlu_v2.py +++ b/python/paddle/fluid/tests/unittests/mlu/test_batch_norm_op_mlu_v2.py @@ -157,7 +157,7 @@ class TestBatchNorm(unittest.TestCase): is_test=is_test, trainable_statistics=trainable_statistics, ) - x = fluid.data(name='x', shape=x_np.shape, dtype=x_np.dtype) + x = paddle.static.data(name='x', shape=x_np.shape, dtype=x_np.dtype) y = bn(x) exe.run(fluid.default_startup_program()) r = exe.run(feed={'x': x_np}, fetch_list=[y])[0] @@ -166,7 +166,7 @@ class TestBatchNorm(unittest.TestCase): def compute_v2(x_np): with program_guard(Program(), Program()): bn = paddle.nn.BatchNorm2D(shape[1]) - x = fluid.data(name='x', shape=x_np.shape, dtype=x_np.dtype) + x = paddle.static.data(name='x', shape=x_np.shape, dtype=x_np.dtype) y = bn(x) exe.run(fluid.default_startup_program()) r = exe.run(feed={'x': x_np}, fetch_list=[y])[0] diff --git a/python/paddle/fluid/tests/unittests/mlu/test_bce_loss_mlu.py b/python/paddle/fluid/tests/unittests/mlu/test_bce_loss_mlu.py index 58ccf798136f478784c40a5f3a3b2b14075486f7..de589a22ed0c2245254413b1c75dd5698ba261c5 100644 --- a/python/paddle/fluid/tests/unittests/mlu/test_bce_loss_mlu.py +++ b/python/paddle/fluid/tests/unittests/mlu/test_bce_loss_mlu.py @@ -30,14 +30,14 @@ def test_static_layer( prog = paddle.static.Program() startup_prog = paddle.static.Program() with paddle.static.program_guard(prog, startup_prog): - input = paddle.fluid.data( + input = paddle.static.data( name='input', shape=input_np.shape, dtype='float32' ) - label = paddle.fluid.data( + label = paddle.static.data( name='label', shape=label_np.shape, dtype='float32' ) if weight_np is not None: - weight = paddle.fluid.data( + weight = paddle.static.data( name='weight', shape=weight_np.shape, dtype='float32' ) bce_loss = paddle.nn.loss.BCELoss( @@ -63,14 +63,14 @@ def test_static_functional( prog = 
paddle.static.Program() startup_prog = paddle.static.Program() with paddle.static.program_guard(prog, startup_prog): - input = paddle.fluid.data( + input = paddle.static.data( name='input', shape=input_np.shape, dtype='float32' ) - label = paddle.fluid.data( + label = paddle.static.data( name='label', shape=label_np.shape, dtype='float32' ) if weight_np is not None: - weight = paddle.fluid.data( + weight = paddle.static.data( name='weight', shape=weight_np.shape, dtype='float32' ) res = paddle.nn.functional.binary_cross_entropy( diff --git a/python/paddle/fluid/tests/unittests/mlu/test_bce_with_logits_loss_mlu.py b/python/paddle/fluid/tests/unittests/mlu/test_bce_with_logits_loss_mlu.py index 0aafe99276fb57d5bdf140677c59f1894f825da5..6e24c065107a227ca48cdf73614e1353c1450591 100644 --- a/python/paddle/fluid/tests/unittests/mlu/test_bce_with_logits_loss_mlu.py +++ b/python/paddle/fluid/tests/unittests/mlu/test_bce_with_logits_loss_mlu.py @@ -41,10 +41,10 @@ def test_static( prog = paddle.static.Program() startup_prog = paddle.static.Program() with paddle.static.program_guard(prog, startup_prog): - logit = paddle.fluid.data( + logit = paddle.static.data( name='logit', shape=logit_np.shape, dtype='float32' ) - label = paddle.fluid.data( + label = paddle.static.data( name='label', shape=label_np.shape, dtype='float32' ) feed_dict = {"logit": logit_np, "label": label_np} @@ -52,12 +52,12 @@ pos_weight = None weight = None if pos_weight_np is not None: - pos_weight = paddle.fluid.data( + pos_weight = paddle.static.data( name='pos_weight', shape=pos_weight_np.shape, dtype='float32' ) feed_dict["pos_weight"] = pos_weight_np if weight_np is not None: - weight = paddle.fluid.data( + weight = paddle.static.data( name='weight', shape=weight_np.shape, dtype='float32' ) feed_dict["weight"] = weight_np diff --git a/python/paddle/fluid/tests/unittests/mlu/test_dropout_op_mlu.py b/python/paddle/fluid/tests/unittests/mlu/test_dropout_op_mlu.py index 57d004541af5e99d38fe190d18d304dcd4e55101..65718123d6da201b244ccb46221416b6779a04dc 100644 --- a/python/paddle/fluid/tests/unittests/mlu/test_dropout_op_mlu.py +++ b/python/paddle/fluid/tests/unittests/mlu/test_dropout_op_mlu.py @@ -224,7 +224,9 @@ class TestDropoutAPI(unittest.TestCase): def check_static_result(self, place): with fluid.program_guard(fluid.Program(), fluid.Program()): - input = fluid.data(name="input", shape=[40, 40], dtype="float32") + input = paddle.static.data( + name="input", shape=[40, 40], dtype="float32" + ) res1 = paddle.nn.functional.dropout( x=input, p=0.0, training=False, mode='upscale_in_train' ) diff --git a/python/paddle/fluid/tests/unittests/mlu/test_elementwise_add_op_mlu.py b/python/paddle/fluid/tests/unittests/mlu/test_elementwise_add_op_mlu.py index ac043fc5f47769ac45689b8d686f4df0895b92c3..fc03a6042f872a8a52738e5d4bf2b2ce5c18ab33 100644 --- a/python/paddle/fluid/tests/unittests/mlu/test_elementwise_add_op_mlu.py +++ b/python/paddle/fluid/tests/unittests/mlu/test_elementwise_add_op_mlu.py @@ -402,8 +402,8 @@ class TestAddApi(unittest.TestCase): def test_name(self): with fluid.program_guard(fluid.Program()): - x = fluid.data(name="x", shape=[2, 3], dtype="float32") - y = fluid.data(name='y', shape=[2, 3], dtype='float32') + x = paddle.static.data(name="x", shape=[2, 3], dtype="float32") + y = paddle.static.data(name='y', shape=[2, 3], dtype='float32') y_1 = self._executed_api(x, y, name='add_res') self.assertEqual(('add_res' in y_1.name), True) @@ -417,8 +417,8 @@ class TestAddApi(unittest.TestCase): "y": np.array([1,
5, 2]).astype('float32'), } - x = fluid.data(name="x", shape=[3], dtype='float32') - y = fluid.data(name="y", shape=[3], dtype='float32') + x = paddle.static.data(name="x", shape=[3], dtype='float32') + y = paddle.static.data(name="y", shape=[3], dtype='float32') z = self._executed_api(x, y) place = fluid.MLUPlace(0) diff --git a/python/paddle/fluid/tests/unittests/mlu/test_fill_constant_op_mlu.py b/python/paddle/fluid/tests/unittests/mlu/test_fill_constant_op_mlu.py index ed03ac0d3ad6670a38765e5ece4b970843f6df6c..7b241075e47b169d43570dbc5c2724bbde013250 100644 --- a/python/paddle/fluid/tests/unittests/mlu/test_fill_constant_op_mlu.py +++ b/python/paddle/fluid/tests/unittests/mlu/test_fill_constant_op_mlu.py @@ -271,10 +271,10 @@ class TestFillConstantAPI(unittest.TestCase): positive_2_int32 = paddle.tensor.fill_constant([1], "int32", 2) positive_2_int64 = paddle.tensor.fill_constant([1], "int64", 2) - shape_tensor_int32 = fluid.data( + shape_tensor_int32 = paddle.static.data( name="shape_tensor_int32", shape=[2], dtype="int32" ) - shape_tensor_int64 = fluid.data( + shape_tensor_int64 = paddle.static.data( name="shape_tensor_int64", shape=[2], dtype="int64" ) @@ -446,7 +446,7 @@ class TestFillConstantOpError(unittest.TestCase): # The shape dtype of fill_constant_op must be int32 or int64. def test_shape_tensor_dtype(): - shape = fluid.data( + shape = paddle.static.data( name="shape_tensor", shape=[2], dtype="float32" ) paddle.tensor.fill_constant( @@ -456,7 +456,7 @@ class TestFillConstantOpError(unittest.TestCase): self.assertRaises(TypeError, test_shape_tensor_dtype) def test_shape_tensor_list_dtype(): - shape = fluid.data( + shape = paddle.static.data( name="shape_tensor_list", shape=[1], dtype="bool" ) paddle.tensor.fill_constant( diff --git a/python/paddle/fluid/tests/unittests/mlu/test_gather_op_mlu.py b/python/paddle/fluid/tests/unittests/mlu/test_gather_op_mlu.py index d4c5e966570f4f9cb2fc65e1ac557046914a36d2..0ce65e30bcc8b799efbc2c2a2bcee337727cb458 100644 --- a/python/paddle/fluid/tests/unittests/mlu/test_gather_op_mlu.py +++ b/python/paddle/fluid/tests/unittests/mlu/test_gather_op_mlu.py @@ -129,10 +129,10 @@ class TestGathertError(unittest.TestCase): ): shape = [8, 9, 6] - x = paddle.fluid.data(shape=shape, dtype='int8', name='x') - axis = paddle.fluid.data(shape=[1], dtype='float32', name='axis') - index = paddle.fluid.data(shape=shape, dtype='int32', name='index') - index_float = paddle.fluid.data( + x = paddle.static.data(shape=shape, dtype='int8', name='x') + axis = paddle.static.data(shape=[1], dtype='float32', name='axis') + index = paddle.static.data(shape=shape, dtype='int32', name='index') + index_float = paddle.static.data( shape=shape, dtype='float32', name='index_float' ) @@ -160,9 +160,9 @@ class TestGathertError(unittest.TestCase): with fluid.program_guard(fluid.Program(), fluid.Program()): shape = [8, 9, 6] - x = fluid.data(shape=shape, dtype='int8', name='x') - index = fluid.data(shape=shape, dtype='int32', name='mask') - index_float = fluid.data( + x = paddle.static.data(shape=shape, dtype='int8', name='x') + index = paddle.static.data(shape=shape, dtype='int32', name='mask') + index_float = paddle.static.data( shape=shape, dtype='float32', name='index_float' ) diff --git a/python/paddle/fluid/tests/unittests/mlu/test_hard_sigmoid_op_mlu.py b/python/paddle/fluid/tests/unittests/mlu/test_hard_sigmoid_op_mlu.py index 6575b0decd4af5cfa5842a1d537c33e44d7b17c2..13046487ba621968a0dbb4672ffda891354d4a35 100644 --- 
a/python/paddle/fluid/tests/unittests/mlu/test_hard_sigmoid_op_mlu.py +++ b/python/paddle/fluid/tests/unittests/mlu/test_hard_sigmoid_op_mlu.py @@ -161,7 +161,7 @@ class TestHardsigmoidAPI(unittest.TestCase): def test_fluid_api(self): paddle.enable_static() with fluid.program_guard(fluid.Program()): - x = fluid.data('X', self.x_np.shape, self.x_np.dtype) + x = paddle.static.data('X', self.x_np.shape, self.x_np.dtype) out = paddle.nn.functional.hardsigmoid(x) exe = fluid.Executor(self.place) res = exe.run(feed={'X': self.x_np}, fetch_list=[out]) @@ -179,12 +179,12 @@ class TestHardsigmoidAPI(unittest.TestCase): # The input type must be Variable. self.assertRaises(TypeError, F.hardsigmoid, 1) # The input dtype must be float16, float32, float64. - x_int32 = paddle.fluid.data( + x_int32 = paddle.static.data( name='x_int32', shape=[12, 10], dtype='int32' ) self.assertRaises(TypeError, F.hardsigmoid, x_int32) # support the input dtype is float16 - x_fp16 = paddle.fluid.data( + x_fp16 = paddle.static.data( name='x_fp16', shape=[12, 10], dtype='float16' ) F.hardsigmoid(x_fp16) diff --git a/python/paddle/fluid/tests/unittests/mlu/test_log_softmax_op_mlu.py b/python/paddle/fluid/tests/unittests/mlu/test_log_softmax_op_mlu.py index 82d22af8933034efd15c3acbb285437e9bcc73e6..3a35457a74bf7256b098c60f88bc102fdb0185f6 100644 --- a/python/paddle/fluid/tests/unittests/mlu/test_log_softmax_op_mlu.py +++ b/python/paddle/fluid/tests/unittests/mlu/test_log_softmax_op_mlu.py @@ -140,7 +140,7 @@ class TestNNLogSoftmaxAPI(unittest.TestCase): paddle.enable_static() # test static api with paddle.static.program_guard(paddle.static.Program()): - x = paddle.fluid.data(name='x', shape=self.x_shape) + x = paddle.static.data(name='x', shape=self.x_shape) y = logsoftmax(x) exe = paddle.static.Executor(self.place) out = exe.run(feed={'x': self.x}, fetch_list=[y]) @@ -174,7 +174,7 @@ class TestNNFunctionalLogSoftmaxAPI(unittest.TestCase): x = x.astype(dtype) ref_out = np.apply_along_axis(ref_log_softmax, axis, x) with paddle.static.program_guard(paddle.static.Program()): - x = paddle.fluid.data(name='x', shape=self.x_shape) + x = paddle.static.data(name='x', shape=self.x_shape) y = F.log_softmax(x, axis, dtype) exe = paddle.static.Executor(self.place) out = exe.run(feed={'x': self.x}, fetch_list=[y]) @@ -194,10 +194,10 @@ class TestNNFunctionalLogSoftmaxAPI(unittest.TestCase): def test_errors(self): paddle.enable_static() with paddle.static.program_guard(paddle.static.Program()): - x = paddle.fluid.data(name='X1', shape=[100], dtype='int32') + x = paddle.static.data(name='X1', shape=[100], dtype='int32') self.assertRaises(TypeError, F.log_softmax, x) - x = paddle.fluid.data(name='X2', shape=[100], dtype='float32') + x = paddle.static.data(name='X2', shape=[100], dtype='float32') self.assertRaises(TypeError, F.log_softmax, x, dtype='int32') paddle.disable_static() diff --git a/python/paddle/fluid/tests/unittests/mlu/test_masked_select_op_mlu.py b/python/paddle/fluid/tests/unittests/mlu/test_masked_select_op_mlu.py index 11e9dc86cd47016915c15c995746590d236da4b6..9c07c4984e4976c35c93461bbfd83353bd1896db 100644 --- a/python/paddle/fluid/tests/unittests/mlu/test_masked_select_op_mlu.py +++ b/python/paddle/fluid/tests/unittests/mlu/test_masked_select_op_mlu.py @@ -107,8 +107,8 @@ class TestMaskedSelectAPI(unittest.TestCase): def test_static_mode(self): shape = [8, 9, 6] - x = paddle.fluid.data(shape=shape, dtype='float32', name='x') - mask = paddle.fluid.data(shape=shape, dtype='bool', name='mask') + x = 
paddle.static.data(shape=shape, dtype='float32', name='x') + mask = paddle.static.data(shape=shape, dtype='bool', name='mask') np_x = np.random.random(shape).astype('float32') np_mask = np.array(np.random.randint(2, size=shape, dtype=bool)) @@ -132,9 +132,9 @@ class TestMaskedSelectError(unittest.TestCase): ): shape = [8, 9, 6] - x = paddle.fluid.data(shape=shape, dtype='float32', name='x') - mask = paddle.fluid.data(shape=shape, dtype='bool', name='mask') - mask_float = paddle.fluid.data( + x = paddle.static.data(shape=shape, dtype='float32', name='x') + mask = paddle.static.data(shape=shape, dtype='bool', name='mask') + mask_float = paddle.static.data( shape=shape, dtype='float32', name='mask_float' ) np_x = np.random.random(shape).astype('float32') diff --git a/python/paddle/fluid/tests/unittests/mlu/test_matmul_v2_op_mlu.py b/python/paddle/fluid/tests/unittests/mlu/test_matmul_v2_op_mlu.py index f1b142902b10434dbac557eb4ebdcf5f264775a7..4c866e3f4299cafdb8f057a34d41a851f0384342 100644 --- a/python/paddle/fluid/tests/unittests/mlu/test_matmul_v2_op_mlu.py +++ b/python/paddle/fluid/tests/unittests/mlu/test_matmul_v2_op_mlu.py @@ -350,8 +350,12 @@ class TestMatMulV2API(unittest.TestCase): def check_static_result(self, place): with fluid.program_guard(fluid.Program(), fluid.Program()): - input_x = fluid.data(name="input_x", shape=[4, 3], dtype="float32") - input_y = fluid.data(name="input_y", shape=[3, 4], dtype="float32") + input_x = paddle.static.data( + name="input_x", shape=[4, 3], dtype="float32" + ) + input_y = paddle.static.data( + name="input_y", shape=[3, 4], dtype="float32" + ) result = paddle.matmul(input_x, input_y) diff --git a/python/paddle/fluid/tests/unittests/mlu/test_meshgrid_op_mlu.py b/python/paddle/fluid/tests/unittests/mlu/test_meshgrid_op_mlu.py index ecfb94e335b833e7d34e14de2ed84c170cacca1d..e89c46041bbde6ca67ac9f3089aed48baa01e583 100644 --- a/python/paddle/fluid/tests/unittests/mlu/test_meshgrid_op_mlu.py +++ b/python/paddle/fluid/tests/unittests/mlu/test_meshgrid_op_mlu.py @@ -68,8 +68,8 @@ class TestMeshgridOp2(TestMeshgridOp): class TestMeshgridOp3(unittest.TestCase): def test_api(self): - x = fluid.data(shape=[100], dtype='int32', name='x') - y = fluid.data(shape=[200], dtype='int32', name='y') + x = paddle.static.data(shape=[100], dtype='int32', name='x') + y = paddle.static.data(shape=[200], dtype='int32', name='y') input_1 = np.random.randint( 0, @@ -104,8 +104,8 @@ class TestMeshgridOp3(unittest.TestCase): class TestMeshgridOp4(unittest.TestCase): def test_list_input(self): - x = fluid.data(shape=[100], dtype='int32', name='x') - y = fluid.data(shape=[200], dtype='int32', name='y') + x = paddle.static.data(shape=[100], dtype='int32', name='x') + y = paddle.static.data(shape=[200], dtype='int32', name='y') input_1 = np.random.randint( 0, @@ -141,8 +141,8 @@ class TestMeshgridOp5(unittest.TestCase): def test_tuple_input(self): - x = fluid.data(shape=[100], dtype='int32', name='x') - y = fluid.data(shape=[200], dtype='int32', name='y') + x = paddle.static.data(shape=[100], dtype='int32', name='x') + y = paddle.static.data(shape=[200], dtype='int32', name='y') input_1 = np.random.randint( 0, diff --git a/python/paddle/fluid/tests/unittests/mlu/test_scatter_op_mlu.py b/python/paddle/fluid/tests/unittests/mlu/test_scatter_op_mlu.py index cb13f305caebaa7787161f2849d9f0b988278962..fcf0c399fe9e2872e5ee332ed249b15f97d324c6 100644 --- a/python/paddle/fluid/tests/unittests/mlu/test_scatter_op_mlu.py +++
b/python/paddle/fluid/tests/unittests/mlu/test_scatter_op_mlu.py @@ -127,9 +127,9 @@ class TestScatterAPI(unittest.TestCase): def check_static_result(self, place): with fluid.program_guard(fluid.Program(), fluid.Program()): - input = fluid.data(name="input", shape=[3, 2], dtype="float32") - index = fluid.data(name="index", shape=[4], dtype="int64") - updates = fluid.data(name="updates", shape=[4, 2], dtype="float32") + input = paddle.static.data(name="input", shape=[3, 2], dtype="float32") + index = paddle.static.data(name="index", shape=[4], dtype="int64") + updates = paddle.static.data(name="updates", shape=[4, 2], dtype="float32") result = self.scatter(input, index, updates, False) input_data = np.array([[1, 1], [2, 2], [3, 3]]).astype(np.float32) diff --git a/python/paddle/fluid/tests/unittests/mlu/test_size_op_mlu.py b/python/paddle/fluid/tests/unittests/mlu/test_size_op_mlu.py index a0cd8eba6dc5b1c592955b0e64fd3fa84d6b387e..bd5fe04a620fe0b533c706c02de0cd49f526d63d 100644 --- a/python/paddle/fluid/tests/unittests/mlu/test_size_op_mlu.py +++ b/python/paddle/fluid/tests/unittests/mlu/test_size_op_mlu.py @@ -67,8 +67,8 @@ class TestSizeAPI(unittest.TestCase): with fluid.program_guard(main_program, startup_program): shape1 = [2, 1, 4, 5] shape2 = [1, 4, 5] - x_1 = paddle.fluid.data(shape=shape1, dtype='int32', name='x_1') - x_2 = paddle.fluid.data(shape=shape2, dtype='int32', name='x_2') + x_1 = paddle.static.data(shape=shape1, dtype='int32', name='x_1') + x_2 = paddle.static.data(shape=shape2, dtype='int32', name='x_2') input_1 = np.random.random(shape1).astype("int32") input_2 = np.random.random(shape2).astype("int32") out_1 = paddle.numel(x_1) diff --git a/python/paddle/fluid/tests/unittests/mlu/test_softmax_op_mlu.py b/python/paddle/fluid/tests/unittests/mlu/test_softmax_op_mlu.py index 8c1ebbe01e5cffd83edf7f5b1ffbbcbc92abba61..0fb29029ede1cea9e9041785a93b0d76457b3c28 100644 --- a/python/paddle/fluid/tests/unittests/mlu/test_softmax_op_mlu.py +++ b/python/paddle/fluid/tests/unittests/mlu/test_softmax_op_mlu.py @@ -132,7 +132,7 @@ class TestSoftmaxAPI(unittest.TestCase): def test_static_check(self): with paddle.static.program_guard(paddle.static.Program()): - x = paddle.fluid.data('X', self.x_np.shape, 'float32') + x = paddle.static.data('X', self.x_np.shape, 'float32') out1 = self.softmax(x) m = paddle.nn.Softmax() out2 = m(x) @@ -173,12 +173,12 @@ class TestSoftmaxAPI(unittest.TestCase): # The input type must be Variable. 
self.assertRaises(TypeError, self.softmax, 1) # The input dtype must be float16, float32 - x_int32 = paddle.fluid.data( + x_int32 = paddle.static.data( name='x_int32', shape=[2, 3], dtype='int32' ) self.assertRaises(TypeError, self.softmax, x_int32) # support the input dtype is float16 - x_fp16 = paddle.fluid.data( + x_fp16 = paddle.static.data( name='x_fp16', shape=[2, 3], dtype='float16' ) self.softmax(x_fp16) diff --git a/python/paddle/fluid/tests/unittests/mlu/test_transpose_op_mlu.py b/python/paddle/fluid/tests/unittests/mlu/test_transpose_op_mlu.py index 60cd1e27c70ba36fd95800ffe68ba390cfacebcf..75dc659d0b3bc860d74b3830272785eeda01a53f 100644 --- a/python/paddle/fluid/tests/unittests/mlu/test_transpose_op_mlu.py +++ b/python/paddle/fluid/tests/unittests/mlu/test_transpose_op_mlu.py @@ -256,7 +256,7 @@ class TestTransposeApi(unittest.TestCase): class TestTAPI(unittest.TestCase): def test_out(self): with fluid.program_guard(fluid.Program()): - data = fluid.data(shape=[10], dtype="float32", name="data") + data = paddle.static.data(shape=[10], dtype="float32", name="data") data_t = paddle.t(data) place = fluid.MLUPlace(0) exe = fluid.Executor(place) @@ -266,7 +266,7 @@ class TestTAPI(unittest.TestCase): self.assertEqual((result == expected_result).all(), True) with fluid.program_guard(fluid.Program()): - data = fluid.data(shape=[10, 5], dtype="float32", name="data") + data = paddle.static.data(shape=[10, 5], dtype="float32", name="data") data_t = paddle.t(data) place = fluid.MLUPlace(0) exe = fluid.Executor(place) @@ -276,7 +276,7 @@ class TestTAPI(unittest.TestCase): self.assertEqual((result == expected_result).all(), True) with fluid.program_guard(fluid.Program()): - data = fluid.data(shape=[1, 5], dtype="float32", name="data") + data = paddle.static.data(shape=[1, 5], dtype="float32", name="data") data_t = paddle.t(data) place = fluid.MLUPlace(0) exe = fluid.Executor(place) @@ -311,7 +311,7 @@ class TestTAPI(unittest.TestCase): def test_errors(self): with fluid.program_guard(fluid.Program()): - x = fluid.data(name='x', shape=[10, 5, 3], dtype='float32') + x = paddle.static.data(name='x', shape=[10, 5, 3], dtype='float32') def test_x_dimension_check(): paddle.t(x) diff --git a/python/paddle/fluid/tests/unittests/mlu/test_tril_triu_op_mlu.py b/python/paddle/fluid/tests/unittests/mlu/test_tril_triu_op_mlu.py index 0f5b98c9bc959ca8a89e21170b132eb990379a9b..66271aa671551b2ffa3693b32e35c4152f5525e4 100644 --- a/python/paddle/fluid/tests/unittests/mlu/test_tril_triu_op_mlu.py +++ b/python/paddle/fluid/tests/unittests/mlu/test_tril_triu_op_mlu.py @@ -81,7 +81,7 @@ def case_generator(op_type, Xshape, diagonal, expected): def test_failure(self): paddle.enable_static() - data = fluid.data(shape=Xshape, dtype='float64', name=cls_name) + data = paddle.static.data(shape=Xshape, dtype='float64', name=cls_name) with self.assertRaisesRegex( eval(expected.split(':')[-1]), errmsg[expected] ): @@ -146,7 +146,7 @@ class TestTrilTriuOpAPI(unittest.TestCase): startup_prog = Program() with program_guard(prog, startup_prog): data = np.random.random([1, 9, 9, 4]).astype(dtype) - x = fluid.data(shape=[1, 9, -1, 4], dtype=dtype, name='x') + x = paddle.static.data(shape=[1, 9, -1, 4], dtype=dtype, name='x') tril_out, triu_out = tensor.tril(x), tensor.triu(x) place = fluid.MLUPlace(0) @@ -183,7 +183,7 @@ class TestTrilTriuOpAPI(unittest.TestCase): startup_prog = Program() with program_guard(prog, startup_prog): data = np.random.random([1, 9, 9, 4]).astype(dtype) - x = fluid.data(shape=[1, 9, -1, 4], dtype=dtype, 
name='x') + x = paddle.static.data(shape=[1, 9, -1, 4], dtype=dtype, name='x') triu_out = paddle.triu(x) place = fluid.MLUPlace(0) diff --git a/python/paddle/fluid/tests/unittests/npu/test_batch_norm_op_npu.py b/python/paddle/fluid/tests/unittests/npu/test_batch_norm_op_npu.py index 353fd250a5e1b7caba84d9e01662f16d4c90fe61..08944b97e75d2d5cac9566be3e386b65945e5517 100644 --- a/python/paddle/fluid/tests/unittests/npu/test_batch_norm_op_npu.py +++ b/python/paddle/fluid/tests/unittests/npu/test_batch_norm_op_npu.py @@ -588,7 +588,7 @@ class TestDygraphBatchNormTrainableStats(unittest.TestCase): is_test=is_test, trainable_statistics=trainable_statistics, ) - x = fluid.data(name='x', shape=x_np.shape, dtype=x_np.dtype) + x = paddle.static.data(name='x', shape=x_np.shape, dtype=x_np.dtype) y = bn(x) exe.run(fluid.default_startup_program()) r = exe.run(feed={'x': x_np}, fetch_list=[y])[0] diff --git a/python/paddle/fluid/tests/unittests/npu/test_bce_loss_npu.py b/python/paddle/fluid/tests/unittests/npu/test_bce_loss_npu.py index d9e968964afb65c01042cea5c85f8d2bf78215b9..da7df48ecdfd32ec8f4ed1cd6c8ef73ce66b4cb5 100644 --- a/python/paddle/fluid/tests/unittests/npu/test_bce_loss_npu.py +++ b/python/paddle/fluid/tests/unittests/npu/test_bce_loss_npu.py @@ -30,14 +30,14 @@ def test_static_layer( prog = paddle.static.Program() startup_prog = paddle.static.Program() with paddle.static.program_guard(prog, startup_prog): - input = paddle.fluid.data( + input = paddle.static.data( name='input', shape=input_np.shape, dtype='float32' ) - label = paddle.fluid.data( + label = paddle.static.data( name='label', shape=label_np.shape, dtype='float32' ) if weight_np is not None: - weight = paddle.fluid.data( + weight = paddle.static.data( name='weight', shape=weight_np.shape, dtype='float32' ) bce_loss = paddle.nn.loss.BCELoss( @@ -63,14 +63,14 @@ def test_static_functional( prog = paddle.static.Program() startup_prog = paddle.static.Program() with paddle.static.program_guard(prog, startup_prog): - input = paddle.fluid.data( + input = paddle.static.data( name='input', shape=input_np.shape, dtype='float32' ) - label = paddle.fluid.data( + label = paddle.static.data( name='label', shape=label_np.shape, dtype='float32' ) if weight_np is not None: - weight = paddle.fluid.data( + weight = paddle.static.data( name='weight', shape=weight_np.shape, dtype='float32' ) res = paddle.nn.functional.binary_cross_entropy( diff --git a/python/paddle/fluid/tests/unittests/npu/test_clip_op_npu.py b/python/paddle/fluid/tests/unittests/npu/test_clip_op_npu.py index 37dcf8465bc2193367810d3aec791a22657fb138..9b5d14e0ff2f889d7690735bbe705781c4007d95 100644 --- a/python/paddle/fluid/tests/unittests/npu/test_clip_op_npu.py +++ b/python/paddle/fluid/tests/unittests/npu/test_clip_op_npu.py @@ -137,9 +137,9 @@ class TestClipAPI(unittest.TestCase): paddle.enable_static() data_shape = [1, 9, 9, 4] data = np.random.random(data_shape).astype('float32') - images = fluid.data(name='image', shape=data_shape, dtype='float32') - min = fluid.data(name='min', shape=[1], dtype='float32') - max = fluid.data(name='max', shape=[1], dtype='float32') + images = paddle.static.data(name='image', shape=data_shape, dtype='float32') + min = paddle.static.data(name='min', shape=[1], dtype='float32') + max = paddle.static.data(name='max', shape=[1], dtype='float32') place = ( fluid.NPUPlace(0) @@ -203,8 +203,8 @@ class TestClipAPI(unittest.TestCase): def test_errors(self): paddle.enable_static() - x1 = fluid.data(name='x1', shape=[1], dtype="int16") - x2 = 
fluid.data(name='x2', shape=[1], dtype="int8") + x1 = paddle.static.data(name='x1', shape=[1], dtype="int16") + x2 = paddle.static.data(name='x2', shape=[1], dtype="int8") self.assertRaises(TypeError, paddle.clip, x=x1, min=0.2, max=0.8) self.assertRaises(TypeError, paddle.clip, x=x2, min=0.2, max=0.8) paddle.disable_static() diff --git a/python/paddle/fluid/tests/unittests/npu/test_dropout_op_npu.py b/python/paddle/fluid/tests/unittests/npu/test_dropout_op_npu.py index 91b8508646788418a321f1313d17f3d76406bc93..6ed96bcfbff0e1ed5420700928154f2b21e3d594 100644 --- a/python/paddle/fluid/tests/unittests/npu/test_dropout_op_npu.py +++ b/python/paddle/fluid/tests/unittests/npu/test_dropout_op_npu.py @@ -215,7 +215,7 @@ class TestDropoutAPI(unittest.TestCase): def check_static_result(self, place): with fluid.program_guard(fluid.Program(), fluid.Program()): - input = fluid.data(name="input", shape=[40, 40], dtype="float32") + input = paddle.static.data(name="input", shape=[40, 40], dtype="float32") res1 = paddle.nn.functional.dropout( x=input, p=0.0, training=False, mode='upscale_in_train' ) diff --git a/python/paddle/fluid/tests/unittests/npu/test_elementwise_add_op_npu.py b/python/paddle/fluid/tests/unittests/npu/test_elementwise_add_op_npu.py index 2e35a398bb7f0412c0741a3399ecdd1c86e34613..f009b43bf5f18b15440f8b84d9cd5643ab527cc7 100644 --- a/python/paddle/fluid/tests/unittests/npu/test_elementwise_add_op_npu.py +++ b/python/paddle/fluid/tests/unittests/npu/test_elementwise_add_op_npu.py @@ -510,8 +510,8 @@ class TestAddApi(unittest.TestCase): def test_name(self): with fluid.program_guard(fluid.Program()): - x = fluid.data(name="x", shape=[2, 3], dtype="float32") - y = fluid.data(name='y', shape=[2, 3], dtype='float32') + x = paddle.static.data(name="x", shape=[2, 3], dtype="float32") + y = paddle.static.data(name='y', shape=[2, 3], dtype='float32') y_1 = self._executed_api(x, y, name='add_res') self.assertEqual(('add_res' in y_1.name), True) @@ -525,8 +525,8 @@ class TestAddApi(unittest.TestCase): "y": np.array([1, 5, 2]).astype('float32'), } - x = fluid.data(name="x", shape=[3], dtype='float32') - y = fluid.data(name="y", shape=[3], dtype='float32') + x = paddle.static.data(name="x", shape=[3], dtype='float32') + y = paddle.static.data(name="y", shape=[3], dtype='float32') z = self._executed_api(x, y) place = fluid.NPUPlace(0) diff --git a/python/paddle/fluid/tests/unittests/npu/test_elementwise_mod_op_npu.py b/python/paddle/fluid/tests/unittests/npu/test_elementwise_mod_op_npu.py index efb81fbad6ec0cb86e136f11e81d834635433dab..46eb7f90825b042e948cee9665c061a57f6e2187 100644 --- a/python/paddle/fluid/tests/unittests/npu/test_elementwise_mod_op_npu.py +++ b/python/paddle/fluid/tests/unittests/npu/test_elementwise_mod_op_npu.py @@ -144,8 +144,8 @@ class TestRemainderOp(unittest.TestCase): def test_name(self): paddle.set_device('npu:0') with fluid.program_guard(fluid.Program()): - x = fluid.data(name="x", shape=[2, 3], dtype="int64") - y = fluid.data(name='y', shape=[2, 3], dtype='int64') + x = paddle.static.data(name="x", shape=[2, 3], dtype="int64") + y = paddle.static.data(name='y', shape=[2, 3], dtype='int64') y_1 = paddle.remainder(x, y, name='div_res') self.assertEqual(('div_res' in y_1.name), True) diff --git a/python/paddle/fluid/tests/unittests/npu/test_gather_op_npu.py b/python/paddle/fluid/tests/unittests/npu/test_gather_op_npu.py index 3ce9042d75fbda9a7a8b521d076efb0250ccb78f..e2a5229f18a4c2ada33381f316b12e48a5c9062b 100644 --- 
a/python/paddle/fluid/tests/unittests/npu/test_gather_op_npu.py +++ b/python/paddle/fluid/tests/unittests/npu/test_gather_op_npu.py @@ -101,8 +101,8 @@ class API_TestGather(unittest.TestCase): with paddle.static.program_guard( paddle.static.Program(), paddle.static.Program() ): - x = paddle.fluid.data('x', shape=[-1, 2], dtype='float32') - index = paddle.fluid.data('index', shape=[-1, 1], dtype='int32') + x = paddle.static.data('x', shape=[-1, 2], dtype='float32') + index = paddle.static.data('index', shape=[-1, 1], dtype='int32') out = paddle.gather(x, index) place = paddle.NPUPlace(0) exe = paddle.static.Executor(place) diff --git a/python/paddle/fluid/tests/unittests/npu/test_group_norm_op_npu.py b/python/paddle/fluid/tests/unittests/npu/test_group_norm_op_npu.py index 56430ee7c13aacec87d0acc2731b5f16c53fcd0a..113b5e1e6a36841c31f06b3ad13cade6407040c7 100644 --- a/python/paddle/fluid/tests/unittests/npu/test_group_norm_op_npu.py +++ b/python/paddle/fluid/tests/unittests/npu/test_group_norm_op_npu.py @@ -216,7 +216,7 @@ class TestGroupNormOpFP16_With_NHWC(TestGroupNormOp): class TestGroupNormException(unittest.TestCase): # data_layout is not NHWC or NCHW def test_exception(self): - data = fluid.data(name='data', shape=[None, 3, 3, 4], dtype="float64") + data = paddle.static.data(name='data', shape=[None, 3, 3, 4], dtype="float64") def attr_data_format(): out = paddle.static.nn.group_norm( diff --git a/python/paddle/fluid/tests/unittests/npu/test_hard_sigmoid_op_npu.py b/python/paddle/fluid/tests/unittests/npu/test_hard_sigmoid_op_npu.py index 55dc1e0a1102b800c792349e00b123c61464ddc1..23c8fde44951ec95cd8094e5858de1fbc6749bc3 100644 --- a/python/paddle/fluid/tests/unittests/npu/test_hard_sigmoid_op_npu.py +++ b/python/paddle/fluid/tests/unittests/npu/test_hard_sigmoid_op_npu.py @@ -122,7 +122,7 @@ class TestHardsigmoidAPI(unittest.TestCase): def test_fluid_api(self): with fluid.program_guard(fluid.Program()): - x = fluid.data('X', self.x_np.shape, self.x_np.dtype) + x = paddle.static.data('X', self.x_np.shape, self.x_np.dtype) out = paddle.nn.functional.hardsigmoid(x) exe = fluid.Executor(self.place) res = exe.run(feed={'X': self.x_np}, fetch_list=[out]) @@ -140,12 +140,12 @@ class TestHardsigmoidAPI(unittest.TestCase): # The input type must be Variable. self.assertRaises(TypeError, F.hardsigmoid, 1) # The input dtype must be float16, float32, float64. 
- x_int32 = paddle.fluid.data( + x_int32 = paddle.static.data( name='x_int32', shape=[12, 10], dtype='int32' ) self.assertRaises(TypeError, F.hardsigmoid, x_int32) # support the input dtype is float16 - x_fp16 = paddle.fluid.data( + x_fp16 = paddle.static.data( name='x_fp16', shape=[12, 10], dtype='float16' ) F.hardsigmoid(x_fp16) diff --git a/python/paddle/fluid/tests/unittests/npu/test_index_sample_op_npu.py b/python/paddle/fluid/tests/unittests/npu/test_index_sample_op_npu.py index 5883ef7b567017815ecfe063203a0c479f2014ce..6a27df3ad6e1bf0ff3ca068a86406a72f27bcd59 100644 --- a/python/paddle/fluid/tests/unittests/npu/test_index_sample_op_npu.py +++ b/python/paddle/fluid/tests/unittests/npu/test_index_sample_op_npu.py @@ -160,8 +160,8 @@ class TestIndexSampleShape(unittest.TestCase): low=0, high=x_shape[1], size=index_shape ).astype(index_type) - x = fluid.data(name='x', shape=[-1, 5], dtype='float32') - index = fluid.data(name='index', shape=[-1, 3], dtype='int32') + x = paddle.static.data(name='x', shape=[-1, 5], dtype='float32') + index = paddle.static.data(name='index', shape=[-1, 3], dtype='int32') output = paddle.index_sample(x=x, index=index) place = fluid.NPUPlace(0) diff --git a/python/paddle/fluid/tests/unittests/npu/test_instance_norm_op_npu.py b/python/paddle/fluid/tests/unittests/npu/test_instance_norm_op_npu.py index 65174d8caf5e3f9cbb4f9f98b4da83198389c66c..ce74c0d0df90cd41e386b26045a3e05cfd53a3e2 100644 --- a/python/paddle/fluid/tests/unittests/npu/test_instance_norm_op_npu.py +++ b/python/paddle/fluid/tests/unittests/npu/test_instance_norm_op_npu.py @@ -61,7 +61,7 @@ class TestInstanceNorm(unittest.TestCase): def compute_v1(x_np): with program_guard(Program(), Program()): ins = paddle.nn.InstanceNorm(shape[1]) - x = fluid.data(name='x', shape=x_np.shape, dtype=x_np.dtype) + x = paddle.static.data(name='x', shape=x_np.shape, dtype=x_np.dtype) y = ins(x) exe.run(fluid.default_startup_program()) r = exe.run(feed={'x': x_np}, fetch_list=[y])[0] @@ -70,7 +70,7 @@ class TestInstanceNorm(unittest.TestCase): def compute_v2(x_np): with program_guard(Program(), Program()): ins = paddle.nn.InstanceNorm2D(shape[1]) - x = fluid.data(name='x', shape=x_np.shape, dtype=x_np.dtype) + x = paddle.static.data(name='x', shape=x_np.shape, dtype=x_np.dtype) y = ins(x) exe.run(fluid.default_startup_program()) r = exe.run(feed={'x': x_np}, fetch_list=[y])[0] diff --git a/python/paddle/fluid/tests/unittests/npu/test_kldiv_loss_op_npu.py b/python/paddle/fluid/tests/unittests/npu/test_kldiv_loss_op_npu.py index 3a55d9973af9733122c1827d6696a55e905bfacf..160dfdff1784f0ff98215be13ff2a59955204bf5 100644 --- a/python/paddle/fluid/tests/unittests/npu/test_kldiv_loss_op_npu.py +++ b/python/paddle/fluid/tests/unittests/npu/test_kldiv_loss_op_npu.py @@ -137,8 +137,8 @@ class TestKLDivLossDygraph(unittest.TestCase): self.run_kl_loss('none') def test_kl_loss_static_api(self): - input = paddle.fluid.data(name='input', shape=[5, 20]) - label = paddle.fluid.data(name='label', shape=[5, 20]) + input = paddle.static.data(name='input', shape=[5, 20]) + label = paddle.static.data(name='label', shape=[5, 20]) pred_loss = paddle.nn.functional.kl_div(input, label) diff --git a/python/paddle/fluid/tests/unittests/npu/test_log_softmax_op_npu.py b/python/paddle/fluid/tests/unittests/npu/test_log_softmax_op_npu.py index fc0c428983fa9e89206750af7de533d1d3e34796..41b6d45620e3e46eaafe6e369e68c5f2362f33c5 100644 --- a/python/paddle/fluid/tests/unittests/npu/test_log_softmax_op_npu.py +++ 
b/python/paddle/fluid/tests/unittests/npu/test_log_softmax_op_npu.py @@ -124,7 +124,7 @@ class TestNNLogSoftmaxAPI(unittest.TestCase): logsoftmax = paddle.nn.LogSoftmax(axis) # test static api with paddle.static.program_guard(paddle.static.Program()): - x = paddle.fluid.data(name='x', shape=self.x_shape) + x = paddle.static.data(name='x', shape=self.x_shape) y = logsoftmax(x) exe = paddle.static.Executor(self.place) out = exe.run(feed={'x': self.x}, fetch_list=[y]) @@ -158,7 +158,7 @@ class TestNNFunctionalLogSoftmaxAPI(unittest.TestCase): x = x.astype(dtype) ref_out = np.apply_along_axis(ref_log_softmax, axis, x) with paddle.static.program_guard(paddle.static.Program()): - x = paddle.fluid.data(name='x', shape=self.x_shape) + x = paddle.static.data(name='x', shape=self.x_shape) y = F.log_softmax(x, axis, dtype) exe = paddle.static.Executor(self.place) out = exe.run(feed={'x': self.x}, fetch_list=[y]) diff --git a/python/paddle/fluid/tests/unittests/npu/test_masked_select_op_npu.py b/python/paddle/fluid/tests/unittests/npu/test_masked_select_op_npu.py index 8943b5ba95f428aa99d2a8cb205f824b78688862..379b13721f7f26783a57137b9711ad507e735d74 100644 --- a/python/paddle/fluid/tests/unittests/npu/test_masked_select_op_npu.py +++ b/python/paddle/fluid/tests/unittests/npu/test_masked_select_op_npu.py @@ -116,8 +116,8 @@ class TestMaskedSelectAPI(unittest.TestCase): def test_static_mode(self): shape = [8, 9, 6] - x = paddle.fluid.data(shape=shape, dtype='float32', name='x') - mask = paddle.fluid.data(shape=shape, dtype='bool', name='mask') + x = paddle.static.data(shape=shape, dtype='float32', name='x') + mask = paddle.static.data(shape=shape, dtype='bool', name='mask') np_x = np.random.random(shape).astype('float32') np_mask = np.array(np.random.randint(2, size=shape, dtype=bool)) @@ -141,9 +141,9 @@ class TestMaskedSelectError(unittest.TestCase): ): shape = [8, 9, 6] - x = paddle.fluid.data(shape=shape, dtype='float32', name='x') - mask = paddle.fluid.data(shape=shape, dtype='bool', name='mask') - mask_float = paddle.fluid.data( + x = paddle.static.data(shape=shape, dtype='float32', name='x') + mask = paddle.static.data(shape=shape, dtype='bool', name='mask') + mask_float = paddle.static.data( shape=shape, dtype='float32', name='mask_float' ) np_x = np.random.random(shape).astype('float32') diff --git a/python/paddle/fluid/tests/unittests/npu/test_matmulv2_op_npu.py b/python/paddle/fluid/tests/unittests/npu/test_matmulv2_op_npu.py index 91883824cf5b68e88ceef03e193cb65bfaf7ed35..06b5ff54ccecd1d271e8124f3d52b5c73764bcb7 100644 --- a/python/paddle/fluid/tests/unittests/npu/test_matmulv2_op_npu.py +++ b/python/paddle/fluid/tests/unittests/npu/test_matmulv2_op_npu.py @@ -337,8 +337,8 @@ class TestMatMulV2API(unittest.TestCase): def check_static_result(self, place): with fluid.program_guard(fluid.Program(), fluid.Program()): - input_x = fluid.data(name="input_x", shape=[4, 3], dtype="float32") - input_y = fluid.data(name="input_y", shape=[3, 4], dtype="float32") + input_x = paddle.static.data(name="input_x", shape=[4, 3], dtype="float32") + input_y = paddle.static.data(name="input_y", shape=[3, 4], dtype="float32") result = paddle.matmul(input_x, input_y) diff --git a/python/paddle/fluid/tests/unittests/npu/test_meshgrid_op_npu.py b/python/paddle/fluid/tests/unittests/npu/test_meshgrid_op_npu.py index 8af8f899244586c216648df481002fd39243ca70..c153dece3c72bb1ac8caf154c3ee1b9c1d138425 100644 --- a/python/paddle/fluid/tests/unittests/npu/test_meshgrid_op_npu.py +++ 
b/python/paddle/fluid/tests/unittests/npu/test_meshgrid_op_npu.py @@ -92,8 +92,8 @@ class TestMeshgridOp2(TestMeshgridOp): class TestMeshgridOp3(unittest.TestCase): def test_api(self): - x = fluid.data(shape=[100], dtype='int32', name='x') - y = fluid.data(shape=[200], dtype='int32', name='y') + x = paddle.static.data(shape=[100], dtype='int32', name='x') + y = paddle.static.data(shape=[200], dtype='int32', name='y') input_1 = np.random.randint( 0, @@ -129,8 +129,8 @@ class TestMeshgridOp3(unittest.TestCase): class TestMeshgridOp4(unittest.TestCase): def test_list_input(self): - x = fluid.data(shape=[100], dtype='int32', name='x') - y = fluid.data(shape=[200], dtype='int32', name='y') + x = paddle.static.data(shape=[100], dtype='int32', name='x') + y = paddle.static.data(shape=[200], dtype='int32', name='y') input_1 = np.random.randint( 0, @@ -166,8 +166,8 @@ class TestMeshgridOp4(unittest.TestCase): class TestMeshgridOp5(unittest.TestCase): def test_tuple_input(self): - x = fluid.data(shape=[100], dtype='int32', name='x') - y = fluid.data(shape=[200], dtype='int32', name='y') + x = paddle.static.data(shape=[100], dtype='int32', name='x') + y = paddle.static.data(shape=[200], dtype='int32', name='y') input_1 = np.random.randint( 0, diff --git a/python/paddle/fluid/tests/unittests/npu/test_multinomial_op_npu.py b/python/paddle/fluid/tests/unittests/npu/test_multinomial_op_npu.py index ccff4ffd0cfb5d5e7a534d07d334e184ecdf7bc8..f8b55aedd5c7b2332aef70a74f2c8fc4c790c003 100644 --- a/python/paddle/fluid/tests/unittests/npu/test_multinomial_op_npu.py +++ b/python/paddle/fluid/tests/unittests/npu/test_multinomial_op_npu.py @@ -163,7 +163,7 @@ class TestMultinomialApi(unittest.TestCase): startup_program = fluid.Program() train_program = fluid.Program() with fluid.program_guard(train_program, startup_program): - x = fluid.data('x', shape=[4], dtype='float32') + x = paddle.static.data('x', shape=[4], dtype='float32') out = paddle.multinomial(x, num_samples=100000, replacement=True) place = fluid.NPUPlace(0) diff --git a/python/paddle/fluid/tests/unittests/npu/test_norm_op_npu.py b/python/paddle/fluid/tests/unittests/npu/test_norm_op_npu.py index ca6979a7615e238503b81bffd220e59dd529f4f4..c0894edc51afa3d57d5863ec8ba4046238607a6b 100644 --- a/python/paddle/fluid/tests/unittests/npu/test_norm_op_npu.py +++ b/python/paddle/fluid/tests/unittests/npu/test_norm_op_npu.py @@ -106,7 +106,7 @@ class API_NormTest(unittest.TestCase): with fluid.program_guard(fluid.Program()): def test_norm_x_type(): - data = fluid.data(name="x", shape=[3, 3], dtype="float64") + data = paddle.static.data(name="x", shape=[3, 3], dtype="float64") out = paddle.nn.functional.normalize(data) self.assertRaises(TypeError, test_norm_x_type) diff --git a/python/paddle/fluid/tests/unittests/npu/test_pad3d_op_npu.py b/python/paddle/fluid/tests/unittests/npu/test_pad3d_op_npu.py index 5d0d25e1f2081b7c9b96a091265bfb1378c849ec..d8f68c4df2c3a28a64dcef7fbc01f7c8943d181d 100644 --- a/python/paddle/fluid/tests/unittests/npu/test_pad3d_op_npu.py +++ b/python/paddle/fluid/tests/unittests/npu/test_pad3d_op_npu.py @@ -176,7 +176,7 @@ class TestPadAPI(unittest.TestCase): mode = "constant" value = 0 input_data = np.random.rand(*input_shape).astype(np.float32) - x = paddle.fluid.data(name="x", shape=input_shape) + x = paddle.static.data(name="x", shape=input_shape) result1 = F.pad( x=x, pad=pad, value=value, mode=mode, data_format="NCDHW" ) @@ -454,7 +454,7 @@ class TestPad3dOpNpuError(unittest.TestCase): def test_value(): input_shape = (1, 2, 3, 4, 5) 
data = np.random.rand(*input_shape).astype(np.float32) - x = paddle.fluid.data(name="x", shape=input_shape) + x = paddle.static.data(name="x", shape=input_shape) y = F.pad(x, pad=[1, 1, 1, 1, 1, 1], value=1, mode='constant') place = paddle.NPUPlace() exe = Executor(place) @@ -463,7 +463,7 @@ class TestPad3dOpNpuError(unittest.TestCase): def test_mode_1(): input_shape = (1, 2, 3, 4, 5) data = np.random.rand(*input_shape).astype(np.float32) - x = paddle.fluid.data(name="x", shape=input_shape) + x = paddle.static.data(name="x", shape=input_shape) y = F.pad(x, pad=[1, 1, 1, 1, 1, 1], mode='reflect') place = paddle.NPUPlace() exe = Executor(place) @@ -472,7 +472,7 @@ class TestPad3dOpNpuError(unittest.TestCase): def test_mode_2(): input_shape = (1, 2, 3, 4, 5) data = np.random.rand(*input_shape).astype(np.float32) - x = paddle.fluid.data(name="x", shape=input_shape) + x = paddle.static.data(name="x", shape=input_shape) y = F.pad(x, pad=[1, 1, 1, 1, 1, 1], mode='replicate') place = paddle.NPUPlace() exe = Executor(place) @@ -481,7 +481,7 @@ class TestPad3dOpNpuError(unittest.TestCase): def test_mode_3(): input_shape = (1, 2, 3, 4, 5) data = np.random.rand(*input_shape).astype(np.float32) - x = paddle.fluid.data(name="x", shape=input_shape) + x = paddle.static.data(name="x", shape=input_shape) y = F.pad(x, pad=[1, 1, 1, 1, 1, 1], mode='circular') place = paddle.CPUPlace() exe = Executor(place) diff --git a/python/paddle/fluid/tests/unittests/npu/test_pad_op_npu.py b/python/paddle/fluid/tests/unittests/npu/test_pad_op_npu.py index c02d6012e413f16ab9797490b26d53093f2b3003..a6bbaf6f8de25a985c463ba91d6abafe860b8061 100644 --- a/python/paddle/fluid/tests/unittests/npu/test_pad_op_npu.py +++ b/python/paddle/fluid/tests/unittests/npu/test_pad_op_npu.py @@ -122,7 +122,7 @@ class TestPadOpError(unittest.TestCase): self.assertRaises(TypeError, test_Variable) - data = fluid.data(name='data', shape=[4], dtype='float16') + data = paddle.static.data(name='data', shape=[4], dtype='float16') paddle.nn.functional.pad(x=data, pad=[0, 1]) diff --git a/python/paddle/fluid/tests/unittests/npu/test_run_program_op_npu.py b/python/paddle/fluid/tests/unittests/npu/test_run_program_op_npu.py index 36bf75fafed8974812545fa74df83c79c679c925..88302c427f762675d3a1ed02dab7c2f045d10e49 100644 --- a/python/paddle/fluid/tests/unittests/npu/test_run_program_op_npu.py +++ b/python/paddle/fluid/tests/unittests/npu/test_run_program_op_npu.py @@ -289,7 +289,7 @@ class TestRunProgramOpWithFC(RunProgramNPUOpTest): def build_model(self): # 1. 
simple model - img = fluid.data( + img = paddle.static.data( name=self.input_names['X'][0], shape=[None, 1, 28, 28], dtype='float32', diff --git a/python/paddle/fluid/tests/unittests/npu/test_size_op_npu.py b/python/paddle/fluid/tests/unittests/npu/test_size_op_npu.py index 0b1d87b3dfaf26d2b3c582c9cdda099dd6dd8b57..c955c8281744b820bc7b754609763eece507d560 100755 --- a/python/paddle/fluid/tests/unittests/npu/test_size_op_npu.py +++ b/python/paddle/fluid/tests/unittests/npu/test_size_op_npu.py @@ -96,8 +96,8 @@ class TestSizeAPI(unittest.TestCase): with fluid.program_guard(main_program, startup_program): shape1 = [2, 1, 4, 5] shape2 = [1, 4, 5] - x_1 = paddle.fluid.data(shape=shape1, dtype='int32', name='x_1') - x_2 = paddle.fluid.data(shape=shape2, dtype='int32', name='x_2') + x_1 = paddle.static.data(shape=shape1, dtype='int32', name='x_1') + x_2 = paddle.static.data(shape=shape2, dtype='int32', name='x_2') input_1 = np.random.random(shape1).astype("int32") input_2 = np.random.random(shape2).astype("int32") out_1 = paddle.numel(x_1) diff --git a/python/paddle/fluid/tests/unittests/npu/test_take_along_axis_op_npu.py b/python/paddle/fluid/tests/unittests/npu/test_take_along_axis_op_npu.py index f2bb48bad7e3cb870536a1d4d075060db601dbb2..853ccbc21016f73e2ff1d305f074d0f2468314b4 100644 --- a/python/paddle/fluid/tests/unittests/npu/test_take_along_axis_op_npu.py +++ b/python/paddle/fluid/tests/unittests/npu/test_take_along_axis_op_npu.py @@ -89,8 +89,8 @@ class TestTakeAlongAxisAPI(unittest.TestCase): def test_api_static(self): paddle.enable_static() with paddle.static.program_guard(paddle.static.Program()): - x = paddle.fluid.data('X', self.shape) - index = paddle.fluid.data('Index', self.index_shape, "int64") + x = paddle.static.data('X', self.shape) + index = paddle.static.data('Index', self.index_shape, "int64") out = paddle.take_along_axis(x, index, self.axis) exe = paddle.static.Executor(self.place) res = exe.run( diff --git a/python/paddle/fluid/tests/unittests/npu/test_tril_triu_op_npu.py b/python/paddle/fluid/tests/unittests/npu/test_tril_triu_op_npu.py index 97385ca04d7eaa83e0a5d4fc3f617ca799711c53..92d52bc604e7dc848239584962c1fcc4a54a01c7 100644 --- a/python/paddle/fluid/tests/unittests/npu/test_tril_triu_op_npu.py +++ b/python/paddle/fluid/tests/unittests/npu/test_tril_triu_op_npu.py @@ -82,7 +82,7 @@ def case_generator(op_type, Xshape, diagonal, expected): def test_failure(self): paddle.enable_static() - data = fluid.data(shape=Xshape, dtype='float32', name=cls_name) + data = paddle.static.data(shape=Xshape, dtype='float32', name=cls_name) with self.assertRaisesRegex( eval(expected.split(':')[-1]), errmsg[expected] ): @@ -147,7 +147,7 @@ class TestTrilTriuOpAPI(unittest.TestCase): startup_prog = Program() with program_guard(prog, startup_prog): data = np.random.random([1, 9, 9, 4]).astype(dtype) - x = fluid.data(shape=[1, 9, -1, 4], dtype=dtype, name='x') + x = paddle.static.data(shape=[1, 9, -1, 4], dtype=dtype, name='x') tril_out, triu_out = tensor.tril(x), tensor.triu(x) place = fluid.NPUPlace(0) @@ -184,7 +184,7 @@ class TestTrilTriuOpAPI(unittest.TestCase): startup_prog = Program() with program_guard(prog, startup_prog): data = np.random.random([1, 9, 9, 4]).astype(dtype) - x = fluid.data(shape=[1, 9, -1, 4], dtype=dtype, name='x') + x = paddle.static.data(shape=[1, 9, -1, 4], dtype=dtype, name='x') triu_out = paddle.triu(x) place = fluid.NPUPlace(0) diff --git a/python/paddle/fluid/tests/unittests/npu/test_update_loss_scaling_op_npu.py 
b/python/paddle/fluid/tests/unittests/npu/test_update_loss_scaling_op_npu.py index 07fb19031c0764012f45804a355e4f9fb84b125c..571280881b98a67ead8978265015f9dd90468517 100644 --- a/python/paddle/fluid/tests/unittests/npu/test_update_loss_scaling_op_npu.py +++ b/python/paddle/fluid/tests/unittests/npu/test_update_loss_scaling_op_npu.py @@ -104,17 +104,17 @@ class TestUpdateLossScalingOpBad(TestUpdateLossScalingOp): class TestUpdateLossScalingLayer(unittest.TestCase): def loss_scaling_check(self, use_npu=True, scope=fluid.Scope()): - a = fluid.data(name="a", shape=[1024, 1024], dtype='float32') - b = fluid.data(name="b", shape=[512, 128], dtype='float32') + a = paddle.static.data(name="a", shape=[1024, 1024], dtype='float32') + b = paddle.static.data(name="b", shape=[512, 128], dtype='float32') x = [a, b] - found_inf = fluid.data(name="found_inf", shape=[1], dtype='bool') - prev_loss_scaling = fluid.data( + found_inf = paddle.static.data(name="found_inf", shape=[1], dtype='bool') + prev_loss_scaling = paddle.static.data( name="prev_loss_scaling", shape=[1], dtype='float32' ) - num_good_steps = fluid.data( + num_good_steps = paddle.static.data( name="num_good_steps", shape=[1], dtype='int32' ) - num_bad_steps = fluid.data( + num_bad_steps = paddle.static.data( name="num_bad_steps", shape=[1], dtype='int32' ) @@ -175,17 +175,17 @@ class TestUpdateLossScalingLayer(unittest.TestCase): assert np.array_equal(result_v[7], np.zeros_like(num_bad_steps_v)) def loss_scaling_check_inf(self, use_npu=True, scope=fluid.Scope()): - a = fluid.data(name="a", shape=[1024, 1024], dtype='float32') - b = fluid.data(name="b", shape=[512, 128], dtype='float32') + a = paddle.static.data(name="a", shape=[1024, 1024], dtype='float32') + b = paddle.static.data(name="b", shape=[512, 128], dtype='float32') x = [a, b] - found_inf = fluid.data(name="found_inf", shape=[1], dtype='bool') - prev_loss_scaling = fluid.data( + found_inf = paddle.static.data(name="found_inf", shape=[1], dtype='bool') + prev_loss_scaling = paddle.static.data( name="prev_loss_scaling", shape=[1], dtype='float32' ) - num_good_steps = fluid.data( + num_good_steps = paddle.static.data( name="num_good_steps", shape=[1], dtype='int32' ) - num_bad_steps = fluid.data( + num_bad_steps = paddle.static.data( name="num_bad_steps", shape=[1], dtype='int32' ) diff --git a/python/paddle/fluid/tests/unittests/npu/test_where_op_npu.py b/python/paddle/fluid/tests/unittests/npu/test_where_op_npu.py index 5e9baa696fc368fffb85abe8f8b8643cd0c84008..bc4d9583dde396c20c166607a0ca327d1a461508 100755 --- a/python/paddle/fluid/tests/unittests/npu/test_where_op_npu.py +++ b/python/paddle/fluid/tests/unittests/npu/test_where_op_npu.py @@ -89,11 +89,11 @@ class TestNPUWhereAPI(unittest.TestCase): train_prog = fluid.Program() startup = fluid.Program() with fluid.program_guard(train_prog, startup): - cond = fluid.data( + cond = paddle.static.data( name='cond', shape=self.shape, dtype='bool' ) - x = fluid.data(name='x', shape=self.shape, dtype='float32') - y = fluid.data(name='y', shape=self.shape, dtype='float32') + x = paddle.static.data(name='x', shape=self.shape, dtype='float32') + y = paddle.static.data(name='y', shape=self.shape, dtype='float32') x.stop_gradient = x_stop_gradient y.stop_gradient = y_stop_gradient diff --git a/python/paddle/fluid/tests/unittests/rnn/test_rnn_cells_static.py b/python/paddle/fluid/tests/unittests/rnn/test_rnn_cells_static.py index 646ab79fe33f40708ea8a41c2e2e72f39deef63c..a7edbc67496194bf2cf03ebd4efa74cf1acc3137 100644 --- 
a/python/paddle/fluid/tests/unittests/rnn/test_rnn_cells_static.py +++ b/python/paddle/fluid/tests/unittests/rnn/test_rnn_cells_static.py @@ -73,12 +73,12 @@ class TestSimpleRNNCell(unittest.TestCase): with paddle.fluid.unique_name.guard(): with paddle.static.program_guard(mp, sp): - x_data = paddle.fluid.data( + x_data = paddle.static.data( "input", [-1, 16], dtype=paddle.framework.get_default_dtype(), ) - init_h = paddle.fluid.data( + init_h = paddle.static.data( "init_h", [-1, 32], dtype=paddle.framework.get_default_dtype(), @@ -105,7 +105,7 @@ class TestSimpleRNNCell(unittest.TestCase): with paddle.fluid.unique_name.guard(): with paddle.static.program_guard(mp, sp): - x_data = paddle.fluid.data( + x_data = paddle.static.data( "input", [-1, 16], dtype=paddle.framework.get_default_dtype(), @@ -176,12 +176,12 @@ class TestGRUCell(unittest.TestCase): with paddle.fluid.unique_name.guard(): with paddle.static.program_guard(mp, sp): - x_data = paddle.fluid.data( + x_data = paddle.static.data( "input", [-1, 16], dtype=paddle.framework.get_default_dtype(), ) - init_h = paddle.fluid.data( + init_h = paddle.static.data( "init_h", [-1, 32], dtype=paddle.framework.get_default_dtype(), @@ -208,7 +208,7 @@ class TestGRUCell(unittest.TestCase): with paddle.fluid.unique_name.guard(): with paddle.static.program_guard(mp, sp): - x_data = paddle.fluid.data( + x_data = paddle.static.data( "input", [-1, 16], dtype=paddle.framework.get_default_dtype(), @@ -280,17 +280,17 @@ class TestLSTMCell(unittest.TestCase): with paddle.fluid.unique_name.guard(): with paddle.static.program_guard(mp, sp): - x_data = paddle.fluid.data( + x_data = paddle.static.data( "input", [-1, 16], dtype=paddle.framework.get_default_dtype(), ) - init_h = paddle.fluid.data( + init_h = paddle.static.data( "init_h", [-1, 32], dtype=paddle.framework.get_default_dtype(), ) - init_c = paddle.fluid.data( + init_c = paddle.static.data( "init_c", [-1, 32], dtype=paddle.framework.get_default_dtype(), @@ -318,7 +318,7 @@ class TestLSTMCell(unittest.TestCase): with paddle.fluid.unique_name.guard(): with paddle.static.program_guard(mp, sp): - x_data = paddle.fluid.data( + x_data = paddle.static.data( "input", [-1, 16], dtype=paddle.framework.get_default_dtype(), diff --git a/python/paddle/fluid/tests/unittests/rnn/test_rnn_nets_static.py b/python/paddle/fluid/tests/unittests/rnn/test_rnn_nets_static.py index d8b6ba125595e5fb68379fcd6119173d82c18d96..da04ca9e1bdb0d162f9eecf26db306e10aa281f7 100755 --- a/python/paddle/fluid/tests/unittests/rnn/test_rnn_nets_static.py +++ b/python/paddle/fluid/tests/unittests/rnn/test_rnn_nets_static.py @@ -88,12 +88,12 @@ class TestSimpleRNN(unittest.TestCase): with paddle.fluid.unique_name.guard(): with paddle.static.program_guard(mp, sp): - x_data = paddle.fluid.data( + x_data = paddle.static.data( "input", [-1, -1, 16], dtype=paddle.framework.get_default_dtype(), ) - init_h = paddle.fluid.data( + init_h = paddle.static.data( "init_h", [2 * self.num_directions, -1, 32], dtype=paddle.framework.get_default_dtype(), @@ -123,7 +123,7 @@ class TestSimpleRNN(unittest.TestCase): with paddle.fluid.unique_name.guard(): with paddle.static.program_guard(mp, sp): - x_data = paddle.fluid.data( + x_data = paddle.static.data( "input", [-1, -1, 16], dtype=paddle.framework.get_default_dtype(), @@ -155,12 +155,12 @@ class TestSimpleRNN(unittest.TestCase): with paddle.fluid.unique_name.guard(): with paddle.static.program_guard(mp, sp): - x_data = paddle.fluid.data( + x_data = paddle.static.data( "input", [-1, -1, 16], 
dtype=paddle.framework.get_default_dtype(), ) - seq_len = paddle.fluid.data("seq_len", [-1], dtype="int64") + seq_len = paddle.static.data("seq_len", [-1], dtype="int64") mask = paddle.static.nn.sequence_lod.sequence_mask( seq_len, dtype=paddle.get_default_dtype() ) @@ -245,12 +245,12 @@ class TestGRU(unittest.TestCase): with paddle.fluid.unique_name.guard(): with paddle.static.program_guard(mp, sp): - x_data = paddle.fluid.data( + x_data = paddle.static.data( "input", [-1, -1, 16], dtype=paddle.framework.get_default_dtype(), ) - init_h = paddle.fluid.data( + init_h = paddle.static.data( "init_h", [2 * self.num_directions, -1, 32], dtype=paddle.framework.get_default_dtype(), @@ -280,7 +280,7 @@ class TestGRU(unittest.TestCase): with paddle.fluid.unique_name.guard(): with paddle.static.program_guard(mp, sp): - x_data = paddle.fluid.data( + x_data = paddle.static.data( "input", [-1, -1, 16], dtype=paddle.framework.get_default_dtype(), @@ -312,12 +312,12 @@ class TestGRU(unittest.TestCase): with paddle.fluid.unique_name.guard(): with paddle.static.program_guard(mp, sp): - x_data = paddle.fluid.data( + x_data = paddle.static.data( "input", [-1, -1, 16], dtype=paddle.framework.get_default_dtype(), ) - seq_len = paddle.fluid.data("seq_len", [-1], dtype="int64") + seq_len = paddle.static.data("seq_len", [-1], dtype="int64") mask = paddle.static.nn.sequence_lod.sequence_mask( seq_len, dtype=paddle.get_default_dtype() ) @@ -401,17 +401,17 @@ class TestLSTM(unittest.TestCase): with paddle.fluid.unique_name.guard(): with paddle.static.program_guard(mp, sp): - x_data = paddle.fluid.data( + x_data = paddle.static.data( "input", [-1, -1, 16], dtype=paddle.framework.get_default_dtype(), ) - init_h = paddle.fluid.data( + init_h = paddle.static.data( "init_h", [2 * self.num_directions, -1, 32], dtype=paddle.framework.get_default_dtype(), ) - init_c = paddle.fluid.data( + init_c = paddle.static.data( "init_c", [2 * self.num_directions, -1, 32], dtype=paddle.framework.get_default_dtype(), @@ -442,7 +442,7 @@ class TestLSTM(unittest.TestCase): with paddle.fluid.unique_name.guard(): with paddle.static.program_guard(mp, sp): - x_data = paddle.fluid.data( + x_data = paddle.static.data( "input", [-1, -1, 16], dtype=paddle.framework.get_default_dtype(), @@ -475,12 +475,12 @@ class TestLSTM(unittest.TestCase): with paddle.fluid.unique_name.guard(): with paddle.static.program_guard(mp, sp): - x_data = paddle.fluid.data( + x_data = paddle.static.data( "input", [-1, -1, 16], dtype=paddle.framework.get_default_dtype(), ) - seq_len = paddle.fluid.data("seq_len", [-1], dtype="int64") + seq_len = paddle.static.data("seq_len", [-1], dtype="int64") mask = paddle.static.nn.sequence_lod.sequence_mask( seq_len, dtype=paddle.get_default_dtype() ) diff --git a/python/paddle/fluid/tests/unittests/sequence/test_sequence_pad_op.py b/python/paddle/fluid/tests/unittests/sequence/test_sequence_pad_op.py index 09f216fd5efbbfad970be90c51c2a63e9e96c5bc..bf10e2c1883893e8fe730bb6471b1156e5996db4 100644 --- a/python/paddle/fluid/tests/unittests/sequence/test_sequence_pad_op.py +++ b/python/paddle/fluid/tests/unittests/sequence/test_sequence_pad_op.py @@ -21,7 +21,6 @@ sys.path.append("../") from op_test import OpTest import paddle -import paddle.fluid as fluid import paddle.fluid.core as core @@ -185,7 +184,9 @@ class TestSequencePadOpError(unittest.TestCase): self.assertRaises(TypeError, test_dtype) def test_length_dtype(self): - x = fluid.data(name='x', shape=[10, 5], dtype='float32', lod_level=1) + x = paddle.static.data( + name='x', 
shape=[10, 5], dtype='float32', lod_level=1 + ) pad_value = paddle.assign(np.array([0.0], dtype=np.float32)) out, length = paddle.static.nn.sequence_lod.sequence_pad( diff --git a/python/paddle/fluid/tests/unittests/static_model_parallel_fused_attention.py b/python/paddle/fluid/tests/unittests/static_model_parallel_fused_attention.py index 37048d7cd256b377ba8be08df3ac8658eb22884e..dca8c28e4d3bcc39a3112d4a34630a368c32a9f5 100644 --- a/python/paddle/fluid/tests/unittests/static_model_parallel_fused_attention.py +++ b/python/paddle/fluid/tests/unittests/static_model_parallel_fused_attention.py @@ -111,7 +111,7 @@ class TestModelParallel(TestDistRunnerBase): def get_model(self, batch_size=2, use_dgc=False, dist_strategy=None): # Input data seq_len = 2 - data_in = fluid.data( + data_in = paddle.static.data( name='data_in', shape=[batch_size, seq_len, hidden], dtype=DTYPE ) diff --git a/python/paddle/fluid/tests/unittests/static_model_parallel_fused_feedforward.py b/python/paddle/fluid/tests/unittests/static_model_parallel_fused_feedforward.py index 4fca47635a1deeb756263756e5c089d1ae10b0d6..5a4c2cc5697d9b6c06daeb1971064100d98bc47b 100644 --- a/python/paddle/fluid/tests/unittests/static_model_parallel_fused_feedforward.py +++ b/python/paddle/fluid/tests/unittests/static_model_parallel_fused_feedforward.py @@ -102,7 +102,7 @@ class TestModelParallel(TestDistRunnerBase): def get_model(self, batch_size=2, use_dgc=False, dist_strategy=None): # Input data seq_len = 2 - data_in = fluid.data( + data_in = paddle.static.data( name='data_in', shape=[batch_size, seq_len, IN_SIZE], dtype=DTYPE ) diff --git a/python/paddle/fluid/tests/unittests/static_model_parallel_fused_multi_transformer.py b/python/paddle/fluid/tests/unittests/static_model_parallel_fused_multi_transformer.py index 9c863d6d3be8b04748f4ae1daf1a79fc327aeb29..8f14f26478ccc9b6e0030904d01945a6189be054 100644 --- a/python/paddle/fluid/tests/unittests/static_model_parallel_fused_multi_transformer.py +++ b/python/paddle/fluid/tests/unittests/static_model_parallel_fused_multi_transformer.py @@ -144,7 +144,7 @@ class TestModelParallel(TestDistRunnerBase): def get_model(self, batch_size=2, use_dgc=False, dist_strategy=None): # Input data seq_len = 2 - data_in = fluid.data( + data_in = paddle.static.data( name='data_in', shape=[batch_size, seq_len, hidden], dtype=DTYPE ) diff --git a/python/paddle/fluid/tests/unittests/test_activation_op.py b/python/paddle/fluid/tests/unittests/test_activation_op.py index 64e6ea606fbcca22c7b64331fba31f37746f60ca..ed9bea13b8f3bf1059633c532d56bcb3ddcefccd 100644 --- a/python/paddle/fluid/tests/unittests/test_activation_op.py +++ b/python/paddle/fluid/tests/unittests/test_activation_op.py @@ -181,7 +181,7 @@ class TestExpm1API(unittest.TestCase): def run(place): with paddle_static_guard(): with paddle.static.program_guard(paddle.static.Program()): - X = paddle.fluid.data('X', self.shape, dtype=self.dtype) + X = paddle.static.data('X', self.shape, dtype=self.dtype) out = paddle.expm1(X) exe = paddle.static.Executor(place) res = exe.run(feed={'X': self.x}) @@ -203,7 +203,7 @@ class TestExpm1API(unittest.TestCase): def test_errors(self): with paddle_static_guard(): with paddle.static.program_guard(paddle.static.Program()): - X = paddle.fluid.data('X', self.shape, dtype='int32') + X = paddle.static.data('X', self.shape, dtype='int32') self.assertRaises(TypeError, paddle.expm1, X) # The input dtype must be float16, float32, float64. 
@@ -357,7 +357,7 @@ class TestSiluAPI(unittest.TestCase): def test_static_api(self): with paddle_static_guard(): with paddle.static.program_guard(paddle.static.Program()): - x = paddle.fluid.data('X', [11, 17]) + x = paddle.static.data('X', [11, 17]) out1 = F.silu(x) m = paddle.nn.Silu() out2 = m(x) @@ -382,12 +382,12 @@ class TestSiluAPI(unittest.TestCase): # The input type must be Variable. self.assertRaises(TypeError, F.silu, 1) # The input dtype must be float16, float32, float64. - x_int32 = paddle.fluid.data( + x_int32 = paddle.static.data( name='x_int32', shape=[11, 17], dtype='int32' ) self.assertRaises(TypeError, F.silu, x_int32) # support the input dtype is float16 - x_fp16 = paddle.fluid.data( + x_fp16 = paddle.static.data( name='x_fp16', shape=[11, 17], dtype='float16' ) F.silu(x_fp16) @@ -432,7 +432,7 @@ class TestLogSigmoidAPI(unittest.TestCase): def test_static_api(self): with paddle_static_guard(): with paddle.static.program_guard(paddle.static.Program()): - x = paddle.fluid.data('X', [11, 17]) + x = paddle.static.data('X', [11, 17]) out1 = F.log_sigmoid(x) m = paddle.nn.LogSigmoid() out2 = m(x) @@ -457,12 +457,12 @@ class TestLogSigmoidAPI(unittest.TestCase): # The input type must be Variable. self.assertRaises(TypeError, F.log_sigmoid, 1) # The input dtype must be float16, float32, float64. - x_int32 = paddle.fluid.data( + x_int32 = paddle.static.data( name='x_int32', shape=[11, 17], dtype='int32' ) self.assertRaises(TypeError, F.log_sigmoid, x_int32) # support the input dtype is float16 - x_fp16 = paddle.fluid.data( + x_fp16 = paddle.static.data( name='x_fp16', shape=[11, 17], dtype='float16' ) F.log_sigmoid(x_fp16) @@ -518,7 +518,7 @@ class TestTanhAPI(unittest.TestCase): def test_static_api(self): with paddle_static_guard(): with paddle.static.program_guard(paddle.static.Program()): - x = paddle.fluid.data('X', [10, 12], self.dtype) + x = paddle.static.data('X', [10, 12], self.dtype) out1 = self.tanh(x) th = paddle.nn.Tanh() out2 = th(x) @@ -544,12 +544,12 @@ class TestTanhAPI(unittest.TestCase): # The input type must be Variable. self.assertRaises(TypeError, self.tanh, 1) # The input dtype must be float16, float32. - x_int32 = paddle.fluid.data( + x_int32 = paddle.static.data( name='x_int32', shape=[12, 10], dtype='int32' ) self.assertRaises(TypeError, self.tanh, x_int32) # support the input dtype is float16 - x_fp16 = paddle.fluid.data( + x_fp16 = paddle.static.data( name='x_fp16', shape=[12, 10], dtype='float16' ) self.tanh(x_fp16) @@ -688,12 +688,12 @@ class TestSinhOpError(unittest.TestCase): # The input type must be Variable. self.assertRaises(TypeError, paddle.sinh, 1) # The input dtype must be float16, float32, float64. - x_int32 = fluid.data( + x_int32 = paddle.static.data( name='x_int32', shape=[12, 10], dtype='int32' ) self.assertRaises(TypeError, paddle.sinh, x_int32) # support the input dtype is float16 - x_fp16 = fluid.data( + x_fp16 = paddle.static.data( name='x_fp16', shape=[12, 10], dtype='float16' ) paddle.sinh(x_fp16) @@ -779,12 +779,12 @@ class TestCoshOpError(unittest.TestCase): # The input type must be Variable. self.assertRaises(TypeError, paddle.cosh, 1) # The input dtype must be float16, float32, float64. 
- x_int32 = fluid.data( + x_int32 = paddle.static.data( name='x_int32', shape=[12, 10], dtype='int32' ) self.assertRaises(TypeError, paddle.cosh, x_int32) # support the input dtype is float16 - x_fp16 = fluid.data( + x_fp16 = paddle.static.data( name='x_fp16', shape=[12, 10], dtype='float16' ) paddle.cosh(x_fp16) @@ -834,7 +834,7 @@ class TestTanhshrinkAPI(unittest.TestCase): def test_static_api(self): with paddle_static_guard(): with paddle.static.program_guard(paddle.static.Program()): - x = paddle.fluid.data('X', self.x_np.shape, self.x_np.dtype) + x = paddle.static.data('X', self.x_np.shape, self.x_np.dtype) out1 = F.tanhshrink(x) tanhshrink = paddle.nn.Tanhshrink() out2 = tanhshrink(x) @@ -859,12 +859,12 @@ class TestTanhshrinkAPI(unittest.TestCase): # The input type must be Variable. self.assertRaises(TypeError, F.tanhshrink, 1) # The input dtype must be float16, float32, float64. - x_int32 = paddle.fluid.data( + x_int32 = paddle.static.data( name='x_int32', shape=[12, 10], dtype='int32' ) self.assertRaises(TypeError, F.tanhshrink, x_int32) # support the input dtype is float16 - x_fp16 = paddle.fluid.data( + x_fp16 = paddle.static.data( name='x_fp16', shape=[12, 10], dtype='float16' ) F.tanhshrink(x_fp16) @@ -932,7 +932,7 @@ class TestHardShrinkAPI(unittest.TestCase): def test_static_api(self): with paddle_static_guard(): with paddle.static.program_guard(paddle.static.Program()): - x = paddle.fluid.data('X', [10, 12]) + x = paddle.static.data('X', [10, 12], dtype="float32") out1 = F.hardshrink(x) hd = paddle.nn.Hardshrink() out2 = hd(x) @@ -964,12 +964,12 @@ class TestHardShrinkAPI(unittest.TestCase): # The input type must be Variable. self.assertRaises(TypeError, F.hardshrink, 1) # The input dtype must be float16, float32, float64. - x_int32 = paddle.fluid.data( + x_int32 = paddle.static.data( name='x_int32', shape=[12, 10], dtype='int32' ) self.assertRaises(TypeError, F.hardshrink, x_int32) # support the input dtype is float16 - x_fp16 = paddle.fluid.data( + x_fp16 = paddle.static.data( name='x_fp16', shape=[12, 10], dtype='float16' ) F.hardshrink(x_fp16) @@ -997,7 +997,7 @@ class TestHardtanhAPI(unittest.TestCase): def test_static_api(self): with paddle_static_guard(): with paddle.static.program_guard(paddle.static.Program()): - x = paddle.fluid.data('X', [10, 12]) + x = paddle.static.data('X', [10, 12], dtype="float32") out1 = F.hardtanh(x) m = paddle.nn.Hardtanh() out2 = m(x) @@ -1029,12 +1029,12 @@ class TestHardtanhAPI(unittest.TestCase): # The input type must be Variable. self.assertRaises(TypeError, F.hardtanh, 1) # The input dtype must be float16, float32, float64. - x_int32 = paddle.fluid.data( + x_int32 = paddle.static.data( name='x_int32', shape=[12, 10], dtype='int32' ) self.assertRaises(TypeError, F.hardtanh, x_int32) # support the input dtype is float16 - x_fp16 = paddle.fluid.data( + x_fp16 = paddle.static.data( name='x_fp16', shape=[12, 10], dtype='float16' ) F.hardtanh(x_fp16) @@ -1090,7 +1090,7 @@ class TestSoftshrinkAPI(unittest.TestCase): def test_static_api(self): with paddle_static_guard(): with paddle.static.program_guard(paddle.static.Program()): - x = paddle.fluid.data('X', self.x_np.shape, self.x_np.dtype) + x = paddle.static.data('X', self.x_np.shape, self.x_np.dtype) out1 = F.softshrink(x, self.threshold) softshrink = paddle.nn.Softshrink(self.threshold) out2 = softshrink(x) @@ -1115,17 +1115,17 @@ class TestSoftshrinkAPI(unittest.TestCase): # The input type must be Variable. 
self.assertRaises(TypeError, F.softshrink, 1) # The input dtype must be float16, float32, float64. - x_int32 = paddle.fluid.data( + x_int32 = paddle.static.data( name='x_int32', shape=[12, 10], dtype='int32' ) self.assertRaises(TypeError, F.softshrink, x_int32) # The threshold must be no less than zero - x_fp32 = paddle.fluid.data( + x_fp32 = paddle.static.data( name='x_fp32', shape=[12, 10], dtype='float32' ) self.assertRaises(ValueError, F.softshrink, x_fp32, -1.0) # support the input dtype is float16 - x_fp16 = paddle.fluid.data( + x_fp16 = paddle.static.data( name='x_fp16', shape=[12, 10], dtype='float16' ) F.softshrink(x_fp16) @@ -1845,7 +1845,7 @@ class TestReluAPI(unittest.TestCase): def test_static_api(self): with paddle_static_guard(): with paddle.static.program_guard(paddle.static.Program()): - x = paddle.fluid.data('X', [10, 12]) + x = paddle.static.data('X', [10, 12], dtype="float32") out1 = self.relu(x) m = paddle.nn.ReLU() out2 = m(x) @@ -1871,12 +1871,12 @@ class TestReluAPI(unittest.TestCase): # The input type must be Variable. self.assertRaises(TypeError, self.relu, 1) # The input dtype must be float16, float32, float64. - x_int32 = paddle.fluid.data( + x_int32 = paddle.static.data( name='x_int32', shape=[10, 12], dtype='int32' ) self.assertRaises(TypeError, self.relu, x_int32) # support the input dtype is float16 - x_fp16 = paddle.fluid.data( + x_fp16 = paddle.static.data( name='x_fp16', shape=[10, 12], dtype='float16' ) self.relu(x_fp16) @@ -1955,7 +1955,7 @@ class TestLeakyReluAPI(unittest.TestCase): def test_static_api(self): with paddle_static_guard(): with paddle.static.program_guard(paddle.static.Program()): - x = paddle.fluid.data('X', [10, 12]) + x = paddle.static.data('X', [10, 12], dtype="float32") out1 = F.leaky_relu(x) m = paddle.nn.LeakyReLU() out2 = m(x) @@ -1987,12 +1987,12 @@ class TestLeakyReluAPI(unittest.TestCase): # The input type must be Variable. self.assertRaises(TypeError, F.leaky_relu, 1) # The input dtype must be float16, float32, float64. - x_int32 = paddle.fluid.data( + x_int32 = paddle.static.data( name='x_int32', shape=[12, 10], dtype='int32' ) self.assertRaises(TypeError, F.leaky_relu, x_int32) # support the input dtype is float16 - x_fp16 = paddle.fluid.data( + x_fp16 = paddle.static.data( name='x_fp16', shape=[12, 10], dtype='float16' ) F.leaky_relu(x_fp16) @@ -2092,7 +2092,7 @@ class TestGELUAPI(unittest.TestCase): def test_static_api(self): with paddle_static_guard(): with paddle.static.program_guard(paddle.static.Program()): - x = paddle.fluid.data('X', [11, 17]) + x = paddle.static.data('X', [11, 17], dtype="float32") out1 = F.gelu(x) m = paddle.nn.GELU() out2 = m(x) @@ -2124,12 +2124,12 @@ class TestGELUAPI(unittest.TestCase): # The input type must be Variable. self.assertRaises(TypeError, F.gelu, 1) # The input dtype must be float16, float32, float64. 
- x_int32 = paddle.fluid.data( + x_int32 = paddle.static.data( name='x_int32', shape=[11, 17], dtype='int32' ) self.assertRaises(TypeError, F.gelu, x_int32) # support the input dtype is float16 - x_fp16 = paddle.fluid.data( + x_fp16 = paddle.static.data( name='x_fp16', shape=[11, 17], dtype='float16' ) F.gelu(x_fp16) @@ -2214,7 +2214,7 @@ class TestRelu6API(unittest.TestCase): def test_static_api(self): with paddle_static_guard(): with paddle.static.program_guard(paddle.static.Program()): - x = paddle.fluid.data('X', self.x_np.shape, self.x_np.dtype) + x = paddle.static.data('X', self.x_np.shape, self.x_np.dtype) out1 = F.relu6(x) relu6 = paddle.nn.ReLU6() out2 = relu6(x) @@ -2236,7 +2236,7 @@ class TestRelu6API(unittest.TestCase): def test_fluid_api(self): with paddle_static_guard(): with fluid.program_guard(fluid.Program()): - x = fluid.data('X', self.x_np.shape, self.x_np.dtype) + x = paddle.static.data('X', self.x_np.shape, self.x_np.dtype) out = paddle.nn.functional.relu6(x) exe = fluid.Executor(self.place) res = exe.run(feed={'X': self.x_np}, fetch_list=[out]) @@ -2249,12 +2249,12 @@ class TestRelu6API(unittest.TestCase): # The input type must be Variable. self.assertRaises(TypeError, F.relu6, 1) # The input dtype must be float16, float32, float64. - x_int32 = paddle.fluid.data( + x_int32 = paddle.static.data( name='x_int32', shape=[12, 10], dtype='int32' ) self.assertRaises(TypeError, F.relu6, x_int32) # support the input dtype is float16 - x_fp16 = paddle.fluid.data( + x_fp16 = paddle.static.data( name='x_fp16', shape=[12, 10], dtype='float16' ) F.relu6(x_fp16) @@ -2361,7 +2361,7 @@ class TestHardswishAPI(unittest.TestCase): def test_static_api(self): with paddle_static_guard(): with paddle.static.program_guard(paddle.static.Program()): - x = paddle.fluid.data('X', self.x_np.shape, self.x_np.dtype) + x = paddle.static.data('X', self.x_np.shape, self.x_np.dtype) out1 = F.hardswish(x) m = paddle.nn.Hardswish() out2 = m(x) @@ -2383,7 +2383,7 @@ class TestHardswishAPI(unittest.TestCase): def test_fluid_api(self): with paddle_static_guard(): with fluid.program_guard(fluid.Program()): - x = fluid.data('X', self.x_np.shape, self.x_np.dtype) + x = paddle.static.data('X', self.x_np.shape, self.x_np.dtype) out = paddle.nn.functional.hardswish(x) exe = fluid.Executor(self.place) res = exe.run(feed={'X': self.x_np}, fetch_list=[out]) @@ -2400,12 +2400,12 @@ class TestHardswishAPI(unittest.TestCase): # The input type must be Variable. self.assertRaises(TypeError, F.hardswish, 1) # The input dtype must be float16, float32, float64. - x_int32 = paddle.fluid.data( + x_int32 = paddle.static.data( name='x_int32', shape=[12, 10], dtype='int32' ) self.assertRaises(TypeError, F.hardswish, x_int32) # support the input dtype is float16 - x_fp16 = paddle.fluid.data( + x_fp16 = paddle.static.data( name='x_fp16', shape=[12, 10], dtype='float16' ) F.hardswish(x_fp16) @@ -2501,7 +2501,7 @@ class TestELUAPI(unittest.TestCase): def test_static_api(self): with paddle_static_guard(): with paddle.static.program_guard(paddle.static.Program()): - x = paddle.fluid.data('X', [10, 12]) + x = paddle.static.data('X', [10, 12], dtype="float32") out1 = self.elu(x) m = paddle.nn.ELU() out2 = m(x) @@ -2535,12 +2535,12 @@ class TestELUAPI(unittest.TestCase): # The input type must be Variable. self.assertRaises(TypeError, self.elu, 1) # The input dtype must be float16, float32, float64. 
- x_int32 = paddle.fluid.data( + x_int32 = paddle.static.data( name='x_int32', shape=[10, 12], dtype='int32' ) self.assertRaises(TypeError, self.elu, x_int32) # support the input dtype is float16 - x_fp16 = paddle.fluid.data( + x_fp16 = paddle.static.data( name='x_fp16', shape=[10, 12], dtype='float16' ) self.elu(x_fp16) @@ -2608,7 +2608,7 @@ class TestCELUAPI(unittest.TestCase): def test_static_api(self): with paddle_static_guard(): with paddle.static.program_guard(paddle.static.Program()): - x = paddle.fluid.data('X', [10, 12]) + x = paddle.static.data('X', [10, 12], dtype="float32") out1 = self.celu(x, 1.5) m = paddle.nn.CELU(1.5) out2 = m(x) @@ -2642,17 +2642,17 @@ class TestCELUAPI(unittest.TestCase): # The input type must be Variable. self.assertRaises(TypeError, self.celu, 1) # The input dtype must be float16, float32, float64. - x_int32 = paddle.fluid.data( + x_int32 = paddle.static.data( name='x_int32', shape=[10, 12], dtype='int32' ) self.assertRaises(TypeError, self.celu, x_int32) # The alpha must be not equal 0 - x_fp32 = paddle.fluid.data( + x_fp32 = paddle.static.data( name='x_fp32', shape=[10, 12], dtype='float32' ) self.assertRaises(ZeroDivisionError, F.celu, x_fp32, 0) # support the input dtype is float16 - x_fp16 = paddle.fluid.data( + x_fp16 = paddle.static.data( name='x_fp16', shape=[10, 12], dtype='float16' ) self.celu(x_fp16) @@ -3164,7 +3164,7 @@ class TestSTanhAPI(unittest.TestCase): def test_static_api(self): with paddle_static_guard(): with paddle.static.program_guard(paddle.static.Program()): - x = paddle.fluid.data('X', [10, 12]) + x = paddle.static.data('X', [10, 12]) out = paddle.stanh(x, self.scale_a, self.scale_b) exe = paddle.static.Executor(self.place) res = exe.run(feed={'X': self.x_np}, fetch_list=[out]) @@ -3182,7 +3182,7 @@ class TestSTanhAPI(unittest.TestCase): def test_fluid_api(self): with paddle_static_guard(): with fluid.program_guard(fluid.Program()): - x = fluid.data('X', [10, 12]) + x = paddle.static.data('X', [10, 12], dtype="float32") out = paddle.stanh(x, self.scale_a, self.scale_b) exe = fluid.Executor(self.place) res = exe.run(feed={'X': self.x_np}, fetch_list=[out]) @@ -3195,12 +3195,12 @@ class TestSTanhAPI(unittest.TestCase): # The input type must be Variable. self.assertRaises(TypeError, paddle.stanh, 1) # The input dtype must be float16, float32, float64. - x_int32 = paddle.fluid.data( + x_int32 = paddle.static.data( name='x_int32', shape=[12, 10], dtype='int32' ) self.assertRaises(TypeError, paddle.stanh, x_int32) # support the input dtype is float16 - x_fp16 = paddle.fluid.data( + x_fp16 = paddle.static.data( name='x_fp16', shape=[12, 10], dtype='float16' ) paddle.stanh(x_fp16) @@ -3303,7 +3303,7 @@ class TestSoftplusAPI(unittest.TestCase): def test_static_api(self): with paddle_static_guard(): with paddle.static.program_guard(paddle.static.Program()): - x = paddle.fluid.data('X', self.x_np.shape, self.x_np.dtype) + x = paddle.static.data('X', self.x_np.shape, self.x_np.dtype) out1 = F.softplus(x, self.beta, self.threshold) softplus = paddle.nn.Softplus(self.beta, self.threshold) out2 = softplus(x) @@ -3328,12 +3328,12 @@ class TestSoftplusAPI(unittest.TestCase): # The input type must be Variable. self.assertRaises(TypeError, F.softplus, 1) # The input dtype must be float16, float32, float64. 
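The CELU hunk above also keeps a `ZeroDivisionError` check for `alpha=0`. That guard follows directly from the standard CELU definition, which divides by alpha; a small numpy reference (the helper name is hypothetical, in the style of the `ref_*` helpers these tests use elsewhere):

.. code-block:: python

    import numpy as np

    def ref_celu(x, alpha=1.0):
        # celu(x) = max(0, x) + min(0, alpha * (exp(x / alpha) - 1)),
        # so alpha == 0 would divide by zero.
        return np.maximum(0.0, x) + np.minimum(
            0.0, alpha * (np.exp(x / alpha) - 1.0)
        )

    print(ref_celu(np.array([-1.5, 0.0, 2.0], dtype='float32'), alpha=1.5))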
- x_int32 = paddle.fluid.data( + x_int32 = paddle.static.data( name='x_int32', shape=[12, 10], dtype='int32' ) self.assertRaises(TypeError, F.softplus, x_int32) # support the input dtype is float16 - x_fp16 = paddle.fluid.data( + x_fp16 = paddle.static.data( name='x_fp16', shape=[12, 10], dtype='float16' ) F.softplus(x_fp16) @@ -3386,7 +3386,7 @@ class TestSoftsignAPI(unittest.TestCase): def test_static_api(self): with paddle_static_guard(): with paddle.static.program_guard(paddle.static.Program()): - x = paddle.fluid.data('X', self.x_np.shape, self.x_np.dtype) + x = paddle.static.data('X', self.x_np.shape, self.x_np.dtype) out1 = F.softsign(x) softsign = paddle.nn.Softsign() out2 = softsign(x) @@ -3411,12 +3411,12 @@ class TestSoftsignAPI(unittest.TestCase): # The input type must be Variable. self.assertRaises(TypeError, F.softsign, 1) # The input dtype must be float16, float32, float64. - x_int32 = paddle.fluid.data( + x_int32 = paddle.static.data( name='x_int32', shape=[12, 10], dtype='int32' ) self.assertRaises(TypeError, F.softsign, x_int32) # support the input dtype is float16 - x_fp16 = paddle.fluid.data( + x_fp16 = paddle.static.data( name='x_fp16', shape=[12, 10], dtype='float16' ) F.softsign(x_fp16) @@ -3474,7 +3474,7 @@ class TestThresholdedReluAPI(unittest.TestCase): def test_static_api(self): with paddle_static_guard(): with paddle.static.program_guard(paddle.static.Program()): - x = paddle.fluid.data('X', self.x_np.shape, self.x_np.dtype) + x = paddle.static.data('X', self.x_np.shape, self.x_np.dtype) out1 = F.thresholded_relu(x, self.threshold) thresholded_relu = paddle.nn.ThresholdedReLU(self.threshold) out2 = thresholded_relu(x) @@ -3499,12 +3499,12 @@ class TestThresholdedReluAPI(unittest.TestCase): # The input type must be Variable. self.assertRaises(TypeError, F.thresholded_relu, 1) # The input dtype must be float16, float32, float64. - x_int32 = paddle.fluid.data( + x_int32 = paddle.static.data( name='x_int32', shape=[12, 10], dtype='int32' ) self.assertRaises(TypeError, F.thresholded_relu, x_int32) # support the input dtype is float16 - x_fp16 = paddle.fluid.data( + x_fp16 = paddle.static.data( name='x_fp16', shape=[12, 10], dtype='float16' ) F.thresholded_relu(x_fp16) @@ -3597,13 +3597,14 @@ class TestHardsigmoidAPI(unittest.TestCase): def test_fluid_api(self): with paddle_static_guard(): with fluid.program_guard(fluid.Program()): - x = fluid.data('X', self.x_np.shape, self.x_np.dtype) + x = paddle.static.data('X', self.x_np.shape, self.x_np.dtype) out = paddle.nn.functional.hardsigmoid(x, slope=0.2) exe = fluid.Executor(self.place) res = exe.run(feed={'X': self.x_np}, fetch_list=[out]) out_ref = ref_hardsigmoid(self.x_np, 0.2, 0.5) np.testing.assert_allclose(out_ref, res[0], rtol=1e-05) + paddle.disable_static(self.place) x = paddle.to_tensor(self.x_np) out = paddle.nn.functional.hardsigmoid(x, slope=0.2) np.testing.assert_allclose(out_ref, out.numpy(), rtol=1e-05) @@ -3614,12 +3615,12 @@ class TestHardsigmoidAPI(unittest.TestCase): # The input type must be Variable. self.assertRaises(TypeError, F.hardsigmoid, 1) # The input dtype must be float16, float32, float64. 
- x_int32 = paddle.fluid.data( + x_int32 = paddle.static.data( name='x_int32', shape=[12, 10], dtype='int32' ) self.assertRaises(TypeError, F.hardsigmoid, x_int32) # support the input dtype is float16 - x_fp16 = paddle.fluid.data( + x_fp16 = paddle.static.data( name='x_fp16', shape=[12, 10], dtype='float16' ) F.hardsigmoid(x_fp16) @@ -3697,7 +3698,7 @@ class TestSwishAPI(unittest.TestCase): def test_fluid_api(self): with paddle_static_guard(): with fluid.program_guard(fluid.Program()): - x = fluid.data('X', self.x_np.shape, self.x_np.dtype) + x = paddle.static.data('X', self.x_np.shape, self.x_np.dtype) out = paddle.nn.functional.swish(x) exe = fluid.Executor(self.place) res = exe.run(feed={'X': self.x_np}, fetch_list=[out]) @@ -3710,12 +3711,12 @@ class TestSwishAPI(unittest.TestCase): # The input type must be Variable. self.assertRaises(TypeError, F.swish, 1) # The input dtype must be float16, float32, float64. - x_int32 = paddle.fluid.data( + x_int32 = paddle.static.data( name='x_int32', shape=[12, 10], dtype='int32' ) self.assertRaises(TypeError, F.swish, x_int32) # support the input dtype is float16 - x_fp16 = paddle.fluid.data( + x_fp16 = paddle.static.data( name='x_fp16', shape=[12, 10], dtype='float16' ) F.swish(x_fp16) @@ -3794,7 +3795,7 @@ class TestMishAPI(unittest.TestCase): def test_fluid_api(self): with paddle_static_guard(): with fluid.program_guard(fluid.Program()): - x = fluid.data('X', self.x_np.shape, self.x_np.dtype) + x = paddle.static.data('X', self.x_np.shape, self.x_np.dtype) out = paddle.nn.functional.mish(x) exe = fluid.Executor(self.place) res = exe.run(feed={'X': self.x_np}, fetch_list=[out]) @@ -3807,12 +3808,12 @@ class TestMishAPI(unittest.TestCase): # The input type must be Variable. self.assertRaises(TypeError, F.mish, 1) # The input dtype must be float16, float32, float64. 
- x_int32 = paddle.fluid.data( + x_int32 = paddle.static.data( name='x_int32', shape=[12, 10], dtype='int32' ) self.assertRaises(TypeError, F.mish, x_int32) # support the input dtype is float16 - x_fp16 = paddle.fluid.data( + x_fp16 = paddle.static.data( name='x_fp16', shape=[12, 10], dtype='float16' ) F.mish(x_fp16) diff --git a/python/paddle/fluid/tests/unittests/test_adam_op.py b/python/paddle/fluid/tests/unittests/test_adam_op.py index cd40d401c457f0c5f6d43145600c0bf510fc6ebc..1b1a01183abbe8ecf500527b9dcbba68370decc5 100644 --- a/python/paddle/fluid/tests/unittests/test_adam_op.py +++ b/python/paddle/fluid/tests/unittests/test_adam_op.py @@ -656,7 +656,7 @@ class TestAdamOpV2(unittest.TestCase): startup = fluid.Program() with fluid.program_guard(train_prog, startup): with fluid.unique_name.guard(): - data = fluid.data(name="data", shape=shape) + data = paddle.static.data(name="data", shape=shape) conv = paddle.static.nn.conv2d(data, 8, 3) loss = paddle.mean(conv) @@ -982,8 +982,8 @@ class TestAdamOptimizer(unittest.TestCase): trainable=True, ) with fluid.program_guard(main): - x = fluid.data(name='x', shape=[None, 13], dtype='float32') - y = fluid.data(name='y', shape=[None, 1], dtype='float32') + x = paddle.static.data(name='x', shape=[None, 13], dtype='float32') + y = paddle.static.data(name='y', shape=[None, 1], dtype='float32') y_predict = paddle.static.nn.fc(x, size=1, weight_attr=weight_attr) cost = paddle.nn.functional.square_error_cost( input=y_predict, label=y diff --git a/python/paddle/fluid/tests/unittests/test_adam_optimizer_fp32_fp64.py b/python/paddle/fluid/tests/unittests/test_adam_optimizer_fp32_fp64.py index bb863ad73e40a2cd632b5e5ac1ca9bc87371e005..cf9640988f10eb3c2849ea5e7ae1df82f736f3fe 100644 --- a/python/paddle/fluid/tests/unittests/test_adam_optimizer_fp32_fp64.py +++ b/python/paddle/fluid/tests/unittests/test_adam_optimizer_fp32_fp64.py @@ -30,8 +30,8 @@ def main_test_func(place, dtype): startup = fluid.Program() with fluid.program_guard(main, startup): with fluid.scope_guard(fluid.Scope()): - x = fluid.data(name='x', shape=[None, 13], dtype=dtype) - y = fluid.data(name='y', shape=[None, 1], dtype=dtype) + x = paddle.static.data(name='x', shape=[None, 13], dtype=dtype) + y = paddle.static.data(name='y', shape=[None, 1], dtype=dtype) y_predict = paddle.static.nn.fc(x, size=1) cost = paddle.nn.functional.square_error_cost( input=y_predict, label=y diff --git a/python/paddle/fluid/tests/unittests/test_adamax_api.py b/python/paddle/fluid/tests/unittests/test_adamax_api.py index 9aeeb9e0d4bb59c52c9cf2dc4c4abd3a1895208d..30eca5d305f7979b753e9b7eae18e3da5ab6f5ca 100644 --- a/python/paddle/fluid/tests/unittests/test_adamax_api.py +++ b/python/paddle/fluid/tests/unittests/test_adamax_api.py @@ -45,7 +45,7 @@ class TestAdamaxAPI(unittest.TestCase): startup = fluid.Program() with fluid.program_guard(train_prog, startup): with fluid.unique_name.guard(): - data = fluid.data(name="data", shape=shape) + data = paddle.static.data(name="data", shape=shape) conv = paddle.static.nn.conv2d(data, 8, 3) loss = paddle.mean(conv) beta1 = 0.85 diff --git a/python/paddle/fluid/tests/unittests/test_adamw_op.py b/python/paddle/fluid/tests/unittests/test_adamw_op.py index ead0a00ac119028788c981233ec0dc66274b0208..4037659b08c3875e842e055e576a15932fd7bc44 100644 --- a/python/paddle/fluid/tests/unittests/test_adamw_op.py +++ b/python/paddle/fluid/tests/unittests/test_adamw_op.py @@ -252,7 +252,7 @@ class TestAdamWOp(unittest.TestCase): startup = fluid.Program() with 
fluid.program_guard(train_prog, startup): with fluid.unique_name.guard(): - data = fluid.data(name="data", shape=shape) + data = paddle.static.data(name="data", shape=shape) conv = paddle.static.nn.conv2d(data, 8, 3) loss = paddle.mean(conv) @@ -767,8 +767,12 @@ class TestAdamWOpLayerwiseLR(TestAdamWOp): startup = fluid.Program() with fluid.program_guard(train_prog, startup): with fluid.unique_name.guard(): - x = fluid.data(name='x', shape=[None, 10], dtype='float32') - y = fluid.data(name='y', shape=[None, 1], dtype='float32') + x = paddle.static.data( + name='x', shape=[None, 10], dtype='float32' + ) + y = paddle.static.data( + name='y', shape=[None, 1], dtype='float32' + ) weight_attr1 = paddle.framework.ParamAttr(name="linear_0.w_0") bias_attr1 = paddle.framework.ParamAttr( diff --git a/python/paddle/fluid/tests/unittests/test_adaptive_avg_pool1d.py b/python/paddle/fluid/tests/unittests/test_adaptive_avg_pool1d.py index ab76a61017bfd9df1cb7f59d37c97b735457056b..de621bd2fb27f7d7c9ba25caf1321725b503689b 100644 --- a/python/paddle/fluid/tests/unittests/test_adaptive_avg_pool1d.py +++ b/python/paddle/fluid/tests/unittests/test_adaptive_avg_pool1d.py @@ -108,7 +108,9 @@ class TestPool1D_API(unittest.TestCase): def check_adaptive_avg_static_results(self, place): with fluid.program_guard(fluid.Program(), fluid.Program()): - input = fluid.data(name="input", shape=[2, 3, 32], dtype="float32") + input = paddle.static.data( + name="input", shape=[2, 3, 32], dtype="float32" + ) result = F.adaptive_avg_pool1d(input, output_size=16) input_np = np.random.random([2, 3, 32]).astype("float32") diff --git a/python/paddle/fluid/tests/unittests/test_adaptive_avg_pool2d.py b/python/paddle/fluid/tests/unittests/test_adaptive_avg_pool2d.py index f2dccd4d63bee70e3127d99dc9b33561187f7f6f..1566a4eb4add7b23da7e9bf4e25f34cbf266d701 100644 --- a/python/paddle/fluid/tests/unittests/test_adaptive_avg_pool2d.py +++ b/python/paddle/fluid/tests/unittests/test_adaptive_avg_pool2d.py @@ -121,7 +121,9 @@ class TestAdaptiveAvgPool2DAPI(unittest.TestCase): ): place = paddle.CUDAPlace(0) if use_cuda else paddle.CPUPlace() paddle.enable_static() - x = paddle.fluid.data(name="x", shape=[2, 3, 7, 7], dtype="float32") + x = paddle.static.data( + name="x", shape=[2, 3, 7, 7], dtype="float32" + ) out_1 = paddle.nn.functional.adaptive_avg_pool2d( x=x, output_size=[3, 3] @@ -230,7 +232,9 @@ class TestAdaptiveAvgPool2DClassAPI(unittest.TestCase): ): place = paddle.CUDAPlace(0) if use_cuda else paddle.CPUPlace() paddle.enable_static() - x = paddle.fluid.data(name="x", shape=[2, 3, 7, 7], dtype="float32") + x = paddle.static.data( + name="x", shape=[2, 3, 7, 7], dtype="float32" + ) adaptive_avg_pool = paddle.nn.AdaptiveAvgPool2D(output_size=[3, 3]) out_1 = adaptive_avg_pool(x=x) diff --git a/python/paddle/fluid/tests/unittests/test_adaptive_avg_pool3d.py b/python/paddle/fluid/tests/unittests/test_adaptive_avg_pool3d.py index c1e6a88668816630c4906bce77376823bd97facd..77d28e4848934e093b1aa24629e695f54a1b62df 100755 --- a/python/paddle/fluid/tests/unittests/test_adaptive_avg_pool3d.py +++ b/python/paddle/fluid/tests/unittests/test_adaptive_avg_pool3d.py @@ -141,7 +141,7 @@ class TestAdaptiveAvgPool3DAPI(unittest.TestCase): ): place = paddle.CUDAPlace(0) if use_cuda else paddle.CPUPlace() paddle.enable_static() - x = paddle.fluid.data( + x = paddle.static.data( name="x", shape=[2, 3, 5, 7, 7], dtype="float32" ) @@ -255,7 +255,7 @@ class TestAdaptiveAvgPool3DClassAPI(unittest.TestCase): ): place = paddle.CUDAPlace(0) if use_cuda else 
paddle.CPUPlace() paddle.enable_static() - x = paddle.fluid.data( + x = paddle.static.data( name="x", shape=[2, 3, 5, 7, 7], dtype="float32" ) diff --git a/python/paddle/fluid/tests/unittests/test_adaptive_max_pool1d.py b/python/paddle/fluid/tests/unittests/test_adaptive_max_pool1d.py index 6b249ea134dfb94b66084456c58407fb463a0a47..ea2cd317d0fca93725b230ed0a7fdb1b504fe3d2 100644 --- a/python/paddle/fluid/tests/unittests/test_adaptive_max_pool1d.py +++ b/python/paddle/fluid/tests/unittests/test_adaptive_max_pool1d.py @@ -95,7 +95,7 @@ class TestPool1D_API(unittest.TestCase): def check_adaptive_max_static_results(self, place): with paddle_static_guard(): with fluid.program_guard(fluid.Program(), fluid.Program()): - input = fluid.data( + input = paddle.static.data( name="input", shape=[2, 3, 32], dtype="float32" ) result = F.adaptive_max_pool1d(input, output_size=16) diff --git a/python/paddle/fluid/tests/unittests/test_adaptive_max_pool2d.py b/python/paddle/fluid/tests/unittests/test_adaptive_max_pool2d.py index b828b7becda62f893d1f2153e9e6afd882ef72cd..52a2edd6f97fd52b782c3fef8f2225a7861d0d1a 100644 --- a/python/paddle/fluid/tests/unittests/test_adaptive_max_pool2d.py +++ b/python/paddle/fluid/tests/unittests/test_adaptive_max_pool2d.py @@ -122,7 +122,9 @@ class TestAdaptiveMaxPool2DAPI(unittest.TestCase): ): place = paddle.CUDAPlace(0) if use_cuda else paddle.CPUPlace() paddle.enable_static() - x = paddle.fluid.data(name="x", shape=[2, 3, 7, 7], dtype="float32") + x = paddle.static.data( + name="x", shape=[2, 3, 7, 7], dtype="float32" + ) out_1 = paddle.nn.functional.adaptive_max_pool2d( x=x, output_size=[3, 3] @@ -225,7 +227,9 @@ class TestAdaptiveMaxPool2DClassAPI(unittest.TestCase): ): place = paddle.CUDAPlace(0) if use_cuda else paddle.CPUPlace() paddle.enable_static() - x = paddle.fluid.data(name="x", shape=[2, 3, 7, 7], dtype="float32") + x = paddle.static.data( + name="x", shape=[2, 3, 7, 7], dtype="float32" + ) adaptive_max_pool = paddle.nn.AdaptiveMaxPool2D(output_size=[3, 3]) out_1 = adaptive_max_pool(x=x) diff --git a/python/paddle/fluid/tests/unittests/test_adaptive_max_pool3d.py b/python/paddle/fluid/tests/unittests/test_adaptive_max_pool3d.py index 521dacd6399dfe38409207d9e5a29ab38cfdb3ef..4e6cb9864f0c652ee693ee8b66a9fa024615cdba 100755 --- a/python/paddle/fluid/tests/unittests/test_adaptive_max_pool3d.py +++ b/python/paddle/fluid/tests/unittests/test_adaptive_max_pool3d.py @@ -143,7 +143,7 @@ class TestAdaptiveMaxPool3DAPI(unittest.TestCase): ): place = paddle.CUDAPlace(0) if use_cuda else paddle.CPUPlace() paddle.enable_static() - x = paddle.fluid.data( + x = paddle.static.data( name="x", shape=[2, 3, 5, 7, 7], dtype="float32" ) @@ -248,7 +248,7 @@ class TestAdaptiveMaxPool3DClassAPI(unittest.TestCase): ): place = paddle.CUDAPlace(0) if use_cuda else paddle.CPUPlace() paddle.enable_static() - x = paddle.fluid.data( + x = paddle.static.data( name="x", shape=[2, 3, 5, 7, 7], dtype="float32" ) diff --git a/python/paddle/fluid/tests/unittests/test_add_reader_dependency.py b/python/paddle/fluid/tests/unittests/test_add_reader_dependency.py index 5cf3953cb8447ca6a430dedd4ff8519c209a70fc..d9fbc1550830dc974ec4c0e2298eb3c95dbc73f2 100644 --- a/python/paddle/fluid/tests/unittests/test_add_reader_dependency.py +++ b/python/paddle/fluid/tests/unittests/test_add_reader_dependency.py @@ -17,6 +17,7 @@ import unittest import numpy as np +import paddle import paddle.fluid as fluid from paddle.fluid.layer_helper import LayerHelper @@ -47,7 +48,9 @@ class 
TestAddReaderDependency(unittest.TestCase): def run_main(self, place): with fluid.program_guard(fluid.Program(), fluid.Program()): with fluid.scope_guard(fluid.Scope()): - tmp_in = fluid.data(name='tmp_in', dtype='float32', shape=[1]) + tmp_in = paddle.static.data( + name='tmp_in', dtype='float32', shape=[1] + ) loader = fluid.io.DataLoader.from_generator( feed_list=[tmp_in], capacity=16, @@ -62,7 +65,7 @@ class TestAddReaderDependency(unittest.TestCase): low=-1, high=1, size=[1] ).astype('float32'), - persistable_in = fluid.data( + persistable_in = paddle.static.data( name='persistable_in', dtype='float32', shape=[1] ) persistable_in.persistable = True diff --git a/python/paddle/fluid/tests/unittests/test_affine_grid_function.py b/python/paddle/fluid/tests/unittests/test_affine_grid_function.py index 16a67d765f870ef162e48b3f1d683b40f0a90355..20114a5304d5dbb1de1d74ac1ac94327c7523c79 100644 --- a/python/paddle/fluid/tests/unittests/test_affine_grid_function.py +++ b/python/paddle/fluid/tests/unittests/test_affine_grid_function.py @@ -51,7 +51,7 @@ class AffineGridTestCase(unittest.TestCase): start = fluid.Program() with fluid.unique_name.guard(): with fluid.program_guard(main, start): - theta_var = fluid.data( + theta_var = paddle.static.data( "input", self.theta_shape, dtype=self.dtype ) y_var = paddle.nn.functional.affine_grid( @@ -69,7 +69,7 @@ class AffineGridTestCase(unittest.TestCase): start = fluid.Program() with fluid.unique_name.guard(): with fluid.program_guard(main, start): - theta_var = fluid.data( + theta_var = paddle.static.data( "input", self.theta_shape, dtype=self.dtype ) y_var = F.affine_grid( diff --git a/python/paddle/fluid/tests/unittests/test_allclose_layer.py b/python/paddle/fluid/tests/unittests/test_allclose_layer.py index 459c3e366262e7f88bce80262e0218712deaf6da..b1d6e06db9e2a7742638fc7068938d88a787868d 100644 --- a/python/paddle/fluid/tests/unittests/test_allclose_layer.py +++ b/python/paddle/fluid/tests/unittests/test_allclose_layer.py @@ -24,8 +24,8 @@ paddle.enable_static() class TestAllcloseLayer(unittest.TestCase): def allclose_check(self, use_cuda, dtype='float32'): - a = fluid.data(name="a", shape=[2], dtype=dtype) - b = fluid.data(name="b", shape=[2], dtype=dtype) + a = paddle.static.data(name="a", shape=[2], dtype=dtype) + b = paddle.static.data(name="b", shape=[2], dtype=dtype) result = paddle.allclose( a, b, rtol=1e-05, atol=1e-08, equal_nan=False, name="ignore_nan" diff --git a/python/paddle/fluid/tests/unittests/test_allclose_op.py b/python/paddle/fluid/tests/unittests/test_allclose_op.py index 94e30621ef1377344457aa3d94a725abac99ef31..ef5cc942da8f8b708d04e38033f4441b9d9908b5 100644 --- a/python/paddle/fluid/tests/unittests/test_allclose_op.py +++ b/python/paddle/fluid/tests/unittests/test_allclose_op.py @@ -135,8 +135,10 @@ class TestAllcloseError(unittest.TestCase): with paddle.static.program_guard( paddle.static.Program(), paddle.static.Program() ): - x = paddle.fluid.data(name='x', shape=[10, 10], dtype='int32') - y = paddle.fluid.data(name='y', shape=[10, 10], dtype='float64') + x = paddle.static.data(name='x', shape=[10, 10], dtype='int32') + y = paddle.static.data( + name='y', shape=[10, 10], dtype='float64' + ) result = paddle.allclose(x, y) self.assertRaises(TypeError, test_x_dtype) @@ -145,15 +147,17 @@ class TestAllcloseError(unittest.TestCase): with paddle.static.program_guard( paddle.static.Program(), paddle.static.Program() ): - x = paddle.fluid.data(name='x', shape=[10, 10], dtype='float64') - y = paddle.fluid.data(name='y', shape=[10, 
10], dtype='int32') + x = paddle.static.data( + name='x', shape=[10, 10], dtype='float64' + ) + y = paddle.static.data(name='y', shape=[10, 10], dtype='int32') result = paddle.allclose(x, y) self.assertRaises(TypeError, test_y_dtype) def test_attr(self): - x = paddle.fluid.data(name='x', shape=[10, 10], dtype='float64') - y = paddle.fluid.data(name='y', shape=[10, 10], dtype='float64') + x = paddle.static.data(name='x', shape=[10, 10], dtype='float64') + y = paddle.static.data(name='y', shape=[10, 10], dtype='float64') def test_rtol(): result = paddle.allclose(x, y, rtol=True) diff --git a/python/paddle/fluid/tests/unittests/test_argsort_op.py b/python/paddle/fluid/tests/unittests/test_argsort_op.py index 2b993280af74c1a7e634f2079fa0b3a27d8c8844..3d3341a5d68e1abef8900a2d5b8f4221fcdcfc1f 100644 --- a/python/paddle/fluid/tests/unittests/test_argsort_op.py +++ b/python/paddle/fluid/tests/unittests/test_argsort_op.py @@ -392,7 +392,7 @@ class TestArgsort(unittest.TestCase): def test_api(self): with fluid.program_guard(fluid.Program()): - input = fluid.data( + input = paddle.static.data( name="input", shape=self.input_shape, dtype="float64" ) diff --git a/python/paddle/fluid/tests/unittests/test_ascend_trigger.py b/python/paddle/fluid/tests/unittests/test_ascend_trigger.py index 917ef5606f70d6dfeabf87c1b56d7e42614fc1e5..31b73a02b780da3c179119a9aaf4606fdfe9fb3f 100644 --- a/python/paddle/fluid/tests/unittests/test_ascend_trigger.py +++ b/python/paddle/fluid/tests/unittests/test_ascend_trigger.py @@ -26,8 +26,12 @@ class TestAscendTriggerOP(unittest.TestCase): program = fluid.Program() block = program.global_block() with fluid.program_guard(program): - x = fluid.data(name='x', shape=[1], dtype='int64', lod_level=0) - y = fluid.data(name='y', shape=[1], dtype='int64', lod_level=0) + x = paddle.static.data( + name='x', shape=[1], dtype='int64', lod_level=0 + ) + y = paddle.static.data( + name='y', shape=[1], dtype='int64', lod_level=0 + ) block.append_op( type="ascend_trigger", inputs={"FeedList": [x]}, diff --git a/python/paddle/fluid/tests/unittests/test_assign_op.py b/python/paddle/fluid/tests/unittests/test_assign_op.py index c610aeaa32d02f3dc067b78c98d23eca83da7684..5d35c82b9ae1962bb7d5133bee7f658641f993a9 100644 --- a/python/paddle/fluid/tests/unittests/test_assign_op.py +++ b/python/paddle/fluid/tests/unittests/test_assign_op.py @@ -76,7 +76,7 @@ class TestAssignOpWithLoDTensorArray(unittest.TestCase): main_program = Program() startup_program = Program() with program_guard(main_program): - x = fluid.data(name='x', shape=[100, 10], dtype='float32') + x = paddle.static.data(name='x', shape=[100, 10], dtype='float32') x.stop_gradient = False y = paddle.tensor.fill_constant( shape=[100, 10], dtype='float32', value=1 @@ -129,7 +129,7 @@ class TestAssignOApi(unittest.TestCase): main_program = Program() startup_program = Program() with program_guard(main_program): - x = fluid.data(name='x', shape=[100, 10], dtype='float32') + x = paddle.static.data(name='x', shape=[100, 10], dtype='float32') x.stop_gradient = False y = paddle.tensor.fill_constant( shape=[100, 10], dtype='float32', value=1 diff --git a/python/paddle/fluid/tests/unittests/test_assign_pos_op.py b/python/paddle/fluid/tests/unittests/test_assign_pos_op.py index 13cb3eccf80d8e929f1c8917396d8ccb4106baf5..ea517e2f9785c9eb327bb6ad0e92468112d262b7 100644 --- a/python/paddle/fluid/tests/unittests/test_assign_pos_op.py +++ b/python/paddle/fluid/tests/unittests/test_assign_pos_op.py @@ -105,8 +105,8 @@ class TestAssignPosAPI(unittest.TestCase): 
def test_api_static(self): paddle.enable_static() with paddle.static.program_guard(paddle.static.Program()): - x = paddle.fluid.data('x', self.x.shape, dtype="int64") - cum_count = paddle.fluid.data( + x = paddle.static.data('x', self.x.shape, dtype="int64") + cum_count = paddle.static.data( 'cum_count', self.cum_count.shape, dtype="int64" ) out = utils._assign_pos(x, cum_count) diff --git a/python/paddle/fluid/tests/unittests/test_atan2_op.py b/python/paddle/fluid/tests/unittests/test_atan2_op.py index 7dd3ceaca8b008f147c85193e65787e3487045fc..2e6272665f52d8b345889497b7c6e6f315b6b773 100644 --- a/python/paddle/fluid/tests/unittests/test_atan2_op.py +++ b/python/paddle/fluid/tests/unittests/test_atan2_op.py @@ -103,8 +103,8 @@ class TestAtan2API(unittest.TestCase): def run(place): with paddle.static.program_guard(paddle.static.Program()): - X1 = paddle.fluid.data('X1', self.shape, dtype=self.dtype) - X2 = paddle.fluid.data('X2', self.shape, dtype=self.dtype) + X1 = paddle.static.data('X1', self.shape, dtype=self.dtype) + X2 = paddle.static.data('X2', self.shape, dtype=self.dtype) out = paddle.atan2(X1, X2) exe = paddle.static.Executor(place) res = exe.run(feed={'X1': self.x1, 'X2': self.x2}) @@ -163,8 +163,8 @@ class TestAtan2Error(unittest.TestCase): paddle.enable_static() def test_mismatch_numel(): - X = paddle.fluid.data('X', (1,), dtype=np.float64) - Y = paddle.fluid.data('Y', (0,), dtype=np.float64) + X = paddle.static.data('X', (1,), dtype=np.float64) + Y = paddle.static.data('Y', (0,), dtype=np.float64) out = paddle.atan2(X, Y) self.assertRaises(ValueError, test_mismatch_numel) diff --git a/python/paddle/fluid/tests/unittests/test_auc_op.py b/python/paddle/fluid/tests/unittests/test_auc_op.py index 428e6f8ec76897f86c08c38e0251bbd1fecbc748..86057cbd083b6a49c0d5c9af8b0ab7d5aa7494cb 100644 --- a/python/paddle/fluid/tests/unittests/test_auc_op.py +++ b/python/paddle/fluid/tests/unittests/test_auc_op.py @@ -142,8 +142,12 @@ class TestAucOpError(unittest.TestCase): with fluid.program_guard(fluid.Program(), fluid.Program()): def test_type1(): - data1 = fluid.data(name="input1", shape=[-1, 2], dtype="int") - label1 = fluid.data(name="label1", shape=[-1], dtype="int") + data1 = paddle.static.data( + name="input1", shape=[-1, 2], dtype="int" + ) + label1 = paddle.static.data( + name="label1", shape=[-1], dtype="int" + ) ins_tag_w1 = paddle.static.data( name="label1", shape=[-1], dtype="int" ) @@ -154,10 +158,12 @@ class TestAucOpError(unittest.TestCase): self.assertRaises(TypeError, test_type1) def test_type2(): - data2 = fluid.data( + data2 = paddle.static.data( name="input2", shape=[-1, 2], dtype="float32" ) - label2 = fluid.data(name="label2", shape=[-1], dtype="float32") + label2 = paddle.static.data( + name="label2", shape=[-1], dtype="float32" + ) result2 = paddle.static.auc(input=data2, label=label2) self.assertRaises(TypeError, test_type2) diff --git a/python/paddle/fluid/tests/unittests/test_backward.py b/python/paddle/fluid/tests/unittests/test_backward.py index 41fa23658c4a07051704ab09292460baea32908d..9d5cdbcf542a208cf8bf9e62f4ffe6e6618c5606 100644 --- a/python/paddle/fluid/tests/unittests/test_backward.py +++ b/python/paddle/fluid/tests/unittests/test_backward.py @@ -226,10 +226,16 @@ class SimpleNet(BackwardNet): def build_model(self): # stop_gradient = True in input - x = fluid.data(name='x_no_grad', shape=self.shape, dtype='int64') - x2 = fluid.data(name='x2_no_grad', shape=self.shape, dtype='int64') - x3 = fluid.data(name='x3_no_grad', shape=self.shape, dtype='int64') - label 
= fluid.data( + x = paddle.static.data( + name='x_no_grad', shape=self.shape, dtype='int64' + ) + x2 = paddle.static.data( + name='x2_no_grad', shape=self.shape, dtype='int64' + ) + x3 = paddle.static.data( + name='x3_no_grad', shape=self.shape, dtype='int64' + ) + label = paddle.static.data( name='label_no_grad', shape=[self.shape[0], 1], dtype='float32' ) # shared layer, the grad of 'w2v' will be summed and renamed. @@ -283,7 +289,7 @@ class TestSimpleNet(TestBackward): class TestGradientsError(unittest.TestCase): def test_error(self): - x = fluid.data(name='x', shape=[None, 2, 8, 8], dtype='float32') + x = paddle.static.data(name='x', shape=[None, 2, 8, 8], dtype='float32') x.stop_gradient = False conv = paddle.static.nn.conv2d(x, 4, 1, bias_attr=False) y = F.relu(conv) @@ -309,7 +315,9 @@ class TestSimpleNetWithErrorParamList(TestBackward): with self.assertRaises(TypeError): self._check_error_param_list(self.net, "test") # The type of parameter_list's member must be Variable or str - test = fluid.data(name='test', shape=[None, 90], dtype='float32') + test = paddle.static.data( + name='test', shape=[None, 90], dtype='float32' + ) with self.assertRaises(TypeError): self._check_error_param_list(self.net, [test, "test", 3]) @@ -322,15 +330,17 @@ class TestSimpleNetWithErrorNoGradSet(TestBackward): with self.assertRaises(TypeError): self._check_error_no_grad_set(self.net, "test") # The type of no_grad_set's member must be Variable or str - test = fluid.data(name='test', shape=[None, 90], dtype='float32') + test = paddle.static.data( + name='test', shape=[None, 90], dtype='float32' + ) with self.assertRaises(TypeError): self._check_error_no_grad_set(self.net, [test, "test", 3]) class TestAppendBackwardWithError(unittest.TestCase): def build_net(self): - x = fluid.data(name='x', shape=[None, 13], dtype='int64') - y = fluid.data(name='y', shape=[None, 1], dtype='float32') + x = paddle.static.data(name='x', shape=[None, 13], dtype='int64') + y = paddle.static.data(name='y', shape=[None, 1], dtype='float32') x_emb = paddle.static.nn.embedding(x, size=[100, 256]) y_predict = paddle.static.nn.fc(x=x_emb, size=1, name='my_fc') loss = paddle.nn.functional.square_error_cost(input=y_predict, label=y) diff --git a/python/paddle/fluid/tests/unittests/test_batch_norm_op.py b/python/paddle/fluid/tests/unittests/test_batch_norm_op.py index 9d3c363dc040ef16418871793fbd6af421a2fcb8..b438fc5293ea6d7510c500baa643ffc7b8b13c18 100644 --- a/python/paddle/fluid/tests/unittests/test_batch_norm_op.py +++ b/python/paddle/fluid/tests/unittests/test_batch_norm_op.py @@ -829,7 +829,9 @@ class TestDygraphBatchNormTrainableStats(unittest.TestCase): is_test=is_test, trainable_statistics=trainable_statistics, ) - x = fluid.data(name='x', shape=x_np.shape, dtype=x_np.dtype) + x = paddle.static.data( + name='x', shape=x_np.shape, dtype=x_np.dtype + ) y = bn(x) exe.run(fluid.default_startup_program()) r = exe.run(feed={'x': x_np}, fetch_list=[y])[0] @@ -846,7 +848,7 @@ class TestDygraphBatchNormOpenReserveSpace(unittest.TestCase): with program_guard(Program(), Program()): paddle.enable_static() x = np.random.random(size=(3, 10, 3, 7)).astype('float32') - x = fluid.data(name='x', shape=x.shape, dtype=x.dtype) + x = paddle.static.data(name='x', shape=x.shape, dtype=x.dtype) # Set this FLAG, the BatchNorm API will pass "reserve_space" argument into batch_norm op. 
os.environ['FLAGS_cudnn_batchnorm_spatial_persistent'] = '1' batch_norm = paddle.nn.BatchNorm(7, data_layout="NHWC") diff --git a/python/paddle/fluid/tests/unittests/test_batch_norm_op_v2.py b/python/paddle/fluid/tests/unittests/test_batch_norm_op_v2.py index d6127ff5dd78a95ab18146c5954bd0366f3ff8d8..c3d6eba112bff933f68417512552ae4bd74cd3c3 100644 --- a/python/paddle/fluid/tests/unittests/test_batch_norm_op_v2.py +++ b/python/paddle/fluid/tests/unittests/test_batch_norm_op_v2.py @@ -226,7 +226,9 @@ class TestBatchNorm(unittest.TestCase): is_test=is_test, trainable_statistics=trainable_statistics, ) - x = fluid.data(name='x', shape=x_np.shape, dtype=x_np.dtype) + x = paddle.static.data( + name='x', shape=x_np.shape, dtype=x_np.dtype + ) y = bn(x) exe.run(fluid.default_startup_program()) r = exe.run(feed={'x': x_np}, fetch_list=[y])[0] @@ -235,7 +237,9 @@ class TestBatchNorm(unittest.TestCase): def compute_v2(x_np): with program_guard(Program(), Program()): bn = paddle.nn.BatchNorm2D(shape[1]) - x = fluid.data(name='x', shape=x_np.shape, dtype=x_np.dtype) + x = paddle.static.data( + name='x', shape=x_np.shape, dtype=x_np.dtype + ) y = bn(x) exe.run(fluid.default_startup_program()) r = exe.run(feed={'x': x_np}, fetch_list=[y])[0] diff --git a/python/paddle/fluid/tests/unittests/test_bce_loss.py b/python/paddle/fluid/tests/unittests/test_bce_loss.py index 0acb64502b98dc720a0f80d09581e254cd0c0938..84dfd8f8ef41ba17ce9890965cd5ff0983fc748d 100644 --- a/python/paddle/fluid/tests/unittests/test_bce_loss.py +++ b/python/paddle/fluid/tests/unittests/test_bce_loss.py @@ -27,14 +27,14 @@ def test_static_layer( prog = paddle.static.Program() startup_prog = paddle.static.Program() with paddle.static.program_guard(prog, startup_prog): - input = paddle.fluid.data( + input = paddle.static.data( name='input', shape=input_np.shape, dtype='float64' ) - label = paddle.fluid.data( + label = paddle.static.data( name='label', shape=label_np.shape, dtype='float64' ) if weight_np is not None: - weight = paddle.fluid.data( + weight = paddle.static.data( name='weight', shape=weight_np.shape, dtype='float64' ) bce_loss = paddle.nn.loss.BCELoss( @@ -60,14 +60,14 @@ def test_static_functional( prog = paddle.static.Program() startup_prog = paddle.static.Program() with paddle.static.program_guard(prog, startup_prog): - input = paddle.fluid.data( + input = paddle.static.data( name='input', shape=input_np.shape, dtype='float64' ) - label = paddle.fluid.data( + label = paddle.static.data( name='label', shape=label_np.shape, dtype='float64' ) if weight_np is not None: - weight = paddle.fluid.data( + weight = paddle.static.data( name='weight', shape=weight_np.shape, dtype='float64' ) res = paddle.nn.functional.binary_cross_entropy( diff --git a/python/paddle/fluid/tests/unittests/test_bce_with_logits_loss.py b/python/paddle/fluid/tests/unittests/test_bce_with_logits_loss.py index 91c818eba7517c74951ad4faa73a3a69c1d08118..42f0365635d5262f865d3b6f0b210170e4b5bf01 100644 --- a/python/paddle/fluid/tests/unittests/test_bce_with_logits_loss.py +++ b/python/paddle/fluid/tests/unittests/test_bce_with_logits_loss.py @@ -52,10 +52,10 @@ def test_static( prog = paddle.static.Program() startup_prog = paddle.static.Program() with paddle.static.program_guard(prog, startup_prog): - logit = paddle.fluid.data( + logit = paddle.static.data( name='logit', shape=logit_np.shape, dtype='float64' ) - label = paddle.fluid.data( + label = paddle.static.data( name='label', shape=label_np.shape, dtype='float64' ) feed_dict = {"logit": logit_np, "label": 
label_np} @@ -63,12 +63,12 @@ def test_static( pos_weight = None weight = None if pos_weight_np is not None: - pos_weight = paddle.fluid.data( + pos_weight = paddle.static.data( name='pos_weight', shape=pos_weight_np.shape, dtype='float64' ) feed_dict["pos_weight"] = pos_weight_np if weight_np is not None: - weight = paddle.fluid.data( + weight = paddle.static.data( name='weight', shape=weight_np.shape, dtype='float64' ) feed_dict["weight"] = weight_np diff --git a/python/paddle/fluid/tests/unittests/test_bicubic_interp_op.py b/python/paddle/fluid/tests/unittests/test_bicubic_interp_op.py index 7372755936d25d823813d2d44a20d373401a184a..a914459fa05cde7042c038117832dacbb7a889e9 100644 --- a/python/paddle/fluid/tests/unittests/test_bicubic_interp_op.py +++ b/python/paddle/fluid/tests/unittests/test_bicubic_interp_op.py @@ -296,16 +296,18 @@ class TestBicubicInterpOpAPI(unittest.TestCase): with fluid.program_guard(prog, startup_prog): - x = fluid.data(name="x", shape=[2, 3, 6, 6], dtype="float32") + x = paddle.static.data( + name="x", shape=[2, 3, 6, 6], dtype="float32" + ) - dim = fluid.data(name="dim", shape=[1], dtype="int32") - shape_tensor = fluid.data( + dim = paddle.static.data(name="dim", shape=[1], dtype="int32") + shape_tensor = paddle.static.data( name="shape_tensor", shape=[2], dtype="int32" ) - actual_size = fluid.data( + actual_size = paddle.static.data( name="actual_size", shape=[2], dtype="int32" ) - scale_tensor = fluid.data( + scale_tensor = paddle.static.data( name="scale_tensor", shape=[1], dtype="float32" ) @@ -372,37 +374,45 @@ class TestBicubicOpError(unittest.TestCase): def test_mode_type(): # mode must be "BILINEAR" "TRILINEAR" "NEAREST" "BICUBIC" - x = fluid.data(name="x", shape=[2, 3, 6, 6], dtype="float32") + x = paddle.static.data( + name="x", shape=[2, 3, 6, 6], dtype="float32" + ) out = interpolate( x, size=[12, 12], mode='UNKONWN', align_corners=False ) def test_input_shape(): - x = fluid.data(name="x", shape=[2], dtype="float32") + x = paddle.static.data(name="x", shape=[2], dtype="float32") out = interpolate( x, size=[12, 12], mode='BICUBIC', align_corners=False ) def test_size_shape(): - x = fluid.data(name="x", shape=[2, 3, 6, 6], dtype="float32") + x = paddle.static.data( + name="x", shape=[2, 3, 6, 6], dtype="float32" + ) out = interpolate( x, size=[12], mode='BICUBIC', align_corners=False ) def test_align_corcers(): - x = fluid.data(name="x", shape=[2, 3, 6, 6], dtype="float32") + x = paddle.static.data( + name="x", shape=[2, 3, 6, 6], dtype="float32" + ) interpolate(x, size=[12, 12], mode='BICUBIC', align_corners=3) def test_out_shape(): - x = fluid.data(name="x", shape=[2, 3, 6, 6], dtype="float32") + x = paddle.static.data( + name="x", shape=[2, 3, 6, 6], dtype="float32" + ) out = interpolate( x, size=[12], mode='bicubic', align_corners=False ) def test_attr_data_format(): # for 5-D input, data_format only can be NCDHW or NDHWC - input = fluid.data( + input = paddle.static.data( name="input", shape=[2, 3, 6, 9, 4], dtype="float32" ) out = interpolate( @@ -423,7 +433,9 @@ class TestBicubicOpError(unittest.TestCase): def test_scale_value(): # the scale must be greater than zero. 
- x = fluid.data(name="x", shape=[2, 3, 6, 6], dtype="float32") + x = paddle.static.data( + name="x", shape=[2, 3, 6, 6], dtype="float32" + ) out = interpolate( x, size=None, @@ -434,7 +446,7 @@ class TestBicubicOpError(unittest.TestCase): def test_attr_5D_input(): # for 5-D input, data_format only can be NCDHW or NDHWC - input = fluid.data( + input = paddle.static.data( name="input", shape=[2, 3, 6, 9, 4], dtype="float32" ) out = interpolate( @@ -446,7 +458,9 @@ class TestBicubicOpError(unittest.TestCase): def test_scale_type(): # the scale must be greater than zero. - x = fluid.data(name="x", shape=[2, 3, 6, 6], dtype="float32") + x = paddle.static.data( + name="x", shape=[2, 3, 6, 6], dtype="float32" + ) scale = fluid.create_lod_tensor( np.array([-1, 3, 5, 5]), [[1, 1, 1, 1]], fluid.CPUPlace() ) @@ -459,7 +473,9 @@ class TestBicubicOpError(unittest.TestCase): ) def test_align_mode(): - x = fluid.data(name="x", shape=[2, 3, 6, 6], dtype="float32") + x = paddle.static.data( + name="x", shape=[2, 3, 6, 6], dtype="float32" + ) out = interpolate( x, size=None, @@ -470,7 +486,9 @@ class TestBicubicOpError(unittest.TestCase): ) def test_outshape_and_scale(): - x = fluid.data(name="x", shape=[2, 3, 6, 6], dtype="float32") + x = paddle.static.data( + name="x", shape=[2, 3, 6, 6], dtype="float32" + ) out = interpolate( x, size=None, diff --git a/python/paddle/fluid/tests/unittests/test_bicubic_interp_v2_op.py b/python/paddle/fluid/tests/unittests/test_bicubic_interp_v2_op.py index 30b6cb684d6373ffbaf8e559dc46b540d08762f4..afbbe26a6ad782aff7a086f29100a9aae9ddb569 100644 --- a/python/paddle/fluid/tests/unittests/test_bicubic_interp_v2_op.py +++ b/python/paddle/fluid/tests/unittests/test_bicubic_interp_v2_op.py @@ -374,16 +374,18 @@ class TestBicubicInterpOpAPI(unittest.TestCase): with fluid.program_guard(prog, startup_prog): - x = fluid.data(name="x", shape=[2, 3, 6, 6], dtype="float32") + x = paddle.static.data( + name="x", shape=[2, 3, 6, 6], dtype="float32" + ) - dim = fluid.data(name="dim", shape=[1], dtype="int32") - shape_tensor = fluid.data( + dim = paddle.static.data(name="dim", shape=[1], dtype="int32") + shape_tensor = paddle.static.data( name="shape_tensor", shape=[2], dtype="int32" ) - actual_size = fluid.data( + actual_size = paddle.static.data( name="actual_size", shape=[2], dtype="int32" ) - scale_tensor = fluid.data( + scale_tensor = paddle.static.data( name="scale_tensor", shape=[1], dtype="float32" ) @@ -455,29 +457,35 @@ class TestBicubicOpError(unittest.TestCase): def test_mode_type(): # mode must be "BILINEAR" "TRILINEAR" "NEAREST" "BICUBIC" - x = fluid.data(name="x", shape=[2, 3, 6, 6], dtype="float32") + x = paddle.static.data( + name="x", shape=[2, 3, 6, 6], dtype="float32" + ) out = interpolate( x, size=[12, 12], mode='UNKONWN', align_corners=False ) def test_input_shape(): - x = fluid.data(name="x", shape=[2], dtype="float32") + x = paddle.static.data(name="x", shape=[2], dtype="float32") out = interpolate( x, size=[12, 12], mode='BICUBIC', align_corners=False ) def test_align_corcers(): - x = fluid.data(name="x", shape=[2, 3, 6, 6], dtype="float32") + x = paddle.static.data( + name="x", shape=[2, 3, 6, 6], dtype="float32" + ) interpolate(x, size=[12, 12], mode='BICUBIC', align_corners=3) def test_out_shape(): - x = fluid.data(name="x", shape=[2, 3, 6, 6], dtype="float32") + x = paddle.static.data( + name="x", shape=[2, 3, 6, 6], dtype="float32" + ) out = interpolate(x, size=[12], mode='bicubic', align_corners=False) def test_attr_data_format(): # for 5-D input, 
data_format only can be NCDHW or NDHWC - input = fluid.data( + input = paddle.static.data( name="input", shape=[2, 3, 6, 9, 4], dtype="float32" ) out = interpolate( @@ -495,7 +503,9 @@ class TestBicubicOpError(unittest.TestCase): def test_scale_value(): # the scale must be greater than zero. - x = fluid.data(name="x", shape=[2, 3, 6, 6], dtype="float32") + x = paddle.static.data( + name="x", shape=[2, 3, 6, 6], dtype="float32" + ) out = interpolate( x, size=None, @@ -506,7 +516,7 @@ class TestBicubicOpError(unittest.TestCase): def test_attr_5D_input(): # for 5-D input, data_format only can be NCDHW or NDHWC - input = fluid.data( + input = paddle.static.data( name="input", shape=[2, 3, 6, 9, 4], dtype="float32" ) out = interpolate( @@ -515,7 +525,9 @@ class TestBicubicOpError(unittest.TestCase): def test_scale_type(): # the scale must be greater than zero. - x = fluid.data(name="x", shape=[2, 3, 6, 6], dtype="float32") + x = paddle.static.data( + name="x", shape=[2, 3, 6, 6], dtype="float32" + ) scale = fluid.create_lod_tensor( np.array([-1, 3, 5, 5]), [[1, 1, 1, 1]], fluid.CPUPlace() ) @@ -528,7 +540,9 @@ class TestBicubicOpError(unittest.TestCase): ) def test_align_mode(): - x = fluid.data(name="x", shape=[2, 3, 6, 6], dtype="float32") + x = paddle.static.data( + name="x", shape=[2, 3, 6, 6], dtype="float32" + ) out = interpolate( x, size=None, @@ -539,7 +553,9 @@ class TestBicubicOpError(unittest.TestCase): ) def test_outshape_and_scale(): - x = fluid.data(name="x", shape=[2, 3, 6, 6], dtype="float32") + x = paddle.static.data( + name="x", shape=[2, 3, 6, 6], dtype="float32" + ) out = interpolate( x, size=None, @@ -549,7 +565,9 @@ class TestBicubicOpError(unittest.TestCase): ) def test_align_corners_and_nearest(): - x = fluid.data(name="x", shape=[2, 3, 6, 6], dtype="float32") + x = paddle.static.data( + name="x", shape=[2, 3, 6, 6], dtype="float32" + ) out = interpolate( x, size=None, @@ -559,7 +577,9 @@ class TestBicubicOpError(unittest.TestCase): ) def test_scale_shape(): - x = fluid.data(name="x", shape=[2, 3, 6, 6], dtype="float32") + x = paddle.static.data( + name="x", shape=[2, 3, 6, 6], dtype="float32" + ) out = interpolate( x, size=None, @@ -569,7 +589,9 @@ class TestBicubicOpError(unittest.TestCase): ) def test_scale_value_1(): - x = fluid.data(name="x", shape=[2, 3, 6, 6], dtype="float32") + x = paddle.static.data( + name="x", shape=[2, 3, 6, 6], dtype="float32" + ) out = interpolate( x, size=None, @@ -579,7 +601,9 @@ class TestBicubicOpError(unittest.TestCase): ) def test_size_and_scale(): - x = fluid.data(name="x", shape=[2, 3, 6, 6], dtype="float32") + x = paddle.static.data( + name="x", shape=[2, 3, 6, 6], dtype="float32" + ) out = interpolate( x, size=None, @@ -589,7 +613,9 @@ class TestBicubicOpError(unittest.TestCase): ) def test_size_and_scale2(): - x = fluid.data(name="input", shape=[2, 3, 6, 9, 4], dtype="float32") + x = paddle.static.data( + name="input", shape=[2, 3, 6, 9, 4], dtype="float32" + ) out = interpolate( x, size=[2, 2, 2], @@ -599,27 +625,37 @@ class TestBicubicOpError(unittest.TestCase): ) def test_size_type(): - x = fluid.data(name="x", shape=[2, 3, 6, 6], dtype="float32") + x = paddle.static.data( + name="x", shape=[2, 3, 6, 6], dtype="float32" + ) out = interpolate( x, size={2, 2}, mode='bicubic', align_corners=False ) def test_size_length(): - x = fluid.data(name="x", shape=[2, 3, 6, 6], dtype="float32") + x = paddle.static.data( + name="x", shape=[2, 3, 6, 6], dtype="float32" + ) out = interpolate(x, size=[2], mode='bicubic', align_corners=False) 
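All of the cases above are negative tests of `interpolate`; for contrast, a valid bicubic call supplies exactly one of `size` or `scale_factor` on a 4-D NCHW input. A minimal dygraph sketch:

.. code-block:: python

    import paddle

    x = paddle.rand([2, 3, 6, 6])  # NCHW
    out = paddle.nn.functional.interpolate(
        x, size=[12, 12], mode='bicubic', align_corners=False
    )
    print(out.shape)  # [2, 3, 12, 12]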
def test_size_tensor_ndim(): - x = fluid.data(name="x", shape=[2, 3, 6, 6], dtype="float32") + x = paddle.static.data( + name="x", shape=[2, 3, 6, 6], dtype="float32" + ) size = paddle.to_tensor(np.array([[2, 2]])) out = interpolate(x, size=size, mode='bicubic', align_corners=False) def test_size_tensor_length(): - x = fluid.data(name="x", shape=[2, 3, 6, 6], dtype="float32") + x = paddle.static.data( + name="x", shape=[2, 3, 6, 6], dtype="float32" + ) size = paddle.to_tensor(np.array([2])) out = interpolate(x, size=size, mode='bicubic', align_corners=False) def test_input_shape_1(): - x = fluid.data(name="x", shape=[2, 1, 0, 0], dtype="float32") + x = paddle.static.data( + name="x", shape=[2, 1, 0, 0], dtype="float32" + ) out = interpolate( x, size=[3, 3], mode="bicubic", align_corners=False ) diff --git a/python/paddle/fluid/tests/unittests/test_bilateral_slice_op.py b/python/paddle/fluid/tests/unittests/test_bilateral_slice_op.py index ca919eebf6b83910443647d2abc07cc61edddf61..3ac5058c910c55b3faaf4618c8b883af173d30f1 100644 --- a/python/paddle/fluid/tests/unittests/test_bilateral_slice_op.py +++ b/python/paddle/fluid/tests/unittests/test_bilateral_slice_op.py @@ -193,13 +193,13 @@ class TestBilateralSliceOp1(TestBilateralSliceOp): class TestBilateralSliceApi(unittest.TestCase): def test_api(self): with paddle_static_guard(): - x = paddle.fluid.data( + x = paddle.static.data( name='x', shape=[None, 3, 25, 15], dtype='float32' ) - guide = paddle.fluid.data( + guide = paddle.static.data( name='guide', shape=[None, 25, 15], dtype='float32' ) - grid = paddle.fluid.data( + grid = paddle.static.data( name='grid', shape=[None, None, 8, 5, 3], dtype='float32' ) paddle.fluid.contrib.layers.bilateral_slice(x, guide, grid, False) diff --git a/python/paddle/fluid/tests/unittests/test_bilinear_api.py b/python/paddle/fluid/tests/unittests/test_bilinear_api.py index 925a45f3bada424bf14bcf440e8c42115f7eaf44..118d8cd358310beed19a39c19e50e3f972184f92 100644 --- a/python/paddle/fluid/tests/unittests/test_bilinear_api.py +++ b/python/paddle/fluid/tests/unittests/test_bilinear_api.py @@ -32,8 +32,8 @@ class TestBilinearAPI(unittest.TestCase): place = core.CPUPlace() exe = fluid.Executor(place) - data1 = fluid.data(name='X1', shape=[5, 5], dtype='float32') - data2 = fluid.data(name='X2', shape=[5, 4], dtype='float32') + data1 = paddle.static.data(name='X1', shape=[5, 5], dtype='float32') + data2 = paddle.static.data(name='X2', shape=[5, 4], dtype='float32') layer1 = np.random.random((5, 5)).astype('float32') layer2 = np.random.random((5, 4)).astype('float32') diff --git a/python/paddle/fluid/tests/unittests/test_bilinear_tensor_product_op.py b/python/paddle/fluid/tests/unittests/test_bilinear_tensor_product_op.py index 27fd48b653280c79a82250a2b705cec2be7f30b9..b18db80ede9e27cf979044a35e440fd48d97f972 100644 --- a/python/paddle/fluid/tests/unittests/test_bilinear_tensor_product_op.py +++ b/python/paddle/fluid/tests/unittests/test_bilinear_tensor_product_op.py @@ -32,8 +32,12 @@ class TestDygraphBilinearTensorProductAPIError(unittest.TestCase): ) self.assertRaises(TypeError, layer, x0) # the input dtype must be float32 or float64 - x1 = fluid.data(name='x1', shape=[-1, 5], dtype="float16") - x2 = fluid.data(name='x2', shape=[-1, 4], dtype="float32") + x1 = paddle.static.data( + name='x1', shape=[-1, 5], dtype="float16" + ) + x2 = paddle.static.data( + name='x2', shape=[-1, 4], dtype="float32" + ) self.assertRaises(TypeError, layer, x1, x2) # the dimensions of x and y must be 2 paddle.enable_static() diff --git 
a/python/paddle/fluid/tests/unittests/test_bincount_op.py b/python/paddle/fluid/tests/unittests/test_bincount_op.py index 0e189e82c918632d3c63efd58a84f328b77b2381..6585f8f5bd1736a3e1a78c7310dffb890ae109a5 100644 --- a/python/paddle/fluid/tests/unittests/test_bincount_op.py +++ b/python/paddle/fluid/tests/unittests/test_bincount_op.py @@ -34,8 +34,10 @@ class TestBincountOpAPI(unittest.TestCase): startup_program = fluid.Program() train_program = fluid.Program() with fluid.program_guard(train_program, startup_program): - inputs = fluid.data(name='input', dtype='int64', shape=[7]) - weights = fluid.data(name='weights', dtype='int64', shape=[7]) + inputs = paddle.static.data(name='input', dtype='int64', shape=[7]) + weights = paddle.static.data( + name='weights', dtype='int64', shape=[7] + ) output = paddle.bincount(inputs, weights=weights) place = fluid.CPUPlace() if fluid.core.is_compiled_with_cuda(): diff --git a/python/paddle/fluid/tests/unittests/test_calc_gradient.py b/python/paddle/fluid/tests/unittests/test_calc_gradient.py index 38a1284f0ae4a06646d6725d5fb72f6158d9cd65..f244a5db0b7c60be7756ae94d0c73ba5c89f673c 100644 --- a/python/paddle/fluid/tests/unittests/test_calc_gradient.py +++ b/python/paddle/fluid/tests/unittests/test_calc_gradient.py @@ -86,7 +86,7 @@ class TestDoubleGrad(unittest.TestCase): class TestGradientWithPrune(unittest.TestCase): def test_prune(self): with paddle.fluid.scope_guard(paddle.static.Scope()): - x = fluid.data(name='x', shape=[3], dtype='float32') + x = paddle.static.data(name='x', shape=[3], dtype='float32') x.stop_gradient = False x1, x2, x3 = paddle.split(x, axis=0, num_or_sections=3) y = x1 * 2 diff --git a/python/paddle/fluid/tests/unittests/test_case.py b/python/paddle/fluid/tests/unittests/test_case.py index d0c6d2f9837708b6c38b5daf01467c72819dd71b..fd6be2ca851f751ebd920d7b77c6c6931f1871e9 100644 --- a/python/paddle/fluid/tests/unittests/test_case.py +++ b/python/paddle/fluid/tests/unittests/test_case.py @@ -602,14 +602,16 @@ class TestMutiTask(unittest.TestCase): INPUT_SIZE = 784 EPOCH_NUM = 2 - x = fluid.data( + x = paddle.static.data( name='x', shape=[BATCH_SIZE, INPUT_SIZE], dtype='float32' ) - y = fluid.data( + y = paddle.static.data( name='y', shape=[BATCH_SIZE, INPUT_SIZE], dtype='float32' ) - switch_id = fluid.data(name='switch_id', shape=[1], dtype='int32') + switch_id = paddle.static.data( + name='switch_id', shape=[1], dtype='int32' + ) one = paddle.tensor.fill_constant(shape=[1], dtype='int32', value=1) adam = optimizer.Adam(learning_rate=0.001) diff --git a/python/paddle/fluid/tests/unittests/test_channel_shuffle.py b/python/paddle/fluid/tests/unittests/test_channel_shuffle.py index 5faeed36dae19e6e43747cb627551ba8180720af..14565c4094a1303887fd47105c296b0a8d2cd519 100644 --- a/python/paddle/fluid/tests/unittests/test_channel_shuffle.py +++ b/python/paddle/fluid/tests/unittests/test_channel_shuffle.py @@ -92,10 +92,10 @@ class TestChannelShuffleAPI(unittest.TestCase): place = paddle.CUDAPlace(0) if use_cuda else paddle.CPUPlace() paddle.enable_static() - x_1 = paddle.fluid.data( + x_1 = paddle.static.data( name="x", shape=[2, 9, 4, 4], dtype="float64" ) - x_2 = paddle.fluid.data( + x_2 = paddle.static.data( name="x2", shape=[2, 4, 4, 9], dtype="float64" ) out_1 = F.channel_shuffle(x_1, 3) @@ -127,10 +127,10 @@ class TestChannelShuffleAPI(unittest.TestCase): place = paddle.CUDAPlace(0) if use_cuda else paddle.CPUPlace() paddle.enable_static() - x_1 = paddle.fluid.data( + x_1 = paddle.static.data( name="x", shape=[2, 9, 4, 4], 
dtype="float64" ) - x_2 = paddle.fluid.data( + x_2 = paddle.static.data( name="x2", shape=[2, 4, 4, 9], dtype="float64" ) # init instance diff --git a/python/paddle/fluid/tests/unittests/test_cholesky_op.py b/python/paddle/fluid/tests/unittests/test_cholesky_op.py index 764fd79cf5db294107189546a20354dc9666681c..9859e8da267104da5f7737b8022891fcd81c2d0e 100644 --- a/python/paddle/fluid/tests/unittests/test_cholesky_op.py +++ b/python/paddle/fluid/tests/unittests/test_cholesky_op.py @@ -116,7 +116,9 @@ class TestCholeskySingularAPI(unittest.TestCase): def check_static_result(self, place, with_out=False): with fluid.program_guard(fluid.Program(), fluid.Program()): - input = fluid.data(name="input", shape=[4, 4], dtype="float64") + input = paddle.static.data( + name="input", shape=[4, 4], dtype="float64" + ) result = paddle.cholesky(input) input_np = np.zeros([4, 4]).astype("float64") diff --git a/python/paddle/fluid/tests/unittests/test_cholesky_solve_op.py b/python/paddle/fluid/tests/unittests/test_cholesky_solve_op.py index 27334f12a86f8e99a2fc2999c91598a7ccbf69d7..47d69ff7d26a848421e6aa90e4b2c63ea677c7cd 100644 --- a/python/paddle/fluid/tests/unittests/test_cholesky_solve_op.py +++ b/python/paddle/fluid/tests/unittests/test_cholesky_solve_op.py @@ -172,8 +172,8 @@ class TestCholeskySolveAPI(unittest.TestCase): def check_static_result(self, place): paddle.enable_static() with fluid.program_guard(fluid.Program(), fluid.Program()): - x = fluid.data(name="x", shape=[10, 2], dtype=self.dtype) - y = fluid.data(name="y", shape=[10, 10], dtype=self.dtype) + x = paddle.static.data(name="x", shape=[10, 2], dtype=self.dtype) + y = paddle.static.data(name="y", shape=[10, 10], dtype=self.dtype) z = paddle.linalg.cholesky_solve(x, y, upper=self.upper) x_np = np.random.random([10, 2]).astype(self.dtype) @@ -252,31 +252,31 @@ class TestCholeskySolveOpError(unittest.TestCase): self.assertRaises(TypeError, paddle.linalg.cholesky_solve, x1, y1) # The data type of input must be float32 or float64. - x2 = fluid.data(name="x2", shape=[30, 30], dtype="bool") - y2 = fluid.data(name="y2", shape=[30, 10], dtype="bool") + x2 = paddle.static.data(name="x2", shape=[30, 30], dtype="bool") + y2 = paddle.static.data(name="y2", shape=[30, 10], dtype="bool") self.assertRaises(TypeError, paddle.linalg.cholesky_solve, x2, y2) - x3 = fluid.data(name="x3", shape=[30, 30], dtype="int32") - y3 = fluid.data(name="y3", shape=[30, 10], dtype="int32") + x3 = paddle.static.data(name="x3", shape=[30, 30], dtype="int32") + y3 = paddle.static.data(name="y3", shape=[30, 10], dtype="int32") self.assertRaises(TypeError, paddle.linalg.cholesky_solve, x3, y3) - x4 = fluid.data(name="x4", shape=[30, 30], dtype="float16") - y4 = fluid.data(name="y4", shape=[30, 10], dtype="float16") + x4 = paddle.static.data(name="x4", shape=[30, 30], dtype="float16") + y4 = paddle.static.data(name="y4", shape=[30, 10], dtype="float16") self.assertRaises(TypeError, paddle.linalg.cholesky_solve, x4, y4) # The number of dimensions of input'X must be >= 2. - x5 = fluid.data(name="x5", shape=[30], dtype="float64") - y5 = fluid.data(name="y5", shape=[30, 30], dtype="float64") + x5 = paddle.static.data(name="x5", shape=[30], dtype="float64") + y5 = paddle.static.data(name="y5", shape=[30, 30], dtype="float64") self.assertRaises(ValueError, paddle.linalg.cholesky_solve, x5, y5) # The number of dimensions of input'Y must be >= 2. 
- x6 = fluid.data(name="x6", shape=[30, 30], dtype="float64") - y6 = fluid.data(name="y6", shape=[30], dtype="float64") + x6 = paddle.static.data(name="x6", shape=[30, 30], dtype="float64") + y6 = paddle.static.data(name="y6", shape=[30], dtype="float64") self.assertRaises(ValueError, paddle.linalg.cholesky_solve, x6, y6) # The inner-most 2 dimensions of input'X should be equal to each other - x7 = fluid.data(name="x7", shape=[2, 3, 4], dtype="float64") - y7 = fluid.data(name="y7", shape=[2, 4, 3], dtype="float64") + x7 = paddle.static.data(name="x7", shape=[2, 3, 4], dtype="float64") + y7 = paddle.static.data(name="y7", shape=[2, 4, 3], dtype="float64") self.assertRaises(ValueError, paddle.linalg.cholesky_solve, x7, y7) diff --git a/python/paddle/fluid/tests/unittests/test_chunk_op.py b/python/paddle/fluid/tests/unittests/test_chunk_op.py index f6bc1e4f03b19bbab930052f912580a2ca7ae73b..6c9d5004c9279e4b1653abe1637a7e22fd31b1ef 100644 --- a/python/paddle/fluid/tests/unittests/test_chunk_op.py +++ b/python/paddle/fluid/tests/unittests/test_chunk_op.py @@ -26,28 +26,28 @@ class TestChunkOpError(unittest.TestCase): with program_guard(Program(), Program()): # The type of axis in chunk_op should be int or Variable. def test_axis_type(): - x1 = paddle.fluid.data(shape=[4], dtype='float16', name='x3') + x1 = paddle.static.data(shape=[4], dtype='float16', name='x3') paddle.chunk(x=x1, chunks=2, axis=3.2) self.assertRaises(TypeError, test_axis_type) # The type of axis in chunk op should be int or Variable. def test_axis_variable_type(): - x2 = paddle.fluid.data(shape=[4], dtype='float16', name='x9') - x3 = paddle.fluid.data(shape=[1], dtype='float16', name='x10') + x2 = paddle.static.data(shape=[4], dtype='float16', name='x9') + x3 = paddle.static.data(shape=[1], dtype='float16', name='x10') paddle.chunk(input=x2, chunks=2, axis=x3) self.assertRaises(TypeError, test_axis_variable_type) # The type of num_or_sections in chunk_op should be int, tuple or list. 
def test_chunks_type(): - x4 = paddle.fluid.data(shape=[4], dtype='float16', name='x4') + x4 = paddle.static.data(shape=[4], dtype='float16', name='x4') paddle.chunk(input=x4, chunks=2.1, axis=3) self.assertRaises(TypeError, test_chunks_type) def test_axis_type_tensor(): - x5 = paddle.fluid.data(shape=[4], dtype='float16', name='x6') + x5 = paddle.static.data(shape=[4], dtype='float16', name='x6') paddle.chunk(input=x5, chunks=2, axis=3.2) self.assertRaises(TypeError, test_axis_type_tensor) @@ -64,8 +64,10 @@ class TestChunkOpError(unittest.TestCase): class API_TestChunk(unittest.TestCase): def test_out(self): with fluid.program_guard(fluid.Program(), fluid.Program()): - data1 = paddle.fluid.data('data1', shape=[4, 6, 6], dtype='float64') - data2 = paddle.fluid.data('data2', shape=[1], dtype='int32') + data1 = paddle.static.data( + 'data1', shape=[4, 6, 6], dtype='float64' + ) + data2 = paddle.static.data('data2', shape=[1], dtype='int32') x0, x1, x2 = paddle.chunk(data1, chunks=3, axis=data2) place = paddle.CPUPlace() exe = paddle.static.Executor(place) @@ -83,7 +85,9 @@ class API_TestChunk(unittest.TestCase): class API_TestChunk1(unittest.TestCase): def test_out(self): with fluid.program_guard(fluid.Program(), fluid.Program()): - data1 = paddle.fluid.data('data1', shape=[4, 6, 6], dtype='float64') + data1 = paddle.static.data( + 'data1', shape=[4, 6, 6], dtype='float64' + ) x0, x1, x2 = paddle.chunk(data1, chunks=3, axis=2) place = paddle.CPUPlace() exe = paddle.static.Executor(place) diff --git a/python/paddle/fluid/tests/unittests/test_clip_op.py b/python/paddle/fluid/tests/unittests/test_clip_op.py index d5f5bc9f195ccc5459f38bb381cdd99261844e34..b8e3d150e875176e03f04d8f50e7b47fd454dd58 100644 --- a/python/paddle/fluid/tests/unittests/test_clip_op.py +++ b/python/paddle/fluid/tests/unittests/test_clip_op.py @@ -142,9 +142,11 @@ class TestClipAPI(unittest.TestCase): paddle.enable_static() data_shape = [1, 9, 9, 4] data = np.random.random(data_shape).astype('float32') - images = fluid.data(name='image', shape=data_shape, dtype='float32') - min = fluid.data(name='min', shape=[1], dtype='float32') - max = fluid.data(name='max', shape=[1], dtype='float32') + images = paddle.static.data( + name='image', shape=data_shape, dtype='float32' + ) + min = paddle.static.data(name='min', shape=[1], dtype='float32') + max = paddle.static.data(name='max', shape=[1], dtype='float32') place = ( fluid.CUDAPlace(0) @@ -292,8 +294,8 @@ class TestClipAPI(unittest.TestCase): def test_errors(self): paddle.enable_static() - x1 = fluid.data(name='x1', shape=[1], dtype="int16") - x2 = fluid.data(name='x2', shape=[1], dtype="int8") + x1 = paddle.static.data(name='x1', shape=[1], dtype="int16") + x2 = paddle.static.data(name='x2', shape=[1], dtype="int8") self.assertRaises(TypeError, paddle.clip, x=x1, min=0.2, max=0.8) self.assertRaises(TypeError, paddle.clip, x=x2, min=0.2, max=0.8) paddle.disable_static() diff --git a/python/paddle/fluid/tests/unittests/test_compare_op.py b/python/paddle/fluid/tests/unittests/test_compare_op.py index b105db9fa8da7868b96f55daa27b03ebd5a9a69e..0ea2e00a716bfa6b1db3acd1082e171024794d4f 100755 --- a/python/paddle/fluid/tests/unittests/test_compare_op.py +++ b/python/paddle/fluid/tests/unittests/test_compare_op.py @@ -81,8 +81,8 @@ def create_paddle_case(op_type, callback): def test_api(self): paddle.enable_static() with program_guard(Program(), Program()): - x = fluid.data(name='x', shape=[4], dtype='int64') - y = fluid.data(name='y', shape=[4], dtype='int64') + x = 
paddle.static.data(name='x', shape=[4], dtype='int64') + y = paddle.static.data(name='y', shape=[4], dtype='int64') op = eval("paddle.%s" % (self.op_type)) out = op(x, y) exe = fluid.Executor(self.place) @@ -96,8 +96,8 @@ def create_paddle_case(op_type, callback): if self.op_type == "equal": paddle.enable_static() with program_guard(Program(), Program()): - x = fluid.data(name='x', shape=[4], dtype='int64') - y = fluid.data(name='y', shape=[1], dtype='int64') + x = paddle.static.data(name='x', shape=[4], dtype='int64') + y = paddle.static.data(name='y', shape=[1], dtype='int64') op = eval("paddle.%s" % (self.op_type)) out = op(x, y) exe = fluid.Executor(self.place) diff --git a/python/paddle/fluid/tests/unittests/test_concat_op.py b/python/paddle/fluid/tests/unittests/test_concat_op.py index 17c176af2a8565ed9a5c59528139c80de483e2b4..8021f6ef26e7119cf8f49709ba45ce57abfbb52c 100644 --- a/python/paddle/fluid/tests/unittests/test_concat_op.py +++ b/python/paddle/fluid/tests/unittests/test_concat_op.py @@ -340,13 +340,15 @@ class TestConcatOpError(unittest.TestCase): class TestConcatAPI(unittest.TestCase): def test_fluid_api(self): paddle.enable_static() - x_1 = fluid.data(shape=[None, 1, 4, 5], dtype='int32', name='x_1') + x_1 = paddle.static.data( + shape=[None, 1, 4, 5], dtype='int32', name='x_1' + ) paddle.concat([x_1, x_1], 0) input_2 = np.random.random([2, 1, 4, 5]).astype("int32") input_3 = np.random.random([2, 2, 4, 5]).astype("int32") - x_2 = fluid.data(shape=[2, 1, 4, 5], dtype='int32', name='x_2') - x_3 = fluid.data(shape=[2, 2, 4, 5], dtype='int32', name='x_3') + x_2 = paddle.static.data(shape=[2, 1, 4, 5], dtype='int32', name='x_2') + x_3 = paddle.static.data(shape=[2, 2, 4, 5], dtype='int32', name='x_3') positive_1_int32 = paddle.tensor.fill_constant([1], "int32", 1) positive_1_int64 = paddle.tensor.fill_constant([1], "int64", 1) out_1 = paddle.concat([x_2, x_3], axis=1) @@ -365,15 +367,15 @@ class TestConcatAPI(unittest.TestCase): def test_api(self): paddle.enable_static() - x_1 = paddle.fluid.data( + x_1 = paddle.static.data( shape=[None, 1, 4, 5], dtype='int32', name='x_1' ) paddle.concat([x_1, x_1], 0) input_2 = np.random.random([2, 1, 4, 5]).astype("int32") input_3 = np.random.random([2, 2, 4, 5]).astype("int32") - x_2 = fluid.data(shape=[2, 1, 4, 5], dtype='int32', name='x_2') - x_3 = fluid.data(shape=[2, 2, 4, 5], dtype='int32', name='x_3') + x_2 = paddle.static.data(shape=[2, 1, 4, 5], dtype='int32', name='x_2') + x_3 = paddle.static.data(shape=[2, 2, 4, 5], dtype='int32', name='x_3') positive_1_int32 = paddle.tensor.fill_constant([1], "int32", 1) positive_1_int64 = paddle.tensor.fill_constant([1], "int64", 1) negative_int64 = paddle.tensor.fill_constant([1], "int64", -3) @@ -420,8 +422,8 @@ class TestConcatAPI(unittest.TestCase): ) self.assertRaises(TypeError, paddle.concat, [x2]) # The input dtype of concat_op must be float16, float32, float64, int32, int64. - x4 = paddle.fluid.data(shape=[4], dtype='uint8', name='x4') - x5 = paddle.fluid.data(shape=[4], dtype='uint8', name='x5') + x4 = paddle.static.data(shape=[4], dtype='uint8', name='x4') + x5 = paddle.static.data(shape=[4], dtype='uint8', name='x5') self.assertRaises(TypeError, paddle.concat, [x4, x5]) # The type of axis in concat_op should be int or Variable. 
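Every hunk in this patch applies the same mechanical substitution, so a single reference sketch of the surviving API helps while reading. The snippet below is illustrative only (the program and tensor names are invented, not taken from any test) and assumes Paddle 2.x static-graph semantics: feed variables are declared with paddle.static.data, the replacement for the deleted fluid.data, and run through an explicit program pair.

.. code-block:: python

    import numpy as np
    import paddle

    paddle.enable_static()
    main_prog = paddle.static.Program()
    startup_prog = paddle.static.Program()
    with paddle.static.program_guard(main_prog, startup_prog):
        # None (or -1) marks a dimension whose size is only known at feed time.
        x = paddle.static.data(name='x', shape=[None, 4], dtype='float32')
        y = paddle.static.data(name='y', shape=[None, 4], dtype='float32')
        out = paddle.concat([x, y], axis=1)

    exe = paddle.static.Executor(paddle.CPUPlace())
    exe.run(startup_prog)
    (res,) = exe.run(
        main_prog,
        feed={
            'x': np.ones([2, 4], dtype='float32'),
            'y': np.zeros([2, 4], dtype='float32'),
        },
        fetch_list=[out],
    )
    print(res.shape)  # (2, 8)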
diff --git a/python/paddle/fluid/tests/unittests/test_cond.py b/python/paddle/fluid/tests/unittests/test_cond.py index b5fb0a50f181fd45d4b6e39259212e4ca0ebb591..14aa0b638b7301d56faf76ec342b52f044a73f0d 100644 --- a/python/paddle/fluid/tests/unittests/test_cond.py +++ b/python/paddle/fluid/tests/unittests/test_cond.py @@ -285,7 +285,7 @@ class TestCondInputOutput(unittest.TestCase): a = paddle.tensor.fill_constant( shape=[3, 2, 1], dtype='int32', value=7 ) - i = fluid.data(name="i", shape=[1], dtype='int32') + i = paddle.static.data(name="i", shape=[1], dtype='int32') pred = (i % 2) == 0 a = paddle.static.nn.cond( pred, lambda: true_func(a, i), lambda: false_func(a, i) @@ -330,7 +330,7 @@ class TestCondInputOutput(unittest.TestCase): main_program = Program() startup_program = Program() with program_guard(main_program, startup_program): - i = fluid.data(name="i", shape=[1], dtype='int32') + i = paddle.static.data(name="i", shape=[1], dtype='int32') pred = (i % 2) == 0 out1 = paddle.static.nn.cond(pred, true_func, false_func) out2 = paddle.static.nn.cond(pred, None, false_func) @@ -371,7 +371,7 @@ class TestCondInputOutput(unittest.TestCase): main_program = Program() startup_program = Program() with program_guard(main_program, startup_program): - i = fluid.data(name="i", shape=[1], dtype='int32') + i = paddle.static.data(name="i", shape=[1], dtype='int32') pred = (i % 2) == 0 with self.assertRaises(TypeError): out = paddle.static.nn.cond(pred, i, func_return_one_tensor) @@ -477,7 +477,7 @@ class TestCondNestedControlFlow(unittest.TestCase): main_program = Program() startup_program = Program() with program_guard(main_program, startup_program): - i = fluid.data(name="i", shape=[1], dtype='float32') + i = paddle.static.data(name="i", shape=[1], dtype='float32') i.stop_gradient = False a = 2.0 * i out = paddle.static.nn.cond( @@ -629,10 +629,14 @@ class TestCondBackward(unittest.TestCase): startup_program = Program() startup_program.random_seed = 123 with program_guard(main_program, startup_program): - img = fluid.data(name='image', shape=[-1, 9], dtype='float32') + img = paddle.static.data( + name='image', shape=[-1, 9], dtype='float32' + ) img.stop_gradient = False - label = fluid.data(name='label', shape=[-1, 1], dtype='int64') - i = fluid.data(name="i", shape=[1], dtype='int32') + label = paddle.static.data( + name='label', shape=[-1, 1], dtype='int64' + ) + i = paddle.static.data(name="i", shape=[1], dtype='int32') loss = cond_func(i, img, label) append_backward(loss) place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace() @@ -684,9 +688,13 @@ class TestCondBackward(unittest.TestCase): main_program = Program() startup_program = Program() with program_guard(main_program, startup_program): - img = fluid.data(name='image', shape=[-1, 784], dtype='float32') - label = fluid.data(name='label', shape=[-1, 1], dtype='int64') - i = fluid.data(name="i", shape=[1], dtype='int32') + img = paddle.static.data( + name='image', shape=[-1, 784], dtype='float32' + ) + label = paddle.static.data( + name='label', shape=[-1, 1], dtype='int64' + ) + i = paddle.static.data(name="i", shape=[1], dtype='int32') loss = cond_func(i, img, label) optimizer = fluid.optimizer.SGD(learning_rate=0.1) optimizer.minimize(loss) @@ -793,7 +801,7 @@ class TestCondWithError(unittest.TestCase): main_program = framework.Program() startup_program = framework.Program() with framework.program_guard(main_program, startup_program): - pred = fluid.data(name='y', shape=[1], dtype='bool') + pred = paddle.static.data(name='y', 
shape=[1], dtype='bool') def func(): return pred diff --git a/python/paddle/fluid/tests/unittests/test_conv1d_layer.py b/python/paddle/fluid/tests/unittests/test_conv1d_layer.py index 441318c51296275fe3c4297cdcd6e79ffaa0dfa0..5e3ce31f80640e6ea10e1500cfff32d642456aea 100644 --- a/python/paddle/fluid/tests/unittests/test_conv1d_layer.py +++ b/python/paddle/fluid/tests/unittests/test_conv1d_layer.py @@ -93,11 +93,13 @@ class Conv1DTestCase(unittest.TestCase): if not self.channel_last else (-1, -1, self.num_channels) ) - x_var = fluid.data("input", input_shape, dtype=self.dtype) - w_var = fluid.data( + x_var = paddle.static.data( + "input", input_shape, dtype=self.dtype + ) + w_var = paddle.static.data( "weight", self.weight_shape, dtype=self.dtype ) - b_var = fluid.data( + b_var = paddle.static.data( "bias", (self.num_filters,), dtype=self.dtype ) y_var = F.conv1d( diff --git a/python/paddle/fluid/tests/unittests/test_conv1d_transpose_layer.py b/python/paddle/fluid/tests/unittests/test_conv1d_transpose_layer.py index 45edd261bc6eda2b822ff4712d4e0b924f4b1dbb..43baf478a654119d8fb0a3064728beda97d83cda 100644 --- a/python/paddle/fluid/tests/unittests/test_conv1d_transpose_layer.py +++ b/python/paddle/fluid/tests/unittests/test_conv1d_transpose_layer.py @@ -100,11 +100,13 @@ class Conv1DTransposeTestCase(unittest.TestCase): if not self.channel_last else (-1, -1, self.in_channels) ) - x_var = fluid.data("input", input_shape, dtype=self.dtype) - w_var = fluid.data( + x_var = paddle.static.data( + "input", input_shape, dtype=self.dtype + ) + w_var = paddle.static.data( "weight", self.weight_shape, dtype=self.dtype ) - b_var = fluid.data( + b_var = paddle.static.data( "bias", (self.out_channels,), dtype=self.dtype ) y_var = F.conv1d_transpose( diff --git a/python/paddle/fluid/tests/unittests/test_conv2d_layer.py b/python/paddle/fluid/tests/unittests/test_conv2d_layer.py index 6c14ce054ffbad491691fd81cf7611254ec5d005..3c9a18419c2cbb171a8ddbbea1fce45f2b1dc6b2 100644 --- a/python/paddle/fluid/tests/unittests/test_conv2d_layer.py +++ b/python/paddle/fluid/tests/unittests/test_conv2d_layer.py @@ -108,7 +108,9 @@ class Conv2DTestCase(unittest.TestCase): if self.channel_last else (-1, self.num_channels, -1, -1) ) - x_var = fluid.data("input", input_shape, dtype=self.dtype) + x_var = paddle.static.data( + "input", input_shape, dtype=self.dtype + ) weight_attr = paddle.nn.initializer.Assign(self.weight) if self.bias is None: bias_attr = False @@ -154,11 +156,13 @@ class Conv2DTestCase(unittest.TestCase): if self.channel_last else (-1, self.num_channels, -1, -1) ) - x_var = fluid.data("input", input_shape, dtype=self.dtype) - w_var = fluid.data( + x_var = paddle.static.data( + "input", input_shape, dtype=self.dtype + ) + w_var = paddle.static.data( "weight", self.weight_shape, dtype=self.dtype ) - b_var = fluid.data( + b_var = paddle.static.data( "bias", (self.num_filters,), dtype=self.dtype ) diff --git a/python/paddle/fluid/tests/unittests/test_conv2d_transpose_layer.py b/python/paddle/fluid/tests/unittests/test_conv2d_transpose_layer.py index 50c80c3aa32d61bf61f588ea20c3f990ae98de99..6a7f77fdcdfc868261711b2d80acf5387d5c56a9 100644 --- a/python/paddle/fluid/tests/unittests/test_conv2d_transpose_layer.py +++ b/python/paddle/fluid/tests/unittests/test_conv2d_transpose_layer.py @@ -99,7 +99,9 @@ class Conv2DTransposeTestCase(unittest.TestCase): if self.channel_last else (-1, self.num_channels, -1, -1) ) - x_var = fluid.data("input", input_shape, dtype=self.dtype) + x_var = paddle.static.data( + "input", 
input_shape, dtype=self.dtype + ) weight_attr = paddle.nn.initializer.Assign(self.weight) if self.bias is None: bias_attr = False @@ -135,11 +137,13 @@ class Conv2DTransposeTestCase(unittest.TestCase): if self.channel_last else (-1, self.num_channels, -1, -1) ) - x_var = fluid.data("input", input_shape, dtype=self.dtype) - w_var = fluid.data( + x_var = paddle.static.data( + "input", input_shape, dtype=self.dtype + ) + w_var = paddle.static.data( "weight", self.weight_shape, dtype=self.dtype ) - b_var = fluid.data( + b_var = paddle.static.data( "bias", (self.num_filters,), dtype=self.dtype ) diff --git a/python/paddle/fluid/tests/unittests/test_conv3d_layer.py b/python/paddle/fluid/tests/unittests/test_conv3d_layer.py index 8ef86daf69a034a815f9b03df5338d914c47f548..d3b1dd6960106b30793caa7e95dd94020d339024 100644 --- a/python/paddle/fluid/tests/unittests/test_conv3d_layer.py +++ b/python/paddle/fluid/tests/unittests/test_conv3d_layer.py @@ -95,7 +95,9 @@ class Conv3DTestCase(unittest.TestCase): if self.channel_last else (-1, self.num_channels, -1, -1, -1) ) - x_var = fluid.data("input", input_shape, dtype=self.dtype) + x_var = paddle.static.data( + "input", input_shape, dtype=self.dtype + ) weight_attr = paddle.nn.initializer.Assign(self.weight) if self.bias is None: bias_attr = False @@ -129,11 +131,13 @@ class Conv3DTestCase(unittest.TestCase): if self.channel_last else (-1, self.num_channels, -1, -1, -1) ) - x_var = fluid.data("input", input_shape, dtype=self.dtype) - w_var = fluid.data( + x_var = paddle.static.data( + "input", input_shape, dtype=self.dtype + ) + w_var = paddle.static.data( "weight", self.weight_shape, dtype=self.dtype ) - b_var = fluid.data( + b_var = paddle.static.data( "bias", (self.num_filters,), dtype=self.dtype ) y_var = F.conv3d( diff --git a/python/paddle/fluid/tests/unittests/test_conv3d_transpose_layer.py b/python/paddle/fluid/tests/unittests/test_conv3d_transpose_layer.py index 82c08348f4bf1edffd4837b7edac13c3d14ec524..90ddc1ecfdb0049143327a1d941c2924be29bec9 100644 --- a/python/paddle/fluid/tests/unittests/test_conv3d_transpose_layer.py +++ b/python/paddle/fluid/tests/unittests/test_conv3d_transpose_layer.py @@ -97,7 +97,9 @@ class Conv3DTransposeTestCase(unittest.TestCase): if self.channel_last else (-1, self.num_channels, -1, -1, -1) ) - x_var = fluid.data("input", input_shape, dtype=self.dtype) + x_var = paddle.static.data( + "input", input_shape, dtype=self.dtype + ) weight_attr = paddle.nn.initializer.Assign(self.weight) if self.bias is None: bias_attr = False @@ -132,11 +134,13 @@ class Conv3DTransposeTestCase(unittest.TestCase): if self.channel_last else (-1, self.num_channels, -1, -1, -1) ) - x_var = fluid.data("input", input_shape, dtype=self.dtype) - w_var = fluid.data( + x_var = paddle.static.data( + "input", input_shape, dtype=self.dtype + ) + w_var = paddle.static.data( "weight", self.weight_shape, dtype=self.dtype ) - b_var = fluid.data( + b_var = paddle.static.data( "bias", (self.num_filters,), dtype=self.dtype ) y_var = F.conv3d_transpose( diff --git a/python/paddle/fluid/tests/unittests/test_corr.py b/python/paddle/fluid/tests/unittests/test_corr.py index eb3eacddedf00b4e6242a6eafb1e99df626a61ae..ce4fea47c75da7b129331cec9fd832dc845cf5ef 100644 --- a/python/paddle/fluid/tests/unittests/test_corr.py +++ b/python/paddle/fluid/tests/unittests/test_corr.py @@ -117,7 +117,7 @@ class Corr_Comeplex_Test(unittest.TestCase): def test_errors(self): paddle.enable_static() - x1 = fluid.data(name=self.dtype, shape=[2], dtype=self.dtype) + x1 = 
paddle.static.data(name=self.dtype, shape=[2], dtype=self.dtype) self.assertRaises(TypeError, paddle.linalg.corrcoef, x=x1) paddle.disable_static() diff --git a/python/paddle/fluid/tests/unittests/test_cosine_similarity_api.py b/python/paddle/fluid/tests/unittests/test_cosine_similarity_api.py index 0626066c34a83b4a9fa38a379a3f28f97aa64a23..7bbe44c1e805064eacc07997bbefff849c7d1c9e 100644 --- a/python/paddle/fluid/tests/unittests/test_cosine_similarity_api.py +++ b/python/paddle/fluid/tests/unittests/test_cosine_similarity_api.py @@ -48,8 +48,8 @@ class TestCosineSimilarityAPI(unittest.TestCase): np_x1 = np.random.rand(*shape).astype(np.float32) np_x2 = np.random.rand(*shape).astype(np.float32) - x1 = paddle.fluid.data(name="x1", shape=shape) - x2 = paddle.fluid.data(name="x2", shape=shape) + x1 = paddle.static.data(name="x1", shape=shape) + x2 = paddle.static.data(name="x2", shape=shape) result = F.cosine_similarity(x1, x2, axis=axis, eps=eps) exe = Executor(place) fetches = exe.run( diff --git a/python/paddle/fluid/tests/unittests/test_count_nonzero_api.py b/python/paddle/fluid/tests/unittests/test_count_nonzero_api.py index 516b0744598848dc89d9d1fa35c4100e4eb31c71..4e94a7e81395495c1f207a5e1c80f138c7dd5f76 100644 --- a/python/paddle/fluid/tests/unittests/test_count_nonzero_api.py +++ b/python/paddle/fluid/tests/unittests/test_count_nonzero_api.py @@ -37,7 +37,7 @@ class TestCountNonzeroAPI(unittest.TestCase): def test_api_static(self): paddle.enable_static() with paddle.static.program_guard(paddle.static.Program()): - x = paddle.fluid.data('X', self.x_shape) + x = paddle.static.data('X', self.x_shape) out1 = paddle.count_nonzero(x) out2 = paddle.tensor.count_nonzero(x) out3 = paddle.tensor.math.count_nonzero(x) @@ -80,7 +80,7 @@ class TestCountNonzeroAPI(unittest.TestCase): def test_errors(self): paddle.enable_static() with paddle.static.program_guard(paddle.static.Program()): - x = paddle.fluid.data('X', [10, 12], 'int32') + x = paddle.static.data('X', [10, 12], 'int32') self.assertRaises(ValueError, paddle.count_nonzero, x, axis=10) diff --git a/python/paddle/fluid/tests/unittests/test_crop_op.py b/python/paddle/fluid/tests/unittests/test_crop_op.py index 26ecda5fb68523dd59e8ab8450417117bc0200ac..394b4a1b6885cb97634f4155f64002db602e6b76 100644 --- a/python/paddle/fluid/tests/unittests/test_crop_op.py +++ b/python/paddle/fluid/tests/unittests/test_crop_op.py @@ -18,7 +18,6 @@ import numpy as np from eager_op_test import OpTest import paddle -import paddle.fluid as fluid def crop(data, offsets, crop_shape): @@ -136,7 +135,7 @@ class TestCase6(TestCropOp): class TestCropNoneOffset(unittest.TestCase): def test_crop_none_offset(self): - x = fluid.data(name="input1", shape=[3, 6, 6], dtype="float32") + x = paddle.static.data(name="input1", shape=[3, 6, 6], dtype="float32") crop_shape = [2, 2, 2] crop = paddle.crop(x, crop_shape, None) self.assertEqual(crop.shape, (2, 2, 2)) @@ -144,7 +143,7 @@ class TestCropNoneOffset(unittest.TestCase): class TestCropNoneShape(unittest.TestCase): def test_crop_none_shape(self): - x = fluid.data(name="input1", shape=[3, 6, 6], dtype="float32") + x = paddle.static.data(name="input1", shape=[3, 6, 6], dtype="float32") crop = paddle.crop(x) self.assertEqual(crop.shape, (3, 6, 6)) @@ -152,7 +151,7 @@ class TestCropNoneShape(unittest.TestCase): class TestCropError(unittest.TestCase): def test_neg_offset_error(self): with self.assertRaises(ValueError): - x = fluid.data(name='input2', shape=[1], dtype="float32") + x = paddle.static.data(name='input2', shape=[1], 
dtype="float32") out = paddle.crop(x, offsets=[-1]) diff --git a/python/paddle/fluid/tests/unittests/test_crop_tensor_op.py b/python/paddle/fluid/tests/unittests/test_crop_tensor_op.py index ec1028fba44bfead65e322d8ec39ced78fae0616..31141142ccf4dfee32d9f4b16a52740b77b04eb9 100644 --- a/python/paddle/fluid/tests/unittests/test_crop_tensor_op.py +++ b/python/paddle/fluid/tests/unittests/test_crop_tensor_op.py @@ -18,7 +18,6 @@ import numpy as np from eager_op_test import OpTest import paddle -import paddle.fluid as fluid def crop(data, offsets, crop_shape): @@ -227,10 +226,14 @@ class TestCropTensorOpTensorAttrCase4(TestCropTensorOpTensorAttr): class TestCropTensorException(unittest.TestCase): def test_exception(self): - input1 = fluid.data(name="input1", shape=[2, 3, 6, 6], dtype="float32") - input2 = fluid.data(name="input2", shape=[2, 3, 6, 6], dtype="float16") - dim = fluid.data(name='dim', shape=[1], dtype='int32') - offset = fluid.data(name='offset', shape=[1], dtype='int32') + input1 = paddle.static.data( + name="input1", shape=[2, 3, 6, 6], dtype="float32" + ) + input2 = paddle.static.data( + name="input2", shape=[2, 3, 6, 6], dtype="float16" + ) + dim = paddle.static.data(name='dim', shape=[1], dtype='int32') + offset = paddle.static.data(name='offset', shape=[1], dtype='int32') def attr_shape_type(): out = paddle.crop(input1, shape=3) diff --git a/python/paddle/fluid/tests/unittests/test_cross_entropy_loss.py b/python/paddle/fluid/tests/unittests/test_cross_entropy_loss.py index 65bcd38439d14a01b2cbfa19c97f03856247afc5..f172ce7c9e97ed67be6c4ef28ae0ac803e0d3ed7 100644 --- a/python/paddle/fluid/tests/unittests/test_cross_entropy_loss.py +++ b/python/paddle/fluid/tests/unittests/test_cross_entropy_loss.py @@ -303,10 +303,10 @@ class CrossEntropyLoss(unittest.TestCase): else fluid.CPUPlace() ) with fluid.program_guard(prog, startup_prog): - input = fluid.data( + input = paddle.static.data( name='input', shape=[self.N, self.C], dtype=self.dtype ) - label = fluid.data( + label = paddle.static.data( name='label', shape=[self.N, self.C], dtype=self.dtype ) @@ -399,13 +399,15 @@ class CrossEntropyLoss(unittest.TestCase): else fluid.CPUPlace() ) with fluid.program_guard(prog, startup_prog): - input = fluid.data( + input = paddle.static.data( name='input', shape=[self.N, self.C], dtype=self.dtype ) - label = fluid.data( + label = paddle.static.data( name='label', shape=[self.N, self.C], dtype=self.dtype ) - weight = fluid.data(name='weight', shape=[self.C], dtype=self.dtype) + weight = paddle.static.data( + name='weight', shape=[self.C], dtype=self.dtype + ) cross_entropy_loss = paddle.nn.loss.CrossEntropyLoss( weight=weight, reduction=self.reduction, soft_label=True @@ -488,10 +490,10 @@ class CrossEntropyLoss(unittest.TestCase): else fluid.CPUPlace() ) with fluid.program_guard(prog, startup_prog): - input = fluid.data( + input = paddle.static.data( name='input', shape=[self.N, self.C], dtype=self.dtype ) - label = fluid.data( + label = paddle.static.data( name='label', shape=[self.N, self.C], dtype=self.dtype ) @@ -572,13 +574,15 @@ class CrossEntropyLoss(unittest.TestCase): else fluid.CPUPlace() ) with fluid.program_guard(prog, startup_prog): - input = fluid.data( + input = paddle.static.data( name='input', shape=[self.N, self.C], dtype=self.dtype ) - label = fluid.data( + label = paddle.static.data( name='label', shape=[self.N, self.C], dtype=self.dtype ) - weight = fluid.data(name='weight', shape=[self.C], dtype=self.dtype) + weight = paddle.static.data( + name='weight', shape=[self.C], 
dtype=self.dtype + ) cross_entropy_loss = paddle.nn.loss.CrossEntropyLoss( weight=weight, reduction=self.reduction, soft_label=True @@ -671,12 +675,12 @@ class CrossEntropyLoss(unittest.TestCase): else fluid.CPUPlace() ) with fluid.program_guard(prog, startup_prog): - input = fluid.data( + input = paddle.static.data( name='input', shape=[self.N, self.H, self.W, self.C], dtype=self.dtype, ) - label = fluid.data( + label = paddle.static.data( name='label', shape=[self.N, self.H, self.W, self.C], dtype=self.dtype, @@ -769,17 +773,19 @@ class CrossEntropyLoss(unittest.TestCase): else fluid.CPUPlace() ) with fluid.program_guard(prog, startup_prog): - input = fluid.data( + input = paddle.static.data( name='input', shape=[self.N, self.H, self.W, self.C], dtype=self.dtype, ) - label = fluid.data( + label = paddle.static.data( name='label', shape=[self.N, self.H, self.W, self.C], dtype=self.dtype, ) - weight = fluid.data(name='weight', shape=[self.C], dtype=self.dtype) + weight = paddle.static.data( + name='weight', shape=[self.C], dtype=self.dtype + ) cross_entropy_loss = paddle.nn.loss.CrossEntropyLoss( weight=weight, reduction=self.reduction, soft_label=True @@ -816,8 +822,10 @@ class CrossEntropyLoss(unittest.TestCase): else fluid.CPUPlace() ) with fluid.program_guard(prog, startup_prog): - input = fluid.data(name='input', shape=[2, 4], dtype=self.dtype) - label = fluid.data(name='label', shape=[2], dtype='int64') + input = paddle.static.data( + name='input', shape=[2, 4], dtype=self.dtype + ) + label = paddle.static.data(name='label', shape=[2], dtype='int64') cross_entropy_loss = paddle.nn.loss.CrossEntropyLoss(ignore_index=0) ret = cross_entropy_loss(input, label) @@ -862,8 +870,10 @@ class CrossEntropyLoss(unittest.TestCase): else fluid.CPUPlace() ) with fluid.program_guard(prog, startup_prog): - input = fluid.data(name='input', shape=[N, C], dtype=self.dtype) - label = fluid.data(name='label', shape=[N], dtype='int64') + input = paddle.static.data( + name='input', shape=[N, C], dtype=self.dtype + ) + label = paddle.static.data(name='label', shape=[N], dtype='int64') cross_entropy_loss = paddle.nn.loss.CrossEntropyLoss( ignore_index=-1 ) @@ -910,9 +920,11 @@ class CrossEntropyLoss(unittest.TestCase): else fluid.CPUPlace() ) with fluid.program_guard(prog, startup_prog): - input = fluid.data(name='input', shape=[N, C], dtype=self.dtype) - label = fluid.data(name='label', shape=[N], dtype='int64') - weight = fluid.data( + input = paddle.static.data( + name='input', shape=[N, C], dtype=self.dtype + ) + label = paddle.static.data(name='label', shape=[N], dtype='int64') + weight = paddle.static.data( name='weight', shape=[C], dtype=self.dtype ) # weight for each class cross_entropy_loss = paddle.nn.loss.CrossEntropyLoss( @@ -989,9 +1001,11 @@ class CrossEntropyLoss(unittest.TestCase): else fluid.CPUPlace() ) with fluid.program_guard(prog, startup_prog): - input = fluid.data(name='input', shape=[2, 4], dtype=self.dtype) - label = fluid.data(name='label', shape=[2], dtype='int64') - weight = fluid.data( + input = paddle.static.data( + name='input', shape=[2, 4], dtype=self.dtype + ) + label = paddle.static.data(name='label', shape=[2], dtype='int64') + weight = paddle.static.data( name='weight', shape=[4], dtype=self.dtype ) # weight for each class cross_entropy_loss = paddle.nn.loss.CrossEntropyLoss(weight=weight) @@ -1042,9 +1056,13 @@ class CrossEntropyLoss(unittest.TestCase): else fluid.CPUPlace() ) with fluid.program_guard(prog, startup_prog): - input = fluid.data(name='input', shape=[100, 
200], dtype=self.dtype) - label = fluid.data(name='label', shape=[100], dtype='int64') - weight = fluid.data(name='weight', shape=[200], dtype=self.dtype) + input = paddle.static.data( + name='input', shape=[100, 200], dtype=self.dtype + ) + label = paddle.static.data(name='label', shape=[100], dtype='int64') + weight = paddle.static.data( + name='weight', shape=[200], dtype=self.dtype + ) cross_entropy_loss = paddle.nn.loss.CrossEntropyLoss( weight=weight, reduction='sum' ) @@ -1092,9 +1110,13 @@ class CrossEntropyLoss(unittest.TestCase): else fluid.CPUPlace() ) with fluid.program_guard(prog, startup_prog): - input = fluid.data(name='input', shape=[100, 200], dtype=self.dtype) - label = fluid.data(name='label', shape=[100], dtype='int64') - weight = fluid.data(name='weight', shape=[200], dtype=self.dtype) + input = paddle.static.data( + name='input', shape=[100, 200], dtype=self.dtype + ) + label = paddle.static.data(name='label', shape=[100], dtype='int64') + weight = paddle.static.data( + name='weight', shape=[200], dtype=self.dtype + ) cross_entropy_loss = paddle.nn.loss.CrossEntropyLoss( weight=weight, reduction='none' @@ -1144,9 +1166,13 @@ class CrossEntropyLoss(unittest.TestCase): else fluid.CPUPlace() ) with fluid.program_guard(prog, startup_prog): - input = fluid.data(name='input', shape=[100, 200], dtype=self.dtype) - label = fluid.data(name='label', shape=[100], dtype='int64') - weight = fluid.data(name='weight', shape=[200], dtype=self.dtype) + input = paddle.static.data( + name='input', shape=[100, 200], dtype=self.dtype + ) + label = paddle.static.data(name='label', shape=[100], dtype='int64') + weight = paddle.static.data( + name='weight', shape=[200], dtype=self.dtype + ) ret = paddle.nn.functional.cross_entropy( input, label, weight=weight, reduction='none' ) @@ -1192,8 +1218,10 @@ class CrossEntropyLoss(unittest.TestCase): else fluid.CPUPlace() ) with fluid.program_guard(prog, startup_prog): - input = fluid.data(name='input', shape=[100, 200], dtype=self.dtype) - label = fluid.data(name='label', shape=[100], dtype='int64') + input = paddle.static.data( + name='input', shape=[100, 200], dtype=self.dtype + ) + label = paddle.static.data(name='label', shape=[100], dtype='int64') cross_entropy_loss = paddle.nn.loss.CrossEntropyLoss() ret = cross_entropy_loss(input, label) exe = fluid.Executor(place) @@ -1228,8 +1256,10 @@ class CrossEntropyLoss(unittest.TestCase): else fluid.CPUPlace() ) with fluid.program_guard(prog, startup_prog): - input = fluid.data(name='input', shape=[100, 200], dtype=self.dtype) - label = fluid.data(name='label', shape=[100], dtype='int64') + input = paddle.static.data( + name='input', shape=[100, 200], dtype=self.dtype + ) + label = paddle.static.data(name='label', shape=[100], dtype='int64') cross_entropy_loss = paddle.nn.loss.CrossEntropyLoss( reduction='sum' ) @@ -1268,8 +1298,10 @@ class CrossEntropyLoss(unittest.TestCase): else fluid.CPUPlace() ) with fluid.program_guard(prog, startup_prog): - input = fluid.data(name='input', shape=[100, 200], dtype=self.dtype) - label = fluid.data(name='label', shape=[100], dtype='int64') + input = paddle.static.data( + name='input', shape=[100, 200], dtype=self.dtype + ) + label = paddle.static.data(name='label', shape=[100], dtype='int64') cross_entropy_loss = paddle.nn.loss.CrossEntropyLoss( reduction='none' ) @@ -1316,11 +1348,15 @@ class CrossEntropyLoss(unittest.TestCase): else fluid.CPUPlace() ) with fluid.program_guard(prog, startup_prog): - input = fluid.data( + input = paddle.static.data( 
name='input', shape=[2, 2, 2, 3], dtype=self.dtype ) - label = fluid.data(name='label', shape=[2, 2, 2], dtype='int64') - weight = fluid.data(name='weight', shape=[3], dtype=self.dtype) + label = paddle.static.data( + name='label', shape=[2, 2, 2], dtype='int64' + ) + weight = paddle.static.data( + name='weight', shape=[3], dtype=self.dtype + ) cross_entropy_loss = paddle.nn.loss.CrossEntropyLoss( weight=weight, reduction='none' ) @@ -1374,11 +1410,15 @@ class CrossEntropyLoss(unittest.TestCase): else fluid.CPUPlace() ) with fluid.program_guard(prog, startup_prog): - input = fluid.data( + input = paddle.static.data( name='input', shape=[2, 3, 2, 2], dtype=self.dtype ) - label = fluid.data(name='label', shape=[2, 2, 2], dtype='int64') - weight = fluid.data(name='weight', shape=[3], dtype=self.dtype) + label = paddle.static.data( + name='label', shape=[2, 2, 2], dtype='int64' + ) + weight = paddle.static.data( + name='weight', shape=[3], dtype=self.dtype + ) cross_entropy_loss = paddle.nn.loss.CrossEntropyLoss( weight=weight, reduction='mean', axis=1 ) @@ -1460,11 +1500,15 @@ class CrossEntropyLoss(unittest.TestCase): else fluid.CPUPlace() ) with fluid.program_guard(prog, startup_prog): - input = fluid.data( + input = paddle.static.data( name='input', shape=[2, 2, 2, 3], dtype=self.dtype ) - label = fluid.data(name='label', shape=[2, 2, 2], dtype='int64') - weight = fluid.data(name='weight', shape=[3], dtype=self.dtype) + label = paddle.static.data( + name='label', shape=[2, 2, 2], dtype='int64' + ) + weight = paddle.static.data( + name='weight', shape=[3], dtype=self.dtype + ) cross_entropy_loss = paddle.nn.loss.CrossEntropyLoss( weight=weight, reduction='mean' ) @@ -1516,11 +1560,15 @@ class CrossEntropyLoss(unittest.TestCase): else fluid.CPUPlace() ) with fluid.program_guard(prog, startup_prog): - input = fluid.data( + input = paddle.static.data( name='input', shape=[2, 2, 2, 3], dtype=self.dtype ) - label = fluid.data(name='label', shape=[2, 2, 2], dtype='int64') - weight = fluid.data(name='weight', shape=[3], dtype=self.dtype) + label = paddle.static.data( + name='label', shape=[2, 2, 2], dtype='int64' + ) + weight = paddle.static.data( + name='weight', shape=[3], dtype=self.dtype + ) cross_entropy_loss = paddle.nn.loss.CrossEntropyLoss( weight=weight, reduction='sum' ) @@ -1570,10 +1618,12 @@ class CrossEntropyLoss(unittest.TestCase): else fluid.CPUPlace() ) with fluid.program_guard(prog, startup_prog): - input = fluid.data( + input = paddle.static.data( name='input', shape=[2, 2, 2, 3], dtype=self.dtype ) - label = fluid.data(name='label', shape=[2, 2, 2], dtype='int64') + label = paddle.static.data( + name='label', shape=[2, 2, 2], dtype='int64' + ) cross_entropy_loss = paddle.nn.loss.CrossEntropyLoss( reduction='none' ) @@ -1621,10 +1671,12 @@ class CrossEntropyLoss(unittest.TestCase): else fluid.CPUPlace() ) with fluid.program_guard(prog, startup_prog): - input = fluid.data( + input = paddle.static.data( name='input', shape=[2, 2, 2, 3], dtype=self.dtype ) - label = fluid.data(name='label', shape=[2, 2, 2], dtype='int64') + label = paddle.static.data( + name='label', shape=[2, 2, 2], dtype='int64' + ) cross_entropy_loss = paddle.nn.loss.CrossEntropyLoss( reduction='mean' ) @@ -1673,10 +1725,12 @@ class CrossEntropyLoss(unittest.TestCase): else fluid.CPUPlace() ) with fluid.program_guard(prog, startup_prog): - input = fluid.data( + input = paddle.static.data( name='input', shape=[2, 2, 2, 3], dtype=self.dtype ) - label = fluid.data(name='label', shape=[2, 2, 2], dtype='int64') + 
label = paddle.static.data( + name='label', shape=[2, 2, 2], dtype='int64' + ) cross_entropy_loss = paddle.nn.loss.CrossEntropyLoss( reduction='sum' ) @@ -1772,11 +1826,13 @@ class TestCrossEntropyFAPIError(unittest.TestCase): else fluid.CPUPlace() ) with fluid.program_guard(prog, startup_prog): - input = fluid.data( + input = paddle.static.data( name='input', shape=[2, 4], dtype='float32' ) - label = fluid.data(name='label', shape=[2], dtype='int64') - weight = fluid.data( + label = paddle.static.data( + name='label', shape=[2], dtype='int64' + ) + weight = paddle.static.data( name='weight', shape=[3], dtype='float32' ) # weight for each class cross_entropy_loss = paddle.nn.loss.CrossEntropyLoss( diff --git a/python/paddle/fluid/tests/unittests/test_cross_op.py b/python/paddle/fluid/tests/unittests/test_cross_op.py index a886c2e27be96ea2d123254913718fa59829259a..5ff34a337c42d62d7baae13de7130705a1d0d732 100644 --- a/python/paddle/fluid/tests/unittests/test_cross_op.py +++ b/python/paddle/fluid/tests/unittests/test_cross_op.py @@ -123,8 +123,8 @@ class TestCrossAPI(unittest.TestCase): # case 3: with program_guard(Program(), Program()): - x = fluid.data(name="x", shape=[-1, 3], dtype="float32") - y = fluid.data(name='y', shape=[-1, 3], dtype='float32') + x = paddle.static.data(name="x", shape=[-1, 3], dtype="float32") + y = paddle.static.data(name='y', shape=[-1, 3], dtype='float32') y_1 = paddle.cross(x, y, name='result') self.assertEqual(('result' in y_1.name), True) diff --git a/python/paddle/fluid/tests/unittests/test_cumprod_op.py b/python/paddle/fluid/tests/unittests/test_cumprod_op.py index 2a97e8af44d2a068d74e31524ac4effdd66c02a8..544b075ce0d8c59b7034077fe07f8d075228320a 100644 --- a/python/paddle/fluid/tests/unittests/test_cumprod_op.py +++ b/python/paddle/fluid/tests/unittests/test_cumprod_op.py @@ -166,7 +166,7 @@ class TestCumprodAPI(unittest.TestCase): def run(place): with paddle.static.program_guard(paddle.static.Program()): - x = paddle.fluid.data('X', self.shape, dtype=self.dtype) + x = paddle.static.data('X', self.shape, dtype=self.dtype) out = paddle.cumprod(x, -2) exe = paddle.static.Executor(place) res = exe.run(feed={'X': self.x}, fetch_list=[out]) diff --git a/python/paddle/fluid/tests/unittests/test_data.py b/python/paddle/fluid/tests/unittests/test_data.py index 0e2223767dd924cfa9ec065ec67984c29ce399af..9bbf16fd618f3bd7a33218614692e2187e59f28b 100644 --- a/python/paddle/fluid/tests/unittests/test_data.py +++ b/python/paddle/fluid/tests/unittests/test_data.py @@ -15,30 +15,12 @@ import unittest import paddle -import paddle.fluid as fluid import paddle.fluid.core as core from paddle.fluid import Program, program_guard -class TestApiDataError(unittest.TestCase): - def test_fluid_data(self): - with program_guard(Program(), Program()): - - # 1. The type of 'name' in fluid.data must be str. - def test_name_type(): - fluid.data(name=1, shape=[2, 25], dtype="bool") - - self.assertRaises(TypeError, test_name_type) - - # 2. The type of 'shape' in fluid.data must be list or tuple. 
- def test_shape_type(): - fluid.data(name='data1', shape=2, dtype="bool") - - self.assertRaises(TypeError, test_shape_type) - - class TestApiStaticDataError(unittest.TestCase): - def test_fluid_dtype(self): + def test_dtype(self): with program_guard(Program(), Program()): x1 = paddle.static.data(name="x1", shape=[2, 25]) self.assertEqual(x1.dtype, core.VarDesc.VarType.FP32) @@ -50,16 +32,16 @@ class TestApiStaticDataError(unittest.TestCase): x3 = paddle.static.data(name="x3", shape=[2, 25]) self.assertEqual(x3.dtype, core.VarDesc.VarType.FP64) - def test_fluid_data(self): + def test_error(self): with program_guard(Program(), Program()): - # 1. The type of 'name' in fluid.data must be str. + # 1. The type of 'name' in paddle.static.data must be str. def test_name_type(): paddle.static.data(name=1, shape=[2, 25], dtype="bool") self.assertRaises(TypeError, test_name_type) - # 2. The type of 'shape' in fluid.data must be list or tuple. + # 2. The type of 'shape' in paddle.static.data must be list or tuple. def test_shape_type(): paddle.static.data(name='data1', shape=2, dtype="bool") @@ -84,10 +66,7 @@ class TestApiErrorWithDynamicMode(unittest.TestCase): def test_error(self): with program_guard(Program(), Program()): paddle.disable_static() - self.assertRaises(AssertionError, fluid.data, 'a', [2, 25]) - self.assertRaises( - AssertionError, paddle.static.data, 'c', shape=[2, 25] - ) + self.assertRaises(AssertionError, paddle.static.data, 'a', [2, 25]) paddle.enable_static() diff --git a/python/paddle/fluid/tests/unittests/test_data_norm_op.py b/python/paddle/fluid/tests/unittests/test_data_norm_op.py index 4cf038334f72f1f5782258a0311cd34e0c3cae1f..8fcd4d2af9ba8d8a21aea12ad0495891ba7cea40 100644 --- a/python/paddle/fluid/tests/unittests/test_data_norm_op.py +++ b/python/paddle/fluid/tests/unittests/test_data_norm_op.py @@ -542,7 +542,7 @@ class TestDataNormOpErrorr(unittest.TestCase): # The size of input in data_norm should not be 0. 
def test_0_size(): paddle.enable_static() - x = fluid.data(name='x', shape=[0, 3], dtype='float32') + x = paddle.static.data(name='x', shape=[0, 3], dtype='float32') out = paddle.static.nn.data_norm(x, slot_dim=1) cpu = fluid.core.CPUPlace() exe = fluid.Executor(cpu) diff --git a/python/paddle/fluid/tests/unittests/test_dataloader_early_reset.py b/python/paddle/fluid/tests/unittests/test_dataloader_early_reset.py index f84eb48147254eb332c533592756d66399b3d16b..504d37d42d98339bd0be21db0c5ffc00ce1f1472 100644 --- a/python/paddle/fluid/tests/unittests/test_dataloader_early_reset.py +++ b/python/paddle/fluid/tests/unittests/test_dataloader_early_reset.py @@ -46,7 +46,7 @@ class TestDataLoaderEarlyReset(unittest.TestCase): return fluid.CPUPlace() def create_data_loader(self): - self.x = fluid.data(name='x', shape=[None, 32], dtype='float32') + self.x = paddle.static.data(name='x', shape=[None, 32], dtype='float32') return fluid.io.DataLoader.from_generator( feed_list=[self.x], capacity=10, iterable=self.iterable ) diff --git a/python/paddle/fluid/tests/unittests/test_dataloader_keep_order.py b/python/paddle/fluid/tests/unittests/test_dataloader_keep_order.py index 70e90cd8eb522b2743a87032d275bb6867d6d7f9..66085a176a22b7d4e5a86ef843396d6e1a4224a7 100644 --- a/python/paddle/fluid/tests/unittests/test_dataloader_keep_order.py +++ b/python/paddle/fluid/tests/unittests/test_dataloader_keep_order.py @@ -43,7 +43,9 @@ class DataLoaderKeepOrderTestBase(unittest.TestCase): self.initParameters() def build_network(self, places): - input_data = fluid.data(shape=self.shape, dtype='float32', name="input") + input_data = paddle.static.data( + shape=self.shape, dtype='float32', name="input" + ) loader = fluid.io.DataLoader.from_generator( capacity=16, feed_list=[input_data], iterable=self.iterable ) diff --git a/python/paddle/fluid/tests/unittests/test_dataloader_unkeep_order.py b/python/paddle/fluid/tests/unittests/test_dataloader_unkeep_order.py index 9538e1895cf5acdec705a9b43e90830946370b39..9d0d63a2e888ffa7494d36d225a8e736ad9f4d70 100644 --- a/python/paddle/fluid/tests/unittests/test_dataloader_unkeep_order.py +++ b/python/paddle/fluid/tests/unittests/test_dataloader_unkeep_order.py @@ -49,7 +49,9 @@ class DataLoaderKeepOrderTestBase(unittest.TestCase): self.visited = set() def build_network(self, places): - input_data = fluid.data(shape=self.shape, dtype='float32', name="input") + input_data = paddle.static.data( + shape=self.shape, dtype='float32', name="input" + ) loader = fluid.io.DataLoader.from_generator( capacity=16, feed_list=[input_data], iterable=self.iterable ) diff --git a/python/paddle/fluid/tests/unittests/test_dataset.py b/python/paddle/fluid/tests/unittests/test_dataset.py index d04cb0965b39ea42ec23a6f62c6455de256118ff..dddc91e2b887f523ccc7e4f63a148f8bd3f51ae9 100644 --- a/python/paddle/fluid/tests/unittests/test_dataset.py +++ b/python/paddle/fluid/tests/unittests/test_dataset.py @@ -744,7 +744,7 @@ class TestDataset(unittest.TestCase): slots = ["slot1", "slot2", "slot3", "slot4"] slots_vars = [] for slot in slots: - var = fluid.data( + var = paddle.static.data( name=slot, shape=[None, 1], dtype="int64", lod_level=1 ) slots_vars.append(var) diff --git a/python/paddle/fluid/tests/unittests/test_deg2rad.py b/python/paddle/fluid/tests/unittests/test_deg2rad.py index 5d6d9ac646bfc8c352bd2e9026744ea7233b9068..9bede17461dde9c6b8e056287da449aadf68871f 100644 --- a/python/paddle/fluid/tests/unittests/test_deg2rad.py +++ b/python/paddle/fluid/tests/unittests/test_deg2rad.py @@ -36,7 +36,9 @@ class 
TestDeg2radAPI(unittest.TestCase): startup_program = fluid.Program() train_program = fluid.Program() with fluid.program_guard(startup_program, train_program): - x = fluid.data(name='input', dtype=self.x_dtype, shape=self.x_shape) + x = paddle.static.data( + name='input', dtype=self.x_dtype, shape=self.x_shape + ) out = paddle.deg2rad(x) place = ( diff --git a/python/paddle/fluid/tests/unittests/test_deprecated_decorator.py b/python/paddle/fluid/tests/unittests/test_deprecated_decorator.py index cb417eac4428d66263705b2be88141e1f9c00fc4..63c091d1b0c64955adf52f7400591e238afc203e 100755 --- a/python/paddle/fluid/tests/unittests/test_deprecated_decorator.py +++ b/python/paddle/fluid/tests/unittests/test_deprecated_decorator.py @@ -19,7 +19,6 @@ import warnings import numpy as np import paddle -import paddle.fluid as fluid import paddle.utils.deprecated as deprecated from paddle import _legacy_C_ops @@ -64,30 +63,10 @@ def get_warning_index(api): class TestDeprecatedDocorator(unittest.TestCase): """ tests for paddle's Deprecated Docorator. - test_fluid_data: test for old fluid.data API. test_new_multiply: test for new api, which should not insert warning information. test_ops_elementwise_mul: test for C++ elementwise_mul op, which should not insert warning information. """ - def test_fluid_data(self): - """ - test old fluid elementwise_mul api, it should fire Warinng function, - which insert the Warinng info on top of API's doc string. - """ - paddle.enable_static() - # Initialization - x = fluid.data(name='x', shape=[3, 2, 1], dtype='float32') - - # expected - expected = LOWEST_WARNING_POSTION - - # captured - captured = get_warning_index(fluid.data) - paddle.disable_static() - - # testting - self.assertGreater(expected, captured) - def test_new_multiply(self): """ Test for new multiply api, expected result should be False. 
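The test_data.py hunk above keeps only the checks that apply to the surviving API. A minimal standalone sketch of that behaviour, assuming the semantics the updated test asserts (a non-str name raises TypeError in static mode, and calling paddle.static.data under dygraph trips an AssertionError):

.. code-block:: python

    import paddle

    paddle.enable_static()
    with paddle.static.program_guard(
        paddle.static.Program(), paddle.static.Program()
    ):
        try:
            # The 'name' argument must be str; an int is rejected.
            paddle.static.data(name=1, shape=[2, 25], dtype='bool')
        except TypeError as e:
            print('TypeError:', e)

    paddle.disable_static()
    try:
        # Static-only API: refuses to run in dynamic mode.
        paddle.static.data('a', [2, 25])
    except AssertionError as e:
        print('AssertionError:', e)
    paddle.enable_static()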
diff --git a/python/paddle/fluid/tests/unittests/test_determinant_op.py b/python/paddle/fluid/tests/unittests/test_determinant_op.py index 332a9f010a6f5fda33b7619eb474a561fa889b19..ade000cda8712e5ab99e9fd984be4f9eb358a64f 100644 --- a/python/paddle/fluid/tests/unittests/test_determinant_op.py +++ b/python/paddle/fluid/tests/unittests/test_determinant_op.py @@ -69,7 +69,7 @@ class TestDeterminantAPI(unittest.TestCase): def test_api_static(self): paddle.enable_static() with paddle.static.program_guard(paddle.static.Program()): - x = paddle.fluid.data('X', self.shape) + x = paddle.static.data('X', self.shape) out = paddle.linalg.det(x) exe = paddle.static.Executor(self.place) res = exe.run(feed={'X': self.x}, fetch_list=[out]) @@ -126,7 +126,7 @@ class TestSlogDeterminantAPI(unittest.TestCase): def test_api_static(self): paddle.enable_static() with paddle.static.program_guard(paddle.static.Program()): - x = paddle.fluid.data('X', self.shape) + x = paddle.static.data('X', self.shape) out = paddle.linalg.slogdet(x) exe = paddle.static.Executor(self.place) res = exe.run(feed={'X': self.x}, fetch_list=[out]) diff --git a/python/paddle/fluid/tests/unittests/test_diag_embed.py b/python/paddle/fluid/tests/unittests/test_diag_embed.py index 96241df0691b83089d72a5f84e3d4c2f19198940..0c75197fd2813b9c1920e6f46a87f370e07e8844 100644 --- a/python/paddle/fluid/tests/unittests/test_diag_embed.py +++ b/python/paddle/fluid/tests/unittests/test_diag_embed.py @@ -17,6 +17,7 @@ import unittest import numpy as np from eager_op_test import OpTest, paddle_static_guard +import paddle import paddle.fluid as fluid import paddle.fluid.core as core import paddle.nn.functional as F @@ -53,7 +54,9 @@ class TestDiagEmbedAPICase(unittest.TestCase): def test_case1(self): with paddle_static_guard(): diag_embed = np.random.randn(2, 3, 4).astype('float32') - data1 = fluid.data(name='data1', shape=[2, 3, 4], dtype='float32') + data1 = paddle.static.data( + name='data1', shape=[2, 3, 4], dtype='float32' + ) out1 = F.diag_embed(data1) out2 = F.diag_embed(data1, offset=1, dim1=-2, dim2=3) diff --git a/python/paddle/fluid/tests/unittests/test_diagonal_op.py b/python/paddle/fluid/tests/unittests/test_diagonal_op.py index 9e56e3c9c7595d3d384ae7a7700d2b19f99be0b4..1cff06a18080acf5d5bf6d3d61ad4f6c03c8cecd 100644 --- a/python/paddle/fluid/tests/unittests/test_diagonal_op.py +++ b/python/paddle/fluid/tests/unittests/test_diagonal_op.py @@ -137,7 +137,7 @@ class TestDiagonalAPI(unittest.TestCase): def test_api_static(self): paddle.enable_static() with paddle.static.program_guard(paddle.static.Program()): - x = paddle.fluid.data('X', self.shape) + x = paddle.static.data('X', self.shape) out = paddle.diagonal(x) exe = paddle.static.Executor(self.place) res = exe.run(feed={'X': self.x}, fetch_list=[out]) diff --git a/python/paddle/fluid/tests/unittests/test_diff_op.py b/python/paddle/fluid/tests/unittests/test_diff_op.py index 1c1a8639ee2b67483861c98e9c7cc55c392117cc..7cbeee96eaec983551256c6fbecd4ef2a3f3eeba 100644 --- a/python/paddle/fluid/tests/unittests/test_diff_op.py +++ b/python/paddle/fluid/tests/unittests/test_diff_op.py @@ -84,7 +84,7 @@ class TestDiffOp(unittest.TestCase): places.append(fluid.CUDAPlace(0)) for place in places: with fluid.program_guard(fluid.Program(), fluid.Program()): - x = paddle.fluid.data( + x = paddle.static.data( name="input", shape=self.input.shape, dtype=self.input.dtype ) has_pend = False @@ -92,14 +92,14 @@ class TestDiffOp(unittest.TestCase): append = None if self.prepend is not None: has_pend = True - 
prepend = paddle.fluid.data( + prepend = paddle.static.data( name="prepend", shape=self.prepend.shape, dtype=self.prepend.dtype, ) if self.append is not None: has_pend = True - append = paddle.fluid.data( + append = paddle.static.data( name="append", shape=self.append.shape, dtype=self.append.dtype, diff --git a/python/paddle/fluid/tests/unittests/test_dist_op.py b/python/paddle/fluid/tests/unittests/test_dist_op.py index 988665d4ef2aef689c4789f6ea5b82672d7c51a7..88d6313d980696adfb290d112c6b288f5ca3eddb 100644 --- a/python/paddle/fluid/tests/unittests/test_dist_op.py +++ b/python/paddle/fluid/tests/unittests/test_dist_op.py @@ -169,8 +169,12 @@ class TestDistAPI(unittest.TestCase): main_program = fluid.Program() startup_program = fluid.Program() with fluid.program_guard(main_program, startup_program): - x = fluid.data(name='x', shape=[2, 3, 4, 5], dtype=self.data_type) - y = fluid.data(name='y', shape=[3, 1, 5], dtype=self.data_type) + x = paddle.static.data( + name='x', shape=[2, 3, 4, 5], dtype=self.data_type + ) + y = paddle.static.data( + name='y', shape=[3, 1, 5], dtype=self.data_type + ) p = 2 x_i = np.random.random((2, 3, 4, 5)).astype(self.data_type) y_i = np.random.random((3, 1, 5)).astype(self.data_type) diff --git a/python/paddle/fluid/tests/unittests/test_dist_sparse_load_ps0.py b/python/paddle/fluid/tests/unittests/test_dist_sparse_load_ps0.py index 5ab7ad21dbdc9371a16271fe64140148f0ec1c9c..44ee15ea1a258236ef1cdb7f9b4e420b65f46c3e 100644 --- a/python/paddle/fluid/tests/unittests/test_dist_sparse_load_ps0.py +++ b/python/paddle/fluid/tests/unittests/test_dist_sparse_load_ps0.py @@ -30,7 +30,9 @@ class SparseLoadOp(unittest.TestCase): def net(self, emb_array, fc_array): with fluid.unique_name.guard(): - dense_input = fluid.data('input', shape=[None, 1], dtype="int64") + dense_input = paddle.static.data( + 'input', shape=[None, 1], dtype="int64" + ) emb = fluid.layers.embedding( input=dense_input, diff --git a/python/paddle/fluid/tests/unittests/test_dist_sparse_tensor_load_sgd.py b/python/paddle/fluid/tests/unittests/test_dist_sparse_tensor_load_sgd.py index f2f526484ce6a7632327855f13f3b7eaab9c36ab..e29d31270c50b779380ec576ca7656ea479a91ac 100644 --- a/python/paddle/fluid/tests/unittests/test_dist_sparse_tensor_load_sgd.py +++ b/python/paddle/fluid/tests/unittests/test_dist_sparse_tensor_load_sgd.py @@ -46,7 +46,9 @@ class TestSparseLoadProgram(unittest.TestCase): with fluid.scope_guard(scope): with fluid.program_guard(train_program, startup_program): with fluid.unique_name.guard(): - inputs = fluid.data('input', shape=[None, 1], dtype="int64") + inputs = paddle.static.data( + 'input', shape=[None, 1], dtype="int64" + ) emb = fluid.layers.embedding( inputs, is_sparse=True, size=[10000, 128] ) diff --git a/python/paddle/fluid/tests/unittests/test_dropout_op.py b/python/paddle/fluid/tests/unittests/test_dropout_op.py index 306c1bd3f2793175ce832d7c49850348e7f9ad04..ecdc8934016bc49877e5e4ecc971ab7c10b9b3b1 100644 --- a/python/paddle/fluid/tests/unittests/test_dropout_op.py +++ b/python/paddle/fluid/tests/unittests/test_dropout_op.py @@ -399,7 +399,9 @@ class TestDropoutFAPI(unittest.TestCase): def check_static_result(self, place): with fluid.program_guard(fluid.Program(), fluid.Program()): - input = fluid.data(name="input", shape=[-1, -1], dtype="float32") + input = paddle.static.data( + name="input", shape=[-1, -1], dtype="float32" + ) res1 = paddle.nn.functional.dropout(x=input, p=0.0, training=False) res2 = paddle.nn.functional.dropout( x=input, p=0.0, axis=0, training=True, 
mode='upscale_in_train' @@ -632,56 +634,72 @@ class TestDropoutFAPIError(unittest.TestCase): def test_dtype(): # the input dtype of dropout must be float32 or float64 # float16 only can be set on GPU place - xr = fluid.data(name='xr', shape=[3, 4, 5, 6], dtype="int32") + xr = paddle.static.data( + name='xr', shape=[3, 4, 5, 6], dtype="int32" + ) paddle.nn.functional.dropout(xr, p=0.5) self.assertRaises(TypeError, test_dtype) def test_pdtype(): # p should be int or float - x2 = fluid.data(name='x2', shape=[3, 4, 5, 6], dtype="float32") + x2 = paddle.static.data( + name='x2', shape=[3, 4, 5, 6], dtype="float32" + ) paddle.nn.functional.dropout(x2, p='0.5') self.assertRaises(TypeError, test_pdtype) def test_pvalue(): # p should be 0.<=p<=1. - x2 = fluid.data(name='x2', shape=[3, 4, 5, 6], dtype="float32") + x2 = paddle.static.data( + name='x2', shape=[3, 4, 5, 6], dtype="float32" + ) paddle.nn.functional.dropout(x2, p=1.2) self.assertRaises(ValueError, test_pvalue) def test_mode(): # mode should be 'downscale_in_infer' or 'upscale_in_train' - x2 = fluid.data(name='x2', shape=[3, 4, 5, 6], dtype="float32") + x2 = paddle.static.data( + name='x2', shape=[3, 4, 5, 6], dtype="float32" + ) paddle.nn.functional.dropout(x2, mode='abc') self.assertRaises(ValueError, test_mode) def test_axis(): # axis should be int or list - x2 = fluid.data(name='x2', shape=[3, 4, 5, 6], dtype="float32") + x2 = paddle.static.data( + name='x2', shape=[3, 4, 5, 6], dtype="float32" + ) paddle.nn.functional.dropout(x2, axis=1.2) self.assertRaises(TypeError, test_axis) def test_axis_max(): # maximum of axis should less than dimensions of x - x2 = fluid.data(name='x2', shape=[3, 4, 5, 6], dtype="float32") + x2 = paddle.static.data( + name='x2', shape=[3, 4, 5, 6], dtype="float32" + ) paddle.nn.functional.dropout(x2, axis=[0, 5]) self.assertRaises(ValueError, test_axis_max) def test_axis_min(): # minimum of axis should greater equal than 0 - x2 = fluid.data(name='x2', shape=[3, 4, 5, 6], dtype="float32") + x2 = paddle.static.data( + name='x2', shape=[3, 4, 5, 6], dtype="float32" + ) paddle.nn.functional.dropout(x2, axis=[0, -1]) self.assertRaises(ValueError, test_axis_min) def test_axis_len(): # length of axis should not greater than dimensions of x - x2 = fluid.data(name='x2', shape=[3, 4, 5, 6], dtype="float32") + x2 = paddle.static.data( + name='x2', shape=[3, 4, 5, 6], dtype="float32" + ) paddle.nn.functional.dropout(x2, axis=[0, 1, 2, 3, 4]) self.assertRaises(ValueError, test_axis_len) @@ -717,7 +735,7 @@ class TestDropout2DFAPI(unittest.TestCase): def check_static_result(self, place): with fluid.program_guard(fluid.Program(), fluid.Program()): - input = fluid.data( + input = paddle.static.data( name="input", shape=[2, 3, 4, 5], dtype="float32" ) res1 = paddle.nn.functional.dropout2d( @@ -769,14 +787,18 @@ class TestDropout2DFAPIError(unittest.TestCase): def test_xdim(): # dimentions of x should be 4 - x = fluid.data(name='x1', shape=[2, 3, 4, 5, 6], dtype="int32") + x = paddle.static.data( + name='x1', shape=[2, 3, 4, 5, 6], dtype="int32" + ) paddle.nn.functional.dropout2d(x) self.assertRaises(ValueError, test_xdim) def test_dataformat(): # data_format should be 'NCHW' or 'NHWC' - x = fluid.data(name='x2', shape=[2, 3, 4, 5], dtype="int32") + x = paddle.static.data( + name='x2', shape=[2, 3, 4, 5], dtype="int32" + ) paddle.nn.functional.dropout2d(x, data_format='CNHW') self.assertRaises(ValueError, test_dataformat) @@ -835,7 +857,7 @@ class TestDropout3DFAPI(unittest.TestCase): def check_static_result(self, place): with 
fluid.program_guard(fluid.Program(), fluid.Program()): - input = fluid.data( + input = paddle.static.data( name="input", shape=[2, 3, 4, 5, 6], dtype="float32" ) res1 = paddle.nn.functional.dropout3d( @@ -887,14 +909,18 @@ class TestDropout3DFAPIError(unittest.TestCase): def test_xdim(): # dimensions of x should be 5 - x = fluid.data(name='x1', shape=[2, 3, 4, 5], dtype="int32") + x = paddle.static.data( + name='x1', shape=[2, 3, 4, 5], dtype="int32" + ) paddle.nn.functional.dropout3d(x) self.assertRaises(ValueError, test_xdim) def test_dataformat(): # data_format should be 'NCDHW' or 'NDHWC' - x = fluid.data(name='x2', shape=[2, 3, 4, 5, 6], dtype="int32") + x = paddle.static.data( + name='x2', shape=[2, 3, 4, 5, 6], dtype="int32" + ) paddle.nn.functional.dropout3d(x, data_format='CNDHW') self.assertRaises(ValueError, test_dataformat) @@ -930,7 +956,9 @@ class TestAlphaDropoutFAPI(unittest.TestCase): def check_static_result(self, place): with fluid.program_guard(fluid.Program(), fluid.Program()): - input = fluid.data(name="input", shape=[40, 40], dtype="float32") + input = paddle.static.data( + name="input", shape=[40, 40], dtype="float32" + ) res1 = paddle.nn.functional.alpha_dropout(x=input, p=0.0) res2 = paddle.nn.functional.alpha_dropout( x=input, p=0.0, training=False @@ -996,21 +1024,27 @@ class TestAlphaDropoutFAPIError(unittest.TestCase): def test_dtype(): # the input dtype of dropout must be float32 or float64 - xr = fluid.data(name='xr', shape=[3, 4, 5, 6], dtype="int32") + xr = paddle.static.data( + name='xr', shape=[3, 4, 5, 6], dtype="int32" + ) paddle.nn.functional.alpha_dropout(xr) self.assertRaises(TypeError, test_dtype) def test_pdtype(): # p should be int or float - x2 = fluid.data(name='x2', shape=[3, 4, 5, 6], dtype="float32") + x2 = paddle.static.data( + name='x2', shape=[3, 4, 5, 6], dtype="float32" + ) paddle.nn.functional.alpha_dropout(x2, p='0.5') self.assertRaises(TypeError, test_pdtype) def test_pvalue(): # p should satisfy 0. <= p <= 1.
- x2 = fluid.data(name='x2', shape=[3, 4, 5, 6], dtype="float32") + x2 = paddle.static.data( + name='x2', shape=[3, 4, 5, 6], dtype="float32" + ) paddle.nn.functional.alpha_dropout(x2, p=1.2) self.assertRaises(ValueError, test_pvalue) diff --git a/python/paddle/fluid/tests/unittests/test_dynamic_rnn_stop_gradient.py b/python/paddle/fluid/tests/unittests/test_dynamic_rnn_stop_gradient.py index 9ea90d152dad9c0c945b58cb5dda1b9bcb2dd7fe..d923ece57266bd49806d8f244d5d3820bc1a83d2 100644 --- a/python/paddle/fluid/tests/unittests/test_dynamic_rnn_stop_gradient.py +++ b/python/paddle/fluid/tests/unittests/test_dynamic_rnn_stop_gradient.py @@ -31,7 +31,9 @@ def build_and_run_program(place, batch_size, beam_size, stop_gradient=False): x = paddle.assign( np.random.rand(batch_size, beam_size, 32).astype("float32") ) - indices = fluid.data(shape=[None, beam_size], dtype="int64", name="indices") + indices = paddle.static.data( + shape=[None, beam_size], dtype="int64", name="indices" + ) step_idx = paddle.tensor.fill_constant( shape=[1], dtype="int64", value=0, force_cpu=True ) diff --git a/python/paddle/fluid/tests/unittests/test_eig_op.py b/python/paddle/fluid/tests/unittests/test_eig_op.py index 2860e078e749b79e1aacf05bd4a493208e6645e0..5e412908e616124c82a9c3de7be3857adcb54fda 100644 --- a/python/paddle/fluid/tests/unittests/test_eig_op.py +++ b/python/paddle/fluid/tests/unittests/test_eig_op.py @@ -244,7 +244,9 @@ class TestEigStatic(TestEigOp): input_np = np.random.random([3, 3]).astype('complex') expect_val, expect_vec = np.linalg.eig(input_np) with fluid.program_guard(fluid.Program(), fluid.Program()): - input = fluid.data(name="input", shape=[3, 3], dtype='complex') + input = paddle.static.data( + name="input", shape=[3, 3], dtype='complex' + ) act_val, act_vec = paddle.linalg.eig(input) exe = fluid.Executor(place) diff --git a/python/paddle/fluid/tests/unittests/test_elementwise_add_op.py b/python/paddle/fluid/tests/unittests/test_elementwise_add_op.py index c742a02b9e1657b01588cb3db44d3c279129bbde..cff2c505cba954b40682cda0403255a0d57bb69a 100644 --- a/python/paddle/fluid/tests/unittests/test_elementwise_add_op.py +++ b/python/paddle/fluid/tests/unittests/test_elementwise_add_op.py @@ -570,8 +570,8 @@ class TestAddApi(unittest.TestCase): def test_name(self): with fluid.program_guard(fluid.Program()): - x = fluid.data(name="x", shape=[2, 3], dtype="float32") - y = fluid.data(name='y', shape=[2, 3], dtype='float32') + x = paddle.static.data(name="x", shape=[2, 3], dtype="float32") + y = paddle.static.data(name='y', shape=[2, 3], dtype='float32') y_1 = self._executed_api(x, y, name='add_res') self.assertEqual(('add_res' in y_1.name), True) @@ -585,8 +585,8 @@ class TestAddApi(unittest.TestCase): "y": np.array([1, 5, 2]).astype('float32'), } - x = fluid.data(name="x", shape=[3], dtype='float32') - y = fluid.data(name="y", shape=[3], dtype='float32') + x = paddle.static.data(name="x", shape=[3], dtype='float32') + y = paddle.static.data(name="y", shape=[3], dtype='float32') z = self._executed_api(x, y) place = fluid.CPUPlace() diff --git a/python/paddle/fluid/tests/unittests/test_elementwise_div_op.py b/python/paddle/fluid/tests/unittests/test_elementwise_div_op.py index 2138ac33a77a4d6902853c11bc8e3b385bfd9694..af06373ea0a2ded27e0fa1f057c3ff5fa9915c49 100644 --- a/python/paddle/fluid/tests/unittests/test_elementwise_div_op.py +++ b/python/paddle/fluid/tests/unittests/test_elementwise_div_op.py @@ -485,7 +485,7 @@ create_test_fp16_class(TestElementwiseDivOpXsizeLessThanYsize) class 
TestElementwiseDivBroadcast(unittest.TestCase): def test_shape_with_batch_sizes(self): with fluid.program_guard(fluid.Program()): - x_var = fluid.data( + x_var = paddle.static.data( name='x', dtype='float32', shape=[None, 3, None, None] ) one = 2.0 @@ -499,8 +499,8 @@ class TestElementwiseDivBroadcast(unittest.TestCase): class TestDivideOp(unittest.TestCase): def test_name(self): with fluid.program_guard(fluid.Program()): - x = fluid.data(name="x", shape=[2, 3], dtype="float32") - y = fluid.data(name='y', shape=[2, 3], dtype='float32') + x = paddle.static.data(name="x", shape=[2, 3], dtype="float32") + y = paddle.static.data(name='y', shape=[2, 3], dtype='float32') y_1 = paddle.divide(x, y, name='div_res') self.assertEqual(('div_res' in y_1.name), True) diff --git a/python/paddle/fluid/tests/unittests/test_elementwise_floordiv_op.py b/python/paddle/fluid/tests/unittests/test_elementwise_floordiv_op.py index 29402bcf49afb5c1874bad4c0248aa329250c114..7aadfbd01e8670be54f89cac2279c541bb0cb3ff 100644 --- a/python/paddle/fluid/tests/unittests/test_elementwise_floordiv_op.py +++ b/python/paddle/fluid/tests/unittests/test_elementwise_floordiv_op.py @@ -99,8 +99,8 @@ class TestFloorDivideOp(unittest.TestCase): def test_name(self): with paddle_static_guard(): with fluid.program_guard(fluid.Program()): - x = fluid.data(name="x", shape=[2, 3], dtype="int64") - y = fluid.data(name='y', shape=[2, 3], dtype='int64') + x = paddle.static.data(name="x", shape=[2, 3], dtype="int64") + y = paddle.static.data(name='y', shape=[2, 3], dtype='int64') y_1 = paddle.floor_divide(x, y, name='div_res') self.assertEqual(('div_res' in y_1.name), True) diff --git a/python/paddle/fluid/tests/unittests/test_elementwise_mod_op.py b/python/paddle/fluid/tests/unittests/test_elementwise_mod_op.py index 0264dd8a54bd8bfddfe3556afa9ab2b5769f1209..d61b2bcce1f1fe51a3dc4ec5aa1c2b857813c801 100644 --- a/python/paddle/fluid/tests/unittests/test_elementwise_mod_op.py +++ b/python/paddle/fluid/tests/unittests/test_elementwise_mod_op.py @@ -133,8 +133,8 @@ class TestRemainderOp(unittest.TestCase): def test_name(self): with fluid.program_guard(fluid.Program()): - x = fluid.data(name="x", shape=[2, 3], dtype="int64") - y = fluid.data(name='y', shape=[2, 3], dtype='int64') + x = paddle.static.data(name="x", shape=[2, 3], dtype="int64") + y = paddle.static.data(name='y', shape=[2, 3], dtype='int64') y_1 = self._executed_api(x, y, name='div_res') self.assertEqual(('div_res' in y_1.name), True) diff --git a/python/paddle/fluid/tests/unittests/test_elementwise_sub_op.py b/python/paddle/fluid/tests/unittests/test_elementwise_sub_op.py index 89083641abc4c3cef78c19c601d5c1c46405f547..a076f2d02eb4664afc8b05032742de4b23d00587 100644 --- a/python/paddle/fluid/tests/unittests/test_elementwise_sub_op.py +++ b/python/paddle/fluid/tests/unittests/test_elementwise_sub_op.py @@ -472,8 +472,8 @@ class TestSubtractApi(unittest.TestCase): def test_name(self): with fluid.program_guard(fluid.Program()): - x = fluid.data(name="x", shape=[2, 3], dtype="float32") - y = fluid.data(name='y', shape=[2, 3], dtype='float32') + x = paddle.static.data(name="x", shape=[2, 3], dtype="float32") + y = paddle.static.data(name='y', shape=[2, 3], dtype='float32') y_1 = self._executed_api(x, y, name='subtract_res') self.assertEqual(('subtract_res' in y_1.name), True) @@ -487,8 +487,8 @@ class TestSubtractApi(unittest.TestCase): "y": np.array([1, 5, 2]).astype('float32'), } - x = fluid.data(name="x", shape=[3], dtype='float32') - y = fluid.data(name="y", shape=[3], 
dtype='float32') + x = paddle.static.data(name="x", shape=[3], dtype='float32') + y = paddle.static.data(name="y", shape=[3], dtype='float32') z = self._executed_api(x, y) place = fluid.CPUPlace() exe = fluid.Executor(place) diff --git a/python/paddle/fluid/tests/unittests/test_ema.py b/python/paddle/fluid/tests/unittests/test_ema.py index 117acb132499ba5a00c523cfa2e86477cc2f0368..62100c314526019bc5c3ee9458bf15c6034cdf30 100644 --- a/python/paddle/fluid/tests/unittests/test_ema.py +++ b/python/paddle/fluid/tests/unittests/test_ema.py @@ -32,7 +32,9 @@ class TestExponentialMovingAverage(unittest.TestCase): self._startup_prog = fluid.Program() with fluid.program_guard(self._train_program, self._startup_prog): with fluid.unique_name.guard(): - data = fluid.data(name='x', shape=[-1, 5], dtype='float32') + data = paddle.static.data( + name='x', shape=[-1, 5], dtype='float32' + ) hidden = paddle.static.nn.fc( x=data, size=10, weight_attr=self._param_name ) diff --git a/python/paddle/fluid/tests/unittests/test_embedding_id_stop_gradient.py b/python/paddle/fluid/tests/unittests/test_embedding_id_stop_gradient.py index 7c5776cb4abd7efdee8dc2b84af7fd6c9c37e6e6..4f709367c189a0997fd7b182c9369fb9f7f2527c 100644 --- a/python/paddle/fluid/tests/unittests/test_embedding_id_stop_gradient.py +++ b/python/paddle/fluid/tests/unittests/test_embedding_id_stop_gradient.py @@ -49,8 +49,8 @@ class TestEmbeddingIdStopGradientBase(unittest.TestCase): scope = fluid.Scope() with fluid.program_guard(main_program, startup_program): with fluid.scope_guard(scope): - x_1 = fluid.data(name='x1', shape=[4, 1], dtype='int64') - x_2 = fluid.data(name='x2', shape=[4, 1], dtype='int64') + x_1 = paddle.static.data(name='x1', shape=[4, 1], dtype='int64') + x_2 = paddle.static.data(name='x2', shape=[4, 1], dtype='int64') x = paddle.concat([x_1, x_2], axis=-1) for _ in range(self.reshape_times): diff --git a/python/paddle/fluid/tests/unittests/test_empty_op.py b/python/paddle/fluid/tests/unittests/test_empty_op.py index d13b7f53d366e41ac074d2412ec66d3edce589b5..104495887e01a8cfead2871b18a9b90de54d7e6e 100644 --- a/python/paddle/fluid/tests/unittests/test_empty_op.py +++ b/python/paddle/fluid/tests/unittests/test_empty_op.py @@ -242,13 +242,13 @@ class TestEmptyAPI(unittest.TestCase): positive_2_int32 = paddle.tensor.fill_constant([1], "int32", 3) positive_2_int64 = paddle.tensor.fill_constant([1], "int64", 3) - shape_tensor_int32 = fluid.data( + shape_tensor_int32 = paddle.static.data( name="shape_tensor_int32", shape=[2], dtype="int32" ) - shape_tensor_int64 = fluid.data( + shape_tensor_int64 = paddle.static.data( name="shape_tensor_int64", shape=[2], dtype="int64" ) - shape_tensor_unknown = fluid.data( + shape_tensor_unknown = paddle.static.data( name="shape_tensor_unknown", shape=[-1], dtype="int64" ) diff --git a/python/paddle/fluid/tests/unittests/test_erfinv_op.py b/python/paddle/fluid/tests/unittests/test_erfinv_op.py index 695813bb24705f5d460234a5bd6e3e661840b743..9f6d9ca01df0b37777b1f110c3896391ad4a30be 100644 --- a/python/paddle/fluid/tests/unittests/test_erfinv_op.py +++ b/python/paddle/fluid/tests/unittests/test_erfinv_op.py @@ -77,7 +77,7 @@ class TestErfinvAPI(unittest.TestCase): def run(place): with paddle.static.program_guard(paddle.static.Program()): - x = paddle.fluid.data('x', [1, 5], dtype=self.dtype) + x = paddle.static.data('x', [1, 5], dtype=self.dtype) out = paddle.erfinv(x) exe = paddle.static.Executor(place) res = exe.run(feed={'x': self.x.reshape([1, 5])}) diff --git 
a/python/paddle/fluid/tests/unittests/test_executor_check_feed.py b/python/paddle/fluid/tests/unittests/test_executor_check_feed.py index 700bbfa95d14bf05c4e1724cacf3f21f01b57f41..6f2408b376fec8719b5b359d87b5eea41191f880 100644 --- a/python/paddle/fluid/tests/unittests/test_executor_check_feed.py +++ b/python/paddle/fluid/tests/unittests/test_executor_check_feed.py @@ -20,9 +20,9 @@ import paddle.fluid as fluid class TestExecutor(unittest.TestCase): def net(self): - lr = fluid.data(name="lr", shape=[1], dtype='float32') - x = fluid.data(name="x", shape=[None, 1], dtype='float32') - y = fluid.data(name="y", shape=[None, 1], dtype='float32') + lr = paddle.static.data(name="lr", shape=[1], dtype='float32') + x = paddle.static.data(name="x", shape=[None, 1], dtype='float32') + y = paddle.static.data(name="y", shape=[None, 1], dtype='float32') y_predict = paddle.static.nn.fc(x, size=1) cost = paddle.nn.functional.square_error_cost(input=y_predict, label=y) diff --git a/python/paddle/fluid/tests/unittests/test_executor_feed_non_tensor.py b/python/paddle/fluid/tests/unittests/test_executor_feed_non_tensor.py index acdb8b78549e8194e970be673bb440d25d306829..b3b6880bcd172ff9f325a7bbe8558a107075aa14 100644 --- a/python/paddle/fluid/tests/unittests/test_executor_feed_non_tensor.py +++ b/python/paddle/fluid/tests/unittests/test_executor_feed_non_tensor.py @@ -22,9 +22,9 @@ import paddle.fluid as fluid class TestExecutor(unittest.TestCase): def net(self): - lr = fluid.data(name="lr", shape=[1], dtype='float32') - x = fluid.data(name="x", shape=[None, 1], dtype='float32') - y = fluid.data(name="y", shape=[None, 1], dtype='float32') + lr = paddle.static.data(name="lr", shape=[1], dtype='float32') + x = paddle.static.data(name="x", shape=[None, 1], dtype='float32') + y = paddle.static.data(name="y", shape=[None, 1], dtype='float32') y_predict = paddle.static.nn.fc(x, size=1) cost = paddle.nn.functional.square_error_cost(input=y_predict, label=y) diff --git a/python/paddle/fluid/tests/unittests/test_feed_data_check_shape_type.py b/python/paddle/fluid/tests/unittests/test_feed_data_check_shape_type.py index 8828b8b0e8138abd871bb975e61f7f81489c1153..0298c0ebc20f826e41c76b753538ecd49721db8f 100644 --- a/python/paddle/fluid/tests/unittests/test_feed_data_check_shape_type.py +++ b/python/paddle/fluid/tests/unittests/test_feed_data_check_shape_type.py @@ -28,8 +28,8 @@ np.random.seed(123) class TestFeedData(unittest.TestCase): ''' - Test paddle.fluid.data feeds with different shape and types. - Note: paddle.fluid.data is not paddle.static.data. + Test paddle.static.data feeds with different shape and types. + Note: paddle.fluid.data has been removed; paddle.static.data checks the shape and dtype of fed variables.
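    A minimal sketch of the checked-feed pattern this class exercises
    (names, shapes and values below are illustrative, not part of the tests):

        import numpy as np
        import paddle

        paddle.enable_static()
        data = paddle.static.data(name='data', shape=[-1, 4], dtype='float32')
        out = paddle.mean(data)
        exe = paddle.static.Executor(paddle.CPUPlace())
        exe.run(paddle.static.default_startup_program())
        # a [batch, 4] float32 array matches the declared shape and dtype;
        # feeding a mismatched shape or dtype raises an error at run time
        res = exe.run(feed={'data': np.ones([2, 4], 'float32')}, fetch_list=[out])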
''' def setUp(self): @@ -53,8 +53,12 @@ class TestFeedData(unittest.TestCase): return self.data_batch_size def _simple_fc_net(self, in_size, label_size, class_num, hidden_sizes): - in_data = fluid.data(name="data", dtype='float32', shape=in_size) - label = fluid.data(name='label', dtype='int64', shape=label_size) + in_data = paddle.static.data( + name="data", dtype='float32', shape=in_size + ) + label = paddle.static.data( + name='label', dtype='int64', shape=label_size + ) hidden = in_data for hidden_size in hidden_sizes: diff --git a/python/paddle/fluid/tests/unittests/test_fetch_lod_tensor_array.py b/python/paddle/fluid/tests/unittests/test_fetch_lod_tensor_array.py index c20f4ebcc24ac1e9358348b067c6a2cea926ec6b..4f1c26dfc4b647c5035f2d9738aaabf736afbcbf 100644 --- a/python/paddle/fluid/tests/unittests/test_fetch_lod_tensor_array.py +++ b/python/paddle/fluid/tests/unittests/test_fetch_lod_tensor_array.py @@ -27,8 +27,12 @@ class TestFetchLoDTensorArray(unittest.TestCase): with fluid.unique_name.guard(): with fluid.program_guard(main_program, startup_program): i = layers.zeros(shape=[1], dtype='int64') - img = fluid.data(name='image', shape=[-1, 784], dtype='float32') - label = fluid.data(name='label', shape=[-1, 1], dtype='int64') + img = paddle.static.data( + name='image', shape=[-1, 784], dtype='float32' + ) + label = paddle.static.data( + name='label', shape=[-1, 1], dtype='int64' + ) loss = simple_fc_net_with_inputs(img, label, class_num=10) loss = simple_fc_net() opt = fluid.optimizer.SGD(learning_rate=0.001) diff --git a/python/paddle/fluid/tests/unittests/test_fill_constant_op.py b/python/paddle/fluid/tests/unittests/test_fill_constant_op.py index 56a703cf08c748b2d3da3a0bfa1af07c75b9e3bf..4cd77d8968b4e67b424ed792677ea578a600c30b 100644 --- a/python/paddle/fluid/tests/unittests/test_fill_constant_op.py +++ b/python/paddle/fluid/tests/unittests/test_fill_constant_op.py @@ -285,10 +285,10 @@ class TestFillConstantAPI(unittest.TestCase): positive_2_int32 = paddle.tensor.fill_constant([1], "int32", 2) positive_2_int64 = paddle.tensor.fill_constant([1], "int64", 2) - shape_tensor_int32 = fluid.data( + shape_tensor_int32 = paddle.static.data( name="shape_tensor_int32", shape=[2], dtype="int32" ) - shape_tensor_int64 = fluid.data( + shape_tensor_int64 = paddle.static.data( name="shape_tensor_int64", shape=[2], dtype="int64" ) @@ -454,7 +454,7 @@ class TestFillConstantOpError(unittest.TestCase): # The shape dtype of fill_constant_op must be int32 or int64. 
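            # For contrast, a minimal passing sketch (the names shape_ok and
            # out_ok are illustrative, not part of the original test): an
            # integer-typed shape tensor is accepted by fill_constant.
            shape_ok = paddle.static.data(
                name="shape_ok", shape=[2], dtype="int32"
            )
            out_ok = paddle.tensor.fill_constant(
                shape=shape_ok, dtype="float32", value=1.0
            )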
def test_shape_tensor_dtype(): - shape = fluid.data( + shape = paddle.static.data( name="shape_tensor", shape=[2], dtype="float32" ) paddle.tensor.fill_constant( @@ -464,7 +464,7 @@ class TestFillConstantOpError(unittest.TestCase): self.assertRaises(TypeError, test_shape_tensor_dtype) def test_shape_tensor_list_dtype(): - shape = fluid.data( + shape = paddle.static.data( name="shape_tensor_list", shape=[1], dtype="bool" ) paddle.tensor.fill_constant( diff --git a/python/paddle/fluid/tests/unittests/test_fleet_pyramid_hash.py b/python/paddle/fluid/tests/unittests/test_fleet_pyramid_hash.py index e34b98747f79d858fa762578a1444da3b8e51d50..63c49f40123488278dff0fb43b3ac4220fca843b 100644 --- a/python/paddle/fluid/tests/unittests/test_fleet_pyramid_hash.py +++ b/python/paddle/fluid/tests/unittests/test_fleet_pyramid_hash.py @@ -30,7 +30,9 @@ class TestPyramidHashOpApi(unittest.TestCase): num_voc = 128 embed_dim = 64 x_shape, x_lod = [16, 10], [[3, 5, 2, 6]] - x = fluid.data(name='x', shape=x_shape, dtype='int32', lod_level=1) + x = paddle.static.data( + name='x', shape=x_shape, dtype='int32', lod_level=1 + ) hash_embd = fluid.contrib.layers.search_pyramid_hash( input=x, num_emb=embed_dim, diff --git a/python/paddle/fluid/tests/unittests/test_flip.py b/python/paddle/fluid/tests/unittests/test_flip.py index 687132a11a133fc931d11f2069c757c8961592c9..6ea01f51bf9135818d771e5723ae50f652d77edc 100644 --- a/python/paddle/fluid/tests/unittests/test_flip.py +++ b/python/paddle/fluid/tests/unittests/test_flip.py @@ -32,7 +32,9 @@ class TestFlipOp_API(unittest.TestCase): train_program = fluid.Program() with fluid.program_guard(train_program, startup_program): axis = [0] - input = fluid.data(name='input', dtype='float32', shape=[2, 3]) + input = paddle.static.data( + name='input', dtype='float32', shape=[2, 3] + ) output = paddle.flip(input, axis) output = paddle.flip(output, -1) output = output.flip(0) @@ -201,13 +203,17 @@ class TestFlipError(unittest.TestCase): paddle.enable_static() def test_axis_rank(): - input = fluid.data(name='input', dtype='float32', shape=[2, 3]) + input = paddle.static.data( + name='input', dtype='float32', shape=[2, 3] + ) output = paddle.flip(input, axis=[[0]]) self.assertRaises(TypeError, test_axis_rank) def test_axis_rank2(): - input = fluid.data(name='input', dtype='float32', shape=[2, 3]) + input = paddle.static.data( + name='input', dtype='float32', shape=[2, 3] + ) output = paddle.flip(input, axis=[[0, 0], [1, 1]]) self.assertRaises(TypeError, test_axis_rank2) diff --git a/python/paddle/fluid/tests/unittests/test_frac_api.py b/python/paddle/fluid/tests/unittests/test_frac_api.py index a8395e5d458815168090038651d1067945905eb6..c9c1feec9929fd587108750892c5587ce9d64d79 100644 --- a/python/paddle/fluid/tests/unittests/test_frac_api.py +++ b/python/paddle/fluid/tests/unittests/test_frac_api.py @@ -44,7 +44,7 @@ class TestFracAPI(unittest.TestCase): def test_api_static(self): paddle.enable_static() with program_guard(Program()): - input = fluid.data('X', self.x_np.shape, self.x_np.dtype) + input = paddle.static.data('X', self.x_np.shape, self.x_np.dtype) out = paddle.frac(input) place = fluid.CPUPlace() if fluid.core.is_compiled_with_cuda(): @@ -105,7 +105,7 @@ class TestFracError(unittest.TestCase): def test_static_error(self): paddle.enable_static() with paddle.static.program_guard(paddle.static.Program()): - x = paddle.fluid.data('X', [5, 5], 'bool') + x = paddle.static.data('X', [5, 5], 'bool') self.assertRaises(TypeError, paddle.frac, x) def test_dygraph_error(self): diff 
--git a/python/paddle/fluid/tests/unittests/test_frexp_api.py b/python/paddle/fluid/tests/unittests/test_frexp_api.py index 230afc993ae7f2a229e1cd9ba71c6120faf2e1e6..f14216ef718c6756b2d2e7690f7f53d61bf38d2e 100644 --- a/python/paddle/fluid/tests/unittests/test_frexp_api.py +++ b/python/paddle/fluid/tests/unittests/test_frexp_api.py @@ -39,7 +39,7 @@ class TestFrexpAPI(unittest.TestCase): # enable static graph mode paddle.enable_static() with paddle.static.program_guard(paddle.static.Program()): - input_data = paddle.fluid.data( + input_data = paddle.static.data( 'X', self.x_np.shape, self.x_np.dtype ) out = paddle.frexp(input_data) diff --git a/python/paddle/fluid/tests/unittests/test_full_like_op.py b/python/paddle/fluid/tests/unittests/test_full_like_op.py index 3a30c070338684361329e49b1eddc7d094e1fcd3..5c2dc0903cb35483f9c812022735270300df0a73 100644 --- a/python/paddle/fluid/tests/unittests/test_full_like_op.py +++ b/python/paddle/fluid/tests/unittests/test_full_like_op.py @@ -40,7 +40,7 @@ class TestFullOp(unittest.TestCase): train_program = Program() with program_guard(train_program, startup_program): fill_value = 2.0 - input = paddle.fluid.data( + input = paddle.static.data( name='input', dtype='float32', shape=[2, 3] ) output = paddle.full_like(input, fill_value) @@ -88,7 +88,7 @@ class TestFullOpError(unittest.TestCase): with program_guard(Program(), Program()): # for ci coverage - input_data = paddle.fluid.data( + input_data = paddle.static.data( name='input', dtype='float32', shape=[2, 3] ) output = paddle.full_like(input_data, 2.0) diff --git a/python/paddle/fluid/tests/unittests/test_full_op.py b/python/paddle/fluid/tests/unittests/test_full_op.py index 670cf2acb7675c9e38f279ceada408c025230300..0040b4ed289264e083602ef4258c2fbd530b8231 100644 --- a/python/paddle/fluid/tests/unittests/test_full_op.py +++ b/python/paddle/fluid/tests/unittests/test_full_op.py @@ -27,11 +27,11 @@ class TestFullAPI(unittest.TestCase): positive_2_int32 = paddle.tensor.fill_constant([1], "int32", 2) positive_2_int64 = paddle.tensor.fill_constant([1], "int64", 2) - shape_tensor_int32 = fluid.data( + shape_tensor_int32 = paddle.static.data( name="shape_tensor_int32", shape=[2], dtype="int32" ) - shape_tensor_int64 = fluid.data( + shape_tensor_int64 = paddle.static.data( name="shape_tensor_int64", shape=[2], dtype="int64" ) @@ -167,7 +167,7 @@ class TestFullOpError(unittest.TestCase): # The shape dtype of full op must be int32 or int64.
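            # A passing counterpart for reference (dim_ok and out_ok are
            # illustrative names): a scalar integer Tensor inside the shape
            # list is accepted by paddle.full.
            dim_ok = paddle.tensor.fill_constant([1], "int64", 2)
            out_ok = paddle.full(shape=[dim_ok, 3], dtype="float32", fill_value=1)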
def test_shape_tensor_dtype(): - shape = fluid.data( + shape = paddle.static.data( name="shape_tensor", shape=[2], dtype="float32" ) paddle.full(shape=shape, dtype="float32", fill_value=1) @@ -175,7 +175,7 @@ class TestFullOpError(unittest.TestCase): self.assertRaises(TypeError, test_shape_tensor_dtype) def test_shape_tensor_list_dtype(): - shape = fluid.data( + shape = paddle.static.data( name="shape_tensor_list", shape=[1], dtype="bool" ) paddle.full(shape=[shape, 2], dtype="float32", fill_value=1) diff --git a/python/paddle/fluid/tests/unittests/test_functional_conv2d.py b/python/paddle/fluid/tests/unittests/test_functional_conv2d.py index c78f6c35b06316f79124dc905bea7ef6d835e6da..2d8484cbee1e28bb62a4e37ca52cd3da2b0ed9f9 100644 --- a/python/paddle/fluid/tests/unittests/test_functional_conv2d.py +++ b/python/paddle/fluid/tests/unittests/test_functional_conv2d.py @@ -77,13 +77,13 @@ class TestFunctionalConv2D(TestCase): with fluid.unique_name.guard(): with fluid.program_guard(main, start): if self.channel_last: - x = fluid.data( + x = paddle.static.data( "input", (-1, -1, -1, self.in_channels), dtype=self.dtype, ) else: - x = fluid.data( + x = paddle.static.data( "input", (-1, self.in_channels, -1, -1), dtype=self.dtype, @@ -114,22 +114,24 @@ class TestFunctionalConv2D(TestCase): with fluid.unique_name.guard(): with fluid.program_guard(main, start): if self.channel_last: - x = x = fluid.data( + x = x = paddle.static.data( "input", (-1, -1, -1, self.in_channels), dtype=self.dtype, ) else: - x = fluid.data( + x = paddle.static.data( "input", (-1, self.in_channels, -1, -1), dtype=self.dtype, ) - weight = fluid.data( + weight = paddle.static.data( "weight", self.weight.shape, dtype=self.dtype ) if not self.no_bias: - bias = fluid.data("bias", self.bias.shape, dtype=self.dtype) + bias = paddle.static.data( + "bias", self.bias.shape, dtype=self.dtype + ) y = F.conv2d( x, weight, @@ -234,22 +236,24 @@ class TestFunctionalConv2DError(TestCase): with fluid.program_guard(main, start): self.channel_last = self.data_format == "NHWC" if self.channel_last: - x = x = fluid.data( + x = x = paddle.static.data( "input", (-1, -1, -1, self.in_channels), dtype=self.dtype, ) else: - x = fluid.data( + x = paddle.static.data( "input", (-1, self.in_channels, -1, -1), dtype=self.dtype, ) - weight = fluid.data( + weight = paddle.static.data( "weight", self.weight_shape, dtype=self.dtype ) if not self.no_bias: - bias = fluid.data("bias", self.bias_shape, dtype=self.dtype) + bias = paddle.static.data( + "bias", self.bias_shape, dtype=self.dtype + ) y = F.conv2d( x, weight, @@ -505,7 +509,9 @@ class TestFunctionalConv2DErrorCase12(TestCase): start = fluid.Program() with fluid.unique_name.guard(): with fluid.program_guard(main, start): - x = fluid.data("input", self.input.shape, dtype=paddle.float32) + x = paddle.static.data( + "input", self.input.shape, dtype=paddle.float32 + ) y = paddle.static.nn.conv2d( x, self.num_filters, diff --git a/python/paddle/fluid/tests/unittests/test_functional_conv2d_transpose.py b/python/paddle/fluid/tests/unittests/test_functional_conv2d_transpose.py index 2981748cf61782a8986806a58c2801701b8c72a9..dd708614b8818f322feb83cfb684ad321b6399ca 100644 --- a/python/paddle/fluid/tests/unittests/test_functional_conv2d_transpose.py +++ b/python/paddle/fluid/tests/unittests/test_functional_conv2d_transpose.py @@ -78,13 +78,13 @@ class TestFunctionalConv2D(TestCase): with fluid.unique_name.guard(): with fluid.program_guard(main, start): if self.channel_last: - x = fluid.data( + x = 
paddle.static.data( "input", (-1, -1, -1, self.in_channels), dtype=self.dtype, ) else: - x = fluid.data( + x = paddle.static.data( "input", (-1, self.in_channels, -1, -1), dtype=self.dtype, @@ -115,22 +115,24 @@ class TestFunctionalConv2D(TestCase): with fluid.unique_name.guard(): with fluid.program_guard(main, start): if self.channel_last: - x = x = fluid.data( + x = x = paddle.static.data( "input", (-1, -1, -1, self.in_channels), dtype=self.dtype, ) else: - x = fluid.data( + x = paddle.static.data( "input", (-1, self.in_channels, -1, -1), dtype=self.dtype, ) - weight = fluid.data( + weight = paddle.static.data( "weight", self.weight.shape, dtype=self.dtype ) if not self.no_bias: - bias = fluid.data("bias", self.bias.shape, dtype=self.dtype) + bias = paddle.static.data( + "bias", self.bias.shape, dtype=self.dtype + ) y = F.conv2d_transpose( x, weight, @@ -230,22 +232,24 @@ class TestFunctionalConv2DError(TestCase): with fluid.program_guard(main, start): self.channel_last = self.data_format == "NHWC" if self.channel_last: - x = x = fluid.data( + x = x = paddle.static.data( "input", (-1, -1, -1, self.in_channels), dtype=self.dtype, ) else: - x = fluid.data( + x = paddle.static.data( "input", (-1, self.in_channels, -1, -1), dtype=self.dtype, ) - weight = fluid.data( + weight = paddle.static.data( "weight", self.weight_shape, dtype=self.dtype ) if not self.no_bias: - bias = fluid.data("bias", self.bias_shape, dtype=self.dtype) + bias = paddle.static.data( + "bias", self.bias_shape, dtype=self.dtype + ) y = F.conv2d_transpose( x, weight, @@ -513,7 +517,9 @@ class TestFunctionalConv2DErrorCase10(TestCase): start = fluid.Program() with fluid.unique_name.guard(): with fluid.program_guard(main, start): - x = fluid.data("input", self.input.shape, dtype=paddle.float32) + x = paddle.static.data( + "input", self.input.shape, dtype=paddle.float32 + ) y = paddle.static.nn.conv2d( x, self.num_filters, diff --git a/python/paddle/fluid/tests/unittests/test_functional_conv3d.py b/python/paddle/fluid/tests/unittests/test_functional_conv3d.py index 5e867036dd477f4171e47a3f8e7fd26493b6b1c9..3f3415afdbf11be51bcc08cfe824491388fd74d0 100644 --- a/python/paddle/fluid/tests/unittests/test_functional_conv3d.py +++ b/python/paddle/fluid/tests/unittests/test_functional_conv3d.py @@ -77,13 +77,13 @@ class TestFunctionalConv3D(TestCase): with fluid.unique_name.guard(): with fluid.program_guard(main, start): if self.channel_last: - x = fluid.data( + x = paddle.static.data( "input", (-1, -1, -1, -1, self.in_channels), dtype=self.dtype, ) else: - x = fluid.data( + x = paddle.static.data( "input", (-1, self.in_channels, -1, -1, -1), dtype=self.dtype, @@ -114,22 +114,24 @@ class TestFunctionalConv3D(TestCase): with fluid.unique_name.guard(): with fluid.program_guard(main, start): if self.channel_last: - x = x = fluid.data( + x = x = paddle.static.data( "input", (-1, -1, -1, -1, self.in_channels), dtype=self.dtype, ) else: - x = fluid.data( + x = paddle.static.data( "input", (-1, self.in_channels, -1, -1, -1), dtype=self.dtype, ) - weight = fluid.data( + weight = paddle.static.data( "weight", self.weight.shape, dtype=self.dtype ) if not self.no_bias: - bias = fluid.data("bias", self.bias.shape, dtype=self.dtype) + bias = paddle.static.data( + "bias", self.bias.shape, dtype=self.dtype + ) y = F.conv3d( x, weight, @@ -234,22 +236,24 @@ class TestFunctionalConv3DError(TestCase): with fluid.program_guard(main, start): self.channel_last = self.data_format == "NDHWC" if self.channel_last: - x = x = fluid.data( + x = x = 
paddle.static.data( "input", (-1, -1, -1, -1, self.in_channels), dtype=self.dtype, ) else: - x = fluid.data( + x = paddle.static.data( "input", (-1, self.in_channels, -1, -1, -1), dtype=self.dtype, ) - weight = fluid.data( + weight = paddle.static.data( "weight", self.weight_shape, dtype=self.dtype ) if not self.no_bias: - bias = fluid.data("bias", self.bias_shape, dtype=self.dtype) + bias = paddle.static.data( + "bias", self.bias_shape, dtype=self.dtype + ) y = F.conv3d( x, weight, @@ -480,7 +484,9 @@ class TestFunctionalConv3DErrorCase11(TestCase): start = fluid.Program() with fluid.unique_name.guard(): with fluid.program_guard(main, start): - x = fluid.data("input", self.input.shape, dtype=paddle.float32) + x = paddle.static.data( + "input", self.input.shape, dtype=paddle.float32 + ) y = paddle.static.nn.conv3d( x, self.num_filters, diff --git a/python/paddle/fluid/tests/unittests/test_functional_conv3d_transpose.py b/python/paddle/fluid/tests/unittests/test_functional_conv3d_transpose.py index 7a8549b1240aacdaaf64b1fc034d0f8a7c4802b0..22aaeb02a92f5c4d9f11beea8baefec652ef530f 100644 --- a/python/paddle/fluid/tests/unittests/test_functional_conv3d_transpose.py +++ b/python/paddle/fluid/tests/unittests/test_functional_conv3d_transpose.py @@ -78,13 +78,13 @@ class TestFunctionalConv3DTranspose(TestCase): with fluid.unique_name.guard(): with fluid.program_guard(main, start): if self.channel_last: - x = fluid.data( + x = paddle.static.data( "input", (-1, -1, -1, -1, self.in_channels), dtype=self.dtype, ) else: - x = fluid.data( + x = paddle.static.data( "input", (-1, self.in_channels, -1, -1, -1), dtype=self.dtype, @@ -116,22 +116,24 @@ class TestFunctionalConv3DTranspose(TestCase): with fluid.unique_name.guard(): with fluid.program_guard(main, start): if self.channel_last: - x = x = fluid.data( + x = x = paddle.static.data( "input", (-1, -1, -1, -1, self.in_channels), dtype=self.dtype, ) else: - x = fluid.data( + x = paddle.static.data( "input", (-1, self.in_channels, -1, -1, -1), dtype=self.dtype, ) - weight = fluid.data( + weight = paddle.static.data( "weight", self.weight.shape, dtype=self.dtype ) if not self.no_bias: - bias = fluid.data("bias", self.bias.shape, dtype=self.dtype) + bias = paddle.static.data( + "bias", self.bias.shape, dtype=self.dtype + ) y = F.conv3d_transpose( x, weight, @@ -235,22 +237,24 @@ class TestFunctionalConv3DTransposeError(TestCase): with fluid.program_guard(main, start): self.channel_last = self.data_format == "NDHWC" if self.channel_last: - x = x = fluid.data( + x = x = paddle.static.data( "input", (-1, -1, -1, -1, self.in_channels), dtype=self.dtype, ) else: - x = fluid.data( + x = paddle.static.data( "input", (-1, self.in_channels, -1, -1, -1), dtype=self.dtype, ) - weight = fluid.data( + weight = paddle.static.data( "weight", self.weight_shape, dtype=self.dtype ) if not self.no_bias: - bias = fluid.data("bias", self.bias_shape, dtype=self.dtype) + bias = paddle.static.data( + "bias", self.bias_shape, dtype=self.dtype + ) y = F.conv3d_transpose( x, weight, @@ -538,7 +542,9 @@ class TestFunctionalConv3DTransposeErrorCase10(TestCase): start = fluid.Program() with fluid.unique_name.guard(): with fluid.program_guard(main, start): - x = fluid.data("input", self.input.shape, dtype=paddle.float32) + x = paddle.static.data( + "input", self.input.shape, dtype=paddle.float32 + ) y = paddle.static.nn.conv3d_transpose( x, self.num_filters, diff --git a/python/paddle/fluid/tests/unittests/test_fuse_elewise_add_act_pass.py 
b/python/paddle/fluid/tests/unittests/test_fuse_elewise_add_act_pass.py index c5f7ef3dfb16bdd2d055878fcd2877b61141a5d1..9bb0031c48f8f077a278c83da6e597297bf48f3b 100644 --- a/python/paddle/fluid/tests/unittests/test_fuse_elewise_add_act_pass.py +++ b/python/paddle/fluid/tests/unittests/test_fuse_elewise_add_act_pass.py @@ -95,8 +95,8 @@ class TestMNIST(TestParallelExecutorBase): class TestFuseActElewiseAddInplaceGradPass(unittest.TestCase): def build_program(self, main_program, startup_program): with paddle.static.program_guard(main_program, startup_program): - X = fluid.data(name="X", shape=[3, 3], dtype='float32') - Y = fluid.data(name="Y", shape=[3, 3], dtype='float32') + X = paddle.static.data(name="X", shape=[3, 3], dtype='float32') + Y = paddle.static.data(name="Y", shape=[3, 3], dtype='float32') Out1 = X * 5 Out2 = F.relu(Out1) prediction = paddle.tensor.math._add_with_axis(Y, Out2, axis=1) diff --git a/python/paddle/fluid/tests/unittests/test_fused_multi_transformer_op.py b/python/paddle/fluid/tests/unittests/test_fused_multi_transformer_op.py index d6a7f73a9259f0548271ef6f56fa2f2ab2045538..523413865ff0c4be5a32ff36c1cb68a4d3a8256b 100644 --- a/python/paddle/fluid/tests/unittests/test_fused_multi_transformer_op.py +++ b/python/paddle/fluid/tests/unittests/test_fused_multi_transformer_op.py @@ -797,7 +797,7 @@ class TestFusedMultiTransformerOp(OpTest): def GetFusedMultiTransformerOutStatic(self): paddle.enable_static() - x = paddle.fluid.data('x', self.query.shape, self.query.dtype) + x = paddle.static.data('x', self.query.shape, self.query.dtype) cache_kvs, cache_kv = None, None cache_kvs_feed = None time_step = None @@ -809,7 +809,7 @@ class TestFusedMultiTransformerOp(OpTest): rotary_embs = None if self.rotary_emb_dims > 0: - rotary_embs = paddle.fluid.data( + rotary_embs = paddle.static.data( 'rotary_embs', self.rotary_embs.shape, self.rotary_embs.dtype ) @@ -867,7 +867,7 @@ class TestFusedMultiTransformerOp(OpTest): time_step_feed = self.cache_length if self.remove_padding: - seq_lens = paddle.fluid.data( + seq_lens = paddle.static.data( 'seq_lens', self.seq_lens.shape, self.seq_lens.dtype ) seq_lens_feed = self.seq_lens diff --git a/python/paddle/fluid/tests/unittests/test_gather_nd_op.py b/python/paddle/fluid/tests/unittests/test_gather_nd_op.py index d670822e940e10881fe24fe0ec959edbeb520c5c..ac6f1a32ccac37046b9b3da08a23b17de0b98bcd 100644 --- a/python/paddle/fluid/tests/unittests/test_gather_nd_op.py +++ b/python/paddle/fluid/tests/unittests/test_gather_nd_op.py @@ -237,9 +237,9 @@ class TestGatherNdError(unittest.TestCase): ): shape = [8, 9, 6] - x = paddle.fluid.data(shape=shape, dtype='float32', name='x') - index = paddle.fluid.data(shape=shape, dtype='bool', name='index') - index_float = paddle.fluid.data( + x = paddle.static.data(shape=shape, dtype='float32', name='x') + index = paddle.static.data(shape=shape, dtype='bool', name='index') + index_float = paddle.static.data( shape=shape, dtype='float32', name='index_float' ) np_x = np.random.random(shape).astype('float32') diff --git a/python/paddle/fluid/tests/unittests/test_gather_op.py b/python/paddle/fluid/tests/unittests/test_gather_op.py index 538cbb6a54fc9d95582c6c5496d0a69c41a540ab..ec3c400d9728a7fb0a37eaa050543e04b1b943af 100644 --- a/python/paddle/fluid/tests/unittests/test_gather_op.py +++ b/python/paddle/fluid/tests/unittests/test_gather_op.py @@ -246,9 +246,9 @@ class API_TestGather(unittest.TestCase): with paddle.static.program_guard( paddle.static.Program(), paddle.static.Program() ): - x = 
paddle.fluid.data('x', shape=[-1, 2], dtype='float64') - index = paddle.fluid.data('index', shape=[-1, 1], dtype='int32') - axis = paddle.fluid.data('axis', shape=[1], dtype='int32') + x = paddle.static.data('x', shape=[-1, 2], dtype='float64') + index = paddle.static.data('index', shape=[-1, 1], dtype='int32') + axis = paddle.static.data('axis', shape=[1], dtype='int32') out = paddle.gather(x, index, axis) place = paddle.CPUPlace() exe = paddle.static.Executor(place) @@ -340,10 +340,10 @@ class TestGathertError(unittest.TestCase): ): shape = [8, 9, 6] - x = paddle.fluid.data(shape=shape, dtype='int8', name='x') - axis = paddle.fluid.data(shape=[1], dtype='float32', name='axis') - index = paddle.fluid.data(shape=shape, dtype='int32', name='index') - index_float = paddle.fluid.data( + x = paddle.static.data(shape=shape, dtype='int8', name='x') + axis = paddle.static.data(shape=[1], dtype='float32', name='axis') + index = paddle.static.data(shape=shape, dtype='int32', name='index') + index_float = paddle.static.data( shape=shape, dtype='float32', name='index_float' ) @@ -371,9 +371,9 @@ class TestGathertError(unittest.TestCase): with fluid.program_guard(fluid.Program(), fluid.Program()): shape = [8, 9, 6] - x = fluid.data(shape=shape, dtype='int8', name='x') - index = fluid.data(shape=shape, dtype='int32', name='mask') - index_float = fluid.data( + x = paddle.static.data(shape=shape, dtype='int8', name='x') + index = paddle.static.data(shape=shape, dtype='int32', name='mask') + index_float = paddle.static.data( shape=shape, dtype='float32', name='index_float' ) @@ -393,10 +393,10 @@ class TestGathertError(unittest.TestCase): ): shape = [8, 9, 6] - x = paddle.fluid.data(shape=shape, dtype='int32', name='x') - axis = paddle.fluid.data(shape=[1], dtype='int32', name='axis') - index = paddle.fluid.data(shape=shape, dtype='int32', name='index') - index_float = paddle.fluid.data( + x = paddle.static.data(shape=shape, dtype='int32', name='x') + axis = paddle.static.data(shape=[1], dtype='int32', name='axis') + index = paddle.static.data(shape=shape, dtype='int32', name='index') + index_float = paddle.static.data( shape=shape, dtype='float32', name='index_float' ) diff --git a/python/paddle/fluid/tests/unittests/test_gaussian_random_op.py b/python/paddle/fluid/tests/unittests/test_gaussian_random_op.py index 44a1bf1e038e478d9558acbe45bea18707e67adf..6eec8d02d35d727f5772f8f67efed20c298f64aa 100644 --- a/python/paddle/fluid/tests/unittests/test_gaussian_random_op.py +++ b/python/paddle/fluid/tests/unittests/test_gaussian_random_op.py @@ -212,11 +212,11 @@ class TestGaussianRandomAPI(unittest.TestCase): positive_2_int32 = paddle.tensor.fill_constant([1], "int32", 2000) positive_2_int64 = paddle.tensor.fill_constant([1], "int64", 500) - shape_tensor_int32 = fluid.data( + shape_tensor_int32 = paddle.static.data( name="shape_tensor_int32", shape=[2], dtype="int32" ) - shape_tensor_int64 = fluid.data( + shape_tensor_int64 = paddle.static.data( name="shape_tensor_int64", shape=[2], dtype="int64" ) diff --git a/python/paddle/fluid/tests/unittests/test_gcd.py b/python/paddle/fluid/tests/unittests/test_gcd.py index 6b600d9c6f12e0f9786e869c239f23661b8c9b2a..7272cf3cff023a87df39198da062b6d7f443db91 100644 --- a/python/paddle/fluid/tests/unittests/test_gcd.py +++ b/python/paddle/fluid/tests/unittests/test_gcd.py @@ -34,8 +34,12 @@ class TestGcdAPI(unittest.TestCase): startup_program = fluid.Program() train_program = fluid.Program() with fluid.program_guard(startup_program, train_program): - x = 
fluid.data(name='input1', dtype='int32', shape=self.x_shape) - y = fluid.data(name='input2', dtype='int32', shape=self.y_shape) + x = paddle.static.data( + name='input1', dtype='int32', shape=self.x_shape + ) + y = paddle.static.data( + name='input2', dtype='int32', shape=self.y_shape + ) out = paddle.gcd(x, y) place = ( diff --git a/python/paddle/fluid/tests/unittests/test_get_tensor_from_selected_rows_op.py b/python/paddle/fluid/tests/unittests/test_get_tensor_from_selected_rows_op.py index d1e3e6df335b002a64d4dc33e5de001dab8c5546..8f40e391a71afe4b953cc42663ba54bcde8ffa09 100644 --- a/python/paddle/fluid/tests/unittests/test_get_tensor_from_selected_rows_op.py +++ b/python/paddle/fluid/tests/unittests/test_get_tensor_from_selected_rows_op.py @@ -16,7 +16,7 @@ import unittest import numpy as np -import paddle.fluid as fluid +import paddle import paddle.fluid.core as core from paddle.fluid import Program, program_guard from paddle.fluid.op import Operator @@ -28,7 +28,7 @@ class TestGetTensorFromSelectedRowsError(unittest.TestCase): def test_errors(self): with program_guard(Program()): - x_var = fluid.data('X', [2, 3]) + x_var = paddle.static.data('X', [2, 3]) x_data = np.random.random((2, 4)).astype("float32") def test_Variable(): diff --git a/python/paddle/fluid/tests/unittests/test_gradient_clip.py b/python/paddle/fluid/tests/unittests/test_gradient_clip.py index e67f0cf282c752ef2b60a97a7c3de4dfd8ad1d80..59cc1f5e24ba6176de62ab3c240d37577ad4c921 100644 --- a/python/paddle/fluid/tests/unittests/test_gradient_clip.py +++ b/python/paddle/fluid/tests/unittests/test_gradient_clip.py @@ -80,8 +80,10 @@ class TestGradientClip(unittest.TestCase): with fluid.program_guard( main_program=prog, startup_program=startup_program ): - image = fluid.data(name="a", shape=[-1, 784], dtype='float32') - label = fluid.data(name="b", shape=[-1, 1], dtype='int64') + image = paddle.static.data( + name="a", shape=[-1, 784], dtype='float32' + ) + label = paddle.static.data(name="b", shape=[-1, 1], dtype='int64') if dtype != 'float32': image_cast = paddle.cast(image, dtype) hidden = paddle.static.nn.fc( @@ -134,10 +136,12 @@ class TestGradientClip(unittest.TestCase): with fluid.program_guard( main_program=prog, startup_program=startup_program ): - data = fluid.data( + data = paddle.static.data( name="words", shape=[-1, 1], dtype="int64", lod_level=1 ) - label = fluid.data(name="label", shape=[-1, 1], dtype="int64") + label = paddle.static.data( + name="label", shape=[-1, 1], dtype="int64" + ) cost = bow_net(data, label, self.word_dict_len) self.backward_and_optimize(cost) diff --git a/python/paddle/fluid/tests/unittests/test_grid_sample_function.py b/python/paddle/fluid/tests/unittests/test_grid_sample_function.py index 3189fd6e11f7664ce50cb565ae252d2cf603c0ca..4f8042c8bad663a24ee082806acbcbcac2a3912f 100644 --- a/python/paddle/fluid/tests/unittests/test_grid_sample_function.py +++ b/python/paddle/fluid/tests/unittests/test_grid_sample_function.py @@ -50,8 +50,10 @@ class GridSampleTestCase(unittest.TestCase): start = fluid.Program() with fluid.unique_name.guard(): with fluid.program_guard(main, start): - x = fluid.data("x", self.x_shape, dtype=self.dtype) - grid = fluid.data("grid", self.grid_shape, dtype=self.dtype) + x = paddle.static.data("x", self.x_shape, dtype=self.dtype) + grid = paddle.static.data( + "grid", self.grid_shape, dtype=self.dtype + ) y_var = F.grid_sample( x, grid, diff --git a/python/paddle/fluid/tests/unittests/test_group_norm_op.py b/python/paddle/fluid/tests/unittests/test_group_norm_op.py 
index c2ca5e7976bfd432368824260266ed827300f22e..06e4ef863a75608528a585b65ed9f0cf4f499849 100644 --- a/python/paddle/fluid/tests/unittests/test_group_norm_op.py +++ b/python/paddle/fluid/tests/unittests/test_group_norm_op.py @@ -364,11 +364,15 @@ class TestGroupNormAPI_With_NHWC(unittest.TestCase): paddle.enable_static() def test_case1(self): - data1 = fluid.data(name='data1', shape=[None, 3, 3, 4], dtype='float64') + data1 = paddle.static.data( + name='data1', shape=[None, 3, 3, 4], dtype='float64' + ) out1 = paddle.static.nn.group_norm( input=data1, groups=2, data_layout="NHWC" ) - data2 = fluid.data(name='data2', shape=[None, 4, 3, 3], dtype='float64') + data2 = paddle.static.data( + name='data2', shape=[None, 4, 3, 3], dtype='float64' + ) out2 = paddle.static.nn.group_norm( input=data2, groups=2, data_layout="NCHW" ) @@ -399,7 +403,9 @@ class TestGroupNormAPI_With_NHWC(unittest.TestCase): class TestGroupNormException(unittest.TestCase): # data_layout is not NHWC or NCHW def test_exception(self): - data = fluid.data(name='data', shape=[None, 3, 3, 4], dtype="float64") + data = paddle.static.data( + name='data', shape=[None, 3, 3, 4], dtype="float64" + ) def attr_data_format(): out = paddle.static.nn.group_norm( diff --git a/python/paddle/fluid/tests/unittests/test_gumbel_softmax_op.py b/python/paddle/fluid/tests/unittests/test_gumbel_softmax_op.py index 0a2725c1e3e0812e7024e3b7e60389697febb4f7..25ac2d822b09b69fbba0dfe3626aad6339e7b509 100644 --- a/python/paddle/fluid/tests/unittests/test_gumbel_softmax_op.py +++ b/python/paddle/fluid/tests/unittests/test_gumbel_softmax_op.py @@ -235,7 +235,7 @@ class TestGumbelSoftmaxAPI(unittest.TestCase): def test_check_api(self): # test static api with paddle.static.program_guard(paddle.static.Program()): - x = paddle.fluid.data(name='x', shape=self.x_shape) + x = paddle.static.data(name='x', shape=self.x_shape) y = paddle.nn.functional.gumbel_softmax(x, hard=True) exe = paddle.static.Executor(self.place) out = exe.run(feed={'x': self.x}, fetch_list=[y]) @@ -284,7 +284,7 @@ class TestGumbelSoftmaxOpError(unittest.TestCase): def test_dtype(): with paddle.static.program_guard(paddle.static.Program()): - x_int32 = paddle.fluid.data( + x_int32 = paddle.static.data( name='x_int32', shape=[2, 3], dtype='int32' ) paddle.nn.functional.gumbel_softmax(x_int32) diff --git a/python/paddle/fluid/tests/unittests/test_histogram_op.py b/python/paddle/fluid/tests/unittests/test_histogram_op.py index 5d4ae29ba0cb69316ad9a7d94a80d26d406c1d0c..9d437e447ff31db879e5b990e8e4a88617401cfa 100644 --- a/python/paddle/fluid/tests/unittests/test_histogram_op.py +++ b/python/paddle/fluid/tests/unittests/test_histogram_op.py @@ -29,7 +29,9 @@ class TestHistogramOpAPI(unittest.TestCase): startup_program = fluid.Program() train_program = fluid.Program() with fluid.program_guard(train_program, startup_program): - inputs = fluid.data(name='input', dtype='int64', shape=[2, 3]) + inputs = paddle.static.data( + name='input', dtype='int64', shape=[2, 3] + ) output = paddle.histogram(inputs, bins=5, min=1, max=5) place = fluid.CPUPlace() if fluid.core.is_compiled_with_cuda(): @@ -121,7 +123,9 @@ class TestHistogramOpError(unittest.TestCase): TypeError, paddle.histogram, 1, bins=5, min=1, max=5 ) # The input type must be 'int32', 'int64', 'float32', 'float64' - x_bool = fluid.data(name='x_bool', shape=[4, 3], dtype='bool') + x_bool = paddle.static.data( + name='x_bool', shape=[4, 3], dtype='bool' + ) self.assertRaises( TypeError, paddle.histogram, x_bool, bins=5, min=1, max=5 ) diff --git 
a/python/paddle/fluid/tests/unittests/test_hsigmoid_op.py b/python/paddle/fluid/tests/unittests/test_hsigmoid_op.py index abf0ba0ac2650d8283b2979a81d7fac06f811cb7..23b307016409b0e1974a28106d102f925e6b57fd 100644 --- a/python/paddle/fluid/tests/unittests/test_hsigmoid_op.py +++ b/python/paddle/fluid/tests/unittests/test_hsigmoid_op.py @@ -628,13 +628,13 @@ class TestHSigmoidLossAPI(unittest.TestCase): train_program = fluid.Program() startup_program = fluid.Program() with fluid.program_guard(train_program, startup_program): - x = fluid.data('x', [-1, self.feature_size]) - labels = fluid.data('labels', [-1, 1], 'int64') + x = paddle.static.data('x', [-1, self.feature_size]) + labels = paddle.static.data('labels', [-1, 1], 'int64') path_table = None path_code = None if self.is_custom: - path_table = fluid.data('path_table', [-1, -1], 'int64') - path_code = fluid.data('path_code', [-1, -1], 'int64') + path_table = paddle.static.data('path_table', [-1, -1], 'int64') + path_code = paddle.static.data('path_code', [-1, -1], 'int64') weight_attr = paddle.nn.initializer.Assign(self.weight_np) bias_attr = paddle.nn.initializer.Assign(self.bias_np) loss = paddle.nn.HSigmoidLoss( diff --git a/python/paddle/fluid/tests/unittests/test_identity_loss_op.py b/python/paddle/fluid/tests/unittests/test_identity_loss_op.py index d9b8ee8fad32812adf92194fec493fcddba361cc..576da1dbd99b7487933304d9789806695e480b2a 100644 --- a/python/paddle/fluid/tests/unittests/test_identity_loss_op.py +++ b/python/paddle/fluid/tests/unittests/test_identity_loss_op.py @@ -127,7 +127,7 @@ class TestIdentityLossAPI(unittest.TestCase): def test_api_static(self): paddle.enable_static() with paddle.static.program_guard(paddle.static.Program()): - x = paddle.fluid.data('X', self.x_shape) + x = paddle.static.data('X', self.x_shape) out1 = paddle.incubate.identity_loss(x) out2 = paddle.incubate.identity_loss(x, reduction=0) out3 = paddle.incubate.identity_loss(x, reduction=1) @@ -174,7 +174,7 @@ class TestIdentityLossAPI(unittest.TestCase): ) paddle.enable_static() with paddle.static.program_guard(paddle.static.Program()): - x = paddle.fluid.data('X', [10, 12], 'int32') + x = paddle.static.data('X', [10, 12], 'int32') self.assertRaises(TypeError, paddle.incubate.identity_loss, x) diff --git a/python/paddle/fluid/tests/unittests/test_identity_op.py b/python/paddle/fluid/tests/unittests/test_identity_op.py index 311a609dd5146d14d27752cb9223abd2588f4b56..ea0e7d8938152b9897d22321496e254e4961b1e3 100644 --- a/python/paddle/fluid/tests/unittests/test_identity_op.py +++ b/python/paddle/fluid/tests/unittests/test_identity_op.py @@ -28,7 +28,7 @@ class TestIdentityAPI(unittest.TestCase): def test_api_static(self): paddle.enable_static() with paddle.static.program_guard(paddle.static.Program()): - x = paddle.fluid.data('X', self.shape) + x = paddle.static.data('X', self.shape) id_layer = paddle.nn.Identity() out = id_layer(x) exe = paddle.static.Executor(self.place) diff --git a/python/paddle/fluid/tests/unittests/test_imperative_load_static_param.py b/python/paddle/fluid/tests/unittests/test_imperative_load_static_param.py index acb39868e86be2300c4f830b43811804f09d2d22..86f36d2c05bbad5ca8142d7b8f5934bedcf7f44c 100644 --- a/python/paddle/fluid/tests/unittests/test_imperative_load_static_param.py +++ b/python/paddle/fluid/tests/unittests/test_imperative_load_static_param.py @@ -30,8 +30,8 @@ class TestDygraphLoadStatic(unittest.TestCase): def testLoadStaticModel(self): # static graph mode temp_dir = tempfile.TemporaryDirectory() - a = 
fluid.data(name="a", shape=[10, 10]) - conv_in = fluid.data(name="conv_in", shape=[None, 10, 10, 10]) + a = paddle.static.data(name="a", shape=[10, 10]) + conv_in = paddle.static.data(name="conv_in", shape=[None, 10, 10, 10]) fc_out1 = paddle.static.nn.fc(a, 10) fc_out2 = paddle.static.nn.fc(a, 20) @@ -43,7 +43,7 @@ class TestDygraphLoadStatic(unittest.TestCase): conv_in, num_filters=10, filter_size=5, act="relu" ) - conv3d_in = fluid.data( + conv3d_in = paddle.static.data( name='conv3d_in', shape=[None, 3, 12, 32, 32], dtype='float32' ) conv3d_out_1 = paddle.static.nn.conv3d( @@ -53,37 +53,43 @@ class TestDygraphLoadStatic(unittest.TestCase): input=conv3d_in, num_filters=2, filter_size=3, act="relu" ) - batchnorm_in = fluid.data( + batchnorm_in = paddle.static.data( name="batchnorm_in", shape=[None, 10], dtype='float32' ) batchnorm_out_1 = paddle.static.nn.batch_norm(batchnorm_in) batchnorm_out_2 = paddle.static.nn.batch_norm(batchnorm_in) - emb_in = fluid.data(name='emb_in', shape=[None, 10], dtype='int64') + emb_in = paddle.static.data( + name='emb_in', shape=[None, 10], dtype='int64' + ) emb_out_1 = paddle.static.nn.embedding(emb_in, [1000, 100]) emb_out_2 = paddle.static.nn.embedding(emb_in, [2000, 200]) - layernorm = fluid.data(name="ln", shape=[None, 10], dtype='float32') + layernorm = paddle.static.data( + name="ln", shape=[None, 10], dtype='float32' + ) layernorm_1 = paddle.static.nn.layer_norm(layernorm) layernorm_2 = paddle.static.nn.layer_norm(layernorm) - nce_in = fluid.data(name="nce_in", shape=[None, 100], dtype='float32') - nce_label = fluid.data( + nce_in = paddle.static.data( + name="nce_in", shape=[None, 100], dtype='float32' + ) + nce_label = paddle.static.data( name="nce_label", shape=[None, 10], dtype='int64' ) nce_out_1 = paddle.static.nn.nce(nce_in, nce_label, 10000) nce_out_2 = paddle.static.nn.nce(nce_in, nce_label, 10000) - prelu_in = fluid.data( + prelu_in = paddle.static.data( name="prelu_in", shape=[None, 5, 10, 10], dtype='float32' ) prelu_out_1 = paddle.static.nn.prelu(prelu_in, "channel") prelu_out_2 = paddle.static.nn.prelu(prelu_in, "channel") - bilinear_tensor_pro_x = fluid.data( + bilinear_tensor_pro_x = paddle.static.data( "t1", shape=[None, 5], dtype="float32" ) - bilinear_tensor_pro_y = fluid.data( + bilinear_tensor_pro_y = paddle.static.data( "t2", shape=[None, 4], dtype="float32" ) @@ -98,7 +104,7 @@ class TestDygraphLoadStatic(unittest.TestCase): ) ) - conv2d_trans_in = fluid.data( + conv2d_trans_in = paddle.static.data( name="conv2d_trans_in", shape=[None, 10, 10, 10] ) @@ -109,7 +115,7 @@ class TestDygraphLoadStatic(unittest.TestCase): conv2d_trans_in, num_filters=10, filter_size=5, act="relu" ) - conv3d_trans_in = fluid.data( + conv3d_trans_in = paddle.static.data( name='conv3d_trans_in', shape=[None, 3, 12, 32, 32], dtype='float32' ) conv3d_trans_out_1 = paddle.static.nn.conv3d_transpose( @@ -119,7 +125,7 @@ class TestDygraphLoadStatic(unittest.TestCase): input=conv3d_trans_in, num_filters=2, filter_size=3, act="relu" ) - groupnorm_in = fluid.data( + groupnorm_in = paddle.static.data( name='groupnorm_in', shape=[None, 8, 32, 32], dtype='float32' ) groupnorm_out1 = paddle.static.nn.group_norm( @@ -129,7 +135,7 @@ class TestDygraphLoadStatic(unittest.TestCase): input=groupnorm_in, groups=4, param_attr=True, bias_attr=True ) ''' - spec_norm = fluid.data(name='spec_norm', shape=[2, 8, 32, 32], dtype='float32') + spec_norm = paddle.static.data(name='spec_norm', shape=[2, 8, 32, 32], dtype='float32') spe_norm_out_1 = 
paddle.static.nn.spectral_norm(weight=spec_norm, dim=1, power_iters=2) spe_norm_out_2 = paddle.static.nn.spectral_norm(weight=spec_norm, dim=1, power_iters=2) ''' diff --git a/python/paddle/fluid/tests/unittests/test_imperative_star_gan_with_gradient_penalty.py b/python/paddle/fluid/tests/unittests/test_imperative_star_gan_with_gradient_penalty.py index 9687f0fe7a42447974bc03aa3e9cb8b30529ae2b..7192dfcf288744ee9b3eac9b49320ba8b9aff4c6 100644 --- a/python/paddle/fluid/tests/unittests/test_imperative_star_gan_with_gradient_penalty.py +++ b/python/paddle/fluid/tests/unittests/test_imperative_star_gan_with_gradient_penalty.py @@ -561,15 +561,15 @@ class StaticGraphTrainModel: self.cfg = cfg def create_data_layer(): - image_real = fluid.data( + image_real = paddle.static.data( shape=[None, 3, cfg.image_size, cfg.image_size], dtype='float32', name='image_real', ) - label_org = fluid.data( + label_org = paddle.static.data( shape=[None, cfg.c_dim], dtype='float32', name='label_org' ) - label_trg = fluid.data( + label_trg = paddle.static.data( shape=[None, cfg.c_dim], dtype='float32', name='label_trg' ) return image_real, label_org, label_trg diff --git a/python/paddle/fluid/tests/unittests/test_index_sample_op.py b/python/paddle/fluid/tests/unittests/test_index_sample_op.py index d51474e97990ba5db5d581660b48258e43936d21..5d883f8bb8481caa112b7a96e5912fd78dbc558e 100755 --- a/python/paddle/fluid/tests/unittests/test_index_sample_op.py +++ b/python/paddle/fluid/tests/unittests/test_index_sample_op.py @@ -136,8 +136,8 @@ class TestIndexSampleShape(unittest.TestCase): low=0, high=x_shape[1], size=index_shape ).astype(index_type) - x = fluid.data(name='x', shape=[-1, 5], dtype='float64') - index = fluid.data(name='index', shape=[-1, 3], dtype='int32') + x = paddle.static.data(name='x', shape=[-1, 5], dtype='float64') + index = paddle.static.data(name='index', shape=[-1, 3], dtype='int32') output = paddle.index_sample(x=x, index=index) place = fluid.CPUPlace() diff --git a/python/paddle/fluid/tests/unittests/test_inference_api.py b/python/paddle/fluid/tests/unittests/test_inference_api.py index be470180c2f85f81e229afde221324aa03227395..8b126d501a4cb27077849e95e2ae3b8db3d5f0e9 100644 --- a/python/paddle/fluid/tests/unittests/test_inference_api.py +++ b/python/paddle/fluid/tests/unittests/test_inference_api.py @@ -79,7 +79,9 @@ def get_sample_model(): main_program = fluid.Program() startup_program = fluid.Program() with fluid.program_guard(main_program, startup_program): - data = fluid.data(name="data", shape=[-1, 6, 64, 64], dtype="float32") + data = paddle.static.data( + name="data", shape=[-1, 6, 64, 64], dtype="float32" + ) conv_out = paddle.static.nn.conv2d( input=data, num_filters=3, diff --git a/python/paddle/fluid/tests/unittests/test_initializer.py b/python/paddle/fluid/tests/unittests/test_initializer.py index f87e62cb020981413b5bb13ef6746a76ab23ea64..7e90a3665549a5f79c4637d139bf899b9e333bb2 100644 --- a/python/paddle/fluid/tests/unittests/test_initializer.py +++ b/python/paddle/fluid/tests/unittests/test_initializer.py @@ -668,7 +668,7 @@ class TestSetGlobalInitializer(unittest.TestCase): paddle.nn.initializer.Uniform(low=-0.5, high=0.5) ) with fluid.program_guard(main_prog, startup_prog): - x = fluid.data(name="x", shape=[1, 3, 32, 32]) + x = paddle.static.data(name="x", shape=[1, 3, 32, 32]) # default initializer of param in layers.conv2d is NormalInitializer conv = paddle.static.nn.conv2d(x, 5, 3) @@ -696,7 +696,7 @@
             bias_init=paddle.nn.initializer.Normal(0.0, 2.0),
         )
         with fluid.program_guard(main_prog, startup_prog):
-            x = fluid.data(name="x", shape=[1, 3, 32, 32])
+            x = paddle.static.data(name="x", shape=[1, 3, 32, 32])
             # default initilizer of bias in layers.conv2d is ConstantInitializer
             conv = paddle.static.nn.conv2d(x, 5, 3)
diff --git a/python/paddle/fluid/tests/unittests/test_inplace_addto_strategy.py b/python/paddle/fluid/tests/unittests/test_inplace_addto_strategy.py
index d1fc32cce1a3f6ee5d8b5b1be84b72834c621c7e..4408c50cc5c57bc92a568a728f0a570bc0318048 100644
--- a/python/paddle/fluid/tests/unittests/test_inplace_addto_strategy.py
+++ b/python/paddle/fluid/tests/unittests/test_inplace_addto_strategy.py
@@ -57,7 +57,7 @@ def create_program(data_format="NCHW"):
     main = fluid.Program()
     startup = fluid.Program()
     with fluid.program_guard(main, startup):
-        x = fluid.data(name='img', shape=[-1, 3, 224, 224])
+        x = paddle.static.data(name='img', shape=[-1, 3, 224, 224])
         x.stop_gradient = False
         if data_format == "NHWC":
             x = paddle.transpose(x, [0, 2, 3, 1])
diff --git a/python/paddle/fluid/tests/unittests/test_instance_norm_op_v2.py b/python/paddle/fluid/tests/unittests/test_instance_norm_op_v2.py
index ee5479d02f3c254a4a9909f6fb4917661a319877..181d2d1a9db1b7e5570e9fba42576ce3f51d6b18 100644
--- a/python/paddle/fluid/tests/unittests/test_instance_norm_op_v2.py
+++ b/python/paddle/fluid/tests/unittests/test_instance_norm_op_v2.py
@@ -97,7 +97,9 @@ class TestInstanceNorm(unittest.TestCase):
         def compute_v1(x_np):
             with program_guard(Program(), Program()):
                 ins = paddle.nn.InstanceNorm2D(shape[1])
-                x = fluid.data(name='x', shape=x_np.shape, dtype=x_np.dtype)
+                x = paddle.static.data(
+                    name='x', shape=x_np.shape, dtype=x_np.dtype
+                )
                 y = ins(x)
                 exe.run(fluid.default_startup_program())
                 r = exe.run(feed={'x': x_np}, fetch_list=[y])[0]
@@ -106,7 +108,9 @@ class TestInstanceNorm(unittest.TestCase):
         def compute_v2(x_np):
             with program_guard(Program(), Program()):
                 ins = paddle.nn.InstanceNorm2D(shape[1])
-                x = fluid.data(name='x', shape=x_np.shape, dtype=x_np.dtype)
+                x = paddle.static.data(
+                    name='x', shape=x_np.shape, dtype=x_np.dtype
+                )
                 y = ins(x)
                 exe.run(fluid.default_startup_program())
                 r = exe.run(feed={'x': x_np}, fetch_list=[y])[0]
diff --git a/python/paddle/fluid/tests/unittests/test_inverse_op.py b/python/paddle/fluid/tests/unittests/test_inverse_op.py
index 656b51bce3b9870e61ac4da5552f94490c4b23f1..bdca19eb1e85cd97c803235b599318c14ebc0f9a 100644
--- a/python/paddle/fluid/tests/unittests/test_inverse_op.py
+++ b/python/paddle/fluid/tests/unittests/test_inverse_op.py
@@ -100,7 +100,9 @@ class TestInverseAPI(unittest.TestCase):
     def check_static_result(self, place):
         with fluid.program_guard(fluid.Program(), fluid.Program()):
-            input = fluid.data(name="input", shape=[4, 4], dtype="float64")
+            input = paddle.static.data(
+                name="input", shape=[4, 4], dtype="float64"
+            )
             result = paddle.inverse(x=input)
 
             input_np = np.random.random([4, 4]).astype("float64")
             result_np = np.linalg.inv(input_np)
@@ -139,16 +141,20 @@ class TestInverseAPIError(unittest.TestCase):
 
         # The data type of input must be float32 or float64.
         for dtype in ["bool", "int32", "int64", "float16"]:
-            input = fluid.data(name='input_' + dtype, shape=[4, 4], dtype=dtype)
+            input = paddle.static.data(
+                name='input_' + dtype, shape=[4, 4], dtype=dtype
+            )
             self.assertRaises(TypeError, paddle.inverse, input)
 
         # When out is set, the data type must be the same as input.
-        input = fluid.data(name='input_1', shape=[4, 4], dtype="float32")
-        out = fluid.data(name='output', shape=[4, 4], dtype="float64")
+        input = paddle.static.data(
+            name='input_1', shape=[4, 4], dtype="float32"
+        )
+        out = paddle.static.data(name='output', shape=[4, 4], dtype="float64")
         self.assertRaises(TypeError, paddle.inverse, input, out)
 
         # The number of dimensions of input must be >= 2.
-        input = fluid.data(name='input_2', shape=[4], dtype="float32")
+        input = paddle.static.data(name='input_2', shape=[4], dtype="float32")
         self.assertRaises(ValueError, paddle.inverse, input)
 
@@ -160,7 +166,9 @@ class TestInverseSingularAPI(unittest.TestCase):
     def check_static_result(self, place):
         with fluid.program_guard(fluid.Program(), fluid.Program()):
-            input = fluid.data(name="input", shape=[4, 4], dtype="float64")
+            input = paddle.static.data(
+                name="input", shape=[4, 4], dtype="float64"
+            )
             result = paddle.inverse(x=input)
 
             input_np = np.zeros([4, 4]).astype("float64")
diff --git a/python/paddle/fluid/tests/unittests/test_io_save_load.py b/python/paddle/fluid/tests/unittests/test_io_save_load.py
index 5ae765af9db91260e2e51b329cdff49f06064149..4e714908577ce84c61a8f9f48ff3681511de25d0 100644
--- a/python/paddle/fluid/tests/unittests/test_io_save_load.py
+++ b/python/paddle/fluid/tests/unittests/test_io_save_load.py
@@ -71,8 +71,8 @@ class TestSaveInferenceModelAPIError(unittest.TestCase):
         start_prog = fluid.Program()
         main_prog = fluid.Program()
         with fluid.program_guard(main_prog, start_prog):
-            x = fluid.data(name='x', shape=[10, 16], dtype='float32')
-            y = fluid.data(name='y', shape=[10, 16], dtype='float32')
+            x = paddle.static.data(name='x', shape=[10, 16], dtype='float32')
+            y = paddle.static.data(name='y', shape=[10, 16], dtype='float32')
             z = paddle.static.nn.fc(x, 4)
 
         exe = fluid.Executor(fluid.CPUPlace())
diff --git a/python/paddle/fluid/tests/unittests/test_isclose_op.py b/python/paddle/fluid/tests/unittests/test_isclose_op.py
index c587420a0ce1ac40542e8cc695201aa96018b9c7..cf60370df9ab815444c4903289045f55ea19f049 100644
--- a/python/paddle/fluid/tests/unittests/test_isclose_op.py
+++ b/python/paddle/fluid/tests/unittests/test_isclose_op.py
@@ -129,8 +129,12 @@ class TestIscloseStatic(unittest.TestCase):
             with paddle.static.program_guard(
                 paddle.static.Program(), paddle.static.Program()
             ):
-                x = paddle.fluid.data(name='x', shape=[10, 10], dtype='float64')
-                y = paddle.fluid.data(name='y', shape=[10, 10], dtype='float64')
+                x = paddle.static.data(
+                    name='x', shape=[10, 10], dtype='float64'
+                )
+                y = paddle.static.data(
+                    name='y', shape=[10, 10], dtype='float64'
+                )
                 result = paddle.isclose(x, y)
                 exe = paddle.fluid.Executor(place)
                 fetches = exe.run(
@@ -167,8 +171,10 @@ class TestIscloseError(unittest.TestCase):
             with paddle.static.program_guard(
                 paddle.static.Program(), paddle.static.Program()
             ):
-                x = paddle.fluid.data(name='x', shape=[10, 10], dtype='int32')
-                y = paddle.fluid.data(name='y', shape=[10, 10], dtype='float64')
+                x = paddle.static.data(name='x', shape=[10, 10], dtype='int32')
+                y = paddle.static.data(
+                    name='y', shape=[10, 10], dtype='float64'
+                )
                 result = paddle.isclose(x, y)
 
         self.assertRaises(TypeError, test_x_dtype)
@@ -177,16 +183,18 @@ class TestIscloseError(unittest.TestCase):
             with paddle.static.program_guard(
                 paddle.static.Program(), paddle.static.Program()
             ):
-                x = paddle.fluid.data(name='x', shape=[10, 10], dtype='float64')
-                y = paddle.fluid.data(name='y', shape=[10, 10], dtype='int32')
+                x = paddle.static.data(
+                    name='x', shape=[10, 10], dtype='float64'
+                )
+                y = paddle.static.data(name='y', shape=[10, 10], dtype='int32')
                 result = paddle.isclose(x, y)
 
         self.assertRaises(TypeError, test_y_dtype)
 
     def test_attr(self):
         paddle.enable_static()
-        x = paddle.fluid.data(name='x', shape=[10, 10], dtype='float64')
-        y = paddle.fluid.data(name='y', shape=[10, 10], dtype='float64')
+        x = paddle.static.data(name='x', shape=[10, 10], dtype='float64')
+        y = paddle.static.data(name='y', shape=[10, 10], dtype='float64')
 
         def test_rtol():
             result = paddle.isclose(x, y, rtol=True)
diff --git a/python/paddle/fluid/tests/unittests/test_isfinite_v2_op.py b/python/paddle/fluid/tests/unittests/test_isfinite_v2_op.py
index 914d2ec1f9f4a3fd90ce013333ec91b4129bbb03..52b02e04121b73f014ba58b606d310c91419bbd7 100644
--- a/python/paddle/fluid/tests/unittests/test_isfinite_v2_op.py
+++ b/python/paddle/fluid/tests/unittests/test_isfinite_v2_op.py
@@ -29,7 +29,7 @@ def run_static(x_np, dtype, op_str, use_gpu=False):
         place = paddle.CUDAPlace(0)
     exe = fluid.Executor(place)
     with fluid.program_guard(main_program, startup_program):
-        x = paddle.fluid.data(name='x', shape=x_np.shape, dtype=dtype)
+        x = paddle.static.data(name='x', shape=x_np.shape, dtype=dtype)
         res = getattr(paddle.tensor, op_str)(x)
         exe.run(startup_program)
         static_result = exe.run(
diff --git a/python/paddle/fluid/tests/unittests/test_kldiv_loss_op.py b/python/paddle/fluid/tests/unittests/test_kldiv_loss_op.py
index 5d6e3af092accb25697043ec0f8e6ccacb44e4ab..154a7bf7daedb826abda25278852712bf9b2f9a1 100644
--- a/python/paddle/fluid/tests/unittests/test_kldiv_loss_op.py
+++ b/python/paddle/fluid/tests/unittests/test_kldiv_loss_op.py
@@ -114,8 +114,8 @@ class TestKLDivLossDygraph(unittest.TestCase):
         self.run_kl_loss('none')
 
     def test_kl_loss_static_api(self):
-        input = paddle.fluid.data(name='input', shape=[5, 20])
-        label = paddle.fluid.data(name='label', shape=[5, 20])
+        input = paddle.static.data(name='input', shape=[5, 20])
+        label = paddle.static.data(name='label', shape=[5, 20])
 
         paddle.nn.functional.kl_div(input, label)
         paddle.nn.functional.kl_div(input, label, 'sum')
diff --git a/python/paddle/fluid/tests/unittests/test_kron_op.py b/python/paddle/fluid/tests/unittests/test_kron_op.py
index 1c1db6a223fac610f37066e9aa277081bc7ea0c5..5b5b63486c8f38871d1bb6537ca0623502550a0e 100644
--- a/python/paddle/fluid/tests/unittests/test_kron_op.py
+++ b/python/paddle/fluid/tests/unittests/test_kron_op.py
@@ -93,8 +93,8 @@ class TestKronLayer(unittest.TestCase):
         start = fluid.Program()
         with fluid.unique_name.guard():
             with fluid.program_guard(main, start):
-                a_var = fluid.data("a", [-1, -1], dtype="float64")
-                b_var = fluid.data("b", [-1, -1], dtype="float64")
+                a_var = paddle.static.data("a", [-1, -1], dtype="float64")
+                b_var = paddle.static.data("b", [-1, -1], dtype="float64")
                 out_var = paddle.kron(a_var, b_var)
 
         place = fluid.CPUPlace()
diff --git a/python/paddle/fluid/tests/unittests/test_l1_loss.py b/python/paddle/fluid/tests/unittests/test_l1_loss.py
index f1bce0d1b6a92caf3e14d38106ea8ba02d55ebaf..ea56eead9200af039dd730ab511ab89d85e9323c 100644
--- a/python/paddle/fluid/tests/unittests/test_l1_loss.py
+++ b/python/paddle/fluid/tests/unittests/test_l1_loss.py
@@ -44,10 +44,10 @@ class TestFunctionalL1Loss(unittest.TestCase):
         self.assertTrue(dy_result.shape, [10, 10, 5])
 
     def run_static(self, use_gpu=False):
-        input = paddle.fluid.data(
+        input = paddle.static.data(
             name='input', shape=[10, 10, 5], dtype='float32'
         )
-        label = paddle.fluid.data(
+        label = paddle.static.data(
             name='label', shape=[10, 10, 5], dtype='float32'
         )
         result0 = paddle.nn.functional.l1_loss(input, label)
@@ -94,10 +94,10 @@ class TestFunctionalL1Loss(unittest.TestCase):
     # test case the raise message
     def test_errors(self):
         def test_value_error():
-            input = paddle.fluid.data(
+            input = paddle.static.data(
                 name='input', shape=[10, 10, 5], dtype='float32'
             )
-            label = paddle.fluid.data(
+            label = paddle.static.data(
                 name='label', shape=[10, 10, 5], dtype='float32'
             )
             loss = paddle.nn.functional.l1_loss(
@@ -134,10 +134,10 @@ class TestClassL1Loss(unittest.TestCase):
         self.assertTrue(dy_result.shape, [10, 10, 5])
 
     def run_static(self, use_gpu=False):
-        input = paddle.fluid.data(
+        input = paddle.static.data(
             name='input', shape=[10, 10, 5], dtype='float32'
         )
-        label = paddle.fluid.data(
+        label = paddle.static.data(
             name='label', shape=[10, 10, 5], dtype='float32'
         )
         l1_loss = paddle.nn.loss.L1Loss()
diff --git a/python/paddle/fluid/tests/unittests/test_label_smooth_functional.py b/python/paddle/fluid/tests/unittests/test_label_smooth_functional.py
index 3c97be9c42b85e9e8ef85876a38e2e2bbef5e096..621559286ea6d647864fec4a2451b3ae79cda353 100644
--- a/python/paddle/fluid/tests/unittests/test_label_smooth_functional.py
+++ b/python/paddle/fluid/tests/unittests/test_label_smooth_functional.py
@@ -47,7 +47,7 @@ class LabelSmoothTestCase(unittest.TestCase):
         start = fluid.Program()
         with fluid.unique_name.guard():
             with fluid.program_guard(main, start):
-                label_var = fluid.data(
+                label_var = paddle.static.data(
                     "input", self.label_shape, dtype=self.dtype
                 )
                 y_var = F.label_smooth(
@@ -67,7 +67,7 @@ class LabelSmoothTestCase(unittest.TestCase):
         start = fluid.Program()
         with fluid.unique_name.guard():
             with fluid.program_guard(main, start):
-                label_var = fluid.data(
+                label_var = paddle.static.data(
                     "input", self.label_shape, dtype=self.dtype
                 )
                 y_var = F.label_smooth(
diff --git a/python/paddle/fluid/tests/unittests/test_layer_norm_op_v2.py b/python/paddle/fluid/tests/unittests/test_layer_norm_op_v2.py
index d91cff14a21039170d5744e3c2ae8bfba3cde43c..caddec044ba7d7ebc108f783cfb510f4106c5b4c 100644
--- a/python/paddle/fluid/tests/unittests/test_layer_norm_op_v2.py
+++ b/python/paddle/fluid/tests/unittests/test_layer_norm_op_v2.py
@@ -90,7 +90,9 @@ class TestDygraphLayerNormv2(unittest.TestCase):
         def compute_v1(x_np):
             with program_guard(Program(), Program()):
                 ln = paddle.nn.LayerNorm(shape[1:])
-                x = fluid.data(name='x', shape=x_np.shape, dtype=x_np.dtype)
+                x = paddle.static.data(
+                    name='x', shape=x_np.shape, dtype=x_np.dtype
+                )
                 y = ln(x)
                 exe.run(fluid.default_startup_program())
                 r = exe.run(feed={'x': x_np}, fetch_list=[y])[0]
@@ -99,7 +101,9 @@ class TestDygraphLayerNormv2(unittest.TestCase):
         def compute_v2(x_np):
             with program_guard(Program(), Program()):
                 ln = paddle.nn.LayerNorm(shape[1:])
-                x = fluid.data(name='x', shape=x_np.shape, dtype=x_np.dtype)
+                x = paddle.static.data(
+                    name='x', shape=x_np.shape, dtype=x_np.dtype
+                )
                 y = ln(x)
                 exe.run(fluid.default_startup_program())
                 r = exe.run(feed={'x': x_np}, fetch_list=[y])[0]
diff --git a/python/paddle/fluid/tests/unittests/test_layers.py b/python/paddle/fluid/tests/unittests/test_layers.py
index 195f8d82a66b9cad6274b4e454768cf295731d8f..1ca2c4768480dc6f5960e30847a39683ddf16f96 100644
--- a/python/paddle/fluid/tests/unittests/test_layers.py
+++ b/python/paddle/fluid/tests/unittests/test_layers.py
@@ -1414,8 +1414,10 @@ class TestLayer(LayerTest):
         x = np.random.rand(3, 32, 32).astype("float32")
         y = np.array([[1], [0], [1]])
         with self.static_graph():
-            data = fluid.data(name="input", shape=[-1, 32, 32], dtype="float32")
-            label = fluid.data(name="label", shape=[-1, 1], dtype="int")
+            data = paddle.static.data(
+                name="input", shape=[-1, 32, 32], dtype="float32"
+            )
+            label = paddle.static.data(name="label", shape=[-1, 1], dtype="int")
             data_new = paddle.reshape(data, [3, 32 * 32])
             fc_out = paddle.nn.Linear(32 * 32, 10)(data_new)
             predict = paddle.nn.functional.softmax(fc_out)
@@ -2155,8 +2157,8 @@ class TestBook(LayerTest):
 
     def test_partial_sum(self):
         with self.static_graph():
-            x = fluid.data(name="x", shape=[None, 3], dtype="float32")
-            y = fluid.data(name="y", shape=[None, 3], dtype="float32")
+            x = paddle.static.data(name="x", shape=[None, 3], dtype="float32")
+            y = paddle.static.data(name="y", shape=[None, 3], dtype="float32")
             sum = fluid.contrib.layers.partial_sum(
                 [x, y], start_index=0, length=2
             )
@@ -2164,7 +2166,9 @@ class TestBook(LayerTest):
 
     def test_batch_fc(self):
         with self.static_graph():
-            input = fluid.data(name="input", shape=[16, 2, 3], dtype="float32")
+            input = paddle.static.data(
+                name="input", shape=[16, 2, 3], dtype="float32"
+            )
             out = fluid.contrib.layers.batch_fc(
                 input=input,
                 param_size=[16, 3, 10],
@@ -2185,8 +2189,10 @@ class TestBook(LayerTest):
 
     def test_rank_attention(self):
         with self.static_graph():
-            input = fluid.data(name="input", shape=[None, 2], dtype="float32")
-            rank_offset = fluid.data(
+            input = paddle.static.data(
+                name="input", shape=[None, 2], dtype="float32"
+            )
+            rank_offset = paddle.static.data(
                 name="rank_offset", shape=[None, 7], dtype="int32"
             )
             out = fluid.contrib.layers.rank_attention(
@@ -2258,8 +2264,8 @@ class TestBook(LayerTest):
 
     def test_partial_concat(self):
         with self.static_graph():
-            x = fluid.data(name="x", shape=[None, 3], dtype="float32")
-            y = fluid.data(name="y", shape=[None, 3], dtype="float32")
+            x = paddle.static.data(name="x", shape=[None, 3], dtype="float32")
+            y = paddle.static.data(name="y", shape=[None, 3], dtype="float32")
             concat1 = fluid.contrib.layers.partial_concat(
                 [x, y], start_index=0, length=2
             )
diff --git a/python/paddle/fluid/tests/unittests/test_lcm.py b/python/paddle/fluid/tests/unittests/test_lcm.py
index 0e110468d6a966e5d9ed0c69909426f5e109ba03..dc943a70a923f71cf33bfc55bb6c78147c9abad7 100644
--- a/python/paddle/fluid/tests/unittests/test_lcm.py
+++ b/python/paddle/fluid/tests/unittests/test_lcm.py
@@ -34,8 +34,12 @@ class TestLcmAPI(unittest.TestCase):
         startup_program = fluid.Program()
         train_program = fluid.Program()
         with fluid.program_guard(startup_program, train_program):
-            x1 = fluid.data(name='input1', dtype='int32', shape=self.x_shape)
-            x2 = fluid.data(name='input2', dtype='int32', shape=self.y_shape)
+            x1 = paddle.static.data(
+                name='input1', dtype='int32', shape=self.x_shape
+            )
+            x2 = paddle.static.data(
+                name='input2', dtype='int32', shape=self.y_shape
+            )
             out = paddle.lcm(x1, x2)
 
             place = (
diff --git a/python/paddle/fluid/tests/unittests/test_lerp_op.py b/python/paddle/fluid/tests/unittests/test_lerp_op.py
index 625d5b1b13dfe774985ac7e6993ba7c5912af089..cf3704f1c25e517b8209b173c280b2dc223440f2 100644
--- a/python/paddle/fluid/tests/unittests/test_lerp_op.py
+++ b/python/paddle/fluid/tests/unittests/test_lerp_op.py
@@ -119,8 +119,8 @@ class TestLerpAPI(unittest.TestCase):
 
         def run(place):
             with paddle.static.program_guard(paddle.static.Program()):
-                x = paddle.fluid.data('x', [1, 4], dtype=self.dtype)
-                y = paddle.fluid.data('y', [1, 4], dtype=self.dtype)
+                x = paddle.static.data('x', [1, 4], dtype=self.dtype)
+                y = paddle.static.data('y', [1, 4], dtype=self.dtype)
                 out = paddle.lerp(x, y, 0.5)
                 exe = paddle.static.Executor(place)
                 res = exe.run(
diff --git a/python/paddle/fluid/tests/unittests/test_linalg_lstsq_op.py b/python/paddle/fluid/tests/unittests/test_linalg_lstsq_op.py
index 94dc901a56d0c92d47dd95f4fe1029e4919a2571..1ea2b939e897bbaf09bcf67f0c07af2bc54239d3 100644
--- a/python/paddle/fluid/tests/unittests/test_linalg_lstsq_op.py
+++ b/python/paddle/fluid/tests/unittests/test_linalg_lstsq_op.py
@@ -97,12 +97,12 @@ class LinalgLstsqTestCase(unittest.TestCase):
             paddle.set_device(dev)
             place = fluid.CPUPlace() if dev == "cpu" else fluid.CUDAPlace(0)
             with fluid.program_guard(fluid.Program(), fluid.Program()):
-                x = paddle.fluid.data(
+                x = paddle.static.data(
                     name="x",
                     shape=self._input_shape_1,
                     dtype=self._input_data_1.dtype,
                 )
-                y = paddle.fluid.data(
+                y = paddle.static.data(
                     name="y",
                     shape=self._input_shape_2,
                     dtype=self._input_data_2.dtype,
diff --git a/python/paddle/fluid/tests/unittests/test_linalg_pinv_op.py b/python/paddle/fluid/tests/unittests/test_linalg_pinv_op.py
index 074b0fb517aa96d4ad8a8d472c51dea822b74f65..15ea23ef2e50264ba380a4d436d31656cf2c3d7f 100644
--- a/python/paddle/fluid/tests/unittests/test_linalg_pinv_op.py
+++ b/python/paddle/fluid/tests/unittests/test_linalg_pinv_op.py
@@ -68,7 +68,7 @@ class LinalgPinvTestCase(unittest.TestCase):
             places.append(fluid.CUDAPlace(0))
         for place in places:
             with fluid.program_guard(fluid.Program(), fluid.Program()):
-                x = paddle.fluid.data(
+                x = paddle.static.data(
                     name="input",
                     shape=self._input_shape,
                     dtype=self._input_data.dtype,
diff --git a/python/paddle/fluid/tests/unittests/test_linear_interp_op.py b/python/paddle/fluid/tests/unittests/test_linear_interp_op.py
index 9dfb5391f4b67b8b6282555cb76c69ea9e2c0bcb..327853ace04994b18df8913cd521bcc602adc687 100755
--- a/python/paddle/fluid/tests/unittests/test_linear_interp_op.py
+++ b/python/paddle/fluid/tests/unittests/test_linear_interp_op.py
@@ -331,7 +331,7 @@ class TestLinearInterpOpError(unittest.TestCase):
         with program_guard(Program(), Program()):
 
             def input_shape_error():
-                x1 = fluid.data(name="x1", shape=[1], dtype="float32")
+                x1 = paddle.static.data(name="x1", shape=[1], dtype="float32")
                 out1 = paddle.nn.Upsample(
                     size=[
                         256,
@@ -342,7 +342,9 @@ class TestLinearInterpOpError(unittest.TestCase):
                 out1_res = out1(x1)
 
             def data_format_error():
-                x2 = fluid.data(name="x2", shape=[1, 3, 128], dtype="float32")
+                x2 = paddle.static.data(
+                    name="x2", shape=[1, 3, 128], dtype="float32"
+                )
                 out2 = paddle.nn.Upsample(
                     size=[
                         256,
@@ -353,7 +355,9 @@ class TestLinearInterpOpError(unittest.TestCase):
                 out2_res = out2(x2)
 
             def out_shape_error():
-                x3 = fluid.data(name="x3", shape=[1, 3, 128], dtype="float32")
+                x3 = paddle.static.data(
+                    name="x3", shape=[1, 3, 128], dtype="float32"
+                )
                 out3 = paddle.nn.Upsample(
                     size=[
                         256,
diff --git a/python/paddle/fluid/tests/unittests/test_linear_interp_v2_op.py b/python/paddle/fluid/tests/unittests/test_linear_interp_v2_op.py
index 563bebf87e49aaba1e2760fe47535fb5f8cf1c67..b98c0f7efeb45bf7a47c87f497f2f19d98f91060 100755
--- a/python/paddle/fluid/tests/unittests/test_linear_interp_v2_op.py
+++ b/python/paddle/fluid/tests/unittests/test_linear_interp_v2_op.py
@@ -405,14 +405,16 @@ class TestLinearInterpOpError(unittest.TestCase):
         with program_guard(Program(), Program()):
 
             def input_shape_error():
-                x1 = fluid.data(name="x1", shape=[1], dtype="float32")
+                x1 = paddle.static.data(
+                    name="x1", shape=[1], dtype="float32"
+                )
                 out1 = paddle.nn.Upsample(
                     size=[256], data_format='NCW', mode='linear'
                 )
                 out1_res = out1(x1)
 
             def data_format_error():
-                x2 = fluid.data(
+                x2 = paddle.static.data(
                     name="x2", shape=[1, 3, 128], dtype="float32"
                 )
                 out2 = paddle.nn.Upsample(
@@ -421,7 +423,7 @@ class TestLinearInterpOpError(unittest.TestCase):
                 out2_res = out2(x2)
 
             def out_shape_error():
-                x3 = fluid.data(
+                x3 = paddle.static.data(
                     name="x3", shape=[1, 3, 128], dtype="float32"
                 )
                 out3 = paddle.nn.Upsample(
diff --git a/python/paddle/fluid/tests/unittests/test_linspace.py b/python/paddle/fluid/tests/unittests/test_linspace.py
index 27020dd2e0c237469eb5469dbe67f4ac2fd504fb..253be8b45b877df0f0922b36c5856124b4e74bb9 100644
--- a/python/paddle/fluid/tests/unittests/test_linspace.py
+++ b/python/paddle/fluid/tests/unittests/test_linspace.py
@@ -158,19 +158,21 @@ class TestLinspaceOpError(unittest.TestCase):
             self.assertRaises(TypeError, test_step_dtype)
 
             def test_start_dtype():
-                start = fluid.data(shape=[1], dtype="float64", name="start")
+                start = paddle.static.data(
+                    shape=[1], dtype="float64", name="start"
+                )
                 paddle.linspace(start, 10, 1, dtype="float32")
 
             self.assertRaises(ValueError, test_start_dtype)
 
             def test_end_dtype():
-                end = fluid.data(shape=[1], dtype="float64", name="end")
+                end = paddle.static.data(shape=[1], dtype="float64", name="end")
                 paddle.linspace(0, end, 1, dtype="float32")
 
             self.assertRaises(ValueError, test_end_dtype)
 
             def test_num_dtype():
-                num = fluid.data(shape=[1], dtype="int32", name="step")
+                num = paddle.static.data(shape=[1], dtype="int32", name="step")
                 paddle.linspace(0, 10, num, dtype="float32")
 
             self.assertRaises(TypeError, test_step_dtype)
diff --git a/python/paddle/fluid/tests/unittests/test_load_state_dict_from_old_format.py b/python/paddle/fluid/tests/unittests/test_load_state_dict_from_old_format.py
index c52e534f8137a6e411abf2e9622bc44122463baf..11ae9e855f691b94c8d3da47ba99b1f011a2101e 100644
--- a/python/paddle/fluid/tests/unittests/test_load_state_dict_from_old_format.py
+++ b/python/paddle/fluid/tests/unittests/test_load_state_dict_from_old_format.py
@@ -80,10 +80,12 @@ class TestLoadStateDictFromSaveInferenceModel(unittest.TestCase):
             startup_program = fluid.default_startup_program()
             main_program = fluid.default_main_program()
 
-            img = fluid.data(
+            img = paddle.static.data(
                 name='img', shape=[None, 1, 28, 28], dtype='float32'
             )
-            label = fluid.data(name='label', shape=[None, 1], dtype='int64')
+            label = paddle.static.data(
+                name='label', shape=[None, 1], dtype='int64'
+            )
 
             prediction, avg_loss = static_train_net(img, label)
diff --git a/python/paddle/fluid/tests/unittests/test_log_softmax.py b/python/paddle/fluid/tests/unittests/test_log_softmax.py
index 274d4cf05bd098d95e3cb17e4d40c239149433ca..9a8f336c9308be78a165e3b475e179a8240cac0e 100644
--- a/python/paddle/fluid/tests/unittests/test_log_softmax.py
+++ b/python/paddle/fluid/tests/unittests/test_log_softmax.py
@@ -151,7 +151,7 @@ class TestNNLogSoftmaxAPI(unittest.TestCase):
         logsoftmax = paddle.nn.LogSoftmax(axis)
         # test static api
         with paddle.static.program_guard(paddle.static.Program()):
-            x = paddle.fluid.data(name='x', shape=self.x_shape)
+            x = paddle.static.data(name='x', shape=self.x_shape)
             y = logsoftmax(x)
             exe = paddle.static.Executor(self.place)
             out = exe.run(feed={'x': self.x}, fetch_list=[y])
@@ -185,7 +185,7 @@ class TestNNFunctionalLogSoftmaxAPI(unittest.TestCase):
         x = x.astype(dtype)
         ref_out = np.apply_along_axis(ref_log_softmax, axis, x)
         with paddle.static.program_guard(paddle.static.Program()):
-            x = paddle.fluid.data(name='x', shape=self.x_shape)
+            x = paddle.static.data(name='x', shape=self.x_shape)
             y = F.log_softmax(x, axis, dtype)
             exe = paddle.static.Executor(self.place)
             out = exe.run(feed={'x': self.x}, fetch_list=[y])
@@ -204,10 +204,10 @@ class TestNNFunctionalLogSoftmaxAPI(unittest.TestCase):
 
     def test_errors(self):
         with paddle.static.program_guard(paddle.static.Program()):
-            x = paddle.fluid.data(name='X1', shape=[100], dtype='int32')
+            x = paddle.static.data(name='X1', shape=[100], dtype='int32')
             self.assertRaises(TypeError, F.log_softmax, x)
 
-            x = paddle.fluid.data(name='X2', shape=[100], dtype='float32')
+            x = paddle.static.data(name='X2', shape=[100], dtype='float32')
             self.assertRaises(TypeError, F.log_softmax, x, dtype='int32')
diff --git a/python/paddle/fluid/tests/unittests/test_logit_op.py b/python/paddle/fluid/tests/unittests/test_logit_op.py
index 0744b779fb481debfcccbfa711bd9b68e2867770..597f8fe197f23c0ba6af2a089e64b830e842c78e 100644
--- a/python/paddle/fluid/tests/unittests/test_logit_op.py
+++ b/python/paddle/fluid/tests/unittests/test_logit_op.py
@@ -90,7 +90,7 @@ class TestLogitAPI(unittest.TestCase):
         ref_out = logit(self.x, eps)
         # test static api
         with paddle.static.program_guard(paddle.static.Program()):
-            x = paddle.fluid.data(name='x', shape=self.x_shape)
+            x = paddle.static.data(name='x', shape=self.x_shape)
             y = paddle.logit(x, eps)
             exe = paddle.static.Executor(self.place)
             out = exe.run(feed={'x': self.x}, fetch_list=[y])
@@ -110,10 +110,10 @@ class TestLogitAPI(unittest.TestCase):
     def test_errors(self):
         paddle.enable_static()
         with paddle.static.program_guard(paddle.static.Program()):
-            x = paddle.fluid.data(name='X1', shape=[100], dtype='int32')
+            x = paddle.static.data(name='X1', shape=[100], dtype='int32')
             self.assertRaises(TypeError, paddle.logit, x)
 
-            x = paddle.fluid.data(name='X2', shape=[100], dtype='float32')
+            x = paddle.static.data(name='X2', shape=[100], dtype='float32')
             self.assertRaises(TypeError, paddle.logit, x, dtype='int32')
diff --git a/python/paddle/fluid/tests/unittests/test_logsumexp.py b/python/paddle/fluid/tests/unittests/test_logsumexp.py
index d65431f741783a584ef65c65d9d7ed62d792b5f0..41936d17b3d04efc49d2318cd21261211abb6077 100644
--- a/python/paddle/fluid/tests/unittests/test_logsumexp.py
+++ b/python/paddle/fluid/tests/unittests/test_logsumexp.py
@@ -189,7 +189,7 @@ class TestLogsumexpError(unittest.TestCase):
     def test_errors(self):
         with paddle.static.program_guard(paddle.static.Program()):
             self.assertRaises(TypeError, paddle.logsumexp, 1)
-            x1 = paddle.fluid.data(name='x1', shape=[120], dtype="int32")
+            x1 = paddle.static.data(name='x1', shape=[120], dtype="int32")
             self.assertRaises(TypeError, paddle.logsumexp, x1)
 
@@ -206,7 +206,7 @@ class TestLogsumexpAPI(unittest.TestCase):
     def api_case(self, axis=None, keepdim=False):
         out_ref = ref_logsumexp(self.x, axis, keepdim)
         with paddle.static.program_guard(paddle.static.Program()):
-            x = paddle.fluid.data('X', self.shape)
+            x = paddle.static.data('X', self.shape)
             out = paddle.logsumexp(x, axis, keepdim)
             exe = paddle.static.Executor(self.place)
             res = exe.run(feed={'X': self.x}, fetch_list=[out])
diff --git a/python/paddle/fluid/tests/unittests/test_lookahead.py b/python/paddle/fluid/tests/unittests/test_lookahead.py
index 5860f81f736d27bf6a9c3efcebd498b3c9494c75..d3647e50af7ba6587b554dbd08053f03f10c61d6 100644
--- a/python/paddle/fluid/tests/unittests/test_lookahead.py
+++ b/python/paddle/fluid/tests/unittests/test_lookahead.py
@@ -35,7 +35,9 @@ class TestLookAhead(unittest.TestCase):
         startup = fluid.Program()
         with fluid.program_guard(train_program, startup):
             with fluid.unique_name.guard():
-                data = fluid.data(name='X', shape=[None, 1], dtype='float32')
+                data = paddle.static.data(
+                    name='X', shape=[None, 1], dtype='float32'
+                )
                 hidden = paddle.static.nn.fc(x=data, size=10)
                 loss = paddle.mean(hidden)
diff --git a/python/paddle/fluid/tests/unittests/test_lookup_table_op.py b/python/paddle/fluid/tests/unittests/test_lookup_table_op.py
index 1892ce5c56d55a5254389a636c2e984aeb96cc83..4802add016777bd2ffef2ca8dc8407895b3d1f29 100644
--- a/python/paddle/fluid/tests/unittests/test_lookup_table_op.py
+++ b/python/paddle/fluid/tests/unittests/test_lookup_table_op.py
@@ -17,6 +17,7 @@ import unittest
 import numpy as np
 from op_test import OpTest, check_out_dtype, skip_check_grad_ci
 
+import paddle
 import paddle.fluid as fluid
 import paddle.fluid.core as core
 import paddle.nn.functional as F
@@ -168,21 +169,25 @@ class TestEmbedOpError(unittest.TestCase):
 
             def test_input_dtype():
                 # the input dtype must be int64
-                input = fluid.data(name='x', shape=[4, 1], dtype='float32')
+                input = paddle.static.data(
+                    name='x', shape=[4, 1], dtype='float32'
+                )
                 fluid.layers.embedding(input=input, size=(10, 64))
 
             self.assertRaises(TypeError, test_input_dtype)
 
             def test_param_dtype():
                 # dtype must be float32 or float64
-                input2 = fluid.data(name='x2', shape=[4, 1], dtype='int64')
+                input2 = paddle.static.data(
+                    name='x2', shape=[4, 1], dtype='int64'
+                )
                 fluid.layers.embedding(
                     input=input2, size=(10, 64), dtype='int64'
                 )
 
             self.assertRaises(TypeError, test_param_dtype)
 
-            input3 = fluid.data(name='x3', shape=[4, 1], dtype='int64')
+            input3 = paddle.static.data(name='x3', shape=[4, 1], dtype='int64')
             fluid.layers.embedding(input=input3, size=(10, 64), dtype='float16')
diff --git a/python/paddle/fluid/tests/unittests/test_lookup_table_v2_op.py b/python/paddle/fluid/tests/unittests/test_lookup_table_v2_op.py
index dc0c8f3174bb5d63f125c492765b432639a16a82..5f8eba060038c571ee125aa574fc40e3df3a8454 100644
--- a/python/paddle/fluid/tests/unittests/test_lookup_table_v2_op.py
+++ b/python/paddle/fluid/tests/unittests/test_lookup_table_v2_op.py
@@ -275,20 +275,24 @@ class TestEmbedOpError(unittest.TestCase):
 
             def test_input_dtype():
                 # the input dtype must be int64
-                input = fluid.data(name='x1', shape=[4, 6], dtype='float32')
+                input = paddle.static.data(
+                    name='x1', shape=[4, 6], dtype='float32'
+                )
                 paddle.static.nn.embedding(input=input, size=(10, 64))
 
             self.assertRaises(TypeError, test_input_dtype)
 
             def test_param_dtype():
                 # dtype must be float32 or float64
-                input2 = fluid.data(name='x2', shape=[4, 6], dtype='int64')
+                input2 = paddle.static.data(
+                    name='x2', shape=[4, 6], dtype='int64'
+                )
                 paddle.static.nn.embedding(
                     input=input2, size=(10, 64), dtype='int64'
                 )
 
             self.assertRaises(TypeError, test_param_dtype)
 
-            input3 = fluid.data(name='x3', shape=[4, 6], dtype='int64')
+            input3 = paddle.static.data(name='x3', shape=[4, 6], dtype='int64')
             paddle.static.nn.embedding(
                 input=input3, size=(10, 64), dtype='float16'
             )
diff --git a/python/paddle/fluid/tests/unittests/test_lrn_op.py b/python/paddle/fluid/tests/unittests/test_lrn_op.py
index 25d90c07838df53c361b403e3bd128c86e6f302e..e80e89ff3a17cfa0f6f4a7be407cbd069d78c3e5 100644
--- a/python/paddle/fluid/tests/unittests/test_lrn_op.py
+++ b/python/paddle/fluid/tests/unittests/test_lrn_op.py
@@ -120,10 +120,10 @@ class TestLocalResponseNormFAPI(unittest.TestCase):
             in_np1 = np.random.random([3, 40, 40]).astype("float32")
             in_np2 = np.transpose(in_np1, (0, 2, 1))
 
-            input1 = fluid.data(
+            input1 = paddle.static.data(
                 name="input1", shape=[3, 40, 40], dtype="float32"
             )
-            input2 = fluid.data(
+            input2 = paddle.static.data(
                 name="input2", shape=[3, 40, 40], dtype="float32"
             )
             res1 = paddle.nn.functional.local_response_norm(
@@ -144,10 +144,10 @@ class TestLocalResponseNormFAPI(unittest.TestCase):
 
     def check_static_4d_input(self, place):
         with fluid.program_guard(fluid.Program(), fluid.Program()):
-            input1 = fluid.data(
+            input1 = paddle.static.data(
                 name="input1", shape=[3, 3, 40, 40], dtype="float32"
             )
-            input2 = fluid.data(
+            input2 = paddle.static.data(
                 name="input2", shape=[3, 40, 40, 3], dtype="float32"
             )
@@ -173,10 +173,10 @@ class TestLocalResponseNormFAPI(unittest.TestCase):
 
     def check_static_5d_input(self, place):
         with fluid.program_guard(fluid.Program(), fluid.Program()):
-            input1 = fluid.data(
+            input1 = paddle.static.data(
                 name="input1", shape=[3, 3, 3, 40, 40], dtype="float32"
             )
-            input2 = fluid.data(
+            input2 = paddle.static.data(
                 name="input2", shape=[3, 3, 40, 40, 3], dtype="float32"
             )
             res1 = paddle.nn.functional.local_response_norm(
@@ -280,13 +284,17 @@ class TestLocalResponseNormFAPIError(unittest.TestCase):
             self.assertRaises(TypeError, test_Variable)
 
             def test_datatype():
-                x = fluid.data(name='x', shape=[3, 4, 5, 6], dtype="int32")
+                x = paddle.static.data(
+                    name='x', shape=[3, 4, 5, 6], dtype="int32"
+                )
                 paddle.nn.functional.local_response_norm(x, size=5)
 
             self.assertRaises(TypeError, test_datatype)
 
             def test_dataformat():
-                x = fluid.data(name='x', shape=[3, 4, 5, 6], dtype="float32")
+                x = paddle.static.data(
+                    name='x', shape=[3, 4, 5, 6], dtype="float32"
+                )
                 paddle.nn.functional.local_response_norm(
                     x, size=5, data_format="NCTHW"
                 )
@@ -294,7 +298,7 @@ class TestLocalResponseNormFAPIError(unittest.TestCase):
             self.assertRaises(ValueError, test_dataformat)
 
             def test_dim():
-                x = fluid.data(name='x', shape=[3, 4], dtype="float32")
+                x = paddle.static.data(name='x', shape=[3, 4], dtype="float32")
                 paddle.nn.functional.local_response_norm(x, size=5)
 
             self.assertRaises(ValueError, test_dim)
diff --git a/python/paddle/fluid/tests/unittests/test_lu_op.py b/python/paddle/fluid/tests/unittests/test_lu_op.py
index 3e083c76b71df512a39e620e5ee90e45265552a4..ddba55e7d3faa45aef7666c6e737001048986ae5 100644
--- a/python/paddle/fluid/tests/unittests/test_lu_op.py
+++ b/python/paddle/fluid/tests/unittests/test_lu_op.py
@@ -273,7 +273,7 @@ class TestLUAPI(unittest.TestCase):
                     NsU = np.pad(sU, upad)
                     NLU = NsL + NsU
 
-                    x = paddle.fluid.data(
+                    x = paddle.static.data(
                         name="input", shape=shape, dtype=dtype
                     )
                     lu, p = paddle.linalg.lu(x, pivot=pivot)
diff --git a/python/paddle/fluid/tests/unittests/test_lu_unpack_op.py b/python/paddle/fluid/tests/unittests/test_lu_unpack_op.py
index d05b16df25cd5506406a814c513d5bf3fb7d87fe..2349f8251f23393a691eb35f459892a0f8633756 100644
--- a/python/paddle/fluid/tests/unittests/test_lu_unpack_op.py
+++ b/python/paddle/fluid/tests/unittests/test_lu_unpack_op.py
@@ -142,7 +142,7 @@ class TestLU_UnpackOp(OpTest):
             place = fluid.CPUPlace()
             if core.is_compiled_with_cuda():
                 place = fluid.CUDAPlace(0)
-            xv = paddle.fluid.data(
+            xv = paddle.static.data(
                 name="input", shape=self.x_shape, dtype=self.dtype
             )
             lu, p = paddle.linalg.lu(xv)
@@ -278,7 +278,7 @@ class TestLU_UnpackAPI(unittest.TestCase):
                 with fluid.program_guard(fluid.Program(), fluid.Program()):
                     sP, sL, sU = scipy_lu_unpack(a)
 
-                    x = paddle.fluid.data(
+                    x = paddle.static.data(
                         name="input", shape=shape, dtype=dtype
                     )
                     lu, p = paddle.linalg.lu(x)
diff --git a/python/paddle/fluid/tests/unittests/test_margin_rank_loss_op.py b/python/paddle/fluid/tests/unittests/test_margin_rank_loss_op.py
index bf08137b100b7232b83a53c02ca21cb9de11ad4f..378c91aa8aad49d4b564b0dfcb1bcf7b7d337ec1 100644
--- a/python/paddle/fluid/tests/unittests/test_margin_rank_loss_op.py
+++ b/python/paddle/fluid/tests/unittests/test_margin_rank_loss_op.py
@@ -87,9 +87,11 @@ class TestMarginRankLossLayer(unittest.TestCase):
         start = fluid.Program()
         with fluid.unique_name.guard():
             with fluid.program_guard(main, start):
-                label = fluid.data("label", (self.batch_size, 1), "float32")
-                x1 = fluid.data("x1", (self.batch_size, 1), "float32")
-                x2 = fluid.data("x2", (self.batch_size, 1), "float32")
+                label = paddle.static.data(
+                    "label", (self.batch_size, 1), "float32"
+                )
+                x1 = paddle.static.data("x1", (self.batch_size, 1), "float32")
+                x2 = paddle.static.data("x2", (self.batch_size, 1), "float32")
                 out = paddle.nn.functional.margin_ranking_loss(
                     x1, x2, label, self.margin, 'none'
                 )
diff --git a/python/paddle/fluid/tests/unittests/test_masked_select_op.py b/python/paddle/fluid/tests/unittests/test_masked_select_op.py
index 14d06a3d36b8256ce0e7f7960172f4e61040ccff..63446d865cbf4c648b6c3ba6b9a1526f6736a114 100644
--- a/python/paddle/fluid/tests/unittests/test_masked_select_op.py
+++ b/python/paddle/fluid/tests/unittests/test_masked_select_op.py
@@ -74,8 +74,8 @@ class TestMaskedSelectAPI(unittest.TestCase):
 
     def test_static_mode(self):
         shape = [8, 9, 6]
-        x = paddle.fluid.data(shape=shape, dtype='float32', name='x')
-        mask = paddle.fluid.data(shape=shape, dtype='bool', name='mask')
+        x = paddle.static.data(shape=shape, dtype='float32', name='x')
+        mask = paddle.static.data(shape=shape, dtype='bool', name='mask')
         np_x = np.random.random(shape).astype('float32')
         np_mask = np.array(np.random.randint(2, size=shape, dtype=bool))
 
@@ -99,9 +99,9 @@ class TestMaskedSelectError(unittest.TestCase):
         ):
             shape = [8, 9, 6]
-            x = paddle.fluid.data(shape=shape, dtype='float32', name='x')
-            mask = paddle.fluid.data(shape=shape, dtype='bool', name='mask')
-            mask_float = paddle.fluid.data(
+            x = paddle.static.data(shape=shape, dtype='float32', name='x')
+            mask = paddle.static.data(shape=shape, dtype='bool', name='mask')
+            mask_float = paddle.static.data(
                 shape=shape, dtype='float32', name='mask_float'
             )
             np_x = np.random.random(shape).astype('float32')
diff --git a/python/paddle/fluid/tests/unittests/test_matmul_op.py b/python/paddle/fluid/tests/unittests/test_matmul_op.py
index 1ac71759de5728edd18dd292d4fc68a26512abfb..fc4bbff2c57673d9cfc4dabbe1d69d8d88875a14 100644
--- a/python/paddle/fluid/tests/unittests/test_matmul_op.py
+++ b/python/paddle/fluid/tests/unittests/test_matmul_op.py
@@ -164,9 +164,9 @@ for dim in [4]:
 class API_TestMm(unittest.TestCase):
     def test_out(self):
         with fluid.program_guard(fluid.Program()):
-            x = fluid.data(name="x", shape=[2], dtype="float64")
-            y = fluid.data(name='y', shape=[2], dtype='float64')
-            res = fluid.data(name="output", shape=[1], dtype="float64")
+            x = paddle.static.data(name="x", shape=[2], dtype="float64")
+            y = paddle.static.data(name='y', shape=[2], dtype='float64')
+            res = paddle.static.data(name="output", shape=[1], dtype="float64")
             result = paddle.mm(x, y)
             exe = fluid.Executor(fluid.CPUPlace())
             data1 = np.random.rand(2)
@@ -215,18 +215,22 @@ class API_TestMmError(unittest.TestCase):
     def test_errors(self):
         def test_error1():
             with fluid.program_guard(fluid.Program(), fluid.Program()):
-                data1 = fluid.data(name="data1", shape=[10, 2], dtype="float32")
-                data2 = fluid.data(name="data2", shape=[3, 10], dtype="float32")
+                data1 = paddle.static.data(
+                    name="data1", shape=[10, 2], dtype="float32"
+                )
+                data2 = paddle.static.data(
+                    name="data2", shape=[3, 10], dtype="float32"
+                )
                 paddle.mm(data1, data2)
 
         self.assertRaises(ValueError, test_error1)
 
         def test_error2():
             with fluid.program_guard(fluid.Program(), fluid.Program()):
-                data1 = fluid.data(
+                data1 = paddle.static.data(
                     name="data1", shape=[-1, 10, 2], dtype="float32"
                 )
-                data2 = fluid.data(
+                data2 = paddle.static.data(
                     name="data2", shape=[-1, 2, 10], dtype="float32"
                 )
                 paddle.mm(data1, data2)
@@ -235,10 +239,10 @@ class API_TestMmError(unittest.TestCase):
 
         def test_error3():
             with fluid.program_guard(fluid.Program(), fluid.Program()):
-                data1 = fluid.data(
+                data1 = paddle.static.data(
                     name="data1", shape=[10, 10, 2], dtype="float32"
                 )
-                data2 = fluid.data(
+                data2 = paddle.static.data(
                     name="data2", shape=[3, 2, 10], dtype="float32"
                 )
                 paddle.mm(data1, data2)
diff --git a/python/paddle/fluid/tests/unittests/test_matmul_v2_op.py b/python/paddle/fluid/tests/unittests/test_matmul_v2_op.py
index 0338517399c28a60a7fc72c15d7a7c0e736937b9..88831ffeb880c838094035a3eb84d32fa22987e1 100644
--- a/python/paddle/fluid/tests/unittests/test_matmul_v2_op.py
+++ b/python/paddle/fluid/tests/unittests/test_matmul_v2_op.py
@@ -464,8 +464,12 @@ class TestMatMulV2API(unittest.TestCase):
     def check_static_result(self, place):
         with fluid.program_guard(fluid.Program(), fluid.Program()):
-            input_x = fluid.data(name="input_x", shape=[4, 3], dtype="float32")
-            input_y = fluid.data(name="input_y", shape=[3, 4], dtype="float32")
+            input_x = paddle.static.data(
+                name="input_x", shape=[4, 3], dtype="float32"
+            )
+            input_y = paddle.static.data(
+                name="input_y", shape=[3, 4], dtype="float32"
+            )
 
             result = paddle.matmul(input_x, input_y)
diff --git a/python/paddle/fluid/tests/unittests/test_matrix_nms_op.py b/python/paddle/fluid/tests/unittests/test_matrix_nms_op.py
index 24ac8253d4384acbb980e4cb13023c89b4b564fa..7a0b12892e40bb7812673ecf623fae300beacf70 100644
--- a/python/paddle/fluid/tests/unittests/test_matrix_nms_op.py
+++ b/python/paddle/fluid/tests/unittests/test_matrix_nms_op.py
@@ -19,7 +19,6 @@ import numpy as np
 from op_test import OpTest
 
 import paddle
-import paddle.fluid as fluid
 from paddle.fluid import Program, program_guard
 
@@ -329,10 +328,10 @@ class TestMatrixNMSError(unittest.TestCase):
         scores_np = np.transpose(scores, (0, 2, 1))
 
         with program_guard(Program(), Program()):
-            boxes_data = fluid.data(
+            boxes_data = paddle.static.data(
                 name='bboxes', shape=[M, C, BOX_SIZE], dtype='float32'
             )
-            scores_data = fluid.data(
+            scores_data = paddle.static.data(
                 name='scores', shape=[N, C, M], dtype='float32'
             )
diff --git a/python/paddle/fluid/tests/unittests/test_matrix_power_op.py b/python/paddle/fluid/tests/unittests/test_matrix_power_op.py
index 8296aa320f59bb2e853fda343f00cdeedc3d2660..0e638bb60e72b3d3c23be7550a50dfe5f6baa3d9 100644
--- a/python/paddle/fluid/tests/unittests/test_matrix_power_op.py
+++ b/python/paddle/fluid/tests/unittests/test_matrix_power_op.py
@@ -249,7 +249,9 @@ class TestMatrixPowerAPI(unittest.TestCase):
     def check_static_result(self, place):
         with fluid.program_guard(fluid.Program(), fluid.Program()):
-            input_x = fluid.data(name="input_x", shape=[4, 4], dtype="float64")
+            input_x = paddle.static.data(
+                name="input_x", shape=[4, 4], dtype="float64"
+            )
             result = paddle.linalg.matrix_power(x=input_x, n=-2)
             input_np = np.random.random([4, 4]).astype("float64")
             result_np = np.linalg.matrix_power(input_np, -2)
@@ -290,35 +292,45 @@ class TestMatrixPowerAPIError(unittest.TestCase):
 
         # n must be int
         for n in [2.0, '2', -2.0]:
-            input = fluid.data(
+            input = paddle.static.data(
                 name="input_float32", shape=[4, 4], dtype='float32'
             )
             self.assertRaises(TypeError, paddle.linalg.matrix_power, input, n)
 
         # The data type of input must be float32 or float64.
         for dtype in ["bool", "int32", "int64", "float16"]:
-            input = fluid.data(name="input_" + dtype, shape=[4, 4], dtype=dtype)
+            input = paddle.static.data(
+                name="input_" + dtype, shape=[4, 4], dtype=dtype
+            )
             self.assertRaises(TypeError, paddle.linalg.matrix_power, input, 2)
 
         # When out is set, the data type must be the same as input.
-        input = fluid.data(name="input_1", shape=[4, 4], dtype="float32")
-        out = fluid.data(name="output", shape=[4, 4], dtype="float64")
+        input = paddle.static.data(
+            name="input_1", shape=[4, 4], dtype="float32"
+        )
+        out = paddle.static.data(name="output", shape=[4, 4], dtype="float64")
         self.assertRaises(TypeError, paddle.linalg.matrix_power, input, 2, out)
 
         # The number of dimensions of input must be >= 2.
-        input = fluid.data(name="input_2", shape=[4], dtype="float32")
+        input = paddle.static.data(name="input_2", shape=[4], dtype="float32")
         self.assertRaises(ValueError, paddle.linalg.matrix_power, input, 2)
 
         # The inner-most 2 dimensions of input should be equal to each other
-        input = fluid.data(name="input_3", shape=[4, 5], dtype="float32")
+        input = paddle.static.data(
+            name="input_3", shape=[4, 5], dtype="float32"
+        )
         self.assertRaises(ValueError, paddle.linalg.matrix_power, input, 2)
 
         # The size of input should not be 0
-        input = fluid.data(name="input_4", shape=[1, 1, 0, 0], dtype="float32")
+        input = paddle.static.data(
+            name="input_4", shape=[1, 1, 0, 0], dtype="float32"
+        )
         self.assertRaises(ValueError, paddle.linalg.matrix_power, input, 2)
 
         # The size of input should not be 0
-        input = fluid.data(name="input_5", shape=[0, 0], dtype="float32")
+        input = paddle.static.data(
+            name="input_5", shape=[0, 0], dtype="float32"
+        )
         self.assertRaises(
             ValueError, paddle.linalg.matrix_power, input, -956301312
         )
@@ -332,7 +344,9 @@ class TestMatrixPowerSingularAPI(unittest.TestCase):
     def check_static_result(self, place):
         with fluid.program_guard(fluid.Program(), fluid.Program()):
-            input = fluid.data(name="input", shape=[4, 4], dtype="float64")
+            input = paddle.static.data(
+                name="input", shape=[4, 4], dtype="float64"
+            )
             result = paddle.linalg.matrix_power(x=input, n=-2)
 
             input_np = np.zeros([4, 4]).astype("float64")
diff --git a/python/paddle/fluid/tests/unittests/test_matrix_rank_op.py b/python/paddle/fluid/tests/unittests/test_matrix_rank_op.py
index 86e751336e6e94fee72e39cd53e2f51954b367b2..5e740a45b642344ccc656d0c008bc7e5cdc24c0e 100644
--- a/python/paddle/fluid/tests/unittests/test_matrix_rank_op.py
+++ b/python/paddle/fluid/tests/unittests/test_matrix_rank_op.py
@@ -175,10 +175,10 @@ class TestMatrixRankAPI(unittest.TestCase):
             with fluid.program_guard(fluid.Program(), fluid.Program()):
                 x_np = np.random.rand(3, 4, 7, 7).astype(np.float64)
                 tol_np = np.random.random([3, 4]).astype(np.float32)
-                x_pd = paddle.fluid.data(
+                x_pd = paddle.static.data(
                     name="X", shape=[3, 4, 7, 7], dtype='float64'
                 )
-                tol_pd = paddle.fluid.data(
+                tol_pd = paddle.static.data(
                     name="TolTensor", shape=[3, 4], dtype='float32'
                 )
                 rank_np = np.linalg.matrix_rank(x_np, tol_np, hermitian=False)
@@ -196,7 +196,7 @@ class TestMatrixRankAPI(unittest.TestCase):
         for place in places:
             with fluid.program_guard(fluid.Program(), fluid.Program()):
                 x_np = np.random.rand(3, 4, 7, 7).astype(np.float64)
-                x_pd = paddle.fluid.data(
+                x_pd = paddle.static.data(
                     name="X", shape=[3, 4, 7, 7], dtype='float64'
                 )
                 rank_np = np.linalg.matrix_rank(x_np, hermitian=True)
@@ -212,7 +212,7 @@ class TestMatrixRankAPI(unittest.TestCase):
         for place in places:
             with fluid.program_guard(fluid.Program(), fluid.Program()):
                 x_np = np.random.rand(3, 4, 7, 7).astype(np.float64)
-                x_pd = paddle.fluid.data(
+                x_pd = paddle.static.data(
                     name="X", shape=[3, 4, 7, 7], dtype='float64'
                 )
                 rank_np = np.linalg.matrix_rank(x_np, 0.1, hermitian=False)
diff --git a/python/paddle/fluid/tests/unittests/test_max_min_amax_amin_op.py b/python/paddle/fluid/tests/unittests/test_max_min_amax_amin_op.py
index 679dc7060f73967bbbe48ace0f639fed5dfd9c54..52cfa21424e5a8d2bcf5d21313278d48ebf0cd86 100644
--- a/python/paddle/fluid/tests/unittests/test_max_min_amax_amin_op.py
+++ b/python/paddle/fluid/tests/unittests/test_max_min_amax_amin_op.py
@@ -95,7 +95,9 @@ class TestMaxMinAmaxAminAPI(unittest.TestCase):
         startup_program = fluid.Program()
         train_program = fluid.Program()
         with fluid.program_guard(startup_program, train_program):
-            x = fluid.data(name='input', dtype=self.dtype, shape=self.shape)
+            x = paddle.static.data(
+                name='input', dtype=self.dtype, shape=self.shape
+            )
             x.stop_gradient = False
             out = self._choose_paddle_func(func, x)
diff --git a/python/paddle/fluid/tests/unittests/test_maxout_op.py b/python/paddle/fluid/tests/unittests/test_maxout_op.py
index 7756f7d4ae841c38202aee1f80fb29e93af82ef8..0c3fb620f2c6ae3366d03720a3f561b23364870c 100644
--- a/python/paddle/fluid/tests/unittests/test_maxout_op.py
+++ b/python/paddle/fluid/tests/unittests/test_maxout_op.py
@@ -97,7 +97,7 @@ class TestMaxoutAPI(unittest.TestCase):
     def test_static_api(self):
         with paddle.static.program_guard(paddle.static.Program()):
-            x = paddle.fluid.data('X', self.x_np.shape, self.x_np.dtype)
+            x = paddle.static.data('X', self.x_np.shape, self.x_np.dtype)
             out1 = F.maxout(x, self.groups, self.axis)
             m = paddle.nn.Maxout(self.groups, self.axis)
             out2 = m(x)
@@ -127,12 +127,12 @@ class TestMaxoutAPI(unittest.TestCase):
         # The input type must be Variable.
         self.assertRaises(TypeError, F.maxout, 1)
         # The input dtype must be float16, float32, float64.
-        x_int32 = paddle.fluid.data(
+        x_int32 = paddle.static.data(
             name='x_int32', shape=[2, 4, 6, 8], dtype='int32'
         )
         self.assertRaises(TypeError, F.maxout, x_int32)
 
-        x_float32 = paddle.fluid.data(name='x_float32', shape=[2, 4, 6, 8])
+        x_float32 = paddle.static.data(name='x_float32', shape=[2, 4, 6, 8])
         self.assertRaises(ValueError, F.maxout, x_float32, 2, 2)
diff --git a/python/paddle/fluid/tests/unittests/test_mean_op.py b/python/paddle/fluid/tests/unittests/test_mean_op.py
index 042ea928379291f4555c6d5ef2ca7aa73c73374e..56f8f40a953311fedea1468d18af247f3ccbf969 100644
--- a/python/paddle/fluid/tests/unittests/test_mean_op.py
+++ b/python/paddle/fluid/tests/unittests/test_mean_op.py
@@ -411,7 +411,7 @@ class TestMeanAPI(unittest.TestCase):
     def test_api_static(self):
         paddle.enable_static()
         with paddle.static.program_guard(paddle.static.Program()):
-            x = paddle.fluid.data('X', self.x_shape)
+            x = paddle.static.data('X', self.x_shape)
             out1 = paddle.mean(x)
             out2 = paddle.tensor.mean(x)
             out3 = paddle.tensor.stat.mean(x)
@@ -452,7 +452,7 @@ class TestMeanAPI(unittest.TestCase):
     def test_fluid_api(self):
         with fluid.program_guard(fluid.Program(), fluid.Program()):
-            x = fluid.data("x", shape=[10, 10], dtype="float32")
+            x = paddle.static.data("x", shape=[10, 10], dtype="float32")
             out = paddle.mean(x=x, axis=1)
             place = fluid.CPUPlace()
             exe = fluid.Executor(place)
@@ -476,7 +476,7 @@ class TestMeanAPI(unittest.TestCase):
             self.assertRaises(Exception, paddle.mean, x, 2)
         paddle.enable_static()
         with paddle.static.program_guard(paddle.static.Program()):
-            x = paddle.fluid.data('X', [10, 12], 'int32')
+            x = paddle.static.data('X', [10, 12], 'int32')
             self.assertRaises(TypeError, paddle.mean, x)
diff --git a/python/paddle/fluid/tests/unittests/test_median.py b/python/paddle/fluid/tests/unittests/test_median.py
index 47a14fbf88969aba9ab13f02aa4e83c3092f014b..738f98ed782a92a265c2c8b4059d9d6ac0c6da97 100644
--- a/python/paddle/fluid/tests/unittests/test_median.py
+++ b/python/paddle/fluid/tests/unittests/test_median.py
@@ -36,7 +36,7 @@ class TestMedian(unittest.TestCase):
         startup_program = Program()
         exe = paddle.static.Executor()
         with program_guard(main_program, startup_program):
-            x_in = paddle.fluid.data(shape=x.shape, dtype=x.dtype, name='x')
+            x_in = paddle.static.data(shape=x.shape, dtype=x.dtype, name='x')
             y = paddle.median(x_in, axis, keepdims)
             [res_pd] = exe.run(feed={'x': x}, fetch_list=[y])
             self.check_numpy_res(res_pd, res_np)
diff --git a/python/paddle/fluid/tests/unittests/test_meshgrid_op.py b/python/paddle/fluid/tests/unittests/test_meshgrid_op.py
index d74d3ea4fbcfb8104e7435f9f9b821c684d77f81..1c08d0bc83d6cd70d10b140390b970867c40b2ea 100644
--- a/python/paddle/fluid/tests/unittests/test_meshgrid_op.py
+++ b/python/paddle/fluid/tests/unittests/test_meshgrid_op.py
@@ -71,8 +71,8 @@ class TestMeshgridOp2(TestMeshgridOp):
 class TestMeshgridOp3(unittest.TestCase):
     def test_api(self):
-        x = fluid.data(shape=[100], dtype='int32', name='x')
-        y = fluid.data(shape=[200], dtype='int32', name='y')
+        x = paddle.static.data(shape=[100], dtype='int32', name='x')
+        y = paddle.static.data(shape=[200], dtype='int32', name='y')
 
         input_1 = np.random.randint(
             0,
@@ -107,8 +107,8 @@ class TestMeshgridOp3(unittest.TestCase):
 class TestMeshgridOp4(unittest.TestCase):
     def test_list_input(self):
-        x = fluid.data(shape=[100], dtype='int32', name='x')
-        y = fluid.data(shape=[200], dtype='int32', name='y')
+        x = paddle.static.data(shape=[100], dtype='int32', name='x')
+        y = paddle.static.data(shape=[200], dtype='int32', name='y')
 
         input_1 = np.random.randint(
             0,
@@ -144,8 +144,8 @@ class TestMeshgridOp4(unittest.TestCase):
 class TestMeshgridOp5(unittest.TestCase):
     def test_tuple_input(self):
-        x = fluid.data(shape=[100], dtype='int32', name='x')
-        y = fluid.data(shape=[200], dtype='int32', name='y')
+        x = paddle.static.data(shape=[100], dtype='int32', name='x')
+        y = paddle.static.data(shape=[200], dtype='int32', name='y')
 
         input_1 = np.random.randint(
             0,
diff --git a/python/paddle/fluid/tests/unittests/test_modelaverage.py b/python/paddle/fluid/tests/unittests/test_modelaverage.py
index 156f0cfb8bce859d78c0d43f4ed8c92b4ec9a66c..8fe658917077f8581382e35a5424091f0184f840 100644
--- a/python/paddle/fluid/tests/unittests/test_modelaverage.py
+++ b/python/paddle/fluid/tests/unittests/test_modelaverage.py
@@ -32,7 +32,9 @@ class TestModelAverage(unittest.TestCase):
         test_program = fluid.Program()
         with fluid.program_guard(train_program, startup):
             with fluid.unique_name.guard():
-                data = fluid.data(name='X', shape=[None, 1], dtype='float32')
+                data = paddle.static.data(
+                    name='X', shape=[None, 1], dtype='float32'
+                )
                 hidden = paddle.static.nn.fc(x=data, size=10)
                 loss = paddle.mean(hidden)
                 test_program = train_program.clone()
diff --git a/python/paddle/fluid/tests/unittests/test_mse_loss.py b/python/paddle/fluid/tests/unittests/test_mse_loss.py
index 75dafb1ea3553a8cadf7127427956194cdffd285..9b96affb274aaea45a2f10fa20a5ed69c0f53014 100644
--- a/python/paddle/fluid/tests/unittests/test_mse_loss.py
+++ b/python/paddle/fluid/tests/unittests/test_mse_loss.py
@@ -30,8 +30,12 @@ class TestMseLoss(unittest.TestCase):
         sub = input_val - label_val
         np_result = np.mean(sub * sub)
 
-        input_var = fluid.data(name="input", shape=[-1, 3], dtype="float32")
-        label_var = fluid.data(name="label", shape=[-1, 3], dtype="float32")
+        input_var = paddle.static.data(
+            name="input", shape=[-1, 3], dtype="float32"
+        )
+        label_var = paddle.static.data(
+            name="label", shape=[-1, 3], dtype="float32"
+        )
 
         output = paddle.nn.functional.mse_loss(input=input_var, label=label_var)
         for use_cuda in (
@@ -52,13 +56,17 @@ class TestMseInvalidInput(unittest.TestCase):
     def test_error(self):
         def test_invalid_input():
             input = [256, 3]
-            label = fluid.data(name='label1', shape=[None, 3], dtype='float32')
+            label = paddle.static.data(
+                name='label1', shape=[None, 3], dtype='float32'
+            )
             loss = paddle.nn.functional.mse_loss(input, label)
 
         self.assertRaises(TypeError, test_invalid_input)
 
         def test_invalid_label():
-            input = fluid.data(name='input1', shape=[None, 3], dtype='float32')
+            input = paddle.static.data(
+                name='input1', shape=[None, 3], dtype='float32'
+            )
             label = [256, 3]
             loss = paddle.nn.functional.mse_loss(input, label)
 
@@ -219,10 +227,10 @@ class TestNNFunctionalMseLoss(unittest.TestCase):
             else paddle.CPUPlace()
         )
         with paddle.static.program_guard(prog, startup_prog):
-            input = paddle.fluid.data(
+            input = paddle.static.data(
                 name='input', shape=dim, dtype='float32'
             )
-            target = paddle.fluid.data(
+            target = paddle.static.data(
                 name='target', shape=dim, dtype='float32'
             )
             mse_loss = paddle.nn.functional.mse_loss(input, target, 'mean')
@@ -261,10 +269,10 @@ class TestNNFunctionalMseLoss(unittest.TestCase):
             else paddle.CPUPlace()
        )
         with paddle.static.program_guard(prog, startup_prog):
-            input = paddle.fluid.data(
+            input = paddle.static.data(
                 name='input', shape=dim, dtype='float32'
             )
-            target = paddle.fluid.data(
+            target = paddle.static.data(
                 name='target', shape=dim, dtype='float32'
             )
             mse_loss = paddle.nn.functional.mse_loss(input, target, 'sum')
@@ -303,10 +311,10 @@ class TestNNFunctionalMseLoss(unittest.TestCase):
             else paddle.CPUPlace()
         )
         with paddle.static.program_guard(prog, startup_prog):
-            input = paddle.fluid.data(
+            input = paddle.static.data(
                 name='input', shape=dim, dtype='float32'
             )
-            target = paddle.fluid.data(
+            target = paddle.static.data(
                 name='target', shape=dim, dtype='float32'
             )
             mse_loss = paddle.nn.functional.mse_loss(input, target, 'none')
diff --git a/python/paddle/fluid/tests/unittests/test_multinomial_op.py b/python/paddle/fluid/tests/unittests/test_multinomial_op.py
index c251dc696cd2dfbcc409ab12ab9bdac4cdf44dcc..4738496a2f48a86b38393eaaeb267b3398dc1125 100644
--- a/python/paddle/fluid/tests/unittests/test_multinomial_op.py
+++ b/python/paddle/fluid/tests/unittests/test_multinomial_op.py
@@ -169,7 +169,7 @@ class TestMultinomialApi(unittest.TestCase):
         startup_program = fluid.Program()
         train_program = fluid.Program()
         with fluid.program_guard(train_program, startup_program):
-            x = fluid.data('x', shape=[4], dtype='float32')
+            x = paddle.static.data('x', shape=[4], dtype='float32')
             out = paddle.multinomial(x, num_samples=100000, replacement=True)
 
             place = fluid.CPUPlace()
diff --git a/python/paddle/fluid/tests/unittests/test_multiplex_op.py b/python/paddle/fluid/tests/unittests/test_multiplex_op.py
index a0f8932ba23ab2ba2079d78346100f078ac42650..184e5597e8536a583f60666098146c6369145bc4 100644
--- a/python/paddle/fluid/tests/unittests/test_multiplex_op.py
+++ b/python/paddle/fluid/tests/unittests/test_multiplex_op.py
@@ -63,9 +63,11 @@ class TestMultiplexOp(OpTest):
 class TestMultiplexOpError(unittest.TestCase):
     def test_errors(self):
         with fluid.program_guard(fluid.Program(), fluid.Program()):
-            x1 = fluid.data(name='x1', shape=[None, 2], dtype='int64')
-            x2 = fluid.data(name='x2', shape=[None, 2], dtype='int64')
-            index = fluid.data(name='index', shape=[None, 1], dtype='int32')
+            x1 = paddle.static.data(name='x1', shape=[None, 2], dtype='int64')
+            x2 = paddle.static.data(name='x2', shape=[None, 2], dtype='int64')
+            index = paddle.static.data(
+                name='index', shape=[None, 1], dtype='int32'
+            )
 
             def test_list():
                 # the inputs type must be list
@@ -79,14 +81,18 @@ class TestMultiplexOpError(unittest.TestCase):
             self.assertRaises(ValueError, test_len)
 
             def test_type():
-                y1 = fluid.data(name='y1', shape=[None, 2], dtype='int16')
-                y2 = fluid.data(name='y2', shape=[None, 2], dtype='int16')
+                y1 = paddle.static.data(
+                    name='y1', shape=[None, 2], dtype='int16'
+                )
+                y2 = paddle.static.data(
+                    name='y2', shape=[None, 2], dtype='int16'
+                )
                 paddle.multiplex(inputs=[y1, y2], index=index)
 
             self.assertRaises(TypeError, test_type)
 
             def test_type2():
-                index2 = fluid.data(
+                index2 = paddle.static.data(
                     name='index2', shape=[None, 1], dtype='int16'
                 )
                 paddle.multiplex(inputs=[x1, x2], index=index2)
diff --git a/python/paddle/fluid/tests/unittests/test_multiprocess_dataloader_iterable_dataset_static.py b/python/paddle/fluid/tests/unittests/test_multiprocess_dataloader_iterable_dataset_static.py
index 40b6f49a253870a0c062bdcb31e046672ce14137..639ca491789c25d68c540ae017236b2d1860d16d 100644
--- a/python/paddle/fluid/tests/unittests/test_multiprocess_dataloader_iterable_dataset_static.py
+++ b/python/paddle/fluid/tests/unittests/test_multiprocess_dataloader_iterable_dataset_static.py
@@ -52,10 +52,12 @@ def simple_fc_net_static():
 
     with fluid.unique_name.guard():
         with fluid.program_guard(main_prog, startup_prog):
-            image = fluid.data(
+            image = paddle.static.data(
                 name='image', shape=[None, IMAGE_SIZE], dtype='float32'
             )
-            label = fluid.data(name='label', shape=[None, 1], dtype='int64')
+            label = paddle.static.data(
+                name='label', shape=[None, 1], dtype='int64'
+            )
             hidden = image
             param_attr = fluid.ParamAttr(
                 initializer=paddle.nn.initializer.Constant(value=0.8)
diff --git a/python/paddle/fluid/tests/unittests/test_multiprocess_dataloader_static.py b/python/paddle/fluid/tests/unittests/test_multiprocess_dataloader_static.py
index f4585fe36220b041153d19aeb1964e79a963dd2b..42b333460833932f98b09a266a9525093e199b25 100644
--- a/python/paddle/fluid/tests/unittests/test_multiprocess_dataloader_static.py
+++ b/python/paddle/fluid/tests/unittests/test_multiprocess_dataloader_static.py
@@ -52,10 +52,12 @@ def simple_fc_net_static():
 
     with fluid.unique_name.guard():
         with fluid.program_guard(main_prog, startup_prog):
-            image = fluid.data(
+            image = paddle.static.data(
                 name='image', shape=[None, IMAGE_SIZE], dtype='float32'
             )
-            label = fluid.data(name='label', shape=[None, 1], dtype='int64')
+            label = paddle.static.data(
+                name='label', shape=[None, 1], dtype='int64'
+            )
             hidden = image
             param_attr = fluid.ParamAttr(
                 initializer=paddle.nn.initializer.Constant(value=0.8)
@@ -192,10 +194,10 @@ class TestStaticDataLoader(unittest.TestCase):
 class TestStaticDataLoaderReturnList(unittest.TestCase):
     def run_single_place(self, num_workers):
         scope = fluid.Scope()
-        image = fluid.data(
+        image = paddle.static.data(
             name='image', shape=[None, IMAGE_SIZE], dtype='float32'
         )
-        label = fluid.data(name='label', shape=[None, 1], dtype='int64')
+        label = paddle.static.data(name='label', shape=[None, 1], dtype='int64')
         with fluid.scope_guard(scope):
             dataset = RandomDataset(SAMPLE_NUM, CLASS_NUM)
             dataloader = DataLoader(
@@ -215,10 +217,10 @@ class TestStaticDataLoaderReturnList(unittest.TestCase):
 
     def run_multi_place(self, num_workers):
         scope = fluid.Scope()
-        image = fluid.data(
+        image = paddle.static.data(
             name='image', shape=[None, IMAGE_SIZE], dtype='float32'
         )
-        label = fluid.data(name='label', shape=[None, 1], dtype='int64')
+        label = paddle.static.data(name='label', shape=[None, 1], dtype='int64')
         with fluid.scope_guard(scope):
             dataset = RandomDataset(SAMPLE_NUM, CLASS_NUM)
             dataloader = DataLoader(
diff --git a/python/paddle/fluid/tests/unittests/test_multiprocess_reader_exception.py b/python/paddle/fluid/tests/unittests/test_multiprocess_reader_exception.py
index 03257b75fb38c974282f9ac7a38473a943b3fe4c..5edae8c5baab586969c19054082964c2560f668d 100644
--- a/python/paddle/fluid/tests/unittests/test_multiprocess_reader_exception.py
+++ b/python/paddle/fluid/tests/unittests/test_multiprocess_reader_exception.py
@@ -53,7 +53,9 @@ class TestMultiprocessReaderExceptionWithQueueSuccess(unittest.TestCase):
             return __impl__
 
         with fluid.program_guard(fluid.Program(), fluid.Program()):
-            image = fluid.data(name='image', dtype='float32', shape=[None, 10])
+            image = paddle.static.data(
+                name='image', dtype='float32', shape=[None, 10]
+            )
             reader = fluid.io.DataLoader.from_generator(
                 feed_list=[image], capacity=2, iterable=iterable
             )
diff --git a/python/paddle/fluid/tests/unittests/test_nan_to_num_op.py b/python/paddle/fluid/tests/unittests/test_nan_to_num_op.py
index 7db79e4e80e55308139a036a8c8c46025231ba87..fc17cc913d0a6468307f3121865dee5c23edf95f 100644
--- a/python/paddle/fluid/tests/unittests/test_nan_to_num_op.py
+++ b/python/paddle/fluid/tests/unittests/test_nan_to_num_op.py
@@ -71,7 +71,7 @@ class TestNanToNum(unittest.TestCase):
         out4_np = np_nan_to_num(x_np, 1.0, 9.0, -12.0)
         paddle.enable_static()
         with paddle.static.program_guard(paddle.static.Program()):
-            x = paddle.fluid.data('X', x_np.shape)
+            x = paddle.static.data('X', x_np.shape)
             out1 = paddle.nan_to_num(x)
             out2 = paddle.nan_to_num(x, 1.0)
             out3 = paddle.nan_to_num(x, 1.0, 9.0)
diff --git a/python/paddle/fluid/tests/unittests/test_nanmean_api.py b/python/paddle/fluid/tests/unittests/test_nanmean_api.py
index 368251520fe1a13e7abdc0b1188eeb7c42be04c9..8c0a335c268144882b8b4636f5b5a0841077b613 100644
--- a/python/paddle/fluid/tests/unittests/test_nanmean_api.py
+++ b/python/paddle/fluid/tests/unittests/test_nanmean_api.py
@@ -41,7 +41,7 @@ class TestNanmeanAPI(unittest.TestCase):
     def test_api_static(self):
         paddle.enable_static()
         with paddle.static.program_guard(paddle.static.Program()):
-            x = paddle.fluid.data('X', self.x_shape)
+            x = paddle.static.data('X', self.x_shape)
             out1 = paddle.nanmean(x)
             out2 = paddle.tensor.nanmean(x)
             out3 = paddle.tensor.math.nanmean(x)
@@ -90,7 +90,7 @@ class TestNanmeanAPI(unittest.TestCase):
     def test_errors(self):
         paddle.enable_static()
         with paddle.static.program_guard(paddle.static.Program()):
-            x = paddle.fluid.data('X', [10, 12], 'int32')
+            x = paddle.static.data('X', [10, 12], 'int32')
             self.assertRaises(TypeError, paddle.nanmean, x)
 
     def test_api_dygraph_grad(self):
diff --git a/python/paddle/fluid/tests/unittests/test_nanmedian.py b/python/paddle/fluid/tests/unittests/test_nanmedian.py
index aeceadb0ea9b889e7c68577f889fd8f21ff4f673..7f5ecb1865ce3e1975ed8fd40e4e56396939c955 100644
--- a/python/paddle/fluid/tests/unittests/test_nanmedian.py
+++ b/python/paddle/fluid/tests/unittests/test_nanmedian.py
@@ -83,7 +83,7 @@ class TestNanmedian(unittest.TestCase):
         paddle.enable_static()
         np_res = np.nanmedian(data, keepdims=True)
         with paddle.static.program_guard(paddle.static.Program()):
-            x = paddle.fluid.data('X', data.shape)
+            x = paddle.static.data('X', data.shape)
             out1 = paddle.nanmedian(x, keepdim=True)
             out2 = paddle.tensor.nanmedian(x, keepdim=True)
             out3 = paddle.tensor.stat.nanmedian(x, keepdim=True)
@@ -151,10 +151,10 @@ class TestNanmedian(unittest.TestCase):
     def test_errors(self):
         paddle.enable_static()
         with paddle.static.program_guard(paddle.static.Program()):
-            x = paddle.fluid.data("X", [10, 12])
+            x = paddle.static.data("X", [10, 12])
 
             def test_dtype():
-                x2 = paddle.fluid.data('X2', [10, 12], 'bool')
+                x2 = paddle.static.data('X2', [10, 12], 'bool')
                 paddle.nanmedian(x2)
 
             def test_empty_axis():
diff --git a/python/paddle/fluid/tests/unittests/test_nansum_api.py b/python/paddle/fluid/tests/unittests/test_nansum_api.py
index 87b05b4245d22863a07a7e55dab321be2ebcf844..3cd9005e000f8f8859658d03b5071206799cbf6e 100644
--- a/python/paddle/fluid/tests/unittests/test_nansum_api.py
+++ b/python/paddle/fluid/tests/unittests/test_nansum_api.py
@@ -26,7 +26,9 @@ class API_Test_Nansum(unittest.TestCase):
         startup_program = fluid.Program()
         train_program = fluid.Program()
         with fluid.program_guard(train_program, startup_program):
-            input = fluid.data(name='input', dtype='float32', shape=[2, 4])
+            input = paddle.static.data(
+                name='input', dtype='float32', shape=[2, 4]
+            )
             out1 = paddle.nansum(input)
             out2 = paddle.nansum(input, axis=0)
             out3 = paddle.nansum(input, axis=-1)
diff --git a/python/paddle/fluid/tests/unittests/test_neg_op.py b/python/paddle/fluid/tests/unittests/test_neg_op.py
index 53f01b94d303c737e6f66819f495033032a1a361..ea748a57aeb84f69b69f2cdc76f290427d8d727b 100644
--- a/python/paddle/fluid/tests/unittests/test_neg_op.py
+++ b/python/paddle/fluid/tests/unittests/test_neg_op.py
@@ -36,7 +36,9 @@ class
TestNegOp(unittest.TestCase): ) def run_static(self, use_gpu=False): - input = paddle.fluid.data(name='input', shape=[32, 8], dtype=self.dtype) + input = paddle.static.data( + name='input', shape=[32, 8], dtype=self.dtype + ) result = paddle.neg(input) place = paddle.CUDAPlace(0) if use_gpu else paddle.CPUPlace() diff --git a/python/paddle/fluid/tests/unittests/test_nll_loss.py b/python/paddle/fluid/tests/unittests/test_nll_loss.py index 2d6d52e5935d94afd79e75a4ff72a7aaa5e92b23..e1197aa0a18469fbc25c4ea7f15908e0b14b9dca 100644 --- a/python/paddle/fluid/tests/unittests/test_nll_loss.py +++ b/python/paddle/fluid/tests/unittests/test_nll_loss.py @@ -91,8 +91,10 @@ class TestNLLLoss(unittest.TestCase): ) # place = fluid.CPUPlace() with fluid.program_guard(prog, startup_prog): - input = fluid.data(name='input', shape=[10, 10], dtype='float64') - label = fluid.data(name='label', shape=[10], dtype='int64') + input = paddle.static.data( + name='input', shape=[10, 10], dtype='float64' + ) + label = paddle.static.data(name='label', shape=[10], dtype='int64') nll_loss = paddle.nn.loss.NLLLoss() res = nll_loss(input, label) @@ -137,8 +139,10 @@ class TestNLLLoss(unittest.TestCase): ) # place = fluid.CPUPlace() with fluid.program_guard(prog, startup_prog): - input = fluid.data(name='input', shape=[10, 10], dtype='float64') - label = fluid.data(name='label', shape=[10], dtype='int64') + input = paddle.static.data( + name='input', shape=[10, 10], dtype='float64' + ) + label = paddle.static.data(name='label', shape=[10], dtype='int64') nll_loss = paddle.nn.loss.NLLLoss(reduction='sum') res = nll_loss(input, label) @@ -186,9 +190,13 @@ class TestNLLLoss(unittest.TestCase): ) # place = fluid.CPUPlace() with fluid.program_guard(prog, startup_prog): - input = fluid.data(name='input', shape=[10, 10], dtype='float64') - label = fluid.data(name='label', shape=[10], dtype='int64') - weight = fluid.data(name='weight', shape=[10], dtype='float64') + input = paddle.static.data( + name='input', shape=[10, 10], dtype='float64' + ) + label = paddle.static.data(name='label', shape=[10], dtype='int64') + weight = paddle.static.data( + name='weight', shape=[10], dtype='float64' + ) nll_loss = paddle.nn.loss.NLLLoss(weight=weight) res = nll_loss(input, label) @@ -244,9 +252,13 @@ class TestNLLLoss(unittest.TestCase): ) # place = fluid.CPUPlace() with fluid.program_guard(prog, startup_prog): - input = fluid.data(name='input', shape=[10, 10], dtype='float64') - label = fluid.data(name='label', shape=[10], dtype='int64') - weight = fluid.data(name='weight', shape=[10], dtype='float64') + input = paddle.static.data( + name='input', shape=[10, 10], dtype='float64' + ) + label = paddle.static.data(name='label', shape=[10], dtype='int64') + weight = paddle.static.data( + name='weight', shape=[10], dtype='float64' + ) nll_loss = paddle.nn.loss.NLLLoss(weight=weight, reduction='sum') res = nll_loss(input, label) @@ -287,9 +299,13 @@ class TestNLLLoss(unittest.TestCase): startup_prog = fluid.Program() place = fluid.CPUPlace() with fluid.program_guard(prog, startup_prog): - input = fluid.data(name='input', shape=[10, 10], dtype='float64') - label = fluid.data(name='label', shape=[10], dtype='int64') - weight = fluid.data(name='weight', shape=[10], dtype='float64') + input = paddle.static.data( + name='input', shape=[10, 10], dtype='float64' + ) + label = paddle.static.data(name='label', shape=[10], dtype='int64') + weight = paddle.static.data( + name='weight', shape=[10], dtype='float64' + ) nll_loss = 
paddle.nn.loss.NLLLoss(weight=weight) res = nll_loss(input, label) @@ -328,9 +344,13 @@ class TestNLLLoss(unittest.TestCase): startup_prog = fluid.Program() place = fluid.CPUPlace() with fluid.program_guard(prog, startup_prog): - input = fluid.data(name='input', shape=[10, 10], dtype='float64') - label = fluid.data(name='label', shape=[10], dtype='int64') - weight = fluid.data(name='weight', shape=[10], dtype='float64') + input = paddle.static.data( + name='input', shape=[10, 10], dtype='float64' + ) + label = paddle.static.data(name='label', shape=[10], dtype='int64') + weight = paddle.static.data( + name='weight', shape=[10], dtype='float64' + ) nll_loss = paddle.nn.loss.NLLLoss(weight=weight, reduction='none') res = nll_loss(input, label) @@ -375,10 +395,12 @@ class TestNLLLoss(unittest.TestCase): ) # place = fluid.CPUPlace() with fluid.program_guard(prog, startup_prog): - input = fluid.data( + input = paddle.static.data( name='input', shape=[5, 3, 5, 5], dtype='float64' ) - label = fluid.data(name='label', shape=[5, 5, 5], dtype='int64') + label = paddle.static.data( + name='label', shape=[5, 5, 5], dtype='int64' + ) nll_loss = paddle.nn.loss.NLLLoss() res = nll_loss(input, label) @@ -416,10 +438,12 @@ class TestNLLLoss(unittest.TestCase): ) # place = fluid.CPUPlace() with fluid.program_guard(prog, startup_prog): - input = fluid.data( + input = paddle.static.data( name='input', shape=[5, 3, 5, 5], dtype='float64' ) - label = fluid.data(name='label', shape=[5, 5, 5], dtype='int64') + label = paddle.static.data( + name='label', shape=[5, 5, 5], dtype='int64' + ) nll_loss = paddle.nn.loss.NLLLoss(reduction='sum') res = nll_loss(input, label) @@ -458,11 +482,15 @@ class TestNLLLoss(unittest.TestCase): ) # place = fluid.CPUPlace() with fluid.program_guard(prog, startup_prog): - input = fluid.data( + input = paddle.static.data( name='input', shape=[5, 3, 5, 5], dtype='float64' ) - label = fluid.data(name='label', shape=[5, 5, 5], dtype='int64') - weight = fluid.data(name='weight', shape=[3], dtype='float64') + label = paddle.static.data( + name='label', shape=[5, 5, 5], dtype='int64' + ) + weight = paddle.static.data( + name='weight', shape=[3], dtype='float64' + ) nll_loss = paddle.nn.loss.NLLLoss(weight=weight) res = nll_loss(input, label) @@ -503,11 +531,15 @@ class TestNLLLoss(unittest.TestCase): startup_prog = fluid.Program() place = fluid.CPUPlace() with fluid.program_guard(prog, startup_prog): - input = fluid.data( + input = paddle.static.data( name='input', shape=[5, 3, 5, 5], dtype='float64' ) - label = fluid.data(name='label', shape=[5, 5, 5], dtype='int64') - weight = fluid.data(name='weight', shape=[3], dtype='float64') + label = paddle.static.data( + name='label', shape=[5, 5, 5], dtype='int64' + ) + weight = paddle.static.data( + name='weight', shape=[3], dtype='float64' + ) nll_loss = paddle.nn.loss.NLLLoss(weight=weight) res = nll_loss(input, label) @@ -552,11 +584,15 @@ class TestNLLLoss(unittest.TestCase): else fluid.CPUPlace() ) with fluid.program_guard(prog, startup_prog): - input = fluid.data( + input = paddle.static.data( name='input', shape=[5, 3, 5, 5], dtype='float64' ) - label = fluid.data(name='label', shape=[5, 5, 5], dtype='int64') - weight = fluid.data(name='weight', shape=[3], dtype='float64') + label = paddle.static.data( + name='label', shape=[5, 5, 5], dtype='int64' + ) + weight = paddle.static.data( + name='weight', shape=[3], dtype='float64' + ) nll_loss = paddle.nn.loss.NLLLoss(weight=weight, reduction='sum') res = nll_loss(input, label) @@ -603,10 
+639,12 @@ class TestNLLLoss(unittest.TestCase): ) # place = fluid.CPUPlace() with fluid.program_guard(prog, startup_prog): - input = fluid.data( + input = paddle.static.data( name='input', shape=[5, 3, 5, 5, 5], dtype='float64' ) - label = fluid.data(name='label', shape=[5, 5, 5, 5], dtype='int64') + label = paddle.static.data( + name='label', shape=[5, 5, 5, 5], dtype='int64' + ) nll_loss = paddle.nn.loss.NLLLoss() res = nll_loss(input, label) @@ -651,11 +689,15 @@ class TestNLLLoss(unittest.TestCase): ) # place = fluid.CPUPlace() with fluid.program_guard(prog, startup_prog): - input = fluid.data( + input = paddle.static.data( name='input', shape=[5, 3, 5, 5, 5], dtype='float64' ) - label = fluid.data(name='label', shape=[5, 5, 5, 5], dtype='int64') - weight = fluid.data(name='weight', shape=[3], dtype='float64') + label = paddle.static.data( + name='label', shape=[5, 5, 5, 5], dtype='int64' + ) + weight = paddle.static.data( + name='weight', shape=[3], dtype='float64' + ) nll_loss = paddle.nn.loss.NLLLoss(weight=weight) res = nll_loss(input, label) @@ -708,11 +750,15 @@ class TestNLLLoss(unittest.TestCase): ) place = fluid.CPUPlace() with fluid.program_guard(prog, startup_prog): - input = fluid.data( + input = paddle.static.data( name='input', shape=[5, 3, 5, 5, 5], dtype='float64' ) - label = fluid.data(name='label', shape=[5, 5, 5, 5], dtype='int64') - weight = fluid.data(name='weight', shape=[3], dtype='float64') + label = paddle.static.data( + name='label', shape=[5, 5, 5, 5], dtype='int64' + ) + weight = paddle.static.data( + name='weight', shape=[3], dtype='float64' + ) nll_loss = paddle.nn.loss.NLLLoss(weight=weight, reduction='sum') res = nll_loss(input, label) @@ -768,11 +814,15 @@ class TestNLLLoss(unittest.TestCase): ) # place = fluid.CPUPlace() with fluid.program_guard(prog, startup_prog): - input = fluid.data( + input = paddle.static.data( name='input', shape=[5, 3, 5, 5, 5], dtype='float64' ) - label = fluid.data(name='label', shape=[5, 5, 5, 5], dtype='int64') - weight = fluid.data(name='weight', shape=[3], dtype='float64') + label = paddle.static.data( + name='label', shape=[5, 5, 5, 5], dtype='int64' + ) + weight = paddle.static.data( + name='weight', shape=[3], dtype='float64' + ) nll_loss = paddle.nn.loss.NLLLoss(weight=weight, reduction='none') res = nll_loss(input, label) @@ -824,11 +874,15 @@ class TestNLLLoss(unittest.TestCase): startup_prog = fluid.Program() place = fluid.CPUPlace() with fluid.program_guard(prog, startup_prog): - input = fluid.data( + input = paddle.static.data( name='input', shape=[5, 3, 5, 5, 5], dtype='float64' ) - label = fluid.data(name='label', shape=[5, 5, 5, 5], dtype='int64') - weight = fluid.data(name='weight', shape=[3], dtype='float64') + label = paddle.static.data( + name='label', shape=[5, 5, 5, 5], dtype='int64' + ) + weight = paddle.static.data( + name='weight', shape=[3], dtype='float64' + ) nll_loss = paddle.nn.loss.NLLLoss(weight=weight, reduction='none') res = nll_loss(input, label) @@ -1081,8 +1135,8 @@ class TestNLLLossName(unittest.TestCase): startup_prog = paddle.static.Program() place = paddle.CPUPlace() with paddle.static.program_guard(prog, startup_prog): - x = paddle.fluid.data(name='x', shape=[10, 10], dtype='float64') - label = paddle.fluid.data(name='label', shape=[10], dtype='int64') + x = paddle.static.data(name='x', shape=[10, 10], dtype='float64') + label = paddle.static.data(name='label', shape=[10], dtype='int64') nll_loss = paddle.nn.loss.NLLLoss(name='nll_loss') res = nll_loss(x, label) 
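# ---------------------------------------------------------------------------
# Editor's aside (not part of the diff): every hunk in this file applies the
# same mechanical rewrite, fluid.data -> paddle.static.data, which keeps the
# (name, shape, dtype) arguments unchanged. A minimal runnable sketch of the
# migrated pattern, with illustrative names and sizes not taken from the
# tests above:
import numpy as np
import paddle
import paddle.nn.functional as F

paddle.enable_static()  # paddle.static.data only exists in static-graph mode
prog = paddle.static.Program()
startup = paddle.static.Program()
with paddle.static.program_guard(prog, startup):
    x = paddle.static.data(name='x', shape=[10, 10], dtype='float64')
    label = paddle.static.data(name='label', shape=[10], dtype='int64')
    # nll_loss expects log-probabilities, hence the log_softmax
    loss = F.nll_loss(F.log_softmax(x), label)
exe = paddle.static.Executor(paddle.CPUPlace())
exe.run(startup)
(res,) = exe.run(
    prog,
    feed={
        'x': np.random.random((10, 10)).astype('float64'),
        'label': np.random.randint(0, 10, size=(10,)).astype('int64'),
    },
    fetch_list=[loss],
)
# ---------------------------------------------------------------------------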
self.assertTrue(res.name.startswith('nll_loss')) @@ -1095,14 +1149,14 @@ class TestNLLLossInvalidArgs(unittest.TestCase): startup_prog = paddle.static.Program() place = paddle.CPUPlace() with paddle.static.program_guard(prog, startup_prog): - x = paddle.fluid.data( + x = paddle.static.data( name='x', shape=[ 10, ], dtype='float64', ) - label = paddle.fluid.data( + label = paddle.static.data( name='label', shape=[ 10, @@ -1162,8 +1216,10 @@ class TestNLLLossInvalidArgs(unittest.TestCase): startup_prog = paddle.static.Program() place = paddle.CPUPlace() with paddle.static.program_guard(prog, startup_prog): - x = paddle.fluid.data(name='x', shape=[10, 10], dtype='float64') - label = paddle.fluid.data( + x = paddle.static.data( + name='x', shape=[10, 10], dtype='float64' + ) + label = paddle.static.data( name='label', shape=[10], dtype='int64' ) nll_loss = paddle.nn.loss.NLLLoss(reduction='') @@ -1189,8 +1245,10 @@ class TestNLLLossInvalidArgs(unittest.TestCase): startup_prog = paddle.static.Program() place = paddle.CPUPlace() with paddle.static.program_guard(prog, startup_prog): - x = paddle.fluid.data(name='x', shape=[10, 10], dtype='float64') - label = paddle.fluid.data( + x = paddle.static.data( + name='x', shape=[10, 10], dtype='float64' + ) + label = paddle.static.data( name='label', shape=[10], dtype='int64' ) res = paddle.nn.functional.nll_loss(x, label, reduction='') diff --git a/python/paddle/fluid/tests/unittests/test_norm_all.py b/python/paddle/fluid/tests/unittests/test_norm_all.py index 3b28007d4c8d1bdbefc92a849f507d1b97aa9582..4b58a5bc8eec9834161c3e478c64e11db5e675a9 100644 --- a/python/paddle/fluid/tests/unittests/test_norm_all.py +++ b/python/paddle/fluid/tests/unittests/test_norm_all.py @@ -415,7 +415,7 @@ class TestPnormBF16Op(OpTest): def run_fro(self, p, axis, shape_x, dtype, keep_dim, check_dim=False): with fluid.program_guard(fluid.Program()): - data = fluid.data(name="X", shape=shape_x, dtype=dtype) + data = paddle.static.data(name="X", shape=shape_x, dtype=dtype) out = paddle.norm(x=data, p=p, axis=axis, keepdim=keep_dim) place = fluid.CPUPlace() exe = fluid.Executor(place) @@ -437,7 +437,7 @@ def run_fro(self, p, axis, shape_x, dtype, keep_dim, check_dim=False): def run_pnorm(self, p, axis, shape_x, dtype, keep_dim, check_dim=False): with fluid.program_guard(fluid.Program()): - data = fluid.data(name="X", shape=shape_x, dtype=dtype) + data = paddle.static.data(name="X", shape=shape_x, dtype=dtype) out = paddle.norm(x=data, p=p, axis=axis, keepdim=keep_dim) place = fluid.CPUPlace() exe = fluid.Executor(place) @@ -640,7 +640,7 @@ class API_NormTest(unittest.TestCase): def test_name(self): with fluid.program_guard(fluid.Program()): - x = fluid.data(name="x", shape=[10, 10], dtype="float32") + x = paddle.static.data(name="x", shape=[10, 10], dtype="float32") y_1 = paddle.norm(x, p='fro', name='frobenius_name') y_2 = paddle.norm(x, p=2, name='pnorm_name') self.assertEqual(('frobenius_name' in y_1.name), True) @@ -650,24 +650,28 @@ class API_NormTest(unittest.TestCase): with fluid.program_guard(fluid.Program(), fluid.Program()): def err_dtype(p, shape_x, xdtype, out=None): - data = fluid.data(shape=shape_x, dtype=xdtype) + data = paddle.static.data(shape=shape_x, dtype=xdtype) paddle.norm(data, p=p, out=out) self.assertRaises(TypeError, err_dtype, "fro", [2, 2], "int64") self.assertRaises(ValueError, paddle.norm, "inf", [2], "int64") - out = fluid.data(name="out", shape=[1], dtype="int64") + out = paddle.static.data(name="out", shape=[1], dtype="int64") self.assertRaises( 
TypeError, err_dtype, "fro", [2, 2], "float64", out ) self.assertRaises(TypeError, err_dtype, 2, [10], "int64") self.assertRaises(TypeError, err_dtype, 2, [10], "float64", out) - data = fluid.data(name="data_2d", shape=[2, 2], dtype="float64") + data = paddle.static.data( + name="data_2d", shape=[2, 2], dtype="float64" + ) self.assertRaises(ValueError, paddle.norm, data, p="unsupport norm") self.assertRaises(ValueError, paddle.norm, data, p=[1]) self.assertRaises(ValueError, paddle.norm, data, p=[1], axis=-1) self.assertRaises(ValueError, paddle.norm, 0, [1, 0], "float64") - data = fluid.data(name="data_3d", shape=[2, 2, 2], dtype="float64") + data = paddle.static.data( + name="data_3d", shape=[2, 2, 2], dtype="float64" + ) self.assertRaises( ValueError, paddle.norm, data, p='unspport', axis=[-3, -2, -1] ) diff --git a/python/paddle/fluid/tests/unittests/test_norm_op.py b/python/paddle/fluid/tests/unittests/test_norm_op.py index 64a17969ddeb42edf7208a0d5f5253f9768887f4..2899e16e6f5f99452c731ffe2cbbb58f72341feb 100644 --- a/python/paddle/fluid/tests/unittests/test_norm_op.py +++ b/python/paddle/fluid/tests/unittests/test_norm_op.py @@ -195,7 +195,7 @@ class API_NormTest(unittest.TestCase): with fluid.program_guard(fluid.Program()): def test_norm_x_type(): - data = fluid.data(name="x", shape=[3, 3], dtype="int64") + data = paddle.static.data(name="x", shape=[3, 3], dtype="int64") out = paddle.nn.functional.normalize(data) self.assertRaises(TypeError, test_norm_x_type) diff --git a/python/paddle/fluid/tests/unittests/test_normal.py b/python/paddle/fluid/tests/unittests/test_normal.py index 76e9c7a2f328e5596866e7e43386f267ef6639a1..6009580a8cde7420abc60f5702b717df0daedb85 100644 --- a/python/paddle/fluid/tests/unittests/test_normal.py +++ b/python/paddle/fluid/tests/unittests/test_normal.py @@ -66,10 +66,10 @@ class TestNormalAPI(unittest.TestCase): self.std, np.ndarray ): with paddle.static.program_guard(paddle.static.Program()): - mean = paddle.fluid.data( + mean = paddle.static.data( 'Mean', self.mean.shape, self.mean.dtype ) - std = paddle.fluid.data('Std', self.std.shape, self.std.dtype) + std = paddle.static.data('Std', self.std.shape, self.std.dtype) out = paddle.normal(mean, std, self.shape) exe = paddle.static.Executor(self.place) @@ -85,7 +85,7 @@ class TestNormalAPI(unittest.TestCase): return ret_all elif isinstance(self.mean, np.ndarray): with paddle.static.program_guard(paddle.static.Program()): - mean = paddle.fluid.data( + mean = paddle.static.data( 'Mean', self.mean.shape, self.mean.dtype ) out = paddle.normal(mean, self.std, self.shape) @@ -97,7 +97,7 @@ class TestNormalAPI(unittest.TestCase): return ret_all elif isinstance(self.std, np.ndarray): with paddle.static.program_guard(paddle.static.Program()): - std = paddle.fluid.data('Std', self.std.shape, self.std.dtype) + std = paddle.static.data('Std', self.std.shape, self.std.dtype) out = paddle.normal(self.mean, std, self.shape) exe = paddle.static.Executor(self.place) @@ -203,17 +203,17 @@ class TestNormalErrors(unittest.TestCase): std = [1, 2, 3] self.assertRaises(TypeError, paddle.normal, std=std) - mean = paddle.fluid.data('Mean', [100], 'int32') + mean = paddle.static.data('Mean', [100], 'int32') self.assertRaises(TypeError, paddle.normal, mean) - std = paddle.fluid.data('Std', [100], 'int32') + std = paddle.static.data('Std', [100], 'int32') self.assertRaises(TypeError, paddle.normal, mean=1.0, std=std) self.assertRaises(TypeError, paddle.normal, shape=1) self.assertRaises(TypeError, paddle.normal, shape=[1.0]) - shape 
= paddle.fluid.data('Shape', [100], 'float32') + shape = paddle.static.data('Shape', [100], 'float32') self.assertRaises(TypeError, paddle.normal, shape=shape) diff --git a/python/paddle/fluid/tests/unittests/test_normalize.py b/python/paddle/fluid/tests/unittests/test_normalize.py index a986dd90415d5125cfcbf799503b1432a0e2ecba..2a27b42446d9fab46812dd1395f9b664a2a088d7 100644 --- a/python/paddle/fluid/tests/unittests/test_normalize.py +++ b/python/paddle/fluid/tests/unittests/test_normalize.py @@ -55,8 +55,8 @@ class TestNNFunctionalNormalize(unittest.TestCase): self.assertRaises(BaseException, F.normalize, x) def run_static(self, use_gpu=False): - x = paddle.fluid.data(name='input', shape=[10, 10], dtype='float32') - x2 = paddle.fluid.data(name='input2', shape=[2], dtype='float32') + x = paddle.static.data(name='input', shape=[10, 10], dtype='float32') + x2 = paddle.static.data(name='input2', shape=[2], dtype='float32') result0 = F.normalize(x) result1 = F.normalize(x, p=1.5) result2 = F.normalize(x, axis=0) diff --git a/python/paddle/fluid/tests/unittests/test_npair_loss_op.py b/python/paddle/fluid/tests/unittests/test_npair_loss_op.py index ad4aaf1f0e29c475d4d3e32f69b9b17fc916d5ac..a044a31525969fa153fc668b7788d86ef73bcd09 100755 --- a/python/paddle/fluid/tests/unittests/test_npair_loss_op.py +++ b/python/paddle/fluid/tests/unittests/test_npair_loss_op.py @@ -130,13 +130,15 @@ class TestNpairLossOpError(unittest.TestCase): anchor_np = np.random.random((2, 4)).astype("float32") positive_np = np.random.random((2, 4)).astype("float32") labels_np = np.random.random((2)).astype("float32") - anchor_data = fluid.data( + anchor_data = paddle.static.data( name='anchor', shape=[2, 4], dtype='float32' ) - positive_data = fluid.data( + positive_data = paddle.static.data( name='positive', shape=[2, 4], dtype='float32' ) - labels_data = fluid.data(name='labels', shape=[2], dtype='float32') + labels_data = paddle.static.data( + name='labels', shape=[2], dtype='float32' + ) def test_anchor_Variable(): # the anchor type must be Variable @@ -162,7 +164,7 @@ class TestNpairLossOpError(unittest.TestCase): def test_anchor_type(): # dtype must be float32 or float64 - anchor_data1 = fluid.data( + anchor_data1 = paddle.static.data( name='anchor1', shape=[2, 4], dtype='int32' ) paddle.nn.functional.npair_loss( @@ -171,7 +173,7 @@ class TestNpairLossOpError(unittest.TestCase): def test_positive_type(): # dtype must be float32 or float64 - positive_data1 = fluid.data( + positive_data1 = paddle.static.data( name='positive1', shape=[2, 4], dtype='int32' ) paddle.nn.functional.npair_loss( @@ -182,7 +184,7 @@ class TestNpairLossOpError(unittest.TestCase): def test_labels_type(): # dtype must be float32 or float64 - labels_data1 = fluid.data( + labels_data1 = paddle.static.data( name='labels1', shape=[2], dtype='int32' ) paddle.nn.functional.npair_loss( diff --git a/python/paddle/fluid/tests/unittests/test_number_count_op.py b/python/paddle/fluid/tests/unittests/test_number_count_op.py index c2781b98e00b0c1fb18f78a209c9194a1d21dc60..3e599ca1f0ae8b2112ca8b0a0a19d10ddcf61b7a 100644 --- a/python/paddle/fluid/tests/unittests/test_number_count_op.py +++ b/python/paddle/fluid/tests/unittests/test_number_count_op.py @@ -61,7 +61,7 @@ class TestNumberCountAPI(unittest.TestCase): def test_api_static(self): paddle.enable_static() with paddle.static.program_guard(paddle.static.Program()): - x = paddle.fluid.data('x', self.x.shape, dtype="int64") + x = paddle.static.data('x', self.x.shape, dtype="int64") out = 
utils._number_count(x, self.upper_num) exe = paddle.static.Executor(self.place) res = exe.run(feed={'x': self.x}, fetch_list=[out]) diff --git a/python/paddle/fluid/tests/unittests/test_numel_op.py b/python/paddle/fluid/tests/unittests/test_numel_op.py index 8a90883138415dd1cd761db28d00f6e0345c6417..3b3ce4eba2fe6bcf051d556e78f61d98c0ff92d6 100644 --- a/python/paddle/fluid/tests/unittests/test_numel_op.py +++ b/python/paddle/fluid/tests/unittests/test_numel_op.py @@ -56,8 +56,8 @@ class TestNumelAPI(unittest.TestCase): with fluid.program_guard(main_program, startup_program): shape1 = [2, 1, 4, 5] shape2 = [1, 4, 5] - x_1 = paddle.fluid.data(shape=shape1, dtype='int32', name='x_1') - x_2 = paddle.fluid.data(shape=shape2, dtype='int32', name='x_2') + x_1 = paddle.static.data(shape=shape1, dtype='int32', name='x_1') + x_2 = paddle.static.data(shape=shape2, dtype='int32', name='x_2') input_1 = np.random.random(shape1).astype("int32") input_2 = np.random.random(shape2).astype("int32") out_1 = paddle.numel(x_1) diff --git a/python/paddle/fluid/tests/unittests/test_ones_like.py b/python/paddle/fluid/tests/unittests/test_ones_like.py index e7f06d526c096436dd31c499f78d63afef8af7d6..f6481a805b3b04ac64d9a53ae7e3bf4d816e42d9 100644 --- a/python/paddle/fluid/tests/unittests/test_ones_like.py +++ b/python/paddle/fluid/tests/unittests/test_ones_like.py @@ -26,7 +26,7 @@ from paddle.fluid.framework import convert_np_dtype_to_dtype_ class TestOnesLikeAPIError(unittest.TestCase): def test_errors(self): with program_guard(Program(), Program()): - x = paddle.fluid.data('x', [3, 4]) + x = paddle.static.data('x', [3, 4]) self.assertRaises(TypeError, ones_like, x, 'int8') @@ -36,7 +36,7 @@ class TestOnesLikeAPI(unittest.TestCase): startup_program = Program() train_program = Program() with program_guard(train_program, startup_program): - x = paddle.fluid.data('X', shape) + x = paddle.static.data('X', shape) # 'bool', 'float32', 'float64', 'int32', 'int64' out1 = ones_like(x) diff --git a/python/paddle/fluid/tests/unittests/test_op_name_conflict.py b/python/paddle/fluid/tests/unittests/test_op_name_conflict.py index ec4e98b907dbb92b51b4ec099e29e0f91730ff67..202a6bc0f6cdf5c0e0019a5732ef157307b168b0 100644 --- a/python/paddle/fluid/tests/unittests/test_op_name_conflict.py +++ b/python/paddle/fluid/tests/unittests/test_op_name_conflict.py @@ -27,8 +27,8 @@ class TestOpNameConflict(unittest.TestCase): startup = fluid.Program() with fluid.unique_name.guard(): with fluid.program_guard(main, startup): - x = fluid.data(name="x", shape=[1], dtype='float32') - y = fluid.data(name="y", shape=[1], dtype='float32') + x = paddle.static.data(name="x", shape=[1], dtype='float32') + y = paddle.static.data(name="y", shape=[1], dtype='float32') m = paddle.log2(x, name="log2") n = paddle.log2(y, name="log2") diff --git a/python/paddle/fluid/tests/unittests/test_optimizer_in_control_flow.py b/python/paddle/fluid/tests/unittests/test_optimizer_in_control_flow.py index 2fd87456b69906d2d9b561194a4480fd37314fde..317b779dd5e26cad21251f12e0b111b2ca58fc21 100644 --- a/python/paddle/fluid/tests/unittests/test_optimizer_in_control_flow.py +++ b/python/paddle/fluid/tests/unittests/test_optimizer_in_control_flow.py @@ -89,14 +89,14 @@ def static( opt.minimize(avg_loss) return avg_loss - image = fluid.data('image', [BATCH_SIZE, INPUT_SIZE], 'float32') - label = fluid.data('label', [BATCH_SIZE, 1], 'int64') + image = paddle.static.data('image', [BATCH_SIZE, INPUT_SIZE], 'float32') + label = paddle.static.data('label', [BATCH_SIZE, 1], 'int64') 
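# ---------------------------------------------------------------------------
# Editor's aside (not part of the diff): many of the migrated declarations in
# this section keep None (or -1) as a leading dimension. A sketch, under
# Paddle 2.x static-graph assumptions, showing that one such placeholder
# accepts different batch sizes at run time; names here are illustrative:
import numpy as np
import paddle

paddle.enable_static()
prog = paddle.static.Program()
startup = paddle.static.Program()
with paddle.static.program_guard(prog, startup):
    # None marks a dimension whose size is resolved only when data is fed
    image = paddle.static.data(name='image', shape=[None, 4], dtype='float32')
    total = paddle.sum(image)
exe = paddle.static.Executor(paddle.CPUPlace())
exe.run(startup)
for batch_size in (2, 5):  # same program, two different feed shapes
    (out,) = exe.run(
        prog,
        feed={'image': np.ones((batch_size, 4), dtype='float32')},
        fetch_list=[total],
    )
# ---------------------------------------------------------------------------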
hidden, prediction = double_fc_net(image) adam = optimizer.Adam(learning_rate=LR) sgd = optimizer.SGD(learning_rate=LR) - id = fluid.data('id', [1], 'int32') + id = paddle.static.data('id', [1], 'int32') two = paddle.tensor.fill_constant([1], 'int32', 2) mod_two = paddle.remainder(id, two) == 0 diff --git a/python/paddle/fluid/tests/unittests/test_pad3d_op.py b/python/paddle/fluid/tests/unittests/test_pad3d_op.py index cd93f48b7ebfe3da208a99ce78da43d2640326b4..815ae1d94a90bcda1f7b2fea2b528e83d85ef630 100644 --- a/python/paddle/fluid/tests/unittests/test_pad3d_op.py +++ b/python/paddle/fluid/tests/unittests/test_pad3d_op.py @@ -199,7 +199,7 @@ class TestPadAPI(unittest.TestCase): mode = "constant" value = 100 input_data = np.random.rand(*input_shape).astype(np.float32) - x = paddle.fluid.data(name="x", shape=input_shape) + x = paddle.static.data(name="x", shape=input_shape) result = F.pad( x=x, pad=pad, value=value, mode=mode, data_format="NCDHW" ) @@ -220,7 +220,7 @@ class TestPadAPI(unittest.TestCase): pad = [1, 2, 1, 1, 1, 2] mode = "reflect" input_data = np.random.rand(*input_shape).astype(np.float32) - x = paddle.fluid.data(name="x", shape=input_shape) + x = paddle.static.data(name="x", shape=input_shape) result1 = F.pad(x=x, pad=pad, mode=mode, data_format="NCDHW") result2 = F.pad(x=x, pad=pad, mode=mode, data_format="NDHWC") exe = Executor(place) @@ -246,7 +246,7 @@ class TestPadAPI(unittest.TestCase): pad = [1, 2, 1, 1, 3, 4] mode = "replicate" input_data = np.random.rand(*input_shape).astype(np.float32) - x = paddle.fluid.data(name="x", shape=input_shape) + x = paddle.static.data(name="x", shape=input_shape) result1 = F.pad(x=x, pad=pad, mode=mode, data_format="NCDHW") result2 = F.pad(x=x, pad=pad, mode=mode, data_format="NDHWC") exe = Executor(place) @@ -272,7 +272,7 @@ class TestPadAPI(unittest.TestCase): pad = [1, 2, 1, 1, 3, 4] mode = "circular" input_data = np.random.rand(*input_shape).astype(np.float32) - x = paddle.fluid.data(name="x", shape=input_shape) + x = paddle.static.data(name="x", shape=input_shape) result1 = F.pad(x=x, pad=pad, mode=mode, data_format="NCDHW") result2 = F.pad(x=x, pad=pad, mode=mode, data_format="NDHWC") exe = Executor(place) diff --git a/python/paddle/fluid/tests/unittests/test_pad_op.py b/python/paddle/fluid/tests/unittests/test_pad_op.py index 93fb376ee702f0a08a732c2bdc01c46b783f2a2e..effc6cc70a6c59c688ab1e95e49fd43cf8b370a7 100644 --- a/python/paddle/fluid/tests/unittests/test_pad_op.py +++ b/python/paddle/fluid/tests/unittests/test_pad_op.py @@ -20,7 +20,6 @@ from eager_op_test import OpTest from test_attribute_var import UnittestBase import paddle -import paddle.fluid as fluid import paddle.fluid.core as core from paddle.fluid import Program, program_guard @@ -121,7 +120,7 @@ class TestPadOpError(unittest.TestCase): self.assertRaises(TypeError, test_Variable) - data = fluid.data(name='data', shape=[4], dtype='float16') + data = paddle.static.data(name='data', shape=[4], dtype='float16') paddle.nn.functional.pad(x=data, pad=[0, 1]) diff --git a/python/paddle/fluid/tests/unittests/test_paddle_fluid_modelaverage.py b/python/paddle/fluid/tests/unittests/test_paddle_fluid_modelaverage.py index 1dfd7f2f858287ab9e430c781e54142deb36d9cc..d40f91dea2bf9081e17acf3d74fe9f0888680638 100644 --- a/python/paddle/fluid/tests/unittests/test_paddle_fluid_modelaverage.py +++ b/python/paddle/fluid/tests/unittests/test_paddle_fluid_modelaverage.py @@ -31,7 +31,9 @@ class TestModelAverage(unittest.TestCase): test_program = fluid.Program() with 
fluid.program_guard(train_program, startup): with fluid.unique_name.guard(): - data = fluid.data(name='X', shape=[None, 1], dtype='float32') + data = paddle.static.data( + name='X', shape=[None, 1], dtype='float32' + ) hidden = paddle.static.nn.fc(x=data, size=10) loss = paddle.mean(hidden) test_program = train_program.clone() diff --git a/python/paddle/fluid/tests/unittests/test_paddle_save_load_binary.py b/python/paddle/fluid/tests/unittests/test_paddle_save_load_binary.py index 70fe5cc8d8a1af12271952eeb2da04ce72e3679b..099c191004c028ea89929ab0ae536f12888a2f8e 100644 --- a/python/paddle/fluid/tests/unittests/test_paddle_save_load_binary.py +++ b/python/paddle/fluid/tests/unittests/test_paddle_save_load_binary.py @@ -141,7 +141,9 @@ class TestSaveLoadBinaryFormat(unittest.TestCase): paddle.enable_static() OUTPUT_NUM = 32 with new_program_scope(): - x = fluid.data(name="x", shape=[None, IMAGE_SIZE], dtype='float32') + x = paddle.static.data( + name="x", shape=[None, IMAGE_SIZE], dtype='float32' + ) y = paddle.static.nn.fc( x, OUTPUT_NUM, diff --git a/python/paddle/fluid/tests/unittests/test_pairwise_distance.py b/python/paddle/fluid/tests/unittests/test_pairwise_distance.py index 1c3b7d261a5731888e40a74cdd01dffe544f867d..56ac004cf1c6850a64d541e90db921403cfb3930 100644 --- a/python/paddle/fluid/tests/unittests/test_pairwise_distance.py +++ b/python/paddle/fluid/tests/unittests/test_pairwise_distance.py @@ -58,8 +58,8 @@ def test_static( ) paddle.enable_static() with paddle.static.program_guard(prog, startup_prog): - x = paddle.fluid.data(name='x', shape=x_np.shape, dtype=x_np.dtype) - y = paddle.fluid.data(name='y', shape=y_np.shape, dtype=x_np.dtype) + x = paddle.static.data(name='x', shape=x_np.shape, dtype=x_np.dtype) + y = paddle.static.data(name='y', shape=y_np.shape, dtype=x_np.dtype) if functional: distance = call_pairwise_distance_functional( diff --git a/python/paddle/fluid/tests/unittests/test_pixel_shuffle_op.py b/python/paddle/fluid/tests/unittests/test_pixel_shuffle_op.py index 3543cce6ad04c3407d5c531625b5eae6dc4c58ea..570ed7aee3f0642f4487a911350e0a1083c89809 100644 --- a/python/paddle/fluid/tests/unittests/test_pixel_shuffle_op.py +++ b/python/paddle/fluid/tests/unittests/test_pixel_shuffle_op.py @@ -113,10 +113,10 @@ class TestPixelShuffleAPI(unittest.TestCase): place = paddle.CUDAPlace(0) if use_cuda else paddle.CPUPlace() paddle.enable_static() - x_1 = paddle.fluid.data( + x_1 = paddle.static.data( name="x", shape=[2, 9, 4, 4], dtype="float64" ) - x_2 = paddle.fluid.data( + x_2 = paddle.static.data( name="x2", shape=[2, 4, 4, 9], dtype="float64" ) out_1 = F.pixel_shuffle(x_1, 3) @@ -149,10 +149,10 @@ class TestPixelShuffleAPI(unittest.TestCase): place = paddle.CUDAPlace(0) self.x_1_np = np.random.random([2, 9, 4, 4]).astype("float16") self.x_2_np = np.random.random([2, 4, 4, 9]).astype("float16") - x_1 = paddle.fluid.data( + x_1 = paddle.static.data( name="x", shape=[2, 9, 4, 4], dtype="float16" ) - x_2 = paddle.fluid.data( + x_2 = paddle.static.data( name="x2", shape=[2, 4, 4, 9], dtype="float16" ) # init instance @@ -186,10 +186,10 @@ class TestPixelShuffleAPI(unittest.TestCase): place = paddle.CUDAPlace(0) if use_cuda else paddle.CPUPlace() paddle.enable_static() - x_1 = paddle.fluid.data( + x_1 = paddle.static.data( name="x", shape=[2, 9, 4, 4], dtype="float64" ) - x_2 = paddle.fluid.data( + x_2 = paddle.static.data( name="x2", shape=[2, 4, 4, 9], dtype="float64" ) # init instance diff --git a/python/paddle/fluid/tests/unittests/test_pixel_unshuffle.py 
b/python/paddle/fluid/tests/unittests/test_pixel_unshuffle.py index 2aa064c2dc8699f911ef67ac1dd1d943bd761688..e9bb76ea2b9d14aca7d0785a26b536a5adc61283 100644 --- a/python/paddle/fluid/tests/unittests/test_pixel_unshuffle.py +++ b/python/paddle/fluid/tests/unittests/test_pixel_unshuffle.py @@ -140,10 +140,10 @@ class TestPixelUnshuffleAPI(unittest.TestCase): place = paddle.CUDAPlace(0) if use_cuda else paddle.CPUPlace() paddle.enable_static() - x_1 = paddle.fluid.data( + x_1 = paddle.static.data( name="x", shape=[2, 1, 12, 12], dtype="float64" ) - x_2 = paddle.fluid.data( + x_2 = paddle.static.data( name="x2", shape=[2, 12, 12, 1], dtype="float64" ) out_1 = F.pixel_unshuffle(x_1, 3) @@ -177,10 +177,10 @@ class TestPixelUnshuffleAPI(unittest.TestCase): place = paddle.CUDAPlace(0) if use_cuda else paddle.CPUPlace() paddle.enable_static() - x_1 = paddle.fluid.data( + x_1 = paddle.static.data( name="x", shape=[2, 1, 12, 12], dtype="float64" ) - x_2 = paddle.fluid.data( + x_2 = paddle.static.data( name="x2", shape=[2, 12, 12, 1], dtype="float64" ) # init instance diff --git a/python/paddle/fluid/tests/unittests/test_pool1d_api.py b/python/paddle/fluid/tests/unittests/test_pool1d_api.py index 212a896d40e625996ef020f0d17df7cbdf37a95d..b654f70c38dc4a701af9e8b152f53c595c9c119d 100644 --- a/python/paddle/fluid/tests/unittests/test_pool1d_api.py +++ b/python/paddle/fluid/tests/unittests/test_pool1d_api.py @@ -123,7 +123,9 @@ class TestPool1D_API(unittest.TestCase): def check_avg_static_results(self, place): with fluid.program_guard(fluid.Program(), fluid.Program()): - input = fluid.data(name="input", shape=[2, 3, 32], dtype="float32") + input = paddle.static.data( + name="input", shape=[2, 3, 32], dtype="float32" + ) result = F.avg_pool1d(input, kernel_size=2, stride=2, padding=0) input_np = np.random.random([2, 3, 32]).astype("float32") @@ -206,7 +208,9 @@ class TestPool1D_API(unittest.TestCase): def check_max_static_results(self, place): with fluid.program_guard(fluid.Program(), fluid.Program()): - input = fluid.data(name="input", shape=[2, 3, 32], dtype="float32") + input = paddle.static.data( + name="input", shape=[2, 3, 32], dtype="float32" + ) result = F.max_pool1d(input, kernel_size=2, stride=2, padding=[0]) input_np = np.random.random([2, 3, 32]).astype("float32") diff --git a/python/paddle/fluid/tests/unittests/test_pool2d_api.py b/python/paddle/fluid/tests/unittests/test_pool2d_api.py index fcdec610a480ebff0dfc3cd9372fe1a34ee4288c..dd6ae87f64d712aaaad83087475b5d2358531a82 100644 --- a/python/paddle/fluid/tests/unittests/test_pool2d_api.py +++ b/python/paddle/fluid/tests/unittests/test_pool2d_api.py @@ -36,7 +36,7 @@ class TestPool2D_API(unittest.TestCase): def check_avg_static_results(self, place): with fluid.program_guard(fluid.Program(), fluid.Program()): - input = fluid.data( + input = paddle.static.data( name="input", shape=[2, 3, 32, 32], dtype="float32" ) result = avg_pool2d(input, kernel_size=2, stride=2, padding=0) @@ -128,7 +128,7 @@ class TestPool2D_API(unittest.TestCase): def check_max_static_results(self, place): with fluid.program_guard(fluid.Program(), fluid.Program()): - input = fluid.data( + input = paddle.static.data( name="input", shape=[2, 3, 32, 32], dtype="float32" ) result = max_pool2d(input, kernel_size=2, stride=2, padding=0) diff --git a/python/paddle/fluid/tests/unittests/test_pool3d_api.py b/python/paddle/fluid/tests/unittests/test_pool3d_api.py index 2c069b9e844913718a92dee457eda8081d4a5ce4..d46e9f76c8fc675d49d3fec3de968f6a99771d11 100644 --- 
a/python/paddle/fluid/tests/unittests/test_pool3d_api.py +++ b/python/paddle/fluid/tests/unittests/test_pool3d_api.py @@ -36,7 +36,7 @@ class TestPool3D_API(unittest.TestCase): def check_avg_static_results(self, place): with fluid.program_guard(fluid.Program(), fluid.Program()): - input = fluid.data( + input = paddle.static.data( name="input", shape=[2, 3, 32, 32, 32], dtype="float32" ) result = avg_pool3d(input, kernel_size=2, stride=2, padding=0) @@ -141,7 +141,7 @@ class TestPool3D_API(unittest.TestCase): def check_max_static_results(self, place): with fluid.program_guard(fluid.Program(), fluid.Program()): - input = fluid.data( + input = paddle.static.data( name="input", shape=[2, 3, 32, 32, 32], dtype="float32" ) result = max_pool3d(input, kernel_size=2, stride=2, padding=0) diff --git a/python/paddle/fluid/tests/unittests/test_prelu_op.py b/python/paddle/fluid/tests/unittests/test_prelu_op.py index 4a4d5921bbb941f65d3e3fadbf7971a0154a40fb..c9d1a21fdad3b096f5fd836909656852c1e4d7ae 100644 --- a/python/paddle/fluid/tests/unittests/test_prelu_op.py +++ b/python/paddle/fluid/tests/unittests/test_prelu_op.py @@ -51,8 +51,8 @@ class TestFunctionalPReluAPI(unittest.TestCase): def static_check(self, weight_np): with paddle.static.program_guard(paddle.static.Program()): - x = paddle.fluid.data('X', self.x_np.shape, 'float32') - weight = paddle.fluid.data('Alpha', weight_np.shape, 'float32') + x = paddle.static.data('X', self.x_np.shape, 'float32') + weight = paddle.static.data('Alpha', weight_np.shape, 'float32') out = F.prelu(x, weight) exe = paddle.static.Executor(self.place) res = exe.run( @@ -80,18 +80,18 @@ class TestFunctionalPReluAPI(unittest.TestCase): def test_error(self): with paddle.static.program_guard(paddle.static.Program()): - weight_fp32 = paddle.fluid.data( + weight_fp32 = paddle.static.data( name='weight_fp32', shape=[1], dtype='float32' ) # The input type must be Variable. self.assertRaises(TypeError, F.prelu, x=1, weight=weight_fp32) # The input dtype must be float16, float32, float64. 
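# ---------------------------------------------------------------------------
# Editor's aside (not part of the diff): the error tests in the hunk below
# follow one recipe -- declare a placeholder with a deliberately unsupported
# dtype and assert that the op under test rejects it. A standalone sketch of
# that recipe, reusing the F.prelu case these tests exercise:
import paddle
import paddle.nn.functional as F

paddle.enable_static()
with paddle.static.program_guard(paddle.static.Program()):
    weight_fp32 = paddle.static.data(name='w', shape=[1], dtype='float32')
    x_int32 = paddle.static.data(name='x_i32', shape=[2, 3], dtype='int32')
    try:
        F.prelu(x=x_int32, weight=weight_fp32)
    except TypeError as exc:
        print('int32 input rejected as expected:', exc)
# ---------------------------------------------------------------------------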
- x_int32 = paddle.fluid.data( + x_int32 = paddle.static.data( name='x_int32', shape=[2, 3], dtype='int32' ) self.assertRaises(TypeError, F.prelu, x=x_int32, weight=weight_fp32) # support the input dtype is float16 - x_fp16 = paddle.fluid.data( + x_fp16 = paddle.static.data( name='x_fp16', shape=[2, 3], dtype='float16' ) F.prelu(x=x_fp16, weight=weight_fp32) @@ -110,7 +110,7 @@ class TestNNPReluAPI(unittest.TestCase): startup_program = paddle.static.Program() train_program = paddle.static.Program() with paddle.static.program_guard(train_program, startup_program): - x = paddle.fluid.data( + x = paddle.static.data( name='X', shape=self.x_np.shape, dtype='float32' ) m = paddle.nn.PReLU() @@ -463,7 +463,7 @@ class TestModeError(unittest.TestCase): def test_mode_error(self): main_program = Program() with fluid.program_guard(main_program, Program()): - x = fluid.data(name='x', shape=[2, 3, 4, 5]) + x = paddle.static.data(name='x', shape=[2, 3, 4, 5]) try: y = prelu_t(x, 'any') except Exception as e: @@ -472,7 +472,7 @@ class TestModeError(unittest.TestCase): def test_data_format_error1(self): main_program = Program() with fluid.program_guard(main_program, Program()): - x = fluid.data(name='x', shape=[2, 3, 4, 5]) + x = paddle.static.data(name='x', shape=[2, 3, 4, 5]) try: y = prelu_t(x, 'channel', data_format='N') except Exception as e: @@ -481,7 +481,7 @@ class TestModeError(unittest.TestCase): def test_data_format_error2(self): main_program = Program() with fluid.program_guard(main_program, Program()): - x = fluid.data(name='x', shape=[2, 3, 4, 5]) + x = paddle.static.data(name='x', shape=[2, 3, 4, 5]) try: y = paddle.static.nn.prelu(x, 'channel', data_format='N') except ValueError as e: diff --git a/python/paddle/fluid/tests/unittests/test_prod_op.py b/python/paddle/fluid/tests/unittests/test_prod_op.py index 47b41aafc32731d432e1c0ec03d5a76e7fa2c319..2146655baf5e0e6344bafadfc2172bfdbf0f9ea9 100644 --- a/python/paddle/fluid/tests/unittests/test_prod_op.py +++ b/python/paddle/fluid/tests/unittests/test_prod_op.py @@ -71,7 +71,7 @@ class TestProdOp(unittest.TestCase): ) def run_static(self, use_gpu=False): - input = paddle.fluid.data( + input = paddle.static.data( name='input', shape=[10, 10, 5], dtype='float32' ) result0 = paddle.prod(input) @@ -154,8 +154,8 @@ class TestProdOpError(unittest.TestCase): with paddle.static.program_guard( paddle.static.Program(), paddle.static.Program() ): - x = paddle.fluid.data(name='x', shape=[2, 2, 4], dtype='float32') - bool_x = paddle.fluid.data( + x = paddle.static.data(name='x', shape=[2, 2, 4], dtype='float32') + bool_x = paddle.static.data( name='bool_x', shape=[2, 2, 4], dtype='bool' ) # The argument x shoule be a Tensor diff --git a/python/paddle/fluid/tests/unittests/test_program.py b/python/paddle/fluid/tests/unittests/test_program.py index dfc9b7572da4bf5266403143932db23d950cf25c..c9de1f95e654e0f9cec43d12050d04d9dcc3d878 100644 --- a/python/paddle/fluid/tests/unittests/test_program.py +++ b/python/paddle/fluid/tests/unittests/test_program.py @@ -106,7 +106,7 @@ class TestProgram(unittest.TestCase): def test_program_all_parameters(self): program = fluid.default_main_program() - data = fluid.data(name='x', shape=[None, 13], dtype='float32') + data = paddle.static.data(name='x', shape=[None, 13], dtype='float32') hidden = paddle.static.nn.fc(x=data, size=10) loss = paddle.mean(hidden) fluid.optimizer.SGD(learning_rate=0.01).minimize(loss) diff --git a/python/paddle/fluid/tests/unittests/test_put_along_axis_op.py 
b/python/paddle/fluid/tests/unittests/test_put_along_axis_op.py index 7470dae1846ab353e31b6a113e93addc4481e0c3..75be4531820fbd4fafe241a30ec8a65f26e74026 100644 --- a/python/paddle/fluid/tests/unittests/test_put_along_axis_op.py +++ b/python/paddle/fluid/tests/unittests/test_put_along_axis_op.py @@ -86,9 +86,9 @@ class TestPutAlongAxisAPI(unittest.TestCase): def run(place): with paddle.static.program_guard(paddle.static.Program()): - x = paddle.fluid.data('X', self.shape) - index = paddle.fluid.data('Index', self.index_shape, "int64") - value = paddle.fluid.data('Value', self.value_shape) + x = paddle.static.data('X', self.shape) + index = paddle.static.data('Index', self.index_shape, "int64") + value = paddle.static.data('Value', self.value_shape) out = paddle.put_along_axis(x, index, value, self.axis) exe = paddle.static.Executor(self.place[0]) res = exe.run( diff --git a/python/paddle/fluid/tests/unittests/test_pyramid_hash_op.py b/python/paddle/fluid/tests/unittests/test_pyramid_hash_op.py index 6af32d5870546634138845eb3affa44457c90d27..3ddf5cdde2304b63eececb9d27165ac58311617b 100644 --- a/python/paddle/fluid/tests/unittests/test_pyramid_hash_op.py +++ b/python/paddle/fluid/tests/unittests/test_pyramid_hash_op.py @@ -16,6 +16,7 @@ import unittest import numpy as np +import paddle import paddle.fluid as fluid @@ -24,7 +25,9 @@ class TestPyramidHashOpApi(unittest.TestCase): num_voc = 128 embed_dim = 64 x_shape, x_lod = [16, 10], [[3, 5, 2, 6]] - x = fluid.data(name='x', shape=x_shape, dtype='int32', lod_level=1) + x = paddle.static.data( + name='x', shape=x_shape, dtype='int32', lod_level=1 + ) hash_embd = fluid.contrib.search_pyramid_hash( input=x, num_emb=embed_dim, diff --git a/python/paddle/fluid/tests/unittests/test_qr_op.py b/python/paddle/fluid/tests/unittests/test_qr_op.py index 44dbeb902b9f62a9560eeb31cf6362b80df92ef6..c459a727bcffcbb823926e33977ff4d587352814 100644 --- a/python/paddle/fluid/tests/unittests/test_qr_op.py +++ b/python/paddle/fluid/tests/unittests/test_qr_op.py @@ -225,7 +225,7 @@ class TestQrAPI(unittest.TestCase): tmp_q, tmp_r = np.linalg.qr(a[coord], mode=mode) np_q[coord] = tmp_q np_r[coord] = tmp_r - x = paddle.fluid.data( + x = paddle.static.data( name="input", shape=shape, dtype=dtype ) if mode == "r": diff --git a/python/paddle/fluid/tests/unittests/test_rad2deg.py b/python/paddle/fluid/tests/unittests/test_rad2deg.py index 7332a113b87ea29830bf997816171c6515e7725d..d37e8987161b1d97cd5ea35ff3de8ed0e1bd675f 100644 --- a/python/paddle/fluid/tests/unittests/test_rad2deg.py +++ b/python/paddle/fluid/tests/unittests/test_rad2deg.py @@ -36,7 +36,9 @@ class TestRad2degAPI(unittest.TestCase): startup_program = fluid.Program() train_program = fluid.Program() with fluid.program_guard(startup_program, train_program): - x = fluid.data(name='input', dtype=self.x_dtype, shape=self.x_shape) + x = paddle.static.data( + name='input', dtype=self.x_dtype, shape=self.x_shape + ) out = paddle.rad2deg(x) place = ( diff --git a/python/paddle/fluid/tests/unittests/test_rand_op.py b/python/paddle/fluid/tests/unittests/test_rand_op.py index d259f2c91630b8baf4eaec7d21f3e1c72a38c405..94dc929f1517e2fab467fe49f8755a494587433a 100644 --- a/python/paddle/fluid/tests/unittests/test_rand_op.py +++ b/python/paddle/fluid/tests/unittests/test_rand_op.py @@ -68,10 +68,12 @@ class TestRandOp(unittest.TestCase): dim_2 = paddle.tensor.fill_constant([1], "int32", 5) result_2 = rand(shape=[dim_1, dim_2]) - var_shape = fluid.data(name='var_shape', shape=[2], dtype="int64") + var_shape = 
paddle.static.data( + name='var_shape', shape=[2], dtype="int64" + ) result_3 = rand(var_shape) - var_shape_int32 = fluid.data( + var_shape_int32 = paddle.static.data( name='var_shape_int32', shape=[2], dtype="int32" ) result_4 = rand(var_shape_int32) diff --git a/python/paddle/fluid/tests/unittests/test_randint_like.py b/python/paddle/fluid/tests/unittests/test_randint_like.py index 76e7b204be475e4a206e1cce19373b30a883631e..fdfac01b8bd0a75d7074d5ea1ee09ed2089997a0 100644 --- a/python/paddle/fluid/tests/unittests/test_randint_like.py +++ b/python/paddle/fluid/tests/unittests/test_randint_like.py @@ -41,7 +41,7 @@ class TestRandintLikeAPI(unittest.TestCase): paddle.enable_static() with program_guard(Program(), Program()): # results are from [-100, 100). - x_bool = paddle.fluid.data( + x_bool = paddle.static.data( name="x_bool", shape=[10, 12], dtype="bool" ) exe = paddle.static.Executor(self.place) @@ -55,7 +55,7 @@ class TestRandintLikeAPI(unittest.TestCase): self.assertTrue(out.dtype, np.dtype(dtype)) self.assertTrue(((out >= -10) & (out <= 10)).all(), True) with program_guard(Program(), Program()): - x_int32 = paddle.fluid.data( + x_int32 = paddle.static.data( name="x_int32", shape=[10, 12], dtype="int32" ) exe = paddle.static.Executor(self.place) @@ -70,7 +70,7 @@ class TestRandintLikeAPI(unittest.TestCase): self.assertTrue(((out >= -5) & (out <= 10)).all(), True) with program_guard(Program(), Program()): - x_int64 = paddle.fluid.data( + x_int64 = paddle.static.data( name="x_int64", shape=[10, 12], dtype="int64" ) exe = paddle.static.Executor(self.place) @@ -85,7 +85,7 @@ class TestRandintLikeAPI(unittest.TestCase): self.assertTrue(((out >= -100) & (out <= 100)).all(), True) if paddle.is_compiled_with_cuda(): with program_guard(Program(), Program()): - x_float16 = paddle.fluid.data( + x_float16 = paddle.static.data( name="x_float16", shape=[10, 12], dtype="float16" ) exe = paddle.static.Executor(self.place) @@ -102,7 +102,7 @@ class TestRandintLikeAPI(unittest.TestCase): self.assertTrue(((out >= -3) & (out <= 25)).all(), True) with program_guard(Program(), Program()): - x_float32 = paddle.fluid.data( + x_float32 = paddle.static.data( name="x_float32", shape=[10, 12], dtype="float32" ) exe = paddle.static.Executor(self.place) @@ -119,7 +119,7 @@ class TestRandintLikeAPI(unittest.TestCase): self.assertTrue(((out >= -25) & (out <= 25)).all(), True) with program_guard(Program(), Program()): - x_float64 = paddle.fluid.data( + x_float64 = paddle.static.data( name="x_float64", shape=[10, 12], dtype="float64" ) exe = paddle.static.Executor(self.place) @@ -172,22 +172,22 @@ class TestRandintLikeAPI(unittest.TestCase): def test_errors(self): paddle.enable_static() with program_guard(Program(), Program()): - x_bool = paddle.fluid.data( + x_bool = paddle.static.data( name="x_bool", shape=[10, 12], dtype="bool" ) - x_int32 = paddle.fluid.data( + x_int32 = paddle.static.data( name="x_int32", shape=[10, 12], dtype="int32" ) - x_int64 = paddle.fluid.data( + x_int64 = paddle.static.data( name="x_int64", shape=[10, 12], dtype="int64" ) - x_float16 = paddle.fluid.data( + x_float16 = paddle.static.data( name="x_float16", shape=[10, 12], dtype="float16" ) - x_float32 = paddle.fluid.data( + x_float32 = paddle.static.data( name="x_float32", shape=[10, 12], dtype="float32" ) - x_float64 = paddle.fluid.data( + x_float64 = paddle.static.data( name="x_float64", shape=[10, 12], dtype="float64" ) diff --git a/python/paddle/fluid/tests/unittests/test_reduce_op.py 
b/python/paddle/fluid/tests/unittests/test_reduce_op.py index f17f6112e8d63f3646c983de0e6f8b527d4ff9b8..3798d8c818da8c14766a2284f902d6996577e33c 100644 --- a/python/paddle/fluid/tests/unittests/test_reduce_op.py +++ b/python/paddle/fluid/tests/unittests/test_reduce_op.py @@ -1081,7 +1081,7 @@ class API_TestSumOp(unittest.TestCase): places.append(fluid.CUDAPlace(0)) for place in places: with fluid.program_guard(fluid.Program(), fluid.Program()): - data = fluid.data("data", shape=shape, dtype=x_dtype) + data = paddle.static.data("data", shape=shape, dtype=x_dtype) result_sum = paddle.sum( x=data, axis=attr_axis, dtype=attr_dtype ) @@ -1156,7 +1156,7 @@ class TestAllAPI(unittest.TestCase): def check_static_result(self, place): with fluid.program_guard(fluid.Program(), fluid.Program()): - input = fluid.data(name="input", shape=[4, 4], dtype="bool") + input = paddle.static.data(name="input", shape=[4, 4], dtype="bool") result = paddle.all(x=input) input_np = np.random.randint(0, 2, [4, 4]).astype("bool") @@ -1213,7 +1213,7 @@ class TestAnyAPI(unittest.TestCase): def check_static_result(self, place): with fluid.program_guard(fluid.Program(), fluid.Program()): - input = fluid.data(name="input", shape=[4, 4], dtype="bool") + input = paddle.static.data(name="input", shape=[4, 4], dtype="bool") result = paddle.any(x=input) input_np = np.random.randint(0, 2, [4, 4]).astype("bool") diff --git a/python/paddle/fluid/tests/unittests/test_rnn_cell_api.py b/python/paddle/fluid/tests/unittests/test_rnn_cell_api.py index 4534eac042a54efd3eba4606ed9588a6e2c68278..9bd6752b7a197fc801b44ffb80523e2d21ed7e89 100644 --- a/python/paddle/fluid/tests/unittests/test_rnn_cell_api.py +++ b/python/paddle/fluid/tests/unittests/test_rnn_cell_api.py @@ -37,7 +37,7 @@ class TestRnnError(unittest.TestCase): input_size = 16 hidden_size = 16 seq_len = 4 - inputs = fluid.data( + inputs = paddle.static.data( name='inputs', shape=[None, input_size], dtype='float32' ) pre_hidden = paddle.static.data( @@ -45,12 +45,12 @@ class TestRnnError(unittest.TestCase): shape=[None, hidden_size], dtype='float32', ) - inputs_basic_lstm = fluid.data( + inputs_basic_lstm = paddle.static.data( name='inputs_basic_lstm', shape=[None, None, input_size], dtype='float32', ) - sequence_length = fluid.data( + sequence_length = paddle.static.data( name="sequence_length", shape=[None], dtype='int64' ) @@ -161,18 +161,18 @@ class TestRnn(unittest.TestCase): setattr(numpy_cell, k, param) fluid.global_scope().find_var(v.name).get_tensor().set(param, place) - sequence_length = fluid.data( + sequence_length = paddle.static.data( name="sequence_length", shape=[None], dtype='int64' ) - inputs_rnn = fluid.data( + inputs_rnn = paddle.static.data( name='inputs_rnn', shape=[None, None, self.input_size], dtype='float64', ) - pre_hidden = fluid.data( + pre_hidden = paddle.static.data( name='pre_hidden', shape=[None, self.hidden_size], dtype='float64' ) - pre_cell = fluid.data( + pre_cell = paddle.static.data( name='pre_cell', shape=[None, self.hidden_size], dtype='float64' ) diff --git a/python/paddle/fluid/tests/unittests/test_rnn_decode_api.py b/python/paddle/fluid/tests/unittests/test_rnn_decode_api.py index 2337364efa2a4690572d09b125c71f4117c8a571..d925e83de25d8e4359e556b3b1b6bbfc9be1ec77 100644 --- a/python/paddle/fluid/tests/unittests/test_rnn_decode_api.py +++ b/python/paddle/fluid/tests/unittests/test_rnn_decode_api.py @@ -186,16 +186,20 @@ class SeqPGAgent: def build_program(self, model_cls, alg_cls, model_hparams, alg_hparams): with 
fluid.program_guard(self.main_program, self.startup_program): - source = fluid.data(name="src", shape=[None, None], dtype="int64") - source_length = fluid.data( + source = paddle.static.data( + name="src", shape=[None, None], dtype="int64" + ) + source_length = paddle.static.data( name="src_sequence_length", shape=[None], dtype="int64" ) # only for teacher-forcing MLE training - target = fluid.data(name="trg", shape=[None, None], dtype="int64") - target_length = fluid.data( + target = paddle.static.data( + name="trg", shape=[None, None], dtype="int64" + ) + target_length = paddle.static.data( name="trg_sequence_length", shape=[None], dtype="int64" ) - label = fluid.data( + label = paddle.static.data( name="label", shape=[None, None, 1], dtype="int64" ) self.model = model_cls(**model_hparams) @@ -204,7 +208,7 @@ class SeqPGAgent: source, source_length, target, target_length ) self.samples.stop_gradient = True - self.reward = fluid.data( + self.reward = paddle.static.data( name="reward", shape=[None, None], # batch_size, seq_len dtype=self.probs.dtype, diff --git a/python/paddle/fluid/tests/unittests/test_rot90_op.py b/python/paddle/fluid/tests/unittests/test_rot90_op.py index a6b249ab190201dfe2819b1d76ce3cd9b36eaf3c..73e59e1118d2f4d8039387adad0c0ee089708e91 100644 --- a/python/paddle/fluid/tests/unittests/test_rot90_op.py +++ b/python/paddle/fluid/tests/unittests/test_rot90_op.py @@ -28,7 +28,9 @@ class TestRot90_API(unittest.TestCase): startup_program = fluid.Program() train_program = fluid.Program() with fluid.program_guard(train_program, startup_program): - input = fluid.data(name='input', dtype='float32', shape=[2, 3]) + input = paddle.static.data( + name='input', dtype='float32', shape=[2, 3] + ) output = paddle.rot90(input, k=1, axes=[0, 1]) output = paddle.rot90(output, k=1, axes=[0, 1]) output = output.rot90(k=1, axes=[0, 1]) @@ -53,11 +55,13 @@ class TestRot90_API(unittest.TestCase): def test_static_k_0(self): paddle.enable_static() - input = fluid.data(name='input', dtype='float32', shape=[2, 3]) + input = paddle.static.data(name='input', dtype='float32', shape=[2, 3]) startup_program = fluid.Program() train_program = fluid.Program() with fluid.program_guard(train_program, startup_program): - input = fluid.data(name='input', dtype='float32', shape=[2, 3]) + input = paddle.static.data( + name='input', dtype='float32', shape=[2, 3] + ) output = paddle.rot90(input, k=0, axes=[0, 1]) place = fluid.CPUPlace() if fluid.core.is_compiled_with_cuda(): @@ -80,11 +84,13 @@ class TestRot90_API(unittest.TestCase): def test_static_k_2(self): paddle.enable_static() - input = fluid.data(name='input', dtype='float32', shape=[2, 3]) + input = paddle.static.data(name='input', dtype='float32', shape=[2, 3]) startup_program = fluid.Program() train_program = fluid.Program() with fluid.program_guard(train_program, startup_program): - input = fluid.data(name='input', dtype='float32', shape=[2, 3]) + input = paddle.static.data( + name='input', dtype='float32', shape=[2, 3] + ) output = paddle.rot90(input, k=2, axes=[0, 1]) place = fluid.CPUPlace() if fluid.core.is_compiled_with_cuda(): @@ -107,11 +113,13 @@ class TestRot90_API(unittest.TestCase): def test_static_k_3(self): paddle.enable_static() - input = fluid.data(name='input', dtype='float32', shape=[2, 3]) + input = paddle.static.data(name='input', dtype='float32', shape=[2, 3]) startup_program = fluid.Program() train_program = fluid.Program() with fluid.program_guard(train_program, startup_program): - input = fluid.data(name='input', dtype='float32', 
shape=[2, 3]) + input = paddle.static.data( + name='input', dtype='float32', shape=[2, 3] + ) output = paddle.rot90(input, k=3, axes=[0, 1]) place = fluid.CPUPlace() if fluid.core.is_compiled_with_cuda(): @@ -134,11 +142,13 @@ class TestRot90_API(unittest.TestCase): def test_static_neg_k_1(self): paddle.enable_static() - input = fluid.data(name='input', dtype='float32', shape=[2, 3]) + input = paddle.static.data(name='input', dtype='float32', shape=[2, 3]) startup_program = fluid.Program() train_program = fluid.Program() with fluid.program_guard(train_program, startup_program): - input = fluid.data(name='input', dtype='float32', shape=[2, 3]) + input = paddle.static.data( + name='input', dtype='float32', shape=[2, 3] + ) output = paddle.rot90(input, k=-1, axes=[0, 1]) place = fluid.CPUPlace() if fluid.core.is_compiled_with_cuda(): @@ -161,11 +171,13 @@ class TestRot90_API(unittest.TestCase): def test_static_neg_k_2(self): paddle.enable_static() - input = fluid.data(name='input', dtype='float32', shape=[2, 3]) + input = paddle.static.data(name='input', dtype='float32', shape=[2, 3]) startup_program = fluid.Program() train_program = fluid.Program() with fluid.program_guard(train_program, startup_program): - input = fluid.data(name='input', dtype='float32', shape=[2, 3]) + input = paddle.static.data( + name='input', dtype='float32', shape=[2, 3] + ) output = paddle.rot90(input, k=-2, axes=[0, 1]) place = fluid.CPUPlace() if fluid.core.is_compiled_with_cuda(): @@ -188,11 +200,13 @@ class TestRot90_API(unittest.TestCase): def test_static_neg_k_3(self): paddle.enable_static() - input = fluid.data(name='input', dtype='float32', shape=[2, 3]) + input = paddle.static.data(name='input', dtype='float32', shape=[2, 3]) startup_program = fluid.Program() train_program = fluid.Program() with fluid.program_guard(train_program, startup_program): - input = fluid.data(name='input', dtype='float32', shape=[2, 3]) + input = paddle.static.data( + name='input', dtype='float32', shape=[2, 3] + ) output = paddle.rot90(input, k=-3, axes=[0, 1]) place = fluid.CPUPlace() if fluid.core.is_compiled_with_cuda(): @@ -215,11 +229,13 @@ class TestRot90_API(unittest.TestCase): def test_static_neg_k_4(self): paddle.enable_static() - input = fluid.data(name='input', dtype='float32', shape=[2, 3]) + input = paddle.static.data(name='input', dtype='float32', shape=[2, 3]) startup_program = fluid.Program() train_program = fluid.Program() with fluid.program_guard(train_program, startup_program): - input = fluid.data(name='input', dtype='float32', shape=[2, 3]) + input = paddle.static.data( + name='input', dtype='float32', shape=[2, 3] + ) output = paddle.rot90(input, k=-4, axes=[0, 1]) place = fluid.CPUPlace() if fluid.core.is_compiled_with_cuda(): @@ -245,32 +261,40 @@ class TestRot90_API(unittest.TestCase): # dims error def run1(): - input = fluid.data(name='input', dtype='float32', shape=[2, 3]) + input = paddle.static.data( + name='input', dtype='float32', shape=[2, 3] + ) output = paddle.rot90(input, k=1, axes=[0]) self.assertRaises(ValueError, run1) # input dims error def run2(): - input = fluid.data(name='input', dtype='float32', shape=[2]) + input = paddle.static.data(name='input', dtype='float32', shape=[2]) output = paddle.rot90(input, k=1, axes=[0, 1]) self.assertRaises(ValueError, run2) def run3(): - input = fluid.data(name='input', dtype='float32', shape=[2, 3]) + input = paddle.static.data( + name='input', dtype='float32', shape=[2, 3] + ) output = paddle.rot90(input, k=1, axes=[0, 0]) self.assertRaises(ValueError, 
run3) def run4(): - input = fluid.data(name='input', dtype='float32', shape=[2, 3]) + input = paddle.static.data( + name='input', dtype='float32', shape=[2, 3] + ) output = paddle.rot90(input, k=1, axes=[3, 1]) self.assertRaises(ValueError, run4) def run5(): - input = fluid.data(name='input', dtype='float32', shape=[2, 3]) + input = paddle.static.data( + name='input', dtype='float32', shape=[2, 3] + ) output = paddle.rot90(input, k=1, axes=[0, 3]) self.assertRaises(ValueError, run5) diff --git a/python/paddle/fluid/tests/unittests/test_row_conv_op.py b/python/paddle/fluid/tests/unittests/test_row_conv_op.py index 408a5f8a7405e0c6b2b6af5e0a686db3f1ae99fd..0d27282aa3bf32bc3891af054a86847bc0a31cb1 100644 --- a/python/paddle/fluid/tests/unittests/test_row_conv_op.py +++ b/python/paddle/fluid/tests/unittests/test_row_conv_op.py @@ -193,7 +193,7 @@ class TestRowConvLayer(unittest.TestCase): main = fluid.Program() with fluid.unique_name.guard(): with fluid.program_guard(main, start): - x = fluid.data("x", (-1, -1, self.C), "float32") + x = paddle.static.data("x", (-1, -1, self.C), "float32") out = paddle.static.nn.row_conv( x, self.context_length, diff --git a/python/paddle/fluid/tests/unittests/test_rrelu_op.py b/python/paddle/fluid/tests/unittests/test_rrelu_op.py index c7523a5f9b3ec96d897b4e63ea6736e3d9bf742b..7fa2c62f602d9cd6c920a72e6b928763065dc8bf 100644 --- a/python/paddle/fluid/tests/unittests/test_rrelu_op.py +++ b/python/paddle/fluid/tests/unittests/test_rrelu_op.py @@ -59,7 +59,7 @@ class TestFunctionalRReluAPI(unittest.TestCase): def check_static_result(self, place): with fluid.program_guard(fluid.Program(), fluid.Program()): - input = fluid.data( + input = paddle.static.data( name="input", shape=[2, 3, 4, 5], dtype="float32" ) res1 = F.rrelu( @@ -97,10 +97,10 @@ class TestFunctionalRReluAPI(unittest.TestCase): for place in self.places: paddle.enable_static() - x_1 = paddle.fluid.data( + x_1 = paddle.static.data( name="x", shape=self.x_np.shape, dtype="float64" ) - x_2 = paddle.fluid.data( + x_2 = paddle.static.data( name="x2", shape=self.x_np.shape, dtype="float64" ) out_1 = F.rrelu(x_1, self.lower_0, self.upper_0, training=False) @@ -140,10 +140,10 @@ class TestFunctionalRReluAPI(unittest.TestCase): for place in self.places: paddle.enable_static() - x_1 = paddle.fluid.data( + x_1 = paddle.static.data( name="x", shape=self.x_np.shape, dtype="float64" ) - x_2 = paddle.fluid.data( + x_2 = paddle.static.data( name="x2", shape=self.x_np.shape, dtype="float64" ) # init instance @@ -223,7 +223,7 @@ class TestFunctionalRReluAPI(unittest.TestCase): TypeError, F.rrelu, x=1, lower=self.lower_0, upper=self.upper_0 ) # The input dtype must be float16, float32, float64. 
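# [Editor's note] Every hunk in these test diffs applies the same one-line
# substitution: the removed ``fluid.data`` / ``paddle.fluid.data`` becomes
# ``paddle.static.data`` with an identical (name, shape, dtype) signature.
# A minimal, self-contained sketch of the migrated pattern (the names,
# shape, and relu op below are illustrative, not taken from these tests):
#
#     import numpy as np
#     import paddle
#
#     paddle.enable_static()
#     with paddle.static.program_guard(paddle.static.Program()):
#         # paddle.static.data declares a fed placeholder, as fluid.data did.
#         x = paddle.static.data(name='x', shape=[2, 3], dtype='float32')
#         out = paddle.nn.functional.relu(x)
#         exe = paddle.static.Executor(paddle.CPUPlace())
#         (res,) = exe.run(
#             feed={'x': np.ones([2, 3], dtype='float32')}, fetch_list=[out]
#         )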
- x_int32 = paddle.fluid.data( + x_int32 = paddle.static.data( name='x_int32', shape=[2, 3], dtype='int32' ) self.assertRaises( @@ -233,7 +233,7 @@ class TestFunctionalRReluAPI(unittest.TestCase): lower=self.lower_0, upper=self.upper_0, ) - x_bool = paddle.fluid.data( + x_bool = paddle.static.data( name='x_bool', shape=[2, 3], dtype='int32' ) self.assertRaises( @@ -244,7 +244,7 @@ class TestFunctionalRReluAPI(unittest.TestCase): upper=self.upper_0, ) # lower and upper must be float - x_fp32 = paddle.fluid.data( + x_fp32 = paddle.static.data( name='x_fp32', shape=[2, 3], dtype='float32' ) self.assertRaises(TypeError, F.rrelu, x=x_fp32, lower=0, upper=0.5) @@ -261,7 +261,7 @@ class TestFunctionalRReluAPI(unittest.TestCase): ValueError, F.rrelu, x=x_fp32, lower=0.5, upper=0.2 ) # support the input dtype is float16 - x_fp16 = paddle.fluid.data( + x_fp16 = paddle.static.data( name='x_fp16', shape=[2, 3], dtype='float16' ) F.rrelu(x=x_fp16, lower=self.lower_0, upper=self.upper_0) diff --git a/python/paddle/fluid/tests/unittests/test_run_program_op.py b/python/paddle/fluid/tests/unittests/test_run_program_op.py index 0374df968af209e8436d5ac7186d5f7d92472406..13d14c56f565a6841f99aa950acf20c30b52c694 100644 --- a/python/paddle/fluid/tests/unittests/test_run_program_op.py +++ b/python/paddle/fluid/tests/unittests/test_run_program_op.py @@ -394,7 +394,7 @@ class TestRunProgramOpWithFC(RunProgramOpTest): def build_model(self): # 1. simple model - img = fluid.data( + img = paddle.static.data( name=self.input_names['X'][0], shape=[None, 1, 28, 28], dtype='float32', diff --git a/python/paddle/fluid/tests/unittests/test_scaled_dot_product_attention.py b/python/paddle/fluid/tests/unittests/test_scaled_dot_product_attention.py index 2b0e28adf8f2bdf6b0f8d548e5d901e92eab6fca..3afa270f4dc64aed3c8b74656ecb1c3552cbe945 100644 --- a/python/paddle/fluid/tests/unittests/test_scaled_dot_product_attention.py +++ b/python/paddle/fluid/tests/unittests/test_scaled_dot_product_attention.py @@ -16,6 +16,7 @@ import unittest import numpy as np +import paddle import paddle.fluid as fluid from paddle.fluid import Program, program_guard @@ -23,11 +24,13 @@ from paddle.fluid import Program, program_guard class TestScaledDotProductAttentionError(unittest.TestCase): def test_errors(self): with program_guard(Program(), Program()): - queries = fluid.data( + queries = paddle.static.data( name="queries", shape=[3, 5, 9], dtype="float32" ) - keys = fluid.data(name="keys", shape=[3, 6, 9], dtype="float32") - values = fluid.data( + keys = paddle.static.data( + name="keys", shape=[3, 6, 9], dtype="float32" + ) + values = paddle.static.data( name="values", shape=[3, 6, 10], dtype="float32" ) @@ -56,10 +59,10 @@ class TestScaledDotProductAttentionError(unittest.TestCase): self.assertRaises(TypeError, test_values_Variable) def test_diff_dtype(): - keys_error = fluid.data( + keys_error = paddle.static.data( name="keys_error", shape=[3, 6, 9], dtype="float64" ) - values_error = fluid.data( + values_error = paddle.static.data( name="values_error", shape=[3, 6, 10], dtype="float64" ) fluid.nets.scaled_dot_product_attention( @@ -69,10 +72,10 @@ class TestScaledDotProductAttentionError(unittest.TestCase): self.assertRaises(TypeError, test_diff_dtype) def test_diff_dim(): - keys_error_dim = fluid.data( + keys_error_dim = paddle.static.data( name="keys_error_dim", shape=[3, 6], dtype="float32" ) - values_error_dim = fluid.data( + values_error_dim = paddle.static.data( name="values_error_dim", shape=[3], dtype="float32" ) 
fluid.nets.scaled_dot_product_attention( @@ -82,10 +85,10 @@ class TestScaledDotProductAttentionError(unittest.TestCase): self.assertRaises(ValueError, test_diff_dim) def test_diff_hidden_size(): - queries_error_hs = fluid.data( + queries_error_hs = paddle.static.data( name="queries_error_hs", shape=[3, 5, 9], dtype="float32" ) - keys_error_hs = fluid.data( + keys_error_hs = paddle.static.data( name="keys_error_hs", shape=[3, 6, 10], dtype="float32" ) fluid.nets.scaled_dot_product_attention( @@ -95,10 +98,10 @@ class TestScaledDotProductAttentionError(unittest.TestCase): self.assertRaises(ValueError, test_diff_hidden_size) def test_diff_max_len(): - keys_error_len = fluid.data( + keys_error_len = paddle.static.data( name="keys_error_len", shape=[3, 7, 9], dtype="float32" ) - values_error_len = fluid.data( + values_error_len = paddle.static.data( name="values_error_len", shape=[3, 6, 10], dtype="float32" ) fluid.nets.scaled_dot_product_attention( diff --git a/python/paddle/fluid/tests/unittests/test_scatter_op.py b/python/paddle/fluid/tests/unittests/test_scatter_op.py index ec810280432fe338047c8b6be9412791d32ec147..14afd56dec717f74bd0404a2553df336069bfa13 100644 --- a/python/paddle/fluid/tests/unittests/test_scatter_op.py +++ b/python/paddle/fluid/tests/unittests/test_scatter_op.py @@ -224,9 +224,13 @@ class TestScatterAPI(unittest.TestCase): def check_static_result(self, place): with fluid.program_guard(fluid.Program(), fluid.Program()): - input = fluid.data(name="input", shape=[3, 2], dtype="float64") - index = fluid.data(name="index", shape=[4], dtype="int64") - updates = fluid.data(name="updates", shape=[4, 2], dtype="float64") + input = paddle.static.data( + name="input", shape=[3, 2], dtype="float64" + ) + index = paddle.static.data(name="index", shape=[4], dtype="int64") + updates = paddle.static.data( + name="updates", shape=[4, 2], dtype="float64" + ) result = self.scatter(input, index, updates, False) input_data = np.array([[1, 1], [2, 2], [3, 3]]).astype(np.float64) diff --git a/python/paddle/fluid/tests/unittests/test_selu_op.py b/python/paddle/fluid/tests/unittests/test_selu_op.py index 1cd638b37836fdbd95b019394c95b4f5977aa8a7..e3ec3370905d53f1cd0c74a99033192d896a8c63 100644 --- a/python/paddle/fluid/tests/unittests/test_selu_op.py +++ b/python/paddle/fluid/tests/unittests/test_selu_op.py @@ -96,7 +96,7 @@ class TestSeluAPI(unittest.TestCase): def test_static_api(self): with paddle.static.program_guard(paddle.static.Program()): - x = paddle.fluid.data('X', self.x_np.shape, self.x_np.dtype) + x = paddle.static.data('X', self.x_np.shape, self.x_np.dtype) out1 = F.selu(x, self.scale, self.alpha) selu = paddle.nn.SELU(self.scale, self.alpha) out2 = selu(x) @@ -119,7 +119,7 @@ class TestSeluAPI(unittest.TestCase): def test_fluid_api(self): with fluid.program_guard(fluid.Program()): - x = fluid.data('X', self.x_np.shape, self.x_np.dtype) + x = paddle.static.data('X', self.x_np.shape, self.x_np.dtype) out = F.selu(x, self.scale, self.alpha) exe = fluid.Executor(self.place) res = exe.run(feed={'X': self.x_np}, fetch_list=[out]) @@ -131,19 +131,19 @@ class TestSeluAPI(unittest.TestCase): # The input type must be Variable. self.assertRaises(TypeError, F.selu, 1) # The input dtype must be float16, float32, float64. 
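# [Editor's note] The error tests in these files all follow one pattern:
# declare a placeholder with a deliberately unsupported dtype, then assert
# that the op rejects it. A hedged sketch mirroring the selu checks below:
#
#     import unittest
#     import paddle
#     import paddle.nn.functional as F
#
#     class DtypeRejectionSketch(unittest.TestCase):
#         def test_selu_rejects_int32(self):
#             paddle.enable_static()
#             with paddle.static.program_guard(paddle.static.Program()):
#                 x_int32 = paddle.static.data(
#                     name='x_int32', shape=[12, 10], dtype='int32'
#                 )
#                 # selu only accepts float16/float32/float64 inputs.
#                 self.assertRaises(TypeError, F.selu, x_int32)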
- x_int32 = paddle.fluid.data( + x_int32 = paddle.static.data( name='x_int32', shape=[12, 10], dtype='int32' ) self.assertRaises(TypeError, F.selu, x_int32) # The scale must be greater than 1.0 - x_fp32 = paddle.fluid.data( + x_fp32 = paddle.static.data( name='x_fp32', shape=[12, 10], dtype='float32' ) self.assertRaises(ValueError, F.selu, x_fp32, -1.0) # The alpha must be no less than 0 self.assertRaises(ValueError, F.selu, x_fp32, 1.6, -1.0) # support the input dtype is float16 - x_fp16 = paddle.fluid.data( + x_fp16 = paddle.static.data( name='x_fp16', shape=[12, 10], dtype='float16' ) F.selu(x_fp16) diff --git a/python/paddle/fluid/tests/unittests/test_sigmoid_focal_loss.py b/python/paddle/fluid/tests/unittests/test_sigmoid_focal_loss.py index 594de6859cec21fc69950039b724f393c27bc149..23814c824b70587ea7b56735a8620a8bb5fb6590 100644 --- a/python/paddle/fluid/tests/unittests/test_sigmoid_focal_loss.py +++ b/python/paddle/fluid/tests/unittests/test_sigmoid_focal_loss.py @@ -42,17 +42,17 @@ def test_static( prog = paddle.static.Program() startup_prog = paddle.static.Program() with paddle.static.program_guard(prog, startup_prog): - logit = paddle.fluid.data( + logit = paddle.static.data( name='logit', shape=logit_np.shape, dtype='float64' ) - label = paddle.fluid.data( + label = paddle.static.data( name='label', shape=label_np.shape, dtype='float64' ) feed_dict = {"logit": logit_np, "label": label_np} normalizer = None if normalizer_np is not None: - normalizer = paddle.fluid.data( + normalizer = paddle.static.data( name='normalizer', shape=normalizer_np.shape, dtype='float64' ) feed_dict["normalizer"] = normalizer_np diff --git a/python/paddle/fluid/tests/unittests/test_size_op.py b/python/paddle/fluid/tests/unittests/test_size_op.py index edef25ed7a783909de49b102df3ec57e8eeafa9d..0a23e7359ef3de6a3f1229fee4158624d079b903 100644 --- a/python/paddle/fluid/tests/unittests/test_size_op.py +++ b/python/paddle/fluid/tests/unittests/test_size_op.py @@ -69,8 +69,8 @@ class TestSizeAPI(unittest.TestCase): with fluid.program_guard(main_program, startup_program): shape1 = [2, 1, 4, 5] shape2 = [1, 4, 5] - x_1 = paddle.fluid.data(shape=shape1, dtype='int32', name='x_1') - x_2 = paddle.fluid.data(shape=shape2, dtype='int32', name='x_2') + x_1 = paddle.static.data(shape=shape1, dtype='int32', name='x_1') + x_2 = paddle.static.data(shape=shape2, dtype='int32', name='x_2') input_1 = np.random.random(shape1).astype("int32") input_2 = np.random.random(shape2).astype("int32") out_1 = paddle.numel(x_1) diff --git a/python/paddle/fluid/tests/unittests/test_slice_op.py b/python/paddle/fluid/tests/unittests/test_slice_op.py index b8b0ccb8417aeea239e86b572ab855f22e794e9e..59725ecf040c013be1b2a7eb0ca83eccc2b38b64 100644 --- a/python/paddle/fluid/tests/unittests/test_slice_op.py +++ b/python/paddle/fluid/tests/unittests/test_slice_op.py @@ -720,9 +720,15 @@ class TestSliceApiWithLoDTensorArray(unittest.TestCase): def set_program_and_run(self, main_program, case_num): with fluid.program_guard(main_program): x = [ - fluid.data(name='x0', shape=self.shape, dtype="float32"), - fluid.data(name='x1', shape=self.shape, dtype="float32"), - fluid.data(name='x2', shape=self.shape, dtype="float32"), + paddle.static.data( + name='x0', shape=self.shape, dtype="float32" + ), + paddle.static.data( + name='x1', shape=self.shape, dtype="float32" + ), + paddle.static.data( + name='x2', shape=self.shape, dtype="float32" + ), ] for each_x in x: diff --git a/python/paddle/fluid/tests/unittests/test_smooth_l1_loss.py 
b/python/paddle/fluid/tests/unittests/test_smooth_l1_loss.py index 6f1565c093966de5f002e4330d6250a39479e26c..821733c3a7167cff1ed884cdb9c86ed2c143da56 100644 --- a/python/paddle/fluid/tests/unittests/test_smooth_l1_loss.py +++ b/python/paddle/fluid/tests/unittests/test_smooth_l1_loss.py @@ -54,8 +54,12 @@ class SmoothL1Loss(unittest.TestCase): else fluid.CPUPlace() ) with fluid.program_guard(prog, startup_prog): - input = fluid.data(name='input', shape=[100, 200], dtype='float32') - label = fluid.data(name='label', shape=[100, 200], dtype='float32') + input = paddle.static.data( + name='input', shape=[100, 200], dtype='float32' + ) + label = paddle.static.data( + name='label', shape=[100, 200], dtype='float32' + ) smooth_l1_loss = paddle.nn.loss.SmoothL1Loss() ret = smooth_l1_loss(input, label) @@ -93,8 +97,12 @@ class SmoothL1Loss(unittest.TestCase): else fluid.CPUPlace() ) with fluid.program_guard(prog, startup_prog): - input = fluid.data(name='input', shape=[100, 200], dtype='float32') - label = fluid.data(name='label', shape=[100, 200], dtype='float32') + input = paddle.static.data( + name='input', shape=[100, 200], dtype='float32' + ) + label = paddle.static.data( + name='label', shape=[100, 200], dtype='float32' + ) smooth_l1_loss = paddle.nn.loss.SmoothL1Loss(reduction='sum') ret = smooth_l1_loss(input, label) @@ -132,8 +140,12 @@ class SmoothL1Loss(unittest.TestCase): else fluid.CPUPlace() ) with fluid.program_guard(prog, startup_prog): - input = fluid.data(name='input', shape=[100, 200], dtype='float32') - label = fluid.data(name='label', shape=[100, 200], dtype='float32') + input = paddle.static.data( + name='input', shape=[100, 200], dtype='float32' + ) + label = paddle.static.data( + name='label', shape=[100, 200], dtype='float32' + ) smooth_l1_loss = paddle.nn.loss.SmoothL1Loss(reduction='none') ret = smooth_l1_loss(input, label) @@ -172,8 +184,12 @@ class SmoothL1Loss(unittest.TestCase): else fluid.CPUPlace() ) with fluid.program_guard(prog, startup_prog): - input = fluid.data(name='input', shape=[100, 200], dtype='float32') - label = fluid.data(name='label', shape=[100, 200], dtype='float32') + input = paddle.static.data( + name='input', shape=[100, 200], dtype='float32' + ) + label = paddle.static.data( + name='label', shape=[100, 200], dtype='float32' + ) smooth_l1_loss = paddle.nn.loss.SmoothL1Loss(delta=delta) ret = smooth_l1_loss(input, label) diff --git a/python/paddle/fluid/tests/unittests/test_softmax2d.py b/python/paddle/fluid/tests/unittests/test_softmax2d.py index 61d4bb931068058d415e5cbe1f07fa4a520457f8..cb24181cf3dbabfcb2971a311cd3b0a0efc43fff 100644 --- a/python/paddle/fluid/tests/unittests/test_softmax2d.py +++ b/python/paddle/fluid/tests/unittests/test_softmax2d.py @@ -35,7 +35,7 @@ class TestSoftmax2DAPI(unittest.TestCase): def test_static_api(self): paddle.enable_static() with paddle.static.program_guard(paddle.static.Program()): - x = paddle.fluid.data('X', self.x_np.shape, self.x_np.dtype) + x = paddle.static.data('X', self.x_np.shape, self.x_np.dtype) m = paddle.nn.Softmax2D() out = m(x) exe = paddle.static.Executor(self.place) @@ -111,7 +111,7 @@ class TestSoftmax2DError(unittest.TestCase): def test_static_error(self): paddle.enable_static() with paddle.static.program_guard(paddle.static.Program()): - x = paddle.fluid.data('X', [5, 5], 'float32') + x = paddle.static.data('X', [5, 5], 'float32') m = paddle.nn.Softmax2D() self.assertRaises(AssertionError, m, x) diff --git a/python/paddle/fluid/tests/unittests/test_softmax_mask_fuse_op.py 
b/python/paddle/fluid/tests/unittests/test_softmax_mask_fuse_op.py index f56e15856054e5a28a17b4b54e33968d9b927780..909758504667a2121f500f4c991df467a20d1020 100644 --- a/python/paddle/fluid/tests/unittests/test_softmax_mask_fuse_op.py +++ b/python/paddle/fluid/tests/unittests/test_softmax_mask_fuse_op.py @@ -89,8 +89,10 @@ class TestSoftmaxMaskFuseOp0(OpTest): class TestDropoutBiasFuseOp3(unittest.TestCase): def test_static_result(self): with fluid.program_guard(fluid.Program(), fluid.Program()): - input_x = fluid.data(name="x", shape=[1, 1, 8, 32], dtype="float32") - input_mask = fluid.data( + input_x = paddle.static.data( + name="x", shape=[1, 1, 8, 32], dtype="float32" + ) + input_mask = paddle.static.data( name="mask", shape=[1, 1, 8, 32], dtype="float32" ) rst = incubate.softmax_mask_fuse(input_x, input_mask) diff --git a/python/paddle/fluid/tests/unittests/test_softmax_mask_fuse_upper_triangle_op.py b/python/paddle/fluid/tests/unittests/test_softmax_mask_fuse_upper_triangle_op.py index 8d6d866fe91197d9c01115520f3dfcdb32aacea7..ddc894314a8cfd9c3582f6bbc940245c2e701a8d 100644 --- a/python/paddle/fluid/tests/unittests/test_softmax_mask_fuse_upper_triangle_op.py +++ b/python/paddle/fluid/tests/unittests/test_softmax_mask_fuse_upper_triangle_op.py @@ -92,7 +92,7 @@ class TestDropoutBiasFuseOp2(unittest.TestCase): def test_static(self): for dtype in self.dtypes: with fluid.program_guard(fluid.Program(), fluid.Program()): - input_x = fluid.data( + input_x = paddle.static.data( name="x", shape=[1, 4, 32, 32], dtype=dtype ) rst = incubate.softmax_mask_fuse_upper_triangle(input_x) diff --git a/python/paddle/fluid/tests/unittests/test_softmax_op.py b/python/paddle/fluid/tests/unittests/test_softmax_op.py index 943e9ce0713edd52be5dd41f96ac85cc4d36bca5..860541f3710aeb4613737ba1310674d46b964820 100644 --- a/python/paddle/fluid/tests/unittests/test_softmax_op.py +++ b/python/paddle/fluid/tests/unittests/test_softmax_op.py @@ -462,7 +462,7 @@ class TestSoftmaxAPI(unittest.TestCase): def test_static_check(self): with paddle.static.program_guard(paddle.static.Program()): - x = paddle.fluid.data('X', self.x_np.shape, 'float32') + x = paddle.static.data('X', self.x_np.shape, 'float32') out1 = self.softmax(x) m = paddle.nn.Softmax() out2 = m(x) @@ -508,12 +508,12 @@ class TestSoftmaxAPI(unittest.TestCase): # The input type must be Variable. self.assertRaises(TypeError, self.softmax, 1) # The input dtype must be float16, float32, float64. - x_int32 = paddle.fluid.data( + x_int32 = paddle.static.data( name='x_int32', shape=[2, 3], dtype='int32' ) self.assertRaises(TypeError, self.softmax, x_int32) # support the input dtype is float16 - x_fp16 = paddle.fluid.data( + x_fp16 = paddle.static.data( name='x_fp16', shape=[2, 3], dtype='float16' ) self.softmax(x_fp16) diff --git a/python/paddle/fluid/tests/unittests/test_solve_op.py b/python/paddle/fluid/tests/unittests/test_solve_op.py index d1598954a431ee00690d6b5b148656b9ebb47879..9a0fb7a7235438fd6b679523baf9f4336bc4dd2d 100644 --- a/python/paddle/fluid/tests/unittests/test_solve_op.py +++ b/python/paddle/fluid/tests/unittests/test_solve_op.py @@ -270,30 +270,30 @@ class TestSolveOpError(unittest.TestCase): self.assertRaises(TypeError, paddle.linalg.solve, x1, y1) # The data type of input must be float32 or float64. 
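# [Editor's note] For context, a runnable sketch of the static-graph
# ``paddle.linalg.solve`` program these tests build (the sizes and the
# NumPy cross-check are illustrative additions, not part of the suite):
#
#     import numpy as np
#     import paddle
#
#     paddle.enable_static()
#     with paddle.static.program_guard(paddle.static.Program()):
#         x = paddle.static.data(name='x', shape=[3, 3], dtype='float64')
#         y = paddle.static.data(name='y', shape=[3, 1], dtype='float64')
#         out = paddle.linalg.solve(x, y)  # solves x @ out == y
#         exe = paddle.static.Executor(paddle.CPUPlace())
#         x_np = np.random.rand(3, 3) + 3.0 * np.eye(3)  # keep x invertible
#         y_np = np.random.rand(3, 1)
#         (res,) = exe.run(feed={'x': x_np, 'y': y_np}, fetch_list=[out])
#         np.testing.assert_allclose(
#             res, np.linalg.solve(x_np, y_np), rtol=1e-6
#         )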
- x2 = fluid.data(name="x2", shape=[30, 30], dtype="bool") - y2 = fluid.data(name="y2", shape=[30, 10], dtype="bool") + x2 = paddle.static.data(name="x2", shape=[30, 30], dtype="bool") + y2 = paddle.static.data(name="y2", shape=[30, 10], dtype="bool") self.assertRaises(TypeError, paddle.linalg.solve, x2, y2) - x3 = fluid.data(name="x3", shape=[30, 30], dtype="int32") - y3 = fluid.data(name="y3", shape=[30, 10], dtype="int32") + x3 = paddle.static.data(name="x3", shape=[30, 30], dtype="int32") + y3 = paddle.static.data(name="y3", shape=[30, 10], dtype="int32") self.assertRaises(TypeError, paddle.linalg.solve, x3, y3) - x4 = fluid.data(name="x4", shape=[30, 30], dtype="int64") - y4 = fluid.data(name="y4", shape=[30, 10], dtype="int64") + x4 = paddle.static.data(name="x4", shape=[30, 30], dtype="int64") + y4 = paddle.static.data(name="y4", shape=[30, 10], dtype="int64") self.assertRaises(TypeError, paddle.linalg.solve, x4, y4) - x5 = fluid.data(name="x5", shape=[30, 30], dtype="float16") - y5 = fluid.data(name="y5", shape=[30, 10], dtype="float16") + x5 = paddle.static.data(name="x5", shape=[30, 30], dtype="float16") + y5 = paddle.static.data(name="y5", shape=[30, 10], dtype="float16") self.assertRaises(TypeError, paddle.linalg.solve, x5, y5) # The number of dimensions of input'X must be >= 2. - x6 = fluid.data(name="x6", shape=[30], dtype="float64") - y6 = fluid.data(name="y6", shape=[30], dtype="float64") + x6 = paddle.static.data(name="x6", shape=[30], dtype="float64") + y6 = paddle.static.data(name="y6", shape=[30], dtype="float64") self.assertRaises(ValueError, paddle.linalg.solve, x6, y6) # The inner-most 2 dimensions of input'X should be equal to each other - x7 = fluid.data(name="x7", shape=[2, 3, 4], dtype="float64") - y7 = fluid.data(name="y7", shape=[2, 4, 3], dtype="float64") + x7 = paddle.static.data(name="x7", shape=[2, 3, 4], dtype="float64") + y7 = paddle.static.data(name="y7", shape=[2, 4, 3], dtype="float64") self.assertRaises(ValueError, paddle.linalg.solve, x7, y7) @@ -308,10 +308,10 @@ class TestSolveOpAPI_1(unittest.TestCase): def check_static_result(self, place): with fluid.program_guard(fluid.Program(), fluid.Program()): - paddle_input_x = fluid.data( + paddle_input_x = paddle.static.data( name="input_x", shape=[3, 3], dtype=self.dtype ) - paddle_input_y = fluid.data( + paddle_input_y = paddle.static.data( name="input_y", shape=[3], dtype=self.dtype ) paddle_result = paddle.linalg.solve(paddle_input_x, paddle_input_y) @@ -369,10 +369,10 @@ class TestSolveOpAPI_2(unittest.TestCase): def check_static_result(self, place): paddle.enable_static() with fluid.program_guard(fluid.Program(), fluid.Program()): - paddle_input_x = fluid.data( + paddle_input_x = paddle.static.data( name="input_x", shape=[10, 10], dtype=self.dtype ) - paddle_input_y = fluid.data( + paddle_input_y = paddle.static.data( name="input_y", shape=[10, 4], dtype=self.dtype ) paddle_result = paddle.linalg.solve(paddle_input_x, paddle_input_y) @@ -429,10 +429,10 @@ class TestSolveOpAPI_3(unittest.TestCase): def check_static_result(self, place): paddle.enable_static() with fluid.program_guard(fluid.Program(), fluid.Program()): - paddle_input_x = fluid.data( + paddle_input_x = paddle.static.data( name="input_x", shape=[10, 10], dtype=self.dtype ) - paddle_input_y = fluid.data( + paddle_input_y = paddle.static.data( name="input_y", shape=[10, 4], dtype=self.dtype ) paddle_result = paddle.linalg.solve(paddle_input_x, paddle_input_y) @@ -489,10 +489,10 @@ class TestSolveOpAPI_4(unittest.TestCase): def 
check_static_result(self, place): with fluid.program_guard(fluid.Program(), fluid.Program()): - paddle_input_x = fluid.data( + paddle_input_x = paddle.static.data( name="input_x", shape=[2, 3, 3], dtype=self.dtype ) - paddle_input_y = fluid.data( + paddle_input_y = paddle.static.data( name="input_y", shape=[1, 3, 3], dtype=self.dtype ) paddle_result = paddle.linalg.solve(paddle_input_x, paddle_input_y) @@ -548,8 +548,8 @@ class TestSolveOpSingularAPI(unittest.TestCase): def check_static_result(self, place): with fluid.program_guard(fluid.Program(), fluid.Program()): - x = fluid.data(name="x", shape=[4, 4], dtype=self.dtype) - y = fluid.data(name="y", shape=[4, 4], dtype=self.dtype) + x = paddle.static.data(name="x", shape=[4, 4], dtype=self.dtype) + y = paddle.static.data(name="y", shape=[4, 4], dtype=self.dtype) result = paddle.linalg.solve(x, y) diff --git a/python/paddle/fluid/tests/unittests/test_sort_op.py b/python/paddle/fluid/tests/unittests/test_sort_op.py index 3f8666c8e7c65537dd609601d6fb2e7f60f558d1..a933187d273b6e07a02f56ef42423d36c5132bfb 100644 --- a/python/paddle/fluid/tests/unittests/test_sort_op.py +++ b/python/paddle/fluid/tests/unittests/test_sort_op.py @@ -27,7 +27,9 @@ class TestSortOnCPU(unittest.TestCase): def test_api_0(self): with fluid.program_guard(fluid.Program()): - input = fluid.data(name="input", shape=[2, 3, 4], dtype="float32") + input = paddle.static.data( + name="input", shape=[2, 3, 4], dtype="float32" + ) output = paddle.sort(x=input) exe = fluid.Executor(self.place) data = np.array( @@ -43,7 +45,9 @@ class TestSortOnCPU(unittest.TestCase): def test_api_1(self): with fluid.program_guard(fluid.Program()): - input = fluid.data(name="input", shape=[2, 3, 4], dtype="float32") + input = paddle.static.data( + name="input", shape=[2, 3, 4], dtype="float32" + ) output = paddle.sort(x=input, axis=1) exe = fluid.Executor(self.place) data = np.array( diff --git a/python/paddle/fluid/tests/unittests/test_split_op.py b/python/paddle/fluid/tests/unittests/test_split_op.py index 26f5dcc2943eeb36bbf4b323cc530fe48f0636b0..1d0b048b6173f4c72b3b60366b245fa97e721c4b 100644 --- a/python/paddle/fluid/tests/unittests/test_split_op.py +++ b/python/paddle/fluid/tests/unittests/test_split_op.py @@ -287,8 +287,8 @@ class TestSplitAPI(unittest.TestCase): positive_1_int32 = paddle.tensor.fill_constant([1], "int32", 1) positive_1_int64 = paddle.tensor.fill_constant([1], "int64", 1) positive_2_int64 = paddle.tensor.fill_constant([1], "int64", 2) - x_1 = fluid.data(shape=[4, 5, 6], dtype='int32', name='x_1') - x_2 = fluid.data(shape=[4, 5, None], dtype='int32', name='x_2') + x_1 = paddle.static.data(shape=[4, 5, 6], dtype='int32', name='x_1') + x_2 = paddle.static.data(shape=[4, 5, None], dtype='int32', name='x_2') out_0, out_1, out_2 = paddle.split( x=x_1, diff --git a/python/paddle/fluid/tests/unittests/test_splits_api.py b/python/paddle/fluid/tests/unittests/test_splits_api.py index 40083388d63e08713e8a838fc678c86ea90f527d..3ecb0ca9278f9f730d0004367a3a5348dda80fb3 100644 --- a/python/paddle/fluid/tests/unittests/test_splits_api.py +++ b/python/paddle/fluid/tests/unittests/test_splits_api.py @@ -56,7 +56,7 @@ class TestSplitsAPI(unittest.TestCase): paddle.enable_static() for func, func_type in test_list: with paddle.static.program_guard(paddle.static.Program()): - x = paddle.fluid.data('X', self.x_np.shape, self.x_np.dtype) + x = paddle.static.data('X', self.x_np.shape, self.x_np.dtype) out = func(x, self.num_or_sections) exe = paddle.static.Executor(self.place) res = 
exe.run(feed={'X': self.x_np}, fetch_list=[out]) @@ -170,7 +170,7 @@ class TestSplitsError(unittest.TestCase): paddle.enable_static() for func, _ in test_list: with paddle.static.program_guard(paddle.static.Program()): - x = paddle.fluid.data('X', [5], 'float32') + x = paddle.static.data('X', [5], 'float32') self.assertRaises(ValueError, func, x, self.num_or_sections) def test_dygraph_error(self): diff --git a/python/paddle/fluid/tests/unittests/test_square_error_cost.py b/python/paddle/fluid/tests/unittests/test_square_error_cost.py index afd16a3095738ed97f99f0cf4a2c9bede7a0dec1..db015ae31358553f5ca32a4e6545ce8c46151f7f 100644 --- a/python/paddle/fluid/tests/unittests/test_square_error_cost.py +++ b/python/paddle/fluid/tests/unittests/test_square_error_cost.py @@ -55,13 +55,17 @@ class TestSquareErrorInvalidInput(unittest.TestCase): def test_error(self): def test_invalid_input(): input = [256, 3] - label = fluid.data(name='label1', shape=[None, 3], dtype='float32') + label = paddle.static.data( + name='label1', shape=[None, 3], dtype='float32' + ) loss = paddle.nn.functional.square_error_cost(input, label) self.assertRaises(TypeError, test_invalid_input) def test_invalid_label(): - input = fluid.data(name='input2', shape=[None, 3], dtype='float32') + input = paddle.static.data( + name='input2', shape=[None, 3], dtype='float32' + ) label = [256, 3] loss = paddle.nn.functional.square_error_cost(input, label) diff --git a/python/paddle/fluid/tests/unittests/test_static_save_load.py b/python/paddle/fluid/tests/unittests/test_static_save_load.py index 03695262e757ea7c07328f5147f5a28f22ce94d2..07a3e114c49adea18fabf4f42468e2186fedb469 100644 --- a/python/paddle/fluid/tests/unittests/test_static_save_load.py +++ b/python/paddle/fluid/tests/unittests/test_static_save_load.py @@ -881,7 +881,7 @@ class TestVariableInit(unittest.TestCase): def test_variable_init(self): - x = fluid.data(name="x", shape=[10, 10], dtype='float32') + x = paddle.static.data(name="x", shape=[10, 10], dtype='float32') y = paddle.static.nn.fc(x, 10) z = paddle.static.nn.fc(y, 10) diff --git a/python/paddle/fluid/tests/unittests/test_std_layer.py b/python/paddle/fluid/tests/unittests/test_std_layer.py index 8f8899f47f7fefac807ea95cbce7261e1543a21c..7cbf235699bd658771beb7743dd0f5aceb6bc44c 100644 --- a/python/paddle/fluid/tests/unittests/test_std_layer.py +++ b/python/paddle/fluid/tests/unittests/test_std_layer.py @@ -48,7 +48,7 @@ class TestStdAPI(unittest.TestCase): def static(self): with paddle.static.program_guard(paddle.static.Program()): - x = paddle.fluid.data('X', self.shape, self.dtype) + x = paddle.static.data('X', self.shape, self.dtype) out = paddle.std(x, self.axis, self.unbiased, self.keepdim) exe = paddle.static.Executor(self.place) res = exe.run(feed={'X': self.x}, fetch_list=[out]) @@ -115,7 +115,7 @@ class TestStdAPI_alias(unittest.TestCase): class TestStdError(unittest.TestCase): def test_error(self): with paddle.static.program_guard(paddle.static.Program()): - x = paddle.fluid.data('X', [2, 3, 4], 'int32') + x = paddle.static.data('X', [2, 3, 4], 'int32') self.assertRaises(TypeError, paddle.std, x) diff --git a/python/paddle/fluid/tests/unittests/test_sum_op.py b/python/paddle/fluid/tests/unittests/test_sum_op.py index a0c0d7757e5eaa4389bfb793201cd2a49489f46a..c485449b0c2f089c3004babecb838af1cd22caa4 100644 --- a/python/paddle/fluid/tests/unittests/test_sum_op.py +++ b/python/paddle/fluid/tests/unittests/test_sum_op.py @@ -437,14 +437,14 @@ class TestRaiseSumError(unittest.TestCase): 
self.assertRaises(TypeError, test_type) def test_dtype(): - data1 = fluid.data(name="input1", shape=[10], dtype="int8") - data2 = fluid.data(name="input2", shape=[10], dtype="int8") + data1 = paddle.static.data(name="input1", shape=[10], dtype="int8") + data2 = paddle.static.data(name="input2", shape=[10], dtype="int8") paddle.add_n([data1, data2]) self.assertRaises(TypeError, test_dtype) def test_dtype1(): - data1 = fluid.data(name="input1", shape=[10], dtype="int8") + data1 = paddle.static.data(name="input1", shape=[10], dtype="int8") paddle.add_n(data1) self.assertRaises(TypeError, test_dtype1) @@ -458,30 +458,38 @@ class TestRaiseSumsError(unittest.TestCase): self.assertRaises(TypeError, test_type) def test_dtype(): - data1 = fluid.data(name="input1", shape=[10], dtype="int8") - data2 = fluid.data(name="input2", shape=[10], dtype="int8") + data1 = paddle.static.data(name="input1", shape=[10], dtype="int8") + data2 = paddle.static.data(name="input2", shape=[10], dtype="int8") paddle.add_n([data1, data2]) self.assertRaises(TypeError, test_dtype) def test_dtype1(): - data1 = fluid.data(name="input1", shape=[10], dtype="int8") + data1 = paddle.static.data(name="input1", shape=[10], dtype="int8") paddle.add_n(data1) self.assertRaises(TypeError, test_dtype1) def test_out_type(): - data1 = fluid.data(name="input1", shape=[10], dtype="flaot32") - data2 = fluid.data(name="input2", shape=[10], dtype="float32") + data1 = paddle.static.data( + name="input1", shape=[10], dtype="flaot32" + ) + data2 = paddle.static.data( + name="input2", shape=[10], dtype="float32" + ) out = [10] out = paddle.add_n([data1, data2]) self.assertRaises(TypeError, test_out_type) def test_out_dtype(): - data1 = fluid.data(name="input1", shape=[10], dtype="flaot32") - data2 = fluid.data(name="input2", shape=[10], dtype="float32") - out = fluid.data(name="out", shape=[10], dtype="int8") + data1 = paddle.static.data( + name="input1", shape=[10], dtype="flaot32" + ) + data2 = paddle.static.data( + name="input2", shape=[10], dtype="float32" + ) + out = paddle.static.data(name="out", shape=[10], dtype="int8") out = paddle.add_n([data1, data2]) self.assertRaises(TypeError, test_out_dtype) diff --git a/python/paddle/fluid/tests/unittests/test_svd_op.py b/python/paddle/fluid/tests/unittests/test_svd_op.py index a760fef4ff2528bef558c1a895c6e383b0cf16b4..f17c80fae6e656d0d38762174d23f12022893b1f 100644 --- a/python/paddle/fluid/tests/unittests/test_svd_op.py +++ b/python/paddle/fluid/tests/unittests/test_svd_op.py @@ -307,7 +307,7 @@ class TestSvdAPI(unittest.TestCase): for place in places: with fluid.program_guard(fluid.Program(), fluid.Program()): a = np.random.rand(5, 5) - x = paddle.fluid.data( + x = paddle.static.data( name="input", shape=[5, 5], dtype='float64' ) u, s, vh = paddle.linalg.svd(x) diff --git a/python/paddle/fluid/tests/unittests/test_switch_case.py b/python/paddle/fluid/tests/unittests/test_switch_case.py index 5a1e8fb451b63d3a0e3c9175d8571efd0b42c6a4..de8763b27c3844b9b9312b5d99bc0091abf290c3 100644 --- a/python/paddle/fluid/tests/unittests/test_switch_case.py +++ b/python/paddle/fluid/tests/unittests/test_switch_case.py @@ -441,7 +441,9 @@ class TestAPISwitchCase_Nested(unittest.TestCase): main_program = Program() startup_program = Program() with program_guard(main_program, startup_program): - index_1 = fluid.data(name="index_1", shape=[1], dtype='uint8') + index_1 = paddle.static.data( + name="index_1", shape=[1], dtype='uint8' + ) index_2 = paddle.tensor.fill_constant( shape=[1], dtype='int32', value=2 ) @@ 
-540,7 +542,9 @@ class TestAPISwitchCase_Nested(unittest.TestCase): main_program = Program() startup_program = Program() with program_guard(main_program, startup_program): - index_1 = fluid.data(name="index_1", shape=[1], dtype='uint8') + index_1 = paddle.static.data( + name="index_1", shape=[1], dtype='uint8' + ) index_2 = paddle.full(shape=[], dtype='int32', fill_value=2) index_3 = paddle.full(shape=[], dtype='int64', fill_value=3) diff --git a/python/paddle/fluid/tests/unittests/test_take.py b/python/paddle/fluid/tests/unittests/test_take.py index bf16efa87ab899a2e4c0baffadc628d2ac9f544e..6a4b9702c5cfa20c012a5c93b991d2a5b6e03675 100644 --- a/python/paddle/fluid/tests/unittests/test_take.py +++ b/python/paddle/fluid/tests/unittests/test_take.py @@ -55,10 +55,10 @@ class TestTakeAPI(unittest.TestCase): startup_program = Program() train_program = Program() with program_guard(startup_program, train_program): - x = fluid.data( + x = paddle.static.data( name='input', dtype=self.input_dtype, shape=self.input_shape ) - index = fluid.data( + index = paddle.static.data( name='index', dtype=self.index_dtype, shape=self.index_shape ) out = paddle.take(x, index, mode=self.mode) @@ -116,7 +116,7 @@ class TestTakeTypeError(TestTakeAPI): """Argument 'index' must be Tensor""" paddle.enable_static() with program_guard(Program()): - x = fluid.data( + x = paddle.static.data( name='input', dtype=self.input_dtype, shape=self.input_shape ) self.assertRaises( @@ -132,10 +132,10 @@ class TestTakeTypeError(TestTakeAPI): """Data type of argument 'index' must be in [paddle.int32, paddle.int64]""" paddle.enable_static() with program_guard(Program()): - x = fluid.data( + x = paddle.static.data( name='input', dtype='float64', shape=self.input_shape ) - index = fluid.data( + index = paddle.static.data( name='index', dtype='float32', shape=self.index_shape ) self.assertRaises(TypeError, paddle.take, x, index, self.mode) @@ -184,10 +184,10 @@ class TestTakeModeRaisePos(unittest.TestCase): an error is reported directly through `paddle.index_select`""" paddle.enable_static() with program_guard(Program()): - x = fluid.data( + x = paddle.static.data( name='input', dtype=self.input_dtype, shape=self.input_shape ) - index = fluid.data( + index = paddle.static.data( name='index', dtype=self.index_dtype, shape=self.index_shape ) self.assertRaises(ValueError, paddle.index_select, x, index) diff --git a/python/paddle/fluid/tests/unittests/test_take_along_axis_op.py b/python/paddle/fluid/tests/unittests/test_take_along_axis_op.py index 7abd86d19f676ae7abab5e7cbc5dbaa6051572ef..b5a9c2169ff96b55fa97ea88bd281ddc8565eb1d 100644 --- a/python/paddle/fluid/tests/unittests/test_take_along_axis_op.py +++ b/python/paddle/fluid/tests/unittests/test_take_along_axis_op.py @@ -83,8 +83,8 @@ class TestTakeAlongAxisAPI(unittest.TestCase): def test_api_static(self): paddle.enable_static() with paddle.static.program_guard(paddle.static.Program()): - x = paddle.fluid.data('X', self.shape) - index = paddle.fluid.data('Index', self.index_shape, "int64") + x = paddle.static.data('X', self.shape) + index = paddle.static.data('Index', self.index_shape, "int64") out = paddle.take_along_axis(x, index, self.axis) exe = paddle.static.Executor(self.place[0]) res = exe.run( diff --git a/python/paddle/fluid/tests/unittests/test_trace_op.py b/python/paddle/fluid/tests/unittests/test_trace_op.py index b86422de074cf06ae42f3b60b30306ca010ae7ec..a2369448bc6886c298357b816fb666251e9bcd2d 100644 --- a/python/paddle/fluid/tests/unittests/test_trace_op.py +++ 
b/python/paddle/fluid/tests/unittests/test_trace_op.py @@ -72,7 +72,9 @@ class TestTraceOpCase2(TestTraceOp): class TestTraceAPICase(unittest.TestCase): def test_case1(self): case = np.random.randn(2, 20, 2, 3).astype('float32') - data1 = fluid.data(name='data1', shape=[2, 20, 2, 3], dtype='float32') + data1 = paddle.static.data( + name='data1', shape=[2, 20, 2, 3], dtype='float32' + ) out1 = tensor.trace(data1) out2 = tensor.trace(data1, offset=-5, axis1=1, axis2=-1) diff --git a/python/paddle/fluid/tests/unittests/test_transpose_op.py b/python/paddle/fluid/tests/unittests/test_transpose_op.py index 54b98551e2b5a528bb0a1048c2a20e93e17f5765..e0ed1f2f201d3514bb7503075bd077b4f3e0df2b 100644 --- a/python/paddle/fluid/tests/unittests/test_transpose_op.py +++ b/python/paddle/fluid/tests/unittests/test_transpose_op.py @@ -456,7 +456,7 @@ class TestTransposeApi(unittest.TestCase): class TestTAPI(unittest.TestCase): def test_out(self): with fluid.program_guard(fluid.Program()): - data = fluid.data(shape=[10], dtype="float64", name="data") + data = paddle.static.data(shape=[10], dtype="float64", name="data") data_t = paddle.t(data) place = fluid.CPUPlace() exe = fluid.Executor(place) @@ -466,7 +466,9 @@ class TestTAPI(unittest.TestCase): self.assertEqual((result == expected_result).all(), True) with fluid.program_guard(fluid.Program()): - data = fluid.data(shape=[10, 5], dtype="float64", name="data") + data = paddle.static.data( + shape=[10, 5], dtype="float64", name="data" + ) data_t = paddle.t(data) place = fluid.CPUPlace() exe = fluid.Executor(place) @@ -476,7 +478,9 @@ class TestTAPI(unittest.TestCase): self.assertEqual((result == expected_result).all(), True) with fluid.program_guard(fluid.Program()): - data = fluid.data(shape=[1, 5], dtype="float64", name="data") + data = paddle.static.data( + shape=[1, 5], dtype="float64", name="data" + ) data_t = paddle.t(data) place = fluid.CPUPlace() exe = fluid.Executor(place) @@ -511,7 +515,7 @@ class TestTAPI(unittest.TestCase): def test_errors(self): with fluid.program_guard(fluid.Program()): - x = fluid.data(name='x', shape=[10, 5, 3], dtype='float64') + x = paddle.static.data(name='x', shape=[10, 5, 3], dtype='float64') def test_x_dimension_check(): paddle.t(x) diff --git a/python/paddle/fluid/tests/unittests/test_triangular_solve_op.py b/python/paddle/fluid/tests/unittests/test_triangular_solve_op.py index 802cf4f9a62942c33b11e1263e7e2e4bcff42043..2bd6853f73fdc9cbf9237fdc83eb3c1a32bc36ea 100644 --- a/python/paddle/fluid/tests/unittests/test_triangular_solve_op.py +++ b/python/paddle/fluid/tests/unittests/test_triangular_solve_op.py @@ -258,8 +258,8 @@ class TestTriangularSolveAPI(unittest.TestCase): def check_static_result(self, place): with fluid.program_guard(fluid.Program(), fluid.Program()): - x = fluid.data(name="x", shape=[3, 3], dtype=self.dtype) - y = fluid.data(name="y", shape=[3, 2], dtype=self.dtype) + x = paddle.static.data(name="x", shape=[3, 3], dtype=self.dtype) + y = paddle.static.data(name="y", shape=[3, 2], dtype=self.dtype) z = paddle.linalg.triangular_solve(x, y) x_np = np.random.random([3, 3]).astype(self.dtype) @@ -310,35 +310,35 @@ class TestTriangularSolveOpError(unittest.TestCase): self.assertRaises(TypeError, paddle.linalg.triangular_solve, x1, y1) # The data type of input must be float32 or float64. 
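# [Editor's note] The same sketch for ``paddle.linalg.triangular_solve``,
# matching the [3, 3] / [3, 2] shapes used in check_static_result above
# (the generated data and the NumPy cross-check are assumptions added
# for illustration):
#
#     import numpy as np
#     import paddle
#
#     paddle.enable_static()
#     with paddle.static.program_guard(paddle.static.Program()):
#         x = paddle.static.data(name='x', shape=[3, 3], dtype='float64')
#         y = paddle.static.data(name='y', shape=[3, 2], dtype='float64')
#         # Treats x as upper-triangular by default (upper=True).
#         z = paddle.linalg.triangular_solve(x, y)
#         exe = paddle.static.Executor(paddle.CPUPlace())
#         x_np = np.triu(np.random.rand(3, 3) + np.eye(3))  # nonsingular
#         y_np = np.random.rand(3, 2)
#         (res,) = exe.run(feed={'x': x_np, 'y': y_np}, fetch_list=[z])
#         np.testing.assert_allclose(
#             res, np.linalg.solve(x_np, y_np), rtol=1e-6
#         )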
- x2 = fluid.data(name="x2", shape=[30, 30], dtype="bool") - y2 = fluid.data(name="y2", shape=[30, 10], dtype="bool") + x2 = paddle.static.data(name="x2", shape=[30, 30], dtype="bool") + y2 = paddle.static.data(name="y2", shape=[30, 10], dtype="bool") self.assertRaises(TypeError, paddle.linalg.triangular_solve, x2, y2) - x3 = fluid.data(name="x3", shape=[30, 30], dtype="int32") - y3 = fluid.data(name="y3", shape=[30, 10], dtype="int32") + x3 = paddle.static.data(name="x3", shape=[30, 30], dtype="int32") + y3 = paddle.static.data(name="y3", shape=[30, 10], dtype="int32") self.assertRaises(TypeError, paddle.linalg.triangular_solve, x3, y3) - x4 = fluid.data(name="x4", shape=[30, 30], dtype="float16") - y4 = fluid.data(name="y4", shape=[30, 10], dtype="float16") + x4 = paddle.static.data(name="x4", shape=[30, 30], dtype="float16") + y4 = paddle.static.data(name="y4", shape=[30, 10], dtype="float16") self.assertRaises(TypeError, paddle.linalg.triangular_solve, x4, y4) # The number of dimensions of input'X must be >= 2. - x5 = fluid.data(name="x5", shape=[30], dtype="float64") - y5 = fluid.data(name="y5", shape=[30, 30], dtype="float64") + x5 = paddle.static.data(name="x5", shape=[30], dtype="float64") + y5 = paddle.static.data(name="y5", shape=[30, 30], dtype="float64") self.assertRaises( ValueError, paddle.linalg.triangular_solve, x5, y5 ) # The number of dimensions of input'Y must be >= 2. - x6 = fluid.data(name="x6", shape=[30, 30], dtype="float64") - y6 = fluid.data(name="y6", shape=[30], dtype="float64") + x6 = paddle.static.data(name="x6", shape=[30, 30], dtype="float64") + y6 = paddle.static.data(name="y6", shape=[30], dtype="float64") self.assertRaises( ValueError, paddle.linalg.triangular_solve, x6, y6 ) # The inner-most 2 dimensions of input'X should be equal to each other - x7 = fluid.data(name="x7", shape=[2, 3, 4], dtype="float64") - y7 = fluid.data(name="y7", shape=[2, 4, 3], dtype="float64") + x7 = paddle.static.data(name="x7", shape=[2, 3, 4], dtype="float64") + y7 = paddle.static.data(name="y7", shape=[2, 4, 3], dtype="float64") self.assertRaises( ValueError, paddle.linalg.triangular_solve, x7, y7 ) diff --git a/python/paddle/fluid/tests/unittests/test_tril_triu_op.py b/python/paddle/fluid/tests/unittests/test_tril_triu_op.py index f35fc67bb2d8430142f84e259b52b84275dad87e..ee62930019da6bd2d62cb9da845796e424f0193f 100644 --- a/python/paddle/fluid/tests/unittests/test_tril_triu_op.py +++ b/python/paddle/fluid/tests/unittests/test_tril_triu_op.py @@ -78,7 +78,9 @@ def case_generator(op_type, Xshape, diagonal, expected): def test_failure(self): paddle.enable_static() - data = fluid.data(shape=Xshape, dtype='float64', name=cls_name) + data = paddle.static.data( + shape=Xshape, dtype='float64', name=cls_name + ) with self.assertRaisesRegex( eval(expected.split(':')[-1]), errmsg[expected] ): @@ -143,7 +145,9 @@ class TestTrilTriuOpAPI(unittest.TestCase): startup_prog = Program() with program_guard(prog, startup_prog): data = np.random.random([1, 9, 9, 4]).astype(dtype) - x = fluid.data(shape=[1, 9, -1, 4], dtype=dtype, name='x') + x = paddle.static.data( + shape=[1, 9, -1, 4], dtype=dtype, name='x' + ) tril_out, triu_out = tensor.tril(x), tensor.triu(x) place = ( @@ -184,7 +188,9 @@ class TestTrilTriuOpAPI(unittest.TestCase): startup_prog = Program() with program_guard(prog, startup_prog): data = np.random.random([1, 9, 9, 4]).astype(dtype) - x = fluid.data(shape=[1, 9, -1, 4], dtype=dtype, name='x') + x = paddle.static.data( + shape=[1, 9, -1, 4], dtype=dtype, name='x' + ) triu_out 
= paddle.triu(x) place = ( diff --git a/python/paddle/fluid/tests/unittests/test_trunc_op.py b/python/paddle/fluid/tests/unittests/test_trunc_op.py index db45b36b563020f388a933ea369964deaed7f4ac..8d8f0ce2b3b8b4bda321b0e331528870da57750f 100644 --- a/python/paddle/fluid/tests/unittests/test_trunc_op.py +++ b/python/paddle/fluid/tests/unittests/test_trunc_op.py @@ -68,7 +68,7 @@ class TestTruncAPI(unittest.TestCase): def test_api_static(self): paddle.enable_static() with paddle.static.program_guard(paddle.static.Program()): - x = paddle.fluid.data('X', self.shape) + x = paddle.static.data('X', self.shape) out = paddle.trunc(x) exe = paddle.static.Executor(self.place) res = exe.run(feed={'X': self.x}, fetch_list=[out]) @@ -86,7 +86,7 @@ class TestTruncAPI(unittest.TestCase): def test_errors(self): with paddle.static.program_guard(paddle.static.Program()): - x = paddle.fluid.data('X', [20, 20], 'bool') + x = paddle.static.data('X', [20, 20], 'bool') self.assertRaises(TypeError, paddle.trunc, x) diff --git a/python/paddle/fluid/tests/unittests/test_unbind_op.py b/python/paddle/fluid/tests/unittests/test_unbind_op.py index cf1beb5bc87d3a5aa4ad597009f07112bf81d0aa..9df2e1958d4c2462a576f92f9459b718166f41f4 100644 --- a/python/paddle/fluid/tests/unittests/test_unbind_op.py +++ b/python/paddle/fluid/tests/unittests/test_unbind_op.py @@ -27,10 +27,10 @@ class TestUnbind(unittest.TestCase): def test_unbind(self): paddle.enable_static() - x_1 = fluid.data(shape=[2, 3], dtype='float32', name='x_1') + x_1 = paddle.static.data(shape=[2, 3], dtype='float32', name='x_1') [out_0, out_1] = tensor.unbind(input=x_1, axis=0) input_1 = np.random.random([2, 3]).astype("float32") - axis = fluid.data(shape=[1], dtype='int32', name='axis') + axis = paddle.static.data(shape=[1], dtype='int32', name='axis') exe = fluid.Executor(place=fluid.CPUPlace()) [res_1, res_2] = exe.run( @@ -85,10 +85,10 @@ class TestLayersUnbind(unittest.TestCase): def test_layers_unbind(self): paddle.enable_static() - x_1 = fluid.data(shape=[2, 3], dtype='float32', name='x_1') + x_1 = paddle.static.data(shape=[2, 3], dtype='float32', name='x_1') [out_0, out_1] = paddle.unbind(input=x_1, axis=0) input_1 = np.random.random([2, 3]).astype("float32") - axis = fluid.data(shape=[1], dtype='int32', name='axis') + axis = paddle.static.data(shape=[1], dtype='int32', name='axis') exe = fluid.Executor(place=fluid.CPUPlace()) [res_1, res_2] = exe.run( @@ -235,7 +235,7 @@ class TestUnbindBF16Op(OpTest): class TestUnbindAxisError(unittest.TestCase): def test_errors(self): with program_guard(Program(), Program()): - x = fluid.data(shape=[2, 3], dtype='float32', name='x') + x = paddle.static.data(shape=[2, 3], dtype='float32', name='x') def test_table_Variable(): tensor.unbind(input=x, axis=2.0) diff --git a/python/paddle/fluid/tests/unittests/test_uniform_random_bf16_op.py b/python/paddle/fluid/tests/unittests/test_uniform_random_bf16_op.py index 0dcdf0cc2502dc7b56cdef188207644462ca152b..032f22ae835e8613967b5cec78163974a2b5e3b5 100644 --- a/python/paddle/fluid/tests/unittests/test_uniform_random_bf16_op.py +++ b/python/paddle/fluid/tests/unittests/test_uniform_random_bf16_op.py @@ -243,7 +243,9 @@ class TestUniformRandomBatchSizeLikeOpBF16API(unittest.TestCase): startup_program = fluid.Program() train_program = fluid.Program() with fluid.program_guard(train_program, startup_program): - input = fluid.data(name="input", shape=[1, 3], dtype='uint16') + input = paddle.static.data( + name="input", shape=[1, 3], dtype='uint16' + ) out_1 = 
random.uniform_random_batch_size_like( input, [2, 4], dtype=np.uint16 ) # out_1.shape=[1, 4] diff --git a/python/paddle/fluid/tests/unittests/test_uniform_random_op.py b/python/paddle/fluid/tests/unittests/test_uniform_random_op.py index 3a03b7c0dce4870fd907de481bc3d2ead78fe300..77509851faba76fe0a87c210c5df664d9a40ca1a 100644 --- a/python/paddle/fluid/tests/unittests/test_uniform_random_op.py +++ b/python/paddle/fluid/tests/unittests/test_uniform_random_op.py @@ -360,7 +360,9 @@ class TestUniformRandomOp_attr_tensor_API(unittest.TestCase): startup_program = fluid.Program() train_program = fluid.Program() with fluid.program_guard(train_program, startup_program): - shape = fluid.data(name='shape_tensor', shape=[2], dtype="int32") + shape = paddle.static.data( + name='shape_tensor', shape=[2], dtype="int32" + ) ret = paddle.uniform(shape) place = fluid.CPUPlace() diff --git a/python/paddle/fluid/tests/unittests/test_unique.py b/python/paddle/fluid/tests/unittests/test_unique.py index b3ae10a6c335edbbaaff69150e1d796d4b6b6e16..5060e2fcf5fe30e462dd7024705b3cafa54cda0c 100644 --- a/python/paddle/fluid/tests/unittests/test_unique.py +++ b/python/paddle/fluid/tests/unittests/test_unique.py @@ -18,7 +18,6 @@ import numpy as np from op_test import OpTest import paddle -import paddle.fluid as fluid import paddle.fluid.core as core @@ -82,7 +81,7 @@ class TestUniqueRaiseError(unittest.TestCase): self.assertRaises(TypeError, test_type) def test_dtype(): - data = fluid.data(shape=[10], dtype="float16", name="input") + data = paddle.static.data(shape=[10], dtype="float16", name="input") paddle.unique(data) self.assertRaises(TypeError, test_dtype) @@ -295,7 +294,7 @@ class TestUniqueAPI(unittest.TestCase): with paddle.static.program_guard( paddle.static.Program(), paddle.static.Program() ): - x = paddle.fluid.data(name='x', shape=[3, 2], dtype='float64') + x = paddle.static.data(name='x', shape=[3, 2], dtype='float64') unique, inverse, counts = paddle.unique( x, return_inverse=True, return_counts=True, axis=0 ) @@ -320,14 +319,16 @@ class TestUniqueError(unittest.TestCase): with paddle.static.program_guard( paddle.static.Program(), paddle.static.Program() ): - x = paddle.fluid.data(name='x', shape=[10, 10], dtype='float16') + x = paddle.static.data( + name='x', shape=[10, 10], dtype='float16' + ) result = paddle.unique(x) self.assertRaises(TypeError, test_x_dtype) def test_attr(self): paddle.enable_static() - x = paddle.fluid.data(name='x', shape=[10, 10], dtype='float64') + x = paddle.static.data(name='x', shape=[10, 10], dtype='float64') def test_return_index(): result = paddle.unique(x, return_index=0) diff --git a/python/paddle/fluid/tests/unittests/test_unique_consecutive_op.py b/python/paddle/fluid/tests/unittests/test_unique_consecutive_op.py index 1cf7714844ce80cb634f1a03c831049707f9be27..970cddbe36bee3fe9ffb1bac2542f2a1b6c04c72 100644 --- a/python/paddle/fluid/tests/unittests/test_unique_consecutive_op.py +++ b/python/paddle/fluid/tests/unittests/test_unique_consecutive_op.py @@ -206,7 +206,7 @@ class TestUniqueConsecutiveAPI(unittest.TestCase): def check_static_result(self, place): with fluid.program_guard(fluid.Program(), fluid.Program()): paddle.enable_static() - input_x = fluid.data( + input_x = paddle.static.data( name="input_x", shape=[ 100, @@ -243,7 +243,7 @@ class TestUniqueConsecutiveCase2API(unittest.TestCase): def check_static_result(self, place): with fluid.program_guard(fluid.Program(), fluid.Program()): paddle.enable_static() - input_x = fluid.data( + input_x = paddle.static.data( 
name="input_x", shape=[ 100, @@ -284,7 +284,7 @@ class TestUniqueConsecutiveCase3API(unittest.TestCase): def check_static_result(self, place): with fluid.program_guard(fluid.Program(), fluid.Program()): paddle.enable_static() - input_x = fluid.data( + input_x = paddle.static.data( name="input_x", shape=[ 100, diff --git a/python/paddle/fluid/tests/unittests/test_unique_with_counts.py b/python/paddle/fluid/tests/unittests/test_unique_with_counts.py index ccbf56c66473bde41938977c10bd0c53de4d7668..a4f553ddc43c87c73ecc48874b07e4cbe76bfe15 100644 --- a/python/paddle/fluid/tests/unittests/test_unique_with_counts.py +++ b/python/paddle/fluid/tests/unittests/test_unique_with_counts.py @@ -18,7 +18,6 @@ import numpy as np from op_test import OpTest import paddle -import paddle.fluid as fluid import paddle.fluid.core as core @@ -88,7 +87,7 @@ class TestUniqueWithCountsRaiseError(unittest.TestCase): self.assertRaises(TypeError, test_type) def test_dtype(): - data = fluid.data(shape=[10], dtype="float16", name="input") + data = paddle.static.data(shape=[10], dtype="float16", name="input") paddle.unique(data) self.assertRaises(TypeError, test_dtype) diff --git a/python/paddle/fluid/tests/unittests/test_unpool1d_op.py b/python/paddle/fluid/tests/unittests/test_unpool1d_op.py index 72a314618597ec1d2fc88ba4a2e49906fa4667a0..2adf212f9ff78f8404bbcbf295f7458f0b0b880f 100644 --- a/python/paddle/fluid/tests/unittests/test_unpool1d_op.py +++ b/python/paddle/fluid/tests/unittests/test_unpool1d_op.py @@ -148,7 +148,7 @@ class TestUnpool1DOpAPI_static(unittest.TestCase): input_data = np.array( [[[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12]]] ).astype("float32") - x = paddle.fluid.data( + x = paddle.static.data( name='x', shape=[1, 3, 4], dtype='float32' ) output, indices = F.max_pool1d( diff --git a/python/paddle/fluid/tests/unittests/test_unpool3d_op.py b/python/paddle/fluid/tests/unittests/test_unpool3d_op.py index aa4da0b7c107857eabc4c296074fd86d194a5970..ea2c3cc3d2f8710bbba79a56e8fa36f8d53413f9 100644 --- a/python/paddle/fluid/tests/unittests/test_unpool3d_op.py +++ b/python/paddle/fluid/tests/unittests/test_unpool3d_op.py @@ -403,7 +403,7 @@ class TestUnpool3DOpAPI_static(unittest.TestCase): ] ] ).astype("float32") - x = paddle.fluid.data( + x = paddle.static.data( name='x', shape=[1, 1, 2, 4, 4], dtype='float32' ) output, indices = F.max_pool3d( diff --git a/python/paddle/fluid/tests/unittests/test_unpool_op.py b/python/paddle/fluid/tests/unittests/test_unpool_op.py index e5eefc067e89206f518de39b5a4cce43c2c991ab..16eb8ffb3a0a4c7d9fb6d012be73446ff67ca359 100644 --- a/python/paddle/fluid/tests/unittests/test_unpool_op.py +++ b/python/paddle/fluid/tests/unittests/test_unpool_op.py @@ -411,7 +411,7 @@ class TestUnpoolOpAPI_st(unittest.TestCase): [[[[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12], [13, 14, 15, 16]]]] ).astype("float32") - x = fluid.data(name="x", shape=[1, 1, 4, 4], dtype="float32") + x = paddle.static.data(name="x", shape=[1, 1, 4, 4], dtype="float32") output, indices = F.max_pool2d( x, kernel_size=2, stride=2, return_mask=True ) diff --git a/python/paddle/fluid/tests/unittests/test_unzip_op.py b/python/paddle/fluid/tests/unittests/test_unzip_op.py index 71caac8c0f2f80ddce73c24e6e2d77e9ecb7395c..0dbe8559711ca3e64a7ad2ee1942bffec103ab6a 100644 --- a/python/paddle/fluid/tests/unittests/test_unzip_op.py +++ b/python/paddle/fluid/tests/unittests/test_unzip_op.py @@ -29,8 +29,8 @@ class TestUnzipOp(unittest.TestCase): paddle.enable_static() if core.is_compiled_with_cuda(): place = fluid.CUDAPlace(0) - x = 
fluid.data(name='X', shape=[3, 4], dtype='float64') - lod = fluid.data(name='lod', shape=[11], dtype='int64') + x = paddle.static.data(name='X', shape=[3, 4], dtype='float64') + lod = paddle.static.data(name='lod', shape=[11], dtype='int64') output = paddle.incubate.operators.unzip(x, lod) input = [ diff --git a/python/paddle/fluid/tests/unittests/test_update_loss_scaling_op.py b/python/paddle/fluid/tests/unittests/test_update_loss_scaling_op.py index 9cef4d721674e585ca87200883df96abd065558f..8b15157c846dc087085a45ff818091beb9212688 100644 --- a/python/paddle/fluid/tests/unittests/test_update_loss_scaling_op.py +++ b/python/paddle/fluid/tests/unittests/test_update_loss_scaling_op.py @@ -17,6 +17,7 @@ import unittest import numpy as np from op_test import OpTest +import paddle import paddle.fluid as fluid import paddle.static.amp.amp_nn as amp_nn @@ -136,17 +137,19 @@ class TestUpdateLossScalingOpBad(TestUpdateLossScalingOp): class TestUpdateLossScalingLayer(unittest.TestCase): def loss_scaling_check(self, use_cuda=True, scope=fluid.Scope()): - a = fluid.data(name="a", shape=[1024, 1024], dtype='float32') - b = fluid.data(name="b", shape=[512, 128], dtype='float32') + a = paddle.static.data(name="a", shape=[1024, 1024], dtype='float32') + b = paddle.static.data(name="b", shape=[512, 128], dtype='float32') x = [a, b] - found_inf = fluid.data(name="found_inf", shape=[1], dtype='bool') - prev_loss_scaling = fluid.data( + found_inf = paddle.static.data( + name="found_inf", shape=[1], dtype='bool' + ) + prev_loss_scaling = paddle.static.data( name="prev_loss_scaling", shape=[1], dtype='float32' ) - num_good_steps = fluid.data( + num_good_steps = paddle.static.data( name="num_good_steps", shape=[1], dtype='int32' ) - num_bad_steps = fluid.data( + num_bad_steps = paddle.static.data( name="num_bad_steps", shape=[1], dtype='int32' ) @@ -207,17 +210,19 @@ class TestUpdateLossScalingLayer(unittest.TestCase): assert np.array_equal(result_v[7], np.zeros_like(num_bad_steps_v)) def loss_scaling_check_inf(self, use_cuda=True, scope=fluid.Scope()): - a = fluid.data(name="a", shape=[1024, 1024], dtype='float32') - b = fluid.data(name="b", shape=[512, 128], dtype='float32') + a = paddle.static.data(name="a", shape=[1024, 1024], dtype='float32') + b = paddle.static.data(name="b", shape=[512, 128], dtype='float32') x = [a, b] - found_inf = fluid.data(name="found_inf", shape=[1], dtype='bool') - prev_loss_scaling = fluid.data( + found_inf = paddle.static.data( + name="found_inf", shape=[1], dtype='bool' + ) + prev_loss_scaling = paddle.static.data( name="prev_loss_scaling", shape=[1], dtype='float32' ) - num_good_steps = fluid.data( + num_good_steps = paddle.static.data( name="num_good_steps", shape=[1], dtype='int32' ) - num_bad_steps = fluid.data( + num_bad_steps = paddle.static.data( name="num_bad_steps", shape=[1], dtype='int32' ) diff --git a/python/paddle/fluid/tests/unittests/test_variance_layer.py b/python/paddle/fluid/tests/unittests/test_variance_layer.py index 6d9338542ddc1773d74ca22f8801c52f965945aa..2613fb91b15c0f7ca0b14ed16abe4a4bfda46ab1 100644 --- a/python/paddle/fluid/tests/unittests/test_variance_layer.py +++ b/python/paddle/fluid/tests/unittests/test_variance_layer.py @@ -48,7 +48,7 @@ class TestVarAPI(unittest.TestCase): def static(self): with paddle.static.program_guard(paddle.static.Program()): - x = paddle.fluid.data('X', self.shape, self.dtype) + x = paddle.static.data('X', self.shape, self.dtype) out = paddle.var(x, self.axis, self.unbiased, self.keepdim) exe = 
paddle.static.Executor(self.place) res = exe.run(feed={'X': self.x}, fetch_list=[out]) @@ -115,7 +115,7 @@ class TestVarAPI_alias(unittest.TestCase): class TestVarError(unittest.TestCase): def test_error(self): with paddle.static.program_guard(paddle.static.Program()): - x = paddle.fluid.data('X', [2, 3, 4], 'int32') + x = paddle.static.data('X', [2, 3, 4], 'int32') self.assertRaises(TypeError, paddle.var, x) diff --git a/python/paddle/fluid/tests/unittests/test_viterbi_decode_op.py b/python/paddle/fluid/tests/unittests/test_viterbi_decode_op.py index f6801d4f089d7ae2db462e6b0ae9f995916795aa..6bb21611f8310f5947999bac8718c1d3aab22faf 100644 --- a/python/paddle/fluid/tests/unittests/test_viterbi_decode_op.py +++ b/python/paddle/fluid/tests/unittests/test_viterbi_decode_op.py @@ -124,13 +124,15 @@ class TestViterbiAPI(unittest.TestCase): def check_static_result(self, place): bz, length, ntags = self.bz, self.len, self.ntags with fluid.program_guard(fluid.Program(), fluid.Program()): - Input = fluid.data( + Input = paddle.static.data( name="Input", shape=[bz, length, ntags], dtype="float32" ) - Transition = fluid.data( + Transition = paddle.static.data( name="Transition", shape=[ntags, ntags], dtype="float32" ) - Length = fluid.data(name="Length", shape=[bz], dtype="int64") + Length = paddle.static.data( + name="Length", shape=[bz], dtype="int64" + ) decoder = paddle.text.ViterbiDecoder(Transition, self.use_tag) score, path = decoder(Input, Length) exe = fluid.Executor(place) diff --git a/python/paddle/fluid/tests/unittests/test_warprnnt_op.py b/python/paddle/fluid/tests/unittests/test_warprnnt_op.py index 381ed400735dd3306699aaf22bf67abf7240d6e7..567ceb72f11467f6b37774c37d1bfeec319c6f5a 100644 --- a/python/paddle/fluid/tests/unittests/test_warprnnt_op.py +++ b/python/paddle/fluid/tests/unittests/test_warprnnt_op.py @@ -18,7 +18,6 @@ import numpy as np from op_test import OpTest import paddle -import paddle.fluid as fluid import paddle.fluid.core as core from paddle import _C_ops from paddle.fluid import Program, program_guard @@ -277,17 +276,21 @@ class TestWarpRNNTOpError(unittest.TestCase): def test_errors(self): print("test_errors") with program_guard(Program(), Program()): - logits = fluid.data(name='input', shape=[5, 16, 6], dtype='float32') - logits_length = fluid.data( + logits = paddle.static.data( + name='input', shape=[5, 16, 6], dtype='float32' + ) + logits_length = paddle.static.data( name='logit_lengths', shape=[None], dtype='int32' ) - label = fluid.data(name='labels', shape=[16, 3], dtype='int32') - label_length = fluid.data( + label = paddle.static.data( + name='labels', shape=[16, 3], dtype='int32' + ) + label_length = paddle.static.data( name='label_lengths', shape=[None], dtype='int32' ) def test_logits_Variable(): - logits_data = fluid.data( + logits_data = paddle.static.data( name='logits_data', shape=[5, 16, 6], dtype='int32' ) paddle.nn.functional.rnnt_loss( @@ -300,7 +303,7 @@ class TestWarpRNNTOpError(unittest.TestCase): self.assertRaises(TypeError, test_logits_Variable) def test_label_Variable(): - label_data = fluid.data( + label_data = paddle.static.data( name='label_data', shape=[16, 3], dtype='int64' ) paddle.nn.functional.rnnt_loss( @@ -313,7 +316,7 @@ class TestWarpRNNTOpError(unittest.TestCase): self.assertRaises(TypeError, test_label_Variable) def test_logits_len_Variable(): - logits_length_data = fluid.data( + logits_length_data = paddle.static.data( name='logits_length_data', shape=[None], dtype='int64' ) paddle.nn.functional.rnnt_loss( @@ -326,7 +329,7 @@ 
class TestWarpRNNTOpError(unittest.TestCase): self.assertRaises(TypeError, test_logits_len_Variable) def test_label_len_Variable(): - label_length_data = fluid.data( + label_length_data = paddle.static.data( name='label_length_data', shape=[None], dtype='int64' ) paddle.nn.functional.rnnt_loss( diff --git a/python/paddle/fluid/tests/unittests/test_while_loop_op.py b/python/paddle/fluid/tests/unittests/test_while_loop_op.py index 714e541f581accb28efd39efc04d76ed44a4d23e..cbb69319350a0965e5a8a130a270484b224e4809 100644 --- a/python/paddle/fluid/tests/unittests/test_while_loop_op.py +++ b/python/paddle/fluid/tests/unittests/test_while_loop_op.py @@ -72,7 +72,7 @@ class TestApiWhileLoop(unittest.TestCase): ten = paddle.tensor.fill_constant( shape=[1], dtype='int64', value=10 ) - mem = fluid.data(name='mem', shape=[10], dtype='float32') + mem = paddle.static.data(name='mem', shape=[10], dtype='float32') one = paddle.tensor.fill_constant( shape=[10], dtype='float32', value=1 ) @@ -205,8 +205,12 @@ class TestApiWhileLoop_Nested(unittest.TestCase): with program_guard(main_program, startup_program): i = layers.zeros(shape=[1], dtype='int64') j = layers.zeros(shape=[1], dtype='int64') - init = fluid.data(name='init', shape=[3, 3], dtype='float32') - sums = fluid.data(name='sums', shape=[3, 3], dtype='float32') + init = paddle.static.data( + name='init', shape=[3, 3], dtype='float32' + ) + sums = paddle.static.data( + name='sums', shape=[3, 3], dtype='float32' + ) loop_len1 = paddle.tensor.fill_constant( shape=[1], dtype='int64', value=2 ) @@ -254,7 +258,7 @@ class TestApiWhileLoop_Backward(unittest.TestCase): main_program = Program() startup_program = Program() with fluid.program_guard(main_program, startup_program): - i = fluid.data(name='i', shape=[1], dtype='float32') + i = paddle.static.data(name='i', shape=[1], dtype='float32') i.stop_gradient = False eleven = paddle.tensor.fill_constant( shape=[1], dtype='float32', value=11 @@ -262,7 +266,7 @@ class TestApiWhileLoop_Backward(unittest.TestCase): one = paddle.tensor.fill_constant( shape=[1], dtype='float32', value=1 ) - x = fluid.data(name='x', shape=[1], dtype='float32') + x = paddle.static.data(name='x', shape=[1], dtype='float32') x.stop_gradient = False out = paddle.static.nn.while_loop(cond, body, [i, x]) @@ -301,9 +305,9 @@ class TestApiWhileLoop_Backward(unittest.TestCase): main_program = Program() startup_program = Program() with fluid.program_guard(main_program, startup_program): - i = fluid.data(name='i', shape=[1], dtype='float32') + i = paddle.static.data(name='i', shape=[1], dtype='float32') i.stop_gradient = False - x = fluid.data(name='x', shape=[1], dtype='float32') + x = paddle.static.data(name='x', shape=[1], dtype='float32') x.stop_gradient = False out = paddle.static.nn.while_loop(cond, body, [i, x]) @@ -365,10 +369,10 @@ class TestApiWhileLoop_NestedWithBackwardAndLoDTensorArray(unittest.TestCase): main_program = Program() startup_program = Program() with fluid.program_guard(main_program, startup_program): - d0 = fluid.data(name='d0', shape=[10], dtype='float32') - d1 = fluid.data(name='d1', shape=[10], dtype='float32') - d2 = fluid.data(name='d2', shape=[10], dtype='float32') - x = fluid.data(name='x', shape=[10], dtype='float32') + d0 = paddle.static.data(name='d0', shape=[10], dtype='float32') + d1 = paddle.static.data(name='d1', shape=[10], dtype='float32') + d2 = paddle.static.data(name='d2', shape=[10], dtype='float32') + x = paddle.static.data(name='x', shape=[10], dtype='float32') x.stop_gradient = False i = 
layers.zeros(shape=[1], dtype='int64') i.stop_gradient = True diff --git a/python/paddle/fluid/tests/unittests/test_zeros_like_op.py b/python/paddle/fluid/tests/unittests/test_zeros_like_op.py index 8ab2ae07e948e721656e7a1d04e05f9e96af8a54..4a970d2acb3fbd444ed7f5e2ec6feb5d487e10b6 100644 --- a/python/paddle/fluid/tests/unittests/test_zeros_like_op.py +++ b/python/paddle/fluid/tests/unittests/test_zeros_like_op.py @@ -26,7 +26,7 @@ from paddle.fluid.framework import convert_np_dtype_to_dtype_ class TestZerosLikeAPIError(unittest.TestCase): def test_errors(self): with program_guard(Program(), Program()): - x = paddle.fluid.data('x', [3, 4]) + x = paddle.static.data('x', [3, 4]) self.assertRaises(TypeError, zeros_like, x, 'int8') @@ -36,7 +36,7 @@ class TestZerosLikeAPI(unittest.TestCase): startup_program = Program() train_program = Program() with program_guard(train_program, startup_program): - x = paddle.fluid.data('X', shape) + x = paddle.static.data('X', shape) out1 = zeros_like(x) out2 = zeros_like(x, np.bool_) out3 = zeros_like(x, 'float64') diff --git a/python/paddle/fluid/tests/unittests/xpu/test_activation_op_xpu.py b/python/paddle/fluid/tests/unittests/xpu/test_activation_op_xpu.py index 04931e5fb5355a5ef4857fd53b80cff045c077ab..e21153151d4c47ecbbeee8f8ce0576ebbb1e23ca 100644 --- a/python/paddle/fluid/tests/unittests/xpu/test_activation_op_xpu.py +++ b/python/paddle/fluid/tests/unittests/xpu/test_activation_op_xpu.py @@ -126,7 +126,7 @@ class TestSiluAPI(unittest.TestCase): def test_static_api(self): paddle.enable_static() with paddle.static.program_guard(paddle.static.Program()): - x = paddle.fluid.data('X', [11, 17]) + x = paddle.static.data('X', [11, 17]) out1 = F.silu(x) m = paddle.nn.Silu() out2 = m(x) @@ -152,12 +152,12 @@ class TestSiluAPI(unittest.TestCase): # The input type must be Variable. self.assertRaises(TypeError, F.silu, 1) # The input dtype must be float16, float32, float64. 
- x_int32 = paddle.fluid.data( + x_int32 = paddle.static.data( name='x_int32', shape=[11, 17], dtype='int32' ) self.assertRaises(TypeError, F.silu, x_int32) # support the input dtype is float16 - x_fp16 = paddle.fluid.data( + x_fp16 = paddle.static.data( name='x_fp16', shape=[11, 17], dtype='float16' ) F.silu(x_fp16) diff --git a/python/paddle/fluid/tests/unittests/xpu/test_adamw_op_xpu.py b/python/paddle/fluid/tests/unittests/xpu/test_adamw_op_xpu.py index 57b57adce3bfd915ab400cb59ee79c080dc3d096..0eb328a0fa183bfac861c6c62c2aae53656e1db6 100644 --- a/python/paddle/fluid/tests/unittests/xpu/test_adamw_op_xpu.py +++ b/python/paddle/fluid/tests/unittests/xpu/test_adamw_op_xpu.py @@ -196,7 +196,7 @@ class XPUTestAdamwOp2(XPUOpTestWrapper): startup = fluid.Program() with fluid.program_guard(train_prog, startup): with fluid.unique_name.guard(): - data = fluid.data(name="data", shape=shape) + data = paddle.static.data(name="data", shape=shape) conv = paddle.static.nn.conv2d(data, 8, 3) loss = paddle.mean(conv) @@ -453,8 +453,12 @@ class XPUTestAdamwOp2(XPUOpTestWrapper): startup = fluid.Program() with fluid.program_guard(train_prog, startup): with fluid.unique_name.guard(): - x = fluid.data(name='x', shape=[None, 10], dtype='float32') - y = fluid.data(name='y', shape=[None, 1], dtype='float32') + x = paddle.static.data( + name='x', shape=[None, 10], dtype='float32' + ) + y = paddle.static.data( + name='y', shape=[None, 1], dtype='float32' + ) weight_attr1 = paddle.framework.ParamAttr( name="linear_0.w_0" diff --git a/python/paddle/fluid/tests/unittests/xpu/test_assign_op_xpu.py b/python/paddle/fluid/tests/unittests/xpu/test_assign_op_xpu.py index 650658d77e549b0e7d8112bc2c1f98db8be918bc..97460b54aa310d3ff4306a6612a882f4a78e2a3b 100644 --- a/python/paddle/fluid/tests/unittests/xpu/test_assign_op_xpu.py +++ b/python/paddle/fluid/tests/unittests/xpu/test_assign_op_xpu.py @@ -67,10 +67,6 @@ class XPUTestAssignOP(XPUOpTestWrapper): def init_config(self): self.input_shape = [2, 768] - class XPUTestAssign2(TestAssignOPBase): - def init_config(self): - self.input_shape = [3, 8, 4096] - class XPUTestAssign3(TestAssignOPBase): def init_config(self): self.input_shape = [1024] diff --git a/python/paddle/fluid/tests/unittests/xpu/test_batch_norm_op_xpu.py b/python/paddle/fluid/tests/unittests/xpu/test_batch_norm_op_xpu.py index 3ee0469b6145d193cd8cf427c69574fc14947fdf..57b9fb25f2e843e29e94b1140410f055604b63d8 100644 --- a/python/paddle/fluid/tests/unittests/xpu/test_batch_norm_op_xpu.py +++ b/python/paddle/fluid/tests/unittests/xpu/test_batch_norm_op_xpu.py @@ -202,17 +202,17 @@ class XPUTestBatchNormOp(XPUOpTestWrapper): def test_infer(self): paddle.enable_static() with paddle.static.program_guard(paddle.static.Program()): - x = paddle.fluid.data('X', self.x_np.shape, self.x_np.dtype) - scale = paddle.fluid.data( + x = paddle.static.data('X', self.x_np.shape, self.x_np.dtype) + scale = paddle.static.data( 'Scale', self.scale_np.shape, self.scale_np.dtype ) - bias = paddle.fluid.data( + bias = paddle.static.data( 'Bias', self.bias_np.shape, self.bias_np.dtype ) - mean = paddle.fluid.data( + mean = paddle.static.data( 'Mean', self.mean_np.shape, self.mean_np.dtype ) - variance = paddle.fluid.data( + variance = paddle.static.data( 'Variance', self.variance_np.shape, self.variance_np.dtype ) y = F.batch_norm( diff --git a/python/paddle/fluid/tests/unittests/xpu/test_bilinear_interp_op_xpu.py b/python/paddle/fluid/tests/unittests/xpu/test_bilinear_interp_op_xpu.py index 
d4166b9e0da4d49dec5402660c3fa6911eeff1e6..dc8e996e09382704b331c715fa85b6a3207f1791 100755 --- a/python/paddle/fluid/tests/unittests/xpu/test_bilinear_interp_op_xpu.py +++ b/python/paddle/fluid/tests/unittests/xpu/test_bilinear_interp_op_xpu.py @@ -466,12 +466,12 @@ class TestBilinearInterp_attr_tensor_Case3(TestBilinearInterpOp_attr_tensor): "core is not compiled with XPU") class TestBilinearInterpOpAPI(unittest.TestCase): def test_case(self): - x = fluid.data(name="x", shape=[2, 3, 6, 6], dtype="float32") + x = paddle.static.data(name="x", shape=[2, 3, 6, 6], dtype="float32") - dim = fluid.data(name="dim", shape=[1], dtype="int32") - shape_tensor = fluid.data(name="shape_tensor", shape=[2], dtype="int32") - actual_size = fluid.data(name="actual_size", shape=[2], dtype="int32") - scale_tensor = fluid.data( + dim = paddle.static.data(name="dim", shape=[1], dtype="int32") + shape_tensor = paddle.static.data(name="shape_tensor", shape=[2], dtype="int32") + actual_size = paddle.static.data(name="actual_size", shape=[2], dtype="int32") + scale_tensor = paddle.static.data( name="scale_tensor", shape=[1], dtype="float32") out1 = fluid.layers.resize_bilinear(x, out_shape=[12, 12]) diff --git a/python/paddle/fluid/tests/unittests/xpu/test_clip_op_xpu.py b/python/paddle/fluid/tests/unittests/xpu/test_clip_op_xpu.py index 4bf88d40b7a98babc47fdb4a6a8f8e1c56e8c7a6..5aa2d78d343b40d273a891adf0ea7b6cd6bad4d2 100644 --- a/python/paddle/fluid/tests/unittests/xpu/test_clip_op_xpu.py +++ b/python/paddle/fluid/tests/unittests/xpu/test_clip_op_xpu.py @@ -155,9 +155,11 @@ class TestClipAPI(unittest.TestCase): paddle.enable_static() data_shape = [1, 9, 9, 4] data = np.random.random(data_shape).astype('float32') - images = fluid.data(name='image', shape=data_shape, dtype='float32') - min = fluid.data(name='min', shape=[1], dtype='float32') - max = fluid.data(name='max', shape=[1], dtype='float32') + images = paddle.static.data( + name='image', shape=data_shape, dtype='float32' + ) + min = paddle.static.data(name='min', shape=[1], dtype='float32') + max = paddle.static.data(name='max', shape=[1], dtype='float32') place = ( fluid.XPUPlace(0) @@ -221,8 +223,8 @@ class TestClipAPI(unittest.TestCase): def test_errors(self): paddle.enable_static() - x1 = fluid.data(name='x1', shape=[1], dtype="int16") - x2 = fluid.data(name='x2', shape=[1], dtype="int8") + x1 = paddle.static.data(name='x1', shape=[1], dtype="int16") + x2 = paddle.static.data(name='x2', shape=[1], dtype="int8") self.assertRaises(TypeError, paddle.clip, x=x1, min=0.2, max=0.8) self.assertRaises(TypeError, paddle.clip, x=x2, min=0.2, max=0.8) paddle.disable_static() diff --git a/python/paddle/fluid/tests/unittests/xpu/test_diagonal_op_xpu.py b/python/paddle/fluid/tests/unittests/xpu/test_diagonal_op_xpu.py index 04561ce3ed9ccf6fcb47d3c7b253cd4d1cc38ea9..968623157cbae06b7872ec151ed9d44cd0ed483f 100644 --- a/python/paddle/fluid/tests/unittests/xpu/test_diagonal_op_xpu.py +++ b/python/paddle/fluid/tests/unittests/xpu/test_diagonal_op_xpu.py @@ -137,7 +137,7 @@ class TestDiagonalAPI(unittest.TestCase): def test_api_static(self): paddle.enable_static() with paddle.static.program_guard(paddle.static.Program()): - x = paddle.fluid.data('X', self.shape) + x = paddle.static.data('X', self.shape) out = paddle.diagonal(x) exe = paddle.static.Executor(self.place) res = exe.run(feed={'X': self.x}, fetch_list=[out]) diff --git a/python/paddle/fluid/tests/unittests/xpu/test_elementwise_add_op_xpu.py b/python/paddle/fluid/tests/unittests/xpu/test_elementwise_add_op_xpu.py 
index fbe68be403285484c577ea38f16165031d79df4b..5e2fbebf22773cca74f93a48d401f1f5363e95d7 100644 --- a/python/paddle/fluid/tests/unittests/xpu/test_elementwise_add_op_xpu.py +++ b/python/paddle/fluid/tests/unittests/xpu/test_elementwise_add_op_xpu.py @@ -265,8 +265,8 @@ class XPUTestElementwiseAddOp(XPUOpTestWrapper): class TestAddOp(unittest.TestCase): def test_name(self): with fluid.program_guard(fluid.Program()): - x = fluid.data(name="x", shape=[2, 3], dtype="float32") - y = fluid.data(name='y', shape=[2, 3], dtype='float32') + x = paddle.static.data(name="x", shape=[2, 3], dtype="float32") + y = paddle.static.data(name='y', shape=[2, 3], dtype='float32') y_1 = paddle.add(x, y, name='add_res') self.assertEqual(('add_res' in y_1.name), True) @@ -280,8 +280,8 @@ class XPUTestElementwiseAddOp(XPUOpTestWrapper): "y": np.array([1, 5, 2]).astype('float32'), } - x = fluid.data(name="x", shape=[3], dtype='float32') - y = fluid.data(name="y", shape=[3], dtype='float32') + x = paddle.static.data(name="x", shape=[3], dtype='float32') + y = paddle.static.data(name="y", shape=[3], dtype='float32') z = paddle.add(x, y) place = fluid.XPUPlace(0) diff --git a/python/paddle/fluid/tests/unittests/xpu/test_elementwise_add_op_xpu_kp.py b/python/paddle/fluid/tests/unittests/xpu/test_elementwise_add_op_xpu_kp.py index e1306610401f4ebd7ca518c8c106fbeb4fae4d2a..7ddc852b5e7021507771667a306c0732b5d4fe9c 100644 --- a/python/paddle/fluid/tests/unittests/xpu/test_elementwise_add_op_xpu_kp.py +++ b/python/paddle/fluid/tests/unittests/xpu/test_elementwise_add_op_xpu_kp.py @@ -312,8 +312,8 @@ class TestElementwiseAddOp_xsize_lessthan_ysize_add(TestElementwiseAddOp): class TestAddOp(unittest.TestCase): def test_name(self): with fluid.program_guard(fluid.Program()): - x = fluid.data(name="x", shape=[2, 3], dtype="float32") - y = fluid.data(name='y', shape=[2, 3], dtype='float32') + x = paddle.static.data(name="x", shape=[2, 3], dtype="float32") + y = paddle.static.data(name='y', shape=[2, 3], dtype='float32') y_1 = paddle.add(x, y, name='add_res') self.assertEqual(('add_res' in y_1.name), True) @@ -327,8 +327,8 @@ class TestAddOp(unittest.TestCase): "y": np.array([1, 5, 2]).astype('float32'), } - x = fluid.data(name="x", shape=[3], dtype='float32') - y = fluid.data(name="y", shape=[3], dtype='float32') + x = paddle.static.data(name="x", shape=[3], dtype='float32') + y = paddle.static.data(name="y", shape=[3], dtype='float32') z = paddle.add(x, y) place = fluid.XPUPlace(0) diff --git a/python/paddle/fluid/tests/unittests/xpu/test_elementwise_div_op_xpu.py b/python/paddle/fluid/tests/unittests/xpu/test_elementwise_div_op_xpu.py index 10b8314b85a0b277995f89daac83b102edbe83ca..ef1936d2de4871461c745dc5d8858983a2357063 100644 --- a/python/paddle/fluid/tests/unittests/xpu/test_elementwise_div_op_xpu.py +++ b/python/paddle/fluid/tests/unittests/xpu/test_elementwise_div_op_xpu.py @@ -255,7 +255,7 @@ class XPUTestElementwiseDivOp(XPUOpTestWrapper): class TestElementwiseDivBroadcast(unittest.TestCase): def test_shape_with_batch_sizes(self): with fluid.program_guard(fluid.Program()): - x_var = fluid.data( + x_var = paddle.static.data( name='x', dtype='float32', shape=[None, 3, None, None] ) one = 2.0 diff --git a/python/paddle/fluid/tests/unittests/xpu/test_gaussian_random_op_xpu.py b/python/paddle/fluid/tests/unittests/xpu/test_gaussian_random_op_xpu.py index 7679baf1950b095fa4b84dd3b89f1e6fc71325b6..e6deee478de6b7920acb1cc7285eff2fae3d7b28 100644 --- a/python/paddle/fluid/tests/unittests/xpu/test_gaussian_random_op_xpu.py +++ 
b/python/paddle/fluid/tests/unittests/xpu/test_gaussian_random_op_xpu.py @@ -185,11 +185,11 @@ class TestGaussianRandomAPI(unittest.TestCase): positive_2_int32 = paddle.tensor.fill_constant([1], "int32", 2000) positive_2_int64 = paddle.tensor.fill_constant([1], "int64", 500) - shape_tensor_int32 = fluid.data( + shape_tensor_int32 = paddle.static.data( name="shape_tensor_int32", shape=[2], dtype="int32" ) - shape_tensor_int64 = fluid.data( + shape_tensor_int64 = paddle.static.data( name="shape_tensor_int64", shape=[2], dtype="int64" ) diff --git a/python/paddle/fluid/tests/unittests/xpu/test_index_sample_op_xpu.py b/python/paddle/fluid/tests/unittests/xpu/test_index_sample_op_xpu.py index 15d8e4abbd0dd67c0e1132d693a2b7621604c8a4..a78f84015de3bdf71bdd9a6a60a249e842aa1928 100644 --- a/python/paddle/fluid/tests/unittests/xpu/test_index_sample_op_xpu.py +++ b/python/paddle/fluid/tests/unittests/xpu/test_index_sample_op_xpu.py @@ -125,8 +125,8 @@ class TestIndexSampleShape(unittest.TestCase): low=0, high=x_shape[1], size=index_shape ).astype(index_type) - x = fluid.data(name='x', shape=[-1, 5], dtype='float32') - index = fluid.data(name='index', shape=[-1, 3], dtype='int32') + x = paddle.static.data(name='x', shape=[-1, 5], dtype='float32') + index = paddle.static.data(name='index', shape=[-1, 3], dtype='int32') output = paddle.index_sample(x=x, index=index) place = fluid.XPUPlace(0) diff --git a/python/paddle/fluid/tests/unittests/xpu/test_kldiv_loss_op_xpu.py b/python/paddle/fluid/tests/unittests/xpu/test_kldiv_loss_op_xpu.py index 861103061b160847ab34457820570e528aef8278..f77c3fd2303175f39d699a6340789b7549efc66c 100644 --- a/python/paddle/fluid/tests/unittests/xpu/test_kldiv_loss_op_xpu.py +++ b/python/paddle/fluid/tests/unittests/xpu/test_kldiv_loss_op_xpu.py @@ -121,8 +121,8 @@ class XPUTestKLDivLossOp(XPUOpTestWrapper): self.run_kl_loss('none') def test_kl_loss_static_api(self): - input = paddle.fluid.data(name='input', shape=[5, 20]) - label = paddle.fluid.data(name='label', shape=[5, 20]) + input = paddle.static.data(name='input', shape=[5, 20]) + label = paddle.static.data(name='label', shape=[5, 20]) paddle.nn.functional.kl_div(input, label) diff --git a/python/paddle/fluid/tests/unittests/xpu/test_masked_select_op_xpu.py b/python/paddle/fluid/tests/unittests/xpu/test_masked_select_op_xpu.py index 64d52d077fe83e2232e47d12f97a6d1c2b1b0eb1..d526dae396ddeac90b9864d673ad87009327455f 100644 --- a/python/paddle/fluid/tests/unittests/xpu/test_masked_select_op_xpu.py +++ b/python/paddle/fluid/tests/unittests/xpu/test_masked_select_op_xpu.py @@ -95,8 +95,8 @@ class TestMaskedSelectAPI(unittest.TestCase): def test_static_mode(self): shape = [8, 9, 6] - x = paddle.fluid.data(shape=shape, dtype='float32', name='x') - mask = paddle.fluid.data(shape=shape, dtype='bool', name='mask') + x = paddle.static.data(shape=shape, dtype='float32', name='x') + mask = paddle.static.data(shape=shape, dtype='bool', name='mask') np_x = np.random.random(shape).astype('float32') np_mask = np.array(np.random.randint(2, size=shape, dtype=bool)) @@ -120,9 +120,9 @@ class TestMaskedSelectError(unittest.TestCase): ): shape = [8, 9, 6] - x = paddle.fluid.data(shape=shape, dtype='float32', name='x') - mask = paddle.fluid.data(shape=shape, dtype='bool', name='mask') - mask_float = paddle.fluid.data( + x = paddle.static.data(shape=shape, dtype='float32', name='x') + mask = paddle.static.data(shape=shape, dtype='bool', name='mask') + mask_float = paddle.static.data( shape=shape, dtype='float32', name='mask_float' ) np_x = 
np.random.random(shape).astype('float32') diff --git a/python/paddle/fluid/tests/unittests/xpu/test_matmul_op_xpu.py b/python/paddle/fluid/tests/unittests/xpu/test_matmul_op_xpu.py index c04cc72be4d6fd324d2e996c33ece1144e91e6eb..a89d7ae810e1dd79fd6006bdcb22fa189631bb1a 100644 --- a/python/paddle/fluid/tests/unittests/xpu/test_matmul_op_xpu.py +++ b/python/paddle/fluid/tests/unittests/xpu/test_matmul_op_xpu.py @@ -142,9 +142,11 @@ class XPUTestMatmulOpErr(XPUOpTestWrapper): class API_TestMm(unittest.TestCase): def test_out(self): with fluid.program_guard(fluid.Program()): - x = fluid.data(name="x", shape=[2], dtype=self.in_type) - y = fluid.data(name='y', shape=[2], dtype=self.in_type) - res = fluid.data(name="output", shape=[1], dtype=self.in_type) + x = paddle.static.data(name="x", shape=[2], dtype=self.in_type) + y = paddle.static.data(name='y', shape=[2], dtype=self.in_type) + res = paddle.static.data( + name="output", shape=[1], dtype=self.in_type + ) result = paddle.mm(x, y) exe = fluid.Executor(fluid.XPUPlace(0)) data1 = np.random.rand(2).astype(self.in_type) @@ -193,10 +195,10 @@ class XPUTestMatmulOpErr(XPUOpTestWrapper): def test_errors(self): def test_error1(): with fluid.program_guard(fluid.Program(), fluid.Program()): - data1 = fluid.data( + data1 = paddle.static.data( name="data1", shape=[10, 2], dtype="float32" ) - data2 = fluid.data( + data2 = paddle.static.data( name="data2", shape=[3, 10], dtype="float32" ) paddle.mm(data1, data2) @@ -205,10 +207,10 @@ class XPUTestMatmulOpErr(XPUOpTestWrapper): def test_error2(): with fluid.program_guard(fluid.Program(), fluid.Program()): - data1 = fluid.data( + data1 = paddle.static.data( name="data1", shape=[-1, 10, 2], dtype="float32" ) - data2 = fluid.data( + data2 = paddle.static.data( name="data2", shape=[-1, 2, 10], dtype="float32" ) paddle.mm(data1, data2) @@ -217,10 +219,10 @@ class XPUTestMatmulOpErr(XPUOpTestWrapper): def test_error3(): with fluid.program_guard(fluid.Program(), fluid.Program()): - data1 = fluid.data( + data1 = paddle.static.data( name="data1", shape=[10, 10, 2], dtype="float32" ) - data2 = fluid.data( + data2 = paddle.static.data( name="data2", shape=[3, 2, 10], dtype="float32" ) paddle.mm(data1, data2) diff --git a/python/paddle/fluid/tests/unittests/xpu/test_nearest_interp_op_xpu.py b/python/paddle/fluid/tests/unittests/xpu/test_nearest_interp_op_xpu.py index 42ea4032e1f4bff68844d5c33480f4b89309c2d1..441439838cbcd0e7b1a4e79330885f90467f8d24 100644 --- a/python/paddle/fluid/tests/unittests/xpu/test_nearest_interp_op_xpu.py +++ b/python/paddle/fluid/tests/unittests/xpu/test_nearest_interp_op_xpu.py @@ -404,7 +404,7 @@ class TestNearestInterp_attr_tensor_Case3(TestNearestInterpOp_attr_tensor): "core is not compiled with XPU") class TestNearestInterpException(unittest.TestCase): def test_exception(self): - input = fluid.data(name="input", shape=[1, 3, 6, 6], dtype="float32") + input = paddle.static.data(name="input", shape=[1, 3, 6, 6], dtype="float32") def attr_data_format(): # for 4-D input, data_format can only be NCHW or NHWC diff --git a/python/paddle/fluid/tests/unittests/xpu/test_pad3d_op_xpu.py b/python/paddle/fluid/tests/unittests/xpu/test_pad3d_op_xpu.py index 73af305086700ee2759b788b5e43b8356365c8d8..9425f70159015c52fe40845944224014e7b8e097 100644 --- a/python/paddle/fluid/tests/unittests/xpu/test_pad3d_op_xpu.py +++ b/python/paddle/fluid/tests/unittests/xpu/test_pad3d_op_xpu.py @@ -188,8 +188,8 @@ class XPUTestPad3dOp(XPUOpTestWrapper): mode = "constant" value = 100 input_data = 
np.random.rand(*input_shape).astype(self.dtype) - x = paddle.fluid.data( - name="x", shape=input_shape, dtype=self.dtype + x = paddle.static.data( + name="x", shape=input_shape, dtype="float32" ) result = F.pad( x=x, pad=pad, value=value, mode=mode, data_format="NCDHW" @@ -211,8 +211,8 @@ class XPUTestPad3dOp(XPUOpTestWrapper): pad = [1, 2, 1, 1, 1, 2] mode = "reflect" input_data = np.random.rand(*input_shape).astype(self.dtype) - x = paddle.fluid.data( - name="x", shape=input_shape, dtype=self.dtype + x = paddle.static.data( + name="x", shape=input_shape, dtype="float32" ) result1 = F.pad(x=x, pad=pad, mode=mode, data_format="NCDHW") result2 = F.pad(x=x, pad=pad, mode=mode, data_format="NDHWC") @@ -239,8 +239,8 @@ class XPUTestPad3dOp(XPUOpTestWrapper): pad = [1, 2, 1, 1, 3, 4] mode = "replicate" input_data = np.random.rand(*input_shape).astype(self.dtype) - x = paddle.fluid.data( - name="x", shape=input_shape, dtype=self.dtype + x = paddle.static.data( + name="x", shape=input_shape, dtype="float32" ) result1 = F.pad(x=x, pad=pad, mode=mode, data_format="NCDHW") result2 = F.pad(x=x, pad=pad, mode=mode, data_format="NDHWC") diff --git a/python/paddle/fluid/tests/unittests/xpu/test_prelu_op_xpu.py b/python/paddle/fluid/tests/unittests/xpu/test_prelu_op_xpu.py index 666c29f7fcaa8d521968b0fe092878669af0bd14..043d5436d9fd8b9f82ccf210a3d359aec0a129f8 100644 --- a/python/paddle/fluid/tests/unittests/xpu/test_prelu_op_xpu.py +++ b/python/paddle/fluid/tests/unittests/xpu/test_prelu_op_xpu.py @@ -184,7 +184,7 @@ class TestModeError(unittest.TestCase): def test_mode_error(self): main_program = Program() with fluid.program_guard(main_program, Program()): - x = fluid.data(name='x', shape=[2, 3, 4, 5]) + x = paddle.static.data(name='x', shape=[2, 3, 4, 5]) try: y = prelu_t(x, 'any') except Exception as e: @@ -193,7 +193,7 @@ class TestModeError(unittest.TestCase): def test_data_format_error1(self): main_program = Program() with fluid.program_guard(main_program, Program()): - x = fluid.data(name='x', shape=[2, 3, 4, 5]) + x = paddle.static.data(name='x', shape=[2, 3, 4, 5]) try: y = prelu_t(x, 'channel', data_format='N') except Exception as e: @@ -202,7 +202,7 @@ class TestModeError(unittest.TestCase): def test_data_format_error2(self): main_program = Program() with fluid.program_guard(main_program, Program()): - x = fluid.data(name='x', shape=[2, 3, 4, 5]) + x = paddle.static.data(name='x', shape=[2, 3, 4, 5]) try: y = paddle.static.nn.prelu(x, 'channel', data_format='N') except ValueError as e: diff --git a/python/paddle/fluid/tests/unittests/xpu/test_prod_op_xpu.py b/python/paddle/fluid/tests/unittests/xpu/test_prod_op_xpu.py index 369cfb4ac96b8762717df2ba633144a5fa9e4678..1fb907f9f09244a563cfbe10f245dfc6e5491160 100644 --- a/python/paddle/fluid/tests/unittests/xpu/test_prod_op_xpu.py +++ b/python/paddle/fluid/tests/unittests/xpu/test_prod_op_xpu.py @@ -74,7 +74,7 @@ class TestProdOp(unittest.TestCase): ) def run_static(self): - input = paddle.fluid.data( + input = paddle.static.data( name='input', shape=[10, 10, 5], dtype='float32' ) result0 = paddle.prod(input) @@ -146,8 +146,8 @@ class TestProdOpError(unittest.TestCase): with paddle.static.program_guard( paddle.static.Program(), paddle.static.Program() ): - x = paddle.fluid.data(name='x', shape=[2, 2, 4], dtype='float32') - bool_x = paddle.fluid.data( + x = paddle.static.data(name='x', shape=[2, 2, 4], dtype='float32') + bool_x = paddle.static.data( name='bool_x', shape=[2, 2, 4], dtype='bool' ) # The argument x shoule be a Tensor diff --git 
a/python/paddle/fluid/tests/unittests/xpu/test_sum_op_xpu.py b/python/paddle/fluid/tests/unittests/xpu/test_sum_op_xpu.py index 285d7cbfb007d177909166d6883ac51609f1d4c6..99d7091ca41fcc0f5ee5f2f63eda035639a58b00 100644 --- a/python/paddle/fluid/tests/unittests/xpu/test_sum_op_xpu.py +++ b/python/paddle/fluid/tests/unittests/xpu/test_sum_op_xpu.py @@ -135,14 +135,14 @@ class TestRaiseSumError(unittest.TestCase): self.assertRaises(TypeError, test_type) def test_dtype(): - data1 = fluid.data(name="input1", shape=[10], dtype="int8") - data2 = fluid.data(name="input2", shape=[10], dtype="int8") + data1 = paddle.static.data(name="input1", shape=[10], dtype="int8") + data2 = paddle.static.data(name="input2", shape=[10], dtype="int8") paddle.add_n([data1, data2]) self.assertRaises(TypeError, test_dtype) def test_dtype1(): - data1 = fluid.data(name="input1", shape=[10], dtype="int8") + data1 = paddle.static.data(name="input1", shape=[10], dtype="int8") paddle.add_n(data1) self.assertRaises(TypeError, test_dtype1) @@ -156,30 +156,38 @@ class TestRaiseSumsError(unittest.TestCase): self.assertRaises(TypeError, test_type) def test_dtype(): - data1 = fluid.data(name="input1", shape=[10], dtype="int8") - data2 = fluid.data(name="input2", shape=[10], dtype="int8") + data1 = paddle.static.data(name="input1", shape=[10], dtype="int8") + data2 = paddle.static.data(name="input2", shape=[10], dtype="int8") paddle.add_n([data1, data2]) self.assertRaises(TypeError, test_dtype) def test_dtype1(): - data1 = fluid.data(name="input1", shape=[10], dtype="int8") + data1 = paddle.static.data(name="input1", shape=[10], dtype="int8") paddle.add_n(data1) self.assertRaises(TypeError, test_dtype1) def test_out_type(): - data1 = fluid.data(name="input1", shape=[10], dtype="flaot32") - data2 = fluid.data(name="input2", shape=[10], dtype="float32") + data1 = paddle.static.data( + name="input1", shape=[10], dtype="flaot32" + ) + data2 = paddle.static.data( + name="input2", shape=[10], dtype="float32" + ) out = [10] out = paddle.add_n([data1, data2]) self.assertRaises(TypeError, test_out_type) def test_out_dtype(): - data1 = fluid.data(name="input1", shape=[10], dtype="flaot32") - data2 = fluid.data(name="input2", shape=[10], dtype="float32") - out = fluid.data(name="out", shape=[10], dtype="int8") + data1 = paddle.static.data( + name="input1", shape=[10], dtype="flaot32" + ) + data2 = paddle.static.data( + name="input2", shape=[10], dtype="float32" + ) + out = paddle.static.data(name="out", shape=[10], dtype="int8") out = paddle.add_n([data1, data2]) self.assertRaises(TypeError, test_out_dtype) diff --git a/python/paddle/fluid/tests/unittests/xpu/test_tril_triu_op_xpu.py b/python/paddle/fluid/tests/unittests/xpu/test_tril_triu_op_xpu.py index 85dbdeeadea991a9e143363c844613d81c38a7a0..d66e75c523884b0740a5dc09466ee6a85f852f27 100644 --- a/python/paddle/fluid/tests/unittests/xpu/test_tril_triu_op_xpu.py +++ b/python/paddle/fluid/tests/unittests/xpu/test_tril_triu_op_xpu.py @@ -25,7 +25,6 @@ from xpu.get_test_cover_info import ( ) import paddle -import paddle.fluid as fluid import paddle.tensor as tensor paddle.enable_static() @@ -136,7 +135,7 @@ class XPUTestTrilTriuOp(XPUOpTestWrapper): class TestTrilTriuOpError(unittest.TestCase): def test_errors1(self): paddle.enable_static() - data = fluid.data(shape=(20, 22), dtype='float32', name="data1") + data = paddle.static.data(shape=(20, 22), dtype='float32', name="data1") op_type = np.random.choice(['triu', 'tril']) errmsg = { "diagonal: TypeError": "diagonal in {} must be a python 
Int".format( @@ -151,7 +150,7 @@ class TestTrilTriuOpError(unittest.TestCase): def test_errors2(self): paddle.enable_static() - data = fluid.data(shape=(200,), dtype='float32', name="data2") + data = paddle.static.data(shape=(200,), dtype='float32', name="data2") op_type = np.random.choice(['triu', 'tril']) errmsg = { "input: ValueError": "x shape in {} must be at least 2-D".format( diff --git a/python/paddle/fluid/tests/unittests/xpu/test_unbind_op_xpu.py b/python/paddle/fluid/tests/unittests/xpu/test_unbind_op_xpu.py index 74592c10245f35b7cbf2c368a93330b0bc2c2eac..49c72b671953393a0e500c0bf4537673aed61d48 100644 --- a/python/paddle/fluid/tests/unittests/xpu/test_unbind_op_xpu.py +++ b/python/paddle/fluid/tests/unittests/xpu/test_unbind_op_xpu.py @@ -42,10 +42,10 @@ class XPUTestUnbindOP(XPUOpTestWrapper): def test_unbind(self): self.dtype = self.in_type self.place = paddle.XPUPlace(0) - x_1 = fluid.data(shape=[2, 3], dtype=self.dtype, name='x_1') + x_1 = paddle.static.data(shape=[2, 3], dtype=self.dtype, name='x_1') [out_0, out_1] = tensor.unbind(input=x_1, axis=0) input_1 = np.random.random([2, 3]).astype(self.dtype) - axis = fluid.data(shape=[1], dtype='int32', name='axis') + axis = paddle.static.data(shape=[1], dtype='int32', name='axis') exe = fluid.Executor(place=self.place) [res_1, res_2] = exe.run( @@ -82,10 +82,10 @@ class XPUTestUnbindOP(XPUOpTestWrapper): def test_layers_unbind(self): self.dtype = self.in_type self.place = paddle.XPUPlace(0) - x_1 = fluid.data(shape=[2, 3], dtype=self.dtype, name='x_1') + x_1 = paddle.static.data(shape=[2, 3], dtype=self.dtype, name='x_1') [out_0, out_1] = paddle.unbind(input=x_1, axis=0) input_1 = np.random.random([2, 3]).astype(self.dtype) - axis = fluid.data(shape=[1], dtype='int32', name='axis') + axis = paddle.static.data(shape=[1], dtype='int32', name='axis') exe = fluid.Executor(place=self.place) [res_1, res_2] = exe.run( @@ -194,7 +194,7 @@ class XPUTestUnbindOP(XPUOpTestWrapper): with program_guard(Program(), Program()): self.dtype = self.in_type self.place = paddle.XPUPlace(0) - x = fluid.data(shape=[2, 3], dtype=self.dtype, name='x') + x = paddle.static.data(shape=[2, 3], dtype=self.dtype, name='x') def test_table_Variable(): tensor.unbind(input=x, axis=2.0) diff --git a/python/paddle/fluid/tests/unittests/xpu/test_update_loss_scaling_op_xpu.py b/python/paddle/fluid/tests/unittests/xpu/test_update_loss_scaling_op_xpu.py index 44d79efc97bfd1c66ce3cbaaa53b8354cd65712b..e8a1faff3327c50f5c075cd6bf3852c3c42a5bed 100644 --- a/python/paddle/fluid/tests/unittests/xpu/test_update_loss_scaling_op_xpu.py +++ b/python/paddle/fluid/tests/unittests/xpu/test_update_loss_scaling_op_xpu.py @@ -111,17 +111,21 @@ class XPUTestUpdateLossScalingOp(XPUOpTestWrapper): class TestUpdateLossScalingLayer(unittest.TestCase): def loss_scaling_check(self, scope=fluid.Scope()): - a = fluid.data(name="a", shape=[1024, 1024], dtype='float32') - b = fluid.data(name="b", shape=[512, 128], dtype='float32') + a = paddle.static.data( + name="a", shape=[1024, 1024], dtype='float32' + ) + b = paddle.static.data(name="b", shape=[512, 128], dtype='float32') x = [a, b] - found_inf = fluid.data(name="found_inf", shape=[1], dtype='bool') - prev_loss_scaling = fluid.data( + found_inf = paddle.static.data( + name="found_inf", shape=[1], dtype='bool' + ) + prev_loss_scaling = paddle.static.data( name="prev_loss_scaling", shape=[1], dtype='float32' ) - num_good_steps = fluid.data( + num_good_steps = paddle.static.data( name="num_good_steps", shape=[1], dtype='int32' ) - 
num_bad_steps = fluid.data( + num_bad_steps = paddle.static.data( name="num_bad_steps", shape=[1], dtype='int32' ) @@ -182,17 +186,21 @@ class XPUTestUpdateLossScalingOp(XPUOpTestWrapper): assert np.array_equal(result_v[7], np.zeros_like(num_bad_steps_v)) def loss_scaling_check_inf(self, use_cuda=True, scope=fluid.Scope()): - a = fluid.data(name="a", shape=[1024, 1024], dtype='float32') - b = fluid.data(name="b", shape=[512, 128], dtype='float32') + a = paddle.static.data( + name="a", shape=[1024, 1024], dtype='float32' + ) + b = paddle.static.data(name="b", shape=[512, 128], dtype='float32') x = [a, b] - found_inf = fluid.data(name="found_inf", shape=[1], dtype='bool') - prev_loss_scaling = fluid.data( + found_inf = paddle.static.data( + name="found_inf", shape=[1], dtype='bool' + ) + prev_loss_scaling = paddle.static.data( name="prev_loss_scaling", shape=[1], dtype='float32' ) - num_good_steps = fluid.data( + num_good_steps = paddle.static.data( name="num_good_steps", shape=[1], dtype='int32' ) - num_bad_steps = fluid.data( + num_bad_steps = paddle.static.data( name="num_bad_steps", shape=[1], dtype='int32' ) diff --git a/python/paddle/fluid/tests/unittests/xpu/test_warpctc_op_xpu.py b/python/paddle/fluid/tests/unittests/xpu/test_warpctc_op_xpu.py index 1a5866cc3d19e29cc8bee6cc49c504f276391cba..3cda4eaac1840b50db5d1ecfb00ede324124bacf 100644 --- a/python/paddle/fluid/tests/unittests/xpu/test_warpctc_op_xpu.py +++ b/python/paddle/fluid/tests/unittests/xpu/test_warpctc_op_xpu.py @@ -27,7 +27,6 @@ from xpu.get_test_cover_info import ( ) import paddle -import paddle.fluid as fluid import paddle.nn.functional as F from paddle.fluid import Program, program_guard @@ -341,14 +340,16 @@ class XPUTestWarpCTCOp(XPUOpTestWrapper): self.dtype = self.in_type self.place = paddle.XPUPlace(0) with program_guard(Program(), Program()): - logits = fluid.data( + logits = paddle.static.data( name='logits', shape=[5, 16, 6], dtype=self.dtype ) - logits_length = fluid.data( + logits_length = paddle.static.data( name='logits_length', shape=[None], dtype='int64' ) - label = fluid.data(name='label', shape=[16, 3], dtype='int32') - label_length = fluid.data( + label = paddle.static.data( + name='label', shape=[16, 3], dtype='int32' + ) + label_length = paddle.static.data( name='labels_length', shape=[None], dtype='int64' ) diff --git a/python/paddle/fluid/tests/unittests/xpu/test_where_op_xpu.py b/python/paddle/fluid/tests/unittests/xpu/test_where_op_xpu.py index 77b0e3d202e8b53b183d2afc7cb9f27028d291c4..06316d5a4cd1b8c1c4188a886b1ee431f4addb16 100644 --- a/python/paddle/fluid/tests/unittests/xpu/test_where_op_xpu.py +++ b/python/paddle/fluid/tests/unittests/xpu/test_where_op_xpu.py @@ -106,11 +106,15 @@ class TestXPUWhereAPI(unittest.TestCase): train_prog = fluid.Program() startup = fluid.Program() with fluid.program_guard(train_prog, startup): - cond = fluid.data( + cond = paddle.static.data( name='cond', shape=self.shape, dtype='bool' ) - x = fluid.data(name='x', shape=self.shape, dtype='float32') - y = fluid.data(name='y', shape=self.shape, dtype='float32') + x = paddle.static.data( + name='x', shape=self.shape, dtype='float32' + ) + y = paddle.static.data( + name='y', shape=self.shape, dtype='float32' + ) x.stop_gradient = x_stop_gradient y.stop_gradient = y_stop_gradient diff --git a/python/paddle/jit/dy2static/program_translator.py b/python/paddle/jit/dy2static/program_translator.py index e6bdf04f1373f27345d1afd9bd1fd5827ef5cf2a..dc47184f6516f9090783b96f21d56cf6f7fa67cc 100644 --- 
a/python/paddle/jit/dy2static/program_translator.py +++ b/python/paddle/jit/dy2static/program_translator.py @@ -993,7 +993,7 @@ class ConcreteProgram: with framework.program_guard(main_program, startup_program): with _switch_declarative_mode_guard_(is_declarative=True): - # 1. Adds `fluid.data` layers for input if needed + # 1. Adds `paddle.static.data` layers for input if needed static_inputs = func_spec.to_static_inputs_with_spec( input_spec, main_program ) diff --git a/python/paddle/nn/clip.py b/python/paddle/nn/clip.py index 4b7e6215b2d3bdc230e88211ce8c35cf1466749e..d0274b4eb9178cf60c32dea37ec2205863acf488 100644 --- a/python/paddle/nn/clip.py +++ b/python/paddle/nn/clip.py @@ -969,7 +969,7 @@ def set_gradient_clip(clip, param_list=None, program=None): paddle.enable_static() def network(): - image = fluid.data(name='image', shape=[ + image = paddle.static.data(name='image', shape=[ None, 28], dtype='float32') param_attr1 = fluid.ParamAttr("fc1_param") fc1 = fluid.layers.fc(image, size=10, param_attr=param_attr1) diff --git a/python/paddle/reader/decorator.py b/python/paddle/reader/decorator.py index 61e474c857f23a06bda70bfb879e3dcd03beae57..5902c88130e282d7afd077ed8555e14627e86d69 100644 --- a/python/paddle/reader/decorator.py +++ b/python/paddle/reader/decorator.py @@ -527,7 +527,7 @@ def multiprocess_reader(readers, use_pipe=True, queue_size=1000): Example: .. code-block:: python - + import paddle import paddle.fluid as fluid from paddle.fluid.io import multiprocess_reader import numpy as np @@ -556,7 +556,7 @@ def multiprocess_reader(readers, use_pipe=True, queue_size=1000): with fluid.program_guard(fluid.Program(), fluid.Program()): place = fluid.CPUPlace() # the 1st 2 is batch size - image = fluid.data(name='image', dtype='int64', shape=[2, 1, 2]) + image = paddle.static.data(name='image', dtype='int64', shape=[2, 1, 2]) fluid.layers.Print(image) # print detailed tensor info of image variable diff --git a/python/paddle/static/io.py b/python/paddle/static/io.py index 3227dc6c67dd39c7c7ef30dd6d02e6b62fa1a3be..45143b4ad956a75e1588a37b7ecfd285e8cce34e 100644 --- a/python/paddle/static/io.py +++ b/python/paddle/static/io.py @@ -1721,7 +1721,7 @@ def get_program_persistable_vars(program): import paddle.static.io as io import paddle.fluid as fluid paddle.enable_static() - data = fluid.data(name="img", shape=[64, 784]) + data = paddle.static.data(name="img", shape=[64, 784]) w = paddle.create_parameter(shape=[784, 200], dtype='float32', name='fc_w') b = paddle.create_parameter(shape=[200], dtype='float32', name='fc_b') list_para = io.get_program_persistable_vars( fluid.default_main_program() ) diff --git a/python/paddle/tensor/attribute.py b/python/paddle/tensor/attribute.py index c79c9553c2f0802db750d75c802877cdcbd2a2c6..679bf3d811847a3dbf7c9a00f8d79eb71d0d89d6 100644 --- a/python/paddle/tensor/attribute.py +++ b/python/paddle/tensor/attribute.py @@ -92,7 +92,7 @@ def shape(input): import paddle paddle.enable_static() - inputs = fluid.data(name="x", shape=[3, 100, 100], dtype="float32") + inputs = paddle.static.data(name="x", shape=[3, 100, 100], dtype="float32") output = paddle.shape(inputs) exe = fluid.Executor(fluid.CPUPlace()) diff --git a/python/paddle/tensor/random.py b/python/paddle/tensor/random.py index e3fc96f3854002c3abd7575c0f11250dfd96c91f..a556a66fc15dc77ea77a7408691641e16d6952fb 100644 --- a/python/paddle/tensor/random.py +++ b/python/paddle/tensor/random.py @@ -258,7 +258,7 @@ def uniform_random_batch_size_like( from paddle.tensor import random paddle.enable_static() # 
example 1: - input = fluid.data(name="input", shape=[1, 3], dtype='float32') + input = paddle.static.data(name="input", shape=[1, 3], dtype='float32') out_1 = random.uniform_random_batch_size_like(input, [2, 4]) # out_1.shape=[1, 4] # example 2: out_2 = random.uniform_random_batch_size_like(input, [2, 4], input_dim_idx=1, output_dim_idx=1) # out_2.shape=[2, 3] diff --git a/python/paddle/tests/test_metrics.py b/python/paddle/tests/test_metrics.py index c604c7088e9422ee4944b133890a787dc64f8e48..af668217d50e8ec058c655fb6b6556ad2c490a4c 100644 --- a/python/paddle/tests/test_metrics.py +++ b/python/paddle/tests/test_metrics.py @@ -205,10 +205,12 @@ class TestAccuracyStatic(TestAccuracyDynamic): main_prog.random_seed = 1024 startup_prog.random_seed = 1024 with fluid.program_guard(main_prog, startup_prog): - pred = fluid.data( + pred = paddle.static.data( name='pred', shape=[None, self.class_num], dtype='float32' ) - label = fluid.data(name='label', shape=[None, 1], dtype='int64') + label = paddle.static.data( + name='label', shape=[None, 1], dtype='int64' + ) acc = paddle.metric.Accuracy(topk=self.topk, name=self.name) state = acc.compute(pred, label)
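
Note on the pattern applied throughout this patch: every hunk performs the same mechanical substitution, replacing `fluid.data` / `paddle.fluid.data` with `paddle.static.data` while keeping the `name`, `shape`, and `dtype` arguments unchanged. A minimal sketch of the replacement in isolation is below; the variable name `x`, the `paddle.abs` op, and the program/executor scaffolding are illustrative only and not taken from any hunk in this patch.

.. code-block:: python

    import numpy as np
    import paddle

    # paddle.static.data builds graph inputs, so static mode must be on.
    paddle.enable_static()

    with paddle.static.program_guard(paddle.static.Program()):
        # Drop-in replacement for the former fluid.data(...) call:
        # same name/shape/dtype keywords.
        x = paddle.static.data(name='x', shape=[None, 3], dtype='float32')
        out = paddle.abs(x)

        exe = paddle.static.Executor(paddle.CPUPlace())
        (res,) = exe.run(
            feed={'x': np.ones((2, 3), dtype='float32')},
            fetch_list=[out],
        )
        print(res.shape)  # (2, 3)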
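Tests that imported `paddle.fluid as fluid` solely for `fluid.data` also drop that import (as the hunks for test_unique.py, test_unique_with_counts.py, test_warprnnt_op.py, test_tril_triu_op_xpu.py, and test_warpctc_op_xpu.py do above); files that still use `fluid.Program`, `fluid.program_guard`, or `fluid.Executor` keep it.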