Unverified commit 5670644c authored by GGBond8488, committed by GitHub

Fluid clean remove io data (#49301)

* replace paddle.fluid.layers.data and remove io.data

* partial commit

* partial commit

* partial commit

* partial commit

* partial commit

* partial commit

* remove data in fluid.layers.io.__all__

* fix errors

* fix unittests

* fix unittest

* fix unittests

* fix unittest

* fix unittest

* fix unittests

* fix unittest

* fix test_layers unittests

* fix typo

* fix unittest

* fix unittest

* fix unittest

* fix typo

* fix unittest test_model_cast_to_bf16

* fix test_reducescatter

* fix collective unittest

* fix collective unittests

* fix collective unittests

* add coverage

* fix add layers.data

* re-run CI

* fix some typos

* fix sample code error

* fix sample code error
Parent 64b3f2f6
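In short, this PR migrates static-graph inputs from the deprecated fluid.layers.data to paddle.static.data. A minimal before/after sketch of the pattern applied throughout this diff (the name, shape, and dtype are illustrative, not taken from any one file):

    import paddle

    paddle.enable_static()

    # Before (deprecated): fluid.layers.data prepended the batch
    # dimension automatically, so shape=[13] actually built [-1, 13].
    # x = fluid.layers.data(name='x', shape=[13], dtype='float32')

    # After: paddle.static.data takes the full shape, so the batch
    # dimension must be written out explicitly as -1.
    x = paddle.static.data(name='x', shape=[-1, 13], dtype='float32')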
......@@ -98,10 +98,12 @@ def fused_embedding_seq_pool(
.. code-block:: python
import numpy as np
import paddle.fluid as fluid
import paddle
paddle.enable_static()
dict_size = 20
data_t = fluid.layers.data(
name='word', shape=[1], dtype='int64', lod_level=1)
data_t = paddle.static.data(
name='word', shape=[-1, 1], dtype='int64', lod_level=1)
padding_idx = np.random.randint(1, 10)
out = fluid.contrib.fused_embedding_seq_pool(
input=data_t,
......@@ -305,11 +307,13 @@ def multiclass_nms2(
import paddle.fluid as fluid
boxes = fluid.layers.data(name='bboxes', shape=[81, 4],
import paddle
paddle.enable_static()
boxes = paddle.static.data(name='bboxes', shape=[-1, 81, 4],
dtype='float32', lod_level=1)
scores = fluid.layers.data(name='scores', shape=[81],
scores = paddle.static.data(name='scores', shape=[-1, 81],
dtype='float32', lod_level=1)
out, index = fluid.layers.multiclass_nms2(bboxes=boxes,
out, index = fluid.contrib.layers.multiclass_nms2(bboxes=boxes,
scores=scores,
background_label=0,
score_threshold=0.5,
......@@ -501,7 +505,9 @@ def shuffle_batch(x, seed=None):
.. code-block:: python
import paddle.fluid as fluid
x = fluid.layers.data(name="x", shape=[-1, 4])
import paddle
paddle.enable_static()
x = paddle.static.data(name="x", shape=[-1, 4])
out = fluid.contrib.layers.shuffle_batch(x)
"""
helper = LayerHelper('shuffle_batch', **locals())
......@@ -1313,7 +1319,7 @@ def _pull_box_extended_sparse(input, size, extend_size=64, dtype='float32'):
Examples:
.. code-block:: python
import paddle.fluid as fluid
data = fluid.layers.data(name='sequence', shape=[1], dtype='int64', lod_level=1)
data = paddle.static.data(name='sequence', shape=[-1, 1], dtype='int64', lod_level=1)
emb, emb_ex = fluid.contrib.layers._pull_box_extended_sparse(input=data, size=8, extend_size=128)
"""
helper = LayerHelper('pull_box_extended_sparse', **locals())
......@@ -1438,15 +1444,14 @@ def correlation(
.. code-block:: python
import paddle.fluid as fluid
x1 = fluid.layers.data(name='x1',
shape=x_shape,
dtype=x_type,
append_batch_size=False)
x2 = fluid.layers.data(name='x2',
shape=x_shape,
dtype=x_type,
append_batch_size=False)
import paddle
paddle.enable_static()
x1 = paddle.static.data(name='x1',
shape=[2,3,4,5],
dtype="float32")
x2 = paddle.static.data(name='x2',
shape=[2,3,4,5],
dtype="float32")
out = fluid.contrib.correlation(
......@@ -1555,8 +1560,8 @@ def fused_bn_add_act(
# required: gpu
def build_program(main_program, startup_program):
with fluid.program_guard(main_program, startup_program):
x = fluid.layers.data(name='x', shape=[1, 28, 28], dtype='float32')
y = fluid.layers.data(name="y", shape=[1], dtype='int64')
x = paddle.static.data(name='x', shape=[-1, 1, 28, 28], dtype='float32')
y = paddle.static.data(name="y", shape=[-1, 1], dtype='int64')
conv1_1 = paddle.static.nn.conv2d(
input=x,
filter_size=3,
......
......@@ -85,20 +85,20 @@ class TestCorrelationOp(unittest.TestCase):
np.set_printoptions(threshold=np.inf)
x_shape = (2, 10, 3, 3)
x_type = 'float32'
x1 = fluid.layers.data(
x1 = paddle.static.data(
name='x1',
shape=x_shape,
dtype=x_type,
append_batch_size=False,
stop_gradient=False,
)
x2 = fluid.layers.data(
x1.desc.set_need_check_feed(False)
x1.stop_gradient = False
x2 = paddle.static.data(
name='x2',
shape=x_shape,
dtype=x_type,
append_batch_size=False,
stop_gradient=False,
)
x2.desc.set_need_check_feed(False)
x2.stop_gradient = False
x1_np = np.random.randn(2, 3, 4, 5).astype(x_type)
x2_np = np.random.randn(2, 3, 4, 5).astype(x_type)
......
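Note the pattern used here and echoed in the collective tests below: paddle.static.data checks the shape and dtype of fed data and accepts no stop_gradient argument, so the old fluid.layers.data behavior is restored explicitly after creation. A minimal sketch, reusing this test's shape:

    import paddle

    paddle.enable_static()

    x = paddle.static.data(name='x1', shape=[2, 10, 3, 3], dtype='float32')
    x.desc.set_need_check_feed(False)  # skip the shape/dtype check at feed time
    x.stop_gradient = False            # let gradients flow back into the input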
......@@ -110,10 +110,10 @@ def train(net_type, use_cuda, save_dirname, is_local):
train_program.random_seed = 123
startup_prog.random_seed = 456
with fluid.program_guard(train_program, startup_prog):
images = fluid.layers.data(
name='pixel', shape=data_shape, dtype='float32'
images = paddle.static.data(
name='pixel', shape=[-1] + data_shape, dtype='float32'
)
label = fluid.layers.data(name='label', shape=[1], dtype='int64')
label = paddle.static.data(name='label', shape=[-1, 1], dtype='int64')
if net_type == "vgg":
print("train vgg net")
......@@ -444,11 +444,11 @@ class TestAmpWithNonIterableDataLoader(unittest.TestCase):
start_prog = paddle.static.Program()
with paddle.static.program_guard(main_prog, start_prog):
with paddle.fluid.unique_name.guard():
image = fluid.layers.data(
name='image', shape=[3, 224, 224], dtype='float32'
image = paddle.static.data(
name='image', shape=[-1, 3, 224, 224], dtype='float32'
)
label = fluid.layers.data(
name='label', shape=[1], dtype='int64'
label = paddle.static.data(
name='label', shape=[-1, 1], dtype='int64'
)
py_reader = fluid.io.DataLoader.from_generator(
feed_list=[image, label],
......
......@@ -96,14 +96,22 @@ class TestModelCastBF16(unittest.TestCase):
nn_bf16 = amp.bf16.convert_float_to_uint16(nn)
with self.static_graph():
t_bf16 = layers.data(
name='t_bf16', shape=[size, size], dtype=np.int32
t_bf16 = paddle.static.data(
name='t_bf16', shape=[-1, size, size], dtype='int32'
)
tt_bf16 = layers.data(
name='tt_bf16', shape=[size, size], dtype=np.int32
t_bf16.desc.set_need_check_feed(False)
tt_bf16 = paddle.static.data(
name='tt_bf16', shape=[-1, size, size], dtype='int32'
)
t = layers.data(name='t', shape=[size, size], dtype='float32')
tt = layers.data(name='tt', shape=[size, size], dtype='float32')
tt_bf16.desc.set_need_check_feed(False)
t = paddle.static.data(
name='t', shape=[-1, size, size], dtype='float32'
)
t.desc.set_need_check_feed(False)
tt = paddle.static.data(
name='tt', shape=[-1, size, size], dtype='float32'
)
tt.desc.set_need_check_feed(False)
ret = paddle.add(t, tt)
ret = paddle.multiply(ret, t)
......@@ -143,8 +151,14 @@ class TestModelCastBF16(unittest.TestCase):
)
with self.static_graph():
t = layers.data(name='t', shape=[size, size], dtype='float32')
tt = layers.data(name='tt', shape=[size, size], dtype='float32')
t = paddle.static.data(
name='t', shape=[-1, size, size], dtype='float32'
)
t.desc.set_need_check_feed(False)
tt = paddle.static.data(
name='tt', shape=[-1, size, size], dtype='float32'
)
tt.desc.set_need_check_feed(False)
with amp.bf16.bf16_guard():
ret = paddle.add(t, tt)
......
......@@ -102,10 +102,10 @@ def train(use_pure_fp16=True, use_nesterov=False, optimizer=""):
train_program.random_seed = 123
startup_prog.random_seed = 456
with fluid.program_guard(train_program, startup_prog):
images = fluid.layers.data(
name='pixel', shape=data_shape, dtype='float32'
images = paddle.static.data(
name='pixel', shape=[-1] + data_shape, dtype='float32'
)
label = fluid.layers.data(name='label', shape=[1], dtype='int64')
label = paddle.static.data(name='label', shape=[-1, 1], dtype='int64')
net = resnet_cifar10(images)
logits = paddle.static.nn.fc(x=net, size=classdim, activation="softmax")
cost = paddle.nn.functional.softmax_with_cross_entropy(
......@@ -275,11 +275,11 @@ class TestAmpWithNonIterableDataLoader(unittest.TestCase):
start_prog = paddle.static.Program()
with paddle.static.program_guard(main_prog, start_prog):
with paddle.fluid.unique_name.guard():
image = fluid.layers.data(
name='image', shape=[3, 224, 224], dtype='float32'
image = paddle.static.data(
name='image', shape=[-1, 3, 224, 224], dtype='float32'
)
label = fluid.layers.data(
name='label', shape=[1], dtype='int64'
label = paddle.static.data(
name='label', shape=[-1, 1], dtype='int64'
)
py_reader = fluid.io.DataLoader.from_generator(
feed_list=[image, label],
......
......@@ -134,10 +134,12 @@ class TestWeightDecay(unittest.TestCase):
startup_prog = fluid.framework.Program()
with prog_scope_guard(main_prog=main_prog, startup_prog=startup_prog):
data = fluid.layers.data(
name="words", shape=[1], dtype="int64", lod_level=1
data = paddle.static.data(
name="words", shape=[-1, 1], dtype="int64", lod_level=1
)
label = paddle.static.data(
name="label", shape=[-1, 1], dtype="int64"
)
label = fluid.layers.data(name="label", shape=[1], dtype="int64")
avg_cost = model(data, label, self.word_dict_len)
AdamW = fluid.contrib.extend_with_decoupled_weight_decay(
fluid.optimizer.Adam
......@@ -158,10 +160,12 @@ class TestWeightDecay(unittest.TestCase):
startup_prog = fluid.framework.Program()
with prog_scope_guard(main_prog=main_prog, startup_prog=startup_prog):
data = fluid.layers.data(
name="words", shape=[1], dtype="int64", lod_level=1
data = paddle.static.data(
name="words", shape=[-1, 1], dtype="int64", lod_level=1
)
label = paddle.static.data(
name="label", shape=[-1, 1], dtype="int64"
)
label = fluid.layers.data(name="label", shape=[1], dtype="int64")
avg_cost = model(data, label, self.word_dict_len)
......
......@@ -1862,7 +1862,6 @@ class Executor:
vardesc = global_block.desc.find_var(varname.encode())
varobj = global_block.vars[varname]
# Cannot check a var built by fluid.layers.data(), because fluid.layers.data() did not set need_check_feed
if (
vardesc.persistable() == False
and vardesc.type() == core.VarDesc.VarType.LOD_TENSOR
......
......@@ -6957,9 +6957,10 @@ class Parameter(Variable, metaclass=ParameterMetaClass):
.. code-block:: python
import paddle.fluid as fluid
import paddle
prog = fluid.default_main_program()
rlt = fluid.layers.data("fake_data", shape=[1,1], dtype='float32')
rlt = paddle.static.data("fake_data", shape=[-1,1,1], dtype='float32')
debug_str = prog.to_string(throw_on_error=True, with_details=False)
print(debug_str)
"""
......
......@@ -80,26 +80,23 @@ def model():
train_file_path,
) = ctr_dataset_reader.prepare_data()
""" network definition """
dnn_data = fluid.layers.data(
dnn_data = paddle.static.data(
name="dnn_data",
shape=[-1, 1],
dtype="int64",
lod_level=1,
append_batch_size=False,
)
lr_data = fluid.layers.data(
lr_data = paddle.static.data(
name="lr_data",
shape=[-1, 1],
dtype="int64",
lod_level=1,
append_batch_size=False,
)
label = fluid.layers.data(
label = paddle.static.data(
name="click",
shape=[-1, 1],
dtype="int64",
lod_level=0,
append_batch_size=False,
)
datas = [dnn_data, lr_data, label]
......
......@@ -1371,8 +1371,8 @@ class FleetUtil:
local_total_ins.name)
# below is part of example model
label = fluid.layers.data(name="click", shape=[-1, 1],\
dtype="int64", lod_level=0, append_batch_size=False)
label = paddle.static.data(name="click", shape=[-1, 1],\
dtype="int64", lod_level=0)
emb = my_slot_net(slots, label) # emb can be fc layer of size 1
similarity_norm = fluid.layers.sigmoid(paddle.clip(\
emb, min=-15.0, max=15.0), name="similarity_norm")\
......@@ -1571,8 +1571,8 @@ class FleetUtil:
local_total_ins.name)
# below is part of model
label = fluid.layers.data(name="click", shape=[-1, 1],\
dtype="int64", lod_level=0, append_batch_size=False)
label = paddle.static.data(name="click", shape=[-1, 1],\
dtype="int64", lod_level=0)
emb = my_slot_net(slots, label) # emb can be fc layer of size 1
similarity_norm = fluid.layers.sigmoid(paddle.clip(\
emb, min=-15.0, max=15.0), name="similarity_norm")\
......
......@@ -17,6 +17,7 @@ import sys
import logging
import subprocess
import numpy as np
import paddle
from collections import OrderedDict
import paddle.fluid as fluid
from paddle.fluid import core
......@@ -172,8 +173,9 @@ def save_var(np_array, var_name, shape_list, dtype, save_path):
program = fluid.Program()
place = fluid.CPUPlace()
exe = fluid.Executor(place)
shape = list(shape_list)
with fluid.program_guard(program):
d0_data = fluid.layers.data(var_name, shape=shape_list, dtype=dtype)
d0_data = paddle.static.data(var_name, shape=shape, dtype=dtype)
append_save_op(program.global_block(), d0_data, save_path)
exe.run(feed={var_name: np_array}, fetch_list=[])
......@@ -183,7 +185,7 @@ def load_var(var_name, shape_list, dtype, save_path):
place = fluid.CPUPlace()
exe = fluid.Executor(place)
with fluid.program_guard(program):
d0_data = fluid.layers.data(var_name, shape=shape_list, dtype=dtype)
d0_data = paddle.static.data(var_name, shape=shape_list, dtype=dtype)
append_load_op(program.global_block(), d0_data, save_path)
outs = exe.run(feed={}, fetch_list=[d0_data])
return outs
......
......@@ -103,7 +103,7 @@ def run_check():
with unique_name.guard():
build_strategy = compiler.BuildStrategy()
build_strategy.enable_inplace = True
inp = layers.data(name="inp", shape=[2, 2])
inp = paddle.static.data(name="inp", shape=[-1, 2, 2])
simple_layer = SimpleLayer(input_size=2)
out = simple_layer(inp)
exe = executor.Executor(
......@@ -138,9 +138,7 @@ def run_check():
with executor.scope_guard(scope):
with program_guard(train_prog, startup_prog):
with unique_name.guard():
inp0 = layers.data(
name="inp", shape=[2, 2], append_batch_size=False
)
inp0 = paddle.static.data(name="inp", shape=[2, 2])
simple_layer0 = SimpleLayer(input_size=2)
out0 = simple_layer0(inp0)
param_grads = backward.append_backward(
......
......@@ -355,7 +355,7 @@ def save_vars(
main_prog = fluid.Program()
startup_prog = fluid.Program()
with fluid.program_guard(main_prog, startup_prog):
data = fluid.layers.data(name="img", shape=[64, 784], append_batch_size=False)
data = paddle.static.data(name="img", shape=[64, 784])
w = paddle.create_parameter(shape=[784, 200], dtype='float32', name='fc_w')
b = paddle.create_parameter(shape=[200], dtype='float32', name='fc_b')
hidden_w = paddle.matmul(x=data, y=w)
......@@ -830,7 +830,7 @@ def load_vars(
main_prog = fluid.Program()
startup_prog = fluid.Program()
with fluid.program_guard(main_prog, startup_prog):
data = fluid.layers.data(name="img", shape=[64, 784], append_batch_size=False)
data = paddle.static.data(name="img", shape=[64, 784])
w = paddle.create_parameter(shape=[784, 200], dtype='float32', name='fc_w')
b = paddle.create_parameter(shape=[200], dtype='float32', name='fc_b')
hidden_w = paddle.matmul(x=data, y=w)
......@@ -1598,7 +1598,7 @@ def load_inference_model(
main_prog = fluid.Program()
startup_prog = fluid.Program()
with fluid.program_guard(main_prog, startup_prog):
data = fluid.layers.data(name="img", shape=[64, 784], append_batch_size=False)
data = paddle.static.data(name="img", shape=[-1, 64, 784])
w = paddle.create_parameter(shape=[784, 200], dtype='float32')
b = paddle.create_parameter(shape=[200], dtype='float32')
hidden_w = paddle.matmul(x=data, y=w)
......
......@@ -466,7 +466,7 @@ class StaticRNN:
is_sparse=False)
# transform batch size to dim 1
x_emb = paddle.transpose(x_emb, perm=[1, 0, 2])
boot_memory = fluid.layers.data(name='boot', shape=[hidden_size], dtype='float32', lod_level=1)
boot_memory = paddle.static.data(name='boot', shape=[-1, hidden_size], dtype='float32', lod_level=1)
rnn = fluid.layers.StaticRNN()
with rnn.step():
# mark created x_emb as input, each step process a word
......
......@@ -41,108 +41,7 @@ from ..framework import (
_set_expected_place,
)
__all__ = [
'data',
]
@static_only
def data(
name,
shape,
append_batch_size=True,
dtype='float32',
lod_level=0,
type=core.VarDesc.VarType.LOD_TENSOR,
stop_gradient=True,
):
"""
**Data Layer**
This operator creates a global variable that can be accessed by all the
following operators in the graph.
Note:
:code:`paddle.fluid.layers.data` is deprecated and will be removed in
a later version. Please use :code:`paddle.fluid.data`.
This :code:`paddle.fluid.layers.data` sets the shape and dtype at compile
time but does NOT check the shape or dtype of the fed data;
:code:`paddle.fluid.data` checks the shape and dtype of the data fed
by Executor or ParallelExecutor at run time.
To feed variable-size inputs, users can feed them directly to this
:code:`paddle.fluid.layers.data` and PaddlePaddle will fit the size
accordingly, or set -1 on the variable dimension when using
:code:`paddle.fluid.data`.
The default :code:`stop_gradient` attribute of the Variable created by
this API is True, which means the gradient won't be passed backward
through the data Variable. Set :code:`var.stop_gradient = False` if the
user would like to pass the gradient backward.
Args:
name(str): The name/alias of the variable, see :ref:`api_guide_Name`
for more details.
shape(list|tuple): Tuple declaring the shape. If :code:`append_batch_size` is
True and there is no -1 inside :code:`shape`, it should be
considered as the shape of each sample. Otherwise, it should
be considered as the shape of the batched data.
append_batch_size(bool):
1. If True, it prepends -1 to the shape.
For example, if shape=[1], the resulting shape is [-1, 1]. This is
useful for setting a different batch size at run time.
2. If shape contains -1, such as shape=[1, -1],
append_batch_size is enforced to be False (ineffective)
because PaddlePaddle cannot set more than one unknown number on the
shape.
dtype(np.dtype|VarType|str): The type of the data. Supported dtype: bool,
float16, float32, float64, int8, int16, int32, int64, uint8.
type(VarType): The output type. Supported types: VarType.LOD_TENSOR,
VarType.SELECTED_ROWS, VarType.NCCL_ID. Default: VarType.LOD_TENSOR.
lod_level(int): The LoD level. 0 means the input data is not a sequence.
Default: 0.
stop_gradient(bool): A boolean that indicates whether gradients should flow.
Default: True.
Returns:
The global variable that gives access to the data.
Return Type:
Variable
Examples:
.. code-block:: python
import paddle.fluid as fluid
data = fluid.layers.data(name='x', shape=[784], dtype='float32')
"""
helper = LayerHelper('data', **locals())
check_type(name, 'name', (bytes, str), 'data')
check_type(shape, 'shape', (list, tuple), 'data')
shape = list(shape)
for i in range(len(shape)):
if shape[i] is None:
shape[i] = -1
append_batch_size = False
elif shape[i] < 0:
append_batch_size = False
if append_batch_size:
shape = [-1] + shape # append batch size as -1
data_var = helper.create_global_variable(
name=name,
shape=shape,
dtype=dtype,
type=type,
stop_gradient=stop_gradient,
lod_level=lod_level,
is_data=True,
)
return data_var
__all__ = []
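For migration reference, the shape handling of the function removed above can be distilled into a small helper (hypothetical; it only summarizes the deleted logic and is not part of this PR):

    def translate_shape(shape, append_batch_size=True):
        # Mirrors the removed fluid.layers.data handling: None becomes -1,
        # and any None or negative dimension disables batch prepending.
        shape = list(shape)
        for i, dim in enumerate(shape):
            if dim is None:
                shape[i] = -1
                append_batch_size = False
            elif dim < 0:
                append_batch_size = False
        if append_batch_size:
            shape = [-1] + shape  # prepend batch size as -1
        return shape

    assert translate_shape([13]) == [-1, 13]    # old shape=[13] -> new shape=[-1, 13]
    assert translate_shape([1, -1]) == [1, -1]  # already contains -1: unchanged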
class BlockGuardServ(BlockGuard):
......@@ -189,11 +88,10 @@ class ListenAndServ:
serv = layers.ListenAndServ(
"127.0.0.1:6170", ["X"], optimizer_mode=False)
with serv.do():
x = layers.data(
x = paddle.static.data(
shape=[32, 32],
dtype='float32',
name="X",
append_batch_size=False)
name="X")
fluid.initializer.Constant(value=1.0)(x, main.global_block())
paddle.scale(x=x, scale=10.0, out=out_var)
......
......@@ -329,7 +329,7 @@ def _pull_sparse(
.. code-block:: python
import paddle.fluid as fluid
data = fluid.layers.data(name='sequence', shape=[1], dtype='int64', lod_level=1)
data = paddle.static.data(name='sequence', shape=[-1, 1], dtype='int64', lod_level=1)
emb = fluid.layers.nn._pull_sparse(
input=data, size=11, table_id=0, accessor_class="DownpourCtrAccessor")
"""
......@@ -403,7 +403,7 @@ def _pull_sparse_v2(
.. code-block:: python
import paddle.fluid as fluid
data = fluid.layers.data(name='sequence', shape=[1], dtype='int64', lod_level=1)
data = paddle.static.data(name='sequence', shape=[-1, 1], dtype='int64', lod_level=1)
emb = fluid.layers.nn._pull_sparse_v2(
input=data, size=11, table_id=0, accessor_class="DownpourCtrAccessor")
"""
......@@ -464,9 +464,9 @@ def _pull_gpups_sparse(
import paddle.fluid as fluid
slots = []
data_1 = fluid.layers.data(name='sequence', shape=[1], dtype='int64', lod_level=1)
data_1 = paddle.static.data(name='sequence', shape=[-1,1], dtype='int64', lod_level=1)
slots.append(data_1)
data_2 = fluid.layers.data(name='sequence', shape=[1], dtype='int64', lod_level=1)
data_2 = paddle.static.data(name='sequence', shape=[-1,1], dtype='int64', lod_level=1)
slots.append(data_2)
embs = fluid.layers.pull_gpups_sparse(input=slots, size=[11, 35])
"""
......@@ -526,7 +526,7 @@ def _pull_box_sparse(
.. code-block:: python
import paddle.fluid as fluid
data = fluid.layers.data(name='sequence', shape=[1], dtype='int64', lod_level=1)
data = paddle.static.data(name='sequence', shape=[-1,1], dtype='int64', lod_level=1)
emb = fluid.layers.pull_box_sparse(input=data, size=[11])
"""
helper = LayerHelper('pull_box_sparse', **locals())
......@@ -711,7 +711,7 @@ def unsqueeze(input, axes, name=None):
.. code-block:: python
import paddle.fluid as fluid
x = fluid.layers.data(name='x', shape=[5, 10])
x = paddle.static.data(name='x', shape=[-1, 5, 10], dtype="float32")
y = fluid.layers.unsqueeze(input=x, axes=[1])
"""
......
......@@ -1431,8 +1431,8 @@ class SGDOptimizer(Optimizer):
place = fluid.CPUPlace()
main = fluid.Program()
with fluid.program_guard(main):
x = fluid.layers.data(name='x', shape=[13], dtype='float32')
y = fluid.layers.data(name='y', shape=[1], dtype='float32')
x = paddle.static.data(name='x', shape=[-1, 13], dtype='float32')
y = paddle.static.data(name='y', shape=[-1, 1], dtype='float32')
y_predict = paddle.static.nn.fc(x, size=1, activation=None)
cost = paddle.nn.functional.square_error_cost(input=y_predict, label=y)
avg_cost = paddle.mean(cost)
......@@ -1623,8 +1623,8 @@ class MomentumOptimizer(Optimizer):
place = fluid.CPUPlace()
main = fluid.Program()
with fluid.program_guard(main):
x = fluid.layers.data(name='x', shape=[13], dtype='float32')
y = fluid.layers.data(name='y', shape=[1], dtype='float32')
x = paddle.static.data(name='x', shape=[-1, 13], dtype='float32')
y = paddle.static.data(name='y', shape=[-1, 1], dtype='float32')
y_predict = paddle.static.nn.fc(x, size=1, activation=None)
cost = paddle.nn.functional.square_error_cost(input=y_predict, label=y)
avg_cost = paddle.mean(cost)
......@@ -1772,8 +1772,8 @@ class LarsMomentumOptimizer(Optimizer):
paddle.enable_static()
np_inp = np.array([[1.0, 2.0], [3.0, 4.0]], dtype=np.float32)
inp = fluid.layers.data(
name="inp", shape=[2, 2], append_batch_size=False)
inp = paddle.static.data(
name="inp", shape=[2, 2], dtype='float32')
out = paddle.static.nn.fc(inp, size=3)
out = paddle.sum(out)
optimizer = fluid.optimizer.LarsMomentumOptimizer(learning_rate=0.001, momentum=0.9)
......@@ -2764,7 +2764,7 @@ class DpsgdOptimizer(Optimizer):
train_program = fluid.Program()
startup_program = fluid.Program()
with fluid.program_guard(train_program, startup_program):
data = fluid.layers.data(name='X', shape=[1], dtype='float32')
data = paddle.static.data(name='X', shape=[-1,1], dtype='float32')
hidden = paddle.static.nn.fc(x=data, size=10)
loss = paddle.mean(hidden)
optimizer = fluid.optimizer.Dpsgd(learning_rate=0.01, clip=10.0, batch_size=16.0, sigma=1.0)
......@@ -3217,8 +3217,8 @@ class RMSPropOptimizer(Optimizer):
place = fluid.CPUPlace()
main = fluid.Program()
with fluid.program_guard(main):
x = fluid.layers.data(name='x', shape=[13], dtype='float32')
y = fluid.layers.data(name='y', shape=[1], dtype='float32')
x = paddle.static.data(name='x', shape=[-1, 13], dtype='float32')
y = paddle.static.data(name='y', shape=[-1, 1], dtype='float32')
y_predict = paddle.static.nn.fc(x, size=1, activation=None)
cost = paddle.nn.functional.square_error_cost(input=y_predict, label=y)
avg_cost = paddle.mean(cost)
......@@ -3415,8 +3415,8 @@ class FtrlOptimizer(Optimizer):
place = fluid.CPUPlace()
main = fluid.Program()
with fluid.program_guard(main):
x = fluid.layers.data(name='x', shape=[13], dtype='float32')
y = fluid.layers.data(name='y', shape=[1], dtype='float32')
x = paddle.static.data(name='x', shape=[-1, 13], dtype='float32')
y = paddle.static.data(name='y', shape=[-1, 1], dtype='float32')
y_predict = paddle.static.nn.fc(x, size=1, activation=None)
cost = paddle.nn.functional.square_error_cost(input=y_predict, label=y)
avg_cost = paddle.mean(cost)
......@@ -4354,11 +4354,12 @@ class PipelineOptimizer:
import paddle
import paddle.fluid as fluid
import paddle.fluid.layers as layers
import numpy as np
paddle.enable_static()
with fluid.device_guard("gpu:0"):
x = fluid.layers.data(name='x', shape=[1], dtype='int64', lod_level=0)
y = fluid.layers.data(name='y', shape=[1], dtype='int64', lod_level=0)
x = paddle.static.data(name='x', shape=[-1, 1], dtype='int64', lod_level=0)
y = paddle.static.data(name='y', shape=[-1, 1], dtype='int64', lod_level=0)
data_loader = fluid.io.DataLoader.from_generator(
feed_list=[x, y],
capacity=64,
......@@ -6332,8 +6333,8 @@ class RecomputeOptimizer(Optimizer):
)
sum_cost = paddle.mean(cost)
return sum_cost, fc_1, prediction
input_x = fluid.layers.data(name="x", shape=[32], dtype='float32')
input_y = fluid.layers.data(name="y", shape=[1], dtype='int64')
input_x = paddle.static.data(name="x", shape=[-1,32], dtype='float32')
input_y = paddle.static.data(name="y", shape=[-1,1], dtype='int64')
cost, fc_1, pred = mlp(input_x, input_y)
sgd = fluid.optimizer.Adam(learning_rate=0.01)
......@@ -6410,8 +6411,8 @@ class RecomputeOptimizer(Optimizer):
sum_cost = paddle.mean(cost)
return sum_cost, fc_1, prediction
input_x = fluid.layers.data(name="x", shape=[32], dtype='float32')
input_y = fluid.layers.data(name="y", shape=[1], dtype='int64')
input_x = paddle.static.data(name="x", shape=[-1,32], dtype='float32')
input_y = paddle.static.data(name="y", shape=[-1,1], dtype='int64')
cost, fc_1, pred = mlp(input_x, input_y)
print("Finished FF")
......@@ -6458,8 +6459,8 @@ class RecomputeOptimizer(Optimizer):
return sum_cost, fc_1, prediction
input_x = fluid.layers.data(name="x", shape=[32], dtype='float32')
input_y = fluid.layers.data(name="y", shape=[1], dtype='int64')
input_x = paddle.static.data(name="x", shape=[-1,32], dtype='float32')
input_y = paddle.static.data(name="y", shape=[-1,1], dtype='int64')
cost, fc_1, pred = mlp(input_x, input_y)
print("Finished FF")
......@@ -6952,8 +6953,8 @@ class RecomputeOptimizer(Optimizer):
return sum_cost, fc_1, prediction
input_x = fluid.layers.data(name="x", shape=[32], dtype='float32')
input_y = fluid.layers.data(name="y", shape=[1], dtype='int64')
input_x = paddle.static.data(name="x", shape=[-1,32], dtype='float32')
input_y = paddle.static.data(name="y", shape=[-1,1], dtype='int64')
cost, fc_1, pred = mlp(input_x, input_y)
print("Finished FF")
......@@ -7033,8 +7034,8 @@ class RecomputeOptimizer(Optimizer):
sum_cost = paddle.mean(cost)
return sum_cost, fc_1, prediction
input_x = fluid.layers.data(name="x", shape=[32], dtype='float32')
input_y = fluid.layers.data(name="y", shape=[1], dtype='int64')
input_x = paddle.static.data(name="x", shape=[-1,32], dtype='float32')
input_y = paddle.static.data(name="y", shape=[-1,1], dtype='int64')
cost, fc_1, pred = mlp(input_x, input_y)
print("Finished FF")
......@@ -7120,8 +7121,8 @@ class LookaheadOptimizer:
paddle.enable_static()
x = fluid.layers.data(name='x', shape=[2], dtype='float32')
label = fluid.layers.data(name="label", shape=[1], dtype="int64")
x = paddle.static.data(name='x', shape=[-1,2], dtype='float32')
label = paddle.static.data(name="label", shape=[-1,1], dtype="int64")
y = paddle.static.nn.fc(x=[x], size=2, activation="softmax")
loss = paddle.nn.functional.cross_entropy(
input=y, label=label,
......@@ -7311,8 +7312,8 @@ class GradientMergeOptimizer:
sum_cost = paddle.mean(cost)
return sum_cost, fc_1, prediction
input_x = fluid.layers.data(name="x", shape=[32], dtype='float32')
input_y = fluid.layers.data(name="y", shape=[1], dtype='int64')
input_x = paddle.static.data(name="x", shape=[-1,32], dtype='float32')
input_y = paddle.static.data(name="y", shape=[-1,1], dtype='int64')
cost, fc_1, pred = mlp(input_x, input_y)
sgd = fluid.optimizer.Adam(learning_rate=0.01)
sgd = fluid.optimizer.GradientMergeOptimizer(sgd, k_steps=4, avg=True)
......
......@@ -1643,7 +1643,7 @@ class PyReader(DataLoaderBase):
Args:
feed_list (list(Variable)|tuple(Variable)): feed variable list.
The variables should be created by :code:`fluid.layers.data()`.
The variables should be created by :code:`paddle.static.data()`.
capacity (int): capacity of the queue maintained in PyReader.
The unit is batch number. Set larger capacity if your reader
is fast.
......
......@@ -74,8 +74,8 @@ class L2DecayRegularizer(WeightDecayRegularizer):
main_prog = fluid.Program()
startup_prog = fluid.Program()
with fluid.program_guard(main_prog, startup_prog):
data = fluid.layers.data(name='image', shape=[3, 28, 28], dtype='float32')
label = fluid.layers.data(name='label', shape=[1], dtype='int64')
data = paddle.static.data(name='image', shape=[-1, 3, 28, 28], dtype='float32')
label = paddle.static.data(name='label', shape=[-1, 1], dtype='int64')
hidden = paddle.static.nn.fc(x=data, size=128, activation='relu')
prediction = paddle.static.nn.fc(x=hidden, size=10, activation='softmax')
loss = paddle.nn.functional.cross_entropy(
......@@ -193,8 +193,8 @@ class L1DecayRegularizer(WeightDecayRegularizer):
main_prog = fluid.Program()
startup_prog = fluid.Program()
with fluid.program_guard(main_prog, startup_prog):
data = fluid.layers.data(name='image', shape=[3, 28, 28], dtype='float32')
label = fluid.layers.data(name='label', shape=[1], dtype='int64')
data = paddle.static.data(name='image', shape=[-1, 3, 28, 28], dtype='float32')
label = paddle.static.data(name='label', shape=[-1, 1], dtype='int64')
hidden = paddle.static.nn.fc(x=data, size=128, activation='relu')
prediction = paddle.static.nn.fc(x=hidden, size=10, activation='softmax')
loss = paddle.nn.functional.cross_entropy(
......
......@@ -68,10 +68,10 @@ def train(
dict_dim = len(word_dict)
class_dim = 2
data = fluid.layers.data(
name="words", shape=[1], dtype="int64", lod_level=1
data = paddle.static.data(
name="words", shape=[-1, 1], dtype="int64", lod_level=1
)
label = fluid.layers.data(name="label", shape=[1], dtype="int64")
label = paddle.static.data(name="label", shape=[-1, 1], dtype="int64")
if not parallel:
cost, acc_out, prediction = net_method(
......
......@@ -49,8 +49,10 @@ def convert_float_to_uint16(in_list):
def train(use_cuda, save_dirname, is_local, use_bf16, pure_bf16):
x = fluid.layers.data(name='x', shape=[13], dtype='float32')
y = fluid.layers.data(name='y', shape=[1], dtype='float32')
x = paddle.static.data(name='x', shape=[-1, 13], dtype='float32')
x.desc.set_need_check_feed(False)
y = paddle.static.data(name='y', shape=[-1, 1], dtype='float32')
y.desc.set_need_check_feed(False)
if use_bf16:
if not pure_bf16:
......
......@@ -104,8 +104,10 @@ def train(net_type, use_cuda, save_dirname, is_local):
classdim = 10
data_shape = [3, 32, 32]
images = fluid.layers.data(name='pixel', shape=data_shape, dtype='float32')
label = fluid.layers.data(name='label', shape=[1], dtype='int64')
images = paddle.static.data(
name='pixel', shape=[-1] + data_shape, dtype='float32'
)
label = paddle.static.data(name='label', shape=[-1, 1], dtype='int64')
if net_type == "vgg":
print("train vgg net")
......
......@@ -77,8 +77,8 @@ def train(
):
if use_cuda and not fluid.core.is_compiled_with_cuda():
return
img = fluid.layers.data(name='img', shape=[1, 28, 28], dtype='float32')
label = fluid.layers.data(name='label', shape=[1], dtype='int64')
img = paddle.static.data(name='img', shape=[-1, 1, 28, 28], dtype='float32')
label = paddle.static.data(name='label', shape=[-1, 1], dtype='int64')
if nn_type == 'mlp':
net_conf = mlp
......
......@@ -40,7 +40,7 @@ def get_usr_combined_features():
USR_DICT_SIZE = paddle.dataset.movielens.max_user_id() + 1
uid = layers.data(name='user_id', shape=[1], dtype='int64')
uid = paddle.static.data(name='user_id', shape=[-1, 1], dtype='int64')
usr_emb = layers.embedding(
input=uid,
......@@ -54,7 +54,9 @@ def get_usr_combined_features():
USR_GENDER_DICT_SIZE = 2
usr_gender_id = layers.data(name='gender_id', shape=[1], dtype='int64')
usr_gender_id = paddle.static.data(
name='gender_id', shape=[-1, 1], dtype='int64'
)
usr_gender_emb = layers.embedding(
input=usr_gender_id,
......@@ -66,7 +68,7 @@ def get_usr_combined_features():
usr_gender_fc = paddle.static.nn.fc(x=usr_gender_emb, size=16)
USR_AGE_DICT_SIZE = len(paddle.dataset.movielens.age_table)
usr_age_id = layers.data(name='age_id', shape=[1], dtype="int64")
usr_age_id = paddle.static.data(name='age_id', shape=[-1, 1], dtype="int64")
usr_age_emb = layers.embedding(
input=usr_age_id,
......@@ -78,7 +80,7 @@ def get_usr_combined_features():
usr_age_fc = paddle.static.nn.fc(x=usr_age_emb, size=16)
USR_JOB_DICT_SIZE = paddle.dataset.movielens.max_job_id() + 1
usr_job_id = layers.data(name='job_id', shape=[1], dtype="int64")
usr_job_id = paddle.static.data(name='job_id', shape=[-1, 1], dtype="int64")
usr_job_emb = layers.embedding(
input=usr_job_id,
......@@ -104,7 +106,7 @@ def get_mov_combined_features():
MOV_DICT_SIZE = paddle.dataset.movielens.max_movie_id() + 1
mov_id = layers.data(name='movie_id', shape=[1], dtype='int64')
mov_id = paddle.static.data(name='movie_id', shape=[-1, 1], dtype='int64')
mov_emb = layers.embedding(
input=mov_id,
......@@ -118,8 +120,8 @@ def get_mov_combined_features():
CATEGORY_DICT_SIZE = len(paddle.dataset.movielens.movie_categories())
category_id = layers.data(
name='category_id', shape=[1], dtype='int64', lod_level=1
category_id = paddle.static.data(
name='category_id', shape=[-1, 1], dtype='int64', lod_level=1
)
mov_categories_emb = layers.embedding(
......@@ -132,8 +134,8 @@ def get_mov_combined_features():
MOV_TITLE_DICT_SIZE = len(paddle.dataset.movielens.get_movie_title_dict())
mov_title_id = layers.data(
name='movie_title', shape=[1], dtype='int64', lod_level=1
mov_title_id = paddle.static.data(
name='movie_title', shape=[-1, 1], dtype='int64', lod_level=1
)
mov_title_emb = layers.embedding(
......@@ -170,7 +172,7 @@ def model():
)
scale_infer = paddle.scale(x=inference, scale=5.0)
label = layers.data(name='score', shape=[1], dtype='float32')
label = paddle.static.data(name='score', shape=[-1, 1], dtype='float32')
square_cost = paddle.nn.functional.square_error_cost(
input=scale_infer, label=label
)
......
......@@ -108,11 +108,13 @@ def train(
word_dict = paddle.dataset.imikolov.build_dict()
dict_size = len(word_dict)
first_word = fluid.layers.data(name='firstw', shape=[1], dtype='int64')
second_word = fluid.layers.data(name='secondw', shape=[1], dtype='int64')
third_word = fluid.layers.data(name='thirdw', shape=[1], dtype='int64')
forth_word = fluid.layers.data(name='forthw', shape=[1], dtype='int64')
next_word = fluid.layers.data(name='nextw', shape=[1], dtype='int64')
first_word = paddle.static.data(name='firstw', shape=[-1, 1], dtype='int64')
second_word = paddle.static.data(
name='secondw', shape=[-1, 1], dtype='int64'
)
third_word = paddle.static.data(name='thirdw', shape=[-1, 1], dtype='int64')
forth_word = paddle.static.data(name='forthw', shape=[-1, 1], dtype='int64')
next_word = paddle.static.data(name='nextw', shape=[-1, 1], dtype='int64')
if not is_parallel:
avg_cost, predict_word = __network__(
......
......@@ -22,8 +22,8 @@ paddle.enable_static()
class TestDataFeeder(unittest.TestCase):
def test_lod_level_0_converter(self):
img = fluid.layers.data(name='image', shape=[1, 28, 28])
label = fluid.layers.data(name='label', shape=[1], dtype='int64')
img = paddle.static.data(name='image', shape=[-1, 1, 28, 28])
label = paddle.static.data(name='label', shape=[-1, 1], dtype='int64')
feeder = fluid.DataFeeder([img, label], fluid.CPUPlace())
result = feeder.feed([([0] * 784, [9]), ([1] * 784, [1])])
......@@ -41,10 +41,10 @@ class TestDataFeeder(unittest.TestCase):
def test_lod_level_1_converter(self):
# lod_level = 1
# each sentence has a different number of words
sentences = fluid.layers.data(
name='sentences', shape=[1], dtype='int64', lod_level=1
sentences = paddle.static.data(
name='sentences', shape=[-1, 1], dtype='int64', lod_level=1
)
label = fluid.layers.data(name='label', shape=[1], dtype='int64')
label = paddle.static.data(name='label', shape=[-1, 1], dtype='int64')
feeder = fluid.DataFeeder([sentences, label], fluid.CPUPlace())
# lod = [[0, 3, 5, 9]]
......@@ -64,10 +64,10 @@ class TestDataFeeder(unittest.TestCase):
def test_lod_level_2_converter(self):
# lod_level = 2
# paragraphs -> sentences -> words
paragraphs = fluid.layers.data(
name='paragraphs', shape=[1], dtype='int64', lod_level=2
paragraphs = paddle.static.data(
name='paragraphs', shape=[-1, 1], dtype='int64', lod_level=2
)
label = fluid.layers.data(name='label', shape=[1], dtype='int64')
label = paddle.static.data(name='label', shape=[-1, 1], dtype='int64')
feeder = fluid.DataFeeder([paragraphs, label], fluid.CPUPlace())
# lod = [[0, 2, 3], [0, 3, 5, 9]]
......
......@@ -20,7 +20,6 @@ from unittests.test_imperative_base import new_program_scope
import paddle
import paddle.fluid as fluid
import paddle.fluid.layers as layers
from paddle.fluid import core
from paddle.fluid.dygraph import base
from paddle.fluid.framework import Program, program_guard
......@@ -154,10 +153,12 @@ class TestMulticlassNMS2(unittest.TestCase):
def test_multiclass_nms2(self):
program = Program()
with program_guard(program):
bboxes = layers.data(
bboxes = paddle.static.data(
name='bboxes', shape=[-1, 10, 4], dtype='float32'
)
scores = layers.data(name='scores', shape=[-1, 10], dtype='float32')
scores = paddle.static.data(
name='scores', shape=[-1, 10], dtype='float32'
)
output = fluid.contrib.multiclass_nms2(
bboxes, scores, 0.3, 400, 200, 0.7
)
......
......@@ -23,13 +23,13 @@ paddle.enable_static()
prog = fluid.framework.Program()
with fluid.program_guard(main_program=prog):
image = fluid.layers.data(name='x', shape=[784], dtype='float32')
image = paddle.static.data(name='x', shape=[-1, 784], dtype='float32')
hidden1 = paddle.static.nn.fc(x=image, size=128, activation='relu')
hidden2 = paddle.static.nn.fc(x=hidden1, size=64, activation='relu')
predict = paddle.static.nn.fc(x=hidden2, size=10, activation='softmax')
label = fluid.layers.data(name='y', shape=[1], dtype='int64')
label = paddle.static.data(name='y', shape=[-1, 1], dtype='int64')
cost = paddle.nn.functional.cross_entropy(
input=predict, label=label, reduction='none', use_softmax=False
......
......@@ -46,8 +46,8 @@ def generator():
def net():
x = fluid.layers.data(name="x", shape=[3], dtype='float32')
y = fluid.layers.data(name="y", shape=[1], dtype='int64')
x = paddle.static.data(name="x", shape=[-1, 3], dtype='float32')
y = paddle.static.data(name="y", shape=[-1, 1], dtype='int64')
# test int64 value
zero = fluid.layers.fill_constant(shape=[1], dtype='int64', value=0)
......
......@@ -20,7 +20,6 @@ import test_collective_api_base as test_base
import paddle
import paddle.fluid as fluid
import paddle.fluid.layers as layers
paddle.enable_static()
......@@ -33,7 +32,9 @@ class TestCollectiveAllgatherAPI(test_base.TestCollectiveAPIRunnerBase):
dtype = "float32" if dtype is None else dtype
with fluid.program_guard(main_prog, startup_program):
tensor_list = []
tindata = layers.data(name="tindata", shape=[10, 1000], dtype=dtype)
tindata = paddle.static.data(
name="tindata", shape=[-1, 10, 1000], dtype=dtype
)
paddle.distributed.all_gather(tensor_list, tindata)
return tensor_list
......
......@@ -16,7 +16,6 @@ from test_collective_api_base import TestCollectiveAPIRunnerBase, runtime_main
import paddle
import paddle.fluid as fluid
import paddle.fluid.layers as layers
paddle.enable_static()
......@@ -27,8 +26,8 @@ class TestCollectiveAllreduceAPI(TestCollectiveAPIRunnerBase):
def get_model(self, main_prog, startup_program, rank):
with fluid.program_guard(main_prog, startup_program):
tindata = layers.data(
name="tindata", shape=[10, 1000], dtype='float32'
tindata = paddle.static.data(
name="tindata", shape=[-1, 10, 1000], dtype='float32'
)
paddle.distributed.all_reduce(tindata)
return [tindata]
......
......@@ -16,7 +16,6 @@ from test_collective_api_base import TestCollectiveAPIRunnerBase, runtime_main
import paddle
import paddle.fluid as fluid
import paddle.fluid.layers as layers
paddle.enable_static()
......@@ -27,8 +26,8 @@ class TestCollectiveAllreduceNewGroupAPI(TestCollectiveAPIRunnerBase):
def get_model(self, main_prog, startup_program, rank):
with fluid.program_guard(main_prog, startup_program):
tindata = layers.data(
name="tindata", shape=[10, 1000], dtype='float32'
tindata = paddle.static.data(
name="tindata", shape=[1, 10, 1000], dtype='float32'
)
gp = paddle.distributed.new_group([0, 1])
paddle.distributed.all_reduce(tindata, group=gp, sync_op=True)
......
......@@ -16,7 +16,6 @@ from test_collective_base import TestCollectiveRunnerBase, runtime_main
import paddle
import paddle.fluid as fluid
import paddle.fluid.layers as layers
from paddle.fluid import core
paddle.enable_static()
......@@ -29,9 +28,10 @@ class TestCollectiveAllreduce(TestCollectiveRunnerBase):
def get_model(self, main_prog, startup_program):
ring_id = 0
with fluid.program_guard(main_prog, startup_program):
tindata = layers.data(
name="tindata", shape=[10, 1000], dtype='float32'
tindata = paddle.static.data(
name="tindata", shape=[-1, 10, 1000], dtype='float32'
)
tindata.desc.set_need_check_feed(False)
toutdata = main_prog.current_block().create_var(
name="outofallreduce",
dtype='float32',
......
......@@ -16,7 +16,6 @@ from test_collective_base import TestCollectiveRunnerBase, runtime_main
import paddle
import paddle.fluid as fluid
import paddle.fluid.layers as layers
from paddle.fluid import core
paddle.enable_static()
......@@ -29,9 +28,10 @@ class TestCollectiveAllreduce(TestCollectiveRunnerBase):
def get_model(self, main_prog, startup_program):
ring_id = 0
with fluid.program_guard(main_prog, startup_program):
tindata = layers.data(
name="tindata", shape=[10, 1000], dtype='float32'
tindata = paddle.static.data(
name="tindata", shape=[-1, 10, 1000], dtype='float32'
)
tindata.desc.set_need_check_feed(False)
toutdata = main_prog.current_block().create_var(
name="outofallreduce",
dtype='float32',
......
......@@ -16,7 +16,6 @@ from test_collective_api_base import TestCollectiveAPIRunnerBase, runtime_main
import paddle
import paddle.fluid as fluid
import paddle.fluid.layers as layers
paddle.enable_static()
......@@ -27,9 +26,10 @@ class TestCollectiveAllToAllAPI(TestCollectiveAPIRunnerBase):
def get_model(self, main_prog, startup_program, rank):
with fluid.program_guard(main_prog, startup_program):
tindata = layers.data(
name="tindata", shape=[10, 1000], dtype='float32'
tindata = paddle.static.data(
name="tindata", shape=[-1, 10, 1000], dtype='float32'
)
tindata.desc.set_need_check_feed(False)
tindata = paddle.split(tindata, 2, axis=0)
tout_data = []
paddle.distributed.alltoall(tindata, tout_data)
......
......@@ -16,7 +16,6 @@ from test_collective_api_base import TestCollectiveAPIRunnerBase, runtime_main
import paddle
import paddle.fluid as fluid
import paddle.fluid.layers as layers
paddle.enable_static()
......@@ -27,9 +26,10 @@ class TestCollectiveBroadcastAPI(TestCollectiveAPIRunnerBase):
def get_model(self, main_prog, startup_program, rank):
with fluid.program_guard(main_prog, startup_program):
tindata = layers.data(
name="tindata", shape=[10, 1000], dtype='float32'
tindata = paddle.static.data(
name="tindata", shape=[-1, 10, 1000], dtype='float32'
)
tindata.desc.set_need_check_feed(False)
paddle.distributed.broadcast(tindata, src=1)
return [tindata]
......
......@@ -16,7 +16,6 @@ from test_collective_base import TestCollectiveRunnerBase, runtime_main
import paddle
import paddle.fluid as fluid
import paddle.fluid.layers as layers
from paddle.fluid import core
paddle.enable_static()
......@@ -30,9 +29,10 @@ class TestCollectiveBroadcast(TestCollectiveRunnerBase):
ring_id = 0
rootid = 1
with fluid.program_guard(main_prog, startup_program):
tindata = layers.data(
name="tindata", shape=[10, 1000], dtype='float32'
tindata = paddle.static.data(
name="tindata", shape=[-1, 10, 1000], dtype='float32'
)
tindata.desc.set_need_check_feed(False)
toutdata = main_prog.current_block().create_var(
name="outofbroadcast",
dtype='float32',
......
......@@ -16,7 +16,6 @@ from test_collective_base import TestCollectiveRunnerBase, runtime_main
import paddle
import paddle.fluid as fluid
import paddle.fluid.layers as layers
from paddle.fluid import core
paddle.enable_static()
......@@ -30,9 +29,10 @@ class TestCollectiveConcat(TestCollectiveRunnerBase):
ring_id = 0
nranks = 2
with fluid.program_guard(main_prog, startup_program):
tindata = layers.data(
name="tindata", shape=[10, 1000], dtype='float32'
tindata = paddle.static.data(
name="tindata", shape=[-1, 10, 1000], dtype='float32'
)
tindata.desc.set_need_check_feed(False)
toutdata = main_prog.current_block().create_var(
name="outofconcat",
dtype='float32',
......
......@@ -16,7 +16,6 @@ from test_collective_base import TestCollectiveRunnerBase, runtime_main
import paddle
import paddle.fluid as fluid
import paddle.fluid.layers as layers
from paddle.fluid import core
paddle.enable_static()
......@@ -30,9 +29,10 @@ class TestCollectiveIdentity(TestCollectiveRunnerBase):
ring_id = 0
nranks = 2
with fluid.program_guard(main_prog, startup_program):
tindata = layers.data(
name="tindata", shape=[10, 1000], dtype='float32'
tindata = paddle.static.data(
name="tindata", shape=[-1, 10, 1000], dtype='float32'
)
tindata.desc.set_need_check_feed(False)
toutdata = main_prog.current_block().create_var(
name="outofgather",
dtype='float32',
......
......@@ -16,7 +16,6 @@ from test_collective_api_base import TestCollectiveAPIRunnerBase, runtime_main
import paddle
import paddle.fluid as fluid
import paddle.fluid.layers as layers
paddle.enable_static()
......@@ -27,9 +26,10 @@ class TestCollectiveReduceAPI(TestCollectiveAPIRunnerBase):
def get_model(self, main_prog, startup_program, rank):
with fluid.program_guard(main_prog, startup_program):
tindata = layers.data(
name="tindata", shape=[10, 1000], dtype='float32'
tindata = paddle.static.data(
name="tindata", shape=[-1, 10, 1000], dtype='float32'
)
tindata.desc.set_need_check_feed(False)
paddle.distributed.reduce(tindata, dst=0)
return [tindata]
......
......@@ -16,7 +16,6 @@ from test_collective_base import TestCollectiveRunnerBase, runtime_main
import paddle
import paddle.fluid as fluid
import paddle.fluid.layers as layers
from paddle.fluid import core
paddle.enable_static()
......@@ -30,9 +29,10 @@ class TestCollectiveReduce(TestCollectiveRunnerBase):
ring_id = 0
rootid = 1
with fluid.program_guard(main_prog, startup_program):
tindata = layers.data(
name="tindata", shape=[10, 1000], dtype='float32'
tindata = paddle.static.data(
name="tindata", shape=[-1, 10, 1000], dtype='float32'
)
tindata.desc.set_need_check_feed(False)
toutdata = main_prog.current_block().create_var(
name="outofreduce",
dtype='float32',
......
......@@ -16,7 +16,6 @@ from test_collective_base import TestCollectiveRunnerBase, runtime_main
import paddle
import paddle.fluid as fluid
import paddle.fluid.layers as layers
from paddle.fluid import core
paddle.enable_static()
......@@ -30,9 +29,11 @@ class TestCollectiveReduce(TestCollectiveRunnerBase):
ring_id = 0
rootid = 1
with fluid.program_guard(main_prog, startup_program):
tindata = layers.data(
name="tindata", shape=[10, 1000], dtype='float32'
tindata = paddle.static.data(
name="tindata", shape=[-1, 10, 1000], dtype='float32'
)
tindata.desc.set_need_check_feed(False)
toutdata = main_prog.current_block().create_var(
name="outofreduce",
dtype='float32',
......
......@@ -27,11 +27,10 @@ class TestCollectiveScatterAPI(TestCollectiveAPIRunnerBase):
def get_model(self, main_prog, startup_program, rank):
with fluid.program_guard(main_prog, startup_program):
tindata = layers.data(
tindata = paddle.static.data(
name="tindata",
shape=[10, 1000],
dtype='float32',
append_batch_size=False,
)
toutdata = layers.fill_constant(
shape=[5, 1000], dtype='float32', value=1.0
......
......@@ -16,7 +16,6 @@ from test_collective_base import TestCollectiveRunnerBase, runtime_main
import paddle
import paddle.fluid as fluid
import paddle.fluid.layers as layers
from paddle.fluid import core
paddle.enable_static()
......@@ -30,9 +29,10 @@ class TestCollectiveScatter(TestCollectiveRunnerBase):
ring_id = 0
rootid = 1
with fluid.program_guard(main_prog, startup_program):
tindata = layers.data(
name="tindata", shape=[10, 1000], dtype='float32'
tindata = paddle.static.data(
name="tindata", shape=[-1, 10, 1000], dtype='float32'
)
tindata.desc.set_need_check_feed(False)
toutdata = main_prog.current_block().create_var(
name="outofreduce",
dtype='float32',
......
......@@ -16,7 +16,6 @@ from test_collective_api_base import TestCollectiveAPIRunnerBase, runtime_main
import paddle
import paddle.fluid as fluid
import paddle.fluid.layers as layers
paddle.enable_static()
......@@ -27,11 +26,10 @@ class TestCollectiveSendRecvAPI(TestCollectiveAPIRunnerBase):
def get_model(self, main_prog, startup_program, rank):
with fluid.program_guard(main_prog, startup_program):
tindata = layers.data(
tindata = paddle.static.data(
name="tindata",
shape=[10, 1000],
dtype='float32',
append_batch_size=False,
)
if rank == 0:
paddle.distributed.send(tindata, dst=1)
......
......@@ -16,7 +16,6 @@ from test_collective_base import TestCollectiveRunnerBase, runtime_main
import paddle
import paddle.fluid as fluid
import paddle.fluid.layers as layers
paddle.enable_static()
......@@ -28,12 +27,12 @@ class TestCollectiveSendRecv(TestCollectiveRunnerBase):
def get_model(self, main_prog, startup_program):
ring_id = self.global_ring_id
with fluid.program_guard(main_prog, startup_program):
tindata = layers.data(
tindata = paddle.static.data(
name="tindata",
shape=[10, 1000],
dtype='float64',
append_batch_size=False,
)
tindata.desc.set_need_check_feed(False)
if self.rank == 0:
main_prog.global_block().append_op(
type="send_v2",
......
......@@ -17,7 +17,6 @@ from test_collective_base import TestCollectiveRunnerBase, runtime_main
import paddle
import paddle.fluid as fluid
import paddle.fluid.layers as layers
paddle.enable_static()
......@@ -29,12 +28,12 @@ class TestCollectiveSendRecv(TestCollectiveRunnerBase):
def get_model(self, main_prog, startup_program):
ring_id = self.global_ring_id
with fluid.program_guard(main_prog, startup_program):
tindata = layers.data(
tindata = paddle.static.data(
name="tindata",
shape=[10, 1000],
dtype='float64',
append_batch_size=False,
)
tindata.desc.set_need_check_feed(False)
if self.rank == 0:
data1 = fluid.layers.assign(
np.array([[0, 1, 2]], dtype='float32')
......
......@@ -16,7 +16,6 @@ from test_collective_base import TestCollectiveRunnerBase, runtime_main
import paddle
import paddle.fluid as fluid
import paddle.fluid.layers as layers
paddle.enable_static()
......@@ -28,12 +27,12 @@ class TestCollectiveSendRecvDynamicShape(TestCollectiveRunnerBase):
def get_model(self, main_prog, startup_program):
ring_id = self.global_ring_id
with fluid.program_guard(main_prog, startup_program):
tindata = layers.data(
tindata = paddle.static.data(
name="tindata",
shape=[10, 1000],
shape=[-1, 10, 1000],
dtype='float64',
append_batch_size=False,
)
tindata.desc.set_need_check_feed(False)
if self.rank == 0:
main_prog.global_block().append_op(
type="send_v2",
......
......@@ -16,7 +16,6 @@ from test_collective_base import TestCollectiveRunnerBase, runtime_main
import paddle
import paddle.fluid as fluid
import paddle.fluid.layers as layers
from paddle.fluid import core
paddle.enable_static()
......@@ -30,9 +29,10 @@ class TestCollectiveAllGather(TestCollectiveRunnerBase):
ring_id = 0
nranks = 2
with fluid.program_guard(main_prog, startup_program):
tindata = layers.data(
name="tindata", shape=[10, 1000], dtype='float32'
tindata = paddle.static.data(
name="tindata", shape=[-1, 10, 1000], dtype='float32'
)
tindata.desc.set_need_check_feed(False)
toutdata = main_prog.current_block().create_var(
name="outofsplit",
dtype='float32',
......
......@@ -29,8 +29,10 @@ fluid.default_main_program().random_seed = 1
class TestDistMnist2x2(TestDistRunnerBase):
def get_model(self, batch_size=2):
# Input data
images = fluid.layers.data(name='pixel', shape=[1, 28, 28], dtype=DTYPE)
label = fluid.layers.data(name='label', shape=[1], dtype='int64')
images = paddle.static.data(
name='pixel', shape=[-1, 1, 28, 28], dtype=DTYPE
)
label = paddle.static.data(name='label', shape=[-1, 1], dtype='int64')
# Train program
predict = cnn_model(images)
......
......@@ -85,10 +85,12 @@ class TestDistMnist2x2(TestDistRunnerBase):
def get_model(self, batch_size=2, use_dgc=False, dist_strategy=None):
# Input data
with fluid.device_guard("gpu:0"):
images = fluid.layers.data(
name='pixel', shape=[1, 28, 28], dtype=DTYPE
images = paddle.static.data(
name='pixel', shape=[-1, 1, 28, 28], dtype=DTYPE
)
label = paddle.static.data(
name='label', shape=[-1, 1], dtype='int64'
)
label = fluid.layers.data(name='label', shape=[1], dtype='int64')
if dist_strategy:
data_loader = fluid.io.DataLoader.from_generator(
......
......@@ -85,10 +85,12 @@ class TestDistMnist2x2(TestDistRunnerBase):
def get_model(self, batch_size=2, use_dgc=False, dist_strategy=None):
# Input data
with fluid.device_guard("gpu:0"):
images = fluid.layers.data(
name='pixel', shape=[1, 28, 28], dtype=DTYPE
images = paddle.static.data(
name='pixel', shape=[-1, 1, 28, 28], dtype=DTYPE
)
label = paddle.static.data(
name='label', shape=[-1, 1], dtype='int64'
)
label = fluid.layers.data(name='label', shape=[1], dtype='int64')
if dist_strategy:
data_loader = fluid.io.DataLoader.from_generator(
......
......@@ -77,10 +77,12 @@ class TestDistMnist2x2(TestDistRunnerBase):
if dist_strategy:
fleet.init(is_collective=True)
with fluid.device_guard("gpu:0"):
images = fluid.layers.data(
name='pixel', shape=[1, 28, 28], dtype=DTYPE
images = paddle.static.data(
name='pixel', shape=[-1, 1, 28, 28], dtype=DTYPE
)
label = paddle.static.data(
name='label', shape=[-1, 1], dtype='int64'
)
label = fluid.layers.data(name='label', shape=[1], dtype='int64')
if dist_strategy:
data_loader = fluid.io.DataLoader.from_generator(
......
......@@ -29,9 +29,9 @@ paddle.enable_static()
class TestCommunicatorHalfAsyncEnd2End(unittest.TestCase):
def net(self):
x = fluid.layers.data(name='x', shape=[13], dtype='float32')
x = paddle.static.data(name='x', shape=[-1, 13], dtype='float32')
y_predict = paddle.static.nn.fc(x, size=1, activation=None)
y = fluid.layers.data(name='y', shape=[1], dtype='float32')
y = paddle.static.data(name='y', shape=[-1, 1], dtype='float32')
cost = paddle.nn.functional.square_error_cost(input=y_predict, label=y)
avg_cost = paddle.mean(cost)
......
......@@ -27,8 +27,8 @@ import paddle.fluid as fluid
class TestCommunicator(unittest.TestCase):
def net(self):
x = fluid.layers.data(name='x', shape=[1], dtype='float32')
y = fluid.layers.data(name='y', shape=[1], dtype='float32')
x = paddle.static.data(name='x', shape=[-1, 1], dtype='float32')
y = paddle.static.data(name='y', shape=[-1, 1], dtype='float32')
cost = paddle.nn.functional.square_error_cost(input=x, label=y)
avg_cost = paddle.mean(cost)
return avg_cost
......
......@@ -270,8 +270,8 @@ class TestHalfAsyncStrategy(unittest.TestCase):
class TestDebugInfo(unittest.TestCase):
def test_debug_info(self):
x = fluid.layers.data(name='x', shape=[1], dtype='float32')
y = fluid.layers.data(name='y', shape=[1], dtype='float32')
x = paddle.static.data(name='x', shape=[-1, 1], dtype='float32')
y = paddle.static.data(name='y', shape=[-1, 1], dtype='float32')
y_predict = paddle.static.nn.fc(x, size=1, activation=None)
cost = paddle.nn.functional.square_error_cost(input=y_predict, label=y)
avg_cost = paddle.mean(cost)
......
......@@ -30,12 +30,8 @@ class TestFleetFP16CompressOptimizer(unittest.TestCase):
def net(self, main_prog, startup_prog, dtype='float32'):
with fluid.program_guard(main_prog, startup_prog):
input_x = paddle.fluid.layers.data(
name="x", shape=[32], dtype=dtype
)
input_y = paddle.fluid.layers.data(
name="y", shape=[1], dtype='int64'
)
input_x = paddle.static.data(name="x", shape=[-1, 32], dtype=dtype)
input_y = paddle.static.data(name="y", shape=[-1, 1], dtype='int64')
fc_1 = paddle.static.nn.fc(x=input_x, size=64, activation='tanh')
fc_2 = paddle.static.nn.fc(x=fc_1, size=64, activation='tanh')
......
......@@ -60,12 +60,10 @@ class TestFleetGraphExecutionMetaOptimizer(unittest.TestCase):
import paddle.distributed.fleet as fleet
fleet.init(is_collective=True)
input_x = paddle.fluid.layers.data(
name="x", shape=[32], dtype='float32'
)
input_y = paddle.fluid.layers.data(
name="y", shape=[1], dtype='int64'
input_x = paddle.static.data(
name="x", shape=[-1, 32], dtype='float32'
)
input_y = paddle.static.data(name="y", shape=[-1, 1], dtype='int64')
fc_1 = paddle.static.nn.fc(x=input_x, size=64, activation='tanh')
fc_2 = paddle.static.nn.fc(x=fc_1, size=64, activation='tanh')
......@@ -126,12 +124,10 @@ class TestFleetGraphExecutionMetaOptimizer(unittest.TestCase):
import paddle.distributed.fleet as fleet
fleet.init(is_collective=True)
input_x = paddle.fluid.layers.data(
name="x", shape=[32], dtype='float32'
)
input_y = paddle.fluid.layers.data(
name="y", shape=[1], dtype='int64'
input_x = paddle.static.data(
name="x", shape=[-1, 32], dtype='float32'
)
input_y = paddle.static.data(name="y", shape=[-1, 1], dtype='int64')
fc_1 = paddle.static.nn.fc(x=input_x, size=64, activation='tanh')
fc_2 = paddle.static.nn.fc(x=fc_1, size=64, activation='tanh')
......@@ -204,12 +200,10 @@ class TestFleetGraphExecutionMetaOptimizer(unittest.TestCase):
import paddle.distributed.fleet as fleet
fleet.init(is_collective=True)
input_x = paddle.fluid.layers.data(
name="x", shape=[32], dtype='float32'
)
input_y = paddle.fluid.layers.data(
name="y", shape=[1], dtype='int64'
input_x = paddle.static.data(
name="x", shape=[-1, 32], dtype='float32'
)
input_y = paddle.static.data(name="y", shape=[-1, 1], dtype='int64')
fc_1 = paddle.static.nn.fc(x=input_x, size=64, activation='tanh')
fc_2 = paddle.static.nn.fc(x=fc_1, size=64, activation='tanh')
......@@ -269,12 +263,10 @@ class TestFleetGraphExecutionMetaOptimizer(unittest.TestCase):
import paddle.distributed.fleet as fleet
fleet.init(is_collective=True)
input_x = paddle.fluid.layers.data(
name="x", shape=[32], dtype='float32'
)
input_y = paddle.fluid.layers.data(
name="y", shape=[1], dtype='int64'
input_x = paddle.static.data(
name="x", shape=[-1, 32], dtype='float32'
)
input_y = paddle.static.data(name="y", shape=[-1, 1], dtype='int64')
fc_1 = paddle.static.nn.fc(x=input_x, size=64, activation='tanh')
fc_2 = paddle.static.nn.fc(x=fc_1, size=64, activation='tanh')
......
......@@ -45,12 +45,10 @@ class TestFleetGraphExecutionMetaOptimizer(unittest.TestCase):
def node_func():
role = role_maker.PaddleCloudRoleMaker(is_collective=True)
fleet.init(role)
input_x = paddle.fluid.layers.data(
name="x", shape=[32], dtype='float32'
)
input_y = paddle.fluid.layers.data(
name="y", shape=[1], dtype='int64'
input_x = paddle.static.data(
name="x", shape=[-1, 32], dtype='float32'
)
input_y = paddle.static.data(name="y", shape=[-1, 1], dtype='int64')
fc_1 = paddle.static.nn.fc(x=input_x, size=64, activation='tanh')
fc_2 = paddle.static.nn.fc(x=fc_1, size=64, activation='tanh')
......
......@@ -33,11 +33,11 @@ class TestFleetLambMetaOptimizer(unittest.TestCase):
def net(self, main_prog, startup_prog):
with fluid.program_guard(main_prog, startup_prog):
with fluid.unique_name.guard():
input_x = paddle.fluid.layers.data(
name="x", shape=[32], dtype='float32'
input_x = paddle.static.data(
name="x", shape=[-1, 32], dtype='float32'
)
input_y = paddle.fluid.layers.data(
name="y", shape=[1], dtype='int64'
input_y = paddle.static.data(
name="y", shape=[-1, 1], dtype='int64'
)
fc_1 = paddle.static.nn.fc(
......@@ -117,10 +117,8 @@ class TestFleetLambMetaOptimizer(unittest.TestCase):
def test_lamb_apply_with_amp(self):
role = role_maker.PaddleCloudRoleMaker(is_collective=True)
fleet.init(role)
input_x = paddle.fluid.layers.data(
name="x", shape=[32], dtype='float32'
)
input_y = paddle.fluid.layers.data(name="y", shape=[1], dtype='int64')
input_x = paddle.static.data(name="x", shape=[-1, 32], dtype='float32')
input_y = paddle.static.data(name="y", shape=[-1, 1], dtype='int64')
fc_1 = paddle.static.nn.fc(x=input_x, size=64, activation='tanh')
fc_2 = paddle.static.nn.fc(x=fc_1, size=64, activation='tanh')
......
......@@ -33,11 +33,11 @@ class TestFleetLarsMetaOptimizer(unittest.TestCase):
def net(self, main_prog, startup_prog):
with fluid.program_guard(main_prog, startup_prog):
with fluid.unique_name.guard():
input_x = paddle.fluid.layers.data(
name="x", shape=[32], dtype='float32'
input_x = paddle.static.data(
name="x", shape=[-1, 32], dtype='float32'
)
input_y = paddle.fluid.layers.data(
name="y", shape=[1], dtype='int64'
input_y = paddle.static.data(
name="y", shape=[-1, 1], dtype='int64'
)
fc_1 = paddle.static.nn.fc(
......@@ -122,10 +122,8 @@ class TestFleetLarsMetaOptimizer(unittest.TestCase):
def test_lars_apply_with_amp(self):
role = role_maker.PaddleCloudRoleMaker(is_collective=True)
fleet.init(role)
input_x = paddle.fluid.layers.data(
name="x", shape=[32], dtype='float32'
)
input_y = paddle.fluid.layers.data(name="y", shape=[1], dtype='int64')
input_x = paddle.static.data(name="x", shape=[-1, 32], dtype='float32')
input_y = paddle.static.data(name="y", shape=[-1, 1], dtype='int64')
fc_1 = paddle.static.nn.fc(x=input_x, size=64, activation='tanh')
fc_2 = paddle.static.nn.fc(x=fc_1, size=64, activation='tanh')
......
......@@ -29,11 +29,11 @@ class TestFleetMetaOptimizerBase(unittest.TestCase):
with fluid.unique_name.guard():
role = role_maker.PaddleCloudRoleMaker(is_collective=True)
fleet.init(role)
input_x = paddle.fluid.layers.data(
name="x", shape=[32], dtype='float32'
input_x = paddle.static.data(
name="x", shape=[-1, 32], dtype='float32'
)
input_y = paddle.fluid.layers.data(
name="y", shape=[1], dtype='int64'
input_y = paddle.static.data(
name="y", shape=[-1, 1], dtype='int64'
)
fc_1 = paddle.static.nn.fc(
......
......@@ -33,14 +33,12 @@ class TestFleetMetaOptimizer(unittest.TestCase):
def net(self):
with static.device_guard("gpu:0"):
input_x = paddle.fluid.layers.data(
name="x", shape=[32], dtype='float32'
input_x = paddle.static.data(
name="x", shape=[-1, 32], dtype='float32'
)
input_y = paddle.fluid.layers.data(
name="y", shape=[1], dtype='int64'
)
input_z = paddle.fluid.layers.data(
name="z", shape=[1], dtype="float32"
input_y = paddle.static.data(name="y", shape=[-1, 1], dtype='int64')
input_z = paddle.static.data(
name="z", shape=[-1, 1], dtype="float32"
)
with static.device_guard("gpu:all"):
input_z = input_z * 1.0
......
......@@ -34,12 +34,10 @@ class TestFleetMetaOptimizer(unittest.TestCase):
role = role_maker.PaddleCloudRoleMaker(is_collective=True)
fleet.init(role)
with paddle.fluid.device_guard("gpu:0"):
input_x = paddle.fluid.layers.data(
name="x", shape=[32], dtype='float32'
)
input_y = paddle.fluid.layers.data(
name="y", shape=[1], dtype='int64'
input_x = paddle.static.data(
name="x", shape=[-1, 32], dtype='float32'
)
input_y = paddle.static.data(name="y", shape=[-1, 1], dtype='int64')
fc_1 = paddle.static.nn.fc(x=input_x, size=64, activation='tanh')
fc_2 = paddle.static.nn.fc(x=fc_1, size=64, activation='tanh')
fc_3 = paddle.static.nn.fc(x=fc_2, size=64, activation='tanh')
......
......@@ -33,10 +33,8 @@ class TestFleetMetaOptimizer(unittest.TestCase):
role = role_maker.PaddleCloudRoleMaker(is_collective=True)
fleet.init(role)
input_x = paddle.fluid.layers.data(
name="x", shape=[32], dtype='float32'
)
input_y = paddle.fluid.layers.data(name="y", shape=[1], dtype='int64')
input_x = paddle.static.data(name="x", shape=[-1, 32], dtype='float32')
input_y = paddle.static.data(name="y", shape=[-1, 1], dtype='int64')
fc_1 = paddle.static.nn.fc(x=input_x, size=64, activation='tanh')
fc_2 = paddle.static.nn.fc(x=fc_1, size=64, activation='tanh')
......
......@@ -443,9 +443,9 @@ class TestGlooWithCloudRoleMaker(unittest.TestCase):
os.environ["PADDLE_GLOO_FS_PATH"] = tmp
def net():
x = paddle.fluid.layers.data(name='x', shape=[13], dtype='float32')
x = paddle.static.data(name='x', shape=[-1, 13], dtype='float32')
y_predict = paddle.static.nn.fc(x, size=1, activation=None)
y = paddle.fluid.layers.data(name='y', shape=[1], dtype='float32')
y = paddle.static.data(name='y', shape=[-1, 1], dtype='float32')
cost = paddle.nn.functional.square_error_cost(
input=y_predict, label=y
)
......
......@@ -16,7 +16,6 @@ from test_collective_base import TestCollectiveRunnerBase, runtime_main
import paddle
import paddle.fluid as fluid
import paddle.fluid.layers as layers
from paddle.fluid import core
paddle.enable_static()
......@@ -30,9 +29,10 @@ class TestCollectiveAllGather(TestCollectiveRunnerBase):
ring_id = 0
nranks = 2
with fluid.program_guard(main_prog, startup_program):
tindata = layers.data(
name="tindata", shape=[10, 1000], dtype='float32'
tindata = paddle.static.data(
name="tindata", shape=[-1, 10, 1000], dtype='float32'
)
tindata.desc.set_need_check_feed(False)
toutdata = main_prog.current_block().create_var(
name="outofgather",
dtype='float32',
......
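The collective tests additionally gain a tindata.desc.set_need_check_feed(False) call after each migrated declaration. Variables created by paddle.static.data are marked so that the executor validates the shape and dtype of fed data, a check the old fluid.layers.data variables did not enable; these tests opt out to keep their original feeding behavior. A sketch of the pattern, assuming it runs inside the tests' program guard:

.. code-block:: python

    import paddle

    paddle.enable_static()

    tindata = paddle.static.data(
        name='tindata', shape=[-1, 10, 1000], dtype='float32'
    )
    # Disable the feed shape/dtype validation that paddle.static.data
    # enables by default, restoring the old unchecked feeding behavior.
    tindata.desc.set_need_check_feed(False)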
......@@ -16,7 +16,6 @@ from test_collective_base import TestCollectiveRunnerBase, runtime_main
import paddle
import paddle.fluid as fluid
import paddle.fluid.layers as layers
paddle.enable_static()
......@@ -29,9 +28,10 @@ class TestCollectiveReduceScatter(TestCollectiveRunnerBase):
ring_id = 0
nranks = 2
with fluid.program_guard(main_prog, startup_program):
tindata = layers.data(
name="tindata", shape=[10, 1000], dtype='float32'
tindata = paddle.static.data(
name="tindata", shape=[-1, 10, 1000], dtype='float32'
)
tindata.desc.set_need_check_feed(False)
toutdata = fluid.layers.collective._c_reducescatter(tindata, nranks)
toutdata = fluid.layers.collective._c_sync_comm_stream(toutdata, 0)
return toutdata
......
......@@ -16,7 +16,6 @@ from test_collective_base import TestCollectiveRunnerBase, runtime_main
import paddle
import paddle.fluid as fluid
import paddle.fluid.layers as layers
from paddle.fluid import core
paddle.enable_static()
......@@ -30,9 +29,10 @@ class TestCollectiveReduceScatter(TestCollectiveRunnerBase):
ring_id = 0
nranks = 2
with fluid.program_guard(main_prog, startup_program):
tindata = layers.data(
name="tindata", shape=[10, 1000], dtype='float32'
tindata = paddle.static.data(
name="tindata", shape=[-1, 10, 1000], dtype='float32'
)
tindata.desc.set_need_check_feed(False)
toutdata = main_prog.current_block().create_var(
name="outofrs",
dtype='float32',
......
......@@ -72,8 +72,10 @@ def cnn_model(data):
class TestDistMnist2x2(TestDistRunnerBase):
def get_model(self, batch_size=2, single_device=False):
# Input data
images = fluid.layers.data(name='pixel', shape=[1, 28, 28], dtype=DTYPE)
label = fluid.layers.data(name='label', shape=[1], dtype='int64')
images = paddle.static.data(
name='pixel', shape=[-1, 1, 28, 28], dtype=DTYPE
)
label = paddle.static.data(name='label', shape=[-1, 1], dtype='int64')
# Train program
predict = cnn_model(images)
......
......@@ -33,26 +33,23 @@ class TestDistCTR2x2(TestDistRunnerBase):
dnn_input_dim, lr_input_dim = dist_ctr_reader.load_data_meta()
""" network definition """
dnn_data = fluid.layers.data(
dnn_data = paddle.static.data(
name="dnn_data",
shape=[-1, 1],
dtype="int64",
lod_level=1,
append_batch_size=False,
)
lr_data = fluid.layers.data(
lr_data = paddle.static.data(
name="lr_data",
shape=[-1, 1],
dtype="int64",
lod_level=1,
append_batch_size=False,
)
label = fluid.layers.data(
label = paddle.static.data(
name="click",
shape=[-1, 1],
dtype="int64",
lod_level=0,
append_batch_size=False,
)
# build dnn model
......
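Where the old call already passed append_batch_size=False, the shape was already complete (for example [-1, 1]), so the migration is purely mechanical: paddle.static.data never appends a batch axis, the flag is dropped, and the shape, dtype, and lod_level carry over verbatim. A sketch of one such declaration:

.. code-block:: python

    import paddle

    paddle.enable_static()

    # append_batch_size=False meant "this shape is already complete";
    # that is the only behavior paddle.static.data has, so the argument
    # simply disappears while shape, dtype, and lod_level are kept.
    dnn_data = paddle.static.data(
        name='dnn_data', shape=[-1, 1], dtype='int64', lod_level=1
    )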
......@@ -62,26 +62,23 @@ class TestDistCTR2x2(FleetDistRunnerBase):
"""
dnn_input_dim, lr_input_dim = int(1e5), int(1e5)
dnn_data = fluid.layers.data(
dnn_data = paddle.static.data(
name="dnn_data",
shape=[-1, 1],
dtype="int64",
lod_level=1,
append_batch_size=False,
)
lr_data = fluid.layers.data(
lr_data = paddle.static.data(
name="lr_data",
shape=[-1, 1],
dtype="int64",
lod_level=1,
append_batch_size=False,
)
label = fluid.layers.data(
label = paddle.static.data(
name="click",
shape=[-1, 1],
dtype="int64",
lod_level=0,
append_batch_size=False,
)
datas = [dnn_data, lr_data, label]
......
......@@ -49,26 +49,23 @@ class TestHeterPipelinePsCTR2x2(FleetDistHeterRunnerBase):
dnn_input_dim, lr_input_dim = int(1e5), int(1e5)
with fluid.device_guard("cpu"):
dnn_data = fluid.layers.data(
dnn_data = paddle.static.data(
name="dnn_data",
shape=[-1, 1],
dtype="int64",
lod_level=1,
append_batch_size=False,
)
lr_data = fluid.layers.data(
lr_data = paddle.static.data(
name="lr_data",
shape=[-1, 1],
dtype="int64",
lod_level=1,
append_batch_size=False,
)
label = fluid.layers.data(
label = paddle.static.data(
name="click",
shape=[-1, 1],
dtype="float32",
lod_level=0,
append_batch_size=False,
)
datas = [dnn_data, lr_data, label]
......
......@@ -74,8 +74,10 @@ def cnn_model(data):
class TestFleetMetaOptimizerPrecision(TestDistRunnerBase):
def get_model(self, batch_size=2, single_device=False):
# Input data
images = fluid.layers.data(name='pixel', shape=[1, 28, 28], dtype=DTYPE)
label = fluid.layers.data(name='label', shape=[1], dtype='int64')
images = paddle.static.data(
name='pixel', shape=[-1, 1, 28, 28], dtype=DTYPE
)
label = paddle.static.data(name='label', shape=[-1, 1], dtype='int64')
# Train program
predict = cnn_model(images)
......
......@@ -74,8 +74,10 @@ def cnn_model(data):
class TestFleetMetaOptimizerFuseAllReducePrecision(TestDistRunnerBase):
def get_model(self, batch_size=2, single_device=False):
# Input data
images = fluid.layers.data(name='pixel', shape=[1, 28, 28], dtype=DTYPE)
label = fluid.layers.data(name='label', shape=[1], dtype='int64')
images = paddle.static.data(
name='pixel', shape=[-1, 1, 28, 28], dtype=DTYPE
)
label = paddle.static.data(name='label', shape=[-1, 1], dtype='int64')
# Train program
predict = cnn_model(images)
......
......@@ -93,18 +93,18 @@ def train_network(
is_pyreader=False,
):
# query
q = fluid.layers.data(
name="query_ids", shape=[1], dtype="int64", lod_level=1
q = paddle.static.data(
name="query_ids", shape=[-1, 1], dtype="int64", lod_level=1
)
# label data
label = fluid.layers.data(name="label", shape=[1], dtype="int64")
label = paddle.static.data(name="label", shape=[-1, 1], dtype="int64")
# pt
pt = fluid.layers.data(
name="pos_title_ids", shape=[1], dtype="int64", lod_level=1
pt = paddle.static.data(
name="pos_title_ids", shape=[-1, 1], dtype="int64", lod_level=1
)
# nt
nt = fluid.layers.data(
name="neg_title_ids", shape=[1], dtype="int64", lod_level=1
nt = paddle.static.data(
name="neg_title_ids", shape=[-1, 1], dtype="int64", lod_level=1
)
datas = [q, label, pt, nt]
......
......@@ -52,26 +52,23 @@ class TestDistCTR2x2(FleetDistRunnerBase):
"""
dnn_input_dim, lr_input_dim = 10, 10
dnn_data = fluid.layers.data(
dnn_data = paddle.static.data(
name="dnn_data",
shape=[-1, 1],
dtype="int64",
lod_level=1,
append_batch_size=False,
)
lr_data = fluid.layers.data(
lr_data = paddle.static.data(
name="lr_data",
shape=[-1, 1],
dtype="int64",
lod_level=1,
append_batch_size=False,
)
label = fluid.layers.data(
label = paddle.static.data(
name="click",
shape=[-1, 1],
dtype="int64",
lod_level=0,
append_batch_size=False,
)
datas = [dnn_data, lr_data, label]
......
......@@ -73,8 +73,10 @@ def cnn_model(data):
class TestDistMnist2x2(TestDistRunnerBase):
def get_model(self, batch_size=2, use_dgc=False, dist_strategy=None):
# Input data
images = fluid.layers.data(name='pixel', shape=[1, 28, 28], dtype=DTYPE)
label = fluid.layers.data(name='label', shape=[1], dtype='int64')
images = paddle.static.data(
name='pixel', shape=[-1, 1, 28, 28], dtype=DTYPE
)
label = paddle.static.data(name='label', shape=[-1, 1], dtype='int64')
# Train program
predict = cnn_model(images)
......
......@@ -38,8 +38,10 @@ def test_merge_reader(repeat_batch_size=8):
class TestDistMnist2x2(TestDistRunnerBase):
def get_model(self, batch_size=2):
# Input data
images = fluid.layers.data(name='pixel', shape=[1, 28, 28], dtype=DTYPE)
label = fluid.layers.data(name='label', shape=[1], dtype='int64')
images = paddle.static.data(
name='pixel', shape=[-1, 1, 28, 28], dtype=DTYPE
)
label = paddle.static.data(name='label', shape=[-1, 1], dtype='int64')
# Train program
predict = cnn_model(images)
......
......@@ -32,8 +32,10 @@ fluid.default_main_program().random_seed = 1
class TestDistMnist2x2(TestDistRunnerBase):
def get_model(self, batch_size=2):
# Input data
images = fluid.layers.data(name='pixel', shape=[1, 28, 28], dtype=DTYPE)
label = fluid.layers.data(name='label', shape=[1], dtype='int64')
images = paddle.static.data(
name='pixel', shape=[-1, 1, 28, 28], dtype=DTYPE
)
label = paddle.static.data(name='label', shape=[-1, 1], dtype='int64')
# Train program
predict = cnn_model(images)
......
......@@ -29,8 +29,10 @@ fluid.default_main_program().random_seed = 1
class TestDistMnist2x2(TestDistRunnerBase):
def get_model(self, batch_size=2):
# Input data
images = fluid.layers.data(name='pixel', shape=[1, 28, 28], dtype=DTYPE)
label = fluid.layers.data(name='label', shape=[1], dtype='int64')
images = paddle.static.data(
name='pixel', shape=[-1, 1, 28, 28], dtype=DTYPE
)
label = paddle.static.data(name='label', shape=[-1, 1], dtype='int64')
# Train program
predict = cnn_model(images)
......
......@@ -209,10 +209,10 @@ class SE_ResNeXt:
class DistSeResneXt2x2(TestDistRunnerBase):
def get_model(self, batch_size=2, use_dgc=False):
# Input data
image = fluid.layers.data(
name="data", shape=[3, 224, 224], dtype='float32'
image = paddle.static.data(
name="data", shape=[-1, 3, 224, 224], dtype='float32'
)
label = fluid.layers.data(name="int64", shape=[1], dtype='int64')
label = paddle.static.data(name="int64", shape=[-1, 1], dtype='int64')
# Train program
model = SE_ResNeXt(layers=50)
......
......@@ -38,12 +38,10 @@ def runtime_main():
fleet.init(role)
with fluid.program_guard(train_prog, startup_prog):
with fluid.unique_name.guard():
input_x = paddle.fluid.layers.data(
name="x", shape=[32], dtype='float32'
)
input_y = paddle.fluid.layers.data(
name="y", shape=[1], dtype='int64'
input_x = paddle.static.data(
name="x", shape=[-1, 32], dtype='float32'
)
input_y = paddle.static.data(name="y", shape=[-1, 1], dtype='int64')
fc_1 = paddle.static.nn.fc(x=input_x, size=64, activation='tanh')
fc_2 = paddle.static.nn.fc(x=fc_1, size=256, activation='tanh')
......
......@@ -95,8 +95,8 @@ def conv_net(
def inference_network(dict_dim):
data = fluid.layers.data(
name="words", shape=[1], dtype="int64", lod_level=1
data = paddle.static.data(
name="words", shape=[-1, 1], dtype="int64", lod_level=1
)
out = conv_net(data, dict_dim)
return out
......@@ -125,10 +125,10 @@ class TestDistTextClassification2x2(TestDistRunnerBase):
word_dict, dict_dim = get_worddict(vocab)
# Input data
data = fluid.layers.data(
name="words", shape=[1], dtype="int64", lod_level=1
data = paddle.static.data(
name="words", shape=[-1, 1], dtype="int64", lod_level=1
)
label = fluid.layers.data(name='label', shape=[1], dtype='int64')
label = paddle.static.data(name='label', shape=[-1, 1], dtype='int64')
# Train program
predict = conv_net(data, dict_dim)
......
......@@ -1512,14 +1512,13 @@ def make_all_inputs(input_fields):
"""
inputs = []
for input_field in input_fields:
input_var = layers.data(
input_var = paddle.static.data(
name=input_field,
shape=input_descs[input_field][0],
dtype=input_descs[input_field][1],
lod_level=input_descs[input_field][2]
if len(input_descs[input_field]) == 3
else 0,
append_batch_size=False,
)
inputs.append(input_var)
return inputs
......
......@@ -107,13 +107,21 @@ class TestDistWord2vec2x2(TestDistRunnerBase):
word_dict = paddle.dataset.imikolov.build_dict()
dict_size = len(word_dict)
first_word = fluid.layers.data(name='firstw', shape=[1], dtype='int64')
second_word = fluid.layers.data(
name='secondw', shape=[1], dtype='int64'
first_word = paddle.static.data(
name='firstw', shape=[-1, 1], dtype='int64'
)
second_word = paddle.static.data(
name='secondw', shape=[-1, 1], dtype='int64'
)
third_word = paddle.static.data(
name='thirdw', shape=[-1, 1], dtype='int64'
)
forth_word = paddle.static.data(
name='forthw', shape=[-1, 1], dtype='int64'
)
next_word = paddle.static.data(
name='nextw', shape=[-1, 1], dtype='int64'
)
third_word = fluid.layers.data(name='thirdw', shape=[1], dtype='int64')
forth_word = fluid.layers.data(name='forthw', shape=[1], dtype='int64')
next_word = fluid.layers.data(name='nextw', shape=[1], dtype='int64')
avg_cost, predict_word = __network__(
[first_word, second_word, third_word, forth_word, next_word]
)
......
......@@ -24,7 +24,6 @@ from test_distribution import DistributionNumpy
import paddle
from paddle import fluid
from paddle.distribution import Normal
from paddle.fluid import layers
np.random.seed(2022)
......@@ -117,8 +116,8 @@ class NormalTest(unittest.TestCase):
self.static_other_loc = self.other_loc_np
self.static_other_scale = self.other_scale_np
with fluid.program_guard(self.test_program):
self.static_values = layers.data(
name='values', shape=[], dtype='float32'
self.static_values = paddle.static.data(
name='values', shape=[-1], dtype='float32'
)
def compare_with_numpy(self, fetch_list, sample_shape=7, tolerance=1e-6):
......@@ -237,8 +236,8 @@ class NormalTest3(NormalTest):
self.static_other_loc = self.other_loc_np
self.static_other_scale = self.other_scale_np
with fluid.program_guard(self.test_program):
self.static_values = layers.data(
name='values', shape=[dims], dtype='float32'
self.static_values = paddle.static.data(
name='values', shape=[-1, dims], dtype='float32'
)
......@@ -266,8 +265,8 @@ class NormalTest4(NormalTest):
self.static_other_loc = self.other_loc_np
self.static_other_scale = self.other_scale_np
with fluid.program_guard(self.test_program):
self.static_values = layers.data(
name='values', shape=[dims], dtype='float32'
self.static_values = paddle.static.data(
name='values', shape=[-1, dims], dtype='float32'
)
......@@ -302,8 +301,8 @@ class NormalTest5(NormalTest):
self.static_other_loc = self.other_loc_np
self.static_other_scale = self.other_scale_np
with fluid.program_guard(self.test_program):
self.static_values = layers.data(
name='values', shape=[dims], dtype='float64'
self.static_values = paddle.static.data(
name='values', shape=[-1, dims], dtype='float64'
)
......@@ -334,20 +333,20 @@ class NormalTest6(NormalTest):
def init_static_data(self, batch_size, dims):
with fluid.program_guard(self.test_program):
self.static_loc = layers.data(
name='loc', shape=[dims], dtype='float32'
self.static_loc = paddle.static.data(
name='loc', shape=[-1, dims], dtype='float32'
)
self.static_scale = layers.data(
name='scale', shape=[dims], dtype='float32'
self.static_scale = paddle.static.data(
name='scale', shape=[-1, dims], dtype='float32'
)
self.static_values = layers.data(
name='values', shape=[dims], dtype='float32'
self.static_values = paddle.static.data(
name='values', shape=[-1, dims], dtype='float32'
)
self.static_other_loc = layers.data(
name='other_loc', shape=[dims], dtype='float32'
self.static_other_loc = paddle.static.data(
name='other_loc', shape=[-1, dims], dtype='float32'
)
self.static_other_scale = layers.data(
name='other_scale', shape=[dims], dtype='float32'
self.static_other_scale = paddle.static.data(
name='other_scale', shape=[-1, dims], dtype='float32'
)
......@@ -382,20 +381,20 @@ class NormalTest7(NormalTest):
def init_static_data(self, batch_size, dims):
with fluid.program_guard(self.test_program):
self.static_loc = layers.data(
name='loc', shape=[dims], dtype='float64'
self.static_loc = paddle.static.data(
name='loc', shape=[-1, dims], dtype='float64'
)
self.static_scale = layers.data(
name='scale', shape=[dims], dtype='float64'
self.static_scale = paddle.static.data(
name='scale', shape=[-1, dims], dtype='float64'
)
self.static_values = layers.data(
name='values', shape=[dims], dtype='float64'
self.static_values = paddle.static.data(
name='values', shape=[-1, dims], dtype='float64'
)
self.static_other_loc = layers.data(
name='other_loc', shape=[dims], dtype='float64'
self.static_other_loc = paddle.static.data(
name='other_loc', shape=[-1, dims], dtype='float64'
)
self.static_other_scale = layers.data(
name='other_scale', shape=[dims], dtype='float64'
self.static_other_scale = paddle.static.data(
name='other_scale', shape=[-1, dims], dtype='float64'
)
......@@ -430,20 +429,20 @@ class NormalTest8(NormalTest):
def init_static_data(self, batch_size, dims):
with fluid.program_guard(self.test_program):
self.static_loc = layers.data(
name='loc', shape=[dims], dtype='float64'
self.static_loc = paddle.static.data(
name='loc', shape=[-1, dims], dtype='float64'
)
self.static_scale = layers.data(
name='scale', shape=[dims], dtype='float64'
self.static_scale = paddle.static.data(
name='scale', shape=[-1, dims], dtype='float64'
)
self.static_values = layers.data(
name='values', shape=[dims], dtype='float32'
self.static_values = paddle.static.data(
name='values', shape=[-1, dims], dtype='float32'
)
self.static_other_loc = layers.data(
name='other_loc', shape=[dims], dtype='float64'
self.static_other_loc = paddle.static.data(
name='other_loc', shape=[-1, dims], dtype='float64'
)
self.static_other_scale = layers.data(
name='other_scale', shape=[dims], dtype='float64'
self.static_other_scale = paddle.static.data(
name='other_scale', shape=[-1, dims], dtype='float64'
)
......@@ -477,8 +476,8 @@ class NormalTest9(NormalTest):
self.static_other_loc = self.other_loc_np
self.static_other_scale = self.other_scale_np
with fluid.program_guard(self.test_program):
self.static_values = layers.data(
name='values', shape=[dims], dtype='float32'
self.static_values = paddle.static.data(
name='values', shape=[-1, dims], dtype='float32'
)
......@@ -512,8 +511,8 @@ class NormalTest10(NormalTest):
self.static_other_loc = self.other_loc_np
self.static_other_scale = self.other_scale_np
with fluid.program_guard(self.test_program):
self.static_values = layers.data(
name='values', shape=[dims], dtype='float32'
self.static_values = paddle.static.data(
name='values', shape=[-1, dims], dtype='float32'
)
......
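One subtle case in these distribution tests: the old layers.data(name='values', shape=[], ...) still yielded a rank-1 variable, because the batch axis was appended to the empty shape. The faithful paddle.static.data translation therefore keeps the rank by writing shape=[-1] rather than an empty shape. A sketch:

.. code-block:: python

    import paddle

    paddle.enable_static()

    # shape=[] under fluid.layers.data meant "only the appended batch axis",
    # i.e. a 1-D variable; with paddle.static.data that is written explicitly.
    values = paddle.static.data(name='values', shape=[-1], dtype='float32')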
......@@ -20,7 +20,6 @@ from test_distribution import DistributionNumpy
import paddle
from paddle import fluid
from paddle.distribution import Uniform
from paddle.fluid import layers
np.random.seed(2022)
......@@ -88,8 +87,8 @@ class UniformTest(unittest.TestCase):
self.static_low = self.low_np
self.static_high = self.high_np
with fluid.program_guard(self.test_program):
self.static_values = layers.data(
name='values', shape=[], dtype='float32'
self.static_values = paddle.static.data(
name='values', shape=[-1], dtype='float32'
)
def compare_with_numpy(self, fetch_list, sample_shape=7, tolerance=1e-6):
......@@ -170,8 +169,8 @@ class UniformTest3(UniformTest):
self.static_low = self.low_np
self.static_high = self.high_np
with fluid.program_guard(self.test_program):
self.static_values = layers.data(
name='values', shape=[dims], dtype='float32'
self.static_values = paddle.static.data(
name='values', shape=[-1, dims], dtype='float32'
)
......@@ -188,8 +187,8 @@ class UniformTest4(UniformTest):
self.static_low = self.low_np
self.static_high = self.high_np
with fluid.program_guard(self.test_program):
self.static_values = layers.data(
name='values', shape=[dims], dtype='float32'
self.static_values = paddle.static.data(
name='values', shape=[-1, dims], dtype='float32'
)
......@@ -211,8 +210,8 @@ class UniformTest5(UniformTest):
self.static_low = self.low_np
self.static_high = self.high_np
with fluid.program_guard(self.test_program):
self.static_values = layers.data(
name='values', shape=[dims], dtype='float64'
self.static_values = paddle.static.data(
name='values', shape=[-1, dims], dtype='float64'
)
......@@ -232,14 +231,14 @@ class UniformTest6(UniformTest):
def init_static_data(self, batch_size, dims):
with fluid.program_guard(self.test_program):
self.static_low = layers.data(
name='low', shape=[dims], dtype='float32'
self.static_low = paddle.static.data(
name='low', shape=[-1, dims], dtype='float32'
)
self.static_high = layers.data(
name='high', shape=[dims], dtype='float32'
self.static_high = paddle.static.data(
name='high', shape=[-1, dims], dtype='float32'
)
self.static_values = layers.data(
name='values', shape=[dims], dtype='float32'
self.static_values = paddle.static.data(
name='values', shape=[-1, dims], dtype='float32'
)
......@@ -259,14 +258,14 @@ class UniformTest7(UniformTest):
def init_static_data(self, batch_size, dims):
with fluid.program_guard(self.test_program):
self.static_low = layers.data(
name='low', shape=[dims], dtype='float64'
self.static_low = paddle.static.data(
name='low', shape=[-1, dims], dtype='float64'
)
self.static_high = layers.data(
name='high', shape=[dims], dtype='float64'
self.static_high = paddle.static.data(
name='high', shape=[-1, dims], dtype='float64'
)
self.static_values = layers.data(
name='values', shape=[dims], dtype='float64'
self.static_values = paddle.static.data(
name='values', shape=[-1, dims], dtype='float64'
)
......@@ -286,14 +285,14 @@ class UniformTest8(UniformTest):
def init_static_data(self, batch_size, dims):
with fluid.program_guard(self.test_program):
self.static_low = layers.data(
name='low', shape=[dims], dtype='float64'
self.static_low = paddle.static.data(
name='low', shape=[-1, dims], dtype='float64'
)
self.static_high = layers.data(
name='high', shape=[dims], dtype='float64'
self.static_high = paddle.static.data(
name='high', shape=[-1, dims], dtype='float64'
)
self.static_values = layers.data(
name='values', shape=[dims], dtype='float32'
self.static_values = paddle.static.data(
name='values', shape=[-1, dims], dtype='float32'
)
......@@ -311,8 +310,8 @@ class UniformTest9(UniformTest):
self.static_low = self.low_np
self.static_high = self.high_np
with fluid.program_guard(self.test_program):
self.static_values = layers.data(
name='values', shape=[dims], dtype='float32'
self.static_values = paddle.static.data(
name='values', shape=[-1, dims], dtype='float32'
)
......@@ -333,8 +332,8 @@ class UniformTest10(UniformTest):
self.static_low = self.low_np
self.static_high = self.high_np
with fluid.program_guard(self.test_program):
self.static_values = layers.data(
name='values', shape=[dims], dtype='float32'
self.static_values = paddle.static.data(
name='values', shape=[-1, dims], dtype='float32'
)
......@@ -355,8 +354,8 @@ class UniformTest11(UniformTest):
self.static_low = self.low_np
self.static_high = self.high_np
with fluid.program_guard(self.test_program):
self.static_values = layers.data(
name='values', shape=[dims], dtype='float32'
self.static_values = paddle.static.data(
name='values', shape=[-1, dims], dtype='float32'
)
......
......@@ -296,12 +296,11 @@ class InputField:
self.feed_list = []
for slot in input_slots:
self.feed_list.append(
fluid.layers.data(
paddle.static.data(
name=slot['name'],
shape=slot['shape'],
dtype=slot['dtype'],
lod_level=slot.get('lod_level', 0),
append_batch_size=False,
)
)
......
......@@ -41,26 +41,23 @@ def net(batch_size=4, lr=0.01):
dnn_input_dim, lr_input_dim = int(2), int(2)
with fluid.device_guard("cpu"):
dnn_data = fluid.layers.data(
dnn_data = paddle.static.data(
name="dnn_data",
shape=[-1, 1],
dtype="int64",
lod_level=1,
append_batch_size=False,
)
lr_data = fluid.layers.data(
lr_data = paddle.static.data(
name="lr_data",
shape=[-1, 1],
dtype="int64",
lod_level=1,
append_batch_size=False,
)
label = fluid.layers.data(
label = paddle.static.data(
name="click",
shape=[-1, 1],
dtype="float32",
lod_level=0,
append_batch_size=False,
)
datas = [dnn_data, lr_data, label]
......
......@@ -55,11 +55,11 @@ class TestFleetMetaOptimizer(unittest.TestCase):
with fluid.unique_name.guard():
role = role_maker.PaddleCloudRoleMaker(is_collective=True)
fleet.init(role)
input_x = paddle.fluid.layers.data(
name="x", shape=[32], dtype='float32'
input_x = paddle.static.data(
name="x", shape=[-1, 32], dtype='float32'
)
input_y = paddle.fluid.layers.data(
name="y", shape=[1], dtype='int64'
input_y = paddle.static.data(
name="y", shape=[-1, 1], dtype='int64'
)
fc_1 = paddle.static.nn.fc(
......@@ -92,11 +92,11 @@ class TestFleetMetaOptimizer(unittest.TestCase):
role = role_maker.PaddleCloudRoleMaker(is_collective=True)
fleet.init(role)
with fluid.device_guard("gpu:0"):
input_x = paddle.fluid.layers.data(
name="x", shape=[32], dtype='float32'
input_x = paddle.static.data(
name="x", shape=[-1, 32], dtype='float32'
)
input_y = paddle.fluid.layers.data(
name="y", shape=[1], dtype='int64'
input_y = paddle.static.data(
name="y", shape=[-1, 1], dtype='int64'
)
for stage_idx in range(pp_degree):
......
......@@ -15,14 +15,15 @@
from nets import mlp
from utils import gen_data
import paddle
import paddle.fluid as fluid
from paddle.fluid.incubate.fleet.base import role_maker
from paddle.fluid.incubate.fleet.parameter_server.distribute_transpiler import (
fleet,
)
input_x = fluid.layers.data(name="x", shape=[32], dtype='float32')
input_y = fluid.layers.data(name="y", shape=[1], dtype='int64')
input_x = paddle.static.data(name="x", shape=[-1, 32], dtype='float32')
input_y = paddle.static.data(name="y", shape=[-1, 1], dtype='int64')
input_y = fluid.layers.cast(input_y, dtype="float32")
with fluid.device_guard("gpu"):
......
......@@ -25,23 +25,20 @@ import paddle.fluid.core as core
class EmbEltwiseLayerNormFusePassTest(PassTest):
def setUp(self):
with fluid.program_guard(self.main_program, self.startup_program):
word_id = fluid.layers.data(
word_id = paddle.static.data(
name="word_id",
shape=[1, 128, 1],
dtype="int64",
append_batch_size=False,
)
pos_id = fluid.layers.data(
pos_id = paddle.static.data(
name="pos_id",
shape=[1, 128, 1],
dtype="int64",
append_batch_size=False,
)
sent_id = fluid.layers.data(
sent_id = paddle.static.data(
name="sent_id",
shape=[1, 128, 1],
dtype="int64",
append_batch_size=False,
)
word_emb = fluid.layers.embedding(
input=word_id, size=(128, 768), dtype='float32'
......@@ -56,29 +53,25 @@ class EmbEltwiseLayerNormFusePassTest(PassTest):
add2 = paddle.add(add1, sent_emb)
hidden1 = paddle.static.nn.layer_norm(input=add2, begin_norm_axis=2)
id1 = fluid.layers.data(
id1 = paddle.static.data(
name="id1",
shape=[1, 128, 1],
dtype="int64",
append_batch_size=False,
)
id2 = fluid.layers.data(
id2 = paddle.static.data(
name="id2",
shape=[1, 128, 1],
dtype="int64",
append_batch_size=False,
)
id3 = fluid.layers.data(
id3 = paddle.static.data(
name="id3",
shape=[1, 128, 1],
dtype="int64",
append_batch_size=False,
)
id4 = fluid.layers.data(
id4 = paddle.static.data(
name="id4",
shape=[1, 128, 1],
dtype="int64",
append_batch_size=False,
)
emb1 = fluid.layers.embedding(
input=id1, size=(128, 768), dtype='float32'
......
......@@ -28,10 +28,12 @@ paddle.enable_static()
class TestQuantizationSubGraph(unittest.TestCase):
def build_graph_with_sub_graph(self):
def linear_fc(num):
data = fluid.layers.data(
name='image', shape=[1, 32, 32], dtype='float32'
data = paddle.static.data(
name='image', shape=[-1, 1, 32, 32], dtype='float32'
)
label = paddle.static.data(
name='label', shape=[-1, 1], dtype='int64'
)
label = fluid.layers.data(name='label', shape=[1], dtype='int64')
hidden = data
for _ in range(num):
hidden = paddle.static.nn.fc(
......
......@@ -60,11 +60,11 @@ class BuildIrMemOptBase(unittest.TestCase):
fluid.default_startup_program().random_seed = 100
fluid.default_main_program().random_seed = 100
data = fluid.layers.data(
name="words", shape=[1], dtype="int64", lod_level=1
data = paddle.static.data(
name="words", shape=[-1, 1], dtype="int64", lod_level=1
)
label = fluid.layers.data(name="label", shape=[1], dtype="int64")
label = paddle.static.data(name="label", shape=[-1, 1], dtype="int64")
cost = network(data, label, len(self.word_dict))
optimizer = fluid.optimizer.Adam(learning_rate=0.001)
......
......@@ -45,8 +45,8 @@ class TestCollectiveAllgatherAPI(TestCollectiveAPIRunnerBase):
def get_model(self, main_prog, startup_program, rank):
with fluid.program_guard(main_prog, startup_program):
tensor_list = []
tindata = layers.data(
name="tindata", shape=[10, 1000], dtype='float32'
tindata = paddle.static.data(
name="tindata", shape=[-1, 10, 1000], dtype='float32'
)
paddle.distributed.all_gather(tensor_list, tindata)
return tensor_list
......
......@@ -42,9 +42,10 @@ class TestCollectiveAllgather(TestCollectiveRunnerBase):
ring_id = 0
nranks = 2
with fluid.program_guard(main_prog, startup_program):
tindata = layers.data(
name="tindata", shape=[10, 1000], dtype='float32'
tindata = paddle.static.data(
name="tindata", shape=[-1, 10, 1000], dtype='float32'
)
tindata.desc.set_need_check_feed(False)
toutdata = main_prog.current_block().create_var(
name="outofallgather",
dtype='float32',
......
......@@ -44,8 +44,8 @@ class TestCollectiveAllreduceAPI(TestCollectiveAPIRunnerBase):
def get_model(self, main_prog, startup_program, rank):
with fluid.program_guard(main_prog, startup_program):
tindata = layers.data(
name="tindata", shape=[10, 1000], dtype='float32'
tindata = paddle.static.data(
name="tindata", shape=[-1, 10, 1000], dtype='float32'
)
paddle.distributed.all_reduce(tindata)
return [tindata]
......
......@@ -42,9 +42,11 @@ class TestCollectiveAllreduce(TestCollectiveRunnerBase):
def get_model(self, main_prog, startup_program, col_type):
ring_id = 0
with fluid.program_guard(main_prog, startup_program):
tindata = layers.data(
name="tindata", shape=[10, 1000], dtype='float32'
tindata = paddle.static.data(
name="tindata", shape=[-1, 10, 1000], dtype='float32'
)
tindata.desc.set_need_check_feed(False)
toutdata = main_prog.current_block().create_var(
name="outof" + col_type,
dtype='float32',
......
......@@ -44,9 +44,11 @@ class TestCollectiveBroadcastAPI(TestCollectiveAPIRunnerBase):
def get_model(self, main_prog, startup_program, rank):
with fluid.program_guard(main_prog, startup_program):
tindata = layers.data(
name="tindata", shape=[10, 1000], dtype="float32"
tindata = paddle.static.data(
name="tindata", shape=[-1, 10, 1000], dtype="float32"
)
tindata.desc.set_need_check_feed(False)
paddle.distributed.broadcast(tindata, src=1)
return [tindata]
......
This diff has been collapsed.