Unverified commit c985b1ac, authored by GGBond8488, committed by GitHub

Fluid clean move out fill constant (#49511)

* migrate fill_constant to paddle.tensor

* move fill_constant to paddle.tensor and replace the references

* add missing fill_constant replacement

* fix typo

* remove unused import fill_constant

* fix zeros import error

* fix circular import

* fix layers.zeros

* fix unit test

* fix unit tests

* fix unit test

* use paddle.full to replace fill_constant in sample code

* fix sample code

* recover xpu test

* recover xpu test

* fix circular import

* fix utils import error

* fix utils error

* fix circular import

* redo

* fix circular import

* fix prim fill constant import

* fix type error

* fix increase error

* fix test error

* fix fill_constant
Parent 4851c642
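The whole diff applies one migration pattern. A minimal sketch of it (assuming the paddle 2.x API: `paddle.full` covers plain constant creation, while `paddle.tensor.fill_constant` keeps the `out=` and `force_cpu=` parameters that `paddle.full` does not expose):

    import paddle

    # Old fluid style, removed by this PR:
    #     x = fluid.layers.fill_constant(shape=[1], dtype='int64', value=0)

    # Preferred replacement for plain constants:
    x = paddle.full(shape=[1], fill_value=0, dtype='int64')

    # Replacement when the call site still needs out= or force_cpu=:
    y = paddle.tensor.fill_constant(shape=[1], dtype='int64', value=1)
    paddle.tensor.fill_constant(shape=[1], dtype='int64', value=2, out=y)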
@@ -33,7 +33,6 @@ from paddle.fluid.framework import (  # noqa: F401
     in_dygraph_mode,
 )
 from paddle.fluid.layer_helper import LayerHelper  # noqa: F401
-from paddle.fluid.layers import fill_constant  # noqa: F401
 from paddle.fluid.layers.layer_function_generator import (  # noqa: F401
     templatedoc,
 )
...
@@ -17,8 +17,8 @@ from collections import OrderedDict
 import paddle
 from paddle import _legacy_C_ops
 from paddle.framework import core, in_dygraph_mode
-from ...fluid.layers.tensor import fill_constant
+from paddle.tensor import fill_constant
 from ..collective import _get_global_env, _new_ring_id
...
@@ -50,9 +50,9 @@ class HybridParallelInferenceHelper:
             # while op pattern
             with paddle.fluid.device_guard(f'{device}:all'):
                 # init global cond
-                max_len = layers.fill_constant(shape=[1], dtype="int64", value=10, force_cpu=False)
-                step_idx = layers.fill_constant(shape=[1], dtype="int64", value=0, force_cpu=False)
-                cond_int = layers.fill_constant(shape=[1], dtype="int64", value=0, force_cpu=False, name="cond_int")
+                max_len = paddle.full(shape=[1], dtype="int64", fill_value=10)
+                step_idx = paddle.full(shape=[1], dtype="int64", fill_value=0)
+                cond_int = paddle.full(shape=[1], dtype="int64", fill_value=0, name="cond_int")
                 cond = layers.cast(step_idx < max_len, dtype="bool")
                 while_op = layers.While(cond, is_test=True)
@@ -124,14 +124,14 @@ class HybridParallelInferenceHelper:
             X = paddle.static.data(name='X', shape=[None, 2], dtype='float32')
             with paddle.fluid.device_guard(f'{device}:all'):
-                max_len = layers.fill_constant(
-                    shape=[1], dtype="int64", value=5, force_cpu=False, name="n")
-                step_idx = layers.fill_constant(
-                    shape=[1], dtype="int64", value=0, force_cpu=False, name="i")
+                max_len = paddle.full(
+                    shape=[1], dtype="int64", fill_value=5, name="n")
+                step_idx = paddle.full(
+                    shape=[1], dtype="int64", fill_value=0, name="i")
                 data = paddle.tensor.array_write(X, step_idx)
-                cond_int = layers.fill_constant(shape=[1], dtype="int64", value=0, force_cpu=False, name="cond_int")
+                cond_int = paddle.full(shape=[1], dtype="int64", fill_value=0, name="cond_int")
                 cond = paddle.less_than(x=step_idx, y=max_len)
                 while_op = layers.While(cond, is_test=True)
...
@@ -25,7 +25,6 @@ from paddle.distributed.auto_parallel.utils import (
     set_var_dist_attr,
 )
 from paddle.distributed.fleet.meta_optimizers.common import OP_ROLE_KEY, OpRole
-from paddle.fluid import layers
 from paddle.framework import core
 from paddle.static import device_guard
@@ -284,7 +283,7 @@ def _create_cond_block_and_update_optimizer(
         # clear gradient_merge_vars
         for param, new_grad in new_params_to_grads:
-            layers.fill_constant(
+            paddle.tensor.fill_constant(
                 shape=new_grad.shape,
                 dtype=new_grad.dtype,
                 value=0.0,
...
@@ -14,7 +14,7 @@
 """
 Contrib layers just related to metric.
 """
+import paddle
 import warnings
 import paddle
 from paddle.fluid.layer_helper import LayerHelper
@@ -79,7 +79,7 @@ def ctr_metric_bundle(input, label, ins_tag_weight=None):
     """
     if ins_tag_weight is None:
-        ins_tag_weight = tensor.fill_constant(
+        ins_tag_weight = paddle.tensor.fill_constant(
             shape=[1, 1], dtype="float32", value=1.0
         )
...
@@ -287,10 +287,10 @@ class TestAmpWithNonIterableDataLoader(unittest.TestCase):
             iterable=False,
             use_double_buffer=False,
         )
-        zero_var = fluid.layers.fill_constant(
+        zero_var = paddle.tensor.fill_constant(
            shape=[1], dtype='int64', value=0
         )
-        one_var = fluid.layers.fill_constant(
+        one_var = paddle.tensor.fill_constant(
            shape=[1], dtype='int64', value=1
         )
         with fluid.layers.control_flow.Switch() as switch:
...
@@ -15,7 +15,6 @@
 from ..wrapped_decorator import signature_safe_contextmanager
 from .layer_function_generator import templatedoc
-from .tensor import fill_constant
 from .. import core
 from ..framework import (
     Program,
@@ -925,11 +924,12 @@ class While:
        .. code-block:: python
            import paddle.fluid as fluid
+           import paddle
            import numpy as np
-           i = fluid.layers.fill_constant(shape=[1], dtype='int64', value=0)           # loop counter
-           loop_len = fluid.layers.fill_constant(shape=[1], dtype='int64', value=10)   # loop length
+           i = paddle.full(shape=[1], dtype='int64', fill_value=0)           # loop counter
+           loop_len = paddle.full(shape=[1], dtype='int64', fill_value=10)   # loop length
            cond = paddle.less_than(x=i, y=loop_len)
            while_op = fluid.layers.While(cond=cond)
@@ -952,11 +952,11 @@ class While:
            import numpy as np
            paddle.enable_static()
-           i = fluid.layers.fill_constant(shape=[1], dtype='int64', value=0)
-           loop_len = fluid.layers.fill_constant(shape=[1], dtype='int64', value=10)
-           one = fluid.layers.fill_constant(shape=[1], dtype='float32', value=1)
+           i = paddle.full(shape=[1], dtype='int64', fill_value=0)
+           loop_len = paddle.full(shape=[1], dtype='int64', fill_value=10)
+           one = paddle.full(shape=[1], dtype='float32', fill_value=1)
            data = fluid.data(name='data', shape=[1], dtype='float32')
-           sums = fluid.layers.fill_constant(shape=[1], dtype='float32', value=0)  # Define the variable to be fetched outside of While; its name must differ from that of the corresponding variable inside the While
+           sums = paddle.full(shape=[1], dtype='float32', fill_value=0)  # Define the variable to be fetched outside of While; its name must differ from that of the corresponding variable inside the While
            cond = paddle.less_than(x=i, y=loop_len)
            while_op = fluid.layers.While(cond=cond)
@@ -1537,13 +1537,15 @@ class Switch:
        .. code-block:: python
            '''
+           import paddle
+           import paddle.fluid as fluid
            with fluid.layers.Switch() as switch:
                with switch.case(cond1):
-                   i = fluid.layers.fill_constant(shape=[1], dtype='int64', value=1)
+                   i = paddle.full(shape=[1], dtype='int64', fill_value=1)
                with switch.case(cond2):
-                   i = fluid.layers.fill_constant(shape=[1], dtype='int64', value=2)
+                   i = paddle.full(shape=[1], dtype='int64', fill_value=2)
                with switch.default():
-                   i = fluid.layers.fill_constant(shape=[1], dtype='int64', value=0)
+                   i = paddle.full(shape=[1], dtype='int64', fill_value=0)
            '''
    Args:
@@ -1561,20 +1563,20 @@ class Switch:
                dtype='float32',
                persistable=True,
                name="learning_rate")
-           zero_var = fluid.layers.fill_constant(
-               shape=[1], dtype='float32', value=0.0)
-           one_var = fluid.layers.fill_constant(
-               shape=[1], dtype='float32', value=1.0)
-           two_var = fluid.layers.fill_constant(
-               shape=[1], dtype='float32', value=2.0)
+           zero_var = paddle.full(
+               shape=[1], dtype='float32', fill_value=0.0)
+           one_var = paddle.full(
+               shape=[1], dtype='float32', fill_value=1.0)
+           two_var = paddle.full(
+               shape=[1], dtype='float32', fill_value=2.0)
            global_step = fluid.layers.autoincreased_step_counter(counter_name='@LR_DECAY_COUNTER@', begin=0, step=1)
            with fluid.layers.control_flow.Switch() as switch:
                with switch.case(global_step == zero_var):
-                   fluid.layers.assign(input=one_var, output=lr)
+                   paddle.assign(input=one_var, output=lr)
                with switch.default():
-                   fluid.layers.assign(input=two_var, output=lr)
+                   paddle.assign(input=two_var, output=lr)
            exe = fluid.Executor(fluid.CPUPlace())
            exe.run(fluid.default_startup_program())
...
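The While docstring above is fragmentary in diff form; a runnable counterpart of its loop-counter example, as a sketch using the functional `paddle.static.nn.while_loop` equivalent rather than `fluid.layers.While`:

    import paddle

    paddle.enable_static()

    i = paddle.full(shape=[1], dtype='int64', fill_value=0)          # loop counter
    loop_len = paddle.full(shape=[1], dtype='int64', fill_value=10)  # loop length

    def cond(i, loop_len):
        return i < loop_len

    def body(i, loop_len):
        return [i + 1, loop_len]

    i, loop_len = paddle.static.nn.while_loop(cond, body, [i, loop_len])

    exe = paddle.static.Executor(paddle.CPUPlace())
    res = exe.run(paddle.static.default_main_program(), fetch_list=[i])
    print(res)  # [array([10])]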
@@ -352,10 +352,10 @@ def polynomial_decay(
         if cycle:
             div_res = paddle.ceil(global_step / decay_steps)
-            zero_var = tensor.fill_constant(
+            zero_var = paddle.tensor.fill_constant(
                 shape=[1], dtype='float32', value=0.0
             )
-            one_var = tensor.fill_constant(
+            one_var = paddle.tensor.fill_constant(
                 shape=[1], dtype='float32', value=1.0
             )
@@ -364,7 +364,7 @@ def polynomial_decay(
                 paddle.assign(one_var, output=div_res)
             decay_steps = decay_steps * div_res
         else:
-            decay_steps_var = tensor.fill_constant(
+            decay_steps_var = paddle.tensor.fill_constant(
                 shape=[1], dtype='float32', value=float(decay_steps)
             )
             global_step = paddle.minimum(x=global_step, y=decay_steps_var)
@@ -435,21 +435,21 @@ def piecewise_decay(boundaries, values):
         with control_flow.Switch() as switch:
             for i in range(len(boundaries)):
-                boundary_val = tensor.fill_constant(
+                boundary_val = paddle.tensor.fill_constant(
                     shape=[1],
                     dtype='float32',
                     value=float(boundaries[i]),
                     force_cpu=True,
                 )
                 with switch.case(global_step < boundary_val):
-                    tensor.fill_constant(
+                    paddle.tensor.fill_constant(
                         shape=[1],
                         dtype="float32",
                         value=float(values[i]),
                         out=lr,
                     )
             with switch.default():
-                tensor.fill_constant(
+                paddle.tensor.fill_constant(
                     shape=[1],
                     dtype="float32",
                     value=float(values[len(values) - 1]),
@@ -598,7 +598,7 @@ def linear_lr_warmup(learning_rate, warmup_steps, start_lr, end_lr):
                 paddle.assign(decayed_lr, lr)
             with switch.default():
                 if not isinstance(learning_rate, Variable):
-                    learning_rate = tensor.fill_constant(
+                    learning_rate = paddle.tensor.fill_constant(
                         shape=[1], dtype=dtype, value=float(learning_rate)
                     )
                     paddle.assign(learning_rate, lr)
...
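These scheduler hunks keep `fill_constant` (now qualified as `paddle.tensor.fill_constant`) instead of switching to `paddle.full`, because they overwrite an existing `lr` variable in place through the `out=` parameter, which `paddle.full` does not accept. A minimal sketch of that in-place pattern (hypothetical values; `create_global_var` stands in for the scheduler's `lr` variable):

    import paddle

    paddle.enable_static()

    lr = paddle.static.create_global_var(
        shape=[1], value=0.0, dtype='float32', persistable=True, name='learning_rate'
    )
    # Overwrite lr with a new constant, as piecewise_decay does in each switch case.
    paddle.tensor.fill_constant(shape=[1], dtype='float32', value=0.1, out=lr)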
@@ -41,7 +41,7 @@ from .layer_function_generator import (
     templatedoc,
     _generate_doc_string_,
 )
-from .tensor import fill_constant, zeros
+from .tensor import zeros
 from .. import unique_name
 from .. import core
 from ...utils import deprecated
...
@@ -39,142 +39,10 @@ from paddle import _C_ops, _legacy_C_ops
 __all__ = [
     'fill_constant_batch_size_like',
-    'fill_constant',
     'zeros',
 ]
-def fill_constant(shape, dtype, value, force_cpu=False, out=None, name=None):
-    """
-    This OP creates a Tensor with specified `shape` and `dtype`, and
-    initializes it with a constant specified by `value`.
-    The attribute `stop_gradient` of the created Tensor is set to True.
-    Args:
-        shape(list|tuple|Tensor): Shape of the output Tensor, the data type of ``shape`` is int32 or int64.
-            If ``shape`` is a list or tuple, the elements of it should be integers or Tensors with shape [1].
-            If ``shape`` is an Tensor, it should be an 1-D Tensor with date type int32 or int64.
-        dtype(np.dtype|str): Data type of the output Tensor which can
-            be float16, float32, float64, uint8, int16, int32, int64.
-        value(bool|float|int|Tensor): The constant value used to initialize
-            the Tensor to be created. If ``value`` is an Tensor, it should be an 1-D Tensor.
-        force_cpu(bool, optional): data should be on CPU if it's true, default value is False.
-        out(Tensor, optional): Optional output which can be any created
-            Tensor that meets the requirements to store the result of operation.
-            if ``out`` is None, a new Tensor will be create to store the result.
-        name(str, optional): The default value is None. Normally there is no need for user to set this
-            property. For more information, please refer to :ref:`api_guide_Name`.
-    Returns:
-        Tensor: Tensor which is created according to shape and dtype.
-    Examples:
-        .. code-block:: python
-          import paddle.fluid as fluid
-          # attr shape is a list which doesn't contain Tensor.
-          data1 = fluid.layers.fill_constant(shape=[2,1], value=0, dtype='int64') # data1=[[0],[0]]
-          data2 = fluid.layers.fill_constant(shape=[2,1], value=5, dtype='int64', out=data1)
-          # data1=[[5], [5]] data2=[[5], [5]]
-          # attr shape is a list which contains Tensor.
-          positive_2 = fluid.layers.fill_constant([1], "int32", 2)
-          data3 = fluid.layers.fill_constant(shape=[1, positive_2], dtype='float32', value=1.5) # data3=[[1.5, 1.5]]
-          # attr shape is a Tensor.
-          shape = fluid.layers.fill_constant([2], "int32", 2) # shape=[2,2]
-          data4 = fluid.layers.fill_constant(shape=shape, dtype='bool', value=True) # data4=[[True,True],[True,True]]
-          # attr value is a Tensor.
-          val = fluid.layers.fill_constant([1], "float32", 2.0) # val=[2.0]
-          data5 = fluid.layers.fill_constant(shape=[2,1], value=val, dtype='float32') #data5=[[2.0],[2.0]]
-    """
-    if in_dygraph_mode():
-        place = _current_expected_place()
-        if force_cpu:
-            place = core.CPUPlace()
-        if isinstance(shape, (list, tuple)):
-            shape = paddle.utils.convert_shape_to_list(shape)
-        if not isinstance(dtype, core.VarDesc.VarType):
-            dtype = convert_np_dtype_to_dtype_(dtype)
-        if out is None:
-            out = _C_ops.full(shape, float(value), dtype, place)
-            out.stop_gradient = True
-            return out
-        if out is not None:
-            # final state mode is support out is not None.
-            _C_ops.full_(out, shape, float(value), dtype, place)
-            out.stop_gradient = True
-            return out
-    else:
-        attrs = {'force_cpu': force_cpu}
-        dtype = convert_dtype(dtype)
-        if not isinstance(value, Variable):
-            if dtype in ['uint8', 'int16', 'int32', 'int64']:
-                attrs['str_value'] = str(int(value))
-                attrs['value'] = int(value)
-            else:
-                attrs['str_value'] = str(float(value))
-                attrs['value'] = float(value)
-        helper = LayerHelper("fill_constant", **locals())
-        inputs = {}
-        if isinstance(value, Variable):
-            if convert_dtype(value.dtype) != dtype:
-                value = paddle.cast(value, dtype)
-            inputs['ValueTensor'] = value
-        paddle.utils.check_shape(shape)
-        check_dtype(
-            dtype,
-            'dtype',
-            [
-                'bool',
-                'float16',
-                'float32',
-                'float64',
-                'uint8',
-                'int16',
-                'int32',
-                'int64',
-                'complex64',
-                'complex128',
-                'uint16',
-            ],
-            'fill_constant',
-        )
-        check_type(shape, 'shape', (Variable, list, tuple), 'fill_constant')
-        if out is not None:
-            check_variable_and_dtype(
-                out, 'out', [convert_dtype(dtype)], 'fill_constant'
-            )
-        helper = LayerHelper("fill_constant", **locals())
-        paddle.utils.get_shape_tensor_inputs(
-            inputs=inputs, attrs=attrs, shape=shape, op_type='fill_constant'
-        )
-        if out is None:
-            out = helper.create_variable_for_type_inference(dtype=dtype)
-        attrs['dtype'] = out.dtype
-        helper.append_op(
-            type='fill_constant',
-            inputs=inputs,
-            outputs={'Out': [out]},
-            attrs=attrs,
-            stop_gradient=True,
-        )
-        out.stop_gradient = True
-        return out
 @deprecated(since='1.8.0', update_to="paddle.fluid.layers.fill_constant")
 @templatedoc()
 def fill_constant_batch_size_like(
@@ -214,8 +82,9 @@ def fill_constant_batch_size_like(
         .. code-block:: python
+            import paddle
             import paddle.fluid as fluid
-            like = fluid.layers.fill_constant(shape=[1,2], value=10, dtype='int64') #like=[[10, 10]]
+            like = paddle.full(shape=[1,2], fill_value=10, dtype='int64') #like=[[10, 10]]
             data = fluid.layers.fill_constant_batch_size_like(
                 input=like, shape=[1], value=0, dtype='int64') #like=[[10, 10]] data=[0]
@@ -279,10 +148,16 @@ def zeros(shape, dtype, force_cpu=False, name=None):
        .. code-block:: python
          import paddle.fluid as fluid
+         import paddle
          data = fluid.layers.zeros(shape=[3, 2], dtype='float32') # [[0., 0.], [0., 0.], [0., 0.]]
          # shape is a Tensor
-         shape = fluid.layers.fill_constant(shape=[2], dtype='int32', value=2)
+         shape = paddle.full(shape=[2], dtype='int32', fill_value=2)
          data1 = fluid.layers.zeros(shape=shape, dtype='int32') #[[0, 0], [0, 0]]
     """
-    return fill_constant(value=0.0, **locals())
+    # TODO: remove zeros
+    from paddle.tensor import fill_constant
+    return fill_constant(
+        value=0.0, shape=shape, dtype=dtype, force_cpu=force_cpu, name=name
+    )
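The rewritten `zeros` above is now a thin shim: the function-local `from paddle.tensor import fill_constant` avoids the circular-import problems that several commits in this PR's history wrestle with, and the TODO marks the shim itself for removal. The forward-facing equivalent is simply `paddle.zeros`; a sketch:

    import paddle

    # fluid.layers.zeros(shape=[3, 2], dtype='float32') now reduces to:
    data = paddle.zeros(shape=[3, 2], dtype='float32')
    print(data.numpy())  # [[0. 0.] [0. 0.] [0. 0.]]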
@@ -236,7 +236,7 @@ class Optimizer:
                 None, name='global_step', dtype='int32'
             )
-            tensor.fill_constant(
+            paddle.tensor.fill_constant(
                 [1], "int32", self._learning_rate.step_num, out=var_temp
             )
@@ -7393,11 +7393,11 @@ class LookaheadOptimizer:
             paddle.increment(x=step, value=1.0)
             # lookahead
-            zero_var = layers.fill_constant(
+            zero_var = paddle.tensor.fill_constant(
                 shape=[1], dtype='float32', value=0.0
             )
-            one_var = layers.fill_constant(
+            one_var = paddle.tensor.fill_constant(
                 shape=[1], dtype='float32', value=1.0
             )
@@ -7747,7 +7747,7 @@ class GradientMergeOptimizer:
             # clear gradient_merge_vars
             for param, new_grad in new_params_grads:
-                layers.fill_constant(
+                paddle.tensor.fill_constant(
                     shape=new_grad.shape,
                     dtype=new_grad.dtype,
                     value=0.0,
...
@@ -96,7 +96,9 @@ def init_communicator(
     with fluid.program_guard(main_program):
         op_type = "c_allreduce_sum"
-        data = fluid.layers.fill_constant(shape=[1], dtype='float32', value=2.5)
+        data = paddle.tensor.fill_constant(
+            shape=[1], dtype='float32', value=2.5
+        )
         helper = LayerHelper(op_type, **locals())
         helper.append_op(
             type=op_type,
...
@@ -117,11 +117,11 @@ def get_program():
     with fluid.program_guard(train_program, start_program):
         # loop counter
-        i = fluid.layers.fill_constant(shape=[1], dtype='int64', value=0)
+        i = paddle.tensor.fill_constant(shape=[1], dtype='int64', value=0)
         auto.shard_tensor(i, _g_process_mesh, [None])
         # number of loop iterations
-        loop_len = fluid.layers.fill_constant(
+        loop_len = paddle.tensor.fill_constant(
             shape=[1], dtype='int64', value=epoch_num
         )
         auto.shard_tensor(loop_len, _g_process_mesh, [None])
...
@@ -50,7 +50,7 @@ def net():
     y = paddle.static.data(name="y", shape=[-1, 1], dtype='int64')
     # test int64 value
-    zero = fluid.layers.fill_constant(shape=[1], dtype='int64', value=0)
+    zero = paddle.tensor.fill_constant(shape=[1], dtype='int64', value=0)
     # test float16 value
     fp16_zero = paddle.cast(zero, dtype='float16')
...
@@ -16,7 +16,6 @@ from test_collective_api_base import TestCollectiveAPIRunnerBase, runtime_main
 import paddle
 import paddle.fluid as fluid
-import paddle.fluid.layers as layers
 paddle.enable_static()
@@ -32,7 +31,7 @@ class TestCollectiveScatterAPI(TestCollectiveAPIRunnerBase):
                 shape=[10, 1000],
                 dtype='float32',
             )
-            toutdata = layers.fill_constant(
+            toutdata = paddle.tensor.fill_constant(
                 shape=[5, 1000], dtype='float32', value=1.0
             )
             tensor_list = None
...
@@ -41,7 +41,7 @@ class TestCollectiveSendRecv(TestCollectiveRunnerBase):
             data1 = paddle.assign(np.array([[3, 4, 5]], dtype='float32'))
             data2 = paddle.assign(np.array([[0, 1, 2]], dtype='float32'))
             tensor_array = paddle.tensor.create_array(dtype='float32')
-            i = fluid.layers.fill_constant(shape=[1], dtype='int64', value=0)
+            i = paddle.tensor.fill_constant(shape=[1], dtype='int64', value=0)
             paddle.tensor.array_write(data1, i, tensor_array)
             paddle.tensor.array_write(data2, i + 1, tensor_array)
             if self.rank == 0:
...
@@ -19,7 +19,6 @@ import numpy as np
 import paddle
 import paddle.distributed.fleet as fleet
-import paddle.fluid.layers as layers
 from paddle.distributed.fleet.utils.hybrid_parallel_inference import (
     HybridParallelInferenceHelper,
 )
@@ -66,16 +65,16 @@ class TestHybridParallelInferenceHelperClass(unittest.TestCase):
         )
         with paddle.fluid.device_guard(f'{device}:all'):
-            max_len = layers.fill_constant(
+            max_len = paddle.tensor.fill_constant(
                 shape=[1], dtype="int64", value=2, force_cpu=False, name="n"
             )
-            step_idx = layers.fill_constant(
+            step_idx = paddle.tensor.fill_constant(
                 shape=[1], dtype="int64", value=0, force_cpu=False, name="i"
             )
             data = paddle.tensor.array_write(X, step_idx)
-            cond_int = layers.fill_constant(
+            cond_int = paddle.tensor.fill_constant(
                 shape=[1],
                 dtype="int64",
                 value=0,
...
@@ -59,7 +59,7 @@ def get_acc(cos_q_nt, cos_q_pt, batch_size):
     cond_3 = paddle.sum(cond)
     acc = paddle.divide(
         cond_3,
-        fluid.layers.fill_constant(
+        paddle.tensor.fill_constant(
             shape=[1], value=batch_size * 1.0, dtype='float64'
         ),
         name="simnet_acc",
...
@@ -60,7 +60,7 @@ def dyfunc_with_if_else2(x, col=100):
     # TODO: Don't support return non-Tensor in Tensor-dependent `if` statement currently.
     #  `x` is Tensor, `col` is not Tensor, and `col` is the return value of `true_fn` after transformed.
     # col = -1
-    col = fluid.layers.fill_constant(shape=[1], value=-1, dtype="int64")
+    col = paddle.tensor.fill_constant(shape=[1], value=-1, dtype="int64")
     if paddle.mean(x).numpy() > x.numpy()[row][col]:
         y = paddle.nn.functional.relu(x)
     else:
@@ -149,7 +149,7 @@ def dyfunc_with_if_else_with_list_geneator(x):
 def nested_if_else(x_v):
     batch_size = 16
     feat_size = x_v.shape[-1]
-    bias = fluid.layers.fill_constant([feat_size], dtype='float32', value=1)
+    bias = paddle.tensor.fill_constant([feat_size], dtype='float32', value=1)
     if x_v.shape[0] != batch_size:
         # TODO: Don't support return non-Tensor in Tensor-dependent `if` statement currently.
         # `x_v.shape[0]` is not Tensor, and `batch_size` is the return value of `true_fn` after transformed.
@@ -160,14 +160,14 @@ def nested_if_else(x_v):
     # if tensor.shape is [1], now support to compare with numpy.
     if paddle.mean(x_v).numpy() < 0:
         y = x_v + bias
-        w = fluid.layers.fill_constant([feat_size], dtype='float32', value=10)
+        w = paddle.tensor.fill_constant([feat_size], dtype='float32', value=10)
         if y.numpy()[0] < 10:
             tmp = y * w
             y = paddle.nn.functional.relu(tmp)
             if paddle.mean(y).numpy() < batch_size:
                 y = paddle.abs(y)
             else:
-                tmp = fluid.layers.fill_constant(
+                tmp = paddle.tensor.fill_constant(
                     y.shape, dtype='float32', value=-1
                 )
                 y = y - tmp
@@ -185,13 +185,13 @@ def nested_if_else_2(x):
     x_shape_0 = x.shape[0]
     if x_shape_0 < 1:
         if paddle.shape(y).numpy()[0] < 1:
-            res = fluid.layers.fill_constant(
+            res = paddle.tensor.fill_constant(
                 value=2, shape=x.shape, dtype="int32"
             )
             # `z` is a new var here.
             z = y + 1
         else:
-            res = fluid.layers.fill_constant(
+            res = paddle.tensor.fill_constant(
                 value=3, shape=x.shape, dtype="int32"
             )
     else:
@@ -218,13 +218,13 @@ def nested_if_else_3(x):
     else:
         y_shape = paddle.shape(y)
         if y_shape.numpy()[0] < 1:
-            res = fluid.layers.fill_constant(
+            res = paddle.tensor.fill_constant(
                 value=2, shape=x.shape, dtype="int32"
             )
             # `z` is created in above code block.
             z = y + 1
         else:
-            res = fluid.layers.fill_constant(
+            res = paddle.tensor.fill_constant(
                 value=3, shape=x.shape, dtype="int32"
             )
     # `out` is a new var.
@@ -259,14 +259,14 @@ class NetWithControlFlowIf(fluid.dygraph.Layer):
             )
         )
-        self.constant_vars['bias'] = fluid.layers.fill_constant(
+        self.constant_vars['bias'] = paddle.tensor.fill_constant(
             [5], dtype='float32', value=1
         )
         # Control flow `if` statement
         fc_out = self.fc(input)
         if paddle.mean(fc_out).numpy() < 0:
             y = fc_out + self.constant_vars['bias']
-            self.constant_vars['w'] = fluid.layers.fill_constant(
+            self.constant_vars['w'] = paddle.tensor.fill_constant(
                 [5], dtype='float32', value=10
             )
             if y.numpy()[0] < self.alpha:
@@ -277,12 +277,12 @@ class NetWithControlFlowIf(fluid.dygraph.Layer):
             # Nested `if/else`
             if y.numpy()[-1] < self.alpha:
                 # Modify variable of class
-                self.constant_vars['w'] = fluid.layers.fill_constant(
+                self.constant_vars['w'] = paddle.tensor.fill_constant(
                     [hidden_dim], dtype='float32', value=9
                 )
                 y = paddle.abs(y)
             else:
-                tmp = fluid.layers.fill_constant(
+                tmp = paddle.tensor.fill_constant(
                     y.shape, dtype='float32', value=-1
                 )
                 y = y - tmp
...
@@ -15,7 +15,6 @@
 from functools import reduce
 import paddle
-import paddle.fluid as fluid
 import paddle.fluid.param_attr as attr
 from paddle.common_ops_import import Variable
 from paddle.fluid.dygraph import Layer
@@ -214,7 +213,7 @@ class ConstantLayer:
         shape = list(shape)
         input_shape = paddle.shape(input)
         shape[0] = input_shape[0]
-        constant = fluid.layers.fill_constant(shape, dtype, value)
+        constant = paddle.tensor.fill_constant(shape, dtype, value)
         return constant
...
@@ -210,7 +210,7 @@ class ConstantLayer:
         shape = list(shape)
         input_shape = paddle.shape(input)
         shape[0] = input_shape[0]
-        constant = paddle.fluid.layers.fill_constant(shape, dtype, value)
+        constant = paddle.tensor.fill_constant(shape, dtype, value)
         return constant
...
@@ -62,7 +62,7 @@ def test_continue_in_for_at_end(x):
 def test_continue_in_while(x):
     x = fluid.dygraph.to_variable(x)
-    i = fluid.layers.fill_constant(shape=[1], dtype='int32', value=0)
+    i = paddle.tensor.fill_constant(shape=[1], dtype='int32', value=0)
     while i < 10:
         i += 1
         if i > 5:
@@ -94,7 +94,7 @@ def test_break_in_for_at_end(x):
 def test_break_in_while(x):
     x = fluid.dygraph.to_variable(x)
-    i = fluid.layers.fill_constant(shape=[1], dtype='int32', value=0)
+    i = paddle.tensor.fill_constant(shape=[1], dtype='int32', value=0)
     while i < 10:
         i += 1
         if i > 5:
@@ -116,8 +116,8 @@ def test_break_continue_in_for(x):
             break
         x += 10086
-    a = fluid.layers.fill_constant(shape=[1], dtype='int32', value=0)
-    b = fluid.layers.fill_constant(shape=[1], dtype='int32', value=3)
+    a = paddle.tensor.fill_constant(shape=[1], dtype='int32', value=0)
+    b = paddle.tensor.fill_constant(shape=[1], dtype='int32', value=3)
     # b = 10
     # TODO: add Raise Error and suggestion for usage:
     #   Py for contains break/continue depends on control-flow.
@@ -192,7 +192,7 @@ def test_optim_break_in_for(x):
 def test_optim_break_in_while(x):
     x = paddle.to_tensor(x)
-    i = fluid.layers.fill_constant(shape=[1], dtype='int32', value=0)
+    i = paddle.tensor.fill_constant(shape=[1], dtype='int32', value=0)
     while i < 10:
         if i > 5:
             break
...
@@ -84,12 +84,12 @@ class MainNetWithDict(fluid.dygraph.Layer):
     def forward(self, input, max_len=4):
         input = fluid.dygraph.to_variable(input)
         cache = {
-            "k": fluid.layers.fill_constant(
+            "k": paddle.tensor.fill_constant(
                 shape=[self.batch_size, self.output_size],
                 dtype='float32',
                 value=0,
             ),
-            "v": fluid.layers.fill_constant(
+            "v": paddle.tensor.fill_constant(
                 shape=[self.batch_size, self.output_size],
                 dtype='float32',
                 value=0,
...
@@ -25,7 +25,7 @@ from paddle.jit.dy2static.origin_info import unwrap
 def inner_func():
-    fluid.layers.fill_constant(shape=[1, 2], value=9, dtype="int")
+    paddle.tensor.fill_constant(shape=[1, 2], value=9, dtype="int")
     return
@@ -50,7 +50,7 @@ def func_error_in_compile_time_2(x):
 @paddle.jit.to_static
 def func_error_in_runtime(x):
     x = fluid.dygraph.to_variable(x)
-    two = fluid.layers.fill_constant(shape=[1], value=2, dtype="int32")
+    two = paddle.tensor.fill_constant(shape=[1], value=2, dtype="int32")
     x = paddle.reshape(x, shape=[1, two])
     return x
@@ -77,7 +77,7 @@ class LayerErrorInCompiletime(fluid.dygraph.Layer):
     )
     def forward(self, x):
         y = self._linear(x)
-        z = fluid.layers.fill_constant(shape=[1, 2], value=9, dtype="int")
+        z = paddle.tensor.fill_constant(shape=[1, 2], value=9, dtype="int")
         out = paddle.mean(y[z])
         return out
@@ -101,7 +101,7 @@ class LayerErrorInCompiletime2(fluid.dygraph.Layer):
 @paddle.jit.to_static
 def func_error_in_runtime_with_empty_line(x):
     x = fluid.dygraph.to_variable(x)
-    two = fluid.layers.fill_constant(shape=[1], value=2, dtype="int32")
+    two = paddle.tensor.fill_constant(shape=[1], value=2, dtype="int32")
     x = paddle.reshape(x, shape=[1, two])
@@ -261,7 +261,7 @@ class TestErrorStaticLayerCallInCompiletime(TestErrorBase):
             'inner_func()',
             'File "{}", line 28, in inner_func'.format(self.filepath),
             'def inner_func():',
-            'fluid.layers.fill_constant(shape=[1, 2], value=9, dtype="int")',
+            'paddle.tensor.fill_constant(shape=[1, 2], value=9, dtype="int")',
             '<--- HERE',
             'return',
         ]
@@ -340,7 +340,7 @@ class TestErrorStaticLayerCallInRuntime(TestErrorStaticLayerCallInCompiletime):
                 self.filepath
             ),
             'x = fluid.dygraph.to_variable(x)',
-            'two = fluid.layers.fill_constant(shape=[1], value=2, dtype="int32")',
+            'two = paddle.tensor.fill_constant(shape=[1], value=2, dtype="int32")',
             'x = paddle.reshape(x, shape=[1, two])',
             '<--- HERE',
             'return x',
@@ -356,7 +356,7 @@ class TestErrorStaticLayerCallInRuntime2(TestErrorStaticLayerCallInRuntime):
             'File "{}", line 106, in func_error_in_runtime_with_empty_line'.format(
                 self.filepath
             ),
-            'two = fluid.layers.fill_constant(shape=[1], value=2, dtype="int32")',
+            'two = paddle.tensor.fill_constant(shape=[1], value=2, dtype="int32")',
             'x = paddle.reshape(x, shape=[1, two])',
             '<--- HERE',
             'return x',
@@ -379,7 +379,7 @@ class TestJitSaveInCompiletime(TestErrorBase):
             'File "{}", line 80, in forward'.format(self.filepath),
             'def forward(self, x):',
             'y = self._linear(x)',
-            'z = fluid.layers.fill_constant(shape=[1, 2], value=9, dtype="int")',
+            'z = paddle.tensor.fill_constant(shape=[1, 2], value=9, dtype="int")',
             '<--- HERE',
             'out = paddle.mean(y[z])',
             'return out',
...
@@ -26,7 +26,7 @@ from paddle.static import InputSpec
 # 0. for in range var.numpy()[0]
 @paddle.jit.to_static
 def for_in_range(x):
-    z = fluid.layers.fill_constant([1], 'int32', 0)
+    z = paddle.tensor.fill_constant([1], 'int32', 0)
     x = fluid.dygraph.to_variable(x)
     for i in range(x.numpy()[0]):
         z = z + i
@@ -36,7 +36,7 @@ def for_in_range(x):
 # 1. for iter list
 @paddle.jit.to_static
 def for_iter_list(x_array):
-    z = fluid.layers.fill_constant([1], 'int32', 0)
+    z = paddle.tensor.fill_constant([1], 'int32', 0)
     for x in x_array:
         z = z + x
     return z
@@ -45,7 +45,7 @@ def for_iter_list(x_array):
 # 2. for enumerate list
 @paddle.jit.to_static
 def for_enumerate_list(x_array):
-    z = fluid.layers.fill_constant([1], 'int32', 0)
+    z = paddle.tensor.fill_constant([1], 'int32', 0)
     for i, x in enumerate(x_array):
         z = z + x + i
     return z
@@ -54,7 +54,7 @@ def for_enumerate_list(x_array):
 # 3. for iter var.numpy()
 @paddle.jit.to_static
 def for_iter_var_numpy(x_array):
-    z = fluid.layers.fill_constant([1], 'int32', 0)
+    z = paddle.tensor.fill_constant([1], 'int32', 0)
     x_array = fluid.dygraph.to_variable(x_array)
     for x in x_array.numpy():
         z = z + x
@@ -64,8 +64,8 @@ def for_iter_var_numpy(x_array):
 # 4. for enumerate var.numpy()
 @paddle.jit.to_static
 def for_enumerate_var_numpy(x_array):
-    y = fluid.layers.fill_constant([1], 'int32', 0)
-    z = fluid.layers.fill_constant([1], 'int32', 0)
+    y = paddle.tensor.fill_constant([1], 'int32', 0)
+    z = paddle.tensor.fill_constant([1], 'int32', 0)
     x_array = fluid.dygraph.to_variable(x_array)
     for i, x in enumerate(x_array.numpy()):
         y = y + i
@@ -76,8 +76,8 @@ def for_enumerate_var_numpy(x_array):
 # 5. for enumerate var.numpy() with start
 @paddle.jit.to_static
 def for_enumerate_var_numpy_with_start(x_array):
-    y = fluid.layers.fill_constant([1], 'int32', 0)
-    z = fluid.layers.fill_constant([1], 'int32', 0)
+    y = paddle.tensor.fill_constant([1], 'int32', 0)
+    z = paddle.tensor.fill_constant([1], 'int32', 0)
     x_array = fluid.dygraph.to_variable(x_array)
     for i, x in enumerate(x_array.numpy(), 1):
         y = y + i
@@ -88,7 +88,7 @@ def for_enumerate_var_numpy_with_start(x_array):
 # 6. for in range with break
 @paddle.jit.to_static
 def for_in_range_with_break(x):
-    z = fluid.layers.fill_constant([1], 'int32', 0)
+    z = paddle.tensor.fill_constant([1], 'int32', 0)
     x = fluid.dygraph.to_variable(x)
     for i in range(x.numpy()[0]):
         z = z + i
@@ -100,8 +100,8 @@ def for_in_range_with_break(x):
 # 7. for enumerate var.numpy() with break
 @paddle.jit.to_static
 def for_enumerate_var_numpy_with_break(x_array):
-    y = fluid.layers.fill_constant([1], 'int32', 0)
-    z = fluid.layers.fill_constant([1], 'int32', 0)
+    y = paddle.tensor.fill_constant([1], 'int32', 0)
+    z = paddle.tensor.fill_constant([1], 'int32', 0)
     x_array = fluid.dygraph.to_variable(x_array)
     for i, x in enumerate(x_array.numpy()):
         y = y + i
@@ -114,8 +114,8 @@ def for_enumerate_var_numpy_with_break(x_array):
 # 8. for enumerate var.numpy() with continue
 @paddle.jit.to_static
 def for_enumerate_var_numpy_with_continue(x_array):
-    y = fluid.layers.fill_constant([1], 'int32', 0)
-    z = fluid.layers.fill_constant([1], 'int32', 0)
+    y = paddle.tensor.fill_constant([1], 'int32', 0)
+    z = paddle.tensor.fill_constant([1], 'int32', 0)
     x_array = fluid.dygraph.to_variable(x_array)
     for i, x in enumerate(x_array.numpy()):
         y = y + i
@@ -128,8 +128,8 @@ def for_enumerate_var_numpy_with_continue(x_array):
 # 9. for enumerate var.numpy() with start & break
 @paddle.jit.to_static
 def for_enumerate_var_numpy_with_start_break(x_array):
-    y = fluid.layers.fill_constant([1], 'int32', 0)
-    z = fluid.layers.fill_constant([1], 'int32', 0)
+    y = paddle.tensor.fill_constant([1], 'int32', 0)
+    z = paddle.tensor.fill_constant([1], 'int32', 0)
     x_array = fluid.dygraph.to_variable(x_array)
     for i, x in enumerate(x_array.numpy(), 1):
         y = y + i
@@ -142,8 +142,8 @@ def for_enumerate_var_numpy_with_start_break(x_array):
 # 10. for enumerate var.numpy() with start & continue
 @paddle.jit.to_static
 def for_enumerate_var_numpy_with_start_continue(x_array):
-    y = fluid.layers.fill_constant([1], 'int32', 0)
-    z = fluid.layers.fill_constant([1], 'int32', 0)
+    y = paddle.tensor.fill_constant([1], 'int32', 0)
+    z = paddle.tensor.fill_constant([1], 'int32', 0)
     x_array = fluid.dygraph.to_variable(x_array)
     for i, x in enumerate(x_array.numpy(), 1):
         y = y + i
@@ -156,7 +156,7 @@ def for_enumerate_var_numpy_with_start_continue(x_array):
 # 11. for iter var
 @paddle.jit.to_static
 def for_iter_var(x_array):
-    z = fluid.layers.fill_constant([1], 'int32', 0)
+    z = paddle.tensor.fill_constant([1], 'int32', 0)
     x_array = fluid.dygraph.to_variable(x_array)
     for x in x_array:
@@ -167,8 +167,8 @@ def for_iter_var(x_array):
 # 12. for enumerate var
 @paddle.jit.to_static
 def for_enumerate_var(x_array):
-    y = fluid.layers.fill_constant([1], 'int32', 0)
-    z = fluid.layers.fill_constant([1], 'int32', 0)
+    y = paddle.tensor.fill_constant([1], 'int32', 0)
+    z = paddle.tensor.fill_constant([1], 'int32', 0)
     x_array = fluid.dygraph.to_variable(x_array)
     for i, x in enumerate(x_array):
         y = y + i
@@ -181,12 +181,12 @@ def for_enumerate_var(x_array):
 def for_iter_var_list(x):
     # 1. prepare data, ref test_list.py
     x = fluid.dygraph.to_variable(x)
-    iter_num = fluid.layers.fill_constant(shape=[1], value=5, dtype="int32")
+    iter_num = paddle.tensor.fill_constant(shape=[1], value=5, dtype="int32")
     a = []
     for i in range(iter_num):
         a.append(x + i)
     # 2. iter list[var]
-    y = fluid.layers.fill_constant([1], 'int32', 0)
+    y = paddle.tensor.fill_constant([1], 'int32', 0)
     for x in a:
         y = y + x
     return y
@@ -197,13 +197,13 @@ def for_iter_var_list(x):
 def for_enumerate_var_list(x):
     # 1. prepare data, ref test_list.py
     x = fluid.dygraph.to_variable(x)
-    iter_num = fluid.layers.fill_constant(shape=[1], value=5, dtype="int32")
+    iter_num = paddle.tensor.fill_constant(shape=[1], value=5, dtype="int32")
     a = []
     for i in range(iter_num):
         a.append(x + i)
     # 2. iter list[var]
-    y = fluid.layers.fill_constant([1], 'int32', 0)
-    z = fluid.layers.fill_constant([1], 'int32', 0)
+    y = paddle.tensor.fill_constant([1], 'int32', 0)
+    z = paddle.tensor.fill_constant([1], 'int32', 0)
     for i, x in enumerate(a):
         y = y + i
         z = z + x
@@ -213,7 +213,7 @@ def for_enumerate_var_list(x):
 # 15. for enumerate list[var] with a nested for range
 @paddle.jit.to_static
 def for_enumerate_var_with_nested_range(x_array):
-    x = fluid.layers.fill_constant([1], 'int32', 0)
+    x = paddle.tensor.fill_constant([1], 'int32', 0)
     x_array = fluid.dygraph.to_variable(x_array)
     for i, num in enumerate(x_array):
         for idx in range(num):
@@ -224,7 +224,7 @@ def for_enumerate_var_with_nested_range(x_array):
 # 16. for iter var[idx]
 @paddle.jit.to_static
 def for_iter_var_idx(x_array):
-    z = fluid.layers.fill_constant([1], 'int32', 0)
+    z = paddle.tensor.fill_constant([1], 'int32', 0)
     x_array = fluid.dygraph.to_variable(x_array)
     for x in x_array[0:]:
@@ -306,7 +306,7 @@ class ForwardContainsForLayer(paddle.nn.Layer):
 # 21. for original list
 @paddle.jit.to_static
 def for_original_list():
-    z = fluid.layers.fill_constant([1], 'int32', 0)
+    z = paddle.tensor.fill_constant([1], 'int32', 0)
     for x in [1, 2, 3]:
         z = z + x
     return z
@@ -315,7 +315,7 @@ def for_original_list():
 # 22. for original tuple
 @paddle.jit.to_static
 def for_original_tuple():
-    z = fluid.layers.fill_constant([1], 'int32', 0)
+    z = paddle.tensor.fill_constant([1], 'int32', 0)
     for x in (1, 2, 3):
         z = z + x
     return z
...
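These test hunks also exercise the positional calling convention, which the migration preserves: `paddle.tensor.fill_constant([1], 'int32', 0)` binds as `shape=[1], dtype='int32', value=0`, matching the keyword-style calls elsewhere in the diff. A quick sketch:

    import paddle

    z = paddle.tensor.fill_constant([1], 'int32', 0)  # positional: shape, dtype, value
    print(z.numpy())  # [0]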
@@ -158,8 +158,8 @@ def dyfunc_ifExp_with_while(x):
         i += 1
         return [i, ten, y]
-    i = fluid.layers.fill_constant(shape=[1], dtype='int64', value=0)
-    ten = fluid.layers.fill_constant(shape=[1], dtype='int64', value=10)
+    i = paddle.tensor.fill_constant(shape=[1], dtype='int64', value=0)
+    ten = paddle.tensor.fill_constant(shape=[1], dtype='int64', value=10)
     i, ten, y = paddle.static.nn.while_loop(cond, body, [i, ten, y])
     return y[0]
@@ -180,7 +180,7 @@ def dyfunc_ifExp(x):
     def map_func(func, tensor_list):
         return [func(x) for x in tensor_list]
-    i = fluid.layers.fill_constant(shape=[1], dtype='int64', value=0)
+    i = paddle.tensor.fill_constant(shape=[1], dtype='int64', value=0)
     # It will be converted into `layers.cond` as follows.
     # map_func(lambda x: paddle.static.nn.cond(i==1, lambda: x, lambda: add_fn(x), y)
     # `if (Tensor) == 1` is supported in dygraph.
...
@@ -35,7 +35,7 @@ def len_with_tensor(x):
 def len_with_lod_tensor_array(x):
     x = fluid.dygraph.to_variable(x)
-    i = fluid.layers.fill_constant(shape=[1], dtype='int64', value=0)
+    i = paddle.tensor.fill_constant(shape=[1], dtype='int64', value=0)
     arr = paddle.tensor.array_write(x, i=i)
     arr_len = len(arr)
...
...@@ -42,7 +42,7 @@ def test_list_append_in_if(x): ...@@ -42,7 +42,7 @@ def test_list_append_in_if(x):
a.append(x) a.append(x)
else: else:
a.append( a.append(
fluid.layers.fill_constant(shape=[1, 2], value=9, dtype="int64") paddle.tensor.fill_constant(shape=[1, 2], value=9, dtype="int64")
) )
# TODO(Aurelius84): Currently, run_program_op doesn't support output LoDTensorArray. # TODO(Aurelius84): Currently, run_program_op doesn't support output LoDTensorArray.
return a[0] return a[0]
...@@ -51,7 +51,7 @@ def test_list_append_in_if(x): ...@@ -51,7 +51,7 @@ def test_list_append_in_if(x):
def test_list_append_in_for_loop(x, iter_num): def test_list_append_in_for_loop(x, iter_num):
x = fluid.dygraph.to_variable(x) x = fluid.dygraph.to_variable(x)
# Use `fill_constant` so that static analysis can analyze the type of iter_num is Tensor # Use `fill_constant` so that static analysis can analyze the type of iter_num is Tensor
iter_num = fluid.layers.fill_constant( iter_num = paddle.tensor.fill_constant(
shape=[1], value=iter_num, dtype="int32" shape=[1], value=iter_num, dtype="int32"
) # TODO(liym27): Delete it if the type of parameter iter_num can be resolved ) # TODO(liym27): Delete it if the type of parameter iter_num can be resolved
a = [] a = []
...@@ -88,7 +88,7 @@ def test_list_append_in_for_loop_with_concat(x, iter_num): ...@@ -88,7 +88,7 @@ def test_list_append_in_for_loop_with_concat(x, iter_num):
x = fluid.dygraph.to_variable(x) x = fluid.dygraph.to_variable(x)
a = [] a = []
# Use `fill_constant` so that static analysis can analyze the type of iter_num is Tensor # Use `fill_constant` so that static analysis can analyze the type of iter_num is Tensor
iter_num = fluid.layers.fill_constant( iter_num = paddle.tensor.fill_constant(
shape=[1], value=iter_num, dtype="int32" shape=[1], value=iter_num, dtype="int32"
) # TODO(liym27): Delete it if the type of parameter iter_num can be resolved ) # TODO(liym27): Delete it if the type of parameter iter_num can be resolved
for i in range(iter_num): for i in range(iter_num):
...@@ -99,7 +99,7 @@ def test_list_append_in_for_loop_with_concat(x, iter_num): ...@@ -99,7 +99,7 @@ def test_list_append_in_for_loop_with_concat(x, iter_num):
def test_list_append_in_while_loop(x, iter_num): def test_list_append_in_while_loop(x, iter_num):
x = fluid.dygraph.to_variable(x) x = fluid.dygraph.to_variable(x)
iter_num = fluid.layers.fill_constant( iter_num = paddle.tensor.fill_constant(
shape=[1], value=iter_num, dtype="int32" shape=[1], value=iter_num, dtype="int32"
) )
a = [] a = []
...@@ -112,7 +112,7 @@ def test_list_append_in_while_loop(x, iter_num): ...@@ -112,7 +112,7 @@ def test_list_append_in_while_loop(x, iter_num):
def test_list_append_in_while_loop_with_stack(x, iter_num): def test_list_append_in_while_loop_with_stack(x, iter_num):
x = fluid.dygraph.to_variable(x) x = fluid.dygraph.to_variable(x)
iter_num = fluid.layers.fill_constant( iter_num = paddle.tensor.fill_constant(
shape=[1], value=iter_num, dtype="int32" shape=[1], value=iter_num, dtype="int32"
) )
a = [] a = []
...@@ -159,11 +159,11 @@ def test_list_pop_in_if(x): ...@@ -159,11 +159,11 @@ def test_list_pop_in_if(x):
if x.numpy()[0] > 0: if x.numpy()[0] > 0:
a.append(x) a.append(x)
b.append(x + 1) b.append(x + 1)
a.append(fluid.layers.fill_constant(shape=[1], value=1, dtype="int64")) a.append(paddle.tensor.fill_constant(shape=[1], value=1, dtype="int64"))
else: else:
a.append(x + 1) a.append(x + 1)
b.append(x - 1) b.append(x - 1)
a.append(fluid.layers.fill_constant(shape=[2], value=2, dtype="int64")) a.append(paddle.tensor.fill_constant(shape=[2], value=2, dtype="int64"))
item1 = a.pop(1) item1 = a.pop(1)
return item1, b[-1] return item1, b[-1]
...@@ -171,7 +171,7 @@ def test_list_pop_in_if(x): ...@@ -171,7 +171,7 @@ def test_list_pop_in_if(x):
def test_list_pop_in_for_loop(x, iter_num): def test_list_pop_in_for_loop(x, iter_num):
x = fluid.dygraph.to_variable(x) x = fluid.dygraph.to_variable(x)
# Use `fill_constant` so that static analysis can infer that iter_num is a Tensor # Use `fill_constant` so that static analysis can infer that iter_num is a Tensor
iter_num = fluid.layers.fill_constant( iter_num = paddle.tensor.fill_constant(
shape=[1], value=iter_num, dtype="int32" shape=[1], value=iter_num, dtype="int32"
) # TODO(liym27): Delete it if the type of parameter iter_num can be resolved ) # TODO(liym27): Delete it if the type of parameter iter_num can be resolved
...@@ -189,7 +189,7 @@ def test_list_pop_in_for_loop(x, iter_num): ...@@ -189,7 +189,7 @@ def test_list_pop_in_for_loop(x, iter_num):
def test_list_pop_in_while_loop(x, iter_num): def test_list_pop_in_while_loop(x, iter_num):
x = fluid.dygraph.to_variable(x) x = fluid.dygraph.to_variable(x)
iter_num = fluid.layers.fill_constant( iter_num = paddle.tensor.fill_constant(
shape=[1], value=iter_num, dtype="int32" shape=[1], value=iter_num, dtype="int32"
) )
a = [] a = []
......
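
The list-append and list-pop tests above all rely on the same trick: wrapping a Python int in fill_constant makes the loop bound a Tensor, so dynamic-to-static conversion emits a real loop instead of unrolling it at trace time. A toy sketch of the pattern (append_n_times is a hypothetical name, not one of the tests):

    import paddle

    def append_n_times(x, iter_num):
        # A Tensor bound keeps the loop dynamic under to_static conversion.
        iter_num = paddle.tensor.fill_constant(shape=[1], value=iter_num, dtype='int32')
        a = []
        for i in range(iter_num):
            a.append(x)
        return a[0]

    out = paddle.jit.to_static(append_n_times)(paddle.ones([2]), 3)
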
...@@ -89,7 +89,7 @@ def for_loop_dyfunc(max_len): ...@@ -89,7 +89,7 @@ def for_loop_dyfunc(max_len):
def for_loop_dyfunc2(max_len): def for_loop_dyfunc2(max_len):
# Test case: a variable is used and created in the loop, but is used before it is created # Test case: a variable is used and created in the loop, but is used before it is created
x = fluid.layers.fill_constant(shape=[1, 2], dtype="int32", value=1) x = paddle.tensor.fill_constant(shape=[1, 2], dtype="int32", value=1)
for i in range(max_len): for i in range(max_len):
if i > 1: if i > 1:
...@@ -97,7 +97,7 @@ def for_loop_dyfunc2(max_len): ...@@ -97,7 +97,7 @@ def for_loop_dyfunc2(max_len):
a = 1 a = 1
q, _ = x.shape # test var x.shape only used but not created in loop q, _ = x.shape # test var x.shape only used but not created in loop
ret = fluid.layers.fill_constant(shape=[1], dtype="int32", value=s + q) ret = paddle.tensor.fill_constant(shape=[1], dtype="int32", value=s + q)
return ret return ret
...@@ -189,7 +189,7 @@ def for_loop_class_var(max_len): ...@@ -189,7 +189,7 @@ def for_loop_class_var(max_len):
foo = Foo() foo = Foo()
# Use `fill_constant` so that static analysis can infer that max_len is a Tensor # Use `fill_constant` so that static analysis can infer that max_len is a Tensor
max_len = fluid.layers.fill_constant( max_len = paddle.tensor.fill_constant(
shape=[1], value=max_len, dtype="int32" shape=[1], value=max_len, dtype="int32"
) )
...@@ -206,8 +206,8 @@ def var_create_in_for_loop(max_len): ...@@ -206,8 +206,8 @@ def var_create_in_for_loop(max_len):
def nested_for_loop_dyfunc(): def nested_for_loop_dyfunc():
two = fluid.layers.fill_constant(shape=[1], value=2, dtype="int32") two = paddle.tensor.fill_constant(shape=[1], value=2, dtype="int32")
three = fluid.layers.fill_constant(shape=[1], value=3, dtype="int32") three = paddle.tensor.fill_constant(shape=[1], value=3, dtype="int32")
for j in range(two): for j in range(two):
for i in range(10): for i in range(10):
a = 2 + j a = 2 + j
......
...@@ -69,7 +69,7 @@ def test_return_if_else(x): ...@@ -69,7 +69,7 @@ def test_return_if_else(x):
@to_static @to_static
def test_return_in_while(x): def test_return_in_while(x):
x = fluid.dygraph.to_variable(x) x = fluid.dygraph.to_variable(x)
i = fluid.layers.fill_constant(shape=[1], dtype='int32', value=0) i = paddle.tensor.fill_constant(shape=[1], dtype='int32', value=0)
while i < 10: while i < 10:
i += 1 i += 1
if i > 5: if i > 5:
......
...@@ -109,11 +109,11 @@ def dyfunc_with_if_1(x): ...@@ -109,11 +109,11 @@ def dyfunc_with_if_1(x):
# `res.shape[0]` is transformed into # `res.shape[0]` is transformed into
# `paddle.jit.dy2static.convert_var_shape(res)[0]` # `paddle.jit.dy2static.convert_var_shape(res)[0]`
if res.shape[0] > 1: if res.shape[0] > 1:
res = fluid.layers.fill_constant( res = paddle.tensor.fill_constant(
value=2, shape=x.shape, dtype="int32" value=2, shape=x.shape, dtype="int32"
) )
else: else:
res = fluid.layers.fill_constant( res = paddle.tensor.fill_constant(
value=3, shape=x.shape, dtype="int32" value=3, shape=x.shape, dtype="int32"
) )
return res return res
...@@ -125,14 +125,14 @@ def dyfunc_with_if_2(x): ...@@ -125,14 +125,14 @@ def dyfunc_with_if_2(x):
if len(x.shape) < 1: if len(x.shape) < 1:
res = x res = x
else: else:
res = fluid.layers.fill_constant(value=8, shape=x.shape, dtype="int32") res = paddle.tensor.fill_constant(value=8, shape=x.shape, dtype="int32")
return res return res
def dyfunc_with_for_1(x): def dyfunc_with_for_1(x):
x = fluid.dygraph.to_variable(x) x = fluid.dygraph.to_variable(x)
res = fluid.layers.fill_constant(value=0, shape=[1], dtype="int32") res = paddle.tensor.fill_constant(value=0, shape=[1], dtype="int32")
# `x.shape[0]` is transformed into `paddle.jit.dy2static.convert_var_shape(x)[0]` # `x.shape[0]` is transformed into `paddle.jit.dy2static.convert_var_shape(x)[0]`
for i in range(x.shape[0]): for i in range(x.shape[0]):
res += 1 res += 1
...@@ -142,7 +142,7 @@ def dyfunc_with_for_1(x): ...@@ -142,7 +142,7 @@ def dyfunc_with_for_1(x):
def dyfunc_with_for_2(x): def dyfunc_with_for_2(x):
x = fluid.dygraph.to_variable(x) x = fluid.dygraph.to_variable(x)
x_shape_0 = x.shape[0] x_shape_0 = x.shape[0]
res = fluid.layers.fill_constant(value=0, shape=[1], dtype="int32") res = paddle.tensor.fill_constant(value=0, shape=[1], dtype="int32")
# `x_shape_0` is transformed into `paddle.jit.dy2static.convert_var_shape(x)[0]` # `x_shape_0` is transformed into `paddle.jit.dy2static.convert_var_shape(x)[0]`
for i in range(x_shape_0): for i in range(x_shape_0):
...@@ -152,7 +152,7 @@ def dyfunc_with_for_2(x): ...@@ -152,7 +152,7 @@ def dyfunc_with_for_2(x):
def dyfunc_with_for_3(x): def dyfunc_with_for_3(x):
x = fluid.dygraph.to_variable(x) x = fluid.dygraph.to_variable(x)
res = fluid.layers.fill_constant(value=0, shape=[1], dtype="int32") res = paddle.tensor.fill_constant(value=0, shape=[1], dtype="int32")
# `len(x.shape)` is not transformed. # `len(x.shape)` is not transformed.
for i in range(len(x.shape)): for i in range(len(x.shape)):
res += 1 res += 1
...@@ -162,7 +162,7 @@ def dyfunc_with_for_3(x): ...@@ -162,7 +162,7 @@ def dyfunc_with_for_3(x):
def dyfunc_with_while_1(x): def dyfunc_with_while_1(x):
x = fluid.dygraph.to_variable(x) x = fluid.dygraph.to_variable(x)
res = fluid.layers.fill_constant(value=0, shape=[1], dtype="int32") res = paddle.tensor.fill_constant(value=0, shape=[1], dtype="int32")
# `x.shape[0]` is transformed into `paddle.jit.dy2static.convert_var_shape(x)[0]` # `x.shape[0]` is transformed into `paddle.jit.dy2static.convert_var_shape(x)[0]`
i = 1 i = 1
while i < x.shape[0]: while i < x.shape[0]:
...@@ -174,7 +174,7 @@ def dyfunc_with_while_1(x): ...@@ -174,7 +174,7 @@ def dyfunc_with_while_1(x):
def dyfunc_with_while_2(x): def dyfunc_with_while_2(x):
x = fluid.dygraph.to_variable(x) x = fluid.dygraph.to_variable(x)
x_shape_0 = x.shape[0] x_shape_0 = x.shape[0]
res = fluid.layers.fill_constant(value=0, shape=[1], dtype="int32") res = paddle.tensor.fill_constant(value=0, shape=[1], dtype="int32")
i = 1 i = 1
# `x_shape_0` is transformed into `paddle.jit.dy2static.convert_var_shape(x)[0]` # `x_shape_0` is transformed into `paddle.jit.dy2static.convert_var_shape(x)[0]`
while i < x_shape_0: while i < x_shape_0:
...@@ -186,7 +186,7 @@ def dyfunc_with_while_2(x): ...@@ -186,7 +186,7 @@ def dyfunc_with_while_2(x):
def dyfunc_with_while_3(x): def dyfunc_with_while_3(x):
x = fluid.dygraph.to_variable(x) x = fluid.dygraph.to_variable(x)
x_shape = x.shape x_shape = x.shape
res = fluid.layers.fill_constant(value=0, shape=[1], dtype="int32") res = paddle.tensor.fill_constant(value=0, shape=[1], dtype="int32")
i = 1 i = 1
# `len(x.shape)` is not transformed. # `len(x.shape)` is not transformed.
...@@ -601,7 +601,7 @@ def dyfunc_with_static_convert_var_shape(x): ...@@ -601,7 +601,7 @@ def dyfunc_with_static_convert_var_shape(x):
else: else:
# Test that `batch_size__static_convert_var_shape_suffix_0` is found correctly in a # Test that `batch_size__static_convert_var_shape_suffix_0` is found correctly in a
# deeply nested scope. # deeply nested scope.
res = fluid.layers.fill_constant( res = paddle.tensor.fill_constant(
value=8, shape=[batch_size], dtype="int32" value=8, shape=[batch_size], dtype="int32"
) )
......
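
The shape tests above exercise the convert_var_shape rewrite: under @to_static, reading x.shape[0] inside a loop works because dy2static substitutes a runtime shape query. A minimal sketch (count_rows is a hypothetical name):

    import paddle

    @paddle.jit.to_static
    def count_rows(x):
        res = paddle.tensor.fill_constant(value=0, shape=[1], dtype='int32')
        # `x.shape[0]` becomes paddle.jit.dy2static.convert_var_shape(x)[0]
        for i in range(x.shape[0]):
            res += 1
        return res

    print(count_rows(paddle.ones([4, 2])))  # Tensor holding 4
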
...@@ -16,7 +16,6 @@ import numpy as np ...@@ -16,7 +16,6 @@ import numpy as np
import paddle import paddle
import paddle.fluid as fluid import paddle.fluid as fluid
import paddle.fluid.layers as layers
import paddle.nn.functional as F import paddle.nn.functional as F
from paddle.fluid.dygraph import Layer, to_variable from paddle.fluid.dygraph import Layer, to_variable
from paddle.jit.api import dygraph_to_static_func from paddle.jit.api import dygraph_to_static_func
...@@ -796,7 +795,7 @@ class Transformer(Layer): ...@@ -796,7 +795,7 @@ class Transformer(Layer):
# constant number # constant number
inf = float(1.0 * 1e7) inf = float(1.0 * 1e7)
max_len = (enc_output.shape[1] + 20) if max_len is None else max_len max_len = (enc_output.shape[1] + 20) if max_len is None else max_len
vocab_size_tensor = layers.fill_constant( vocab_size_tensor = paddle.tensor.fill_constant(
shape=[1], dtype="int64", value=self.trg_vocab_size shape=[1], dtype="int64", value=self.trg_vocab_size
) )
end_token_tensor = to_variable( end_token_tensor = to_variable(
...@@ -824,7 +823,7 @@ class Transformer(Layer): ...@@ -824,7 +823,7 @@ class Transformer(Layer):
np.full([batch_size, beam_size], 0, dtype="bool") np.full([batch_size, beam_size], 0, dtype="bool")
) )
trg_word = layers.fill_constant( trg_word = paddle.tensor.fill_constant(
shape=[batch_size * beam_size, 1], dtype="int64", value=bos_id shape=[batch_size * beam_size, 1], dtype="int64", value=bos_id
) )
...@@ -838,12 +837,12 @@ class Transformer(Layer): ...@@ -838,12 +837,12 @@ class Transformer(Layer):
# init states (caches) for the transformer; they are updated according to the selected beam # init states (caches) for the transformer; they are updated according to the selected beam
caches = [ caches = [
{ {
"k": layers.fill_constant( "k": paddle.tensor.fill_constant(
shape=[batch_size, beam_size, self.n_head, 0, self.d_key], shape=[batch_size, beam_size, self.n_head, 0, self.d_key],
dtype=enc_output.dtype, dtype=enc_output.dtype,
value=0, value=0,
), ),
"v": layers.fill_constant( "v": paddle.tensor.fill_constant(
shape=[batch_size, beam_size, self.n_head, 0, self.d_value], shape=[batch_size, beam_size, self.n_head, 0, self.d_value],
dtype=enc_output.dtype, dtype=enc_output.dtype,
value=0, value=0,
...@@ -853,7 +852,7 @@ class Transformer(Layer): ...@@ -853,7 +852,7 @@ class Transformer(Layer):
] ]
for i in range(paddle.to_tensor(max_len)): for i in range(paddle.to_tensor(max_len)):
trg_pos = layers.fill_constant( trg_pos = paddle.tensor.fill_constant(
shape=trg_word.shape, dtype="int64", value=i shape=trg_word.shape, dtype="int64", value=i
) )
caches = paddle.utils.map_structure( caches = paddle.utils.map_structure(
......
...@@ -89,10 +89,10 @@ class TestMinMaxTensor(TestBase): ...@@ -89,10 +89,10 @@ class TestMinMaxTensor(TestBase):
name=self.feed_list[0], shape=self.feed_shape[0], dtype='float32' name=self.feed_list[0], shape=self.feed_shape[0], dtype='float32'
) )
min = paddle.fluid.layers.fill_constant( min = paddle.tensor.fill_constant(
name="min", shape=[1], dtype='float32', value=0.1 name="min", shape=[1], dtype='float32', value=0.1
) )
max = paddle.fluid.layers.fill_constant( max = paddle.tensor.fill_constant(
name="max", shape=[1], dtype='float32', value=3.4 name="max", shape=[1], dtype='float32', value=3.4
) )
x = paddle.clip(x, min=min, max=max) x = paddle.clip(x, min=min, max=max)
...@@ -106,7 +106,7 @@ class TestMinTensor(TestBase): ...@@ -106,7 +106,7 @@ class TestMinTensor(TestBase):
name=self.feed_list[0], shape=self.feed_shape[0], dtype='float32' name=self.feed_list[0], shape=self.feed_shape[0], dtype='float32'
) )
min = paddle.fluid.layers.fill_constant( min = paddle.tensor.fill_constant(
name="min", shape=[1], dtype='float32', value=0.1 name="min", shape=[1], dtype='float32', value=0.1
) )
x = paddle.clip(x, min=min) x = paddle.clip(x, min=min)
...@@ -120,7 +120,7 @@ class TestMaxTensor(TestBase): ...@@ -120,7 +120,7 @@ class TestMaxTensor(TestBase):
name=self.feed_list[0], shape=self.feed_shape[0], dtype='float32' name=self.feed_list[0], shape=self.feed_shape[0], dtype='float32'
) )
max = paddle.fluid.layers.fill_constant( max = paddle.tensor.fill_constant(
name="max", shape=[1], dtype='float32', value=3.4 name="max", shape=[1], dtype='float32', value=3.4
) )
x = paddle.clip(x, max=max) x = paddle.clip(x, max=max)
...@@ -134,7 +134,7 @@ class TestCombine1(TestBase): ...@@ -134,7 +134,7 @@ class TestCombine1(TestBase):
name=self.feed_list[0], shape=self.feed_shape[0], dtype='float32' name=self.feed_list[0], shape=self.feed_shape[0], dtype='float32'
) )
min = paddle.fluid.layers.fill_constant( min = paddle.tensor.fill_constant(
name="min", shape=[1], dtype='float32', value=0.1 name="min", shape=[1], dtype='float32', value=0.1
) )
x = paddle.clip(x, min=min, max=3.4) x = paddle.clip(x, min=min, max=3.4)
...@@ -148,7 +148,7 @@ class TestCombine2(TestBase): ...@@ -148,7 +148,7 @@ class TestCombine2(TestBase):
name=self.feed_list[0], shape=self.feed_shape[0], dtype='float32' name=self.feed_list[0], shape=self.feed_shape[0], dtype='float32'
) )
max = paddle.fluid.layers.fill_constant( max = paddle.tensor.fill_constant(
name="max", shape=[1], dtype='float32', value=3.4 name="max", shape=[1], dtype='float32', value=3.4
) )
x = paddle.clip(x, min=0.1, max=max) x = paddle.clip(x, min=0.1, max=max)
...@@ -186,10 +186,10 @@ class TestIntMinMax(TestBase): ...@@ -186,10 +186,10 @@ class TestIntMinMax(TestBase):
x = paddle.static.data( x = paddle.static.data(
name=self.feed_list[0], shape=self.feed_shape[0], dtype='int32' name=self.feed_list[0], shape=self.feed_shape[0], dtype='int32'
) )
min = paddle.fluid.layers.fill_constant( min = paddle.tensor.fill_constant(
name="min", shape=[1], dtype='int32', value=1 name="min", shape=[1], dtype='int32', value=1
) )
max = paddle.fluid.layers.fill_constant( max = paddle.tensor.fill_constant(
name="max", shape=[1], dtype='int32', value=3 name="max", shape=[1], dtype='int32', value=3
) )
x = paddle.clip(x, min=min, max=max) x = paddle.clip(x, min=min, max=max)
......
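
The clip hunks above pass the bounds as 1-element Tensors rather than Python floats; the same form works outside the IPU tests, e.g. in dygraph (a sketch, assuming this patch is applied):

    import paddle

    x = paddle.to_tensor([[-1.0, 0.5], [2.0, 5.0]])
    min_t = paddle.tensor.fill_constant(shape=[1], dtype='float32', value=0.1)
    max_t = paddle.tensor.fill_constant(shape=[1], dtype='float32', value=3.4)
    y = paddle.clip(x, min=min_t, max=max_t)  # [[0.1, 0.5], [2.0, 3.4]]
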
...@@ -80,7 +80,7 @@ class TestCase1(TestBase): ...@@ -80,7 +80,7 @@ class TestCase1(TestBase):
x = paddle.static.data( x = paddle.static.data(
name=self.feed_list[0], shape=self.feed_shape[0], dtype="float32" name=self.feed_list[0], shape=self.feed_shape[0], dtype="float32"
) )
expand_times = paddle.fluid.layers.fill_constant( expand_times = paddle.tensor.fill_constant(
shape=[len(self.feed_shape[0])], dtype="int32", value=2 shape=[len(self.feed_shape[0])], dtype="int32", value=2
) )
out = paddle.expand(x, expand_times, **self.attrs) out = paddle.expand(x, expand_times, **self.attrs)
......
...@@ -100,7 +100,7 @@ class TestCase4(TestBase): ...@@ -100,7 +100,7 @@ class TestCase4(TestBase):
'dtype': 'int32', 'dtype': 'int32',
'value': 3, 'value': 3,
} }
y = paddle.fluid.layers.fill_constant(**self.attrs) y = paddle.tensor.fill_constant(**self.attrs)
out = paddle.expand(x, shape=y) out = paddle.expand(x, shape=y)
self.fetch_list = [out.name] self.fetch_list = [out.name]
......
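
As in TestCase4 above, paddle.expand accepts the target shape as a 1-D integer Tensor; a dygraph sketch:

    import paddle

    x = paddle.ones([1, 3])
    shape_t = paddle.tensor.fill_constant(shape=[2], dtype='int32', value=3)
    y = paddle.expand(x, shape=shape_t)  # y.shape == [3, 3]
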
...@@ -46,7 +46,7 @@ class TestBase(IPUOpTest): ...@@ -46,7 +46,7 @@ class TestBase(IPUOpTest):
@IPUOpTest.static_graph @IPUOpTest.static_graph
def build_model(self): def build_model(self):
x = paddle.fluid.layers.fill_constant(**self.attrs) x = paddle.tensor.fill_constant(**self.attrs)
out = paddle.add(x, x) out = paddle.add(x, x)
self.fetch_list = [out.name] self.fetch_list = [out.name]
......
...@@ -73,7 +73,7 @@ class TestCase1(TestBase): ...@@ -73,7 +73,7 @@ class TestCase1(TestBase):
'dtype': 'int32', 'dtype': 'int32',
'value': 2, 'value': 2,
} }
y = paddle.fluid.layers.fill_constant(**const_attrs) y = paddle.tensor.fill_constant(**const_attrs)
pad = paddle.nn.functional.pad(x, pad=y) pad = paddle.nn.functional.pad(x, pad=y)
self.fetch_list = [pad.name] self.fetch_list = [pad.name]
......
...@@ -57,7 +57,7 @@ class TestTopKOp(IPUOpTest): ...@@ -57,7 +57,7 @@ class TestTopKOp(IPUOpTest):
topk_values, topk_indices = self.op(x, **self.attrs) topk_values, topk_indices = self.op(x, **self.attrs)
else: else:
# Important: PopART cannot accept a non-constant tensor # Important: PopART cannot accept a non-constant tensor
K_t = paddle.fluid.layers.fill_constant( K_t = paddle.tensor.fill_constant(
shape=[1], dtype='int32', value=self.k, name="in_2" shape=[1], dtype='int32', value=self.k, name="in_2"
) )
topk_values, topk_indices = self.op(x, K_t, **self.attrs) topk_values, topk_indices = self.op(x, K_t, **self.attrs)
......
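
The top-k hunk feeds k as a constant tensor because PopART rejects non-constant inputs; a Tensor k is accepted in dygraph as well (a sketch, assuming current paddle.topk behavior):

    import paddle

    x = paddle.to_tensor([1.0, 4.0, 2.0, 3.0])
    k_t = paddle.tensor.fill_constant(shape=[1], dtype='int32', value=2)
    values, indices = paddle.topk(x, k=k_t)  # values [4.0, 3.0], indices [1, 3]
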
...@@ -64,7 +64,7 @@ class TestBase(IPUOpTest): ...@@ -64,7 +64,7 @@ class TestBase(IPUOpTest):
'dtype': 'int32', 'dtype': 'int32',
'value': 6, 'value': 6,
} }
img_size = paddle.fluid.layers.fill_constant(**attrs) img_size = paddle.tensor.fill_constant(**attrs)
out = paddle.vision.ops.yolo_box(x=x, img_size=img_size, **self.attrs) out = paddle.vision.ops.yolo_box(x=x, img_size=img_size, **self.attrs)
self.fetch_list = [x.name for x in out] self.fetch_list = [x.name for x in out]
......
...@@ -20,7 +20,6 @@ from pass_test import PassTest ...@@ -20,7 +20,6 @@ from pass_test import PassTest
import paddle import paddle
import paddle.fluid as fluid import paddle.fluid as fluid
import paddle.fluid.core as core import paddle.fluid.core as core
import paddle.fluid.layers as layers
class FusionGroupPassTest(PassTest): class FusionGroupPassTest(PassTest):
...@@ -86,7 +85,7 @@ class FusionGroupPassComplicatedTest(FusionGroupPassTest): ...@@ -86,7 +85,7 @@ class FusionGroupPassComplicatedTest(FusionGroupPassTest):
with fluid.program_guard(self.main_program, self.startup_program): with fluid.program_guard(self.main_program, self.startup_program):
self.feed_vars = self._prepare_feed_vars([32, 64], dtype, 5, False) self.feed_vars = self._prepare_feed_vars([32, 64], dtype, 5, False)
one = layers.fill_constant(shape=[1], dtype=dtype, value=1.0) one = paddle.tensor.fill_constant(shape=[1], dtype=dtype, value=1.0)
tmp_0 = one * self.feed_vars[0] tmp_0 = one * self.feed_vars[0]
# subgraph with 9 op nodes # subgraph with 9 op nodes
tmp_1 = tmp_0 * paddle.nn.functional.sigmoid( tmp_1 = tmp_0 * paddle.nn.functional.sigmoid(
...@@ -142,7 +141,9 @@ class FusionGroupPassTestCastAndFP16(FusionGroupPassTest): ...@@ -142,7 +141,9 @@ class FusionGroupPassTestCastAndFP16(FusionGroupPassTest):
tmp_0 = self.feed_vars[0] * self.feed_vars[1] tmp_0 = self.feed_vars[0] * self.feed_vars[1]
tmp_0.stop_gradient = False tmp_0.stop_gradient = False
tmp_1 = paddle.cast(tmp_0, dtype="float16") tmp_1 = paddle.cast(tmp_0, dtype="float16")
zero = layers.fill_constant(shape=[128], dtype="float16", value=0) zero = paddle.tensor.fill_constant(
shape=[128], dtype="float16", value=0
)
# TODO(xreki): fix precision problem when using softmax of float16. # TODO(xreki): fix precision problem when using softmax of float16.
# tmp_2 = layers.softmax(tmp_1) # tmp_2 = layers.softmax(tmp_1)
tmp_2 = paddle.add(tmp_1, zero) tmp_2 = paddle.add(tmp_1, zero)
...@@ -212,7 +213,9 @@ class FusionGroupPassFillConstantTest(FusionGroupPassTest): ...@@ -212,7 +213,9 @@ class FusionGroupPassFillConstantTest(FusionGroupPassTest):
tmp_0 = paddle.add(self.feed_vars[0], self.feed_vars[1]) tmp_0 = paddle.add(self.feed_vars[0], self.feed_vars[1])
tmp_0.stop_gradient = False tmp_0.stop_gradient = False
tmp_1 = layers.fill_constant(shape=[2, 2], dtype=dtype, value=2.0) tmp_1 = paddle.tensor.fill_constant(
shape=[2, 2], dtype=dtype, value=2.0
)
tmp_2 = paddle.scale( tmp_2 = paddle.scale(
tmp_1, scale=3.0, bias=1.0, bias_after_scale=True tmp_1, scale=3.0, bias=1.0, bias_after_scale=True
) )
......
...@@ -16,7 +16,6 @@ import unittest ...@@ -16,7 +16,6 @@ import unittest
import paddle import paddle
import paddle.fluid as fluid import paddle.fluid as fluid
import paddle.fluid.layers as layers
from paddle.fluid import core from paddle.fluid import core
from paddle.fluid.framework import IrGraph, Program, program_guard from paddle.fluid.framework import IrGraph, Program, program_guard
from paddle.fluid.tests.unittests.op_test import OpTestTool from paddle.fluid.tests.unittests.op_test import OpTestTool
...@@ -55,8 +54,12 @@ class TestQuantizationSubGraph(unittest.TestCase): ...@@ -55,8 +54,12 @@ class TestQuantizationSubGraph(unittest.TestCase):
return linear_fc(5) return linear_fc(5)
with program_guard(main_program, startup_program): with program_guard(main_program, startup_program):
x = layers.fill_constant(shape=[1], dtype='float32', value=0.1) x = paddle.tensor.fill_constant(
y = layers.fill_constant(shape=[1], dtype='float32', value=0.23) shape=[1], dtype='float32', value=0.1
)
y = paddle.tensor.fill_constant(
shape=[1], dtype='float32', value=0.23
)
pred = paddle.less_than(y, x) pred = paddle.less_than(y, x)
out = paddle.static.nn.cond(pred, true_func, false_func) out = paddle.static.nn.cond(pred, true_func, false_func)
......
...@@ -235,7 +235,7 @@ class TestExpandV2API(unittest.TestCase): ...@@ -235,7 +235,7 @@ class TestExpandV2API(unittest.TestCase):
name='x', shape=[12, 14], dtype="float32" name='x', shape=[12, 14], dtype="float32"
) )
positive_2 = fluid.layers.fill_constant([1], "int32", 12) positive_2 = paddle.tensor.fill_constant([1], "int32", 12)
expand_shape = paddle.static.data( expand_shape = paddle.static.data(
name="expand_shape", name="expand_shape",
shape=[2], shape=[2],
......
...@@ -268,8 +268,8 @@ class TestFillConstantOp2_ValueTensor(OpTest): ...@@ -268,8 +268,8 @@ class TestFillConstantOp2_ValueTensor(OpTest):
class TestFillConstantAPI(unittest.TestCase): class TestFillConstantAPI(unittest.TestCase):
def test_api(self): def test_api(self):
positive_2_int32 = fluid.layers.fill_constant([1], "int32", 2) positive_2_int32 = paddle.tensor.fill_constant([1], "int32", 2)
positive_2_int64 = fluid.layers.fill_constant([1], "int64", 2) positive_2_int64 = paddle.tensor.fill_constant([1], "int64", 2)
shape_tensor_int32 = fluid.data( shape_tensor_int32 = fluid.data(
name="shape_tensor_int32", shape=[2], dtype="int32" name="shape_tensor_int32", shape=[2], dtype="int32"
...@@ -278,41 +278,41 @@ class TestFillConstantAPI(unittest.TestCase): ...@@ -278,41 +278,41 @@ class TestFillConstantAPI(unittest.TestCase):
name="shape_tensor_int64", shape=[2], dtype="int64" name="shape_tensor_int64", shape=[2], dtype="int64"
) )
out_1 = fluid.layers.fill_constant( out_1 = paddle.tensor.fill_constant(
shape=[1, 2], dtype="float32", value=1.1 shape=[1, 2], dtype="float32", value=1.1
) )
out_2 = fluid.layers.fill_constant( out_2 = paddle.tensor.fill_constant(
shape=[1, positive_2_int32], dtype="float32", value=1.1 shape=[1, positive_2_int32], dtype="float32", value=1.1
) )
out_3 = fluid.layers.fill_constant( out_3 = paddle.tensor.fill_constant(
shape=[1, positive_2_int64], dtype="float32", value=1.1 shape=[1, positive_2_int64], dtype="float32", value=1.1
) )
out_4 = fluid.layers.fill_constant( out_4 = paddle.tensor.fill_constant(
shape=shape_tensor_int32, dtype="float32", value=1.1 shape=shape_tensor_int32, dtype="float32", value=1.1
) )
out_5 = fluid.layers.fill_constant( out_5 = paddle.tensor.fill_constant(
shape=shape_tensor_int64, dtype="float32", value=1.1 shape=shape_tensor_int64, dtype="float32", value=1.1
) )
out_6 = fluid.layers.fill_constant( out_6 = paddle.tensor.fill_constant(
shape=shape_tensor_int64, dtype=np.float32, value=1.1 shape=shape_tensor_int64, dtype=np.float32, value=1.1
) )
val1 = fluid.layers.fill_constant( val1 = paddle.tensor.fill_constant(
shape=[1], dtype=np.float32, value=1.1 shape=[1], dtype=np.float32, value=1.1
) )
val2 = fluid.layers.fill_constant( val2 = paddle.tensor.fill_constant(
shape=[1], dtype=np.float64, value=1.1 shape=[1], dtype=np.float64, value=1.1
) )
out_7 = fluid.layers.fill_constant( out_7 = paddle.tensor.fill_constant(
shape=shape_tensor_int64, dtype=np.float32, value=val1 shape=shape_tensor_int64, dtype=np.float32, value=val1
) )
out_8 = fluid.layers.fill_constant( out_8 = paddle.tensor.fill_constant(
shape=shape_tensor_int64, dtype=np.float32, value=val2 shape=shape_tensor_int64, dtype=np.float32, value=val2
) )
...@@ -345,16 +345,16 @@ class TestFillConstantImperative(unittest.TestCase): ...@@ -345,16 +345,16 @@ class TestFillConstantImperative(unittest.TestCase):
shape = fluid.dygraph.to_variable(data1) shape = fluid.dygraph.to_variable(data1)
val = fluid.dygraph.to_variable(data2) val = fluid.dygraph.to_variable(data2)
value = fluid.dygraph.to_variable(data3) value = fluid.dygraph.to_variable(data3)
res1 = fluid.layers.fill_constant( res1 = paddle.tensor.fill_constant(
shape=[1, 2], dtype='float32', value=1.1 shape=[1, 2], dtype='float32', value=1.1
) )
res2 = fluid.layers.fill_constant( res2 = paddle.tensor.fill_constant(
shape=shape, dtype='float32', value=1.1 shape=shape, dtype='float32', value=1.1
) )
res3 = fluid.layers.fill_constant( res3 = paddle.tensor.fill_constant(
shape=shape, dtype='float32', value=val shape=shape, dtype='float32', value=val
) )
res4 = fluid.layers.fill_constant( res4 = paddle.tensor.fill_constant(
shape=shape, dtype='int32', value=value shape=shape, dtype='int32', value=value
) )
assert np.array_equal( assert np.array_equal(
...@@ -372,17 +372,17 @@ class TestFillConstantImperative(unittest.TestCase): ...@@ -372,17 +372,17 @@ class TestFillConstantImperative(unittest.TestCase):
def test_nan(self): def test_nan(self):
with fluid.dygraph.guard(): with fluid.dygraph.guard():
res = fluid.layers.fill_constant([1], 'float32', np.nan) res = paddle.tensor.fill_constant([1], 'float32', np.nan)
self.assertTrue(np.isnan(res.numpy().item(0))) self.assertTrue(np.isnan(res.numpy().item(0)))
def test_inf(self): def test_inf(self):
with fluid.dygraph.guard(): with fluid.dygraph.guard():
res = fluid.layers.fill_constant([1], 'float32', np.inf) res = paddle.tensor.fill_constant([1], 'float32', np.inf)
self.assertTrue(np.isinf(res.numpy().item(0))) self.assertTrue(np.isinf(res.numpy().item(0)))
def test_ninf(self): def test_ninf(self):
with fluid.dygraph.guard(): with fluid.dygraph.guard():
res = fluid.layers.fill_constant([1], 'float32', np.NINF) res = paddle.tensor.fill_constant([1], 'float32', np.NINF)
self.assertTrue(np.isinf(res.numpy().item(0))) self.assertTrue(np.isinf(res.numpy().item(0)))
self.assertEqual(np.NINF, res.numpy().item(0)) self.assertEqual(np.NINF, res.numpy().item(0))
...@@ -434,13 +434,13 @@ class TestFillConstantOpError(unittest.TestCase): ...@@ -434,13 +434,13 @@ class TestFillConstantOpError(unittest.TestCase):
# The `shape` argument of fill_constant_op must be a list, a tuple, or a Variable. # The `shape` argument of fill_constant_op must be a list, a tuple, or a Variable.
def test_shape_type(): def test_shape_type():
fluid.layers.fill_constant(shape=1, dtype="float32", value=1) paddle.tensor.fill_constant(shape=1, dtype="float32", value=1)
self.assertRaises(TypeError, test_shape_type) self.assertRaises(TypeError, test_shape_type)
# The `shape` argument of fill_constant_op must not be empty. # The `shape` argument of fill_constant_op must not be empty.
def test_shape_size(): def test_shape_size():
fluid.layers.fill_constant(shape=[], dtype="float32", value=1) paddle.tensor.fill_constant(shape=[], dtype="float32", value=1)
self.assertRaises(AssertionError, test_shape_size) self.assertRaises(AssertionError, test_shape_size)
...@@ -449,7 +449,7 @@ class TestFillConstantOpError(unittest.TestCase): ...@@ -449,7 +449,7 @@ class TestFillConstantOpError(unittest.TestCase):
shape = fluid.data( shape = fluid.data(
name="shape_tensor", shape=[2], dtype="float32" name="shape_tensor", shape=[2], dtype="float32"
) )
fluid.layers.fill_constant( paddle.tensor.fill_constant(
shape=shape, dtype="float32", value=1 shape=shape, dtype="float32", value=1
) )
...@@ -459,7 +459,7 @@ class TestFillConstantOpError(unittest.TestCase): ...@@ -459,7 +459,7 @@ class TestFillConstantOpError(unittest.TestCase):
shape = fluid.data( shape = fluid.data(
name="shape_tensor_list", shape=[1], dtype="bool" name="shape_tensor_list", shape=[1], dtype="bool"
) )
fluid.layers.fill_constant( paddle.tensor.fill_constant(
shape=[shape, 2], dtype="float32", value=1 shape=[shape, 2], dtype="float32", value=1
) )
......
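
TestFillConstantAPI and TestFillConstantImperative above cover the accepted input forms: shape can be a Python list, a 1-D integer Tensor, or (in static graph) a list mixing ints and Tensors, and value can be a scalar or a 1-element Tensor. A dygraph sketch of the Tensor variants:

    import numpy as np
    import paddle

    shape_t = paddle.to_tensor(np.array([2, 3], dtype='int64'))
    val_t = paddle.tensor.fill_constant(shape=[1], dtype='float32', value=1.1)

    a = paddle.tensor.fill_constant(shape=[1, 2], dtype='float32', value=1.1)
    b = paddle.tensor.fill_constant(shape=shape_t, dtype='float32', value=1.1)
    c = paddle.tensor.fill_constant(shape=shape_t, dtype='float32', value=val_t)
    # a is 1x2; b and c are 2x3; every entry is 1.1
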
...@@ -525,8 +525,8 @@ class TestStridedSliceOp_strides_Tensor(OpTest): ...@@ -525,8 +525,8 @@ class TestStridedSliceOp_strides_Tensor(OpTest):
class TestStridedSliceAPI(unittest.TestCase): class TestStridedSliceAPI(unittest.TestCase):
def test_1(self): def test_1(self):
input = np.random.random([3, 4, 5, 6]).astype("float32") input = np.random.random([3, 4, 5, 6]).astype("float32")
minus_1 = fluid.layers.fill_constant([1], "int32", -1) minus_1 = paddle.tensor.fill_constant([1], "int32", -1)
minus_3 = fluid.layers.fill_constant([1], "int32", -3) minus_3 = paddle.tensor.fill_constant([1], "int32", -3)
starts = paddle.static.data( starts = paddle.static.data(
name='starts', shape=[3], dtype='int32' name='starts', shape=[3], dtype='int32'
) )
......
...@@ -234,7 +234,7 @@ class TestTileAPIStatic(unittest.TestCase): ...@@ -234,7 +234,7 @@ class TestTileAPIStatic(unittest.TestCase):
repeat_times = [2, 2] repeat_times = [2, 2]
x1 = paddle.static.data(name='x1', shape=[-1, 4], dtype="int32") x1 = paddle.static.data(name='x1', shape=[-1, 4], dtype="int32")
out = paddle.tile(x1, repeat_times) out = paddle.tile(x1, repeat_times)
positive_2 = fluid.layers.fill_constant([1], dtype="int32", value=2) positive_2 = paddle.tensor.fill_constant([1], dtype="int32", value=2)
out2 = paddle.tile(x1, repeat_times=[positive_2, 2]) out2 = paddle.tile(x1, repeat_times=[positive_2, 2])
......
...@@ -171,7 +171,7 @@ class TestConcatAPIWithLoDTensorArray(unittest.TestCase): ...@@ -171,7 +171,7 @@ class TestConcatAPIWithLoDTensorArray(unittest.TestCase):
with fluid.program_guard(self.program): with fluid.program_guard(self.program):
input = paddle.assign(self.x) input = paddle.assign(self.x)
tensor_array = paddle.tensor.create_array(dtype='float32') tensor_array = paddle.tensor.create_array(dtype='float32')
zero = fluid.layers.fill_constant( zero = paddle.tensor.fill_constant(
shape=[1], value=0, dtype="int64" shape=[1], value=0, dtype="int64"
) )
......
...@@ -270,7 +270,7 @@ class TestExpandV2API(unittest.TestCase): ...@@ -270,7 +270,7 @@ class TestExpandV2API(unittest.TestCase):
dtype="float32", dtype="float32",
) )
positive_2 = fluid.layers.fill_constant([1], "int32", 12) positive_2 = paddle.tensor.fill_constant([1], "int32", 12)
expand_shape = paddle.static.data( expand_shape = paddle.static.data(
name="expand_shape", name="expand_shape",
shape=[2], shape=[2],
......
...@@ -139,7 +139,7 @@ class TestStackAPIWithLoDTensorArray(unittest.TestCase): ...@@ -139,7 +139,7 @@ class TestStackAPIWithLoDTensorArray(unittest.TestCase):
with fluid.program_guard(self.program): with fluid.program_guard(self.program):
input = paddle.assign(self.x) input = paddle.assign(self.x)
tensor_array = paddle.tensor.create_array(dtype='float32') tensor_array = paddle.tensor.create_array(dtype='float32')
zero = fluid.layers.fill_constant(shape=[1], value=0, dtype="int64") zero = paddle.tensor.fill_constant(shape=[1], value=0, dtype="int64")
for i in range(self.iter_num): for i in range(self.iter_num):
paddle.tensor.array_write(input, zero + i, tensor_array) paddle.tensor.array_write(input, zero + i, tensor_array)
...@@ -177,7 +177,7 @@ class TestTensorStackAPIWithLoDTensorArray(unittest.TestCase): ...@@ -177,7 +177,7 @@ class TestTensorStackAPIWithLoDTensorArray(unittest.TestCase):
with fluid.program_guard(self.program): with fluid.program_guard(self.program):
input = paddle.assign(self.x) input = paddle.assign(self.x)
tensor_array = paddle.tensor.create_array(dtype='float32') tensor_array = paddle.tensor.create_array(dtype='float32')
zero = fluid.layers.fill_constant(shape=[1], value=0, dtype="int64") zero = paddle.tensor.fill_constant(shape=[1], value=0, dtype="int64")
for i in range(self.iter_num): for i in range(self.iter_num):
paddle.tensor.array_write(input, zero + i, tensor_array) paddle.tensor.array_write(input, zero + i, tensor_array)
......
...@@ -578,8 +578,8 @@ class TestStridedSliceOp_strides_Tensor(OpTest): ...@@ -578,8 +578,8 @@ class TestStridedSliceOp_strides_Tensor(OpTest):
class TestStridedSliceAPI(unittest.TestCase): class TestStridedSliceAPI(unittest.TestCase):
def test_1(self): def test_1(self):
input = np.random.random([3, 4, 5, 6]).astype("float64") input = np.random.random([3, 4, 5, 6]).astype("float64")
minus_1 = fluid.layers.fill_constant([1], "int32", -1) minus_1 = paddle.tensor.fill_constant([1], "int32", -1)
minus_3 = fluid.layers.fill_constant([1], "int32", -3) minus_3 = paddle.tensor.fill_constant([1], "int32", -3)
starts = paddle.static.data( starts = paddle.static.data(
name='starts', shape=[3], dtype='int32' name='starts', shape=[3], dtype='int32'
) )
......
...@@ -50,15 +50,15 @@ class TestWhileOp(unittest.TestCase): ...@@ -50,15 +50,15 @@ class TestWhileOp(unittest.TestCase):
i = layers.zeros(shape=[1], dtype='int32') i = layers.zeros(shape=[1], dtype='int32')
i = paddle.cast(i, 'int64') i = paddle.cast(i, 'int64')
i.stop_gradient = True i.stop_gradient = True
array_len = layers.fill_constant(shape=[1], dtype='int32', value=5) array_len = paddle.tensor.fill_constant(shape=[1], dtype='int32', value=5)
array_len = paddle.cast(array_len, 'int64') array_len = paddle.cast(array_len, 'int64')
array_len.stop_gradient = True array_len.stop_gradient = True
cond = paddle.ones(shape=[1], dtype='int32') cond = paddle.ones(shape=[1], dtype='int32')
cond = paddle.cast(cond, 'bool') cond = paddle.cast(cond, 'bool')
j = layers.fill_constant(shape=[1], dtype='int32', value=1) j = paddle.tensor.fill_constant(shape=[1], dtype='int32', value=1)
j = paddle.cast(j, 'int64') j = paddle.cast(j, 'int64')
j.stop_gradient = True j.stop_gradient = True
array_len2 = layers.fill_constant(shape=[1], dtype='int32', value=3) array_len2 = paddle.tensor.fill_constant(shape=[1], dtype='int32', value=3)
array_len2 = paddle.cast(array_len2, 'int64') array_len2 = paddle.cast(array_len2, 'int64')
array_len2.stop_gradient = True array_len2.stop_gradient = True
cond2 = paddle.logical_or(x=j, y=array_len2) cond2 = paddle.logical_or(x=j, y=array_len2)
......
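
TestWhileOp builds its loop counters with fill_constant plus cast; the same counter pattern with the public while_loop API looks like this (a static-graph sketch, not the test itself):

    import paddle

    paddle.enable_static()
    i = paddle.tensor.fill_constant(shape=[1], dtype='int64', value=0)
    limit = paddle.tensor.fill_constant(shape=[1], dtype='int64', value=5)

    (i_out,) = paddle.static.nn.while_loop(
        cond=lambda i: i < limit,   # keep looping while the counter is below the bound
        body=lambda i: [i + 1],
        loop_vars=[i],
    )
    exe = paddle.static.Executor()
    print(exe.run(fetch_list=[i_out])[0])  # [5]
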
...@@ -17,7 +17,6 @@ import unittest ...@@ -17,7 +17,6 @@ import unittest
import numpy as np import numpy as np
import paddle import paddle
import paddle.fluid.layers as layers
from paddle.fluid import core, framework from paddle.fluid import core, framework
from paddle.fluid.framework import Program, program_guard from paddle.fluid.framework import Program, program_guard
...@@ -42,20 +41,26 @@ class TestCompatibility(unittest.TestCase): ...@@ -42,20 +41,26 @@ class TestCompatibility(unittest.TestCase):
def build_program(self): def build_program(self):
def true_func(): def true_func():
return layers.fill_constant( return paddle.tensor.fill_constant(
shape=[1, 2], dtype='int32', value=1 shape=[1, 2], dtype='int32', value=1
), layers.fill_constant(shape=[2, 3], dtype='bool', value=True) ), paddle.tensor.fill_constant(
shape=[2, 3], dtype='bool', value=True
)
def false_func(): def false_func():
return layers.fill_constant( return paddle.tensor.fill_constant(
shape=[3, 4], dtype='float32', value=3 shape=[3, 4], dtype='float32', value=3
), layers.fill_constant(shape=[4, 5], dtype='int64', value=2) ), paddle.tensor.fill_constant(shape=[4, 5], dtype='int64', value=2)
main_program = Program() main_program = Program()
startup_program = Program() startup_program = Program()
with program_guard(main_program, startup_program): with program_guard(main_program, startup_program):
x = layers.fill_constant(shape=[1], dtype='float32', value=0.1) x = paddle.tensor.fill_constant(
y = layers.fill_constant(shape=[1], dtype='float32', value=0.23) shape=[1], dtype='float32', value=0.1
)
y = paddle.tensor.fill_constant(
shape=[1], dtype='float32', value=0.23
)
pred = paddle.less_than(x, y) pred = paddle.less_than(x, y)
out = paddle.static.nn.cond(pred, true_func, false_func) out = paddle.static.nn.cond(pred, true_func, false_func)
# out is a tuple containing 2 tensors # out is a tuple containing 2 tensors
......
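
The cond tests pair fill_constant predicates with paddle.static.nn.cond; a self-contained static-graph sketch of the pattern:

    import paddle

    paddle.enable_static()
    x = paddle.tensor.fill_constant(shape=[1], dtype='float32', value=0.1)
    y = paddle.tensor.fill_constant(shape=[1], dtype='float32', value=0.23)
    pred = paddle.less_than(x, y)  # True: 0.1 < 0.23

    out = paddle.static.nn.cond(
        pred,
        lambda: paddle.tensor.fill_constant(shape=[2], dtype='int32', value=1),
        lambda: paddle.tensor.fill_constant(shape=[2], dtype='int32', value=2),
    )
    exe = paddle.static.Executor()
    print(exe.run(fetch_list=[out])[0])  # [1 1]
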
...@@ -395,7 +395,7 @@ class TestInplaceApiWithDataTransform(unittest.TestCase): ...@@ -395,7 +395,7 @@ class TestInplaceApiWithDataTransform(unittest.TestCase):
def test_increment(self): def test_increment(self):
if paddle.fluid.core.is_compiled_with_cuda(): if paddle.fluid.core.is_compiled_with_cuda():
with paddle.fluid.device_guard("gpu:0"): with paddle.fluid.device_guard("gpu:0"):
x = paddle.fluid.layers.fill_constant([1], "float32", 0) x = paddle.tensor.fill_constant([1], "float32", 0)
with paddle.fluid.device_guard("cpu"): with paddle.fluid.device_guard("cpu"):
x = paddle.increment(x) x = paddle.increment(x)
exe = paddle.static.Executor(paddle.CUDAPlace(0)) exe = paddle.static.Executor(paddle.CUDAPlace(0))
......
...@@ -3045,7 +3045,7 @@ class TestPow_factor_tensor(TestActivation): ...@@ -3045,7 +3045,7 @@ class TestPow_factor_tensor(TestActivation):
) )
factor_1 = 2.0 factor_1 = 2.0
factor_2 = fluid.layers.fill_constant([1], "float32", 3.0) factor_2 = paddle.tensor.fill_constant([1], "float32", 3.0)
out_1 = paddle.pow(x, factor_1) out_1 = paddle.pow(x, factor_1)
out_2 = paddle.pow(x, factor_2) out_2 = paddle.pow(x, factor_2)
out_4 = paddle.pow(x, factor_1, name='pow_res') out_4 = paddle.pow(x, factor_1, name='pow_res')
......
...@@ -16,7 +16,6 @@ import unittest ...@@ -16,7 +16,6 @@ import unittest
import paddle import paddle
import paddle.fluid as fluid import paddle.fluid as fluid
import paddle.fluid.layers as layers
from paddle.static.nn.control_flow import Assert from paddle.static.nn.control_flow import Assert
...@@ -31,7 +30,7 @@ class TestAssertOp(unittest.TestCase): ...@@ -31,7 +30,7 @@ class TestAssertOp(unittest.TestCase):
def test_assert_true(self): def test_assert_true(self):
def net_func(): def net_func():
condition = layers.fill_constant( condition = paddle.tensor.fill_constant(
shape=[1], dtype='bool', value=True shape=[1], dtype='bool', value=True
) )
Assert(condition, []) Assert(condition, [])
...@@ -40,7 +39,7 @@ class TestAssertOp(unittest.TestCase): ...@@ -40,7 +39,7 @@ class TestAssertOp(unittest.TestCase):
def test_assert_false(self): def test_assert_false(self):
def net_func(): def net_func():
condition = layers.fill_constant( condition = paddle.tensor.fill_constant(
shape=[1], dtype='bool', value=False shape=[1], dtype='bool', value=False
) )
Assert(condition) Assert(condition)
...@@ -50,7 +49,7 @@ class TestAssertOp(unittest.TestCase): ...@@ -50,7 +49,7 @@ class TestAssertOp(unittest.TestCase):
def test_assert_cond_numel_error(self): def test_assert_cond_numel_error(self):
def net_func(): def net_func():
condition = layers.fill_constant( condition = paddle.tensor.fill_constant(
shape=[1, 2], dtype='bool', value=True shape=[1, 2], dtype='bool', value=True
) )
Assert(condition, []) Assert(condition, [])
...@@ -60,8 +59,10 @@ class TestAssertOp(unittest.TestCase): ...@@ -60,8 +59,10 @@ class TestAssertOp(unittest.TestCase):
def test_assert_print_data(self): def test_assert_print_data(self):
def net_func(): def net_func():
zero = layers.fill_constant(shape=[1], dtype='int64', value=0) zero = paddle.tensor.fill_constant(
one = layers.fill_constant(shape=[1], dtype='int64', value=1) shape=[1], dtype='int64', value=0
)
one = paddle.tensor.fill_constant(shape=[1], dtype='int64', value=1)
condition = paddle.less_than(one, zero) # False condition = paddle.less_than(one, zero) # False
Assert(condition, [zero, one]) Assert(condition, [zero, one])
...@@ -71,7 +72,9 @@ class TestAssertOp(unittest.TestCase): ...@@ -71,7 +72,9 @@ class TestAssertOp(unittest.TestCase):
def test_assert_summary(self): def test_assert_summary(self):
def net_func(): def net_func():
x = layers.fill_constant(shape=[10], dtype='float32', value=2.0) x = paddle.tensor.fill_constant(
shape=[10], dtype='float32', value=2.0
)
condition = paddle.max(x) < 1.0 condition = paddle.max(x) < 1.0
Assert(condition, (x,), 5) Assert(condition, (x,), 5)
...@@ -81,7 +84,9 @@ class TestAssertOp(unittest.TestCase): ...@@ -81,7 +84,9 @@ class TestAssertOp(unittest.TestCase):
def test_assert_summary_greater_than_size(self): def test_assert_summary_greater_than_size(self):
def net_func(): def net_func():
x = layers.fill_constant(shape=[2, 3], dtype='float32', value=2.0) x = paddle.tensor.fill_constant(
shape=[2, 3], dtype='float32', value=2.0
)
condition = paddle.max(x) < 1.0 condition = paddle.max(x) < 1.0
Assert(condition, [x], 10, name="test") Assert(condition, [x], 10, name="test")
......
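
The Assert tests above follow one recipe: build a boolean condition from fill_constant data, then let the op fire at run time. A sketch of the summarize case (running the program is expected to raise):

    import paddle
    from paddle.static.nn.control_flow import Assert

    paddle.enable_static()
    x = paddle.tensor.fill_constant(shape=[10], dtype='float32', value=2.0)
    condition = paddle.max(x) < 1.0  # False, so the run below raises
    Assert(condition, [x], 5)        # print at most 5 elements of x on failure

    exe = paddle.static.Executor()
    try:
        exe.run(paddle.static.default_main_program())
    except Exception:
        print('Assert fired')
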
...@@ -76,11 +76,11 @@ class TestAssignOpWithLoDTensorArray(unittest.TestCase): ...@@ -76,11 +76,11 @@ class TestAssignOpWithLoDTensorArray(unittest.TestCase):
with program_guard(main_program): with program_guard(main_program):
x = fluid.data(name='x', shape=[100, 10], dtype='float32') x = fluid.data(name='x', shape=[100, 10], dtype='float32')
x.stop_gradient = False x.stop_gradient = False
y = fluid.layers.fill_constant( y = paddle.tensor.fill_constant(
shape=[100, 10], dtype='float32', value=1 shape=[100, 10], dtype='float32', value=1
) )
z = paddle.add(x=x, y=y) z = paddle.add(x=x, y=y)
i = fluid.layers.fill_constant(shape=[1], dtype='int64', value=0) i = paddle.tensor.fill_constant(shape=[1], dtype='int64', value=0)
init_array = paddle.tensor.array_write(x=z, i=i) init_array = paddle.tensor.array_write(x=z, i=i)
array = paddle.assign(init_array) array = paddle.assign(init_array)
sums = paddle.tensor.array_read(array=init_array, i=i) sums = paddle.tensor.array_read(array=init_array, i=i)
...@@ -129,11 +129,11 @@ class TestAssignOApi(unittest.TestCase): ...@@ -129,11 +129,11 @@ class TestAssignOApi(unittest.TestCase):
with program_guard(main_program): with program_guard(main_program):
x = fluid.data(name='x', shape=[100, 10], dtype='float32') x = fluid.data(name='x', shape=[100, 10], dtype='float32')
x.stop_gradient = False x.stop_gradient = False
y = fluid.layers.fill_constant( y = paddle.tensor.fill_constant(
shape=[100, 10], dtype='float32', value=1 shape=[100, 10], dtype='float32', value=1
) )
z = paddle.add(x=x, y=y) z = paddle.add(x=x, y=y)
i = fluid.layers.fill_constant(shape=[1], dtype='int64', value=0) i = paddle.tensor.fill_constant(shape=[1], dtype='int64', value=0)
init_array = paddle.tensor.array_write(x=z, i=i) init_array = paddle.tensor.array_write(x=z, i=i)
array = paddle.assign(init_array) array = paddle.assign(init_array)
sums = paddle.tensor.array_read(array=init_array, i=i) sums = paddle.tensor.array_read(array=init_array, i=i)
......
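
Both assign tests above exercise the same flow: write a tensor into a LoDTensorArray at a fill_constant index, assign-copy the array, and read it back. Condensed into a static-graph sketch:

    import paddle

    paddle.enable_static()
    x = paddle.tensor.fill_constant(shape=[100, 10], dtype='float32', value=1)
    i = paddle.tensor.fill_constant(shape=[1], dtype='int64', value=0)
    arr = paddle.tensor.array_write(x, i=i)   # array with x at slot 0
    copied = paddle.assign(arr)               # copies the whole array
    first = paddle.tensor.array_read(array=copied, i=i)

    exe = paddle.static.Executor()
    print(exe.run(fetch_list=[first])[0].shape)  # (100, 10)
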
...@@ -44,7 +44,7 @@ class TestBroadcastToAPI(unittest.TestCase): ...@@ -44,7 +44,7 @@ class TestBroadcastToAPI(unittest.TestCase):
input = np.random.random([12, 14]).astype("float32") input = np.random.random([12, 14]).astype("float32")
x = paddle.static.data(name='x', shape=[12, 14], dtype="float32") x = paddle.static.data(name='x', shape=[12, 14], dtype="float32")
positive_2 = fluid.layers.fill_constant([1], "int32", 12) positive_2 = paddle.tensor.fill_constant([1], "int32", 12)
expand_shape = paddle.static.data( expand_shape = paddle.static.data(
name="expand_shape", name="expand_shape",
shape=[2], shape=[2],
...@@ -81,7 +81,7 @@ class TestBroadcastToAPI(unittest.TestCase): ...@@ -81,7 +81,7 @@ class TestBroadcastToAPI(unittest.TestCase):
name="x", shape=[12, 14], dtype="float16" name="x", shape=[12, 14], dtype="float16"
) )
positive_2 = paddle.fluid.layers.fill_constant([1], "int32", 12) positive_2 = paddle.tensor.fill_constant([1], "int32", 12)
expand_shape = paddle.static.data( expand_shape = paddle.static.data(
name="expand_shape", name="expand_shape",
shape=[2], shape=[2],
......
...@@ -20,7 +20,6 @@ import numpy as np ...@@ -20,7 +20,6 @@ import numpy as np
import paddle import paddle
import paddle.fluid as fluid import paddle.fluid as fluid
import paddle.fluid.core as core import paddle.fluid.core as core
import paddle.fluid.layers as layers
import paddle.fluid.optimizer as optimizer import paddle.fluid.optimizer as optimizer
from paddle.fluid.backward import append_backward from paddle.fluid.backward import append_backward
from paddle.fluid.framework import Program, program_guard from paddle.fluid.framework import Program, program_guard
...@@ -31,20 +30,32 @@ paddle.enable_static() ...@@ -31,20 +30,32 @@ paddle.enable_static()
class TestAPICase(unittest.TestCase): class TestAPICase(unittest.TestCase):
def test_return_single_var(self): def test_return_single_var(self):
def fn_1(): def fn_1():
return layers.fill_constant(shape=[4, 2], dtype='int32', value=1) return paddle.tensor.fill_constant(
shape=[4, 2], dtype='int32', value=1
)
def fn_2(): def fn_2():
return layers.fill_constant(shape=[4, 2], dtype='int32', value=2) return paddle.tensor.fill_constant(
shape=[4, 2], dtype='int32', value=2
)
def fn_3(): def fn_3():
return layers.fill_constant(shape=[4, 3], dtype='int32', value=3) return paddle.tensor.fill_constant(
shape=[4, 3], dtype='int32', value=3
)
main_program = Program() main_program = Program()
startup_program = Program() startup_program = Program()
with program_guard(main_program, startup_program): with program_guard(main_program, startup_program):
x = layers.fill_constant(shape=[1], dtype='float32', value=0.3) x = paddle.tensor.fill_constant(
y = layers.fill_constant(shape=[1], dtype='float32', value=0.1) shape=[1], dtype='float32', value=0.3
z = layers.fill_constant(shape=[1], dtype='float32', value=0.2) )
y = paddle.tensor.fill_constant(
shape=[1], dtype='float32', value=0.1
)
z = paddle.tensor.fill_constant(
shape=[1], dtype='float32', value=0.2
)
pred_2 = paddle.less_than(x, y) # false: 0.3 < 0.1 pred_2 = paddle.less_than(x, y) # false: 0.3 < 0.1
pred_1 = paddle.less_than(z, x) # true: 0.2 < 0.3 pred_1 = paddle.less_than(z, x) # true: 0.2 < 0.3
...@@ -244,26 +255,32 @@ class TestAPICase(unittest.TestCase): ...@@ -244,26 +255,32 @@ class TestAPICase(unittest.TestCase):
def test_return_var_tuple(self): def test_return_var_tuple(self):
def fn_1(): def fn_1():
return layers.fill_constant( return paddle.tensor.fill_constant(
shape=[1, 2], dtype='int32', value=1 shape=[1, 2], dtype='int32', value=1
), layers.fill_constant(shape=[2, 3], dtype='float32', value=2) ), paddle.tensor.fill_constant(
shape=[2, 3], dtype='float32', value=2
)
def fn_2(): def fn_2():
return layers.fill_constant( return paddle.tensor.fill_constant(
shape=[3, 4], dtype='int32', value=3 shape=[3, 4], dtype='int32', value=3
), layers.fill_constant(shape=[4, 5], dtype='float32', value=4) ), paddle.tensor.fill_constant(
shape=[4, 5], dtype='float32', value=4
)
def fn_3(): def fn_3():
return layers.fill_constant( return paddle.tensor.fill_constant(
shape=[5], dtype='int32', value=5 shape=[5], dtype='int32', value=5
), layers.fill_constant(shape=[5, 6], dtype='float32', value=6) ), paddle.tensor.fill_constant(
shape=[5, 6], dtype='float32', value=6
)
main_program = Program() main_program = Program()
startup_program = Program() startup_program = Program()
with program_guard(main_program, startup_program): with program_guard(main_program, startup_program):
x = layers.fill_constant(shape=[1], dtype='float32', value=1) x = paddle.tensor.fill_constant(shape=[1], dtype='float32', value=1)
y = layers.fill_constant(shape=[1], dtype='float32', value=1) y = paddle.tensor.fill_constant(shape=[1], dtype='float32', value=1)
z = layers.fill_constant(shape=[1], dtype='float32', value=3) z = paddle.tensor.fill_constant(shape=[1], dtype='float32', value=3)
pred_1 = paddle.equal(x, y) # true pred_1 = paddle.equal(x, y) # true
pred_2 = paddle.equal(x, z) # false pred_2 = paddle.equal(x, z) # false
...@@ -291,14 +308,18 @@ class TestAPICase(unittest.TestCase): ...@@ -291,14 +308,18 @@ class TestAPICase(unittest.TestCase):
class TestAPICase_Nested(unittest.TestCase): class TestAPICase_Nested(unittest.TestCase):
def test_nested_case(self): def test_nested_case(self):
def fn_1(x=1): def fn_1(x=1):
var_5 = layers.fill_constant(shape=[1], dtype='int32', value=5) var_5 = paddle.tensor.fill_constant(
var_6 = layers.fill_constant(shape=[1], dtype='int32', value=6) shape=[1], dtype='int32', value=5
)
var_6 = paddle.tensor.fill_constant(
shape=[1], dtype='int32', value=6
)
out = paddle.static.nn.control_flow.case( out = paddle.static.nn.control_flow.case(
pred_fn_pairs=[ pred_fn_pairs=[
( (
var_5 < var_6, var_5 < var_6,
partial( partial(
layers.fill_constant, paddle.tensor.fill_constant,
shape=[1], shape=[1],
dtype='int32', dtype='int32',
value=x, value=x,
...@@ -307,7 +328,7 @@ class TestAPICase_Nested(unittest.TestCase): ...@@ -307,7 +328,7 @@ class TestAPICase_Nested(unittest.TestCase):
( (
var_5 == var_6, var_5 == var_6,
partial( partial(
layers.fill_constant, paddle.tensor.fill_constant,
shape=[2], shape=[2],
dtype='int32', dtype='int32',
value=x, value=x,
...@@ -318,15 +339,19 @@ class TestAPICase_Nested(unittest.TestCase): ...@@ -318,15 +339,19 @@ class TestAPICase_Nested(unittest.TestCase):
return out return out
def fn_2(x=2): def fn_2(x=2):
var_5 = layers.fill_constant(shape=[1], dtype='int32', value=5) var_5 = paddle.tensor.fill_constant(
var_6 = layers.fill_constant(shape=[1], dtype='int32', value=6) shape=[1], dtype='int32', value=5
)
var_6 = paddle.tensor.fill_constant(
shape=[1], dtype='int32', value=6
)
out = paddle.static.nn.control_flow.case( out = paddle.static.nn.control_flow.case(
pred_fn_pairs=[ pred_fn_pairs=[
(var_5 < var_6, partial(fn_1, x=x)), (var_5 < var_6, partial(fn_1, x=x)),
( (
var_5 == var_6, var_5 == var_6,
partial( partial(
layers.fill_constant, paddle.tensor.fill_constant,
shape=[2], shape=[2],
dtype='int32', dtype='int32',
value=x, value=x,
...@@ -337,15 +362,19 @@ class TestAPICase_Nested(unittest.TestCase): ...@@ -337,15 +362,19 @@ class TestAPICase_Nested(unittest.TestCase):
return out return out
def fn_3(): def fn_3():
var_5 = layers.fill_constant(shape=[1], dtype='int32', value=5) var_5 = paddle.tensor.fill_constant(
var_6 = layers.fill_constant(shape=[1], dtype='int32', value=6) shape=[1], dtype='int32', value=5
)
var_6 = paddle.tensor.fill_constant(
shape=[1], dtype='int32', value=6
)
out = paddle.static.nn.control_flow.case( out = paddle.static.nn.control_flow.case(
pred_fn_pairs=[ pred_fn_pairs=[
(var_5 < var_6, partial(fn_2, x=3)), (var_5 < var_6, partial(fn_2, x=3)),
( (
var_5 == var_6, var_5 == var_6,
partial( partial(
layers.fill_constant, paddle.tensor.fill_constant,
shape=[2], shape=[2],
dtype='int32', dtype='int32',
value=7, value=7,
...@@ -358,9 +387,15 @@ class TestAPICase_Nested(unittest.TestCase): ...@@ -358,9 +387,15 @@ class TestAPICase_Nested(unittest.TestCase):
main_program = Program() main_program = Program()
startup_program = Program() startup_program = Program()
with program_guard(main_program, startup_program): with program_guard(main_program, startup_program):
x = layers.fill_constant(shape=[1], dtype='float32', value=0.3) x = paddle.tensor.fill_constant(
y = layers.fill_constant(shape=[1], dtype='float32', value=0.1) shape=[1], dtype='float32', value=0.3
z = layers.fill_constant(shape=[1], dtype='float32', value=0.2) )
y = paddle.tensor.fill_constant(
shape=[1], dtype='float32', value=0.1
)
z = paddle.tensor.fill_constant(
shape=[1], dtype='float32', value=0.2
)
pred_2 = paddle.less_than(x, y) # false: 0.3 < 0.1 pred_2 = paddle.less_than(x, y) # false: 0.3 < 0.1
pred_1 = paddle.less_than(z, x) # true: 0.2 < 0.3 pred_1 = paddle.less_than(z, x) # true: 0.2 < 0.3
...@@ -496,13 +531,19 @@ class TestAPICase_Nested(unittest.TestCase): ...@@ -496,13 +531,19 @@ class TestAPICase_Nested(unittest.TestCase):
class TestAPICase_Error(unittest.TestCase): class TestAPICase_Error(unittest.TestCase):
def test_error(self): def test_error(self):
def fn_1(): def fn_1():
return layers.fill_constant(shape=[4, 2], dtype='int32', value=1) return paddle.tensor.fill_constant(
shape=[4, 2], dtype='int32', value=1
)
main_program = Program() main_program = Program()
startup_program = Program() startup_program = Program()
with program_guard(main_program, startup_program): with program_guard(main_program, startup_program):
x = layers.fill_constant(shape=[1], dtype='float32', value=0.23) x = paddle.tensor.fill_constant(
z = layers.fill_constant(shape=[1], dtype='float32', value=0.2) shape=[1], dtype='float32', value=0.23
)
z = paddle.tensor.fill_constant(
shape=[1], dtype='float32', value=0.2
)
pred_1 = paddle.less_than(z, x) # true pred_1 = paddle.less_than(z, x) # true
# 'pred_fn_pairs' in case() must be a list or a tuple # 'pred_fn_pairs' in case() must be a list or a tuple
...@@ -570,7 +611,7 @@ class TestMutiTask(unittest.TestCase): ...@@ -570,7 +611,7 @@ class TestMutiTask(unittest.TestCase):
switch_id = fluid.data(name='switch_id', shape=[1], dtype='int32') switch_id = fluid.data(name='switch_id', shape=[1], dtype='int32')
one = layers.fill_constant(shape=[1], dtype='int32', value=1) one = paddle.tensor.fill_constant(shape=[1], dtype='int32', value=1)
adam = optimizer.Adam(learning_rate=0.001) adam = optimizer.Adam(learning_rate=0.001)
adagrad = optimizer.Adagrad(learning_rate=0.001) adagrad = optimizer.Adagrad(learning_rate=0.001)
......
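
The case tests above wire fill_constant outputs into paddle.static.nn.control_flow.case, which runs the callable of the first true predicate (or the default). A minimal sketch:

    import paddle

    paddle.enable_static()
    x = paddle.tensor.fill_constant(shape=[1], dtype='float32', value=0.3)
    y = paddle.tensor.fill_constant(shape=[1], dtype='float32', value=0.1)

    out = paddle.static.nn.control_flow.case(
        pred_fn_pairs=[
            (x < y, lambda: paddle.tensor.fill_constant(shape=[1], dtype='int32', value=1)),
            (y < x, lambda: paddle.tensor.fill_constant(shape=[1], dtype='int32', value=2)),
        ],
        default=lambda: paddle.tensor.fill_constant(shape=[1], dtype='int32', value=3),
    )
    exe = paddle.static.Executor()
    print(exe.run(fetch_list=[out])[0])  # [2], since only 0.1 < 0.3 holds
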
@@ -347,8 +347,8 @@ class TestConcatAPI(unittest.TestCase):
input_3 = np.random.random([2, 2, 4, 5]).astype("int32")
x_2 = fluid.data(shape=[2, 1, 4, 5], dtype='int32', name='x_2')
x_3 = fluid.data(shape=[2, 2, 4, 5], dtype='int32', name='x_3')
-positive_1_int32 = fluid.layers.fill_constant([1], "int32", 1)
-positive_1_int64 = fluid.layers.fill_constant([1], "int64", 1)
+positive_1_int32 = paddle.tensor.fill_constant([1], "int32", 1)
+positive_1_int64 = paddle.tensor.fill_constant([1], "int64", 1)
out_1 = paddle.concat([x_2, x_3], axis=1)
out_2 = paddle.concat([x_2, x_3], axis=positive_1_int32)
out_3 = paddle.concat([x_2, x_3], axis=positive_1_int64)
@@ -374,9 +374,9 @@ class TestConcatAPI(unittest.TestCase):
input_3 = np.random.random([2, 2, 4, 5]).astype("int32")
x_2 = fluid.data(shape=[2, 1, 4, 5], dtype='int32', name='x_2')
x_3 = fluid.data(shape=[2, 2, 4, 5], dtype='int32', name='x_3')
-positive_1_int32 = paddle.fluid.layers.fill_constant([1], "int32", 1)
-positive_1_int64 = paddle.fluid.layers.fill_constant([1], "int64", 1)
-negative_int64 = paddle.fluid.layers.fill_constant([1], "int64", -3)
+positive_1_int32 = paddle.tensor.fill_constant([1], "int32", 1)
+positive_1_int64 = paddle.tensor.fill_constant([1], "int64", 1)
+negative_int64 = paddle.tensor.fill_constant([1], "int64", -3)
out_1 = paddle.concat(x=[x_2, x_3], axis=1)
out_2 = paddle.concat(x=[x_2, x_3], axis=positive_1_int32)
out_3 = paddle.concat(x=[x_2, x_3], axis=positive_1_int64)
@@ -464,7 +464,7 @@ class TestConcatAPIWithLoDTensorArray(unittest.TestCase):
with fluid.program_guard(self.program):
input = paddle.assign(self.x)
tensor_array = paddle.tensor.create_array(dtype='float32')
-zero = fluid.layers.fill_constant(
+zero = paddle.tensor.fill_constant(
shape=[1], value=0, dtype="int64"
)
...
@@ -21,7 +21,6 @@ import paddle
import paddle.fluid as fluid
import paddle.fluid.core as core
import paddle.fluid.framework as framework
-import paddle.fluid.layers as layers
from paddle.fluid.backward import append_backward
from paddle.fluid.framework import Program, program_guard
@@ -42,16 +41,24 @@ class TestCondInputOutput(unittest.TestCase):
paddle.enable_static()
def true_func():
-return layers.fill_constant(shape=[2, 3], dtype='int32', value=2)
+return paddle.tensor.fill_constant(
+shape=[2, 3], dtype='int32', value=2
+)
def false_func():
-return layers.fill_constant(shape=[3, 2], dtype='int32', value=-1)
+return paddle.tensor.fill_constant(
+shape=[3, 2], dtype='int32', value=-1
+)
main_program = Program()
startup_program = Program()
with program_guard(main_program, startup_program):
-x = layers.fill_constant(shape=[1], dtype='float32', value=0.1)
-y = layers.fill_constant(shape=[1], dtype='float32', value=0.23)
+x = paddle.tensor.fill_constant(
+shape=[1], dtype='float32', value=0.1
+)
+y = paddle.tensor.fill_constant(
+shape=[1], dtype='float32', value=0.23
+)
pred = paddle.less_than(y, x)
out = paddle.static.nn.cond(pred, true_func, false_func)
# out is one tensor
@@ -217,19 +224,23 @@ class TestCondInputOutput(unittest.TestCase):
paddle.enable_static()
def true_func():
-return layers.fill_constant(
+return paddle.tensor.fill_constant(
shape=[1, 2], dtype='int32', value=1
-), layers.fill_constant(shape=[2, 3], dtype='bool', value=True)
+), paddle.tensor.fill_constant(
+shape=[2, 3], dtype='bool', value=True
+)
def false_func():
-return layers.fill_constant(
+return paddle.tensor.fill_constant(
shape=[3, 4], dtype='float32', value=3
-), layers.fill_constant(shape=[4, 5], dtype='int64', value=2)
+), paddle.tensor.fill_constant(shape=[4, 5], dtype='int64', value=2)
main_program = Program()
startup_program = Program()
with program_guard(main_program, startup_program):
-pred = layers.fill_constant(shape=[1], dtype='bool', value=True)
+pred = paddle.tensor.fill_constant(
+shape=[1], dtype='bool', value=True
+)
out = paddle.static.nn.cond(pred, true_func, false_func)
# out is a tuple containing 2 tensors
@@ -271,7 +282,9 @@ class TestCondInputOutput(unittest.TestCase):
main_program = Program()
startup_program = Program()
with program_guard(main_program, startup_program):
-a = layers.fill_constant(shape=[3, 2, 1], dtype='int32', value=7)
+a = paddle.tensor.fill_constant(
+shape=[3, 2, 1], dtype='int32', value=7
+)
i = fluid.data(name="i", shape=[1], dtype='int32')
pred = (i % 2) == 0
a = paddle.static.nn.cond(
@@ -346,12 +359,14 @@ class TestCondInputOutput(unittest.TestCase):
return None
def func_return_one_tensor():
-return layers.fill_constant(shape=[2, 7], dtype='int32', value=3)
+return paddle.tensor.fill_constant(
+shape=[2, 7], dtype='int32', value=3
+)
def func_return_two_tensors():
-return layers.fill_constant(
+return paddle.tensor.fill_constant(
shape=[3, 1], dtype='int32', value=7
-), layers.fill_constant(shape=[3, 1], dtype='int32', value=8)
+), paddle.tensor.fill_constant(shape=[3, 1], dtype='int32', value=8)
main_program = Program()
startup_program = Program()
@@ -398,11 +413,11 @@ class TestCondInputOutput(unittest.TestCase):
main_program = fluid.Program()
startup_program = fluid.Program()
with fluid.program_guard(main_program, startup_program):
-a = fluid.layers.fill_constant(
+a = paddle.tensor.fill_constant(
shape=[1], dtype='float32', value=1.23
)
a.stop_gradient = False
-b = fluid.layers.fill_constant(
+b = paddle.tensor.fill_constant(
shape=[1], dtype='float32', value=1.25
)
b.stop_gradient = False
@@ -567,11 +582,11 @@ class TestCondNestedControlFlow(unittest.TestCase):
startup_program = fluid.Program()
with fluid.program_guard(main_program, startup_program):
-a = fluid.layers.fill_constant(
+a = paddle.tensor.fill_constant(
shape=[1], dtype='float32', value=1.23
)
a.stop_gradient = False
-b = fluid.layers.fill_constant(
+b = paddle.tensor.fill_constant(
shape=[1], dtype='float32', value=1.24
)
b.stop_gradient = False
...
@@ -19,7 +19,6 @@ import numpy as np
import paddle
import paddle.fluid as fluid
import paddle.fluid.core as core
-import paddle.fluid.layers as layers
from paddle.fluid.backward import append_backward
from paddle.fluid.executor import Executor
from paddle.fluid.layers.control_flow import ConditionalBlock
@@ -67,7 +66,7 @@ class TestConditionalBlockOpInferShape(unittest.TestCase):
step_scope = global_block.create_var(
type=core.VarDesc.VarType.STEP_SCOPES
)
-cond_var = layers.fill_constant(
+cond_var = paddle.tensor.fill_constant(
shape=[1], dtype='bool', value=False
)
...
@@ -41,7 +41,7 @@ class TestPSMinimize(unittest.TestCase):
cond_3 = paddle.sum(cond)
acc = paddle.divide(
cond_3,
-fluid.layers.fill_constant(
+paddle.tensor.fill_constant(
shape=[1], value=batch_size * 1.0, dtype='float64'
),
name="simnet_acc",
...
@@ -41,7 +41,7 @@ class TestPSPassWithBow(unittest.TestCase):
cond_3 = paddle.sum(cond)
acc = paddle.divide(
cond_3,
-fluid.layers.fill_constant(
+paddle.tensor.fill_constant(
shape=[1], value=batch_size * 1.0, dtype='float64'
),
name="simnet_acc",
...
@@ -41,7 +41,7 @@ class TestPSPassWithBow(unittest.TestCase):
cond_3 = paddle.sum(cond)
acc = paddle.divide(
cond_3,
-fluid.layers.fill_constant(
+paddle.tensor.fill_constant(
shape=[1], value=batch_size * 1.0, dtype='float64'
),
name="simnet_acc",
...
@@ -44,7 +44,7 @@ class TestPSPassWithBow(unittest.TestCase):
cond_3 = paddle.sum(cond)
acc = paddle.divide(
cond_3,
-fluid.layers.fill_constant(
+paddle.tensor.fill_constant(
shape=[1], value=batch_size * 1.0, dtype='float64'
),
name="simnet_acc",
...
@@ -45,7 +45,7 @@ class TestPSPassWithBow(unittest.TestCase):
cond_3 = paddle.sum(cond)
acc = paddle.divide(
cond_3,
-fluid.layers.fill_constant(
+paddle.tensor.fill_constant(
shape=[1], value=batch_size * 1.0, dtype='float64'
),
name="simnet_acc",
...
@@ -44,7 +44,7 @@ class TestPSPassWithBow(unittest.TestCase):
cond_3 = paddle.sum(cond)
acc = paddle.divide(
cond_3,
-fluid.layers.fill_constant(
+paddle.tensor.fill_constant(
shape=[1], value=batch_size * 1.0, dtype='float64'
),
name="simnet_acc",
...
@@ -41,7 +41,7 @@ class TestPSPassWithBow(unittest.TestCase):
cond_3 = paddle.sum(cond)
acc = paddle.divide(
cond_3,
-fluid.layers.fill_constant(
+paddle.tensor.fill_constant(
shape=[1], value=batch_size * 1.0, dtype='float64'
),
name="simnet_acc",
...
@@ -41,7 +41,7 @@ class TestPSPassWithBow(unittest.TestCase):
cond_3 = paddle.sum(cond)
acc = paddle.divide(
cond_3,
-fluid.layers.fill_constant(
+paddle.tensor.fill_constant(
shape=[1], value=batch_size * 1.0, dtype='float64'
),
name="simnet_acc",
...
@@ -41,7 +41,7 @@ class TestPSPassWithBow(unittest.TestCase):
cond_3 = paddle.sum(cond)
acc = paddle.divide(
cond_3,
-fluid.layers.fill_constant(
+paddle.tensor.fill_constant(
shape=[1], value=batch_size * 1.0, dtype='float64'
),
name="simnet_acc",
...
@@ -41,7 +41,7 @@ class TestPSPassWithBow(unittest.TestCase):
cond_3 = paddle.sum(cond)
acc = paddle.divide(
cond_3,
-fluid.layers.fill_constant(
+paddle.tensor.fill_constant(
shape=[1], value=batch_size * 1.0, dtype='float64'
),
name="simnet_acc",
...
@@ -39,7 +39,7 @@ class TestSPMT(unittest.TestCase):
cond_3 = paddle.sum(cond)
acc = paddle.divide(
cond_3,
-fluid.layers.fill_constant(
+paddle.tensor.fill_constant(
shape=[1], value=batch_size * 1.0, dtype='float64'
),
name="simnet_acc",
...
@@ -18,7 +18,6 @@ import numpy as np
import paddle
import paddle.fluid as fluid
-import paddle.fluid.layers as layers
from paddle.tensor.manipulation import tensor_array_to_tensor
paddle.enable_static()
@@ -33,10 +32,10 @@ def build_and_run_program(place, batch_size, beam_size, stop_gradient=False):
np.random.rand(batch_size, beam_size, 32).astype("float32")
)
indices = fluid.data(shape=[None, beam_size], dtype="int64", name="indices")
-step_idx = layers.fill_constant(
+step_idx = paddle.tensor.fill_constant(
shape=[1], dtype="int64", value=0, force_cpu=True
)
-max_len = layers.fill_constant(
+max_len = paddle.tensor.fill_constant(
shape=[1], dtype="int64", value=10, force_cpu=True
)
cond = paddle.less_than(x=step_idx, y=max_len)
...
@@ -73,14 +73,18 @@ class TestEagerDeletionWhileOpBase(unittest.TestCase):
i = layers.zeros(shape=[1], dtype='int64')
i.stop_gradient = True
-array_len = layers.fill_constant(shape=[1], dtype='int64', value=1)
+array_len = paddle.tensor.fill_constant(
+shape=[1], dtype='int64', value=1
+)
array_len.stop_gradient = True
cond = paddle.less_than(x=i, y=array_len)
-j = layers.fill_constant(shape=[1], dtype='int64', value=1)
+j = paddle.tensor.fill_constant(shape=[1], dtype='int64', value=1)
j.stop_gradient = True
-array_len2 = layers.fill_constant(shape=[1], dtype='int64', value=3)
+array_len2 = paddle.tensor.fill_constant(
+shape=[1], dtype='int64', value=3
+)
array_len2.stop_gradient = True
cond2 = paddle.less_than(x=j, y=array_len2)
...
@@ -239,8 +239,8 @@ class TestEmptyAPI(unittest.TestCase):
def test_static_graph(self):
dtype = 'float64'
-positive_2_int32 = fluid.layers.fill_constant([1], "int32", 3)
-positive_2_int64 = fluid.layers.fill_constant([1], "int64", 3)
+positive_2_int32 = paddle.tensor.fill_constant([1], "int32", 3)
+positive_2_int64 = paddle.tensor.fill_constant([1], "int64", 3)
shape_tensor_int32 = fluid.data(
name="shape_tensor_int32", shape=[2], dtype="int32"
...
@@ -217,7 +217,7 @@ class TestExpandV2API(unittest.TestCase):
input = np.random.random([12, 14]).astype("float32")
x = paddle.static.data(name='x', shape=[12, 14], dtype="float32")
-positive_2 = fluid.layers.fill_constant([1], "int32", 12)
+positive_2 = paddle.tensor.fill_constant([1], "int32", 12)
expand_shape = paddle.static.data(
name="expand_shape",
shape=[2],
...
@@ -282,8 +282,8 @@ class TestFillConstantOp2_ValueTensor(OpTest):
class TestFillConstantAPI(unittest.TestCase):
def test_api(self):
-positive_2_int32 = fluid.layers.fill_constant([1], "int32", 2)
-positive_2_int64 = fluid.layers.fill_constant([1], "int64", 2)
+positive_2_int32 = paddle.tensor.fill_constant([1], "int32", 2)
+positive_2_int64 = paddle.tensor.fill_constant([1], "int64", 2)
shape_tensor_int32 = fluid.data(
name="shape_tensor_int32", shape=[2], dtype="int32"
@@ -292,41 +292,41 @@ class TestFillConstantAPI(unittest.TestCase):
name="shape_tensor_int64", shape=[2], dtype="int64"
)
-out_1 = fluid.layers.fill_constant(
+out_1 = paddle.tensor.fill_constant(
shape=[1, 2], dtype="float32", value=1.1
)
-out_2 = fluid.layers.fill_constant(
+out_2 = paddle.tensor.fill_constant(
shape=[1, positive_2_int32], dtype="float32", value=1.1
)
-out_3 = fluid.layers.fill_constant(
+out_3 = paddle.tensor.fill_constant(
shape=[1, positive_2_int64], dtype="float32", value=1.1
)
-out_4 = fluid.layers.fill_constant(
+out_4 = paddle.tensor.fill_constant(
shape=shape_tensor_int32, dtype="float32", value=1.1
)
-out_5 = fluid.layers.fill_constant(
+out_5 = paddle.tensor.fill_constant(
shape=shape_tensor_int64, dtype="float32", value=1.1
)
-out_6 = fluid.layers.fill_constant(
+out_6 = paddle.tensor.fill_constant(
shape=shape_tensor_int64, dtype=np.float32, value=1.1
)
-val1 = fluid.layers.fill_constant(
+val1 = paddle.tensor.fill_constant(
shape=[1], dtype=np.float32, value=1.1
)
-val2 = fluid.layers.fill_constant(
+val2 = paddle.tensor.fill_constant(
shape=[1], dtype=np.float64, value=1.1
)
-out_7 = fluid.layers.fill_constant(
+out_7 = paddle.tensor.fill_constant(
shape=shape_tensor_int64, dtype=np.float32, value=val1
)
-out_8 = fluid.layers.fill_constant(
+out_8 = paddle.tensor.fill_constant(
shape=shape_tensor_int64, dtype=np.float32, value=val2
)
@@ -359,16 +359,16 @@ class TestFillConstantImperative(unittest.TestCase):
shape = fluid.dygraph.to_variable(data1)
val = fluid.dygraph.to_variable(data2)
value = fluid.dygraph.to_variable(data3)
-res1 = fluid.layers.fill_constant(
+res1 = paddle.tensor.fill_constant(
shape=[1, 2], dtype='float32', value=1.1
)
-res2 = fluid.layers.fill_constant(
+res2 = paddle.tensor.fill_constant(
shape=shape, dtype='float32', value=1.1
)
-res3 = fluid.layers.fill_constant(
+res3 = paddle.tensor.fill_constant(
shape=shape, dtype='float32', value=val
)
-res4 = fluid.layers.fill_constant(
+res4 = paddle.tensor.fill_constant(
shape=shape, dtype='int32', value=value
)
assert np.array_equal(
@@ -386,17 +386,17 @@ class TestFillConstantImperative(unittest.TestCase):
def test_nan(self):
with fluid.dygraph.guard():
-res = fluid.layers.fill_constant([1], 'float32', np.nan)
+res = paddle.tensor.fill_constant([1], 'float32', np.nan)
self.assertTrue(np.isnan(res.numpy().item(0)))
def test_inf(self):
with fluid.dygraph.guard():
-res = fluid.layers.fill_constant([1], 'float32', np.inf)
+res = paddle.tensor.fill_constant([1], 'float32', np.inf)
self.assertTrue(np.isinf(res.numpy().item(0)))
def test_ninf(self):
with fluid.dygraph.guard():
-res = fluid.layers.fill_constant([1], 'float32', np.NINF)
+res = paddle.tensor.fill_constant([1], 'float32', np.NINF)
self.assertTrue(np.isinf(res.numpy().item(0)))
self.assertEqual(np.NINF, res.numpy().item(0))
@@ -408,7 +408,7 @@ class TestFillConstantOpError(unittest.TestCase):
x1 = paddle.static.data(name='x1', shape=[-1, 1], dtype="int16")
self.assertRaises(
TypeError,
-fluid.layers.fill_constant,
+paddle.tensor.fill_constant,
shape=[1],
value=5,
dtype='uint4',
@@ -416,7 +416,7 @@ class TestFillConstantOpError(unittest.TestCase):
self.assertRaises(
TypeError,
-fluid.layers.fill_constant,
+paddle.tensor.fill_constant,
shape=[1.1],
value=5,
dtype='float32',
@@ -429,7 +429,7 @@ class TestFillConstantOpError(unittest.TestCase):
self.assertRaises(
TypeError,
-fluid.layers.fill_constant,
+paddle.tensor.fill_constant,
shape=[1],
value=5,
dtype='float64',
@@ -439,7 +439,7 @@ class TestFillConstantOpError(unittest.TestCase):
x3 = np.random.randn(100, 100).astype('int32')
self.assertRaises(
TypeError,
-fluid.layers.fill_constant,
+paddle.tensor.fill_constant,
shape=[100, 100],
value=5,
dtype='float64',
@@ -448,7 +448,7 @@ class TestFillConstantOpError(unittest.TestCase):
# The argument shape's type of fill_constant_op must be list, tuple or Variable.
def test_shape_type():
-fluid.layers.fill_constant(shape=1, dtype="float32", value=1)
+paddle.tensor.fill_constant(shape=1, dtype="float32", value=1)
self.assertRaises(TypeError, test_shape_type)
@@ -457,7 +457,7 @@ class TestFillConstantOpError(unittest.TestCase):
shape = fluid.data(
name="shape_tensor", shape=[2], dtype="float32"
)
-fluid.layers.fill_constant(
+paddle.tensor.fill_constant(
shape=shape, dtype="float32", value=1
)
@@ -467,7 +467,7 @@ class TestFillConstantOpError(unittest.TestCase):
shape = fluid.data(
name="shape_tensor_list", shape=[1], dtype="bool"
)
-fluid.layers.fill_constant(
+paddle.tensor.fill_constant(
shape=[shape, 2], dtype="float32", value=1
)
...
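Per the commit message, sample code is migrated to paddle.full rather than to the relocated fill_constant; for the common case the two are interchangeable, modulo the value/fill_value keyword rename. A quick sketch in dygraph mode (values illustrative only):

import numpy as np
import paddle

a = paddle.tensor.fill_constant(shape=[1, 2], dtype='float32', value=1.1)
b = paddle.full(shape=[1, 2], dtype='float32', fill_value=1.1)
assert np.array_equal(a.numpy(), b.numpy())  # same data, same dtype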
@@ -24,9 +24,9 @@ from paddle.fluid import Program, program_guard
# Test python API
class TestFullAPI(unittest.TestCase):
def test_api(self):
-positive_2_int32 = fluid.layers.fill_constant([1], "int32", 2)
-positive_2_int64 = fluid.layers.fill_constant([1], "int64", 2)
+positive_2_int32 = paddle.tensor.fill_constant([1], "int32", 2)
+positive_2_int64 = paddle.tensor.fill_constant([1], "int64", 2)
shape_tensor_int32 = fluid.data(
name="shape_tensor_int32", shape=[2], dtype="int32"
)
@@ -57,7 +57,9 @@ class TestFullAPI(unittest.TestCase):
shape=shape_tensor_int64, dtype=np.float32, fill_value=1.1
)
-val = fluid.layers.fill_constant(shape=[1], dtype=np.float32, value=1.1)
+val = paddle.tensor.fill_constant(
+shape=[1], dtype=np.float32, value=1.1
+)
out_7 = paddle.full(
shape=shape_tensor_int64, dtype=np.float32, fill_value=val
)
@@ -82,9 +84,11 @@ class TestFullAPI(unittest.TestCase):
def test_api_eager(self):
with fluid.dygraph.base.guard():
-positive_2_int32 = fluid.layers.fill_constant([1], "int32", 2)
-positive_2_int64 = fluid.layers.fill_constant([1], "int64", 2)
-positive_4_int64 = fluid.layers.fill_constant([1], "int64", 4, True)
+positive_2_int32 = paddle.tensor.fill_constant([1], "int32", 2)
+positive_2_int64 = paddle.tensor.fill_constant([1], "int64", 2)
+positive_4_int64 = paddle.tensor.fill_constant(
+[1], "int64", 4, True
+)
out_1 = paddle.full(shape=[1, 2], dtype="float32", fill_value=1.1)
@@ -106,7 +110,7 @@ class TestFullAPI(unittest.TestCase):
out_6 = paddle.full(shape=[1, 2], dtype=np.float32, fill_value=1.1)
-val = fluid.layers.fill_constant(
+val = paddle.tensor.fill_constant(
shape=[1], dtype=np.float32, value=1.1
)
out_7 = paddle.full(shape=[1, 2], dtype=np.float32, fill_value=val)
...
@@ -21,7 +21,6 @@ from eager_op_test import OpTest
import paddle
import paddle.nn.functional as F
from paddle import tensor
-from paddle.fluid import layers
from paddle.fluid.framework import default_main_program
from paddle.incubate.nn import FusedMultiTransformer
from paddle.incubate.nn.functional import fused_multi_transformer
@@ -862,7 +861,7 @@ class TestFusedMultiTransformerOp(OpTest):
assert self.query_length == self.cache_length
cache_kv[:] = 0
else:
-time_step = layers.fill_constant(
+time_step = paddle.tensor.fill_constant(
shape=[1], dtype="int32", value=0, force_cpu=True
)
time_step_feed = self.cache_length
@@ -947,7 +946,7 @@ class TestFusedMultiTransformerOp(OpTest):
for i in range(self.layers):
if self.has_cache_kv:
cache_kvs.append(
-layers.fill_constant(
+paddle.tensor.fill_constant(
shape=cache_kv.shape, dtype=cache_kv.dtype, value=0
)
)
@@ -955,13 +954,13 @@ class TestFusedMultiTransformerOp(OpTest):
if self.has_pre_cache:
cache_kvs.append(
-layers.fill_constant(
+paddle.tensor.fill_constant(
shape=cache_kv.shape, dtype=cache_kv.dtype, value=0
)
)
cache_kvs_feed.append(cache_kv)
pre_caches.append(
-layers.fill_constant(
+paddle.tensor.fill_constant(
shape=self.pre_cache_kv.shape,
dtype=self.pre_cache_kv.dtype,
value=0,
...
@@ -209,9 +209,9 @@ class TestGaussianRandomOp1_ShapeTensor(TestGaussianRandomOp):
# Test python API
class TestGaussianRandomAPI(unittest.TestCase):
def test_api(self):
-positive_2_int32 = fluid.layers.fill_constant([1], "int32", 2000)
-positive_2_int64 = fluid.layers.fill_constant([1], "int64", 500)
+positive_2_int32 = paddle.tensor.fill_constant([1], "int32", 2000)
+positive_2_int64 = paddle.tensor.fill_constant([1], "int64", 500)
shape_tensor_int32 = fluid.data(
name="shape_tensor_int32", shape=[2], dtype="int32"
)
...
@@ -82,7 +82,7 @@ class TestHistogramOpError(unittest.TestCase):
"""Test bins should be greater than or equal to 1."""
def net_func():
-input_value = paddle.fluid.layers.fill_constant(
+input_value = paddle.tensor.fill_constant(
shape=[3, 4], dtype='float32', value=3.0
)
paddle.histogram(input=input_value, bins=-1, min=1, max=5)
@@ -94,7 +94,7 @@ class TestHistogramOpError(unittest.TestCase):
"""Test max must be larger or equal to min."""
def net_func():
-input_value = paddle.fluid.layers.fill_constant(
+input_value = paddle.tensor.fill_constant(
shape=[3, 4], dtype='float32', value=3.0
)
paddle.histogram(input=input_value, bins=1, min=5, max=1)
@@ -106,7 +106,7 @@ class TestHistogramOpError(unittest.TestCase):
"""Test range of min, max is not finite"""
def net_func():
-input_value = paddle.fluid.layers.fill_constant(
+input_value = paddle.tensor.fill_constant(
shape=[3, 4], dtype='float32', value=3.0
)
paddle.histogram(input=input_value, bins=1, min=-np.inf, max=5)
...
@@ -77,7 +77,7 @@ class TestDygraphGAN(unittest.TestCase):
d_loss_real = paddle.mean(
paddle.nn.functional.binary_cross_entropy_with_logits(
logit=d_real,
-label=fluid.layers.fill_constant(
+label=paddle.tensor.fill_constant(
shape=[2, 1], dtype='float32', value=1.0
),
)
@@ -87,7 +87,7 @@ class TestDygraphGAN(unittest.TestCase):
d_loss_fake = paddle.mean(
paddle.nn.functional.binary_cross_entropy_with_logits(
logit=d_fake,
-label=fluid.layers.fill_constant(
+label=paddle.tensor.fill_constant(
shape=[2, 1], dtype='float32', value=0.0
),
)
@@ -108,7 +108,7 @@ class TestDygraphGAN(unittest.TestCase):
g_loss = paddle.mean(
paddle.nn.functional.binary_cross_entropy_with_logits(
logit=d_fake,
-label=fluid.layers.fill_constant(
+label=paddle.tensor.fill_constant(
shape=[2, 1], dtype='float32', value=1.0
),
)
...
@@ -53,20 +53,20 @@ class LeNetDygraph(fluid.dygraph.Layer):
def init_weights(layer):
if type(layer) == nn.Linear:
-new_weight = paddle.fluid.layers.fill_constant(
+new_weight = paddle.tensor.fill_constant(
layer.weight.shape, layer.weight.dtype, value=0.9
)
layer.weight.set_value(new_weight)
-new_bias = paddle.fluid.layers.fill_constant(
+new_bias = paddle.tensor.fill_constant(
layer.bias.shape, layer.bias.dtype, value=-0.1
)
layer.bias.set_value(new_bias)
elif type(layer) == nn.Conv2D:
-new_weight = paddle.fluid.layers.fill_constant(
+new_weight = paddle.tensor.fill_constant(
layer.weight.shape, layer.weight.dtype, value=0.7
)
layer.weight.set_value(new_weight)
-new_bias = paddle.fluid.layers.fill_constant(
+new_bias = paddle.tensor.fill_constant(
layer.bias.shape, layer.bias.dtype, value=-0.2
)
layer.bias.set_value(new_bias)
...
@@ -212,7 +212,7 @@ class EncoderNet(fluid.dygraph.Layer):
)
h_0 = to_variable(h_0)
else:
-h_0 = fluid.layers.fill_constant(
+h_0 = paddle.tensor.fill_constant(
shape=[Config.batch_size, rnn_hidden_size],
dtype='float32',
value=0,
...
@@ -23,7 +23,7 @@ import paddle.fluid as fluid
class TestIncrement(unittest.TestCase):
def test_api(self):
with fluid.program_guard(fluid.Program(), fluid.Program()):
-input = fluid.layers.fill_constant(
+input = paddle.tensor.fill_constant(
shape=[1], dtype='int64', value=5
)
expected_result = np.array([8], dtype='int64')
@@ -45,7 +45,7 @@ class TestInplaceApiWithDataTransform(unittest.TestCase):
if fluid.core.is_compiled_with_cuda():
paddle.enable_static()
with paddle.fluid.device_guard("gpu:0"):
-x = paddle.fluid.layers.fill_constant([1], "float32", 0)
+x = paddle.tensor.fill_constant([1], "float32", 0)
with paddle.fluid.device_guard("cpu"):
x = paddle.increment(x)
exe = paddle.static.Executor(paddle.CUDAPlace(0))
...
@@ -19,7 +19,6 @@ import unittest
import numpy as np
import paddle
-import paddle.fluid as fluid
from paddle.fluid import core
from paddle.fluid.framework import convert_np_dtype_to_dtype_
from paddle.jit.dy2static.utils import _compatible_non_tensor_spec
@@ -35,7 +34,9 @@ class TestInputSpec(unittest.TestCase):
self.assertIsNone(tensor_spec.name)
def test_from_tensor(self):
-x_bool = fluid.layers.fill_constant(shape=[1], dtype='bool', value=True)
+x_bool = paddle.tensor.fill_constant(
+shape=[1], dtype='bool', value=True
+)
bool_spec = InputSpec.from_tensor(x_bool)
self.assertEqual(bool_spec.dtype, x_bool.dtype)
self.assertEqual(list(bool_spec.shape), list(x_bool.shape))
...
@@ -18,7 +18,6 @@ import numpy as np
import paddle
import paddle.fluid as fluid
-import paddle.fluid.layers as layers
from paddle.fluid import core
from paddle.fluid.dygraph.base import switch_to_static_graph
@@ -39,13 +38,13 @@ class LAMBOptimizer(paddle.optimizer.Lamb):
self._beta2_pow_acc_str, param_and_grad[0]
)
-beta_1 = layers.fill_constant(
+beta_1 = paddle.tensor.fill_constant(
dtype='float32', shape=[1], value=self._beta1, name='lamb_beta_1'
)
-beta_2 = layers.fill_constant(
+beta_2 = paddle.tensor.fill_constant(
dtype='float32', shape=[1], value=self._beta2, name='lamb_beta_2'
)
-epsilon = layers.fill_constant(
+epsilon = paddle.tensor.fill_constant(
dtype='float32', shape=[1], value=self._epsilon, name='epsilon'
)
...
@@ -1030,8 +1030,10 @@ class TestLayer(LayerTest):
def test_while_loop(self):
with self.static_graph():
-i = layers.fill_constant(shape=[1], dtype='int64', value=0)
-ten = layers.fill_constant(shape=[1], dtype='int64', value=10)
+i = paddle.tensor.fill_constant(shape=[1], dtype='int64', value=0)
+ten = paddle.tensor.fill_constant(
+shape=[1], dtype='int64', value=10
+)
def cond(i):
return paddle.less_than(i, ten)
@@ -1043,8 +1045,10 @@ class TestLayer(LayerTest):
static_ret = self.get_static_graph_result(feed={}, fetch_list=out)
with self.dynamic_graph():
-i = layers.fill_constant(shape=[1], dtype='int64', value=0)
-ten = layers.fill_constant(shape=[1], dtype='int64', value=10)
+i = paddle.tensor.fill_constant(shape=[1], dtype='int64', value=0)
+ten = paddle.tensor.fill_constant(
+shape=[1], dtype='int64', value=10
+)
def cond1(i):
return paddle.less_than(i, ten)
@@ -1054,7 +1058,9 @@ class TestLayer(LayerTest):
dy_ret = paddle.static.nn.while_loop(cond1, body1, [i])
with self.assertRaises(ValueError):
-j = layers.fill_constant(shape=[1], dtype='int64', value=0)
+j = paddle.tensor.fill_constant(
+shape=[1], dtype='int64', value=0
+)
def body2(i):
return i + 1, i + 2
@@ -1170,10 +1176,10 @@ class TestLayer(LayerTest):
return paddle.subtract(a, b)
with self.static_graph():
-a = fluid.layers.fill_constant(
+a = paddle.tensor.fill_constant(
shape=[1], dtype='float32', value=0.1
)
-b = fluid.layers.fill_constant(
+b = paddle.tensor.fill_constant(
shape=[1], dtype='float32', value=0.23
)
out = paddle.static.nn.cond(
@@ -1215,18 +1221,30 @@ class TestLayer(LayerTest):
def test_case(self):
def fn_1():
-return layers.fill_constant(shape=[1, 2], dtype='float32', value=1)
+return paddle.tensor.fill_constant(
+shape=[1, 2], dtype='float32', value=1
+)
def fn_2():
-return layers.fill_constant(shape=[2, 2], dtype='int32', value=2)
+return paddle.tensor.fill_constant(
+shape=[2, 2], dtype='int32', value=2
+)
def fn_3():
-return layers.fill_constant(shape=[3], dtype='int32', value=3)
+return paddle.tensor.fill_constant(
+shape=[3], dtype='int32', value=3
+)
with self.static_graph():
-x = layers.fill_constant(shape=[1], dtype='float32', value=0.3)
-y = layers.fill_constant(shape=[1], dtype='float32', value=0.1)
-z = layers.fill_constant(shape=[1], dtype='float32', value=0.2)
+x = paddle.tensor.fill_constant(
+shape=[1], dtype='float32', value=0.3
+)
+y = paddle.tensor.fill_constant(
+shape=[1], dtype='float32', value=0.1
+)
+z = paddle.tensor.fill_constant(
+shape=[1], dtype='float32', value=0.2
+)
pred_1 = paddle.less_than(z, x) # true: 0.2 < 0.3
pred_2 = paddle.less_than(x, y) # false: 0.3 < 0.1
@@ -1248,9 +1266,15 @@ class TestLayer(LayerTest):
static_res1, static_res2 = exe.run(fetch_list=[out_1, out_2])
with self.dynamic_graph():
-x = layers.fill_constant(shape=[1], dtype='float32', value=0.3)
-y = layers.fill_constant(shape=[1], dtype='float32', value=0.1)
-z = layers.fill_constant(shape=[1], dtype='float32', value=0.2)
+x = paddle.tensor.fill_constant(
+shape=[1], dtype='float32', value=0.3
+)
+y = paddle.tensor.fill_constant(
+shape=[1], dtype='float32', value=0.1
+)
+z = paddle.tensor.fill_constant(
+shape=[1], dtype='float32', value=0.2
+)
pred_1 = paddle.less_than(z, x) # true: 0.2 < 0.3
pred_2 = paddle.less_than(x, y) # false: 0.3 < 0.1
@@ -1270,17 +1294,27 @@ class TestLayer(LayerTest):
def test_switch_case(self):
def fn_1():
-return layers.fill_constant(shape=[1, 2], dtype='float32', value=1)
+return paddle.tensor.fill_constant(
+shape=[1, 2], dtype='float32', value=1
+)
def fn_2():
-return layers.fill_constant(shape=[2, 2], dtype='int32', value=2)
+return paddle.tensor.fill_constant(
+shape=[2, 2], dtype='int32', value=2
+)
def fn_3():
-return layers.fill_constant(shape=[3], dtype='int32', value=3)
+return paddle.tensor.fill_constant(
+shape=[3], dtype='int32', value=3
+)
with self.static_graph():
-index_1 = layers.fill_constant(shape=[1], dtype='int32', value=1)
-index_2 = layers.fill_constant(shape=[1], dtype='int32', value=2)
+index_1 = paddle.tensor.fill_constant(
+shape=[1], dtype='int32', value=1
+)
+index_2 = paddle.tensor.fill_constant(
+shape=[1], dtype='int32', value=2
+)
out_1 = paddle.static.nn.switch_case(
branch_index=index_1,
@@ -1308,8 +1342,12 @@ class TestLayer(LayerTest):
)
with self.dynamic_graph():
-index_1 = layers.fill_constant(shape=[1], dtype='int32', value=1)
-index_2 = layers.fill_constant(shape=[1], dtype='int32', value=2)
+index_1 = paddle.tensor.fill_constant(
+shape=[1], dtype='int32', value=1
+)
+index_2 = paddle.tensor.fill_constant(
+shape=[1], dtype='int32', value=2
+)
out_1 = paddle.static.nn.switch_case(
branch_index=index_1,
@@ -1987,9 +2025,15 @@ class TestBook(LayerTest):
paddle.arange(0, 10, 2, 'int32')
paddle.arange(0.1, 10.0, 0.2, 'float32')
paddle.arange(0.1, 10.0, 0.2, 'float64')
-start = layers.fill_constant(shape=[1], value=0.1, dtype="float32")
-end = layers.fill_constant(shape=[1], value=10.0, dtype="float32")
-step = layers.fill_constant(shape=[1], value=0.2, dtype="float32")
+start = paddle.tensor.fill_constant(
+shape=[1], value=0.1, dtype="float32"
+)
+end = paddle.tensor.fill_constant(
+shape=[1], value=10.0, dtype="float32"
+)
+step = paddle.tensor.fill_constant(
+shape=[1], value=0.2, dtype="float32"
+)
y = paddle.arange(start, end, step, 'float64')
return y
@@ -2088,7 +2132,7 @@ class TestBook(LayerTest):
def test_fill_constant_batch_size_like(self):
with self.static_graph():
-like = fluid.layers.fill_constant(
+like = paddle.tensor.fill_constant(
shape=[1, 200], value=10, dtype='int64'
)
out = layers.fill_constant_batch_size_like(
...
@@ -26,7 +26,7 @@ from paddle.fluid.executor import Executor
class TestLoDArrayLength(unittest.TestCase):
def test_array_length(self):
tmp = layers.zeros(shape=[10], dtype='int32')
-i = layers.fill_constant(shape=[1], dtype='int64', value=10)
+i = paddle.tensor.fill_constant(shape=[1], dtype='int64', value=10)
arr = paddle.tensor.array_write(tmp, i=i)
arr_len = paddle.tensor.array_length(arr)
cpu = core.CPUPlace()
...
@@ -19,7 +19,6 @@ import numpy as np
import paddle
import paddle.fluid as fluid
import paddle.fluid.core as core
-import paddle.fluid.layers as layers
import paddle.fluid.optimizer as optimizer
from paddle.fluid.framework import Program, program_guard
@@ -98,7 +97,7 @@ def static(
sgd = optimizer.SGD(learning_rate=LR)
id = fluid.data('id', [1], 'int32')
-two = layers.fill_constant([1], 'int32', 2)
+two = paddle.tensor.fill_constant([1], 'int32', 2)
mod_two = paddle.remainder(id, two) == 0
if loss_in_switch:
...
@@ -45,7 +45,7 @@ class TestProfiler(unittest.TestCase):
counter = fluid.layers.zeros(
shape=[1], dtype='int64', force_cpu=True
)
-until = layers.fill_constant([1], dtype='int64', value=10)
+until = paddle.tensor.fill_constant([1], dtype='int64', value=10)
data_arr = paddle.tensor.array_write(hidden1, i)
cond = paddle.less_than(x=counter, y=until)
while_op = paddle.static.nn.control_flow.While(cond=cond)
...
@@ -16,7 +16,6 @@ import unittest
import paddle
import paddle.fluid as fluid
-import paddle.fluid.layers as layers
class TestProgramToReadableCode(unittest.TestCase):
@@ -37,14 +36,22 @@ class TestProgramToReadableCode(unittest.TestCase):
def append_cond_op(self, program):
def true_func():
-return layers.fill_constant(shape=[2, 3], dtype='int32', value=2)
+return paddle.tensor.fill_constant(
+shape=[2, 3], dtype='int32', value=2
+)
def false_func():
-return layers.fill_constant(shape=[3, 2], dtype='int32', value=-1)
+return paddle.tensor.fill_constant(
+shape=[3, 2], dtype='int32', value=-1
+)
with fluid.program_guard(program):
-x = layers.fill_constant(shape=[1], dtype='float32', value=0.1)
-y = layers.fill_constant(shape=[1], dtype='float32', value=0.23)
+x = paddle.tensor.fill_constant(
+shape=[1], dtype='float32', value=0.1
+)
+y = paddle.tensor.fill_constant(
+shape=[1], dtype='float32', value=0.23
+)
pred = paddle.less_than(y, x)
out = paddle.static.nn.cond(pred, true_func, false_func)
...
@@ -72,7 +72,7 @@ def cond_net(use_feed=None):
avg_loss = paddle.mean(loss, name='mean_softmax_loss')
return avg_loss
-two = fluid.layers.fill_constant([1], 'int32', 2)
+two = paddle.tensor.fill_constant([1], 'int32', 2)
pred = two == 0
avg_loss = paddle.static.nn.case(
[(pred, lambda: loss1(prediction, label))],
@@ -106,7 +106,7 @@ def optimization_in_cond_net(with_optimize=False):
return avg_loss
sgd = fluid.optimizer.SGD(learning_rate=0.1)
-two = fluid.layers.fill_constant([1], 'int32', 2)
+two = paddle.tensor.fill_constant([1], 'int32', 2)
pred = two == 0
avg_loss = paddle.static.nn.case(
[(pred, lambda: loss1(sgd, prediction, label, with_optimize))],
...
@@ -42,8 +42,8 @@ class TestRandOpError(unittest.TestCase):
self.assertRaises(TypeError, test_Variable)
def test_dtype():
-dim_1 = fluid.layers.fill_constant([1], "int64", 3)
-dim_2 = fluid.layers.fill_constant([1], "int32", 5)
+dim_1 = paddle.tensor.fill_constant([1], "int64", 3)
+dim_2 = paddle.tensor.fill_constant([1], "int32", 5)
rand(shape=[dim_1, dim_2], dtype='int32')
self.assertRaises(TypeError, test_dtype)
@@ -64,8 +64,8 @@ class TestRandOp(unittest.TestCase):
result_0 = rand([3, 4])
result_1 = rand([3, 4], 'float64')
-dim_1 = fluid.layers.fill_constant([1], "int64", 3)
-dim_2 = fluid.layers.fill_constant([1], "int32", 5)
+dim_1 = paddle.tensor.fill_constant([1], "int64", 3)
+dim_2 = paddle.tensor.fill_constant([1], "int32", 5)
result_2 = rand(shape=[dim_1, dim_2])
var_shape = fluid.data(name='var_shape', shape=[2], dtype="int64")
@@ -104,8 +104,8 @@ class TestRandOpForDygraph(unittest.TestCase):
rand([3, 4], 'float64')
-dim_1 = fluid.layers.fill_constant([1], "int64", 3)
-dim_2 = fluid.layers.fill_constant([1], "int32", 5)
+dim_1 = paddle.tensor.fill_constant([1], "int64", 3)
+dim_2 = paddle.tensor.fill_constant([1], "int32", 5)
rand(shape=[dim_1, dim_2])
var_shape = fluid.dygraph.to_variable(np.array([3, 4]))
...
@@ -126,8 +126,8 @@ class TestRandintAPI(unittest.TestCase):
low=-100, high=100, shape=(32, 32, 3), dtype='int64'
)
# shape is a tensorlist and dtype is 'float32'
-dim_1 = paddle.fluid.layers.fill_constant([1], "int64", 32)
-dim_2 = paddle.fluid.layers.fill_constant([1], "int32", 50)
+dim_1 = paddle.tensor.fill_constant([1], "int64", 32)
+dim_2 = paddle.tensor.fill_constant([1], "int32", 50)
out4 = paddle.randint(
low=-100, high=100, shape=[dim_1, 5, dim_2], dtype='int32'
)
...
@@ -30,8 +30,8 @@ class TestRandnOp(unittest.TestCase):
x1 = paddle.randn(shape, 'float32')
x2 = paddle.randn(shape, 'float64')
-dim_1 = paddle.fluid.layers.fill_constant([1], "int64", 20)
-dim_2 = paddle.fluid.layers.fill_constant([1], "int32", 50)
+dim_1 = paddle.tensor.fill_constant([1], "int64", 20)
+dim_2 = paddle.tensor.fill_constant([1], "int32", 50)
x3 = paddle.randn([dim_1, dim_2, 784])
var_shape = paddle.static.data('X', [2], 'int32')
@@ -66,8 +66,8 @@ class TestRandnOpForDygraph(unittest.TestCase):
x1 = paddle.randn(shape, 'float32')
x2 = paddle.randn(shape, 'float64')
-dim_1 = paddle.fluid.layers.fill_constant([1], "int64", 20)
-dim_2 = paddle.fluid.layers.fill_constant([1], "int32", 50)
+dim_1 = paddle.tensor.fill_constant([1], "int64", 20)
+dim_2 = paddle.tensor.fill_constant([1], "int32", 50)
x3 = paddle.randn(shape=[dim_1, dim_2, 784])
var_shape = paddle.to_tensor(np.array(shape))
...
@@ -327,7 +327,7 @@ class TestReshapeOpBool(TestReshapeOp):
# Test python API
class TestReshapeAPI(unittest.TestCase):
def _set_paddle_api(self):
-self.fill_constant = paddle.fluid.layers.fill_constant
+self.fill_constant = paddle.tensor.fill_constant
self.data = paddle.static.data
self.to_tensor = paddle.to_tensor
self._executed_api()
...
@@ -80,7 +80,7 @@ class TestRetainGraph(unittest.TestCase):
fake_AB = paddle.concat((real_data.detach(), interpolatesv), 1)
disc_interpolates = netD(fake_AB)
-outs = paddle.fluid.layers.fill_constant(
+outs = paddle.tensor.fill_constant(
disc_interpolates.shape, disc_interpolates.dtype, 1.0
)
gradients = paddle.grad(
@@ -125,7 +125,7 @@ class TestRetainGraph(unittest.TestCase):
fake_AB = paddle.concat((realA, fakeB), 1)
G_pred_fake = d(fake_AB.detach())
-false_target = paddle.fluid.layers.fill_constant(
+false_target = paddle.tensor.fill_constant(
G_pred_fake.shape, 'float32', 0.0
)
@@ -140,7 +140,7 @@ class TestRetainGraph(unittest.TestCase):
optim_g.clear_gradients()
fake_AB = paddle.concat((realA, fakeB), 1)
G_pred_fake = d(fake_AB)
-true_target = paddle.fluid.layers.fill_constant(
+true_target = paddle.tensor.fill_constant(
G_pred_fake.shape, 'float32', 1.0
)
loss_g = l1_criterion(fakeB, realB) + gan_criterion(
...
@@ -1306,19 +1306,13 @@ class TestGradientTruncated(unittest.TestCase):
         numel = lambda input_shape: reduce(lambda x, y: x * y, input_shape)

         def op1(x):
-            value = paddle.fluid.layers.fill_constant([1], "float32", 1)
+            value = paddle.tensor.fill_constant([1], "float32", 1)
             # test stop_gradient
             value.stop_gradient = True
             x.stop_gradient = False
-            start = paddle.fluid.layers.fill_constant(
-                [1], "int32", 5, force_cpu=True
-            )
-            end = paddle.fluid.layers.fill_constant(
-                [1], "int32", 0, force_cpu=True
-            )
-            step = paddle.fluid.layers.fill_constant(
-                [1], "int32", -2, force_cpu=True
-            )
+            start = paddle.tensor.fill_constant([1], "int32", 5, force_cpu=True)
+            end = paddle.tensor.fill_constant([1], "int32", 0, force_cpu=True)
+            step = paddle.tensor.fill_constant([1], "int32", -2, force_cpu=True)

             inputs = {
                 'Input': x,
@@ -1347,7 +1341,7 @@ class TestGradientTruncated(unittest.TestCase):
             return y, value

         def op2(x):
-            value = paddle.fluid.layers.fill_constant([1, 3, 2], "float32", 1)
+            value = paddle.tensor.fill_constant([1, 3, 2], "float32", 1)
             # test stop_gradient
             value.stop_gradient = False
             x.stop_gradient = False
@@ -1372,18 +1366,12 @@ class TestGradientTruncated(unittest.TestCase):
             return y, value

         def op3(x):
-            value = paddle.fluid.layers.fill_constant([1], "float32", 1)
+            value = paddle.tensor.fill_constant([1], "float32", 1)
             x.stop_gradient = True
             value.stop_gradient = False
-            start = paddle.fluid.layers.fill_constant(
-                [1], "int32", 0, force_cpu=True
-            )
-            end = paddle.fluid.layers.fill_constant(
-                [1], "int32", 5, force_cpu=True
-            )
-            step = paddle.fluid.layers.fill_constant(
-                [1], "int32", 3, force_cpu=True
-            )
+            start = paddle.tensor.fill_constant([1], "int32", 0, force_cpu=True)
+            end = paddle.tensor.fill_constant([1], "int32", 5, force_cpu=True)
+            step = paddle.tensor.fill_constant([1], "int32", 3, force_cpu=True)

             inputs = {
                 'Input': x,
...
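Collapsing each call onto one line is safe here because `paddle.tensor.fill_constant` keeps the `force_cpu` flag, which `paddle.full` does not expose; the flag pins the constant to host memory so it can serve as a strided-slice index. A standalone static-graph sketch of the collapsed form:

    import paddle

    paddle.enable_static()
    # force_cpu=True keeps the index constants on the CPU
    start = paddle.tensor.fill_constant([1], "int32", 5, force_cpu=True)
    end = paddle.tensor.fill_constant([1], "int32", 0, force_cpu=True)
    step = paddle.tensor.fill_constant([1], "int32", -2, force_cpu=True)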
@@ -196,8 +196,8 @@ class TestSGDOpOptimizeSelectedRows(unittest.TestCase):
 class TestSGDOpWithLargeInput(unittest.TestCase):
     def runTest(self):
         paddle.enable_static()
-        data = fluid.layers.fill_constant(shape=[1], value=128, dtype='int64')
-        label = fluid.layers.fill_constant(
+        data = paddle.tensor.fill_constant(shape=[1], value=128, dtype='int64')
+        label = paddle.tensor.fill_constant(
             shape=[1, 150], value=0.5, dtype='float32'
         )
         emb = paddle.static.nn.embedding(
...
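`fill_constant` also accepts keyword arguments, as this test shows. If rewritten against the public API, the `value` keyword becomes `fill_value`; a sketch of the equivalent `paddle.full` calls with the same shapes and dtypes:

    import paddle

    paddle.enable_static()
    # `value` in fill_constant corresponds to `fill_value` in paddle.full
    data = paddle.full(shape=[1], fill_value=128, dtype='int64')
    label = paddle.full(shape=[1, 150], fill_value=0.5, dtype='float32')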
@@ -582,8 +582,8 @@ class TestBF16(OpTest):
 class TestSliceAPI(unittest.TestCase):
     def test_1(self):
         input = np.random.random([3, 4, 5, 6]).astype("float64")
-        minus_1 = fluid.layers.fill_constant([1], "int32", -1)
-        minus_3 = fluid.layers.fill_constant([1], "int64", -3)
+        minus_1 = paddle.tensor.fill_constant([1], "int32", -1)
+        minus_3 = paddle.tensor.fill_constant([1], "int64", -3)
         starts = paddle.static.data(
             name='starts', shape=[1, 3], dtype="float32"
         )
@@ -597,7 +597,7 @@ class TestSliceAPI(unittest.TestCase):
         )

         # value_int64 is greater than 2147483647 which is the max of int32
-        value_int64 = fluid.layers.fill_constant([1], "int64", 2147483648)
+        value_int64 = paddle.tensor.fill_constant([1], "int64", 2147483648)

         out_1 = paddle.slice(
             x, axes=[0, 1, 2], starts=[-3, 0, 2], ends=[value_int64, 100, -1]
@@ -739,7 +739,7 @@ class TestSliceApiWithLoDTensorArray(unittest.TestCase):
                 slice_arr, axis=self.axis, use_stack=True
             )
         elif case_num == 3:
-            value_int64 = fluid.layers.fill_constant(
+            value_int64 = paddle.tensor.fill_constant(
                 [1], "int64", 2147483648
             )
             self.sliced_arr = slice_arr = arr[self.start : value_int64]
...
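The constant 2147483648 is chosen to confirm that end indices are carried as int64 rather than truncated to int32; `paddle.slice` clamps an out-of-range end to the axis length, so the slice covers the whole dimension. A standalone dygraph sketch (the input shape is illustrative):

    import numpy as np
    import paddle

    x = paddle.to_tensor(np.random.random([3, 4, 5, 6]).astype("float64"))
    # 2147483648 overflows int32; as int64 it is clamped to the axis length
    end_int64 = paddle.tensor.fill_constant([1], "int64", 2147483648)
    out = paddle.slice(x, axes=[1], starts=[0], ends=[end_int64])
    print(out.shape)  # [3, 4, 5, 6]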