Unverified commit f6d4ae3d, author: R risemeup1, committer: GitHub

[FLUID_API_CLEAN] remove zeros (#52536)

* remove zeros

* remove zeros

* apply gcc12 to py3

* apply gcc12 to py3

* fluid api clear

* fluid api clean

* fluid api clean
Parent f3e8c4be
......@@ -190,7 +190,7 @@ class Normal(distribution.Distribution):
output_shape = shape + batch_shape
output = random.gaussian(
output_shape, mean=0.0, std=1.0, seed=seed, dtype=self.dtype
) * (tensor.zeros(output_shape, dtype=self.dtype) + self.scale)
) * (paddle.zeros(output_shape, dtype=self.dtype) + self.scale)
output = paddle.add(output, self.loc, name=name)
if self.all_arg_is_float:
return paddle.reshape(output, shape, name=name)
......
......@@ -185,7 +185,7 @@ class Uniform(distribution.Distribution):
output = paddle.uniform(
output_shape, dtype=self.dtype, min=0.0, max=1.0, seed=seed
) * (
tensor.zeros(output_shape, dtype=self.dtype)
paddle.zeros(output_shape, dtype=self.dtype)
+ (self.high - self.low)
)
output = paddle.add(output, self.low, name=name)
......
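In both the Normal and Uniform hunks above, the zero tensor exists only to broadcast a possibly scalar scale (or high - low) up to output_shape before the elementwise multiply, so tensor.zeros and paddle.zeros are interchangeable at these call sites. A minimal sketch of the pattern with the new call, using illustrative values rather than the distributions' real attributes:

    import paddle

    output_shape = [2, 3]
    scale = paddle.to_tensor(0.5, dtype='float32')       # stands in for self.scale
    noise = paddle.randn(output_shape, dtype='float32')  # stands in for random.gaussian(...)

    # Adding a zeros tensor of the target shape broadcasts `scale` to
    # output_shape, mirroring the replaced lines in the diff.
    sample = noise * (paddle.zeros(output_shape, dtype='float32') + scale)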
......@@ -41,7 +41,7 @@ from .layer_function_generator import (
templatedoc,
_generate_doc_string_,
)
from .tensor import zeros
from .. import unique_name
from .. import core
from ...utils import deprecated
......
......@@ -39,7 +39,6 @@ from paddle import _C_ops, _legacy_C_ops
__all__ = [
'fill_constant_batch_size_like',
'zeros',
]
......@@ -124,40 +123,3 @@ def fill_constant_batch_size_like(
)
out.stop_gradient = True
return out
def zeros(shape, dtype, force_cpu=False, name=None):
"""
The OP creates a tensor of specified :attr:`shape` and :attr:`dtype`, and fills it with 0.
Its :attr:`stop_gradient` will be set to True to stop gradient computation.
Parameters:
shape(tuple|list|Tensor): Shape of output Tensor, the data type of ``shape`` is int32 or int64.
dtype (np.dtype|str): Data type of output Tensor, it supports
bool, float16, float32, float64, int32 and int64.
force_cpu (bool, optional): Whether force to store the output Tensor in CPU memory.
If :attr:`force_cpu` is False, the output Tensor will be stored in running device memory.
Default: False.
name(str, optional): The default value is None. Normally there is no need for user to set this
property. For more information, please refer to :ref:`api_guide_Name`.
Returns:
Tensor: A tensor of data type :attr:`dtype` with shape :attr:`shape` and all elements set to 0.
Examples:
.. code-block:: python
import paddle.fluid as fluid
import paddle
data = fluid.layers.zeros(shape=[3, 2], dtype='float32') # [[0., 0.], [0., 0.], [0., 0.]]
# shape is a Tensor
shape = paddle.full(shape=[2], dtype='int32', fill_value=2)
data1 = fluid.layers.zeros(shape=shape, dtype='int32') #[[0, 0], [0, 0]]
"""
# TODO: remove zeros
from paddle.tensor import fill_constant
return fill_constant(
value=0.0, shape=shape, dtype=dtype, force_cpu=force_cpu, name=name
)
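The helper removed above was only a thin wrapper that forwarded to fill_constant with value=0.0, so the call sites touched by this diff migrate directly to paddle.zeros. A minimal sketch of the replacement, mirroring the examples from the removed docstring (variable names are illustrative):

    import paddle

    # Previously: fluid.layers.zeros(shape=[3, 2], dtype='float32')
    data = paddle.zeros(shape=[3, 2], dtype='float32')  # [[0., 0.], [0., 0.], [0., 0.]]

    # `shape` may also be a Tensor, as in the removed docstring's second example.
    shape = paddle.full(shape=[2], dtype='int32', fill_value=2)
    data1 = paddle.zeros(shape=shape, dtype='int32')  # [[0, 0], [0, 0]]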
......@@ -18,14 +18,14 @@ import numpy as np
import paddle
from paddle import fluid
from paddle.fluid import Program, core, layers, program_guard
from paddle.fluid import Program, core, program_guard
from paddle.fluid.backward import append_backward
from paddle.fluid.executor import Executor
from paddle.fluid.framework import default_main_program
def _test_read_write(x):
i = layers.zeros(shape=[1], dtype='int64')
i = paddle.zeros(shape=[1], dtype='int64')
i.stop_gradient = False
arr = paddle.tensor.array_write(x=x[0], i=i)
i = paddle.increment(x=i)
......@@ -33,7 +33,7 @@ def _test_read_write(x):
i = paddle.increment(x=i)
arr = paddle.tensor.array_write(x=x[2], i=i, array=arr)
i = layers.zeros(shape=[1], dtype='int64')
i = paddle.zeros(shape=[1], dtype='int64')
i.stop_gradient = False
a0 = paddle.tensor.array_read(array=arr, i=i)
i = paddle.increment(x=i)
......
......@@ -22,7 +22,7 @@ import numpy
import paddle
from paddle import fluid
from paddle.fluid import core, layers
from paddle.fluid import core
from paddle.fluid.executor import Executor
paddle.enable_static()
......@@ -56,10 +56,10 @@ class TestEagerDeletionWhileOpBase(unittest.TestCase):
d1 = paddle.static.data("d1", shape=[-1, 10], dtype='float32')
d2 = paddle.static.data("d2", shape=[-1, 10], dtype='float32')
i = layers.zeros(shape=[1], dtype='int64')
i = paddle.zeros(shape=[1], dtype='int64')
i.stop_gradient = True
init = layers.zeros(shape=[10], dtype='float32')
init = paddle.zeros(shape=[10], dtype='float32')
mem_array = paddle.tensor.array_write(x=init, i=i)
data_array = paddle.tensor.array_write(x=d0, i=i)
......@@ -69,7 +69,7 @@ class TestEagerDeletionWhileOpBase(unittest.TestCase):
i = paddle.increment(i)
paddle.tensor.array_write(d2, i, array=data_array)
i = layers.zeros(shape=[1], dtype='int64')
i = paddle.zeros(shape=[1], dtype='int64')
i.stop_gradient = True
array_len = paddle.tensor.fill_constant(
......
......@@ -18,14 +18,13 @@ import numpy as np
import paddle
from paddle.fluid.executor import Executor
from paddle.fluid.layers import zeros
from paddle.static import data
from paddle.tensor import array_write
class TestExecutor(unittest.TestCase):
def test_mul(self):
i = zeros(shape=[1], dtype='int64')
i = paddle.zeros(shape=[1], dtype='int64')
a = data(name='a', shape=[-1, 784], dtype='float32')
array = array_write(x=a, i=i)
......
......@@ -19,14 +19,13 @@ from simple_nets import simple_fc_net, simple_fc_net_with_inputs
import paddle
from paddle import fluid
from paddle.fluid import layers
class TestFetchLoDTensorArray(unittest.TestCase):
def build_program(self, main_program, startup_program):
with fluid.unique_name.guard():
with fluid.program_guard(main_program, startup_program):
i = layers.zeros(shape=[1], dtype='int64')
i = paddle.zeros(shape=[1], dtype='int64')
img = paddle.static.data(
name='image', shape=[-1, 784], dtype='float32'
)
......
......@@ -17,13 +17,13 @@ import unittest
import numpy
import paddle
from paddle.fluid import Program, core, layers, program_guard
from paddle.fluid import Program, core, program_guard
from paddle.fluid.executor import Executor
class TestLoDArrayLength(unittest.TestCase):
def test_array_length(self):
tmp = layers.zeros(shape=[10], dtype='int32')
tmp = paddle.zeros(shape=[10], dtype='int32')
i = paddle.tensor.fill_constant(shape=[1], dtype='int64', value=10)
arr = paddle.tensor.array_write(tmp, i=i)
arr_len = paddle.tensor.array_length(arr)
......
......@@ -236,7 +236,7 @@ class TestMathOpPatches(unittest.TestCase):
b = paddle.static.data(name="b", shape=[-1, 1], dtype='float32')
b.desc.set_need_check_feed(False)
one = paddle.ones(shape=[1], dtype='int32')
zero = fluid.layers.zeros(shape=[1], dtype='int32')
zero = paddle.zeros(shape=[1], dtype='int32')
cond = one == zero
c = paddle.static.nn.cond(cond, lambda: a + b, lambda: a - b)
......
......@@ -20,7 +20,7 @@ import numpy as np
import paddle
from paddle import fluid, utils
from paddle.fluid import core, layers, profiler
from paddle.fluid import core, profiler
from paddle.fluid.proto.profiler import profiler_pb2
from paddle.utils.flops import flops
......@@ -38,9 +38,9 @@ class TestProfiler(unittest.TestCase):
name='x', shape=[-1, 784], dtype='float32'
)
hidden1 = paddle.static.nn.fc(x=image, size=64, activation='relu')
i = layers.zeros(shape=[1], dtype='int64')
counter = fluid.layers.zeros(
shape=[1], dtype='int64', force_cpu=True
i = paddle.zeros(shape=[1], dtype='int64')
counter = paddle.tensor.fill_constant(
shape=[1], dtype='int64', value=0, force_cpu=True
)
until = paddle.tensor.fill_constant([1], dtype='int64', value=10)
data_arr = paddle.tensor.array_write(hidden1, i)
......
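paddle.zeros exposes no force_cpu argument, so the profiler test's CPU-pinned counter above is rewritten against paddle.tensor.fill_constant with value=0 rather than paddle.zeros. A minimal sketch of the two replacement forms used in this hunk, assuming static graph mode as in the test:

    import paddle

    paddle.enable_static()

    # Plain zero tensor: migrates straight to paddle.zeros.
    i = paddle.zeros(shape=[1], dtype='int64')

    # Zero tensor pinned to CPU memory: paddle.zeros has no force_cpu flag,
    # so fill_constant with value=0 is used instead.
    counter = paddle.tensor.fill_constant(
        shape=[1], dtype='int64', value=0, force_cpu=True
    )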
......@@ -19,7 +19,7 @@ import numpy as np
import paddle
import paddle.nn.functional as F
from paddle import fluid
from paddle.fluid import core, layers
from paddle.fluid import core
from paddle.fluid.backward import append_backward
from paddle.fluid.framework import Program, program_guard
......@@ -67,7 +67,7 @@ class TestApiWhileLoop(unittest.TestCase):
main_program = Program()
startup_program = Program()
with program_guard(main_program, startup_program):
i = layers.zeros(shape=[1], dtype='int64')
i = paddle.zeros(shape=[1], dtype='int64')
ten = paddle.tensor.fill_constant(
shape=[1], dtype='int64', value=10
)
......@@ -112,7 +112,7 @@ class TestApiWhileLoop(unittest.TestCase):
main_program = Program()
startup_program = Program()
with program_guard(main_program, startup_program):
i = layers.zeros(shape=[1], dtype='int64')
i = paddle.zeros(shape=[1], dtype='int64')
ten = paddle.tensor.fill_constant(
shape=[1], dtype='int64', value=10
)
......@@ -202,8 +202,8 @@ class TestApiWhileLoop_Nested(unittest.TestCase):
main_program = Program()
startup_program = Program()
with program_guard(main_program, startup_program):
i = layers.zeros(shape=[1], dtype='int64')
j = layers.zeros(shape=[1], dtype='int64')
i = paddle.zeros(shape=[1], dtype='int64')
j = paddle.zeros(shape=[1], dtype='int64')
init = paddle.static.data(
name='init', shape=[3, 3], dtype='float32'
)
......@@ -373,9 +373,9 @@ class TestApiWhileLoop_NestedWithBackwardAndLoDTensorArray(unittest.TestCase):
d2 = paddle.static.data(name='d2', shape=[10], dtype='float32')
x = paddle.static.data(name='x', shape=[10], dtype='float32')
x.stop_gradient = False
i = layers.zeros(shape=[1], dtype='int64')
i = paddle.zeros(shape=[1], dtype='int64')
i.stop_gradient = True
init = layers.zeros(shape=[10], dtype='float32')
init = paddle.zeros(shape=[10], dtype='float32')
mem_array = paddle.tensor.array_write(x=init, i=i)
data_array = paddle.tensor.array_write(x=d0, i=i)
mem_array.stop_gradient = False
......@@ -383,7 +383,7 @@ class TestApiWhileLoop_NestedWithBackwardAndLoDTensorArray(unittest.TestCase):
paddle.tensor.array_write(d1, i, array=data_array)
i = paddle.increment(i)
paddle.tensor.array_write(d2, i, array=data_array)
i = layers.zeros(shape=[1], dtype='int64')
i = paddle.zeros(shape=[1], dtype='int64')
i.stop_gradient = True
array_len = paddle.tensor.fill_constant(
shape=[1], dtype='int64', value=1
......
......@@ -18,7 +18,7 @@ import numpy
import paddle
from paddle import fluid
from paddle.fluid import core, layers
from paddle.fluid import core
from paddle.fluid.backward import append_backward
from paddle.fluid.executor import Executor
......@@ -30,16 +30,16 @@ class TestWhileOp(unittest.TestCase):
d0 = paddle.static.data("d0", shape=[10], dtype='float32')
d1 = paddle.static.data("d1", shape=[10], dtype='float32')
d2 = paddle.static.data("d2", shape=[10], dtype='float32')
i = layers.zeros(shape=[1], dtype='int64')
i = paddle.zeros(shape=[1], dtype='int64')
i.stop_gradient = True
init = layers.zeros(shape=[10], dtype='float32')
init = paddle.zeros(shape=[10], dtype='float32')
mem_array = paddle.tensor.array_write(x=init, i=i)
data_array = paddle.tensor.array_write(x=d0, i=i)
i = paddle.increment(i)
paddle.tensor.array_write(d1, i, array=data_array)
i = paddle.increment(i)
paddle.tensor.array_write(d2, i, array=data_array)
i = layers.zeros(shape=[1], dtype='int64')
i = paddle.zeros(shape=[1], dtype='int64')
i.stop_gradient = True
array_len = paddle.tensor.fill_constant(
shape=[1], dtype='int64', value=1
......@@ -115,7 +115,7 @@ class TestWhileOp(unittest.TestCase):
exe.run(binary, feed={'d0': d[0], 'd1': d[1], 'd2': d[2]})
def test_exceptions(self):
i = layers.zeros(shape=[2], dtype='int64')
i = paddle.zeros(shape=[2], dtype='int64')
array_len = paddle.tensor.fill_constant(
shape=[2], dtype='int64', value=1
)
......
......@@ -54,7 +54,7 @@ class ApiZerosTest(unittest.TestCase):
def test_fluid_out(self):
with program_guard(Program()):
zeros = fluid.layers.zeros(shape=[10], dtype='int64')
zeros = paddle.zeros(shape=[10], dtype='int64')
place = paddle.CPUPlace()
exe = paddle.static.Executor(place)
(result,) = exe.run(fetch_list=[zeros])
......@@ -66,7 +66,7 @@ class ApiZerosError(unittest.TestCase):
def test_errors(self):
def test_error1():
with paddle.static.program_guard(fluid.Program()):
ones = fluid.layers.zeros(shape=10, dtype='int64')
ones = paddle.zeros(shape=10, dtype='int64')
self.assertRaises(TypeError, test_error1)
......
......@@ -18,7 +18,6 @@ import numpy
import paddle
from paddle import fluid
from paddle.fluid import layers
from paddle.fluid.backward import append_backward
from paddle.fluid.executor import Executor
......@@ -30,16 +29,16 @@ class TestWhileOp(unittest.TestCase):
d0 = paddle.static.data("d0", shape=[10], dtype='float32')
d1 = paddle.static.data("d1", shape=[10], dtype='float32')
d2 = paddle.static.data("d2", shape=[10], dtype='float32')
i = layers.zeros(shape=[1], dtype='int64')
i = paddle.zeros(shape=[1], dtype='int64')
i.stop_gradient = True
init = layers.zeros(shape=[10], dtype='float32')
init = paddle.zeros(shape=[10], dtype='float32')
mem_array = paddle.tensor.array_write(x=init, i=i)
data_array = paddle.tensor.array_write(x=d0, i=i)
i = paddle.increment(i)
paddle.tensor.array_write(d1, i, array=data_array)
i = paddle.increment(i)
paddle.tensor.array_write(d2, i, array=data_array)
i = layers.zeros(shape=[1], dtype='int64')
i = paddle.zeros(shape=[1], dtype='int64')
i.stop_gradient = True
array_len = paddle.tensor.fill_constant(
shape=[1], dtype='int64', value=1
......@@ -115,7 +114,7 @@ class TestWhileOp(unittest.TestCase):
exe.run(binary, feed={'d0': d[0], 'd1': d[1], 'd2': d[2]})
def test_exceptions(self):
i = layers.zeros(shape=[2], dtype='int64')
i = paddle.zeros(shape=[2], dtype='int64')
array_len = paddle.tensor.fill_constant(
shape=[2], dtype='int64', value=1
)
......
......@@ -219,7 +219,7 @@ class BaseModel(paddle.nn.Layer):
enc_cell_0 = to_variable(
np.zeros((self.batch_size, self.hidden_size), dtype='float32')
)
zero = fluid.layers.zeros(shape=[1], dtype="int64")
zero = paddle.zeros(shape=[1], dtype="int64")
enc_hidden = paddle.tensor.create_array(dtype="float32")
enc_cell = paddle.tensor.create_array(dtype="float32")
for i in range(self.num_layers):
......@@ -321,7 +321,7 @@ class BaseModel(paddle.nn.Layer):
enc_cell_0 = to_variable(
np.zeros((self.batch_size, self.hidden_size), dtype='float32')
)
zero = fluid.layers.zeros(shape=[1], dtype="int64")
zero = paddle.zeros(shape=[1], dtype="int64")
enc_hidden = paddle.tensor.create_array(dtype="float32")
enc_cell = paddle.tensor.create_array(dtype="float32")
for j in range(self.num_layers):
......@@ -738,7 +738,7 @@ class AttentionModel(paddle.nn.Layer):
np.zeros((self.batch_size, self.hidden_size), dtype='float32')
)
enc_hidden_0.stop_gradient = True
zero = fluid.layers.zeros(shape=[1], dtype="int64")
zero = paddle.zeros(shape=[1], dtype="int64")
enc_hidden = paddle.tensor.create_array(dtype="float32")
enc_cell = paddle.tensor.create_array(dtype="float32")
for i in range(self.num_layers):
......
......@@ -168,7 +168,7 @@ def while_loop_class_var(x):
foo = Foo()
i = fluid.dygraph.to_variable(x)
while i < 10:
foo.b = fluid.layers.zeros(shape=[1], dtype='float32')
foo.b = paddle.zeros(shape=[1], dtype='float32')
foo.c = foo.b + foo.a
i += 1
if foo.c < 0:
......
......@@ -82,7 +82,7 @@ def while_loop_dyfunc_with_none(x):
def for_loop_dyfunc(max_len):
for i in range(max_len):
ret = fluid.layers.zeros(shape=[1], dtype='float32')
ret = paddle.zeros(shape=[1], dtype='float32')
paddle.increment(ret, value=2.0)
return ret
......@@ -102,21 +102,21 @@ def for_loop_dyfunc2(max_len):
def for_loop_dyfunc3(max_len):
ret = fluid.layers.zeros(shape=[1], dtype='float32')
ret = paddle.zeros(shape=[1], dtype='float32')
for i in range(1, 10, 2):
paddle.increment(ret, value=2.0)
return ret
def for_loop_dyfunc4(max_len):
ret = fluid.layers.zeros(shape=[1], dtype='float32')
ret = paddle.zeros(shape=[1], dtype='float32')
for i in range(10, 1, -2):
paddle.increment(ret, value=2.0)
return ret
def for_loop_dyfunc_not_support(max_len):
ret = fluid.layers.zeros(shape=[1], dtype='float32')
ret = paddle.zeros(shape=[1], dtype='float32')
a = -2
for i in range(10, 1, a):
paddle.increment(ret, value=2.0)
......@@ -163,7 +163,7 @@ def while_loop_class_var(x):
foo = Foo()
i = fluid.dygraph.to_variable(x)
while i < 10:
foo.b = fluid.layers.zeros(shape=[1], dtype='float32')
foo.b = paddle.zeros(shape=[1], dtype='float32')
foo.c = foo.b + foo.a
i += 1
return foo.c
......@@ -194,14 +194,14 @@ def for_loop_class_var(max_len):
)
for i in range(max_len):
foo.b = fluid.layers.zeros(shape=[1], dtype='float32')
foo.b = paddle.zeros(shape=[1], dtype='float32')
foo.c = foo.b + foo.a
return foo.c
def var_create_in_for_loop(max_len):
for i in range(max_len):
ret = fluid.layers.zeros(shape=[3, 4, 5], dtype='float64')
ret = paddle.zeros(shape=[3, 4, 5], dtype='float64')
return ret
......@@ -213,7 +213,7 @@ def nested_for_loop_dyfunc():
a = 2 + j
for i in range(three):
b = fluid.layers.zeros(shape=[1], dtype='float32')
b = paddle.zeros(shape=[1], dtype='float32')
return b
......
......@@ -24,7 +24,6 @@ from paddle.fluid import core, framework
from paddle.incubate.autograd import primapi
from paddle.nn import BatchNorm
from paddle.tensor import ones # noqa: F401
from paddle.tensor import zeros # noqa: F401
np.random.seed(2023)
......