Unverified commit f6d4ae3d authored by risemeup1, committed by GitHub

[FLUID_API_CLEAN]remove zeros (#52536)

* remove zeros

* remove zeros

* apply gcc12 to py3

* apply gcc12 to py3

* fluid api clear

* fluid api clean

* fluid api clean
Parent commit: f3e8c4be
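The diff below swaps every call to the legacy `fluid.layers.zeros` (and the internal `tensor.zeros`) for the public `paddle.zeros` API. A minimal sketch of the migration pattern, assuming a Paddle 2.x install where `paddle.zeros(shape, dtype=None, name=None)` is available:

```python
import paddle

# Before (removed in this PR):
#   i = fluid.layers.zeros(shape=[1], dtype='int64')
# After:
i = paddle.zeros(shape=[1], dtype='int64')        # 1-D int64 tensor holding a single 0
init = paddle.zeros(shape=[10], dtype='float32')  # 1-D float32 tensor of ten zeros
```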
@@ -190,7 +190,7 @@ class Normal(distribution.Distribution):
         output_shape = shape + batch_shape
         output = random.gaussian(
             output_shape, mean=0.0, std=1.0, seed=seed, dtype=self.dtype
-        ) * (tensor.zeros(output_shape, dtype=self.dtype) + self.scale)
+        ) * (paddle.zeros(output_shape, dtype=self.dtype) + self.scale)
         output = paddle.add(output, self.loc, name=name)
         if self.all_arg_is_float:
             return paddle.reshape(output, shape, name=name)
...
@@ -185,7 +185,7 @@ class Uniform(distribution.Distribution):
         output = paddle.uniform(
             output_shape, dtype=self.dtype, min=0.0, max=1.0, seed=seed
         ) * (
-            tensor.zeros(output_shape, dtype=self.dtype)
+            paddle.zeros(output_shape, dtype=self.dtype)
             + (self.high - self.low)
         )
         output = paddle.add(output, self.low, name=name)
...
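In the two samplers above, the zero tensor is only a broadcasting helper: adding the scale (or the `high - low` range) to `paddle.zeros(output_shape, ...)` expands it to `output_shape` before it multiplies the unit samples. A small illustrative sketch with made-up shapes and values (not taken from the Paddle source):

```python
import paddle

output_shape = [2, 3]             # hypothetical sample shape
scale = paddle.to_tensor(0.5)     # hypothetical scalar scale
unit = paddle.randn(output_shape) # standard normal samples
# zeros + scale broadcasts the scale to output_shape, mirroring the Normal sampler above
sample = unit * (paddle.zeros(output_shape, dtype='float32') + scale)
print(sample.shape)               # [2, 3]
```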
@@ -41,7 +41,7 @@ from .layer_function_generator import (
     templatedoc,
     _generate_doc_string_,
 )
-from .tensor import zeros
 from .. import unique_name
 from .. import core
 from ...utils import deprecated
...
@@ -39,7 +39,6 @@ from paddle import _C_ops, _legacy_C_ops
 __all__ = [
     'fill_constant_batch_size_like',
-    'zeros',
 ]
@@ -124,40 +123,3 @@ def fill_constant_batch_size_like(
     )
     out.stop_gradient = True
     return out
-
-
-def zeros(shape, dtype, force_cpu=False, name=None):
-    """
-    The OP creates a tensor of specified :attr:`shape` and :attr:`dtype`, and fills it with 0.
-    Its :attr:`stop_gradient` will be set to True to stop gradient computation.
-
-    Parameters:
-        shape(tuple|list|Tensor): Shape of output Tensor, the data type of ``shape`` is int32 or int64.
-        dtype (np.dtype|str): Data type of output Tensor, it supports
-            bool, float16, float32, float64, int32 and int64.
-        force_cpu (bool, optional): Whether force to store the output Tensor in CPU memory.
-            If :attr:`force_cpu` is False, the output Tensor will be stored in running device memory.
-            Default: False.
-        name(str, optional): The default value is None. Normally there is no need for user to set this
-            property. For more information, please refer to :ref:`api_guide_Name`.
-
-    Returns:
-        Tensor: A tensor of data type :attr:`dtype` with shape :attr:`shape` and all elements set to 0.
-
-    Examples:
-        .. code-block:: python
-
-            import paddle.fluid as fluid
-            import paddle
-
-            data = fluid.layers.zeros(shape=[3, 2], dtype='float32')  # [[0., 0.], [0., 0.], [0., 0.]]
-
-            # shape is a Tensor
-            shape = paddle.full(shape=[2], dtype='int32', fill_value=2)
-            data1 = fluid.layers.zeros(shape=shape, dtype='int32')  # [[0, 0], [0, 0]]
-    """
-    # TODO: remove zeros
-    from paddle.tensor import fill_constant
-
-    return fill_constant(
-        value=0.0, shape=shape, dtype=dtype, force_cpu=force_cpu, name=name
-    )
@@ -18,14 +18,14 @@ import numpy as np
 import paddle
 from paddle import fluid
-from paddle.fluid import Program, core, layers, program_guard
+from paddle.fluid import Program, core, program_guard
 from paddle.fluid.backward import append_backward
 from paddle.fluid.executor import Executor
 from paddle.fluid.framework import default_main_program

 def _test_read_write(x):
-    i = layers.zeros(shape=[1], dtype='int64')
+    i = paddle.zeros(shape=[1], dtype='int64')
     i.stop_gradient = False
     arr = paddle.tensor.array_write(x=x[0], i=i)
     i = paddle.increment(x=i)
@@ -33,7 +33,7 @@ def _test_read_write(x):
     i = paddle.increment(x=i)
     arr = paddle.tensor.array_write(x=x[2], i=i, array=arr)
-    i = layers.zeros(shape=[1], dtype='int64')
+    i = paddle.zeros(shape=[1], dtype='int64')
     i.stop_gradient = False
     a0 = paddle.tensor.array_read(array=arr, i=i)
     i = paddle.increment(x=i)
...
@@ -22,7 +22,7 @@ import numpy
 import paddle
 from paddle import fluid
-from paddle.fluid import core, layers
+from paddle.fluid import core
 from paddle.fluid.executor import Executor

 paddle.enable_static()
@@ -56,10 +56,10 @@ class TestEagerDeletionWhileOpBase(unittest.TestCase):
         d1 = paddle.static.data("d1", shape=[-1, 10], dtype='float32')
         d2 = paddle.static.data("d2", shape=[-1, 10], dtype='float32')
-        i = layers.zeros(shape=[1], dtype='int64')
+        i = paddle.zeros(shape=[1], dtype='int64')
         i.stop_gradient = True
-        init = layers.zeros(shape=[10], dtype='float32')
+        init = paddle.zeros(shape=[10], dtype='float32')
         mem_array = paddle.tensor.array_write(x=init, i=i)
         data_array = paddle.tensor.array_write(x=d0, i=i)
@@ -69,7 +69,7 @@ class TestEagerDeletionWhileOpBase(unittest.TestCase):
         i = paddle.increment(i)
         paddle.tensor.array_write(d2, i, array=data_array)
-        i = layers.zeros(shape=[1], dtype='int64')
+        i = paddle.zeros(shape=[1], dtype='int64')
         i.stop_gradient = True
         array_len = paddle.tensor.fill_constant(
...
@@ -18,14 +18,13 @@ import numpy as np
 import paddle
 from paddle.fluid.executor import Executor
-from paddle.fluid.layers import zeros
 from paddle.static import data
 from paddle.tensor import array_write

 class TestExecutor(unittest.TestCase):
     def test_mul(self):
-        i = zeros(shape=[1], dtype='int64')
+        i = paddle.zeros(shape=[1], dtype='int64')
         a = data(name='a', shape=[-1, 784], dtype='float32')
         array = array_write(x=a, i=i)
...
@@ -19,14 +19,13 @@ from simple_nets import simple_fc_net, simple_fc_net_with_inputs
 import paddle
 from paddle import fluid
-from paddle.fluid import layers

 class TestFetchLoDTensorArray(unittest.TestCase):
     def build_program(self, main_program, startup_program):
         with fluid.unique_name.guard():
             with fluid.program_guard(main_program, startup_program):
-                i = layers.zeros(shape=[1], dtype='int64')
+                i = paddle.zeros(shape=[1], dtype='int64')
                 img = paddle.static.data(
                     name='image', shape=[-1, 784], dtype='float32'
                 )
...
@@ -17,13 +17,13 @@ import unittest
 import numpy

 import paddle
-from paddle.fluid import Program, core, layers, program_guard
+from paddle.fluid import Program, core, program_guard
 from paddle.fluid.executor import Executor

 class TestLoDArrayLength(unittest.TestCase):
     def test_array_length(self):
-        tmp = layers.zeros(shape=[10], dtype='int32')
+        tmp = paddle.zeros(shape=[10], dtype='int32')
         i = paddle.tensor.fill_constant(shape=[1], dtype='int64', value=10)
         arr = paddle.tensor.array_write(tmp, i=i)
         arr_len = paddle.tensor.array_length(arr)
...
@@ -236,7 +236,7 @@ class TestMathOpPatches(unittest.TestCase):
         b = paddle.static.data(name="b", shape=[-1, 1], dtype='float32')
         b.desc.set_need_check_feed(False)
         one = paddle.ones(shape=[1], dtype='int32')
-        zero = fluid.layers.zeros(shape=[1], dtype='int32')
+        zero = paddle.zeros(shape=[1], dtype='int32')
         cond = one == zero
         c = paddle.static.nn.cond(cond, lambda: a + b, lambda: a - b)
...
@@ -20,7 +20,7 @@ import numpy as np
 import paddle
 from paddle import fluid, utils
-from paddle.fluid import core, layers, profiler
+from paddle.fluid import core, profiler
 from paddle.fluid.proto.profiler import profiler_pb2
 from paddle.utils.flops import flops
@@ -38,9 +38,9 @@ class TestProfiler(unittest.TestCase):
             name='x', shape=[-1, 784], dtype='float32'
         )
         hidden1 = paddle.static.nn.fc(x=image, size=64, activation='relu')
-        i = layers.zeros(shape=[1], dtype='int64')
-        counter = fluid.layers.zeros(
-            shape=[1], dtype='int64', force_cpu=True
+        i = paddle.zeros(shape=[1], dtype='int64')
+        counter = paddle.tensor.fill_constant(
+            shape=[1], dtype='int64', value=0, force_cpu=True
         )
         until = paddle.tensor.fill_constant([1], dtype='int64', value=10)
         data_arr = paddle.tensor.array_write(hidden1, i)
...
@@ -19,7 +19,7 @@ import numpy as np
 import paddle
 import paddle.nn.functional as F
 from paddle import fluid
-from paddle.fluid import core, layers
+from paddle.fluid import core
 from paddle.fluid.backward import append_backward
 from paddle.fluid.framework import Program, program_guard
@@ -67,7 +67,7 @@ class TestApiWhileLoop(unittest.TestCase):
         main_program = Program()
         startup_program = Program()
         with program_guard(main_program, startup_program):
-            i = layers.zeros(shape=[1], dtype='int64')
+            i = paddle.zeros(shape=[1], dtype='int64')
             ten = paddle.tensor.fill_constant(
                 shape=[1], dtype='int64', value=10
             )
@@ -112,7 +112,7 @@ class TestApiWhileLoop(unittest.TestCase):
         main_program = Program()
         startup_program = Program()
         with program_guard(main_program, startup_program):
-            i = layers.zeros(shape=[1], dtype='int64')
+            i = paddle.zeros(shape=[1], dtype='int64')
             ten = paddle.tensor.fill_constant(
                 shape=[1], dtype='int64', value=10
             )
@@ -202,8 +202,8 @@ class TestApiWhileLoop_Nested(unittest.TestCase):
         main_program = Program()
         startup_program = Program()
         with program_guard(main_program, startup_program):
-            i = layers.zeros(shape=[1], dtype='int64')
-            j = layers.zeros(shape=[1], dtype='int64')
+            i = paddle.zeros(shape=[1], dtype='int64')
+            j = paddle.zeros(shape=[1], dtype='int64')
             init = paddle.static.data(
                 name='init', shape=[3, 3], dtype='float32'
             )
@@ -373,9 +373,9 @@ class TestApiWhileLoop_NestedWithBackwardAndLoDTensorArray(unittest.TestCase):
             d2 = paddle.static.data(name='d2', shape=[10], dtype='float32')
             x = paddle.static.data(name='x', shape=[10], dtype='float32')
             x.stop_gradient = False
-            i = layers.zeros(shape=[1], dtype='int64')
+            i = paddle.zeros(shape=[1], dtype='int64')
             i.stop_gradient = True
-            init = layers.zeros(shape=[10], dtype='float32')
+            init = paddle.zeros(shape=[10], dtype='float32')
             mem_array = paddle.tensor.array_write(x=init, i=i)
             data_array = paddle.tensor.array_write(x=d0, i=i)
             mem_array.stop_gradient = False
@@ -383,7 +383,7 @@ class TestApiWhileLoop_NestedWithBackwardAndLoDTensorArray(unittest.TestCase):
             paddle.tensor.array_write(d1, i, array=data_array)
             i = paddle.increment(i)
             paddle.tensor.array_write(d2, i, array=data_array)
-            i = layers.zeros(shape=[1], dtype='int64')
+            i = paddle.zeros(shape=[1], dtype='int64')
             i.stop_gradient = True
             array_len = paddle.tensor.fill_constant(
                 shape=[1], dtype='int64', value=1
...
@@ -18,7 +18,7 @@ import numpy
 import paddle
 from paddle import fluid
-from paddle.fluid import core, layers
+from paddle.fluid import core
 from paddle.fluid.backward import append_backward
 from paddle.fluid.executor import Executor
@@ -30,16 +30,16 @@ class TestWhileOp(unittest.TestCase):
         d0 = paddle.static.data("d0", shape=[10], dtype='float32')
         d1 = paddle.static.data("d1", shape=[10], dtype='float32')
         d2 = paddle.static.data("d2", shape=[10], dtype='float32')
-        i = layers.zeros(shape=[1], dtype='int64')
+        i = paddle.zeros(shape=[1], dtype='int64')
         i.stop_gradient = True
-        init = layers.zeros(shape=[10], dtype='float32')
+        init = paddle.zeros(shape=[10], dtype='float32')
         mem_array = paddle.tensor.array_write(x=init, i=i)
         data_array = paddle.tensor.array_write(x=d0, i=i)
         i = paddle.increment(i)
         paddle.tensor.array_write(d1, i, array=data_array)
         i = paddle.increment(i)
         paddle.tensor.array_write(d2, i, array=data_array)
-        i = layers.zeros(shape=[1], dtype='int64')
+        i = paddle.zeros(shape=[1], dtype='int64')
         i.stop_gradient = True
         array_len = paddle.tensor.fill_constant(
             shape=[1], dtype='int64', value=1
@@ -115,7 +115,7 @@ class TestWhileOp(unittest.TestCase):
         exe.run(binary, feed={'d0': d[0], 'd1': d[1], 'd2': d[2]})

     def test_exceptions(self):
-        i = layers.zeros(shape=[2], dtype='int64')
+        i = paddle.zeros(shape=[2], dtype='int64')
         array_len = paddle.tensor.fill_constant(
             shape=[2], dtype='int64', value=1
         )
...
@@ -54,7 +54,7 @@ class ApiZerosTest(unittest.TestCase):
     def test_fluid_out(self):
         with program_guard(Program()):
-            zeros = fluid.layers.zeros(shape=[10], dtype='int64')
+            zeros = paddle.zeros(shape=[10], dtype='int64')
             place = paddle.CPUPlace()
             exe = paddle.static.Executor(place)
             (result,) = exe.run(fetch_list=[zeros])
@@ -66,7 +66,7 @@ class ApiZerosError(unittest.TestCase):
     def test_errors(self):
         def test_error1():
             with paddle.static.program_guard(fluid.Program()):
-                ones = fluid.layers.zeros(shape=10, dtype='int64')
+                ones = paddle.zeros(shape=10, dtype='int64')

         self.assertRaises(TypeError, test_error1)
...
@@ -18,7 +18,6 @@ import numpy
 import paddle
 from paddle import fluid
-from paddle.fluid import layers
 from paddle.fluid.backward import append_backward
 from paddle.fluid.executor import Executor
@@ -30,16 +29,16 @@ class TestWhileOp(unittest.TestCase):
         d0 = paddle.static.data("d0", shape=[10], dtype='float32')
         d1 = paddle.static.data("d1", shape=[10], dtype='float32')
         d2 = paddle.static.data("d2", shape=[10], dtype='float32')
-        i = layers.zeros(shape=[1], dtype='int64')
+        i = paddle.zeros(shape=[1], dtype='int64')
         i.stop_gradient = True
-        init = layers.zeros(shape=[10], dtype='float32')
+        init = paddle.zeros(shape=[10], dtype='float32')
         mem_array = paddle.tensor.array_write(x=init, i=i)
         data_array = paddle.tensor.array_write(x=d0, i=i)
         i = paddle.increment(i)
         paddle.tensor.array_write(d1, i, array=data_array)
         i = paddle.increment(i)
         paddle.tensor.array_write(d2, i, array=data_array)
-        i = layers.zeros(shape=[1], dtype='int64')
+        i = paddle.zeros(shape=[1], dtype='int64')
         i.stop_gradient = True
         array_len = paddle.tensor.fill_constant(
             shape=[1], dtype='int64', value=1
@@ -115,7 +114,7 @@ class TestWhileOp(unittest.TestCase):
         exe.run(binary, feed={'d0': d[0], 'd1': d[1], 'd2': d[2]})

     def test_exceptions(self):
-        i = layers.zeros(shape=[2], dtype='int64')
+        i = paddle.zeros(shape=[2], dtype='int64')
         array_len = paddle.tensor.fill_constant(
             shape=[2], dtype='int64', value=1
         )
...
@@ -219,7 +219,7 @@ class BaseModel(paddle.nn.Layer):
         enc_cell_0 = to_variable(
             np.zeros((self.batch_size, self.hidden_size), dtype='float32')
         )
-        zero = fluid.layers.zeros(shape=[1], dtype="int64")
+        zero = paddle.zeros(shape=[1], dtype="int64")
         enc_hidden = paddle.tensor.create_array(dtype="float32")
         enc_cell = paddle.tensor.create_array(dtype="float32")
         for i in range(self.num_layers):
@@ -321,7 +321,7 @@ class BaseModel(paddle.nn.Layer):
         enc_cell_0 = to_variable(
             np.zeros((self.batch_size, self.hidden_size), dtype='float32')
         )
-        zero = fluid.layers.zeros(shape=[1], dtype="int64")
+        zero = paddle.zeros(shape=[1], dtype="int64")
         enc_hidden = paddle.tensor.create_array(dtype="float32")
         enc_cell = paddle.tensor.create_array(dtype="float32")
         for j in range(self.num_layers):
@@ -738,7 +738,7 @@ class AttentionModel(paddle.nn.Layer):
             np.zeros((self.batch_size, self.hidden_size), dtype='float32')
         )
         enc_hidden_0.stop_gradient = True
-        zero = fluid.layers.zeros(shape=[1], dtype="int64")
+        zero = paddle.zeros(shape=[1], dtype="int64")
         enc_hidden = paddle.tensor.create_array(dtype="float32")
         enc_cell = paddle.tensor.create_array(dtype="float32")
         for i in range(self.num_layers):
...
@@ -168,7 +168,7 @@ def while_loop_class_var(x):
     foo = Foo()
     i = fluid.dygraph.to_variable(x)
     while i < 10:
-        foo.b = fluid.layers.zeros(shape=[1], dtype='float32')
+        foo.b = paddle.zeros(shape=[1], dtype='float32')
         foo.c = foo.b + foo.a
         i += 1
         if foo.c < 0:
...
@@ -82,7 +82,7 @@ def while_loop_dyfunc_with_none(x):
 def for_loop_dyfunc(max_len):
     for i in range(max_len):
-        ret = fluid.layers.zeros(shape=[1], dtype='float32')
+        ret = paddle.zeros(shape=[1], dtype='float32')
         paddle.increment(ret, value=2.0)
     return ret
@@ -102,21 +102,21 @@ def for_loop_dyfunc2(max_len):
 def for_loop_dyfunc3(max_len):
-    ret = fluid.layers.zeros(shape=[1], dtype='float32')
+    ret = paddle.zeros(shape=[1], dtype='float32')
     for i in range(1, 10, 2):
         paddle.increment(ret, value=2.0)
     return ret

 def for_loop_dyfunc4(max_len):
-    ret = fluid.layers.zeros(shape=[1], dtype='float32')
+    ret = paddle.zeros(shape=[1], dtype='float32')
     for i in range(10, 1, -2):
         paddle.increment(ret, value=2.0)
     return ret

 def for_loop_dyfunc_not_support(max_len):
-    ret = fluid.layers.zeros(shape=[1], dtype='float32')
+    ret = paddle.zeros(shape=[1], dtype='float32')
     a = -2
     for i in range(10, 1, a):
         paddle.increment(ret, value=2.0)
@@ -163,7 +163,7 @@ def while_loop_class_var(x):
     foo = Foo()
     i = fluid.dygraph.to_variable(x)
     while i < 10:
-        foo.b = fluid.layers.zeros(shape=[1], dtype='float32')
+        foo.b = paddle.zeros(shape=[1], dtype='float32')
         foo.c = foo.b + foo.a
         i += 1
     return foo.c
@@ -194,14 +194,14 @@ def for_loop_class_var(max_len):
     )
     for i in range(max_len):
-        foo.b = fluid.layers.zeros(shape=[1], dtype='float32')
+        foo.b = paddle.zeros(shape=[1], dtype='float32')
         foo.c = foo.b + foo.a
     return foo.c

 def var_create_in_for_loop(max_len):
     for i in range(max_len):
-        ret = fluid.layers.zeros(shape=[3, 4, 5], dtype='float64')
+        ret = paddle.zeros(shape=[3, 4, 5], dtype='float64')
     return ret
@@ -213,7 +213,7 @@ def nested_for_loop_dyfunc():
         a = 2 + j
         for i in range(three):
-            b = fluid.layers.zeros(shape=[1], dtype='float32')
+            b = paddle.zeros(shape=[1], dtype='float32')
     return b
...
@@ -24,7 +24,6 @@ from paddle.fluid import core, framework
 from paddle.incubate.autograd import primapi
 from paddle.nn import BatchNorm
 from paddle.tensor import ones  # noqa: F401
-from paddle.tensor import zeros  # noqa: F401

 np.random.seed(2023)
...