Unverified · Commit 86e990d4 · Authored by zqw_1997 · Committed by GitHub

Remove paddle.fluid.layers.utils.* (#51033)

* move fluid.utils to paddle.utils.layers_utils

* fix error

* delete original fluid layers utils

* remove import and old utils

* remove more old utils import

* change import path of fill_constant in the layers_utils.py

* fix mistake

* fix error

* expose in __init__.py

* for comment

* when changing the ref of func is_sequence, it should point to the root is_sequence instead

* for codecheck
Parent: b7e4d974
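The change applied throughout this diff is mechanical: every helper that used to be imported from paddle.fluid.layers.utils (flatten, map_structure, pack_sequence_as, _hash_with_id, convert_to_list, and so on) is now reached through the public paddle.utils namespace instead. A minimal before/after sketch of a typical call site (the nested structure below is illustrative, not taken from the diff):

    import paddle

    # Old style, removed by this commit:
    #     from paddle.fluid.layers.utils import flatten, map_structure
    #     flat = flatten(nested)

    # New style: the same nest helpers are exposed under paddle.utils.
    nested = [paddle.to_tensor([1.0]), (paddle.to_tensor([2.0]), paddle.to_tensor([3.0]))]
    flat = paddle.utils.flatten(nested)                            # flat list of 3 tensors
    doubled = paddle.utils.map_structure(lambda x: x * 2, nested)  # keeps the list/tuple nesting
    restored = paddle.utils.pack_sequence_as(nested, flat)         # rebuilds the original structure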
@@ -33,7 +33,7 @@ from paddle.fluid.framework import (  # noqa: F401
     in_dygraph_mode,
 )
 from paddle.fluid.layer_helper import LayerHelper  # noqa: F401
-from paddle.fluid.layers import fill_constant, utils  # noqa: F401
+from paddle.fluid.layers import fill_constant  # noqa: F401
 from paddle.fluid.layers.layer_function_generator import (  # noqa: F401
     templatedoc,
 )
......
@@ -22,7 +22,6 @@ from paddle.fluid.core import (
     is_compiled_with_cuda,
     is_compiled_with_rocm,
 )
-from paddle.fluid.layers.utils import _hash_with_id
 if is_compiled_with_cuda() and not is_compiled_with_rocm():
     from paddle.fluid.core import CUDAGraph as CoreCUDAGraph
@@ -395,7 +394,7 @@ def replace_cuda_graph_section(
         stop_gradient=True,
     )
-    program_id = _hash_with_id(section_program, ins_and_outs)
+    program_id = paddle.utils._hash_with_id(section_program, ins_and_outs)
     # insert the run_program_op into the block
     origin_block._insert_op(
......
@@ -27,7 +27,6 @@ import paddle.utils as utils
 from paddle import static
 from paddle.distributed import fleet
 from paddle.fluid.executor import _to_name_str
-from paddle.fluid.layers.utils import flatten
 from paddle.framework import IrGraph
 from paddle.framework import _current_expected_place as _get_device
 from paddle.framework import core, in_dygraph_mode
@@ -602,7 +601,7 @@ class Engine:
         feed_vars = {"inputs": self._inputs, "labels": self._labels}
         fetch_vars = {
-            "outputs": flatten(outputs),
+            "outputs": paddle.utils.flatten(outputs),
             "loss": self._losses,
             "metrics": metrics,
         }
......
@@ -15,7 +15,6 @@
 from functools import reduce
 import paddle
-import paddle.fluid.layers.utils as utils
 from paddle.distributed.fleet.meta_optimizers.common import OpRole
 from paddle.framework import LayerHelper, OpProtoHolder, Program, core
 from paddle.utils import unique_name
@@ -589,7 +588,7 @@ class Inserter:
         attrs['value'] = int("1")
         attrs['dtype'] = out.dtype
         attrs['op_role'] = op_role
-        utils.get_shape_tensor_inputs(
+        paddle.utils.get_shape_tensor_inputs(
             inputs=inputs, attrs=attrs, shape=[0], op_type='fill_constant'
         )
         fillconstant_op = block._insert_op(
......
@@ -22,7 +22,6 @@ import inspect
 import numpy as np
 import paddle
 from paddle.fluid.layer_helper import LayerHelper
-from paddle.fluid.layers import utils
 from ... import unique_name
 from paddle.fluid.data_feeder import (
     check_variable_and_dtype,
......
@@ -25,12 +25,11 @@ from ..framework import (
     in_dygraph_mode,
 )
 from ..layer_helper import LayerHelper, unique_name
-from .utils import (
+from ...utils import (
     assert_same_structure,
     map_structure,
     hold_mutable_vars,
     copy_mutable_vars,
-    padding_to_same_structure,
     is_sequence,
     pack_sequence_as,
     flatten,
......
@@ -42,7 +42,6 @@ from .layer_function_generator import (
     _generate_doc_string_,
 )
 from .tensor import fill_constant, zeros
-from . import utils
 from .. import unique_name
 from .. import core
 from ...utils import deprecated
@@ -735,8 +734,10 @@ def unsqueeze(input, axes, name=None):
         axes.stop_gradient = True
         inputs["AxesTensor"] = axes
     elif isinstance(axes, (list, tuple)):
-        if utils._contain_var(axes):
-            inputs["AxesTensorList"] = utils._convert_to_tensor_list(axes)
+        if paddle.utils._contain_var(axes):
+            inputs["AxesTensorList"] = paddle.utils._convert_to_tensor_list(
+                axes
+            )
         else:
             attrs["axes"] = axes
......
@@ -27,7 +27,6 @@ from ..framework import Variable
 from ..core import VarDesc
 from .. import core
 from .layer_function_generator import templatedoc
-from . import utils
 from ..data_feeder import (
     check_variable_and_dtype,
     check_type,
@@ -36,7 +35,6 @@ from ..data_feeder import (
 )
 from paddle.utils import deprecated
-from .utils import check_shape
 from paddle import _C_ops, _legacy_C_ops
 __all__ = [
@@ -99,7 +97,7 @@ def fill_constant(shape, dtype, value, force_cpu=False, out=None, name=None):
     if force_cpu:
         place = core.CPUPlace()
     if isinstance(shape, (list, tuple)):
-        shape = utils.convert_shape_to_list(shape)
+        shape = paddle.utils.convert_shape_to_list(shape)
     if not isinstance(dtype, core.VarDesc.VarType):
         dtype = convert_np_dtype_to_dtype_(dtype)
@@ -132,7 +130,7 @@ def fill_constant(shape, dtype, value, force_cpu=False, out=None, name=None):
         value = paddle.cast(value, dtype)
         inputs['ValueTensor'] = value
-    check_shape(shape)
+    paddle.utils.check_shape(shape)
     check_dtype(
         dtype,
         'dtype',
@@ -159,7 +157,7 @@ def fill_constant(shape, dtype, value, force_cpu=False, out=None, name=None):
     )
     helper = LayerHelper("fill_constant", **locals())
-    utils.get_shape_tensor_inputs(
+    paddle.utils.get_shape_tensor_inputs(
         inputs=inputs, attrs=attrs, shape=shape, op_type='fill_constant'
     )
......
@@ -16,7 +16,6 @@ import unittest
 import paddle
 from paddle.fluid.layer_helper import LayerHelper
-from paddle.fluid.layers.utils import flatten
 from paddle.incubate.autograd.primrules import _jvp, _transpose
 paddle.enable_static()
@@ -79,14 +78,14 @@ class TestAddPJVPAndTranspose(unittest.TestCase):
         )
         jvp_out = _jvp(op, *self.jvp_args)
-        jvp_out = flatten(jvp_out)
+        jvp_out = paddle.utils.flatten(jvp_out)
         for k, v in self.jvp_out_shape_map.items():
             self.assertEqual(jvp_out[k].shape, v.shape)
         # Some prim ops dont have transpose rule
         if hasattr(self, 'transpose_args'):
             transpose_out = _transpose(op, *self.transpose_args)
-            transpose_out = flatten(transpose_out)
+            transpose_out = paddle.utils.flatten(transpose_out)
             for k, v in self.transpose_out_shape_map.items():
                 self.assertEqual(transpose_out[k].shape, v.shape)
......
@@ -16,7 +16,6 @@ import unittest
 import paddle
 from paddle.fluid.layer_helper import LayerHelper
-from paddle.fluid.layers.utils import flatten
 from paddle.incubate.autograd.primrules import _orig2prim
 paddle.enable_static()
@@ -67,7 +66,7 @@ class TestElementWiseAddOrig2Prim(unittest.TestCase):
         all_ops = [op.type for op in self.main_program.block(0).ops]
         self.assertEqual(sorted(all_ops), sorted(self.all_ops))
-        prim_out = flatten(prim_out)
+        prim_out = paddle.utils.flatten(prim_out)
         for k, v in self.out_map.items():
             self.assertEqual(prim_out[k].shape, v.shape)
......
@@ -16,7 +16,6 @@ import unittest
 import paddle
 from paddle.fluid.layer_helper import LayerHelper
-from paddle.fluid.layers.utils import flatten
 from paddle.incubate.autograd.primrules import _prim2orig
 paddle.enable_static()
@@ -66,7 +65,7 @@ class TestAddPPrim2Orig(unittest.TestCase):
         orig_out = _prim2orig(op, *self.prim2orig_args)
         all_ops = [op.type for op in self.main_program.block(0).ops]
         self.assertEqual(sorted(all_ops), sorted(self.all_ops))
-        orig_out = flatten(orig_out)
+        orig_out = paddle.utils.flatten(orig_out)
         for k, v in self.out_map.items():
             self.assertEqual(k.shape, orig_out[v].shape)
......
@@ -15,7 +15,6 @@
 import unittest
 import paddle
-from paddle.fluid.layers.utils import flatten
 from paddle.incubate.autograd.primx import Transform, orig2prim, prim2orig
 paddle.enable_static()
@@ -157,10 +156,10 @@ class TestAutoGradTransformForAdd(unittest.TestCase):
         xs_dot, ys_dot = ad.linearize(self.orig_xs, self.orig_ys)
         linearize_ops = [op.type for op in self.main_program.block(0).ops]
         self.assertEqual(sorted(linearize_ops), sorted(self.linearize_ops))
-        flatten_xs_dot = flatten(xs_dot)
+        flatten_xs_dot = paddle.utils.flatten(xs_dot)
         for k, v in self.xs_shape_map.items():
             self.assertEqual(flatten_xs_dot[k].shape, v)
-        flatten_ys_dot = flatten(ys_dot)
+        flatten_ys_dot = paddle.utils.flatten(ys_dot)
         for k, v in self.ys_shape_map.items():
             self.assertEqual(flatten_ys_dot[k].shape, v)
@@ -168,12 +167,12 @@ class TestAutoGradTransformForAdd(unittest.TestCase):
         ys_bar, xs_bar = ad.transpose(ys_dot, xs_dot, retain_fwd=False)
         transpose_ops = [op.type for op in self.main_program.block(0).ops]
         self.assertEqual(sorted(transpose_ops), sorted(self.transpose_ops))
-        flatten_xs_bar = flatten(xs_bar)
+        flatten_xs_bar = paddle.utils.flatten(xs_bar)
         for k, v in self.xs_shape_map.items():
             # There may be None in the result of transpose like gather op
             if flatten_xs_bar[k] is not None:
                 self.assertEqual(flatten_xs_bar[k].shape, v)
-        flatten_ys_bar = flatten(ys_bar)
+        flatten_ys_bar = paddle.utils.flatten(ys_bar)
         for k, v in self.ys_shape_map.items():
             self.assertEqual(flatten_ys_bar[k].shape, v)
......
@@ -19,7 +19,6 @@ import numpy as np
 import paddle
 import paddle.fluid as fluid
-from paddle.fluid.layers.utils import map_structure
 SEED = 2020
 np.random.seed(SEED)
@@ -231,7 +230,7 @@ class TestListWithoutControlFlow(unittest.TestCase):
     def varbase_to_numpy(self, res):
         if isinstance(res, (list, tuple)):
-            res = map_structure(lambda x: x.numpy(), res)
+            res = paddle.utils.map_structure(lambda x: x.numpy(), res)
         else:
             res = [res.numpy()]
         return res
......
@@ -19,7 +19,6 @@ from test_fetch_feed import Linear
 import paddle
 import paddle.fluid as fluid
-from paddle.fluid.layers.utils import flatten
 from paddle.jit.api import to_static
 SEED = 2020
@@ -109,10 +108,10 @@ class TestWithNestedOutput(unittest.TestCase):
     def test_nest(self):
         dygraph_res = self._run(to_static=False)
-        dygraph_res = flatten(dygraph_res)
+        dygraph_res = paddle.utils.flatten(dygraph_res)
         static_res = self._run(to_static=True)
-        static_res = flatten(static_res)
+        static_res = paddle.utils.flatten(static_res)
         self.assertTrue(len(dygraph_res) == len(static_res))
......
@@ -19,7 +19,6 @@ import paddle.fluid as fluid
 import paddle.fluid.layers as layers
 import paddle.nn.functional as F
 from paddle.fluid.dygraph import Layer, to_variable
-from paddle.fluid.layers.utils import map_structure
 from paddle.jit.api import dygraph_to_static_func
 from paddle.nn import Linear
@@ -857,13 +856,13 @@ class Transformer(Layer):
             trg_pos = layers.fill_constant(
                 shape=trg_word.shape, dtype="int64", value=i
            )
-            caches = map_structure(
+            caches = paddle.utils.map_structure(
                 merge_batch_beams, caches
             )  # TODO: modified for dygraph2static
             logits = self.decoder(
                 trg_word, trg_pos, None, trg_src_attn_bias, enc_output, caches
             )
-            caches = map_structure(split_batch_beams, caches)
+            caches = paddle.utils.map_structure(split_batch_beams, caches)
             step_log_probs = split_batch_beams(
                 paddle.log(paddle.nn.functional.softmax(logits))
             )
@@ -883,7 +882,7 @@ class Transformer(Layer):
             token_indices = paddle.remainder(topk_indices, vocab_size_tensor)
             # update states
-            caches = map_structure(
+            caches = paddle.utils.map_structure(
                 lambda x: gather(x, beam_indices, batch_pos), caches
             )
             log_probs = gather(log_probs, topk_indices, batch_pos)
......
@@ -2310,7 +2310,9 @@ class OpTest(unittest.TestCase):
             if in_dygraph_mode():
                 core.eager.run_backward(
-                    fluid.layers.utils.flatten(outputs), grad_outputs, False
+                    paddle.utils.flatten(outputs),
+                    grad_outputs,
+                    False,
                 )
                 grad_inputs = []
                 for inputs_list in inputs.values():
@@ -2319,8 +2321,8 @@ class OpTest(unittest.TestCase):
                 return grad_inputs
             else:
                 grad_inputs = paddle.grad(
-                    outputs=fluid.layers.utils.flatten(outputs),
-                    inputs=fluid.layers.utils.flatten(inputs),
+                    outputs=paddle.utils.flatten(outputs),
+                    inputs=paddle.utils.flatten(inputs),
                     grad_outputs=grad_outputs,
                 )
                 return [grad.numpy() for grad in grad_inputs]
......
@@ -24,7 +24,6 @@ import paddle
 from paddle import _C_ops, _legacy_C_ops
 import paddle.fluid as fluid
 from paddle.fluid import core, framework, executor
-from paddle.fluid.layers.utils import _hash_with_id
 from paddle.fluid.framework import global_var
 paddle.enable_static()
@@ -103,7 +102,7 @@ class RunProgramNPUOpTest(unittest.TestCase):
             'end_op_index',
             self.fwd_op_num,
             'program_id',
-            _hash_with_id(self.program_desc, self),
+            paddle.utils._hash_with_id(self.program_desc, self),
         )
     def get_param_grad_names(self):
......
@@ -2618,7 +2618,9 @@ class OpTest(unittest.TestCase):
             if in_dygraph_mode():
                 core.eager.run_backward(
-                    fluid.layers.utils.flatten(outputs), grad_outputs, False
+                    paddle.utils.flatten(outputs),
+                    grad_outputs,
+                    False,
                 )
                 grad_inputs = []
                 for inputs_list in inputs.values():
@@ -2627,8 +2629,8 @@ class OpTest(unittest.TestCase):
                 return grad_inputs
             else:
                 grad_inputs = paddle.grad(
-                    outputs=fluid.layers.utils.flatten(outputs),
-                    inputs=fluid.layers.utils.flatten(inputs),
+                    outputs=paddle.utils.flatten(outputs),
+                    inputs=paddle.utils.flatten(inputs),
                     grad_outputs=grad_outputs,
                 )
                 return [grad.numpy() for grad in grad_inputs]
......
@@ -22,7 +22,6 @@ import numpy as np
 import paddle
 import paddle.fluid.core as core
 from paddle.fluid.framework import _dygraph_tracer, in_dygraph_mode
-from paddle.fluid.layers.utils import map_structure
 from paddle.jit.dy2static.utils import parse_arg_and_kwargs
@@ -430,9 +429,11 @@ class PrimForwardChecker:
             args, len(inputs_sig)
         )
         ret = flatten(_as_list(self.python_api(*args)))
-        ret = map_structure(lambda x: x.numpy(), ret)
+        ret = paddle.utils.map_structure(lambda x: x.numpy(), ret)
         if OpTestUtils.is_bfloat16_type(self.dtype):
-            ret = map_structure(lambda x: convert_uint16_to_float(x), ret)
+            ret = paddle.utils.map_structure(
+                lambda x: convert_uint16_to_float(x), ret
+            )
         return ret
     def get_eager_input_attr_and_inputdict(self, stop_gradient):
@@ -577,7 +578,9 @@ class PrimForwardChecker:
         exe.run(startup_program)
         ret = exe.run(main_program, feed=feed, fetch_list=ret)
         if OpTestUtils.is_bfloat16_type(self.dtype):
-            ret = map_structure(lambda x: convert_uint16_to_float(x), ret)
+            ret = paddle.utils.map_structure(
+                lambda x: convert_uint16_to_float(x), ret
+            )
         # check static forward
         if len(ret) != len(self.eager_desire):
             msg = (
@@ -640,9 +643,11 @@ class PrimForwardChecker:
         net = PrimNet(self.python_api)
         net = apply_to_static(net, False)
         ret = flatten(_as_list(net(args)))
-        ret = map_structure(lambda x: x.numpy(), ret)
+        ret = paddle.utils.map_structure(lambda x: x.numpy(), ret)
         if OpTestUtils.is_bfloat16_type(self.dtype):
-            ret = map_structure(lambda x: convert_uint16_to_float(x), ret)
+            ret = paddle.utils.map_structure(
+                lambda x: convert_uint16_to_float(x), ret
+            )
         # check jit comp forward
         if len(ret) != len(self.eager_desire):
             msg = (
@@ -719,9 +724,11 @@ class PrimForwardChecker:
             net, core.is_compiled_with_cinn() and self.enable_cinn
         )
         ret = flatten(_as_list(net(args)))
-        ret = map_structure(lambda x: x.numpy(), ret)
+        ret = paddle.utils.map_structure(lambda x: x.numpy(), ret)
         if OpTestUtils.is_bfloat16_type(self.dtype):
-            ret = map_structure(lambda x: convert_uint16_to_float(x), ret)
+            ret = paddle.utils.map_structure(
+                lambda x: convert_uint16_to_float(x), ret
+            )
         # check jit comp forward
         if len(ret) != len(self.eager_desire):
             msg = (
@@ -895,9 +902,11 @@ class PrimGradChecker(PrimForwardChecker):
         ret = paddle.grad(
             ys, xs, vs, allow_unused=True, no_grad_vars=no_grad_vars
         )
-        ret = map_structure(lambda x: x.numpy(), ret)
+        ret = paddle.utils.map_structure(lambda x: x.numpy(), ret)
         if OpTestUtils.is_bfloat16_type(self.dtype):
-            ret = map_structure(lambda x: convert_uint16_to_float(x), ret)
+            ret = paddle.utils.map_structure(
+                lambda x: convert_uint16_to_float(x), ret
+            )
         return ret
     def check_eager_comp(self):
@@ -1005,7 +1014,7 @@ class PrimGradChecker(PrimForwardChecker):
         exe.run(startup_program)
         actual_ret = exe.run(main_program, feed=feed, fetch_list=ret)
         if OpTestUtils.is_bfloat16_type(self.dtype):
-            actual_ret = map_structure(
+            actual_ret = paddle.utils.map_structure(
                 lambda x: convert_uint16_to_float(x), actual_ret
             )
         # check static grad out
@@ -1104,9 +1113,11 @@ class PrimGradChecker(PrimForwardChecker):
         ret = paddle.grad(
             ys, xs, vs, allow_unused=True, no_grad_vars=no_grad_vars
         )
-        ret = map_structure(lambda x: x.numpy(), ret)
+        ret = paddle.utils.map_structure(lambda x: x.numpy(), ret)
         if OpTestUtils.is_bfloat16_type(self.dtype):
-            ret = map_structure(lambda x: convert_uint16_to_float(x), ret)
+            ret = paddle.utils.map_structure(
+                lambda x: convert_uint16_to_float(x), ret
+            )
         # check jit comp grad out
         if len(ret) != len(self.eager_desire):
             msg = (
@@ -1216,9 +1227,11 @@ class PrimGradChecker(PrimForwardChecker):
         ret = paddle.grad(
             ys, xs, vs, allow_unused=True, no_grad_vars=no_grad_vars
         )
-        ret = map_structure(lambda x: x.numpy(), ret)
+        ret = paddle.utils.map_structure(lambda x: x.numpy(), ret)
         if OpTestUtils.is_bfloat16_type(self.dtype):
-            ret = map_structure(lambda x: convert_uint16_to_float(x), ret)
+            ret = paddle.utils.map_structure(
+                lambda x: convert_uint16_to_float(x), ret
+            )
         # check jit comp grad out
         if len(ret) != len(self.eager_desire):
             msg = (
......
@@ -53,7 +53,7 @@ class Conv2DTestCase(unittest.TestCase):
         self.padding = padding
         if padding_mode in {'reflect', 'replicate', 'circular'}:
-            _paired_padding = fluid.layers.utils.convert_to_list(
+            _paired_padding = paddle.utils.convert_to_list(
                 padding, 2, 'padding'
             )
             self._reversed_padding_repeated_twice = _reverse_repeat_list(
......
@@ -25,7 +25,6 @@ from paddle.fluid.executor import (
     _is_enable_standalone_executor,
 )
 from paddle.fluid.framework import Variable
-from paddle.fluid.layers.utils import _hash_with_id
 def _append_backward_desc(main_program, outs):
@@ -134,7 +133,7 @@ class TestRunProgram(unittest.TestCase):
             'is_test',
             False,
             'program_id',
-            _hash_with_id(program),
+            paddle.utils._hash_with_id(program),
             'param_grad_names',
             ['Fake_var@GRAD'],
             'out_grad_names',
......
@@ -17,7 +17,6 @@ import unittest
 import numpy as np
 import paddle
-from paddle.fluid.layers import utils
 paddle.enable_static()
@@ -49,9 +48,10 @@ class TestGetInputsOutputsInBlock(unittest.TestCase):
             i = paddle.static.nn.while_loop(while_cond, while_body, [i])
         sub_block = main_program.block(1)
-        inner_inputs, inner_outputs = utils.get_inputs_outputs_in_block(
-            sub_block
-        )
+        (
+            inner_inputs,
+            inner_outputs,
+        ) = paddle.utils.get_inputs_outputs_in_block(sub_block)
         # 'assign_0.tmp_0', 'assign_1.tmp_0' are name of i and ten in program
         self.assertTrue(inner_inputs == {'assign_0.tmp_0', 'assign_1.tmp_0'})
         # 'tmp_0', 'assign_0.tmp_0' are name of i < ten and i in program
@@ -67,9 +67,10 @@ class TestGetInputsOutputsInBlock(unittest.TestCase):
             out = paddle.static.nn.cond(a < b, lambda: a + c, lambda: b * b)
         sub_block = main_program.block(1)
-        inner_inputs, inner_outputs = utils.get_inputs_outputs_in_block(
-            sub_block
-        )
+        (
+            inner_inputs,
+            inner_outputs,
+        ) = paddle.utils.get_inputs_outputs_in_block(sub_block)
         # 'fill_constant_1.tmp_0', 'tmp_3' are names of a, c
         self.assertTrue(inner_inputs == {'fill_constant_1.tmp_0', 'tmp_3'})
         # '_generated_var_1', is name of a + c
......
@@ -24,7 +24,6 @@ import numpy as np
 import paddle
 import paddle.fluid as fluid
 from paddle.fluid import unique_name
-from paddle.fluid.layers.utils import flatten
 from paddle.jit.api import to_static
 from paddle.jit.translated_layer import INFER_PARAMS_INFO_SUFFIX
 from paddle.nn import Linear
@@ -455,14 +454,14 @@ class TestSaveLoadWithNestOut(unittest.TestCase):
         )
         net = LinearNetWithNestOut(8, 8)
-        dy_outs = flatten(net(x))
+        dy_outs = paddle.utils.flatten(net(x))
         net = to_static(net, input_spec=[InputSpec([None, 8], name='x')])
         model_path = os.path.join(self.temp_dir.name, "net_with_nest_out/model")
         paddle.jit.save(net, model_path)
         load_net = paddle.jit.load(model_path)
-        load_outs = flatten(load_net(x))
+        load_outs = paddle.utils.flatten(load_net(x))
         self.assertTrue(len(dy_outs) == 4)
         for dy_out, load_out in zip(dy_outs, load_outs):
......
@@ -22,7 +22,6 @@ from rnn.rnn_numpy import rnn as numpy_rnn
 import paddle
 import paddle.fluid as fluid
 import paddle.fluid.core as core
-import paddle.fluid.layers.utils as utils
 from paddle.fluid import framework
 from paddle.fluid.executor import Executor
 from paddle.fluid.framework import Program, program_guard
@@ -226,11 +225,11 @@ class TestRnnUtil(unittest.TestCase):
     def test_case(self):
         inputs = {"key1": 1, "key2": 2}
         func = lambda x: x + 1
-        outputs = utils.map_structure(func, inputs)
-        utils.assert_same_structure(inputs, outputs)
+        outputs = paddle.utils.map_structure(func, inputs)
+        paddle.utils.assert_same_structure(inputs, outputs)
         try:
             inputs["key3"] = 3
-            utils.assert_same_structure(inputs, outputs)
+            paddle.utils.assert_same_structure(inputs, outputs)
         except ValueError as identifier:
             pass
......
@@ -24,7 +24,6 @@ import paddle.fluid.layers as layers
 import paddle.nn as nn
 from paddle import Model, set_device
 from paddle.fluid.data_feeder import convert_dtype
-from paddle.fluid.layers.utils import map_structure
 from paddle.nn import (
     RNN,
     BeamSearchDecoder,
@@ -510,7 +509,7 @@ class TrainingHelper:
         self.inputs = inputs
         self.sequence_length = sequence_length
         self.time_major = time_major
-        self.inputs_ = map_structure(
+        self.inputs_ = paddle.utils.map_structure(
             lambda x: paddle.nn.functional.pad(
                 x,
                 pad=([0, 1] + [0, 0] * (len(x.shape) - 1))
@@ -527,7 +526,7 @@ class TrainingHelper:
                 shape=[1], dtype=self.sequence_length.dtype, fill_value=0
            ),
         )
-        init_inputs = map_structure(
+        init_inputs = paddle.utils.map_structure(
             lambda x: x[0] if self.time_major else x[:, 0], self.inputs
         )
         return init_inputs, init_finished
@@ -556,7 +555,7 @@ class TrainingHelper:
             axis=axes,
         )
-        next_inputs = map_structure(_slice, self.inputs_)
+        next_inputs = paddle.utils.map_structure(_slice, self.inputs_)
         return finished, next_inputs, states
......
@@ -27,7 +27,6 @@ from paddle.fluid.executor import (
     _is_enable_standalone_executor,
 )
 from paddle.fluid.framework import global_var
-from paddle.fluid.layers.utils import _hash_with_id
 paddle.enable_static()
@@ -145,7 +144,7 @@ class RunProgramOpTest(unittest.TestCase):
             'end_op_index',
             self.fwd_op_num,
             'program_id',
-            _hash_with_id(self.program_desc, self),
+            paddle.utils._hash_with_id(self.program_desc, self),
         ]
     def get_param_grad_names(self):
......
@@ -15,7 +15,6 @@
 import unittest
 import paddle
-from paddle.fluid.layers.utils import try_set_static_shape_tensor
 class StaticShapeInferrenceTest(unittest.TestCase):
@@ -24,7 +23,7 @@ class StaticShapeInferrenceTest(unittest.TestCase):
         data = paddle.static.data(name="x", shape=[-1, 2], dtype='float32')
         shape = paddle.shape(data)  # shape should be [-1, 2]
         x = paddle.uniform(shape)
-        try_set_static_shape_tensor(x, shape)
+        paddle.utils.try_set_static_shape_tensor(x, shape)
         self.assertEqual(x.shape, data.shape)
         paddle.disable_static()
......
@@ -297,10 +297,9 @@ def is_bool_tensor(ele):
 def deal_attrs(attrs, attr, attr_name, tensor_attr_name, inputs, infer_flags):
     from .framework import Variable
-    from .layers import utils
-    if utils._contain_var(attr):
-        inputs[tensor_attr_name] = utils._convert_to_tensor_list(
+    if paddle.utils._contain_var(attr):
+        inputs[tensor_attr_name] = paddle.utils._convert_to_tensor_list(
             attr, dtype="int64"
         )
         for i, dim in enumerate(attr):
@@ -763,16 +762,16 @@ def _setitem_impl_(var, item, value):
         'none_axes': none_axes,
     }
-    from .layers import utils
-    if utils._contain_var(starts):
-        inputs['StartsTensorList'] = utils._convert_to_tensor_list(starts)
+    if paddle.utils._contain_var(starts):
+        inputs['StartsTensorList'] = paddle.utils._convert_to_tensor_list(
+            starts
+        )
         del attrs['starts']
-    if utils._contain_var(ends):
-        inputs['EndsTensorList'] = utils._convert_to_tensor_list(ends)
+    if paddle.utils._contain_var(ends):
+        inputs['EndsTensorList'] = paddle.utils._convert_to_tensor_list(ends)
         del attrs['ends']
-    if utils._contain_var(steps):
-        inputs['StepsTensorList'] = utils._convert_to_tensor_list(steps)
+    if paddle.utils._contain_var(steps):
+        inputs['StepsTensorList'] = paddle.utils._convert_to_tensor_list(steps)
         del attrs['steps']
     # 2. Parse value
......
@@ -35,7 +35,6 @@ from paddle.fluid.framework import Variable
 from paddle.fluid.framework import _current_expected_place as _get_device
 from paddle.fluid.framework import _get_paddle_place, _non_static_mode
 from paddle.fluid.layers import collective
-from paddle.fluid.layers.utils import flatten
 from paddle.framework.io_utils import is_belong_to_optimizer
 from paddle.io import DataLoader, Dataset, DistributedBatchSampler
 from paddle.jit.translated_layer import INFER_MODEL_SUFFIX, INFER_PARAMS_SUFFIX
@@ -2293,7 +2292,7 @@ class Model:
         # 4. custumed iterator yield separated inputs and labels:
         #     ([input1, input2, ...], [label1, lable2, ...])
         # To handle all of these, flatten (nested) list to list.
-        data = flatten(data)
+        data = paddle.utils.flatten(data)
         # LoDTensor.shape is callable, where LoDTensor comes from
         # DataLoader in static graph
......
@@ -14,9 +14,9 @@
 import numpy as np
+import paddle
 import paddle.fluid as fluid
 from paddle.fluid.layer_helper import LayerHelper
-from paddle.fluid.layers import utils
 from paddle.fluid.param_attr import ParamAttr
 from paddle.nn import Layer
 from paddle.nn import initializer as I
@@ -184,7 +184,9 @@ class ResNetUnit(Layer):
         self._stride = stride
         self._stride_z = stride_z
         self._dilation = 1
-        self._kernel_size = utils.convert_to_list(filter_size, 2, 'kernel_size')
+        self._kernel_size = paddle.utils.convert_to_list(
+            filter_size, 2, 'kernel_size'
+        )
         self._padding = (filter_size - 1) // 2
         self._groups = 1
         self._momentum = momentum
......
@@ -14,10 +14,10 @@
 import numpy as np
+import paddle
 import paddle.fluid as fluid
 from paddle import _legacy_C_ops
 from paddle.fluid.layer_helper import LayerHelper
-from paddle.fluid.layers import utils
 from paddle.fluid.param_attr import ParamAttr
 from paddle.nn import Layer
 from paddle.nn import initializer as I
@@ -478,10 +478,10 @@ class ResNetBasicBlock(Layer):
         super().__init__()
         self._stride1 = stride1
         self._stride2 = stride2
-        self._kernel1_size = utils.convert_to_list(
+        self._kernel1_size = paddle.utils.convert_to_list(
             filter1_size, 2, 'filter1_size'
         )
-        self._kernel2_size = utils.convert_to_list(
+        self._kernel2_size = paddle.utils.convert_to_list(
             filter2_size, 2, 'filter2_size'
         )
         self._dilation1 = dilation1
@@ -500,7 +500,7 @@ class ResNetBasicBlock(Layer):
         self._find_conv_max = find_conv_max
         if has_shortcut:
-            self._kernel3_size = utils.convert_to_list(
+            self._kernel3_size = paddle.utils.convert_to_list(
                 filter3_size, 2, 'filter3_size'
             )
             self._padding3 = padding3
......
@@ -34,7 +34,6 @@ from paddle.fluid.compiler import (
     ExecutionStrategy,
 )
 from paddle.fluid.data_feeder import check_type
-from paddle.fluid.layers.utils import flatten, pack_sequence_as
 from paddle.fluid.dygraph.base import (
     program_desc_tracing_guard,
     switch_to_static_graph,
@@ -509,7 +508,9 @@ def _get_input_var_names(inputs, input_spec):
     )
     result_list = []
     input_var_names = [
-        var.name for var in flatten(inputs) if isinstance(var, Variable)
+        var.name
+        for var in paddle.utils.flatten(inputs)
+        if isinstance(var, Variable)
     ]
     if input_spec is None:
         # no prune
@@ -562,7 +563,7 @@ def _get_output_vars(outputs, output_spec, with_hook=False):
     )
     result_list = []
     output_vars_dict = OrderedDict()
-    for var in flatten(outputs):
+    for var in paddle.utils.flatten(outputs):
         if isinstance(var, Variable):
             output_vars_dict[var.name] = var
     if output_spec is None:
@@ -970,7 +971,7 @@ def save(layer, path, input_spec=None, **configs):
             % type(input_spec)
        )
        inner_input_spec = []
-        for var in flatten(input_spec):
+        for var in paddle.utils.flatten(input_spec):
            if isinstance(var, paddle.static.InputSpec):
                inner_input_spec.append(var)
            elif isinstance(var, (core.VarBase, core.eager.Tensor, Variable)):
@@ -1034,7 +1035,7 @@ def save(layer, path, input_spec=None, **configs):
             # inner_input_spec is list[InputSpec], it should be packed with same structure
             # as original input_spec here.
             if inner_input_spec:
-                inner_input_spec = pack_sequence_as(
+                inner_input_spec = paddle.utils.pack_sequence_as(
                     input_spec, inner_input_spec
                 )
                 static_forward = to_static(
@@ -1067,7 +1068,7 @@ def save(layer, path, input_spec=None, **configs):
             )
         else:
             if inner_input_spec:
-                inner_input_spec = pack_sequence_as(
+                inner_input_spec = paddle.utils.pack_sequence_as(
                     input_spec, inner_input_spec
                 )
                 static_function = to_static(
......
@@ -20,7 +20,6 @@ from paddle.fluid.dygraph.base import _convert_into_variable
 from paddle.fluid.framework import Variable, core
 from paddle.fluid.layers import Print, control_flow, fill_constant
 from paddle.fluid.layers.control_flow import while_loop
-from paddle.fluid.layers.utils import copy_mutable_vars
 from paddle.jit.dy2static.utils import (
     Dygraph2StaticException,
     GetterSetterHelper,
@@ -371,7 +370,10 @@ def _run_paddle_cond(
     def new_true_fn():
         # init args may contain mutable python container like [var, 2], we copy then like in while_loop
-        helper.set(return_name_ids, copy_mutable_vars(init_args))
+        helper.set(
+            return_name_ids,
+            paddle.utils.copy_mutable_vars(init_args),
+        )
         ret = true_fn()
         # IfExpr will return a non-None return value, so we just return ret.
         # We assume normal return has no return value.
@@ -382,7 +384,10 @@ def _run_paddle_cond(
     def new_false_fn():
         # init args may contain mutable python container like [var, 2], we copy then like in while_loop
-        helper.set(return_name_ids, copy_mutable_vars(init_args))
+        helper.set(
+            return_name_ids,
+            paddle.utils.copy_mutable_vars(init_args),
+        )
         ret = false_fn()
         if ret is None:
             return helper.get(return_name_ids)
......
@@ -21,7 +21,6 @@ import paddle
 from paddle.fluid import core
 from paddle.fluid.dygraph import layers
 from paddle.fluid.dygraph.base import switch_to_static_graph
-from paddle.fluid.layers.utils import flatten, pack_sequence_as
 from paddle.jit.translated_layer import TranslatedLayer
 from . import logging_utils
@@ -47,7 +46,7 @@ class FunctionSpec:
             self._flat_input_spec = None
         else:
             self._input_spec = self._verify_input_spec(input_spec)
-            self._flat_input_spec = flatten(self._input_spec)
+            self._flat_input_spec = paddle.utils.flatten(self._input_spec)
         # parse full argument names list.
         self._arg_names, self._default_kwargs = parse_arg_and_kwargs(function)
@@ -171,7 +170,7 @@ class FunctionSpec:
             input_with_spec(tuple): input arguments by replacing argument with InputSpec.
             main_program(Program): main program for inserting feed layer.
         """
-        flat_input_spec = flatten(input_with_spec)
+        flat_input_spec = paddle.utils.flatten(input_with_spec)
         inputs = []
         block = main_program.global_block()
@@ -191,7 +190,7 @@ class FunctionSpec:
                 feed_layer = var_spec
             inputs.append(feed_layer)
-        return pack_sequence_as(input_with_spec, inputs)
+        return paddle.utils.pack_sequence_as(input_with_spec, inputs)
     def _verify_input_spec(self, input_spec):
         """
@@ -283,7 +282,7 @@ def get_buffers(layer_instance, include_sublayer=True):
 def _replace_value_with_input_spec(args):
     args_with_spec = []
-    for idx, input_var in enumerate(flatten(args)):
+    for idx, input_var in enumerate(paddle.utils.flatten(args)):
         if isinstance(input_var, np.ndarray):
             input_var = paddle.static.InputSpec.from_numpy(input_var)
             input_var.stop_gradient = True
@@ -299,7 +298,7 @@ def _replace_value_with_input_spec(args):
             input_var.stop_gradient = stop_gradient
         args_with_spec.append(input_var)
-    args_with_spec = pack_sequence_as(args, args_with_spec)
+    args_with_spec = paddle.utils.pack_sequence_as(args, args_with_spec)
     return args_with_spec
@@ -450,12 +449,12 @@ def _hash_spec_names(args_specs, kwargs_specs):
     """
     spec_names = [
         spec.name
-        for spec in flatten(args_specs)
+        for spec in paddle.utils.flatten(args_specs)
         if isinstance(spec, paddle.static.InputSpec)
     ]
     spec_names += [
         spec.name
-        for spec in flatten(kwargs_specs)
+        for spec in paddle.utils.flatten(kwargs_specs)
         if isinstance(spec, paddle.static.InputSpec)
     ]
     i, name_ids = 0, {}
......
@@ -23,7 +23,6 @@ from paddle.fluid.compiler import BuildStrategy
 from paddle.fluid.dygraph import layers
 from paddle.fluid.dygraph.base import switch_to_static_graph
 from paddle.fluid.framework import _apply_pass
-from paddle.fluid.layers.utils import _hash_with_id, flatten, pack_sequence_as
 from . import logging_utils
 from .return_transformer import RETURN_NO_VALUE_MAGIC_NUM
@@ -48,14 +47,14 @@ class NestSequence:
         """
         Flattens the nested sequences into single list.
         """
-        return flatten(self.__raw_input)
+        return paddle.utils.flatten(self.__raw_input)
     def restore(self, value_list):
         """
         Restores the nested sequence from value list.
         """
         assert len(self.__input_list) == len(value_list)
-        return pack_sequence_as(self.__raw_input, value_list)
+        return paddle.utils.pack_sequence_as(self.__raw_input, value_list)
     def _get_var_ids(self):
         var_ids = []
@@ -374,7 +373,7 @@ class PartialProgramLayer:
     @LazyInitialized
     def _train_program_id(self):
-        program_id = _hash_with_id(self._train_program, self)
+        program_id = paddle.utils._hash_with_id(self._train_program, self)
         core._set_cached_executor_build_strategy(
             program_id, self._build_strategy
         )
@@ -382,11 +381,11 @@ class PartialProgramLayer:
     @LazyInitialized
     def _infer_program_id(self):
-        return _hash_with_id(self._infer_program, self)
+        return paddle.utils._hash_with_id(self._infer_program, self)
     @LazyInitialized
     def _train_amp_program_id(self):
-        program_id = _hash_with_id(self._train_amp_program, self)
+        program_id = paddle.utils._hash_with_id(self._train_amp_program, self)
         core._set_cached_executor_build_strategy(
             program_id, self._build_strategy
         )
@@ -394,11 +393,13 @@ class PartialProgramLayer:
     @LazyInitialized
     def _infer_amp_program_id(self):
-        return _hash_with_id(self._infer_amp_program, self)
+        return paddle.utils._hash_with_id(self._infer_amp_program, self)
     @LazyInitialized
     def _train_pure_fp16_program_id(self):
-        program_id = _hash_with_id(self._train_pure_fp16_program, self)
+        program_id = paddle.utils._hash_with_id(
+            self._train_pure_fp16_program, self
+        )
         core._set_cached_executor_build_strategy(
             program_id, self._build_strategy
         )
@@ -406,7 +407,7 @@ class PartialProgramLayer:
     @LazyInitialized
     def _infer_pure_fp16_program_id(self):
-        return _hash_with_id(self._infer_pure_fp16_program, self)
+        return paddle.utils._hash_with_id(self._infer_pure_fp16_program, self)
     @LazyInitialized
     def _param_grad_names(self):
@@ -854,7 +855,7 @@ class PartialProgramLayer:
         """
         assert isinstance(inputs, (tuple, list))
         # Flatten inputs with nested structure into single list.
-        flatten_inputs = flatten(inputs)
+        flatten_inputs = paddle.utils.flatten(inputs)
         # Convert variable into VarBase and feed in training data.
         input_vars = []
         expected_place = framework._current_expected_place()
......
@@ -24,8 +24,7 @@ from paddle.fluid import _non_static_mode, core, framework
 from paddle.fluid.data_feeder import check_type
 from paddle.fluid.dygraph import layers
 from paddle.fluid.dygraph.base import param_guard, switch_to_static_graph
-from paddle.fluid.layers.utils import flatten
-from paddle.utils import gast
+from paddle.utils import flatten, gast
 from . import error, logging_utils
 from .ast_transformer import DygraphToStaticAst
......
...@@ -14,8 +14,7 @@ ...@@ -14,8 +14,7 @@
import paddle import paddle
from paddle.fluid.framework import Variable from paddle.fluid.framework import Variable
from paddle.fluid.layers.utils import is_sequence, map_structure from paddle.utils import gast, is_sequence, map_structure
from paddle.utils import gast
from .utils import UndefinedVar, create_undefined_variable from .utils import UndefinedVar, create_undefined_variable
......
...@@ -27,7 +27,6 @@ from paddle.fluid.executor import ( ...@@ -27,7 +27,6 @@ from paddle.fluid.executor import (
_is_enable_standalone_executor, _is_enable_standalone_executor,
) )
from paddle.fluid.framework import OpProtoHolder, _non_static_mode from paddle.fluid.framework import OpProtoHolder, _non_static_mode
from paddle.fluid.layers.utils import _hash_with_id
from paddle.jit.dy2static.partial_program import ( from paddle.jit.dy2static.partial_program import (
LazyInitialized, LazyInitialized,
add_build_strategy_for, add_build_strategy_for,
...@@ -1025,7 +1024,7 @@ def _run_dygraph(instance, input, program_holder): ...@@ -1025,7 +1024,7 @@ def _run_dygraph(instance, input, program_holder):
'is_test', 'is_test',
instance._is_test, instance._is_test,
'program_id', 'program_id',
_hash_with_id(trace_program, instance), paddle.utils._hash_with_id(trace_program, instance),
] ]
if not instance._is_test: if not instance._is_test:
attrs.extend( attrs.extend(
......
...@@ -23,7 +23,6 @@ from paddle.common_ops_import import default_main_program ...@@ -23,7 +23,6 @@ from paddle.common_ops_import import default_main_program
from paddle.framework import _non_static_mode from paddle.framework import _non_static_mode
from ..fluid.data_feeder import convert_dtype from ..fluid.data_feeder import convert_dtype
from ..fluid.layers.utils import flatten, map_structure
__all__ = [] __all__ = []
...@@ -434,7 +433,7 @@ class BeamSearchDecoder(Decoder): ...@@ -434,7 +433,7 @@ class BeamSearchDecoder(Decoder):
`finished` is a `bool` tensor filled by False with shape `[batch_size, beam_size]`. `finished` is a `bool` tensor filled by False with shape `[batch_size, beam_size]`.
""" """
self.kinf = 1e9 self.kinf = 1e9
state = flatten(initial_cell_states)[0] state = paddle.utils.flatten(initial_cell_states)[0]
self.batch_size = paddle.shape(state)[0] self.batch_size = paddle.shape(state)[0]
self.start_token_tensor = paddle.full( self.start_token_tensor = paddle.full(
...@@ -444,7 +443,7 @@ class BeamSearchDecoder(Decoder): ...@@ -444,7 +443,7 @@ class BeamSearchDecoder(Decoder):
shape=[1], dtype="int64", fill_value=self.end_token shape=[1], dtype="int64", fill_value=self.end_token
) )
init_cell_states = map_structure( init_cell_states = paddle.utils.map_structure(
self._expand_to_beam_size, initial_cell_states self._expand_to_beam_size, initial_cell_states
) )
init_inputs = paddle.full( init_inputs = paddle.full(
...@@ -542,7 +541,7 @@ class BeamSearchDecoder(Decoder): ...@@ -542,7 +541,7 @@ class BeamSearchDecoder(Decoder):
topk_indices, topk_indices,
self.batch_size, self.batch_size,
) )
next_cell_states = map_structure( next_cell_states = paddle.utils.map_structure(
lambda x: self._gather(x, beam_indices, self.batch_size), lambda x: self._gather(x, beam_indices, self.batch_size),
next_cell_states, next_cell_states,
) )
...@@ -596,13 +595,17 @@ class BeamSearchDecoder(Decoder): ...@@ -596,13 +595,17 @@ class BeamSearchDecoder(Decoder):
`[batch_size, beam_size]` with data type `float32, int64, int64`. \ `[batch_size, beam_size]` with data type `float32, int64, int64`. \
`finished` is a `bool` tensor with shape `[batch_size, beam_size]`. `finished` is a `bool` tensor with shape `[batch_size, beam_size]`.
""" """
inputs = map_structure(self._merge_batch_beams, inputs) inputs = paddle.utils.map_structure(self._merge_batch_beams, inputs)
cell_states = map_structure(self._merge_batch_beams, states.cell_states) cell_states = paddle.utils.map_structure(
self._merge_batch_beams, states.cell_states
)
cell_outputs, next_cell_states = self.cell( cell_outputs, next_cell_states = self.cell(
inputs, cell_states, **kwargs inputs, cell_states, **kwargs
) )
cell_outputs = map_structure(self._split_batch_beams, cell_outputs) cell_outputs = paddle.utils.map_structure(
next_cell_states = map_structure( self._split_batch_beams, cell_outputs
)
next_cell_states = paddle.utils.map_structure(
self._split_batch_beams, next_cell_states self._split_batch_beams, next_cell_states
) )
...@@ -729,7 +732,7 @@ def _dynamic_decode_imperative( ...@@ -729,7 +732,7 @@ def _dynamic_decode_imperative(
), ),
) )
if impute_finished: # rectify the states for the finished. if impute_finished: # rectify the states for the finished.
next_states = map_structure( next_states = paddle.utils.map_structure(
lambda x, y: _maybe_copy(x, y, finished), lambda x, y: _maybe_copy(x, y, finished),
states, states,
next_states, next_states,
...@@ -743,9 +746,9 @@ def _dynamic_decode_imperative( ...@@ -743,9 +746,9 @@ def _dynamic_decode_imperative(
) )
outputs = ( outputs = (
map_structure(lambda x: ArrayWrapper(x), step_outputs) paddle.utils.map_structure(lambda x: ArrayWrapper(x), step_outputs)
if step_idx == 0 if step_idx == 0
else map_structure( else paddle.utils.map_structure(
lambda x, x_array: x_array.append(x), step_outputs, outputs lambda x, x_array: x_array.append(x), step_outputs, outputs
) )
) )
...@@ -763,7 +766,7 @@ def _dynamic_decode_imperative( ...@@ -763,7 +766,7 @@ def _dynamic_decode_imperative(
if max_step_num is not None and step_idx > max_step_num: if max_step_num is not None and step_idx > max_step_num:
break break
final_outputs = map_structure( final_outputs = paddle.utils.map_structure(
lambda x: paddle.stack(x.array, axis=0), outputs lambda x: paddle.stack(x.array, axis=0), outputs
) )
final_states = states final_states = states
...@@ -776,7 +779,7 @@ def _dynamic_decode_imperative( ...@@ -776,7 +779,7 @@ def _dynamic_decode_imperative(
pass pass
if not output_time_major: if not output_time_major:
final_outputs = map_structure( final_outputs = paddle.utils.map_structure(
lambda x: paddle.transpose( lambda x: paddle.transpose(
x, [1, 0] + list(range(2, len(x.shape))) x, [1, 0] + list(range(2, len(x.shape)))
), ),
...@@ -822,15 +825,15 @@ def _dynamic_decode_declarative( ...@@ -822,15 +825,15 @@ def _dynamic_decode_declarative(
if is_test: if is_test:
# for test, reuse inputs and states variables to save memory # for test, reuse inputs and states variables to save memory
inputs = map_structure(lambda x: x, initial_inputs) inputs = paddle.utils.map_structure(lambda x: x, initial_inputs)
states = map_structure(lambda x: x, initial_states) states = paddle.utils.map_structure(lambda x: x, initial_states)
else: else:
# inputs and states of all steps must be saved for backward and training # inputs and states of all steps must be saved for backward and training
inputs_arrays = map_structure( inputs_arrays = paddle.utils.map_structure(
lambda x: paddle.tensor.array.array_write(x, step_idx), lambda x: paddle.tensor.array.array_write(x, step_idx),
initial_inputs, initial_inputs,
) )
states_arrays = map_structure( states_arrays = paddle.utils.map_structure(
lambda x: paddle.tensor.array.array_write(x, step_idx), lambda x: paddle.tensor.array.array_write(x, step_idx),
initial_states, initial_states,
) )
...@@ -869,11 +872,11 @@ def _dynamic_decode_declarative( ...@@ -869,11 +872,11 @@ def _dynamic_decode_declarative(
# While # While
with while_op.block(): with while_op.block():
if not is_test: if not is_test:
inputs = map_structure( inputs = paddle.utils.map_structure(
lambda array: paddle.tensor.array.array_read(array, step_idx), lambda array: paddle.tensor.array.array_read(array, step_idx),
inputs_arrays, inputs_arrays,
) )
states = map_structure( states = paddle.utils.map_structure(
lambda array: paddle.tensor.array.array_read(array, step_idx), lambda array: paddle.tensor.array.array_read(array, step_idx),
states_arrays, states_arrays,
) )
...@@ -894,7 +897,7 @@ def _dynamic_decode_declarative( ...@@ -894,7 +897,7 @@ def _dynamic_decode_declarative(
), ),
) )
if impute_finished: # rectify the states for the finished. if impute_finished: # rectify the states for the finished.
next_states = map_structure( next_states = paddle.utils.map_structure(
lambda x, y: _maybe_copy(x, y, global_finished), lambda x, y: _maybe_copy(x, y, global_finished),
states, states,
next_states, next_states,
...@@ -908,11 +911,11 @@ def _dynamic_decode_declarative( ...@@ -908,11 +911,11 @@ def _dynamic_decode_declarative(
) )
# create tensor array in global block after dtype[s] of outputs can be got # create tensor array in global block after dtype[s] of outputs can be got
outputs_arrays = map_structure( outputs_arrays = paddle.utils.map_structure(
lambda x: _create_array_out_of_while(x.dtype), outputs lambda x: _create_array_out_of_while(x.dtype), outputs
) )
map_structure( paddle.utils.map_structure(
lambda x, x_array: paddle.tensor.array.array_write( lambda x, x_array: paddle.tensor.array.array_write(
x, i=step_idx, array=x_array x, i=step_idx, array=x_array
), ),
...@@ -925,17 +928,21 @@ def _dynamic_decode_declarative( ...@@ -925,17 +928,21 @@ def _dynamic_decode_declarative(
paddle.assign(next_finished, global_finished) paddle.assign(next_finished, global_finished)
paddle.assign(next_sequence_lengths, sequence_lengths) paddle.assign(next_sequence_lengths, sequence_lengths)
if is_test: if is_test:
map_structure(paddle.assign, next_inputs, global_inputs) paddle.utils.map_structure(
map_structure(paddle.assign, next_states, global_states) paddle.assign, next_inputs, global_inputs
)
paddle.utils.map_structure(
paddle.assign, next_states, global_states
)
else: else:
map_structure( paddle.utils.map_structure(
lambda x, x_array: paddle.tensor.array.array_write( lambda x, x_array: paddle.tensor.array.array_write(
x, i=step_idx, array=x_array x, i=step_idx, array=x_array
), ),
next_inputs, next_inputs,
inputs_arrays, inputs_arrays,
) )
map_structure( paddle.utils.map_structure(
lambda x, x_array: paddle.tensor.array.array_write( lambda x, x_array: paddle.tensor.array.array_write(
x, i=step_idx, array=x_array x, i=step_idx, array=x_array
), ),
...@@ -951,7 +958,7 @@ def _dynamic_decode_declarative( ...@@ -951,7 +958,7 @@ def _dynamic_decode_declarative(
else: else:
paddle.logical_not(paddle.all(global_finished), cond) paddle.logical_not(paddle.all(global_finished), cond)
final_outputs = map_structure( final_outputs = paddle.utils.map_structure(
lambda array: paddle.tensor.manipulation.tensor_array_to_tensor( lambda array: paddle.tensor.manipulation.tensor_array_to_tensor(
array, axis=0, use_stack=True array, axis=0, use_stack=True
)[0], )[0],
...@@ -960,7 +967,7 @@ def _dynamic_decode_declarative( ...@@ -960,7 +967,7 @@ def _dynamic_decode_declarative(
if is_test: if is_test:
final_states = global_states final_states = global_states
else: else:
final_states = map_structure( final_states = paddle.utils.map_structure(
lambda array: paddle.tensor.array.array_read(array, step_idx), lambda array: paddle.tensor.array.array_read(array, step_idx),
states_arrays, states_arrays,
) )
...@@ -973,7 +980,9 @@ def _dynamic_decode_declarative( ...@@ -973,7 +980,9 @@ def _dynamic_decode_declarative(
pass pass
if not output_time_major: if not output_time_major:
final_outputs = map_structure(_transpose_batch_time, final_outputs) final_outputs = paddle.utils.map_structure(
_transpose_batch_time, final_outputs
)
return ( return (
(final_outputs, final_states, sequence_lengths) (final_outputs, final_states, sequence_lengths)
......
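The decoder changes above lean on the relocated nest helpers. A small sketch of their assumed semantics: flatten a nested structure into a flat list, map a function over every leaf, and re-pack a flat list into the original nesting.

    import paddle

    # nested "states" structure, as the decoder code above handles
    states = (paddle.zeros([2, 4]), [paddle.ones([2, 4]), paddle.ones([2, 4])])

    flat = paddle.utils.flatten(states)                            # flat list of 3 tensors
    doubled = paddle.utils.map_structure(lambda t: t * 2, states)  # same nesting, leaves doubled
    repacked = paddle.utils.pack_sequence_as(states, flat)         # back to (tensor, [tensor, tensor])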
...@@ -26,14 +26,14 @@ from ...common_ops_import import Variable ...@@ -26,14 +26,14 @@ from ...common_ops_import import Variable
from ...device import get_cudnn_version from ...device import get_cudnn_version
from ...fluid.data_feeder import check_dtype, check_variable_and_dtype from ...fluid.data_feeder import check_dtype, check_variable_and_dtype
from ...fluid.layer_helper import LayerHelper from ...fluid.layer_helper import LayerHelper
from ...fluid.layers.utils import ( from ...framework import no_grad
from ...tensor.manipulation import squeeze, unsqueeze
from ...utils import (
_contain_var, _contain_var,
_convert_to_tensor_list, _convert_to_tensor_list,
_is_symmetric_padding, _is_symmetric_padding,
convert_to_list, convert_to_list,
) )
from ...framework import no_grad
from ...tensor.manipulation import squeeze, unsqueeze
__all__ = [] __all__ = []
......
...@@ -16,10 +16,16 @@ from paddle import _C_ops, _legacy_C_ops, in_dynamic_mode ...@@ -16,10 +16,16 @@ from paddle import _C_ops, _legacy_C_ops, in_dynamic_mode
from paddle.fluid.framework import Variable, in_dygraph_mode from paddle.fluid.framework import Variable, in_dygraph_mode
from ...fluid.data_feeder import check_type, check_variable_and_dtype from ...fluid.data_feeder import check_type, check_variable_and_dtype
from ...fluid.layers import LayerHelper
from ...tensor.manipulation import squeeze, unsqueeze
# TODO: define pooling functions # TODO: define pooling functions
from ...fluid.layers import LayerHelper, utils from ...utils import (
from ...tensor.manipulation import squeeze, unsqueeze _contain_var,
_convert_to_tensor_list,
_is_symmetric_padding,
convert_to_list,
)
__all__ = [] __all__ = []
...@@ -135,24 +141,24 @@ def _update_padding_nd(padding, num_dims, channel_last=False, ceil_mode=False): ...@@ -135,24 +141,24 @@ def _update_padding_nd(padding, num_dims, channel_last=False, ceil_mode=False):
padding = _exclude_padding_in_batch_and_channel( padding = _exclude_padding_in_batch_and_channel(
padding, channel_last padding, channel_last
) )
if utils._is_symmetric_padding(padding, num_dims): if _is_symmetric_padding(padding, num_dims):
padding = padding[0::2] padding = padding[0::2]
# for padding like [pad_before, pad_after, pad_before, pad_after, ...] # for padding like [pad_before, pad_after, pad_before, pad_after, ...]
elif len(padding) == 2 * num_dims and isinstance(padding[0], int): elif len(padding) == 2 * num_dims and isinstance(padding[0], int):
padding_algorithm = "EXPLICIT" padding_algorithm = "EXPLICIT"
padding = utils.convert_to_list(padding, 2 * num_dims, 'padding') padding = convert_to_list(padding, 2 * num_dims, 'padding')
if utils._is_symmetric_padding(padding, num_dims): if _is_symmetric_padding(padding, num_dims):
padding = padding[0::2] padding = padding[0::2]
# for padding like [pad_d1, pad_d2, ...] # for padding like [pad_d1, pad_d2, ...]
elif len(padding) == num_dims and isinstance(padding[0], int): elif len(padding) == num_dims and isinstance(padding[0], int):
padding_algorithm = "EXPLICIT" padding_algorithm = "EXPLICIT"
padding = utils.convert_to_list(padding, num_dims, 'padding') padding = convert_to_list(padding, num_dims, 'padding')
else: else:
raise ValueError("Invalid padding: {}".format(padding)) raise ValueError("Invalid padding: {}".format(padding))
# for integer padding # for integer padding
else: else:
padding_algorithm = "EXPLICIT" padding_algorithm = "EXPLICIT"
padding = utils.convert_to_list(padding, num_dims, 'padding') padding = convert_to_list(padding, num_dims, 'padding')
return padding, padding_algorithm return padding, padding_algorithm
...@@ -228,12 +234,12 @@ def avg_pool1d( ...@@ -228,12 +234,12 @@ def avg_pool1d(
) )
_check_input(x, 3) _check_input(x, 3)
x = unsqueeze(x, [2]) x = unsqueeze(x, [2])
kernel_size = utils.convert_to_list(kernel_size, 1, 'kernel_size') kernel_size = convert_to_list(kernel_size, 1, 'kernel_size')
kernel_size = [1] + kernel_size kernel_size = [1] + kernel_size
if stride is None: if stride is None:
stride = kernel_size stride = kernel_size
else: else:
stride = utils.convert_to_list(stride, 1, 'pool_stride') stride = convert_to_list(stride, 1, 'pool_stride')
stride = [1] + stride stride = [1] + stride
_check_value_limitation(kernel_size, "kernel_size", min_limit=1e-3) _check_value_limitation(kernel_size, "kernel_size", min_limit=1e-3)
...@@ -353,11 +359,11 @@ def avg_pool2d( ...@@ -353,11 +359,11 @@ def avg_pool2d(
stride=2, padding=0) stride=2, padding=0)
# out.shape [1, 3, 16, 16] # out.shape [1, 3, 16, 16]
""" """
kernel_size = utils.convert_to_list(kernel_size, 2, 'pool_size') kernel_size = convert_to_list(kernel_size, 2, 'pool_size')
if stride is None: if stride is None:
stride = kernel_size stride = kernel_size
else: else:
stride = utils.convert_to_list(stride, 2, 'pool_stride') stride = convert_to_list(stride, 2, 'pool_stride')
_check_value_limitation(kernel_size, "kernel_size", min_limit=1e-3) _check_value_limitation(kernel_size, "kernel_size", min_limit=1e-3)
_check_value_limitation(stride, "stride", min_limit=1e-3) _check_value_limitation(stride, "stride", min_limit=1e-3)
...@@ -482,11 +488,11 @@ def avg_pool3d( ...@@ -482,11 +488,11 @@ def avg_pool3d(
padding=0) padding=0)
# out.shape: [1, 3, 16, 16, 16] # out.shape: [1, 3, 16, 16, 16]
""" """
kernel_size = utils.convert_to_list(kernel_size, 3, 'pool_size') kernel_size = convert_to_list(kernel_size, 3, 'pool_size')
if stride is None: if stride is None:
stride = kernel_size stride = kernel_size
else: else:
stride = utils.convert_to_list(stride, 3, 'pool_stride') stride = convert_to_list(stride, 3, 'pool_stride')
channel_last = _channel_last(data_format, 3) channel_last = _channel_last(data_format, 3)
padding, padding_algorithm = _update_padding_nd( padding, padding_algorithm = _update_padding_nd(
...@@ -603,11 +609,11 @@ def max_pool1d( ...@@ -603,11 +609,11 @@ def max_pool1d(
data_format = "NCHW" data_format = "NCHW"
_check_input(x, 3) _check_input(x, 3)
x = unsqueeze(x, [2]) x = unsqueeze(x, [2])
kernel_size = [1] + utils.convert_to_list(kernel_size, 1, 'pool_size') kernel_size = [1] + convert_to_list(kernel_size, 1, 'pool_size')
if stride is None: if stride is None:
stride = kernel_size stride = kernel_size
else: else:
stride = [1] + utils.convert_to_list(stride, 1, 'pool_stride') stride = [1] + convert_to_list(stride, 1, 'pool_stride')
padding, padding_algorithm = _update_padding_nd( padding, padding_algorithm = _update_padding_nd(
padding, 1, ceil_mode=ceil_mode padding, 1, ceil_mode=ceil_mode
...@@ -693,10 +699,10 @@ def _unpool_output_size(x, kernel_size, stride, padding, output_size): ...@@ -693,10 +699,10 @@ def _unpool_output_size(x, kernel_size, stride, padding, output_size):
has_static_var = False has_static_var = False
if output_size is None: if output_size is None:
return default_size return default_size
elif utils._contain_var(output_size): elif _contain_var(output_size):
if not in_dygraph_mode(): if not in_dygraph_mode():
has_static_var = True has_static_var = True
output_size = utils._convert_to_tensor_list(output_size) output_size = _convert_to_tensor_list(output_size)
else: else:
for i, var in enumerate(output_size): for i, var in enumerate(output_size):
if isinstance(var, Variable): if isinstance(var, Variable):
...@@ -799,11 +805,11 @@ def max_unpool1d( ...@@ -799,11 +805,11 @@ def max_unpool1d(
data_format = "NCHW" data_format = "NCHW"
x = unsqueeze(x, [2]) x = unsqueeze(x, [2])
indices = unsqueeze(indices, [2]) indices = unsqueeze(indices, [2])
kernel_size = [1] + utils.convert_to_list(kernel_size, 1, 'pool_size') kernel_size = [1] + convert_to_list(kernel_size, 1, 'pool_size')
if stride is None: if stride is None:
stride = kernel_size stride = kernel_size
else: else:
stride = [1] + utils.convert_to_list(stride, 1, 'pool_stride') stride = [1] + convert_to_list(stride, 1, 'pool_stride')
padding, padding_algorithm = _update_padding_nd(padding, 1) padding, padding_algorithm = _update_padding_nd(padding, 1)
# use 2d to implenment 1d should expand padding in advance. # use 2d to implenment 1d should expand padding in advance.
padding = _expand_low_nd_padding(padding) padding = _expand_low_nd_padding(padding)
...@@ -940,12 +946,12 @@ def max_unpool2d( ...@@ -940,12 +946,12 @@ def max_unpool2d(
f'The indices should have [N, C, H, W] format, but received {indices.shape}.' f'The indices should have [N, C, H, W] format, but received {indices.shape}.'
) )
kernel_size = utils.convert_to_list(kernel_size, 2, 'pool_size') kernel_size = convert_to_list(kernel_size, 2, 'pool_size')
if stride is None: if stride is None:
stride = kernel_size stride = kernel_size
else: else:
stride = utils.convert_to_list(stride, 2, 'pool_stride') stride = convert_to_list(stride, 2, 'pool_stride')
padding = utils.convert_to_list(padding, 2, 'padding') padding = convert_to_list(padding, 2, 'padding')
if data_format not in ["NCHW"]: if data_format not in ["NCHW"]:
raise ValueError( raise ValueError(
...@@ -1083,12 +1089,12 @@ def max_unpool3d( ...@@ -1083,12 +1089,12 @@ def max_unpool3d(
f'The indices should have [N, C, D, H, W] format, but received {indices.shape}.' f'The indices should have [N, C, D, H, W] format, but received {indices.shape}.'
) )
kernel_size = utils.convert_to_list(kernel_size, 3, 'pool_size') kernel_size = convert_to_list(kernel_size, 3, 'pool_size')
if stride is None: if stride is None:
stride = kernel_size stride = kernel_size
else: else:
stride = utils.convert_to_list(stride, 3, 'pool_stride') stride = convert_to_list(stride, 3, 'pool_stride')
padding = utils.convert_to_list(padding, 3, 'padding') padding = convert_to_list(padding, 3, 'padding')
if data_format not in ["NCDHW"]: if data_format not in ["NCDHW"]:
raise ValueError( raise ValueError(
...@@ -1203,11 +1209,11 @@ def max_pool2d( ...@@ -1203,11 +1209,11 @@ def max_pool2d(
# out.shape [1, 3, 16, 16], max_indices.shape [1, 3, 16, 16], # out.shape [1, 3, 16, 16], max_indices.shape [1, 3, 16, 16],
""" """
kernel_size = utils.convert_to_list(kernel_size, 2, 'pool_size') kernel_size = convert_to_list(kernel_size, 2, 'pool_size')
if stride is None: if stride is None:
stride = kernel_size stride = kernel_size
else: else:
stride = utils.convert_to_list(stride, 2, 'pool_stride') stride = convert_to_list(stride, 2, 'pool_stride')
if data_format not in ["NCHW", "NHWC"]: if data_format not in ["NCHW", "NHWC"]:
raise ValueError( raise ValueError(
...@@ -1370,11 +1376,11 @@ def max_pool3d( ...@@ -1370,11 +1376,11 @@ def max_pool3d(
# output.shape [1, 3, 16, 16, 16], max_indices.shape [1, 3, 16, 16, 16] # output.shape [1, 3, 16, 16, 16], max_indices.shape [1, 3, 16, 16, 16]
""" """
kernel_size = utils.convert_to_list(kernel_size, 3, 'pool_size') kernel_size = convert_to_list(kernel_size, 3, 'pool_size')
if stride is None: if stride is None:
stride = kernel_size stride = kernel_size
else: else:
stride = utils.convert_to_list(stride, 3, 'pool_stride') stride = convert_to_list(stride, 3, 'pool_stride')
channel_last = _channel_last(data_format, 3) channel_last = _channel_last(data_format, 3)
...@@ -1478,7 +1484,7 @@ def adaptive_avg_pool1d(x, output_size, name=None): ...@@ -1478,7 +1484,7 @@ def adaptive_avg_pool1d(x, output_size, name=None):
""" """
pool_type = 'avg' pool_type = 'avg'
_check_input(x, 3) _check_input(x, 3)
pool_size = [1] + utils.convert_to_list(output_size, 1, 'pool_size') pool_size = [1] + convert_to_list(output_size, 1, 'pool_size')
x = unsqueeze(x, [2]) x = unsqueeze(x, [2])
if in_dygraph_mode(): if in_dygraph_mode():
...@@ -1593,7 +1599,7 @@ def adaptive_avg_pool2d(x, output_size, data_format='NCHW', name=None): ...@@ -1593,7 +1599,7 @@ def adaptive_avg_pool2d(x, output_size, data_format='NCHW', name=None):
in_h, in_w = x.shape[1:3] in_h, in_w = x.shape[1:3]
if isinstance(output_size, int): if isinstance(output_size, int):
output_size = utils.convert_to_list(output_size, 2, 'output_size') output_size = convert_to_list(output_size, 2, 'output_size')
else: else:
output_size = list(output_size) output_size = list(output_size)
if output_size[0] is None: if output_size[0] is None:
...@@ -1607,8 +1613,8 @@ def adaptive_avg_pool2d(x, output_size, data_format='NCHW', name=None): ...@@ -1607,8 +1613,8 @@ def adaptive_avg_pool2d(x, output_size, data_format='NCHW', name=None):
for item in output_size for item in output_size
] ]
# output_size support Variable in static graph mode # output_size support Variable in static graph mode
elif utils._contain_var(output_size): elif _contain_var(output_size):
output_size = utils._convert_to_tensor_list(output_size) output_size = _convert_to_tensor_list(output_size)
if in_dygraph_mode(): if in_dygraph_mode():
x = x._use_gpudnn(False) x = x._use_gpudnn(False)
...@@ -1728,7 +1734,7 @@ def adaptive_avg_pool3d(x, output_size, data_format='NCDHW', name=None): ...@@ -1728,7 +1734,7 @@ def adaptive_avg_pool3d(x, output_size, data_format='NCDHW', name=None):
in_l, in_h, in_w = x.shape[1:4] in_l, in_h, in_w = x.shape[1:4]
if isinstance(output_size, int): if isinstance(output_size, int):
output_size = utils.convert_to_list(output_size, 3, 'output_size') output_size = convert_to_list(output_size, 3, 'output_size')
else: else:
output_size = list(output_size) output_size = list(output_size)
if output_size[0] is None: if output_size[0] is None:
...@@ -1827,7 +1833,7 @@ def adaptive_max_pool1d(x, output_size, return_mask=False, name=None): ...@@ -1827,7 +1833,7 @@ def adaptive_max_pool1d(x, output_size, return_mask=False, name=None):
""" """
_check_input(x, 3) _check_input(x, 3)
pool_size = [1] + utils.convert_to_list(output_size, 1, 'pool_size') pool_size = [1] + convert_to_list(output_size, 1, 'pool_size')
x = unsqueeze(x, [2]) x = unsqueeze(x, [2])
if in_dygraph_mode(): if in_dygraph_mode():
...@@ -1917,7 +1923,7 @@ def adaptive_max_pool2d(x, output_size, return_mask=False, name=None): ...@@ -1917,7 +1923,7 @@ def adaptive_max_pool2d(x, output_size, return_mask=False, name=None):
in_h, in_w = x.shape[2:4] in_h, in_w = x.shape[2:4]
if isinstance(output_size, int): if isinstance(output_size, int):
output_size = utils.convert_to_list(output_size, 2, 'output_size') output_size = convert_to_list(output_size, 2, 'output_size')
else: else:
output_size = list(output_size) output_size = list(output_size)
if output_size[0] is None: if output_size[0] is None:
...@@ -2006,7 +2012,7 @@ def adaptive_max_pool3d(x, output_size, return_mask=False, name=None): ...@@ -2006,7 +2012,7 @@ def adaptive_max_pool3d(x, output_size, return_mask=False, name=None):
in_l, in_h, in_w = x.shape[2:5] in_l, in_h, in_w = x.shape[2:5]
if isinstance(output_size, int): if isinstance(output_size, int):
output_size = utils.convert_to_list(output_size, 3, 'output_size') output_size = convert_to_list(output_size, 3, 'output_size')
else: else:
output_size = list(output_size) output_size = list(output_size)
if output_size[0] is None: if output_size[0] is None:
......
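A brief sketch of the kernel-size/padding normalisation pattern the pooling functions above use, assuming convert_to_list keeps its old contract (an int is broadcast, a sequence is length-checked) and _is_symmetric_padding tests pad_before == pad_after per spatial dimension.

    from paddle.utils import _is_symmetric_padding, convert_to_list

    kernel_size = convert_to_list(3, 2, 'pool_size')       # -> [3, 3] (assumed broadcast contract)
    padding = convert_to_list([1, 1, 2, 2], 4, 'padding')  # -> [1, 1, 2, 2]
    if _is_symmetric_padding(padding, 2):                  # pad_before == pad_after per dim
        padding = padding[0::2]                            # -> [1, 2]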
...@@ -23,7 +23,7 @@ from ...device import ( ...@@ -23,7 +23,7 @@ from ...device import (
is_compiled_with_cuda, is_compiled_with_cuda,
is_compiled_with_rocm, is_compiled_with_rocm,
) )
from ...fluid.layers import utils from ...utils import convert_to_list
from .. import Layer from .. import Layer
from .. import functional as F from .. import functional as F
from ..functional.conv import _update_padding_nd from ..functional.conv import _update_padding_nd
...@@ -110,11 +110,9 @@ class _ConvNd(Layer): ...@@ -110,11 +110,9 @@ class _ConvNd(Layer):
else: else:
self._channel_dim = 1 self._channel_dim = 1
self._stride = utils.convert_to_list(stride, dims, 'stride') self._stride = convert_to_list(stride, dims, 'stride')
self._dilation = utils.convert_to_list(dilation, dims, 'dilation') self._dilation = convert_to_list(dilation, dims, 'dilation')
self._kernel_size = utils.convert_to_list( self._kernel_size = convert_to_list(kernel_size, dims, 'kernel_size')
kernel_size, dims, 'kernel_size'
)
self._padding = padding self._padding = padding
self._padding_mode = padding_mode self._padding_mode = padding_mode
self.output_padding = output_padding self.output_padding = output_padding
...@@ -133,9 +131,7 @@ class _ConvNd(Layer): ...@@ -133,9 +131,7 @@ class _ConvNd(Layer):
raise ValueError("in_channels must be divisible by groups.") raise ValueError("in_channels must be divisible by groups.")
if padding_mode in {'reflect', 'replicate', 'circular'}: if padding_mode in {'reflect', 'replicate', 'circular'}:
_paired_padding = utils.convert_to_list( _paired_padding = convert_to_list(padding, dims, 'padding')
padding, dims, 'padding'
)
self._reversed_padding_repeated_twice = _reverse_repeat_list( self._reversed_padding_repeated_twice = _reverse_repeat_list(
_paired_padding, 2 _paired_padding, 2
) )
......
...@@ -28,8 +28,7 @@ from paddle.fluid.framework import ( ...@@ -28,8 +28,7 @@ from paddle.fluid.framework import (
in_dygraph_mode, in_dygraph_mode,
program_guard, program_guard,
) )
from paddle.fluid.layers import control_flow, utils from paddle.fluid.layers import control_flow
from paddle.fluid.layers.utils import flatten, map_structure
from paddle.framework import core from paddle.framework import core
from paddle.nn import Layer from paddle.nn import Layer
from paddle.nn import functional as F from paddle.nn import functional as F
...@@ -159,7 +158,7 @@ def _rnn_dynamic_graph( ...@@ -159,7 +158,7 @@ def _rnn_dynamic_graph(
**kwargs **kwargs
): ):
time_step_index = 0 if time_major else 1 time_step_index = 0 if time_major else 1
flat_inputs = flatten(inputs) flat_inputs = paddle.utils.flatten(inputs)
time_steps = flat_inputs[0].shape[time_step_index] time_steps = flat_inputs[0].shape[time_step_index]
if initial_states is None: if initial_states is None:
...@@ -168,7 +167,7 @@ def _rnn_dynamic_graph( ...@@ -168,7 +167,7 @@ def _rnn_dynamic_graph(
) )
if not time_major: if not time_major:
inputs = map_structure(_transpose_batch_time, inputs) inputs = paddle.utils.map_structure(_transpose_batch_time, inputs)
if sequence_length is not None: if sequence_length is not None:
mask = paddle.static.nn.sequence_lod.sequence_mask( mask = paddle.static.nn.sequence_lod.sequence_mask(
...@@ -177,7 +176,9 @@ def _rnn_dynamic_graph( ...@@ -177,7 +176,9 @@ def _rnn_dynamic_graph(
mask = paddle.transpose(mask, [1, 0]) mask = paddle.transpose(mask, [1, 0])
if is_reverse: if is_reverse:
inputs = map_structure(lambda x: paddle.reverse(x, axis=[0]), inputs) inputs = paddle.utils.map_structure(
lambda x: paddle.reverse(x, axis=[0]), inputs
)
mask = ( mask = (
paddle.reverse(mask, axis=[0]) paddle.reverse(mask, axis=[0])
if sequence_length is not None if sequence_length is not None
...@@ -187,27 +188,27 @@ def _rnn_dynamic_graph( ...@@ -187,27 +188,27 @@ def _rnn_dynamic_graph(
states = initial_states states = initial_states
outputs = [] outputs = []
for i in range(time_steps): for i in range(time_steps):
step_inputs = map_structure(lambda x: x[i], inputs) step_inputs = paddle.utils.map_structure(lambda x: x[i], inputs)
step_outputs, new_states = cell(step_inputs, states, **kwargs) step_outputs, new_states = cell(step_inputs, states, **kwargs)
if sequence_length is not None: if sequence_length is not None:
new_states = map_structure( new_states = paddle.utils.map_structure(
partial(_maybe_copy, step_mask=mask[i]), states, new_states partial(_maybe_copy, step_mask=mask[i]), states, new_states
) )
states = new_states states = new_states
outputs = ( outputs = (
map_structure(lambda x: ArrayWrapper(x), step_outputs) paddle.utils.map_structure(lambda x: ArrayWrapper(x), step_outputs)
if i == 0 if i == 0
else map_structure( else paddle.utils.map_structure(
lambda x, x_array: x_array.append(x), step_outputs, outputs lambda x, x_array: x_array.append(x), step_outputs, outputs
) )
) )
final_outputs = map_structure( final_outputs = paddle.utils.map_structure(
lambda x: paddle.stack(x.array, axis=time_step_index), outputs lambda x: paddle.stack(x.array, axis=time_step_index), outputs
) )
if is_reverse: if is_reverse:
final_outputs = map_structure( final_outputs = paddle.utils.map_structure(
lambda x: paddle.reverse(x, axis=time_step_index), final_outputs lambda x: paddle.reverse(x, axis=time_step_index), final_outputs
) )
...@@ -249,21 +250,23 @@ def _rnn_static_graph( ...@@ -249,21 +250,23 @@ def _rnn_static_graph(
initial_states = cell.get_initial_states( initial_states = cell.get_initial_states(
batch_ref=inputs, batch_dim_idx=1 if time_major else 0 batch_ref=inputs, batch_dim_idx=1 if time_major else 0
) )
initial_states = map_structure(_switch_grad, initial_states) initial_states = paddle.utils.map_structure(_switch_grad, initial_states)
if not time_major: if not time_major:
inputs = map_structure(_transpose_batch_time, inputs) inputs = paddle.utils.map_structure(_transpose_batch_time, inputs)
max_seq_len = paddle.shape(flatten(inputs)[0])[0] max_seq_len = paddle.shape(paddle.utils.flatten(inputs)[0])[0]
if sequence_length: if sequence_length:
mask = paddle.static.nn.sequence_lod.sequence_mask( mask = paddle.static.nn.sequence_lod.sequence_mask(
sequence_length, sequence_length,
maxlen=max_seq_len, maxlen=max_seq_len,
dtype=flatten(initial_states)[0].dtype, dtype=paddle.utils.flatten(initial_states)[0].dtype,
) )
mask = paddle.transpose(mask, [1, 0]) mask = paddle.transpose(mask, [1, 0])
if is_reverse: if is_reverse:
inputs = map_structure(lambda x: paddle.reverse(x, axis=[0]), inputs) inputs = paddle.utils.map_structure(
lambda x: paddle.reverse(x, axis=[0]), inputs
)
mask = paddle.reverse(mask, axis=[0]) if sequence_length else None mask = paddle.reverse(mask, axis=[0]) if sequence_length else None
with paddle.fluid.framework.device_guard("cpu"): with paddle.fluid.framework.device_guard("cpu"):
...@@ -274,13 +277,15 @@ def _rnn_static_graph( ...@@ -274,13 +277,15 @@ def _rnn_static_graph(
cond = start_i < end cond = start_i < end
while_op = control_flow.While(cond) while_op = control_flow.While(cond)
out_array = paddle.tensor.create_array(dtype=flatten(inputs)[0].dtype) out_array = paddle.tensor.create_array(
dtype=paddle.utils.flatten(inputs)[0].dtype
)
init_array = map_structure( init_array = paddle.utils.map_structure(
lambda x: paddle.tensor.create_array(dtype=x.dtype), initial_states lambda x: paddle.tensor.create_array(dtype=x.dtype), initial_states
) )
map_structure( paddle.utils.map_structure(
lambda x, y: paddle.tensor.array_write(x, start_i, y), lambda x, y: paddle.tensor.array_write(x, start_i, y),
initial_states, initial_states,
init_array, init_array,
...@@ -290,13 +295,13 @@ def _rnn_static_graph( ...@@ -290,13 +295,13 @@ def _rnn_static_graph(
step_in = inputs[start_i] step_in = inputs[start_i]
# step_in = paddle.fluid.layers.Print( step_in, message="step in") # step_in = paddle.fluid.layers.Print( step_in, message="step in")
pre_state = map_structure( pre_state = paddle.utils.map_structure(
lambda x: paddle.tensor.array_read(x, start_i), init_array lambda x: paddle.tensor.array_read(x, start_i), init_array
) )
# pre_state = paddle.fluid.layers.Print( pre_state, message="pre") # pre_state = paddle.fluid.layers.Print( pre_state, message="pre")
outputs, new_states = cell(step_in, pre_state, **kwargs) outputs, new_states = cell(step_in, pre_state, **kwargs)
assert isinstance(outputs, paddle.fluid.framework.Variable) assert isinstance(outputs, paddle.fluid.framework.Variable)
utils.assert_same_structure(new_states, pre_state) paddle.utils.assert_same_structure(new_states, pre_state)
if sequence_length: if sequence_length:
step_mask = paddle.unsqueeze(mask[start_i], 1) step_mask = paddle.unsqueeze(mask[start_i], 1)
# paddle.fluid.layers.Print( step_mask, message="mask") # paddle.fluid.layers.Print( step_mask, message="mask")
...@@ -304,7 +309,7 @@ def _rnn_static_graph( ...@@ -304,7 +309,7 @@ def _rnn_static_graph(
# partial(_maybe_copy, step_mask=step_mask), # partial(_maybe_copy, step_mask=step_mask),
# pre_state, new_states # pre_state, new_states
# ) # )
new_states = map_structure( new_states = paddle.utils.map_structure(
lambda x, y: (x * step_mask + y * (1.0 - step_mask)), lambda x, y: (x * step_mask + y * (1.0 - step_mask)),
new_states, new_states,
pre_state, pre_state,
...@@ -315,7 +320,7 @@ def _rnn_static_graph( ...@@ -315,7 +320,7 @@ def _rnn_static_graph(
with paddle.fluid.framework.device_guard("cpu"): with paddle.fluid.framework.device_guard("cpu"):
start_i = paddle.tensor.increment(x=start_i, value=1) start_i = paddle.tensor.increment(x=start_i, value=1)
map_structure( paddle.utils.map_structure(
lambda x, y: paddle.tensor.array_write(x, start_i, y), lambda x, y: paddle.tensor.array_write(x, start_i, y),
new_states, new_states,
init_array, init_array,
...@@ -327,20 +332,22 @@ def _rnn_static_graph( ...@@ -327,20 +332,22 @@ def _rnn_static_graph(
out, _ = tensor_array_to_tensor(out_array, axis=0, use_stack=True) out, _ = tensor_array_to_tensor(out_array, axis=0, use_stack=True)
all_state = map_structure( all_state = paddle.utils.map_structure(
lambda x: tensor_array_to_tensor(x, axis=0, use_stack=True)[0], lambda x: tensor_array_to_tensor(x, axis=0, use_stack=True)[0],
init_array, init_array,
) )
final_outputs = out final_outputs = out
final_states = map_structure(lambda x: x[-1], all_state) final_states = paddle.utils.map_structure(lambda x: x[-1], all_state)
if is_reverse: if is_reverse:
final_outputs = map_structure( final_outputs = paddle.utils.map_structure(
lambda x: paddle.reverse(x, axis=[0]), final_outputs lambda x: paddle.reverse(x, axis=[0]), final_outputs
) )
if not time_major: if not time_major:
final_outputs = map_structure(_transpose_batch_time, final_outputs) final_outputs = paddle.utils.map_structure(
_transpose_batch_time, final_outputs
)
return (final_outputs, final_states) return (final_outputs, final_states)
...@@ -438,7 +445,7 @@ def birnn( ...@@ -438,7 +445,7 @@ def birnn(
**kwargs **kwargs
) )
outputs = map_structure( outputs = paddle.utils.map_structure(
lambda x, y: paddle.concat([x, y], -1), outputs_fw, outputs_bw lambda x, y: paddle.concat([x, y], -1), outputs_fw, outputs_bw
) )
...@@ -532,9 +539,9 @@ def concat_states(states, bidirectional=False, state_components=1): ...@@ -532,9 +539,9 @@ def concat_states(states, bidirectional=False, state_components=1):
""" """
if state_components == 1: if state_components == 1:
return paddle.stack(flatten(states)) return paddle.stack(paddle.utils.flatten(states))
else: else:
states = flatten(states) states = paddle.utils.flatten(states)
componnets = [] componnets = []
for i in range(state_components): for i in range(state_components):
componnets.append(states[i::state_components]) componnets.append(states[i::state_components])
...@@ -582,7 +589,7 @@ class RNNCellBase(Layer): ...@@ -582,7 +589,7 @@ class RNNCellBase(Layer):
packed in the same structure as `shape` and `type` does. packed in the same structure as `shape` and `type` does.
""" """
# TODO: use inputs and batch_size # TODO: use inputs and batch_size
batch_ref = flatten(batch_ref)[0] batch_ref = paddle.utils.flatten(batch_ref)[0]
def _is_shape_sequence(seq): def _is_shape_sequence(seq):
"""For shape, list/tuple of integer is the finest-grained objection""" """For shape, list/tuple of integer is the finest-grained objection"""
...@@ -602,21 +609,25 @@ class RNNCellBase(Layer): ...@@ -602,21 +609,25 @@ class RNNCellBase(Layer):
# nested structure of shapes # nested structure of shapes
states_shapes = self.state_shape if shape is None else shape states_shapes = self.state_shape if shape is None else shape
is_sequence_ori = utils.is_sequence is_sequence_ori = paddle.utils.layers_utils.is_sequence
utils.is_sequence = _is_shape_sequence paddle.utils.layers_utils.is_sequence = _is_shape_sequence
states_shapes = map_structure(lambda shape: Shape(shape), states_shapes) states_shapes = paddle.utils.map_structure(
utils.is_sequence = is_sequence_ori lambda shape: Shape(shape), states_shapes
)
paddle.utils.layers_utils.is_sequence = is_sequence_ori
# nested structure of dtypes # nested structure of dtypes
try: try:
states_dtypes = self.state_dtype if dtype is None else dtype states_dtypes = self.state_dtype if dtype is None else dtype
except NotImplementedError: except NotImplementedError:
states_dtypes = framework.get_default_dtype() states_dtypes = framework.get_default_dtype()
if len(flatten(states_dtypes)) == 1: if len(paddle.utils.flatten(states_dtypes)) == 1:
dtype = flatten(states_dtypes)[0] dtype = paddle.utils.flatten(states_dtypes)[0]
states_dtypes = map_structure(lambda shape: dtype, states_shapes) states_dtypes = paddle.utils.map_structure(
lambda shape: dtype, states_shapes
)
init_states = map_structure( init_states = paddle.utils.map_structure(
lambda shape, dtype: paddle.fluid.layers.fill_constant_batch_size_like( lambda shape, dtype: paddle.fluid.layers.fill_constant_batch_size_like(
input=batch_ref, input=batch_ref,
shape=shape.shape, shape=shape.shape,
......
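The temporary override of is_sequence in get_initial_states above patches paddle.utils.layers_utils rather than the name re-exported from paddle.utils: the nest helpers resolve is_sequence from their defining module, so rebinding only the re-export would leave them unaffected. A hedged sketch of that pattern, assuming flatten looks is_sequence up from layers_utils as the code above does:

    import paddle

    nest_mod = paddle.utils.layers_utils

    def _is_shape_sequence(seq):
        # for this sketch, a flat list/tuple of ints counts as a single shape leaf
        return isinstance(seq, (list, tuple)) and not all(isinstance(x, int) for x in seq)

    original = nest_mod.is_sequence
    nest_mod.is_sequence = _is_shape_sequence  # patch the defining module, not the re-export
    try:
        leaves = paddle.utils.flatten([[16, 32], [16, 32]])  # two shape leaves, not four ints
    finally:
        nest_mod.is_sequence = original
    print(leaves)  # expected: [[16, 32], [16, 32]]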
...@@ -16,8 +16,8 @@ __all__ = [] ...@@ -16,8 +16,8 @@ __all__ = []
from paddle import _C_ops, in_dynamic_mode from paddle import _C_ops, in_dynamic_mode
from paddle.fluid.layer_helper import LayerHelper from paddle.fluid.layer_helper import LayerHelper
from paddle.fluid.layers.utils import convert_to_list
from paddle.nn.functional.conv import _update_padding_nd from paddle.nn.functional.conv import _update_padding_nd
from paddle.utils import convert_to_list
from ...binary import add from ...binary import add
......
...@@ -13,8 +13,8 @@ ...@@ -13,8 +13,8 @@
# limitations under the License. # limitations under the License.
from paddle import _C_ops, in_dynamic_mode from paddle import _C_ops, in_dynamic_mode
from paddle.fluid.layers import utils
from paddle.nn.functional.pooling import _update_padding_nd from paddle.nn.functional.pooling import _update_padding_nd
from paddle.utils import convert_to_list
__all__ = [] __all__ = []
...@@ -82,11 +82,11 @@ def max_pool3d( ...@@ -82,11 +82,11 @@ def max_pool3d(
data_format == 'NDHWC' data_format == 'NDHWC'
), "Currently, sparse.max_pool3d only support data format of 'NDHWC'" ), "Currently, sparse.max_pool3d only support data format of 'NDHWC'"
kernel_size = utils.convert_to_list(kernel_size, 3, 'pool_size') kernel_size = convert_to_list(kernel_size, 3, 'pool_size')
if stride is None: if stride is None:
stride = kernel_size stride = kernel_size
else: else:
stride = utils.convert_to_list(stride, 3, 'pool_stride') stride = convert_to_list(stride, 3, 'pool_stride')
channel_last = True channel_last = True
......
...@@ -14,10 +14,10 @@ ...@@ -14,10 +14,10 @@
import numpy as np import numpy as np
from paddle.fluid.layers import utils
from paddle.nn import Layer from paddle.nn import Layer
from paddle.nn.functional.conv import _update_padding_nd from paddle.nn.functional.conv import _update_padding_nd
from paddle.nn.initializer import Normal from paddle.nn.initializer import Normal
from paddle.utils import convert_to_list
from .. import functional as F from .. import functional as F
...@@ -70,11 +70,9 @@ class _Conv3D(Layer): ...@@ -70,11 +70,9 @@ class _Conv3D(Layer):
channel_last = data_format == "NDHWC" channel_last = data_format == "NDHWC"
dims = 3 dims = 3
self._stride = utils.convert_to_list(stride, dims, 'stride') self._stride = convert_to_list(stride, dims, 'stride')
self._dilation = utils.convert_to_list(dilation, dims, 'dilation') self._dilation = convert_to_list(dilation, dims, 'dilation')
self._kernel_size = utils.convert_to_list( self._kernel_size = convert_to_list(kernel_size, dims, 'kernel_size')
kernel_size, dims, 'kernel_size'
)
self._padding = padding self._padding = padding
self._padding_mode = padding_mode self._padding_mode = padding_mode
self._updated_padding, self._padding_algorithm = _update_padding_nd( self._updated_padding, self._padding_algorithm = _update_padding_nd(
......
...@@ -23,7 +23,6 @@ from paddle.common_ops_import import ( ...@@ -23,7 +23,6 @@ from paddle.common_ops_import import (
LayerHelper, LayerHelper,
check_type, check_type,
check_variable_and_dtype, check_variable_and_dtype,
utils,
) )
from paddle.fluid import core from paddle.fluid import core
from paddle.fluid.data_feeder import check_dtype from paddle.fluid.data_feeder import check_dtype
...@@ -953,9 +952,9 @@ def conv2d( ...@@ -953,9 +952,9 @@ def conv2d(
helper = LayerHelper(l_type, **locals()) helper = LayerHelper(l_type, **locals())
dtype = helper.input_dtype() dtype = helper.input_dtype()
filter_size = utils.convert_to_list(filter_size, 2, 'filter_size') filter_size = paddle.utils.convert_to_list(filter_size, 2, 'filter_size')
stride = utils.convert_to_list(stride, 2, 'stride') stride = paddle.utils.convert_to_list(stride, 2, 'stride')
dilation = utils.convert_to_list(dilation, 2, 'dilation') dilation = paddle.utils.convert_to_list(dilation, 2, 'dilation')
# padding # padding
def _update_padding(padding, data_format): def _update_padding(padding, data_format):
...@@ -981,12 +980,12 @@ def conv2d( ...@@ -981,12 +980,12 @@ def conv2d(
) )
padding = padding[1:3] padding = padding[1:3]
padding = [ele for a_list in padding for ele in a_list] padding = [ele for a_list in padding for ele in a_list]
padding = utils.convert_to_list(padding, 4, 'padding') padding = paddle.utils.convert_to_list(padding, 4, 'padding')
if utils._is_symmetric_padding(padding, 2): if paddle.utils._is_symmetric_padding(padding, 2):
padding = [padding[0], padding[2]] padding = [padding[0], padding[2]]
else: else:
padding = utils.convert_to_list(padding, 2, 'padding') padding = paddle.utils.convert_to_list(padding, 2, 'padding')
return padding return padding
...@@ -1250,9 +1249,9 @@ def conv3d( ...@@ -1250,9 +1249,9 @@ def conv3d(
) )
num_filter_channels = num_channels // groups num_filter_channels = num_channels // groups
filter_size = utils.convert_to_list(filter_size, 3, 'filter_size') filter_size = paddle.utils.convert_to_list(filter_size, 3, 'filter_size')
stride = utils.convert_to_list(stride, 3, 'stride') stride = paddle.utils.convert_to_list(stride, 3, 'stride')
dilation = utils.convert_to_list(dilation, 3, 'dilation') dilation = paddle.utils.convert_to_list(dilation, 3, 'dilation')
def _update_padding(padding, data_format): def _update_padding(padding, data_format):
def is_list_or_tuple(ele): def is_list_or_tuple(ele):
...@@ -1277,15 +1276,15 @@ def conv3d( ...@@ -1277,15 +1276,15 @@ def conv3d(
) )
padding = padding[1:4] padding = padding[1:4]
padding = [ele for a_list in padding for ele in a_list] padding = [ele for a_list in padding for ele in a_list]
padding = utils.convert_to_list(padding, 6, 'padding') padding = paddle.utils.convert_to_list(padding, 6, 'padding')
if utils._is_symmetric_padding(padding, 3): if paddle.utils._is_symmetric_padding(padding, 3):
padding = [padding[0], padding[2], padding[4]] padding = [padding[0], padding[2], padding[4]]
elif is_list_or_tuple(padding) and len(padding) == 6: elif is_list_or_tuple(padding) and len(padding) == 6:
padding = utils.convert_to_list(padding, 6, 'padding') padding = paddle.utils.convert_to_list(padding, 6, 'padding')
if utils._is_symmetric_padding(padding, 3): if paddle.utils._is_symmetric_padding(padding, 3):
padding = [padding[0], padding[2], padding[4]] padding = [padding[0], padding[2], padding[4]]
else: else:
padding = utils.convert_to_list(padding, 3, 'padding') padding = paddle.utils.convert_to_list(padding, 3, 'padding')
return padding return padding
...@@ -1571,8 +1570,8 @@ def conv2d_transpose( ...@@ -1571,8 +1570,8 @@ def conv2d_transpose(
if not isinstance(input, Variable): if not isinstance(input, Variable):
raise TypeError("Input of conv2d_transpose must be Tensor") raise TypeError("Input of conv2d_transpose must be Tensor")
stride = utils.convert_to_list(stride, 2, 'stride') stride = paddle.utils.convert_to_list(stride, 2, 'stride')
dilation = utils.convert_to_list(dilation, 2, 'dilation') dilation = paddle.utils.convert_to_list(dilation, 2, 'dilation')
if not isinstance(use_cudnn, bool): if not isinstance(use_cudnn, bool):
raise ValueError("use_cudnn should be True or False") raise ValueError("use_cudnn should be True or False")
...@@ -1600,9 +1599,9 @@ def conv2d_transpose( ...@@ -1600,9 +1599,9 @@ def conv2d_transpose(
) )
padding = padding[1:3] padding = padding[1:3]
padding = [ele for a_list in padding for ele in a_list] padding = [ele for a_list in padding for ele in a_list]
padding = utils.convert_to_list(padding, 4, 'padding') padding = paddle.utils.convert_to_list(padding, 4, 'padding')
else: else:
padding = utils.convert_to_list(padding, 2, 'padding') padding = paddle.utils.convert_to_list(padding, 2, 'padding')
padding = [padding[0], padding[0], padding[1], padding[1]] padding = [padding[0], padding[0], padding[1], padding[1]]
return padding return padding
...@@ -1626,12 +1625,16 @@ def conv2d_transpose( ...@@ -1626,12 +1625,16 @@ def conv2d_transpose(
if output_size is None: if output_size is None:
output_size = [] output_size = []
elif isinstance(output_size, (list, tuple)): elif isinstance(output_size, (list, tuple)):
if utils._contain_var(output_size): if paddle.utils._contain_var(output_size):
output_size = utils._convert_to_tensor_list(output_size) output_size = paddle.utils._convert_to_tensor_list(output_size)
else: else:
output_size = utils.convert_to_list(output_size, 2, 'output_size') output_size = paddle.utils.convert_to_list(
output_size, 2, 'output_size'
)
elif isinstance(output_size, int): elif isinstance(output_size, int):
output_size = utils.convert_to_list(output_size, 2, 'output_size') output_size = paddle.utils.convert_to_list(
output_size, 2, 'output_size'
)
elif isinstance(output_size, Variable): elif isinstance(output_size, Variable):
check_dtype( check_dtype(
output_size.dtype, output_size.dtype,
...@@ -1655,16 +1658,16 @@ def conv2d_transpose( ...@@ -1655,16 +1658,16 @@ def conv2d_transpose(
if output_size is []: if output_size is []:
raise ValueError("output_size must be set when filter_size is None") raise ValueError("output_size must be set when filter_size is None")
if not _non_static_mode(): if not _non_static_mode():
if isinstance(output_size, Variable) or utils._contain_var( if isinstance(output_size, Variable) or paddle.utils._contain_var(
output_size output_size
): ):
raise ValueError( raise ValueError(
"filter_size should not be None when output_size is Tensor or contain Tensor in static graph mode." "filter_size should not be None when output_size is Tensor or contain Tensor in static graph mode."
) )
else: else:
output_size = utils.convert_shape_to_list(output_size) output_size = paddle.utils.convert_shape_to_list(output_size)
if len(output_size) == 1: if len(output_size) == 1:
output_size = utils.convert_to_list( output_size = paddle.utils.convert_to_list(
output_size[0], 2, 'output_size' output_size[0], 2, 'output_size'
) )
...@@ -1687,11 +1690,11 @@ def conv2d_transpose( ...@@ -1687,11 +1690,11 @@ def conv2d_transpose(
) // dilation[1] + 1 ) // dilation[1] + 1
filter_size = [filter_size_h, filter_size_w] filter_size = [filter_size_h, filter_size_w]
else: else:
filter_size = utils.convert_to_list( filter_size = paddle.utils.convert_to_list(
filter_size, 2, 'conv2d_transpose.filter_size' filter_size, 2, 'conv2d_transpose.filter_size'
) )
if len(padding) == 4 and utils._is_symmetric_padding(padding, 2): if len(padding) == 4 and paddle.utils._is_symmetric_padding(padding, 2):
padding = [padding[0], padding[2]] padding = [padding[0], padding[2]]
if groups is None: if groups is None:
...@@ -1938,8 +1941,8 @@ def conv3d_transpose( ...@@ -1938,8 +1941,8 @@ def conv3d_transpose(
input.shape[1] if data_format == 'NCDHW' else input.shape[-1] input.shape[1] if data_format == 'NCDHW' else input.shape[-1]
) )
stride = utils.convert_to_list(stride, 3, 'stride') stride = paddle.utils.convert_to_list(stride, 3, 'stride')
dilation = utils.convert_to_list(dilation, 3, 'dilation') dilation = paddle.utils.convert_to_list(dilation, 3, 'dilation')
if not isinstance(use_cudnn, bool): if not isinstance(use_cudnn, bool):
raise ValueError("use_cudnn should be True or False") raise ValueError("use_cudnn should be True or False")
...@@ -1967,13 +1970,13 @@ def conv3d_transpose( ...@@ -1967,13 +1970,13 @@ def conv3d_transpose(
) )
padding = padding[1:4] padding = padding[1:4]
padding = [ele for a_list in padding for ele in a_list] padding = [ele for a_list in padding for ele in a_list]
padding = utils.convert_to_list(padding, 6, 'padding') padding = paddle.utils.convert_to_list(padding, 6, 'padding')
elif is_list_or_tuple(padding) and len(padding) == 6: elif is_list_or_tuple(padding) and len(padding) == 6:
padding = utils.convert_to_list(padding, 6, 'padding') padding = paddle.utils.convert_to_list(padding, 6, 'padding')
else: else:
padding = utils.convert_to_list(padding, 3, 'padding') padding = paddle.utils.convert_to_list(padding, 3, 'padding')
padding = [ padding = [
padding[0], padding[0],
padding[0], padding[0],
...@@ -2034,17 +2037,19 @@ def conv3d_transpose( ...@@ -2034,17 +2037,19 @@ def conv3d_transpose(
) // dilation[2] + 1 ) // dilation[2] + 1
filter_size = [filter_size_d, filter_size_h, filter_size_w] filter_size = [filter_size_d, filter_size_h, filter_size_w]
else: else:
filter_size = utils.convert_to_list( filter_size = paddle.utils.convert_to_list(
filter_size, 3, 'conv3d_transpose.filter_size' filter_size, 3, 'conv3d_transpose.filter_size'
) )
if len(padding) == 6 and utils._is_symmetric_padding(padding, 3): if len(padding) == 6 and paddle.utils._is_symmetric_padding(padding, 3):
padding = [padding[0], padding[2], padding[4]] padding = [padding[0], padding[2], padding[4]]
if output_size is None: if output_size is None:
output_size = [] output_size = []
elif isinstance(output_size, (list, tuple, int)): elif isinstance(output_size, (list, tuple, int)):
output_size = utils.convert_to_list(output_size, 3, 'output_size') output_size = paddle.utils.convert_to_list(
output_size, 3, 'output_size'
)
else: else:
raise ValueError("output_size should be int, list[int] or tuple[int]") raise ValueError("output_size should be int, list[int] or tuple[int]")
...@@ -2275,10 +2280,10 @@ def deformable_conv( ...@@ -2275,10 +2280,10 @@ def deformable_conv(
raise ValueError("num_channels must be divisible by groups.") raise ValueError("num_channels must be divisible by groups.")
num_filter_channels = num_channels // groups num_filter_channels = num_channels // groups
filter_size = utils.convert_to_list(filter_size, 2, 'filter_size') filter_size = paddle.utils.convert_to_list(filter_size, 2, 'filter_size')
stride = utils.convert_to_list(stride, 2, 'stride') stride = paddle.utils.convert_to_list(stride, 2, 'stride')
padding = utils.convert_to_list(padding, 2, 'padding') padding = paddle.utils.convert_to_list(padding, 2, 'padding')
dilation = utils.convert_to_list(dilation, 2, 'dilation') dilation = paddle.utils.convert_to_list(dilation, 2, 'dilation')
input_shape = input.shape input_shape = input.shape
filter_shape = [num_filters, int(num_filter_channels)] + filter_size filter_shape = [num_filters, int(num_filter_channels)] + filter_size
......
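The transpose-convolution hunks above keep the mixed int/Tensor handling for output_size. A sketch of that flow, assuming the private helpers keep their old behaviour (_contain_var detects a Tensor inside a Python list, _convert_to_tensor_list turns the mixed list into op-feedable tensors); out_h is a hypothetical placeholder name.

    import paddle

    paddle.enable_static()
    out_h = paddle.static.data(name='out_h', shape=[1], dtype='int32')  # hypothetical runtime dim
    output_size = [out_h, 28]                                           # mix of Tensor and int

    if paddle.utils._contain_var(output_size):
        # assumed behaviour: ints are materialised as tensors, Tensors kept as-is
        output_size = paddle.utils._convert_to_tensor_list(output_size)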
...@@ -28,7 +28,7 @@ from paddle.fluid.framework import Operator, Program, Variable ...@@ -28,7 +28,7 @@ from paddle.fluid.framework import Operator, Program, Variable
# Temporary solution, it will be deleted later # Temporary solution, it will be deleted later
from paddle.fluid.layers.control_flow import ConditionalBlock, select_input from paddle.fluid.layers.control_flow import ConditionalBlock, select_input
from paddle.fluid.layers.utils import ( from paddle.utils import (
assert_same_structure, assert_same_structure,
copy_mutable_vars, copy_mutable_vars,
flatten, flatten,
......
...@@ -35,7 +35,6 @@ from ..fluid.framework import ( ...@@ -35,7 +35,6 @@ from ..fluid.framework import (
_in_eager_without_dygraph_check, _in_eager_without_dygraph_check,
device_guard, device_guard,
) )
from ..fluid.layers import utils
from ..fluid.param_attr import ParamAttr from ..fluid.param_attr import ParamAttr
from ..framework import ( from ..framework import (
LayerHelper, LayerHelper,
...@@ -1775,7 +1774,7 @@ def empty(shape, dtype=None, name=None): ...@@ -1775,7 +1774,7 @@ def empty(shape, dtype=None, name=None):
dtype = convert_dtype(dtype) dtype = convert_dtype(dtype)
if in_dygraph_mode(): if in_dygraph_mode():
shape = utils.convert_shape_to_list(shape) shape = paddle.utils.convert_shape_to_list(shape)
out = _C_ops.empty( out = _C_ops.empty(
shape, convert_np_dtype_to_dtype_(dtype), _current_expected_place() shape, convert_np_dtype_to_dtype_(dtype), _current_expected_place()
) )
...@@ -1797,7 +1796,7 @@ def empty(shape, dtype=None, name=None): ...@@ -1797,7 +1796,7 @@ def empty(shape, dtype=None, name=None):
check_dtype(shape.dtype, 'shape', ['int32', 'int64'], 'empty') check_dtype(shape.dtype, 'shape', ['int32', 'int64'], 'empty')
attrs = {} attrs = {}
utils.get_shape_tensor_inputs( paddle.utils.get_shape_tensor_inputs(
inputs=inputs, attrs=attrs, shape=shape, op_type='empty' inputs=inputs, attrs=attrs, shape=shape, op_type='empty'
) )
...@@ -1874,7 +1873,7 @@ def empty_like(x, dtype=None, name=None): ...@@ -1874,7 +1873,7 @@ def empty_like(x, dtype=None, name=None):
attrs = {} attrs = {}
attrs['dtype'] = convert_np_dtype_to_dtype_(dtype) attrs['dtype'] = convert_np_dtype_to_dtype_(dtype)
shape = paddle.shape(x) shape = paddle.shape(x)
utils.get_shape_tensor_inputs( paddle.utils.get_shape_tensor_inputs(
inputs=inputs, attrs=attrs, shape=shape, op_type='empty_like' inputs=inputs, attrs=attrs, shape=shape, op_type='empty_like'
) )
......
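The public paddle.empty / paddle.empty_like entry points are not changed by this move; only their internal shape handling is rerouted through paddle.utils. A quick smoke test, assuming the public behavior stays as documented:

import paddle

x = paddle.empty(shape=[2, 3], dtype='float32')   # values are uninitialized
y = paddle.empty_like(x, dtype='int64')
print(x.shape, y.dtype)                           # [2, 3] paddle.int64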
...@@ -27,7 +27,6 @@ from ..fluid.data_feeder import ( ...@@ -27,7 +27,6 @@ from ..fluid.data_feeder import (
check_variable_and_dtype, check_variable_and_dtype,
convert_dtype, convert_dtype,
) )
from ..fluid.layers import utils
from ..framework import ( from ..framework import (
LayerHelper, LayerHelper,
convert_np_dtype_to_dtype_, convert_np_dtype_to_dtype_,
...@@ -376,10 +375,10 @@ def slice(input, axes, starts, ends): ...@@ -376,10 +375,10 @@ def slice(input, axes, starts, ends):
infer_flags = list(-1 for i in range(len(axes))) infer_flags = list(-1 for i in range(len(axes)))
elif isinstance(starts, (list, tuple)): elif isinstance(starts, (list, tuple)):
attrs['starts'] = [] attrs['starts'] = []
if utils._contain_var(starts): if paddle.utils._contain_var(starts):
inputs['StartsTensorList'] = utils._convert_to_tensor_list( inputs[
starts 'StartsTensorList'
) ] = paddle.utils._convert_to_tensor_list(starts)
for i, dim in enumerate(starts): for i, dim in enumerate(starts):
if isinstance(dim, Variable): if isinstance(dim, Variable):
attrs['starts'].append(-1) attrs['starts'].append(-1)
...@@ -396,8 +395,10 @@ def slice(input, axes, starts, ends): ...@@ -396,8 +395,10 @@ def slice(input, axes, starts, ends):
infer_flags = list(-1 for i in range(len(axes))) infer_flags = list(-1 for i in range(len(axes)))
elif isinstance(ends, (list, tuple)): elif isinstance(ends, (list, tuple)):
attrs['ends'] = [] attrs['ends'] = []
if utils._contain_var(ends): if paddle.utils._contain_var(ends):
inputs['EndsTensorList'] = utils._convert_to_tensor_list(ends) inputs['EndsTensorList'] = paddle.utils._convert_to_tensor_list(
ends
)
for i, dim in enumerate(ends): for i, dim in enumerate(ends):
if isinstance(dim, Variable): if isinstance(dim, Variable):
attrs['ends'].append(-1) attrs['ends'].append(-1)
...@@ -793,7 +794,7 @@ def crop(x, shape=None, offsets=None, name=None): ...@@ -793,7 +794,7 @@ def crop(x, shape=None, offsets=None, name=None):
offsets.stop_gradient = True offsets.stop_gradient = True
ipts['Offsets'] = offsets ipts['Offsets'] = offsets
attrs['offsets'] = [-1] * len(x.shape) attrs['offsets'] = [-1] * len(x.shape)
elif utils._contain_var(offsets): elif paddle.utils._contain_var(offsets):
new_offsets_tensor = [] new_offsets_tensor = []
offsets_attr = [] offsets_attr = []
for dim in offsets: for dim in offsets:
...@@ -817,7 +818,7 @@ def crop(x, shape=None, offsets=None, name=None): ...@@ -817,7 +818,7 @@ def crop(x, shape=None, offsets=None, name=None):
if isinstance(shape, Variable): if isinstance(shape, Variable):
shape.stop_gradient = True shape.stop_gradient = True
ipts['Shape'] = shape ipts['Shape'] = shape
elif utils._contain_var(shape): elif paddle.utils._contain_var(shape):
new_shape_tensor = [] new_shape_tensor = []
shape_attr = [] shape_attr = []
for dim_size in shape: for dim_size in shape:
...@@ -1942,7 +1943,7 @@ def split(x, num_or_sections, axis=0, name=None): ...@@ -1942,7 +1943,7 @@ def split(x, num_or_sections, axis=0, name=None):
dim = (len(input.shape) + dim) if dim < 0 else dim dim = (len(input.shape) + dim) if dim < 0 else dim
if isinstance(num_or_sections, (list, tuple)): if isinstance(num_or_sections, (list, tuple)):
if utils._contain_var(num_or_sections): if paddle.utils._contain_var(num_or_sections):
for index, item in enumerate(num_or_sections): for index, item in enumerate(num_or_sections):
if isinstance(item, Variable): if isinstance(item, Variable):
num_or_sections[index] = num_or_sections[index].numpy()[ num_or_sections[index] = num_or_sections[index].numpy()[
...@@ -2043,7 +2044,7 @@ def split(x, num_or_sections, axis=0, name=None): ...@@ -2043,7 +2044,7 @@ def split(x, num_or_sections, axis=0, name=None):
num_or_sections, num_or_sections,
) )
) )
if utils._contain_var(num_or_sections): if paddle.utils._contain_var(num_or_sections):
inputs['SectionsTensorList'] = _get_SectionsTensorList( inputs['SectionsTensorList'] = _get_SectionsTensorList(
num_or_sections num_or_sections
) )
...@@ -2213,8 +2214,8 @@ def squeeze(x, axis=None, name=None): ...@@ -2213,8 +2214,8 @@ def squeeze(x, axis=None, name=None):
axes.stop_gradient = True axes.stop_gradient = True
attrs["axes"] = axes attrs["axes"] = axes
elif isinstance(axes, (list, tuple)): elif isinstance(axes, (list, tuple)):
if utils._contain_var(axes): if paddle.utils._contain_var(axes):
attrs["axes"] = utils._convert_to_tensor_list(axes) attrs["axes"] = paddle.utils._convert_to_tensor_list(axes)
else: else:
attrs["axes"] = axes attrs["axes"] = axes
...@@ -2613,8 +2614,10 @@ def unsqueeze(x, axis, name=None): ...@@ -2613,8 +2614,10 @@ def unsqueeze(x, axis, name=None):
axes.stop_gradient = True axes.stop_gradient = True
inputs["AxesTensor"] = axes inputs["AxesTensor"] = axes
elif isinstance(axes, (list, tuple)): elif isinstance(axes, (list, tuple)):
if utils._contain_var(axes): if paddle.utils._contain_var(axes):
inputs["AxesTensorList"] = utils._convert_to_tensor_list(axes) inputs["AxesTensorList"] = paddle.utils._convert_to_tensor_list(
axes
)
else: else:
attrs["axes"] = axes attrs["axes"] = axes
...@@ -3197,10 +3200,10 @@ def tile(x, repeat_times, name=None): ...@@ -3197,10 +3200,10 @@ def tile(x, repeat_times, name=None):
attrs['repeat_times'] = [-1] attrs['repeat_times'] = [-1]
elif isinstance(repeat_times, (list, tuple)): elif isinstance(repeat_times, (list, tuple)):
attrs['repeat_times'] = get_attr_repeat_times(repeat_times) attrs['repeat_times'] = get_attr_repeat_times(repeat_times)
if utils._contain_var(repeat_times): if paddle.utils._contain_var(repeat_times):
inputs['repeat_times_tensor'] = utils._convert_to_tensor_list( inputs[
repeat_times 'repeat_times_tensor'
) ] = paddle.utils._convert_to_tensor_list(repeat_times)
dtype = helper.input_dtype(input_param_name='x') dtype = helper.input_dtype(input_param_name='x')
out = helper.create_variable_for_type_inference(dtype) out = helper.create_variable_for_type_inference(dtype)
...@@ -3351,10 +3354,10 @@ def broadcast_to(x, shape, name=None): ...@@ -3351,10 +3354,10 @@ def broadcast_to(x, shape, name=None):
inputs['Shape'] = shape inputs['Shape'] = shape
elif isinstance(shape, (list, tuple)): elif isinstance(shape, (list, tuple)):
attrs['shape'] = get_attr_expand_shape(shape) attrs['shape'] = get_attr_expand_shape(shape)
if utils._contain_var(shape): if paddle.utils._contain_var(shape):
inputs['expand_shapes_tensor'] = utils._convert_to_tensor_list( inputs[
shape 'expand_shapes_tensor'
) ] = paddle.utils._convert_to_tensor_list(shape)
dtype = helper.input_dtype(input_param_name='x') dtype = helper.input_dtype(input_param_name='x')
out = helper.create_variable_for_type_inference(dtype) out = helper.create_variable_for_type_inference(dtype)
...@@ -3445,10 +3448,10 @@ def expand(x, shape, name=None): ...@@ -3445,10 +3448,10 @@ def expand(x, shape, name=None):
inputs['Shape'] = shape inputs['Shape'] = shape
elif isinstance(shape, (list, tuple)): elif isinstance(shape, (list, tuple)):
attrs['shape'] = get_attr_expand_shape(shape) attrs['shape'] = get_attr_expand_shape(shape)
if utils._contain_var(shape): if paddle.utils._contain_var(shape):
inputs['expand_shapes_tensor'] = utils._convert_to_tensor_list( inputs[
shape 'expand_shapes_tensor'
) ] = paddle.utils._convert_to_tensor_list(shape)
dtype = helper.input_dtype(input_param_name='x') dtype = helper.input_dtype(input_param_name='x')
out = helper.create_variable_for_type_inference(dtype) out = helper.create_variable_for_type_inference(dtype)
...@@ -3602,8 +3605,10 @@ def reshape(x, shape, name=None): ...@@ -3602,8 +3605,10 @@ def reshape(x, shape, name=None):
inputs["Shape"] = shape inputs["Shape"] = shape
elif isinstance(shape, (list, tuple)): elif isinstance(shape, (list, tuple)):
attrs["shape"] = get_attr_shape(shape) attrs["shape"] = get_attr_shape(shape)
if utils._contain_var(shape): if paddle.utils._contain_var(shape):
inputs['ShapeTensor'] = utils._convert_to_tensor_list(shape) inputs['ShapeTensor'] = paddle.utils._convert_to_tensor_list(
shape
)
helper = LayerHelper("reshape2", **locals()) helper = LayerHelper("reshape2", **locals())
out = helper.create_variable_for_type_inference(dtype=x.dtype) out = helper.create_variable_for_type_inference(dtype=x.dtype)
...@@ -3894,7 +3899,7 @@ def strided_slice(x, axes, starts, ends, strides, name=None): ...@@ -3894,7 +3899,7 @@ def strided_slice(x, axes, starts, ends, strides, name=None):
inputs['StartsTensor'] = starts inputs['StartsTensor'] = starts
elif isinstance(starts, (list, tuple)): elif isinstance(starts, (list, tuple)):
attrs['starts'] = [] attrs['starts'] = []
if utils._contain_var(starts): if paddle.utils._contain_var(starts):
inputs['StartsTensorList'] = get_new_list_tensor(starts) inputs['StartsTensorList'] = get_new_list_tensor(starts)
for i, dim in enumerate(starts): for i, dim in enumerate(starts):
if isinstance(dim, Variable): if isinstance(dim, Variable):
...@@ -3911,7 +3916,7 @@ def strided_slice(x, axes, starts, ends, strides, name=None): ...@@ -3911,7 +3916,7 @@ def strided_slice(x, axes, starts, ends, strides, name=None):
inputs['EndsTensor'] = ends inputs['EndsTensor'] = ends
elif isinstance(ends, (list, tuple)): elif isinstance(ends, (list, tuple)):
attrs['ends'] = [] attrs['ends'] = []
if utils._contain_var(ends): if paddle.utils._contain_var(ends):
inputs['EndsTensorList'] = get_new_list_tensor(ends) inputs['EndsTensorList'] = get_new_list_tensor(ends)
for i, dim in enumerate(ends): for i, dim in enumerate(ends):
if isinstance(dim, Variable): if isinstance(dim, Variable):
...@@ -3928,7 +3933,7 @@ def strided_slice(x, axes, starts, ends, strides, name=None): ...@@ -3928,7 +3933,7 @@ def strided_slice(x, axes, starts, ends, strides, name=None):
inputs['StridesTensor'] = strides inputs['StridesTensor'] = strides
elif isinstance(strides, (list, tuple)): elif isinstance(strides, (list, tuple)):
attrs['strides'] = [] attrs['strides'] = []
if utils._contain_var(strides): if paddle.utils._contain_var(strides):
inputs['StridesTensorList'] = get_new_list_tensor(strides) inputs['StridesTensorList'] = get_new_list_tensor(strides)
for i, dim in enumerate(strides): for i, dim in enumerate(strides):
if isinstance(dim, Variable): if isinstance(dim, Variable):
......
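The _contain_var / _convert_to_tensor_list pair used by the static-graph branches above is private; only its import path changes here. An illustrative restatement of what the check does (a sketch, not the library source):

import paddle
from paddle.static import Variable

def has_tensor_element(seq):
    # mirrors _contain_var: True when any element is a static-graph Variable
    # rather than a plain Python int
    return any(isinstance(item, Variable) for item in seq)

# When this is True, ops such as slice/reshape/tile above feed the values in
# as *TensorList inputs instead of plain integer attributes.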
...@@ -32,7 +32,6 @@ from ..fluid.data_feeder import ( ...@@ -32,7 +32,6 @@ from ..fluid.data_feeder import (
check_variable_and_dtype, check_variable_and_dtype,
convert_dtype, convert_dtype,
) )
from ..fluid.layers import utils
from ..framework import ( from ..framework import (
LayerHelper, LayerHelper,
convert_np_dtype_to_dtype_, convert_np_dtype_to_dtype_,
...@@ -121,8 +120,8 @@ def _get_reduce_axis_with_tensor(axis, x): ...@@ -121,8 +120,8 @@ def _get_reduce_axis_with_tensor(axis, x):
reduce_all = False reduce_all = False
else: else:
reduce_all, axis = _get_reduce_axis(axis, x) reduce_all, axis = _get_reduce_axis(axis, x)
if utils._contain_var(axis): if paddle.utils._contain_var(axis):
axis = utils._convert_to_tensor_list(axis) axis = paddle.utils._convert_to_tensor_list(axis)
return reduce_all, axis return reduce_all, axis
...@@ -2319,8 +2318,8 @@ def max(x, axis=None, keepdim=False, name=None): ...@@ -2319,8 +2318,8 @@ def max(x, axis=None, keepdim=False, name=None):
check_variable_and_dtype( check_variable_and_dtype(
x, 'x', ['float32', 'float64', 'int32', 'int64'], 'max' x, 'x', ['float32', 'float64', 'int32', 'int64'], 'max'
) )
if not isinstance(axis, Variable) and utils._contain_var(axis): if not isinstance(axis, Variable) and paddle.utils._contain_var(axis):
axis = utils._convert_to_tensor_list(axis) axis = paddle.utils._convert_to_tensor_list(axis)
out = helper.create_variable_for_type_inference(dtype=x.dtype) out = helper.create_variable_for_type_inference(dtype=x.dtype)
helper.append_op( helper.append_op(
......
...@@ -25,7 +25,6 @@ from ..fluid.data_feeder import ( ...@@ -25,7 +25,6 @@ from ..fluid.data_feeder import (
check_type, check_type,
check_variable_and_dtype, check_variable_and_dtype,
) )
from ..fluid.layers import utils
from ..framework import ( from ..framework import (
LayerHelper, LayerHelper,
convert_np_dtype_to_dtype_, convert_np_dtype_to_dtype_,
...@@ -336,7 +335,7 @@ def gaussian(shape, mean=0.0, std=1.0, seed=0, dtype=None, name=None): ...@@ -336,7 +335,7 @@ def gaussian(shape, mean=0.0, std=1.0, seed=0, dtype=None, name=None):
dtype = convert_np_dtype_to_dtype_(dtype) dtype = convert_np_dtype_to_dtype_(dtype)
if in_dygraph_mode(): if in_dygraph_mode():
shape = utils.convert_shape_to_list(shape) shape = paddle.utils.convert_shape_to_list(shape)
place = _current_expected_place() place = _current_expected_place()
return _C_ops.gaussian( return _C_ops.gaussian(
shape, float(mean), float(std), seed, dtype, place shape, float(mean), float(std), seed, dtype, place
...@@ -353,7 +352,7 @@ def gaussian(shape, mean=0.0, std=1.0, seed=0, dtype=None, name=None): ...@@ -353,7 +352,7 @@ def gaussian(shape, mean=0.0, std=1.0, seed=0, dtype=None, name=None):
'dtype': dtype, 'dtype': dtype,
'use_mkldnn': False, 'use_mkldnn': False,
} }
utils.get_shape_tensor_inputs( paddle.utils.get_shape_tensor_inputs(
inputs=inputs, attrs=attrs, shape=shape, op_type=op_type_for_check inputs=inputs, attrs=attrs, shape=shape, op_type=op_type_for_check
) )
...@@ -644,7 +643,7 @@ def uniform(shape, dtype=None, min=-1.0, max=1.0, seed=0, name=None): ...@@ -644,7 +643,7 @@ def uniform(shape, dtype=None, min=-1.0, max=1.0, seed=0, name=None):
dtype = convert_np_dtype_to_dtype_(dtype) dtype = convert_np_dtype_to_dtype_(dtype)
if in_dygraph_mode(): if in_dygraph_mode():
shape = utils.convert_shape_to_list(shape) shape = paddle.utils.convert_shape_to_list(shape)
return _C_ops.uniform( return _C_ops.uniform(
shape, shape,
dtype, dtype,
...@@ -661,7 +660,7 @@ def uniform(shape, dtype=None, min=-1.0, max=1.0, seed=0, name=None): ...@@ -661,7 +660,7 @@ def uniform(shape, dtype=None, min=-1.0, max=1.0, seed=0, name=None):
inputs = dict() inputs = dict()
attrs = {'seed': seed, 'min': min, 'max': max, 'dtype': dtype} attrs = {'seed': seed, 'min': min, 'max': max, 'dtype': dtype}
utils.get_shape_tensor_inputs( paddle.utils.get_shape_tensor_inputs(
inputs=inputs, attrs=attrs, shape=shape, op_type='uniform/rand' inputs=inputs, attrs=attrs, shape=shape, op_type='uniform/rand'
) )
...@@ -794,7 +793,7 @@ def randint(low=0, high=None, shape=[1], dtype=None, name=None): ...@@ -794,7 +793,7 @@ def randint(low=0, high=None, shape=[1], dtype=None, name=None):
dtype = convert_np_dtype_to_dtype_(dtype) dtype = convert_np_dtype_to_dtype_(dtype)
if in_dygraph_mode(): if in_dygraph_mode():
shape = utils.convert_shape_to_list(shape) shape = paddle.utils.convert_shape_to_list(shape)
place = _current_expected_place() place = _current_expected_place()
return _C_ops.randint(low, high, shape, dtype, place) return _C_ops.randint(low, high, shape, dtype, place)
else: else:
...@@ -808,7 +807,7 @@ def randint(low=0, high=None, shape=[1], dtype=None, name=None): ...@@ -808,7 +807,7 @@ def randint(low=0, high=None, shape=[1], dtype=None, name=None):
inputs = dict() inputs = dict()
attrs = {'low': low, 'high': high, 'seed': 0, 'dtype': dtype} attrs = {'low': low, 'high': high, 'seed': 0, 'dtype': dtype}
utils.get_shape_tensor_inputs( paddle.utils.get_shape_tensor_inputs(
inputs=inputs, attrs=attrs, shape=shape, op_type='randint' inputs=inputs, attrs=attrs, shape=shape, op_type='randint'
) )
...@@ -967,7 +966,7 @@ def randint_like(x, low=0, high=None, dtype=None, name=None): ...@@ -967,7 +966,7 @@ def randint_like(x, low=0, high=None, dtype=None, name=None):
) )
if in_dygraph_mode(): if in_dygraph_mode():
shape = utils.convert_shape_to_list(shape) shape = paddle.utils.convert_shape_to_list(shape)
out = _legacy_C_ops.randint( out = _legacy_C_ops.randint(
'shape', 'shape',
shape, shape,
......
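In the dygraph branches above, convert_shape_to_list normalizes a shape that may mix Python ints with Tensors into a plain list before calling the C++ op. A small usage sketch, assuming the helper's contract implied by these call sites:

import paddle

shape = [2, paddle.to_tensor(3)]                      # mixed ints and Tensors
print(paddle.utils.convert_shape_to_list(shape))      # [2, 3]

out = paddle.uniform(shape=shape, min=-1.0, max=1.0, dtype='float32')
print(out.shape)                                      # [2, 3]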
...@@ -27,5 +27,33 @@ from . import download # noqa: F401 ...@@ -27,5 +27,33 @@ from . import download # noqa: F401
from . import image_util # noqa: F401 from . import image_util # noqa: F401
from . import cpp_extension # noqa: F401 from . import cpp_extension # noqa: F401
from . import dlpack from . import dlpack
from . import layers_utils # noqa: F401
from .layers_utils import convert_to_list # noqa: F401
from .layers_utils import is_sequence # noqa: F401
from .layers_utils import to_sequence # noqa: F401
from .layers_utils import flatten # noqa: F401
from .layers_utils import pack_sequence_as # noqa: F401
from .layers_utils import map_structure # noqa: F401
from .layers_utils import hold_mutable_vars # noqa: F401
from .layers_utils import copy_mutable_vars # noqa: F401
from .layers_utils import padding_to_same_structure # noqa: F401
from .layers_utils import assert_same_structure # noqa: F401
from .layers_utils import get_shape_tensor_inputs # noqa: F401
from .layers_utils import convert_shape_to_list # noqa: F401
from .layers_utils import check_shape # noqa: F401
from .layers_utils import try_set_static_shape_tensor # noqa: F401
from .layers_utils import try_get_constant_shape_from_tensor # noqa: F401
from .layers_utils import get_inputs_outputs_in_block # noqa: F401
from .layers_utils import _hash_with_id # noqa: F401
from .layers_utils import _sorted # noqa: F401
from .layers_utils import _yield_value # noqa: F401
from .layers_utils import _yield_flat_nest # noqa: F401
from .layers_utils import _sequence_like # noqa: F401
from .layers_utils import _packed_nest_with_indices # noqa: F401
from .layers_utils import _recursive_assert_same_structure # noqa: F401
from .layers_utils import _is_symmetric_padding # noqa: F401
from .layers_utils import _contain_var # noqa: F401
from .layers_utils import _convert_to_tensor_list # noqa: F401
__all__ = ['deprecated', 'run_check', 'require_version', 'try_import'] # noqa __all__ = ['deprecated', 'run_check', 'require_version', 'try_import'] # noqa
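With these re-exports in place, downstream code imports the nest utilities directly from paddle.utils instead of paddle.fluid.layers.utils. For example:

from paddle.utils import convert_to_list, flatten, map_structure

nested = {'a': [1, 2], 'b': (3, {'c': 4})}
flatten(nested)                          # [1, 2, 3, 4]
map_structure(lambda v: v * 2, nested)   # same nesting, every leaf doubled
convert_to_list(5, 2, 'kernel_size')     # [5, 5]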
...@@ -289,7 +289,7 @@ class VersionManager: ...@@ -289,7 +289,7 @@ class VersionManager:
self.version = self.hasher(version_field) self.version = self.hasher(version_field)
def hasher(self, version_field): def hasher(self, version_field):
from paddle.fluid.layers.utils import flatten from paddle.utils import flatten
md5 = hashlib.md5() md5 = hashlib.md5()
for field in version_field._fields: for field in version_field._fields:
......
...@@ -12,24 +12,16 @@ ...@@ -12,24 +12,16 @@
# See the License for the specific language governing permissions and # See the License for the specific language governing permissions and
# limitations under the License. # limitations under the License.
import paddle
import collections
import copy import copy
import numpy as np
from ..framework import Block, Variable, _non_static_mode
from ..data_feeder import (
convert_dtype,
check_variable_and_dtype,
check_type,
check_dtype,
)
from ..layer_helper import LayerHelper
from sys import version_info
from collections.abc import Sequence
from weakref import WeakKeyDictionary
from collections import defaultdict from collections import defaultdict
from collections.abc import Sequence
from uuid import uuid4 from uuid import uuid4
from weakref import WeakKeyDictionary
import paddle
from ..fluid.data_feeder import check_dtype, convert_dtype
from ..fluid.framework import Block, Variable, _non_static_mode
def convert_to_list(value, n, name, dtype=int): def convert_to_list(value, n, name, dtype=int):
...@@ -388,7 +380,7 @@ def _contain_var(list_or_tuple): ...@@ -388,7 +380,7 @@ def _contain_var(list_or_tuple):
def get_shape_tensor_inputs(inputs, attrs, shape, op_type): def get_shape_tensor_inputs(inputs, attrs, shape, op_type):
from .tensor import fill_constant from ..fluid.layers.tensor import fill_constant
def _get_attr_shape(list_shape): def _get_attr_shape(list_shape):
attr_shape = [] attr_shape = []
...@@ -443,7 +435,7 @@ def _convert_to_tensor_list(old_list, dtype="int32"): ...@@ -443,7 +435,7 @@ def _convert_to_tensor_list(old_list, dtype="int32"):
""" """
Converts all elements of a list to Variable. Converts all elements of a list to Variable.
""" """
from .tensor import fill_constant from ..fluid.layers.tensor import fill_constant
new_list_tensor = [] new_list_tensor = []
for ele in old_list: for ele in old_list:
......
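fill_constant stays imported inside the helper functions; only its path changes to paddle.fluid.layers.tensor now that the helpers no longer live beside it. Keeping the import local presumably avoids pulling fluid in when paddle.utils itself is imported. A sketch of the resulting pattern (hypothetical helper name, not the library source):

def _int_to_dim_tensor(value, dtype="int32"):
    # deferred import: resolved only when a tensor actually has to be built
    from paddle.fluid.layers.tensor import fill_constant
    return fill_constant([1], dtype, value)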
...@@ -16,11 +16,11 @@ import numpy as np ...@@ -16,11 +16,11 @@ import numpy as np
from paddle import _C_ops, _legacy_C_ops from paddle import _C_ops, _legacy_C_ops
from paddle.tensor.math import _add_with_axis from paddle.tensor.math import _add_with_axis
from paddle.utils import convert_to_list
from ..fluid.data_feeder import check_type, check_variable_and_dtype from ..fluid.data_feeder import check_type, check_variable_and_dtype
from ..fluid.framework import Variable, in_dygraph_mode from ..fluid.framework import Variable, in_dygraph_mode
from ..fluid.layer_helper import LayerHelper from ..fluid.layer_helper import LayerHelper
from ..fluid.layers import utils
from ..framework import _current_expected_place from ..framework import _current_expected_place
from ..nn import BatchNorm2D, Conv2D, Layer, ReLU, Sequential from ..nn import BatchNorm2D, Conv2D, Layer, ReLU, Sequential
from ..nn.initializer import Normal from ..nn.initializer import Normal
...@@ -863,9 +863,9 @@ def deform_conv2d( ...@@ -863,9 +863,9 @@ def deform_conv2d(
# returns # returns
[8, 16, 26, 26] [8, 16, 26, 26]
""" """
stride = utils.convert_to_list(stride, 2, 'stride') stride = convert_to_list(stride, 2, 'stride')
padding = utils.convert_to_list(padding, 2, 'padding') padding = convert_to_list(padding, 2, 'padding')
dilation = utils.convert_to_list(dilation, 2, 'dilation') dilation = convert_to_list(dilation, 2, 'dilation')
use_deform_conv2d_v1 = True if mask is None else False use_deform_conv2d_v1 = True if mask is None else False
...@@ -899,9 +899,9 @@ def deform_conv2d( ...@@ -899,9 +899,9 @@ def deform_conv2d(
helper = LayerHelper('deformable_conv', **locals()) helper = LayerHelper('deformable_conv', **locals())
dtype = helper.input_dtype() dtype = helper.input_dtype()
stride = utils.convert_to_list(stride, 2, 'stride') stride = convert_to_list(stride, 2, 'stride')
padding = utils.convert_to_list(padding, 2, 'padding') padding = convert_to_list(padding, 2, 'padding')
dilation = utils.convert_to_list(dilation, 2, 'dilation') dilation = convert_to_list(dilation, 2, 'dilation')
pre_bias = helper.create_variable_for_type_inference(dtype) pre_bias = helper.create_variable_for_type_inference(dtype)
...@@ -1106,14 +1106,14 @@ class DeformConv2D(Layer): ...@@ -1106,14 +1106,14 @@ class DeformConv2D(Layer):
self._out_channels = out_channels self._out_channels = out_channels
self._channel_dim = 1 self._channel_dim = 1
self._stride = utils.convert_to_list(stride, 2, 'stride') self._stride = convert_to_list(stride, 2, 'stride')
self._dilation = utils.convert_to_list(dilation, 2, 'dilation') self._dilation = convert_to_list(dilation, 2, 'dilation')
self._kernel_size = utils.convert_to_list(kernel_size, 2, 'kernel_size') self._kernel_size = convert_to_list(kernel_size, 2, 'kernel_size')
if in_channels % groups != 0: if in_channels % groups != 0:
raise ValueError("in_channels must be divisible by groups.") raise ValueError("in_channels must be divisible by groups.")
self._padding = utils.convert_to_list(padding, 2, 'padding') self._padding = convert_to_list(padding, 2, 'padding')
filter_shape = [out_channels, in_channels // groups] + self._kernel_size filter_shape = [out_channels, in_channels // groups] + self._kernel_size
......
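The public paddle.vision.ops.deform_conv2d API is untouched; only the argument normalization above now calls the shared convert_to_list from paddle.utils. A quick check, reusing the shapes from the docstring snippet in this diff ([8, 16, 26, 26] output):

import paddle

x = paddle.rand([8, 1, 28, 28])
kh, kw = 3, 3
weight = paddle.rand([16, 1, kh, kw])
offset = paddle.rand([8, 2 * kh * kw, 26, 26])

out = paddle.vision.ops.deform_conv2d(x, offset, weight)
print(out.shape)   # [8, 16, 26, 26]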