Unverified · Commit c9951dfc · Authored by: H heyanru · Committed by: GitHub

[Fluid Clean] remove paddle.fluid.layers.nn.split and paddle.fluid.layers.nn.l2_normalize (#48274)

Parent 344b99e1
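At every call site the change is mechanical: `fluid.layers.split(input, ..., dim=d)` becomes `paddle.split(x, ..., axis=d)`, and `fluid.layers.l2_normalize(x, axis=a)` becomes `paddle.nn.functional.normalize(x, axis=a)`. A minimal before/after sketch (the shapes are illustrative, not taken from the diff):

import paddle

x = paddle.randn([3, 9, 5])

# before this commit: fluid.layers.split(x, num_or_sections=3, dim=1)
out0, out1, out2 = paddle.split(x, num_or_sections=3, axis=1)

# before this commit: fluid.layers.l2_normalize(x, axis=-1)
y = paddle.nn.functional.normalize(x, axis=-1)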
......@@ -156,7 +156,7 @@ class BasicGRUUnit(Layer):
gate_input = paddle.add(gate_input, self._gate_bias)
gate_input = self._gate_activation(gate_input)
r, u = layers.split(gate_input, num_or_sections=2, dim=1)
r, u = paddle.split(gate_input, num_or_sections=2, axis=1)
r_hidden = r * pre_hidden
......@@ -877,7 +877,7 @@ class BasicLSTMUnit(Layer):
gate_input = paddle.matmul(x=concat_input_hidden, y=self._weight)
gate_input = paddle.add(gate_input, self._bias)
i, j, f, o = layers.split(gate_input, num_or_sections=4, dim=-1)
i, j, f, o = paddle.split(gate_input, num_or_sections=4, axis=-1)
new_cell = paddle.add(
paddle.multiply(
pre_cell,
......
......@@ -66,8 +66,6 @@ __all__ = [
'fc',
'embedding',
'conv2d',
'split',
'l2_normalize',
'row_conv',
'layer_norm',
'spectral_norm',
......@@ -1420,252 +1418,6 @@ def reduce_sum(input, dim=None, keep_dim=False, name=None):
return out
def split(input, num_or_sections, dim=-1, name=None):
"""
Split the input tensor into multiple sub-Tensors.
Args:
input (Tensor): An N-D Tensor. The data type is bool, float16, float32, float64, int32 or int64.
num_or_sections (int|list|tuple): If ``num_or_sections`` is an int, it
indicates the number of equal-sized sub-Tensors that the ``input``
will be divided into. If ``num_or_sections`` is a list or tuple, its length
indicates the number of sub-Tensors and its elements indicate the sizes of the sub-Tensors
along the split dimension, in order. The length of the list must not be larger than the size of ``input`` along the specified dim.
dim (int|Tensor, optional): The dimension along which to split. It can be a scalar of type ``int`` or
a ``Tensor`` with shape [1] and data type ``int32`` or ``int64``. If :math:`dim < 0`,
the dimension to split along is :math:`rank(input) + dim`. Default is -1.
name (str, optional): The default value is None. Normally there is no need for the user to set this property.
For more information, please refer to :ref:`api_guide_Name` .
Returns:
list(Tensor): The list of segmented Tensors.
Example:
.. code-block:: python
import paddle.fluid as fluid
# input is a Tensor whose shape is [3, 9, 5]
input = fluid.data(
name="input", shape=[3, 9, 5], dtype="float32")
out0, out1, out2 = fluid.layers.split(input, num_or_sections=3, dim=1)
# out0.shape [3, 3, 5]
# out1.shape [3, 3, 5]
# out2.shape [3, 3, 5]
out0, out1, out2 = fluid.layers.split(input, num_or_sections=[2, 3, 4], dim=1)
# out0.shape [3, 2, 5]
# out1.shape [3, 3, 5]
# out2.shape [3, 4, 5]
out0, out1, out2 = fluid.layers.split(input, num_or_sections=[2, 3, -1], dim=1)
# out0.shape [3, 2, 5]
# out1.shape [3, 3, 5]
# out2.shape [3, 4, 5]
# dim is negative; the real dim is rank(input) + dim, which
# here is 1.
out0, out1, out2 = fluid.layers.split(input, num_or_sections=3, dim=-2)
# out0.shape [3, 3, 5]
# out1.shape [3, 3, 5]
# out2.shape [3, 3, 5]
"""
if _non_static_mode():
num = None
attrs = ()
if isinstance(dim, Variable):
dim = dim.numpy()
dim = dim.item(0)
assert len(input.shape) + dim >= 0, "(rank(x) + dim) must be >= 0"
dim = (len(input.shape) + dim) if dim < 0 else dim
attrs += ('axis', dim)
if isinstance(num_or_sections, int):
num = num_or_sections
attrs += ('num', num_or_sections)
elif isinstance(num_or_sections, (list, tuple)):
num = len(num_or_sections)
if utils._contain_var(num_or_sections):
for index, item in enumerate(num_or_sections):
if isinstance(item, Variable):
num_or_sections[index] = num_or_sections[index].numpy()[
0
]
attrs += ('sections', list(num_or_sections))
else:
attrs += ('sections', list(num_or_sections))
else:
raise TypeError(
"The type of 'num_or_sections' in split must be int, list or tuple in imperative mode, but "
"received %s." % (type(num_or_sections))
)
if in_dygraph_mode():
if isinstance(num_or_sections, int):
return _C_ops.split_with_num(input, num_or_sections, dim)
else:
return _C_ops.split(input, num_or_sections, dim)
elif _in_legacy_dygraph():
out = [_varbase_creator() for n in range(num)]
_legacy_C_ops.split(input, out, *attrs)
return out
check_variable_and_dtype(
input,
'input',
['bool', 'float16', 'float32', 'float64', 'int32', 'int64'],
'split',
)
check_type(num_or_sections, 'num_or_sections', (list, int, tuple), 'split')
check_type(dim, 'dim', (int, Variable), 'split')
if isinstance(dim, Variable):
check_dtype(dim.dtype, 'dim', ['int32', 'int64'], 'split')
helper = LayerHelper('split', **locals())
input_shape = input.shape
inputs = {'X': input}
attrs = {'num': num_or_sections if isinstance(num_or_sections, int) else 0}
def _get_SectionsTensorList(one_list):
tensor_list = []
unk_dim_idx = -1
for idx, dim_size in enumerate(one_list):
if isinstance(dim_size, Variable):
dim_size.stop_gradient = True
tensor_list.append(dim_size)
else:
assert isinstance(dim_size, int)
if dim_size == -1:
assert unk_dim_idx == -1, (
"Only one value of 'num_or_section' in split can "
"be -1. But received num_or_section[%d] is also -1."
% idx
)
unk_dim_idx = idx
temp_out = helper.create_variable_for_type_inference('int32')
fill_constant(
[1], 'int32', dim_size, force_cpu=True, out=temp_out
)
tensor_list.append(temp_out)
return tensor_list
if isinstance(dim, Variable):
dim.stop_gradient = True
inputs['AxisTensor'] = dim
else:
assert len(input.shape) + dim >= 0, "(rank(x) + dim) must be >= 0"
dim = (len(input_shape) + dim) if dim < 0 else dim
attrs['axis'] = dim
if isinstance(num_or_sections, int):
assert num_or_sections > 1, 'num_or_sections must be more than 1.'
if isinstance(dim, int) and input_shape[dim] > 0:
assert input_shape[dim] % num_or_sections == 0, (
"The input's size along the split dimension "
"must be evenly divisible by Attr(num_or_sections). "
"But %d is not evenly divisible by %d. "
% (input_shape[dim], num_or_sections)
)
num = num_or_sections
else:
if isinstance(dim, int) and input_shape[dim] > 0:
assert (
len(num_or_sections) <= input_shape[dim]
), 'len(num_or_sections) must not be more than input.shape[dim].'
num = len(num_or_sections)
attrs['sections'] = list(
map(
lambda ele: -1 if isinstance(ele, Variable) else ele,
num_or_sections,
)
)
if utils._contain_var(num_or_sections):
inputs['SectionsTensorList'] = _get_SectionsTensorList(
num_or_sections
)
outs = [
helper.create_variable_for_type_inference(dtype=helper.input_dtype())
for i in range(num)
]
helper.append_op(
type='split', inputs=inputs, outputs={'Out': outs}, attrs=attrs
)
return outs
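For reference, the two dispatch branches in the removed code map onto the two calling conventions of the surviving `paddle.split`; a minimal sketch (shapes chosen for illustration):

import paddle

x = paddle.ones([4, 6, 6])
# an int gives equal-sized pieces (the split_with_num branch above)
a, b, c = paddle.split(x, num_or_sections=3, axis=1)     # each [4, 2, 6]
# a list gives explicit sizes; one entry may be -1 to infer the remainder
d, e = paddle.split(x, num_or_sections=[2, -1], axis=1)  # [4, 2, 6] and [4, 4, 6]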
def l2_normalize(x, axis, epsilon=1e-12, name=None):
r"""
This op normalizes `x` along dimension `axis` using the L2
norm. For a 1-D tensor (`axis` is fixed to 0), this layer computes
.. math::
y = \frac{x}{\sqrt{\sum{x^2} + epsilon}}
For `x` with more dimensions, this layer independently normalizes each 1-D
slice along dimension `axis`.
Args:
x(Variable|list): The input tensor can be an N-D tensor, and the input data type can be float16, float32 or float64.
axis(int): The axis on which to apply normalization. If `axis < 0`, \
the dimension to normalize is rank(X) + axis. -1 is the
last dimension.
epsilon(float): The epsilon value is used to avoid division by zero, \
the default value is 1e-12.
name(str, optional): The default value is None. Normally there is no need for the user to set this property. For more information, please refer to :ref:`api_guide_Name`
Returns:
Variable: The output has the same shape and data type as `x`.
Examples:
.. code-block:: python
:name: code-example1
import paddle
X = paddle.randn(shape=[3, 5], dtype='float64')
out = paddle.fluid.layers.l2_normalize(X, axis=-1)
print(out)
# [[ 0.21558504 0.56360189 0.47466096 0.46269539 -0.44326736]
# [-0.70602414 -0.52745777 0.37771788 -0.2804768 -0.04449922]
# [-0.33972208 -0.43014923 0.31772556 0.76617881 -0.10761525]]
"""
if len(x.shape) == 1:
axis = 0
if _non_static_mode():
if in_dygraph_mode():
out, _ = _C_ops.norm(x, 1 if axis is None else axis, epsilon, False)
elif _in_legacy_dygraph():
_, out = _legacy_C_ops.norm(
x, 'axis', 1 if axis is None else axis, 'epsilon', epsilon
)
return out
check_variable_and_dtype(x, "X", ("float16", "float32", "float64"), "norm")
helper = LayerHelper("l2_normalize", **locals())
out = helper.create_variable_for_type_inference(dtype=x.dtype)
norm = helper.create_variable_for_type_inference(dtype=x.dtype)
helper.append_op(
type="norm",
inputs={"X": x},
outputs={"Out": out, "Norm": norm},
attrs={
"axis": 1 if axis is None else axis,
"epsilon": epsilon,
},
)
return out
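The call sites below migrate to `paddle.nn.functional.normalize`, which performs the same L2 normalization along the chosen axis with an epsilon guard against division by zero. A quick sanity check (a sketch, not part of the diff):

import paddle

x = paddle.randn([3, 5])
y = paddle.nn.functional.normalize(x, axis=-1)
# every slice along the last axis should now have unit L2 norm
print(paddle.linalg.norm(y, p=2, axis=-1))  # ~[1., 1., 1.]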
@templatedoc()
def row_conv(input, future_context_size, param_attr=None, act=None):
"""
......
......@@ -397,7 +397,7 @@ def glu(input, dim=-1):
check_variable_and_dtype(
input, 'input', ['float16', 'float32', 'float64'], "glu"
)
a, b = layers.split(input, num_or_sections=2, dim=dim)
a, b = paddle.split(input, num_or_sections=2, axis=dim)
act_b = paddle.nn.functional.sigmoid(x=b)
out = paddle.multiply(x=a, y=act_b)
return out
......
......@@ -73,7 +73,7 @@ class BasicLSTMUnit(Layer):
gate_input = paddle.matmul(x=concat_input_hidden, y=self._weight)
gate_input = paddle.add(gate_input, self._bias)
i, j, f, o = layers.split(gate_input, num_or_sections=4, dim=-1)
i, j, f, o = paddle.split(gate_input, num_or_sections=4, axis=-1)
new_cell = paddle.add(
paddle.multiply(
pre_cell, paddle.nn.functional.sigmoid(f + self._forget_bias)
......
......@@ -188,7 +188,7 @@ class GPT2LMHeadModel(fluid.dygraph.Layer):
@declarative
def forward(self, x):
x = paddle.reshape(x, shape=[-1, 6])
x1, x2, x3 = fluid.layers.split(input=x, dim=1, num_or_sections=3)
x1, x2, x3 = paddle.split(x=x, axis=1, num_or_sections=3)
return x1
......
......@@ -96,8 +96,8 @@ class SimpleLSTMRNN(fluid.Layer):
gate_input = paddle.matmul(x=nn, y=weight_1)
gate_input = paddle.add(gate_input, bias)
i, j, f, o = fluid.layers.split(
gate_input, num_or_sections=4, dim=-1
i, j, f, o = paddle.split(
gate_input, num_or_sections=4, axis=-1
)
c = pre_cell * paddle.nn.functional.sigmoid(
f
......
......@@ -89,7 +89,7 @@ class TensorRTSubgraphPassSplitTest(InferencePassTest):
data = fluid.data(
name="data", shape=[-1, 3, 64, 64], dtype="float32"
)
split_out = fluid.layers.split(data, dim=-1, num_or_sections=2)
split_out = paddle.split(data, axis=-1, num_or_sections=2)
out = nn.batch_norm(split_out[0], is_test=True)
self.feeds = {
"data": np.random.random([1, 3, 64, 64]).astype("float32"),
......@@ -115,7 +115,7 @@ class TensorRTSubgraphPassSplitSerializeTest(InferencePassTest):
data = fluid.data(
name="data", shape=[-1, 3, 64, 64], dtype="float32"
)
split_out = fluid.layers.split(data, dim=-1, num_or_sections=2)
split_out = paddle.split(data, axis=-1, num_or_sections=2)
out = nn.batch_norm(split_out[0], is_test=True)
self.feeds = {
"data": np.random.random([1, 3, 64, 64]).astype("float32"),
......@@ -143,7 +143,7 @@ class TensorRTSubgraphPassDynamicSplitFp16SerializeTest(InferencePassTest):
data = fluid.data(
name="data", shape=[-1, 3, 64, 64], dtype="float32"
)
split_out = fluid.layers.split(data, dim=-1, num_or_sections=2)
split_out = paddle.split(data, axis=-1, num_or_sections=2)
out = nn.batch_norm(split_out[0], is_test=True)
self.feeds = {
"data": np.random.random([1, 3, 64, 64]).astype("float32"),
......
......@@ -107,7 +107,7 @@ class API_NormTest(unittest.TestCase):
def test_norm_x_type():
data = fluid.data(name="x", shape=[3, 3], dtype="float64")
out = fluid.layers.l2_normalize(data)
out = paddle.nn.functional.normalize(data)
self.assertRaises(TypeError, test_norm_x_type)
......@@ -127,4 +127,4 @@ class TestNPUNormOpFP16(TestNPUNormOp):
if __name__ == '__main__':
unittest.main()
unittest.main()
\ No newline at end of file
......@@ -89,7 +89,7 @@ class TestGradientWithPrune(unittest.TestCase):
with paddle.fluid.scope_guard(paddle.static.Scope()):
x = fluid.data(name='x', shape=[3], dtype='float32')
x.stop_gradient = False
x1, x2, x3 = fluid.layers.split(x, dim=0, num_or_sections=3)
x1, x2, x3 = paddle.split(x, axis=0, num_or_sections=3)
y = x1 * 2
x1_grad = fluid.gradients(y, x)
......
......@@ -277,8 +277,8 @@ def lm_model(
cell_array.append(pre_cell)
res = []
sliced_inputs = layers.split(
input_embedding, num_or_sections=len, dim=1
sliced_inputs = paddle.split(
input_embedding, num_or_sections=len, axis=1
)
for index in range(len):
......@@ -294,7 +294,9 @@ def lm_model(
gate_input = paddle.matmul(x=nn, y=weight_1)
gate_input = paddle.add(gate_input, bias)
i, j, f, o = layers.split(gate_input, num_or_sections=4, dim=-1)
i, j, f, o = paddle.split(
gate_input, num_or_sections=4, axis=-1
)
c = pre_cell * paddle.nn.functional.sigmoid(
f
......
......@@ -105,9 +105,7 @@ class AutoPruneLayer3(fluid.Layer):
def forward(self, x, label, test_num):
feature = self.linear(x)
part1, part2 = fluid.layers.split(
feature, num_or_sections=[10, 10], dim=1
)
part1, part2 = paddle.split(feature, num_or_sections=[10, 10], axis=1)
# Note that: part2 is not used.
loss = paddle.nn.functional.cross_entropy(
input=part1, label=label, reduction='none', use_softmax=False
......
......@@ -110,8 +110,8 @@ class SimpleLSTMRNN(fluid.Layer):
gate_input = paddle.matmul(x=nn, y=weight_1)
gate_input = paddle.add(gate_input, bias)
i, j, f, o = fluid.layers.split(
gate_input, num_or_sections=4, dim=-1
i, j, f, o = paddle.split(
gate_input, num_or_sections=4, axis=-1
)
c = pre_cell * paddle.nn.functional.sigmoid(
f
......
......@@ -107,8 +107,8 @@ class SimpleLSTMRNN(fluid.Layer):
gate_input = paddle.matmul(x=nn, y=weight_1)
gate_input = paddle.add(gate_input, bias)
i, j, f, o = fluid.layers.split(
gate_input, num_or_sections=4, dim=-1
i, j, f, o = paddle.split(
gate_input, num_or_sections=4, axis=-1
)
c = pre_cell * paddle.nn.functional.sigmoid(
f
......
......@@ -108,8 +108,8 @@ class SimpleLSTMRNN(fluid.Layer):
gate_input = paddle.matmul(x=nn, y=weight_1)
gate_input = paddle.add(gate_input, bias)
i, j, f, o = fluid.layers.split(
gate_input, num_or_sections=4, dim=-1
i, j, f, o = paddle.split(
gate_input, num_or_sections=4, axis=-1
)
c = pre_cell * paddle.nn.functional.sigmoid(
f
......
......@@ -922,21 +922,21 @@ class TestLayer(LayerTest):
with self.dynamic_graph():
with _test_eager_guard():
input = fluid.dygraph.to_variable(np.random.random((3, 8, 5)))
x0, x1 = fluid.layers.split(input, num_or_sections=2, dim=1)
x00, x11 = fluid.layers.split(
x0, x1 = paddle.split(input, num_or_sections=2, axis=1)
x00, x11 = paddle.split(
input,
num_or_sections=2,
dim=fluid.dygraph.to_variable(np.array([1])),
axis=fluid.dygraph.to_variable(np.array([1])),
)
np.testing.assert_array_equal(x0.numpy(), x00.numpy())
np.testing.assert_array_equal(x1.numpy(), x11.numpy())
input = fluid.dygraph.to_variable(np.random.random((3, 8, 5)))
x0, x1 = fluid.layers.split(input, num_or_sections=2, dim=1)
x00, x11 = fluid.layers.split(
x0, x1 = paddle.split(input, num_or_sections=2, axis=1)
x00, x11 = paddle.split(
input,
num_or_sections=2,
dim=fluid.dygraph.to_variable(np.array([1])),
axis=fluid.dygraph.to_variable(np.array([1])),
)
np.testing.assert_array_equal(x0.numpy(), x00.numpy())
np.testing.assert_array_equal(x1.numpy(), x11.numpy())
......@@ -2368,7 +2368,7 @@ class TestBook(LayerTest):
fluid.default_main_program(), fluid.default_startup_program()
):
x = self._get_data(name='x', shape=[8, 7, 10], dtype="float32")
output = layers.l2_normalize(x, axis=1)
output = paddle.nn.functional.normalize(x, axis=1)
return output
def make_shape(self):
......
......@@ -32,7 +32,7 @@ def l2_norm(x, axis, epsilon):
class TestNormOp(OpTest):
def setUp(self):
self.op_type = "norm"
self.python_api = paddle.fluid.layers.l2_normalize
self.python_api = paddle.nn.functional.normalize
self.init_test_case()
self.init_dtype()
x = np.random.random(self.shape).astype(self.dtype)
......@@ -155,7 +155,7 @@ class API_NormTest(unittest.TestCase):
def test_norm_x_type():
data = fluid.data(name="x", shape=[3, 3], dtype="int64")
out = fluid.layers.l2_normalize(data)
out = paddle.nn.functional.normalize(data)
self.assertRaises(TypeError, test_norm_x_type)
......
......@@ -39,7 +39,9 @@ class TestNormalization(unittest.TestCase):
append_batch_size=False,
)
data.stop_gradient = False
l2_norm = fluid.layers.l2_normalize(x=data, axis=axis, epsilon=epsilon)
l2_norm = paddle.nn.functional.normalize(
data, axis=axis, epsilon=epsilon
)
out = paddle.sum(l2_norm, axis=None)
fluid.backward.append_backward(loss=out)
......
......@@ -202,7 +202,7 @@ class TestSGDOpWithLargeInput(unittest.TestCase):
shape=[1, 150], value=0.5, dtype='float32'
)
emb = fluid.embedding(input=data, size=(10000000, 150), dtype='float32')
out = fluid.layers.l2_normalize(x=emb, axis=-1)
out = paddle.nn.functional.normalize(x=emb, axis=-1)
cost = paddle.nn.functional.square_error_cost(input=out, label=label)
avg_cost = paddle.mean(cost)
......
......@@ -272,16 +272,16 @@ class TestSplitAPI(unittest.TestCase):
x_1 = fluid.data(shape=[4, 5, 6], dtype='int32', name='x_1')
x_2 = fluid.data(shape=[4, 5, None], dtype='int32', name='x_2')
out_0, out_1, out_2 = fluid.layers.split(
input=x_1,
out_0, out_1, out_2 = paddle.split(
x=x_1,
num_or_sections=[positive_2_int64, positive_1_int32, -1],
dim=positive_1_int64,
axis=positive_1_int64,
)
out_3, out_4, out_5 = fluid.layers.split(
input=x_1, num_or_sections=[2, 1, 2], dim=positive_1_int32
out_3, out_4, out_5 = paddle.split(
x=x_1, num_or_sections=[2, 1, 2], axis=positive_1_int32
)
fluid.layers.split(input=x_2, num_or_sections=2, dim=2)
paddle.split(x=x_2, num_or_sections=2, axis=2)
exe = fluid.Executor(place=fluid.CPUPlace())
[res_0, res_1, res_2, res_3, res_4, res_5] = exe.run(
......@@ -305,7 +305,7 @@ class TestSplitOpError(unittest.TestCase):
# The type of axis in split_op should be int or Variable.
def test_axis_type():
x6 = fluid.layers.data(shape=[4], dtype='float16', name='x3')
fluid.layers.split(input=x6, num_or_sections=2, dim=3.2)
paddle.split(x=x6, num_or_sections=2, axis=3.2)
self.assertRaises(TypeError, test_axis_type)
......@@ -313,14 +313,14 @@ class TestSplitOpError(unittest.TestCase):
def test_axis_variable_type():
x9 = fluid.layers.data(shape=[4], dtype='float16', name='x9')
x10 = fluid.layers.data(shape=[1], dtype='float16', name='x10')
fluid.layers.split(input=x9, num_or_sections=2, dim=x10)
paddle.split(x=x9, num_or_sections=2, axis=x10)
self.assertRaises(TypeError, test_axis_variable_type)
# The type of num_or_sections in split_op should be int, tuple or list.
def test_num_or_sections_type():
x6 = fluid.layers.data(shape=[4], dtype='float16', name='x4')
fluid.layers.split(input=x6, num_or_sections=2.1, dim=3)
paddle.split(x=x6, num_or_sections=2.1, axis=3)
self.assertRaises(TypeError, test_num_or_sections_type)
......@@ -447,7 +447,7 @@ class API_TestDygraphFluidSplit(unittest.TestCase):
input_1 = np.random.random([4, 6, 6]).astype("int32")
# input is a variable whose shape is [4, 6, 6]
input = paddle.to_tensor(input_1)
x0, x1, x2 = fluid.layers.split(input, num_or_sections=3, dim=1)
x0, x1, x2 = paddle.split(input, num_or_sections=3, axis=1)
x0_out = x0.numpy()
x1_out = x1.numpy()
x2_out = x2.numpy()
......@@ -455,7 +455,7 @@ class API_TestDygraphFluidSplit(unittest.TestCase):
# input is a variable whose shape is [4, 6, 6]
input = paddle.to_tensor(input_1)
input.stop_gradient = False
x0, x1, x2 = fluid.layers.split(input, num_or_sections=3, dim=1)
x0, x1, x2 = paddle.split(input, num_or_sections=3, axis=1)
eager_x0_out = x0.numpy()
eager_x1_out = x1.numpy()
eager_x2_out = x2.numpy()
......@@ -477,7 +477,7 @@ class API_TestDygraphFluidSplit(unittest.TestCase):
input_1 = np.random.random([4, 6, 6]).astype("int32")
# input is a variable whose shape is [4, 6, 6]
input = paddle.to_tensor(input_1)
x0, x1, x2 = fluid.layers.split(input, [2, 2, 2], dim=1)
x0, x1, x2 = paddle.split(input, [2, 2, 2], axis=1)
x0_out = x0.numpy()
x1_out = x1.numpy()
x2_out = x2.numpy()
......@@ -485,7 +485,7 @@ class API_TestDygraphFluidSplit(unittest.TestCase):
# input is a variable whose shape is [4, 6, 6]
input = paddle.to_tensor(input_1)
input.stop_gradient = False
x0, x1, x2 = fluid.layers.split(input, [2, 2, 2], dim=1)
x0, x1, x2 = paddle.split(input, [2, 2, 2], axis=1)
eager_x0_out = x0.numpy()
eager_x1_out = x1.numpy()
eager_x2_out = x2.numpy()
......
......@@ -118,8 +118,8 @@ class SimpleLSTMRNN(fluid.Layer):
gate_input = paddle.matmul(x=nn, y=weight_1)
gate_input = paddle.add(gate_input, bias)
i, j, f, o = fluid.layers.split(
gate_input, num_or_sections=4, dim=-1
i, j, f, o = paddle.split(
gate_input, num_or_sections=4, axis=-1
)
c = pre_cell * paddle.nn.functional.sigmoid(
f
......
......@@ -73,7 +73,7 @@ class TestSGDOpWithLargeInput(unittest.TestCase):
shape=[1, 150], value=0.5, dtype='float32'
)
emb = fluid.embedding(input=data, size=(10000, 150), dtype='float32')
out = fluid.layers.l2_normalize(x=emb, axis=-1)
out = paddle.nn.functional.normalize(x=emb, axis=-1)
cost = paddle.nn.functional.square_error_cost(input=out, label=label)
avg_cost = paddle.mean(cost)
......
......@@ -14,7 +14,6 @@
import paddle
from paddle import _C_ops
from ...fluid import layers as F
from ...fluid.data_feeder import check_variable_and_dtype
from ...fluid.layer_helper import LayerHelper
from ...framework import in_dygraph_mode
......@@ -74,11 +73,11 @@ def _weight_norm(v, g, dim):
v_normalized = v / (paddle.sqrt(paddle.sum(paddle.square(v))) + 1e-12)
elif dim == 0:
p_matrix = paddle.reshape(v, (shape[0], -1))
v_normalized = F.l2_normalize(p_matrix, axis=1)
v_normalized = paddle.nn.functional.normalize(p_matrix, axis=1)
v_normalized = paddle.reshape(v_normalized, shape)
elif dim == ndims - 1:
p_matrix = paddle.reshape(v, (-1, shape[-1]))
v_normalized = F.l2_normalize(p_matrix, axis=0)
v_normalized = paddle.nn.functional.normalize(p_matrix, axis=0)
v_normalized = paddle.reshape(v_normalized, shape)
else:
perm = list(range(ndims))
......@@ -87,7 +86,7 @@ def _weight_norm(v, g, dim):
p_transposed = paddle.transpose(v, perm)
transposed_shape = p_transposed.shape
p_matrix = paddle.reshape(p_transposed, (p_transposed.shape[0], -1))
v_normalized = F.l2_normalize(p_matrix, axis=1)
v_normalized = paddle.nn.functional.normalize(p_matrix, axis=1)
v_normalized = paddle.reshape(v_normalized, transposed_shape)
v_normalized = paddle.transpose(v_normalized, perm)
weight = paddle.tensor.math._multiply_with_axis(
......