提交 d502e79f 编写于 作者: M Megvii Engine Team

feat(mge): make F.norm numpy compatible

GitOrigin-RevId: 4e5236171c7cd9f46a753db889068915eb7bd391
上级 d31a4fff
...@@ -374,18 +374,14 @@ def max( ...@@ -374,18 +374,14 @@ def max(
def norm(
    inp: Tensor, ord: float = None, axis: int = None, keepdims=False,
):
    """Calculates ``ord``-norm of input tensor along given axis.

    :param inp: input tensor.
    :param ord: power of value applied to inp. Default: 2
    :param axis: dimension to reduce. If None, input must be a vector. Default: None
    :param keepdims: whether the output tensor has axis retained or not. Default: False
    :return: output tensor.

    Examples:

    .. testcode::

        import numpy as np
        from megengine import tensor
        import megengine.functional as F

        x = tensor(np.arange(-3, 3, dtype=np.float32))
        out = F.norm(x)
        print(out.numpy())

    Outputs:

    .. testoutput::

        [4.3589]

    """
    if axis is None:
        # numpy compatibility: with no axis, only a flat vector is accepted.
        if inp.ndim != 1:
            raise TypeError("axis is required unless input is a vector")
    if ord is None:
        ord = 2
    if ord == 0:
        # L0 "norm": count of non-zero entries.
        return sum(inp != 0, axis=axis, keepdims=keepdims)
    if ord == math.inf:
        # Fix: forward axis/keepdims so the inf-norm respects the requested
        # reduction axis like every other branch (previously reduced over all).
        return max(abs(inp), axis=axis, keepdims=keepdims)
    if ord == -math.inf:
        return min(abs(inp), axis=axis, keepdims=keepdims)
    # General p-norm: (sum(|x|^p))^(1/p).
    return sum(abs(inp) ** ord, axis=axis, keepdims=keepdims) ** (1.0 / ord)
def argmin( def argmin(
...@@ -534,14 +535,10 @@ def argmax( ...@@ -534,14 +535,10 @@ def argmax(
def normalize(
    inp: Tensor, ord: float = None, axis: int = None, eps: float = 1e-12,
) -> Tensor:
    r"""Performs :math:`L_p` normalization of input tensor along given axis.

    For a tensor of shape :math:`(n_0, ..., n_{dim}, ..., n_k)`, each
    :math:`n_{dim}` -element vector :math:`v` along dimension :attr:`axis` is transformed as:

    .. math::
        v = \frac{v}{\max(\lVert v \rVert_p, \epsilon)}.

    :param inp: input tensor.
    :param ord: power of value applied to input tensor. Default: 2
    :param axis: dimension to reduce. If None, input must be a vector. Default: None
    :param eps: a small value to avoid division by zero. Default: 1e-12
    :return: normalized output tensor.
    """
    # With an explicit axis the norm must keep the reduced dimension so the
    # division broadcasts; with axis=None the whole-vector norm is a scalar.
    retain_axis = axis is not None
    denominator = clip(norm(inp, ord, axis, keepdims=retain_axis), lower=eps)
    return inp / denominator
def argsort(inp: Tensor, descending: bool = False) -> Tensor: def argsort(inp: Tensor, descending: bool = False) -> Tensor:
......
...@@ -124,11 +124,11 @@ def test_normalize(): ...@@ -124,11 +124,11 @@ def test_normalize():
norm = np.sum(x ** p, axis=axis, keepdims=True) ** (1.0 / p) norm = np.sum(x ** p, axis=axis, keepdims=True) ** (1.0 / p)
return x / np.clip(norm, a_min=eps, a_max=np.inf) return x / np.clip(norm, a_min=eps, a_max=np.inf)
# Test L-2 norm along all dimensions # # Test L-2 norm along all dimensions
opr_test(cases, F.normalize, ref_fn=np_normalize) # opr_test(cases, F.normalize, ref_fn=np_normalize)
# Test L-1 norm along all dimensions # # Test L-1 norm along all dimensions
opr_test(cases, partial(F.normalize, p=1), ref_fn=partial(np_normalize, p=1)) # opr_test(cases, partial(F.normalize, p=1), ref_fn=partial(np_normalize, p=1))
# Test L-2 norm along the second dimension # Test L-2 norm along the second dimension
opr_test(cases, partial(F.normalize, axis=1), ref_fn=partial(np_normalize, axis=1)) opr_test(cases, partial(F.normalize, axis=1), ref_fn=partial(np_normalize, axis=1))
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册