Unverified commit 118e585b, authored by David Lin, committed by GitHub

rename op fluid.layers.matmul to paddle.matmul (#23375)

* rename op paddle.fluid.matmul to paddle.matmul
* modify the original API fluid.layers.matmul, and add the new API paddle.matmul
Parent 29c4fae1
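
As context for the rename, a minimal before/after sketch of a call site (the variable names and shapes here are illustrative, not taken from the patch):

    import paddle
    import paddle.fluid as fluid

    x = fluid.data(name='x', shape=[2, 3], dtype='float32')
    y = fluid.data(name='y', shape=[3, 2], dtype='float32')

    # Before this commit, matmul was only reachable as a fluid layer:
    out_old = fluid.layers.matmul(x, y, True, True)

    # After this commit, the same op is exposed at the top level; the old
    # entry point is kept and simply forwards to the new one:
    out_new = paddle.matmul(x, y, True, True)
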
@@ -147,7 +147,7 @@ from .tensor.math import add  #DEFINE_ALIAS
 # from .tensor.attribute import shape  #DEFINE_ALIAS
 # from .tensor.io import save  #DEFINE_ALIAS
 # from .tensor.io import load  #DEFINE_ALIAS
-# from .tensor.linalg import matmul  #DEFINE_ALIAS
+from .tensor.linalg import matmul  #DEFINE_ALIAS
 # from .tensor.linalg import dot  #DEFINE_ALIAS
 # from .tensor.linalg import einsum  #DEFINE_ALIAS
 # from .tensor.linalg import morm  #DEFINE_ALIAS

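This hunk uncomments the top-level alias, so paddle.matmul now resolves to the implementation in paddle.tensor.linalg. A quick hedged check of the aliasing (module paths inferred from the import lines above, not stated elsewhere in the patch):

    import paddle
    from paddle.tensor.linalg import matmul as linalg_matmul

    # Both names should refer to the same function object after this change.
    assert paddle.matmul is linalg_matmul
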
@@ -14,14 +14,16 @@
 """
 All layers just related to the neural network.
 """
 from __future__ import print_function
-import numpy as np
-import warnings
-import six
 import os
 import inspect
+import warnings
+import numpy as np
+import six
+import paddle
 from ..layer_helper import LayerHelper
 from ..initializer import Normal, Constant, NumpyArrayInitializer
 from ..framework import Variable, OpProtoHolder, in_dygraph_mode, dygraph_only, _dygraph_tracer, default_main_program
@@ -4944,63 +4946,7 @@ def matmul(x, y, transpose_x=False, transpose_y=False, alpha=1.0, name=None):
             y = fluid.layers.data(name='y', shape=[3, 2], dtype='float32')
             out = fluid.layers.matmul(x, y, True, True)
     """
-    attrs = {
-        'transpose_X': transpose_x,
-        'transpose_Y': transpose_y,
-        'alpha': float(alpha),
-    }
-    if in_dygraph_mode():
-        return core.ops.matmul(x, y, 'transpose_X', transpose_x, 'transpose_Y',
-                               transpose_y, 'alpha', float(alpha))
-
-    def __check_input(x, y):
-        var_names = {'x': x, 'y': y}
-        for name, val in var_names.items():
-            check_variable_and_dtype(
-                val, name, ['float16', 'float32', 'float64'], 'matmul')
-        x_shape = list(x.shape)
-        y_shape = list(y.shape)
-        if len(x_shape) == 1:
-            x_shape = [1] + x_shape
-        if len(y_shape) == 1:
-            y_shape = y_shape + [1]
-
-        # check the inner 2 dimensions
-        if transpose_x:
-            x_shape[-2], x_shape[-1] = x_shape[-1], x_shape[-2]
-        if transpose_y:
-            y_shape[-2], y_shape[-1] = y_shape[-1], y_shape[-2]
-        if x_shape[-1] != y_shape[-2]:
-            assert (x_shape[-1] == -1) or (y_shape[-2] == -1), \
-                "After performing an optional transpose, Input X's width should be " \
-                "equal to Y's width for multiplication " \
-                "prerequisites. But received X's shape: %s, Y's shape: %s\n" % \
-                (x_shape, y_shape)
-
-        if len(y_shape) > 2 and len(x_shape) > 2:
-            for i, dim_x in enumerate(x_shape[:-2]):
-                # don't check neg shape
-                if dim_x < 0 or y_shape[i] < 0:
-                    continue
-                if dim_x != y_shape[i]:
-                    raise ValueError(
-                        "When the matrix is larger than 2 dimensions, the higher "
-                        "dimensional values of the two matrices need to be equal. "
-                        "But received x_shape[%d] != y_shape[%d]. X's shape: %s, "
-                        "Y's shape: %s.\n" % (i, i, x_shape, y_shape))
-
-    __check_input(x, y)
-
-    helper = LayerHelper('matmul', **locals())
-    out = helper.create_variable_for_type_inference(dtype=x.dtype)
-    helper.append_op(
-        type='matmul',
-        inputs={'X': x,
-                'Y': y},
-        outputs={'Out': out},
-        attrs=attrs)
-    return out
+    return paddle.matmul(x, y, transpose_x, transpose_y, alpha, name)


 def topk(input, k, name=None):

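With the old body replaced by a one-line delegation, fluid.layers.matmul and paddle.matmul should now produce identical results. A small sketch that checks this in dygraph mode (the random inputs and NumPy comparison are for illustration only):

    import numpy as np
    import paddle
    import paddle.fluid as fluid

    with fluid.dygraph.guard():
        a = fluid.dygraph.to_variable(np.random.rand(2, 3).astype('float32'))
        b = fluid.dygraph.to_variable(np.random.rand(3, 4).astype('float32'))
        old = fluid.layers.matmul(a, b)  # now forwards to paddle.matmul
        new = paddle.matmul(a, b)
        assert np.allclose(old.numpy(), new.numpy())
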
@@ -122,7 +122,7 @@ from .math import add  #DEFINE_ALIAS
 # from .attribute import shape  #DEFINE_ALIAS
 # from .io import save  #DEFINE_ALIAS
 # from .io import load  #DEFINE_ALIAS
-# from .linalg import matmul  #DEFINE_ALIAS
+from .linalg import matmul  #DEFINE_ALIAS
 # from .linalg import dot  #DEFINE_ALIAS
 # from .linalg import einsum  #DEFINE_ALIAS
 # from .linalg import morm  #DEFINE_ALIAS

@@ -11,15 +11,148 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
+from paddle.common_ops_import import *
 # TODO: define functions of linear algebra
-# __all__ = ['matmul',
-#            'dot',
-#            'einsum',
-#            'morm',
-#            'transpose',
-#            'dist',
-#            't',
-#            'cross',
-#            'cholesky',
-#            'tensordot']
+__all__ = [
+    'matmul',
+    # 'dot',
+    # 'einsum',
+    # 'morm',
+    # 'transpose',
+    # 'dist',
+    # 't',
+    # 'cross',
+    # 'cholesky',
+    # 'tensordot'
+]
+
+
+def matmul(x, y, transpose_x=False, transpose_y=False, alpha=1.0, name=None):
+    """
+    Applies matrix multiplication to two tensors.
+
+    Currently, the input tensors' rank can be arbitrary, but when the rank of
+    either input is larger than 3, the two inputs' ranks must be equal.
+
+    The actual behavior depends on the shapes of :math:`x`, :math:`y` and the
+    flag values of :attr:`transpose_x`, :attr:`transpose_y`. Specifically:
+
+    - If a transpose flag is specified, the last two dimensions of the tensor
+      are transposed. If the tensor is rank-1 of shape :math:`[D]`, then for
+      :math:`x` it is treated as :math:`[1, D]` in nontransposed form and as
+      :math:`[D, 1]` in transposed form, whereas for :math:`y` it is the
+      opposite: it is treated as :math:`[D, 1]` in nontransposed form and as
+      :math:`[1, D]` in transposed form.
+
+    - After the transpose, the two tensors are 2-D or n-D, and matrix
+      multiplication proceeds as follows:
+
+      - If both are 2-D, they are multiplied like conventional matrices.
+      - If either is n-D, it is treated as a stack of matrices residing in
+        the last two dimensions, and a batched matrix multiply supporting
+        broadcast applies to the two tensors.
+
+    Also note that if the raw tensor :math:`x` or :math:`y` is rank-1 and
+    nontransposed, the prepended or appended dimension :math:`1` will be
+    removed after matrix multiplication.
+
+    Args:
+        x (Variable): The input variable which is a Tensor or LoDTensor.
+        y (Variable): The input variable which is a Tensor or LoDTensor.
+        transpose_x (bool): Whether to transpose :math:`x` before multiplication.
+        transpose_y (bool): Whether to transpose :math:`y` before multiplication.
+        alpha (float): The scale of the output. Default 1.0.
+        name (str|None): A name for this layer (optional). If set to None, the
+            layer will be named automatically.
+
+    Returns:
+        Variable: The product Tensor (or LoDTensor) variable.
+
+    Examples:
+        .. code-block:: python
+
+            # Examples to clarify shapes of the inputs and output
+            # x: [B, ..., M, K], y: [B, ..., K, N]
+            # paddle.matmul(x, y)  # out: [B, ..., M, N]
+
+            # x: [B, M, K], y: [B, K, N]
+            # paddle.matmul(x, y)  # out: [B, M, N]
+
+            # x: [B, M, K], y: [K, N]
+            # paddle.matmul(x, y)  # out: [B, M, N]
+
+            # x: [M, K], y: [K, N]
+            # paddle.matmul(x, y)  # out: [M, N]
+
+            # x: [B, M, K], y: [K]
+            # paddle.matmul(x, y)  # out: [B, M]
+
+            # x: [K], y: [K]
+            # paddle.matmul(x, y)  # out: [1]
+
+            # x: [M], y: [N]
+            # paddle.matmul(x, y, True, True)  # out: [M, N]
+
+            import paddle
+            import paddle.fluid as fluid
+
+            x = fluid.data(name='x', shape=[2, 3], dtype='float32')
+            y = fluid.data(name='y', shape=[3, 2], dtype='float32')
+            out = paddle.matmul(x, y, True, True)
+    """
+    attrs = {
+        'transpose_X': transpose_x,
+        'transpose_Y': transpose_y,
+        'alpha': float(alpha),
+    }
+
+    if in_dygraph_mode():
+        return core.ops.matmul(x, y, 'transpose_X', transpose_x, 'transpose_Y',
+                               transpose_y, 'alpha', float(alpha))
+
+    def __check_input(x, y):
+        var_names = {'x': x, 'y': y}
+        for name, val in var_names.items():
+            check_variable_and_dtype(
+                val, name, ['float16', 'float32', 'float64'], 'matmul')
+        x_shape = list(x.shape)
+        y_shape = list(y.shape)
+        if len(x_shape) == 1:
+            x_shape = [1] + x_shape
+        if len(y_shape) == 1:
+            y_shape = y_shape + [1]
+
+        # check the inner 2 dimensions
+        if transpose_x:
+            x_shape[-2], x_shape[-1] = x_shape[-1], x_shape[-2]
+        if transpose_y:
+            y_shape[-2], y_shape[-1] = y_shape[-1], y_shape[-2]
+        if x_shape[-1] != y_shape[-2]:
+            assert (x_shape[-1] == -1) or (y_shape[-2] == -1), \
+                "After performing an optional transpose, Input X's width should be " \
+                "equal to Y's width for multiplication " \
+                "prerequisites. But received X's shape: %s, Y's shape: %s\n" % \
+                (x_shape, y_shape)
+
+        if len(y_shape) > 2 and len(x_shape) > 2:
+            for i, dim_x in enumerate(x_shape[:-2]):
+                # don't check neg shape
+                if dim_x < 0 or y_shape[i] < 0:
+                    continue
+                if dim_x != y_shape[i]:
+                    raise ValueError(
+                        "When the matrix is larger than 2 dimensions, the higher "
+                        "dimensional values of the two matrices need to be equal. "
+                        "But received x_shape[%d] != y_shape[%d]. X's shape: %s, "
+                        "Y's shape: %s.\n" % (i, i, x_shape, y_shape))
+
+    __check_input(x, y)
+
+    helper = LayerHelper('matmul', **locals())
+    out = helper.create_variable_for_type_inference(dtype=x.dtype)
+    helper.append_op(
+        type='matmul',
+        inputs={'X': x,
+                'Y': y},
+        outputs={'Out': out},
+        attrs=attrs)
+    return out
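
To make the documented shape rules and the alpha scaling concrete, a short sketch in dygraph mode (shapes and values chosen for illustration only):

    import numpy as np
    import paddle
    import paddle.fluid as fluid

    with fluid.dygraph.guard():
        # Batched case: x: [2, 3, 4], y: [4, 5] -> out: [2, 3, 5]
        x = fluid.dygraph.to_variable(np.ones((2, 3, 4), dtype='float32'))
        y = fluid.dygraph.to_variable(np.ones((4, 5), dtype='float32'))
        print(paddle.matmul(x, y).shape)  # [2, 3, 5]

        # Rank-1 case: x: [4], y: [4] -> out: [1] (a dot product)
        v = fluid.dygraph.to_variable(np.ones((4,), dtype='float32'))
        print(paddle.matmul(v, v).shape)  # [1]

        # alpha scales the product: out = alpha * (x @ y)
        print(paddle.matmul(v, v, alpha=2.0).numpy())  # [8.]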