Unverified commit e3736d73, authored by Shibo Tao and committed by GitHub

add op multiply, delete op elementwise_mul from math.py. (#25480)

* add op multiply, delete op elementwise_mul from math.py. test=develop,test=document_fix

* bug fix. test=develop,test=document_fix

* bug fix. test=develop,test=document_fix

* bug fix. test=develop,test=document_fix

* bug fix. test=develop,test=document_fix

* add unittest for multiply op. test=develop.

* fix code style. test=develop

* use random input. test=develop

* add test error case for static computation graph. test=develop

* add np.random.seed(7)

* increase input ndarray size. test=develop

* change float32 to float64. test=develop
Parent 30d1ff3b
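For readers tracking the API change: this commit removes the elementwise_mul alias from the public paddle namespace and introduces paddle.multiply in its place. Below is a minimal before/after sketch, assuming a mid-2020 Paddle 2.0-alpha build in which paddle.enable_imperative() and paddle.imperative.to_variable exist (the same entry points this commit's own tests and docstring use):

import numpy as np
import paddle

paddle.enable_imperative()  # switch to dygraph (imperative) mode
x = paddle.imperative.to_variable(np.array([[1., 2.], [3., 4.]], dtype=np.float32))
y = paddle.imperative.to_variable(np.array([[5., 6.], [7., 8.]], dtype=np.float32))

# Before this commit: res = paddle.elementwise_mul(x, y)  (alias now removed)
res = paddle.multiply(x, y)  # same element-wise product
print(res.numpy())  # [[ 5. 12.] [21. 32.]]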
@@ -139,7 +139,6 @@ from .tensor.math import elementwise_floordiv #DEFINE_ALIAS
from .tensor.math import elementwise_max #DEFINE_ALIAS
from .tensor.math import elementwise_min #DEFINE_ALIAS
from .tensor.math import elementwise_mod #DEFINE_ALIAS
-from .tensor.math import elementwise_mul #DEFINE_ALIAS
from .tensor.math import elementwise_pow #DEFINE_ALIAS
from .tensor.math import elementwise_sub #DEFINE_ALIAS
from .tensor.math import exp #DEFINE_ALIAS
@@ -169,6 +168,7 @@ from .tensor.math import max #DEFINE_ALIAS
from .tensor.math import min #DEFINE_ALIAS
from .tensor.math import mm #DEFINE_ALIAS
from .tensor.math import div #DEFINE_ALIAS
+from .tensor.math import multiply #DEFINE_ALIAS
from .tensor.math import add #DEFINE_ALIAS
from .tensor.math import atan #DEFINE_ALIAS
from .tensor.math import logsumexp #DEFINE_ALIAS
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from __future__ import print_function

import paddle
import paddle.tensor as tensor
import paddle.fluid as fluid
from paddle.fluid import Program, program_guard
import numpy as np
import unittest


class TestMultiplyAPI(unittest.TestCase):
    """TestMultiplyAPI."""

    def __run_static_graph_case(self, x_data, y_data, axis=-1):
with program_guard(Program(), Program()):
x = paddle.nn.data(name='x', shape=x_data.shape, dtype=x_data.dtype)
y = paddle.nn.data(name='y', shape=y_data.shape, dtype=y_data.dtype)
res = tensor.multiply(x, y, axis=axis)
place = fluid.CUDAPlace(0) if fluid.core.is_compiled_with_cuda(
) else fluid.CPUPlace()
exe = fluid.Executor(place)
outs = exe.run(fluid.default_main_program(),
feed={'x': x_data,
'y': y_data},
fetch_list=[res])
res = outs[0]
            return res

    def __run_dynamic_graph_case(self, x_data, y_data, axis=-1):
paddle.enable_imperative()
x = paddle.imperative.to_variable(x_data)
y = paddle.imperative.to_variable(y_data)
res = paddle.multiply(x, y, axis=axis)
        return res.numpy()

    def test_multiply(self):
"""test_multiply."""
np.random.seed(7)
# test static computation graph: 1-d array
x_data = np.random.rand(200)
y_data = np.random.rand(200)
res = self.__run_static_graph_case(x_data, y_data)
self.assertTrue(np.allclose(res, np.multiply(x_data, y_data)))
# test static computation graph: 2-d array
x_data = np.random.rand(2, 500)
y_data = np.random.rand(2, 500)
res = self.__run_static_graph_case(x_data, y_data)
self.assertTrue(np.allclose(res, np.multiply(x_data, y_data)))
# test static computation graph: broadcast
x_data = np.random.rand(2, 500)
y_data = np.random.rand(500)
res = self.__run_static_graph_case(x_data, y_data)
self.assertTrue(np.allclose(res, np.multiply(x_data, y_data)))
# test static computation graph: broadcast with axis
x_data = np.random.rand(2, 300, 40)
y_data = np.random.rand(300)
res = self.__run_static_graph_case(x_data, y_data, axis=1)
expected = np.multiply(x_data, y_data[..., np.newaxis])
self.assertTrue(np.allclose(res, expected))
# test dynamic computation graph: 1-d array
x_data = np.random.rand(200)
y_data = np.random.rand(200)
res = self.__run_dynamic_graph_case(x_data, y_data)
self.assertTrue(np.allclose(res, np.multiply(x_data, y_data)))
# test dynamic computation graph: 2-d array
x_data = np.random.rand(20, 50)
y_data = np.random.rand(20, 50)
res = self.__run_dynamic_graph_case(x_data, y_data)
self.assertTrue(np.allclose(res, np.multiply(x_data, y_data)))
# test dynamic computation graph: broadcast
x_data = np.random.rand(2, 500)
y_data = np.random.rand(500)
res = self.__run_dynamic_graph_case(x_data, y_data)
self.assertTrue(np.allclose(res, np.multiply(x_data, y_data)))
# test dynamic computation graph: broadcast with axis
x_data = np.random.rand(2, 300, 40)
y_data = np.random.rand(300)
res = self.__run_dynamic_graph_case(x_data, y_data, axis=1)
expected = np.multiply(x_data, y_data[..., np.newaxis])
        self.assertTrue(np.allclose(res, expected))


class TestMultiplyError(unittest.TestCase):
    """TestMultiplyError."""

    def test_errors(self):
"""test_errors."""
        # test static computation graph: dtype cannot be int8
paddle.disable_imperative()
with program_guard(Program(), Program()):
x = paddle.nn.data(name='x', shape=[100], dtype=np.int8)
y = paddle.nn.data(name='y', shape=[100], dtype=np.int8)
self.assertRaises(TypeError, tensor.multiply, x, y)
# test static computation graph: inputs must be broadcastable
with program_guard(Program(), Program()):
x = paddle.nn.data(name='x', shape=[20, 50], dtype=np.float64)
y = paddle.nn.data(name='y', shape=[20], dtype=np.float64)
self.assertRaises(fluid.core.EnforceNotMet, tensor.multiply, x, y)
np.random.seed(7)
        # test dynamic computation graph: dtype cannot be int8
paddle.enable_imperative()
x_data = np.random.randn(200).astype(np.int8)
y_data = np.random.randn(200).astype(np.int8)
x = paddle.imperative.to_variable(x_data)
y = paddle.imperative.to_variable(y_data)
self.assertRaises(fluid.core.EnforceNotMet, paddle.multiply, x, y)
# test dynamic computation graph: inputs must be broadcastable
x_data = np.random.rand(200, 5)
y_data = np.random.rand(200)
x = paddle.imperative.to_variable(x_data)
y = paddle.imperative.to_variable(y_data)
        self.assertRaises(fluid.core.EnforceNotMet, paddle.multiply, x, y)


if __name__ == '__main__':
unittest.main()
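The axis cases in test_multiply above compare Paddle against plain NumPy. The equivalence they rely on: with axis=1, a y of shape (300,) is aligned to dimension 1 of an x of shape (2, 300, 40), which in NumPy terms means appending trailing axes to y until the shapes broadcast. A standalone NumPy sketch of that check (no Paddle required):

import numpy as np

np.random.seed(7)
x = np.random.rand(2, 300, 40)
y = np.random.rand(300)

# axis=1 aligns y with x's second dimension; NumPy sees y as shape
# (300, 1), which then broadcasts across the trailing axis of x.
expected = np.multiply(x, y[..., np.newaxis])
assert expected.shape == (2, 300, 40)
assert np.allclose(expected[:, :, 0], x[:, :, 0] * y)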
@@ -112,7 +112,6 @@ from .math import elementwise_floordiv #DEFINE_ALIAS
from .math import elementwise_max #DEFINE_ALIAS
from .math import elementwise_min #DEFINE_ALIAS
from .math import elementwise_mod #DEFINE_ALIAS
-from .math import elementwise_mul #DEFINE_ALIAS
from .math import elementwise_pow #DEFINE_ALIAS
from .math import elementwise_sub #DEFINE_ALIAS
from .math import exp #DEFINE_ALIAS
@@ -142,6 +141,7 @@ from .math import max #DEFINE_ALIAS
from .math import min #DEFINE_ALIAS
from .math import mm #DEFINE_ALIAS
from .math import div #DEFINE_ALIAS
+from .math import multiply #DEFINE_ALIAS
from .math import add #DEFINE_ALIAS
from .math import atan #DEFINE_ALIAS
from .math import logsumexp #DEFINE_ALIAS
@@ -76,7 +76,6 @@ __all__ = [
'elementwise_max',
'elementwise_min',
'elementwise_mod',
-        'elementwise_mul',
'elementwise_pow',
'elementwise_sub',
'exp',
@@ -107,6 +106,7 @@ __all__ = [
'min',
'mm',
'div',
+        'multiply',
'add',
'atan',
'logsumexp',
@@ -580,11 +580,49 @@ Examples:
    return _elementwise_op(LayerHelper(op_type, **locals()))


def multiply(x, y, axis=-1, name=None):
"""
:alias_main: paddle.multiply
:alias: paddle.multiply,paddle.tensor.multiply,paddle.tensor.math.multiply
Examples:
.. code-block:: python
import paddle
import numpy as np
paddle.enable_imperative()
x_data = np.array([[1, 2], [3, 4]], dtype=np.float32)
y_data = np.array([[5, 6], [7, 8]], dtype=np.float32)
x = paddle.imperative.to_variable(x_data)
y = paddle.imperative.to_variable(y_data)
res = paddle.multiply(x, y)
print(res.numpy()) # [[5, 12], [21, 32]]
x_data = np.array([[[1, 2, 3], [1, 2, 3]]], dtype=np.float32)
y_data = np.array([1, 2], dtype=np.float32)
x = paddle.imperative.to_variable(x_data)
y = paddle.imperative.to_variable(y_data)
res = paddle.multiply(x, y, axis=1)
print(res.numpy()) # [[[1, 2, 3], [2, 4, 6]]]
"""
op_type = 'elementwise_mul'
act = None
if in_dygraph_mode():
return _elementwise_op_in_dygraph(
x, y, axis=axis, act=act, op_name=op_type)
return _elementwise_op(LayerHelper(op_type, **locals()))
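Like add and div above it in math.py, multiply is a thin dual-path wrapper: in dygraph mode it executes the elementwise_mul kernel immediately, otherwise it appends an elementwise_mul op to the static program through LayerHelper. A simplified, self-contained sketch of that dispatch pattern (the helpers below are hypothetical stand-ins, not Paddle internals):

# Hypothetical stand-ins illustrating the dygraph/static dispatch above.
_DYGRAPH_MODE = True
_STATIC_PROGRAM = []  # stands in for the default main program


def _run_eagerly(op_name, x, y):
    kernels = {'elementwise_mul': lambda a, b: a * b}
    return kernels[op_name](x, y)  # imperative: compute the value now


def _append_op(op_name, x, y):
    _STATIC_PROGRAM.append((op_name, x, y))  # declarative: record, don't run
    return '<var produced by %s>' % op_name


def multiply(x, y):
    if _DYGRAPH_MODE:
        return _run_eagerly('elementwise_mul', x, y)
    return _append_op('elementwise_mul', x, y)


print(multiply(3.0, 4.0))  # 12.0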


for func in [
add,
div,
+        multiply,
]:
-proto_dict = {'add': 'elementwise_add', 'div': 'elementwise_div'}
+proto_dict = {'add': 'elementwise_add', 'div': 'elementwise_div', 'multiply': 'elementwise_mul'}
op_proto = OpProtoHolder.instance().get_op_proto(proto_dict[func.__name__])
if func.__name__ in ['add']:
alias_main = ':alias_main: paddle.%(func)s' % {'func': func.__name__}
@@ -601,9 +639,6 @@ for func in [
]
else:
additional_args_lines = [
"out (Variable, optinal): The Variable that stores results of the operation. If out is None, \
a new Variable will be created to store the results."
,
"name (string, optional): Name of the output. \
Default is None. It's used to print debug info for developers. Details: \
:ref:`api_guide_Name` "
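The loop above exists so that thin wrappers such as add, div, and now multiply share one generated docstring, filled in per op from its proto. A minimal standalone sketch of the same attach-a-generated-docstring pattern (the names below are hypothetical, not the OpProtoHolder API):

# Hypothetical sketch: build shared docstrings for wrapper functions.
_TEMPLATE = "{name}(x, y): element-wise {verb} of x and y."


def add(x, y):
    return x + y


def multiply(x, y):
    return x * y


for func, verb in [(add, 'addition'), (multiply, 'multiplication')]:
    func.__doc__ = _TEMPLATE.format(name=func.__name__, verb=verb)

print(multiply.__doc__)
# multiply(x, y): element-wise multiplication of x and y.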