Unverified commit 12bed4a9, authored by WuHaobo, committed by GitHub

remove all op with out args (#25570)

* test=develop,test=document_fix, remove the out args

* Revert "test=develop,test=document_fix, remove the out args"

This reverts commit f430799b.

* test=develop,test=document_fix, remove the out args

* remove all the out args

* remove all the out args

* test=develop, remove all the out args

* test=develop, remove all the out args

* test=develop, remove out args in matmul testcase

* test=develop, fix the bugs of out args testcase in logsumexp

* test=develop, fix the bugs of out args testcase in elementwise_add

* test=develop, fix the bugs of out args testcase in elementwise_div

* test=develop, fix the bugs of out args testcase in activation

* test=develop, fix the bugs of out args testcase in addcmul

* test=develop, fix the bugs of out args

* test=develop, fix the bugs of out args in API example

* test=develop, fix the bugs of out args in kron testcase

* test=develop, fix the bugs of out args in kron testcase

* test=develop, remove out args

* test=develop, remove out args

* test=develop, fix the coverage

* test=develop, polish codestyle

* test=develop, Update math.py

* test=develop, polish codestyle

* test=develop, polish codestyle

* test=develop, polish codestyle

* test=develop, polish codestyle

* test=develop, polish the test_activation_op.py

* test=develop, Update math.py

* test=develop, fix the failed CI

* test=develop, add core.ops

* test=develop, add core.ops change clamp to clip

* test=develop, add core.ops change clamp to clip

* test=develop, write testcase for clamp OP
Parent: a6c87fd0
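In short, this commit drops the `out=`/`output=` keyword from the public `paddle.*` tensor APIs touched below (pow, add, div, mm, logsumexp, inverse, max, min, addcmul, clamp, kron, and the generated no-attr ops): each op now always creates its own output variable, and dygraph calls dispatch straight to `core.ops`. A minimal before/after sketch, adapted from the updated `paddle.pow` docstring in this diff (names and shapes are illustrative):

```python
import paddle
import paddle.fluid as fluid

x = fluid.data(name="x", shape=[32, 32], dtype="float32")

# Before this commit (no longer supported): the caller pre-created the
# output variable and passed it in via `out=`.
#   res = fluid.data(name="output", shape=[32, 32], dtype="float32")
#   y = paddle.pow(x, 2.0, out=res)

# After this commit: the op creates and returns its own output variable.
y = paddle.pow(x, 2.0)  # y is x^{2.0}
```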
@@ -70,25 +70,16 @@ class TestActivation(OpTest):
 class TestParameter(object):
-    def test_out(self):
-        with fluid.program_guard(fluid.Program()):
-            data = fluid.layers.data(name="X", shape=[1])
-            out = eval("paddle.%s(data, out=data)" % self.op_type)
-            place = fluid.CPUPlace()
-            exe = fluid.Executor(place)
-            result = exe.run(feed={"X": np.array([0.1])},
-                             fetch_list=[data, out])
-            self.assertEqual(result[0], result[1])
-
     def test_out_name(self):
         with fluid.program_guard(fluid.Program()):
+            np_x = np.array([0.1])
             data = fluid.layers.data(name="X", shape=[1])
-            out = eval("paddle.%s(data, name='Y', out=data)" % self.op_type)
+            out = eval("paddle.%s(data, name='Y')" % self.op_type)
             place = fluid.CPUPlace()
             exe = fluid.Executor(place)
-            result = exe.run(feed={"X": np.array([0.1])},
-                             fetch_list=[data, out])
-            self.assertEqual(result[0], result[1])
+            result, = exe.run(feed={"X": np_x}, fetch_list=[out])
+            expected = eval("np.%s(np_x)" % self.op_type)
+            self.assertEqual(result, expected)

     def test_dygraph(self):
         with fluid.dygraph.guard():
@@ -174,6 +165,17 @@ class TestAtan(TestActivation, TestParameter):
             return
         self.check_grad(['X'], 'Out')

+    def test_out_name(self):
+        with fluid.program_guard(fluid.Program()):
+            np_x = np.array([0.1])
+            data = fluid.layers.data(name="X", shape=[1])
+            out = paddle.atan(data, name='Y')
+            place = fluid.CPUPlace()
+            exe = fluid.Executor(place)
+            result, = exe.run(feed={"X": np_x}, fetch_list=[out])
+            expected = np.arctan(np_x)
+            self.assertEqual(result, expected)
+
     def test_dygraph(self):
         with fluid.dygraph.guard():
             np_x = np.array([0.1])
@@ -1034,21 +1036,18 @@ class TestPow_factor_tensor(TestActivation):
         factor_2 = fluid.layers.fill_constant([1], "float32", 3.0)
         out_1 = fluid.layers.pow(x, factor=factor_1)
         out_2 = fluid.layers.pow(x, factor=factor_2)
-        out_3 = paddle.pow(x, factor_1, out=res)
         out_4 = paddle.pow(x, factor_1, name='pow_res')
-        out_5 = paddle.pow(x, factor_1, out=res, name='pow_res')
         out_6 = paddle.pow(x, factor_2)
         self.assertEqual(('pow_res' in out_4.name), True)

         exe = fluid.Executor(place=fluid.CPUPlace())
-        res_1, res_2, res_3, res, res_6 = exe.run(
+        res_1, res_2, res, res_6 = exe.run(
             fluid.default_main_program(),
             feed={"x": input},
-            fetch_list=[out_1, out_2, out_3, res, out_6])
+            fetch_list=[out_1, out_2, res, out_6])

         assert np.array_equal(res_1, np.power(input, 2))
         assert np.array_equal(res_2, np.power(input, 3))
-        assert np.array_equal(res_3, res)
         assert np.array_equal(res_6, np.power(input, 3))

     def test_error(self):
...
@@ -118,17 +118,6 @@ class TestAddcmul(unittest.TestCase):
             out = paddle.addcmul(input, tensor1, tensor2)
             self.assertEqual(out.shape, input.shape)

-    def test_addcmul_has_out(self):
-        program = Program()
-        with program_guard(program):
-            input = fluid.data(name='in', shape=[4, 100], dtype='float32')
-            tensor1 = fluid.data(name='t1', shape=[100], dtype='float32')
-            tensor2 = fluid.data(name='t2', shape=[100], dtype='float32')
-            out = fluid.data(name='out', shape=[4, 100], dtype='float32')
-            out = paddle.addcmul(input, tensor1, tensor2, out=out)
-            self.assertEqual(out.shape, input.shape)
-

 class InvalidInputTest(unittest.TestCase):
     def test_error(self):
...
@@ -20,6 +20,18 @@ import unittest

 class TestClampAPI(unittest.TestCase):
+    def test_dygraph_clamp(self):
+        in1 = np.array([[1.2, 3.5], [4.5, 6.4]]).astype('float32')
+        with fluid.dygraph.guard():
+            x1 = fluid.dygraph.to_variable(in1)
+            out1 = tensor.clamp(x1, min=3.5, max=5.0)
+            out2 = tensor.clamp(x1, min=2.5)
+            self.assertTrue(
+                np.allclose(
+                    out1.numpy(), in1.clip(
+                        min=3.5, max=5.0)))
+            self.assertTrue(np.allclose(out2.numpy(), in1.clip(min=2.5)))
+
     def test_clamp(self):
         data_shape = [1, 9, 9, 4]
         data = np.random.random(data_shape).astype('float32')
...
@@ -389,44 +389,6 @@ class TestElementwiseAddOpError(unittest.TestCase):

 class TestAddOp(unittest.TestCase):
-    def test_out(self):
-        with fluid.program_guard(fluid.Program()):
-            x = fluid.data(name="x", shape=[3], dtype="float32")
-            y = fluid.data(name='y', shape=[3], dtype='float32')
-            res = fluid.data(name="output", shape=[3], dtype="float32")
-
-            y_1 = paddle.add(x, y, out=res)
-
-            place = fluid.CPUPlace()
-            exe = fluid.Executor(place)
-            data1 = np.array([2, 3, 4], dtype='float32')
-            data2 = np.array([1, 5, 2], dtype='float32')
-            np_res, np_y_1 = exe.run(feed={'x': data1,
-                                           'y': data2},
-                                     fetch_list=[res, y_1])
-
-            self.assertEqual((np_res == np_y_1).all(), True)
-
-    def test_out_gpu(self):
-        if not fluid.core.is_compiled_with_cuda():
-            return
-        with fluid.program_guard(fluid.Program()):
-            x = fluid.data(name="x", shape=[3], dtype="float32")
-            y = fluid.data(name='y', shape=[3], dtype='float32')
-            res = fluid.data(name="output", shape=[3], dtype="float32")
-
-            y_1 = paddle.add(x, y, out=res)
-
-            place = fluid.CUDAPlace(0)
-            exe = fluid.Executor(place)
-            data1 = np.array([2, 3, 4], dtype='float32')
-            data2 = np.array([1, 5, 2], dtype='float32')
-            np_res, np_y_1 = exe.run(feed={'x': data1,
-                                           'y': data2},
-                                     fetch_list=[res, y_1])
-
-            self.assertEqual((np_res == np_y_1).all(), True)
-
     def test_name(self):
         with fluid.program_guard(fluid.Program()):
             x = fluid.data(name="x", shape=[2, 3], dtype="float32")
...
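The surviving static-graph pattern for `paddle.add` is what the updated docstring example in `math.py` below shows; a minimal sketch (feed values are illustrative):

```python
import numpy as np
import paddle
import paddle.fluid as fluid

with fluid.program_guard(fluid.Program()):
    x = fluid.data(name="x", shape=[3], dtype="float32")
    y = fluid.data(name="y", shape=[3], dtype="float32")
    z = paddle.add(x, y)  # no `out=` parameter any more

    exe = fluid.Executor(fluid.CPUPlace())
    z_np, = exe.run(feed={"x": np.array([2, 3, 4], dtype="float32"),
                          "y": np.array([1, 5, 2], dtype="float32")},
                    fetch_list=[z])
    # z_np == [3., 8., 6.]
```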
@@ -241,44 +241,6 @@ class TestElementwiseDivBroadcast(unittest.TestCase):

 class TestDivOp(unittest.TestCase):
-    def test_out(self):
-        with fluid.program_guard(fluid.Program()):
-            x = fluid.data(name="x", shape=[3], dtype="float32")
-            y = fluid.data(name='y', shape=[3], dtype='float32')
-            res = fluid.data(name="output", shape=[3], dtype="float32")
-
-            y_1 = paddle.div(x, y, out=res)
-
-            place = fluid.CPUPlace()
-            exe = fluid.Executor(place)
-            data1 = np.array([2, 3, 4], dtype='float32')
-            data2 = np.array([1, 5, 2], dtype='float32')
-            np_res, np_y_1 = exe.run(feed={'x': data1,
-                                           'y': data2},
-                                     fetch_list=[res, y_1])
-
-            self.assertEqual((np_res == np_y_1).all(), True)
-
-    def test_out_gpu(self):
-        if not fluid.core.is_compiled_with_cuda():
-            return
-        with fluid.program_guard(fluid.Program()):
-            x = fluid.data(name="x", shape=[3], dtype="float32")
-            y = fluid.data(name='y', shape=[3], dtype='float32')
-            res = fluid.data(name="output", shape=[3], dtype="float32")
-
-            y_1 = paddle.div(x, y, out=res)
-
-            place = fluid.CUDAPlace(0)
-            exe = fluid.Executor(place)
-            data1 = np.array([2, 3, 4], dtype='float32')
-            data2 = np.array([1, 5, 2], dtype='float32')
-            np_res, np_y_1 = exe.run(feed={'x': data1,
-                                           'y': data2},
-                                     fetch_list=[res, y_1])
-
-            self.assertEqual((np_res == np_y_1).all(), True)
-
     def test_name(self):
         with fluid.program_guard(fluid.Program()):
             x = fluid.data(name="x", shape=[2, 3], dtype="float32")
...
@@ -86,14 +86,10 @@ class TestInverseAPI(unittest.TestCase):
         if core.is_compiled_with_cuda():
             self.places.append(fluid.CUDAPlace(0))

-    def check_static_result(self, place, with_out=False):
+    def check_static_result(self, place):
         with fluid.program_guard(fluid.Program(), fluid.Program()):
             input = fluid.data(name="input", shape=[4, 4], dtype="float64")
-            if with_out:
-                out = fluid.data(name="output", shape=[4, 4], dtype="float64")
-            else:
-                out = None
-            result = paddle.inverse(input=input, out=out)
+            result = paddle.inverse(input=input)

             input_np = np.random.random([4, 4]).astype("float64")
             result_np = np.linalg.inv(input_np)
...
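`paddle.inverse` likewise now allocates its own output; a sketch of the static-graph usage that the trimmed `check_static_result` still covers (executor and feed names are illustrative):

```python
import numpy as np
import paddle
import paddle.fluid as fluid

with fluid.program_guard(fluid.Program(), fluid.Program()):
    mat = fluid.data(name="input", shape=[4, 4], dtype="float64")
    inv = paddle.inverse(input=mat)  # output variable created by the op

    input_np = np.random.random([4, 4]).astype("float64")
    exe = fluid.Executor(fluid.CPUPlace())
    inv_np, = exe.run(feed={"input": input_np}, fetch_list=[inv])
    assert np.allclose(inv_np, np.linalg.inv(input_np))
```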
@@ -93,8 +93,7 @@ class TestKronLayer(unittest.TestCase):
         with fluid.program_guard(main, start):
             a_var = fluid.data("a", [-1, -1], dtype="float64")
             b_var = fluid.data("b", [-1, -1], dtype="float64")
-            out_var = fluid.layers.create_tensor("float64", "c")
-            paddle.kron(a_var, b_var, out=out_var)
+            out_var = paddle.kron(a_var, b_var)

         place = fluid.CPUPlace()
         exe = fluid.Executor(place)
...
@@ -71,15 +71,6 @@ class TestLogSumExpOp(unittest.TestCase):
                     x, keepdim=True).numpy(),
                 np.log(np.sum(np.exp(np_x), keepdims=True))))

-            np_x = np.random.uniform(0.1, 1, [2, 3, 4]).astype(np.float32)
-            x = fluid.dygraph.to_variable(np_x)
-            helper = LayerHelper("test_logsumexp")
-            out = helper.create_variable(
-                type=x.type, name='out', dtype=x.dtype, persistable=False)
-            paddle.logsumexp(x, out=out)
-            self.assertTrue(
-                np.allclose(out.numpy(), np.log(np.sum(np.exp(np_x)))))
-

 if __name__ == '__main__':
     unittest.main()
@@ -245,23 +245,6 @@ for dim in [4]:

 class API_TestMm(unittest.TestCase):
     def test_out(self):
-        with fluid.program_guard(fluid.Program()):
-            x = fluid.data(name="x", shape=[3, 2], dtype="float64")
-            y = fluid.data(name='y', shape=[2, 3], dtype='float64')
-            res = fluid.data(name="output", shape=[3, 3], dtype="float64")
-            y_1 = paddle.mm(x, y, out=res)
-            exe = fluid.Executor(fluid.CPUPlace())
-            data1 = np.random.rand(3, 2)
-            data2 = np.random.rand(2, 3)
-            np_res, expected_result = exe.run(feed={'x': data1,
-                                                    'y': data2},
-                                              fetch_list=[res, y_1])
-            self.assertTrue(
-                np.allclose(
-                    np.array(np_res), np.array(expected_result), atol=1e-5),
-                "two value is\
-                {}\n{}, check diff!".format(np_res, expected_result))
-
         with fluid.program_guard(fluid.Program()):
             x = fluid.data(name="x", shape=[2], dtype="float64")
             y = fluid.data(name='y', shape=[2], dtype='float64')
@@ -280,18 +263,6 @@ class API_TestMm(unittest.TestCase):
                 "two value is\
                 {}\n{}, check diff!".format(np_res, expected_result))

-    def test_dygraph_with_out(self):
-        device = fluid.CPUPlace()
-        with fluid.dygraph.guard(device):
-            input_array1 = np.random.rand(3, 4).astype("float64")
-            input_array2 = np.random.rand(4, 3).astype("float64")
-            out_array = np.random.rand(3, 3).astype("float64")
-            data1 = fluid.dygraph.to_variable(input_array1)
-            data2 = fluid.dygraph.to_variable(input_array2)
-            paddle_out_holder = fluid.dygraph.to_variable(out_array)
-            out = paddle.mm(data1, data2, out=paddle_out_holder)
-            self.assertTrue(np.allclose(paddle_out_holder.numpy(), out.numpy()))
-
     def test_dygraph_without_out(self):
         device = fluid.CPUPlace()
         with fluid.dygraph.guard(device):
...
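For dygraph callers of `paddle.mm`, the only remaining pattern is the one the kept `test_dygraph_without_out` exercises; a minimal sketch (shapes are illustrative):

```python
import numpy as np
import paddle
import paddle.fluid as fluid

with fluid.dygraph.guard(fluid.CPUPlace()):
    a = fluid.dygraph.to_variable(np.random.rand(3, 4).astype("float64"))
    b = fluid.dygraph.to_variable(np.random.rand(4, 3).astype("float64"))
    out = paddle.mm(a, b)  # the result variable is created internally
    assert np.allclose(out.numpy(), np.matmul(a.numpy(), b.numpy()))
```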
@@ -140,7 +140,7 @@ def generate_op_noattr(op_type):
     """
     op_proto = OpProtoHolder.instance().get_op_proto(op_type)

-    def func(x, name=None, out=None):
+    def func(x, name=None):
         if in_dygraph_mode():
             op = getattr(core.ops, op_type)
             return op(x)
@@ -149,13 +149,6 @@ def generate_op_noattr(op_type):
                                  op_type)
         helper = LayerHelper(op_type, **locals())

-        if name and out:
-            warnings.warn(
-                "Both name and out parameters have been set in fluid.tensor.math.%s(), only out will take effect to specify the result storage. "
-                "You can discard either one to solve this warning." % op_type,
-                category=UserWarning,
-                stacklevel=2)
-        if not out:
-            out = helper.create_variable_for_type_inference(dtype=x.dtype)
+        out = helper.create_variable_for_type_inference(dtype=x.dtype)
         helper.append_op(type=op_type, inputs={"X": x}, outputs={"Out": out})
         return out
@@ -193,7 +186,7 @@ Examples:
     return func

 @templatedoc()
-def pow(input, exponent, out=None, name=None):
+def pow(input, exponent, name=None):
     """
     :alias_main: paddle.pow
     :alias: paddle.pow,paddle.tensor.pow,paddle.tensor.math.pow
@@ -205,8 +198,6 @@ def pow(input, exponent, name=None):
     Args:
         input(Variable): A ``Tensor`` or ``LoDTensor`` . The data type is ``float32`` or ``float64``.
         exponent(float32|Variable): A scalar with type ``float32`` or a ``Tensor`` with shape [1] and type ``float32``.
-        out (Variable, optional): The Variable that stores results of the operation.
-            If out is None, a new Variable will be created to store the results.
         name(str, optional): The default value is None. Normally there is no need for user to set this property.
             For more information, please refer to :ref:`api_guide_Name` .
@@ -223,16 +214,17 @@ def pow(input, exponent, name=None):
            x = fluid.data(name="x", shape=[32,32], dtype="float32")

            # example 1: argument exponent is float
-           res = fluid.data(name="output", shape=[32,32], dtype="float32")
-           y_1 = paddle.pow(x, 2.0, out=res)
+           y_1 = paddle.pow(x, 2.0)
            # y_1 is x^{2.0}

            # example 2: argument exponent is Variable
            exponent_tensor = fluid.layers.fill_constant([1], "float32", 3.0)
-           res = fluid.data(name="output", shape=[32,32], dtype="float32")
-           y_2 = paddle.pow(x, exponent_tensor, out=res)
+           y_2 = paddle.pow(x, exponent_tensor)
            # y_2 is x^{3.0}
     """
+    if in_dygraph_mode():
+        return core.ops.pow(input, "exponent", exponent)
+
     helper = LayerHelper('pow', **locals())
     inputs = {'X': input}
     attrs = {}
@@ -242,22 +234,11 @@ def pow(input, exponent, name=None):
     else:
         attrs['factor'] = exponent

-    if out is None:
-        out = helper.create_variable_for_type_inference(dtype=input.dtype)
-    else:
-        check_dtype(
-            out.dtype, out.name,
-            convert_dtype(input.dtype), 'pow',
-            '(The out data type in pow must be the same with input data type.)')
-        if name:
-            warnings.warn(
-                "The output Variable name of the paddle.tensor.pow operation can only be given by parameter out or name. \
-                When parameter out and name are set at the same time, out has a higher priority than name. \
-                Finally, the output Variable name is same as the out name %s"
-                %
-                out.name,
-                category=UserWarning,
-                stacklevel=2)
+    out = helper.create_variable_for_type_inference(dtype=input.dtype)
+    check_dtype(
+        out.dtype, out.name,
+        convert_dtype(input.dtype), 'pow',
+        '(The out data type in pow must be the same with input data type.)')

     helper.append_op(
         type='pow', inputs=inputs, outputs={'Out': out}, attrs=attrs)
@@ -307,8 +288,6 @@ def _elementwise_op(helper):
     axis = helper.kwargs.get('axis', -1)
     use_mkldnn = helper.kwargs.get('use_mkldnn', False)
     name = helper.kwargs.get('name', None)
-    out = helper.kwargs.get('out', None)
-    if out is None:
-        if name is None:
-            out = helper.create_variable_for_type_inference(dtype=x.dtype)
-        else:
+    if name is None:
+        out = helper.create_variable_for_type_inference(dtype=x.dtype)
+    else:
@@ -325,7 +304,7 @@ def _elementwise_op(helper):
     return helper.append_activation(out)

-def add(x, y, alpha=1, out=None, name=None):
+def add(x, y, alpha=1, name=None):
     """
 Examples:
@@ -417,9 +396,7 @@ Examples:
         x = fluid.data(name="x", shape=[3], dtype="float32")
         y = fluid.data(name='y', shape=[3], dtype='float32')

-        output = fluid.data(name="output", shape=[3], dtype="float32")
-        z = paddle.add(x, y, out=output)
+        z = paddle.add(x, y)

         place = fluid.CPUPlace()
         exe = fluid.Executor(place)
@@ -456,18 +433,10 @@ Examples:
         return _elementwise_op_in_dygraph(
             x, y, axis=axis, act=act, op_name=op_type)

-    original_op_type = 'add'
-    if name and out:
-        warnings.warn(
-            "Both name and out parameters have been set in paddle.tensor.%s, only out will take effect to specify the result storage. "
-            "You can discard either one to solve this warning." %
-            original_op_type,
-            category=UserWarning,
-            stacklevel=2)
     return _elementwise_op(LayerHelper(op_type, **locals()))

-def div(x, y, out=None, name=None):
+def div(x, y, name=None):
     """
 Examples:
@@ -537,8 +506,7 @@ Examples:
         x = fluid.data(name="x", shape=[2,3,4,5], dtype='float32')
         y = fluid.data(name="y", shape=[5], dtype='float32')

-        output = fluid.data(name="output", shape=[2,3,4,5], dtype="float32")
-        z = paddle.div(x, y, out=output)
+        z = paddle.div(x, y)
         # z = x / y

         place = fluid.CPUPlace()
@@ -573,14 +541,6 @@ Examples:
         return _elementwise_op_in_dygraph(
             x, y, axis=axis, act=act, op_name=op_type)

-    original_op_type = 'div'
-    if name and out:
-        warnings.warn(
-            "Both name and out parameters have been set in paddle.tensor.%s, only out will take effect to specify the result storage. "
-            "You can discard either one to solve this warning." %
-            original_op_type,
-            category=UserWarning,
-            stacklevel=2)
     return _elementwise_op(LayerHelper(op_type, **locals()))
@@ -634,9 +594,6 @@ for func in [
         additional_args_lines = [
             "alpha (int|float, optional): The alpha factor of the input. Default is 1. If alpha is not 1, the equation becomes Out = X + alpha * Y.",
-            "out (Variable, optinal): The Variable that stores results of the operation. Default is None. If out is None, \
-            a new Variable will be created to store the results."
-            ,
             "name (string, optional): Name of the output. \
             Default is None. It's used to print debug info for developers. Details: \
             :ref:`api_guide_Name` "
@@ -858,7 +815,7 @@ def elementwise_sum(inputs, name=None):
     return out

-def mm(input, mat2, out=None, name=None):
+def mm(input, mat2, name=None):
     """
     :alias_main: paddle.mm
     :alias: paddle.mm,paddle.tensor.mm,paddle.tensor.math.mm
@@ -876,9 +833,6 @@ def mm(input, mat2, name=None):
     Args:
         x (Variable): The input variable which is a Tensor or LoDTensor.
         mat2 (Variable): The input variable which is a Tensor or LoDTensor.
-        out(Variable, optional): Optional output which can be any created
-            Variable that meets the requirements to store the result of operation.
-            if out is None, a new Varibale will be create to store the result.
         name(str, optional): The default value is None. Normally there is no need for
             user to set this property. For more information, please refer to :ref:`api_guide_Name`
@@ -914,7 +868,6 @@ def mm(input, mat2, name=None):
             out = paddle.mm(x, mat2) # out shape is [2, 2]
     """
     if in_dygraph_mode():
-        if out is None:
-            out = _varbase_creator(dtype=input.dtype)
+        out = _varbase_creator(dtype=input.dtype)
         core.ops.matmul(input, mat2, out)
         return out
@@ -955,7 +908,6 @@ def mm(input, mat2, name=None):
     __check_input(input, mat2)

     helper = LayerHelper('mm', **locals())
-    if out is None:
-        out = helper.create_variable_for_type_inference(dtype=input.dtype)
+    out = helper.create_variable_for_type_inference(dtype=input.dtype)
     helper.append_op(
         type='matmul', inputs={'X': input,
@@ -1032,7 +984,7 @@ def addmm(input, x, y, alpha=1.0, beta=1.0, name=None):
     return out

-def logsumexp(x, dim=None, keepdim=False, out=None, name=None):
+def logsumexp(x, dim=None, keepdim=False, name=None):
     """
     :alias_main: paddle.logsumexp
     :alias: paddle.logsumexp,paddle.tensor.logsumexp,paddle.tensor.math.logsumexp
@@ -1052,7 +1004,6 @@ def logsumexp(x, dim=None, keepdim=False, name=None):
         keep_dim (bool, optional): Whether to reserve the reduced dimension in the output Tensor.
             The result tensor will have one fewer dimension than the :attr:`input` unless :attr:`keep_dim`
             is true, default value is False.
-        out (Variable), optional): Enable user to explicitly specify an output variable to save result.
         name (str, optional): The default value is None. Normally there is no need for user to
             set this property. For more information, please refer to :ref:`api_guide_Name`
@@ -1094,16 +1045,10 @@ def logsumexp(x, dim=None, keepdim=False, name=None):
     exp_out = layers.exp(x)
     sum_out = layers.reduce_sum(exp_out, dim, keepdim)

-    if out is not None:
-        check_variable_and_dtype(out, 'out', [x.dtype], op_type)
-        helper = LayerHelper(op_type, **locals())
-        helper.append_op(type="log", inputs={"X": sum_out}, outputs={"Out": out})
-        return out
-
     return layers.log(sum_out, name)

-def inverse(input, out=None, name=None):
+def inverse(input, name=None):
     """
     :alias_main: paddle.inverse
     :alias: paddle.inverse,paddle.tensor.inverse,paddle.tensor.math.inverse
@@ -1117,9 +1062,6 @@ def inverse(input, name=None):
             dimensions should be equal. When the number of dimensions is
             greater than 2, it is treated as batches of square matrix. The data
             type can be float32 and float64.
-        out (Variable, optional): Optional output which can be any created
-            Variable that meets the requirements to store the result of operation.
-            If out is None, a new Varibale will be create to store the result.
         name (str, optional): The default value is None. Normally there is no need for
             user to set this property. For more information,
             please refer to :ref:`api_guide_Name`
@@ -1165,20 +1107,16 @@ def inverse(input, name=None):
                 "of dimensions is no less than 2. But reviced: %d, "
                 "input's shape: %s." % (len(input.shape), input.shape))

-        if out is not None:
-            check_variable_and_dtype(out, 'out', input.dtype, 'inverse')
-
     _check_input(input)

     helper = LayerHelper('inverse', **locals())
-    if out is None:
-        out = helper.create_variable_for_type_inference(dtype=input.dtype)
+    out = helper.create_variable_for_type_inference(dtype=input.dtype)
     helper.append_op(
         type='inverse', inputs={'Input': [input] }, outputs={'Output': [out]})
     return out

-def max(input, dim=None, keep_dim=False, out=None, name=None):
+def max(input, dim=None, keep_dim=False, name=None):
     """
     :alias_main: paddle.max
     :alias: paddle.max,paddle.tensor.max,paddle.tensor.math.max
@@ -1197,9 +1135,6 @@ def max(input, dim=None, keep_dim=False, name=None):
             output Tensor. The result tensor will have one fewer dimension
             than the :attr:`input` unless :attr:`keep_dim` is true, default
             value is False.
-        out(Variable, optional): Optional output which can be any created
-            Variable that meets the requirements to store the result of operation.
-            if out is None, a new Varibale will be create to store the result.
         name(str, optional): The default value is None. Normally there is no need for
             user to set this property. For more information, please refer to :ref:`api_guide_Name`
@@ -1231,7 +1166,6 @@ def max(input, dim=None, keep_dim=False, name=None):
     """
     helper = LayerHelper('max', **locals())
-    if out is None:
-        out = helper.create_variable_for_type_inference(
-            dtype=helper.input_dtype())
+    out = helper.create_variable_for_type_inference(
+        dtype=helper.input_dtype())
     if dim is not None and not isinstance(dim, list):
@@ -1258,7 +1192,7 @@ def max(input, dim=None, keep_dim=False, name=None):
     return out

-def min(input, dim=None, keep_dim=False, out=None, name=None):
+def min(input, dim=None, keep_dim=False, name=None):
     """
     :alias_main: paddle.min
     :alias: paddle.min,paddle.tensor.min,paddle.tensor.math.min
@@ -1277,9 +1211,6 @@ def min(input, dim=None, keep_dim=False, name=None):
             output Tensor. The result tensor will have one fewer dimension
             than the :attr:`input` unless :attr:`keep_dim` is true, default
             value is False.
-        out(Variable, optional): Optional output which can be any created
-            Variable that meets the requirements to store the result of operation.
-            if out is None, a new Varibale will be create to store the result.
         name(str, optional): The default value is None. Normally there is no need for
             user to set this property. For more information, please refer to :ref:`api_guide_Name`
@@ -1310,7 +1241,6 @@ def min(input, dim=None, keep_dim=False, name=None):
     """
     helper = LayerHelper('min', **locals())
-    if out is None:
-        out = helper.create_variable_for_type_inference(
-            dtype=helper.input_dtype())
+    out = helper.create_variable_for_type_inference(
+        dtype=helper.input_dtype())
     if dim is not None and not isinstance(dim, list):
@@ -1380,7 +1310,7 @@ def log1p(x, name=None):
     return out

-def addcmul(input, tensor1, tensor2, value=1.0, out=None, name=None):
+def addcmul(input, tensor1, tensor2, value=1.0, name=None):
     """
     :alias_main: paddle.addcmul
     :alias: paddle.addcmul,paddle.tensor.addcmul,paddle.tensor.math.addcmul
@@ -1396,10 +1326,6 @@ def addcmul(input, tensor1, tensor2, value=1.0, name=None):
         tensor1(Variable): The tensor to be multiplied. A Tensor with type float32, float64, int32, int64.
         tensor2(Variable): The tensor to be multiplied. A Tensor with type float32, float64, int32, int64.
         value(int|float): The multiplier for tensor1*tensor2. For float32 and float64 type input, value must be float, otherwise an integer.
-        out(Variable, Optional): The variable that specifies the output of the
-            operator, which can be Variable that has been created in the
-            program. The default value is None, and a new Variable will be
-            created to save the output. Default: None.
         name(str, Optional): For details, please refer to :ref:`api_guide_Name`.
             Generally, no setting is required. Default: None.
     Returns:
@@ -1422,14 +1348,11 @@ def addcmul(input, tensor1, tensor2, value=1.0, name=None):
     if convert_dtype(input.dtype) in ['int32', 'int64']:
         check_type(value, 'value', int, 'addcmul')

-    if out is not None:
-        layers.assign(layers.elementwise_add(input, layers.elementwise_mul(tensor1, tensor2) * value), out)
-    else:
-        out = layers.elementwise_add(input, layers.elementwise_mul(tensor1, tensor2) * value)
+    out = layers.elementwise_add(input, layers.elementwise_mul(tensor1, tensor2) * value)
     return out

-def clamp(input, min=None, max=None, output=None, name=None):
+def clamp(input, min=None, max=None, name=None):
     """
     :alias_main: paddle.clamp
     :alias: paddle.clamp,paddle.tensor.clamp,paddle.tensor.math.clamp
@@ -1450,8 +1373,6 @@ def clamp(input, min=None, max=None, name=None):
             with shape [1] and type ``int32``, ``float32``, ``float64``.
         max (float32|Variable): The upper bound with type ``float32`` or a ``Tensor``
             with shape [1] and type ``int32``, ``float32``, ``float64``.
-        output (Variable, optional): A tensor or LoDTensor. If :attr:`output` is None,
-            a new tensor will be created as :attr:`output`. Default: None.
         name (str, optional): The default value is None. Normally there is no
             need for user to set this property. For more information, please
             refer to :ref:`api_guide_Name`.
@@ -1482,6 +1403,11 @@ def clamp(input, min=None, max=None, name=None):

     assert min is not None or max is not None, "either min or max should be defined."

+    if in_dygraph_mode():
+        min = sys.float_info.min if min is None else min
+        max = sys.float_info.max if max is None else max
+        return core.ops.clip(input, "min", min, "max", max)
+
     if min is not None:
         check_type(min, 'min', (float, Variable), 'clamp')
         if isinstance(min, Variable):
@@ -1509,7 +1435,6 @@ def clamp(input, min=None, max=None, name=None):
         attrs['max'] = max

     helper = LayerHelper('clamp', **locals())
-    if output is None:
-        output = helper.create_variable_for_type_inference(
-            dtype=helper.input_dtype())
+    output = helper.create_variable_for_type_inference(
+        dtype=helper.input_dtype())
     helper.append_op(
@@ -1517,6 +1442,7 @@ def clamp(input, min=None, max=None, name=None):
     return output

+
 def trace(x, offset=0, axis1=0, axis2=1, name=None):
     """
     :alias_main: paddle.trace
@@ -1611,7 +1537,7 @@ def trace(x, offset=0, axis1=0, axis2=1, name=None):
     return out

 @templatedoc(op_type="kron")
-def kron(x, y, out=None, name=None):
+def kron(x, y, name=None):
     """
     :alias_main: paddle.kron
     :alias: paddle.kron,paddle.tensor.kron,paddle.tensor.math.kron
@@ -1624,10 +1550,6 @@ ${comment}
         y (Variable): the second operand of kron op, data type: float16,
             float32, float64, int32 or int64. Its data type should be the same
             with x.
-        out (Variable, optional): Optional output which can be any created
-            Variable that meets the requirements to store the result of
-            operation. If out is None, a new Varibale will be create to store
-            the result. Defaults to None.
         name(str, optional): The default value is None. Normally there is no
             need for user to set this property. For more information, please
             refer to :ref:`api_guide_Name`.
@@ -1668,9 +1590,6 @@ ${comment}
     check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64', 'int32', 'int64'], 'kron')
     check_variable_and_dtype(y, 'y', ['float16', 'float32', 'float64', 'int32', 'int64'], 'kron')

-    if out is None:
-        out = helper.create_variable_for_type_inference(dtype=x.dtype)
-    else:
-        check_variable_and_dtype(out, 'out', ['float16', 'float32', 'float64', 'int32', 'int64'], 'kron')
+    out = helper.create_variable_for_type_inference(dtype=x.dtype)
     helper.append_op(type="kron", inputs={"X": x, "Y": y}, outputs={"Out": out})
     return out
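The new `in_dygraph_mode()` branch in `clamp` above dispatches to `core.ops.clip`, substituting `sys.float_info.min`/`sys.float_info.max` for an omitted bound. A sketch of what the added `test_dygraph_clamp` case exercises (input values taken from that test):

```python
import numpy as np
import paddle.fluid as fluid
import paddle.tensor as tensor

in1 = np.array([[1.2, 3.5], [4.5, 6.4]]).astype("float32")
with fluid.dygraph.guard():
    x = fluid.dygraph.to_variable(in1)
    out = tensor.clamp(x, min=3.5, max=5.0)  # runs core.ops.clip under the hood
    assert np.allclose(out.numpy(), in1.clip(min=3.5, max=5.0))
```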
@@ -85,7 +85,7 @@ api_spec_diff=`python ${PADDLE_ROOT}/tools/check_api_source_without_core_ops.py
 if [ "$api_spec_diff" != "" ]; then
     echo_line="You must have one RD (zhiqiu (Recommend) or phlrain) approval for the api change for the opreator-related api without 'core.ops'.\n"
     echo_line="${echo_line}For more details, please click [https://github.com/PaddlePaddle/Paddle/wiki/paddle_api_development_manual.md]\n"
-    echo_line="${echo_line}Related APIs: ${api_spec_diff}"
+    echo_line="${echo_line}Related APIs: ${api_spec_diff}\n"
     check_approval 1 6888866 43953930
 fi
...