Unverified commit 12bed4a9, authored by W WuHaobo, committed by GitHub

remove all op with out args (#25570)

* test=develop,test=document_fix, remove the out args

* Revert "test=develop,test=document_fix, remove the out args"

This reverts commit f430799b.

* test=develop,test=document_fix, remove the out args

* remove all  the out args

* remove all  the out args

* test=develop, remove all  the out args

* test=develop, remove all  the out args

* test=develop, remove out args in matmul testcase

* test=develop, fix the bugs of out args testcase in logsumexp

* test=develop, fix the bugs of out args testcase in elementwise_add

* test=develop, fix the bugs of out args testcase in elementwise_div

* test=develop, fix the bugs of out args testcase in activation

* test=develop, fix the bugs of out args testcase in addcmul

* test=develop, fix the bugs of out args

* test=develop, fix the bugs of out args in API example

* test=develop, fix the bugs of out args in kron testcase

* test=develop, fix the bugs of out args in kron testcase

* test=develop, remove out args

* test=develop, remove out args

* test=develop, fix the coverage

* test=develop, polish codestyle

* test=develop, Update math.py

* test=develop, polish codestyle

* test=develop, polish codestyle

* test=develop, polish codestyle

* test=develop, polish codestyle

* test=develop, polish the test_activation_op.py

* test=develop, Update math.py

* test=develop, fix the failed CI

* test=develop, add core.ops

* test=develop, add core.ops change clamp to clip

* test=develop, add core.ops change clamp to clip

* test=develop, write testcase for clamp OP
Parent a6c87fd0
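For orientation before the diff: the user-visible effect of this change is that tensor operators such as paddle.add, paddle.pow, paddle.mm, paddle.kron, paddle.addcmul, paddle.inverse, and paddle.logsumexp no longer accept an out= argument that writes the result into a pre-allocated tensor; callers simply use the returned tensor. The snippet below is a minimal before/after sketch in dygraph mode; the input values and the print call are illustrative and not taken from this commit's tests.

    import numpy as np
    import paddle
    import paddle.fluid as fluid

    with fluid.dygraph.guard():
        x = fluid.dygraph.to_variable(np.array([2., 3., 4.], dtype='float32'))
        y = fluid.dygraph.to_variable(np.array([1., 5., 2.], dtype='float32'))

        # Before this commit a destination tensor could be pre-created and
        # passed via out=, e.g. paddle.add(x, y, out=res). That keyword is
        # removed here; the operator simply returns a new tensor.
        res = paddle.add(x, y)
        print(res.numpy())  # [3. 8. 6.]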
@@ -70,25 +70,16 @@ class TestActivation(OpTest):
 
 
 class TestParameter(object):
-    def test_out(self):
-        with fluid.program_guard(fluid.Program()):
-            data = fluid.layers.data(name="X", shape=[1])
-            out = eval("paddle.%s(data, out=data)" % self.op_type)
-            place = fluid.CPUPlace()
-            exe = fluid.Executor(place)
-            result = exe.run(feed={"X": np.array([0.1])},
-                             fetch_list=[data, out])
-            self.assertEqual(result[0], result[1])
-
     def test_out_name(self):
         with fluid.program_guard(fluid.Program()):
+            np_x = np.array([0.1])
             data = fluid.layers.data(name="X", shape=[1])
-            out = eval("paddle.%s(data, name='Y', out=data)" % self.op_type)
+            out = eval("paddle.%s(data, name='Y')" % self.op_type)
             place = fluid.CPUPlace()
             exe = fluid.Executor(place)
-            result = exe.run(feed={"X": np.array([0.1])},
-                             fetch_list=[data, out])
-            self.assertEqual(result[0], result[1])
+            result, = exe.run(feed={"X": np_x}, fetch_list=[out])
+            expected = eval("np.%s(np_x)" % self.op_type)
+            self.assertEqual(result, expected)
 
     def test_dygraph(self):
         with fluid.dygraph.guard():
@@ -174,6 +165,17 @@ class TestAtan(TestActivation, TestParameter):
             return
         self.check_grad(['X'], 'Out')
 
+    def test_out_name(self):
+        with fluid.program_guard(fluid.Program()):
+            np_x = np.array([0.1])
+            data = fluid.layers.data(name="X", shape=[1])
+            out = paddle.atan(data, name='Y')
+            place = fluid.CPUPlace()
+            exe = fluid.Executor(place)
+            result, = exe.run(feed={"X": np_x}, fetch_list=[out])
+            expected = np.arctan(np_x)
+            self.assertEqual(result, expected)
+
     def test_dygraph(self):
         with fluid.dygraph.guard():
             np_x = np.array([0.1])
@@ -1034,21 +1036,18 @@ class TestPow_factor_tensor(TestActivation):
         factor_2 = fluid.layers.fill_constant([1], "float32", 3.0)
         out_1 = fluid.layers.pow(x, factor=factor_1)
         out_2 = fluid.layers.pow(x, factor=factor_2)
-        out_3 = paddle.pow(x, factor_1, out=res)
         out_4 = paddle.pow(x, factor_1, name='pow_res')
-        out_5 = paddle.pow(x, factor_1, out=res, name='pow_res')
         out_6 = paddle.pow(x, factor_2)
         self.assertEqual(('pow_res' in out_4.name), True)
 
         exe = fluid.Executor(place=fluid.CPUPlace())
-        res_1, res_2, res_3, res, res_6 = exe.run(
+        res_1, res_2, res, res_6 = exe.run(
             fluid.default_main_program(),
             feed={"x": input},
-            fetch_list=[out_1, out_2, out_3, res, out_6])
+            fetch_list=[out_1, out_2, res, out_6])
 
         assert np.array_equal(res_1, np.power(input, 2))
         assert np.array_equal(res_2, np.power(input, 3))
-        assert np.array_equal(res_3, res)
         assert np.array_equal(res_6, np.power(input, 3))
 
     def test_error(self):
...
@@ -118,17 +118,6 @@ class TestAddcmul(unittest.TestCase):
             out = paddle.addcmul(input, tensor1, tensor2)
             self.assertEqual(out.shape, input.shape)
-
-    def test_addcmul_has_out(self):
-        program = Program()
-        with program_guard(program):
-            input = fluid.data(name='in', shape=[4, 100], dtype='float32')
-            tensor1 = fluid.data(name='t1', shape=[100], dtype='float32')
-            tensor2 = fluid.data(name='t2', shape=[100], dtype='float32')
-            out = fluid.data(name='out', shape=[4, 100], dtype='float32')
-            out = paddle.addcmul(input, tensor1, tensor2, out=out)
-            self.assertEqual(out.shape, input.shape)
-
 
 
 class InvalidInputTest(unittest.TestCase):
     def test_error(self):
...
@@ -20,6 +20,18 @@ import unittest
 
 
 class TestClampAPI(unittest.TestCase):
+    def test_dygraph_clamp(self):
+        in1 = np.array([[1.2, 3.5], [4.5, 6.4]]).astype('float32')
+        with fluid.dygraph.guard():
+            x1 = fluid.dygraph.to_variable(in1)
+            out1 = tensor.clamp(x1, min=3.5, max=5.0)
+            out2 = tensor.clamp(x1, min=2.5)
+            self.assertTrue(
+                np.allclose(
+                    out1.numpy(), in1.clip(
+                        min=3.5, max=5.0)))
+            self.assertTrue(np.allclose(out2.numpy(), in1.clip(min=2.5)))
+
     def test_clamp(self):
         data_shape = [1, 9, 9, 4]
         data = np.random.random(data_shape).astype('float32')
...
@@ -389,44 +389,6 @@ class TestElementwiseAddOpError(unittest.TestCase):
 
 
 class TestAddOp(unittest.TestCase):
-    def test_out(self):
-        with fluid.program_guard(fluid.Program()):
-            x = fluid.data(name="x", shape=[3], dtype="float32")
-            y = fluid.data(name='y', shape=[3], dtype='float32')
-            res = fluid.data(name="output", shape=[3], dtype="float32")
-
-            y_1 = paddle.add(x, y, out=res)
-
-            place = fluid.CPUPlace()
-            exe = fluid.Executor(place)
-            data1 = np.array([2, 3, 4], dtype='float32')
-            data2 = np.array([1, 5, 2], dtype='float32')
-            np_res, np_y_1 = exe.run(feed={'x': data1,
-                                           'y': data2},
-                                     fetch_list=[res, y_1])
-
-            self.assertEqual((np_res == np_y_1).all(), True)
-
-    def test_out_gpu(self):
-        if not fluid.core.is_compiled_with_cuda():
-            return
-        with fluid.program_guard(fluid.Program()):
-            x = fluid.data(name="x", shape=[3], dtype="float32")
-            y = fluid.data(name='y', shape=[3], dtype='float32')
-            res = fluid.data(name="output", shape=[3], dtype="float32")
-
-            y_1 = paddle.add(x, y, out=res)
-
-            place = fluid.CUDAPlace(0)
-            exe = fluid.Executor(place)
-            data1 = np.array([2, 3, 4], dtype='float32')
-            data2 = np.array([1, 5, 2], dtype='float32')
-            np_res, np_y_1 = exe.run(feed={'x': data1,
-                                           'y': data2},
-                                     fetch_list=[res, y_1])
-
-            self.assertEqual((np_res == np_y_1).all(), True)
-
     def test_name(self):
         with fluid.program_guard(fluid.Program()):
             x = fluid.data(name="x", shape=[2, 3], dtype="float32")
...
@@ -241,44 +241,6 @@ class TestElementwiseDivBroadcast(unittest.TestCase):
 
 
 class TestDivOp(unittest.TestCase):
-    def test_out(self):
-        with fluid.program_guard(fluid.Program()):
-            x = fluid.data(name="x", shape=[3], dtype="float32")
-            y = fluid.data(name='y', shape=[3], dtype='float32')
-            res = fluid.data(name="output", shape=[3], dtype="float32")
-
-            y_1 = paddle.div(x, y, out=res)
-
-            place = fluid.CPUPlace()
-            exe = fluid.Executor(place)
-            data1 = np.array([2, 3, 4], dtype='float32')
-            data2 = np.array([1, 5, 2], dtype='float32')
-            np_res, np_y_1 = exe.run(feed={'x': data1,
-                                           'y': data2},
-                                     fetch_list=[res, y_1])
-
-            self.assertEqual((np_res == np_y_1).all(), True)
-
-    def test_out_gpu(self):
-        if not fluid.core.is_compiled_with_cuda():
-            return
-        with fluid.program_guard(fluid.Program()):
-            x = fluid.data(name="x", shape=[3], dtype="float32")
-            y = fluid.data(name='y', shape=[3], dtype='float32')
-            res = fluid.data(name="output", shape=[3], dtype="float32")
-
-            y_1 = paddle.div(x, y, out=res)
-
-            place = fluid.CUDAPlace(0)
-            exe = fluid.Executor(place)
-            data1 = np.array([2, 3, 4], dtype='float32')
-            data2 = np.array([1, 5, 2], dtype='float32')
-            np_res, np_y_1 = exe.run(feed={'x': data1,
-                                           'y': data2},
-                                     fetch_list=[res, y_1])
-
-            self.assertEqual((np_res == np_y_1).all(), True)
-
     def test_name(self):
         with fluid.program_guard(fluid.Program()):
             x = fluid.data(name="x", shape=[2, 3], dtype="float32")
...
@@ -86,14 +86,10 @@ class TestInverseAPI(unittest.TestCase):
         if core.is_compiled_with_cuda():
             self.places.append(fluid.CUDAPlace(0))
 
-    def check_static_result(self, place, with_out=False):
+    def check_static_result(self, place):
         with fluid.program_guard(fluid.Program(), fluid.Program()):
             input = fluid.data(name="input", shape=[4, 4], dtype="float64")
-            if with_out:
-                out = fluid.data(name="output", shape=[4, 4], dtype="float64")
-            else:
-                out = None
-            result = paddle.inverse(input=input, out=out)
+            result = paddle.inverse(input=input)
 
             input_np = np.random.random([4, 4]).astype("float64")
             result_np = np.linalg.inv(input_np)
...
@@ -93,8 +93,7 @@ class TestKronLayer(unittest.TestCase):
         with fluid.program_guard(main, start):
             a_var = fluid.data("a", [-1, -1], dtype="float64")
             b_var = fluid.data("b", [-1, -1], dtype="float64")
-            out_var = fluid.layers.create_tensor("float64", "c")
-            paddle.kron(a_var, b_var, out=out_var)
+            out_var = paddle.kron(a_var, b_var)
 
         place = fluid.CPUPlace()
         exe = fluid.Executor(place)
...
@@ -71,15 +71,6 @@ class TestLogSumExpOp(unittest.TestCase):
                     x, keepdim=True).numpy(),
                 np.log(np.sum(np.exp(np_x), keepdims=True))))
-
-            np_x = np.random.uniform(0.1, 1, [2, 3, 4]).astype(np.float32)
-            x = fluid.dygraph.to_variable(np_x)
-            helper = LayerHelper("test_logsumexp")
-            out = helper.create_variable(
-                type=x.type, name='out', dtype=x.dtype, persistable=False)
-            paddle.logsumexp(x, out=out)
-            self.assertTrue(
-                np.allclose(out.numpy(), np.log(np.sum(np.exp(np_x)))))
 
 
 if __name__ == '__main__':
     unittest.main()
@@ -245,23 +245,6 @@ for dim in [4]:
 
 class API_TestMm(unittest.TestCase):
     def test_out(self):
-        with fluid.program_guard(fluid.Program()):
-            x = fluid.data(name="x", shape=[3, 2], dtype="float64")
-            y = fluid.data(name='y', shape=[2, 3], dtype='float64')
-            res = fluid.data(name="output", shape=[3, 3], dtype="float64")
-            y_1 = paddle.mm(x, y, out=res)
-            exe = fluid.Executor(fluid.CPUPlace())
-            data1 = np.random.rand(3, 2)
-            data2 = np.random.rand(2, 3)
-            np_res, expected_result = exe.run(feed={'x': data1,
-                                                    'y': data2},
-                                              fetch_list=[res, y_1])
-            self.assertTrue(
-                np.allclose(
-                    np.array(np_res), np.array(expected_result), atol=1e-5),
-                "two value is\
-                {}\n{}, check diff!".format(np_res, expected_result))
-
         with fluid.program_guard(fluid.Program()):
             x = fluid.data(name="x", shape=[2], dtype="float64")
             y = fluid.data(name='y', shape=[2], dtype='float64')
@@ -280,18 +263,6 @@ class API_TestMm(unittest.TestCase):
                 "two value is\
                 {}\n{}, check diff!".format(np_res, expected_result))
 
-    def test_dygraph_with_out(self):
-        device = fluid.CPUPlace()
-        with fluid.dygraph.guard(device):
-            input_array1 = np.random.rand(3, 4).astype("float64")
-            input_array2 = np.random.rand(4, 3).astype("float64")
-            out_array = np.random.rand(3, 3).astype("float64")
-            data1 = fluid.dygraph.to_variable(input_array1)
-            data2 = fluid.dygraph.to_variable(input_array2)
-            paddle_out_holder = fluid.dygraph.to_variable(out_array)
-            out = paddle.mm(data1, data2, out=paddle_out_holder)
-            self.assertTrue(np.allclose(paddle_out_holder.numpy(), out.numpy()))
-
     def test_dygraph_without_out(self):
         device = fluid.CPUPlace()
         with fluid.dygraph.guard(device):
...
This diff is collapsed.
@@ -85,7 +85,7 @@ api_spec_diff=`python ${PADDLE_ROOT}/tools/check_api_source_without_core_ops.py
 if [ "$api_spec_diff" != "" ]; then
     echo_line="You must have one RD (zhiqiu (Recommend) or phlrain) approval for the api change for the opreator-related api without 'core.ops'.\n"
     echo_line="${echo_line}For more details, please click [https://github.com/PaddlePaddle/Paddle/wiki/paddle_api_development_manual.md]\n"
-    echo_line="${echo_line}Related APIs: ${api_spec_diff}"
+    echo_line="${echo_line}Related APIs: ${api_spec_diff}\n"
     check_approval 1 6888866 43953930
 fi
 
...