Unverified commit 090a331d, authored by: W wangchaochaohu, committed by: GitHub

refine the full_like Op for API 2.0 test=develop (#25294)

Parent: fed05885
...@@ -97,185 +97,5 @@ class TestFillAnyLikeOpFloat16(TestFillAnyLikeOp): ...@@ -97,185 +97,5 @@ class TestFillAnyLikeOpFloat16(TestFillAnyLikeOp):
self.dtype = np.float16 self.dtype = np.float16
class TestFillAnyLikeOp_attr_out(unittest.TestCase):
    """ Test fill_any_like op(whose API is full_like) for attr out. """

    def test_attr_tensor_API(self):
        startup_program = fluid.Program()
        train_program = fluid.Program()
        with fluid.program_guard(train_program, startup_program):
            fill_value = 2.0
            input = fluid.data(name='input', dtype='float32', shape=[2, 3])
            # Build both the default-dtype and explicit-dtype graphs; only the
            # first result is fetched and checked below.
            output = paddle.full_like(input, fill_value)
            output_dtype = paddle.full_like(input, fill_value, dtype='float32')

            # Prefer GPU when this build supports CUDA, else fall back to CPU.
            place = (fluid.CUDAPlace(0)
                     if fluid.core.is_compiled_with_cuda() else fluid.CPUPlace())
            exe = fluid.Executor(place)
            exe.run(startup_program)

            img = np.array([[1, 2, 3], [4, 5, 6]]).astype(np.float32)
            res = exe.run(train_program,
                          feed={'input': img},
                          fetch_list=[output])

            out_np = np.array(res[0])
            # Element-wise difference must be all-zero, i.e. exact equality.
            self.assertTrue(
                not (out_np - np.full_like(img, fill_value)).any(),
                msg="full_like output is wrong, out = " + str(out_np))
class TestFillAnyLikeOpError(unittest.TestCase):
    """Check that paddle.full_like rejects invalid target dtypes.

    Fix: the original defined a nested ``test_input_dtype`` function whose
    body was a bare no-op attribute access and which was never called; it is
    removed here. The assertRaises checks are unchanged.
    """

    def test_errors(self):
        with program_guard(Program(), Program()):
            #for ci coverage
            input_data = fluid.data(name='input', dtype='float32', shape=[2, 3])
            output = paddle.full_like(input_data, 2.0)

            # An unrecognized dtype string is rejected with ValueError.
            self.assertRaises(
                ValueError,
                paddle.full_like,
                input=input_data,
                fill_value=2,
                dtype='uint4')
            # A recognized but unsupported dtype raises TypeError.
            self.assertRaises(
                TypeError,
                paddle.full_like,
                input=input_data,
                fill_value=2,
                dtype='int16')
class ApiOnesLikeTest(unittest.TestCase):
    """Static-graph checks for paddle.ones_like.

    Fix: the original repeated the same build/run/check sequence three times;
    it is factored into one private helper with the varying pieces as
    arguments. Behavior of each check is unchanged.
    """

    def _check_ones_like(self, expected_dtype, **ones_like_kwargs):
        # Build a float64 input, run ones_like with the given kwargs, and
        # compare the fetched result against np.ones (value-wise comparison).
        with fluid.program_guard(fluid.Program()):
            data = fluid.data(shape=[10], dtype="float64", name="data")
            ones = paddle.ones_like(data, **ones_like_kwargs)
            place = fluid.CPUPlace()
            exe = fluid.Executor(place)
            result, = exe.run(feed={"data": np.random.rand(10)},
                              fetch_list=[ones])
        expected_result = np.ones(10, dtype=expected_dtype)
        self.assertEqual((result == expected_result).all(), True)

    def test_out(self):
        # Default dtype follows the input (float64).
        self._check_ones_like("float64", device="cpu")
        # Explicit dtype overrides the input's dtype.
        self._check_ones_like("float32", device="cpu", dtype="float32")
        # No kwargs at all; comparison is value-based so float32 expected
        # array still matches (original test used float32 here too).
        self._check_ones_like("float32")
class ApiZerosLikeTest(unittest.TestCase):
    """Static-graph checks for paddle.zeros_like.

    Fix: the original repeated the same build/run/check sequence three times;
    it is factored into one private helper with the varying pieces as
    arguments. Behavior of each check is unchanged.
    """

    def _check_zeros_like(self, expected_dtype, **zeros_like_kwargs):
        # Build a float64 input, run zeros_like with the given kwargs, and
        # compare the fetched result against np.zeros (value-wise comparison).
        with fluid.program_guard(fluid.Program()):
            data = fluid.data(shape=[10], dtype="float64", name="data")
            zeros = paddle.zeros_like(data, **zeros_like_kwargs)
            place = fluid.CPUPlace()
            exe = fluid.Executor(place)
            result, = exe.run(feed={"data": np.random.rand(10)},
                              fetch_list=[zeros])
        expected_result = np.zeros(10, dtype=expected_dtype)
        self.assertEqual((result == expected_result).all(), True)

    def test_out(self):
        # Default dtype follows the input (float64).
        self._check_zeros_like("float64", device="cpu")
        # Explicit dtype overrides the input's dtype.
        self._check_zeros_like("float32", device="cpu", dtype="float32")
        # No kwargs at all; comparison is value-based so float32 expected
        # array still matches (original test used float32 here too).
        self._check_zeros_like("float32")
class TestOnesZerosError(unittest.TestCase):
    """Error-path coverage for ones_like / zeros_like: bad device strings,
    unsupported dtype strings, and invalid ``out`` arguments must raise."""

    def test_errors(self):
        # ones_like: unknown device string.
        with self.assertRaises(ValueError):
            with fluid.program_guard(fluid.Program(), fluid.Program()):
                data = fluid.data(name="data", shape=[10], dtype="float32")
                paddle.ones_like(data, device="opu")

        # ones_like: unsupported dtype string.
        with self.assertRaises(ValueError):
            with fluid.program_guard(fluid.Program(), fluid.Program()):
                data = fluid.data(name="data", shape=[10], dtype="float32")
                paddle.ones_like(data, dtype="float")

        # zeros_like: unknown device string.
        with self.assertRaises(ValueError):
            with fluid.program_guard(fluid.Program(), fluid.Program()):
                data = fluid.data(name="data", shape=[10], dtype="float32")
                paddle.zeros_like(data, device="opu")

        # zeros_like: unsupported dtype string.
        with self.assertRaises(ValueError):
            with fluid.program_guard(fluid.Program(), fluid.Program()):
                data = fluid.data(name="data", shape=[10], dtype="float32")
                paddle.zeros_like(data, dtype="float")

        # fluid.layers.ones_like: non-Variable input.
        with self.assertRaises(TypeError):
            with fluid.program_guard(fluid.Program(), fluid.Program()):
                fluid.layers.ones_like([10], dtype="float")

        # fluid.layers.ones_like: float16 input with mismatched dtype.
        with self.assertRaises(TypeError):
            with fluid.program_guard(fluid.Program(), fluid.Program()):
                data = fluid.data(name="data", shape=[10], dtype="float16")
                fluid.layers.ones_like(data, dtype="float32")

        # fluid.layers.ones_like: out is not a Variable.
        with self.assertRaises(TypeError):
            with fluid.program_guard(fluid.Program(), fluid.Program()):
                data = fluid.data(name="data", shape=[10], dtype="float32")
                fluid.layers.ones_like(data, dtype="float32", out=[10])

        # fluid.layers.ones_like: out dtype disagrees with requested dtype.
        with self.assertRaises(TypeError):
            with fluid.program_guard(fluid.Program(), fluid.Program()):
                data = fluid.data(name="data", shape=[10], dtype="float32")
                out = fluid.data(name="out", shape=[10], dtype="float16")
                fluid.layers.ones_like(data, dtype="float32", out=out)
if __name__ == "__main__":
    unittest.main()
# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import paddle
import paddle.fluid.core as core
from paddle import Program, program_guard
import paddle.compat as cpt
import unittest
import numpy as np
from op_test import OpTest
class TestFullOp(unittest.TestCase):
    """ Test fill_any_like op(whose API is full_like) for attr out.

    Fixes: the imperative test built its expected array via
    ``np.random.random`` followed by ``.fill(888.88)`` — pointless RNG for a
    constant, replaced by ``np.full``; and ``assertTrue(cond, True)`` passed
    ``True`` as the *msg* argument, which is dropped.
    """

    def test_attr_tensor_API(self):
        startup_program = Program()
        train_program = Program()
        with program_guard(train_program, startup_program):
            fill_value = 2.0
            input = paddle.data(name='input', dtype='float32', shape=[2, 3])
            # Default-dtype and explicit-dtype paths; only the first is fetched.
            output = paddle.full_like(input, fill_value)
            output_dtype = paddle.full_like(input, fill_value, dtype='float32')

            place = paddle.CPUPlace()
            if core.is_compiled_with_cuda():
                place = paddle.CUDAPlace(0)
            exe = paddle.Executor(place)
            exe.run(startup_program)

            img = np.array([[1, 2, 3], [4, 5, 6]]).astype(np.float32)
            res = exe.run(train_program,
                          feed={'input': img},
                          fetch_list=[output])

            out_np = np.array(res[0])
            # All element-wise differences must be zero (exact equality).
            self.assertTrue(
                not (out_np - np.full_like(img, fill_value)).any(),
                msg="full_like output is wrong, out = " + str(out_np))

    def test_full_like_imperative(self):
        with paddle.imperative.guard():
            # arange(6, 10) yields 4 elements, so the output has shape [4].
            input = paddle.arange(6, 10, dtype='float32')
            out = paddle.full_like(input, fill_value=888.88, dtype='float32')
            expected = np.full([4], 888.88, dtype="float32")
            self.assertTrue((out.numpy() == expected).all())
class TestFullOpError(unittest.TestCase):
    """Check that paddle.full_like raises TypeError for unsupported dtypes.

    Fix: the original defined a nested ``test_input_dtype`` function whose
    body was a bare no-op attribute access and which was never called; it is
    removed here. The assertRaises checks are unchanged.
    """

    def test_errors(self):
        with program_guard(Program(), Program()):
            #for ci coverage
            input_data = paddle.data(
                name='input', dtype='float32', shape=[2, 3])
            output = paddle.full_like(input_data, 2.0)

            # Unknown dtype string.
            self.assertRaises(
                TypeError,
                paddle.full_like,
                x=input_data,
                fill_value=2,
                dtype='uint4')
            # Recognized but unsupported dtype.
            self.assertRaises(
                TypeError,
                paddle.full_like,
                x=input_data,
                fill_value=2,
                dtype='int16')
if __name__ == "__main__":
unittest.main()
...@@ -54,13 +54,7 @@ __all__ = [ ...@@ -54,13 +54,7 @@ __all__ = [
] ]
def full_like(x, fill_value, dtype=None, name=None):
    """
    :alias_main: paddle.full_like
    :alias: paddle.full_like,paddle.tensor.full_like,paddle.tensor.creation.full_like

    Create a Tensor with the same shape as ``x``, filled with ``fill_value``.

    NOTE(review): this span of the source was a collapsed two-column diff; the
    body below reconstructs the post-change (right-column) implementation.

    Args:
        x(Variable): The input tensor which specifies shape and data type. The
            data type can be bool, float16, float32, float64, int32, int64.
        fill_value(bool|float|int|Variable): The value to fill the tensor with.
            Note: this value shouldn't exceed the range of the output data type.
        dtype(np.dtype|core.VarDesc.VarType|str, optional): The data type of
            output. The data type can be one of bool, float16, float32,
            float64, int32, int64. The default value is None, which means the
            output data type is the same as input.
        name(str, optional): The default value is None. Normally there is no
            need for user to set this property. For more information, please
            refer to :ref:`api_guide_Name`

    Returns:
        Variable: A Tensor with the same shape as ``x``, filled with ``fill_value``.

    Examples:
        .. code-block:: python

            import paddle
            import numpy as np

            paddle.enable_imperative()  # Now we are in imperative mode
            input = paddle.full(shape=[2, 3], fill_value=0.0, dtype='float32', name='input')
            output = paddle.full_like(input, 2.0)
            #output result : [array([[2., 2., 2.], [2., 2., 2.]], dtype=float32)]
    """
    # Default the output dtype to the input's; otherwise normalize a
    # numpy/str dtype into the framework's VarType enum.
    if dtype is None:
        dtype = x.dtype
    else:
        if not isinstance(dtype, core.VarDesc.VarType):
            dtype = convert_np_dtype_to_dtype_(dtype)

    # Dygraph fast path: invoke the C++ op directly, no program building.
    if in_dygraph_mode():
        return core.ops.fill_any_like(x, 'value', fill_value, 'dtype', dtype)

    helper = LayerHelper("full_like", **locals())
    check_dtype(dtype, 'dtype',
                ['bool', 'float16', 'float32', 'float64', 'int32', 'int64'],
                'full_like')
    out = helper.create_variable_for_type_inference(dtype=dtype)

    helper.append_op(
        type='fill_any_like',
        inputs={'X': [x]},
        attrs={'value': fill_value,
               "dtype": dtype},
        outputs={'Out': [out]})
    return out
Markdown is supported
0%
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
To comment, please register or sign in.