Unverified commit 5f2c0e7e, authored by lilong12, committed by GitHub

[2.0 api] fix the example codes in doc strings (#26428)

* fix the example codes, test=develop
Parent ecfb89e1
@@ -93,8 +93,19 @@ class TestExpandAsOpRank4(OpTest):
         self.check_grad(['X'], 'Out')
 
 
+class TestExpandAsV2Error(unittest.TestCase):
+    def test_errors(self):
+        with fluid.program_guard(fluid.Program(), fluid.Program()):
+            x1 = fluid.layers.data(name='x1', shape=[4], dtype="uint8")
+            x2 = fluid.layers.data(name='x2', shape=[4], dtype="int32")
+            self.assertRaises(TypeError, paddle.tensor.expand_as, x1, x2)
+            x3 = fluid.layers.data(name='x3', shape=[4], dtype="bool")
+            x3.stop_gradient = False
+            self.assertRaises(ValueError, paddle.tensor.expand_as, x3, x2)
+
+
 # Test python API
-class TestExpandAPI(unittest.TestCase):
+class TestExpandAsV2API(unittest.TestCase):
     def test_api(self):
         input1 = np.random.random([12, 14]).astype("float32")
         input2 = np.random.random([2, 12, 14]).astype("float32")
......
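For contrast with the ValueError case tested above, here is a minimal dygraph sketch of the accepted usage, assuming the paddle 2.0-beta API exercised by this commit; the snippet is illustrative and not part of the diff.

```python
import numpy as np
import paddle

paddle.disable_static()

# A bool input passes the check only while its stop_gradient flag is True
# (tensors created with to_tensor default to stop_gradient=True).
x = paddle.to_tensor(np.array([True, False, True]))
y = paddle.to_tensor(np.zeros((2, 3), dtype='bool'))
out = paddle.expand_as(x, y)
print(out.numpy())  # x broadcast to y's shape: [[True, False, True], [True, False, True]]
```

The error test covers the complementary cases: an unsupported uint8 dtype raises TypeError, and a bool input whose stop_gradient has been set to False raises ValueError.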
@@ -215,6 +215,16 @@ class TestTileError(unittest.TestCase):
             self.assertRaises(ValueError, paddle.tile, x3, repeat_times)
 
 
+class TestTileAPIStatic(unittest.TestCase):
+    def test_api(self):
+        with program_guard(Program(), Program()):
+            repeat_times = [2, 2]
+            x1 = fluid.layers.data(name='x1', shape=[4], dtype="int32")
+            out = paddle.tile(x1, repeat_times)
+            positive_2 = fluid.layers.fill_constant([1], dtype="int32", value=2)
+            out2 = paddle.tile(x1, repeat_times=[positive_2, 2])
+
+
 # Test python API
 class TestTileAPI(unittest.TestCase):
     def test_api(self):
......
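TestTileAPIStatic only builds the static program. A hedged sketch of actually running such a program is shown below; the Executor/CPUPlace calls and the append_batch_size argument are assumed from the fluid API of this snapshot and are not part of the diff.

```python
import numpy as np
import paddle
import paddle.fluid as fluid

paddle.enable_static()  # static graph mode, the default in this snapshot
with fluid.program_guard(fluid.Program(), fluid.Program()):
    # append_batch_size=False keeps the declared shape as exactly [4],
    # so the fed array shape is explicit (the test relies on the default)
    x1 = fluid.layers.data(name='x1', shape=[4], dtype="int32", append_batch_size=False)
    positive_2 = fluid.layers.fill_constant([1], dtype="int32", value=2)
    out = paddle.tile(x1, repeat_times=[positive_2, 2])

    exe = fluid.Executor(fluid.CPUPlace())
    res, = exe.run(feed={'x1': np.arange(4).astype("int32")},
                   fetch_list=[out])
    print(res.shape)  # (2, 8): a 1-D input of length 4 tiled by [2, 2]
```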
@@ -841,7 +841,7 @@ def tile(x, repeat_times, name=None):
     """
     Construct a new Tensor by repeating ``x`` the number of times given by ``repeat_times``.
-    After tiling, the number of elements of the i'th dimension of the output is equal to ``x.dims[i] * repeat_times[i]``.
+    After tiling, the value of the i'th dimension of the output is equal to ``x.shape[i]*repeat_times[i]``.
 
     Both the number of dimensions of ``x`` and the number of elements in ``repeat_times`` should be less than or equal to 6.
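A quick check of the shape rule stated in the corrected line, using numpy.tile as a reference; this snippet is only an illustration and does not appear in the docstring.

```python
import numpy as np
import paddle

paddle.disable_static()
x_np = np.random.rand(2, 3).astype('float32')
repeat_times = [4, 5]

out = paddle.tile(paddle.to_tensor(x_np), repeat_times=repeat_times)
# the i'th output dimension equals x.shape[i] * repeat_times[i]
assert list(out.shape) == [2 * 4, 3 * 5]
np.testing.assert_allclose(out.numpy(), np.tile(x_np, repeat_times))
```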
@@ -862,9 +862,9 @@ def tile(x, repeat_times, name=None):
             paddle.disable_static()
             np_data = np.array([1, 2, 3]).astype('int32')
-            data = paddle.to_variable(np_data)
+            data = paddle.to_tensor(np_data)
             out = paddle.tile(data, repeat_times=[2, 1])
-            np_out = out1.numpy()
+            np_out = out.numpy()
             # [[1, 2, 3], [1, 2, 3]]
 
             out = paddle.tile(data, repeat_times=[2, 2])
@@ -872,7 +872,7 @@ def tile(x, repeat_times, name=None):
             # [[1, 2, 3, 1, 2, 3], [1, 2, 3, 1, 2, 3]]
 
             np_repeat_times = np.array([2, 1]).astype("int32")
-            repeat_times = paddle.to_variable(np_repeat_times)
+            repeat_times = paddle.to_tensor(np_repeat_times)
             out = paddle.tile(data, repeat_times=repeat_times)
             np_out = out.numpy()
             # [[1, 2, 3], [1, 2, 3]]
@@ -884,9 +884,12 @@ def tile(x, repeat_times, name=None):
         raise ValueError(
             "When the date type is bool for the input 'x' of tile op, you "
             "must set its stop_gradient to be True by "
-            "some_var.stop_gradient == True supporting some_var as the input.")
+            "some_var.stop_gradient == True supporting some_var is the input.")
+
+    if in_dygraph_mode():
+        return core.ops.tile(x, 'repeat_times', repeat_times)
 
-    helper = LayerHelper('tile', input=x, **locals())
+    helper = LayerHelper('tile', **locals())
 
     inputs = {"X": [x]}
     attrs = {}
@@ -928,7 +931,7 @@ def expand_as(x, y, name=None):
     Args:
         x (Tensor): The input tensor, its data type is bool, float32, float64, int32 or int64.
-        y (Tensor): The input tensor gives the shape that ``x`` to expand to.
+        y (Tensor): The input tensor that gives the shape to expand to.
         name (str, optional): The default value is None. Normally there is no need for user to set this property. For more information, please refer to :ref:`api_guide_Name`.
 
     Returns:
@@ -942,10 +945,10 @@ def expand_as(x, y, name=None):
             paddle.disable_static()
 
-            np_data_x = np.array([1, 2, 3]).astype=('int32)
-            np_data_y = np.array([[1, 2, 3], [4, 5, 6]]).astype=('int32)
-            data_x = paddle.to_variable(np_data_x)
-            data_y = paddle.to_variable(np_data_y)
+            np_data_x = np.array([1, 2, 3]).astype('int32')
+            np_data_y = np.array([[1, 2, 3], [4, 5, 6]]).astype('int32')
+            data_x = paddle.to_tensor(np_data_x)
+            data_y = paddle.to_tensor(np_data_y)
             out = paddle.expand_as(data_x, data_y)
             np_out = out.numpy()
             # [[1, 2, 3], [1, 2, 3]]
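The corrected example pins down the values; the shape relationship it relies on (the output takes its shape from ``y``) can be checked directly, here with the same 12 x 14 / 2 x 12 x 14 pair used by TestExpandAsV2API earlier in this diff. This is an illustrative sketch, not part of the docstring.

```python
import numpy as np
import paddle

paddle.disable_static()
x = paddle.to_tensor(np.random.random([12, 14]).astype("float32"))
y = paddle.to_tensor(np.random.random([2, 12, 14]).astype("float32"))

out = paddle.expand_as(x, y)
# the output shape comes from y; the values are broadcast copies of x
assert list(out.shape) == [2, 12, 14]
np.testing.assert_allclose(out.numpy()[0], x.numpy())
```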
@@ -962,7 +965,10 @@ def expand_as(x, y, name=None):
             "some_var as the input 'x'.")
     inputs = {"X": [x], "target_tensor": [y]}
 
-    helper = LayerHelper('expand_as', input=x, **locals())
+    if in_dygraph_mode():
+        return core.ops.expand_as_v2(x, y)
+
+    helper = LayerHelper('expand_as', **locals())
     dtype = helper.input_dtype(input_param_name='x')
     out = helper.create_variable_for_type_inference(dtype)
     helper.append_op(type='expand_as_v2', inputs=inputs, outputs={'Out': out})
@@ -994,16 +1000,16 @@ def expand(x, shape, name=None):
             import paddle
 
             paddle.disable_static()
-            np_data = np.array([1, 2, 3]).astype=('int32)
-            data = paddle.to_variable(np_data)
+            np_data = np.array([1, 2, 3]).astype('int32')
+            data = paddle.to_tensor(np_data)
             out = paddle.expand(data, shape=[2, 3])
             out = out.numpy()
             # [[1, 2, 3], [1, 2, 3]]
 
-            np_shape = np.array([2, 3]).astype=('int32)
-            shape = paddle.to_variable(np_shape)
+            np_shape = np.array([2, 3]).astype('int32')
+            shape = paddle.to_tensor(np_shape)
             out = paddle.expand(data, shape=shape)
-            out = out.numpy
+            out = out.numpy()
             # [[1, 2, 3], [1, 2, 3]]
     """
     check_variable_and_dtype(
@@ -1018,7 +1024,10 @@ def expand(x, shape, name=None):
             "some_var.stop_gradient = True, supporting "
             "some_var as the input.")
 
-    helper = LayerHelper('expand', input=x, **locals())
+    if in_dygraph_mode():
+        return core.ops.expand_v2(x, 'shape', shape)
+
+    helper = LayerHelper('expand', **locals())
 
     def get_attr_expand_shape(list_expand_shape):
         attrs_expand_shape = []
......
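The get_attr_expand_shape helper above deals with ``shape`` lists whose entries are not all plain Python ints. A hedged sketch of that situation from the user side, mirroring the fill_constant pattern in TestTileAPIStatic; that paddle.expand accepts a 1-element Tensor entry in the shape list here is an assumption on my part, not something shown in the diff.

```python
import paddle
import paddle.fluid as fluid

paddle.enable_static()
with fluid.program_guard(fluid.Program(), fluid.Program()):
    x = fluid.layers.data(name='x', shape=[3], dtype='int32', append_batch_size=False)
    # one entry is a compile-time int, the other a 1-element Tensor built in
    # the graph; get_attr_expand_shape has to distinguish the two kinds
    two = fluid.layers.fill_constant([1], dtype='int32', value=2)
    out = paddle.expand(x, shape=[two, 3])
    print(out.shape)  # the dimension given by a Tensor is only known at runtime
```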