Unverified · Commit 89f024e3 authored by 201716010711, committed by GitHub

delete shape api (#48546)

Parent 0ebace14
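
This commit removes the legacy fluid.layers.shape API from fluid.layers.nn and migrates every call site to paddle.shape. A minimal before/after sketch (not part of the diff below; the static-graph setup and the variable name x are illustrative):

    import paddle

    paddle.enable_static()
    x = paddle.static.data(name="x", shape=[3, 100, 100], dtype="float32")

    # Before this commit (API now removed):
    #     s = fluid.layers.shape(x)
    # After:
    s = paddle.shape(x)  # 1-D int32 Tensor holding the runtime shape, stop_gradient=True

Both calls return the same 1-D int32 shape tensor, so indexing such as s[0] at the call sites below behaves identically.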
@@ -180,7 +180,7 @@ class Normal(distribution.Distribution):
             self.loc + self.scale, batch_shape + shape, self.dtype, 0.0
         )
         zero_tmp_reshape = paddle.reshape(zero_tmp, output_shape)
-        zero_tmp_shape = nn.shape(zero_tmp_reshape)
+        zero_tmp_shape = paddle.shape(zero_tmp_reshape)
         normal_random_tmp = nn.gaussian_random(
             zero_tmp_shape, mean=0.0, std=1.0, seed=seed, dtype=self.dtype
         )
...
@@ -330,7 +330,7 @@ def basic_gru(
     mask = None
     if sequence_length:
-        max_seq_len = layers.shape(input)[0]
+        max_seq_len = paddle.shape(input)[0]
         mask = layers.sequence_mask(
             sequence_length, maxlen=max_seq_len, dtype='float32'
         )
@@ -614,7 +614,7 @@ def basic_lstm(
     mask = None
     if sequence_length:
-        max_seq_len = layers.shape(input)[0]
+        max_seq_len = paddle.shape(input)[0]
         mask = layers.sequence_mask(
             sequence_length, maxlen=max_seq_len, dtype='float32'
         )
...
@@ -1588,7 +1588,7 @@ def ssd_loss(
         raise ValueError("Only support mining_type == max_negative now.")
     num, num_prior, num_class = confidence.shape
-    conf_shape = nn.shape(confidence)
+    conf_shape = paddle.shape(confidence)

     def __reshape_to_2d(var):
         out = paddle.flatten(var, 2, -1)
...
@@ -97,7 +97,6 @@ __all__ = [
     'elementwise_mul',
     'gaussian_random',
     'sampling_id',
-    'shape',
     'clip',
     'clip_by_norm',
     'mean',
@@ -5010,95 +5009,6 @@ def sampling_id(x, min=0.0, max=1.0, seed=0, dtype='float32'):
     return out


-def shape(input):
-    """
-    :alias_main: paddle.shape
-    :alias: paddle.shape,paddle.tensor.shape,paddle.tensor.attribute.shape
-    :old_api: paddle.fluid.layers.shape
-
-    **Shape Layer**
-
-    Get the shape of the input.
-
-    .. code-block:: text
-
-        Case1:
-            Given N-D Tensor:
-                input = [ [1, 2, 3, 4], [5, 6, 7, 8] ]
-            Then:
-                input.shape = [2, 4]
-
-        Case2:
-            Given SelectedRows:
-                input.rows = [0, 4, 19]
-                input.height = 20
-                input.value = [ [1, 2], [3, 4], [5, 6] ]  # inner tensor
-            Then:
-                input.shape = [3, 2]
-
-    Args:
-        input (Variable): The input can be N-D Tensor or SelectedRows with data type bool, float16, float32, float64, int32, int64.
-            If input variable is type of SelectedRows, returns the shape of it's inner tensor.
-
-    Returns:
-        Variable (Tensor): The shape of the input variable.
-
-    Examples:
-        .. code-block:: python
-
-            import paddle.fluid as fluid
-            import numpy as np
-            import paddle
-            paddle.enable_static()
-
-            inputs = fluid.data(name="x", shape=[3, 100, 100], dtype="float32")
-            output = fluid.layers.shape(inputs)
-
-            exe = fluid.Executor(fluid.CPUPlace())
-            exe.run(fluid.default_startup_program())
-
-            img = np.ones((3, 100, 100)).astype(np.float32)
-
-            res = exe.run(fluid.default_main_program(), feed={'x': img}, fetch_list=[output])
-            print(res)  # [array([  3, 100, 100], dtype=int32)]
-    """
-    if in_dygraph_mode():
-        out = _C_ops.shape(input)
-        out.stop_gradient = True
-        return out
-    if _in_legacy_dygraph():
-        out = _legacy_C_ops.shape(input)
-        out.stop_gradient = True
-        return out
-
-    check_variable_and_dtype(
-        input,
-        'input',
-        [
-            'bool',
-            'float16',
-            'float32',
-            'float64',
-            'int32',
-            'int64',
-            'complex64',
-            'complex128',
-        ],
-        'shape',
-    )
-    helper = LayerHelper('shape', **locals())
-    out = helper.create_variable_for_type_inference(dtype='int32')
-    helper.append_op(
-        type='shape',
-        inputs={'Input': input},
-        outputs={'Out': out},
-        stop_gradient=True,
-    )
-    return out


 def _elementwise_op(helper):
     op_type = helper.layer_type
     x = helper.kwargs.get('x', None)
...
@@ -673,7 +673,7 @@ def _rnn_static_graph(
         inputs = map_structure(_transpose_batch_time, inputs)

     if sequence_length:
-        max_seq_len = nn.shape(flatten(inputs)[0])[0]
+        max_seq_len = paddle.shape(flatten(inputs)[0])[0]
         mask = sequence_lod.sequence_mask(
             sequence_length,
             maxlen=max_seq_len,
@@ -1215,7 +1215,7 @@ class BeamSearchDecoder(Decoder):
         """
         self.kinf = 1e9
         state = flatten(initial_cell_states)[0]
-        self.batch_size = nn.shape(state)[0]
+        self.batch_size = paddle.shape(state)[0]
         self.start_token_tensor = tensor.fill_constant(
             shape=[1], dtype="int64", value=self.start_token
...
@@ -151,7 +151,7 @@ def nested_if_else(x_v):
     # `x_v.shape[0]` is not Tensor, and `batch_size` is the return value of `true_fn` after transformed.
     # col = -1
     # batch_size = x_v.shape[0]
-    batch_size = fluid.layers.shape(x_v)[0]
+    batch_size = paddle.shape(x_v)[0]
     # if tensor.shape is [1], now support to compare with numpy.
     if paddle.mean(x_v).numpy() < 0:
@@ -180,7 +180,7 @@ def nested_if_else_2(x):
     z = y
     x_shape_0 = x.shape[0]
     if x_shape_0 < 1:
-        if fluid.layers.shape(y).numpy()[0] < 1:
+        if paddle.shape(y).numpy()[0] < 1:
             res = fluid.layers.fill_constant(
                 value=2, shape=x.shape, dtype="int32"
             )
@@ -212,7 +212,7 @@ def nested_if_else_3(x):
         else:
             out = x - 1
     else:
-        y_shape = fluid.layers.shape(y)
+        y_shape = paddle.shape(y)
         if y_shape.numpy()[0] < 1:
             res = fluid.layers.fill_constant(
                 value=2, shape=x.shape, dtype="int32"
@@ -290,7 +290,7 @@ class NetWithControlFlowIf(fluid.dygraph.Layer):
 def if_with_and_or(x_v, label=None):
-    batch_size = fluid.layers.shape(x_v)
+    batch_size = paddle.shape(x_v)
     if (
         x_v is not None
         and (paddle.mean(x_v).numpy()[0] > 0 or label is not None)
@@ -308,7 +308,7 @@ def if_with_and_or(x_v, label=None):
 def if_with_and_or_1(x, y=None):
-    batch_size = fluid.layers.shape(x)
+    batch_size = paddle.shape(x)
     if batch_size[0] > 1 and y is not None:
         x = x + 1
     if y is not None or batch_size[0] > 1:
@@ -317,7 +317,7 @@ def if_with_and_or_1(x, y=None):
 def if_with_and_or_2(x, y=None):
-    batch_size = fluid.layers.shape(x)
+    batch_size = paddle.shape(x)
     if x is not None and batch_size[0] > 1 and y is not None:
         x = x + 1
     if batch_size[0] > 1 or y is not None or x is not None:
@@ -326,7 +326,7 @@ def if_with_and_or_2(x, y=None):
 def if_with_and_or_3(x, y=None):
-    batch_size = fluid.layers.shape(x)
+    batch_size = paddle.shape(x)
     mean_res = paddle.mean(x)
     if (
         x is not None
@@ -341,7 +341,7 @@ def if_with_and_or_3(x, y=None):
 def if_with_and_or_4(x, y=None):
-    batch_size = fluid.layers.shape(x)
+    batch_size = paddle.shape(x)
     mean_res = paddle.mean(x)
     if (x is not None and batch_size[0] > 1) or (
         y is not None and mean_res.numpy()[0] > 0
@@ -361,7 +361,7 @@ def if_with_class_var(x, y=None):
             self.b = 2

     foo = Foo()
-    batch_size = fluid.layers.shape(x)
+    batch_size = paddle.shape(x)
     mean_res = paddle.mean(x)
     if batch_size[0] > foo.a:
...
@@ -297,8 +297,8 @@ class BaseModel(fluid.dygraph.Layer):
         loss = paddle.nn.functional.softmax_with_cross_entropy(
             logits=dec_output, label=label, soft_label=False
         )
-        loss = paddle.squeeze(loss, axes=[2])
-        max_tar_seq_len = fluid.layers.shape(tar)[1]
+        loss = paddle.squeeze(loss, axis=[2])
+        max_tar_seq_len = paddle.shape(tar)[1]
         tar_mask = fluid.layers.sequence_mask(
             tar_sequence_length, maxlen=max_tar_seq_len, dtype='float32'
         )
@@ -833,8 +833,8 @@ class AttentionModel(fluid.dygraph.Layer):
         loss = paddle.nn.functional.softmax_with_cross_entropy(
             logits=dec_output, label=label, soft_label=False
         )
-        loss = paddle.squeeze(loss, axes=[2])
-        max_tar_seq_len = fluid.layers.shape(tar)[1]
+        loss = paddle.squeeze(loss, axis=[2])
+        max_tar_seq_len = paddle.shape(tar)[1]
         tar_mask = fluid.layers.sequence_mask(
             tar_sequence_length, maxlen=max_tar_seq_len, dtype='float32'
         )
...
@@ -210,7 +210,7 @@ class ConstantLayer:
         operation
         """
         shape = list(shape)
-        input_shape = fluid.layers.shape(input)
+        input_shape = paddle.shape(input)
         shape[0] = input_shape[0]
         constant = fluid.layers.fill_constant(shape, dtype, value)
         return constant
...
@@ -321,9 +321,7 @@ def bmn_loss_func(
         gt_label = paddle.reshape(x=gt_label, shape=[-1])
         gt_label.stop_gradient = True
         pmask = fluid.layers.cast(x=(gt_label > 0.5), dtype=DATATYPE)
-        num_entries = fluid.layers.cast(
-            fluid.layers.shape(pmask), dtype=DATATYPE
-        )
+        num_entries = fluid.layers.cast(paddle.shape(pmask), dtype=DATATYPE)
         num_positive = fluid.layers.cast(paddle.sum(pmask), dtype=DATATYPE)
         ratio = num_entries / num_positive
         coef_0 = 0.5 * ratio / (ratio - 1)
...
@@ -97,8 +97,8 @@ class MainNetWithDict(fluid.dygraph.Layer):
             ),
         }
         # TODO(Aurelius84): The following code will be converted into:
-        # max_len = layers.cond(layers.shape(input)[0] != max_len,
-        #                       lambda: layers.shape(input)[0], lambda: max_len)
+        # max_len = layers.cond(paddle.shape(input)[0] != max_len,
+        #                       lambda: paddle.shape(input)[0], lambda: max_len)
         # But max_len should be wrapped into tensor, which is not supported.
         # Comment out this line of code for now.
...
@@ -79,7 +79,7 @@ class DynamicGRU(fluid.dygraph.Layer):
         res = []
         for i in range(inputs.shape[1]):
             if self.is_reverse:
-                j = fluid.layers.shape(inputs)[1] - 1 - i
+                j = paddle.shape(inputs)[1] - 1 - i
             else:
                 j = i
...
@@ -38,7 +38,7 @@ def dyfunc_tensor_shape_2(x):
 def dyfunc_tensor_shape_3(x):
     # Transform y.shape but run y.shape actually because y is not Tensor
     x = fluid.dygraph.to_variable(x)
-    y = np.ones(5)
+    y = paddle.ones([1, 5])
     res = paddle.reshape(x, shape=y.shape)
     return res
@@ -97,7 +97,7 @@ def dyfunc_paddle_shape_api(x):
     a = paddle.shape(x)[0]
     # alias api will also not be converted.
     alias_old_api = paddle.fluid.layers
-    b = alias_old_api.shape(x)[1]
+    b = paddle.shape(x)[1]
     res = paddle.reshape(x, shape=(b, a))
     return res
@@ -199,7 +199,7 @@ def dyfunc_with_while_3(x):
 def dyfunc_with_while_4(x):
     x = paddle.to_tensor(x)
-    y = np.ones(5)
+    y = paddle.ones([1, 5])
     y_shape_0 = y.shape[0]
     i = 1
@@ -309,6 +309,11 @@ class TestTensorShapeBasic3(TestTensorShapeBasic):
     def init_test_func(self):
         self.dygraph_func = dyfunc_tensor_shape_3

+    def _set_expected_op_num(self):
+        self.expected_op_num = 3
+        self.expected_shape_op_num = 0
+        self.expected_slice_op_num = 0
+

 class TestTensorShapeBasic4(TestTensorShapeBasic):
     def init_test_func(self):
@@ -475,7 +480,7 @@ class TestTensorShapeInWhile4(TestTensorShapeBasic):
         self.dygraph_func = dyfunc_with_while_4

     def _set_expected_op_num(self):
-        self.expected_op_num = 4
+        self.expected_op_num = 1
         self.expected_shape_op_num = 0
         self.expected_slice_op_num = 0
...
@@ -203,7 +203,7 @@ class Upsample(fluid.dygraph.Layer):
     def forward(self, inputs):
         # get dynamic upsample output shape
-        shape_nchw = fluid.layers.shape(inputs)
+        shape_nchw = paddle.shape(inputs)
         shape_hw = paddle.slice(shape_nchw, axes=[0], starts=[2], ends=[4])
         shape_hw.stop_gradient = True
         in_shape = fluid.layers.cast(shape_hw, dtype='int32')
...
@@ -40,7 +40,7 @@ def build_and_run_program(place, batch_size, beam_size, stop_gradient=False):
     while_op = layers.While(cond)
     scores = layers.array_write(x, step_idx)
     with while_op.block():
-        bs = layers.cast(layers.shape(x)[0], "int64")
+        bs = layers.cast(paddle.shape(x)[0], "int64")
         for _ in range(20):
             bs = layers.cast(bs, 'int64')
             bs.stop_gradient = stop_gradient
...
@@ -3307,7 +3307,7 @@ class TestBook(LayerTest):
         input = self._get_data(
             name="input", shape=[3, 100, 100], dtype="float32"
         )
-        out = layers.shape(input)
+        out = paddle.shape(input)
         return out

     def make_pad2d(self):
...
@@ -635,7 +635,7 @@ def def_seq2seq_model(
         logits=logits, label=label, soft_label=False
     )
     loss = layers.unsqueeze(loss, axes=[2])
-    max_tar_seq_len = layers.shape(target)[1]
+    max_tar_seq_len = paddle.shape(target)[1]
     tar_mask = layers.sequence_mask(
         target_length, maxlen=max_tar_seq_len, dtype="float32"
     )
...
@@ -248,7 +248,7 @@ class Seq2SeqModel:
             ),
         ]
         src_mask = layers.sequence_mask(
-            src_length, maxlen=layers.shape(src)[1], dtype="float32"
+            src_length, maxlen=paddle.shape(src)[1], dtype="float32"
         )
         encoder_padding_mask = (src_mask - 1.0) * 1e9
         encoder_padding_mask = layers.unsqueeze(encoder_padding_mask, [1])
@@ -400,7 +400,7 @@ class MLE:
     def learn(self, probs, label, weight=None, length=None):
         loss = layers.cross_entropy(input=probs, label=label, soft_label=False)
-        max_seq_len = layers.shape(probs)[1]
+        max_seq_len = paddle.shape(probs)[1]
         mask = layers.sequence_mask(length, maxlen=max_seq_len, dtype="float32")
         loss = loss * mask
         loss = paddle.mean(loss, axis=[0])
...
@@ -23,7 +23,7 @@ class StaticShapeInferrenceTest(unittest.TestCase):
         data = paddle.fluid.layers.data(
             name="x", shape=[-1, 2], dtype='float32'
         )
-        shape = paddle.fluid.layers.shape(data)  # shape should be [-1, 2]
+        shape = paddle.shape(data)  # shape should be [-1, 2]
         x = paddle.fluid.layers.uniform_random(shape)
         self.assertEqual(x.shape, data.shape)
         paddle.disable_static()
...
@@ -595,7 +595,7 @@ class TestApiWhileLoopSliceInBody(unittest.TestCase):
         with program_guard(main_program, startup_program):
             x = fluid.layers.data(name='x', shape=[5], dtype='int32')
             z = fluid.layers.fill_constant([1], 'int32', 0)
-            x_shape = fluid.layers.shape(x)
+            x_shape = paddle.shape(x)
             i = fluid.layers.fill_constant([1], 'int32', 0)
             z, _ = fluid.layers.while_loop(cond, body, [z, i])
...
@@ -127,7 +127,7 @@ class DygraphToStaticAst(BaseTransformer):
         transformers = [
             EarlyReturnTransformer,
             BasicApiTransformer,  # Basic Api
-            TensorShapeTransformer,  # Tensor.shape -> layers.shape(Tensor)
+            TensorShapeTransformer,  # Tensor.shape -> paddle.shape(Tensor)
             BreakContinueTransformer,  # break/continue in loops
             ReturnTransformer,  # return in functions
             LogicalTransformer,  # logical and/or/not
...
@@ -31,7 +31,6 @@ from paddle.fluid.layers import (
 from paddle.fluid.layers import (
     cast,
     control_flow,
-    nn,
 )
 from paddle.fluid.layers.control_flow import (
     cond,
@@ -524,7 +523,7 @@ def convert_len(var):
         # so we return a variable dynamically inferred from var.shape.
         if var.shape[0] > 0 and var.type == core.VarDesc.VarType.LOD_TENSOR:
             return var.shape[0]
-        return nn.shape(var)[0]
+        return paddle.shape(var)[0]
     elif var.type == core.VarDesc.VarType.LOD_TENSOR_ARRAY:
         return paddle.tensor.array_length(var)
     else:
@@ -607,7 +606,7 @@ def convert_shape(x):
     if isinstance(x, Variable):
         values = list(x.shape)
         if has_negative(values):
-            shape_tensor = nn.shape(x)
+            shape_tensor = paddle.shape(x)
             for i, v in enumerate(values):
                 if v is None or v < 0:
                     values[i] = shape_tensor[i]
...
@@ -93,7 +93,7 @@ def shape(input):
             paddle.enable_static()

             inputs = fluid.data(name="x", shape=[3, 100, 100], dtype="float32")
-            output = fluid.layers.shape(inputs)
+            output = paddle.shape(inputs)

             exe = fluid.Executor(fluid.CPUPlace())
             exe.run(fluid.default_startup_program())
...
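
For reference, the migrated docstring example above runs end to end; a minimal sketch adapted from the deleted fluid.layers.shape docstring, with only the shape call swapped to paddle.shape:

    import numpy as np
    import paddle
    import paddle.fluid as fluid

    paddle.enable_static()

    inputs = fluid.data(name="x", shape=[3, 100, 100], dtype="float32")
    output = paddle.shape(inputs)  # replaces the removed fluid.layers.shape(inputs)

    exe = fluid.Executor(fluid.CPUPlace())
    exe.run(fluid.default_startup_program())

    img = np.ones((3, 100, 100)).astype(np.float32)
    res = exe.run(fluid.default_main_program(), feed={'x': img}, fetch_list=[output])
    print(res)  # [array([  3, 100, 100], dtype=int32)]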