Unverified commit e75c651d, authored by Kevin吴嘉文, committed by GitHub

Remove redundant numpy output in Example code (1/3), test=document_fix (#48678)

Parent 3a8aac35
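Every hunk below applies the same mechanical change: docstring examples that converted a paddle.Tensor to NumPy purely for printing now print the tensor directly, and the expected-output comments are updated from NumPy's format to the Tensor repr. A minimal before/after sketch of the pattern, assuming an installed PaddlePaddle (the `place` shown will vary with the device the example runs on):

    import paddle

    x = paddle.to_tensor([1.0, 2.0, 3.0])

    # Before: round-trip through NumPy just to print.
    print(x.numpy())
    # [1. 2. 3.]

    # After: print the Tensor itself; its repr already includes
    # shape, dtype, place, and stop_gradient metadata.
    print(x)
    # Tensor(shape=[3], dtype=float32, place=Place(cpu), stop_gradient=True,
    #        [1., 2., 3.])

The shape comments change brackets for the same reason: `x.numpy().shape` is a Python tuple such as `(2, 6, 6, 6)`, while `Tensor.shape` in Paddle is a plain list such as `[2, 6, 6, 6]`.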
@@ -106,7 +106,7 @@ def all_reduce(
         data = paddle.to_tensor([[1, 2, 3], [1, 2, 3]])
         task = dist.stream.all_reduce(data, sync_op=False)
         task.wait()
-        out = data.numpy()
+        out = data
         # [[5, 7, 9], [5, 7, 9]]
    """
    if _warn_cur_rank_not_in_group(group):
...
@@ -530,26 +530,27 @@ def fftn(x, s=None, axes=None, norm="backward", name=None):
         x = paddle.meshgrid(arr, arr, arr)[1]
         fftn_xp = paddle.fft.fftn(x, axes=(1, 2))
-        print(fftn_xp.numpy())
-        # [[[24.+0.j 0.+0.j 0.+0.j 0.-0.j]
-        #   [-8.+8.j 0.+0.j 0.+0.j 0.-0.j]
-        #   [-8.+0.j 0.+0.j 0.+0.j 0.-0.j]
-        #   [-8.-8.j 0.+0.j 0.+0.j 0.-0.j]]
-        #  [[24.+0.j 0.+0.j 0.+0.j 0.-0.j]
-        #   [-8.+8.j 0.+0.j 0.+0.j 0.-0.j]
-        #   [-8.+0.j 0.+0.j 0.+0.j 0.-0.j]
-        #   [-8.-8.j 0.+0.j 0.+0.j 0.-0.j]]
-        #  [[24.+0.j 0.+0.j 0.+0.j 0.-0.j]
-        #   [-8.+8.j 0.+0.j 0.+0.j 0.-0.j]
-        #   [-8.+0.j 0.+0.j 0.+0.j 0.-0.j]
-        #   [-8.-8.j 0.+0.j 0.+0.j 0.-0.j]]
-        #  [[24.+0.j 0.+0.j 0.+0.j 0.-0.j]
-        #   [-8.+8.j 0.+0.j 0.+0.j 0.-0.j]
-        #   [-8.+0.j 0.+0.j 0.+0.j 0.-0.j]
-        #   [-8.-8.j 0.+0.j 0.+0.j 0.-0.j]]]
+        print(fftn_xp)
+        # Tensor(shape=[4, 4, 4], dtype=complex128, place=Place(gpu:0), stop_gradient=True,
+        #        [[[(24+0j), 0j     , 0j     , -0j    ],
+        #          [(-8+8j), 0j     , 0j     , -0j    ],
+        #          [(-8+0j), 0j     , 0j     , -0j    ],
+        #          [(-8-8j), 0j     , 0j     , -0j    ]],
+        #
+        #         [[(24+0j), 0j     , 0j     , -0j    ],
+        #          [(-8+8j), 0j     , 0j     , -0j    ],
+        #          [(-8+0j), 0j     , 0j     , -0j    ],
+        #          [(-8-8j), 0j     , 0j     , -0j    ]],
+        #
+        #         [[(24+0j), 0j     , 0j     , -0j    ],
+        #          [(-8+8j), 0j     , 0j     , -0j    ],
+        #          [(-8+0j), 0j     , 0j     , -0j    ],
+        #          [(-8-8j), 0j     , 0j     , -0j    ]],
+        #
+        #         [[(24+0j), 0j     , 0j     , -0j    ],
+        #          [(-8+8j), 0j     , 0j     , -0j    ],
+        #          [(-8+0j), 0j     , 0j     , -0j    ],
+        #          [(-8-8j), 0j     , 0j     , -0j    ]]])
    """
    if is_integer(x) or is_floating_point(x):
        return fftn_r2c(
...
@@ -533,8 +533,8 @@ class FusedFeedForward(Layer):
         fused_feedforward_layer = FusedFeedForward(8, 8)
         x = paddle.rand((1, 8, 8))
         out = fused_feedforward_layer(x)
-        print(out.numpy().shape)
-        # (1, 8, 8)
+        print(out.shape)
+        # [1, 8, 8]
    """
    def __init__(
...
@@ -1677,11 +1677,12 @@ def glu(x, axis=-1, name=None):
         x = paddle.to_tensor(
             [[-0.22014759, -1.76358426, 0.80566144, 0.04241343],
              [-1.94900405, -1.89956081, 0.17134808, -1.11280477]]
         )
-        print(F.glu(x).numpy())
-        # array([[-0.15216254, -0.9004892 ],
-        #        [-1.0577879 , -0.46985325]], dtype=float32)
+        print(F.glu(x))
+        # Tensor(shape=[2, 2], dtype=float32, place=Place(gpu:0), stop_gradient=True,
+        #        [[-0.15216254, -0.90048921],
+        #         [-1.05778778, -0.46985325]])
    """
    check_variable_and_dtype(
...
@@ -657,10 +657,9 @@ def conv2d(
         w_var = paddle.randn((6, 3, 3, 3), dtype='float32')
         y_var = F.conv2d(x_var, w_var)
-        y_np = y_var.numpy()
-        print(y_np.shape)
-        # (2, 6, 6, 6)
+        print(y_var.shape)
+        # [2, 6, 6, 6]
    """
    # entry checks
    if data_format not in ["NCHW", "NHWC"]:
@@ -1234,10 +1233,9 @@ def conv2d_transpose(
         w_var = paddle.randn((3, 6, 3, 3), dtype='float32')
         y_var = F.conv2d_transpose(x_var, w_var)
-        y_np = y_var.numpy()
-        print(y_np.shape)
-        # (2, 6, 10, 10)
+        print(y_var.shape)
+        # [2, 6, 10, 10]
    """
    if data_format not in ['NCHW', 'NHWC']:
@@ -1523,10 +1521,9 @@ def conv3d(
         w_var = paddle.randn((6, 3, 3, 3, 3), dtype='float32')
         y_var = F.conv3d(x_var, w_var)
-        y_np = y_var.numpy()
-        print(y_np.shape)
-        # (2, 6, 6, 6, 6)
+        print(y_var.shape)
+        # [2, 6, 6, 6, 6]
    """
    # entry check
    if data_format not in ["NCDHW", "NDHWC"]:
@@ -1738,10 +1735,9 @@ def conv3d_transpose(
         w_var = paddle.randn((3, 6, 3, 3, 3), dtype='float32')
         y_var = F.conv3d_transpose(x_var, w_var)
-        y_np = y_var.numpy()
-        print(y_np.shape)
-        # (2, 6, 10, 10, 10)
+        print(y_var.shape)
+        # [2, 6, 10, 10, 10]
    """
    # entry checks
    if data_format not in ["NCDHW", "NDHWC"]:
...
@@ -63,7 +63,9 @@ def pairwise_distance(x, y, p=2.0, epsilon=1e-6, keepdim=False, name=None):
         x = paddle.to_tensor([[1., 3.], [3., 5.]], dtype=paddle.float64)
         y = paddle.to_tensor([[5., 6.], [7., 8.]], dtype=paddle.float64)
         distance = paddle.nn.functional.pairwise_distance(x, y)
-        print(distance.numpy()) # [5. 5.]
+        print(distance)
+        # Tensor(shape=[2], dtype=float64, place=Place(gpu:0), stop_gradient=True,
+        #        [4.99999860, 4.99999860])
    """
    check_type(p, 'porder', (float, int), 'PairwiseDistance')
...
@@ -215,10 +215,11 @@ def sequence_mask(x, maxlen=None, dtype='int64', name=None):
         lengths = paddle.to_tensor([10, 9, 8])
         mask = paddle.nn.functional.sequence_mask(lengths)
-        print(mask.numpy())
-        # [[1 1 1 1 1 1 1 1 1 1]
-        #  [1 1 1 1 1 1 1 1 1 0]
-        #  [1 1 1 1 1 1 1 1 0 0]]
+        print(mask)
+        # Tensor(shape=[3, 10], dtype=int64, place=Place(gpu:0), stop_gradient=True,
+        #        [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
+        #         [1, 1, 1, 1, 1, 1, 1, 1, 1, 0],
+        #         [1, 1, 1, 1, 1, 1, 1, 1, 0, 0]])
    """
...
@@ -1353,17 +1353,20 @@ def l1_loss(input, label, reduction='mean', name=None):
         label = paddle.to_tensor([[1.7, 1], [0.4, 0.5]])

         l1_loss = paddle.nn.functional.l1_loss(input, label)
-        print(l1_loss.numpy())
-        # [0.35]
+        print(l1_loss)
+        # Tensor(shape=[1], dtype=float32, place=Place(gpu:0), stop_gradient=True,
+        #        [0.34999999])

         l1_loss = paddle.nn.functional.l1_loss(input, label, reduction='none')
-        print(l1_loss.numpy())
-        # [[0.20000005 0.19999999]
-        #  [0.2        0.79999995]]
+        print(l1_loss)
+        # Tensor(shape=[2, 2], dtype=float32, place=Place(gpu:0), stop_gradient=True,
+        #        [[0.20000005, 0.19999999],
+        #         [0.20000000, 0.79999995]])

         l1_loss = paddle.nn.functional.l1_loss(input, label, reduction='sum')
-        print(l1_loss.numpy())
-        # [1.4]
+        print(l1_loss)
+        # Tensor(shape=[1], dtype=float32, place=Place(gpu:0), stop_gradient=True,
+        #        [1.39999998])
    """
    if reduction not in ['sum', 'mean', 'none']:
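A side effect visible in the l1_loss hunk above: the Tensor repr prints float32 values at full stored precision, so rounded NumPy-era comments such as `[0.35]` become `[0.34999999]`. This is ordinary binary floating point, not a behavior change; a quick illustrative sketch (the values are hypothetical, any float32 arithmetic shows the same effect):

    import numpy as np

    # 0.35 has no exact binary representation; float32 stores the
    # nearest representable value, which lies slightly below 0.35.
    x = np.float32(0.35)
    print(x)           # 0.35  (NumPy's scalar repr rounds to shortest)
    print(f"{x:.8f}")  # 0.34999999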
@@ -2530,9 +2533,11 @@ def cross_entropy(
         cross_entropy_loss = paddle.nn.loss.CrossEntropyLoss(
             weight=weight, reduction=reduction)
         dy_ret = cross_entropy_loss(
             input,
             label)
-        print(dy_ret.numpy()) #[5.41993642]
+        print(dy_ret)
+        # Tensor(shape=[1], dtype=float64, place=Place(gpu:0), stop_gradient=True,
+        #        [5.34043430])

     .. code-block:: python
@@ -2550,13 +2555,15 @@ def cross_entropy(
         labels = paddle.uniform(shape, dtype='float64', min=0.1, max=1.0)
         labels /= paddle.sum(labels, axis=axis, keepdim=True)
         paddle_loss_mean = paddle.nn.functional.cross_entropy(
             logits,
             labels,
             soft_label=True,
             axis=axis,
             weight=weight,
             reduction=reduction)
-        print(paddle_loss_mean.numpy()) #[1.12908343]
+        print(paddle_loss_mean)
+        # Tensor(shape=[1], dtype=float64, place=Place(gpu:0), stop_gradient=True,
+        #        [1.11043464])
    """
...
@@ -368,9 +368,8 @@ def pixel_shuffle(x, upscale_factor, data_format="NCHW", name=None):
         x = paddle.randn(shape=[2,9,4,4])
         out_var = F.pixel_shuffle(x, 3)
-        out = out_var.numpy()
-        print(out.shape)
-        # (2, 1, 12, 12)
+        print(out_var.shape)
+        # [2, 1, 12, 12]
    """
    if not isinstance(upscale_factor, int):
        raise TypeError("upscale factor must be int type")
...
@@ -32,11 +32,13 @@ class Constant(ConstantInitializer):
         data = paddle.rand([30, 10, 2], dtype='float32')
         linear = nn.Linear(2,
                            4,
                            weight_attr=nn.initializer.Constant(value=2.0))
         res = linear(data)
-        print(linear.weight.numpy())
-        #result is [[2. 2. 2. 2.],[2. 2. 2. 2.]]
+        print(linear.weight)
+        # Tensor(shape=[2, 4], dtype=float32, place=Place(gpu:0), stop_gradient=False,
+        #        [[2., 2., 2., 2.],
+        #         [2., 2., 2., 2.]])
    """
...
@@ -668,9 +668,8 @@ class Conv2D(_ConvNd):
         conv = nn.Conv2D(4, 6, (3, 3))
         y_var = conv(x_var)
-        y_np = y_var.numpy()
-        print(y_np.shape)
-        # (2, 6, 6, 6)
+        print(y_var.shape)
+        # [2, 6, 6, 6]
    """
    def __init__(
@@ -841,9 +840,8 @@ class Conv2DTranspose(_ConvNd):
         conv = nn.Conv2DTranspose(4, 6, (3, 3))
         y_var = conv(x_var)
-        y_np = y_var.numpy()
-        print(y_np.shape)
-        # (2, 6, 10, 10)
+        print(y_var.shape)
+        # [2, 6, 10, 10]
    """
    def __init__(
@@ -999,9 +997,8 @@ class Conv3D(_ConvNd):
         conv = nn.Conv3D(4, 6, (3, 3, 3))
         y_var = conv(x_var)
-        y_np = y_var.numpy()
-        print(y_np.shape)
-        # (2, 6, 6, 6, 6)
+        print(y_var.shape)
+        # [2, 6, 6, 6, 6]
    """
    def __init__(
@@ -1181,9 +1178,8 @@ class Conv3DTranspose(_ConvNd):
         conv = nn.Conv3DTranspose(4, 6, (3, 3, 3))
         y_var = conv(x_var)
-        y_np = y_var.numpy()
-        print(y_np.shape)
-        # (2, 6, 10, 10, 10)
+        print(y_var.shape)
+        # [2, 6, 10, 10, 10]
    """
    def __init__(
...
@@ -56,7 +56,9 @@ class PairwiseDistance(Layer):
         y = paddle.to_tensor([[5., 6.], [7., 8.]], dtype=paddle.float64)
         dist = paddle.nn.PairwiseDistance()
         distance = dist(x, y)
-        print(distance.numpy()) # [5. 5.]
+        print(distance)
+        # Tensor(shape=[2], dtype=float64, place=Place(gpu:0), stop_gradient=True,
+        #        [4.99999860, 4.99999860])
    """
...
@@ -102,7 +102,9 @@ class BCEWithLogitsLoss(Layer):
         label = paddle.to_tensor([1.0, 0.0, 1.0], dtype="float32")
         bce_logit_loss = paddle.nn.BCEWithLogitsLoss()
         output = bce_logit_loss(logit, label)
-        print(output.numpy()) # [0.45618808]
+        print(output)
+        # Tensor(shape=[1], dtype=float32, place=Place(gpu:0), stop_gradient=True,
+        #        [0.45618814])
    """
@@ -319,9 +321,11 @@ class CrossEntropyLoss(Layer):
         cross_entropy_loss = paddle.nn.loss.CrossEntropyLoss(
             weight=weight, reduction=reduction)
         dy_ret = cross_entropy_loss(
             input,
             label)
-        print(dy_ret.numpy()) #[5.41993642]
+        print(dy_ret)
+        # Tensor(shape=[1], dtype=float64, place=Place(gpu:0), stop_gradient=True,
+        #        [5.34043430])

     .. code-block:: python
@@ -339,13 +343,15 @@ class CrossEntropyLoss(Layer):
         labels = paddle.uniform(shape, dtype='float64', min=0.1, max=1.0)
         labels /= paddle.sum(labels, axis=axis, keepdim=True)
         paddle_loss_mean = paddle.nn.functional.cross_entropy(
             logits,
             labels,
             soft_label=True,
             axis=axis,
             weight=weight,
             reduction=reduction)
-        print(paddle_loss_mean.numpy()) #[1.12908343]
+        print(paddle_loss_mean)
+        # Tensor(shape=[1], dtype=float64, place=Place(gpu:0), stop_gradient=True,
+        #        [1.11043464])
    """
@@ -635,19 +641,22 @@ class L1Loss(Layer):
         l1_loss = paddle.nn.L1Loss()
         output = l1_loss(input, label)
-        print(output.numpy())
-        # [0.35]
+        print(output)
+        # Tensor(shape=[1], dtype=float32, place=Place(gpu:0), stop_gradient=True,
+        #        [0.34999999])

         l1_loss = paddle.nn.L1Loss(reduction='sum')
         output = l1_loss(input, label)
-        print(output.numpy())
-        # [1.4]
+        print(output)
+        # Tensor(shape=[1], dtype=float32, place=Place(gpu:0), stop_gradient=True,
+        #        [1.39999998])

         l1_loss = paddle.nn.L1Loss(reduction='none')
         output = l1_loss(input, label)
         print(output)
-        # [[0.20000005 0.19999999]
-        #  [0.2        0.79999995]]
+        # Tensor(shape=[2, 2], dtype=float32, place=Place(gpu:0), stop_gradient=True,
+        #        [[0.20000005, 0.19999999],
+        #         [0.20000000, 0.79999995]])
    """
...
@@ -52,10 +52,9 @@ class PixelShuffle(Layer):
         x = paddle.randn(shape=[2,9,4,4])
         pixel_shuffle = nn.PixelShuffle(3)
-        out_var = pixel_shuffle(x)
-        out = out_var.numpy()
+        out = pixel_shuffle(x)
         print(out.shape)
-        # (2, 1, 12, 12)
+        # [2, 1, 12, 12]
    """
...
@@ -628,10 +628,8 @@ class QuantizedConv2DTranspose(Layer):
         conv_quantized = QuantizedConv2DTranspose(conv)
         y_quantized = conv_quantized(x_var)
         y_var = conv(x_var)
-        y_quantized_np = y_quantized.numpy()
-        y_np = y_var.numpy()
-        print(y_np.shape, y_quantized_np.shape)
-        # (2, 6, 10, 10), (2, 6, 10, 10)
+        print(y_var.shape, y_quantized.shape)
+        # [2, 6, 10, 10], [2, 6, 10, 10]
    """
...
@@ -2453,7 +2453,10 @@ def unique(
         x = paddle.to_tensor([2, 3, 3, 1, 5, 3])
         unique = paddle.unique(x)
-        np_unique = unique.numpy() # [1 2 3 5]
+        print(unique)
+        # Tensor(shape=[4], dtype=int64, place=Place(gpu:0), stop_gradient=True,
+        #        [1, 2, 3, 5])
         _, indices, inverse, counts = paddle.unique(x, return_index=True, return_inverse=True, return_counts=True)
         print(indices)
         # Tensor(shape=[4], dtype=int64, place=Place(gpu:0), stop_gradient=True,
...
@@ -88,7 +88,7 @@ class Conll05st(Dataset):
         model = SimpleNet()
         pred_idx, mark, label = model(pred_idx, mark, label)
-        print(pred_idx.numpy(), mark.numpy(), label.numpy())
+        print(pred_idx, mark, label)
    """
...
@@ -67,7 +67,7 @@ class Imdb(Dataset):
         model = SimpleNet()
         image, label = model(doc, label)
-        print(doc.numpy().shape, label.numpy().shape)
+        print(doc.shape, label.shape)
    """
...
@@ -67,7 +67,7 @@ class Imikolov(Dataset):
         model = SimpleNet()
         src, trg = model(src, trg)
-        print(src.numpy().shape, trg.numpy().shape)
+        print(src.shape, trg.shape)
    """
...
@@ -134,7 +134,7 @@ class Movielens(Dataset):
         model = SimpleNet()
         category, title, rating = model(category, title, rating)
-        print(category.numpy().shape, title.numpy().shape, rating.numpy().shape)
+        print(category.shape, title.shape, rating.shape)
    """
...