Unverified Commit 0a15b0db authored by yuchen202, committed by GitHub

[xdoctest] reformat example code with google style in No.36-43 (#56440)

Parent 71e28b12
@@ -368,24 +368,24 @@ def conv1d(
    Examples:
        .. code-block:: python

            >>> import paddle
            >>> import paddle.nn.functional as F

            >>> x = paddle.to_tensor([[[4, 8, 1, 9],
            ...                        [7, 2, 0, 9],
            ...                        [6, 9, 2, 6]]], dtype="float32")
            >>> w = paddle.to_tensor([[[9, 3, 4],
            ...                        [0, 0, 7],
            ...                        [2, 5, 6]],
            ...                       [[0, 3, 4],
            ...                        [2, 9, 7],
            ...                        [5, 6, 8]]], dtype="float32")

            >>> y = F.conv1d(x, w)
            >>> print(y)
            Tensor(shape=[1, 2, 2], dtype=float32, place=Place(cpu), stop_gradient=True,
            [[[133., 238.],
              [160., 211.]]])
    """
    cudnn_version = get_cudnn_version()
    if cudnn_version is not None:
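As a sanity check on the doctest output above, the same values can be reproduced with a few lines of numpy; this is an illustrative sketch of the cross-correlation `F.conv1d` computes with stride 1 and no padding, not Paddle's implementation:

.. code-block:: python

    >>> import numpy as np
    >>> x = np.array([[4, 8, 1, 9], [7, 2, 0, 9], [6, 9, 2, 6]], dtype=np.float32)  # (C_in, L)
    >>> w = np.array([[[9, 3, 4], [0, 0, 7], [2, 5, 6]],
    ...               [[0, 3, 4], [2, 9, 7], [5, 6, 8]]], dtype=np.float32)         # (C_out, C_in, K)
    >>> # slide the kernel along the length axis and sum over input channels
    >>> [[float((x[:, j:j + 3] * w[o]).sum()) for j in range(2)] for o in range(2)]
    [[133.0, 238.0], [160.0, 211.0]]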
@@ -632,16 +632,16 @@ def conv2d(
    Examples:
        .. code-block:: python

            >>> import paddle
            >>> import paddle.nn.functional as F

            >>> x_var = paddle.randn((2, 3, 8, 8), dtype='float32')
            >>> w_var = paddle.randn((6, 3, 3, 3), dtype='float32')
            >>> y_var = F.conv2d(x_var, w_var)
            >>> print(y_var.shape)
            [2, 6, 6, 6]
    """
    # entry checks
    if data_format not in ["NCHW", "NHWC"]:
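The printed `[2, 6, 6, 6]` shape follows directly from the usual convolution size formula; a tiny sketch with a hypothetical helper (not a Paddle API):

.. code-block:: python

    >>> def conv_out_size(n, k, stride=1, pad=0):
    ...     return (n + 2 * pad - k) // stride + 1
    >>> conv_out_size(8, 3)  # each 8x8 spatial dim with a 3x3 kernel -> 6
    6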
@@ -887,20 +887,20 @@ def conv1d_transpose(
    Examples:
        .. code-block:: python

            >>> import paddle
            >>> import paddle.nn.functional as F

            >>> # shape: (1, 2, 4)
            >>> x = paddle.to_tensor([[[4, 0, 9, 7],
            ...                        [8, 0, 9, 2]]], dtype="float32")
            >>> # shape: (2, 1, 2)
            >>> w = paddle.to_tensor([[[7, 0]],
            ...                       [[4, 2]]], dtype="float32")
            >>> y = F.conv1d_transpose(x, w)
            >>> print(y)
            Tensor(shape=[1, 1, 5], dtype=float32, place=Place(cpu), stop_gradient=True,
            [[[60., 16., 99., 75., 4. ]]])
    """
    cudnn_version = get_cudnn_version()
    if cudnn_version is not None:
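The doctest values can be reproduced by viewing transposed convolution as a scatter: every input element adds a scaled copy of the kernel to the output. A numpy sketch under the defaults used above (stride 1, no padding); illustrative only:

.. code-block:: python

    >>> import numpy as np
    >>> x = np.array([[4, 0, 9, 7], [8, 0, 9, 2]], dtype=np.float32)  # (C_in, L)
    >>> w = np.array([[[7, 0]], [[4, 2]]], dtype=np.float32)          # (C_in, C_out, K)
    >>> out = np.zeros(5, dtype=np.float32)                           # L_out = (L - 1) + K = 5
    >>> for c in range(2):
    ...     for j in range(4):
    ...         out[j:j + 2] += x[c, j] * w[c, 0]
    >>> out.tolist()
    [60.0, 16.0, 99.0, 75.0, 4.0]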
@@ -1183,16 +1183,16 @@ def conv2d_transpose(
    Examples:
        .. code-block:: python

            >>> import paddle
            >>> import paddle.nn.functional as F

            >>> x_var = paddle.randn((2, 3, 8, 8), dtype='float32')
            >>> w_var = paddle.randn((3, 6, 3, 3), dtype='float32')
            >>> y_var = F.conv2d_transpose(x_var, w_var)
            >>> print(y_var.shape)
            [2, 6, 10, 10]
    """
    if data_format not in ['NCHW', 'NHWC']:
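Likewise, the `[2, 6, 10, 10]` shape matches the transposed-convolution size formula (hypothetical helper, mirroring the sketch above):

.. code-block:: python

    >>> def conv_transpose_out_size(n, k, stride=1, pad=0):
    ...     return (n - 1) * stride - 2 * pad + k
    >>> conv_transpose_out_size(8, 3)  # each 8x8 spatial dim grows to 10
    10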
@@ -1476,16 +1476,16 @@ def conv3d(
    Examples:
        .. code-block:: python

            >>> import paddle
            >>> import paddle.nn.functional as F

            >>> x_var = paddle.randn((2, 3, 8, 8, 8), dtype='float32')
            >>> w_var = paddle.randn((6, 3, 3, 3, 3), dtype='float32')
            >>> y_var = F.conv3d(x_var, w_var)
            >>> print(y_var.shape)
            [2, 6, 6, 6, 6]
    """
    # entry check
    if data_format not in ["NCDHW", "NDHWC"]:
@@ -1688,18 +1688,18 @@ def conv3d_transpose(
        variable storing transposed convolution and non-linearity activation result.

    Examples:
        .. code-block:: python

            >>> import paddle
            >>> import paddle.nn.functional as F

            >>> x_var = paddle.randn((2, 3, 8, 8, 8), dtype='float32')
            >>> w_var = paddle.randn((3, 6, 3, 3, 3), dtype='float32')
            >>> y_var = F.conv3d_transpose(x_var, w_var)
            >>> print(y_var.shape)
            [2, 6, 10, 10, 10]
    """
    # entry checks
    if data_format not in ["NCDHW", "NDHWC"]:
......
@@ -59,14 +59,13 @@ def pairwise_distance(x, y, p=2.0, epsilon=1e-6, keepdim=False, name=None):
    Examples:
        .. code-block:: python

            >>> import paddle
            >>> x = paddle.to_tensor([[1., 3.], [3., 5.]], dtype=paddle.float64)
            >>> y = paddle.to_tensor([[5., 6.], [7., 8.]], dtype=paddle.float64)
            >>> distance = paddle.nn.functional.pairwise_distance(x, y)
            >>> print(distance)
            Tensor(shape=[2], dtype=float64, place=Place(cpu), stop_gradient=True,
            [4.99999860, 4.99999860])
    """
    if in_dynamic_mode():
        sub = _C_ops.subtract(x, y)
......
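The printed distances are the row-wise p-norms of `x - y` with `epsilon` added before the norm (which is why they sit just below 5.0). A quick numpy check, assuming that formula:

.. code-block:: python

    >>> import numpy as np
    >>> x = np.array([[1., 3.], [3., 5.]])
    >>> y = np.array([[5., 6.], [7., 8.]])
    >>> np.linalg.norm(x - y + 1e-6, axis=1).round(8).tolist()
    [4.9999986, 4.9999986]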
@@ -55,48 +55,46 @@ def diag_embed(input, offset=0, dim1=-2, dim2=-1):
    Examples:
        .. code-block:: python

            >>> import paddle
            >>> import paddle.nn.functional as F

            >>> diag_embed_input = paddle.arange(6)

            >>> diag_embed_output1 = F.diag_embed(diag_embed_input)
            >>> print(diag_embed_output1)
            Tensor(shape=[6, 6], dtype=int64, place=Place(cpu), stop_gradient=True,
            [[0, 0, 0, 0, 0, 0],
             [0, 1, 0, 0, 0, 0],
             [0, 0, 2, 0, 0, 0],
             [0, 0, 0, 3, 0, 0],
             [0, 0, 0, 0, 4, 0],
             [0, 0, 0, 0, 0, 5]])

            >>> diag_embed_output2 = F.diag_embed(diag_embed_input, offset=-1, dim1=0, dim2=1)
            >>> print(diag_embed_output2)
            Tensor(shape=[7, 7], dtype=int64, place=Place(cpu), stop_gradient=True,
            [[0, 0, 0, 0, 0, 0, 0],
             [0, 0, 0, 0, 0, 0, 0],
             [0, 1, 0, 0, 0, 0, 0],
             [0, 0, 2, 0, 0, 0, 0],
             [0, 0, 0, 3, 0, 0, 0],
             [0, 0, 0, 0, 4, 0, 0],
             [0, 0, 0, 0, 0, 5, 0]])

            >>> diag_embed_input_2dim = paddle.reshape(diag_embed_input, [2, 3])
            >>> print(diag_embed_input_2dim)
            Tensor(shape=[2, 3], dtype=int64, place=Place(cpu), stop_gradient=True,
            [[0, 1, 2],
             [3, 4, 5]])
            >>> diag_embed_output3 = F.diag_embed(diag_embed_input_2dim, offset=0, dim1=0, dim2=2)
            >>> print(diag_embed_output3)
            Tensor(shape=[3, 2, 3], dtype=int64, place=Place(cpu), stop_gradient=True,
            [[[0, 0, 0],
              [3, 0, 0]],
             [[0, 1, 0],
              [0, 4, 0]],
             [[0, 0, 2],
              [0, 0, 5]]])
    """
    if not isinstance(input, Variable):
        input = assign(input)
@@ -200,16 +198,16 @@ def sequence_mask(x, maxlen=None, dtype='int64', name=None):
    Examples:
        .. code-block:: python

            >>> import paddle

            >>> lengths = paddle.to_tensor([10, 9, 8])
            >>> mask = paddle.nn.functional.sequence_mask(lengths)
            >>> print(mask)
            Tensor(shape=[3, 10], dtype=int64, place=Place(cpu), stop_gradient=True,
            [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
             [1, 1, 1, 1, 1, 1, 1, 1, 1, 0],
             [1, 1, 1, 1, 1, 1, 1, 1, 0, 0]])
    """
@@ -296,14 +294,24 @@ def gather_tree(ids, parents):
    Examples:
        .. code-block:: python

            >>> import paddle

            >>> ids = paddle.to_tensor([[[2, 2], [6, 1]], [[3, 9], [6, 1]], [[0, 1], [9, 0]]])
            >>> parents = paddle.to_tensor([[[0, 0], [1, 1]], [[1, 0], [1, 0]], [[0, 0], [0, 1]]])
            >>> final_sequences = paddle.nn.functional.gather_tree(ids, parents)
            >>> print(final_sequences)
            Tensor(shape=[3, 2, 2], dtype=int64, place=Place(cpu), stop_gradient=True,
            [[[2, 2],
              [1, 6]],
             [[3, 3],
              [6, 1]],
             [[0, 1],
              [9, 0]]])
    """
    if ids.ndim != 3:
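Conceptually, `gather_tree` rebuilds each finished beam by walking the `parents` pointers backward from the final step. A plain-Python sketch of that backtrace (illustrative, not Paddle's kernel) reproduces the doctest result:

.. code-block:: python

    >>> def gather_tree_py(ids, parents):  # nested lists shaped [time][batch][beam]
    ...     T, B, K = len(ids), len(ids[0]), len(ids[0][0])
    ...     out = [[[0] * K for _ in range(B)] for _ in range(T)]
    ...     for b in range(B):
    ...         for k in range(K):
    ...             idx = k
    ...             for t in range(T - 1, -1, -1):  # follow the parent chain toward t = 0
    ...                 out[t][b][k] = ids[t][b][idx]
    ...                 idx = parents[t][b][idx]
    ...     return out
    >>> gather_tree_py([[[2, 2], [6, 1]], [[3, 9], [6, 1]], [[0, 1], [9, 0]]],
    ...                [[[0, 0], [1, 1]], [[1, 0], [1, 0]], [[0, 0], [0, 1]]])
    [[[2, 2], [1, 6]], [[3, 3], [6, 1]], [[0, 1], [9, 0]]]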
@@ -388,11 +396,11 @@ def temporal_shift(x, seg_num, shift_ratio=0.25, name=None, data_format="NCHW"):
    Examples:
        .. code-block:: python

            >>> import paddle
            >>> import paddle.nn.functional as F

            >>> input = paddle.randn([6, 4, 2, 2])
            >>> out = F.temporal_shift(x=input, seg_num=2, shift_ratio=0.2)
    """
    if data_format not in ["NCHW", "NHWC"]:
        raise ValueError(
......
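`temporal_shift` applies the TSM-style shift: the batch is viewed as `seg_num` time steps, a `shift_ratio` slice of channels moves one step along time in each direction, and the remaining channels stay put. A rough numpy sketch of the idea (our reading of the Temporal Shift Module; the fold rounding here is an assumption, not Paddle's exact kernel):

.. code-block:: python

    >>> import numpy as np
    >>> def temporal_shift_np(x, seg_num, shift_ratio=0.25):  # x: (N*T, C, H, W)
    ...     nt, c, h, w = x.shape
    ...     xt = x.reshape(nt // seg_num, seg_num, c, h, w)
    ...     fold = int(c * shift_ratio)  # channels shifted each way (assumed rounding)
    ...     out = np.zeros_like(xt)
    ...     out[:, :-1, :fold] = xt[:, 1:, :fold]                  # shift back in time
    ...     out[:, 1:, fold:2 * fold] = xt[:, :-1, fold:2 * fold]  # shift forward in time
    ...     out[:, :, 2 * fold:] = xt[:, :, 2 * fold:]             # untouched channels
    ...     return out.reshape(nt, c, h, w)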
@@ -181,13 +181,12 @@ def flash_attention(
    Examples:
        .. code-block:: python

            >>> import paddle

            >>> paddle.seed(1)
            >>> q = paddle.rand((1, 128, 2, 16))

            >>> output = paddle.nn.functional.flash_attention.flash_attention(q, q, q, 0.9, False, False)
    """
    head_dim = query.shape[3]
    sdp_func_name = _select_sdp(head_dim)
@@ -340,13 +339,12 @@ def flash_attn_unpadded(
    Examples:
        .. code-block:: python

            >>> import paddle
            >>> paddle.seed(1)

            >>> q = paddle.rand((1, 128, 2, 16))
            >>> output = paddle.nn.functional.flash_attention.flash_attn_unpadded(q, q, q, 0.9, False, False)
            >>> print(output)
    """
    if in_dynamic_mode():
        (
......
@@ -158,24 +158,33 @@ class Uniform(UniformInitializer):
    Examples:
        .. code-block:: python

            >>> import paddle
            >>> paddle.seed(1)

            >>> data = paddle.ones(shape=[3, 1, 2], dtype='float32')
            >>> weight_attr = paddle.framework.ParamAttr(
            ...     name="linear_weight",
            ...     initializer=paddle.nn.initializer.Uniform(low=-0.5, high=0.5))
            >>> bias_attr = paddle.framework.ParamAttr(
            ...     name="linear_bias",
            ...     initializer=paddle.nn.initializer.Uniform(low=-0.5, high=0.5))
            >>> linear = paddle.nn.Linear(2, 2, weight_attr=weight_attr, bias_attr=bias_attr)
            >>> print(linear.weight)
            Parameter containing:
            Tensor(shape=[2, 2], dtype=float32, place=Place(cpu), stop_gradient=False,
            [[-0.48212373,  0.26492310],
             [ 0.17605734, -0.45379421]])

            >>> print(linear.bias)
            Parameter containing:
            Tensor(shape=[2], dtype=float32, place=Place(cpu), stop_gradient=False,
            [-0.11236754,  0.46462214])

            >>> res = linear(data)
            >>> print(res)
            Tensor(shape=[3, 1, 2], dtype=float32, place=Place(cpu), stop_gradient=False,
            [[[-0.41843393,  0.27575102]],
             [[-0.41843393,  0.27575102]],
             [[-0.41843393,  0.27575102]]])
    """

    def __init__(self, low=-1.0, high=1.0, name=None):
......
@@ -214,24 +214,33 @@ class XavierNormal(XavierInitializer):
    Examples:
        .. code-block:: python

            >>> import paddle
            >>> paddle.seed(1)

            >>> data = paddle.ones(shape=[3, 1, 2], dtype='float32')
            >>> weight_attr = paddle.framework.ParamAttr(
            ...     name="linear_weight",
            ...     initializer=paddle.nn.initializer.XavierNormal())
            >>> bias_attr = paddle.framework.ParamAttr(
            ...     name="linear_bias",
            ...     initializer=paddle.nn.initializer.XavierNormal())
            >>> linear = paddle.nn.Linear(2, 2, weight_attr=weight_attr, bias_attr=bias_attr)
            >>> print(linear.weight)
            Parameter containing:
            Tensor(shape=[2, 2], dtype=float32, place=Place(cpu), stop_gradient=False,
            [[-0.21607460,  0.08382989],
             [ 0.29147008, -0.07049121]])

            >>> print(linear.bias)
            Parameter containing:
            Tensor(shape=[2], dtype=float32, place=Place(cpu), stop_gradient=False,
            [1.06076419, 0.87684733])

            >>> res = linear(data)
            >>> print(res)
            Tensor(shape=[3, 1, 2], dtype=float32, place=Place(cpu), stop_gradient=False,
            [[[1.13615966, 0.89018601]],
             [[1.13615966, 0.89018601]],
             [[1.13615966, 0.89018601]]])
    """

    def __init__(self, fan_in=None, fan_out=None, name=None):
@@ -266,24 +275,32 @@ class XavierUniform(XavierInitializer):
    Examples:
        .. code-block:: python

            >>> import paddle
            >>> paddle.seed(1)

            >>> data = paddle.ones(shape=[3, 1, 2], dtype='float32')
            >>> weight_attr = paddle.framework.ParamAttr(
            ...     name="linear_weight",
            ...     initializer=paddle.nn.initializer.XavierUniform())
            >>> bias_attr = paddle.framework.ParamAttr(
            ...     name="linear_bias",
            ...     initializer=paddle.nn.initializer.XavierUniform())
            >>> linear = paddle.nn.Linear(2, 2, weight_attr=weight_attr, bias_attr=bias_attr)
            >>> print(linear.weight)
            Parameter containing:
            Tensor(shape=[2, 2], dtype=float32, place=Place(cpu), stop_gradient=False,
            [[-1.18095720,  0.64892638],
             [ 0.43125069, -1.11156428]])

            >>> print(linear.bias)
            Parameter containing:
            Tensor(shape=[2], dtype=float32, place=Place(cpu), stop_gradient=False,
            [-0.27524316,  1.13808715])

            >>> res = linear(data)
            >>> print(res)
            Tensor(shape=[3, 1, 2], dtype=float32, place=Place(cpu), stop_gradient=False,
            [[[-1.02494967,  0.67544925]],
             [[-1.02494967,  0.67544925]],
             [[-1.02494967,  0.67544925]]])
    """

    def __init__(self, fan_in=None, fan_out=None, name=None):
......