Commit d9f7e56b authored by 万万没想到

address review comments in doc/comments

Parent 0565e464
@@ -276,7 +276,7 @@ def initializer(init, shape=None, dtype=mstype.float32):
shape (Union[tuple, list, int]): A list of integers, a tuple of integers or an integer as the shape of
output. Default: None.
dtype (:class:`mindspore.dtype`): The type of data in initialized tensor. Default: mstype.float32.
dtype (:class:`mindspore.dtype`): The type of data in initialized tensor. Default: mindspore.float32.
Returns:
Tensor, initialized tensor.
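For reference, a minimal sketch of calling `initializer` with the dtype notation this hunk corrects (assuming the usual `mindspore.common.initializer` import path and the `'ones'` string alias):

    import mindspore.common.dtype as mstype
    from mindspore.common.initializer import initializer

    # Build a (2, 3) float32 tensor of ones; 'ones' is one of the string
    # aliases accepted as `init`.
    ones_tensor = initializer('ones', [2, 3], mstype.float32)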
@@ -62,7 +62,7 @@ class ExpandDims(PrimitiveWithInfer):
Examples:
>>> input_tensor = Tensor(np.array([[2, 2], [2, 2]]), mindspore.float32)
>>> expand_dims = ExpandDims()
>>> expand_dims = P.ExpandDims()
>>> output = expand_dims(input_tensor, 0)
"""
@@ -101,7 +101,7 @@ class DType(PrimitiveWithInfer):
Examples:
>>> input_tensor = Tensor(np.array([[2, 2], [2, 2]]), mindspore.float32)
>>> type = DType()(input_tensor)
>>> type = P.DType()(input_tensor)
"""
@prim_attr_register
@@ -134,7 +134,7 @@ class SameTypeShape(PrimitiveWithInfer):
Examples:
>>> input_x = Tensor(np.array([[2, 2], [2, 2]]), mindspore.float32)
>>> input_y = Tensor(np.array([[2, 2], [2, 2]]), mindspore.float32)
>>> out = SameTypeShape()(input_x, input_y)
>>> out = P.SameTypeShape()(input_x, input_y)
"""
@prim_attr_register
@@ -175,7 +175,7 @@ class Cast(PrimitiveWithInfer):
>>> input_np = np.random.randn(2, 3, 4, 5).astype(np.float32)
>>> input_x = Tensor(input_np)
>>> type_dst = mindspore.int32
>>> cast = Cast()
>>> cast = P.Cast()
>>> result = cast(input_x, type_dst)
>>> expect = input_np.astype(type_dst)
"""
@@ -227,7 +227,7 @@ class IsSubClass(PrimitiveWithInfer):
bool, the check result.
Examples:
>>> result = IsSubClass()(mindspore.int32, mindspore.intc)
>>> result = P.IsSubClass()(mindspore.int32, mindspore.intc)
"""
@prim_attr_register
@@ -262,7 +262,7 @@ class IsInstance(PrimitiveWithInfer):
Examples:
>>> a = 1
>>> result = IsInstance()(a, mindspore.int32)
>>> result = P.IsInstance()(a, mindspore.int32)
"""
@prim_attr_register
@@ -303,7 +303,7 @@ class Reshape(PrimitiveWithInfer):
Examples:
>>> input_tensor = Tensor(np.array([[-0.1, 0.3, 3.6], [0.4, 0.5, -3.2]]), mindspore.float32)
>>> reshape = Reshape()
>>> reshape = P.Reshape()
>>> output = reshape(input_tensor, (3, 2))
"""
@@ -366,7 +366,7 @@ class Shape(Primitive):
Examples:
>>> input_tensor = Tensor(np.ones(shape=[3, 2, 1]), mindspore.float32)
>>> shape = Shape()
>>> shape = P.Shape()
>>> output = shape(input_tensor)
"""
@@ -398,7 +398,7 @@ class Squeeze(PrimitiveWithInfer):
Examples:
>>> input_tensor = Tensor(np.ones(shape=[3, 2, 1]), mindspore.float32)
>>> squeeze = Squeeze(2)
>>> squeeze = P.Squeeze(2)
>>> output = squeeze(input_tensor)
"""
@@ -450,7 +450,7 @@ class Transpose(PrimitiveWithInfer):
Examples:
>>> input_tensor = Tensor(np.array([[[1, 2, 3], [4, 5, 6]], [[7, 8, 9], [10, 11, 12]]]), mindspore.float32)
>>> perm = (0, 2, 1)
>>> transpose = Transpose()
>>> transpose = P.Transpose()
>>> output = transpose(input_tensor, perm)
"""
@@ -504,10 +504,10 @@ class GatherV2(PrimitiveWithInfer):
Tensor, the shape of tensor is :math:`(z_1, z_2, ..., z_N)`.
Examples:
>>> params = Tensor(np.array([[1, 2, 7, 42], [3, 4, 54, 22], [2, 2, 55, 3]]), mindspore.float32)
>>> indices = Tensor(np.array([1, 2]), mindspore.int32)
>>> input_params = Tensor(np.array([[1, 2, 7, 42], [3, 4, 54, 22], [2, 2, 55, 3]]), mindspore.float32)
>>> input_indices = Tensor(np.array([1, 2]), mindspore.int32)
>>> axis = 1
>>> out = GatherV2()(params, indices, axis)
>>> out = P.GatherV2()(input_params, input_indices, axis)
"""
@prim_attr_register
@@ -556,7 +556,7 @@ class Split(PrimitiveWithInfer):
:math:`(y_1, y_2, ..., y_S)`.
Examples:
>>> split = Split(1, 2)
>>> split = P.Split(1, 2)
>>> x = Tensor(np.array([[1, 1, 1, 1], [2, 2, 2, 2]]))
>>> output = split(x)
"""
@@ -606,7 +606,7 @@ class Rank(PrimitiveWithInfer):
Examples:
>>> input_tensor = Tensor(np.array([[2, 2], [2, 2]]), mindspore.float32)
>>> rank = Rank()
>>> rank = P.Rank()
>>> rank(input_tensor)
"""
@@ -640,7 +640,7 @@ class TruncatedNormal(PrimitiveWithInfer):
Examples:
>>> input_shape = Tensor(np.array([1, 2, 3]))
>>> truncated_normal = TruncatedNormal()
>>> truncated_normal = P.TruncatedNormal()
>>> output = truncated_normal(input_shape)
"""
@@ -681,7 +681,7 @@ class Size(PrimitiveWithInfer):
Examples:
>>> input_tensor = Tensor(np.array([[2, 2], [2, 2]]), mindspore.float32)
>>> size = Size()
>>> size = P.Size()
>>> output = size(input_tensor)
"""
@@ -826,7 +826,7 @@ class TupleToArray(PrimitiveWithInfer):
Tensor, if the input tuple contains `N` numbers, then the output tensor shape is (N,).
Examples:
>>> type = TupleToArray()((1,2,3))
>>> type = P.TupleToArray()((1,2,3))
"""
@prim_attr_register
@@ -861,7 +861,7 @@ class ScalarToArray(PrimitiveWithInfer):
Tensor, a 0-D Tensor whose content is the input.
Examples:
>>> op = ScalarToArray()
>>> op = P.ScalarToArray()
>>> data = 1.0
>>> output = op(data)
"""
@@ -893,7 +893,7 @@ class ScalarToTensor(PrimitiveWithInfer):
Tensor, a 0-D Tensor whose content is the input.
Examples:
>>> op = ScalarToTensor()
>>> op = P.ScalarToTensor()
>>> data = 1
>>> output = op(data, mindspore.float32)
"""
@@ -934,7 +934,7 @@ class InvertPermutation(PrimitiveWithInfer):
tuple[int], the length is the same as that of the input.
Examples:
>>> invert = InvertPermutation()
>>> invert = P.InvertPermutation()
>>> input_data = (3, 4, 0, 2, 1)
>>> output = invert(input_data)
>>> output == (2, 4, 3, 0, 1)
@@ -982,8 +982,8 @@ class Argmax(PrimitiveWithInfer):
Tensor, indices of the max value of input tensor across the axis.
Examples:
>>> input = Tensor(np.array([2.0, 3.1, 1.2]))
>>> index = Argmax()(input)
>>> input_x = Tensor(np.array([2.0, 3.1, 1.2]))
>>> index = P.Argmax()(input_x)
>>> assert index == Tensor(1, mindspore.int64)
"""
@@ -1030,8 +1030,8 @@ class Argmin(PrimitiveWithInfer):
Tensor, indices of the min value of input tensor across the axis.
Examples:
>>> input = Tensor(np.array([2.0, 3.1, 1.2]))
>>> index = Argmin()(input)
>>> input_x = Tensor(np.array([2.0, 3.1, 1.2]))
>>> index = P.Argmin()(input_x)
>>> assert index == Tensor(2, mindspore.int64)
"""
@@ -1082,8 +1082,8 @@ class ArgMaxWithValue(PrimitiveWithInfer):
:math:`(x_1, x_2, ..., x_{axis-1}, x_{axis+1}, ..., x_N)`.
Examples:
>>> input = Tensor(np.random.rand(5))
>>> index, output = ArgMaxWithValue()(input)
>>> input_x = Tensor(np.random.rand(5))
>>> index, output = P.ArgMaxWithValue()(input_x)
"""
@prim_attr_register
@@ -1129,8 +1129,8 @@ class ArgMinWithValue(PrimitiveWithInfer):
:math:`(x_1, x_2, ..., x_{axis-1}, x_{axis+1}, ..., x_N)`.
Examples:
>>> input = Tensor(np.random.rand(5))
>>> index, output = ArgMinWithValue()(input)
>>> input_x = Tensor(np.random.rand(5))
>>> index, output = P.ArgMinWithValue()(input_x)
"""
@prim_attr_register
def __init__(self, axis=0, keep_dims=False):
@@ -1325,7 +1325,7 @@ class Concat(PrimitiveWithInfer):
Examples:
>>> data1 = Tensor(np.array([[0, 1], [2, 1]]).astype(np.int32))
>>> data2 = Tensor(np.array([[0, 1], [2, 1]]).astype(np.int32))
>>> op = Concat()
>>> op = P.Concat()
>>> output = op((data1, data2))
"""
@@ -1607,7 +1607,7 @@ class Select(PrimitiveWithInfer):
Tensor, has the same shape as input_y. The shape is :math:`(x_1, x_2, ..., x_N, ..., x_R)`.
Examples:
>>> select = Select()
>>> select = P.Select()
>>> input_x = Tensor([True, False])
>>> input_y = Tensor([2,3], mindspore.float32)
>>> input_z = Tensor([1,2], mindspore.float32)
@@ -1681,7 +1681,7 @@ class StridedSlice(PrimitiveWithInfer):
Examples:
>>> input_x = Tensor([[[1, 1, 1], [2, 2, 2]], [[3, 3, 3], [4, 4, 4]],
>>> [[5, 5, 5], [6, 6, 6]]], mindspore.float32)
>>> slice = StridedSlice()
>>> slice = P.StridedSlice()
>>> output = slice(input_x, (1, 0, 0), (2, 1, 3), (1, 1, 1))
>>> output.shape()
(1, 1, 3)
@@ -1913,9 +1913,9 @@ class ScatterNd(PrimitiveWithInfer):
Tensor, the new tensor, has the same type as `update` and the same shape as `shape`.
Examples:
>>> op = ScatterNd()
>>> update = Tensor(np.array([3.2, 1.1]), mindspore.float32)
>>> op = P.ScatterNd()
>>> indices = Tensor(np.array([[0, 1], [1, 1]]), mindspore.int32)
>>> update = Tensor(np.array([3.2, 1.1]), mindspore.float32)
>>> shape = (3, 3)
>>> output = op(indices, update, shape)
"""
@@ -1964,7 +1964,7 @@ class ResizeNearestNeighbor(PrimitiveWithInfer):
Examples:
>>> input_tensor = Tensor(np.array([[-0.1, 0.3, 3.6], [0.4, 0.5, -3.2]]), mindspore.float32)
>>> resize = ResizeNearestNeighbor((2, 2))
>>> resize = P.ResizeNearestNeighbor((2, 2))
>>> output = resize(input_tensor)
"""
@@ -1997,7 +1997,7 @@ class GatherNd(PrimitiveWithInfer):
Examples:
>>> input_x = Tensor(np.array([[-0.1, 0.3, 3.6], [0.4, 0.5, -3.2]]), mindspore.float32)
>>> indices = Tensor(np.array([[0, 0], [1, 1]]), mindspore.int32)
>>> op = GatherNd()
>>> op = P.GatherNd()
>>> output = op(input_x, indices)
"""
@@ -2039,7 +2039,7 @@ class ScatterNdUpdate(PrimitiveWithInfer):
>>> input_x = Tensor(np.array([[-0.1, 0.3, 3.6], [0.4, 0.5, -3.2]]), mindspore.float32)
>>> indices = Tensor(np.array([[0, 0], [1, 1]]), mindspore.int32)
>>> update = Tensor(np.array([1.0, 2.2]), mindspore.float32)
>>> op = ScatterNdUpdate()
>>> op = P.ScatterNdUpdate()
>>> output = op(input_x, indices, update)
"""
@@ -2090,7 +2090,7 @@ class SpaceToDepth(PrimitiveWithInfer):
Examples:
>>> x = Tensor(np.random.rand(1,3,2,2), mindspore.float32)
>>> block_size = 2
>>> op = SpaceToDepth(block_size)
>>> op = P.SpaceToDepth(block_size)
>>> output = op(x)
>>> output.asnumpy().shape == (1,12,1,1)
"""
@@ -2148,7 +2148,7 @@ class DepthToSpace(PrimitiveWithInfer):
Examples:
>>> x = Tensor(np.random.rand(1,12,1,1), mindspore.float32)
>>> block_size = 2
>>> op = DepthToSpace(block_size)
>>> op = P.DepthToSpace(block_size)
>>> output = op(x)
>>> output.asnumpy().shape == (1,3,2,2)
"""
@@ -2212,8 +2212,8 @@ class SpaceToBatch(PrimitiveWithInfer):
>>> block_size = 2
>>> paddings = [[0, 0], [0, 0]]
>>> space_to_batch = P.SpaceToBatch(block_size, paddings)
>>> x = Tensor(np.array([[[[1, 2], [3, 4]]]]), mindspore.float32)
>>> space_to_batch(x)
>>> input_x = Tensor(np.array([[[[1, 2], [3, 4]]]]), mindspore.float32)
>>> space_to_batch(input_x)
[[[[1.]]], [[[2.]]], [[[3.]]], [[[4.]]]]
"""
@@ -2280,8 +2280,8 @@ class BatchToSpace(PrimitiveWithInfer):
>>> block_size = 2
>>> crops = [[0, 0], [0, 0]]
>>> op = P.BatchToSpace(block_size, crops)
>>> x = Tensor(np.array([[[[1]]], [[[2]]], [[[3]]], [[[4]]]]), mindspore.float32)
>>> output = op(x)
>>> input_x = Tensor(np.array([[[[1]]], [[[2]]], [[[3]]], [[[4]]]]), mindspore.float32)
>>> output = op(input_x)
[[[[1., 2.], [3., 4.]]]]
"""
@@ -112,9 +112,9 @@ class TensorAdd(_MathBinaryOp):
Examples:
>>> add = P.TensorAdd()
>>> x = Tensor(np.array([1,2,3]).astype(np.float32))
>>> y = Tensor(np.array([4,5,6]).astype(np.float32))
>>> add(x, y)
>>> input_x = Tensor(np.array([1,2,3]).astype(np.float32))
>>> input_y = Tensor(np.array([4,5,6]).astype(np.float32))
>>> add(input_x, input_y)
[5,7,9]
"""
@@ -124,23 +124,24 @@ class AssignAdd(PrimitiveWithInfer):
Updates a `Parameter` by adding a value to it.
Inputs:
- **input_x** (Parameter) - The `Parameter`.
- **input_y** (Union[scalar, Tensor]) - Has the same shape as `input_x`.
- **variable** (Parameter) - The `Parameter`.
- **value** (Union[numbers.Number, Tensor]) - The value to be added to the `variable`.
It should have the same shape as `variable` if it is a Tensor.
Examples:
>>> class Net(Cell):
>>> def __init__(self):
>>> super(Net, self).__init__()
>>> self.AssignAdd = P.AssignAdd()
>>> self.inputdata = Parameter(initializer(1, [1], mindspore.int64), name="global_step")
>>> self.variable = Parameter(initializer(1, [1], mindspore.int64), name="global_step")
>>>
>>> def construct(self, x):
>>> self.AssignAdd(self.inputdata, x)
>>> return self.inputdata
>>> self.AssignAdd(self.variable, x)
>>> return self.variable
>>>
>>> net = Net()
>>> x = Tensor(np.ones([1]).astype(np.int64)*100)
>>> net(x)
>>> value = Tensor(np.ones([1]).astype(np.int64)*100)
>>> net(value)
"""
__mindspore_signature__ = (
('variable', sig_rw.RW_WRITE, sig_kind.KIND_POSITIONAL_KEYWORD),
@@ -166,22 +167,24 @@ class AssignSub(PrimitiveWithInfer):
Updates a `Parameter` by subtracting a value from it.
Inputs:
- **input_x** (Parameter) - The `Parameter`.
- **input_y** (Union[scalar, Tensor]) - Has the same shape as `input_x`.
- **variable** (Parameter) - The `Parameter`.
- **value** (Union[numbers.Number, Tensor]) - The value to be subtracted from the `variable`.
It should have the same shape as `variable` if it is a Tensor.
Examples:
>>> class Net(Cell):
>>> def __init__(self):
>>> super(Net, self).__init__()
>>> self.AssignSub = P.AssignSub()
>>> self.inputdata = Parameter(initializer(1, [1], mindspore.int64), name="global_step")
>>> self.variable = Parameter(initializer(1, [1], mindspore.int64), name="global_step")
>>>
>>> def construct(self, x):
>>> self.AssignSub(self.inputdata, x)
>>> return self.inputdata
>>> self.AssignSub(self.variable, x)
>>> return self.variable
>>>
>>> net = Net()
>>> x = Tensor(np.ones([1]).astype(np.int64)*100)
>>> net(x)
>>> value = Tensor(np.ones([1]).astype(np.int64)*100)
>>> net(value)
"""
__mindspore_signature__ = (
@@ -263,9 +266,9 @@ class ReduceMean(_Reduce):
the shape of output is :math:`(x_1, x_4, ..., x_R)`.
Examples:
>>> data = Tensor(np.random.randn(3, 4, 5, 6).astype(np.float32))
>>> input_x = Tensor(np.random.randn(3, 4, 5, 6).astype(np.float32))
>>> op = P.ReduceMean(keep_dims=True)
>>> output = op(data, 1)
>>> output = op(input_x, 1)
"""
@@ -295,9 +298,9 @@ class ReduceSum(_Reduce):
the shape of output is :math:`(x_1, x_4, ..., x_R)`.
Examples:
>>> data = Tensor(np.random.randn(3, 4, 5, 6).astype(np.float32))
>>> input_x = Tensor(np.random.randn(3, 4, 5, 6).astype(np.float32))
>>> op = P.ReduceSum(keep_dims=True)
>>> output = op(data, 1)
>>> output = op(input_x, 1)
"""
@@ -328,9 +331,9 @@ class ReduceAll(_Reduce):
the shape of output is :math:`(x_1, x_4, ..., x_R)`.
Examples:
>>> data = Tensor(np.array([[True, False], [True, True]]))
>>> input_x = Tensor(np.array([[True, False], [True, True]]))
>>> op = P.ReduceAll(keep_dims=True)
>>> output = op(data, 1)
>>> output = op(input_x, 1)
"""
def __infer__(self, input_x, axis):
@@ -364,9 +367,9 @@ class ReduceMax(_Reduce):
the shape of output is :math:`(x_1, x_4, ..., x_R)`.
Examples:
>>> data = Tensor(np.random.randn(3, 4, 5, 6).astype(np.float32))
>>> input_x = Tensor(np.random.randn(3, 4, 5, 6).astype(np.float32))
>>> op = P.ReduceMax(keep_dims=True)
>>> output = op(data, 1)
>>> output = op(input_x, 1)
"""
@@ -397,9 +400,9 @@ class ReduceMin(_Reduce):
the shape of output is :math:`(x_1, x_4, ..., x_R)`.
Examples:
>>> data = Tensor(np.random.randn(3, 4, 5, 6).astype(np.float32))
>>> input_x = Tensor(np.random.randn(3, 4, 5, 6).astype(np.float32))
>>> op = P.ReduceMin(keep_dims=True)
>>> output = op(data, 1)
>>> output = op(input_x, 1)
"""
@@ -429,9 +432,9 @@ class ReduceProd(_Reduce):
the shape of output is :math:`(x_1, x_4, ..., x_R)`.
Examples:
>>> data = Tensor(np.random.randn(3, 4, 5, 6).astype(np.float32))
>>> input_x = Tensor(np.random.randn(3, 4, 5, 6).astype(np.float32))
>>> op = P.ReduceProd(keep_dims=True)
>>> output = op(data, 1)
>>> output = op(input_x, 1)
"""
@@ -451,15 +454,15 @@ class CumProd(PrimitiveWithInfer):
Tensor, has the same shape and dtype as `input_x`.
Examples:
>>> data = Tensor(np.array([a, b, c]).astype(np.float32))
>>> input_x = Tensor(np.array([a, b, c]).astype(np.float32))
>>> op0 = P.CumProd()
>>> output = op0(data, 0) # output=[a, a * b, a * b * c]
>>> output = op0(input_x, 0) # output=[a, a * b, a * b * c]
>>> op1 = P.CumProd(exclusive=True)
>>> output = op1(data, 0) # output=[1, a, a * b]
>>> output = op1(input_x, 0) # output=[1, a, a * b]
>>> op2 = P.CumProd(reverse=True)
>>> output = op2(data, 0) # output=[a * b * c, b * c, c]
>>> output = op2(input_x, 0) # output=[a * b * c, b * c, c]
>>> op3 = P.CumProd(exclusive=True, reverse=True)
>>> output = op3(data, 0) # output=[b * c, c, 1]
>>> output = op3(input_x, 0) # output=[b * c, c, 1]
"""
@prim_attr_register
def __init__(self, exclusive=False, reverse=False):
@@ -1190,7 +1193,7 @@ class FloorMod(_MathBinaryOp):
Examples:
>>> input_x = Tensor(np.array([2, 4, -1]), mindspore.int32)
>>> input_y = Tensor(np.array([3, 3, 3]), mindspore.int32)
>>> floor_mod = FloorMod()
>>> floor_mod = P.FloorMod()
>>> floor_mod(input_x, input_y)
[2, 1, 2]
"""
@@ -1207,9 +1210,9 @@ class Acosh(PrimitiveWithInfer):
Tensor, has the same shape as `input_x`.
Examples:
>>> acosh = Acosh()
>>> X = Tensor(np.array([1.0, 1.5, 3.0, 100.0]), mindspore.float32)
>>> output = acosh(X)
>>> acosh = P.Acosh()
>>> input_x = Tensor(np.array([1.0, 1.5, 3.0, 100.0]), mindspore.float32)
>>> output = acosh(input_x)
"""
@prim_attr_register
@@ -1286,7 +1289,7 @@ class EqualCount(PrimitiveWithInfer):
- **input_y** (Tensor) - The second input tensor.
Outputs:
Tensor, has the same shape as the `input_x`.
Tensor, with the data type `mindspore.int32` and size (1,).
Examples:
>>> input_x = Tensor(np.array([1, 2, 3]), mindspore.int32)
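The example is truncated by the hunk; a hypothetical completion that matches the corrected output description (the second input below is an assumption, not from the diff):

    import numpy as np
    import mindspore
    from mindspore import Tensor
    from mindspore.ops import operations as P

    input_x = Tensor(np.array([1, 2, 3]), mindspore.int32)
    input_y = Tensor(np.array([1, 2, 4]), mindspore.int32)  # assumed second input
    out = P.EqualCount()(input_x, input_y)  # count of equal positions: [2], int32, size (1,)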
@@ -1324,7 +1327,7 @@ class NotEqual(_LogicBinaryOp):
Inputs:
- **input_x** (Union[Tensor, Number, bool]) - The first input is a tensor whose data type is number or bool, or
a number or a bool object.
- **input_y** (Union[Tensor, Number, bool]) - The second input tensor whose data type is same as 'input_x' or
- **input_y** (Union[Tensor, Number, bool]) - The second input tensor whose data type is same as `input_x` or
a number or a bool object.
Outputs:
@@ -1359,11 +1362,11 @@ class Greater(_LogicBinaryOp):
Inputs:
- **input_x** (Union[Tensor, Number]) - The first input is a tensor whose data type is number or a number.
- **input_y** (Union[Tensor, Number]) - The second input is a tensor whose data type is same as 'input_x' or
- **input_y** (Union[Tensor, Number]) - The second input is a tensor whose data type is same as `input_x` or
a number.
Outputs:
Tensor, the shape is same as the shape after broadcasting, and the data type is same as 'input_x'.
Tensor, the shape is same as the shape after broadcasting, and the data type is bool.
Examples:
>>> input_x = Tensor(np.array([1, 2, 3]), mindspore.int32)
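A sketch illustrating the corrected bool output dtype (the second input below is an assumption for illustration):

    import numpy as np
    import mindspore
    from mindspore import Tensor
    from mindspore.ops import operations as P

    input_x = Tensor(np.array([1, 2, 3]), mindspore.int32)
    input_y = Tensor(np.array([1, 1, 4]), mindspore.int32)  # assumed second input
    out = P.Greater()(input_x, input_y)  # [False, True, False], dtype bool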
@@ -1386,11 +1389,11 @@ class GreaterEqual(_LogicBinaryOp):
Inputs:
- **input_x** (Union[Tensor, Number]) - The first input is a tensor whose data type is number or a number.
- **input_y** (Union[Tensor, Number]) - The second input is a tensor whose data type is same as 'input_x' or
- **input_y** (Union[Tensor, Number]) - The second input is a tensor whose data type is same as `input_x` or
a number.
Outputs:
Tensor, the shape is same as the shape after broadcasting, and the data type is bool'.
Tensor, the shape is same as the shape after broadcasting, and the data type is bool.
Examples:
>>> input_x = Tensor(np.array([1, 2, 3]), mindspore.int32)
@@ -1413,7 +1416,7 @@ class Less(_LogicBinaryOp):
Inputs:
- **input_x** (Union[Tensor, Number]) - The first input is a tensor whose data type is number or a number.
- **input_y** (Union[Tensor, Number]) - The second input is a tensor whose data type is same as 'input_x' or
- **input_y** (Union[Tensor, Number]) - The second input is a tensor whose data type is same as `input_x` or
a number.
Outputs:
@@ -1440,7 +1443,7 @@ class LessEqual(_LogicBinaryOp):
Inputs:
- **input_x** (Union[Tensor, Number]) - The first input is a tensor whose data type is number or a number.
- **input_y** (Union[Tensor, Number]) - The second input is a tensor whose data type is same as 'input_x' or
- **input_y** (Union[Tensor, Number]) - The second input is a tensor whose data type is same as `input_x` or
a number.
Outputs:
@@ -1752,8 +1755,8 @@ class Cos(PrimitiveWithInfer):
Examples:
>>> cos = P.Cos()
>>> X = Tensor(np.array([0.24, 0.83, 0.31, 0.09]), mindspore.float32)
>>> output = cos(X)
>>> input_x = Tensor(np.array([0.24, 0.83, 0.31, 0.09]), mindspore.float32)
>>> output = cos(input_x)
"""
@prim_attr_register
@@ -1780,8 +1783,8 @@ class ACos(PrimitiveWithInfer):
Examples:
>>> acos = P.ACos()
>>> X = Tensor(np.array([0.74, 0.04, 0.30, 0.56]), mindspore.float32)
>>> output = acos(X)
>>> input_x = Tensor(np.array([0.74, 0.04, 0.30, 0.56]), mindspore.float32)
>>> output = acos(input_x)
"""
@prim_attr_register
@@ -1993,7 +1996,7 @@ class Atan2(_MathBinaryOp):
- **input_y** (Tensor) - The input tensor.
Outputs:
Tensor, the shape is same as the shape after broadcasting, and the data type is same as 'input_x'.
Tensor, the shape is same as the shape after broadcasting, and the data type is same as `input_x`.
Examples:
>>> input_x = Tensor(np.array([[0, 1]]), mindspore.float32)
@@ -41,7 +41,7 @@ class Flatten(PrimitiveWithInfer):
Examples:
>>> input_tensor = Tensor(np.ones(shape=[1, 2, 3, 4]), mindspore.float32)
>>> flatten = Flatten()
>>> flatten = P.Flatten()
>>> output = flatten(input_tensor)
>>> assert output.shape() == (1, 24)
"""
@@ -155,7 +155,7 @@ class ReLU(PrimitiveWithInfer):
Examples:
>>> input_x = Tensor(np.array([[-1.0, 4.0, -8.0], [2.0, -5.0, 9.0]], np.float32))
>>> relu = ReLU()
>>> relu = P.ReLU()
>>> result = relu(input_x)
[[0, 4.0, 0.0], [2.0, 0.0, 9.0]]
"""
@@ -188,7 +188,7 @@ class ReLU6(PrimitiveWithInfer):
Examples:
>>> input_x = Tensor(np.array([[-1.0, 4.0, -8.0], [2.0, -5.0, 9.0]], np.float32))
>>> relu6 = ReLU6()
>>> relu6 = P.ReLU6()
>>> result = relu6(input_x)
"""
@@ -222,10 +222,10 @@ class Elu(PrimitiveWithInfer):
Examples:
>>> input_x = Tensor(np.array([[-1.0, 4.0, -8.0], [2.0, -5.0, 9.0]], np.float32))
>>> elu = Elu()
>>> elu = P.Elu()
>>> result = elu(input_x)
Tensor([[-0.632 4.0 -0.999]
[2.0 -0.993 9.0 ]], shape=(2, 3), dtype=ms.float32)
[2.0 -0.993 9.0 ]], shape=(2, 3), dtype=mindspore.float32)
"""
@prim_attr_register
@@ -1082,7 +1082,7 @@ class TopK(PrimitiveWithInfer):
Examples:
>>> topk = P.TopK(sorted=True)
>>> input_x = Tensor([1, 2, 3, 4, 5], mindspore.float16))
>>> input_x = Tensor([1, 2, 3, 4, 5], mindspore.float16)
>>> k = 3
>>> values, indices = topk(input_x, k)
>>> assert values == Tensor(np.array([5, 4, 3]))
@@ -1223,8 +1223,8 @@ class ApplyMomentum(PrimitiveWithInfer):
Examples:
>>> net = ResNet50()
>>> loss = SoftmaxCrossEntropyWithLogits()
>>> opt = ApplyMomentum(Tensor(np.array([0.001])), Tensor(np.array([0.9])),
>>> loss = nn.SoftmaxCrossEntropyWithLogits()
>>> opt = P.ApplyMomentum(Tensor(np.array([0.001])), Tensor(np.array([0.9])),
>>> filter(lambda x: x.requires_grad, net.get_parameters()))
>>> model = Model(net, loss, opt)
"""
@@ -1351,6 +1351,7 @@ class SGD(PrimitiveWithInfer):
class ApplyRMSProp(PrimitiveWithInfer):
"""
Optimizer that implements the Root Mean Square prop (RMSProp) algorithm.
Please refer to the usage in the source code of `nn.RMSProp`.
Note:
Update `var` according to the RMSProp algorithm.
@@ -1386,12 +1387,6 @@ class ApplyRMSProp(PrimitiveWithInfer):
Outputs:
Tensor, parameters to be updated.
Examples:
>>> net = Net()
>>> loss = nn.SoftmaxCrossEntropyWithLogits()
>>> opt = RMSProp(params=net.trainable_params(), learning_rate=learning_rate)
>>> model = Model(net, loss, opt)
"""
@prim_attr_register
@@ -1424,6 +1419,7 @@ class ApplyCenteredRMSProp(PrimitiveWithInfer):
class ApplyCenteredRMSProp(PrimitiveWithInfer):
"""
Optimizer that implements the centered RMSProp algorithm.
Please refer to the usage in the source code of `nn.RMSProp`.
Note:
Update `var` according to the centered RMSProp algorithm.
@@ -1464,12 +1460,6 @@ class ApplyCenteredRMSProp(PrimitiveWithInfer):
Outputs:
Tensor, parameters to be updated.
Examples:
>>> net = Net()
>>> loss = nn.SoftmaxCrossEntropyWithLogits()
>>> opt = RMSProp(params=net.trainable_params(), learning_rate=learning_rate, centered=True)
>>> model = Model(net, loss, opt)
"""
@prim_attr_register
@@ -1596,7 +1586,7 @@ class DropoutGenMask(Primitive):
Tensor, the value of generated mask for input shape.
Examples:
>>> dropout_gen_mask = DropoutGenMask()
>>> dropout_gen_mask = P.DropoutGenMask()
>>> shape = (20, 16, 50)
>>> keep_prob = Tensor(0.5, mindspore.float32)
>>> mask = dropout_gen_mask(shape, keep_prob)
@@ -1631,8 +1621,8 @@ class DropoutDoMask(PrimitiveWithInfer):
>>> x = Tensor(np.ones([20, 16, 50]), mindspore.float32)
>>> shape = (20, 16, 50)
>>> keep_prob = Tensor(0.5, mindspore.float32)
>>> dropout_gen_mask = DropoutGenMask()
>>> dropout_do_mask = DropoutDoMask()
>>> dropout_gen_mask = P.DropoutGenMask()
>>> dropout_do_mask = P.DropoutDoMask()
>>> mask = dropout_gen_mask(shape, keep_prob)
>>> output = dropout_do_mask(x, mask, keep_prob)
>>> assert output.shape() == (20, 16, 50)
@@ -1737,7 +1727,7 @@ class OneHot(PrimitiveWithInfer):
Examples:
>>> indices = Tensor(np.array([0, 1, 2]), mindspore.int32)
>>> depth, on_value, off_value = 3, Tensor(1.0, mindspore.float32), Tensor(0.0, mindspore.float32)
>>> onehot = OneHot()
>>> onehot = P.OneHot()
>>> result = onehot(indices, depth, on_value, off_value)
[[1, 0, 0], [0, 1, 0], [0, 0, 1]]
"""
@@ -1793,7 +1783,7 @@ class Gelu(PrimitiveWithInfer):
Examples:
>>> tensor = Tensor(np.array([1.0, 2.0, 3.0]), mindspore.float32)
>>> gelu = Gelu()
>>> gelu = P.Gelu()
>>> result = gelu(tensor)
"""
@@ -1834,7 +1824,7 @@ class GetNext(PrimitiveWithInfer):
and the type is described by `types`.
Examples:
>>> get_next = GetNext([mindspore.float32, mindspore.int32], [[32, 1, 28, 28], [10]], 'shared_name')
>>> get_next = P.GetNext([mindspore.float32, mindspore.int32], [[32, 1, 28, 28], [10]], 'shared_name')
>>> feature, label = get_next()
"""
@@ -2015,7 +2005,7 @@ class Pad(PrimitiveWithInfer):
Examples:
>>> input_tensor = Tensor(np.array([[-0.1, 0.3, 3.6], [0.4, 0.5, -3.2]]), mindspore.float32)
>>> pad_op = Pad(((1, 2), (2, 1)))
>>> pad_op = P.Pad(((1, 2), (2, 1)))
>>> output_tensor = pad_op(input_tensor)
>>> assert output_tensor == Tensor(np.array([[ 0. , 0. , 0. , 0. , 0. , 0. ],
>>> [ 0. , 0. , -0.1, 0.3, 3.6, 0. ],
@@ -406,7 +406,7 @@ def export(net, *inputs, file_name, file_format='GEIR'):
file_format (str): MindSpore currently supports 'GEIR', 'ONNX' and 'LITE' format for exported model.
- GEIR: Graph Engine Intermediate Representation. An intermediate representation format of
Ascend model.
- ONNX: Open Neural Network eXchange. An open format built to represent machine learning models.
- LITE: Huawei model format for mobile.
"""