Commit a1bdecb9 authored by mindspore-ci-bot, committed by Gitee

!3856 fix MatrixDiag msg, FloorDiv grad, SoftmaxCrossEntropyLogit msg

Merge pull request !3856 from fangzehua/fix_matrix_msg
@@ -591,7 +591,7 @@ class MatrixDiagPart(Cell):
         Tensor, same type as input `x`. The shape should be x.shape[:-2] + [min(x.shape[-2:])].

     Examples:
-        >>> x = Tensor([[[-1, 0], [0, 1]], [-1, 0], [0, 1]], [[-1, 0], [0, 1]]], mindspore.float32)
+        >>> x = Tensor([[[-1, 0], [0, 1]], [[-1, 0], [0, 1]], [[-1, 0], [0, 1]]], mindspore.float32)
         >>> matrix_diag_part = nn.MatrixDiagPart()
         >>> result = matrix_diag_part(x)
         [[-1., 1.], [-1., 1.], [-1., 1.]]
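The fix balances the nested brackets so `x` is a well-formed batch of three 2x2 matrices. A minimal sketch of the corrected example, assuming a MindSpore build that still provides `nn.MatrixDiagPart` (as this PR's codebase does):

```python
# Sketch of the corrected docstring example; assumes nn.MatrixDiagPart
# exists, as in the MindSpore version this PR targets.
import mindspore
import mindspore.nn as nn
from mindspore import Tensor

# Three 2x2 matrices, shape (3, 2, 2); the old example had unbalanced brackets.
x = Tensor([[[-1, 0], [0, 1]], [[-1, 0], [0, 1]], [[-1, 0], [0, 1]]], mindspore.float32)
result = nn.MatrixDiagPart()(x)   # extracts each matrix's main diagonal
print(result)                     # [[-1., 1.], [-1., 1.], [-1., 1.]]
```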
@@ -622,11 +622,11 @@ class MatrixSetDiag(Cell):
         Tensor, same type as input `x`. The shape same as `x`.

     Examples:
-        >>> x = Tensor([[[-1, 0], [0, 1]], [-1, 0], [0, 1]], [[-1, 0], [0, 1]]], mindspore.float32)
+        >>> x = Tensor([[[-1, 0], [0, 1]], [[-1, 0], [0, 1]], [[-1, 0], [0, 1]]], mindspore.float32)
         >>> diagonal = Tensor([[-1., 2.], [-1., 1.], [-1., 1.]], mindspore.float32)
         >>> matrix_set_diag = nn.MatrixSetDiag()
         >>> result = matrix_set_diag(x, diagonal)
-        [[[-1, 0], [0, 2]], [-1, 0], [0, 1]], [[-1, 0], [0, 1]]]
+        [[[-1, 0], [0, 2]], [[-1, 0], [0, 1]], [[-1, 0], [0, 1]]]
     """

     def __init__(self):
         super(MatrixSetDiag, self).__init__()
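The same bracket fix applies here, to both the input `x` and the expected output. The semantics of the corrected example can be checked with a NumPy equivalent (illustration only, not MindSpore's implementation):

```python
# NumPy sketch of what MatrixSetDiag computes for the corrected example.
import numpy as np

x = np.array([[[-1, 0], [0, 1]]] * 3, dtype=np.float32)        # shape (3, 2, 2)
diagonal = np.array([[-1., 2.], [-1., 1.], [-1., 1.]], np.float32)
out = x.copy()
idx = np.arange(x.shape[-1])
out[:, idx, idx] = diagonal    # overwrite each batch matrix's diagonal
print(out[0])                  # [[-1., 0.], [0., 2.]] -- matches the fixed output
```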
@@ -218,7 +218,8 @@ class SoftmaxCrossEntropyWithLogits(_Loss):
         sparse (bool): Specifies whether labels use sparse format or not. Default: False.
         reduction (Union[str, None]): Type of reduction to apply to loss. Support 'sum' or 'mean' If None,
             do not reduction. Default: None.
-        smooth_factor (float): Label smoothing factor. It is a optional input. Default: 0.
+        smooth_factor (float): Label smoothing factor. It is a optional input which should be in range [0, 1].
+            Default: 0.
         num_classes (int): The number of classes in the task. It is a optional input Default: 2.

     Inputs:
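The doc change pins down the valid range of `smooth_factor`. For context, label smoothing is conventionally `onehot * (1 - smooth_factor) + smooth_factor / num_classes`, which only makes sense for factors in [0, 1]; a sketch with a hypothetical helper (assumed conventional semantics, not copied from MindSpore):

```python
# Hypothetical illustration of the label-smoothing formula the docstring
# now constrains; not the MindSpore source.
import numpy as np

def smooth_labels(onehot, smooth_factor, num_classes):
    # Only meaningful for smooth_factor in [0, 1], hence the documented range.
    return onehot * (1.0 - smooth_factor) + smooth_factor / num_classes

onehot = np.eye(2, dtype=np.float32)
print(smooth_labels(onehot, 0.1, num_classes=2))
# [[0.95 0.05]
#  [0.05 0.95]]
```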
@@ -284,14 +284,9 @@ def get_bprop_ceil(self):

 @bprop_getters.register(P.FloorDiv)
 def get_bprop_floordiv(self):
     """Grad definition for `FloorDiv` operation."""
-    div_op = P.FloorDiv()
-    neg = P.Neg()
-    mul_op = P.Mul()

     def bprop(x, y, out, dout):
-        bc_x = div_op(dout, y)
-        bc_y = neg(mul_op(bc_x, out))
-        return binop_grad_common(x, y, bc_x, bc_y)
+        return zeros_like(x), zeros_like(y)

     return bprop
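The rewritten bprop is the mathematically correct one: `floor(x / y)` is piecewise constant in both arguments, so its gradient is zero almost everywhere. A quick NumPy finite-difference check (illustration only):

```python
# Finite-difference check that floor division has zero gradient
# away from its jump points.
import numpy as np

x, y, eps = 7.3, 2.0, 1e-4
dfdx = (np.floor((x + eps) / y) - np.floor((x - eps) / y)) / (2 * eps)
dfdy = (np.floor(x / (y + eps)) - np.floor(x / (y - eps))) / (2 * eps)
print(dfdx, dfdy)  # 0.0 0.0
```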
@@ -311,14 +306,9 @@ def get_bprop_floormod(self):

 @bprop_getters.register(P.TruncateDiv)
 def get_bprop_truncate_div(self):
     """Grad definition for `TruncateDiv` operation."""
-    div_op = P.TruncateDiv()
-    neg = P.Neg()
-    mul_op = P.Mul()

     def bprop(x, y, out, dout):
-        bc_x = div_op(dout, y)
-        bc_y = neg(mul_op(bc_x, out))
-        return binop_grad_common(x, y, bc_x, bc_y)
+        return zeros_like(x), zeros_like(y)

     return bprop
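`TruncateDiv` rounds toward zero rather than toward negative infinity, so it differs from `FloorDiv` on negative quotients, but it is piecewise constant all the same and the zero gradient applies equally; the same check with `np.trunc` standing in for the op:

```python
# trunc and floor disagree on negatives, yet both are locally constant.
import numpy as np

x, y, eps = -7.3, 2.0, 1e-4
print(np.floor(x / y), np.trunc(x / y))  # -4.0 -3.0: different rounding
dfdx = (np.trunc((x + eps) / y) - np.trunc((x - eps) / y)) / (2 * eps)
print(dfdx)                              # 0.0: still zero gradient a.e.
```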
@@ -14,7 +14,6 @@
 # ============================================================================
 """Define the grad rules of neural network related operations."""
 import math
-import numpy as np
 from mindspore.ops import _selected_grad_ops as SG
 from mindspore.ops.primitive import constexpr
@@ -632,11 +631,8 @@ def get_bprop_onehot(self):

 @constexpr
 def _range_op(start, limit, delta, dtype):
     """helper function for Grad TopK"""
-    range_op = inner.Range(float(start), float(limit), float(delta))
-    length_input = math.ceil((limit - start) / delta)
-    input_tensor = Tensor(list(range(length_input)), dtype)
-    range_out = range_op(input_tensor)
-    return range_out
+    output_tensor = Tensor(list(range(start, limit, delta)), dtype)
+    return output_tensor

 @constexpr
 def _get_1d_shape(in_shape):
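The simplified helper builds the index range on the host with Python's built-in `range` and wraps it in a `Tensor`, instead of constructing and launching an `inner.Range` op; a standalone sketch, assuming integer `start`/`limit`/`delta` as the TopK grad supplies:

```python
# Hypothetical standalone version of the simplified _range_op helper.
from mindspore import Tensor
from mindspore.common import dtype as mstype

def range_op_sketch(start, limit, delta, dtype):
    # Host-side range; no device op is needed for a constexpr helper.
    return Tensor(list(range(start, limit, delta)), dtype)

print(range_op_sketch(0, 6, 2, mstype.int32))  # [0 2 4]
```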