Commit 7fbaf2f6 authored by mindspore-ci-bot, committed by Gitee

!546 GPU add testcase for maximum logical

Merge pull request !546 from VectorSL/add_test_new
...@@ -30,3 +30,8 @@ from .hsigmoid import HSigmoid, gpu_schedule_HSigmoid
from .hsigmoid_grad import HSigmoidGrad, gpu_schedule_HSigmoidGrad
from .hswish import HSwish, gpu_schedule_HSwish
from .hswish_grad import HSwishGrad, gpu_schedule_HSwishGrad
from .logical_or import LogicalOr, gpu_schedule_LogicalOr
from .logical_not import LogicalNot, gpu_schedule_LogicalNot
from .logical_and import LogicalAnd, gpu_schedule_LogicalAnd
from .sub import Sub, gpu_schedule_Sub
from .less_equal import LessEqual, gpu_schedule_LessEqual
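For reference, the entries added above register the GPU schedules behind the standard primitives exercised by the new test cases further down. A minimal usage sketch (not part of the diff) of one of these ops through the public ops API; it assumes a GPU build of MindSpore, and the values are illustrative only:

# Illustrative only: calls P.LogicalOr directly in PyNative mode, mirroring
# the style of the new GPU tests added below. Assumes device_target="GPU" is available.
import numpy as np
import mindspore.context as context
from mindspore.common.tensor import Tensor
from mindspore.ops import operations as P

context.set_context(mode=context.PYNATIVE_MODE, device_target="GPU")
logical_or = P.LogicalOr()
out = logical_or(Tensor(np.array([True, False])), Tensor(np.array([False, False])))
print(out)  # expected: [True, False]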
...@@ -209,6 +209,7 @@ class TrainOneStepWithLossScaleCell(Cell):
            self.gpu_target = True
            self.float_status = P.FloatStatus()
            self.addn = P.AddN()
            self.reshape = P.Reshape()
        else:
            self.gpu_target = False
            self.alloc_status = NPUAllocFloatStatus()
...@@ -260,6 +261,8 @@ class TrainOneStepWithLossScaleCell(Cell):
        else:
            flag_sum = self.hyper_map(F.partial(_grad_overflow), grads)
            flag_sum = self.addn(flag_sum)
            # convert flag_sum to scalar
            flag_sum = self.reshape(flag_sum, (()))
        if self.is_distributed:
            # sum overflow flag over devices
            flag_reduce = self.allreduce(flag_sum)
......
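The new Reshape collapses the summed FloatStatus outputs (each of shape (1,)) into a 0-d tensor, so the overflow flag is a scalar before the optional AllReduce and the final comparison. A standalone sketch of just that step, outside the cell and with hypothetical flag values:

# Standalone sketch (illustrative values, not the cell's real gradients).
# Each FloatStatus output has shape (1,); AddN sums them and Reshape(..., ())
# converts the sum into a scalar tensor, which the overflow check then consumes.
import numpy as np
import mindspore.context as context
from mindspore.common.tensor import Tensor
from mindspore.ops import operations as P

context.set_context(mode=context.PYNATIVE_MODE, device_target="GPU")
addn = P.AddN()
reshape = P.Reshape()
per_param_status = (Tensor(np.array([0.0]).astype(np.float32)),
                    Tensor(np.array([1.0]).astype(np.float32)))
flag_sum = addn(per_param_status)      # shape (1,)
flag_sum = reshape(flag_sum, (()))     # shape (), i.e. a scalar
overflow = flag_sum.asnumpy() > 0      # any positive value signals overflow
print(overflow)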
...@@ -27,3 +27,8 @@ from .hsigmoid import _hsigmoid_akg
from .hsigmoid_grad import _hsigmoid_grad_akg
from .hswish import _hswish_akg
from .hswish_grad import _hswish_grad_akg
from .sub import _sub_akg
from .logical_and import _logical_and_akg
from .logical_not import _logical_not_akg
from .logical_or import _logical_or_akg
from .lessequal import _lessequal_akg
...@@ -1495,6 +1495,7 @@ class LogicalNot(PrimitiveWithInfer):
    @prim_attr_register
    def __init__(self):
        """init LogicalNot"""
        self.init_prim_io_names(inputs=['x'], outputs=['output'])

    def infer_shape(self, x_shape):
        return x_shape
......
# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
import pytest
from mindspore.ops import operations as P
from mindspore.nn import Cell
from mindspore.common.tensor import Tensor
import mindspore.context as context
import numpy as np


class Net(Cell):
    def __init__(self):
        super(Net, self).__init__()
        self.lessequal = P.LessEqual()

    def construct(self, x, y):
        return self.lessequal(x, y)


@pytest.mark.level0
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_lessequal():
    x = Tensor(np.array([[1, 2, 3]]).astype(np.float32))
    y = Tensor(np.array([[2]]).astype(np.float32))
    expect = [[True, True, False]]
    context.set_context(mode=context.PYNATIVE_MODE, device_target="GPU")
    lessequal = Net()
    output = lessequal(x, y)
    assert np.all(output.asnumpy() == expect)
    context.set_context(mode=context.GRAPH_MODE, device_target="GPU")
    lessequal = Net()
    output = lessequal(x, y)
    assert np.all(output.asnumpy() == expect)
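Note on the expected value in the test above: LessEqual broadcasts y (shape (1, 1)) against x (shape (1, 3)), so the expectation matches NumPy's broadcasting comparison. A quick cross-check, illustrative only:

# Illustrative cross-check of the broadcasting semantics assumed by test_lessequal.
import numpy as np
print(np.less_equal(np.array([[1, 2, 3]], np.float32), np.array([[2]], np.float32)))
# [[ True  True False]]  -- matches `expect`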
# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
import pytest
from mindspore.ops import operations as P
from mindspore.nn import Cell
from mindspore.common.tensor import Tensor
import mindspore.context as context
import numpy as np


class NetAnd(Cell):
    def __init__(self):
        super(NetAnd, self).__init__()
        self.logicaland = P.LogicalAnd()

    def construct(self, x, y):
        return self.logicaland(x, y)


class NetOr(Cell):
    def __init__(self):
        super(NetOr, self).__init__()
        self.logicalor = P.LogicalOr()

    def construct(self, x, y):
        return self.logicalor(x, y)


class NetNot(Cell):
    def __init__(self):
        super(NetNot, self).__init__()
        self.logicalnot = P.LogicalNot()

    def construct(self, x):
        return self.logicalnot(x)


x = np.array([True, False, False]).astype(np.bool)
y = np.array([False]).astype(np.bool)


@pytest.mark.level0
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_logicaland():
    context.set_context(mode=context.PYNATIVE_MODE, device_target="GPU")
    logicaland = NetAnd()
    output = logicaland(Tensor(x), Tensor(y))
    assert np.all(output.asnumpy() == np.logical_and(x, y))
    context.set_context(mode=context.GRAPH_MODE, device_target="GPU")
    logicaland = NetAnd()
    output = logicaland(Tensor(x), Tensor(y))
    assert np.all(output.asnumpy() == np.logical_and(x, y))


@pytest.mark.level0
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_logicalor():
    context.set_context(mode=context.PYNATIVE_MODE, device_target="GPU")
    logicalor = NetOr()
    output = logicalor(Tensor(x), Tensor(y))
    assert np.all(output.asnumpy() == np.logical_or(x, y))
    context.set_context(mode=context.GRAPH_MODE, device_target="GPU")
    logicalor = NetOr()
    output = logicalor(Tensor(x), Tensor(y))
    assert np.all(output.asnumpy() == np.logical_or(x, y))


@pytest.mark.level0
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_logicalnot():
    context.set_context(mode=context.PYNATIVE_MODE, device_target="GPU")
    logicalnot = NetNot()
    output = logicalnot(Tensor(x))
    assert np.all(output.asnumpy() == np.logical_not(x))
    context.set_context(mode=context.GRAPH_MODE, device_target="GPU")
    logicalnot = NetNot()
    output = logicalnot(Tensor(x))
    assert np.all(output.asnumpy() == np.logical_not(x))
# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
import pytest
from mindspore.ops import operations as P
from mindspore.nn import Cell
from mindspore.common.tensor import Tensor
import mindspore.context as context
import numpy as np


class Net(Cell):
    def __init__(self):
        super(Net, self).__init__()
        self.max = P.Maximum()

    def construct(self, x, y):
        return self.max(x, y)


@pytest.mark.level0
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_max():
    x = Tensor(np.array([[1, 2, 3]]).astype(np.float32))
    y = Tensor(np.array([[2]]).astype(np.float32))
    expect = [[2, 2, 3]]
    error = np.ones(shape=[1, 3]) * 1.0e-5
    context.set_context(mode=context.PYNATIVE_MODE, device_target="GPU")
    max = Net()
    output = max(x, y)
    diff = output.asnumpy() - expect
    assert np.all(diff < error)
    assert np.all(-diff < error)
    context.set_context(mode=context.GRAPH_MODE, device_target="GPU")
    max = Net()
    output = max(x, y)
    diff = output.asnumpy() - expect
    assert np.all(diff < error)
    assert np.all(-diff < error)
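The Maximum test bounds the error with two one-sided assertions, diff < error and -diff < error, which together enforce |output - expect| < 1e-5 element-wise. An equivalent NumPy check, illustrative only, with the expectation computed by NumPy's own broadcasting maximum:

# Illustrative equivalent of the two one-sided assertions in test_max.
import numpy as np
x = np.array([[1, 2, 3]], np.float32)
y = np.array([[2]], np.float32)
expect = np.maximum(x, y)              # broadcasts y, giving [[2, 2, 3]]
assert np.allclose(expect, [[2, 2, 3]], atol=1.0e-5)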