Unverified commit 6e18a5f9, authored by LYZFU, committed by GitHub

Dev logical_xor module (#5694)

* Dev logical_xor module

* Add single_client api, modify logical_xor implementation, add more test cases

* Update binary_func.h

* Update tensor.rst

* Update logical_xor.py

* Update binary_func.h

* make of_format

* add pytorch auto test

* remove float test

* add test case

* modify test

* delete help.py

* modify name

* make of_format

* modify single-client example result

* remove module that is not required, call the function directly; restore changes under the single_client folder

* remove unnecessary nn module package, add shape test for logical_xor

* auto format by CI
Co-authored-by: Zhenhua <1209435+hengzi@users.noreply.github.com>
Co-authored-by: Yao Chi <later@usopp.net>
Co-authored-by: oneflow-ci-bot <ci-bot@oneflow.org>
Co-authored-by: oneflow-ci-bot <69100618+oneflow-ci-bot@users.noreply.github.com>
Parent 24e4ea2b
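This commit wires `oneflow.logical_xor` through the full stack: a broadcast functor and functional-API binding on the C++ side, an XOR entry in the binary-func kernels, and a Python wrapper registered both as `oneflow.logical_xor` and as a tensor method. A minimal usage sketch, assuming a build that includes this commit (values taken from the docstring example below):

```python
import numpy as np
import oneflow as flow

# Zeros are treated as False, nonzeros as True.
x = flow.Tensor(np.array([1, 0, 1]), dtype=flow.float32)
y = flow.Tensor(np.array([1, 0, 0]), dtype=flow.float32)

print(flow.logical_xor(x, y))  # tensor([0, 0, 1], dtype=oneflow.int8)
print(x.logical_xor(y))        # same result via the registered tensor method
```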
@@ -57,6 +57,7 @@ oneflow
in_top_k,
logical_and,
logical_or,
logical_xor,
load,
log,
log1p,
@@ -70,9 +70,10 @@ OneFlow Tensor Class
kaiming_uniform_,
le,
log,
log1p,
logical_and,
logical_or,
logical_xor,
long,
lt,
masked_fill,
@@ -99,6 +99,10 @@
- name: "broadcast_logical_or"
signature: "Tensor BroadcastLogicalOr(Tensor x, Tensor y)"
bind_python: True
- name: "broadcast_logical_xor"
signature: "Tensor BroadcastLogicalXor(Tensor x, Tensor y)"
bind_python: True
- name: "broadcast_less"
signature: "Tensor BroadcastLess(Tensor x, Tensor y)"
@@ -147,6 +147,14 @@ class BroadcastLogicalOrFunctor : public BinaryFunctor {
}
};
class BroadcastLogicalXorFunctor : public BinaryFunctor {
public:
BroadcastLogicalXorFunctor() {
op_ = CHECK_JUST(
one::OpBuilder("broadcast_logical_xor").Input("x").Input("y").Output("z").Build());
}
};
class BroadcastLessFunctor : public BinaryFunctor {
public:
BroadcastLessFunctor() {
@@ -218,6 +226,7 @@ ONEFLOW_FUNCTION_LIBRARY(m) {
m.add_functor<impl::BroadcastGreaterEqualFunctor>("BroadcastGreaterEqual");
m.add_functor<impl::BroadcastLogicalAndFunctor>("BroadcastLogicalAnd");
m.add_functor<impl::BroadcastLogicalOrFunctor>("BroadcastLogicalOr");
m.add_functor<impl::BroadcastLogicalXorFunctor>("BroadcastLogicalXor");
m.add_functor<impl::BroadcastLessFunctor>("BroadcastLess");
m.add_functor<impl::BroadcastLessEqualFunctor>("BroadcastLessEqual");
m.add_functor<impl::ScalarAddByTensorFunctor>("ScalarAddByTensor");
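With the functor registered as `BroadcastLogicalXor` and bound in `functional_api.yaml`, the op becomes reachable from Python through the functional namespace; the `logical_xor` module further down calls it as `flow.F.broadcast_logical_xor`. A sketch of the direct call (note that the Python wrapper, not this entry point, handles casting `other` to `input`'s dtype):

```python
import numpy as np
import oneflow as flow

x = flow.Tensor(np.array([1, 0, 1]), dtype=flow.float32)
y = flow.Tensor(np.array([1, 0, 0]), dtype=flow.float32)

# Direct functional call; both operands must already share a dtype here.
z = flow.F.broadcast_logical_xor(x, y)
print(z)  # tensor([0, 0, 1], dtype=oneflow.int8)
```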
@@ -29,7 +29,7 @@ limitations under the License.
namespace oneflow {
#define ARITHMETIC_BINARY_FUNC_NAME_SEQ (Add)(Sub)(Mul)(Div)(Min)(Max)(FloorMod)(FMod)
#define LOGICAL_BINARY_FUNC_NAME_SEQ (EQ)(NE)(GT)(GE)(LT)(LE)(AND)(OR)
#define LOGICAL_BINARY_FUNC_NAME_SEQ (EQ)(NE)(GT)(GE)(LT)(LE)(AND)(OR)(XOR)
#define PREPEND_PREFIX_BINARY_FUNC(name) OF_PP_CAT(BinaryFunc, name)
#define ARITHMETIC_BINARY_FUNC_SEQ \
@@ -172,12 +172,6 @@ template<typename T>
struct BinaryFuncAND final {
static OF_DEVICE_FUNC const int8_t Invoke(const T x, const T y) { return x && y; }
};
template<typename T>
struct BinaryFuncOR final {
static OF_DEVICE_FUNC const int8_t Invoke(const T x, const T y) { return x || y; }
};
template<typename T>
struct BinaryFuncAll final {
static OF_DEVICE_FUNC const int8_t Invoke(const T x, const T y) {
@@ -186,13 +180,23 @@ struct BinaryFuncAll final {
};
SPECIALIZE_CONST_TYPE_BINARY_FUNC(BinaryFuncAND);
template<typename T>
struct BinaryFuncOR final {
static OF_DEVICE_FUNC const int8_t Invoke(const T x, const T y) { return x || y; }
};
template<typename T>
struct BinaryFuncAny final {
static OF_DEVICE_FUNC const int8_t Invoke(const T x, const T y) {
return BinaryFuncOR<T>::Invoke(x, y);
}
};
SPECIALIZE_CONST_TYPE_BINARY_FUNC(BinaryFuncAny);
SPECIALIZE_CONST_TYPE_BINARY_FUNC(BinaryFuncOR);
template<typename T>
struct BinaryFuncXOR final {
static OF_DEVICE_FUNC const int8_t Invoke(const T x, const T y) { return (!x) != (!y); }
};
SPECIALIZE_CONST_TYPE_BINARY_FUNC(BinaryFuncXOR);
#define NO_HALF_UTIL_FOUND \
printf("cuda arch must >= 530"); \
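`BinaryFuncXOR` computes `(!x) != (!y)`: each operand is first collapsed to a boolean with `!`, so any nonzero value of any numeric type counts as true, and the result is 1 exactly when the two booleans differ. An illustrative truth-table check in Python (not part of the commit):

```python
def xor(x, y):
    # Mirrors (!x) != (!y): normalize each operand to a boolean,
    # then return True exactly when the two booleans differ.
    return (not x) != (not y)

# Spot-check against the reference definition over mixed values.
for a in (0, 1, -2):
    for b in (0, 1, 3.5):
        assert xor(a, b) == (bool(a) != bool(b))
```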
@@ -38,7 +38,8 @@ namespace oneflow {
OF_PP_MAKE_TUPLE_SEQ("broadcast_less", LT) \
OF_PP_MAKE_TUPLE_SEQ("broadcast_less_equal", LE) \
OF_PP_MAKE_TUPLE_SEQ("broadcast_logical_and", AND) \
OF_PP_MAKE_TUPLE_SEQ("broadcast_logical_or", OR)
OF_PP_MAKE_TUPLE_SEQ("broadcast_logical_or", OR) \
OF_PP_MAKE_TUPLE_SEQ("broadcast_logical_xor", XOR)
} // namespace oneflow
@@ -136,8 +136,6 @@ import oneflow.nn.modules.flip
import oneflow.nn.modules.floor
import oneflow.nn.modules.greater
import oneflow.nn.modules.greater_equal
import oneflow.nn.modules.logical_and
import oneflow.nn.modules.logical_or
import oneflow.nn.modules.in_top_k
import oneflow.nn.modules.masked_select
import oneflow.nn.modules.math_ops
@@ -225,6 +223,7 @@ from oneflow.nn.modules.greater import greater_op as gt
from oneflow.nn.modules.greater_equal import greater_equal_op as ge
from oneflow.nn.modules.logical_and import logical_and_op as logical_and
from oneflow.nn.modules.logical_or import logical_or_op as logical_or
from oneflow.nn.modules.logical_xor import logical_xor_op as logical_xor
from oneflow.nn.modules.in_top_k import in_top_k_op as in_top_k
from oneflow.nn.modules.less import less_op as lt
from oneflow.nn.modules.less_equal import less_equal_op as le
"""
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import oneflow as flow
from oneflow.framework.tensor import register_tensor_op
def logical_xor_op(input, other):
"""
Computes the element-wise logical XOR of the given input tensors.
Zeros are treated as False and nonzeros are treated as True.
Args:
input (oneflow.Tensor): The input Tensor
other (oneflow.Tensor): The Tensor to compute XOR with
Returns:
oneflow.Tensor: The output Tensor
For example:
.. code-block:: python
>>> import numpy as np
>>> import oneflow as flow
>>> input1 = flow.Tensor(np.array([1, 0, 1]).astype(np.float32), dtype=flow.float32)
>>> input2 = flow.Tensor(np.array([1, 0, 0]).astype(np.float32), dtype=flow.float32)
>>> out = flow.logical_xor(input1, input2)
>>> out
tensor([0, 0, 1], dtype=oneflow.int8)
"""
assert input.shape == other.shape, "shapes of input and other must be the same"
if other.dtype != input.dtype:
other = flow.cast(other, input.dtype)
return flow.F.broadcast_logical_xor(input, other)
@register_tensor_op("logical_xor")
def logical_xor_op_tensor(input, other):
"""
logical_xor() -> Tensor
See :func:`oneflow.logical_xor`
"""
return logical_xor_op(input, other)
if __name__ == "__main__":
import doctest
doctest.testmod(raise_on_error=True)
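Note that although the underlying functor is a broadcast op, this wrapper asserts equal shapes, so broadcasting is not exercised through `flow.logical_xor`. A small sketch of the resulting behavior, assuming the wrapper above:

```python
import numpy as np
import oneflow as flow

a = flow.Tensor(np.ones((2, 3)), dtype=flow.float32)
b = flow.Tensor(np.ones((2, 1)), dtype=flow.float32)

try:
    flow.logical_xor(a, b)  # shapes differ, so the assert fires
except AssertionError as err:
    print(err)  # shapes of input and other must be the same
```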
"""
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import unittest
from collections import OrderedDict
import numpy as np
from test_util import GenArgList
import oneflow as flow
from automated_test_util import *
def _test_logical_xor_int(test_case, shape, device):
np_input = np.random.randint(-2, 4, size=shape)
np_other = np.random.randint(-2, 4, size=shape)
input = flow.Tensor(np_input, dtype=flow.float32, device=flow.device(device))
other = flow.Tensor(np_other, dtype=flow.float32, device=flow.device(device))
of_out = flow.logical_xor(input, other)
np_out = np.logical_xor(np_input, np_other)
test_case.assertTrue(np.array_equal(of_out.numpy(), np_out))
def _test_logical_xor_float(test_case, shape, device):
np_input = np.random.uniform(low=-5, high=5, size=shape)
np_other = np.random.uniform(low=-5, high=5, size=shape)
input = flow.Tensor(np_input, dtype=flow.float32, device=flow.device(device))
other = flow.Tensor(np_other, dtype=flow.float32, device=flow.device(device))
of_out = flow.logical_xor(input, other)
np_out = np.logical_xor(np_input, np_other)
test_case.assertTrue(np.array_equal(of_out.numpy(), np_out))
def _test_tensor_logical_xor_int(test_case, shape, device):
np_input = np.random.randint(-2, 4, size=shape)
np_other = np.random.randint(-2, 4, size=shape)
input = flow.Tensor(np_input, dtype=flow.float32, device=flow.device(device))
other = flow.Tensor(np_other, dtype=flow.float32, device=flow.device(device))
of_out = input.logical_xor(other)
np_out = np.logical_xor(np_input, np_other)
test_case.assertTrue(np.array_equal(of_out.numpy(), np_out))
def _test_tensor_logical_xor_float(test_case, shape, device):
np_input = np.random.uniform(low=-5, high=5, size=shape)
np_other = np.random.uniform(low=-5, high=5, size=shape)
input = flow.Tensor(np_input, dtype=flow.float32, device=flow.device(device))
other = flow.Tensor(np_other, dtype=flow.float32, device=flow.device(device))
of_out = input.logical_xor(other)
np_out = np.logical_xor(np_input, np_other)
test_case.assertTrue(np.array_equal(of_out.numpy(), np_out))
@flow.unittest.skip_unless_1n1d()
class TestLogicalXorModule(flow.unittest.TestCase):
def test_logical_xor(test_case):
arg_dict = OrderedDict()
arg_dict["test_fun"] = [
_test_logical_xor_int,
_test_tensor_logical_xor_int,
_test_logical_xor_float,
_test_tensor_logical_xor_float,
]
arg_dict["shape"] = [(2, 3), (2, 4, 5)]
arg_dict["device"] = ["cpu", "cuda"]
for arg in GenArgList(arg_dict):
arg[0](test_case, *arg[1:])
@autotest(n=10, auto_backward=False)
def test_logical_xor_with_random_data(test_case):
device = random_device()
shape = random_tensor().value().shape
x1 = random_pytorch_tensor(len(shape), *shape, requires_grad=False).to(device)
x2 = random_pytorch_tensor(len(shape), *shape, requires_grad=False).to(device)
y = torch.logical_xor(x1, x2)
return y
if __name__ == "__main__":
unittest.main()