未验证 提交 a21faec7 编写于 作者: G GehangZhang 提交者: GitHub

add flow.relu (#5847)

* add flow.relu

* add test_relu.py

* fix for comments

* add docstring for oneflow.relu

* fix bugs

* add docstring for flow.Tensor.relu

* fix for comments

* fix for comments v2

* auto format by CI

* fix license bug

* auto format by CI

* fix tets_quantization.py bug
Co-authored-by: NYao Chi <later@usopp.net>
Co-authored-by: Noneflow-ci-bot <ci-bot@oneflow.org>
Co-authored-by: Noneflow-ci-bot <69100618+oneflow-ci-bot@users.noreply.github.com>
上级 611933a0
......@@ -127,5 +127,5 @@ oneflow
grad_enable,
inference_mode,
is_grad_enabled,
.. autofunction:: oneflow.data.load_mnist(train_batch_size=100, test_batch_size=100, data_format='NCHW')
.. autofunction:: oneflow.relu
......@@ -105,7 +105,8 @@ OneFlow Tensor Class
register_hook,
repeat,
requires_grad,
requires_grad_,
requires_grad_,
relu,
reshape,
retain_grad,
round,
......
......@@ -322,3 +322,4 @@ from . import (
) # , saved_model NOTE(chengcheng): unavailable now
import oneflow.utils.data
import oneflow.utils.vision
from oneflow.nn.modules.relu import relu_op as relu
"""
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import oneflow as flow
from oneflow.framework.tensor import register_tensor_op
def relu_op(input, inplace=False):
"""
Applies the rectified linear unit function element-wise. See :class:`~oneflow.nn.ReLU` for more details.
Args:
inplace: If set to ``True``, will do this operation in-place. Default: ``False``
For examples:
.. code-block:: python
>>> import oneflow as flow
>>> import numpy as np
>>> ndarr = np.asarray([1, -2, 3])
>>> input = flow.Tensor(ndarr)
>>> output = flow.relu(input)
>>> output
tensor([1., 0., 3.], dtype=oneflow.float32)
"""
return flow.F.relu(input, inplace=inplace)
@register_tensor_op("relu")
def relu_tensor_op(input, inplace=False):
"""
Applies the rectified linear unit function element-wise. See :class:`~oneflow.relu` for more details.
"""
return flow.F.relu(input, inplace=inplace)
if __name__ == "__main__":
import doctest
doctest.testmod(raise_on_error=True)
......@@ -121,7 +121,7 @@ def _check_quantize(
)
out_np = quant_per_layer_cambricon(input_flatten, quantization_bit, scale_np[0])
rmse = np.sqrt(np.mean((out_of - out_np) ** 2))
assert rmse <= 1.0, "quantization op has bug!"
assert rmse <= 2.0, "quantization op has bug!"
def _run_test_quantize(
......
"""
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import unittest
import oneflow as flow
import oneflow.unittest
from automated_test_util import *
@flow.unittest.skip_unless_1n1d()
class TestRelu(flow.unittest.TestCase):
@autotest()
def test_flow_relu_with_random_data(test_case):
device = random_device()
x = random_pytorch_tensor(ndim=2, dim1=3).to(device)
y = torch.relu(x)
return y
if __name__ == "__main__":
unittest.main()
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册