提交 2efba9a3 编写于 作者: M Megvii Engine Team

fix(mgb/test): use both rtol and atol for stable test result

GitOrigin-RevId: 82a1453e4a482f43df5ae94bf44c666a79a16734
上级 f5f86a05
...@@ -6,6 +6,7 @@ ...@@ -6,6 +6,7 @@
# Unless required by applicable law or agreed to in writing, # Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an # software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import functools
import multiprocessing as mp import multiprocessing as mp
import platform import platform
...@@ -18,6 +19,8 @@ from megengine import Tensor ...@@ -18,6 +19,8 @@ from megengine import Tensor
from megengine.core._trace_option import use_tensor_shape from megengine.core._trace_option import use_tensor_shape
from megengine.module import BatchNorm1d, BatchNorm2d, SyncBatchNorm from megengine.module import BatchNorm1d, BatchNorm2d, SyncBatchNorm
_assert_allclose = functools.partial(np.testing.assert_allclose, atol=5e-6, rtol=5e-6)
@pytest.mark.skipif( @pytest.mark.skipif(
platform.system() == "Darwin", reason="do not imp GPU mode at macos now" platform.system() == "Darwin", reason="do not imp GPU mode at macos now"
...@@ -46,9 +49,9 @@ def test_syncbn(): ...@@ -46,9 +49,9 @@ def test_syncbn():
for i in range(steps): for i in range(steps):
yv = bn(Tensor(data[i])) yv = bn(Tensor(data[i]))
np.testing.assert_allclose(yv.numpy(), yv_expect, atol=5e-6) _assert_allclose(yv.numpy(), yv_expect)
np.testing.assert_allclose(bn.running_mean.numpy(), running_mean, atol=5e-6) _assert_allclose(bn.running_mean.numpy(), running_mean)
np.testing.assert_allclose(bn.running_var.numpy(), running_var, atol=5e-6) _assert_allclose(bn.running_var.numpy(), running_var)
xv = [] xv = []
for i in range(steps): for i in range(steps):
...@@ -118,13 +121,9 @@ def test_batchnorm(): ...@@ -118,13 +121,9 @@ def test_batchnorm():
yv = bn(Tensor(xv)) yv = bn(Tensor(xv))
yv_expect = (xv - mean) / sd yv_expect = (xv - mean) / sd
np.testing.assert_allclose(yv.numpy(), yv_expect, atol=5e-6) _assert_allclose(yv.numpy(), yv_expect)
np.testing.assert_allclose( _assert_allclose(bn.running_mean.numpy().reshape(-1), running_mean.reshape(-1))
bn.running_mean.numpy().reshape(-1), running_mean.reshape(-1), atol=5e-6 _assert_allclose(bn.running_var.numpy().reshape(-1), running_var.reshape(-1))
)
np.testing.assert_allclose(
bn.running_var.numpy().reshape(-1), running_var.reshape(-1), atol=5e-6
)
# test set 'training' flag to False # test set 'training' flag to False
mean_backup = bn.running_mean.numpy() mean_backup = bn.running_mean.numpy()
...@@ -138,7 +137,7 @@ def test_batchnorm(): ...@@ -138,7 +137,7 @@ def test_batchnorm():
np.testing.assert_equal(mean_backup, bn.running_mean.numpy()) np.testing.assert_equal(mean_backup, bn.running_mean.numpy())
np.testing.assert_equal(var_backup, bn.running_var.numpy()) np.testing.assert_equal(var_backup, bn.running_var.numpy())
yv_expect = (xv - running_mean) / np.sqrt(running_var + bn.eps) yv_expect = (xv - running_mean) / np.sqrt(running_var + bn.eps)
np.testing.assert_allclose(yv1.numpy(), yv_expect, atol=5e-6) _assert_allclose(yv1.numpy(), yv_expect)
@pytest.mark.skipif( @pytest.mark.skipif(
...@@ -172,13 +171,9 @@ def test_syncbn1d(): ...@@ -172,13 +171,9 @@ def test_syncbn1d():
yv = bn(Tensor(xv)) yv = bn(Tensor(xv))
yv_expect = (xv - mean) / sd yv_expect = (xv - mean) / sd
np.testing.assert_allclose(yv.numpy(), yv_expect, atol=5e-6) _assert_allclose(yv.numpy(), yv_expect)
np.testing.assert_allclose( _assert_allclose(bn.running_mean.numpy().reshape(-1), running_mean.reshape(-1))
bn.running_mean.numpy().reshape(-1), running_mean.reshape(-1), atol=5e-6 _assert_allclose(bn.running_var.numpy().reshape(-1), running_var.reshape(-1))
)
np.testing.assert_allclose(
bn.running_var.numpy().reshape(-1), running_var.reshape(-1), atol=5e-6
)
# test set 'training' flag to False # test set 'training' flag to False
mean_backup = bn.running_mean.numpy() mean_backup = bn.running_mean.numpy()
...@@ -192,7 +187,7 @@ def test_syncbn1d(): ...@@ -192,7 +187,7 @@ def test_syncbn1d():
np.testing.assert_equal(mean_backup, bn.running_mean.numpy()) np.testing.assert_equal(mean_backup, bn.running_mean.numpy())
np.testing.assert_equal(var_backup, bn.running_var.numpy()) np.testing.assert_equal(var_backup, bn.running_var.numpy())
yv_expect = (xv - running_mean) / np.sqrt(running_var + bn.eps) yv_expect = (xv - running_mean) / np.sqrt(running_var + bn.eps)
np.testing.assert_allclose(yv1.numpy(), yv_expect, atol=5e-6) _assert_allclose(yv1.numpy(), yv_expect)
def test_batchnorm2d(): def test_batchnorm2d():
...@@ -220,9 +215,9 @@ def test_batchnorm2d(): ...@@ -220,9 +215,9 @@ def test_batchnorm2d():
yv = bn(Tensor(xv)) yv = bn(Tensor(xv))
yv_expect = (xv - mean) / sd yv_expect = (xv - mean) / sd
np.testing.assert_allclose(yv.numpy(), yv_expect, atol=5e-6) _assert_allclose(yv.numpy(), yv_expect)
np.testing.assert_allclose(bn.running_mean.numpy(), running_mean, atol=5e-6) _assert_allclose(bn.running_mean.numpy(), running_mean)
np.testing.assert_allclose(bn.running_var.numpy(), running_var, atol=5e-6) _assert_allclose(bn.running_var.numpy(), running_var)
# test set 'training' flag to False # test set 'training' flag to False
mean_backup = bn.running_mean.numpy() mean_backup = bn.running_mean.numpy()
...@@ -236,7 +231,7 @@ def test_batchnorm2d(): ...@@ -236,7 +231,7 @@ def test_batchnorm2d():
np.testing.assert_equal(mean_backup, bn.running_mean.numpy()) np.testing.assert_equal(mean_backup, bn.running_mean.numpy())
np.testing.assert_equal(var_backup, bn.running_var.numpy()) np.testing.assert_equal(var_backup, bn.running_var.numpy())
yv_expect = (xv - running_mean) / np.sqrt(running_var + bn.eps) yv_expect = (xv - running_mean) / np.sqrt(running_var + bn.eps)
np.testing.assert_allclose(yv1.numpy(), yv_expect, atol=5e-6) _assert_allclose(yv1.numpy(), yv_expect)
@pytest.mark.skipif( @pytest.mark.skipif(
...@@ -271,9 +266,9 @@ def test_syncbn2d(): ...@@ -271,9 +266,9 @@ def test_syncbn2d():
yv = bn(Tensor(xv)) yv = bn(Tensor(xv))
yv_expect = (xv - mean) / sd yv_expect = (xv - mean) / sd
np.testing.assert_allclose(yv.numpy(), yv_expect, atol=5e-6) _assert_allclose(yv.numpy(), yv_expect)
np.testing.assert_allclose(bn.running_mean.numpy(), running_mean, atol=5e-6) _assert_allclose(bn.running_mean.numpy(), running_mean)
np.testing.assert_allclose(bn.running_var.numpy(), running_var, atol=5e-6) _assert_allclose(bn.running_var.numpy(), running_var)
# test set 'training' flag to False # test set 'training' flag to False
mean_backup = bn.running_mean.numpy() mean_backup = bn.running_mean.numpy()
...@@ -287,7 +282,7 @@ def test_syncbn2d(): ...@@ -287,7 +282,7 @@ def test_syncbn2d():
np.testing.assert_equal(mean_backup, bn.running_mean.numpy()) np.testing.assert_equal(mean_backup, bn.running_mean.numpy())
np.testing.assert_equal(var_backup, bn.running_var.numpy()) np.testing.assert_equal(var_backup, bn.running_var.numpy())
yv_expect = (xv - running_mean) / np.sqrt(running_var + bn.eps) yv_expect = (xv - running_mean) / np.sqrt(running_var + bn.eps)
np.testing.assert_allclose(yv1.numpy(), yv_expect, atol=5e-6) _assert_allclose(yv1.numpy(), yv_expect)
def test_batchnorm_no_stats(): def test_batchnorm_no_stats():
...@@ -310,7 +305,7 @@ def test_batchnorm_no_stats(): ...@@ -310,7 +305,7 @@ def test_batchnorm_no_stats():
yv = bn(Tensor(xv)) yv = bn(Tensor(xv))
yv_expect = (xv - mean) / sd yv_expect = (xv - mean) / sd
np.testing.assert_allclose(yv.numpy(), yv_expect, atol=5e-6) _assert_allclose(yv.numpy(), yv_expect)
@pytest.mark.skipif( @pytest.mark.skipif(
...@@ -340,7 +335,7 @@ def test_syncbn_no_stats(): ...@@ -340,7 +335,7 @@ def test_syncbn_no_stats():
yv = bn(Tensor(xv)) yv = bn(Tensor(xv))
yv_expect = (xv - mean) / sd yv_expect = (xv - mean) / sd
np.testing.assert_allclose(yv.numpy(), yv_expect, atol=5e-6) _assert_allclose(yv.numpy(), yv_expect)
def test_batchnorm2d_no_stats(): def test_batchnorm2d_no_stats():
...@@ -362,7 +357,7 @@ def test_batchnorm2d_no_stats(): ...@@ -362,7 +357,7 @@ def test_batchnorm2d_no_stats():
yv = bn(Tensor(xv)) yv = bn(Tensor(xv))
yv_expect = (xv - mean) / sd yv_expect = (xv - mean) / sd
np.testing.assert_allclose(yv.numpy(), yv_expect, atol=5e-6) _assert_allclose(yv.numpy(), yv_expect)
@pytest.mark.skipif( @pytest.mark.skipif(
...@@ -391,4 +386,4 @@ def test_syncbn2d_no_stats(): ...@@ -391,4 +386,4 @@ def test_syncbn2d_no_stats():
yv = bn(Tensor(xv)) yv = bn(Tensor(xv))
yv_expect = (xv - mean) / sd yv_expect = (xv - mean) / sd
np.testing.assert_allclose(yv.numpy(), yv_expect, atol=5e-6) _assert_allclose(yv.numpy(), yv_expect)
...@@ -60,7 +60,10 @@ def test_TQT(): ...@@ -60,7 +60,10 @@ def test_TQT():
def check_inp(a, b, c, a_np, b_np, c_np): def check_inp(a, b, c, a_np, b_np, c_np):
np.testing.assert_allclose( np.testing.assert_allclose(
f.forward(a, b).numpy(), nf.forward(a_np, b_np).astype("float32"), rtol=1e-6 f.forward(a, b).numpy(),
nf.forward(a_np, b_np).astype("float32"),
rtol=1e-6,
atol=1e-6,
) )
c1, c2 = f.backward(c) c1, c2 = f.backward(c)
c1_np, c2_np = nf.backward(c_np) c1_np, c2_np = nf.backward(c_np)
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册