From eaf7146d4689168c23f2601c1f47c8675848fccc Mon Sep 17 00:00:00 2001
From: chang zherui <760161589@qq.com>
Date: Tue, 31 Mar 2020 16:18:04 +0800
Subject: [PATCH] modify longtime python ut

---
 mindspore/nn/layer/conv.py                                | 2 +-
 tests/ut/python/parallel/test_auto_parallel_reshape.py    | 6 +++---
 tests/ut/python/parallel/test_one_hot_net.py              | 8 ++++----
 tests/ut/python/parallel/test_reshape.py                  | 8 ++++----
 tests/ut/python/train/summary/test_summary_performance.py | 6 +++---
 5 files changed, 15 insertions(+), 15 deletions(-)

diff --git a/mindspore/nn/layer/conv.py b/mindspore/nn/layer/conv.py
index 6c78e1a71..666be9345 100644
--- a/mindspore/nn/layer/conv.py
+++ b/mindspore/nn/layer/conv.py
@@ -159,7 +159,7 @@ class Conv2d(_Conv):
         >>> net = nn.Conv2d(120, 240, 4, has_bias=False, weight_init='normal')
         >>> input = mindspore.Tensor(np.ones([1, 120, 1024, 640]), mindspore.float32)
         >>> net(input).shape()
-        (1, 240, 1024, 637)
+        (1, 240, 1024, 640)
     """
     @cell_attr_register
     def __init__(self,
diff --git a/tests/ut/python/parallel/test_auto_parallel_reshape.py b/tests/ut/python/parallel/test_auto_parallel_reshape.py
index 26e7e95a9..ed9f24cc0 100644
--- a/tests/ut/python/parallel/test_auto_parallel_reshape.py
+++ b/tests/ut/python/parallel/test_auto_parallel_reshape.py
@@ -49,16 +49,16 @@ def test_reshape_matmul():
             super().__init__()
             self.reshape = P.Reshape()
             self.matmul = P.MatMul()
-            self.matmul_weight = Parameter(Tensor(np.ones([25088, 256]), dtype=ms.float32), name="weight")
+            self.matmul_weight = Parameter(Tensor(np.ones([28, 64]), dtype=ms.float32), name="weight")

         def construct(self, x):
-            out = self.reshape(x, (256, 25088))
+            out = self.reshape(x, (64, 28))
             out = self.matmul(out, self.matmul_weight)
             return out

     size = 8
     context.set_auto_parallel_context(device_num=size, global_rank=0)
-    x = Tensor(np.ones([32*size, 512, 7, 7]), dtype=ms.float32)
+    x = Tensor(np.ones([8*size, 28, 1, 1]), dtype=ms.float32)

     net = GradWrap(NetWithLoss(Net()))
     context.set_auto_parallel_context(parallel_mode="auto_parallel")
diff --git a/tests/ut/python/parallel/test_one_hot_net.py b/tests/ut/python/parallel/test_one_hot_net.py
index bf2677056..87b4acfe3 100644
--- a/tests/ut/python/parallel/test_one_hot_net.py
+++ b/tests/ut/python/parallel/test_one_hot_net.py
@@ -247,15 +247,15 @@ def fc_with_initialize(input_channels, out_channels):
 class BNReshapeDenseBNNet(nn.Cell):
     def __init__(self):
         super(BNReshapeDenseBNNet, self).__init__()
-        self.batch_norm = bn_with_initialize(512)
+        self.batch_norm = bn_with_initialize(2)
         self.reshape = P.Reshape()
         self.batch_norm2 = nn.BatchNorm1d(512, affine=False)
-        self.fc = fc_with_initialize(512 * 32 * 32, 512)
+        self.fc = fc_with_initialize(2 * 32 * 32, 512)
         self.loss = SemiAutoOneHotNet(args=Args(), strategy=StrategyBatch())

     def construct(self, x, label):
         x = self.batch_norm(x)
-        x = self.reshape(x, (16, 512*32*32))
+        x = self.reshape(x, (16, 2*32*32))
         x = self.fc(x)
         x = self.batch_norm2(x)
         loss = self.loss(x, label)
@@ -266,7 +266,7 @@ def test_bn_reshape_dense_bn_train_loss():
     batch_size = 16
     device_num = 16
     context.set_auto_parallel_context(device_num=device_num, global_rank=0)
-    input = Tensor(np.ones([batch_size, 512, 32, 32]).astype(np.float32) * 0.01)
+    input = Tensor(np.ones([batch_size, 2, 32, 32]).astype(np.float32) * 0.01)
     label = Tensor(np.ones([batch_size]), dtype=ms.int32)

     net = GradWrap(NetWithLoss(BNReshapeDenseBNNet()))
diff --git a/tests/ut/python/parallel/test_reshape.py b/tests/ut/python/parallel/test_reshape.py
index 11ca435e5..43906aec2 100644
--- a/tests/ut/python/parallel/test_reshape.py
+++ b/tests/ut/python/parallel/test_reshape.py
@@ -490,15 +490,15 @@ def fc_with_initialize(input_channels, out_channels):
 class BNReshapeDenseBNNet(nn.Cell):
     def __init__(self):
         super(BNReshapeDenseBNNet, self).__init__()
-        self.batch_norm = bn_with_initialize(512)
+        self.batch_norm = bn_with_initialize(2)
         self.reshape = P.Reshape()
         self.cast = P.Cast()
         self.batch_norm2 = nn.BatchNorm1d(512, affine=False)
-        self.fc = fc_with_initialize(512 * 32 * 32, 512)
+        self.fc = fc_with_initialize(2 * 32 * 32, 512)

     def construct(self, x):
         x = self.batch_norm(x)
-        x = self.reshape(x, (16, 512*32*32))
+        x = self.reshape(x, (16, 2*32*32))
         x = self.fc(x)
         x = self.batch_norm2(x)
         return x
@@ -508,7 +508,7 @@ def test_bn_reshape_dense_bn_train():
     batch_size = 16
     device_num = 16
     context.set_auto_parallel_context(device_num=device_num, global_rank=0)
-    input = Tensor(np.ones([batch_size, 512, 32, 32]).astype(np.float32) * 0.01)
+    input = Tensor(np.ones([batch_size, 2, 32, 32]).astype(np.float32) * 0.01)

     net = GradWrap(NetWithLoss(BNReshapeDenseBNNet()))
     context.set_auto_parallel_context(parallel_mode="semi_auto_parallel")
diff --git a/tests/ut/python/train/summary/test_summary_performance.py b/tests/ut/python/train/summary/test_summary_performance.py
index f004d4e58..9ee9725d1 100644
--- a/tests/ut/python/train/summary/test_summary_performance.py
+++ b/tests/ut/python/train/summary/test_summary_performance.py
@@ -43,9 +43,9 @@ def get_test_data(step):
     tag1 = "xt1[:Tensor]"
     tag2 = "xt2[:Tensor]"
     tag3 = "xt3[:Tensor]"
-    np1 = np.random.random((50, 40, 30, 50))
-    np2 = np.random.random((50, 50, 30, 50))
-    np3 = np.random.random((40, 55, 30, 50))
+    np1 = np.random.random((5, 4, 3, 5))
+    np2 = np.random.random((5, 5, 3, 5))
+    np3 = np.random.random((4, 5, 3, 5))
     dict1 = {}
     dict1["name"] = tag1
--
GitLab
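Shape check for the sizes used in the patch above: a minimal standalone numpy
sketch, not part of the patch itself. It assumes stride 1 and MindSpore's
default pad_mode='same' for nn.Conv2d; conv_out is a hypothetical helper for
illustration, not a MindSpore API.

import numpy as np

def conv_out(size, kernel, pad_mode="same"):
    # 'same' padding keeps the spatial size (stride 1); 'valid' shrinks it.
    return size if pad_mode == "same" else size - kernel + 1

# conv.py docstring fix: with 'same' padding, W stays 640. The old value
# 637 would only be right for 'valid' padding: 640 - 4 + 1 = 637.
assert conv_out(640, 4, "same") == 640
assert conv_out(640, 4, "valid") == 637

# test_reshape_matmul: (8*size, 28, 1, 1) holds 64*28 elements, so the
# reshape to (64, 28) is legal and matches the new (28, 64) weight.
size = 8
x = np.ones((8 * size, 28, 1, 1), np.float32)
out = x.reshape(64, 28) @ np.ones((28, 64), np.float32)
assert out.shape == (64, 64)

# BNReshapeDenseBNNet: (16, 2, 32, 32) flattens to (16, 2*32*32) = (16, 2048),
# which matches the new fc_with_initialize(2 * 32 * 32, 512) input width.
flat = np.ones((16, 2, 32, 32), np.float32).reshape(16, 2 * 32 * 32)
assert flat.shape == (16, 2048)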