Commit 2e684e89 authored by mindspore-ci-bot, committed by Gitee

!1446 pylint clean

Merge pull request !1446 from liuxiao78/master
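
The hunks below are mechanical pylint cleanups rather than functional changes: standard-library imports are regrouped ahead of third-party imports, unused and function-local imports are removed or hoisted to module scope, and trailing-comma spacing inside brackets is normalized. A minimal sketch of the resulting import layout (the module names and helper here are illustrative only, not taken from any file in this pull request):

# Standard-library imports come first, third-party imports second
# (the grouping pylint's wrong-import-order check expects).
import os
import stat

import numpy as np

# Imports that are never referenced (for example `import time` in one of the
# serialization tests) are simply deleted to silence unused-import warnings.


def make_labels(batch_size):
    """Tiny helper so every remaining import is actually used."""
    _ = (os.getcwd(), stat.S_IRUSR)
    return np.ones([batch_size]).astype(np.int32)
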
@@ -167,7 +167,7 @@ class BertAttentionMask(nn.Cell):
         super(BertAttentionMask, self).__init__()
         self.has_attention_mask = has_attention_mask
-        self.multiply_data = Tensor([-1000.0, ], dtype=dtype)
+        self.multiply_data = Tensor([-1000.0,], dtype=dtype)
         self.multiply = P.Mul()
         if self.has_attention_mask:
@@ -198,7 +198,7 @@ class BertAttentionMaskBackward(nn.Cell):
                  dtype=mstype.float32):
         super(BertAttentionMaskBackward, self).__init__()
         self.has_attention_mask = has_attention_mask
-        self.multiply_data = Tensor([-1000.0, ], dtype=dtype)
+        self.multiply_data = Tensor([-1000.0,], dtype=dtype)
         self.multiply = P.Mul()
         self.attention_mask = Tensor(np.ones(shape=attention_mask_shape).astype(np.float32))
         if self.has_attention_mask:
...
@@ -136,7 +136,7 @@ def test_LSTM():
     train_network.set_train()
     train_features = Tensor(np.ones([64, max_len]).astype(np.int32))
-    train_labels = Tensor(np.ones([64, ]).astype(np.int32)[0:64])
+    train_labels = Tensor(np.ones([64,]).astype(np.int32)[0:64])
     losses = []
     for epoch in range(num_epochs):
         loss = train_network(train_features, train_labels)
...
@@ -45,7 +45,6 @@ class Net(nn.Cell):
 @non_graph_engine
 def test_AssignAdd_1():
     """test AssignAdd 1"""
-    import mindspore.context as context
     context.set_context(mode=context.GRAPH_MODE)
     net = Net()
     x = Tensor(np.ones([1]).astype(np.int64) * 100)
@@ -65,7 +64,6 @@ def test_AssignAdd_1():
 @non_graph_engine
 def test_AssignAdd_2():
     """test AssignAdd 2"""
-    import mindspore.context as context
     context.set_context(mode=context.GRAPH_MODE)
     net = Net()
     x = Tensor(np.ones([1]).astype(np.int64) * 102)
...
@@ -13,9 +13,9 @@
 # limitations under the License.
 # ============================================================================
 """test_dtype"""
+from dataclasses import dataclass
 import numpy as np
 import pytest
-from dataclasses import dataclass
 import mindspore as ms
 from mindspore.common import dtype
...
@@ -34,7 +34,7 @@ ndarr = np.ones((2, 3))
 def test_tensor_flatten():
     with pytest.raises(AttributeError):
-        lst = [1, 2, 3, 4, ]
+        lst = [1, 2, 3, 4,]
         tensor_list = ms.Tensor(lst, ms.float32)
         tensor_list = tensor_list.Flatten()
         print(tensor_list)
...
@@ -107,7 +107,7 @@ class TrainStepWrapForAdamDynamicLr(nn.Cell):
 class TempC2Wrap(nn.Cell):
-    def __init__(self, op, c1=None, c2=None, ):
+    def __init__(self, op, c1=None, c2=None,):
         super(TempC2Wrap, self).__init__()
         self.op = op
         self.c1 = c1
@@ -387,7 +387,7 @@ test_case_cell_ops = [
         'block': set_train(nn.Dense(in_channels=768,
                                     out_channels=3072,
                                     activation='gelu',
-                                    weight_init=TruncatedNormal(0.02), )),
+                                    weight_init=TruncatedNormal(0.02),)),
         'desc_inputs': [[3, 768]],
         'desc_bprop': [[3, 3072]]}),
     ('GetNextSentenceOutput', {
...
@@ -12,8 +12,8 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 # ============================================================================
-import numpy as np
 from collections import Counter
+import numpy as np
 import mindspore.nn as nn
 from mindspore import Tensor, Parameter
...
@@ -13,9 +13,9 @@
 # limitations under the License.
 # ============================================================================
 """ test container """
+from collections import OrderedDict
 import numpy as np
 import pytest
-from collections import OrderedDict
 import mindspore.nn as nn
 from mindspore import Tensor
...
@@ -60,5 +60,5 @@ def test_SoftmaxCrossEntropyExpand():
     loss = nn.SoftmaxCrossEntropyExpand()
     logits = Tensor(np.random.randint(0, 9, [100, 10]).astype(np.float32))
-    labels = Tensor(np.random.randint(0, 9, [10, ]).astype(np.float32))
+    labels = Tensor(np.random.randint(0, 9, [10,]).astype(np.float32))
     _executor.compile(loss, logits, labels)
@@ -54,7 +54,6 @@ def test_parameter_tuple_illegal():
 def test_parameter_init_illegal():
-    import numpy as np
     dat = np.array([[1, 2, 3], [2, 3, 4]])
     tensor = Tensor(dat)
     data_none = None
...
@@ -13,13 +13,11 @@
 # limitations under the License.
 # ============================================================================
 """ut for model serialize(save/load)"""
-import numpy as np
 import os
-import pytest
 import stat
-import time
-import mindspore.common.dtype as mstype
+import numpy as np
+import pytest
 import mindspore.nn as nn
 from mindspore import context
 from mindspore.common.parameter import Parameter
...
@@ -20,8 +20,8 @@ Usage:
 """
 import argparse
-import numpy as np
 import os
+import numpy as np
 import mindspore.context as context
 import mindspore.nn as nn
...
@@ -17,7 +17,6 @@ import numpy as np
 from mobilenetv2_combined import MobileNetV2
 import mindspore.context as context
-import mindspore.ops.operations as P
 from mindspore import Tensor
 from mindspore import nn
 from mindspore.nn.layer import combined
...
@@ -14,8 +14,8 @@
 # ============================================================================
 """ test_graph_summary """
 import logging
-import numpy as np
 import os
+import numpy as np
 import mindspore.nn as nn
 from mindspore import Model, context
...
@@ -15,9 +15,9 @@
 """Test histogram summary."""
 import logging
-import numpy as np
 import os
 import tempfile
+import numpy as np
 from mindspore.common.tensor import Tensor
 from mindspore.train.summary._summary_adapter import _calc_histogram_bins
...
@@ -19,8 +19,8 @@
 @Desc : test summary function
 """
 import logging
-import numpy as np
 import os
+import numpy as np
 import mindspore.nn as nn
 from mindspore import Model, context
...
@@ -19,10 +19,11 @@
 @Desc : test summary function
 """
 import logging
-import numpy as np
 import os
-import pytest
 import random
+import numpy as np
+import pytest
 import mindspore.nn as nn
 from mindspore.common.tensor import Tensor
@@ -127,7 +128,7 @@ def test_scalar_summary_sample_with_shape_1():
 class SummaryDemo(nn.Cell):
     """ SummaryDemo definition """
-    def __init__(self, ):
+    def __init__(self,):
         super(SummaryDemo, self).__init__()
         self.s = P.ScalarSummary()
         self.histogram_summary = P.HistogramSummary()
...
@@ -19,8 +19,8 @@
 @Desc : test summary function of abnormal input
 """
 import logging
-import numpy as np
 import os
+import numpy as np
 from mindspore.common.tensor import Tensor
 from mindspore.train.summary.summary_record import SummaryRecord
...
@@ -19,8 +19,8 @@
 @Desc : test summary function
 """
 import logging
-import numpy as np
 import os
+import numpy as np
 import mindspore.nn as nn
 from mindspore.common.tensor import Tensor
@@ -110,7 +110,7 @@ def get_test_data_check(step):
 class SummaryDemo(nn.Cell):
     """ SummaryDemo definition """
-    def __init__(self, ):
+    def __init__(self,):
         super(SummaryDemo, self).__init__()
         self.s = P.TensorSummary()
         self.add = P.TensorAdd()
...
@@ -13,10 +13,10 @@
 # limitations under the License.
 # ============================================================================
 """test callback function."""
-import numpy as np
 import os
-import pytest
 import stat
+import numpy as np
+import pytest
 import mindspore.common.dtype as mstype
 import mindspore.nn as nn
...
@@ -14,9 +14,9 @@
 # ============================================================================
 """ test_initializer """
 import math
+from functools import reduce
 import numpy as np
 import pytest as py
-from functools import reduce
 from scipy import stats
 import mindspore as ms
...
@@ -13,11 +13,11 @@
 # limitations under the License.
 # ============================================================================
 """ut for model serialize(save/load)"""
-import numpy as np
 import os
-import pytest
 import stat
 import time
+import pytest
+import numpy as np
 import mindspore.common.dtype as mstype
 import mindspore.nn as nn
@@ -31,7 +31,7 @@ from mindspore.ops import operations as P
 from mindspore.train.callback import _CheckpointManager
 from mindspore.train.serialization import save_checkpoint, load_checkpoint, load_param_into_net, \
     _exec_save_checkpoint, export, _save_graph
-from ..ut_filter import run_on_onnxruntime, non_graph_engine
+from ..ut_filter import non_graph_engine
 context.set_context(mode=context.GRAPH_MODE)
...