diff --git a/model_zoo/official/cv/deeplabv3/src/md_dataset.py b/model_zoo/official/cv/deeplabv3/src/md_dataset.py
index e136da23e13b409dfeb9f20405f10681a8a4691a..358c28ef2af8510bce75ba120b1185a4e1eb727c 100644
--- a/model_zoo/official/cv/deeplabv3/src/md_dataset.py
+++ b/model_zoo/official/cv/deeplabv3/src/md_dataset.py
@@ -13,10 +13,10 @@
 # limitations under the License.
 # ============================================================================
 """Dataset module."""
+import numpy as np
 from PIL import Image
 import mindspore.dataset as de
 import mindspore.dataset.transforms.vision.c_transforms as C
-import numpy as np
 
 from .ei_dataset import HwVocRawDataset
 from .utils import custom_transforms as tr
diff --git a/model_zoo/official/cv/faster_rcnn/src/network_define.py b/model_zoo/official/cv/faster_rcnn/src/network_define.py
index b4fde3f824fd5317c7648cbc3dd6fb2477eeceea..348c72cee5389a7ab988c67e0e7d81242cb6ce6b 100644
--- a/model_zoo/official/cv/faster_rcnn/src/network_define.py
+++ b/model_zoo/official/cv/faster_rcnn/src/network_define.py
@@ -110,8 +110,6 @@ class LossCallBack(Callback):
 
 class LossNet(nn.Cell):
     """FasterRcnn loss method"""
-    def __init__(self):
-        super(LossNet, self).__init__()
 
     def construct(self, x1, x2, x3, x4, x5, x6):
         return x1 + x2
diff --git a/model_zoo/official/cv/maskrcnn/src/network_define.py b/model_zoo/official/cv/maskrcnn/src/network_define.py
index dc18da3956274946450ad40b415aca8cf8adcdd6..481632667bc9db933e03680a949784276a67ef0f 100644
--- a/model_zoo/official/cv/maskrcnn/src/network_define.py
+++ b/model_zoo/official/cv/maskrcnn/src/network_define.py
@@ -117,8 +117,6 @@ class LossCallBack(Callback):
 
 class LossNet(nn.Cell):
     """MaskRcnn loss method"""
-    def __init__(self):
-        super(LossNet, self).__init__()
 
     def construct(self, x1, x2, x3, x4, x5, x6, x7):
         return x1 + x2
diff --git a/model_zoo/official/cv/ssd/src/dataset.py b/model_zoo/official/cv/ssd/src/dataset.py
index d842aef709fd549ae2482f0b01a07bcb6756e1b5..f3f700042684ab2fcac91e22b8d77df2429f894b 100644
--- a/model_zoo/official/cv/ssd/src/dataset.py
+++ b/model_zoo/official/cv/ssd/src/dataset.py
@@ -20,8 +20,8 @@
 from __future__ import division
 import os
 import json
 import xml.etree.ElementTree as et
-import cv2
 import numpy as np
+import cv2
 import mindspore.dataset as de
 import mindspore.dataset.transforms.vision.c_transforms as C
diff --git a/model_zoo/official/cv/ssd/src/init_params.py b/model_zoo/official/cv/ssd/src/init_params.py
index b71ee2c4dc5a47bf0b680347b07e6fb888673280..e144bc7f084a81e0bdb71cd4a598468f0808c5cb 100644
--- a/model_zoo/official/cv/ssd/src/init_params.py
+++ b/model_zoo/official/cv/ssd/src/init_params.py
@@ -14,8 +14,8 @@
 # ============================================================================
 """Parameters utils"""
 
-from mindspore.common.initializer import initializer, TruncatedNormal
 import numpy as np
+from mindspore.common.initializer import initializer, TruncatedNormal
 
 def init_net_param(network, initialize_mode='TruncatedNormal'):
     """Init the parameters in net."""
diff --git a/model_zoo/official/cv/warpctc/src/warpctc_for_train.py b/model_zoo/official/cv/warpctc/src/warpctc_for_train.py
index 8cbf2f986a6699f6fe9ec4a86681c7608677cf19..5654f05e5dfd65cc888595e4884699e285a9cfe8 100755
--- a/model_zoo/official/cv/warpctc/src/warpctc_for_train.py
+++ b/model_zoo/official/cv/warpctc/src/warpctc_for_train.py
@@ -13,6 +13,7 @@
 # limitations under the License.
 # ============================================================================
 """Automatic differentiation with grad clip."""
+import numpy as np
 from mindspore.parallel._utils import (_get_device_num, _get_mirror_mean,
                                        _get_parallel_mode)
 from mindspore.train.parallel_utils import ParallelMode
@@ -24,7 +25,6 @@ from mindspore.nn.cell import Cell
 from mindspore.nn.wrap.grad_reducer import DistributedGradReducer
 import mindspore.nn as nn
 from mindspore.common.tensor import Tensor
-import numpy as np
 
 compute_norm = C.MultitypeFuncGraph("compute_norm")
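Note on the two LossNet hunks above (faster_rcnn and maskrcnn): the deleted __init__ only forwarded to super().__init__() and set no state of its own, so Python's normal inheritance gives identical behavior without it. A minimal sketch of the pattern; the class name below is hypothetical, not from the repo:

import mindspore.nn as nn

class SumLoss(nn.Cell):
    # No __init__ override: nn.Cell.__init__ still runs via inheritance,
    # which is exactly what the deleted boilerplate did and nothing more.
    def construct(self, x1, x2):
        return x1 + x2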
diff --git a/model_zoo/official/gnn/gat/src/gat.py b/model_zoo/official/gnn/gat/src/gat.py
index ff0c964e9b728d26caf30f0ba8573ff184a46a09..2245aae21495209b9a655ee075b4debe0bdca26b 100644
--- a/model_zoo/official/gnn/gat/src/gat.py
+++ b/model_zoo/official/gnn/gat/src/gat.py
@@ -297,6 +297,9 @@ class AttentionHead(nn.Cell):
         self.activation = activation
 
     def construct(self, input_feature, bias_mat, training=True):
+        """
+        Attention Head for Graph Attention Networks.
+        """
         if training is True:
             input_feature = self.in_drop(input_feature)
 
diff --git a/model_zoo/official/gnn/gat/src/utils.py b/model_zoo/official/gnn/gat/src/utils.py
index 03305ca3d306904b0a56cd5c17bd16e759829d74..06d32529944593bf2d4aad9cc6b9390b35334ee4 100644
--- a/model_zoo/official/gnn/gat/src/utils.py
+++ b/model_zoo/official/gnn/gat/src/utils.py
@@ -38,7 +38,7 @@ class MaskedSoftMaxLoss(nn.Cell):
         self.num_params = len(self.params)
 
     def construct(self, logits):
-        # calc l2 loss
+        """calc l2 loss"""
         l2_loss = 0
         for i in range(self.num_params):
             l2_loss = l2_loss + self.l2_coeff * P.L2Loss()(self.params[i])
@@ -69,6 +69,7 @@ class MaskedAccuracy(nn.Cell):
         self.mask = Tensor(mask, dtype=mstype.float32)
 
     def construct(self, logits):
+        """Calculate accuracy"""
         logits = P.Reshape()(logits, (-1, self.num_class))
         labels = P.Reshape()(self.label, (-1, self.num_class))
         mask = P.Reshape()(self.mask, (-1,))
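Many hunks in this patch, like the MaskedSoftMaxLoss one above, promote a leading comment to a docstring. The difference matters at runtime and to linters; a generic Python sketch, not repo code:

def with_comment():
    # calc l2 loss
    return 0

def with_docstring():
    """calc l2 loss"""
    return 0

print(with_comment.__doc__)    # None -- comments vanish at runtime
print(with_docstring.__doc__)  # 'calc l2 loss' -- visible to help() and pylint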
+ """ dropout = input_feature if self.dropout_flag: dropout = self.dropout(dropout) diff --git a/model_zoo/official/gnn/gcn/src/metrics.py b/model_zoo/official/gnn/gcn/src/metrics.py index e923f17e3918fb4ace777aaee88ba19e9018a405..0d47a9bc6c4ca7d6658e9248bccad38be30f6bed 100644 --- a/model_zoo/official/gnn/gcn/src/metrics.py +++ b/model_zoo/official/gnn/gcn/src/metrics.py @@ -39,6 +39,7 @@ class Loss(nn.Cell): self.param = param def construct(self, preds): + """Calculate loss""" param = self.l2_loss(self.param) loss = self.weight_decay * param preds = self.cast(preds, mstype.float32) diff --git a/model_zoo/official/nlp/bert/pretrain_eval.py b/model_zoo/official/nlp/bert/pretrain_eval.py index 5089d88459e7803ffdd70f666e862a87c8ea52c2..fa02ebbcd45a10bb0b041bc18126b6e3e6089bae 100644 --- a/model_zoo/official/nlp/bert/pretrain_eval.py +++ b/model_zoo/official/nlp/bert/pretrain_eval.py @@ -88,6 +88,7 @@ class BertPretrainEva(nn.Cell): def construct(self, input_ids, input_mask, token_type_id, masked_pos, masked_ids, masked_weights, nsp_label): + """Calculate prediction scores""" bs, _ = self.shape(input_ids) probs = self.bert(input_ids, input_mask, token_type_id, masked_pos) index = self.argmax(probs) diff --git a/model_zoo/official/nlp/bert/src/bert_for_finetune.py b/model_zoo/official/nlp/bert/src/bert_for_finetune.py index 5fbf1d81b9b8f0053d3fea003e739ba728d96516..97262b6faec7a55231b51cb6c1c77c8b3503344c 100644 --- a/model_zoo/official/nlp/bert/src/bert_for_finetune.py +++ b/model_zoo/official/nlp/bert/src/bert_for_finetune.py @@ -99,7 +99,7 @@ class BertFinetuneCell(nn.Cell): token_type_id, label_ids, sens=None): - + """Bert Finetune""" weights = self.weights init = False @@ -195,6 +195,7 @@ class BertSquadCell(nn.Cell): unique_id, is_impossible, sens=None): + """BertSquad""" weights = self.weights init = self.alloc_status() loss = self.network(input_ids, @@ -313,6 +314,7 @@ class BertSquad(nn.Cell): self.squeeze = P.Squeeze(axis=-1) def construct(self, input_ids, input_mask, token_type_id, start_position, end_position, unique_id, is_impossible): + """interface for SQuAD finetuning task""" logits = self.bert(input_ids, input_mask, token_type_id) if self.is_training: unstacked_logits_0 = self.squeeze(logits[:, :, 0:1]) diff --git a/model_zoo/official/nlp/bert/src/bert_for_pre_training.py b/model_zoo/official/nlp/bert/src/bert_for_pre_training.py index 1d12ddaf061bc1a41d830ada785aaaa08d91ac0e..14e00281d0c0d4edb38fd80438a0e19368079b0d 100644 --- a/model_zoo/official/nlp/bert/src/bert_for_pre_training.py +++ b/model_zoo/official/nlp/bert/src/bert_for_pre_training.py @@ -103,6 +103,7 @@ class GetMaskedLMOutput(nn.Cell): input_tensor, output_weights, positions): + """Get output log_probs""" flat_offsets = self.reshape( self.rng * self.seq_length_tensor, self.shape_flat_offsets) flat_position = self.reshape(positions + flat_offsets, self.last_idx) @@ -248,6 +249,7 @@ class BertNetworkWithLoss(nn.Cell): masked_lm_positions, masked_lm_ids, masked_lm_weights): + """Get pre-training loss""" prediction_scores, seq_relationship_score = \ self.bert(input_ids, input_mask, token_type_id, masked_lm_positions) total_loss = self.loss(prediction_scores, seq_relationship_score, diff --git a/model_zoo/official/nlp/bert/src/bert_model.py b/model_zoo/official/nlp/bert/src/bert_model.py index 8f972f8cecb518ad7b392150a4957accb1fc4274..4199c13b5a12cf561147877476496f1e9ae9e0f1 100644 --- a/model_zoo/official/nlp/bert/src/bert_model.py +++ b/model_zoo/official/nlp/bert/src/bert_model.py @@ -137,6 +137,7 @@ class 
diff --git a/model_zoo/official/nlp/bert/src/bert_model.py b/model_zoo/official/nlp/bert/src/bert_model.py
index 8f972f8cecb518ad7b392150a4957accb1fc4274..4199c13b5a12cf561147877476496f1e9ae9e0f1 100644
--- a/model_zoo/official/nlp/bert/src/bert_model.py
+++ b/model_zoo/official/nlp/bert/src/bert_model.py
@@ -137,6 +137,7 @@ class EmbeddingLookup(nn.Cell):
         self.shape = tuple(embedding_shape)
 
     def construct(self, input_ids):
+        """Get output and embeddings lookup table"""
         extended_ids = self.expand(input_ids, -1)
         flat_ids = self.reshape(extended_ids, self.shape_flat)
         if self.use_one_hot_embeddings:
@@ -205,6 +206,7 @@ class EmbeddingPostprocessor(nn.Cell):
             name='full_position_embeddings')
 
     def construct(self, token_type_ids, word_embeddings):
+        """Postprocessors apply positional and token type embeddings to word embeddings."""
         output = word_embeddings
         if self.use_token_type:
             flat_ids = self.reshape(token_type_ids, self.shape_flat)
@@ -288,6 +290,7 @@ class RelaPosMatrixGenerator(nn.Cell):
         self.cast = P.Cast()
 
     def construct(self):
+        """Generates matrix of relative positions between inputs."""
         range_vec_row_out = self.cast(F.tuple_to_array(F.make_range(self._length)), mstype.int32)
         range_vec_col_out = self.range_mat(range_vec_row_out, (self._length, -1))
         tile_row_out = self.tile(range_vec_row_out, (self._length,))
@@ -342,9 +345,9 @@ class RelaPosEmbeddingsGenerator(nn.Cell):
         self.matmul = P.BatchMatMul()
 
     def construct(self):
+        """Generate embedding for each relative position of dimension depth."""
         relative_positions_matrix_out = self.relative_positions_matrix()
 
-        # Generate embedding for each relative position of dimension depth.
         if self.use_one_hot_embeddings:
             flat_relative_positions_matrix = self.reshape(relative_positions_matrix_out, (-1,))
             one_hot_relative_positions_matrix = self.one_hot(
@@ -495,7 +498,7 @@ class BertAttention(nn.Cell):
             use_one_hot_embeddings=use_one_hot_embeddings)
 
     def construct(self, from_tensor, to_tensor, attention_mask):
-        # reshape 2d/3d input tensors to 2d
+        """reshape 2d/3d input tensors to 2d"""
         from_tensor_2d = self.reshape(from_tensor, self.shape_from_2d)
         to_tensor_2d = self.reshape(to_tensor, self.shape_to_2d)
         query_out = self.query_layer(from_tensor_2d)
@@ -784,6 +787,7 @@ class BertTransformer(nn.Cell):
         self.out_shape = (batch_size, seq_length, hidden_size)
 
     def construct(self, input_tensor, attention_mask):
+        """Multi-layer bert transformer."""
         prev_output = self.reshape(input_tensor, self.shape)
 
         all_encoder_layers = ()
@@ -915,7 +919,7 @@ class BertModel(nn.Cell):
         self._create_attention_mask_from_input_mask = CreateAttentionMaskFromInputMask(config)
 
     def construct(self, input_ids, token_type_ids, input_mask):
-
+        """Bidirectional Encoder Representations from Transformers."""
         # embedding
         if not self.token_type_ids_from_dataset:
             token_type_ids = self.token_type_ids
diff --git a/model_zoo/official/nlp/bert/src/finetune_eval_model.py b/model_zoo/official/nlp/bert/src/finetune_eval_model.py
index 047decc377aea65cb2e78ad7307241b7c5fed628..5cfb2020820310aaf11799ac611974e3a61aace3 100644
--- a/model_zoo/official/nlp/bert/src/finetune_eval_model.py
+++ b/model_zoo/official/nlp/bert/src/finetune_eval_model.py
@@ -110,6 +110,7 @@ class BertNERModel(nn.Cell):
         self.origin_shape = (config.batch_size, config.seq_length, self.num_labels)
 
     def construct(self, input_ids, input_mask, token_type_id):
+        """Return the final logits as the results of log_softmax."""
         sequence_output, _, _ = \
             self.bert(input_ids, token_type_id, input_mask)
         seq = self.dropout(sequence_output)
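RelaPosMatrixGenerator's new docstring (in the bert_model.py hunks above) says it generates a matrix of relative positions between inputs; the core of that is a pairwise difference of position indices. A NumPy sketch under an assumed sequence length; the clipping to a maximum distance done by the real class is omitted:

import numpy as np

length = 4
rng = np.arange(length)
rel_pos = rng[None, :] - rng[:, None]   # rel_pos[i, j] = j - i
print(rel_pos)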
diff --git a/model_zoo/official/nlp/bert/src/fused_layer_norm.py b/model_zoo/official/nlp/bert/src/fused_layer_norm.py
index 5dbe9999ad784246e7ca18918439d2b5625e6ac5..2736fdbd57791e9d4a07c55ef947b042ca66e7f8 100644
--- a/model_zoo/official/nlp/bert/src/fused_layer_norm.py
+++ b/model_zoo/official/nlp/bert/src/fused_layer_norm.py
@@ -13,6 +13,7 @@
 # limitations under the License.
 # ============================================================================
 """fused layernorm"""
+import numpy as np
 from mindspore.ops import operations as P
 from mindspore.ops import functional as F
 from mindspore.common.parameter import Parameter
@@ -21,7 +22,6 @@
 from mindspore.ops.primitive import constexpr
 import mindspore.common.dtype as mstype
 from mindspore.nn.cell import Cell
 
-import numpy as np
 
 __all__ = ['FusedLayerNorm']
@@ -101,6 +101,7 @@ class FusedLayerNorm(Cell):
         self.use_batch_norm = use_batch_norm
 
     def construct(self, input_x):
+        """Applies Layer Normalization over a mini-batch of inputs"""
         if self.use_batch_norm and self.training:
             ones = P.Fill()(mstype.float32, F.shape(input_x)[:self.begin_norm_axis], 1.0)
             zeros = P.Fill()(mstype.float32, F.shape(input_x)[:self.begin_norm_axis], 0.0)
diff --git a/model_zoo/official/nlp/mass/src/transformer/components.py b/model_zoo/official/nlp/mass/src/transformer/components.py
index 2efa1ee7579d9c7c7d1582ba50225960e1af239e..81db1a345ac232d86b374b1b9a07e98f5e2734f0 100644
--- a/model_zoo/official/nlp/mass/src/transformer/components.py
+++ b/model_zoo/official/nlp/mass/src/transformer/components.py
@@ -52,6 +52,7 @@ class LayerNorm(nn.Cell):
         self.get_shape = P.Shape()
 
     def construct(self, input_tensor):
+        """layer norm"""
         shape = self.get_shape(input_tensor)
         batch_size = shape[0]
         max_len = shape[1]
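FusedLayerNorm's new docstring describes standard layer normalization over a mini-batch. For reference, the math in NumPy; a sketch only, with an assumed epsilon, and without the batch-norm-based fast path the real class dispatches to:

import numpy as np

def layer_norm(x, gamma, beta, eps=1e-7):
    # normalize each row to zero mean / unit variance, then scale and shift
    mean = x.mean(axis=-1, keepdims=True)
    var = x.var(axis=-1, keepdims=True)
    return gamma * (x - mean) / np.sqrt(var + eps) + beta

x = np.random.randn(2, 8).astype(np.float32)
y = layer_norm(x, np.ones(8), np.zeros(8))
print(y.mean(axis=-1), y.std(axis=-1))   # ~0 and ~1 per row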
diff --git a/model_zoo/official/nlp/tinybert/src/fused_layer_norm.py b/model_zoo/official/nlp/tinybert/src/fused_layer_norm.py
index d290842c58a958f3325ee96ab6ade08514244d88..7b9fb56227c387bb7fded27967246faa6f070f3e 100644
--- a/model_zoo/official/nlp/tinybert/src/fused_layer_norm.py
+++ b/model_zoo/official/nlp/tinybert/src/fused_layer_norm.py
@@ -13,6 +13,7 @@
 # limitations under the License.
 # ============================================================================
 """fused layernorm"""
+import numpy as np
 from mindspore.ops import operations as P
 from mindspore.ops import functional as F
 from mindspore.common.parameter import Parameter
@@ -21,7 +22,6 @@
 from mindspore.ops.primitive import constexpr
 import mindspore.common.dtype as mstype
 from mindspore.nn.cell import Cell
 
-import numpy as np
 
 __all__ = ['FusedLayerNorm']
diff --git a/model_zoo/official/nlp/transformer/src/beam_search.py b/model_zoo/official/nlp/transformer/src/beam_search.py
index 9742924a736e282007fbb441c473cfb6ec447022..5200ee32c3a89880c08b7d9338a77257ce2a19d6 100644
--- a/model_zoo/official/nlp/transformer/src/beam_search.py
+++ b/model_zoo/official/nlp/transformer/src/beam_search.py
@@ -241,6 +241,7 @@ class BeamSearchDecoder(nn.Cell):
         return cur_input_ids, state_log_probs, state_seq, state_finished, state_length
 
     def construct(self, enc_states, enc_attention_mask):
+        """Get beam search result."""
         cur_input_ids = self.start_ids
         # beam search states
         state_log_probs = self.init_scores
diff --git a/model_zoo/official/nlp/transformer/src/transformer_for_train.py b/model_zoo/official/nlp/transformer/src/transformer_for_train.py
index 76237bee96fa688a2899a89ff6429681cdd629af..164c9391e91fe342632bc79e2caa7a46cc1f32da 100644
--- a/model_zoo/official/nlp/transformer/src/transformer_for_train.py
+++ b/model_zoo/official/nlp/transformer/src/transformer_for_train.py
@@ -55,7 +55,7 @@ class ClipGradients(nn.Cell):
                   grads,
                   clip_type,
                   clip_value):
-        # return grads
+        """return grads"""
         if clip_type != 0 and clip_type != 1:
             return grads
 
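The ClipGradients hunk above shows the convention that any clip_type other than 0 or 1 passes gradients through untouched. My reading of the two supported modes, sketched in NumPy (clip by value vs. rescale by global norm; the real cell dispatches to MindSpore ops rather than this code):

import numpy as np

def clip_grad(grad, clip_type, clip_value):
    if clip_type == 0:                       # clip each element by value
        return np.clip(grad, -clip_value, clip_value)
    if clip_type == 1:                       # rescale if the norm is too large
        norm = np.linalg.norm(grad)
        return grad * (clip_value / norm) if norm > clip_value else grad
    return grad                              # any other clip_type: no clipping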
diff --git a/model_zoo/official/nlp/transformer/src/transformer_model.py b/model_zoo/official/nlp/transformer/src/transformer_model.py
index fb33f526da9604a47c6043ac5ec8251ef744becc..f05757143e63c7f04c3ff92377bbd58a74d3a7a7 100644
--- a/model_zoo/official/nlp/transformer/src/transformer_model.py
+++ b/model_zoo/official/nlp/transformer/src/transformer_model.py
@@ -131,6 +131,7 @@ class EmbeddingLookup(nn.Cell):
         self.shape = P.Shape()
 
     def construct(self, input_ids):
+        """Get a embeddings lookup table with a fixed dictionary and size."""
         input_shape = self.shape(input_ids)
 
         flat_ids = self.reshape(input_ids, self.shape_flat)
@@ -200,6 +201,7 @@ class EmbeddingPostprocessor(nn.Cell):
         self.shape = P.Shape()
 
     def construct(self, word_embeddings):
+        """Postprocessors apply positional embeddings to word embeddings."""
         input_shape = self.shape(word_embeddings)
         input_len = input_shape[1]
 
@@ -377,7 +379,7 @@ class MultiheadAttention(nn.Cell):
         self.softmax_cast = P.Cast()
 
     def construct(self, from_tensor, to_tensor, attention_mask=None):
-        # reshape 2d/3d input tensors to 2d
+        """reshape 2d/3d input tensors to 2d"""
         from_tensor_2d = self.reshape(from_tensor, self.shape_from_2d)
         to_tensor_2d = self.reshape(to_tensor, self.shape_to_2d)
         query_out = self.query_layer(from_tensor_2d)
@@ -476,6 +478,7 @@ class SelfAttention(nn.Cell):
         self.reshape = P.Reshape()
         self.shape = (-1, hidden_size)
     def construct(self, input_tensor, memory_tensor, attention_mask):
+        """Apply self-attention."""
         input_tensor = self.reshape(input_tensor, self.shape)
         memory_tensor = self.reshape(memory_tensor, self.shape)
 
@@ -831,6 +834,7 @@ class CreateAttentionMaskFromInputMask(nn.Cell):
         self.batch_matmul = P.BatchMatMul()
 
     def construct(self, input_mask):
+        """Create attention mask according to input mask."""
         input_shape = self.shape(input_mask)
         shape_right = (input_shape[0], 1, input_shape[1])
         shape_left = input_shape + (1,)
@@ -876,6 +880,7 @@ class PredLogProbs(nn.Cell):
     def construct(self,
                   input_tensor,
                   output_weights):
+        """Get log probs."""
         input_tensor = self.reshape(input_tensor, self.shape_flat_sequence_tensor)
         input_tensor = self.cast(input_tensor, self.compute_type)
         output_weights = self.cast(output_weights, self.compute_type)
@@ -962,7 +967,10 @@ class TransformerDecoderStep(nn.Cell):
         self.cast_compute_type = CastWrapper(dst_type=compute_type)
 
     def construct(self, input_ids, enc_states, enc_attention_mask):
-        # input_ids: [batch_size * beam_width]
+        """
+        Multi-layer transformer decoder step.
+        input_ids: [batch_size * beam_width]
+        """
         # process embedding
         input_embedding, embedding_tables = self.tfm_embedding_lookup(input_ids)
         input_embedding = self.tfm_embedding_processor(input_embedding)
@@ -1122,6 +1130,7 @@ class TransformerModel(nn.Cell):
         self.encdec_mask = Tensor(ones, dtype=mstype.float32)
 
     def construct(self, source_ids, source_mask, target_ids=None, target_mask=None):
+        """Transformer with encoder and decoder."""
         # process source sentence
         src_word_embeddings, embedding_tables = self.tfm_embedding_lookup(source_ids)
         src_embedding_output = self.tfm_embedding_postprocessor_for_encoder(src_word_embeddings)
diff --git a/model_zoo/official/nlp/transformer/train.py b/model_zoo/official/nlp/transformer/train.py
index f84c4214e3438db7239f4fd31e23892c904c8e95..8b7dc434562216ab2bbef5b5edab4920d7aa5b96 100644
--- a/model_zoo/official/nlp/transformer/train.py
+++ b/model_zoo/official/nlp/transformer/train.py
@@ -69,6 +69,7 @@ class LossCallBack(Callback):
             time_stamp_init = True
 
     def step_end(self, run_context):
+        """Monitor the loss in training."""
         global time_stamp_first
         time_stamp_current = get_ms_timestamp()
         cb_params = run_context.original_args()
diff --git a/model_zoo/official/recommend/deepfm/src/callback.py b/model_zoo/official/recommend/deepfm/src/callback.py
index fce7b29fa62fda84416978e48cf3aac7abfe9698..fedc3548e02a714c43c8e604ddd515bb4fc0f34d 100644
--- a/model_zoo/official/recommend/deepfm/src/callback.py
+++ b/model_zoo/official/recommend/deepfm/src/callback.py
@@ -68,6 +68,7 @@ class LossCallBack(Callback):
         self._per_print_times = per_print_times
 
     def step_end(self, run_context):
+        """Monitor the loss in training."""
         cb_params = run_context.original_args()
         loss = cb_params.net_outputs.asnumpy()
         cur_step_in_epoch = (cb_params.cur_step_num - 1) % cb_params.batch_num + 1
diff --git a/model_zoo/official/recommend/deepfm/src/dataset.py b/model_zoo/official/recommend/deepfm/src/dataset.py
index 4904715220994cdf249862691d64d2679f4abcfe..e023786f429b530ba2a7f77cfbc709d530e6ee42 100644
--- a/model_zoo/official/recommend/deepfm/src/dataset.py
+++ b/model_zoo/official/recommend/deepfm/src/dataset.py
@@ -19,8 +19,8 @@
 import os
 import math
 from enum import Enum
 
-import pandas as pd
 import numpy as np
+import pandas as pd
 import mindspore.dataset.engine as de
 import mindspore.common.dtype as mstype
diff --git a/model_zoo/official/recommend/deepfm/src/deepfm.py b/model_zoo/official/recommend/deepfm/src/deepfm.py
index 0fbe3afa49cc2e35064262c8388327a3a18dd257..61dd3b5f85c4e85f78f5cbc4c093bc7b3b4fd289 100644
--- a/model_zoo/official/recommend/deepfm/src/deepfm.py
+++ b/model_zoo/official/recommend/deepfm/src/deepfm.py
@@ -147,6 +147,7 @@ class DenseLayer(nn.Cell):
         return act_func
 
     def construct(self, x):
+        """Dense Layer for Deep Layer of DeepFM Model."""
         x = self.act_func(x)
         if self.training:
             x = self.dropout(x)
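CreateAttentionMaskFromInputMask's new docstring summarizes an easy-to-miss step in the transformer_model.py hunks above: a (batch, seq) padding mask becomes a (batch, seq, seq) attention mask via an outer product, which the class builds with the reshapes and BatchMatMul visible in the hunk. The same idea in NumPy, with an assumed toy mask:

import numpy as np

input_mask = np.array([[1, 1, 1, 0]], dtype=np.float32)   # 1 = token, 0 = padding
left = input_mask[:, :, None]    # (batch, seq, 1)
right = input_mask[:, None, :]   # (batch, 1, seq)
attention_mask = left @ right    # (batch, seq, seq)
print(attention_mask[0])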
diff --git a/model_zoo/official/recommend/wide_and_deep/src/callbacks.py b/model_zoo/official/recommend/wide_and_deep/src/callbacks.py
index 932529270524bd9850a28fad54f7dc3b7eb89269..d2a2e9442792553428b8277691e80157c3b6f642 100644
--- a/model_zoo/official/recommend/wide_and_deep/src/callbacks.py
+++ b/model_zoo/official/recommend/wide_and_deep/src/callbacks.py
@@ -47,6 +47,7 @@ class LossCallBack(Callback):
         self.config = config
 
     def step_end(self, run_context):
+        """Monitor the loss in training."""
         cb_params = run_context.original_args()
         wide_loss, deep_loss = cb_params.net_outputs[0].asnumpy(), cb_params.net_outputs[1].asnumpy()
         cur_step_in_epoch = (cb_params.cur_step_num - 1) % cb_params.batch_num + 1
diff --git a/model_zoo/official/recommend/wide_and_deep/src/wide_and_deep.py b/model_zoo/official/recommend/wide_and_deep/src/wide_and_deep.py
index bda889fe87047cbccb4bf7c17c3cd6025e8f4aa1..b00b40905d0c6f541a78de2f40bd71c42697c756 100644
--- a/model_zoo/official/recommend/wide_and_deep/src/wide_and_deep.py
+++ b/model_zoo/official/recommend/wide_and_deep/src/wide_and_deep.py
@@ -13,6 +13,7 @@
 # limitations under the License.
 # ============================================================================
 """wide and deep model"""
+import numpy as np
 from mindspore import nn
 from mindspore import Parameter, ParameterTuple
 import mindspore.common.dtype as mstype
@@ -28,7 +29,6 @@ from mindspore.parallel._utils import _get_device_num, _get_parallel_mode, _get_
 from mindspore.train.parallel_utils import ParallelMode
 from mindspore.nn.wrap.grad_reducer import DistributedGradReducer
 from mindspore.communication.management import get_group_size
-import numpy as np
 
 np_type = np.float32
 ms_type = mstype.float32
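Taken together, the import moves in this patch (numpy in deeplabv3, ssd, warpctc, the fused_layer_norm files, deepfm, and wide_and_deep; cv2 in ssd; pandas in deepfm) appear to enforce the standard grouping that pylint's wrong-import-order check expects: standard library first, then third-party packages, then framework and local imports. The shape of the convention, not an excerpt from any one file:

import os                   # standard library

import numpy as np          # third-party packages
import pandas as pd

import mindspore.nn as nn   # framework imports follow

# local package imports (e.g. `from .utils import ...`) come last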