From 5d03f95e263d85195a6070dc32f095257674057f Mon Sep 17 00:00:00 2001
From: jrzaurin
Date: Sat, 5 Sep 2020 18:43:59 +0200
Subject: [PATCH] updated to new version of black and added --quiet to isort
 to avoid symlink warnings

---
 code_style.sh                               |  2 +-
 examples/adult_script.py                    |  4 +-
 pytorch_widedeep/models/deep_image.py       |  3 +-
 pytorch_widedeep/models/wide_deep.py        |  2 +-
 .../preprocessing/_preprocessors.py         | 37 +++++++------------
 pytorch_widedeep/utils/dense_utils.py       |  9 ++---
 pytorch_widedeep/utils/fastai_transforms.py | 12 ++----
 7 files changed, 27 insertions(+), 42 deletions(-)

diff --git a/code_style.sh b/code_style.sh
index c8c4d34..fd4d228 100755
--- a/code_style.sh
+++ b/code_style.sh
@@ -1,5 +1,5 @@
 # sort imports
-isort . pytorch_widedeep tests examples setup.py
+isort --quiet . pytorch_widedeep tests examples setup.py
 # Black code style
 black . pytorch_widedeep tests examples setup.py
 # flake8 standards
diff --git a/examples/adult_script.py b/examples/adult_script.py
index 46f72d5..91caa39 100644
--- a/examples/adult_script.py
+++ b/examples/adult_script.py
@@ -104,4 +104,6 @@ if __name__ == "__main__":
     # model = WideDeep(wide=wide, deepdense=deepdense)
     # model.load_state_dict(torch.load("model_weights/model_dict.t"))
     # #
-    import pdb; pdb.set_trace()  # breakpoint dde47114 //
+    import pdb
+
+    pdb.set_trace()  # breakpoint dde47114 //
diff --git a/pytorch_widedeep/models/deep_image.py b/pytorch_widedeep/models/deep_image.py
index f0d49f8..18be9f8 100644
--- a/pytorch_widedeep/models/deep_image.py
+++ b/pytorch_widedeep/models/deep_image.py
@@ -166,8 +166,7 @@ class DeepImage(nn.Module):
         self.output_dim = head_layers[-1]
 
     def forward(self, x: Tensor) -> Tensor:  # type: ignore
-        r"""Forward pass connecting the `'backbone'` with the `'head layers'`
-        """
+        r"""Forward pass connecting the `'backbone'` with the `'head layers'`"""
         x = self.backbone(x)
         x = x.view(x.size(0), -1)
         if self.head_layers is not None:
diff --git a/pytorch_widedeep/models/wide_deep.py b/pytorch_widedeep/models/wide_deep.py
index 06b4e39..cc3bb41 100644
--- a/pytorch_widedeep/models/wide_deep.py
+++ b/pytorch_widedeep/models/wide_deep.py
@@ -701,7 +701,7 @@ class WideDeep(nn.Module):
         X_test: Optional[Dict[str, np.ndarray]] = None,
     ) -> np.ndarray:
         r"""Returns the predicted probabilities for the test dataset for binary
-        and multiclass methods
+        and multiclass methods
         """
         preds_l = self._predict(X_wide, X_deep, X_text, X_img, X_test)
         if self.method == "binary":
diff --git a/pytorch_widedeep/preprocessing/_preprocessors.py b/pytorch_widedeep/preprocessing/_preprocessors.py
index 564c0d6..8a1ebc2 100644
--- a/pytorch_widedeep/preprocessing/_preprocessors.py
+++ b/pytorch_widedeep/preprocessing/_preprocessors.py
@@ -93,15 +93,16 @@ class WidePreprocessor(BasePreprocessor):
     """
 
     def __init__(
-        self, wide_cols: List[str], crossed_cols=None,
+        self,
+        wide_cols: List[str],
+        crossed_cols=None,
     ):
         super(WidePreprocessor, self).__init__()
         self.wide_cols = wide_cols
         self.crossed_cols = crossed_cols
 
     def fit(self, df: pd.DataFrame) -> BasePreprocessor:
-        """Fits the Preprocessor and creates required attributes
-        """
+        """Fits the Preprocessor and creates required attributes"""
         df_wide = self._prepare_wide(df)
         self.wide_crossed_cols = df_wide.columns.tolist()
         vocab = self._make_global_feature_list(df_wide[self.wide_crossed_cols])
@@ -110,8 +111,7 @@ class WidePreprocessor(BasePreprocessor):
         return self
 
     def transform(self, df: pd.DataFrame) -> np.array:
-        r"""Returns the processed dataframe
-        """
+        r"""Returns the processed dataframe"""
dataframe""" try: self.feature_dict except: @@ -147,8 +147,7 @@ class WidePreprocessor(BasePreprocessor): return decoded def fit_transform(self, df: pd.DataFrame) -> np.ndarray: - """Combines ``fit`` and ``transform`` - """ + """Combines ``fit`` and ``transform``""" return self.fit(df).transform(df) def _make_global_feature_list(self, df: pd.DataFrame) -> List: @@ -256,8 +255,7 @@ class DensePreprocessor(BasePreprocessor): ), "'embed_cols' and 'continuous_cols' are 'None'. Please, define at least one of the two." def fit(self, df: pd.DataFrame) -> BasePreprocessor: - """Fits the Preprocessor and creates required attributes - """ + """Fits the Preprocessor and creates required attributes""" if self.embed_cols is not None: df_emb = self._prepare_embed(df) self.label_encoder = LabelEncoder(df_emb.columns.tolist()).fit(df_emb) @@ -274,8 +272,7 @@ class DensePreprocessor(BasePreprocessor): return self def transform(self, df: pd.DataFrame) -> np.ndarray: - """Returns the processed ``dataframe`` as a np.ndarray - """ + """Returns the processed ``dataframe`` as a np.ndarray""" if self.embed_cols is not None: df_emb = self._prepare_embed(df) df_emb = self.label_encoder.transform(df_emb) @@ -302,8 +299,7 @@ class DensePreprocessor(BasePreprocessor): return df_deep.values def fit_transform(self, df: pd.DataFrame) -> np.ndarray: - """Combines ``fit`` and ``transform`` - """ + """Combines ``fit`` and ``transform``""" return self.fit(df).transform(df) def _prepare_embed(self, df: pd.DataFrame) -> pd.DataFrame: @@ -387,8 +383,7 @@ class TextPreprocessor(BasePreprocessor): self.verbose = verbose def fit(self, df: pd.DataFrame) -> BasePreprocessor: - """Builds the vocabulary - """ + """Builds the vocabulary""" texts = df[self.text_col].tolist() tokens = get_texts(texts) self.vocab = Vocab.create( @@ -399,8 +394,7 @@ class TextPreprocessor(BasePreprocessor): return self def transform(self, df: pd.DataFrame) -> np.ndarray: - """Returns the padded, `numericalised` sequences - """ + """Returns the padded, `numericalised` sequences""" try: self.vocab except: @@ -419,8 +413,7 @@ class TextPreprocessor(BasePreprocessor): return padded_seq def fit_transform(self, df: pd.DataFrame) -> np.ndarray: - """Combines ``fit`` and ``transform`` - """ + """Combines ``fit`` and ``transform``""" return self.fit(df).transform(df) @@ -502,8 +495,7 @@ class ImagePreprocessor(BasePreprocessor): return self def transform(self, df: pd.DataFrame) -> np.ndarray: - """Resizes the images to the input height and width. 
-        """
+        """Resizes the images to the input height and width."""
         try:
             self.aap
         except:
@@ -564,6 +556,5 @@ class ImagePreprocessor(BasePreprocessor):
         return np.asarray(resized_imgs)
 
     def fit_transform(self, df: pd.DataFrame) -> np.ndarray:
-        """Combines ``fit`` and ``transform``
-        """
+        """Combines ``fit`` and ``transform``"""
         return self.fit(df).transform(df)
diff --git a/pytorch_widedeep/utils/dense_utils.py b/pytorch_widedeep/utils/dense_utils.py
index c25006b..ef5dd68 100644
--- a/pytorch_widedeep/utils/dense_utils.py
+++ b/pytorch_widedeep/utils/dense_utils.py
@@ -45,8 +45,7 @@ class LabelEncoder(object):
         self.columns_to_encode = columns_to_encode
 
     def fit(self, df: pd.DataFrame) -> "LabelEncoder":
-        """Creates encoding attributes
-        """
+        """Creates encoding attributes"""
 
         df_inp = df.copy()
 
@@ -78,8 +77,7 @@ class LabelEncoder(object):
         return self
 
     def transform(self, df: pd.DataFrame) -> pd.DataFrame:
-        """Label Encoded the categories in ``columns_to_encode``
-        """
+        """Label Encoded the categories in ``columns_to_encode``"""
         try:
             self.encoding_dict
         except AttributeError:
@@ -126,8 +124,7 @@ class LabelEncoder(object):
         return self.fit(df).transform(df)
 
     def inverse_transform(self, df: pd.DataFrame) -> pd.DataFrame:
-        """Returns the original categories
-        """
+        """Returns the original categories"""
         for k, v in self.inverse_encoding_dict.items():
             df[k] = df[k].apply(lambda x: v[x])
 
diff --git a/pytorch_widedeep/utils/fastai_transforms.py b/pytorch_widedeep/utils/fastai_transforms.py
index b85f79d..ec85f29 100644
--- a/pytorch_widedeep/utils/fastai_transforms.py
+++ b/pytorch_widedeep/utils/fastai_transforms.py
@@ -78,8 +78,7 @@ defaults.text_spec_tok = [UNK, PAD, BOS, EOS, FLD, TK_MAJ, TK_UP, TK_REP, TK_WRE
 
 
 class BaseTokenizer:
-    """Basic class for a tokenizer function.
-    """
+    """Basic class for a tokenizer function."""
 
     def __init__(self, lang: str):
         self.lang = lang
@@ -278,8 +277,7 @@ class Tokenizer:
         return toks
 
     def _process_all_1(self, texts: Collection[str]) -> List[List[str]]:
-        """Process a list of ``texts`` in one process.
-        """
+        """Process a list of ``texts`` in one process."""
 
         tok = self.tok_func(self.lang)
         if self.special_cases:
@@ -332,13 +330,11 @@ class Vocab:
         self.stoi = defaultdict(int, {v: k for k, v in enumerate(self.itos)})
 
     def numericalize(self, t: Collection[str]) -> List[int]:
-        """Convert a list of str (or tokens) ``t`` to their ids.
-        """
+        """Convert a list of str (or tokens) ``t`` to their ids."""
         return [self.stoi[w] for w in t]
 
     def textify(self, nums: Collection[int], sep=" ") -> List[str]:
-        """Convert a list of ``nums`` (or indexes) to their tokens.
-        """
+        """Convert a list of ``nums`` (or indexes) to their tokens."""
         return sep.join([self.itos[i] for i in nums]) if sep is not None else [self.itos[i] for i in nums]  # type: ignore
 
     def __getstate__(self):
-- 
GitLab