diff --git a/pycaret/classification.py b/pycaret/classification.py
index e28554a531e4d487ab54910fed5a4236c8dab94e..561c5edf9fda8eb3464d53b7376647df5e6ae936 100644
--- a/pycaret/classification.py
+++ b/pycaret/classification.py
@@ -1702,6 +1702,7 @@ def setup(data,
         mlflow.set_experiment(exp_name_log)
 
         run_name_ = 'Session Initialized ' + str(USI)
+
         with mlflow.start_run(run_name=run_name_) as run:
 
             # Get active run to log as tag
@@ -1776,6 +1777,7 @@ def create_model(estimator = None,
                  method = None,
                  fold = 10,
                  round = 4,
+                 fit_only = False, #added in pycaret==2.0.0
                  verbose = True,
                  system = True, #added in pycaret==2.0.0
                  **kwargs): #added in pycaret==2.0.0
@@ -1842,6 +1844,11 @@ def create_model(estimator = None,
     round: integer, default = 4
     Number of decimal places the metrics in the score grid will be rounded to.
 
+    fit_only: bool, default = False
+    When fit_only is set to True, no cross validation or metric evaluation is performed.
+    The trained model object returned with fit_only is the same as the one returned
+    without fit_only.
+
     verbose: Boolean, default = True
     Score grid is not printed when verbose is set to False.
 
@@ -1932,13 +1939,20 @@ def create_model(estimator = None,
     if type(verbose) is not bool:
         sys.exit('(Type Error): Verbose parameter can only take argument as True or False.')
 
+    #checking system parameter
+    if type(system) is not bool:
+        sys.exit('(Type Error): System parameter can only take argument as True or False.')
+
+    #checking fit_only parameter
+    if type(fit_only) is not bool:
+        sys.exit('(Type Error): fit_only parameter can only take argument as True or False.')
+
     #checking boosting conflict with estimators
     boosting_not_supported = ['lda','qda','ridge','mlp','gpc','svm','knn', 'catboost']
     if method == 'Boosting' and estimator in boosting_not_supported:
         sys.exit("(Type Error): Estimator does not provide class_weights or predict_proba function and hence not supported for the Boosting method. Change the estimator or method to 'Bagging'.")
-
 
     '''
     ERROR HANDLING ENDS HERE
 
@@ -2206,7 +2220,11 @@ def create_model(estimator = None,
     MONITOR UPDATE STARTS
     '''
 
-    monitor.iloc[1,1:] = 'Initializing CV'
+    if fit_only:
+        monitor.iloc[1,1:] = 'Fitting ' + str(full_name)
+    else:
+        monitor.iloc[1,1:] = 'Initializing CV'
+
     if verbose:
         if html_param:
             update_display(monitor, display_id = 'monitor')
@@ -2215,6 +2233,23 @@ def create_model(estimator = None,
     MONITOR UPDATE ENDS
     '''
 
+    if fit_only:
+
+        if fix_imbalance_param:
+            if fix_imbalance_method_param is None:
+                from imblearn.over_sampling import SMOTE
+                resampler = SMOTE(random_state=seed)
+            else:
+                resampler = fix_imbalance_method_param
+
+            data_X,data_y = resampler.fit_sample(data_X,data_y)
+
+        model.fit(data_X,data_y)
+
+        if verbose:
+            clear_output()
+
+        return model
 
     fold_num = 1
 
@@ -4659,7 +4694,7 @@ def compare_models(blacklist = None,
                 update_display(monitor, display_id = 'monitor')
 
         progress.value += 1
         k = model_dict.get(i)
-        m = create_model(estimator=k, verbose = False, system=False)
+        m = create_model(estimator=k, verbose = False, system=False, fit_only=True)
         model_store_final.append(m)
 
         model_fit_end = time.time()
@@ -9156,7 +9191,7 @@ def evaluate_model(estimator):
                          )
 
-    d = interact(plot_model, estimator = fixed(estimator), plot = a)
+    d = interact(plot_model, estimator = fixed(estimator), plot = a, save = fixed(False), verbose = fixed(True), system = fixed(True))
 
 
 def finalize_model(estimator):
diff --git a/pycaret/nlp.py b/pycaret/nlp.py
index b292024094890a1a21aac52ed670471648cef08e..6ede63edcfab80b35150f7307c74db52ff6d0926 100644
--- a/pycaret/nlp.py
+++ b/pycaret/nlp.py
@@ -2980,7 +2980,7 @@ def evaluate_model(model):
 
     b = widgets.Dropdown(options=final_list, description='Topic #:', disabled=False)
 
-    d = interact_manual(plot_model, model = fixed(model), plot = a, topic_num=b)
+    d = interact_manual(plot_model, model = fixed(model), plot = a, topic_num=b, save=fixed(False), system=fixed(True))
diff --git a/pycaret/regression.py b/pycaret/regression.py
index 22661a469fd1d418dfd8cf02976145b55f429560..1ac7b169e4c0bd4dc55240e0d96a854dce0c0c64 100644
--- a/pycaret/regression.py
+++ b/pycaret/regression.py
@@ -1731,7 +1731,8 @@ def create_model(estimator = None,
                  ensemble = False,
                  method = None,
                  fold = 10,
-                 round = 4,
+                 round = 4,
+                 fit_only = False, #added in pycaret==2.0.0
                  verbose = True,
                  system = True, #added in pycaret==2.0.0
                  **kwargs): #added in pycaret==2.0.0
@@ -1805,6 +1806,11 @@ def create_model(estimator = None,
     round: integer, default = 4
     Number of decimal places the metrics in the score grid will be rounded to.
 
+    fit_only: bool, default = False
+    When fit_only is set to True, no cross validation or metric evaluation is performed.
+    The trained model object returned with fit_only is the same as the one returned
+    without fit_only.
+
     verbose: Boolean, default = True
     Score grid is not printed when verbose is set to False.
@@ -1888,6 +1894,13 @@ def create_model(estimator = None,
     if type(verbose) is not bool:
         sys.exit('(Type Error): Verbose parameter can only take argument as True or False.')
 
+    #checking system parameter
+    if type(system) is not bool:
+        sys.exit('(Type Error): System parameter can only take argument as True or False.')
+
+    #checking fit_only parameter
+    if type(fit_only) is not bool:
+        sys.exit('(Type Error): fit_only parameter can only take argument as True or False.')
 
 
     '''
@@ -2187,7 +2200,11 @@ def create_model(estimator = None,
     MONITOR UPDATE STARTS
     '''
 
-    monitor.iloc[1,1:] = 'Initializing CV'
+    if fit_only:
+        monitor.iloc[1,1:] = 'Fitting ' + str(full_name)
+    else:
+        monitor.iloc[1,1:] = 'Initializing CV'
+
     if verbose:
         if html_param:
             update_display(monitor, display_id = 'monitor')
@@ -2196,6 +2213,13 @@ def create_model(estimator = None,
     MONITOR UPDATE ENDS
     '''
 
+    if fit_only:
+        model.fit(data_X,data_y)
+
+        if verbose:
+            clear_output()
+
+        return model
 
     fold_num = 1
 
@@ -3994,7 +4018,7 @@ def compare_models(blacklist = None,
                 update_display(monitor, display_id = 'monitor')
 
         progress.value += 1
         k = model_dict.get(i)
-        m = create_model(estimator=k, verbose = False, system=False)
+        m = create_model(estimator=k, verbose = False, system=False, fit_only=True)
         model_store_final.append(m)
 
         if len(model_store_final) == 1:
@@ -8107,7 +8131,7 @@ def evaluate_model(estimator):
                          )
 
-    d = interact(plot_model, estimator = fixed(estimator), plot = a)
+    d = interact(plot_model, estimator = fixed(estimator), plot = a, save = fixed(False), verbose = fixed(True), system = fixed(True))
 
 
 def finalize_model(estimator):
diff --git a/pycaret/utils.py b/pycaret/utils.py
index 10bada52bcfc53b0f6203440aaac885b2fbfe33a..786c9db91b92ea5c40fe58ef35323fdf96fb94d7 100644
--- a/pycaret/utils.py
+++ b/pycaret/utils.py
@@ -3,7 +3,7 @@
 # License: MIT
 
 def version():
-    print("pycaret-nightly-0.13")
+    print("pycaret-nightly-0.14")
 
 def check_metric(actual, prediction, metric, round=4):
diff --git a/setup.py b/setup.py
index 0d960f4e82d11ac9c296a2f440fb40497ea14749..25b8424bdf269a02b96ea26cd8f89d1ec782f646 100644
--- a/setup.py
+++ b/setup.py
@@ -13,7 +13,7 @@ with open('requirements.txt') as f:
 setup(
     name="pycaret-nightly",
-    version="0.13",
+    version="0.14",
     description="Nightly build of PyCaret - An open source, low-code machine learning library in Python.",
     long_description=readme(),
     long_description_content_type="text/markdown",
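A minimal usage sketch of the new fit_only flag (illustrative only, not part of the patch): it assumes a pandas DataFrame named data with a label column named target, and uses 'lr', PyCaret's built-in logistic regression abbreviation.

    from pycaret.classification import setup, create_model

    # assumes `data` is an already loaded pandas DataFrame with a 'target' label column
    clf = setup(data, target='target')

    # fit_only=True skips cross validation and the score grid; the estimator is fit once on the training data
    lr = create_model('lr', fit_only=True)

    # default behaviour still runs 10-fold CV and prints the score grid
    lr_cv = create_model('lr')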