Commit 67d4cd12 authored by PyCaret

pycaret-nightly==0.33

Parent b1390689
@@ -3565,12 +3565,33 @@ def get_outliers(data,
     else:
         ignore_features_pass = ignore_features
-    global X, data_, seed, n_jobs_param, logging_param
+    global X, data_, seed, n_jobs_param, logging_param, logger
     n_jobs_param = n_jobs
     logging_param = False
+    import logging
+    logger = logging.getLogger('logs')
+    logger.setLevel(logging.DEBUG)
+    # create file handler and set level to debug
+    if logger.hasHandlers():
+        logger.handlers.clear()
+    ch = logging.FileHandler('logs.log')
+    ch.setLevel(logging.DEBUG)
+    # create formatter
+    formatter = logging.Formatter('%(asctime)s:%(levelname)s:%(message)s')
+    # add formatter to ch
+    ch.setFormatter(formatter)
+    # add ch to logger
+    logger.addHandler(ch)
     data_ = data.copy()
     seed = 99
...
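The block added above wires a module-level file logger into get_outliers(): any handlers left over from a previous call are cleared before a fresh FileHandler is attached, which prevents duplicate records when the function runs more than once in the same session. A minimal standalone sketch of that pattern, assuming the logger name and file name from the diff (the final info() call is illustrative, not part of the commit):

import logging

logger = logging.getLogger('logs')
logger.setLevel(logging.DEBUG)

# clear stale handlers so repeated setup does not duplicate log lines
if logger.hasHandlers():
    logger.handlers.clear()

# write DEBUG-and-above records to logs.log with a timestamped format
ch = logging.FileHandler('logs.log')
ch.setLevel(logging.DEBUG)
ch.setFormatter(logging.Formatter('%(asctime)s:%(levelname)s:%(message)s'))
logger.addHandler(ch)

logger.info('get_outliers() initialized')  # illustrative record, not from the diff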
@@ -2,7 +2,7 @@
 # Author: Moez Ali <moez.ali@queensu.ca>
 # License: MIT
-version_ = "pycaret-nightly-0.32"
+version_ = "pycaret-nightly-0.33"
 def version():
     print(version_)
@@ -21,73 +21,79 @@ def check_metric(actual, prediction, metric, round=4):
     #metric calculation starts here
-    if metric == 'accuracy':
+    if metric == 'Accuracy':
         from sklearn import metrics
         result = metrics.accuracy_score(actual,prediction)
         result = result.round(round)
-    elif metric == 'recall':
+    elif metric == 'Recall':
         from sklearn import metrics
         result = metrics.recall_score(actual,prediction)
         result = result.round(round)
-    elif metric == 'precision':
+    elif metric == 'Precision':
         from sklearn import metrics
         result = metrics.precision_score(actual,prediction)
         result = result.round(round)
-    elif metric == 'f1':
+    elif metric == 'F1':
         from sklearn import metrics
         result = metrics.f1_score(actual,prediction)
         result = result.round(round)
-    elif metric == 'kappa':
+    elif metric == 'Kappa':
         from sklearn import metrics
         result = metrics.cohen_kappa_score(actual,prediction)
         result = result.round(round)
-    elif metric == 'auc':
+    elif metric == 'AUC':
         from sklearn import metrics
         result = metrics.roc_auc_score(actual,prediction)
         result = result.round(round)
-    elif metric == 'mae':
+    elif metric == 'MCC':
+        from sklearn import metrics
+        result = metrics.matthews_corrcoef(actual,prediction)
+        result = result.round(round)
+    elif metric == 'MAE':
         from sklearn import metrics
         result = metrics.mean_absolute_error(actual,prediction)
         result = result.round(round)
-    elif metric == 'mse':
+    elif metric == 'MSE':
         from sklearn import metrics
         result = metrics.mean_squared_error(actual,prediction)
         result = result.round(round)
-    elif metric == 'rmse':
+    elif metric == 'RMSE':
         from sklearn import metrics
         result = metrics.mean_squared_error(actual,prediction)
         result = np.sqrt(result)
         result = result.round(round)
-    elif metric == 'r2':
+    elif metric == 'R2':
         from sklearn import metrics
         result = metrics.r2_score(actual,prediction)
         result = result.round(round)
-    elif metric == 'rmsle':
+    elif metric == 'RMSLE':
         result = np.sqrt(np.mean(np.power(np.log(np.array(abs(prediction))+1) - np.log(np.array(abs(actual))+1), 2)))
         result = result.round(round)
-    elif metric == 'mape':
+    elif metric == 'MAPE':
         mask = actual != 0
         result = (np.fabs(actual - prediction)/actual)[mask].mean()
...
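Note that the renamed branches make the metric argument case-sensitive in its new spelling: callers that passed 'accuracy' must now pass 'Accuracy', and 'MCC' (Matthews correlation coefficient) is newly supported via sklearn. A short sketch of equivalent calls, with illustrative data (the toy arrays are assumptions, not from the diff):

import numpy as np
from sklearn import metrics

# toy binary labels and predictions, for illustration only
actual = np.array([1, 0, 1, 1, 0, 1])
prediction = np.array([1, 0, 0, 1, 0, 1])

# mirrors what check_metric(actual, prediction, metric='MCC', round=4) computes
print(round(metrics.matthews_corrcoef(actual, prediction), 4))

# mirrors the 'Accuracy' branch
print(round(metrics.accuracy_score(actual, prediction), 4))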
@@ -13,7 +13,7 @@ with open('requirements.txt') as f:
 setup(
     name="pycaret-nightly",
-    version="0.32",
+    version="0.33",
     description="Nightly build of PyCaret - An open source, low-code machine learning library in Python.",
     long_description=readme(),
     long_description_content_type="text/markdown",
...