# Module: Utility
# Author: Moez Ali <[email protected]>
# License: MIT


def version():
    print("1.0.0")
def check_metric(actual, prediction, metric, round=4):
    """
    Compute an evaluation metric on actual vs. predicted values.

    Parameters
    ----------
    actual : array-like
        Ground-truth target values.
    prediction : array-like
        Predicted values.
    metric : str
        One of: 'accuracy', 'recall', 'precision', 'f1', 'kappa', 'auc',
        'mae', 'mse', 'rmse', 'r2', 'rmsle', 'mape'.
    round : int, default=4
        Number of decimal places in the returned result.

    Returns
    -------
    float
        The metric value, rounded to `round` decimal places.
    """
    # general dependencies
    import numpy as np
    from sklearn import metrics

    # coerce to arrays so the element-wise operations below are safe
    actual = np.asarray(actual)
    prediction = np.asarray(prediction)

    # metric calculation starts here
    if metric == 'accuracy':
        result = metrics.accuracy_score(actual, prediction)
    elif metric == 'recall':
        result = metrics.recall_score(actual, prediction)
    elif metric == 'precision':
        result = metrics.precision_score(actual, prediction)
    elif metric == 'f1':
        result = metrics.f1_score(actual, prediction)
    elif metric == 'kappa':
        result = metrics.cohen_kappa_score(actual, prediction)
    elif metric == 'auc':
        result = metrics.roc_auc_score(actual, prediction)
    elif metric == 'mae':
        result = metrics.mean_absolute_error(actual, prediction)
    elif metric == 'mse':
        result = metrics.mean_squared_error(actual, prediction)
    elif metric == 'rmse':
        result = np.sqrt(metrics.mean_squared_error(actual, prediction))
    elif metric == 'r2':
        result = metrics.r2_score(actual, prediction)
    elif metric == 'rmsle':
        result = np.sqrt(np.mean(np.power(
            np.log(np.abs(prediction) + 1) - np.log(np.abs(actual) + 1), 2)))
    elif metric == 'mape':
        # mask out zero actuals before dividing to avoid division by zero
        mask = actual != 0
        result = np.fabs((actual[mask] - prediction[mask]) / actual[mask]).mean()
    else:
        raise ValueError("Unknown metric: {}".format(metric))

    # the `round` parameter shadows the builtin (name kept for API
    # compatibility), so round via numpy instead
    return float(np.round(result, round))
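
# A minimal usage sketch (illustrative inputs, not from the original file):
# >>> check_metric([0, 1, 1, 0], [0, 1, 0, 0], 'accuracy')
# 0.75
# >>> check_metric([3.0, 5.0, 2.5], [2.5, 5.0, 4.0], 'rmse')
# 0.9129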
def enable_colab():
    """
    Render plotly visuals in Google Colab.

    Registers a pre-run hook so that every cell reloads the plotly.js
    bundle, which Colab's output frames do not provide by default.
    """
    import IPython
    from IPython.display import display, HTML

    def configure_plotly_browser_state():
        display(HTML('''
            <script src="/static/components/requirejs/require.js"></script>
            <script>
              requirejs.config({
                  paths: {
                      base: '/static/base',
                      plotly: 'https://cdn.plot.ly/plotly-latest.min.js?noext',
                  },
              });
            </script>
        '''))

    IPython.get_ipython().events.register('pre_run_cell', configure_plotly_browser_state)
    print('Colab mode activated.')
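
# A minimal usage sketch; assumes this module is importable as
# pycaret.utils, per the upstream package layout. Run once at the top
# of a Colab notebook, before any plotting cells:
# >>> from pycaret.utils import enable_colab
# >>> enable_colab()
# Colab mode activated.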