forked from benjamin2044/PV_fault_Python
-
Notifications
You must be signed in to change notification settings - Fork 0
/
Copy pathML_Solar_model.py
141 lines (117 loc) · 5.57 KB
/
ML_Solar_model.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sb
# Load the labelled solar-panel fault dataset: columns 0-6 are features
# (column 6 is categorical), column 7 holds the class label.
dataset = pd.read_csv('Solar_categorical.csv')
subset = dataset.iloc[:3000]          # keep only the first 3000 rows
X = subset.iloc[:, 0:7].values
y = subset.iloc[:, 7].values
print(y)
###########################VISUALIZATION#################################################################
###########################################################################
from sklearn.model_selection import train_test_split

# Preliminary split (before any encoding) used only to visualise how the
# class labels are distributed across the training / validation partitions.
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.2, random_state=0)
#The ratio of train and validation set
all_labels = np.concatenate((y_train, y_test))
split_tags = np.concatenate((np.repeat('training', len(y_train)),
                             np.repeat('validation', len(y_test))))
hist_df = pd.DataFrame({'labels': all_labels, 'datatype': split_tags})

# Side-by-side per-class counts, coloured by partition.
ax = sb.countplot(data=hist_df, x='labels', hue='datatype', saturation=1, palette=['c', 'm'])
legend = ax.get_legend()
legend.set_title("")
for entry, caption in zip(legend.texts, ("Training", "Validation")):
    entry.set_text(caption)
plt.xlabel('labels', fontsize=20)
plt.ylabel('count', fontsize=20)
#####################################################################################
from sklearn.preprocessing import LabelEncoder
from keras.utils import to_categorical

# Turn the categorical feature in column 6 into integer codes, in place.
feature_encoder = LabelEncoder()
X[:, 6] = feature_encoder.fit_transform(X[:, 6])

from sklearn.model_selection import train_test_split

# Re-split now that every feature is numeric; the identical random_state
# reproduces the same partition that was plotted above.
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.2, random_state=0)

from sklearn.preprocessing import LabelEncoder

# Encode the string class labels as integers: fit on the training labels
# and reuse the same mapping on the validation labels, so the two sets
# share one encoding (inverse_transform below recovers the names).
encoder = LabelEncoder()
y_train = encoder.fit_transform(y_train)
y_test = encoder.transform(y_test)
#########################################################################################
from sklearn.svm import SVC

# Linear-kernel SVM; probability=True enables predict_proba at the cost
# of a slower fit.
svc_clf = SVC(kernel="linear", probability=True)
svc_clf.fit(X_train, y_train)
y_pred = svc_clf.predict(X_test)

# Map the integer predictions / ground truth back to the original names.
y_pred_label = encoder.inverse_transform(y_pred)
print(y_pred_label)
y_test_label = encoder.inverse_transform(y_test)
print(y_test_label)

from sklearn.metrics import confusion_matrix

# Confusion matrix rendered as an annotated heatmap with named axes.
cm = confusion_matrix(y_test_label, y_pred_label)
class_names = np.unique(y_test_label)
cm_fig = pd.DataFrame(cm, columns=class_names, index=class_names)
sb.set(font_scale=1.4)
sb.heatmap(cm_fig, cmap="RdBu_r", annot=True, annot_kws={"size":20})
plt.xlabel('Predicted label')
plt.ylabel('True label')
plt.title('Confusion Matrix')
#new_pred1 = svc_clf.predict(np.array([[2.1, 3.1, 1.1, 43, 99]]))
#pred1_label = encoder.inverse_transform(new_pred1)
#print(pred1_label)
#
#new_pred2 = svc_clf.predict(np.array([[4.1, 3.5, 4.6, 45, 100]]))
#pred2_label = encoder.inverse_transform(new_pred2)
#print(pred2_label)
#
#new_pred3 = svc_clf.predict(np.array([[0, 4, 0.4, 15, 64]]))
#pred3_label = encoder.inverse_transform(new_pred3)
#print(pred3_label)
###################################################################################
######################################Evaluation###################################
from sklearn.metrics import f1_score

def evaluate(labelsTrue, predictions):
    """Print and return the weighted F1 score of *predictions*.

    Parameters
    ----------
    labelsTrue : array-like
        Ground-truth class labels.
    predictions : array-like
        Predicted class labels, same length as ``labelsTrue``.

    Returns
    -------
    float or None
        Weighted F1 score, or ``None`` when ``predictions`` is empty.
        (The original version printed the score but returned nothing,
        which made the result unusable programmatically; returning it is
        backward compatible since existing callers ignore the value.)
    """
    if len(predictions) == 0:
        return None
    f1 = f1_score(labelsTrue, predictions, average="weighted")
    print("F1 score: ", f1)
    return f1

pred_svc = svc_clf.predict(X_test)
evaluate(y_test, pred_svc)
###############################################################################################
###############################################################################################
################################Compare several classifiers####################################
from sklearn.neighbors import KNeighborsClassifier

# k-nearest neighbours with default settings (k=5); fit returns the
# estimator itself, so fitting can be chained onto construction.
knn_clf = KNeighborsClassifier().fit(X_train, y_train)
pred_knn = knn_clf.predict(X_test)
evaluate(y_test, pred_knn)
#############################################################
from sklearn.ensemble import RandomForestClassifier

# 50-tree random forest; n_jobs=-1 trains across all available cores.
forest_clf = RandomForestClassifier(n_estimators=50, n_jobs=-1).fit(X_train, y_train)
pred_forest = forest_clf.predict(X_test)
evaluate(y_test, pred_forest)
#########################################################
from sklearn.ensemble import ExtraTreesClassifier

# Extremely randomised trees, same size/parallelism as the forest above.
trees_clf = ExtraTreesClassifier(n_estimators=50, n_jobs=-1).fit(X_train, y_train)
pred_trees = trees_clf.predict(X_test)
evaluate(y_test, pred_trees)
##########################################################
from sklearn.ensemble import AdaBoostClassifier

# AdaBoost with its default base estimator (decision stumps).
ada_clf = AdaBoostClassifier()
ada_clf.fit(X_train, y_train)
pred_ada = ada_clf.predict(X_test)
# Scoring is restricted to the classes AdaBoost actually predicted
# (labels=np.unique(pred_ada)) so f1_score does not warn about classes
# absent from the predictions — that is why evaluate() is not used here.
# BUG FIX: the original computed this score but discarded the result;
# store and print it so the comparison with the other models is visible.
f1_ada = f1_score(y_test, pred_ada, average='weighted', labels=np.unique(pred_ada))
print("F1 score: ", f1_ada)
#########################################################
from sklearn.naive_bayes import GaussianNB

# Gaussian naive Bayes baseline, default priors.
bayes_clf = GaussianNB().fit(X_train, y_train)
pred_bayes = bayes_clf.predict(X_test)
evaluate(y_test, pred_bayes)
######################################################################################################
###########################################Correlations between predicted classes#####################
# Pairwise correlation between each model's integer predictions, shown as
# a heatmap: strongly correlated columns indicate models that make
# similar decisions (and likely similar mistakes).
predictions = pd.DataFrame({'Rand_For': pred_forest,
                            'KNear_Neigh': pred_knn,
                            'Sup_Vec_Mac': pred_svc,
                            'ExtraTrees': pred_trees,
                            'AdaBoost': pred_ada,
                            'NaiveBayes': pred_bayes})
sb.heatmap(predictions.corr(), linewidths=0.5, vmax=1.0, square=True,
           cmap='jet', linecolor='white', annot=True)
########################################################################################################