model_assessment.py
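"""Bootstrap model assessment.

Trains (or reloads) a classifier over n_run stratified train/val/test
splits and records, per run, a table of performance scores on the full
test set and a table of fairness scores comparing a protected group
against a privileged group. Configuration is supplied as three JSON
files; see the usage sketch at the bottom of this file.
"""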
import os
import json
import argparse
import numpy as np
import pandas as pd
from types import SimpleNamespace
from config.model import model_finalize
# util.io is expected to provide check_model_exist, load_model, save_model,
# and save_dataframe, all of which are used below.
from util.io import *
# `fariness_score` is the spelling exported by util.measure; alias it to the
# conventional spelling for use in this module.
from util.measure import performance_score, fariness_score as fairness_score
from sklearn.model_selection import train_test_split

def execute(cfg, mparam, grp):
    """Run n_run bootstrap train/test splits and score each fitted model."""
    # Load settings.
    model_name = cfg.model_name
    model_alg = cfg.model_alg
    model_id = cfg.model_id
    root_dir = cfg.root_dir
    models_dir = cfg.models_dir
    processed_dir = cfg.processed_dir
    output_roc_dir = cfg.output_roc_dir    # unused here; presumably consumed elsewhere in the pipeline
    output_shap_dir = cfg.output_shap_dir  # unused here; presumably consumed elsewhere in the pipeline
    output_score_dir = cfg.output_score_dir
    n_run = cfg.n_run

    # One row per bootstrap run, one column per measure.
    fairness_tab = pd.DataFrame(np.zeros((n_run, len(grp.fair_measure))), columns=grp.fair_measure)
    performance_tab = pd.DataFrame(np.zeros((n_run, len(grp.perf_measure))), columns=grp.perf_measure)

    X = pd.read_csv(os.path.join(root_dir, processed_dir, cfg.input_full_features), index_col=0)
    Y = pd.read_csv(os.path.join(root_dir, processed_dir, cfg.input_labels), index_col=0)
    for i in range(n_run):
        print(f"==================== iteration {i} running ====================")
        random_seed = i
        model_save_name = model_name + model_alg + model_id + "bootstrap/" + str(i)

        # Hold out a test set, then carve a validation set out of the
        # remaining training data; both splits are stratified on the labels.
        X_train, X_test, y_train, y_test = train_test_split(
            X, Y, stratify=Y,
            test_size=mparam.setting_params.train_test_ratio,
            random_state=random_seed)
        X_train, X_val, y_train, y_val = train_test_split(
            X_train, y_train, stratify=y_train,
            test_size=mparam.setting_params.train_val_ratio,
            random_state=random_seed)
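        # With, e.g., train_test_ratio = 0.2 and train_val_ratio = 0.125
        # (illustrative values; the actual ratios come from the JSON config),
        # the overall proportions are test = 0.2, val = 0.8 * 0.125 = 0.1,
        # and train = 0.8 * 0.875 = 0.7.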
        # Pre-run settings: locate protected and privileged rows in each
        # split (only the test-set indices are used in the scoring below).
        if grp.discrete:
            # Binary indicator columns: group membership is flagged by a 1.
            tr_protected_group_idx = np.where(X_train[grp.protected_feature_name] == 1)
            tr_privileged_group_idx = np.where(X_train[grp.privileged_feature_name] == 1)
            te_protected_group_idx = np.where(X_test[grp.protected_feature_name] == 1)
            te_privileged_group_idx = np.where(X_test[grp.privileged_feature_name] == 1)
            vl_protected_group_idx = np.where(X_val[grp.protected_feature_name] == 1)
            vl_privileged_group_idx = np.where(X_val[grp.privileged_feature_name] == 1)
        else:
            # Continuous attribute: the population is split at grp.cutoff.
            tr_protected_group_idx = np.where(X_train[grp.protected_feature_name] < grp.cutoff)
            tr_privileged_group_idx = np.where(X_train[grp.privileged_feature_name] >= grp.cutoff)
            te_protected_group_idx = np.where(X_test[grp.protected_feature_name] < grp.cutoff)
            te_privileged_group_idx = np.where(X_test[grp.privileged_feature_name] >= grp.cutoff)
            vl_protected_group_idx = np.where(X_val[grp.protected_feature_name] < grp.cutoff)
            vl_privileged_group_idx = np.where(X_val[grp.privileged_feature_name] >= grp.cutoff)
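        # Hypothetical group_info values, for illustration only: a one-hot
        # encoded attribute would use discrete = true with, say,
        # protected_feature_name = "sex_female" and privileged_feature_name =
        # "sex_male"; a continuous attribute such as age would use
        # discrete = false with cutoff marking the group boundary.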
        if grp.is_mask_attr:
            # Optionally drop the sensitive attributes before fitting.
            X_train = X_train.drop(columns=grp.masked_attrs)
            X_test = X_test.drop(columns=grp.masked_attrs)
            X_val = X_val.drop(columns=grp.masked_attrs)
        print(X_train.shape, X_test.shape, X_val.shape)
        if check_model_exist(root_dir, models_dir, model_save_name, "clf.pk"):
            clf = load_model(root_dir, models_dir, model_save_name, "clf.pk")
            print("Model exists; running fairness assessment.")
        else:
            print("Training model, then running fairness assessment.")
            clf, _, fit_params = model_finalize(mparam, X_val=X_val.to_numpy(), y_val=y_val.to_numpy())
            clf.fit(X_train.to_numpy(), y_train.to_numpy(), **fit_params)
            save_model(clf, root_dir, models_dir, model_save_name, "clf.pk")
        test_pred = clf.predict(X_test.to_numpy())
        test_pred_prob = clf.predict_proba(X_test.to_numpy())

        # Fairness is scored on the protected vs. privileged test subsets;
        # performance is scored on the full test set.
        y_protected_test = y_test.iloc[te_protected_group_idx]
        y_privileged_test = y_test.iloc[te_privileged_group_idx]
        y_protected_pred = test_pred[te_protected_group_idx]
        y_privileged_pred = test_pred[te_privileged_group_idx]
        fairness_tab.iloc[i] = fairness_score(y_protected_test, y_privileged_test, y_protected_pred, y_privileged_pred)
        performance_tab.iloc[i] = performance_score(y_test.to_numpy(), test_pred, test_pred_prob[:, 1])
        print(performance_tab.iloc[i])
    # Persist the per-run score tables and the full experimental setup.
    save_dataframe(fairness_tab, root_dir, output_score_dir, model_name + model_alg + model_id + grp.subgroup + "bootstrap", "fairness.csv")
    save_dataframe(performance_tab, root_dir, output_score_dir, model_name + model_alg + model_id + "bootstrap", "performance.csv")
    save_model({"config": cfg, "param": mparam, "group": grp}, root_dir, models_dir, model_name + model_alg + model_id + "bootstrap", "experimental_config.pk")

if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--setting", "-s", type=str, required=True)
    parser.add_argument("--model_params", "-m", type=str, required=True)
    parser.add_argument("--group_info", "-g", type=str, required=True)
    args = parser.parse_args()

    # Each JSON file is loaded into nested SimpleNamespace objects so that
    # keys can be accessed with attribute syntax (e.g.
    # mparam.setting_params.train_test_ratio).
    with open(args.setting) as json_file:
        cfg = json.load(json_file, object_hook=lambda d: SimpleNamespace(**d))
    with open(args.model_params) as json_file:
        mparam = json.load(json_file, object_hook=lambda d: SimpleNamespace(**d))
    with open(args.group_info) as json_file:
        grp = json.load(json_file, object_hook=lambda d: SimpleNamespace(**d))

    execute(cfg, mparam, grp)
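
# Usage sketch (file names are hypothetical; any JSON files providing the
# keys read above will do). setting.json must supply model_name, model_alg,
# model_id, root_dir, models_dir, processed_dir, output_roc_dir,
# output_shap_dir, output_score_dir, n_run, input_full_features, and
# input_labels; model_params.json must supply setting_params.train_test_ratio
# and setting_params.train_val_ratio; group_info.json must supply
# fair_measure, perf_measure, discrete, protected_feature_name,
# privileged_feature_name, cutoff, is_mask_attr, masked_attrs, and subgroup.
#
#   python model_assessment.py \
#       --setting setting.json \
#       --model_params model_params.json \
#       --group_info group_info.json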