import numpy as np
import pandas as pd
import pickle
import os
# Metrics
from fairlearn.metrics import (
    MetricFrame,
    selection_rate, demographic_parity_difference, demographic_parity_ratio,
    false_positive_rate, false_negative_rate, true_negative_rate, true_positive_rate,
    false_positive_rate_difference, false_negative_rate_difference,
    equalized_odds_difference)
from sklearn.metrics import balanced_accuracy_score, roc_auc_score, zero_one_loss
sample_record_filename_template = "{}_{}_{}_{}_{}"
feature_expand_dict = {
    'inacc': "ZeroOne",
    'dp': "Demographic parity difference",
    'eqodds': "Equalized odds difference",
    'prp': "Predictive value difference",
    'eqopp': "False negative rate difference",
    'fnr': "False negative rate difference",
    'fpr': "False positive rate difference",
    'ppv': "Positive predictive value difference",
    'npv': "Negative predictive value difference",
    'auc': "Overall AUC",
    'auc_diff': "AUC difference",
    'error_rate_diff': "Balanced error rate difference",
}
def compute_error(Yhat, proba, Y):
    """Return the empirical 0-1 error of `Yhat` and the expected 0-1 loss under `proba`."""
    err = 1 - np.sum(Yhat == Y) / Y.shape[0]
    exp_zeroone = np.mean(np.where(Y == 1, 1 - proba, proba))
    return err, exp_zeroone
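# Minimal sketch of compute_error on hypothetical arrays (not from the repo):
#   Y, Yhat, proba = np.array([1, 1, 1]), np.array([1, 0, 1]), np.array([0.9, 0.4, 0.8])
#   compute_error(Yhat, proba, Y)
#   # -> (0.333..., 0.3): one of three labels missed; E[0-1 loss] = mean([0.1, 0.6, 0.2])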
def create_features_dict(feature_list):
    """Map feature indices to their human-readable metric names."""
    num_of_features = len(feature_list)
    feature = {}
    for i, f in enumerate(feature_list):
        feature[i] = feature_expand_dict[f]
    return feature, num_of_features
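# Example (hypothetical short-name list):
#   create_features_dict(['inacc', 'dp'])
#   # -> ({0: 'ZeroOne', 1: 'Demographic parity difference'}, 2)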
def make_experiment_filename(**kwargs):
    return sample_record_filename_template.format(kwargs['dataset'], kwargs['demo_baseline'], kwargs['lr_theta'], kwargs['num_of_demos'], kwargs['noise_ratio']).replace('.', '-')
def make_demo_list_filename(**kwargs):
    return "demo_list_{}_{}_{}_{}".format(kwargs['dataset'], kwargs['demo_baseline'], kwargs['num_of_demos'], kwargs['noise_ratio']).replace('.', '-')
def store_object(obj, path, name, exp_idx):
    """Pickle `obj` to `path/name`. The first experiment (exp_idx == -1 or 0)
    starts a fresh file; subsequent experiments append their records to it."""
    if not os.path.exists(path):
        os.makedirs(path)
    filepath = os.path.join(path, name)
    mode = 'wb' if exp_idx <= 0 else 'ab'
    with open(filepath, mode) as file:
        pickle.dump(obj, file)
    print("exp {} wrote to {}".format(exp_idx, filepath))
def load_object(path, name, exp_num):
    """Unpickle from `path/name`. With exp_num == -1, return the single stored
    object; otherwise return the list of all objects appended to the file."""
    filepath = os.path.join(path, name)
    if exp_num == -1:
        with open(filepath, 'rb') as file:
            return pickle.load(file)
    data = []
    with open(filepath, 'rb') as file:
        try:
            while True:
                data.append(pickle.load(file))
        except EOFError:
            pass
    return data
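# Round-trip sketch (hypothetical paths and objects): the first experiment
# overwrites the file, later ones append, and load_object with exp_num >= 0
# returns everything as a list:
#   store_object(result_0, "out/adult", fname, exp_idx=0)
#   store_object(result_1, "out/adult", fname, exp_idx=1)
#   results = load_object("out/adult", fname, exp_num=1)  # [result_0, result_1]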
def find_gamma_superhuman_all(demo_list, model_params):
    """For each baseline (and the superhuman model itself), print the fraction
    of demonstrations it dominates, i.e. matches or beats on every feature."""
    if not model_params: return
    feature = model_params["feature"]
    num_of_features = model_params["num_of_features"]
    baseline = {0: 'eval_pp_dp', 1: 'eval_pp_eq_odds', 2: 'eval_fairll_dp', 3: 'eval_fairll_eqodds', 4: 'eval_MFOpt', 5: 'superhuman'}
    baseline_loss = np.zeros(len(baseline))
    dominated = np.zeros(len(baseline))
    for j in range(len(demo_list)):
        count_baseline = np.zeros(len(baseline))
        for i in range(num_of_features):
            demo_loss = demo_list[j].metric[i]
            model_loss = model_params['eval'][-1].loc[feature[i]][0]
            baseline_loss[-1] = model_loss  # the superhuman model occupies the last slot
            for k in range(len(baseline) - 1):
                baseline_loss[k] = model_params[baseline[k]].loc[feature[i]][0]
            for k in range(len(baseline)):
                if baseline_loss[k] <= demo_loss:
                    count_baseline[k] += 1
                if count_baseline[k] == num_of_features:  # beat this demo on all features
                    dominated[k] += 1
    dominated = dominated / len(demo_list)
    print(baseline)
    print("dominated:")
    print(dominated)
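# Interpretation sketch (hypothetical numbers): dominated[k] is the fraction of
# demonstrations that baseline k matches or beats on *every* feature at once.
# With 4 demos, dominated = [0.75, ...] means the first baseline dominated 3 of 4.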
def find_gamma_superhuman(demo_list, model_params):
    """For each feature, compute the fraction of demonstrations whose loss the
    model matches or beats (the per-feature gamma-superhuman rate)."""
    if not model_params: return
    feature = model_params["feature"]
    num_of_features = model_params["num_of_features"]
    print("gamma-superhuman: ")
    gamma_superhuman_arr = []
    for i in range(num_of_features):
        demo_loss = [demo.metric[i] for demo in demo_list]
        model_loss = model_params['eval'][-1].loc[feature[i]][0]
        count = sum(1 for loss in demo_loss if model_loss <= loss)
        gamma_superhuman = count / len(demo_loss)
        print(gamma_superhuman, feature[i])
        gamma_superhuman_arr.append(gamma_superhuman)
    return gamma_superhuman_arr
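# Worked example (hypothetical losses): if the model's loss on a feature is 0.10
# and the demonstration losses are [0.20, 0.05, 0.30], the model matches or
# beats 2 of the 3 demos, so gamma_superhuman = 2/3 for that feature.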
# Confusion-matrix counts (used by the predictive-value helpers below).
def true_positives(y_true, y_pred):
    return sum(1 for t, p in zip(y_true, y_pred) if t == 1 and p == 1)
def true_negatives(y_true, y_pred):
    return sum(1 for t, p in zip(y_true, y_pred) if t == 0 and p == 0)
def false_positives(y_true, y_pred):
    return sum(1 for t, p in zip(y_true, y_pred) if t == 0 and p == 1)
def false_negatives(y_true, y_pred):
    return sum(1 for t, p in zip(y_true, y_pred) if t == 1 and p == 0)
def positive_predictive_value_helper(y_true, y_pred):
    tp = true_positives(y_true, y_pred)
    fp = false_positives(y_true, y_pred)
    if tp == 0 and fp == 0:  # no positive predictions: define PPV as 0
        return 0
    return tp / (tp + fp)
def negative_predictive_value_helper(y_true, y_pred):
    tn = true_negatives(y_true, y_pred)
    fn = false_negatives(y_true, y_pred)
    if tn == 0 and fn == 0:  # no negative predictions: define NPV as 0
        return 0
    return tn / (tn + fn)
def predictive_value_helper(y_true, y_pred):
    return max(positive_predictive_value_helper(y_true, y_pred),
               negative_predictive_value_helper(y_true, y_pred))
def positive_predictive_value(y_true, y_pred, group):
    return MetricFrame(metrics=positive_predictive_value_helper, y_true=y_true, y_pred=y_pred, sensitive_features=group).difference(method='between_groups')
def negative_predictive_value(y_true, y_pred, group):
    return MetricFrame(metrics=negative_predictive_value_helper, y_true=y_true, y_pred=y_pred, sensitive_features=group).difference(method='between_groups')
def predictive_value(y_true, y_pred, group) -> float:
    """Largest between-group gap in either PPV or NPV."""
    fns = {"ppv": positive_predictive_value_helper, "npv": negative_predictive_value_helper}
    prp = MetricFrame(
        metrics=fns,
        y_true=y_true,
        y_pred=y_pred,
        sensitive_features=group,
    )
    return max(prp.difference(method="between_groups"))
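# Worked example (hypothetical groups): if group A has PPV 0.8 / NPV 0.9 and
# group B has PPV 0.6 / NPV 0.85, the between-group differences are 0.2 and
# 0.05, so predictive_value returns max(0.2, 0.05) = 0.2.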
# Helper functions
def get_metrics_df(models_dict, y_true, group, feature, is_demo=False):
    metrics_dict = {
        "ZeroOne": (
            lambda x: zero_one_loss(y_true, x), True),
        "Demographic parity difference": (
            lambda x: demographic_parity_difference(y_true, x, sensitive_features=group), True),
        "Equalized odds difference": (
            lambda x: equalized_odds_difference(y_true, x, sensitive_features=group), True),
        "Predictive value difference": (
            lambda x: predictive_value(y_true, x, group), True),
        "Overall selection rate": (
            lambda x: selection_rate(y_true, x), True),
        "Demographic parity ratio": (
            lambda x: demographic_parity_ratio(y_true, x, sensitive_features=group), True),
        "Overall balanced error rate": (
            lambda x: 1 - balanced_accuracy_score(y_true, x), True),
        "Balanced error rate difference": (
            lambda x: MetricFrame(metrics=balanced_accuracy_score, y_true=y_true, y_pred=x, sensitive_features=group).difference(method='between_groups'), True),
        "False positive rate difference": (
            lambda x: false_positive_rate_difference(y_true, x, sensitive_features=group), True),
        "False negative rate difference": (
            lambda x: false_negative_rate_difference(y_true, x, sensitive_features=group), True),
        "Positive predictive value difference": (
            lambda x: positive_predictive_value(y_true, x, group), True),
        "Negative predictive value difference": (
            lambda x: negative_predictive_value(y_true, x, group), True),
        "Overall AUC": (
            lambda x: 1.0 - roc_auc_score(y_true, x), False),
        "AUC difference": (
            lambda x: MetricFrame(metrics=roc_auc_score, y_true=y_true, y_pred=x, sensitive_features=group).difference(method='between_groups'), False),
    }
    df_dict = {}
    if is_demo:  # if we are creating demos, store all the metrics
        metrics_dict_subset = metrics_dict
    else:  # otherwise only store the metrics we care about
        metrics_dict_subset = {k: metrics_dict[k] for k in feature.values()}
    # Each entry is (metric_func, use_preds): use_preds selects the hard
    # predictions; otherwise the metric consumes the soft scores.
    for metric_name, (metric_func, use_preds) in metrics_dict_subset.items():
        df_dict[metric_name] = [metric_func(preds) if use_preds else metric_func(scores)
                                for model_name, (preds, scores) in models_dict.items()]
    return pd.DataFrame.from_dict(df_dict, orient="index", columns=models_dict.keys())
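# Usage sketch (hypothetical model outputs): models_dict maps a model name to a
# (hard_predictions, soft_scores) pair; `feature` comes from create_features_dict:
#   feature, _ = create_features_dict(['inacc', 'dp'])
#   models_dict = {"post_proc": (preds, scores)}
#   df = get_metrics_df(models_dict, y_test, group_test, feature)
#   # -> DataFrame indexed by metric name, one column per model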