from scipy.spatial import distance
from scipy.stats import entropy
import numpy as np
# Distance measures: KL divergence, Chebyshev, Clark, Canberra
# Similarity measures: cosine similarity, intersection similarity
# (both reported as distances, i.e. 1 - similarity)
# Assumes the probability distributions passed in are NumPy arrays
# of shape (n_samples, n_classes).
epsilon = 0.00001

def chebyshev(y_pred, y_true):
    # Mean over samples of the largest per-class absolute difference.
    diff = np.abs(y_pred - y_true)
    mx = np.max(diff, axis=1)
    return np.mean(mx)

def chebyshev_scipy(y_pred, y_true):
    n_samples = y_pred.shape[0]
    dists = []
    for i in range(n_samples):
        dists.append(distance.chebyshev(y_true[i], y_pred[i]))
    return np.mean(dists)

def clark(y_pred, y_true):
    # Clark distance: root of the summed squared relative differences.
    # Shifted copies keep the caller's arrays unmodified.
    y_pred = y_pred + epsilon
    y_true = y_true + epsilon
    n = np.square(y_true - y_pred)
    d = np.square(y_true + y_pred)
    return np.mean(np.sqrt(np.sum(n / d, axis=1)))

def canberra(y_pred, y_true):
    # Canberra distance: sum of absolute relative differences.
    y_pred = y_pred + epsilon
    y_true = y_true + epsilon
    n = np.abs(y_true - y_pred)
    d = y_true + y_pred
    return np.mean(np.sum(n / d, axis=1))

def canberra_scipy(y_pred, y_true):
    n_samples = y_pred.shape[0]
    dists = []
    for i in range(n_samples):
        dists.append(distance.canberra(y_true[i], y_pred[i]))
    return np.mean(dists)

def kldivergence(y_pred, y_true):
    # KL divergence D(y_true || y_pred) = sum(y_true * log(y_true / y_pred)).
    # TODO: normalize distributions if they don't sum to 1
    y_pred = y_pred + epsilon
    y_true = y_true + epsilon
    dist = np.sum(y_true * np.log(y_true / y_pred), axis=1)
    return np.mean(dist)
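
# A minimal sketch of the row normalization the TODO above mentions; the
# helper name normalize_rows is an addition for illustration, not part of
# the original module, and kldivergence() does not call it by default.
# Assumes non-negative rows with a positive sum.
def normalize_rows(p):
    # Divide each row by its sum so every row is a valid distribution.
    return p / np.sum(p, axis=1, keepdims=True)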

def kldivergence_scipy(y_pred, y_true):
    # scipy.stats.entropy(p, q) computes D(p || q) and normalizes its inputs.
    n_samples = y_pred.shape[0]
    dists = []
    for i in range(n_samples):
        dists.append(entropy(y_true[i], y_pred[i]))
    return np.mean(dists)

def cosine_similarity(y_pred, y_true):
    # Returns the cosine distance, 1 - mean cosine similarity, to match
    # scipy.spatial.distance.cosine used below.
    y_pred = y_pred + epsilon
    y_true = y_true + epsilon
    inner = dot(y_true, y_pred)
    d1 = np.sqrt(dot(y_true, y_true))
    d2 = np.sqrt(dot(y_pred, y_pred))
    res = np.mean(inner / (d1 * d2))
    return 1 - res

def cosine_similarity_scipy(y_pred, y_true):
    n_samples = y_pred.shape[0]
    dists = []
    for i in range(n_samples):
        dists.append(distance.cosine(y_true[i], y_pred[i]))
    return np.mean(dists)

def dot(v1, v2):
    # Row-wise dot product of two batches of vectors:
    # multiply element-wise, then sum over the class axis.
    return np.sum(v1 * v2, axis=1)

def intersection_similarity(y_pred, y_true):
    # Intersection similarity sums the per-class minima; reported as a
    # distance (1 - mean similarity) for consistency with the metrics above.
    sims = np.sum(np.minimum(y_pred, y_true), axis=1)
    return 1 - np.mean(sims)

def get_all_metrics():
    metrics_all = [chebyshev_scipy, clark, canberra_scipy,
                   kldivergence_scipy, cosine_similarity_scipy,
                   intersection_similarity]
    # Despite the function name, only KL divergence and Chebyshev are
    # returned at the moment; return metrics_all to evaluate everything.
    metrics_kld_cheby = [kldivergence_scipy, chebyshev]
    return metrics_kld_cheby

if __name__ == '__main__':
    # y_true = np.asarray([[0.2, 0.3, 0.5],
    #                      [0.2, 0.5, 0.3],
    #                      [0.9, 0.1, 0.0],
    #                      [0.7, 0.2, 0.1]])
    #
    # y_pred = np.asarray([[0.2, 0.3, 0.5],
    #                      [0.2, 0.4, 0.4],
    #                      [0.7, 0.2, 0.1],
    #                      [0.5, 0.3, 0.2]])
    y_true = np.asarray([[1, 0, 0], [0, 1.0, 0.0], [0, 0, 1]])
    y_pred = np.asarray([[1, 0, 0], [0, 0.9, 0.1], [0, 0, 1]])
    print(chebyshev(y_pred, y_true))
    print(chebyshev_scipy(y_pred, y_true))
    print(clark(y_pred, y_true))
    print(canberra(y_pred, y_true))
    print(canberra_scipy(y_pred, y_true))
    print(kldivergence(y_pred, y_true))
    print(kldivergence_scipy(y_pred, y_true))
    print(cosine_similarity(y_pred, y_true))
    print(cosine_similarity_scipy(y_pred, y_true))
    print(intersection_similarity(y_pred, y_true))
    # print(top3_acc(y_pred, y_true).eval())
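
    # Hedged usage sketch: run the same toy arrays through whatever
    # get_all_metrics() currently returns, printing each function's name
    # so the outputs are easy to tell apart.
    for metric in get_all_metrics():
        print(metric.__name__, metric(y_pred, y_true))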