eval_for_loveu_cvpr2022.py
import json

import numpy as np


def evaluate_for_scores(all_scores, all_labels):
    """Compute recall@1, recall@3, mean rank, and mean reciprocal rank."""
    recall_1, recall_3, mean_rank, mean_reciprocal_rank = [], [], [], []
    for scores, label in zip(all_scores, all_labels):
        # Rank candidate answers from highest to lowest score.
        sorted_indices = scores.argsort()[::-1]
        mask = sorted_indices == label
        recall_1.append(float(mask[0]))
        recall_3.append(float(mask[:3].sum()))
        # 1-based rank of the ground-truth answer.
        mean_rank.append(float(mask.nonzero()[0][0] + 1))
        # Reciprocal rank scaled by the number of candidates: len(mask) / rank.
        mean_reciprocal_rank.append(len(mask) / mean_rank[-1])
    recall_1 = sum(recall_1) / len(recall_1)
    recall_3 = sum(recall_3) / len(recall_3)
    mean_rank = sum(mean_rank) / len(mean_rank)
    mean_reciprocal_rank = sum(mean_reciprocal_rank) / len(mean_reciprocal_rank)
    return recall_1, recall_3, mean_rank, mean_reciprocal_rank
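

# --- Illustrative usage sketch (added for clarity; not part of the evaluation
# protocol). The scores and labels below are made-up toy values that show how
# evaluate_for_scores ranks candidates. It is wrapped in a helper so nothing
# runs as a side effect of importing this module.
def _toy_evaluate_for_scores_example():
    all_scores = [
        np.array([0.1, 0.7, 0.2, 0.0]),  # ground truth (index 1) ranked 1st
        np.array([0.4, 0.3, 0.2, 0.1]),  # ground truth (index 2) ranked 3rd
    ]
    all_labels = [np.array(1), np.array(2)]
    r1, r3, mr, mrr = evaluate_for_scores(all_scores, all_labels)
    # r1 == 0.5 (1 of 2 hits at rank 1), r3 == 1.0 (both within the top 3),
    # mr == (1 + 3) / 2 == 2.0, mrr == (4 / 1 + 4 / 3) / 2 (scaled by 4 candidates).
    return r1, r3, mr, mrr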


def evaluate(all_preds, all_annos):
    """Flatten per-step scores and labels over all keys, then score them."""
    all_scores, all_labels = [], []
    for key in all_annos:
        annos = all_annos[key]
        preds = all_preds[key]
        for pred, anno in zip(preds, annos):
            for scores_per_step, label_per_step in zip(pred['scores'], anno):
                all_scores.append(np.array(scores_per_step))
                all_labels.append(np.array(label_per_step))  # label starts from 0
    recall_1, recall_3, mean_rank, mean_reciprocal_rank = evaluate_for_scores(all_scores, all_labels)
    return recall_1, recall_3, mean_rank, mean_reciprocal_rank
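

# --- Illustrative usage sketch (added for clarity). The key name, the number
# of steps, and all values below are invented; the nesting is only inferred
# from how evaluate() indexes all_preds and all_annos, not taken from the
# official AssistQ annotation schema.
def _toy_evaluate_example():
    all_preds = {
        "sample_0": [                        # list of QA predictions for this key
            {"scores": [[0.1, 0.7, 0.2],     # step 1: one score per candidate answer
                        [0.5, 0.3, 0.2]]},   # step 2
        ],
    }
    all_annos = {
        "sample_0": [
            [1, 0],                          # ground-truth answer index per step
        ],
    }
    return evaluate(all_preds, all_annos)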


if __name__ == "__main__":
    # participants' results
    with open("submit_test.json") as f:
        all_preds = json.load(f)
    # ground-truth annotations (not yet released to participants)
    with open("/data/chenjoya/assistq/test_with_gt.json") as f:
        all_annos = json.load(f)
    recall_1, recall_3, mean_rank, mean_reciprocal_rank = evaluate(all_preds, all_annos)
    print(f"recall@1: {recall_1:.4f}, recall@3: {recall_3:.4f}, "
          f"mean rank: {mean_rank:.4f}, mean reciprocal rank: {mean_reciprocal_rank:.4f}")