evaluate.py
import argparse
import os

from evaluator.evaluator import evaluate_dataset
from utils import write_doc

"""
* Note:
    The evaluation code in "./evaluator/" is implemented in PyTorch (GPU version) for acceleration.
    Since the GTs of some datasets (e.g. "Cosal2015") have original sizes too large to be evaluated on a GPU with
    limited memory (our "RTX 2080ti" runs out of its 12G memory when computing the F-measure), the prediction map
    and its corresponding GT are resized to 224*224 by the evaluation code before the metrics are computed.
"""
"""
evaluate:
Given predictions, compute multiple metrics (max F-measure, S-measure and MAE).
The evaluation results are saved in "doc_path".
"""
def evaluate(roots, doc_path, num_thread, pin):
datasets = roots.keys()
for dataset in datasets:
# Evaluate predictions of "dataset".
results = evaluate_dataset(roots=roots[dataset],
dataset=dataset,
batch_size=1,
num_thread=num_thread,
demical=True,
suffixes={'gt': '.png', 'pred': '_refine.png'},
pin=pin)
# Save evaluation results.
content = '{}:\n'.format(dataset)
content += 'mean-Fmeasure={}'.format(results['mean_f'])
content += ' '
content += 'max-Fmeasure={}'.format(results['max_f'])
content += ' '
content += 'Smeasure={}'.format(results['s'])
content += ' '
content += 'MAE={}\n'.format(results['mae'])
write_doc(doc_path, content)
print(content)
content = '\n'
write_doc(doc_path, content)
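
# For reference (an assumption, not the actual "./evaluator/" code): MAE is typically the mean
# absolute difference between the normalized prediction and GT maps, e.g. with PyTorch tensors:
#   mae = (pred - gt).abs().mean()
# while the max F-measure takes the maximum F-beta score over a sweep of binarization thresholds.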
def parse_args():
    parser = argparse.ArgumentParser()
    parser.add_argument('--pred_root', type=str, default='./pred',
                        help='Folder containing the predictions.')
    parser.add_argument('--eval_num_thread', type=int, default=4,
                        help='Number of threads used for evaluation.')
    parser.add_argument('--datasets', type=str, nargs='+', default=['CoSOD3k'],
                        help='Names of the test datasets.')
    return parser.parse_args()
def main():
    args = parse_args()

    # Path of the txt file used to save the evaluation results.
    eval_doc_path = args.pred_root + '/evaluation.txt'

    # Build "eval_roots": for each dataset, the folders holding its GTs and predictions.
    eval_roots = dict()
    for dataset in args.datasets:
        # Sanity check: print how many GT and prediction files each dataset folder contains.
        print(dataset,
              len(os.listdir('data/{}/gt/'.format(dataset))),
              len(os.listdir(args.pred_root + '/{}/'.format(dataset))))
        roots = {'gt': 'data/{}/gt/'.format(dataset),
                 'pred': args.pred_root + '/{}/'.format(dataset)}
        eval_roots[dataset] = roots

    evaluate(roots=eval_roots,
             doc_path=eval_doc_path,
             num_thread=args.eval_num_thread,
             pin=False)
# ------------- end -------------

if __name__ == "__main__":
    main()
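
# Example invocation (assuming GTs are stored under "data/<dataset>/gt/" and predictions under
# "<pred_root>/<dataset>/", matching the roots built in main()):
#   python evaluate.py --pred_root ./pred --datasets CoSOD3k Cosal2015 --eval_num_thread 4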