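"""Evaluate a trained detector on the PASCAL VOC test set.

With --mode val, computes per-class AP and overall mAP; with --mode det,
runs detection on the images under --visiual and writes the visualised
results to <PROJECT_PATH>/detection_result/.
"""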
import argparse
import logging
import os
import time

# Explicit third-party imports for names used directly in this file
# (os, cv2, np, torch); the wildcard imports below may also provide them.
import cv2
import numpy as np
import torch

import utils.gpu as gpu
from model.build_model import Build_Model
from utils.tools import *
from eval.evaluator import Evaluator
from config import cfg
from utils.visualize import *
from utils.torch_utils import *
from utils.log import Logger


class Evaluation(object):
    def __init__(self,
                 gpu_id=0,
                 weight_path=None,
                 visiual=None,
                 eval=False,
                 ):
        self.__num_class = cfg.VOC_DATA.NUM
        self.__conf_threshold = cfg.VAL.CONF_THRESH
        self.__nms_threshold = cfg.VAL.NMS_THRESH
        self.__device = gpu.select_device(gpu_id)
        self.__multi_scale_val = cfg.VAL.MULTI_SCALE_VAL
        self.__flip_val = cfg.VAL.FLIP_VAL
        self.__visiual = visiual
        self.__eval = eval
        self.__classes = cfg.VOC_DATA.CLASSES

        self.__model = Build_Model().to(self.__device)
        self.__load_model_weights(weight_path)

        self.__evalter = Evaluator(self.__model, showatt=False)
    def __load_model_weights(self, weight_path):
        print("loading weight file from : {}".format(weight_path))

        chkpt = torch.load(weight_path, map_location=self.__device)
        self.__model.load_state_dict(chkpt)
        print("finished loading weight file")
        del chkpt
    def val(self):
        if self.__eval:
            logger.info("***********Start Evaluation****************")
            start = time.time()
            mAP = 0
            with torch.no_grad():
                # Reuse the evaluator built in __init__ instead of
                # constructing a second one.
                APs, inference_time = self.__evalter.APs_voc(self.__multi_scale_val, self.__flip_val)
                for i in APs:
                    logger.info("{} --> AP : {}".format(i, APs[i]))
                    mAP += APs[i]
                # mAP is the mean of the per-class APs.
                mAP = mAP / self.__num_class
                logger.info('mAP:{}'.format(mAP))
                logger.info("inference time: {:.2f} ms".format(inference_time))
            end = time.time()
            logger.info(" ===val cost time:{:.4f}s".format(end - start))
    def detection(self):
        if self.__visiual:
            imgs = os.listdir(self.__visiual)
            logger.info("***********Start Detection****************")

            # Ensure the output directory exists; cv2.imwrite fails
            # silently when it does not.
            out_dir = os.path.join(cfg.PROJECT_PATH, "detection_result")
            os.makedirs(out_dir, exist_ok=True)

            for v in imgs:
                path = os.path.join(self.__visiual, v)
                logger.info("val images : {}".format(path))

                img = cv2.imread(path)
                assert img is not None, "failed to read image: {}".format(path)

                bboxes_prd = self.__evalter.get_bbox(img, v)
                if bboxes_prd.shape[0] != 0:
                    # Each prediction row is (4 box coords, score, class index).
                    boxes = bboxes_prd[..., :4]
                    class_inds = bboxes_prd[..., 5].astype(np.int32)
                    scores = bboxes_prd[..., 4]

                    visualize_boxes(image=img, boxes=boxes, labels=class_inds, probs=scores, class_labels=self.__classes)
                    path = os.path.join(out_dir, v)
                    cv2.imwrite(path, img)
                    logger.info("saved images : {}".format(path))
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument('--weight_path', type=str, default='weight/best.pt', help='weight file path')
    parser.add_argument('--log_val_path', type=str, default='log_val',
                        help='directory for the validation log file')
    parser.add_argument('--gpu_id', type=int, default=-1, help='GPU id to use (e.g. 0,1,...) or -1 for CPU')
    parser.add_argument('--visiual', type=str, default='VOCtest-2007/VOC2007/JPEGImages', help='val data path or None')
    # Note: with action='store_true' and default=True, --eval is effectively always on.
    parser.add_argument('--eval', action='store_true', default=True, help='eval the mAP or not')
    parser.add_argument('--mode', type=str, default='val',
                        help='val or det')
    opt = parser.parse_args()

    logger = Logger(log_file_name=os.path.join(opt.log_val_path, 'log_voc_val.txt'), log_level=logging.DEBUG, logger_name='YOLOv4').get_log()

    evaluation = Evaluation(gpu_id=opt.gpu_id,
                            weight_path=opt.weight_path,
                            eval=opt.eval,
                            visiual=opt.visiual)
    if opt.mode == 'val':
        evaluation.val()
    else:
        evaluation.detection()
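
# Example invocations (assuming the default weight file and VOC test images exist):
#   python eval_voc.py --gpu_id 0 --mode val
#   python eval_voc.py --gpu_id 0 --mode det --visiual VOCtest-2007/VOC2007/JPEGImages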