Feat/Improve Yolo-v4 Model #149
base: main
Changes from 25 commits
@@ -0,0 +1,144 @@
import cv2
import numpy as np
from configparser import RawConfigParser


DEF_SCORE = 0.3
DEF_NMS = 0.4

class BaseInference:
    """
    Base Inference class for predictions

    ...

    Attributes
    ----------
    weight_path : str
        path to the .weight file
    config_path : str
        path to the .cfg file
    classes : list
        names of classes detected
    score_thresh : float
        threshold to classify an object as detected
    nms_thresh : float
        threshold for non-max suppression

    Methods
    -------
    initialize()
        Initializes the neural network using the given weights and config
    read_config()
        Reads the config for the trained height and width
    run()
        Obtains predicted boxes
    """

    def __init__(self, weight_path, config_path, classes, score_thresh=None, nms_thresh=None):
        self.weight_path = weight_path
        self.config_path = config_path
        self.classes = classes
        self.net = None
        self.score_thresh = score_thresh if score_thresh is not None else DEF_SCORE
        self.nms_thresh = nms_thresh if nms_thresh is not None else DEF_NMS

    def initialize(self):
        # Load YOLO weights and network definition
        self.net = cv2.dnn.readNet(
            self.weight_path,
            self.config_path
        )
        # Collect the names of the unconnected (output) layers
        layer_names = self.net.getLayerNames()
        self.output_layers = [layer_names[i[0] - 1] for i in self.net.getUnconnectedOutLayers()]

    def read_config(self):
        cfg = RawConfigParser(strict=False)
        cfg.read(self.config_path)

        # The [net] section of a Darknet cfg holds the training input dimensions
        net_dict = dict(cfg.items('net'))
        self.train_height_width = (int(net_dict['height']), int(net_dict['width']))

    def run(self, img):
        """
        Parameters
        ----------
        img : cv2.Mat
            Image as a matrix

        Returns
        -------
        class_ids : list(int)
            Class IDs of boxes
        box_dims : list(list(int))
            Dimensions of boxes
        box_confidences : list(float)
            Confidence scores of boxes
        dets : list(list(float))
            Normalised dimensions of boxes
        indexes : list(int)
            Indexes of boxes that passed NMS
        """
        height, width = self.get_height_width_from_img(img)

        return self.get_filtered_boxes(img, height, width)

    def get_filtered_boxes(self, img, height, width):
        outs = self.run_single(img)
        class_ids, box_dims, box_confidences, dets = self.get_boxes(outs, height, width)
        indexes = self.filter_boxes(box_dims, box_confidences)

        return class_ids, box_dims, box_confidences, dets, indexes

    def get_height_width_from_img(self, img):
        height, width, channels = img.shape
        return height, width

    def run_single(self, img):
        # Detecting objects: 0.00392 ≈ 1/255 scales pixel values to [0, 1],
        # and the blob is resized to the network's trained input dimensions
        blob = cv2.dnn.blobFromImage(img, 0.00392, self.train_height_width, (0, 0, 0), True, crop=False)

        self.net.setInput(blob)
        outs = self.net.forward(self.output_layers)

        return outs

    def get_boxes(self, output, height, width, test=True):
        # Collect detections whose best class score exceeds the threshold
        class_ids = []
        box_confidences = []
        box_dims = []
        # Normalised detections (e.g. for saving to txt)
        dets = []

        for out in output:
            for detection in out:
                scores = detection[5:]
                class_id = np.argmax(scores)
                confidence = scores[class_id]
                if confidence > self.score_thresh:
                    # Object detected
                    center_x = int(detection[0] * width)
                    center_y = int(detection[1] * height)
                    w = int(detection[2] * width)
                    h = int(detection[3] * height)

                    # Rectangle coordinates
                    x = int(center_x - w / 2)
                    y = int(center_y - h / 2)

                    box_dims.append([x, y, w, h])
                    box_confidences.append(float(confidence))
                    class_ids.append(class_id)

                    # Save normalised format
                    dets.append(detection[:4])

        return class_ids, box_dims, box_confidences, dets
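The detection vectors decoded above follow the usual Darknet/YOLO layout [center_x, center_y, w, h, objectness, class scores...], with the first four values normalised to [0, 1]. As a quick check of the arithmetic in get_boxes(): on a 640×480 frame, a detection beginning [0.5, 0.5, 0.25, 0.5, ...] gives w = 160, h = 240, and a top-left corner at x = 320 - 80 = 240, y = 240 - 120 = 120.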
    def filter_boxes(self, box_dims, box_confidences):
        indexes = cv2.dnn.NMSBoxes(box_dims, box_confidences, self.score_thresh, self.nms_thresh)
        indexes = [int(i) for i in indexes]
        return indexes
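For context, read_config() parses a Darknet-style .cfg file: the [net] section holds the input dimensions the network was trained with, and strict=False is needed because Darknet configs repeat section names such as [convolutional]. A typical [net] section looks roughly like this (illustrative values, not part of this PR):

[net]
batch=64
subdivisions=16
width=416
height=416
channels=3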
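A minimal usage sketch of BaseInference, with hypothetical file names and class list (nothing below is part of the diff). Note that on OpenCV 4.5.4 and newer, getUnconnectedOutLayers() returns a flat array, so the index expression in initialize() may need i - 1 instead of i[0] - 1:

import cv2

from base_inference import BaseInference

classes = ["person", "car"]                                      # hypothetical class list
model = BaseInference("yolov4.weights", "yolov4.cfg", classes)   # hypothetical paths
model.initialize()     # build the cv2.dnn network and collect output layer names
model.read_config()    # read the trained input size from the cfg [net] section

img = cv2.imread("sample.jpg")                                   # hypothetical image
class_ids, box_dims, box_confidences, dets, indexes = model.run(img)
for i in indexes:      # detections that survived non-max suppression
    x, y, w, h = box_dims[i]
    print(classes[class_ids[i]], box_confidences[i], (x, y, w, h))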
@@ -0,0 +1,82 @@
from base_inference import BaseInference


class ServiceInference(BaseInference):
    """
    Inference class for predictions on predict_microservice

    ...

    Methods
    -------
    run(img, width_dict)
        Obtains predicted boxes for predict_microservice
    """

    def run(self, img, width_dict):
        """
        Parameters
        ----------
        img : cv2.Mat
            Image as a matrix
        width_dict : dict
            Dict whose "content" key holds the requested output width as UTF-8
            encoded bytes; a value of 0 means the image is not rescaled

        Returns
        -------
        scaled_height : int
            Scaled height of img
        scaled_width : int
            Scaled width of img
        boxes : list
            List of predicted boxes in JSON format
        """
        scaled_height, scaled_width = self.get_height_width_from_img(img, width_dict)
        class_ids, box_dims, box_confidences, _, indexes = super().get_filtered_boxes(img, scaled_height, scaled_width)
        boxes = self.get_boxes_dict(box_dims, box_confidences, class_ids, indexes)
        return scaled_height, scaled_width, boxes

    def get_height_width_from_img(self, img, width_dict):
        height, width = super().get_height_width_from_img(img)

        scaled_width = int(width_dict["content"].decode("utf-8"))

        # If the given width is 0, do not scale
        scaled_width = scaled_width if scaled_width != 0 else width
        scaled_height = int((scaled_width / width) * height)

        return scaled_height, scaled_width

    def get_boxes_dict(self, box_dims, box_confidences, class_ids, indexes):
        """
        Parameters
        ----------
        box_dims : list
            Dimensions of predicted boxes
        box_confidences : list
            Confidence scores of predicted boxes
        class_ids : list
            Class IDs of predicted boxes
        indexes : list
            Indexes of predicted boxes after NMS

        Returns
        -------
        boxes : list
            List of predicted boxes in JSON format
        """
        boxes = []
        for i in indexes:
            i = int(i)
            x, y, w, h = box_dims[i]
            boxes.append({
                "x": x,
                "y": y,
                "w": w,
                "h": h,
                "confidence": float(box_confidences[i]),
                "class": str(self.classes[class_ids[i]])
            })

        # Sort boxes by area in descending order
        boxes = sorted(boxes, key=lambda box: box["w"] * box["h"], reverse=True)

        return boxes
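A minimal sketch of calling ServiceInference from the microservice side, assuming this file is importable as service_inference and that width_dict carries the requested width as UTF-8 bytes under the "content" key (as get_height_width_from_img expects); all names below are illustrative:

import cv2

from service_inference import ServiceInference    # assumed module name for this file

classes = ["person", "car"]                        # hypothetical class list
model = ServiceInference("yolov4.weights", "yolov4.cfg", classes)   # hypothetical paths
model.initialize()
model.read_config()

img = cv2.imread("sample.jpg")                     # hypothetical image
width_dict = {"content": b"640"}                   # scale boxes to a 640-pixel-wide frame
scaled_height, scaled_width, boxes = model.run(img, width_dict)

# boxes is sorted by area, largest first, e.g.:
# [{"x": 240, "y": 120, "w": 160, "h": 240, "confidence": 0.87, "class": "person"}, ...]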