Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Dev #90

Open
wants to merge 6 commits into
base: main
Choose a base branch
from
Open

Dev #90

Changes from 1 commit
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
Prev Previous commit
Next Next commit
last version of dir YOEO
Your Name committed Mar 17, 2023
commit 90dcf7e419df90003b7a7a497e05ef3dce6dc83c
Binary file added config/IR&onnx_for_416_Petr_1/yoeo.bin
Binary file not shown.
631 changes: 631 additions & 0 deletions config/IR&onnx_for_416_Petr_1/yoeo.mapping

Large diffs are not rendered by default.

Binary file added config/IR&onnx_for_416_Petr_1/yoeo.onnx
Binary file not shown.
5,155 changes: 5,155 additions & 0 deletions config/IR&onnx_for_416_Petr_1/yoeo.xml

Large diffs are not rendered by default.

Binary file added config/IR&onnx_for_416_mine_759/yoeo_mine_759.bin
Binary file not shown.
631 changes: 631 additions & 0 deletions config/IR&onnx_for_416_mine_759/yoeo_mine_759.mapping

Large diffs are not rendered by default.

Binary file not shown.
5,155 changes: 5,155 additions & 0 deletions config/IR&onnx_for_416_mine_759/yoeo_mine_759.xml

Large diffs are not rendered by default.

2 changes: 2 additions & 0 deletions pyproject.toml
Original file line number Diff line number Diff line change
@@ -35,3 +35,5 @@ yoeo-detect = "yoeo.detect:run"
yoeo-train = "yoeo.train:run"
yoeo-test = "yoeo.test:run"
yoeo-detectcam = "yoeo.detectwebcam:run"
yoeo-detectcam_IR = "yoeo.detectwebcam_IR:run"
yoeo-detectcam_pt = "yoeo.detectwebcam_pt:run"
66 changes: 66 additions & 0 deletions scripts/convertONNXModelToOpenVinoIR.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,66 @@
#! /usr/bin/env python3
import argparse
import os


def convert_model(onnx_path: str, output_path: str) -> None:
    """Convert the ONNX model at *onnx_path* into OpenVINO IR files in *output_path*."""
    run_command(assemble_command(onnx_path, output_path))


def assemble_command(onnx_path: str, output_path: str) -> str:
    """Build the OpenVINO Model Optimizer (``mo``) command line as one string.

    Based on
    https://docs.openvino.ai/latest/notebooks/102-pytorch-onnx-to-openvino-with-output.html
    (April 7, 2022).
    """
    raw_command = (
        "mo "
        f'--input_model "{onnx_path}" '
        f'--output_dir "{output_path}" '
        "--input InputLayer "
        "--output Detections,Segmentations "
        "--framework onnx "
        "--static_shape "
        "--batch 1"
    )
    # Collapse any run of whitespace to single spaces, exactly as the
    # original multiline template did.
    return " ".join(raw_command.split())


def run_command(command: str) -> None:
    """Execute *command* through the shell and print whether it exited cleanly.

    Based on
    https://docs.openvino.ai/latest/notebooks/102-pytorch-onnx-to-openvino-with-output.html
    (April 7, 2022).
    """
    print("Exporting ONNX model to IR...")

    exit_status = os.system(command)

    print("=" * 30)
    # os.system returns 0 on success; anything else counts as failure.
    outcome = "Model conversion was successful" if exit_status == 0 else "Model conversion failed"
    print(outcome)


def get_output_path(model_onnx: str) -> str:
    """Return the directory containing *model_onnx*; IR files are written next to it."""
    return os.path.dirname(os.path.abspath(model_onnx))


def get_parent_dir(path: str) -> str:
    """Return the absolute path of the directory that contains *path*."""
    head, _tail = os.path.split(os.path.abspath(path))
    return head


def run():
    """CLI entry point: parse the .onnx model path and convert it to OpenVINO IR."""
    parser = argparse.ArgumentParser(description='Convert ONNX Model to OpenVino IR')
    parser.add_argument("model_onnx", type=str, help="full path to model file (.onnx)")
    arguments = parser.parse_args()

    model_path = arguments.model_onnx
    # IR output lands in the same directory as the source .onnx file.
    convert_model(model_path, get_output_path(model_path))


if __name__ == "__main__":
    run()
100 changes: 100 additions & 0 deletions scripts/convertPyTorchModelToONNX.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,100 @@
#! /usr/bin/env python3
import argparse
import os.path
from typing import Tuple

import onnx
import torch

import yoeo.models


def convert_model(model_cfg: str, weights_pth: str, output_path: str) -> None:
    """Load the YOEO PyTorch model from cfg+weights and export it as ONNX to *output_path*."""
    loaded_model = yoeo.models.load_model(model_cfg, weights_pth)
    convert_to_onnx(model=loaded_model, output_path=output_path)


def convert_to_onnx(model: yoeo.models.Darknet, output_path: str, image_size: int = 416, batch_size: int = 1) -> None:
    """Trace *model* with a random dummy input and export it to ONNX.

    The input layer is named "InputLayer" and the outputs "Detections" /
    "Segmentations" so the later OpenVINO conversion can reference them by name.
    Uses CUDA when available, otherwise CPU.
    """
    target_device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    model.to(target_device)

    # NCHW dummy tensor matching the expected network input resolution.
    example_input = torch.randn(batch_size, 3, image_size, image_size, device=target_device)

    torch.onnx.export(
        model,
        example_input,
        output_path,
        verbose=False,
        export_params=True,
        input_names=["InputLayer"],
        output_names=["Detections", "Segmentations"],
        opset_version=11,
    )


def check_model(model_path: str) -> None:
    """Load the exported ONNX file and run the ONNX validity checker on it."""
    check_onnx(load_onnx(model_path))


def load_onnx(path: str) -> onnx.onnx_ml_pb2.ModelProto:
    """Deserialize and return the ONNX model stored at *path*."""
    loaded_model = onnx.load(path)
    return loaded_model


def check_onnx(model: onnx.onnx_ml_pb2.ModelProto) -> None:
    """Run the ONNX checker on *model* and print whether it is valid.

    Based on https://github.com/onnx/onnx/blob/main/docs/PythonAPIOverview.md
    (April 7, 2022).
    """
    print("=" * 30)
    try:
        onnx.checker.check_model(model)
    except onnx.checker.ValidationError as err:
        print('The model is invalid: %s' % err)
    else:
        # Only reached when the checker raised nothing.
        print('The model is valid!')


def construct_path(model_cfg: str) -> str:
    """Return the output .onnx path: same directory and stem as *model_cfg*.

    Bug fix: the computed filename was previously discarded and a literal
    placeholder string was joined into the path instead, so every export
    ended up with the same hard-coded name regardless of the input cfg.
    The path helpers are inlined here so the function is self-contained.
    """
    absolute_path = os.path.abspath(model_cfg)
    parent_dir, basename = os.path.split(absolute_path)
    stem, _ext = os.path.splitext(basename)

    return os.path.join(parent_dir, f"{stem}.onnx")


def get_parent_dir(path: str) -> str:
    """Return the absolute directory that contains *path*."""
    return os.path.dirname(os.path.abspath(path))


def get_filename_wout_extension(path: str) -> str:
    """Return the base name of *path* with its final extension stripped."""
    base = os.path.basename(os.path.abspath(path))
    stem, _extension = os.path.splitext(base)

    return stem


def run():
    """CLI entry point: export a YOEO .cfg/.weights model to ONNX and validate the result."""
    parser = argparse.ArgumentParser(description='Convert PyTorch Model to ONNX')
    parser.add_argument(
        "model_cfg",
        type=str,
        help="full path to model file (.cfg). ONNX model will be output with the same filename as well."
    )
    parser.add_argument(
        "model_weights",
        type=str,
        help="full path to model weights file (.pth or .weights)."
    )
    parsed = parser.parse_args()

    # The .onnx file is written next to the .cfg with the same stem.
    target_path = construct_path(parsed.model_cfg)
    convert_model(parsed.model_cfg, parsed.model_weights, target_path)
    check_model(target_path)


if __name__ == "__main__":
    run()
38 changes: 38 additions & 0 deletions tezises/inputs&outputs
Original file line number Diff line number Diff line change
@@ -0,0 +1,38 @@
PYTORCH .cfg + .pth

num 1 DETECTIONS SHAPE IStorch.Size([1, 6000, 8]), <class 'torch.Tensor'>
num 1 SEGMENTATIONS SHAPE IStorch.Size([1, 640, 640]), <class 'torch.Tensor'>

torchscript

python .pt
SHAPE OF EVERY ELEM: torch.Size([1, 3, 20, 20, 8])
SHAPE OF EVERY ELEM: torch.Size([1, 3, 40, 40, 8])
SHAPE OF EVERY ELEM: torch.Size([1, 3, 640, 640])

cpp .pt
Shape of every elem: [1, 3, 20, 20, 8]
Shape of every elem: [1, 3, 40, 40, 8]
Shape of every elem: [1, 3, 640, 640]

IR

python .xml
outputs[
<ConstOutput: names[Detections] shape[1,2535,8] type: f32>,
<ConstOutput: names[Segmentations] shape[1,416,416] type: f32>
]>

cpp .xml
[1,2535,8]
[1,416,416]


зрение робана

Пути к весам и обработчику сеток: ~/env/common/vision_filters/ball_detection_wideangle.json
Чето там еще (трешолды и другие параметры) ~/env/common/vision_filters/colors_wideangle.json
пайплайн зрения прописан здесь ~/env/common/vision_filters/all.json



12 changes: 11 additions & 1 deletion yoeo/detectwebcam.py
Original file line number Diff line number Diff line change
@@ -51,6 +51,7 @@ def detect_directory(model_path, weights_path, classes, output_path,
:type nms_thres: float, optional
"""
model = load_model(model_path, weights_path)
# model = torch.jit.load(weights_path)
print("NY PRIVET 7")

cam = cv2.VideoCapture(0)
@@ -122,12 +123,18 @@ def detect_image(model, image, img_size=416, conf_thres=0.5, nms_thres=0.5):

# if torch.cuda.is_available():
# input_img = input_img.to("cuda")
print(f"model: {model}")
# print(f"model: {model}")
# Get detections
with torch.no_grad():
detections, segmentations = model(input_img)
print(f"num 1 DETECTIONS SHAPE IS{detections.shape}, {type(detections)}")
print(f"num 1 SEGMENTATIONS SHAPE IS{segmentations.shape}, {type(segmentations)}")
detections = non_max_suppression(detections, conf_thres, nms_thres)
print(f"num 2 DETECTIONS SHAPE IS{len(detections)}")

detections = rescale_boxes(detections[0], img_size, image.shape[0:2])
print(f"num 3 DETECTIONS SHAPE IS{detections.shape}")

segmentations = rescale_segmentation(segmentations, image.shape[0:2])
print(f"detections shape: {detections.shape}")
print(f"detections shape: {detections.shape}")
@@ -238,12 +245,15 @@ def _draw_and_save_output_image(image, detections, seg, img_size, output_path, c
print("JJEEPPAAA")
# Rescale boxes to original image

print(f"num after 1 DETECTIONS SHAPE IS{detections.shape}")
detections = rescale_boxes(detections, img_size, img.shape[:2])

unique_labels = detections[:, -1].cpu().unique()
n_cls_preds = len(unique_labels)
# Bounding-box colors
cmap = plt.get_cmap("tab20b")
colors = [cmap(i) for i in np.linspace(0, 1, len(classes))]
print(f"num after 2 DETECTIONS SHAPE IS{detections.shape}")
for x1, y1, x2, y2, conf, cls_pred in detections:

print(f"\t+ Label: {classes[int(cls_pred)]} | Confidence: {conf.item():0.4f}")
Loading