Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Add files via upload #2

Open
wants to merge 7 commits into
base: main
Choose a base branch
from
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
5 changes: 5 additions & 0 deletions MASK_RCNN_ObjectDetection.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,5 @@
# Instance segmentation demo using PixelLib's Mask R-CNN with COCO weights.
import pixellib
from pixellib.instance import instance_segmentation

# Build the segmenter, load the pre-trained COCO model, and run it on one image.
segmenter = instance_segmentation()
segmenter.load_model("mask_rcnn_coco.h5")
# show_bboxes=True draws bounding boxes in addition to the instance masks.
segmenter.segmentImage("test2.jpg", show_bboxes=True, output_image_name="output_image.jpg")
139 changes: 139 additions & 0 deletions blackboard-detection/R-CNN ResNet-50 FPN [Testing].ipynb

Large diffs are not rendered by default.

244 changes: 244 additions & 0 deletions blackboard-detection/R-CNN ResNet-50 FPN [Training].ipynb
Original file line number Diff line number Diff line change
@@ -0,0 +1,244 @@
{
"cells": [
{
"cell_type": "code",
"execution_count": 1,
"id": "4077a973",
"metadata": {},
"outputs": [],
"source": [
"import torch\n",
"from detecto import core"
]
},
{
"cell_type": "code",
"execution_count": 2,
"id": "19f75e36",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"False"
]
},
"execution_count": 2,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"torch.cuda.is_available()"
]
},
{
"cell_type": "code",
"execution_count": 3,
"id": "75df1bc3",
"metadata": {},
"outputs": [
{
"name": "stderr",
"output_type": "stream",
"text": [
"/home/hariraagavtr/.local/bin/.virtualenvs/cv/lib/python3.10/site-packages/torchvision/models/_utils.py:208: UserWarning: The parameter 'pretrained' is deprecated since 0.13 and will be removed in 0.15, please use 'weights' instead.\n",
" warnings.warn(\n",
"/home/hariraagavtr/.local/bin/.virtualenvs/cv/lib/python3.10/site-packages/torchvision/models/_utils.py:223: UserWarning: Arguments other than a weight enum or `None` for 'weights' are deprecated since 0.13 and will be removed in 0.15. The current behavior is equivalent to passing `weights=FasterRCNN_ResNet50_FPN_Weights.COCO_V1`. You can also use `weights=FasterRCNN_ResNet50_FPN_Weights.DEFAULT` to get the most up-to-date weights.\n",
" warnings.warn(msg)\n"
]
}
],
"source": [
"dataset = core.Dataset('/home/hariraagavtr/Recapyt/dataset/blackboard/')\n",
"\n",
"# Model: R-CNN ResNet-50 FPN\n",
"model = core.Model(['board'])"
]
},
{
"cell_type": "code",
"execution_count": 4,
"id": "3f1c10aa",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"It looks like you're training your model on a CPU. Consider switching to a GPU; otherwise, this method can take hours upon hours or even days to finish. For more information, see https://detecto.readthedocs.io/en/latest/usage/quickstart.html#technical-requirements\n",
"Epoch 1 of 10\n",
"Begin iterating over training dataset\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"100%|██████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████| 50/50 [09:06<00:00, 10.93s/it]\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"Epoch 2 of 10\n",
"Begin iterating over training dataset\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"100%|██████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████| 50/50 [08:29<00:00, 10.19s/it]\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"Epoch 3 of 10\n",
"Begin iterating over training dataset\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"100%|██████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████| 50/50 [08:54<00:00, 10.69s/it]\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"Epoch 4 of 10\n",
"Begin iterating over training dataset\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"100%|██████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████| 50/50 [08:08<00:00, 9.77s/it]\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"Epoch 5 of 10\n",
"Begin iterating over training dataset\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"100%|██████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████| 50/50 [08:38<00:00, 10.37s/it]\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"Epoch 6 of 10\n",
"Begin iterating over training dataset\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"100%|██████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████| 50/50 [08:15<00:00, 9.90s/it]\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"Epoch 7 of 10\n",
"Begin iterating over training dataset\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"100%|██████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████| 50/50 [08:48<00:00, 10.57s/it]\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"Epoch 8 of 10\n",
"Begin iterating over training dataset\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"100%|██████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████| 50/50 [10:11<00:00, 12.22s/it]\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"Epoch 9 of 10\n",
"Begin iterating over training dataset\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"100%|██████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████| 50/50 [08:35<00:00, 10.31s/it]\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"Epoch 10 of 10\n",
"Begin iterating over training dataset\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"100%|██████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████| 50/50 [08:08<00:00, 9.76s/it]\n"
]
}
],
"source": [
"model.fit(dataset)\n",
"model.save('BlackboardDetectionWeights.pth')"
]
}
],
"metadata": {
"kernelspec": {
"display_name": "cv",
"language": "python",
"name": "cv"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.10.4"
}
},
"nbformat": 4,
"nbformat_minor": 5
}
Empty file added blackboard-detection/README.md
Empty file.
40 changes: 40 additions & 0 deletions handtracking_basics.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,40 @@
# Webcam hand-tracking demo using MediaPipe Hands.
# If you are using PyCharm: Settings -> Project -> Python Interpreter,
# then add the opencv-python and mediapipe packages.


import cv2
import mediapipe as mp
import time

cap = cv2.VideoCapture(0)  # default webcam

mpHands = mp.solutions.hands
hands = mpHands.Hands()  # default params (ctrl+click to inspect them)
mpDraw = mp.solutions.drawing_utils
pTime = 0  # previous-frame timestamp, used for the FPS overlay
cTime = 0

while True:
    success, img = cap.read()
    # Fix: the success flag was previously ignored; a failed grab returns
    # img=None and would crash cvtColor. Stop cleanly instead.
    if not success:
        break
    # MediaPipe expects RGB; OpenCV captures BGR.
    imgRGB = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
    results = hands.process(imgRGB)
    # To extract hands (results.multi_hand_landmarks is None when no hand is seen)
    if results.multi_hand_landmarks:
        for handLms in results.multi_hand_landmarks:
            for lm_id, lm in enumerate(handLms.landmark):  # renamed: `id` shadows the builtin
                # Landmark coords are normalized [0, 1]; scale to pixel coords.
                h, w, c = img.shape
                cx, cy = int(lm.x * w), int(lm.y * h)
                if lm_id == 0:  # landmark ID 0 is the wrist
                    cv2.circle(img, (cx, cy), 25, (255, 0, 255), cv2.FILLED)
            mpDraw.draw_landmarks(img, handLms, mpHands.HAND_CONNECTIONS)  # one hand at a time
    cTime = time.time()
    fps = 1 / (cTime - pTime)
    pTime = cTime

    cv2.putText(img, str(int(fps)), (10, 70), cv2.FONT_HERSHEY_PLAIN, 3,
                (255, 0, 255), 3)
    cv2.imshow("Image", img)
    # Fix: waitKey's return value was discarded, so the loop could never be
    # exited from the window. Press 'q' to quit.
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break

# Fix: release the camera and close windows instead of leaking them.
cap.release()
cv2.destroyAllWindows()