Commit 20a9f7c
Merge branch 'main' into main
Fang-Haoshu committed Apr 14, 2022
2 parents 89bbe90 + 959bf06 commit 20a9f7c
Showing 79 changed files with 9,286 additions and 106 deletions.
55 changes: 27 additions & 28 deletions BehavioralClustering/0_video2image.py
@@ -3,40 +3,39 @@
import json
import math
import copy
import pandas as pd

import os
import matplotlib.pyplot as plt
from scipy.ndimage.filters import gaussian_filter
from scipy.cluster.hierarchy import dendrogram, linkage
from scipy.cluster import hierarchy

from tqdm import tqdm
from contour_utils import mkdir_p

import setting


def get_frames(video_path, pose_track_vis_path):
    cap = cv2.VideoCapture(video_path)
    read_flag, frame = cap.read()
    if not read_flag:
        print("read " + video_path + " failed!")
    width, height, depth = np.asarray(frame).shape

    i = 0
    if not os.path.exists(pose_track_vis_path):
        mkdir_p(pose_track_vis_path)

    while read_flag:
        cv2.imwrite(pose_track_vis_path + "/frame_{}.png".format(i), frame)
        print("\r {}/frame_{}.png".format(pose_track_vis_path, i), end="")

        read_flag, frame = cap.read()
        i = i + 1


if __name__ == "__main__":
arg = setting.args_class()

for video_path, pose_track_vis_path in zip(arg.videodir, arg.imgdir):
print("generating %s" % (video_path))
get_frames(video_path, pose_track_vis_path)

    for video_path, pose_track_vis_path, start_frame, end_frame in zip(arg.videodir, arg.imgdir, arg.start_frame, arg.end_frame):
        print('generating %s' % (video_path))
        cap = cv2.VideoCapture(video_path)
        if cap.isOpened():
            success = True
        else:
            success = False
            print(" read failed! Make sure that the video format is supported by cv2.VideoCapture")

        if not os.path.exists(pose_track_vis_path):
            mkdir_p(pose_track_vis_path)

        for frame_index in tqdm(range(end_frame)):
            success, frame = cap.read()
            if not success:
                print('read frame failed!')
                break
            if frame_index < start_frame:
                continue
            cv2.imwrite(pose_track_vis_path + '/frame_{}.png'.format(frame_index), frame)

        cap.release()


@@ -203,10 +203,8 @@ def get_samples(video_path, json_path, contour_path, arg, targetMouseID):
)


if __name__ == "__main__":
# dir_name = '/disk1/zexin/project/mice/clustering_sequencial/forZexin/results/0603/1411_black_two/'
# video_name = '1411_black_two.mov'
# json_name= '/disk1/zexin/project/mice/clustering_sequencial/forZexin/results/0603/1411_black_two/alphapose-results-forvis-tracked.json'

if __name__ == '__main__':
arg = setting.args_class()

if len(sys.argv) != 1:
7 changes: 2 additions & 5 deletions BehavioralClustering/fft_utils.py
@@ -352,11 +352,8 @@ def retrieve_poses_Mice(arg):
            frame_ind_inClip += 1

        except Exception as e:
            print(
                "failed to process the {}th frame with error:{}. will restart the clip".format(
                    frame_id, e
                )
            )
            print('failed to process the {}th frame with warning:{}. will restart the clip'.format(frame_id, e))

            frame_id += 1
            pose_clip = []
            poseTheOther_clip = []
13 changes: 11 additions & 2 deletions BehavioralClustering/setting.py
@@ -24,8 +24,17 @@ def __init__(self):
        ]
        ## videodir is a list of videos to be analysed
        self.videodir = [
            "../Tracking/AlphaTracker/data/demo.mp4",
        ]
            '../Tracking/AlphaTracker/data/demo.mp4',
        ]
        ## start_frame is a list of start frame numbers to be extracted. Same as the start_frame in tracking.
        self.start_frame = [
            0,
        ]

        ## end_frame is a list of end frame numbers to be extracted. Same as the end_frame in tracking.
        self.end_frame = [
            300,
        ]

        ## mice_num: the max number of the mice in the videos
        self.mice_num = 2
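Editor's note (illustration only, not part of the commit): videodir, imgdir, start_frame, and end_frame are consumed in parallel by the zip() loop added to 0_video2image.py above, so each list needs one entry per video. A hypothetical two-video configuration (the second path is made up for illustration) would look like:

    # One entry per video; entries at the same index belong together.
    self.videodir = ['../Tracking/AlphaTracker/data/demo.mp4', '../Tracking/AlphaTracker/data/video2.mp4']
    self.start_frame = [0, 0]        # start frame for each video
    self.end_frame = [300, 500]      # end frame for each video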
2 changes: 1 addition & 1 deletion README.md
@@ -3,7 +3,7 @@
<img src = 'Manual/media/Alphatracker Flyer.png' width = 500 >
</p>

[AlphaTracker](https://github.com/ZexinChen/AlphaTracker) is a multi-animal tracking and behavioral analysis tool that incorporates **multi-animal tracking**, **pose estimation** and **unsupervised behavioral clustering** to empower systems neuroscience research. AlphaTracker achieves state-of-the-art accuracy in multi-animal tracking, which lays the foundation for rigorous biological studies. Moreover, its minimal hardware requirements (regular webcams) and efficient training procedure allow ready adoption by most neuroscience labs.
[AlphaTracker](https://github.com/MVIG-SJTU/AlphaTracker) is a multi-animal tracking and behavioral analysis tool that incorporates **multi-animal tracking**, **pose estimation** and **unsupervised behavioral clustering** to empower systems neuroscience research. AlphaTracker achieves state-of-the-art accuracy in multi-animal tracking, which lays the foundation for rigorous biological studies. Moreover, its minimal hardware requirements (regular webcams) and efficient training procedure allow ready adoption by most neuroscience labs.

<div align="center">
<img src="Manual/media/pipeline.png", width="600" alt><br>
48 changes: 21 additions & 27 deletions Tracking/AlphaTracker/download.py
@@ -2,34 +2,28 @@
import zipfile


sppe_pretrain_weight = "1OPORTWB2cwd5YTVBX-NE8fsauZJWsrtW"
yolo_pretrain_weight = "1g8uJjK7EOlqrUCmjZTtCegwnNsBig6zn"
sppe_trained_weight = "1_BwtYySpX9uWDgdwqw0UEppyMYYv1gkJ"
yolo_trained_weight = "13zXkuZ4dNm3ZOwstr1sSWKOOzJ19XZpN"
demo_data = "1N0JjazqW6JmBheLrn6RoDTSRXSPp1t4K"
sample_training_data = "15dR-vVCEsg2z7mEVzJOF9YDW6YioEU3N"
scipy_data = "1c6vJQbAm_TcGyTCr1ah-x_R-iIYmT9TM"

gdd.download_file_from_google_drive(
    file_id=sppe_pretrain_weight, dest_path="./models/sppe/duc_se.pth"
)
gdd.download_file_from_google_drive(
    file_id=yolo_pretrain_weight, dest_path="./train_yolo/darknet/darknet53.conv.74"
)
gdd.download_file_from_google_drive(
    file_id=sppe_trained_weight, dest_path="./train_sppe/exp/coco/demo/model_10.pkl"
)
gdd.download_file_from_google_drive(
    file_id=yolo_trained_weight,
    dest_path="./train_yolo/darknet/backup/demo/yolov3-mice_final.weights",
)
gdd.download_file_from_google_drive(file_id=demo_data, dest_path="./data/demo.mp4")
gdd.download_file_from_google_drive(
    file_id=sample_training_data, dest_path="./data/sample_annotated_data.zip"
)
gdd.download_file_from_google_drive(
    file_id=scipy_data, dest_path="../../UI/data/scipy.data"
)
# You can prepend https://drive.google.com/file/d/ to each of the following file_ids and download the files manually through a web browser.
sppe_pretrain_weight = '1OPORTWB2cwd5YTVBX-NE8fsauZJWsrtW'
yolo_pretrain_weight = '1g8uJjK7EOlqrUCmjZTtCegwnNsBig6zn'
sppe_trained_weight = '1_BwtYySpX9uWDgdwqw0UEppyMYYv1gkJ'
yolo_trained_weight = '13zXkuZ4dNm3ZOwstr1sSWKOOzJ19XZpN'
demo_data = '1N0JjazqW6JmBheLrn6RoDTSRXSPp1t4K'
sample_training_data = '15dR-vVCEsg2z7mEVzJOF9YDW6YioEU3N'
scipy_data = '1c6vJQbAm_TcGyTCr1ah-x_R-iIYmT9TM'

# If you download manually, remember to place each file in the destination folder given below.
gdd.download_file_from_google_drive(file_id=sppe_pretrain_weight, dest_path='./models/sppe/duc_se.pth')
gdd.download_file_from_google_drive(file_id=yolo_pretrain_weight, dest_path='./train_yolo/darknet/darknet53.conv.74')
gdd.download_file_from_google_drive(file_id=sppe_trained_weight, dest_path='./train_sppe/exp/coco/demo/model_10.pkl')
gdd.download_file_from_google_drive(file_id=yolo_trained_weight, dest_path='./train_yolo/darknet/backup/demo/yolov3-mice_final.weights')
gdd.download_file_from_google_drive(file_id=demo_data, dest_path='./data/demo.mp4')
gdd.download_file_from_google_drive(file_id=sample_training_data, dest_path='./data/sample_annotated_data.zip')
gdd.download_file_from_google_drive(file_id=scipy_data, dest_path='../../UI/data/scipy.data')

# The following commands unzip the sample training data. You can also unzip the file manually.
with zipfile.ZipFile('./data/sample_annotated_data.zip', 'r') as zip_ref:
    zip_ref.extractall('./data/sample_annotated_data/')

with zipfile.ZipFile("./data/sample_annotated_data.zip", "r") as zip_ref:
zip_ref.extractall("./data/sample_annotated_data/")
86 changes: 76 additions & 10 deletions Tracking/AlphaTracker/setting.py
@@ -1,5 +1,5 @@
import os

import numpy as np
# general setting
gpu_id = 0 # the id of gpu that will be used

@@ -45,15 +45,13 @@
# video_full_path is the path to the video that will be tracked
video_full_path = "./data/demo.mp4"
video_paths = [
    "./data/demo.mp4",
]  # make sure video names are different from each other
start_frame = 0  # id of the start frame of the video
end_frame = 9737  # id of the last frame of the video
max_pid_id_setting = 2  # number of mice in the video
result_folder = "./track_result/"  # path to the folder used to save the result
remove_oriFrame = (
    False  # whether to remove the original frame that generated from video
)
    './data/demo.mp4',
]  # make sure video names are different from each other
start_frame = 0  # id of the start frame of the video
end_frame = 300  # id of the last frame of the video
max_pid_id_setting = 2  # number of mice in the video
result_folder = './track_result/'  # path to the folder used to save the result
remove_oriFrame = False  # whether to remove the original frame that generated from video
vis_track_result = 1

# weights and match are parameter of tracking algorithm
@@ -72,3 +70,71 @@

AlphaTracker_root = os.path.abspath(AlphaTracker_root)
result_folder = os.path.abspath(result_folder)

with open('train.cfg', 'r') as f:
    dat = f.read()
if not dat:
    print('error, train.cfg is empty')
try:
    dict_state = eval(dat)
except Exception as e:
    print(f'load train.cfg Exception: {e}')
print(dict_state)

gpu_id = int(dict_state['gpu_id']) # the id of gpu that will be used

# data related settings
image_root_list = [dict_state['image_root_list']] # list of image folder paths to the RGB images for training
json_file_list = [dict_state['json_file_list']] # list of paths to the json files that contain labels of the images for training
num_mouse = [int(dict_state['num_mouse'])] # the number of mouse in the images in each image folder path
exp_name = dict_state['exp_name'] # the name of the experiment
num_pose = int(dict_state['num_pose']) # number of the pose that is labeled, remember to change self.nJoints in train_sppe/src/utils/dataset/coco.py

pose_pair = np.array([[float(j) for j in i.split('-')] for i in dict_state['pose_pair'].split(',')])
print('pose pair is:',pose_pair)
train_val_split = float(dict_state['train_val_split']) # ratio of data that used to train model, the rest will be used for validation
image_suffix = dict_state['image_suffix'] # suffix of the image, png or jpg


# training hyperparameter setting
# Protip: if your training does not give good enough tracking you can lower lr and increase epoch number
# but lowering the lr too much can be bad for tracking quality as well.
sppe_lr = float(dict_state['sppe_lr'])
sppe_epoch = int(dict_state['sppe_epoch'])
sppe_pretrain = dict_state['sppe_pretrain']
sppe_batchSize = int(dict_state['sppe_batchSize'])
yolo_lr = float(dict_state['yolo_lr'])
yolo_iter = int(dict_state['yolo_iter']) ## if use pretrained model please make sure yolo_iter to be large enough to guarantee finetune is done
yolo_pretrain = dict_state['yolo_pretrain'] # './train_yolo/darknet/darknet53.conv.74'
yolo_batchSize = int(dict_state['yolo_batchSize'])


with open('track.cfg', 'r') as f:
    dat = f.read()
if not dat:
    print('error, track.cfg is empty')
try:
    dict_state2 = eval(dat)
except Exception as e:
    print(f'load track.cfg Exception: {e}')
print(dict_state2)


# demo video setting
# note video_full_path is for track.py, video_paths is for track_batch.py
# video_full_path is the path to the video that will be tracked
video_full_path = dict_state2['video_full_path']
video_paths = [
dict_state2['video_full_path'],
] # make sure video names are different from each other
start_frame = int(dict_state2['start_frame']) # id of the start frame of the video
end_frame = int(dict_state2['end_frame']) # id of the last frame of the video
max_pid_id_setting = int(dict_state2['max_pid_id_setting']) # number of mice in the video
result_folder = dict_state2['result_folder'] # path to the folder used to save the result
remove_oriFrame = int(dict_state2['remove_oriFrame']) # whether to remove the original frame that generated from video
vis_track_result = int(dict_state2['vis_track_result'])

# weights and match are parameter of tracking algorithm
# following setting should work fine, no need to change
weights = dict_state2['weights']
match = int(dict_state2['match'])
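Editor's note (a sketch, not part of the commit): both cfg files added below are plain Python dict literals, which is why setting.py can load them with eval. If you prefer not to execute the file contents, ast.literal_eval accepts the same literals; this alternative is an assumption about usage, not code from the repository:

    import ast

    # Parse track.cfg as a dict literal without executing arbitrary code.
    with open('track.cfg', 'r') as f:
        dict_state2 = ast.literal_eval(f.read())

    start_frame = int(dict_state2['start_frame'])  # '0' in the track.cfg added by this commit
    end_frame = int(dict_state2['end_frame'])      # '300' in the track.cfg added by this commit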
1 change: 1 addition & 0 deletions Tracking/AlphaTracker/track.cfg
@@ -0,0 +1 @@
{'video_full_path': '/home/flexiv/AlphaTracker/Tracking/AlphaTracker/data/demo.mp4', 'start_frame': '0', 'end_frame': '300', 'max_pid_id_setting': '2', 'result_folder': './track_result/', 'remove_oriFrame': '0', 'vis_track_result': '1', 'weights': '0 6 0 0 0 0 ', 'match': '0'}
1 change: 1 addition & 0 deletions Tracking/AlphaTracker/train.cfg
@@ -0,0 +1 @@
{'image_root_list': '/home/flexiv/AlphaTracker/Tracking/AlphaTracker/data/sample_annotated_data/demo', 'json_file_list': '/home/flexiv/AlphaTracker/Tracking/AlphaTracker/data/sample_annotated_data/demo/train9.json', 'gpu_id': '0', 'num_mouse': '2', 'exp_name': 'demo1', 'num_pose': '4', 'pose_pair': '0-1,0-2,0-3', 'train_val_split': '0.9', 'image_suffix': 'jpg', 'sppe_lr': '0.0001', 'sppe_epoch': '10', 'sppe_pretrain': '', 'sppe_batchSize': '1', 'yolo_lr': '0.0005', 'yolo_iter': '60000', 'yolo_pretrain': '', 'yolo_batchSize': '1'}
2 changes: 1 addition & 1 deletion Tracking/AlphaTracker/train_yolo/darknet/src/gemm.c
@@ -229,7 +229,7 @@ void time_gpu(int TA, int TB, int m, int k, int n)
    clock_t start = clock(), end;
    for(i = 0; i<iter; ++i){
        gemm_gpu(TA,TB,m,n,k,1,a_cl,lda,b_cl,ldb,1,c_cl,n);
        cudaThreadSynchronize();
        cudaDeviceSynchronize();
    }
    double flop = ((double)m)*n*(2.*k + 2.)*iter;
    double gflop = flop/pow(10., 9);
23 changes: 23 additions & 0 deletions log/install1_log.txt
@@ -0,0 +1,23 @@
Wed Apr 13 17:51:13 2022
+-----------------------------------------------------------------------------+
| NVIDIA-SMI 455.23.05 Driver Version: 455.23.05 CUDA Version: 11.1 |
|-------------------------------+----------------------+----------------------+
| GPU Name Persistence-M| Bus-Id Disp.A | Volatile Uncorr. ECC |
| Fan Temp Perf Pwr:Usage/Cap| Memory-Usage | GPU-Util Compute M. |
| | | MIG M. |
|===============================+======================+======================|
| 0 GeForce GTX 165... Off | 00000000:01:00.0 Off | N/A |
| N/A 35C P8 7W / N/A | 5MiB / 3911MiB | 0% Default |
| | | N/A |
+-------------------------------+----------------------+----------------------+

+-----------------------------------------------------------------------------+
| Processes: |
| GPU GI CI PID Type Process name GPU Memory |
| ID ID Usage |
|=============================================================================|
| 0 N/A N/A 1583 G /usr/lib/xorg/Xorg 4MiB |
+-----------------------------------------------------------------------------+
====>Checked nvidia driver installation.

====>Checked anaconda installation.
Binary file added res/1_120x74.png
Binary file added res/2_120x74.jpg
Binary file added res/3_120x74.png
Binary file added res/4_120x74.png
Binary file added res/5_120x74.png
Binary file added res/About_72px.ico
20 changes: 20 additions & 0 deletions scripts/behavior.sh
@@ -0,0 +1,20 @@
#!/bin/bash

# if behavior button is clicked, this script will be called

echo behavior start


. ~/anaconda3/etc/profile.d/conda.sh

conda activate alphatracker

cd ./BehavioralClustering

bash run_all.sh

python fft_main_sep_twoMiceInteract.py


echo behavior over

8 changes: 4 additions & 4 deletions install.sh → scripts/install.sh
@@ -1,18 +1,18 @@
#!/bin/bash

# if install button is clicked, this script will be called

echo install start

conda create -n alphatracker python=3.8 -y
. ~/anaconda3/etc/profile.d/conda.sh

echo ". ~/anaconda3/etc/profile.d/conda.sh" >> ~/.bashrc
source ~/.bashrc
conda create -n alphatracker python=3.8 -y

conda activate alphatracker
# conda env update --file environment.yml

# Install pytorch
conda install pytorch==1.9.0 torchvision==0.10.0 torchaudio==0.9.0 cudatoolkit=11.1 -c pytorch -c conda-forge -y
conda install pytorch==1.8.0 torchvision==0.9.0 torchaudio==0.8.0 cudatoolkit=11.1 -c pytorch -c conda-forge -y

export PATH=/usr/local/cuda/bin/:$PATH
export LD_LIBRARY_PATH=/usr/local/cuda/lib64/:$LD_LIBRARY_PATH
10 changes: 10 additions & 0 deletions scripts/install1.sh
@@ -0,0 +1,10 @@
#!/bin/bash
nvidia-smi
echo '====>Checked nvidia driver installation.'
echo
. ~/anaconda3/etc/profile.d/conda.sh
echo '====>Checked anaconda installation.'



