-
Notifications
You must be signed in to change notification settings - Fork 0
/
Copy pathKITTI_Dataset_Odometry.py
88 lines (66 loc) · 2.61 KB
/
KITTI_Dataset_Odometry.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
import cv2
import numpy as np
from Utilities.VisualOdometry import VisualOdometry
from Utilities.Display import draw_trajectory
# ---------------------------------------------------------------
# Configuration: dataset location and rendering parameters
# ---------------------------------------------------------------
main_dir = "Data/KITTI_sequence_2/"  # <-- Sequence 2
#main_dir = "Data/KITTI_sequence_1/" # <-- Sequence 1
data_dir = f"{main_dir}image_l"      # Left-camera image directory
max_frame = 50
draw_scale = 5        # Scaling factor for drawing
img_size = (800, 800) # Trajectory image size

# Visual odometry engine, configured from this sequence's calibration file
vo = VisualOdometry(camera_calib_file=f"{main_dir}calib.txt")

# Ground truth: each line of poses.txt is a flattened 3x4 pose matrix;
# keep only the (x, z) translation components for 2-D trajectory plotting.
actual_poses_file = f"{main_dir}poses.txt"
with open(actual_poses_file, 'r') as f:
    actual_poses = [
        ((m := np.array(ln.split(), dtype=float).reshape(3, 4))[0, 3], m[2, 3])
        for ln in f
    ]
# ---------------------------------------------------------------
# Main processing loop: feed each frame to VO and visualize results
# ---------------------------------------------------------------
for i in range(max_frame):
    # Read the current frame. cv2.imread returns None (no exception) for a
    # missing or unreadable file, so guard before handing it to the VO.
    frame = cv2.imread(f"{data_dir}/{i:06d}.png")
    if frame is None:
        print(f"Warning: could not read frame {i:06d}.png; stopping early.")
        break

    # Update visual odometry with the current frame
    vo.update(frame)

    # Pull the latest state out of the VO object
    estimated_poses = vo.estimated_poses
    img_matches = vo.display_frame
    points = vo.points_3d
    pixels = vo.observations

    # Draw the trajectory once at least one pose has been estimated
    if estimated_poses:
        # (x, z) path taken from the translation column of each pose matrix
        path = [(pose[0, 3], pose[2, 3]) for pose in estimated_poses]
        # Orientation of the most recent pose
        rotation = estimated_poses[-1][:3, :3]
        traj_img = draw_trajectory(path, rotation, points, pixels, frame,
                                   actual_poses, img_size, draw_scale)
        cv2.imshow("Trajectory", traj_img)

    # Show the feature-match visualization when available
    if img_matches is not None:
        cv2.imshow("Matches", img_matches)

    # Allow early exit with 'q'
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break
# ---------------------------------------------------------------
# Final display: the VO is no longer updated after the main loop,
# so its state is frozen — render the trajectory once instead of
# redrawing the identical image on every iteration, then just poll
# for the quit key to keep the windows responsive.
# ---------------------------------------------------------------
estimated_poses = vo.estimated_poses
points = vo.points_3d
pixels = vo.observations

if estimated_poses:
    # Same (x, z) extraction as the main loop
    path = [(pose[0, 3], pose[2, 3]) for pose in estimated_poses]
    # Orientation of the final pose
    rotation = estimated_poses[-1][:3, :3]
    traj_img = draw_trajectory(path, rotation, points, pixels, frame,
                               actual_poses, img_size, draw_scale)
    cv2.imshow("Trajectory", traj_img)

# Block until 'q' is pressed
while cv2.waitKey(1) & 0xFF != ord('q'):
    pass

# Shut down visual odometry and close windows
vo.shutdown()
cv2.destroyAllWindows()