Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
Empty file.
79 changes: 0 additions & 79 deletions autonomy/behaviour/octo_map/octo_map/octo_map_node.py

This file was deleted.

20 changes: 0 additions & 20 deletions autonomy/behaviour/octo_map/package.xml

This file was deleted.

1 change: 0 additions & 1 deletion autonomy/behaviour/octo_map/resource/octo_map

This file was deleted.

4 changes: 0 additions & 4 deletions autonomy/behaviour/octo_map/setup.cfg

This file was deleted.

36 changes: 0 additions & 36 deletions autonomy/behaviour/octo_map/setup.py

This file was deleted.

26 changes: 26 additions & 0 deletions autonomy/perception/launch/pointcloud_demo.launch.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,26 @@
#!/usr/bin/env python3

from launch import LaunchDescription
from launch_ros.actions import Node


def generate_launch_description():
    """Bring up the perception demo: a dummy RGB-D publisher plus the
    depth-to-pointcloud converter node."""
    dummy_publisher = Node(
        package='perception',
        executable='dummy_publisher_node',
        name='dummy_publisher',
        output='screen',
        parameters=[],
    )

    pointcloud_publisher = Node(
        package='perception',
        executable='pc_publisher_node',
        name='pointcloud_publisher',
        output='screen',
        parameters=[],
    )

    return LaunchDescription([dummy_publisher, pointcloud_publisher])
40 changes: 39 additions & 1 deletion autonomy/perception/perception/dummy_publisher_node.py
Original file line number Diff line number Diff line change
Expand Up @@ -4,6 +4,8 @@

from sensor_msgs.msg import Image, CameraInfo
from std_msgs.msg import Header
from geometry_msgs.msg import TransformStamped
from tf2_ros import StaticTransformBroadcaster
import cv2
from cv_bridge import CvBridge
import numpy as np
Expand Down Expand Up @@ -33,16 +35,52 @@ def __init__(self):
qos_profile_sensor_data
)

# Add TF broadcaster for camera frames
self.tf_broadcaster = StaticTransformBroadcaster(self)
self.publish_static_transforms()

self.timer = self.create_timer(1.0, self.publish_dummy_data)

self.get_logger().info('Dummy camera publisher started (1 Hz)')

def publish_static_transforms(self):
    """Broadcast the static TF chain map -> camera_link -> camera_depth_optical_frame."""

    def build_transform(parent, child, translation, rotation):
        # Assemble one stamped transform from plain (x, y, z) / (x, y, z, w) tuples.
        tf = TransformStamped()
        tf.header.stamp = self.get_clock().now().to_msg()
        tf.header.frame_id = parent
        tf.child_frame_id = child
        (tf.transform.translation.x,
         tf.transform.translation.y,
         tf.transform.translation.z) = translation
        (tf.transform.rotation.x,
         tf.transform.rotation.y,
         tf.transform.rotation.z,
         tf.transform.rotation.w) = rotation
        return tf

    # Camera mounted 1.5 m above the map origin, identity orientation.
    map_to_camera = build_transform(
        'map', 'camera_link',
        (0.0, 0.0, 1.5),
        (0.0, 0.0, 0.0, 1.0),
    )

    # Standard camera optical frame convention (z forward, x right, y down):
    # 90 deg rotation about x, then 90 deg about z to align with the optical frame.
    camera_to_optical = build_transform(
        'camera_link', 'camera_depth_optical_frame',
        (0.0, 0.0, 0.0),
        (-0.5, 0.5, -0.5, 0.5),
    )

    self.tf_broadcaster.sendTransform([map_to_camera, camera_to_optical])

def publish_dummy_data(self):
now = self.get_clock().now().to_msg()

header = Header()
header.stamp = now
header.frame_id = 'camera_link'
header.frame_id = 'camera_depth_optical_frame'

# Create a simple scene with geometric shapes at different depths
rgb_image = np.zeros((480, 640, 3), dtype=np.uint8)
Expand Down
137 changes: 137 additions & 0 deletions autonomy/perception/perception/publish_pointcloud.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,137 @@
import rclpy
from rclpy.node import Node
from rclpy.qos import qos_profile_sensor_data
from sensor_msgs.msg import PointField
import sensor_msgs_py.point_cloud2 as pc2
from sensor_msgs.msg import Image, CameraInfo, PointCloud2
from std_msgs.msg import Header
import cv2
from cv_bridge import CvBridge
import numpy as np


class PCPublisherNode(Node):
    """Back-project depth images into XYZRGB PointCloud2 clouds.

    Subscribes:
        /camera/depth/image_raw   (Image)       depth, converted as millimeters
        /camera/depth/camera_info (CameraInfo)  pinhole intrinsics (K matrix)
        /camera/color/image_raw   (Image)       RGB frames for colorization
    Publishes:
        /camera/depth/pointcloud  (PointCloud2) in camera_depth_optical_frame
    """

    def __init__(self):
        super().__init__('pc_publisher_node')

        self.bridge = CvBridge()
        self.depth_sub = self.create_subscription(
            Image, '/camera/depth/image_raw', self.depth_2_pointcloud, qos_profile_sensor_data)
        self.rgb_sub = self.create_subscription(
            Image, '/camera/color/image_raw', self.rgb_callback, qos_profile_sensor_data)

        self.camera_info_sub = self.create_subscription(
            CameraInfo, '/camera/depth/camera_info', self.camera_info_callback, qos_profile_sensor_data)

        self.pc_pub = self.create_publisher(
            PointCloud2, '/camera/depth/pointcloud', qos_profile_sensor_data)

        # Default pinhole intrinsics; overwritten by the first CameraInfo message.
        self.fx, self.fy, self.cx, self.cy = 525.0, 525.0, 320.0, 240.0
        self.rgb_image = None  # latest RGB frame, or None until one arrives

        # Same filtering as voxel grid node (meters).
        self.max_range = 3.5
        self.min_range = 0.1

        self.get_logger().info('Point cloud publisher with RGB started')

    def camera_info_callback(self, msg):
        """Cache the pinhole intrinsics from the 3x3 K matrix (row-major)."""
        self.fx = msg.k[0]
        self.fy = msg.k[4]
        self.cx = msg.k[2]
        self.cy = msg.k[5]
        self.get_logger().info('Camera info received')

    def rgb_callback(self, msg):
        """Cache the most recent RGB frame for colorizing the cloud."""
        self.rgb_image = self.bridge.imgmsg_to_cv2(msg, desired_encoding="rgb8")

    def depth_2_pointcloud(self, depth_msg):
        """Back-project one depth image into 3D points and publish them.

        Points outside [min_range, max_range] or with non-finite coordinates
        are discarded. Surviving pixels are colorized with the latest RGB frame
        when its resolution matches the depth image.
        """
        depth_image = self.bridge.imgmsg_to_cv2(depth_msg, desired_encoding="passthrough")
        h, w = depth_image.shape

        # Depth is encoded in millimeters -- convert to meters.
        z = depth_image.astype(np.float32) / 1000.0

        uu, vv = np.meshgrid(np.arange(w), np.arange(h))

        # Standard pinhole back-projection.
        x = (uu - self.cx) * z / self.fx
        y = (vv - self.cy) * z / self.fy

        valid_mask = ((z > self.min_range) & (z < self.max_range)
                      & np.isfinite(x) & np.isfinite(y) & np.isfinite(z))

        x_valid = x[valid_mask]
        y_valid = y[valid_mask]
        z_valid = z[valid_mask]

        colors = None
        if self.rgb_image is not None and self.rgb_image.shape[:2] == (h, w):
            v_indices, u_indices = np.where(valid_mask)
            colors = self.rgb_image[v_indices, u_indices]  # Shape: (N, 3)

        pts = np.column_stack((x_valid, y_valid, z_valid)).astype(np.float32)

        self.get_logger().info(f'Processed point cloud: {pts.shape[0]} valid points out of {h*w} pixels')
        # BUGFIX: np.min/np.max raise ValueError on empty arrays; only log the
        # ranges when at least one point survived the filtering.
        if pts.shape[0] > 0:
            self.get_logger().info(f'Point ranges - X: [{np.min(x_valid):.3f}, {np.max(x_valid):.3f}], '
                                   f'Y: [{np.min(y_valid):.3f}, {np.max(y_valid):.3f}], '
                                   f'Z: [{np.min(z_valid):.3f}, {np.max(z_valid):.3f}]')

        self.publish_pointcloud(pts, colors)

    def publish_pointcloud(self, pts, colors=None):
        """Publish pts (N x 3 float32) as an XYZRGB PointCloud2.

        When colors is None, every point is painted bright green so the cloud
        stays visible in viewers.
        """
        if len(pts) == 0:
            self.get_logger().warn('No valid points to publish!')
            return

        header = Header()
        header.stamp = self.get_clock().now().to_msg()
        header.frame_id = 'camera_depth_optical_frame'  # Use standard ROS camera frame

        # The field layout is identical for colored and fallback clouds, so it
        # is defined once instead of being duplicated per branch.
        fields = [
            PointField(name='x', offset=0, datatype=PointField.FLOAT32, count=1),
            PointField(name='y', offset=4, datatype=PointField.FLOAT32, count=1),
            PointField(name='z', offset=8, datatype=PointField.FLOAT32, count=1),
            PointField(name='rgb', offset=12, datatype=PointField.UINT32, count=1),
        ]

        # Pack colors as 0x00RRGGBB, vectorized instead of a per-point Python loop.
        if colors is not None:
            c = colors.astype(np.uint32)
            rgb = (c[:, 0] << np.uint32(16)) | (c[:, 1] << np.uint32(8)) | c[:, 2]
        else:
            # Bright green fallback color for visibility.
            rgb = np.full(len(pts), np.uint32(255 << 8), dtype=np.uint32)

        points_list = [[float(p[0]), float(p[1]), float(p[2]), int(r)]
                       for p, r in zip(pts, rgb)]

        pc_msg = pc2.create_cloud(header, fields, points_list)
        self.pc_pub.publish(pc_msg)
        # Log after the publish actually happened (the old message claimed
        # "Published" before the call).
        self.get_logger().info(f'Published point cloud with {len(points_list)} points')



def main(args=None):
    """Entry point: initialize rclpy, spin the node, and clean up on exit."""
    rclpy.init(args=args)
    pc_node = PCPublisherNode()

    try:
        rclpy.spin(pc_node)
    except KeyboardInterrupt:
        # Ctrl-C is the expected shutdown path; exit quietly.
        pass
    finally:
        pc_node.destroy_node()
        rclpy.shutdown()


if __name__ == '__main__':
    main()
3 changes: 2 additions & 1 deletion autonomy/perception/setup.py
Original file line number Diff line number Diff line change
Expand Up @@ -30,7 +30,8 @@
tests_require=['pytest'],
entry_points={
'console_scripts': [
'dummy_publisher_node = perception.dummy_publisher_node:main'
'dummy_publisher_node = perception.dummy_publisher_node:main',
'pc_publisher_node = perception.publish_pointcloud:main'
],
},
)
Loading
Loading