diff --git a/autonomy/behaviour/octo_map/octo_map/__init__.py b/autonomy/behaviour/octo_map/octo_map/__init__.py
deleted file mode 100644
index e69de29..0000000
diff --git a/autonomy/behaviour/octo_map/octo_map/octo_map_node.py b/autonomy/behaviour/octo_map/octo_map/octo_map_node.py
deleted file mode 100644
index 6a1fb17..0000000
--- a/autonomy/behaviour/octo_map/octo_map/octo_map_node.py
+++ /dev/null
@@ -1,79 +0,0 @@
-import rclpy
-from rclpy.node import Node
-import numpy as np
-import torch
-from spconv.pytorch.utils import PointToVoxel
-import cv2
-from cv_bridge import CvBridge
-
-from sensor_msgs.msg import Image, PointCloud2, PointField, CameraInfo
-from std_msgs.msg import Header
-import sensor_msgs_py.point_cloud2 as pc2
-
-
-class OctoMapNode(Node):
- def __init__(self):
- super().__init__('octo_map_node')
-
- # Subscribe to processed perception data
- self.rgb_sub = self.create_subscription(
- Image, '/perception/rgb_processed', self.rgb_callback, 10)
- self.depth_sub = self.create_subscription(
- Image, '/perception/depth_processed', self.depth_callback, 10)
- self.info_sub = self.create_subscription(
- CameraInfo, '/perception/camera_info', self.info_callback, 10)
-
- self.octo_pub = self.create_publisher(
- PointCloud2, '/behaviour/octo_map', 10)
-
- self.rgb_image = None
- self.depth_image = None
- self.bridge = CvBridge()
-
- # Camera intrinsics (will be updated from camera info)
- self.fx, self.fy, self.cx, self.cy = 525.0, 525.0, 320.0, 240.0
-
- self.voxel_size = 0.04
- self.max_range = 3.5
-
- self.get_logger().info('OctoMap Node started - subscribing to processed perception data')
-
- def info_callback(self, msg):
- """Update camera intrinsics from perception node."""
- self.fx = msg.k[0]
- self.fy = msg.k[4]
- self.cx = msg.k[2]
- self.cy = msg.k[5]
- self.get_logger().info(
- f'Updated camera intrinsics: fx={self.fx}, fy={self.fy}')
-
- def rgb_callback(self, msg):
- """Receive processed RGB image from perception node."""
- self.rgb_image = self.bridge.imgmsg_to_cv2(msg, 'bgr8')
- self.process_rgbd_if_ready()
- self.get_logger().info('Received processed RGB image from perception')
-
- def depth_callback(self, msg):
- """Receive processed depth image from perception node."""
- self.depth_image = self.bridge.imgmsg_to_cv2(msg, 'passthrough')
- self.process_rgbd_if_ready()
- self.get_logger().info('Received processed depth image from perception')
-
- # TODO: Implement point cloud generation and octomap updating
-
-
-def main(args=None):
- rclpy.init(args=args)
- node = OctoMapNode()
-
- try:
- rclpy.spin(node)
- except KeyboardInterrupt:
- pass
- finally:
- node.destroy_node()
- rclpy.shutdown()
-
-
-if __name__ == '__main__':
- main()
diff --git a/autonomy/behaviour/octo_map/package.xml b/autonomy/behaviour/octo_map/package.xml
deleted file mode 100644
index e3c107f..0000000
--- a/autonomy/behaviour/octo_map/package.xml
+++ /dev/null
@@ -1,20 +0,0 @@
-
-
-
- octo_map
- 0.0.0
- OctoMap generation for 3D mapping
- watonomous
- Apache-2.0
-
- rclpy
-
- ament_copyright
- ament_flake8
- ament_pep257
- python3-pytest
-
-
- ament_python
-
-
\ No newline at end of file
diff --git a/autonomy/behaviour/octo_map/resource/octo_map b/autonomy/behaviour/octo_map/resource/octo_map
deleted file mode 100644
index af2095f..0000000
--- a/autonomy/behaviour/octo_map/resource/octo_map
+++ /dev/null
@@ -1 +0,0 @@
-voxel_grid
\ No newline at end of file
diff --git a/autonomy/behaviour/octo_map/setup.cfg b/autonomy/behaviour/octo_map/setup.cfg
deleted file mode 100644
index 09c9c3a..0000000
--- a/autonomy/behaviour/octo_map/setup.cfg
+++ /dev/null
@@ -1,4 +0,0 @@
-[develop]
-script-dir=$base/lib/voxel_grid
-[install]
-install-scripts=$base/lib/voxel_grid
\ No newline at end of file
diff --git a/autonomy/behaviour/octo_map/setup.py b/autonomy/behaviour/octo_map/setup.py
deleted file mode 100644
index b71e5a3..0000000
--- a/autonomy/behaviour/octo_map/setup.py
+++ /dev/null
@@ -1,36 +0,0 @@
-import os
-from glob import glob
-from setuptools import setup
-
-package_name = 'octo_map'
-
-setup(
- name=package_name,
- version='0.0.0',
- packages=[package_name],
- data_files=[
- # Install marker file in the package index
- ('share/ament_index/resource_index/packages',
- ['resource/' + package_name]),
- # Include our package.xml file
- (os.path.join('share', package_name), ['package.xml']),
- # Include all launch files.
- (os.path.join('share', package_name, 'launch'),
- glob(os.path.join('launch', '*.launch.py'))),
- # Include all config files.
- (os.path.join('share', package_name, 'config'),
- glob(os.path.join('config', '*.yaml'))),
- ],
- install_requires=['setuptools'],
- zip_safe=True,
- maintainer='watonomous',
- maintainer_email='watonomous@uwaterloo.ca',
- description='OctoMap generation for 3D mapping',
- license='Apache-2.0',
- tests_require=['pytest'],
- entry_points={
- 'console_scripts': [
- 'octo_map_node = octo_map.octo_map_node:main'
- ],
- },
-)
diff --git a/autonomy/perception/launch/pointcloud_demo.launch.py b/autonomy/perception/launch/pointcloud_demo.launch.py
new file mode 100644
index 0000000..fa01b3e
--- /dev/null
+++ b/autonomy/perception/launch/pointcloud_demo.launch.py
@@ -0,0 +1,26 @@
+#!/usr/bin/env python3
+
+from launch import LaunchDescription
+from launch_ros.actions import Node
+
+
+def generate_launch_description():
+ return LaunchDescription([
+ # Launch the dummy publisher (publishes depth images and camera info)
+ Node(
+ package='perception',
+ executable='dummy_publisher_node',
+ name='dummy_publisher',
+ output='screen',
+ parameters=[],
+ ),
+
+ # Launch the point cloud publisher (converts depth to point cloud)
+ Node(
+ package='perception',
+ executable='pc_publisher_node',
+ name='pointcloud_publisher',
+ output='screen',
+ parameters=[],
+ ),
+ ])
\ No newline at end of file
diff --git a/autonomy/perception/perception/dummy_publisher_node.py b/autonomy/perception/perception/dummy_publisher_node.py
index 3d67189..8f8e945 100644
--- a/autonomy/perception/perception/dummy_publisher_node.py
+++ b/autonomy/perception/perception/dummy_publisher_node.py
@@ -4,6 +4,8 @@
from sensor_msgs.msg import Image, CameraInfo
from std_msgs.msg import Header
+from geometry_msgs.msg import TransformStamped
+from tf2_ros import StaticTransformBroadcaster
import cv2
from cv_bridge import CvBridge
import numpy as np
@@ -33,16 +35,52 @@ def __init__(self):
qos_profile_sensor_data
)
+ # Add TF broadcaster for camera frames
+ self.tf_broadcaster = StaticTransformBroadcaster(self)
+ self.publish_static_transforms()
+
self.timer = self.create_timer(1.0, self.publish_dummy_data)
self.get_logger().info('Dummy camera publisher started (1 Hz)')
+ def publish_static_transforms(self):
+ """Publish static transforms for camera frames"""
+ # Transform from base_link/map to camera_link
+ t1 = TransformStamped()
+ t1.header.stamp = self.get_clock().now().to_msg()
+ t1.header.frame_id = 'map'
+ t1.child_frame_id = 'camera_link'
+ t1.transform.translation.x = 0.0
+ t1.transform.translation.y = 0.0
+ t1.transform.translation.z = 1.5
+ t1.transform.rotation.x = 0.0
+ t1.transform.rotation.y = 0.0
+ t1.transform.rotation.z = 0.0
+ t1.transform.rotation.w = 1.0
+
+ # Transform from camera_link to camera_depth_optical_frame
+ t2 = TransformStamped()
+ t2.header.stamp = self.get_clock().now().to_msg()
+ t2.header.frame_id = 'camera_link'
+ t2.child_frame_id = 'camera_depth_optical_frame'
+ # Standard camera optical frame convention (z forward, x right, y down)
+ t2.transform.translation.x = 0.0
+ t2.transform.translation.y = 0.0
+ t2.transform.translation.z = 0.0
+        # rpy(-90°, 0, -90°): ROS camera_link -> optical frame convention
+ t2.transform.rotation.x = -0.5
+ t2.transform.rotation.y = 0.5
+ t2.transform.rotation.z = -0.5
+ t2.transform.rotation.w = 0.5
+
+ self.tf_broadcaster.sendTransform([t1, t2])
+
def publish_dummy_data(self):
now = self.get_clock().now().to_msg()
header = Header()
header.stamp = now
- header.frame_id = 'camera_link'
+ header.frame_id = 'camera_depth_optical_frame'
# Create a simple scene with geometric shapes at different depths
rgb_image = np.zeros((480, 640, 3), dtype=np.uint8)
diff --git a/autonomy/perception/perception/publish_pointcloud.py b/autonomy/perception/perception/publish_pointcloud.py
new file mode 100644
index 0000000..937a1d2
--- /dev/null
+++ b/autonomy/perception/perception/publish_pointcloud.py
@@ -0,0 +1,137 @@
+import rclpy
+from rclpy.node import Node
+from rclpy.qos import qos_profile_sensor_data
+from sensor_msgs.msg import PointField
+import sensor_msgs_py.point_cloud2 as pc2
+from sensor_msgs.msg import Image, CameraInfo, PointCloud2
+from std_msgs.msg import Header
+import cv2
+from cv_bridge import CvBridge
+import numpy as np
+
+
+class PCPublisherNode(Node):
+ def __init__(self):
+ super().__init__('pc_publisher_node')
+
+ self.bridge = CvBridge()
+ self.depth_sub = self.create_subscription(Image, '/camera/depth/image_raw', self.depth_2_pointcloud, qos_profile_sensor_data)
+ self.rgb_sub = self.create_subscription(Image, '/camera/color/image_raw', self.rgb_callback, qos_profile_sensor_data)
+
+ self.camera_info_sub = self.create_subscription(CameraInfo, '/camera/depth/camera_info', self.camera_info_callback, qos_profile_sensor_data)
+
+ self.pc_pub = self.create_publisher(PointCloud2, '/camera/depth/pointcloud', qos_profile_sensor_data)
+
+ self.fx, self.fy, self.cx, self.cy = 525.0, 525.0, 320.0, 240.0
+ self.rgb_image = None
+
+ # Same filtering as voxel grid node
+ self.max_range = 3.5
+ self.min_range = 0.1
+
+ self.get_logger().info('Point cloud publisher with RGB started')
+
+ def camera_info_callback(self, msg):
+ self.fx = msg.k[0]
+ self.fy = msg.k[4]
+ self.cx = msg.k[2]
+ self.cy = msg.k[5]
+        self.get_logger().info('Camera info received')
+
+ def rgb_callback(self, msg):
+ self.rgb_image = self.bridge.imgmsg_to_cv2(msg, desired_encoding="rgb8")
+
+ def depth_2_pointcloud(self, depth_msg):
+
+ depth_image = self.bridge.imgmsg_to_cv2(depth_msg, desired_encoding="passthrough")
+ h, w = depth_image.shape
+
+ z = depth_image.astype(np.float32) / 1000.0
+
+ uu, vv = np.meshgrid(np.arange(w), np.arange(h))
+
+ x = (uu - self.cx) * z / self.fx
+ y = (vv - self.cy) * z / self.fy
+
+ valid_mask = (z > self.min_range) & (z < self.max_range) & np.isfinite(x) & np.isfinite(y) & np.isfinite(z)
+
+ x_valid = x[valid_mask]
+ y_valid = y[valid_mask]
+ z_valid = z[valid_mask]
+
+ colors = None
+ if self.rgb_image is not None and self.rgb_image.shape[:2] == (h, w):
+ v_indices, u_indices = np.where(valid_mask)
+ colors = self.rgb_image[v_indices, u_indices] # Shape: (N, 3)
+
+ pts = np.column_stack((x_valid, y_valid, z_valid)).astype(np.float32)
+
+ self.get_logger().info(f'Processed point cloud: {pts.shape[0]} valid points out of {h*w} pixels')
+        if z_valid.size:  # np.min/np.max raise ValueError on empty arrays
+            self.get_logger().info(f'Point ranges - X: [{np.min(x_valid):.3f}, {np.max(x_valid):.3f}], '
+                                   f'Y: [{np.min(y_valid):.3f}, {np.max(y_valid):.3f}], Z: [{np.min(z_valid):.3f}, {np.max(z_valid):.3f}]')
+
+ self.publish_pointcloud(pts, colors)
+
+
+ def publish_pointcloud(self, pts, colors=None):
+ if len(pts) == 0:
+ self.get_logger().warn('No valid points to publish!')
+ return
+
+ header = Header()
+ header.stamp = self.get_clock().now().to_msg()
+ header.frame_id = 'camera_depth_optical_frame' # Use standard ROS camera frame
+
+ # Prepare point data with optional colors
+ if colors is not None:
+ # Create XYZRGB points
+ points_list = []
+ for i in range(len(pts)):
+ # Convert RGB to packed RGB format
+ r, g, b = colors[i]
+ rgb = (int(r) << 16) | (int(g) << 8) | int(b)
+ points_list.append([pts[i][0], pts[i][1], pts[i][2], rgb])
+
+ fields = [
+ PointField(name='x', offset=0, datatype=PointField.FLOAT32, count=1),
+ PointField(name='y', offset=4, datatype=PointField.FLOAT32, count=1),
+ PointField(name='z', offset=8, datatype=PointField.FLOAT32, count=1),
+ PointField(name='rgb', offset=12, datatype=PointField.UINT32, count=1),
+ ]
+            self.get_logger().info(f'Publishing colored point cloud with {len(points_list)} points')
+ else:
+ # Create XYZ-only points with bright color for visibility
+ points_list = []
+ bright_green = (0 << 16) | (255 << 8) | 0 # Bright green RGB
+ for i in range(len(pts)):
+ points_list.append([pts[i][0], pts[i][1], pts[i][2], bright_green])
+
+ fields = [
+ PointField(name='x', offset=0, datatype=PointField.FLOAT32, count=1),
+ PointField(name='y', offset=4, datatype=PointField.FLOAT32, count=1),
+ PointField(name='z', offset=8, datatype=PointField.FLOAT32, count=1),
+ PointField(name='rgb', offset=12, datatype=PointField.UINT32, count=1),
+ ]
+            self.get_logger().info(f'Publishing bright green point cloud with {len(points_list)} points')
+
+ pc_msg = pc2.create_cloud(header, fields, points_list)
+ self.pc_pub.publish(pc_msg)
+
+
+
+def main(args=None):
+ rclpy.init(args=args)
+ node = PCPublisherNode()
+
+ try:
+ rclpy.spin(node)
+ except KeyboardInterrupt:
+ pass
+ finally:
+ node.destroy_node()
+ rclpy.shutdown()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/autonomy/perception/setup.py b/autonomy/perception/setup.py
index c00c154..0e092c9 100644
--- a/autonomy/perception/setup.py
+++ b/autonomy/perception/setup.py
@@ -30,7 +30,8 @@
tests_require=['pytest'],
entry_points={
'console_scripts': [
- 'dummy_publisher_node = perception.dummy_publisher_node:main'
+ 'dummy_publisher_node = perception.dummy_publisher_node:main',
+ 'pc_publisher_node = perception.publish_pointcloud:main'
],
},
)
diff --git a/docker/behaviour/octo_map/octo_map.Dockerfile b/docker/behaviour/octomap/octomap.Dockerfile
similarity index 75%
rename from docker/behaviour/octo_map/octo_map.Dockerfile
rename to docker/behaviour/octomap/octomap.Dockerfile
index 5889363..979d299 100644
--- a/docker/behaviour/octo_map/octo_map.Dockerfile
+++ b/docker/behaviour/octomap/octomap.Dockerfile
@@ -3,20 +3,13 @@ ARG BASE_IMAGE=ghcr.io/watonomous/robot_base/base:humble-ubuntu22.04
################################ Source ################################
FROM ${BASE_IMAGE} AS source
-ARG AMENT_WS=/home/wato/wato_ws
+ARG AMENT_WS=/root/ament_ws
WORKDIR ${AMENT_WS}/src
# Copy in source code
-COPY autonomy/behaviour/octo_map octo_map
COPY autonomy/wato_msgs/common_msgs wato_msgs/common_msgs
-# Scan for rosdeps
-# RUN apt-get -qq update
-# RUN rosdep update
-# RUN rosdep install --from-paths . --ignore-src -r -s \
-# | grep 'apt-get install' \
-# | awk '{print $3}' \
-# | sort > /tmp/colcon_install_list
+RUN git clone https://github.com/OctoMap/octomap_mapping.git
RUN apt-get -qq update
RUN rosdep update
@@ -46,15 +39,20 @@ RUN apt-get update && \
build-essential \
git \
cmake \
- ninja-build \
python3 \
python3-pip \
python3-dev \
python3-setuptools \
- curl \
- ca-certificates \
- gnupg2 \
- lsb-release && \
+ ros-humble-octomap \
+ ros-humble-octomap-msgs \
+ ros-humble-octomap-ros \
+ ros-humble-octomap-rviz-plugins \
+ ros-humble-pcl-ros \
+ ros-humble-pcl-conversions \
+ ros-humble-image-transport \
+ ros-humble-depth-image-proc \
+ ros-humble-tf2-ros \
+ ros-humble-tf2-geometry-msgs && \
rm -rf /var/lib/apt/lists/*
RUN python3 -m pip install --upgrade pip
@@ -64,12 +62,7 @@ RUN python3 -m pip install --no-cache-dir \
ccimport>=0.4.4 \
pybind11>=2.6.0 \
numpy \
- fire \
- cv_bridge \
- opencv-python
-
-# Install prebuilt spconv-cu120 (compatible with CUDA 12.2 due to minor version compatibility)
-RUN python3 -m pip install --no-cache-dir spconv-cu120
+ fire
# Dependency Cleanup
WORKDIR /
@@ -78,15 +71,17 @@ RUN apt-get -qq autoremove -y && apt-get -qq autoclean && apt-get -qq clean && \
################################ Build ################################
FROM dependencies AS build
+ARG AMENT_WS=/root/ament_ws
# Build ROS2 packages
WORKDIR ${AMENT_WS}
+COPY --from=source ${AMENT_WS}/src ./src
RUN . /opt/ros/$ROS_DISTRO/setup.sh && \
colcon build \
--cmake-args -DCMAKE_BUILD_TYPE=Release --install-base ${WATONOMOUS_INSTALL}
-# Source and Build Artifact Cleanup
-RUN rm -rf src/* build/* devel/* install/* log/*
+# Build Artifact Cleanup (keep src for development)
+RUN rm -rf build/* devel/* log/*
# Entrypoint will run before any CMD on launch. Sources ~/opt//setup.bash and ~/ament_ws/install/setup.bash
COPY docker/wato_ros_entrypoint.sh ${AMENT_WS}/wato_ros_entrypoint.sh
diff --git a/modules/docker-compose.behaviour.yaml b/modules/docker-compose.behaviour.yaml
index 1f2011b..8b8deb9 100644
--- a/modules/docker-compose.behaviour.yaml
+++ b/modules/docker-compose.behaviour.yaml
@@ -40,48 +40,31 @@ services:
- ${MONO_DIR}/autonomy/behaviour/voxel_grid:/root/ament_ws/src/voxel_grid
command: tail -F anything
- # octo_map:
- # build:
- # context: ..
- # dockerfile: docker/behaviour/octo_map/octo_map.Dockerfile
- # cache_from:
- # - "${BEHAVIOUR_OCTO_IMAGE:?}:${TAG}"
- # - "${BEHAVIOUR_OCTO_IMAGE:?}:main"
- # args:
- # BASE_IMAGE: ${BASE_IMAGE_OVERRIDE-}
- # image: "${BEHAVIOUR_OCTO_IMAGE:?}:${TAG}"
- # runtime: nvidia
- # environment:
- # - NVIDIA_VISIBLE_DEVICES=all
- # - NVIDIA_DRIVER_CAPABILITIES=compute,utility
- # profiles: [deploy]
- # working_dir: /root/ament_ws
- # deploy:
- # resources:
- # reservations:
- # devices:
- # - driver: nvidia
- # capabilities: [gpu]
- # volumes:
- # - ${MONO_DIR}/autonomy/behaviour/octo_map:/root/ament_ws/src/octo_map
- # command: tail -F anything
+ octomap:
+ build: &octomap_build
+ context: ..
+ dockerfile: docker/behaviour/octomap/octomap.Dockerfile
+ cache_from:
+ - "${BEHAVIOUR_OCTO_IMAGE:?}:${TAG}"
+ - "${BEHAVIOUR_OCTO_IMAGE:?}:main"
+ args:
+ BASE_IMAGE: ${BASE_IMAGE_OVERRIDE-}
+ image: "${BEHAVIOUR_OCTO_IMAGE:?}:${TAG}"
+ runtime: nvidia
+ profiles: [deploy]
+ working_dir: /root/ament_ws
+ deploy:
+ resources:
+ reservations:
+ devices:
+ - driver: nvidia
+ capabilities: [gpu]
+ command: tail -F anything
- # octo_map_dev:
- # build:
- # context: ..
- # dockerfile: docker/behaviour/octo_map/octo_map.Dockerfile
- # cache_from:
- # - "${BEHAVIOUR_OCTO_IMAGE:?}:dev_${TAG}"
- # - "${BEHAVIOUR_OCTO_IMAGE:?}:main"
- # args:
- # BASE_IMAGE: ${BASE_IMAGE_OVERRIDE-}
- # image: "${BEHAVIOUR_OCTO_IMAGE:?}:dev_${TAG}"
- # runtime: nvidia
- # environment:
- # - NVIDIA_VISIBLE_DEVICES=all
- # - NVIDIA_DRIVER_CAPABILITIES=compute,utility
- # profiles: [develop]
- # working_dir: /root/ament_ws
- # volumes:
- # - ${MONO_DIR}/autonomy/behaviour/octo_map:/root/ament_ws/src/octo_map
- # command: tail -F anything
+ octomap_dev:
+ build: *octomap_build
+ image: "${BEHAVIOUR_OCTO_IMAGE:?}:dev_${TAG}"
+ runtime: nvidia
+ profiles: [develop]
+ working_dir: /root/ament_ws
+ command: tail -F anything