From 1d59b3891d6d1d117a41194bf8986d44978bc316 Mon Sep 17 00:00:00 2001 From: Ivan Nikolic Date: Fri, 30 Jan 2026 01:07:14 +0800 Subject: [PATCH 01/36] feat: add RerunBridge module and CLI tool - Add RerunBridgeModule that subscribes to all LCM messages and logs those with to_rerun() methods to Rerun viewer - Add SubscribeAllCapable protocol to pubsub spec for type-safe subscribe_all support - Add rerun-bridge CLI tool with --viewer-mode native|web|none - Add "none" option to ViewerBackend for headless operation - Add unitree_go2_bridge blueprint variant --- dimos/core/global_config.py | 2 +- dimos/protocol/pubsub/spec.py | 16 +- dimos/robot/all_blueprints.py | 1 + .../unitree_webrtc/unitree_go2_blueprints.py | 4 + dimos/visualization/rerun/bridge.py | 143 ++++++++++++++++++ pyproject.toml | 1 + 6 files changed, 165 insertions(+), 2 deletions(-) create mode 100644 dimos/visualization/rerun/bridge.py diff --git a/dimos/core/global_config.py b/dimos/core/global_config.py index d83544344..3e97a0bbf 100644 --- a/dimos/core/global_config.py +++ b/dimos/core/global_config.py @@ -20,7 +20,7 @@ from dimos.mapping.occupancy.path_map import NavigationStrategy -ViewerBackend: TypeAlias = Literal["rerun-web", "rerun-native", "foxglove"] +ViewerBackend: TypeAlias = Literal["rerun-web", "rerun-native", "foxglove", "none"] def _get_all_numbers(s: str) -> list[float]: diff --git a/dimos/protocol/pubsub/spec.py b/dimos/protocol/pubsub/spec.py index 389439b43..12c1bbe97 100644 --- a/dimos/protocol/pubsub/spec.py +++ b/dimos/protocol/pubsub/spec.py @@ -17,10 +17,24 @@ from collections.abc import AsyncIterator, Callable from contextlib import asynccontextmanager from dataclasses import dataclass -from typing import Any, Generic, TypeVar +from typing import Any, Generic, Protocol, TypeVar, runtime_checkable MsgT = TypeVar("MsgT") TopicT = TypeVar("TopicT") +MsgT_co = TypeVar("MsgT_co", covariant=True) +TopicT_co = TypeVar("TopicT_co", covariant=True) + + +@runtime_checkable +class SubscribeAllCapable(Protocol[MsgT_co, TopicT_co]): + """Protocol for pubsubs that support subscribe_all. + + Both AllPubSub (native) and DiscoveryPubSub (synthesized) satisfy this. + """ + + def subscribe_all(self, callback: Callable[[Any, Any], Any]) -> Callable[[], None]: + """Subscribe to all topics.""" + ... 
class PubSubBaseMixin(Generic[TopicT, MsgT]): diff --git a/dimos/robot/all_blueprints.py b/dimos/robot/all_blueprints.py index ccd7ef185..a7f2e7dde 100644 --- a/dimos/robot/all_blueprints.py +++ b/dimos/robot/all_blueprints.py @@ -50,6 +50,7 @@ "unitree-go2-basic": "dimos.robot.unitree_webrtc.unitree_go2_blueprints:unitree_go2_basic", "unitree-go2-detection": "dimos.robot.unitree_webrtc.unitree_go2_blueprints:unitree_go2_detection", "unitree-go2-ros": "dimos.robot.unitree_webrtc.unitree_go2_blueprints:unitree_go2_ros", + "unitree-go2-bridge": "dimos.robot.unitree_webrtc.unitree_go2_blueprints:unitree_go2_bridge", "unitree-go2-spatial": "dimos.robot.unitree_webrtc.unitree_go2_blueprints:unitree_go2_spatial", "unitree-go2-temporal-memory": "dimos.robot.unitree_webrtc.unitree_go2_blueprints:unitree_go2_temporal_memory", "unitree-go2-vlm-stream-test": "dimos.robot.unitree_webrtc.unitree_go2_blueprints:unitree_go2_vlm_stream_test", diff --git a/dimos/robot/unitree_webrtc/unitree_go2_blueprints.py b/dimos/robot/unitree_webrtc/unitree_go2_blueprints.py index 6dda182c1..2da7c8a49 100644 --- a/dimos/robot/unitree_webrtc/unitree_go2_blueprints.py +++ b/dimos/robot/unitree_webrtc/unitree_go2_blueprints.py @@ -62,6 +62,7 @@ from dimos.robot.unitree.connection.go2 import GO2Connection, go2_connection from dimos.robot.unitree_webrtc.unitree_skill_container import unitree_skills from dimos.utils.monitoring import utilization +from dimos.visualization.rerun.bridge import rerun_bridge from dimos.web.websocket_vis.websocket_vis_module import websocket_vis _GO2_URDF = Path(_go2_mod.__file__).parent.parent / "go2" / "go2.urdf" @@ -107,6 +108,9 @@ wavefront_frontier_explorer(), ).global_config(n_dask_workers=6, robot_model="unitree_go2") +unitree_go2_bridge = autoconnect(unitree_go2, rerun_bridge()) + + unitree_go2_ros = unitree_go2.transports( { ("lidar", PointCloud2): ROSTransport("lidar", PointCloud2), diff --git a/dimos/visualization/rerun/bridge.py b/dimos/visualization/rerun/bridge.py new file mode 100644 index 000000000..cbdd2ce5a --- /dev/null +++ b/dimos/visualization/rerun/bridge.py @@ -0,0 +1,143 @@ +# Copyright 2025-2026 Dimensional Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +"""Rerun bridge for logging pubsub messages with to_rerun() methods.""" + +from __future__ import annotations + +from dataclasses import dataclass, field +from typing import TYPE_CHECKING, Any, Literal + +import rerun as rr + +from dimos.core import Module, rpc +from dimos.core.module import ModuleConfig +from dimos.protocol.pubsub.impl.lcmpubsub import LCM + +if TYPE_CHECKING: + from collections.abc import Callable + + from dimos.protocol.pubsub.spec import SubscribeAllCapable + +ViewerMode = Literal["native", "web", "none"] + + +@dataclass +class Config(ModuleConfig): + """Configuration for RerunBridgeModule.""" + + pubsubs: list[SubscribeAllCapable[Any, Any]] = field( + default_factory=lambda: [LCM(autoconf=True)] + ) + entity_prefix: str = "world" + topic_to_entity: Callable[[Any], str] | None = None + viewer_mode: ViewerMode = "native" + + +class RerunBridgeModule(Module): + """Bridge that logs messages from pubsubs to Rerun. + + Spawns its own Rerun viewer and subscribes to all topics on each provided + pubsub. Any message that has a to_rerun() method is automatically logged. + + Example: + from dimos.protocol.pubsub.impl.lcmpubsub import LCM + + lcm = LCM(autoconf=True) + bridge = RerunBridgeModule(pubsubs=[lcm]) + bridge.start() + # All messages with to_rerun() are now logged to Rerun + bridge.stop() + """ + + default_config = Config + config: Config + + def _get_entity_path(self, topic: Any) -> str: + """Convert a topic to a Rerun entity path.""" + if self.config.topic_to_entity: + return self.config.topic_to_entity(topic) + + # Default: use topic.name if available (LCM Topic), else str + topic_str = getattr(topic, "name", None) or str(topic) + return f"{self.config.entity_prefix}/{topic_str}" + + def _on_message(self, msg: Any, topic: Any) -> None: + """Handle incoming message - log to rerun if it has to_rerun.""" + if not hasattr(msg, "to_rerun"): + return + + rerun_data = msg.to_rerun() + + # TFMessage returns list of (entity_path, transform) tuples + if isinstance(rerun_data, list): + for entity_path, archetype in rerun_data: + rr.log(entity_path, archetype) + else: + entity_path = self._get_entity_path(topic) + rr.log(entity_path, rerun_data) + + @rpc + def start(self) -> None: + from reactivex.disposable import Disposable + + super().start() + + # Initialize and spawn Rerun viewer + rr.init("dimos-bridge") + if self.config.viewer_mode == "native": + rr.spawn(connect=True) + elif self.config.viewer_mode == "web": + rr.serve_web_viewer(open_browser=True) + # "none" - just init, no viewer (connect externally) + + # Start pubsubs and subscribe to all messages + for pubsub in self.config.pubsubs: + if hasattr(pubsub, "start"): + pubsub.start() # type: ignore[union-attr] + unsub = pubsub.subscribe_all(self._on_message) + self._disposables.add(Disposable(unsub)) + + # Add pubsub stop as disposable + for pubsub in self.config.pubsubs: + if hasattr(pubsub, "stop"): + self._disposables.add(Disposable(pubsub.stop)) # type: ignore[union-attr] + + +rerun_bridge = RerunBridgeModule.blueprint + + +def main() -> None: + """CLI entry point for rerun-bridge.""" + import argparse + import signal + + parser = argparse.ArgumentParser(description="Rerun bridge for LCM messages") + parser.add_argument( + "--viewer-mode", + choices=["native", "web", "none"], + default="native", + help="Viewer mode: native (desktop), web (browser), none (headless)", + ) + args = parser.parse_args() + + bridge = RerunBridgeModule(viewer_mode=args.viewer_mode) + bridge.start() + + signal.signal(signal.SIGINT, 
lambda *_: bridge.stop()) + signal.pause() + + +if __name__ == "__main__": + main() diff --git a/pyproject.toml b/pyproject.toml index 00636e011..669439791 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -78,6 +78,7 @@ skillspy = "dimos.utils.cli.skillspy.skillspy:main" agentspy = "dimos.utils.cli.agentspy.agentspy:main" humancli = "dimos.utils.cli.human.humanclianim:main" dimos = "dimos.robot.cli.dimos:main" +rerun-bridge = "dimos.visualization.rerun.bridge:main" doclinks = "dimos.utils.docs.doclinks:main" [project.optional-dependencies] From 1d417697085ac65ee32af1f23f5409bf84bac78b Mon Sep 17 00:00:00 2001 From: Ivan Nikolic Date: Fri, 30 Jan 2026 13:23:52 +0800 Subject: [PATCH 02/36] rerun bridge blueprint --- dimos/robot/all_blueprints.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/dimos/robot/all_blueprints.py b/dimos/robot/all_blueprints.py index a7f2e7dde..15bbf3f10 100644 --- a/dimos/robot/all_blueprints.py +++ b/dimos/robot/all_blueprints.py @@ -48,9 +48,9 @@ "unitree-go2-agentic-mcp": "dimos.robot.unitree_webrtc.unitree_go2_blueprints:unitree_go2_agentic_mcp", "unitree-go2-agentic-ollama": "dimos.robot.unitree_webrtc.unitree_go2_blueprints:unitree_go2_agentic_ollama", "unitree-go2-basic": "dimos.robot.unitree_webrtc.unitree_go2_blueprints:unitree_go2_basic", + "unitree-go2-bridge": "dimos.robot.unitree_webrtc.unitree_go2_blueprints:unitree_go2_bridge", "unitree-go2-detection": "dimos.robot.unitree_webrtc.unitree_go2_blueprints:unitree_go2_detection", "unitree-go2-ros": "dimos.robot.unitree_webrtc.unitree_go2_blueprints:unitree_go2_ros", - "unitree-go2-bridge": "dimos.robot.unitree_webrtc.unitree_go2_blueprints:unitree_go2_bridge", "unitree-go2-spatial": "dimos.robot.unitree_webrtc.unitree_go2_blueprints:unitree_go2_spatial", "unitree-go2-temporal-memory": "dimos.robot.unitree_webrtc.unitree_go2_blueprints:unitree_go2_temporal_memory", "unitree-go2-vlm-stream-test": "dimos.robot.unitree_webrtc.unitree_go2_blueprints:unitree_go2_vlm_stream_test", @@ -87,6 +87,7 @@ "person_tracker_module": "dimos.perception.detection.person_tracker", "realsense_camera": "dimos.hardware.sensors.camera.realsense.camera", "replanning_a_star_planner": "dimos.navigation.replanning_a_star.module", + "rerun_bridge": "dimos.visualization.rerun.bridge", "rerun_scene_wiring": "dimos.dashboard.rerun_scene_wiring", "ros_nav": "dimos.navigation.rosnav", "spatial_memory": "dimos.perception.spatial_perception", From 543fbd30a18c7338932afc73e04fb6d0ad5d30ff Mon Sep 17 00:00:00 2001 From: Ivan Nikolic Date: Fri, 30 Jan 2026 13:29:31 +0800 Subject: [PATCH 03/36] adding stop method to bridge --- dimos/visualization/rerun/bridge.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/dimos/visualization/rerun/bridge.py b/dimos/visualization/rerun/bridge.py index cbdd2ce5a..f91013657 100644 --- a/dimos/visualization/rerun/bridge.py +++ b/dimos/visualization/rerun/bridge.py @@ -114,6 +114,10 @@ def start(self) -> None: if hasattr(pubsub, "stop"): self._disposables.add(Disposable(pubsub.stop)) # type: ignore[union-attr] + @rpc + def stop(self) -> None: + super().stop() + rerun_bridge = RerunBridgeModule.blueprint From 99fd5fca5d4895434624832c73028ef22fac55a2 Mon Sep 17 00:00:00 2001 From: Ivan Nikolic Date: Fri, 30 Jan 2026 17:52:51 +0800 Subject: [PATCH 04/36] working on rerun conversion to bridge --- dimos/core/blueprints.py | 62 ----------------- dimos/core/global_config.py | 4 +- dimos/dashboard/rerun_init.py | 12 ++-- dimos/mapping/costmapper.py | 46 ------------- dimos/mapping/voxels.py 
| 69 ------------------- dimos/msgs/geometry_msgs/Quaternion.py | 10 ++- dimos/msgs/geometry_msgs/Transform.py | 18 ++--- dimos/msgs/nav_msgs/OccupancyGrid.py | 2 +- dimos/msgs/sensor_msgs/PointCloud2.py | 4 +- dimos/msgs/tf2_msgs/TFMessage.py | 2 +- .../unitree_webrtc/unitree_go2_blueprints.py | 55 +++++++-------- dimos/visualization/rerun/bridge.py | 20 +++--- .../web/websocket_vis/websocket_vis_module.py | 4 +- docs/api/visualization.md | 8 +-- 14 files changed, 70 insertions(+), 246 deletions(-) diff --git a/dimos/core/blueprints.py b/dimos/core/blueprints.py index 83cc002f6..66aeaec58 100644 --- a/dimos/core/blueprints.py +++ b/dimos/core/blueprints.py @@ -23,9 +23,6 @@ from types import MappingProxyType from typing import Any, Literal, get_args, get_origin, get_type_hints -import rerun as rr -import rerun.blueprint as rrb - from dimos.core.global_config import GlobalConfig from dimos.core.module import Module from dimos.core.module_coordinator import ModuleCoordinator @@ -316,50 +313,6 @@ def _connect_rpc_methods(self, module_coordinator: ModuleCoordinator) -> None: requested_method_name, rpc_methods_dot[requested_method_name] ) - def _init_rerun_blueprint(self, module_coordinator: ModuleCoordinator) -> None: - """Compose and send Rerun blueprint from module contributions. - - Collects rerun_views() from all modules and composes them into a unified layout. - """ - # Collect view contributions from all modules - side_panels = [] - for blueprint in self.blueprints: - if hasattr(blueprint.module, "rerun_views"): - views = blueprint.module.rerun_views() - if views: - side_panels.extend(views) - - # Always include latency panel if we have any panels - if side_panels: - side_panels.append( - rrb.TimeSeriesView( - name="Latency (ms)", - origin="/metrics", - contents=[ - "+ /metrics/voxel_map/latency_ms", - "+ /metrics/costmap/latency_ms", - ], - ) - ) - - # Compose final layout - if side_panels: - composed_blueprint = rrb.Blueprint( - rrb.Horizontal( - rrb.Spatial3DView( - name="3D View", - origin="world", - background=[0, 0, 0], - ), - rrb.Vertical(*side_panels, row_shares=[2] + [1] * (len(side_panels) - 1)), - column_shares=[3, 1], - ), - rrb.TimePanel(state="collapsed"), - rrb.SelectionPanel(state="collapsed"), - rrb.BlueprintPanel(state="collapsed"), - ) - rr.send_blueprint(composed_blueprint) - def build( self, global_config: GlobalConfig | None = None, @@ -374,17 +327,6 @@ def build( self._check_requirements() self._verify_no_name_conflicts() - # Initialize Rerun server before deploying modules (if backend is Rerun) - if global_config.rerun_enabled and global_config.viewer_backend.startswith("rerun"): - try: - from dimos.dashboard.rerun_init import init_rerun_server - - server_addr = init_rerun_server(viewer_mode=global_config.viewer_backend) - global_config = global_config.model_copy(update={"rerun_server_addr": server_addr}) - logger.info("Rerun server initialized", addr=server_addr) - except Exception as e: - logger.warning(f"Failed to initialize Rerun server: {e}") - module_coordinator = ModuleCoordinator(global_config=global_config) module_coordinator.start() @@ -394,10 +336,6 @@ def build( module_coordinator.start_all_modules() - # Compose and send Rerun blueprint from module contributions - if global_config.viewer_backend.startswith("rerun"): - self._init_rerun_blueprint(module_coordinator) - return module_coordinator diff --git a/dimos/core/global_config.py b/dimos/core/global_config.py index 3e97a0bbf..7bcbf8a6e 100644 --- a/dimos/core/global_config.py +++ 
b/dimos/core/global_config.py @@ -20,7 +20,7 @@ from dimos.mapping.occupancy.path_map import NavigationStrategy -ViewerBackend: TypeAlias = Literal["rerun-web", "rerun-native", "foxglove", "none"] +ViewerBackend: TypeAlias = Literal["rerun", "rerun-web", "foxglove", "none"] def _get_all_numbers(s: str) -> list[float]: @@ -33,7 +33,7 @@ class GlobalConfig(BaseSettings): replay: bool = False rerun_enabled: bool = True rerun_server_addr: str | None = None - viewer_backend: ViewerBackend = "rerun-native" + viewer_backend: ViewerBackend = "rerun" n_dask_workers: int = 2 memory_limit: str = "auto" mujoco_camera_position: str | None = None diff --git a/dimos/dashboard/rerun_init.py b/dimos/dashboard/rerun_init.py index c71219dd3..fb0fd5f13 100644 --- a/dimos/dashboard/rerun_init.py +++ b/dimos/dashboard/rerun_init.py @@ -21,15 +21,15 @@ Viewer modes (set via VIEWER_BACKEND config or environment variable): - "rerun-web": Web viewer on port 9090 - - "rerun-native" (default): Native Rerun viewer (requires display) + - "rerun" (default): Native Rerun viewer (requires display) - "foxglove": Use Foxglove instead of Rerun Usage: # Set via environment: - VIEWER_BACKEND=rerun-web # or rerun-native or foxglove + VIEWER_BACKEND=rerun-web # or rerun or foxglove # Or via .env file: - viewer_backend=rerun-native + viewer_backend=rerun # In main process (blueprints.py handles this automatically): from dimos.dashboard.rerun_init import init_rerun_server @@ -71,7 +71,7 @@ def init_rerun_server(viewer_mode: str, memory_limit: str = "4GB") -> str: Should only be called once from the main process. Args: - viewer_mode: One of "rerun-web", "rerun-native", or "rerun-grpc-only" + viewer_mode: One of "rerun-web", "rerun", or "rerun-grpc-only" memory_limit: Maximum memory for Rerun viewer (e.g., "16GB", "25%"). Default 16GB. 
Returns: @@ -88,7 +88,7 @@ def init_rerun_server(viewer_mode: str, memory_limit: str = "4GB") -> str: rr.init("dimos") - if viewer_mode == "rerun-native": + if viewer_mode == "rerun": # Spawn native viewer (requires display) rr.spawn(port=RERUN_GRPC_PORT, connect=True, memory_limit=memory_limit) logger.info("Rerun: spawned native viewer", port=RERUN_GRPC_PORT, memory_limit=memory_limit) @@ -138,7 +138,7 @@ def connect_rerun( return # Skip if foxglove backend selected - if global_config and not global_config.viewer_backend.startswith("rerun"): + if global_config and global_config.viewer_backend not in ("rerun", "rerun-web"): logger.debug("Rerun connection skipped", viewer_backend=global_config.viewer_backend) return diff --git a/dimos/mapping/costmapper.py b/dimos/mapping/costmapper.py index 97c96f818..1cb028205 100644 --- a/dimos/mapping/costmapper.py +++ b/dimos/mapping/costmapper.py @@ -16,8 +16,6 @@ import time from reactivex import operators as ops -import rerun as rr -import rerun.blueprint as rrb from dimos.core import In, Module, Out, rpc from dimos.core.global_config import GlobalConfig @@ -48,21 +46,6 @@ class CostMapper(Module): global_map: In[PointCloud2] global_costmap: Out[OccupancyGrid] - @classmethod - def rerun_views(cls): # type: ignore[no-untyped-def] - """Return Rerun view blueprints for costmap visualization.""" - return [ - rrb.Spatial2DView( - name="Costmap", - origin="world/nav/costmap/image", - ), - rrb.TimeSeriesView( - name="Costmap (ms)", - origin="/metrics/costmap", - contents=["+ /metrics/costmap/calc_ms"], - ), - ] - def __init__(self, global_config: GlobalConfig | None = None, **kwargs: object) -> None: super().__init__(**kwargs) self._global_config = global_config or GlobalConfig() @@ -77,37 +60,8 @@ def start(self) -> None: logger.info("CostMapper: Rerun logging enabled (sync)") def _publish_costmap(grid: OccupancyGrid, calc_time_ms: float, rx_monotonic: float) -> None: - # Publish to downstream first. self.global_costmap.publish(grid) - # Synchronous Rerun logging (no queues/threads). 
- if self._global_config.viewer_backend.startswith("rerun"): - try: - # 2D image panel - rr.log( - "world/nav/costmap/image", - grid.to_rerun( - mode="image", - colormap="RdBu_r", - ), - ) - - # 3D floor overlay (mesh) - rr.log( - "world/nav/costmap/floor", - grid.to_rerun( - mode="mesh", - colormap=None, # grayscale / foxglove-style - z_offset=0.07, - ), - ) - - rr.log("metrics/costmap/calc_ms", rr.Scalars(calc_time_ms)) - latency_ms = (time.monotonic() - rx_monotonic) * 1000 - rr.log("metrics/costmap/latency_ms", rr.Scalars(latency_ms)) - except Exception as e: - logger.warning(f"Rerun logging error: {e}") - def _calculate_and_time( msg: PointCloud2, ) -> tuple[OccupancyGrid, float, float]: diff --git a/dimos/mapping/voxels.py b/dimos/mapping/voxels.py index 6570d9ba3..4147f9050 100644 --- a/dimos/mapping/voxels.py +++ b/dimos/mapping/voxels.py @@ -21,13 +21,10 @@ from reactivex import interval, operators as ops from reactivex.disposable import Disposable from reactivex.subject import Subject -import rerun as rr -import rerun.blueprint as rrb from dimos.core import In, Module, Out, rpc from dimos.core.global_config import GlobalConfig from dimos.core.module import ModuleConfig -from dimos.dashboard.rerun_init import connect_rerun from dimos.msgs.sensor_msgs import PointCloud2 from dimos.utils.decorators import simple_mcache from dimos.utils.logging_config import setup_logger @@ -54,26 +51,6 @@ class VoxelGridMapper(Module): lidar: In[PointCloud2] global_map: Out[PointCloud2] - @classmethod - def rerun_views(cls): # type: ignore[no-untyped-def] - """Return Rerun view blueprints for voxel map visualization.""" - return [ - rrb.TimeSeriesView( - name="Voxel Pipeline (ms)", - origin="/metrics/voxel_map", - contents=[ - "+ /metrics/voxel_map/extract_ms", - "+ /metrics/voxel_map/transport_ms", - "+ /metrics/voxel_map/publish_ms", - ], - ), - rrb.TimeSeriesView( - name="Voxel Count", - origin="/metrics/voxel_map", - contents=["+ /metrics/voxel_map/voxel_count"], - ), - ] - def __init__(self, global_config: GlobalConfig | None = None, **kwargs: object) -> None: super().__init__(**kwargs) self._global_config = global_config or GlobalConfig() @@ -100,18 +77,11 @@ def __init__(self, global_config: GlobalConfig | None = None, **kwargs: object) self._voxel_hashmap = self.vbg.hashmap() self._key_dtype = self._voxel_hashmap.key_tensor().dtype self._latest_frame_ts: float = 0.0 - # Monotonic timestamp of last received frame (for accurate latency in replay) - self._latest_frame_rx_monotonic: float | None = None @rpc def start(self) -> None: super().start() - # Only start Rerun logging if Rerun backend is selected - if self._global_config.viewer_backend.startswith("rerun"): - connect_rerun(global_config=self._global_config) - logger.info("VoxelGridMapper: Rerun logging enabled (sync)") - # Subject to trigger publishing, with backpressure to drop if busy self._publish_trigger: Subject[None] = Subject() self._disposables.add( @@ -136,52 +106,13 @@ def stop(self) -> None: super().stop() def _on_frame(self, frame: PointCloud2) -> None: - # Track receipt time with monotonic clock (works correctly in replay) - self._latest_frame_rx_monotonic = time.monotonic() self.add_frame(frame) if self.config.publish_interval == 0: self._publish_trigger.on_next(None) def publish_global_map(self) -> None: - # Snapshot monotonic timestamp once (won't be overwritten during slow publish) - rx_monotonic = self._latest_frame_rx_monotonic - - start_total = time.perf_counter() - - # 1. 
Extract pointcloud from GPU hashmap - t1 = time.perf_counter() pc = self.get_global_pointcloud2() - extract_ms = (time.perf_counter() - t1) * 1000 - - # 2. Publish to downstream (NO auto-logging - fast!) - t2 = time.perf_counter() self.global_map.publish(pc) - publish_ms = (time.perf_counter() - t2) * 1000 - # 3. Synchronous Rerun logging (no queues/threads). - if self._global_config.viewer_backend.startswith("rerun"): - try: - rr.log( - "world/map", - pc.to_rerun( - mode="boxes", - size=self.config.voxel_size, - colormap="turbo", - ), - ) - except Exception as e: - logger.warning(f"Rerun logging error: {e}") - - # Log detailed timing breakdown to Rerun - total_ms = (time.perf_counter() - start_total) * 1000 - rr.log("metrics/voxel_map/publish_ms", rr.Scalars(total_ms)) - rr.log("metrics/voxel_map/extract_ms", rr.Scalars(extract_ms)) - rr.log("metrics/voxel_map/transport_ms", rr.Scalars(publish_ms)) - rr.log("metrics/voxel_map/voxel_count", rr.Scalars(float(len(pc)))) - - # Log pipeline latency (time from frame receipt to publish complete) - if rx_monotonic is not None: - latency_ms = (time.monotonic() - rx_monotonic) * 1000 - rr.log("metrics/voxel_map/latency_ms", rr.Scalars(latency_ms)) def size(self) -> int: return self._voxel_hashmap.size() # type: ignore[no-any-return] diff --git a/dimos/msgs/geometry_msgs/Quaternion.py b/dimos/msgs/geometry_msgs/Quaternion.py index 02c9592ea..8e83a32e5 100644 --- a/dimos/msgs/geometry_msgs/Quaternion.py +++ b/dimos/msgs/geometry_msgs/Quaternion.py @@ -17,7 +17,10 @@ from collections.abc import Sequence from io import BytesIO import struct -from typing import BinaryIO, TypeAlias +from typing import TYPE_CHECKING, BinaryIO, TypeAlias + +if TYPE_CHECKING: + import rerun as rr from dimos_lcm.geometry_msgs import Quaternion as LCMQuaternion import numpy as np @@ -112,6 +115,11 @@ def to_radians(self) -> Vector3: """Radians representation of the quaternion (x, y, z, w).""" return self.to_euler() + def to_rerun(self) -> rr.Quaternion: + import rerun as rr + + return rr.Quaternion(xyzw=[self.x, self.y, self.z, self.w]) + @classmethod def from_euler(cls, vector: Vector3) -> Quaternion: """Convert Euler angles (roll, pitch, yaw) in radians to quaternion. diff --git a/dimos/msgs/geometry_msgs/Transform.py b/dimos/msgs/geometry_msgs/Transform.py index 3a52f5a8c..d1a0bc74b 100644 --- a/dimos/msgs/geometry_msgs/Transform.py +++ b/dimos/msgs/geometry_msgs/Transform.py @@ -15,7 +15,10 @@ from __future__ import annotations import time -from typing import BinaryIO +from typing import TYPE_CHECKING, BinaryIO + +if TYPE_CHECKING: + import rerun as rr from dimos_lcm.geometry_msgs import ( Transform as LCMTransform, @@ -34,7 +37,6 @@ ROSTransform = None # type: ignore[assignment, misc] ROSVector3 = None # type: ignore[assignment, misc] ROSQuaternion = None # type: ignore[assignment, misc] -import rerun as rr from dimos.msgs.geometry_msgs.Quaternion import Quaternion from dimos.msgs.geometry_msgs.Vector3 import Vector3 @@ -361,17 +363,17 @@ def lcm_decode(cls, data: bytes | BinaryIO) -> Transform: ts=ts, ) - def to_rerun(self): # type: ignore[no-untyped-def] + def to_rerun(self) -> rr.Transform3D: """Convert to rerun Transform3D format with frame IDs. 
Returns: rr.Transform3D archetype for logging to rerun with parent/child frames """ + import rerun as rr + return rr.Transform3D( translation=[self.translation.x, self.translation.y, self.translation.z], - rotation=rr.Quaternion( - xyzw=[self.rotation.x, self.rotation.y, self.rotation.z, self.rotation.w] - ), - parent_frame=self.frame_id, # type: ignore[call-arg] - child_frame=self.child_frame_id, # type: ignore[call-arg] + rotation=self.rotation.to_rerun(), + parent_frame="tf#/" + self.frame_id, + child_frame="tf#/" + self.child_frame_id, ) diff --git a/dimos/msgs/nav_msgs/OccupancyGrid.py b/dimos/msgs/nav_msgs/OccupancyGrid.py index 3876b44fa..15cba81d8 100644 --- a/dimos/msgs/nav_msgs/OccupancyGrid.py +++ b/dimos/msgs/nav_msgs/OccupancyGrid.py @@ -430,7 +430,7 @@ def cell_value(self, world_position: Vector3) -> int: def to_rerun( # type: ignore[no-untyped-def] self, colormap: str | None = None, - mode: str = "image", + mode: str = "mesh", z_offset: float = 0.01, **kwargs: Any, ): # type: ignore[no-untyped-def] diff --git a/dimos/msgs/sensor_msgs/PointCloud2.py b/dimos/msgs/sensor_msgs/PointCloud2.py index d68c62f51..301758e13 100644 --- a/dimos/msgs/sensor_msgs/PointCloud2.py +++ b/dimos/msgs/sensor_msgs/PointCloud2.py @@ -620,8 +620,8 @@ def __len__(self) -> int: def to_rerun( # type: ignore[no-untyped-def] self, - radii: float = 0.02, - colormap: str | None = None, + radii: float = 0.025, + colormap: str | None = "turbo", colors: list[int] | None = None, mode: str = "boxes", size: float | None = None, diff --git a/dimos/msgs/tf2_msgs/TFMessage.py b/dimos/msgs/tf2_msgs/TFMessage.py index 54eaaf921..c4a160dc5 100644 --- a/dimos/msgs/tf2_msgs/TFMessage.py +++ b/dimos/msgs/tf2_msgs/TFMessage.py @@ -178,6 +178,6 @@ def to_rerun(self): # type: ignore[no-untyped-def] """ results = [] for transform in self.transforms: - entity_path = f"world/tf/{transform.child_frame_id}" + entity_path = f"world/{transform.child_frame_id}" results.append((entity_path, transform.to_rerun())) # type: ignore[no-untyped-call] return results diff --git a/dimos/robot/unitree_webrtc/unitree_go2_blueprints.py b/dimos/robot/unitree_webrtc/unitree_go2_blueprints.py index 2da7c8a49..c1b8309b2 100644 --- a/dimos/robot/unitree_webrtc/unitree_go2_blueprints.py +++ b/dimos/robot/unitree_webrtc/unitree_go2_blueprints.py @@ -14,7 +14,6 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-from pathlib import Path import platform from dimos_lcm.foxglove_msgs.ImageAnnotations import ( @@ -34,6 +33,7 @@ from dimos.agents.vlm_stream_tester import vlm_stream_tester from dimos.constants import DEFAULT_CAPACITY_COLOR_IMAGE from dimos.core.blueprints import autoconnect +from dimos.core.global_config import GlobalConfig from dimos.core.transport import ( JpegLcmTransport, JpegShmTransport, @@ -41,7 +41,6 @@ ROSTransport, pSHMTransport, ) -from dimos.dashboard.tf_rerun_module import tf_rerun from dimos.mapping.costmapper import cost_mapper from dimos.mapping.voxels import voxel_mapper from dimos.msgs.geometry_msgs import PoseStamped @@ -58,58 +57,50 @@ from dimos.perception.spatial_perception import spatial_memory from dimos.protocol.mcp.mcp import MCPModule from dimos.robot.foxglove_bridge import foxglove_bridge -import dimos.robot.unitree.connection.go2 as _go2_mod from dimos.robot.unitree.connection.go2 import GO2Connection, go2_connection from dimos.robot.unitree_webrtc.unitree_skill_container import unitree_skills from dimos.utils.monitoring import utilization from dimos.visualization.rerun.bridge import rerun_bridge from dimos.web.websocket_vis.websocket_vis_module import websocket_vis -_GO2_URDF = Path(_go2_mod.__file__).parent.parent / "go2" / "go2.urdf" +_config = GlobalConfig() -# Mac has some issue with high bandwidth UDP -# -# so we use pSHMTransport for color_image -# (Could we adress this on the system config layer? Is this fixable on mac?) -_mac = autoconnect( - foxglove_bridge( - shm_channels=[ - "/color_image#sensor_msgs.Image", - ] +# Mac has some issue with high bandwidth UDP, so we use pSHMTransport for color_image +_mac_transports: dict[tuple[str, type], pSHMTransport[Image]] = { + ("color_image", Image): pSHMTransport( + "color_image", default_capacity=DEFAULT_CAPACITY_COLOR_IMAGE ), -).transports( - { - ("color_image", Image): pSHMTransport( - "color_image", default_capacity=DEFAULT_CAPACITY_COLOR_IMAGE - ), - } -) - - -_linux = autoconnect(foxglove_bridge()) +} + +if _config.viewer_backend == "foxglove": + _mac = autoconnect( + foxglove_bridge(shm_channels=["/color_image#sensor_msgs.Image"]), + ).transports(_mac_transports) + _linux = autoconnect(foxglove_bridge()) +elif _config.viewer_backend == "rerun": + _mac = autoconnect(rerun_bridge(viewer_mode="native")).transports(_mac_transports) + _linux = autoconnect(rerun_bridge(viewer_mode="native")) +elif _config.viewer_backend == "rerun-web": + _mac = autoconnect(rerun_bridge(viewer_mode="web")).transports(_mac_transports) + _linux = autoconnect(rerun_bridge(viewer_mode="web")) +else: # "none" + _mac = autoconnect().transports(_mac_transports) + _linux = autoconnect() unitree_go2_basic = autoconnect( go2_connection(), _linux if platform.system() == "Linux" else _mac, websocket_vis(), - tf_rerun( - urdf_path=str(_GO2_URDF), - cameras=[ - ("world/robot/camera", "camera_optical", GO2Connection.camera_info_static), - ], - ), ).global_config(n_dask_workers=4, robot_model="unitree_go2") unitree_go2 = autoconnect( unitree_go2_basic, - voxel_mapper(voxel_size=0.1), + voxel_mapper(voxel_size=0.05), cost_mapper(), replanning_a_star_planner(), wavefront_frontier_explorer(), ).global_config(n_dask_workers=6, robot_model="unitree_go2") -unitree_go2_bridge = autoconnect(unitree_go2, rerun_bridge()) - unitree_go2_ros = unitree_go2.transports( { diff --git a/dimos/visualization/rerun/bridge.py b/dimos/visualization/rerun/bridge.py index f91013657..bae198d6d 100644 --- a/dimos/visualization/rerun/bridge.py +++ 
b/dimos/visualization/rerun/bridge.py @@ -48,17 +48,17 @@ class Config(ModuleConfig): class RerunBridgeModule(Module): """Bridge that logs messages from pubsubs to Rerun. - Spawns its own Rerun viewer and subscribes to all topics on each provided - pubsub. Any message that has a to_rerun() method is automatically logged. + Spawns its own Rerun viewer and subscribes to all topics on each provided + pubsub. Any message that has a to_rerun() method is automatically logged. - Example: - from dimos.protocol.pubsub.impl.lcmpubsub import LCM + Example: + from dimos.protocol.pubsub.impl.lcmpubsub import LCM - lcm = LCM(autoconf=True) - bridge = RerunBridgeModule(pubsubs=[lcm]) - bridge.start() - # All messages with to_rerun() are now logged to Rerun - bridge.stop() + lcm = LCM(autoconf=True) + bridge = RerunBridgeModule(pubsubs=[lcm]) + f bridge.start() + # All messages with to_rerun() are now logged to Rerun + bridge.stop() """ default_config = Config @@ -74,7 +74,7 @@ def _get_entity_path(self, topic: Any) -> str: return f"{self.config.entity_prefix}/{topic_str}" def _on_message(self, msg: Any, topic: Any) -> None: - """Handle incoming message - log to rerun if it has to_rerun.""" + """Handle incoming message - -4=----log to rerun if it has to_rerun.""" if not hasattr(msg, "to_rerun"): return diff --git a/dimos/web/websocket_vis/websocket_vis_module.py b/dimos/web/websocket_vis/websocket_vis_module.py index e02ee3491..e861fd6c3 100644 --- a/dimos/web/websocket_vis/websocket_vis_module.py +++ b/dimos/web/websocket_vis/websocket_vis_module.py @@ -154,7 +154,7 @@ def start(self) -> None: self._uvicorn_server_thread.start() # Auto-open browser only for rerun-web (dashboard with Rerun iframe + command center) - # For rerun-native and foxglove, users access the command center manually if needed + # For rerun and foxglove, users access the command center manually if needed if self._global_config.viewer_backend == "rerun-web": url = f"http://localhost:{self.port}/" logger.info(f"Dimensional Command Center: {url}") @@ -228,7 +228,7 @@ def _create_server(self) -> None: async def serve_index(request): # type: ignore[no-untyped-def] """Serve appropriate HTML based on viewer mode.""" # If running native Rerun, redirect to standalone command center - if self._global_config.viewer_backend == "rerun-native": + if self._global_config.viewer_backend == "rerun": return RedirectResponse(url="/command-center") # Otherwise serve full dashboard with Rerun iframe return FileResponse(_DASHBOARD_HTML, media_type="text/html") diff --git a/docs/api/visualization.md b/docs/api/visualization.md index 08919b7a7..1b946b19f 100644 --- a/docs/api/visualization.md +++ b/docs/api/visualization.md @@ -11,7 +11,7 @@ Choose your viewer backend via the CLI (preferred): dimos run unitree-go2 # Explicitly select the viewer backend: -dimos --viewer-backend rerun-native run unitree-go2 +dimos --viewer-backend rerun run unitree-go2 dimos --viewer-backend rerun-web run unitree-go2 dimos --viewer-backend foxglove run unitree-go2 ``` @@ -19,7 +19,7 @@ dimos --viewer-backend foxglove run unitree-go2 Alternative (environment variable): ```bash -VIEWER_BACKEND=rerun-native dimos run unitree-go2 +VIEWER_BACKEND=rerun dimos run unitree-go2 # Rerun web viewer - Full dashboard in browser VIEWER_BACKEND=rerun-web dimos run unitree-go2 @@ -39,7 +39,7 @@ VIEWER_BACKEND=foxglove dimos run unitree-go2 --- -### Rerun Native (`rerun-native`) +### Rerun Native (`rerun`) **What you get:** - Native Rerun application (separate window opens automatically) @@ 
-182,7 +182,7 @@ This appendix is an **inventory of every current Rerun touchpoint** in the repos - **`GlobalConfig` flags** - **File**: [`dimos/core/global_config.py`](/dimos/core/global_config.py) - - **What**: Defines `rerun_enabled`, `viewer_backend` (`rerun-web`, `rerun-native`, `foxglove`), and `rerun_server_addr`. + - **What**: Defines `rerun_enabled`, `viewer_backend` (`rerun-web`, `rerun`, `foxglove`), and `rerun_server_addr`. - **Rerun process lifecycle** - **File**: [`dimos/dashboard/rerun_init.py`](/dimos/dashboard/rerun_init.py) From a12a955d737f4a4cdf182cb26c9258080ac3f95e Mon Sep 17 00:00:00 2001 From: Ivan Nikolic Date: Fri, 30 Jan 2026 18:06:41 +0800 Subject: [PATCH 05/36] small cleanup --- .../unitree_webrtc/unitree_go2_blueprints.py | 7 ++++++- dimos/visualization/rerun/bridge.py | 20 ++++++++++--------- 2 files changed, 17 insertions(+), 10 deletions(-) diff --git a/dimos/robot/unitree_webrtc/unitree_go2_blueprints.py b/dimos/robot/unitree_webrtc/unitree_go2_blueprints.py index c1b8309b2..d830b06e4 100644 --- a/dimos/robot/unitree_webrtc/unitree_go2_blueprints.py +++ b/dimos/robot/unitree_webrtc/unitree_go2_blueprints.py @@ -65,6 +65,10 @@ _config = GlobalConfig() + +# This stuff (also for viewer) needs to be in some shared base blueprints file +# I just want to start from Base here in go2 + # Mac has some issue with high bandwidth UDP, so we use pSHMTransport for color_image _mac_transports: dict[tuple[str, type], pSHMTransport[Image]] = { ("color_image", Image): pSHMTransport( @@ -72,6 +76,7 @@ ), } + if _config.viewer_backend == "foxglove": _mac = autoconnect( foxglove_bridge(shm_channels=["/color_image#sensor_msgs.Image"]), @@ -95,7 +100,7 @@ unitree_go2 = autoconnect( unitree_go2_basic, - voxel_mapper(voxel_size=0.05), + voxel_mapper(voxel_size=0.1), cost_mapper(), replanning_a_star_planner(), wavefront_frontier_explorer(), diff --git a/dimos/visualization/rerun/bridge.py b/dimos/visualization/rerun/bridge.py index bae198d6d..51c4ff563 100644 --- a/dimos/visualization/rerun/bridge.py +++ b/dimos/visualization/rerun/bridge.py @@ -48,17 +48,17 @@ class Config(ModuleConfig): class RerunBridgeModule(Module): """Bridge that logs messages from pubsubs to Rerun. - Spawns its own Rerun viewer and subscribes to all topics on each provided - pubsub. Any message that has a to_rerun() method is automatically logged. + Spawns its own Rerun viewer and subscribes to all topics on each provided + pubsub. Any message that has a to_rerun() method is automatically logged. 
- Example: - from dimos.protocol.pubsub.impl.lcmpubsub import LCM + Example: + from dimos.protocol.pubsub.impl.lcmpubsub import LCM - lcm = LCM(autoconf=True) - bridge = RerunBridgeModule(pubsubs=[lcm]) - f bridge.start() - # All messages with to_rerun() are now logged to Rerun - bridge.stop() + lcm = LCM(autoconf=True) + bridge = RerunBridgeModule(pubsubs=[lcm]) + bridge.start() + # All messages with to_rerun() are now logged to Rerun + bridge.stop() """ default_config = Config @@ -70,7 +70,9 @@ def _get_entity_path(self, topic: Any) -> str: return self.config.topic_to_entity(topic) # Default: use topic.name if available (LCM Topic), else str + # Strip everything after # (LCM topic suffix) topic_str = getattr(topic, "name", None) or str(topic) + topic_str = topic_str.split("#")[0] return f"{self.config.entity_prefix}/{topic_str}" def _on_message(self, msg: Any, topic: Any) -> None: From c001f37f2dc4fead1a2c7b4f77afeb1fa9475b83 Mon Sep 17 00:00:00 2001 From: Ivan Nikolic Date: Fri, 30 Jan 2026 18:17:55 +0800 Subject: [PATCH 06/36] supress treid warning --- dimos/models/embedding/treid.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/dimos/models/embedding/treid.py b/dimos/models/embedding/treid.py index 1294df817..23da1effe 100644 --- a/dimos/models/embedding/treid.py +++ b/dimos/models/embedding/treid.py @@ -12,11 +12,14 @@ # See the License for the specific language governing permissions and # limitations under the License. +import warnings from dataclasses import dataclass from functools import cached_property import torch import torch.nn.functional as functional + +warnings.filterwarnings("ignore", message="Cython evaluation.*unavailable", category=UserWarning) from torchreid import utils as torchreid_utils from dimos.models.base import LocalModel From 4886723f021eab5123385803706d415748e61841 Mon Sep 17 00:00:00 2001 From: Ivan Nikolic Date: Fri, 30 Jan 2026 19:35:04 +0800 Subject: [PATCH 07/36] occupancygrid meshing rewrite --- dimos/msgs/nav_msgs/OccupancyGrid.py | 213 ++++++++++++--------------- dimos/visualization/rerun/bridge.py | 14 +- 2 files changed, 107 insertions(+), 120 deletions(-) diff --git a/dimos/msgs/nav_msgs/OccupancyGrid.py b/dimos/msgs/nav_msgs/OccupancyGrid.py index 15cba81d8..c36d3f7a1 100644 --- a/dimos/msgs/nav_msgs/OccupancyGrid.py +++ b/dimos/msgs/nav_msgs/OccupancyGrid.py @@ -469,56 +469,83 @@ def to_rerun( # type: ignore[no-untyped-def] else: return self._to_rerun_image(colormap) - def _to_rerun_image(self, colormap: str | None = None): # type: ignore[no-untyped-def] - """Convert to 2D image visualization.""" - # Use existing cached visualization functions for supported palettes - if colormap in ("turbo", "rainbow"): - from dimos.mapping.occupancy.visualizations import rainbow_image, turbo_image + def _generate_rgba_texture(self, colormap: str | None = None) -> NDArray[np.uint8]: + """Generate RGBA texture for the occupancy grid. - if colormap == "turbo": - bgr_image = turbo_image(self.grid) - else: - bgr_image = rainbow_image(self.grid) - - # Convert BGR to RGB and flip for world coordinates - rgb_image = np.flipud(bgr_image[:, :, ::-1]) - return rr.Image(rgb_image, color_model="RGB") + Args: + colormap: Optional matplotlib colormap name. + Returns: + RGBA numpy array of shape (height, width, 4). + Note: NOT flipped - caller handles orientation. 
+ """ if colormap is not None: - # Use matplotlib colormap (cached for performance) cmap = _get_matplotlib_cmap(colormap) - grid_float = self.grid.astype(np.float32) - # Create RGBA image vis = np.zeros((self.height, self.width, 4), dtype=np.uint8) - # Free space: low cost (blue in RdBu_r) free_mask = self.grid == 0 - # Occupied: high cost (red in RdBu_r) occupied_mask = self.grid > 0 - # Unknown: transparent gray - unknown_mask = self.grid == -1 - # Map free to 0, costs to normalized value if np.any(free_mask): - colors_free = (cmap(0.0)[:3] * np.array([255, 255, 255])).astype(np.uint8) + colors_free = (np.array(cmap(0.0)[:3]) * 255).astype(np.uint8) vis[free_mask, :3] = colors_free vis[free_mask, 3] = 255 if np.any(occupied_mask): - # Normalize costs 1-100 to 0.5-1.0 range costs = grid_float[occupied_mask] cost_norm = 0.5 + (costs / 100) * 0.5 colors_occ = (cmap(cost_norm)[:, :3] * 255).astype(np.uint8) vis[occupied_mask, :3] = colors_occ vis[occupied_mask, 3] = 255 - if np.any(unknown_mask): - vis[unknown_mask] = [128, 128, 128, 100] # Semi-transparent gray + # Unknown cells: fully transparent + # (vis is already zero-initialized, so alpha=0 by default) + + return vis + + # Default: Foxglove-style coloring + vis = np.zeros((self.height, self.width, 4), dtype=np.uint8) + + free_mask = self.grid == 0 + occupied_mask = self.grid > 0 + + # Free space: blue-purple #484981 + vis[free_mask] = [72, 73, 129, 180] + + # Occupied: gradient from blue-purple to black + if np.any(occupied_mask): + costs = self.grid[occupied_mask].astype(np.float32) + factor = (1 - costs / 100).clip(0, 1) + vis[occupied_mask, 0] = (72 * factor).astype(np.uint8) + vis[occupied_mask, 1] = (73 * factor).astype(np.uint8) + vis[occupied_mask, 2] = (129 * factor).astype(np.uint8) + vis[occupied_mask, 3] = 220 - # Flip vertically to match world coordinates (y=0 at bottom) - return rr.Image(np.flipud(vis), color_model="RGBA") + # Unknown cells: fully transparent (already zero from initialization) + + return vis + + def _to_rerun_image(self, colormap: str | None = None): # type: ignore[no-untyped-def] + """Convert to 2D image visualization.""" + # Use existing cached visualization functions for supported palettes + if colormap in ("turbo", "rainbow"): + from dimos.mapping.occupancy.visualizations import rainbow_image, turbo_image + + if colormap == "turbo": + bgr_image = turbo_image(self.grid) + else: + bgr_image = rainbow_image(self.grid) + + # Convert BGR to RGB and flip for world coordinates + rgb_image = np.flipud(bgr_image[:, :, ::-1]) + return rr.Image(rgb_image, color_model="RGB") + + if colormap is not None: + # Use helper and flip for image display + rgba = self._generate_rgba_texture(colormap) + return rr.Image(np.flipud(rgba), color_model="RGBA") # Grayscale visualization (no colormap) vis_gray = np.zeros((self.height, self.width), dtype=np.uint8) @@ -579,104 +606,54 @@ def _to_rerun_points(self, colormap: str | None = None, z_offset: float = 0.01): ) def _to_rerun_mesh(self, colormap: str | None = None, z_offset: float = 0.01): # type: ignore[no-untyped-def] - """Convert to 3D mesh overlay on floor plane. + """Convert to 3D textured mesh overlay on floor plane. - Only renders known cells (free or occupied), skipping unknown cells. - Uses per-vertex colors for proper alpha blending. - Fully vectorized for performance (~100x faster than loop version). + Uses a single quad with the occupancy grid as a texture. + Much more efficient than per-cell quads (4 vertices vs n_cells*4). 
""" - # Only render known cells (not unknown = -1) - known_mask = self.grid != -1 - if not np.any(known_mask): + if self.grid.size == 0: return rr.Mesh3D(vertex_positions=[]) - # Get grid coordinates of known cells - gy, gx = np.where(known_mask) - n_cells = len(gy) + # Generate RGBA texture and flip to match world coordinates + # Grid row 0 is at world y=origin (bottom), but texture row 0 is at UV v=0 (top) + rgba = np.flipud(self._generate_rgba_texture(colormap)) + # Single quad covering entire grid ox = self.origin.position.x oy = self.origin.position.y - r = self.resolution - - # === VECTORIZED VERTEX GENERATION === - # World positions of cell corners (bottom-left of each cell) - wx = ox + gx.astype(np.float32) * r - wy = oy + gy.astype(np.float32) * r - - # Each cell has 4 vertices: (wx,wy), (wx+r,wy), (wx+r,wy+r), (wx,wy+r) - # Shape: (n_cells, 4, 3) - vertices = np.zeros((n_cells, 4, 3), dtype=np.float32) - vertices[:, 0, 0] = wx - vertices[:, 0, 1] = wy - vertices[:, 0, 2] = z_offset - vertices[:, 1, 0] = wx + r - vertices[:, 1, 1] = wy - vertices[:, 1, 2] = z_offset - vertices[:, 2, 0] = wx + r - vertices[:, 2, 1] = wy + r - vertices[:, 2, 2] = z_offset - vertices[:, 3, 0] = wx - vertices[:, 3, 1] = wy + r - vertices[:, 3, 2] = z_offset - # Flatten to (n_cells*4, 3) - flat_vertices = vertices.reshape(-1, 3) - - # === VECTORIZED INDEX GENERATION === - # Base vertex indices for each cell: [0, 4, 8, 12, ...] - base_v = np.arange(n_cells, dtype=np.uint32) * 4 - # Two triangles per cell: (0,1,2) and (0,2,3) relative to base - indices = np.zeros((n_cells, 2, 3), dtype=np.uint32) - indices[:, 0, 0] = base_v - indices[:, 0, 1] = base_v + 1 - indices[:, 0, 2] = base_v + 2 - indices[:, 1, 0] = base_v - indices[:, 1, 1] = base_v + 2 - indices[:, 1, 2] = base_v + 3 - # Flatten to (n_cells*2, 3) - flat_indices = indices.reshape(-1, 3) - - # === VECTORIZED COLOR GENERATION === - cell_values = self.grid[gy, gx] # Get all cell values at once - - if colormap: - cmap = _get_matplotlib_cmap(colormap) - # Normalize costs: free(0) -> 0.0, cost(1-100) -> 0.5-1.0 - cost_norm = np.where(cell_values == 0, 0.0, 0.5 + (cell_values / 100) * 0.5) - # Sample colormap for all cells at once (returns Nx4 RGBA float) - rgba_float = cmap(cost_norm)[:, :3] # Drop alpha, we set our own - rgb = (rgba_float * 255).astype(np.uint8) - # Alpha: 180 for free, 220 for occupied - alpha = np.where(cell_values == 0, 180, 220).astype(np.uint8) - else: - # Foxglove-style coloring: blue-purple for free, black for occupied - # Free (0): #484981 = RGB(72, 73, 129) - # Occupied (100): #000000 = RGB(0, 0, 0) - rgb = np.zeros((n_cells, 3), dtype=np.uint8) - is_free = cell_values == 0 - is_occupied = ~is_free - - # Free space: blue-purple #484981 - rgb[is_free] = [72, 73, 129] - - # Occupied: gradient from blue-purple to black based on cost - # cost 1 -> mostly blue-purple, cost 100 -> black - if np.any(is_occupied): - costs = cell_values[is_occupied].astype(np.float32) - # Linear interpolation: (1 - cost/100) * blue-purple - factor = (1 - costs / 100).clip(0, 1) - rgb[is_occupied, 0] = (72 * factor).astype(np.uint8) - rgb[is_occupied, 1] = (73 * factor).astype(np.uint8) - rgb[is_occupied, 2] = (129 * factor).astype(np.uint8) - - alpha = np.where(is_free, 180, 220).astype(np.uint8) - - # Combine RGB and alpha into RGBA - colors_per_cell = np.column_stack([rgb, alpha]) # (n_cells, 4) - # Repeat each color 4 times (one per vertex) - colors = np.repeat(colors_per_cell, 4, axis=0) # (n_cells*4, 4) + w = self.width * self.resolution + h = 
self.height * self.resolution + + vertices = np.array( + [ + [ox, oy, z_offset], # 0: bottom-left (world) + [ox + w, oy, z_offset], # 1: bottom-right + [ox + w, oy + h, z_offset], # 2: top-right + [ox, oy + h, z_offset], # 3: top-left + ], + dtype=np.float32, + ) + + indices = np.array([[0, 1, 2], [0, 2, 3]], dtype=np.uint32) + + # UV coords: Rerun uses top-left origin for textures + # Grid row 0 is at world y=oy (bottom), row H-1 at y=oy+h (top) + # Texture row 0 = grid row 0, so: + # world bottom (v0,v1) -> texture v=1 (bottom of texture) + # world top (v2,v3) -> texture v=0 (top of texture) + texcoords = np.array( + [ + [0.0, 1.0], # v0: bottom-left world -> bottom-left tex + [1.0, 1.0], # v1: bottom-right world -> bottom-right tex + [1.0, 0.0], # v2: top-right world -> top-right tex + [0.0, 0.0], # v3: top-left world -> top-left tex + ], + dtype=np.float32, + ) return rr.Mesh3D( - vertex_positions=flat_vertices, - triangle_indices=flat_indices, - vertex_colors=colors, + vertex_positions=vertices, + triangle_indices=indices, + vertex_texcoords=texcoords, + albedo_texture=rgba, ) diff --git a/dimos/visualization/rerun/bridge.py b/dimos/visualization/rerun/bridge.py index 51c4ff563..924d8518a 100644 --- a/dimos/visualization/rerun/bridge.py +++ b/dimos/visualization/rerun/bridge.py @@ -24,6 +24,9 @@ from dimos.core import Module, rpc from dimos.core.module import ModuleConfig from dimos.protocol.pubsub.impl.lcmpubsub import LCM +from dimos.utils.logging_config import setup_logger + +logger = setup_logger() if TYPE_CHECKING: from collections.abc import Callable @@ -43,6 +46,7 @@ class Config(ModuleConfig): entity_prefix: str = "world" topic_to_entity: Callable[[Any], str] | None = None viewer_mode: ViewerMode = "native" + memory_limit: str = "25%" class RerunBridgeModule(Module): @@ -99,13 +103,14 @@ def start(self) -> None: # Initialize and spawn Rerun viewer rr.init("dimos-bridge") if self.config.viewer_mode == "native": - rr.spawn(connect=True) + rr.spawn(connect=True, memory_limit=self.config.memory_limit) elif self.config.viewer_mode == "web": rr.serve_web_viewer(open_browser=True) # "none" - just init, no viewer (connect externally) # Start pubsubs and subscribe to all messages for pubsub in self.config.pubsubs: + logger.info(f"bridge listening on {pubsub.__class__.__name__}") if hasattr(pubsub, "start"): pubsub.start() # type: ignore[union-attr] unsub = pubsub.subscribe_all(self._on_message) @@ -136,9 +141,14 @@ def main() -> None: default="native", help="Viewer mode: native (desktop), web (browser), none (headless)", ) + parser.add_argument( + "--memory-limit", + default="25%", + help="Memory limit for Rerun viewer (e.g., '4GB', '16GB', '25%%')", + ) args = parser.parse_args() - bridge = RerunBridgeModule(viewer_mode=args.viewer_mode) + bridge = RerunBridgeModule(viewer_mode=args.viewer_mode, memory_limit=args.memory_limit) bridge.start() signal.signal(signal.SIGINT, lambda *_: bridge.stop()) From 19ff943942011d4ce2526d84b5e8ee0879694d94 Mon Sep 17 00:00:00 2001 From: Ivan Nikolic Date: Fri, 30 Jan 2026 20:37:44 +0800 Subject: [PATCH 08/36] bridge inline imports --- dimos/visualization/rerun/bridge.py | 25 +++++++++++++++---------- 1 file changed, 15 insertions(+), 10 deletions(-) diff --git a/dimos/visualization/rerun/bridge.py b/dimos/visualization/rerun/bridge.py index 924d8518a..d3803e09e 100644 --- a/dimos/visualization/rerun/bridge.py +++ b/dimos/visualization/rerun/bridge.py @@ -19,7 +19,7 @@ from dataclasses import dataclass, field from typing import TYPE_CHECKING, Any, 
Literal -import rerun as rr +from reactivex.disposable import Disposable from dimos.core import Module, rpc from dimos.core.module import ModuleConfig @@ -40,9 +40,7 @@ class Config(ModuleConfig): """Configuration for RerunBridgeModule.""" - pubsubs: list[SubscribeAllCapable[Any, Any]] = field( - default_factory=lambda: [LCM(autoconf=True)] - ) + spy: list[SubscribeAllCapable[Any, Any]] = field(default_factory=lambda: [LCM(autoconf=True)]) entity_prefix: str = "world" topic_to_entity: Callable[[Any], str] | None = None viewer_mode: ViewerMode = "native" @@ -80,7 +78,9 @@ def _get_entity_path(self, topic: Any) -> str: return f"{self.config.entity_prefix}/{topic_str}" def _on_message(self, msg: Any, topic: Any) -> None: - """Handle incoming message - -4=----log to rerun if it has to_rerun.""" + import rerun as rr + + """Handle incoming message - log to rerun if it has to_rerun.""" if not hasattr(msg, "to_rerun"): return @@ -96,7 +96,7 @@ def _on_message(self, msg: Any, topic: Any) -> None: @rpc def start(self) -> None: - from reactivex.disposable import Disposable + import rerun as rr super().start() @@ -126,9 +126,6 @@ def stop(self) -> None: super().stop() -rerun_bridge = RerunBridgeModule.blueprint - - def main() -> None: """CLI entry point for rerun-bridge.""" import argparse @@ -148,7 +145,13 @@ def main() -> None: ) args = parser.parse_args() - bridge = RerunBridgeModule(viewer_mode=args.viewer_mode, memory_limit=args.memory_limit) + bridge = RerunBridgeModule( + viewer_mode=args.viewer_mode, + memory_limit=args.memory_limit, + # spy={ + # LCM: {"/color_image": {"mode": "mesh"}}, + # }, + ) bridge.start() signal.signal(signal.SIGINT, lambda *_: bridge.stop()) @@ -157,3 +160,5 @@ def main() -> None: if __name__ == "__main__": main() + +rerun_bridge = RerunBridgeModule.blueprint From b18992621d6f93faf7f014386b8e89d482785e17 Mon Sep 17 00:00:00 2001 From: Ivan Nikolic Date: Sat, 31 Jan 2026 23:33:24 +0800 Subject: [PATCH 09/36] implemented to_observable for dimos standard callbacks --- dimos/utils/reactive.py | 48 +++++++++++++++++++++++++++ docs/api/sensor_streams/reactivex.md | 49 +++++++++++++++++++++++++++- 2 files changed, 96 insertions(+), 1 deletion(-) diff --git a/dimos/utils/reactive.py b/dimos/utils/reactive.py index bfc9cd046..d76bd518c 100644 --- a/dimos/utils/reactive.py +++ b/dimos/utils/reactive.py @@ -193,6 +193,27 @@ def callback_to_observable( start: Callable[[CB[T]], Any], stop: Callable[[CB[T]], Any], ) -> Observable[T]: + """Convert a register/unregister callback API to an Observable. + + Use this for APIs where you register a callback with one function and + unregister with another, passing the same callback reference: + + sensor.register(callback) # start + sensor.unregister(callback) # stop - needs the same callback + + Example: + obs = callback_to_observable( + start=sensor.register, + stop=sensor.unregister, + ) + sub = obs.subscribe(lambda x: print(x)) + # ... + sub.dispose() # calls sensor.unregister(callback) + + For APIs where subscribe() returns an unsubscribe callable, use + unsub_to_observable() instead. + """ + def _subscribe(observer, _scheduler=None): # type: ignore[no-untyped-def] def _on_msg(value: T) -> None: observer.on_next(value) @@ -203,6 +224,33 @@ def _on_msg(value: T) -> None: return rx.create(_subscribe) +def to_observable( + subscribe: Callable[[Callable[[T], Any]], Callable[[], None]], +) -> Observable[T]: + """Convert a subscribe-returns-unsub API to an Observable. 
+ + Use this for APIs where subscribe() returns an unsubscribe callable: + + unsub = pubsub.subscribe(callback) # returns unsubscribe function + unsub() # to unsubscribe + + Example: + obs = to_observable(pubsub.subscribe) + sub = obs.subscribe(lambda x: print(x)) + # ... + sub.dispose() # calls the unsub function returned by pubsub.subscribe + + For APIs with separate register/unregister functions, use + callback_to_observable() instead. + """ + + def _subscribe(observer, _scheduler=None): # type: ignore[no-untyped-def] + unsub = subscribe(observer.on_next) + return Disposable(unsub) + + return rx.create(_subscribe) + + def spy(name: str): # type: ignore[no-untyped-def] def spyfun(x): # type: ignore[no-untyped-def] print(f"SPY {name}:", x) diff --git a/docs/api/sensor_streams/reactivex.md b/docs/api/sensor_streams/reactivex.md index a80318e02..45873b471 100644 --- a/docs/api/sensor_streams/reactivex.md +++ b/docs/api/sensor_streams/reactivex.md @@ -311,7 +311,16 @@ got 4 ## Creating Observables -### From callback-based APIs +There are two common callback patterns in APIs. Use the appropriate helper: + +| Pattern | Example | Helper | +|---------|---------|--------| +| Register/unregister with same callback | `sensor.register(cb)` / `sensor.unregister(cb)` | `callback_to_observable` | +| Subscribe returns unsub function | `unsub = pubsub.subscribe(cb)` | `to_observable` | + +### From register/unregister APIs + +Use `callback_to_observable` when the API has separate register and unregister functions that take the same callback reference: ```python session=create import reactivex as rx @@ -353,6 +362,44 @@ received: ['reading_1', 'reading_2'] callbacks after dispose: 0 ``` +### From subscribe-returns-unsub APIs + +Use `to_observable` when the subscribe function returns an unsubscribe callable: + +```python session=create +from dimos.utils.reactive import to_observable + +class MockPubSub: + def __init__(self): + self._callbacks = [] + def subscribe(self, cb): + self._callbacks.append(cb) + return lambda: self._callbacks.remove(cb) # returns unsub function + def publish(self, value): + for cb in self._callbacks: + cb(value) + +pubsub = MockPubSub() + +obs = to_observable(pubsub.subscribe) + +received = [] +sub = obs.subscribe(lambda x: received.append(x)) + +pubsub.publish("msg_1") +pubsub.publish("msg_2") +print("received:", received) + +sub.dispose() +print("callbacks after dispose:", len(pubsub._callbacks)) +``` + + +``` +received: ['msg_1', 'msg_2'] +callbacks after dispose: 0 +``` + ### From scratch with `rx.create` ```python session=create From d0d3fb3d8ebe8bc302625246ab91d5b983c4ba89 Mon Sep 17 00:00:00 2001 From: Ivan Nikolic Date: Sat, 31 Jan 2026 23:54:41 +0800 Subject: [PATCH 10/36] settled on the temporary bridge api --- dimos/core/transport.py | 12 +++++ dimos/protocol/pubsub/impl/lcmpubsub.py | 58 +---------------------- dimos/protocol/pubsub/spec.py | 56 ++++++++++++++++++++++ dimos/protocol/pubsub/test_pattern_sub.py | 4 +- dimos/visualization/rerun/bridge.py | 28 +++++++++-- 5 files changed, 95 insertions(+), 63 deletions(-) diff --git a/dimos/core/transport.py b/dimos/core/transport.py index 6605b13f3..dbdc3de42 100644 --- a/dimos/core/transport.py +++ b/dimos/core/transport.py @@ -37,6 +37,18 @@ T = TypeVar("T") # type: ignore[misc] +# TODO +# Transports need to be rewritten and simplified, +# +# there is no need for them to get a reference to "a stream" +# this is a legacy from dask transports. 
+# +# new transport should literally have 2 functions (next to start/stop) +# "send(msg)" and "receive(callback)" and that's all +# +# we can also consider pubsubs conforming directly to Transport specs +# and removing PubSubTransport glue entirely + class PubSubTransport(Transport[T]): topic: Any diff --git a/dimos/protocol/pubsub/impl/lcmpubsub.py b/dimos/protocol/pubsub/impl/lcmpubsub.py index 652b506bf..6f8025a52 100644 --- a/dimos/protocol/pubsub/impl/lcmpubsub.py +++ b/dimos/protocol/pubsub/impl/lcmpubsub.py @@ -23,7 +23,7 @@ LCMEncoderMixin, PickleEncoderMixin, ) -from dimos.protocol.pubsub.spec import AllPubSub +from dimos.protocol.pubsub.spec import AllPubSub, Glob from dimos.protocol.service.lcmservice import LCMConfig, LCMService, autoconf from dimos.utils.logging_config import setup_logger @@ -36,62 +36,6 @@ logger = setup_logger() -class Glob: - """Glob pattern that compiles to regex for LCM subscriptions. - - Supports: - * - matches any characters except / - ** - matches any characters including / - ? - matches single character - - Example: - Topic(topic=Glob("/sensor/*")) # matches /sensor/temp, /sensor/humidity - Topic(topic=Glob("/robot/**")) # matches /robot/arm/joint1, /robot/leg/motor - """ - - def __init__(self, pattern: str) -> None: - self._glob = pattern - self._regex = self._compile(pattern) - - @staticmethod - def _compile(pattern: str) -> str: - """Convert glob pattern to regex.""" - result = [] - i = 0 - while i < len(pattern): - c = pattern[i] - if c == "*": - if i + 1 < len(pattern) and pattern[i + 1] == "*": - result.append(".*") - i += 2 - else: - result.append("[^/]*") - i += 1 - elif c == "?": - result.append(".") - i += 1 - elif c in r"\^$.|+[]{}()": - result.append("\\" + c) - i += 1 - else: - result.append(c) - i += 1 - return "".join(result) - - @property - def pattern(self) -> str: - """Return the regex pattern string.""" - return self._regex - - @property - def glob(self) -> str: - """Return the original glob pattern.""" - return self._glob - - def __repr__(self) -> str: - return f"Glob({self._glob!r})" - - @dataclass class Topic: topic: str | re.Pattern[str] | Glob diff --git a/dimos/protocol/pubsub/spec.py b/dimos/protocol/pubsub/spec.py index 12c1bbe97..fa96714b4 100644 --- a/dimos/protocol/pubsub/spec.py +++ b/dimos/protocol/pubsub/spec.py @@ -25,6 +25,62 @@ TopicT_co = TypeVar("TopicT_co", covariant=True) +class Glob: + """Glob pattern that compiles to regex + + Supports: + * - matches any characters except / + ** - matches any characters including / + ? 
- matches single character + + Example: + Topic(topic=Glob("/sensor/*")) # matches /sensor/temp, /sensor/humidity + Topic(topic=Glob("/robot/**")) # matches /robot/arm/joint1, /robot/leg/motor + """ + + def __init__(self, pattern: str) -> None: + self._glob = pattern + self._regex = self._compile(pattern) + + @staticmethod + def _compile(pattern: str) -> str: + """Convert glob pattern to regex.""" + result = [] + i = 0 + while i < len(pattern): + c = pattern[i] + if c == "*": + if i + 1 < len(pattern) and pattern[i + 1] == "*": + result.append(".*") + i += 2 + else: + result.append("[^/]*") + i += 1 + elif c == "?": + result.append(".") + i += 1 + elif c in r"\^$.|+[]{}()": + result.append("\\" + c) + i += 1 + else: + result.append(c) + i += 1 + return "".join(result) + + @property + def pattern(self) -> str: + """Return the regex pattern string.""" + return self._regex + + @property + def glob(self) -> str: + """Return the original glob pattern.""" + return self._glob + + def __repr__(self) -> str: + return f"Glob({self._glob!r})" + + @runtime_checkable class SubscribeAllCapable(Protocol[MsgT_co, TopicT_co]): """Protocol for pubsubs that support subscribe_all. diff --git a/dimos/protocol/pubsub/test_pattern_sub.py b/dimos/protocol/pubsub/test_pattern_sub.py index 2706e09b6..11c46aa6f 100644 --- a/dimos/protocol/pubsub/test_pattern_sub.py +++ b/dimos/protocol/pubsub/test_pattern_sub.py @@ -25,8 +25,8 @@ import pytest from dimos.msgs.geometry_msgs import Pose, Quaternion, Vector3 -from dimos.protocol.pubsub.impl.lcmpubsub import LCM, Glob, LCMPubSubBase, Topic -from dimos.protocol.pubsub.spec import AllPubSub, PubSub +from dimos.protocol.pubsub.impl.lcmpubsub import LCM, LCMPubSubBase, Topic +from dimos.protocol.pubsub.spec import AllPubSub, Glob, PubSub TopicT = TypeVar("TopicT") MsgT = TypeVar("MsgT") diff --git a/dimos/visualization/rerun/bridge.py b/dimos/visualization/rerun/bridge.py index d3803e09e..33ed13e34 100644 --- a/dimos/visualization/rerun/bridge.py +++ b/dimos/visualization/rerun/bridge.py @@ -24,6 +24,7 @@ from dimos.core import Module, rpc from dimos.core.module import ModuleConfig from dimos.protocol.pubsub.impl.lcmpubsub import LCM +from dimos.protocol.pubsub.spec import Glob from dimos.utils.logging_config import setup_logger logger = setup_logger() @@ -36,11 +37,31 @@ ViewerMode = Literal["native", "web", "none"] +# Notes on this system +# +# In the future it would be nice if modules can annotate their individual OUTs with (general or specific) +# hints related to their visualization +# +# so stuff like color, update frequency etc (some Image needs to be rendered on the 3d floor like occupancy grid) +# some other image is an image to be streamed into a specific 2D view etc. +# +# to achieve this we'd feed a full blueprint into the rerun bridge. +# rerun bridge can then inspect all transports used, all modules with their outs, +# automatically spy an all the transports and read visualization hints +# +# this is the correct implementation. +# temporarily we are using these "sideloading" transforms={} to define custom to_rerun methods for specific topics +# as well as pubsubs to specify which protocols to listen to. 
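A minimal sketch of what the SubscribeAllCapable contract amounts to in practice. InMemoryPubSub is a hypothetical toy bus (not part of this patch); to_observable comes from the previous patch in this series, and the (msg, topic) adapter shape is an assumption about how the two hooks would be glued together. Because the protocol is runtime-checkable and purely structural, anything exposing subscribe_all(callback) -> unsubscribe can be spied on without the bridge knowing the concrete pubsub type:

```python
# Illustration only: a toy bus that structurally satisfies SubscribeAllCapable.
from collections.abc import Callable
from typing import Any

from dimos.protocol.pubsub.spec import SubscribeAllCapable
from dimos.utils.reactive import to_observable


class InMemoryPubSub:
    """Hypothetical in-memory bus used only for this sketch."""

    def __init__(self) -> None:
        self._subs: list[Callable[[Any, Any], Any]] = []

    def publish(self, topic: str, msg: Any) -> None:
        for cb in list(self._subs):
            cb(msg, topic)

    def subscribe_all(self, callback: Callable[[Any, Any], Any]) -> Callable[[], None]:
        self._subs.append(callback)
        return lambda: self._subs.remove(callback)


bus = InMemoryPubSub()
print(isinstance(bus, SubscribeAllCapable))  # True: structural, runtime-checkable match

# The same hook composes with the reactive helper from the previous patch:
# subscribe_all delivers (msg, topic), so adapt it to the single-argument
# callback that to_observable expects.
obs = to_observable(lambda cb: bus.subscribe_all(lambda msg, topic: cb((topic, msg))))
sub = obs.subscribe(print)
bus.publish("/sensor/temp", 21.5)  # prints ('/sensor/temp', 21.5)
sub.dispose()                      # invokes the unsubscribe returned by subscribe_all
```

Keeping the bridge typed against the protocol rather than LCM concretely is what makes the "sideloading" config above swappable in tests and other transports.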
+ + @dataclass class Config(ModuleConfig): """Configuration for RerunBridgeModule.""" - spy: list[SubscribeAllCapable[Any, Any]] = field(default_factory=lambda: [LCM(autoconf=True)]) + pubsubs: list[SubscribeAllCapable[Any, Any]] = ( + field(default_factory=lambda: [LCM(autoconf=True)]), + ) + entity_prefix: str = "world" topic_to_entity: Callable[[Any], str] | None = None viewer_mode: ViewerMode = "native" @@ -148,9 +169,8 @@ def main() -> None: bridge = RerunBridgeModule( viewer_mode=args.viewer_mode, memory_limit=args.memory_limit, - # spy={ - # LCM: {"/color_image": {"mode": "mesh"}}, - # }, + pubsubs=[LCM(autoconf=True)], + transforms={Glob("**/costmap/**#Image"): lambda Image: Image.to_rerun(something=3)}, ) bridge.start() From 9900c2fc172981688564eeb97e30d4ced0f26e25 Mon Sep 17 00:00:00 2001 From: Ivan Nikolic Date: Sun, 1 Feb 2026 00:49:53 +0800 Subject: [PATCH 11/36] restructure of pubsub patterns, good bridge match api --- dimos/protocol/pubsub/impl/lcmpubsub.py | 3 +- dimos/protocol/pubsub/patterns.py | 98 ++++++++++++++++ dimos/protocol/pubsub/spec.py | 80 ++----------- dimos/protocol/pubsub/test_pattern_sub.py | 3 +- dimos/protocol/pubsub/test_patterns.py | 132 ++++++++++++++++++++++ dimos/visualization/rerun/bridge.py | 75 ++++++++++-- pyproject.toml | 8 +- uv.lock | 10 ++ 8 files changed, 320 insertions(+), 89 deletions(-) create mode 100644 dimos/protocol/pubsub/patterns.py create mode 100644 dimos/protocol/pubsub/test_patterns.py diff --git a/dimos/protocol/pubsub/impl/lcmpubsub.py b/dimos/protocol/pubsub/impl/lcmpubsub.py index 97871db28..bf6bbd0de 100644 --- a/dimos/protocol/pubsub/impl/lcmpubsub.py +++ b/dimos/protocol/pubsub/impl/lcmpubsub.py @@ -23,7 +23,8 @@ LCMEncoderMixin, PickleEncoderMixin, ) -from dimos.protocol.pubsub.spec import AllPubSub, Glob +from dimos.protocol.pubsub.patterns import Glob +from dimos.protocol.pubsub.spec import AllPubSub from dimos.protocol.service.lcmservice import LCMConfig, LCMService, autoconf from dimos.utils.logging_config import setup_logger diff --git a/dimos/protocol/pubsub/patterns.py b/dimos/protocol/pubsub/patterns.py new file mode 100644 index 000000000..b7c24f4b0 --- /dev/null +++ b/dimos/protocol/pubsub/patterns.py @@ -0,0 +1,98 @@ +# Copyright 2025-2026 Dimensional Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from __future__ import annotations + +import re +from typing import TypeVar + +TopicT = TypeVar("TopicT") +MsgT = TypeVar("MsgT") + + +class Glob: + """Glob pattern that compiles to regex + + Supports: + * - matches any characters except / + ** - matches any characters including / + ? 
- matches single character + + Example: + Topic(topic=Glob("/sensor/*")) # matches /sensor/temp, /sensor/humidity + Topic(topic=Glob("/robot/**")) # matches /robot/arm/joint1, /robot/leg/motor + """ + + def __init__(self, pattern: str) -> None: + self._glob = pattern + self._regex = self._compile(pattern) + + @staticmethod + def _compile(pattern: str) -> str: + """Convert glob pattern to regex.""" + result = [] + i = 0 + while i < len(pattern): + c = pattern[i] + if c == "*": + if i + 1 < len(pattern) and pattern[i + 1] == "*": + result.append(".*") + i += 2 + else: + result.append("[^/]*") + i += 1 + elif c == "?": + result.append(".") + i += 1 + elif c in r"\^$.|+[]{}()": + result.append("\\" + c) + i += 1 + else: + result.append(c) + i += 1 + return "".join(result) + + @property + def pattern(self) -> str: + """Return the regex pattern string.""" + return self._regex + + @property + def glob(self) -> str: + """Return the original glob pattern.""" + return self._glob + + def __repr__(self) -> str: + return f"Glob({self._glob!r})" + + +Pattern = str | re.Pattern[str] | Glob + + +def pattern_matches(pattern: Pattern, topic_str: str) -> bool: + """Check if a topic string matches a pattern. + + Args: + pattern: A string (exact match), compiled regex, or Glob pattern. + topic_str: The topic string to match against. + + Returns: + True if the topic matches the pattern. + """ + if isinstance(pattern, str): + return pattern == topic_str + elif isinstance(pattern, Glob): + return bool(re.fullmatch(pattern.pattern, topic_str)) + else: + return bool(pattern.fullmatch(topic_str)) diff --git a/dimos/protocol/pubsub/spec.py b/dimos/protocol/pubsub/spec.py index 49ddc5824..43774e673 100644 --- a/dimos/protocol/pubsub/spec.py +++ b/dimos/protocol/pubsub/spec.py @@ -25,74 +25,6 @@ TopicT_co = TypeVar("TopicT_co", covariant=True) -class Glob: - """Glob pattern that compiles to regex - - Supports: - * - matches any characters except / - ** - matches any characters including / - ? - matches single character - - Example: - Topic(topic=Glob("/sensor/*")) # matches /sensor/temp, /sensor/humidity - Topic(topic=Glob("/robot/**")) # matches /robot/arm/joint1, /robot/leg/motor - """ - - def __init__(self, pattern: str) -> None: - self._glob = pattern - self._regex = self._compile(pattern) - - @staticmethod - def _compile(pattern: str) -> str: - """Convert glob pattern to regex.""" - result = [] - i = 0 - while i < len(pattern): - c = pattern[i] - if c == "*": - if i + 1 < len(pattern) and pattern[i + 1] == "*": - result.append(".*") - i += 2 - else: - result.append("[^/]*") - i += 1 - elif c == "?": - result.append(".") - i += 1 - elif c in r"\^$.|+[]{}()": - result.append("\\" + c) - i += 1 - else: - result.append(c) - i += 1 - return "".join(result) - - @property - def pattern(self) -> str: - """Return the regex pattern string.""" - return self._regex - - @property - def glob(self) -> str: - """Return the original glob pattern.""" - return self._glob - - def __repr__(self) -> str: - return f"Glob({self._glob!r})" - - -@runtime_checkable -class SubscribeAllCapable(Protocol[MsgT_co, TopicT_co]): - """Protocol for pubsubs that support subscribe_all. - - Both AllPubSub (native) and DiscoveryPubSub (synthesized) satisfy this. - """ - - def subscribe_all(self, callback: Callable[[Any, Any], Any]) -> Callable[[], None]: - """Subscribe to all topics.""" - ... - - class PubSubBaseMixin(Generic[TopicT, MsgT]): """Mixin class providing sugar methods for PubSub implementations. 
Depends on the basic publish and subscribe methods being implemented. @@ -245,3 +177,15 @@ def unsubscribe_all() -> None: unsub() return unsubscribe_all + + +@runtime_checkable +class AllSubscribable(Protocol[MsgT_co, TopicT_co]): + """Protocol for pubsubs that support subscribe_all. + + Both AllPubSub (native) and DiscoveryPubSub (synthesized) satisfy this. + """ + + def subscribe_all(self, callback: Callable[[Any, Any], Any]) -> Callable[[], None]: + """Subscribe to all topics.""" + ... diff --git a/dimos/protocol/pubsub/test_pattern_sub.py b/dimos/protocol/pubsub/test_pattern_sub.py index 11c46aa6f..99aea49b0 100644 --- a/dimos/protocol/pubsub/test_pattern_sub.py +++ b/dimos/protocol/pubsub/test_pattern_sub.py @@ -26,7 +26,8 @@ from dimos.msgs.geometry_msgs import Pose, Quaternion, Vector3 from dimos.protocol.pubsub.impl.lcmpubsub import LCM, LCMPubSubBase, Topic -from dimos.protocol.pubsub.spec import AllPubSub, Glob, PubSub +from dimos.protocol.pubsub.patterns import Glob +from dimos.protocol.pubsub.spec import AllPubSub, PubSub TopicT = TypeVar("TopicT") MsgT = TypeVar("MsgT") diff --git a/dimos/protocol/pubsub/test_patterns.py b/dimos/protocol/pubsub/test_patterns.py new file mode 100644 index 000000000..6d0ce3501 --- /dev/null +++ b/dimos/protocol/pubsub/test_patterns.py @@ -0,0 +1,132 @@ +# Copyright 2025-2026 Dimensional Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
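For quick orientation before the tests, a small usage sketch of the relocated helpers. The printed values follow from the _compile rules above; the Topic(...) line at the end mirrors the Glob class docstring rather than a verified subscription call:

```python
# Illustration only: routing topic strings with the relocated pattern helpers.
import re

from dimos.protocol.pubsub.patterns import Glob, pattern_matches

joints = Glob("/robot/*/joint?")
print(joints.pattern)                                       # /robot/[^/]*/joint.
print(pattern_matches(joints, "/robot/arm/joint1"))         # True  (single * stops at /)
print(pattern_matches(joints, "/robot/arm/elbow/joint1"))   # False

# The same helper handles the other two pattern kinds uniformly:
print(pattern_matches("/sensor/temp", "/sensor/temp"))              # exact string match
print(pattern_matches(re.compile(r"/sensor/\w+"), "/sensor/temp"))  # full regex match

# For LCM subscriptions the Glob can also be used directly as a topic pattern,
# e.g. Topic(topic=Glob("/sensor/*")), as shown in the class docstring.
```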
+ +"""Tests for pattern matching utilities.""" + +import re + +from dimos.protocol.pubsub.patterns import Glob, pattern_matches + + +class TestPatternMatchesString: + """Tests for exact string matching.""" + + def test_exact_match(self) -> None: + assert pattern_matches("/sensor/temp", "/sensor/temp") is True + + def test_no_match(self) -> None: + assert pattern_matches("/sensor/temp", "/sensor/humidity") is False + + def test_empty_string(self) -> None: + assert pattern_matches("", "") is True + assert pattern_matches("", "/sensor") is False + + def test_partial_match_fails(self) -> None: + assert pattern_matches("/sensor", "/sensor/temp") is False + assert pattern_matches("/sensor/temp", "/sensor") is False + + +class TestPatternMatchesGlob: + """Tests for Glob pattern matching.""" + + def test_single_wildcard(self) -> None: + glob = Glob("/sensor/*") + assert pattern_matches(glob, "/sensor/temp") is True + assert pattern_matches(glob, "/sensor/humidity") is True + assert pattern_matches(glob, "/sensor/") is True + + def test_single_wildcard_no_slash(self) -> None: + glob = Glob("/sensor/*") + assert pattern_matches(glob, "/sensor/nested/path") is False + + def test_double_wildcard(self) -> None: + glob = Glob("/robot/**") + assert pattern_matches(glob, "/robot/arm") is True + assert pattern_matches(glob, "/robot/arm/joint1") is True + assert pattern_matches(glob, "/robot/leg/motor/encoder") is True + + def test_question_mark(self) -> None: + glob = Glob("/sensor/?") + assert pattern_matches(glob, "/sensor/a") is True + assert pattern_matches(glob, "/sensor/1") is True + assert pattern_matches(glob, "/sensor/ab") is False + + def test_mixed_patterns(self) -> None: + glob = Glob("/robot/*/joint?") + assert pattern_matches(glob, "/robot/arm/joint1") is True + assert pattern_matches(glob, "/robot/leg/joint2") is True + assert pattern_matches(glob, "/robot/arm/joint12") is False + assert pattern_matches(glob, "/robot/arm/nested/joint1") is False + + def test_no_wildcards(self) -> None: + glob = Glob("/exact/path") + assert pattern_matches(glob, "/exact/path") is True + assert pattern_matches(glob, "/exact/other") is False + + def test_double_wildcard_middle(self) -> None: + glob = Glob("/start/**/end") + # Note: ** becomes .* so /start/**/end requires a / before end + assert pattern_matches(glob, "/start//end") is True + assert pattern_matches(glob, "/start/middle/end") is True + assert pattern_matches(glob, "/start/a/b/c/end") is True + + +class TestPatternMatchesRegex: + """Tests for compiled regex pattern matching.""" + + def test_simple_regex(self) -> None: + pattern = re.compile(r"/sensor/\w+") + assert pattern_matches(pattern, "/sensor/temp") is True + assert pattern_matches(pattern, "/sensor/123") is True + + def test_regex_anchored(self) -> None: + pattern = re.compile(r"/sensor/temp") + assert pattern_matches(pattern, "/sensor/temp") is True + assert pattern_matches(pattern, "/sensor/temperature") is False + + def test_regex_groups(self) -> None: + pattern = re.compile(r"/robot/(arm|leg)/joint(\d+)") + assert pattern_matches(pattern, "/robot/arm/joint1") is True + assert pattern_matches(pattern, "/robot/leg/joint42") is True + assert pattern_matches(pattern, "/robot/head/joint1") is False + + def test_regex_optional(self) -> None: + pattern = re.compile(r"/sensor/temp/?") + assert pattern_matches(pattern, "/sensor/temp") is True + assert pattern_matches(pattern, "/sensor/temp/") is True + + +class TestGlobClass: + """Tests for the Glob class itself.""" + + def 
test_pattern_property(self) -> None: + glob = Glob("/sensor/*") + assert glob.pattern == "/sensor/[^/]*" + + def test_glob_property(self) -> None: + glob = Glob("/sensor/*") + assert glob.glob == "/sensor/*" + + def test_repr(self) -> None: + glob = Glob("/sensor/*") + assert repr(glob) == "Glob('/sensor/*')" + + def test_double_star_regex(self) -> None: + glob = Glob("/robot/**") + assert glob.pattern == "/robot/.*" + + def test_special_chars_escaped(self) -> None: + glob = Glob("/path.with.dots") + assert pattern_matches(glob, "/path.with.dots") is True + assert pattern_matches(glob, "/pathXwithXdots") is False diff --git a/dimos/visualization/rerun/bridge.py b/dimos/visualization/rerun/bridge.py index 33ed13e34..534e07623 100644 --- a/dimos/visualization/rerun/bridge.py +++ b/dimos/visualization/rerun/bridge.py @@ -17,14 +17,16 @@ from __future__ import annotations from dataclasses import dataclass, field -from typing import TYPE_CHECKING, Any, Literal +from functools import lru_cache +from typing import TYPE_CHECKING, Any, Literal, Protocol, TypeAlias, runtime_checkable from reactivex.disposable import Disposable +from toolz import pipe # type: ignore[import-untyped] from dimos.core import Module, rpc from dimos.core.module import ModuleConfig from dimos.protocol.pubsub.impl.lcmpubsub import LCM -from dimos.protocol.pubsub.spec import Glob +from dimos.protocol.pubsub.patterns import Glob, pattern_matches from dimos.utils.logging_config import setup_logger logger = setup_logger() @@ -32,14 +34,27 @@ if TYPE_CHECKING: from collections.abc import Callable + from rerun._baseclasses import Archetype + from dimos.protocol.pubsub.spec import SubscribeAllCapable +# to_rerun() can return a single archetype or a list of (entity_path, archetype) tuples +RerunData: TypeAlias = "Archetype | list[tuple[str, Archetype]]" + + +@runtime_checkable +class RerunConvertible(Protocol): + """Protocol for messages that can be converted to Rerun data.""" + + def to_rerun(self) -> RerunData: ... + + ViewerMode = Literal["native", "web", "none"] # Notes on this system # -# In the future it would be nice if modules can annotate their individual OUTs with (general or specific) +# In the future it would be nice if modules can annotate their individual OUTs with (general or rerun specific) # hints related to their visualization # # so stuff like color, update frequency etc (some Image needs to be rendered on the 3d floor like occupancy grid) @@ -58,10 +73,12 @@ class Config(ModuleConfig): """Configuration for RerunBridgeModule.""" - pubsubs: list[SubscribeAllCapable[Any, Any]] = ( - field(default_factory=lambda: [LCM(autoconf=True)]), + pubsubs: list[SubscribeAllCapable[Any, Any]] = field( + default_factory=lambda: [LCM(autoconf=True)] ) + transforms: dict[Glob | str, Callable[[Any], Archetype]] = field(default_factory=dict) + entity_prefix: str = "world" topic_to_entity: Callable[[Any], str] | None = None viewer_mode: ViewerMode = "native" @@ -87,32 +104,60 @@ class RerunBridgeModule(Module): default_config = Config config: Config + @lru_cache(maxsize=256) + def _transform_for_entity_path(self, entity_path: str) -> Callable[[Any], RerunData | None]: + """Return a composed transform for the entity path. + + Chains matching transforms from config, ending with obligatory_transform + which handles .to_rerun() or passes through Archetypes. 
+ """ + from rerun._baseclasses import Archetype + + matches = [ + fn + for pattern, fn in self.config.transforms.items() + if pattern_matches(pattern, entity_path) + ] + + def obligatory_transform(msg: Any) -> RerunData | None: + if isinstance(msg, Archetype): + return msg + if isinstance(msg, RerunConvertible): + return msg.to_rerun() + return None + + return lambda msg: pipe(msg, *matches, obligatory_transform) + def _get_entity_path(self, topic: Any) -> str: """Convert a topic to a Rerun entity path.""" if self.config.topic_to_entity: return self.config.topic_to_entity(topic) # Default: use topic.name if available (LCM Topic), else str - # Strip everything after # (LCM topic suffix) topic_str = getattr(topic, "name", None) or str(topic) + # Strip everything after # (LCM topic suffix) topic_str = topic_str.split("#")[0] return f"{self.config.entity_prefix}/{topic_str}" def _on_message(self, msg: Any, topic: Any) -> None: import rerun as rr - """Handle incoming message - log to rerun if it has to_rerun.""" - if not hasattr(msg, "to_rerun"): - return + """Handle incoming message - log to rerun.""" + # convert a potentially complex topic object into an str rerun entity path + entity_path: str = self._get_entity_path(topic) + + # apply transforms (including obligatory_transform which handles .to_rerun()) + rerun_data: RerunData | None = self._transform_for_entity_path(entity_path)(msg) - rerun_data = msg.to_rerun() + # transforms can also supress logging by returning None + if not rerun_data: + return # TFMessage returns list of (entity_path, transform) tuples if isinstance(rerun_data, list): for entity_path, archetype in rerun_data: rr.log(entity_path, archetype) else: - entity_path = self._get_entity_path(topic) rr.log(entity_path, rerun_data) @rpc @@ -169,9 +214,15 @@ def main() -> None: bridge = RerunBridgeModule( viewer_mode=args.viewer_mode, memory_limit=args.memory_limit, + # any pubsub that supports subscribe_all and topic that supports str(topic) + # is acceptable here pubsubs=[LCM(autoconf=True)], - transforms={Glob("**/costmap/**#Image"): lambda Image: Image.to_rerun(something=3)}, + # defines transforms for rerun entity paths + transforms={ + Glob("debug_navigation"): lambda image: image.to_rerun(mode="floor"), + }, ) + bridge.start() signal.signal(signal.SIGINT, lambda *_: bridge.stop()) diff --git a/pyproject.toml b/pyproject.toml index 669439791..b349a7329 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -32,7 +32,6 @@ dependencies = [ # Transport Protocols "dimos-lcm", "PyTurboJPEG==1.8.2", - # Core "numpy>=1.26.4", "scipy>=1.15.1", @@ -41,33 +40,28 @@ dependencies = [ "sortedcontainers==2.4.0", "pydantic", "python-dotenv", - # Multiprocess "dask[complete]==2025.5.1", "plum-dispatch==2.5.7", - # Logging "structlog>=25.5.0,<26", "colorlog==6.9.0", - # Core Msgs "opencv-python", "open3d", - # CLI "pydantic-settings>=2.11.0,<3", "textual==3.7.1", "terminaltexteffects==0.12.2", "typer>=0.19.2,<1", "plotext==5.3.2", - # Used for calculating the occupancy map. 
"numba>=0.60.0", # First version supporting Python 3.12 "llvmlite>=0.42.0", # Required by numba 0.60+ - # TODO: rerun shouldn't be required but rn its in core (there is NO WAY to use dimos without rerun rn) # remove this once rerun is optional in core "rerun-sdk>=0.20.0", + "toolz>=1.1.0", ] diff --git a/uv.lock b/uv.lock index dc844f554..6e9820aca 100644 --- a/uv.lock +++ b/uv.lock @@ -1408,6 +1408,7 @@ dependencies = [ { name = "structlog" }, { name = "terminaltexteffects" }, { name = "textual" }, + { name = "toolz" }, { name = "typer" }, ] @@ -1722,6 +1723,7 @@ requires-dist = [ { name = "textual", specifier = "==3.7.1" }, { name = "tiktoken", marker = "extra == 'misc'", specifier = ">=0.8.0" }, { name = "timm", marker = "extra == 'misc'", specifier = ">=1.0.15" }, + { name = "toolz", specifier = ">=1.1.0" }, { name = "torchreid", marker = "extra == 'misc'", specifier = "==0.2.5" }, { name = "tqdm", marker = "extra == 'manipulation'", specifier = ">=4.65.0" }, { name = "transformers", extras = ["torch"], marker = "extra == 'perception'", specifier = "==4.49.0" }, @@ -2384,6 +2386,14 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/f7/02/6e639e90f181dc9127046e00d0528f9f7ad12d428972e3a5378b9aefdb0b/glfw-2.10.0-py2.py27.py3.py30.py31.py32.py33.py34.py35.py36.py37.py38.p39.p310.p311.p312.p313-none-manylinux_2_28_x86_64.whl", hash = "sha256:7916034efa867927892635733a3b6af8cd95ceb10566fd7f1e0d2763c2ee8b12", size = 243525, upload-time = "2025-09-12T08:54:34.006Z" }, { url = "https://files.pythonhosted.org/packages/84/06/cb588ca65561defe0fc48d1df4c2ac12569b81231ae4f2b52ab37007d0bd/glfw-2.10.0-py2.py27.py3.py30.py31.py32.py33.py34.py35.py36.py37.py38.p39.p310.p311.p312.p313-none-win32.whl", hash = "sha256:6c9549da71b93e367b4d71438798daae1da2592039fd14204a80a1a2348ae127", size = 552685, upload-time = "2025-09-12T08:54:35.723Z" }, { url = "https://files.pythonhosted.org/packages/86/27/00c9c96af18ac0a5eac2ff61cbe306551a2d770d7173f396d0792ee1a59e/glfw-2.10.0-py2.py27.py3.py30.py31.py32.py33.py34.py35.py36.py37.py38.p39.p310.p311.p312.p313-none-win_amd64.whl", hash = "sha256:6292d5d6634d668cd23d337e6089491d3945a9aa4ac6e1667b0003520d7caa51", size = 559466, upload-time = "2025-09-12T08:54:37.661Z" }, + { url = "https://files.pythonhosted.org/packages/b3/87/de0b33f6f00687499ca1371f22aa73396341b85bf88f1a284f9da8842493/glfw-2.10.0-py2.py27.py3.py30.py31.py32.py33.py34.py35.py36.py37.py38.py39.py310.py311.py312.py313.py314-none-macosx_10_6_intel.whl", hash = "sha256:2aab89d2d9535635ba011fc7303390685169a1aa6731ad580d08d043524b8899", size = 105326, upload-time = "2026-01-28T05:57:56.083Z" }, + { url = "https://files.pythonhosted.org/packages/b6/a6/6ea2f73ad4474896d9e38b3ffbe6ffd5a802c738392269e99e8c6621a461/glfw-2.10.0-py2.py27.py3.py30.py31.py32.py33.py34.py35.py36.py37.py38.py39.py310.py311.py312.py313.py314-none-macosx_11_0_arm64.whl", hash = "sha256:23936202a107039b5372f0b88ae1d11080746aa1c78910a45d4a0c4cf408cfaa", size = 102180, upload-time = "2026-01-28T05:57:57.787Z" }, + { url = "https://files.pythonhosted.org/packages/58/19/d81b19e8261b9cb51b81d1402167791fef81088dfe91f0c4e9d136fdc5ca/glfw-2.10.0-py2.py27.py3.py30.py31.py32.py33.py34.py35.py36.py37.py38.py39.py310.py311.py312.py313.py314-none-manylinux2014_aarch64.whl", hash = "sha256:7be06d0838f61df67bd54cb6266a6193d54083acb3624ff3c3812a6358406fa4", size = 230038, upload-time = "2026-01-28T05:57:59.105Z" }, + { url = 
"https://files.pythonhosted.org/packages/e2/fa/b035636cd82198b97b51a93efe9cfc4343d6b15cefbd336a3f2be871d848/glfw-2.10.0-py2.py27.py3.py30.py31.py32.py33.py34.py35.py36.py37.py38.py39.py310.py311.py312.py313.py314-none-manylinux2014_x86_64.whl", hash = "sha256:91d36b3582a766512eff8e3b5dcc2d3ffcbf10b7cf448551085a08a10f1b8244", size = 241983, upload-time = "2026-01-28T05:58:00.352Z" }, + { url = "https://files.pythonhosted.org/packages/ff/b4/f7b6cc022dd7c68b6c702d19da5d591f978f89c958b9bd3090615db0c739/glfw-2.10.0-py2.py27.py3.py30.py31.py32.py33.py34.py35.py36.py37.py38.py39.py310.py311.py312.py313.py314-none-manylinux_2_28_aarch64.whl", hash = "sha256:27c9e9a2d5e1dc3c9e3996171d844d9df9a5a101e797cb94cce217b7afcf8fd9", size = 231053, upload-time = "2026-01-28T05:58:01.683Z" }, + { url = "https://files.pythonhosted.org/packages/5a/3f/efeb7c6801c46e11bd666a5180f0d615f74f72264212f74f39586c6fda9d/glfw-2.10.0-py2.py27.py3.py30.py31.py32.py33.py34.py35.py36.py37.py38.py39.py310.py311.py312.py313.py314-none-manylinux_2_28_x86_64.whl", hash = "sha256:ce6724bb7cb3d0543dcba17206dce909f94176e68220b8eafee72e9f92bcf542", size = 243522, upload-time = "2026-01-28T05:58:03.517Z" }, + { url = "https://files.pythonhosted.org/packages/cf/b9/b04c3aa0aad2870cfe799f32f8b59789c98e1816bbce9e83f4823c5b840b/glfw-2.10.0-py2.py27.py3.py30.py31.py32.py33.py34.py35.py36.py37.py38.py39.py310.py311.py312.py313.py314-none-win32.whl", hash = "sha256:fca724a21a372731edb290841edd28a9fb1ee490f833392752844ac807c0086a", size = 552682, upload-time = "2026-01-28T05:58:05.649Z" }, + { url = "https://files.pythonhosted.org/packages/bd/e1/6d6816b296a529ac9b897ad228b1e084eb1f92319e96371880eebdc874a6/glfw-2.10.0-py2.py27.py3.py30.py31.py32.py33.py34.py35.py36.py37.py38.py39.py310.py311.py312.py313.py314-none-win_amd64.whl", hash = "sha256:823c0bd7770977d4b10e0ed0aef2f3682276b7c88b8b65cfc540afce5951392f", size = 559464, upload-time = "2026-01-28T05:58:07.261Z" }, ] [[package]] From 6344b0da60b02603bb6205fc6acccb2cede259e5 Mon Sep 17 00:00:00 2001 From: Ivan Nikolic Date: Sun, 1 Feb 2026 09:36:46 +0800 Subject: [PATCH 12/36] planner uses occupancygrid now, bridge improvements --- dimos/msgs/nav_msgs/OccupancyGrid.py | 150 ++++++++++-------- .../replanning_a_star/global_planner.py | 3 +- .../replanning_a_star/local_planner.py | 45 +----- dimos/navigation/replanning_a_star/module.py | 3 +- dimos/protocol/pubsub/spec.py | 4 +- dimos/visualization/rerun/bridge.py | 63 ++++++-- 6 files changed, 141 insertions(+), 127 deletions(-) diff --git a/dimos/msgs/nav_msgs/OccupancyGrid.py b/dimos/msgs/nav_msgs/OccupancyGrid.py index c36d3f7a1..c0b1ee24b 100644 --- a/dimos/msgs/nav_msgs/OccupancyGrid.py +++ b/dimos/msgs/nav_msgs/OccupancyGrid.py @@ -432,22 +432,27 @@ def to_rerun( # type: ignore[no-untyped-def] colormap: str | None = None, mode: str = "mesh", z_offset: float = 0.01, + brightness: float = 1.0, + cost_range: tuple[int, int] | None = None, + free_color: str | None = None, **kwargs: Any, ): # type: ignore[no-untyped-def] """Convert to Rerun visualization format. Args: colormap: Optional colormap name (e.g., "RdBu_r" for blue=free, red=occupied). - If None, uses grayscale for image mode or default colors for 3D modes. + If None, uses grayscale for image mode or default colors for mesh mode. 
mode: Visualization mode: - - "image": 2D grayscale/colored image (default) - - "mesh": 3D textured plane overlay on floor - - "points": 3D points for occupied cells only - z_offset: Height offset for 3D modes (default 0.01m above floor) + - "image": 2D grayscale/colored image + - "mesh": 3D textured plane overlay on floor (default) + z_offset: Height offset for mesh mode (default 0.01m above floor) + brightness: Brightness multiplier (0.0 to 1.0, default 1.0) + cost_range: Optional (min, max) cost range to display. Cells outside range use free_color. + free_color: Hex color for out-of-range cells (e.g. "#484981"). Default is black. **kwargs: Additional args (ignored for compatibility) Returns: - Rerun archetype for logging (rr.Image, rr.Mesh3D, or rr.Points3D) + Rerun archetype for logging (rr.Image or rr.Mesh3D) The visualization uses: - Free space (value 0): white/blue @@ -455,30 +460,48 @@ def to_rerun( # type: ignore[no-untyped-def] - Occupied space (value > 0): black/red with gradient """ if self.grid.size == 0: - if mode == "image": - return rr.Image(np.zeros((1, 1), dtype=np.uint8), color_model="L") - elif mode == "mesh": + if mode == "mesh": return rr.Mesh3D(vertex_positions=[]) else: - return rr.Points3D([]) + return rr.Image(np.zeros((1, 1), dtype=np.uint8), color_model="L") - if mode == "points": - return self._to_rerun_points(colormap, z_offset) - elif mode == "mesh": - return self._to_rerun_mesh(colormap, z_offset) + if mode == "mesh": + return self._to_rerun_mesh(colormap, z_offset, brightness, cost_range, free_color) else: - return self._to_rerun_image(colormap) + return self._to_rerun_image(colormap, brightness, cost_range) - def _generate_rgba_texture(self, colormap: str | None = None) -> NDArray[np.uint8]: + def _generate_rgba_texture( + self, + colormap: str | None = None, + brightness: float = 1.0, + cost_range: tuple[int, int] | None = None, + free_color: str | None = None, + ) -> NDArray[np.uint8]: """Generate RGBA texture for the occupancy grid. Args: colormap: Optional matplotlib colormap name. + brightness: Brightness multiplier (0.0 to 1.0). + cost_range: Optional (min, max) cost range. Cells outside range use free_color. + free_color: Hex color for out-of-range cells (e.g. "#484981"). Default is black. Returns: RGBA numpy array of shape (height, width, 4). Note: NOT flipped - caller handles orientation. 
""" + # Parse free_color hex to RGB + if free_color is not None: + fc = free_color.lstrip("#") + free_rgb = [int(fc[i : i + 2], 16) for i in (0, 2, 4)] + else: + free_rgb = [0, 0, 0] + + # Determine which cells are in range (if cost_range specified) + if cost_range is not None: + in_range_mask = (self.grid >= cost_range[0]) & (self.grid <= cost_range[1]) + else: + in_range_mask = None + if colormap is not None: cmap = _get_matplotlib_cmap(colormap) grid_float = self.grid.astype(np.float32) @@ -489,20 +512,25 @@ def _generate_rgba_texture(self, colormap: str | None = None) -> NDArray[np.uint occupied_mask = self.grid > 0 if np.any(free_mask): - colors_free = (np.array(cmap(0.0)[:3]) * 255).astype(np.uint8) + colors_free = (np.array(cmap(0.0)[:3]) * 255 * brightness).astype(np.uint8) vis[free_mask, :3] = colors_free vis[free_mask, 3] = 255 if np.any(occupied_mask): costs = grid_float[occupied_mask] cost_norm = 0.5 + (costs / 100) * 0.5 - colors_occ = (cmap(cost_norm)[:, :3] * 255).astype(np.uint8) + colors_occ = (cmap(cost_norm)[:, :3] * 255 * brightness).astype(np.uint8) vis[occupied_mask, :3] = colors_occ vis[occupied_mask, 3] = 255 # Unknown cells: fully transparent # (vis is already zero-initialized, so alpha=0 by default) + # Apply cost_range filter - set out-of-range cells to free_color (Rerun mesh doesn't support alpha) + if in_range_mask is not None: + vis[~in_range_mask, :3] = free_rgb + vis[~in_range_mask, 3] = 255 + return vis # Default: Foxglove-style coloring @@ -512,22 +540,32 @@ def _generate_rgba_texture(self, colormap: str | None = None) -> NDArray[np.uint occupied_mask = self.grid > 0 # Free space: blue-purple #484981 - vis[free_mask] = [72, 73, 129, 180] + vis[free_mask] = [int(72 * brightness), int(73 * brightness), int(129 * brightness), 180] # Occupied: gradient from blue-purple to black if np.any(occupied_mask): costs = self.grid[occupied_mask].astype(np.float32) factor = (1 - costs / 100).clip(0, 1) - vis[occupied_mask, 0] = (72 * factor).astype(np.uint8) - vis[occupied_mask, 1] = (73 * factor).astype(np.uint8) - vis[occupied_mask, 2] = (129 * factor).astype(np.uint8) + vis[occupied_mask, 0] = (72 * factor * brightness).astype(np.uint8) + vis[occupied_mask, 1] = (73 * factor * brightness).astype(np.uint8) + vis[occupied_mask, 2] = (129 * factor * brightness).astype(np.uint8) vis[occupied_mask, 3] = 220 # Unknown cells: fully transparent (already zero from initialization) + # Apply cost_range filter - set out-of-range cells to free_color + if in_range_mask is not None: + vis[~in_range_mask, :3] = free_rgb + vis[~in_range_mask, 3] = 255 + return vis - def _to_rerun_image(self, colormap: str | None = None): # type: ignore[no-untyped-def] + def _to_rerun_image( # type: ignore[no-untyped-def] + self, + colormap: str | None = None, + brightness: float = 1.0, + cost_range: tuple[int, int] | None = None, + ): """Convert to 2D image visualization.""" # Use existing cached visualization functions for supported palettes if colormap in ("turbo", "rainbow"): @@ -538,13 +576,23 @@ def _to_rerun_image(self, colormap: str | None = None): # type: ignore[no-untyp else: bgr_image = rainbow_image(self.grid) - # Convert BGR to RGB and flip for world coordinates - rgb_image = np.flipud(bgr_image[:, :, ::-1]) + # Convert BGR to RGB, apply brightness, and flip for world coordinates + rgb_image = (np.flipud(bgr_image[:, :, ::-1]) * brightness).astype(np.uint8) + + # Apply cost_range filter for turbo/rainbow + if cost_range is not None: + in_range = (self.grid >= cost_range[0]) & (self.grid <= 
cost_range[1]) + rgba_image = np.zeros((rgb_image.shape[0], rgb_image.shape[1], 4), dtype=np.uint8) + rgba_image[:, :, :3] = rgb_image + rgba_image[:, :, 3] = 255 + rgba_image[np.flipud(~in_range), 3] = 0 # Flip mask to match flipped image + return rr.Image(rgba_image, color_model="RGBA") + return rr.Image(rgb_image, color_model="RGB") if colormap is not None: # Use helper and flip for image display - rgba = self._generate_rgba_texture(colormap) + rgba = self._generate_rgba_texture(colormap, brightness, cost_range) return rr.Image(np.flipud(rgba), color_model="RGBA") # Grayscale visualization (no colormap) @@ -566,46 +614,14 @@ def _to_rerun_image(self, colormap: str | None = None): # type: ignore[no-untyp # Flip vertically to match world coordinates (y=0 at bottom) return rr.Image(np.flipud(vis_gray), color_model="L") - def _to_rerun_points(self, colormap: str | None = None, z_offset: float = 0.01): # type: ignore[no-untyped-def] - """Convert to 3D points for occupied cells.""" - # Find occupied cells (cost > 0) - occupied_mask = self.grid > 0 - if not np.any(occupied_mask): - return rr.Points3D([]) - - # Get grid coordinates of occupied cells - gy, gx = np.where(occupied_mask) - costs = self.grid[occupied_mask].astype(np.float32) - - # Convert to world coordinates - ox = self.origin.position.x - oy = self.origin.position.y - wx = ox + (gx + 0.5) * self.resolution - wy = oy + (gy + 0.5) * self.resolution - wz = np.full_like(wx, z_offset) - - points = np.column_stack([wx, wy, wz]) - - # Determine colors - if colormap is not None: - # Normalize costs to 0-1 range - cost_norm = costs / 100.0 - cmap = _get_matplotlib_cmap(colormap) - point_colors = (cmap(cost_norm)[:, :3] * 255).astype(np.uint8) - else: - # Default: red gradient based on cost - intensity = (costs / 100.0 * 255).astype(np.uint8) - point_colors = np.column_stack( - [intensity, np.zeros_like(intensity), np.zeros_like(intensity)] - ) - - return rr.Points3D( - positions=points, - radii=self.resolution / 2, - colors=point_colors, - ) - - def _to_rerun_mesh(self, colormap: str | None = None, z_offset: float = 0.01): # type: ignore[no-untyped-def] + def _to_rerun_mesh( # type: ignore[no-untyped-def] + self, + colormap: str | None = None, + z_offset: float = 0.01, + brightness: float = 1.0, + cost_range: tuple[int, int] | None = None, + free_color: str | None = None, + ): """Convert to 3D textured mesh overlay on floor plane. Uses a single quad with the occupancy grid as a texture. 
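As a usage sketch, the new keyword arguments compose like this. Here `grid` stands in for any populated OccupancyGrid (for example a costmap received from the planner), and the entity paths are placeholders rather than fixed names:

```python
# Sketch of the extended OccupancyGrid.to_rerun() knobs from this patch.
import rerun as rr


def log_costmap(grid) -> None:
    # Low-cost cells only, dimmed, drawn as a textured quad just above the floor.
    mesh = grid.to_rerun(
        mode="mesh",
        colormap="Purples",    # any matplotlib colormap name
        z_offset=0.015,        # metres above z=0
        brightness=0.5,        # 0.0-1.0 multiplier on the texture colours
        cost_range=(0, 20),    # cells outside this range fall back to free_color
        free_color="#484981",  # flat colour, since the mesh texture path does not use alpha
    )
    rr.log("world/debug_navigation", mesh)

    # The same grid can still be inspected as a flat 2D panel.
    rr.log("debug/costmap_2d", grid.to_rerun(mode="image", colormap="turbo"))
```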
@@ -616,7 +632,7 @@ def _to_rerun_mesh(self, colormap: str | None = None, z_offset: float = 0.01): # Generate RGBA texture and flip to match world coordinates # Grid row 0 is at world y=origin (bottom), but texture row 0 is at UV v=0 (top) - rgba = np.flipud(self._generate_rgba_texture(colormap)) + rgba = np.flipud(self._generate_rgba_texture(colormap, brightness, cost_range, free_color)) # Single quad covering entire grid ox = self.origin.position.x diff --git a/dimos/navigation/replanning_a_star/global_planner.py b/dimos/navigation/replanning_a_star/global_planner.py index 8dc1a42cc..c8e25213c 100644 --- a/dimos/navigation/replanning_a_star/global_planner.py +++ b/dimos/navigation/replanning_a_star/global_planner.py @@ -28,7 +28,6 @@ from dimos.msgs.geometry_msgs.Vector3 import Vector3 from dimos.msgs.nav_msgs.OccupancyGrid import CostValues, OccupancyGrid from dimos.msgs.nav_msgs.Path import Path -from dimos.msgs.sensor_msgs import Image from dimos.navigation.base import NavigationState from dimos.navigation.replanning_a_star.goal_validator import find_safe_goal from dimos.navigation.replanning_a_star.local_planner import LocalPlanner, StopMessage @@ -156,7 +155,7 @@ def cmd_vel(self) -> Subject[Twist]: return self._local_planner.cmd_vel @property - def debug_navigation(self) -> Subject[Image]: + def debug_navigation(self) -> Subject[OccupancyGrid]: return self._local_planner.debug_navigation def _thread_entrypoint(self) -> None: diff --git a/dimos/navigation/replanning_a_star/local_planner.py b/dimos/navigation/replanning_a_star/local_planner.py index 65a18d063..787f93d72 100644 --- a/dimos/navigation/replanning_a_star/local_planner.py +++ b/dimos/navigation/replanning_a_star/local_planner.py @@ -23,11 +23,9 @@ from dimos.core.global_config import GlobalConfig from dimos.core.resource import Resource -from dimos.mapping.occupancy.visualize_path import visualize_path from dimos.msgs.geometry_msgs import Twist from dimos.msgs.geometry_msgs.PoseStamped import PoseStamped -from dimos.msgs.nav_msgs import Path -from dimos.msgs.sensor_msgs import Image +from dimos.msgs.nav_msgs import OccupancyGrid, Path from dimos.navigation.base import NavigationState from dimos.navigation.replanning_a_star.controllers import Controller, PController from dimos.navigation.replanning_a_star.navigation_map import NavigationMap @@ -47,7 +45,7 @@ class LocalPlanner(Resource): cmd_vel: Subject[Twist] stopped_navigating: Subject[StopMessage] - debug_navigation: Subject[Image] + debug_navigation: Subject[OccupancyGrid] _thread: Thread | None = None _path: Path | None = None @@ -323,41 +321,4 @@ def _send_debug_navigation(self, path: Path, path_clearance: PathClearance) -> N self._debug_navigation_last = now - self.debug_navigation.on_next(self._make_debug_navigation_image(path, path_clearance)) - - def _make_debug_navigation_image(self, path: Path, path_clearance: PathClearance) -> Image: - scale = 8 - image = visualize_path( - self._navigation_map.gradient_costmap, - path, - self._global_config.robot_width, - self._global_config.robot_rotation_diameter, - 2, - scale, - ) - image.data = np.flipud(image.data) - - # Add path mask. - mask = path_clearance.mask - scaled_mask = np.repeat(np.repeat(mask, scale, axis=0), scale, axis=1) - scaled_mask = np.flipud(scaled_mask) - white = np.array([255, 255, 255], dtype=np.int16) - image.data[scaled_mask] = (image.data[scaled_mask].astype(np.int16) * 3 + white * 7) // 10 - - with self._lock: - current_odom = self._current_odom - - # Draw robot position. 
- if current_odom is not None: - grid_pos = self._navigation_map.gradient_costmap.world_to_grid(current_odom.position) - x = int(grid_pos.x * scale) - y = image.data.shape[0] - 1 - int(grid_pos.y * scale) - radius = 8 - for dy in range(-radius, radius + 1): - for dx in range(-radius, radius + 1): - if dx * dx + dy * dy <= radius * radius: - py, px = y + dy, x + dx - if 0 <= py < image.data.shape[0] and 0 <= px < image.data.shape[1]: - image.data[py, px] = [255, 255, 255] - - return image + self.debug_navigation.on_next(self._navigation_map.gradient_costmap) diff --git a/dimos/navigation/replanning_a_star/module.py b/dimos/navigation/replanning_a_star/module.py index 6ba1ae0ba..0c11c6d10 100644 --- a/dimos/navigation/replanning_a_star/module.py +++ b/dimos/navigation/replanning_a_star/module.py @@ -23,7 +23,6 @@ from dimos.dashboard.rerun_init import connect_rerun from dimos.msgs.geometry_msgs import PoseStamped, Twist from dimos.msgs.nav_msgs import OccupancyGrid, Path -from dimos.msgs.sensor_msgs import Image from dimos.navigation.base import NavigationInterface, NavigationState from dimos.navigation.replanning_a_star.global_planner import GlobalPlanner @@ -38,7 +37,7 @@ class ReplanningAStarPlanner(Module, NavigationInterface): navigation_state: Out[String] # TODO: set it cmd_vel: Out[Twist] path: Out[Path] - debug_navigation: Out[Image] + debug_navigation: Out[OccupancyGrid] _planner: GlobalPlanner _global_config: GlobalConfig diff --git a/dimos/protocol/pubsub/spec.py b/dimos/protocol/pubsub/spec.py index 43774e673..fe979fce8 100644 --- a/dimos/protocol/pubsub/spec.py +++ b/dimos/protocol/pubsub/spec.py @@ -180,12 +180,12 @@ def unsubscribe_all() -> None: @runtime_checkable -class AllSubscribable(Protocol[MsgT_co, TopicT_co]): +class SubscribeAllCapable(Protocol[MsgT_co, TopicT_co]): """Protocol for pubsubs that support subscribe_all. Both AllPubSub (native) and DiscoveryPubSub (synthesized) satisfy this. """ def subscribe_all(self, callback: Callable[[Any, Any], Any]) -> Callable[[], None]: - """Subscribe to all topics.""" + """Subscribe to all messages on all topics.""" ... 
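The switch from a pre-rendered debug Image to the raw OccupancyGrid moves drawing decisions out of the planner and into the viewer side. A hedged sketch of the replacement pattern for the removed _make_debug_navigation_image: only OccupancyGrid.to_rerun and the Rerun calls are taken from this series, while the entity paths and the odom argument (the current PoseStamped) are illustrative:

```python
# Sketch: log the costmap and the robot pose as separate Rerun entities and let
# the viewer compose them, instead of rasterising the pose into a bitmap.
import rerun as rr


def log_navigation_debug(costmap, odom) -> None:
    # Raw costmap straight from the planner, rendered as a floor overlay.
    rr.log("world/debug_navigation", costmap.to_rerun(mode="mesh", z_offset=0.015))

    # Robot position as a point the viewer overlays on the same floor plane.
    if odom is not None:
        p = odom.position
        rr.log(
            "world/debug_navigation/robot",
            rr.Points3D(positions=[[p.x, p.y, 0.02]], radii=[0.1], colors=[[255, 255, 255]]),
        )
```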
diff --git a/dimos/visualization/rerun/bridge.py b/dimos/visualization/rerun/bridge.py index 534e07623..c1fd32bd3 100644 --- a/dimos/visualization/rerun/bridge.py +++ b/dimos/visualization/rerun/bridge.py @@ -18,7 +18,16 @@ from dataclasses import dataclass, field from functools import lru_cache -from typing import TYPE_CHECKING, Any, Literal, Protocol, TypeAlias, runtime_checkable +from typing import ( + TYPE_CHECKING, + Any, + Literal, + Protocol, + TypeAlias, + TypeGuard, + cast, + runtime_checkable, +) from reactivex.disposable import Disposable from toolz import pipe # type: ignore[import-untyped] @@ -39,7 +48,22 @@ from dimos.protocol.pubsub.spec import SubscribeAllCapable # to_rerun() can return a single archetype or a list of (entity_path, archetype) tuples -RerunData: TypeAlias = "Archetype | list[tuple[str, Archetype]]" +RerunMulti: TypeAlias = "list[tuple[str, Archetype]]" +RerunData: TypeAlias = "Archetype | RerunMulti" + + +def is_rerun_multi(data: Any) -> TypeGuard[RerunMulti]: + """Check if data is a list of (entity_path, archetype) tuples.""" + from rerun._baseclasses import Archetype + + return ( + isinstance(data, list) + and bool(data) + and isinstance(data[0], tuple) + and len(data[0]) == 2 + and isinstance(data[0][0], str) + and isinstance(data[0][1], Archetype) + ) @runtime_checkable @@ -104,6 +128,9 @@ class RerunBridgeModule(Module): default_config = Config config: Config + def __init__(self, **kwargs: Any) -> None: + super().__init__(**kwargs) + @lru_cache(maxsize=256) def _transform_for_entity_path(self, entity_path: str) -> Callable[[Any], RerunData | None]: """Return a composed transform for the entity path. @@ -113,20 +140,25 @@ def _transform_for_entity_path(self, entity_path: str) -> Callable[[Any], RerunD """ from rerun._baseclasses import Archetype + # find all matching transforms for this entity path matches = [ fn for pattern, fn in self.config.transforms.items() if pattern_matches(pattern, entity_path) ] - def obligatory_transform(msg: Any) -> RerunData | None: + # final obligatory transform (ensures we return Archetype or None) + def final_transform(msg: Any) -> RerunData | None: if isinstance(msg, Archetype): return msg + if is_rerun_multi(msg): + return msg if isinstance(msg, RerunConvertible): return msg.to_rerun() return None - return lambda msg: pipe(msg, *matches, obligatory_transform) + # compose all transforms + return lambda msg: pipe(msg, *matches, final_transform) def _get_entity_path(self, topic: Any) -> str: """Convert a topic to a Rerun entity path.""" @@ -137,12 +169,12 @@ def _get_entity_path(self, topic: Any) -> str: topic_str = getattr(topic, "name", None) or str(topic) # Strip everything after # (LCM topic suffix) topic_str = topic_str.split("#")[0] - return f"{self.config.entity_prefix}/{topic_str}" + return f"{self.config.entity_prefix}{topic_str}" def _on_message(self, msg: Any, topic: Any) -> None: + """Handle incoming message - log to rerun.""" import rerun as rr - """Handle incoming message - log to rerun.""" # convert a potentially complex topic object into an str rerun entity path entity_path: str = self._get_entity_path(topic) @@ -153,12 +185,12 @@ def _on_message(self, msg: Any, topic: Any) -> None: if not rerun_data: return - # TFMessage returns list of (entity_path, transform) tuples - if isinstance(rerun_data, list): - for entity_path, archetype in rerun_data: - rr.log(entity_path, archetype) + # TFMessage for example returns list of (entity_path, archetype) tuples + if is_rerun_multi(rerun_data): + for path, archetype in 
rerun_data: + rr.log(path, archetype) else: - rr.log(entity_path, rerun_data) + rr.log(entity_path, cast("Archetype", rerun_data)) @rpc def start(self) -> None: @@ -219,7 +251,14 @@ def main() -> None: pubsubs=[LCM(autoconf=True)], # defines transforms for rerun entity paths transforms={ - Glob("debug_navigation"): lambda image: image.to_rerun(mode="floor"), + Glob("world/debug_navigation"): lambda grid: grid.to_rerun( + mode="mesh", + colormap="Purples", + z_offset=0.015, + brightness=0.5, + cost_range=[0, 20], + free_color="#484981", + ), }, ) From 433c7862f22f4ae2a7b20a2ed20eef1d8bfe7b8e Mon Sep 17 00:00:00 2001 From: Ivan Nikolic Date: Sun, 1 Feb 2026 09:37:23 +0800 Subject: [PATCH 13/36] image pickle fix --- dimos/msgs/sensor_msgs/Image.py | 697 ++++++++++-------- .../sensor_msgs/image_impls/AbstractImage.py | 264 +------ 2 files changed, 382 insertions(+), 579 deletions(-) diff --git a/dimos/msgs/sensor_msgs/Image.py b/dimos/msgs/sensor_msgs/Image.py index a9c842f1a..0a7a7e0d8 100644 --- a/dimos/msgs/sensor_msgs/Image.py +++ b/dimos/msgs/sensor_msgs/Image.py @@ -15,6 +15,8 @@ from __future__ import annotations import base64 +from dataclasses import dataclass, field +from enum import Enum import time from typing import TYPE_CHECKING, Any, Literal, TypedDict @@ -24,16 +26,9 @@ import numpy as np import reactivex as rx from reactivex import operators as ops +import rerun as rr from turbojpeg import TurboJPEG # type: ignore[import-untyped] -from dimos.msgs.sensor_msgs.image_impls.AbstractImage import ( - HAS_CUDA, - HAS_NVIMGCODEC, - NVIMGCODEC_LAST_USED, - ImageFormat, -) -from dimos.msgs.sensor_msgs.image_impls.CudaImage import CudaImage -from dimos.msgs.sensor_msgs.image_impls.NumpyImage import NumpyImage from dimos.types.timestamped import Timestamped, TimestampedBufferCollection, to_human_readable from dimos.utils.reactive import quality_barrier @@ -42,21 +37,46 @@ from reactivex.observable import Observable - from dimos.msgs.sensor_msgs.image_impls.AbstractImage import ( - AbstractImage, - ) - -try: - import cupy as cp # type: ignore[import-not-found, import-untyped] -except Exception: - cp = None - try: from sensor_msgs.msg import Image as ROSImage # type: ignore[attr-defined] except ImportError: ROSImage = None # type: ignore[assignment, misc] +class ImageFormat(Enum): + BGR = "BGR" + RGB = "RGB" + RGBA = "RGBA" + BGRA = "BGRA" + GRAY = "GRAY" + GRAY16 = "GRAY16" + DEPTH = "DEPTH" + DEPTH16 = "DEPTH16" + + +def _format_to_rerun(data: np.ndarray, fmt: ImageFormat) -> Any: # type: ignore[type-arg] + """Convert image data to Rerun archetype based on format.""" + match fmt: + case ImageFormat.RGB: + return rr.Image(data, color_model="RGB") + case ImageFormat.RGBA: + return rr.Image(data, color_model="RGBA") + case ImageFormat.BGR: + return rr.Image(data, color_model="BGR") + case ImageFormat.BGRA: + return rr.Image(data, color_model="BGRA") + case ImageFormat.GRAY: + return rr.Image(data, color_model="L") + case ImageFormat.GRAY16: + return rr.Image(data, color_model="L") + case ImageFormat.DEPTH: + return rr.DepthImage(data) + case ImageFormat.DEPTH16: + return rr.DepthImage(data) + case _: + raise ValueError(f"Unsupported format for Rerun: {fmt}") + + class AgentImageMessage(TypedDict): """Type definition for agent-compatible image representation.""" @@ -66,108 +86,104 @@ class AgentImageMessage(TypedDict): data: str # Base64 encoded image data +@dataclass class Image(Timestamped): + """Simple NumPy-based image container.""" + msg_name = "sensor_msgs.Image" - def __init__( # type: 
ignore[no-untyped-def] - self, - impl: AbstractImage | None = None, - *, - data=None, - format: ImageFormat | None = None, - frame_id: str | None = None, - ts: float | None = None, - ) -> None: - """Construct an Image facade. + data: np.ndarray[Any, np.dtype[Any]] = field( + default_factory=lambda: np.zeros((1, 1, 3), dtype=np.uint8) + ) + format: ImageFormat = field(default=ImageFormat.BGR) + frame_id: str = field(default="") + ts: float = field(default_factory=time.time) - Usage: - - Image(impl=) - - Image(data=, format=ImageFormat.RGB, frame_id=str, ts=float) + def __post_init__(self) -> None: + if not isinstance(self.data, np.ndarray): + self.data = np.asarray(self.data) + if self.data.ndim < 2: + raise ValueError("Image requires a 2D/3D NumPy array") - Notes: - - When constructed from `data`, uses CudaImage if `data` is a CuPy array and CUDA is available; otherwise NumpyImage. - - `format` defaults to ImageFormat.RGB; `frame_id` defaults to ""; `ts` defaults to `time.time()`. - """ - # Disallow mixing impl with raw kwargs - if impl is not None and any(x is not None for x in (data, format, frame_id, ts)): - raise TypeError( - "Provide either 'impl' or ('data', 'format', 'frame_id', 'ts'), not both" - ) + def __str__(self) -> str: + return ( + f"Image(shape={self.shape}, format={self.format.value}, dtype={self.dtype}, " + f"ts={to_human_readable(self.ts)})" + ) - if impl is not None: - self._impl = impl - return + def __repr__(self) -> str: + return f"Image(shape={self.shape}, format={self.format.value}, dtype={self.dtype}, frame_id='{self.frame_id}', ts={self.ts})" - # Raw constructor path - if data is None: - raise TypeError("'data' is required when constructing Image without 'impl'") - fmt = format if format is not None else ImageFormat.BGR - fid = frame_id if frame_id is not None else "" - tstamp = ts if ts is not None else time.time() + def __eq__(self, other: object) -> bool: + if not isinstance(other, Image): + return False + return ( + np.array_equal(self.data, other.data) + and self.format == other.format + and self.frame_id == other.frame_id + and abs(self.ts - other.ts) < 1e-6 + ) - # Detect CuPy array without a hard dependency - is_cu = False - try: - import cupy as _cp + def __len__(self) -> int: + return int(self.height * self.width) - is_cu = isinstance(data, _cp.ndarray) - except Exception: - is_cu = False + def __getstate__(self) -> dict[str, Any]: + return {"data": self.data, "format": self.format, "frame_id": self.frame_id, "ts": self.ts} - if is_cu and HAS_CUDA: - self._impl = CudaImage(data, fmt, fid, tstamp) - else: - self._impl = NumpyImage(np.asarray(data), fmt, fid, tstamp) + def __setstate__(self, state: dict[str, Any]) -> None: + self.data = state.get("data", np.zeros((1, 1, 3), dtype=np.uint8)) + self.format = state.get("format", ImageFormat.BGR) + self.frame_id = state.get("frame_id", "") + self.ts = state.get("ts", time.time()) - def __str__(self) -> str: - dev = "cuda" if self.is_cuda else "cpu" - return ( - f"Image(shape={self.shape}, format={self.format.value}, dtype={self.dtype}, " - f"dev={dev}, ts={to_human_readable(self.ts)})" - ) + @property + def height(self) -> int: + return int(self.data.shape[0]) - @classmethod - def from_impl(cls, impl: AbstractImage) -> Image: - return cls(impl) + @property + def width(self) -> int: + return int(self.data.shape[1]) + + @property + def channels(self) -> int: + if self.data.ndim == 2: + return 1 + if self.data.ndim == 3: + return int(self.data.shape[2]) + raise ValueError("Invalid image dimensions") + + 
@property + def shape(self) -> tuple[int, ...]: + return tuple(self.data.shape) + + @property + def dtype(self) -> np.dtype[Any]: + return self.data.dtype + + def copy(self) -> Image: + return Image(data=self.data.copy(), format=self.format, frame_id=self.frame_id, ts=self.ts) @classmethod - def from_numpy( # type: ignore[no-untyped-def] + def from_numpy( cls, np_image: np.ndarray, # type: ignore[type-arg] format: ImageFormat = ImageFormat.BGR, - to_cuda: bool = False, - **kwargs, + frame_id: str = "", + ts: float | None = None, ) -> Image: - if kwargs.pop("to_gpu", False): - to_cuda = True - if to_cuda and HAS_CUDA: - return cls( - CudaImage( - np_image if hasattr(np_image, "shape") else np.asarray(np_image), - format, - kwargs.get("frame_id", ""), - kwargs.get("ts", time.time()), - ) - ) return cls( - NumpyImage( - np.asarray(np_image), - format, - kwargs.get("frame_id", ""), - kwargs.get("ts", time.time()), - ) + data=np.asarray(np_image), + format=format, + frame_id=frame_id, + ts=ts if ts is not None else time.time(), ) @classmethod - def from_file( # type: ignore[no-untyped-def] + def from_file( cls, filepath: str | os.PathLike[str], format: ImageFormat = ImageFormat.RGB, - to_cuda: bool = False, - **kwargs, ) -> Image: - if kwargs.pop("to_gpu", False): - to_cuda = True arr = cv2.imread(str(filepath), cv2.IMREAD_UNCHANGED) if arr is None: raise ValueError(f"Could not load image from {filepath}") @@ -179,156 +195,231 @@ def from_file( # type: ignore[no-untyped-def] detected = ImageFormat.BGRA # OpenCV default else: detected = format - return cls(CudaImage(arr, detected) if to_cuda and HAS_CUDA else NumpyImage(arr, detected)) + return cls(data=arr, format=detected) @classmethod - def from_opencv( # type: ignore[no-untyped-def] + def from_opencv( cls, cv_image: np.ndarray, # type: ignore[type-arg] format: ImageFormat = ImageFormat.BGR, - **kwargs, + frame_id: str = "", + ts: float | None = None, ) -> Image: """Construct from an OpenCV image (NumPy array).""" return cls( - NumpyImage(cv_image, format, kwargs.get("frame_id", ""), kwargs.get("ts", time.time())) - ) - - @classmethod - def from_depth( # type: ignore[no-untyped-def] - cls, depth_data, frame_id: str = "", ts: float | None = None, to_cuda: bool = False - ) -> Image: - arr = np.asarray(depth_data) - if arr.dtype != np.float32: - arr = arr.astype(np.float32) - impl = ( - CudaImage(arr, ImageFormat.DEPTH, frame_id, time.time() if ts is None else ts) - if to_cuda and HAS_CUDA - else NumpyImage(arr, ImageFormat.DEPTH, frame_id, time.time() if ts is None else ts) + data=cv_image, + format=format, + frame_id=frame_id, + ts=ts if ts is not None else time.time(), ) - return cls(impl) - - # Delegation - @property - def is_cuda(self) -> bool: - return self._impl.is_cuda - - @property - def data(self): # type: ignore[no-untyped-def] - return self._impl.data - - @data.setter - def data(self, value) -> None: # type: ignore[no-untyped-def] - # Preserve backend semantics: ensure array type matches implementation - if isinstance(self._impl, NumpyImage): - self._impl.data = np.asarray(value) - elif isinstance(self._impl, CudaImage): - if cp is None: - raise RuntimeError("CuPy not available to set CUDA image data") - self._impl.data = cp.asarray(value) - else: - self._impl.data = value - - @property - def format(self) -> ImageFormat: - return self._impl.format - - @format.setter - def format(self, value) -> None: # type: ignore[no-untyped-def] - if isinstance(value, ImageFormat): - self._impl.format = value - elif isinstance(value, str): - try: - 
self._impl.format = ImageFormat[value] - except KeyError as e: - raise ValueError(f"Invalid ImageFormat: {value}") from e - else: - raise TypeError("format must be ImageFormat or str name") - - @property - def frame_id(self) -> str: - return self._impl.frame_id - - @frame_id.setter - def frame_id(self, value: str) -> None: - self._impl.frame_id = str(value) - - @property - def ts(self) -> float: - return self._impl.ts - - @ts.setter - def ts(self, value: float) -> None: - self._impl.ts = float(value) - - @property - def height(self) -> int: - return self._impl.height - - @property - def width(self) -> int: - return self._impl.width - - @property - def channels(self) -> int: - return self._impl.channels - - @property - def shape(self): # type: ignore[no-untyped-def] - return self._impl.shape - @property - def dtype(self): # type: ignore[no-untyped-def] - return self._impl.dtype + def to_opencv(self) -> np.ndarray: # type: ignore[type-arg] + """Convert to OpenCV BGR format.""" + arr = self.data + if self.format == ImageFormat.BGR: + return arr + if self.format == ImageFormat.RGB: + return cv2.cvtColor(arr, cv2.COLOR_RGB2BGR) + if self.format == ImageFormat.RGBA: + return cv2.cvtColor(arr, cv2.COLOR_RGBA2BGR) + if self.format == ImageFormat.BGRA: + return cv2.cvtColor(arr, cv2.COLOR_BGRA2BGR) + if self.format in ( + ImageFormat.GRAY, + ImageFormat.GRAY16, + ImageFormat.DEPTH, + ImageFormat.DEPTH16, + ): + return arr + raise ValueError(f"Unsupported format: {self.format}") - def copy(self) -> Image: - return Image(self._impl.copy()) + def as_numpy(self) -> np.ndarray: # type: ignore[type-arg] + """Get image data as numpy array.""" + return self.data - def to_cpu(self) -> Image: - if isinstance(self._impl, NumpyImage): + def to_rgb(self) -> Image: + if self.format == ImageFormat.RGB: return self.copy() + arr = self.data + if self.format == ImageFormat.BGR: + return Image( + data=cv2.cvtColor(arr, cv2.COLOR_BGR2RGB), + format=ImageFormat.RGB, + frame_id=self.frame_id, + ts=self.ts, + ) + if self.format == ImageFormat.RGBA: + return self.copy() # RGBA contains RGB + alpha + if self.format == ImageFormat.BGRA: + rgba = cv2.cvtColor(arr, cv2.COLOR_BGRA2RGBA) + return Image(data=rgba, format=ImageFormat.RGBA, frame_id=self.frame_id, ts=self.ts) + if self.format in (ImageFormat.GRAY, ImageFormat.GRAY16, ImageFormat.DEPTH16): + gray8 = (arr / 256).astype(np.uint8) if self.format != ImageFormat.GRAY else arr + rgb = cv2.cvtColor(gray8, cv2.COLOR_GRAY2RGB) + return Image(data=rgb, format=ImageFormat.RGB, frame_id=self.frame_id, ts=self.ts) + return self.copy() - data = self._impl.data.get() # CuPy array to NumPy - - return Image( - NumpyImage( - data, - self._impl.format, - self._impl.frame_id, - self._impl.ts, + def to_bgr(self) -> Image: + if self.format == ImageFormat.BGR: + return self.copy() + arr = self.data + if self.format == ImageFormat.RGB: + return Image( + data=cv2.cvtColor(arr, cv2.COLOR_RGB2BGR), + format=ImageFormat.BGR, + frame_id=self.frame_id, + ts=self.ts, ) - ) + if self.format == ImageFormat.RGBA: + return Image( + data=cv2.cvtColor(arr, cv2.COLOR_RGBA2BGR), + format=ImageFormat.BGR, + frame_id=self.frame_id, + ts=self.ts, + ) + if self.format == ImageFormat.BGRA: + return Image( + data=cv2.cvtColor(arr, cv2.COLOR_BGRA2BGR), + format=ImageFormat.BGR, + frame_id=self.frame_id, + ts=self.ts, + ) + if self.format in (ImageFormat.GRAY, ImageFormat.GRAY16, ImageFormat.DEPTH16): + gray8 = (arr / 256).astype(np.uint8) if self.format != ImageFormat.GRAY else arr + return Image( + 
data=cv2.cvtColor(gray8, cv2.COLOR_GRAY2BGR), + format=ImageFormat.BGR, + frame_id=self.frame_id, + ts=self.ts, + ) + return self.copy() - def to_cupy(self) -> Image: - if isinstance(self._impl, CudaImage): + def to_grayscale(self) -> Image: + if self.format in (ImageFormat.GRAY, ImageFormat.GRAY16, ImageFormat.DEPTH): return self.copy() - return Image( - CudaImage( - np.asarray(self._impl.data), self._impl.format, self._impl.frame_id, self._impl.ts + if self.format == ImageFormat.BGR: + return Image( + data=cv2.cvtColor(self.data, cv2.COLOR_BGR2GRAY), + format=ImageFormat.GRAY, + frame_id=self.frame_id, + ts=self.ts, ) - ) + if self.format == ImageFormat.RGB: + return Image( + data=cv2.cvtColor(self.data, cv2.COLOR_RGB2GRAY), + format=ImageFormat.GRAY, + frame_id=self.frame_id, + ts=self.ts, + ) + if self.format in (ImageFormat.RGBA, ImageFormat.BGRA): + code = cv2.COLOR_RGBA2GRAY if self.format == ImageFormat.RGBA else cv2.COLOR_BGRA2GRAY + return Image( + data=cv2.cvtColor(self.data, code), + format=ImageFormat.GRAY, + frame_id=self.frame_id, + ts=self.ts, + ) + raise ValueError(f"Unsupported format: {self.format}") - def to_opencv(self) -> np.ndarray: # type: ignore[type-arg] - return self._impl.to_opencv() + def to_rerun( + self, + mode: str = "image", + origin: tuple[float, float] = (-17.05, 4.65), # TODO: hardcoded for now + resolution: float = 0.1 / 16, + z_offset: float = 0.012, + ) -> Any: + """Convert to rerun visualization format. - def as_numpy(self) -> np.ndarray: # type: ignore[type-arg] - """Get image data as numpy array in RGB format.""" - return np.asarray(self.data) + Args: + mode: Visualization mode: + - "image": 2D image (default) + - "mesh": 3D textured plane on floor + origin: World (x, y) position for mesh mode (default (0, 0)) + resolution: Meters per pixel for mesh mode (default 0.05) + z_offset: Height offset for mesh mode (default 0.01) - def to_rgb(self) -> Image: - return Image(self._impl.to_rgb()) + Returns: + Rerun archetype (rr.Image or rr.Mesh3D) + """ + if mode == "image": + return _format_to_rerun(self.data, self.format) + elif mode == "mesh": + return self._to_rerun_mesh(origin, resolution, z_offset) + else: + raise ValueError(f"Unsupported mode: {mode}") - def to_bgr(self) -> Image: - return Image(self._impl.to_bgr()) + def _to_rerun_mesh( + self, + origin: tuple[float, float], + resolution: float, + z_offset: float, + ) -> Any: + """Convert to 3D textured mesh overlay on floor plane.""" + h, w = self.data.shape[:2] + if h == 0 or w == 0: + return rr.Mesh3D(vertex_positions=[]) + + # Convert to RGBA for texture + if self.format == ImageFormat.RGBA: + rgba = self.data + elif self.format == ImageFormat.BGRA: + rgba = cv2.cvtColor(self.data, cv2.COLOR_BGRA2RGBA) + elif self.format == ImageFormat.RGB: + rgba = cv2.cvtColor(self.data, cv2.COLOR_RGB2RGBA) + elif self.format == ImageFormat.BGR: + rgba = cv2.cvtColor(self.data, cv2.COLOR_BGR2RGBA) + elif self.format in (ImageFormat.GRAY, ImageFormat.GRAY16): + rgba = cv2.cvtColor(self.data, cv2.COLOR_GRAY2RGBA) + else: + # For depth images, normalize and convert to RGBA + normalized = np.zeros_like(self.data, dtype=np.uint8) + cv2.normalize(self.data, normalized, 0, 255, cv2.NORM_MINMAX) + rgba = cv2.cvtColor(normalized, cv2.COLOR_GRAY2RGBA) + + # Note: Unlike OccupancyGrid (where row 0 = world bottom), Image row 0 is + # visually the top. UV coords map this correctly without flipping. 
+ + # Single quad covering image extent + ox, oy = origin + world_w = w * resolution + world_h = h * resolution + + vertices = np.array( + [ + [ox, oy, z_offset], + [ox + world_w, oy, z_offset], + [ox + world_w, oy + world_h, z_offset], + [ox, oy + world_h, z_offset], + ], + dtype=np.float32, + ) - def to_grayscale(self) -> Image: - return Image(self._impl.to_grayscale()) + indices = np.array([[0, 1, 2], [0, 2, 3]], dtype=np.uint32) - def to_rerun(self) -> Any: - """Convert to rerun Image format.""" - return self._impl.to_rerun() + texcoords = np.array( + [ + [0.0, 1.0], + [1.0, 1.0], + [1.0, 0.0], + [0.0, 0.0], + ], + dtype=np.float32, + ) + + return rr.Mesh3D( + vertex_positions=vertices, + triangle_indices=indices, + vertex_texcoords=texcoords, + albedo_texture=rgba, + ) def resize(self, width: int, height: int, interpolation: int = cv2.INTER_LINEAR) -> Image: - return Image(self._impl.resize(width, height, interpolation)) + return Image( + data=cv2.resize(self.data, (width, height), interpolation=interpolation), + format=self.format, + frame_id=self.frame_id, + ts=self.ts, + ) def resize_to_fit( self, max_width: int, max_height: int, interpolation: int = cv2.INTER_LINEAR @@ -349,29 +440,48 @@ def resize_to_fit( return self.resize(new_width, new_height, interpolation), scale def crop(self, x: int, y: int, width: int, height: int) -> Image: - return Image(self._impl.crop(x, y, width, height)) # type: ignore[attr-defined] + """Crop the image to the specified region. - @property - def sharpness(self) -> float: - """Return sharpness score.""" - return self._impl.sharpness() + Args: + x: Starting x coordinate (left edge) + y: Starting y coordinate (top edge) + width: Width of the cropped region + height: Height of the cropped region - def to_depth_meters(self) -> Image: - """Return a depth image normalized to meters as float32.""" - depth_cv = self.to_opencv() - fmt = self.format + Returns: + A new Image containing the cropped region + """ + img_height, img_width = self.data.shape[:2] + + # Clamp the crop region to image bounds + x = max(0, min(x, img_width)) + y = max(0, min(y, img_height)) + x_end = min(x + width, img_width) + y_end = min(y + height, img_height) + + # Perform the crop using array slicing + if self.data.ndim == 2: + cropped_data = self.data[y:y_end, x:x_end] + else: + cropped_data = self.data[y:y_end, x:x_end, :] - if fmt == ImageFormat.DEPTH16: - depth_cv = depth_cv.astype(np.float32) / 1000.0 - fmt = ImageFormat.DEPTH - elif depth_cv.dtype != np.float32: - depth_cv = depth_cv.astype(np.float32) - fmt = ImageFormat.DEPTH if fmt == ImageFormat.DEPTH else fmt + return Image(data=cropped_data, format=self.format, frame_id=self.frame_id, ts=self.ts) - return Image.from_numpy(depth_cv, format=fmt, frame_id=self.frame_id, ts=self.ts) + @property + def sharpness(self) -> float: + """Return sharpness score.""" + gray = self.to_grayscale() + sx = cv2.Sobel(gray.data, cv2.CV_32F, 1, 0, ksize=5) + sy = cv2.Sobel(gray.data, cv2.CV_32F, 0, 1, ksize=5) + magnitude = cv2.magnitude(sx, sy) + mean_mag = float(magnitude.mean()) + if mean_mag <= 0: + return 0.0 + return float(np.clip((np.log10(mean_mag + 1) - 1.7) / 2.0, 0.0, 1.0)) def save(self, filepath: str) -> bool: - return self._impl.save(filepath) + arr = self.to_opencv() + return cv2.imwrite(filepath, arr) def to_base64( self, @@ -455,27 +565,25 @@ def lcm_encode(self, frame_id: str | None = None) -> bytes: return msg.lcm_encode() # type: ignore[no-any-return] @classmethod - def lcm_decode(cls, data: bytes, **kwargs) -> Image: # 
type: ignore[no-untyped-def] + def lcm_decode(cls, data: bytes, **kwargs: Any) -> Image: msg = LCMImage.lcm_decode(data) fmt, dtype, channels = _parse_lcm_encoding(msg.encoding) - arr = np.frombuffer(msg.data, dtype=dtype) + arr: np.ndarray[Any, Any] = np.frombuffer(msg.data, dtype=dtype) if channels == 1: arr = arr.reshape((msg.height, msg.width)) else: arr = arr.reshape((msg.height, msg.width, channels)) return cls( - NumpyImage( - arr, - fmt, - msg.header.frame_id if hasattr(msg, "header") else "", - ( - msg.header.stamp.sec + msg.header.stamp.nsec / 1e9 - if hasattr(msg, "header") - and hasattr(msg.header, "stamp") - and msg.header.stamp.sec > 0 - else time.time() - ), - ) + data=arr, + format=fmt, + frame_id=msg.header.frame_id if hasattr(msg, "header") else "", + ts=( + msg.header.stamp.sec + msg.header.stamp.nsec / 1e9 + if hasattr(msg, "header") + and hasattr(msg.header, "stamp") + and msg.header.stamp.sec > 0 + else time.time() + ), ) def lcm_jpeg_encode(self, quality: int = 75, frame_id: str | None = None) -> bytes: @@ -524,7 +632,7 @@ def lcm_jpeg_encode(self, quality: int = 75, frame_id: str | None = None) -> byt return msg.lcm_encode() # type: ignore[no-any-return] @classmethod - def lcm_jpeg_decode(cls, data: bytes, **kwargs) -> Image: # type: ignore[no-untyped-def] + def lcm_jpeg_decode(cls, data: bytes, **kwargs: Any) -> Image: """Decode an LCM Image message with JPEG-compressed data. Args: @@ -543,36 +651,18 @@ def lcm_jpeg_decode(cls, data: bytes, **kwargs) -> Image: # type: ignore[no-unt bgr_array = jpeg.decode(msg.data) return cls( - NumpyImage( - bgr_array, - ImageFormat.BGR, - msg.header.frame_id if hasattr(msg, "header") else "", - ( - msg.header.stamp.sec + msg.header.stamp.nsec / 1e9 - if hasattr(msg, "header") - and hasattr(msg.header, "stamp") - and msg.header.stamp.sec > 0 - else time.time() - ), - ) + data=bgr_array, + format=ImageFormat.BGR, + frame_id=msg.header.frame_id if hasattr(msg, "header") else "", + ts=( + msg.header.stamp.sec + msg.header.stamp.nsec / 1e9 + if hasattr(msg, "header") + and hasattr(msg.header, "stamp") + and msg.header.stamp.sec > 0 + else time.time() + ), ) - # PnP wrappers - def solve_pnp(self, *args, **kwargs): # type: ignore[no-untyped-def] - return self._impl.solve_pnp(*args, **kwargs) # type: ignore[attr-defined] - - def solve_pnp_ransac(self, *args, **kwargs): # type: ignore[no-untyped-def] - return self._impl.solve_pnp_ransac(*args, **kwargs) # type: ignore[attr-defined] - - def solve_pnp_batch(self, *args, **kwargs): # type: ignore[no-untyped-def] - return self._impl.solve_pnp_batch(*args, **kwargs) # type: ignore[attr-defined] - - def create_csrt_tracker(self, *args, **kwargs): # type: ignore[no-untyped-def] - return self._impl.create_csrt_tracker(*args, **kwargs) # type: ignore[attr-defined] - - def csrt_update(self, *args, **kwargs): # type: ignore[no-untyped-def] - return self._impl.csrt_update(*args, **kwargs) # type: ignore[attr-defined] - @classmethod def from_ros_msg(cls, ros_msg: ROSImage) -> Image: """Create an Image from a ROS sensor_msgs/Image message. 
@@ -635,7 +725,7 @@ def from_ros_msg(cls, ros_msg: ROSImage) -> Image: ) @staticmethod - def _parse_encoding(encoding: str) -> dict: # type: ignore[type-arg] + def _parse_encoding(encoding: str) -> dict[str, Any]: """Translate ROS encoding strings into format metadata.""" encoding_map = { "mono8": {"format": ImageFormat.GRAY, "dtype": np.uint8, "channels": 1}, @@ -658,44 +748,9 @@ def _parse_encoding(encoding: str) -> dict: # type: ignore[type-arg] raise ValueError(f"Unsupported encoding: {encoding}") - def __repr__(self) -> str: - dev = "cuda" if self.is_cuda else "cpu" - return f"Image(shape={self.shape}, format={self.format.value}, dtype={self.dtype}, dev={dev}, frame_id='{self.frame_id}', ts={self.ts})" - - def __eq__(self, other) -> bool: # type: ignore[no-untyped-def] - if not isinstance(other, Image): - return False - return ( - np.array_equal(self.data, other.data) - and self.format == other.format - and self.frame_id == other.frame_id - and abs(self.ts - other.ts) < 1e-6 - ) - def __len__(self) -> int: - return int(self.height * self.width) - - def __getstate__(self): # type: ignore[no-untyped-def] - return {"data": self.data, "format": self.format, "frame_id": self.frame_id, "ts": self.ts} - - def __setstate__(self, state) -> None: # type: ignore[no-untyped-def] - self.__init__( # type: ignore[misc] - data=state.get("data"), - format=state.get("format"), - frame_id=state.get("frame_id"), - ts=state.get("ts"), - ) - - -# Re-exports for tests -HAS_CUDA = HAS_CUDA -ImageFormat = ImageFormat -NVIMGCODEC_LAST_USED = NVIMGCODEC_LAST_USED -HAS_NVIMGCODEC = HAS_NVIMGCODEC __all__ = [ - "HAS_CUDA", - "HAS_NVIMGCODEC", - "NVIMGCODEC_LAST_USED", + "Image", "ImageFormat", "sharpness_barrier", "sharpness_window", @@ -704,27 +759,29 @@ def __setstate__(self, state) -> None: # type: ignore[no-untyped-def] def sharpness_window(target_frequency: float, source: Observable[Image]) -> Observable[Image]: """Emit the sharpest Image seen within each sliding time window.""" + from reactivex.scheduler import ThreadPoolScheduler + if target_frequency <= 0: raise ValueError("target_frequency must be positive") window = TimestampedBufferCollection(1.0 / target_frequency) # type: ignore[var-annotated] source.subscribe(window.add) - thread_scheduler = ThreadPoolScheduler(max_workers=1) # type: ignore[name-defined] + thread_scheduler = ThreadPoolScheduler(max_workers=1) - def find_best(*_args): # type: ignore[no-untyped-def] + def find_best(*_args: Any) -> Image | None: if not window._items: return None - return max(window._items, key=lambda img: img.sharpness) + return max(window._items, key=lambda img: img.sharpness) # type: ignore[no-any-return] - return rx.interval(1.0 / target_frequency).pipe( + return rx.interval(1.0 / target_frequency).pipe( # type: ignore[misc] ops.observe_on(thread_scheduler), ops.map(find_best), ops.filter(lambda img: img is not None), ) -def sharpness_barrier(target_frequency: float): # type: ignore[no-untyped-def] +def sharpness_barrier(target_frequency: float) -> Any: """Select the sharpest Image within each time window.""" if target_frequency <= 0: raise ValueError("target_frequency must be positive") @@ -760,7 +817,7 @@ def _get_lcm_encoding(fmt: ImageFormat, dtype: np.dtype) -> str: # type: ignore raise ValueError(f"Unsupported LCM encoding for fmt={fmt}, dtype={dtype}") -def _parse_lcm_encoding(enc: str): # type: ignore[no-untyped-def] +def _parse_lcm_encoding(enc: str) -> tuple[ImageFormat, type, int]: m = { "mono8": (ImageFormat.GRAY, np.uint8, 1), "mono16": 
(ImageFormat.GRAY16, np.uint16, 1), diff --git a/dimos/msgs/sensor_msgs/image_impls/AbstractImage.py b/dimos/msgs/sensor_msgs/image_impls/AbstractImage.py index 9cff0e8bd..80a1bb7ce 100644 --- a/dimos/msgs/sensor_msgs/image_impls/AbstractImage.py +++ b/dimos/msgs/sensor_msgs/image_impls/AbstractImage.py @@ -1,4 +1,4 @@ -# Copyright 2025-2026 Dimensional Inc. +# Copyright 2026 Dimensional Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -12,262 +12,8 @@ # See the License for the specific language governing permissions and # limitations under the License. -from __future__ import annotations +# Backwards compatibility stub for unpickling old data. +# AbstractImage and ImageFormat were moved to Image. +from dimos.msgs.sensor_msgs.Image import Image as AbstractImage, ImageFormat -from abc import ABC, abstractmethod -import base64 -from enum import Enum -import os -from typing import Any - -import cv2 -import numpy as np -import rerun as rr - -try: - import cupy as cp # type: ignore[import-not-found, import-untyped] - - HAS_CUDA = True -except Exception: # pragma: no cover - optional dependency - cp = None - HAS_CUDA = False - -# NVRTC defaults to C++11; libcu++ in recent CUDA requires at least C++17. -if HAS_CUDA: - try: - import cupy.cuda.compiler as _cupy_compiler # type: ignore[import-not-found, import-untyped] - - if not getattr(_cupy_compiler, "_dimos_force_cxx17", False): - _orig_compile_using_nvrtc = _cupy_compiler.compile_using_nvrtc - - def _compile_using_nvrtc( # type: ignore[no-untyped-def] - source, options=(), *args, **kwargs - ): - filtered = tuple( - opt - for opt in options - if opt not in ("-std=c++11", "--std=c++11", "-std=c++14", "--std=c++14") - ) - if "--std=c++17" not in filtered and "-std=c++17" not in filtered: - filtered = (*filtered, "--std=c++17") - return _orig_compile_using_nvrtc(source, filtered, *args, **kwargs) - - _cupy_compiler.compile_using_nvrtc = _compile_using_nvrtc - _cupy_compiler._dimos_force_cxx17 = True - except Exception: - pass - -# Optional nvImageCodec (preferred GPU codec) -USE_NVIMGCODEC = os.environ.get("USE_NVIMGCODEC", "0") == "1" -NVIMGCODEC_LAST_USED = False -try: # pragma: no cover - optional dependency - if HAS_CUDA and USE_NVIMGCODEC: - from nvidia import nvimgcodec # type: ignore[import-untyped] - - try: - _enc_probe = nvimgcodec.Encoder() - HAS_NVIMGCODEC = True - except Exception: - nvimgcodec = None - HAS_NVIMGCODEC = False - else: - nvimgcodec = None - HAS_NVIMGCODEC = False -except Exception: # pragma: no cover - optional dependency - nvimgcodec = None - HAS_NVIMGCODEC = False - - -class ImageFormat(Enum): - BGR = "BGR" - RGB = "RGB" - RGBA = "RGBA" - BGRA = "BGRA" - GRAY = "GRAY" - GRAY16 = "GRAY16" - DEPTH = "DEPTH" - DEPTH16 = "DEPTH16" - - -def _is_cu(x) -> bool: # type: ignore[no-untyped-def] - return HAS_CUDA and cp is not None and isinstance(x, cp.ndarray) - - -def _ascontig(x): # type: ignore[no-untyped-def] - if _is_cu(x): - return x if x.flags["C_CONTIGUOUS"] else cp.ascontiguousarray(x) - return x if x.flags["C_CONTIGUOUS"] else np.ascontiguousarray(x) - - -def _to_cpu(x): # type: ignore[no-untyped-def] - return cp.asnumpy(x) if _is_cu(x) else x - - -def _to_cu(x): # type: ignore[no-untyped-def] - if HAS_CUDA and cp is not None and isinstance(x, np.ndarray): - return cp.asarray(x) - return x - - -def _encode_nvimgcodec_cuda(bgr_cu, quality: int = 80) -> bytes: # type: ignore[no-untyped-def] # pragma: no cover - optional - if not 
HAS_NVIMGCODEC or nvimgcodec is None: - raise RuntimeError("nvimgcodec not available") - if bgr_cu.ndim != 3 or bgr_cu.shape[2] != 3: - raise RuntimeError("nvimgcodec expects HxWx3 image") - if bgr_cu.dtype != cp.uint8: - raise RuntimeError("nvimgcodec requires uint8 input") - if not bgr_cu.flags["C_CONTIGUOUS"]: - bgr_cu = cp.ascontiguousarray(bgr_cu) - encoder = nvimgcodec.Encoder() - try: - img = nvimgcodec.Image(bgr_cu, nvimgcodec.PixelFormat.BGR) - except Exception: - img = nvimgcodec.Image(cp.asnumpy(bgr_cu), nvimgcodec.PixelFormat.BGR) - if hasattr(nvimgcodec, "EncodeParams"): - params = nvimgcodec.EncodeParams(quality=quality) - bitstreams = encoder.encode([img], [params]) - else: - bitstreams = encoder.encode([img]) - bs0 = bitstreams[0] - if hasattr(bs0, "buf"): - return bytes(bs0.buf) - return bytes(bs0) - - -def format_to_rerun(data, fmt: ImageFormat): # type: ignore[no-untyped-def] - """Convert image data to Rerun archetype based on format. - - Args: - data: Image data (numpy array or cupy array on CPU) - fmt: ImageFormat enum value - - Returns: - Rerun archetype (rr.Image or rr.DepthImage) - """ - match fmt: - case ImageFormat.RGB: - return rr.Image(data, color_model="RGB") - case ImageFormat.RGBA: - return rr.Image(data, color_model="RGBA") - case ImageFormat.BGR: - return rr.Image(data, color_model="BGR") - case ImageFormat.BGRA: - return rr.Image(data, color_model="BGRA") - case ImageFormat.GRAY: - return rr.Image(data, color_model="L") - case ImageFormat.GRAY16: - return rr.Image(data, color_model="L") - case ImageFormat.DEPTH: - return rr.DepthImage(data) - case ImageFormat.DEPTH16: - return rr.DepthImage(data) - case _: - raise ValueError(f"Unsupported format for Rerun: {fmt}") - - -class AbstractImage(ABC): - data: Any - format: ImageFormat - frame_id: str - ts: float - - @property - @abstractmethod - def is_cuda(self) -> bool: # pragma: no cover - abstract - ... - - @property - def height(self) -> int: - return int(self.data.shape[0]) - - @property - def width(self) -> int: - return int(self.data.shape[1]) - - @property - def channels(self) -> int: - if getattr(self.data, "ndim", 0) == 2: - return 1 - if getattr(self.data, "ndim", 0) == 3: - return int(self.data.shape[2]) - raise ValueError("Invalid image dimensions") - - @property - def shape(self): # type: ignore[no-untyped-def] - return tuple(self.data.shape) - - @property - def dtype(self): # type: ignore[no-untyped-def] - return self.data.dtype - - @abstractmethod - def to_opencv(self) -> np.ndarray: # type: ignore[type-arg] # pragma: no cover - abstract - ... - - @abstractmethod - def to_rgb(self) -> AbstractImage: # pragma: no cover - abstract - ... - - @abstractmethod - def to_bgr(self) -> AbstractImage: # pragma: no cover - abstract - ... - - @abstractmethod - def to_grayscale(self) -> AbstractImage: # pragma: no cover - abstract - ... - - @abstractmethod - def resize( - self, width: int, height: int, interpolation: int = cv2.INTER_LINEAR - ) -> AbstractImage: # pragma: no cover - abstract - ... - - @abstractmethod - def to_rerun(self) -> Any: # pragma: no cover - abstract - ... - - @abstractmethod - def sharpness(self) -> float: # pragma: no cover - abstract - ... 
- - def copy(self) -> AbstractImage: - return self.__class__( # type: ignore[call-arg] - data=self.data.copy(), format=self.format, frame_id=self.frame_id, ts=self.ts - ) - - def save(self, filepath: str) -> bool: - global NVIMGCODEC_LAST_USED - if self.is_cuda and HAS_NVIMGCODEC and nvimgcodec is not None: - try: - bgr = self.to_bgr() - if _is_cu(bgr.data): - jpeg = _encode_nvimgcodec_cuda(bgr.data) - NVIMGCODEC_LAST_USED = True - with open(filepath, "wb") as f: - f.write(jpeg) - return True - except Exception: - NVIMGCODEC_LAST_USED = False - arr = self.to_opencv() - return cv2.imwrite(filepath, arr) - - def to_base64(self, quality: int = 80) -> str: - global NVIMGCODEC_LAST_USED - if self.is_cuda and HAS_NVIMGCODEC and nvimgcodec is not None: - try: - bgr = self.to_bgr() - if _is_cu(bgr.data): - jpeg = _encode_nvimgcodec_cuda(bgr.data, quality=quality) - NVIMGCODEC_LAST_USED = True - return base64.b64encode(jpeg).decode("utf-8") - except Exception: - NVIMGCODEC_LAST_USED = False - bgr = self.to_bgr() - success, buffer = cv2.imencode( - ".jpg", - _to_cpu(bgr.data), # type: ignore[no-untyped-call] - [int(cv2.IMWRITE_JPEG_QUALITY), int(quality)], - ) - if not success: - raise ValueError("Failed to encode image as JPEG") - return base64.b64encode(buffer.tobytes()).decode("utf-8") +__all__ = ["AbstractImage", "ImageFormat"] From d9a841c4c0ef34a7522b19c81a34286c3c36b244 Mon Sep 17 00:00:00 2001 From: Ivan Nikolic Date: Sun, 1 Feb 2026 10:01:53 +0800 Subject: [PATCH 14/36] paul good vis --- dimos/visualization/rerun/bridge.py | 7 ++----- 1 file changed, 2 insertions(+), 5 deletions(-) diff --git a/dimos/visualization/rerun/bridge.py b/dimos/visualization/rerun/bridge.py index c1fd32bd3..8252d370b 100644 --- a/dimos/visualization/rerun/bridge.py +++ b/dimos/visualization/rerun/bridge.py @@ -75,7 +75,6 @@ def to_rerun(self) -> RerunData: ... ViewerMode = Literal["native", "web", "none"] - # Notes on this system # # In the future it would be nice if modules can annotate their individual OUTs with (general or rerun specific) @@ -252,12 +251,10 @@ def main() -> None: # defines transforms for rerun entity paths transforms={ Glob("world/debug_navigation"): lambda grid: grid.to_rerun( - mode="mesh", colormap="Purples", z_offset=0.015, - brightness=0.5, - cost_range=[0, 20], - free_color="#484981", + brightness=0.75, + cost_range=[0, 40], ), }, ) From ed0b926c4cfe61e415c9da8a0961444035a587ef Mon Sep 17 00:00:00 2001 From: Ivan Nikolic Date: Sun, 1 Feb 2026 10:41:46 +0800 Subject: [PATCH 15/36] renamed visual transforms to converters --- dimos/visualization/rerun/bridge.py | 35 ++++++++++++++--------------- 1 file changed, 17 insertions(+), 18 deletions(-) diff --git a/dimos/visualization/rerun/bridge.py b/dimos/visualization/rerun/bridge.py index 8252d370b..66338e812 100644 --- a/dimos/visualization/rerun/bridge.py +++ b/dimos/visualization/rerun/bridge.py @@ -88,7 +88,7 @@ def to_rerun(self) -> RerunData: ... # automatically spy an all the transports and read visualization hints # # this is the correct implementation. -# temporarily we are using these "sideloading" transforms={} to define custom to_rerun methods for specific topics +# temporarily we are using these "sideloading" converters={} to define custom to_rerun methods for specific topics # as well as pubsubs to specify which protocols to listen to. 
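For reference, a sideloaded converters mapping might look like the sketch below. This is illustrative only: Glob is the same pattern helper bridge.py already imports, the topic/entity names are made up, and the to_rerun() keyword arguments are the ones used for the navigation debug grid in this patch. Any callable that takes the decoded message and returns a Rerun archetype works; returning None drops the sample.

    import rerun as rr  # only needed when a converter builds an archetype directly

    # Keys are entity paths: a plain str for an exact match, Glob for a pattern.
    # Values receive the decoded message; matching converters are chained in order,
    # and a final step still calls .to_rerun() on anything that is not yet an archetype.
    example_converters = {
        # exact path: adjust how the navigation debug grid is rendered
        "world/debug_navigation": lambda grid: grid.to_rerun(
            colormap="Purples",
            z_offset=0.015,
            brightness=0.5,
        ),
        # hypothetical pattern: drop a noisy family of topics by returning None
        Glob("world/camera/*"): lambda _msg: None,
        # hypothetical status topic: bypass to_rerun() and log plain text
        "world/status": lambda msg: rr.TextLog(str(msg)),
    }
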
@@ -100,7 +100,7 @@ class Config(ModuleConfig): default_factory=lambda: [LCM(autoconf=True)] ) - transforms: dict[Glob | str, Callable[[Any], Archetype]] = field(default_factory=dict) + converters: dict[Glob | str, Callable[[Any], Archetype]] = field(default_factory=dict) entity_prefix: str = "world" topic_to_entity: Callable[[Any], str] | None = None @@ -131,23 +131,23 @@ def __init__(self, **kwargs: Any) -> None: super().__init__(**kwargs) @lru_cache(maxsize=256) - def _transform_for_entity_path(self, entity_path: str) -> Callable[[Any], RerunData | None]: - """Return a composed transform for the entity path. + def _converter_for_entity_path(self, entity_path: str) -> Callable[[Any], RerunData | None]: + """Return a composed converter for the entity path. - Chains matching transforms from config, ending with obligatory_transform + Chains matching converters from config, ending with final_convert which handles .to_rerun() or passes through Archetypes. """ from rerun._baseclasses import Archetype - # find all matching transforms for this entity path + # find all matching converters for this entity path matches = [ fn - for pattern, fn in self.config.transforms.items() + for pattern, fn in self.config.converters.items() if pattern_matches(pattern, entity_path) ] - # final obligatory transform (ensures we return Archetype or None) - def final_transform(msg: Any) -> RerunData | None: + # final step (ensures we return Archetype or None) + def final_convert(msg: Any) -> RerunData | None: if isinstance(msg, Archetype): return msg if is_rerun_multi(msg): @@ -156,8 +156,8 @@ def final_transform(msg: Any) -> RerunData | None: return msg.to_rerun() return None - # compose all transforms - return lambda msg: pipe(msg, *matches, final_transform) + # compose all converters + return lambda msg: pipe(msg, *matches, final_convert) def _get_entity_path(self, topic: Any) -> str: """Convert a topic to a Rerun entity path.""" @@ -177,10 +177,10 @@ def _on_message(self, msg: Any, topic: Any) -> None: # convert a potentially complex topic object into an str rerun entity path entity_path: str = self._get_entity_path(topic) - # apply transforms (including obligatory_transform which handles .to_rerun()) - rerun_data: RerunData | None = self._transform_for_entity_path(entity_path)(msg) + # apply converters (including final_convert which handles .to_rerun()) + rerun_data: RerunData | None = self._converter_for_entity_path(entity_path)(msg) - # transforms can also supress logging by returning None + # converters can also suppress logging by returning None if not rerun_data: return @@ -248,13 +248,12 @@ def main() -> None: # any pubsub that supports subscribe_all and topic that supports str(topic) # is acceptable here pubsubs=[LCM(autoconf=True)], - # defines transforms for rerun entity paths - transforms={ + # custom converters for specific rerun entity paths + converters={ Glob("world/debug_navigation"): lambda grid: grid.to_rerun( colormap="Purples", z_offset=0.015, - brightness=0.75, - cost_range=[0, 40], + brightness=0.5, ), }, ) From 7eae78eeb7429b69f5816cb7cc043631199355b3 Mon Sep 17 00:00:00 2001 From: Ivan Nikolic Date: Sun, 1 Feb 2026 11:35:17 +0800 Subject: [PATCH 16/36] small cleamnup, pulled image from dev --- dimos/core/global_config.py | 2 +- dimos/msgs/sensor_msgs/Image.py | 697 ++++++++---------- .../sensor_msgs/image_impls/AbstractImage.py | 264 ++++++- dimos/visualization/rerun/bridge.py | 18 +- .../web/websocket_vis/websocket_vis_module.py | 3 +- 5 files changed, 592 insertions(+), 392 deletions(-) 
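The Image changes below restore the implementation-backed facade from dev: Image again wraps either a NumpyImage (CPU) or CudaImage (GPU) backend instead of holding a NumPy array directly. A rough usage sketch, illustrative only (it assumes the restored to_cuda= keyword and degrades to the CPU backend when CuPy is unavailable):

    import numpy as np

    from dimos.msgs.sensor_msgs.Image import HAS_CUDA, Image, ImageFormat

    frame = np.zeros((480, 640, 3), dtype=np.uint8)

    # to_cuda is a request, not a guarantee: without CUDA/CuPy the facade
    # silently stays on the NumpyImage backend.
    img = Image.from_numpy(frame, format=ImageFormat.BGR, to_cuda=True)
    print(img.is_cuda, HAS_CUDA)

    rgb = img.to_rgb()          # conversions return new Image facades
    cpu = img.to_cpu()          # brings a CUDA image back to NumPy (CPU images are copied)
    archetype = cpu.to_rerun()  # rr.Image / rr.DepthImage depending on the format
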
diff --git a/dimos/core/global_config.py b/dimos/core/global_config.py index 7bcbf8a6e..f1497b213 100644 --- a/dimos/core/global_config.py +++ b/dimos/core/global_config.py @@ -33,7 +33,7 @@ class GlobalConfig(BaseSettings): replay: bool = False rerun_enabled: bool = True rerun_server_addr: str | None = None - viewer_backend: ViewerBackend = "rerun" + viewer_backend: ViewerBackend = "none" n_dask_workers: int = 2 memory_limit: str = "auto" mujoco_camera_position: str | None = None diff --git a/dimos/msgs/sensor_msgs/Image.py b/dimos/msgs/sensor_msgs/Image.py index 0a7a7e0d8..a9c842f1a 100644 --- a/dimos/msgs/sensor_msgs/Image.py +++ b/dimos/msgs/sensor_msgs/Image.py @@ -15,8 +15,6 @@ from __future__ import annotations import base64 -from dataclasses import dataclass, field -from enum import Enum import time from typing import TYPE_CHECKING, Any, Literal, TypedDict @@ -26,9 +24,16 @@ import numpy as np import reactivex as rx from reactivex import operators as ops -import rerun as rr from turbojpeg import TurboJPEG # type: ignore[import-untyped] +from dimos.msgs.sensor_msgs.image_impls.AbstractImage import ( + HAS_CUDA, + HAS_NVIMGCODEC, + NVIMGCODEC_LAST_USED, + ImageFormat, +) +from dimos.msgs.sensor_msgs.image_impls.CudaImage import CudaImage +from dimos.msgs.sensor_msgs.image_impls.NumpyImage import NumpyImage from dimos.types.timestamped import Timestamped, TimestampedBufferCollection, to_human_readable from dimos.utils.reactive import quality_barrier @@ -37,46 +42,21 @@ from reactivex.observable import Observable + from dimos.msgs.sensor_msgs.image_impls.AbstractImage import ( + AbstractImage, + ) + +try: + import cupy as cp # type: ignore[import-not-found, import-untyped] +except Exception: + cp = None + try: from sensor_msgs.msg import Image as ROSImage # type: ignore[attr-defined] except ImportError: ROSImage = None # type: ignore[assignment, misc] -class ImageFormat(Enum): - BGR = "BGR" - RGB = "RGB" - RGBA = "RGBA" - BGRA = "BGRA" - GRAY = "GRAY" - GRAY16 = "GRAY16" - DEPTH = "DEPTH" - DEPTH16 = "DEPTH16" - - -def _format_to_rerun(data: np.ndarray, fmt: ImageFormat) -> Any: # type: ignore[type-arg] - """Convert image data to Rerun archetype based on format.""" - match fmt: - case ImageFormat.RGB: - return rr.Image(data, color_model="RGB") - case ImageFormat.RGBA: - return rr.Image(data, color_model="RGBA") - case ImageFormat.BGR: - return rr.Image(data, color_model="BGR") - case ImageFormat.BGRA: - return rr.Image(data, color_model="BGRA") - case ImageFormat.GRAY: - return rr.Image(data, color_model="L") - case ImageFormat.GRAY16: - return rr.Image(data, color_model="L") - case ImageFormat.DEPTH: - return rr.DepthImage(data) - case ImageFormat.DEPTH16: - return rr.DepthImage(data) - case _: - raise ValueError(f"Unsupported format for Rerun: {fmt}") - - class AgentImageMessage(TypedDict): """Type definition for agent-compatible image representation.""" @@ -86,104 +66,108 @@ class AgentImageMessage(TypedDict): data: str # Base64 encoded image data -@dataclass class Image(Timestamped): - """Simple NumPy-based image container.""" - msg_name = "sensor_msgs.Image" - data: np.ndarray[Any, np.dtype[Any]] = field( - default_factory=lambda: np.zeros((1, 1, 3), dtype=np.uint8) - ) - format: ImageFormat = field(default=ImageFormat.BGR) - frame_id: str = field(default="") - ts: float = field(default_factory=time.time) - - def __post_init__(self) -> None: - if not isinstance(self.data, np.ndarray): - self.data = np.asarray(self.data) - if self.data.ndim < 2: - raise ValueError("Image requires a 
2D/3D NumPy array") - - def __str__(self) -> str: - return ( - f"Image(shape={self.shape}, format={self.format.value}, dtype={self.dtype}, " - f"ts={to_human_readable(self.ts)})" - ) - - def __repr__(self) -> str: - return f"Image(shape={self.shape}, format={self.format.value}, dtype={self.dtype}, frame_id='{self.frame_id}', ts={self.ts})" - - def __eq__(self, other: object) -> bool: - if not isinstance(other, Image): - return False - return ( - np.array_equal(self.data, other.data) - and self.format == other.format - and self.frame_id == other.frame_id - and abs(self.ts - other.ts) < 1e-6 - ) + def __init__( # type: ignore[no-untyped-def] + self, + impl: AbstractImage | None = None, + *, + data=None, + format: ImageFormat | None = None, + frame_id: str | None = None, + ts: float | None = None, + ) -> None: + """Construct an Image facade. - def __len__(self) -> int: - return int(self.height * self.width) + Usage: + - Image(impl=) + - Image(data=, format=ImageFormat.RGB, frame_id=str, ts=float) - def __getstate__(self) -> dict[str, Any]: - return {"data": self.data, "format": self.format, "frame_id": self.frame_id, "ts": self.ts} + Notes: + - When constructed from `data`, uses CudaImage if `data` is a CuPy array and CUDA is available; otherwise NumpyImage. + - `format` defaults to ImageFormat.RGB; `frame_id` defaults to ""; `ts` defaults to `time.time()`. + """ + # Disallow mixing impl with raw kwargs + if impl is not None and any(x is not None for x in (data, format, frame_id, ts)): + raise TypeError( + "Provide either 'impl' or ('data', 'format', 'frame_id', 'ts'), not both" + ) - def __setstate__(self, state: dict[str, Any]) -> None: - self.data = state.get("data", np.zeros((1, 1, 3), dtype=np.uint8)) - self.format = state.get("format", ImageFormat.BGR) - self.frame_id = state.get("frame_id", "") - self.ts = state.get("ts", time.time()) + if impl is not None: + self._impl = impl + return - @property - def height(self) -> int: - return int(self.data.shape[0]) + # Raw constructor path + if data is None: + raise TypeError("'data' is required when constructing Image without 'impl'") + fmt = format if format is not None else ImageFormat.BGR + fid = frame_id if frame_id is not None else "" + tstamp = ts if ts is not None else time.time() - @property - def width(self) -> int: - return int(self.data.shape[1]) + # Detect CuPy array without a hard dependency + is_cu = False + try: + import cupy as _cp - @property - def channels(self) -> int: - if self.data.ndim == 2: - return 1 - if self.data.ndim == 3: - return int(self.data.shape[2]) - raise ValueError("Invalid image dimensions") + is_cu = isinstance(data, _cp.ndarray) + except Exception: + is_cu = False - @property - def shape(self) -> tuple[int, ...]: - return tuple(self.data.shape) + if is_cu and HAS_CUDA: + self._impl = CudaImage(data, fmt, fid, tstamp) + else: + self._impl = NumpyImage(np.asarray(data), fmt, fid, tstamp) - @property - def dtype(self) -> np.dtype[Any]: - return self.data.dtype + def __str__(self) -> str: + dev = "cuda" if self.is_cuda else "cpu" + return ( + f"Image(shape={self.shape}, format={self.format.value}, dtype={self.dtype}, " + f"dev={dev}, ts={to_human_readable(self.ts)})" + ) - def copy(self) -> Image: - return Image(data=self.data.copy(), format=self.format, frame_id=self.frame_id, ts=self.ts) + @classmethod + def from_impl(cls, impl: AbstractImage) -> Image: + return cls(impl) @classmethod - def from_numpy( + def from_numpy( # type: ignore[no-untyped-def] cls, np_image: np.ndarray, # type: ignore[type-arg] 
format: ImageFormat = ImageFormat.BGR, - frame_id: str = "", - ts: float | None = None, + to_cuda: bool = False, + **kwargs, ) -> Image: + if kwargs.pop("to_gpu", False): + to_cuda = True + if to_cuda and HAS_CUDA: + return cls( + CudaImage( + np_image if hasattr(np_image, "shape") else np.asarray(np_image), + format, + kwargs.get("frame_id", ""), + kwargs.get("ts", time.time()), + ) + ) return cls( - data=np.asarray(np_image), - format=format, - frame_id=frame_id, - ts=ts if ts is not None else time.time(), + NumpyImage( + np.asarray(np_image), + format, + kwargs.get("frame_id", ""), + kwargs.get("ts", time.time()), + ) ) @classmethod - def from_file( + def from_file( # type: ignore[no-untyped-def] cls, filepath: str | os.PathLike[str], format: ImageFormat = ImageFormat.RGB, + to_cuda: bool = False, + **kwargs, ) -> Image: + if kwargs.pop("to_gpu", False): + to_cuda = True arr = cv2.imread(str(filepath), cv2.IMREAD_UNCHANGED) if arr is None: raise ValueError(f"Could not load image from {filepath}") @@ -195,231 +179,156 @@ def from_file( detected = ImageFormat.BGRA # OpenCV default else: detected = format - return cls(data=arr, format=detected) + return cls(CudaImage(arr, detected) if to_cuda and HAS_CUDA else NumpyImage(arr, detected)) @classmethod - def from_opencv( + def from_opencv( # type: ignore[no-untyped-def] cls, cv_image: np.ndarray, # type: ignore[type-arg] format: ImageFormat = ImageFormat.BGR, - frame_id: str = "", - ts: float | None = None, + **kwargs, ) -> Image: """Construct from an OpenCV image (NumPy array).""" return cls( - data=cv_image, - format=format, - frame_id=frame_id, - ts=ts if ts is not None else time.time(), + NumpyImage(cv_image, format, kwargs.get("frame_id", ""), kwargs.get("ts", time.time())) ) - def to_opencv(self) -> np.ndarray: # type: ignore[type-arg] - """Convert to OpenCV BGR format.""" - arr = self.data - if self.format == ImageFormat.BGR: - return arr - if self.format == ImageFormat.RGB: - return cv2.cvtColor(arr, cv2.COLOR_RGB2BGR) - if self.format == ImageFormat.RGBA: - return cv2.cvtColor(arr, cv2.COLOR_RGBA2BGR) - if self.format == ImageFormat.BGRA: - return cv2.cvtColor(arr, cv2.COLOR_BGRA2BGR) - if self.format in ( - ImageFormat.GRAY, - ImageFormat.GRAY16, - ImageFormat.DEPTH, - ImageFormat.DEPTH16, - ): - return arr - raise ValueError(f"Unsupported format: {self.format}") + @classmethod + def from_depth( # type: ignore[no-untyped-def] + cls, depth_data, frame_id: str = "", ts: float | None = None, to_cuda: bool = False + ) -> Image: + arr = np.asarray(depth_data) + if arr.dtype != np.float32: + arr = arr.astype(np.float32) + impl = ( + CudaImage(arr, ImageFormat.DEPTH, frame_id, time.time() if ts is None else ts) + if to_cuda and HAS_CUDA + else NumpyImage(arr, ImageFormat.DEPTH, frame_id, time.time() if ts is None else ts) + ) + return cls(impl) - def as_numpy(self) -> np.ndarray: # type: ignore[type-arg] - """Get image data as numpy array.""" - return self.data + # Delegation + @property + def is_cuda(self) -> bool: + return self._impl.is_cuda - def to_rgb(self) -> Image: - if self.format == ImageFormat.RGB: - return self.copy() - arr = self.data - if self.format == ImageFormat.BGR: - return Image( - data=cv2.cvtColor(arr, cv2.COLOR_BGR2RGB), - format=ImageFormat.RGB, - frame_id=self.frame_id, - ts=self.ts, - ) - if self.format == ImageFormat.RGBA: - return self.copy() # RGBA contains RGB + alpha - if self.format == ImageFormat.BGRA: - rgba = cv2.cvtColor(arr, cv2.COLOR_BGRA2RGBA) - return Image(data=rgba, format=ImageFormat.RGBA, 
frame_id=self.frame_id, ts=self.ts) - if self.format in (ImageFormat.GRAY, ImageFormat.GRAY16, ImageFormat.DEPTH16): - gray8 = (arr / 256).astype(np.uint8) if self.format != ImageFormat.GRAY else arr - rgb = cv2.cvtColor(gray8, cv2.COLOR_GRAY2RGB) - return Image(data=rgb, format=ImageFormat.RGB, frame_id=self.frame_id, ts=self.ts) - return self.copy() + @property + def data(self): # type: ignore[no-untyped-def] + return self._impl.data + + @data.setter + def data(self, value) -> None: # type: ignore[no-untyped-def] + # Preserve backend semantics: ensure array type matches implementation + if isinstance(self._impl, NumpyImage): + self._impl.data = np.asarray(value) + elif isinstance(self._impl, CudaImage): + if cp is None: + raise RuntimeError("CuPy not available to set CUDA image data") + self._impl.data = cp.asarray(value) + else: + self._impl.data = value - def to_bgr(self) -> Image: - if self.format == ImageFormat.BGR: + @property + def format(self) -> ImageFormat: + return self._impl.format + + @format.setter + def format(self, value) -> None: # type: ignore[no-untyped-def] + if isinstance(value, ImageFormat): + self._impl.format = value + elif isinstance(value, str): + try: + self._impl.format = ImageFormat[value] + except KeyError as e: + raise ValueError(f"Invalid ImageFormat: {value}") from e + else: + raise TypeError("format must be ImageFormat or str name") + + @property + def frame_id(self) -> str: + return self._impl.frame_id + + @frame_id.setter + def frame_id(self, value: str) -> None: + self._impl.frame_id = str(value) + + @property + def ts(self) -> float: + return self._impl.ts + + @ts.setter + def ts(self, value: float) -> None: + self._impl.ts = float(value) + + @property + def height(self) -> int: + return self._impl.height + + @property + def width(self) -> int: + return self._impl.width + + @property + def channels(self) -> int: + return self._impl.channels + + @property + def shape(self): # type: ignore[no-untyped-def] + return self._impl.shape + + @property + def dtype(self): # type: ignore[no-untyped-def] + return self._impl.dtype + + def copy(self) -> Image: + return Image(self._impl.copy()) + + def to_cpu(self) -> Image: + if isinstance(self._impl, NumpyImage): return self.copy() - arr = self.data - if self.format == ImageFormat.RGB: - return Image( - data=cv2.cvtColor(arr, cv2.COLOR_RGB2BGR), - format=ImageFormat.BGR, - frame_id=self.frame_id, - ts=self.ts, - ) - if self.format == ImageFormat.RGBA: - return Image( - data=cv2.cvtColor(arr, cv2.COLOR_RGBA2BGR), - format=ImageFormat.BGR, - frame_id=self.frame_id, - ts=self.ts, - ) - if self.format == ImageFormat.BGRA: - return Image( - data=cv2.cvtColor(arr, cv2.COLOR_BGRA2BGR), - format=ImageFormat.BGR, - frame_id=self.frame_id, - ts=self.ts, - ) - if self.format in (ImageFormat.GRAY, ImageFormat.GRAY16, ImageFormat.DEPTH16): - gray8 = (arr / 256).astype(np.uint8) if self.format != ImageFormat.GRAY else arr - return Image( - data=cv2.cvtColor(gray8, cv2.COLOR_GRAY2BGR), - format=ImageFormat.BGR, - frame_id=self.frame_id, - ts=self.ts, + + data = self._impl.data.get() # CuPy array to NumPy + + return Image( + NumpyImage( + data, + self._impl.format, + self._impl.frame_id, + self._impl.ts, ) - return self.copy() + ) - def to_grayscale(self) -> Image: - if self.format in (ImageFormat.GRAY, ImageFormat.GRAY16, ImageFormat.DEPTH): + def to_cupy(self) -> Image: + if isinstance(self._impl, CudaImage): return self.copy() - if self.format == ImageFormat.BGR: - return Image( - data=cv2.cvtColor(self.data, 
cv2.COLOR_BGR2GRAY), - format=ImageFormat.GRAY, - frame_id=self.frame_id, - ts=self.ts, - ) - if self.format == ImageFormat.RGB: - return Image( - data=cv2.cvtColor(self.data, cv2.COLOR_RGB2GRAY), - format=ImageFormat.GRAY, - frame_id=self.frame_id, - ts=self.ts, - ) - if self.format in (ImageFormat.RGBA, ImageFormat.BGRA): - code = cv2.COLOR_RGBA2GRAY if self.format == ImageFormat.RGBA else cv2.COLOR_BGRA2GRAY - return Image( - data=cv2.cvtColor(self.data, code), - format=ImageFormat.GRAY, - frame_id=self.frame_id, - ts=self.ts, + return Image( + CudaImage( + np.asarray(self._impl.data), self._impl.format, self._impl.frame_id, self._impl.ts ) - raise ValueError(f"Unsupported format: {self.format}") - - def to_rerun( - self, - mode: str = "image", - origin: tuple[float, float] = (-17.05, 4.65), # TODO: hardcoded for now - resolution: float = 0.1 / 16, - z_offset: float = 0.012, - ) -> Any: - """Convert to rerun visualization format. + ) - Args: - mode: Visualization mode: - - "image": 2D image (default) - - "mesh": 3D textured plane on floor - origin: World (x, y) position for mesh mode (default (0, 0)) - resolution: Meters per pixel for mesh mode (default 0.05) - z_offset: Height offset for mesh mode (default 0.01) + def to_opencv(self) -> np.ndarray: # type: ignore[type-arg] + return self._impl.to_opencv() - Returns: - Rerun archetype (rr.Image or rr.Mesh3D) - """ - if mode == "image": - return _format_to_rerun(self.data, self.format) - elif mode == "mesh": - return self._to_rerun_mesh(origin, resolution, z_offset) - else: - raise ValueError(f"Unsupported mode: {mode}") + def as_numpy(self) -> np.ndarray: # type: ignore[type-arg] + """Get image data as numpy array in RGB format.""" + return np.asarray(self.data) - def _to_rerun_mesh( - self, - origin: tuple[float, float], - resolution: float, - z_offset: float, - ) -> Any: - """Convert to 3D textured mesh overlay on floor plane.""" - h, w = self.data.shape[:2] - if h == 0 or w == 0: - return rr.Mesh3D(vertex_positions=[]) - - # Convert to RGBA for texture - if self.format == ImageFormat.RGBA: - rgba = self.data - elif self.format == ImageFormat.BGRA: - rgba = cv2.cvtColor(self.data, cv2.COLOR_BGRA2RGBA) - elif self.format == ImageFormat.RGB: - rgba = cv2.cvtColor(self.data, cv2.COLOR_RGB2RGBA) - elif self.format == ImageFormat.BGR: - rgba = cv2.cvtColor(self.data, cv2.COLOR_BGR2RGBA) - elif self.format in (ImageFormat.GRAY, ImageFormat.GRAY16): - rgba = cv2.cvtColor(self.data, cv2.COLOR_GRAY2RGBA) - else: - # For depth images, normalize and convert to RGBA - normalized = np.zeros_like(self.data, dtype=np.uint8) - cv2.normalize(self.data, normalized, 0, 255, cv2.NORM_MINMAX) - rgba = cv2.cvtColor(normalized, cv2.COLOR_GRAY2RGBA) - - # Note: Unlike OccupancyGrid (where row 0 = world bottom), Image row 0 is - # visually the top. UV coords map this correctly without flipping. 
- - # Single quad covering image extent - ox, oy = origin - world_w = w * resolution - world_h = h * resolution - - vertices = np.array( - [ - [ox, oy, z_offset], - [ox + world_w, oy, z_offset], - [ox + world_w, oy + world_h, z_offset], - [ox, oy + world_h, z_offset], - ], - dtype=np.float32, - ) + def to_rgb(self) -> Image: + return Image(self._impl.to_rgb()) - indices = np.array([[0, 1, 2], [0, 2, 3]], dtype=np.uint32) + def to_bgr(self) -> Image: + return Image(self._impl.to_bgr()) - texcoords = np.array( - [ - [0.0, 1.0], - [1.0, 1.0], - [1.0, 0.0], - [0.0, 0.0], - ], - dtype=np.float32, - ) + def to_grayscale(self) -> Image: + return Image(self._impl.to_grayscale()) - return rr.Mesh3D( - vertex_positions=vertices, - triangle_indices=indices, - vertex_texcoords=texcoords, - albedo_texture=rgba, - ) + def to_rerun(self) -> Any: + """Convert to rerun Image format.""" + return self._impl.to_rerun() def resize(self, width: int, height: int, interpolation: int = cv2.INTER_LINEAR) -> Image: - return Image( - data=cv2.resize(self.data, (width, height), interpolation=interpolation), - format=self.format, - frame_id=self.frame_id, - ts=self.ts, - ) + return Image(self._impl.resize(width, height, interpolation)) def resize_to_fit( self, max_width: int, max_height: int, interpolation: int = cv2.INTER_LINEAR @@ -440,48 +349,29 @@ def resize_to_fit( return self.resize(new_width, new_height, interpolation), scale def crop(self, x: int, y: int, width: int, height: int) -> Image: - """Crop the image to the specified region. - - Args: - x: Starting x coordinate (left edge) - y: Starting y coordinate (top edge) - width: Width of the cropped region - height: Height of the cropped region - - Returns: - A new Image containing the cropped region - """ - img_height, img_width = self.data.shape[:2] - - # Clamp the crop region to image bounds - x = max(0, min(x, img_width)) - y = max(0, min(y, img_height)) - x_end = min(x + width, img_width) - y_end = min(y + height, img_height) - - # Perform the crop using array slicing - if self.data.ndim == 2: - cropped_data = self.data[y:y_end, x:x_end] - else: - cropped_data = self.data[y:y_end, x:x_end, :] - - return Image(data=cropped_data, format=self.format, frame_id=self.frame_id, ts=self.ts) + return Image(self._impl.crop(x, y, width, height)) # type: ignore[attr-defined] @property def sharpness(self) -> float: """Return sharpness score.""" - gray = self.to_grayscale() - sx = cv2.Sobel(gray.data, cv2.CV_32F, 1, 0, ksize=5) - sy = cv2.Sobel(gray.data, cv2.CV_32F, 0, 1, ksize=5) - magnitude = cv2.magnitude(sx, sy) - mean_mag = float(magnitude.mean()) - if mean_mag <= 0: - return 0.0 - return float(np.clip((np.log10(mean_mag + 1) - 1.7) / 2.0, 0.0, 1.0)) + return self._impl.sharpness() + + def to_depth_meters(self) -> Image: + """Return a depth image normalized to meters as float32.""" + depth_cv = self.to_opencv() + fmt = self.format + + if fmt == ImageFormat.DEPTH16: + depth_cv = depth_cv.astype(np.float32) / 1000.0 + fmt = ImageFormat.DEPTH + elif depth_cv.dtype != np.float32: + depth_cv = depth_cv.astype(np.float32) + fmt = ImageFormat.DEPTH if fmt == ImageFormat.DEPTH else fmt + + return Image.from_numpy(depth_cv, format=fmt, frame_id=self.frame_id, ts=self.ts) def save(self, filepath: str) -> bool: - arr = self.to_opencv() - return cv2.imwrite(filepath, arr) + return self._impl.save(filepath) def to_base64( self, @@ -565,25 +455,27 @@ def lcm_encode(self, frame_id: str | None = None) -> bytes: return msg.lcm_encode() # type: ignore[no-any-return] @classmethod - 
def lcm_decode(cls, data: bytes, **kwargs: Any) -> Image: + def lcm_decode(cls, data: bytes, **kwargs) -> Image: # type: ignore[no-untyped-def] msg = LCMImage.lcm_decode(data) fmt, dtype, channels = _parse_lcm_encoding(msg.encoding) - arr: np.ndarray[Any, Any] = np.frombuffer(msg.data, dtype=dtype) + arr = np.frombuffer(msg.data, dtype=dtype) if channels == 1: arr = arr.reshape((msg.height, msg.width)) else: arr = arr.reshape((msg.height, msg.width, channels)) return cls( - data=arr, - format=fmt, - frame_id=msg.header.frame_id if hasattr(msg, "header") else "", - ts=( - msg.header.stamp.sec + msg.header.stamp.nsec / 1e9 - if hasattr(msg, "header") - and hasattr(msg.header, "stamp") - and msg.header.stamp.sec > 0 - else time.time() - ), + NumpyImage( + arr, + fmt, + msg.header.frame_id if hasattr(msg, "header") else "", + ( + msg.header.stamp.sec + msg.header.stamp.nsec / 1e9 + if hasattr(msg, "header") + and hasattr(msg.header, "stamp") + and msg.header.stamp.sec > 0 + else time.time() + ), + ) ) def lcm_jpeg_encode(self, quality: int = 75, frame_id: str | None = None) -> bytes: @@ -632,7 +524,7 @@ def lcm_jpeg_encode(self, quality: int = 75, frame_id: str | None = None) -> byt return msg.lcm_encode() # type: ignore[no-any-return] @classmethod - def lcm_jpeg_decode(cls, data: bytes, **kwargs: Any) -> Image: + def lcm_jpeg_decode(cls, data: bytes, **kwargs) -> Image: # type: ignore[no-untyped-def] """Decode an LCM Image message with JPEG-compressed data. Args: @@ -651,18 +543,36 @@ def lcm_jpeg_decode(cls, data: bytes, **kwargs: Any) -> Image: bgr_array = jpeg.decode(msg.data) return cls( - data=bgr_array, - format=ImageFormat.BGR, - frame_id=msg.header.frame_id if hasattr(msg, "header") else "", - ts=( - msg.header.stamp.sec + msg.header.stamp.nsec / 1e9 - if hasattr(msg, "header") - and hasattr(msg.header, "stamp") - and msg.header.stamp.sec > 0 - else time.time() - ), + NumpyImage( + bgr_array, + ImageFormat.BGR, + msg.header.frame_id if hasattr(msg, "header") else "", + ( + msg.header.stamp.sec + msg.header.stamp.nsec / 1e9 + if hasattr(msg, "header") + and hasattr(msg.header, "stamp") + and msg.header.stamp.sec > 0 + else time.time() + ), + ) ) + # PnP wrappers + def solve_pnp(self, *args, **kwargs): # type: ignore[no-untyped-def] + return self._impl.solve_pnp(*args, **kwargs) # type: ignore[attr-defined] + + def solve_pnp_ransac(self, *args, **kwargs): # type: ignore[no-untyped-def] + return self._impl.solve_pnp_ransac(*args, **kwargs) # type: ignore[attr-defined] + + def solve_pnp_batch(self, *args, **kwargs): # type: ignore[no-untyped-def] + return self._impl.solve_pnp_batch(*args, **kwargs) # type: ignore[attr-defined] + + def create_csrt_tracker(self, *args, **kwargs): # type: ignore[no-untyped-def] + return self._impl.create_csrt_tracker(*args, **kwargs) # type: ignore[attr-defined] + + def csrt_update(self, *args, **kwargs): # type: ignore[no-untyped-def] + return self._impl.csrt_update(*args, **kwargs) # type: ignore[attr-defined] + @classmethod def from_ros_msg(cls, ros_msg: ROSImage) -> Image: """Create an Image from a ROS sensor_msgs/Image message. 
@@ -725,7 +635,7 @@ def from_ros_msg(cls, ros_msg: ROSImage) -> Image: ) @staticmethod - def _parse_encoding(encoding: str) -> dict[str, Any]: + def _parse_encoding(encoding: str) -> dict: # type: ignore[type-arg] """Translate ROS encoding strings into format metadata.""" encoding_map = { "mono8": {"format": ImageFormat.GRAY, "dtype": np.uint8, "channels": 1}, @@ -748,9 +658,44 @@ def _parse_encoding(encoding: str) -> dict[str, Any]: raise ValueError(f"Unsupported encoding: {encoding}") + def __repr__(self) -> str: + dev = "cuda" if self.is_cuda else "cpu" + return f"Image(shape={self.shape}, format={self.format.value}, dtype={self.dtype}, dev={dev}, frame_id='{self.frame_id}', ts={self.ts})" + + def __eq__(self, other) -> bool: # type: ignore[no-untyped-def] + if not isinstance(other, Image): + return False + return ( + np.array_equal(self.data, other.data) + and self.format == other.format + and self.frame_id == other.frame_id + and abs(self.ts - other.ts) < 1e-6 + ) + def __len__(self) -> int: + return int(self.height * self.width) + + def __getstate__(self): # type: ignore[no-untyped-def] + return {"data": self.data, "format": self.format, "frame_id": self.frame_id, "ts": self.ts} + + def __setstate__(self, state) -> None: # type: ignore[no-untyped-def] + self.__init__( # type: ignore[misc] + data=state.get("data"), + format=state.get("format"), + frame_id=state.get("frame_id"), + ts=state.get("ts"), + ) + + +# Re-exports for tests +HAS_CUDA = HAS_CUDA +ImageFormat = ImageFormat +NVIMGCODEC_LAST_USED = NVIMGCODEC_LAST_USED +HAS_NVIMGCODEC = HAS_NVIMGCODEC __all__ = [ - "Image", + "HAS_CUDA", + "HAS_NVIMGCODEC", + "NVIMGCODEC_LAST_USED", "ImageFormat", "sharpness_barrier", "sharpness_window", @@ -759,29 +704,27 @@ def _parse_encoding(encoding: str) -> dict[str, Any]: def sharpness_window(target_frequency: float, source: Observable[Image]) -> Observable[Image]: """Emit the sharpest Image seen within each sliding time window.""" - from reactivex.scheduler import ThreadPoolScheduler - if target_frequency <= 0: raise ValueError("target_frequency must be positive") window = TimestampedBufferCollection(1.0 / target_frequency) # type: ignore[var-annotated] source.subscribe(window.add) - thread_scheduler = ThreadPoolScheduler(max_workers=1) + thread_scheduler = ThreadPoolScheduler(max_workers=1) # type: ignore[name-defined] - def find_best(*_args: Any) -> Image | None: + def find_best(*_args): # type: ignore[no-untyped-def] if not window._items: return None - return max(window._items, key=lambda img: img.sharpness) # type: ignore[no-any-return] + return max(window._items, key=lambda img: img.sharpness) - return rx.interval(1.0 / target_frequency).pipe( # type: ignore[misc] + return rx.interval(1.0 / target_frequency).pipe( ops.observe_on(thread_scheduler), ops.map(find_best), ops.filter(lambda img: img is not None), ) -def sharpness_barrier(target_frequency: float) -> Any: +def sharpness_barrier(target_frequency: float): # type: ignore[no-untyped-def] """Select the sharpest Image within each time window.""" if target_frequency <= 0: raise ValueError("target_frequency must be positive") @@ -817,7 +760,7 @@ def _get_lcm_encoding(fmt: ImageFormat, dtype: np.dtype) -> str: # type: ignore raise ValueError(f"Unsupported LCM encoding for fmt={fmt}, dtype={dtype}") -def _parse_lcm_encoding(enc: str) -> tuple[ImageFormat, type, int]: +def _parse_lcm_encoding(enc: str): # type: ignore[no-untyped-def] m = { "mono8": (ImageFormat.GRAY, np.uint8, 1), "mono16": (ImageFormat.GRAY16, np.uint16, 1), diff --git 
a/dimos/msgs/sensor_msgs/image_impls/AbstractImage.py b/dimos/msgs/sensor_msgs/image_impls/AbstractImage.py index 80a1bb7ce..9cff0e8bd 100644 --- a/dimos/msgs/sensor_msgs/image_impls/AbstractImage.py +++ b/dimos/msgs/sensor_msgs/image_impls/AbstractImage.py @@ -1,4 +1,4 @@ -# Copyright 2026 Dimensional Inc. +# Copyright 2025-2026 Dimensional Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -12,8 +12,262 @@ # See the License for the specific language governing permissions and # limitations under the License. -# Backwards compatibility stub for unpickling old data. -# AbstractImage and ImageFormat were moved to Image. -from dimos.msgs.sensor_msgs.Image import Image as AbstractImage, ImageFormat +from __future__ import annotations -__all__ = ["AbstractImage", "ImageFormat"] +from abc import ABC, abstractmethod +import base64 +from enum import Enum +import os +from typing import Any + +import cv2 +import numpy as np +import rerun as rr + +try: + import cupy as cp # type: ignore[import-not-found, import-untyped] + + HAS_CUDA = True +except Exception: # pragma: no cover - optional dependency + cp = None + HAS_CUDA = False + +# NVRTC defaults to C++11; libcu++ in recent CUDA requires at least C++17. +if HAS_CUDA: + try: + import cupy.cuda.compiler as _cupy_compiler # type: ignore[import-not-found, import-untyped] + + if not getattr(_cupy_compiler, "_dimos_force_cxx17", False): + _orig_compile_using_nvrtc = _cupy_compiler.compile_using_nvrtc + + def _compile_using_nvrtc( # type: ignore[no-untyped-def] + source, options=(), *args, **kwargs + ): + filtered = tuple( + opt + for opt in options + if opt not in ("-std=c++11", "--std=c++11", "-std=c++14", "--std=c++14") + ) + if "--std=c++17" not in filtered and "-std=c++17" not in filtered: + filtered = (*filtered, "--std=c++17") + return _orig_compile_using_nvrtc(source, filtered, *args, **kwargs) + + _cupy_compiler.compile_using_nvrtc = _compile_using_nvrtc + _cupy_compiler._dimos_force_cxx17 = True + except Exception: + pass + +# Optional nvImageCodec (preferred GPU codec) +USE_NVIMGCODEC = os.environ.get("USE_NVIMGCODEC", "0") == "1" +NVIMGCODEC_LAST_USED = False +try: # pragma: no cover - optional dependency + if HAS_CUDA and USE_NVIMGCODEC: + from nvidia import nvimgcodec # type: ignore[import-untyped] + + try: + _enc_probe = nvimgcodec.Encoder() + HAS_NVIMGCODEC = True + except Exception: + nvimgcodec = None + HAS_NVIMGCODEC = False + else: + nvimgcodec = None + HAS_NVIMGCODEC = False +except Exception: # pragma: no cover - optional dependency + nvimgcodec = None + HAS_NVIMGCODEC = False + + +class ImageFormat(Enum): + BGR = "BGR" + RGB = "RGB" + RGBA = "RGBA" + BGRA = "BGRA" + GRAY = "GRAY" + GRAY16 = "GRAY16" + DEPTH = "DEPTH" + DEPTH16 = "DEPTH16" + + +def _is_cu(x) -> bool: # type: ignore[no-untyped-def] + return HAS_CUDA and cp is not None and isinstance(x, cp.ndarray) + + +def _ascontig(x): # type: ignore[no-untyped-def] + if _is_cu(x): + return x if x.flags["C_CONTIGUOUS"] else cp.ascontiguousarray(x) + return x if x.flags["C_CONTIGUOUS"] else np.ascontiguousarray(x) + + +def _to_cpu(x): # type: ignore[no-untyped-def] + return cp.asnumpy(x) if _is_cu(x) else x + + +def _to_cu(x): # type: ignore[no-untyped-def] + if HAS_CUDA and cp is not None and isinstance(x, np.ndarray): + return cp.asarray(x) + return x + + +def _encode_nvimgcodec_cuda(bgr_cu, quality: int = 80) -> bytes: # type: ignore[no-untyped-def] # pragma: no cover - optional + if not 
HAS_NVIMGCODEC or nvimgcodec is None: + raise RuntimeError("nvimgcodec not available") + if bgr_cu.ndim != 3 or bgr_cu.shape[2] != 3: + raise RuntimeError("nvimgcodec expects HxWx3 image") + if bgr_cu.dtype != cp.uint8: + raise RuntimeError("nvimgcodec requires uint8 input") + if not bgr_cu.flags["C_CONTIGUOUS"]: + bgr_cu = cp.ascontiguousarray(bgr_cu) + encoder = nvimgcodec.Encoder() + try: + img = nvimgcodec.Image(bgr_cu, nvimgcodec.PixelFormat.BGR) + except Exception: + img = nvimgcodec.Image(cp.asnumpy(bgr_cu), nvimgcodec.PixelFormat.BGR) + if hasattr(nvimgcodec, "EncodeParams"): + params = nvimgcodec.EncodeParams(quality=quality) + bitstreams = encoder.encode([img], [params]) + else: + bitstreams = encoder.encode([img]) + bs0 = bitstreams[0] + if hasattr(bs0, "buf"): + return bytes(bs0.buf) + return bytes(bs0) + + +def format_to_rerun(data, fmt: ImageFormat): # type: ignore[no-untyped-def] + """Convert image data to Rerun archetype based on format. + + Args: + data: Image data (numpy array or cupy array on CPU) + fmt: ImageFormat enum value + + Returns: + Rerun archetype (rr.Image or rr.DepthImage) + """ + match fmt: + case ImageFormat.RGB: + return rr.Image(data, color_model="RGB") + case ImageFormat.RGBA: + return rr.Image(data, color_model="RGBA") + case ImageFormat.BGR: + return rr.Image(data, color_model="BGR") + case ImageFormat.BGRA: + return rr.Image(data, color_model="BGRA") + case ImageFormat.GRAY: + return rr.Image(data, color_model="L") + case ImageFormat.GRAY16: + return rr.Image(data, color_model="L") + case ImageFormat.DEPTH: + return rr.DepthImage(data) + case ImageFormat.DEPTH16: + return rr.DepthImage(data) + case _: + raise ValueError(f"Unsupported format for Rerun: {fmt}") + + +class AbstractImage(ABC): + data: Any + format: ImageFormat + frame_id: str + ts: float + + @property + @abstractmethod + def is_cuda(self) -> bool: # pragma: no cover - abstract + ... + + @property + def height(self) -> int: + return int(self.data.shape[0]) + + @property + def width(self) -> int: + return int(self.data.shape[1]) + + @property + def channels(self) -> int: + if getattr(self.data, "ndim", 0) == 2: + return 1 + if getattr(self.data, "ndim", 0) == 3: + return int(self.data.shape[2]) + raise ValueError("Invalid image dimensions") + + @property + def shape(self): # type: ignore[no-untyped-def] + return tuple(self.data.shape) + + @property + def dtype(self): # type: ignore[no-untyped-def] + return self.data.dtype + + @abstractmethod + def to_opencv(self) -> np.ndarray: # type: ignore[type-arg] # pragma: no cover - abstract + ... + + @abstractmethod + def to_rgb(self) -> AbstractImage: # pragma: no cover - abstract + ... + + @abstractmethod + def to_bgr(self) -> AbstractImage: # pragma: no cover - abstract + ... + + @abstractmethod + def to_grayscale(self) -> AbstractImage: # pragma: no cover - abstract + ... + + @abstractmethod + def resize( + self, width: int, height: int, interpolation: int = cv2.INTER_LINEAR + ) -> AbstractImage: # pragma: no cover - abstract + ... + + @abstractmethod + def to_rerun(self) -> Any: # pragma: no cover - abstract + ... + + @abstractmethod + def sharpness(self) -> float: # pragma: no cover - abstract + ... 
+ + def copy(self) -> AbstractImage: + return self.__class__( # type: ignore[call-arg] + data=self.data.copy(), format=self.format, frame_id=self.frame_id, ts=self.ts + ) + + def save(self, filepath: str) -> bool: + global NVIMGCODEC_LAST_USED + if self.is_cuda and HAS_NVIMGCODEC and nvimgcodec is not None: + try: + bgr = self.to_bgr() + if _is_cu(bgr.data): + jpeg = _encode_nvimgcodec_cuda(bgr.data) + NVIMGCODEC_LAST_USED = True + with open(filepath, "wb") as f: + f.write(jpeg) + return True + except Exception: + NVIMGCODEC_LAST_USED = False + arr = self.to_opencv() + return cv2.imwrite(filepath, arr) + + def to_base64(self, quality: int = 80) -> str: + global NVIMGCODEC_LAST_USED + if self.is_cuda and HAS_NVIMGCODEC and nvimgcodec is not None: + try: + bgr = self.to_bgr() + if _is_cu(bgr.data): + jpeg = _encode_nvimgcodec_cuda(bgr.data, quality=quality) + NVIMGCODEC_LAST_USED = True + return base64.b64encode(jpeg).decode("utf-8") + except Exception: + NVIMGCODEC_LAST_USED = False + bgr = self.to_bgr() + success, buffer = cv2.imencode( + ".jpg", + _to_cpu(bgr.data), # type: ignore[no-untyped-call] + [int(cv2.IMWRITE_JPEG_QUALITY), int(quality)], + ) + if not success: + raise ValueError("Failed to encode image as JPEG") + return base64.b64encode(buffer.tobytes()).decode("utf-8") diff --git a/dimos/visualization/rerun/bridge.py b/dimos/visualization/rerun/bridge.py index 66338e812..bf25c1eb7 100644 --- a/dimos/visualization/rerun/bridge.py +++ b/dimos/visualization/rerun/bridge.py @@ -100,7 +100,7 @@ class Config(ModuleConfig): default_factory=lambda: [LCM(autoconf=True)] ) - converters: dict[Glob | str, Callable[[Any], Archetype]] = field(default_factory=dict) + visual_override: dict[Glob | str, Callable[[Any], Archetype]] = field(default_factory=dict) entity_prefix: str = "world" topic_to_entity: Callable[[Any], str] | None = None @@ -131,10 +131,12 @@ def __init__(self, **kwargs: Any) -> None: super().__init__(**kwargs) @lru_cache(maxsize=256) - def _converter_for_entity_path(self, entity_path: str) -> Callable[[Any], RerunData | None]: - """Return a composed converter for the entity path. + def _visual_override_for_entity_path( + self, entity_path: str + ) -> Callable[[Any], RerunData | None]: + """Return a composed visual override for the entity path. - Chains matching converters from config, ending with final_convert + Chains matching overrides from config, ending with final_convert which handles .to_rerun() or passes through Archetypes. 
""" from rerun._baseclasses import Archetype @@ -142,7 +144,7 @@ def _converter_for_entity_path(self, entity_path: str) -> Callable[[Any], RerunD # find all matching converters for this entity path matches = [ fn - for pattern, fn in self.config.converters.items() + for pattern, fn in self.config.visual_override.items() if pattern_matches(pattern, entity_path) ] @@ -177,8 +179,8 @@ def _on_message(self, msg: Any, topic: Any) -> None: # convert a potentially complex topic object into an str rerun entity path entity_path: str = self._get_entity_path(topic) - # apply converters (including final_convert which handles .to_rerun()) - rerun_data: RerunData | None = self._converter_for_entity_path(entity_path)(msg) + # apply visual overrides (including final_convert which handles .to_rerun()) + rerun_data: RerunData | None = self._visual_override_for_entity_path(entity_path)(msg) # converters can also suppress logging by returning None if not rerun_data: @@ -249,7 +251,7 @@ def main() -> None: # is acceptable here pubsubs=[LCM(autoconf=True)], # custom converters for specific rerun entity paths - converters={ + visual_override={ Glob("world/debug_navigation"): lambda grid: grid.to_rerun( colormap="Purples", z_offset=0.015, diff --git a/dimos/web/websocket_vis/websocket_vis_module.py b/dimos/web/websocket_vis/websocket_vis_module.py index e861fd6c3..3ae3d5c4f 100644 --- a/dimos/web/websocket_vis/websocket_vis_module.py +++ b/dimos/web/websocket_vis/websocket_vis_module.py @@ -228,8 +228,9 @@ def _create_server(self) -> None: async def serve_index(request): # type: ignore[no-untyped-def] """Serve appropriate HTML based on viewer mode.""" # If running native Rerun, redirect to standalone command center - if self._global_config.viewer_backend == "rerun": + if self._global_config.viewer_backend != "rerun-web": return RedirectResponse(url="/command-center") + # Otherwise serve full dashboard with Rerun iframe return FileResponse(_DASHBOARD_HTML, media_type="text/html") From f05ce21b79f50c0a5c8f535c102c0824f332165c Mon Sep 17 00:00:00 2001 From: Ivan Nikolic Date: Sun, 1 Feb 2026 12:09:17 +0800 Subject: [PATCH 17/36] looks nice --- dimos/msgs/nav_msgs/OccupancyGrid.py | 100 +++++++++++++++------------ dimos/msgs/nav_msgs/Path.py | 4 +- dimos/visualization/rerun/bridge.py | 8 ++- 3 files changed, 63 insertions(+), 49 deletions(-) diff --git a/dimos/msgs/nav_msgs/OccupancyGrid.py b/dimos/msgs/nav_msgs/OccupancyGrid.py index c0b1ee24b..82fb2ee34 100644 --- a/dimos/msgs/nav_msgs/OccupancyGrid.py +++ b/dimos/msgs/nav_msgs/OccupancyGrid.py @@ -432,9 +432,9 @@ def to_rerun( # type: ignore[no-untyped-def] colormap: str | None = None, mode: str = "mesh", z_offset: float = 0.01, - brightness: float = 1.0, + opacity: float = 1.0, cost_range: tuple[int, int] | None = None, - free_color: str | None = None, + background: str | None = None, **kwargs: Any, ): # type: ignore[no-untyped-def] """Convert to Rerun visualization format. @@ -446,9 +446,9 @@ def to_rerun( # type: ignore[no-untyped-def] - "image": 2D grayscale/colored image - "mesh": 3D textured plane overlay on floor (default) z_offset: Height offset for mesh mode (default 0.01m above floor) - brightness: Brightness multiplier (0.0 to 1.0, default 1.0) - cost_range: Optional (min, max) cost range to display. Cells outside range use free_color. - free_color: Hex color for out-of-range cells (e.g. "#484981"). Default is black. + opacity: Blend factor (0.0 to 1.0). Blends costmap colors towards background color. 
+ cost_range: Optional (min, max) cost range to display. Cells outside range use background. + background: Hex color for background/out-of-range cells (e.g. "#484981"). Default is black. **kwargs: Additional args (ignored for compatibility) Returns: @@ -466,35 +466,35 @@ def to_rerun( # type: ignore[no-untyped-def] return rr.Image(np.zeros((1, 1), dtype=np.uint8), color_model="L") if mode == "mesh": - return self._to_rerun_mesh(colormap, z_offset, brightness, cost_range, free_color) + return self._to_rerun_mesh(colormap, z_offset, opacity, cost_range, background) else: - return self._to_rerun_image(colormap, brightness, cost_range) + return self._to_rerun_image(colormap, opacity, cost_range) def _generate_rgba_texture( self, colormap: str | None = None, - brightness: float = 1.0, + opacity: float = 1.0, cost_range: tuple[int, int] | None = None, - free_color: str | None = None, + background: str | None = None, ) -> NDArray[np.uint8]: """Generate RGBA texture for the occupancy grid. Args: colormap: Optional matplotlib colormap name. - brightness: Brightness multiplier (0.0 to 1.0). - cost_range: Optional (min, max) cost range. Cells outside range use free_color. - free_color: Hex color for out-of-range cells (e.g. "#484981"). Default is black. + opacity: Blend factor (0.0 to 1.0). Blends towards background color. + cost_range: Optional (min, max) cost range. Cells outside range use background. + background: Hex color for background (e.g. "#484981"). Default is black. Returns: RGBA numpy array of shape (height, width, 4). Note: NOT flipped - caller handles orientation. """ - # Parse free_color hex to RGB - if free_color is not None: - fc = free_color.lstrip("#") - free_rgb = [int(fc[i : i + 2], 16) for i in (0, 2, 4)] + # Parse background hex to RGB + if background is not None: + bg = background.lstrip("#") + bg_rgb = np.array([int(bg[i : i + 2], 16) for i in (0, 2, 4)], dtype=np.float32) else: - free_rgb = [0, 0, 0] + bg_rgb = np.array([0, 0, 0], dtype=np.float32) # Determine which cells are in range (if cost_range specified) if cost_range is not None: @@ -512,24 +512,29 @@ def _generate_rgba_texture( occupied_mask = self.grid > 0 if np.any(free_mask): - colors_free = (np.array(cmap(0.0)[:3]) * 255 * brightness).astype(np.uint8) - vis[free_mask, :3] = colors_free + fg = np.array(cmap(0.0)[:3]) * 255 + blended = fg * opacity + bg_rgb * (1 - opacity) + vis[free_mask, :3] = blended.astype(np.uint8) vis[free_mask, 3] = 255 if np.any(occupied_mask): costs = grid_float[occupied_mask] cost_norm = 0.5 + (costs / 100) * 0.5 - colors_occ = (cmap(cost_norm)[:, :3] * 255 * brightness).astype(np.uint8) - vis[occupied_mask, :3] = colors_occ + fg = cmap(cost_norm)[:, :3] * 255 + blended = fg * opacity + bg_rgb * (1 - opacity) + vis[occupied_mask, :3] = blended.astype(np.uint8) vis[occupied_mask, 3] = 255 - # Unknown cells: fully transparent - # (vis is already zero-initialized, so alpha=0 by default) + # Unknown cells: always black + unknown_mask = self.grid == -1 + vis[unknown_mask, :3] = 0 + vis[unknown_mask, 3] = 255 - # Apply cost_range filter - set out-of-range cells to free_color (Rerun mesh doesn't support alpha) + # Apply cost_range filter - set out-of-range cells to background if in_range_mask is not None: - vis[~in_range_mask, :3] = free_rgb - vis[~in_range_mask, 3] = 255 + out_of_range = ~in_range_mask & (self.grid != -1) + vis[out_of_range, :3] = bg_rgb.astype(np.uint8) + vis[out_of_range, 3] = 255 return vis @@ -539,31 +544,38 @@ def _generate_rgba_texture( free_mask = self.grid == 0 
occupied_mask = self.grid > 0 - # Free space: blue-purple #484981 - vis[free_mask] = [int(72 * brightness), int(73 * brightness), int(129 * brightness), 180] + # Free space: blue-purple #484981, blended with background + fg_free = np.array([72, 73, 129], dtype=np.float32) + blended_free = fg_free * opacity + bg_rgb * (1 - opacity) + vis[free_mask, :3] = blended_free.astype(np.uint8) + vis[free_mask, 3] = 255 - # Occupied: gradient from blue-purple to black + # Occupied: gradient from blue-purple to black, blended with background if np.any(occupied_mask): costs = self.grid[occupied_mask].astype(np.float32) factor = (1 - costs / 100).clip(0, 1) - vis[occupied_mask, 0] = (72 * factor * brightness).astype(np.uint8) - vis[occupied_mask, 1] = (73 * factor * brightness).astype(np.uint8) - vis[occupied_mask, 2] = (129 * factor * brightness).astype(np.uint8) - vis[occupied_mask, 3] = 220 + fg_occ = np.column_stack([72 * factor, 73 * factor, 129 * factor]) + blended_occ = fg_occ * opacity + bg_rgb * (1 - opacity) + vis[occupied_mask, :3] = blended_occ.astype(np.uint8) + vis[occupied_mask, 3] = 255 - # Unknown cells: fully transparent (already zero from initialization) + # Unknown cells: always black + unknown_mask = self.grid == -1 + vis[unknown_mask, :3] = 0 + vis[unknown_mask, 3] = 255 - # Apply cost_range filter - set out-of-range cells to free_color + # Apply cost_range filter - set out-of-range cells to background if in_range_mask is not None: - vis[~in_range_mask, :3] = free_rgb - vis[~in_range_mask, 3] = 255 + out_of_range = ~in_range_mask & (self.grid != -1) + vis[out_of_range, :3] = bg_rgb.astype(np.uint8) + vis[out_of_range, 3] = 255 return vis def _to_rerun_image( # type: ignore[no-untyped-def] self, colormap: str | None = None, - brightness: float = 1.0, + opacity: float = 1.0, cost_range: tuple[int, int] | None = None, ): """Convert to 2D image visualization.""" @@ -576,8 +588,8 @@ def _to_rerun_image( # type: ignore[no-untyped-def] else: bgr_image = rainbow_image(self.grid) - # Convert BGR to RGB, apply brightness, and flip for world coordinates - rgb_image = (np.flipud(bgr_image[:, :, ::-1]) * brightness).astype(np.uint8) + # Convert BGR to RGB, apply opacity, and flip for world coordinates + rgb_image = (np.flipud(bgr_image[:, :, ::-1]) * opacity).astype(np.uint8) # Apply cost_range filter for turbo/rainbow if cost_range is not None: @@ -592,7 +604,7 @@ def _to_rerun_image( # type: ignore[no-untyped-def] if colormap is not None: # Use helper and flip for image display - rgba = self._generate_rgba_texture(colormap, brightness, cost_range) + rgba = self._generate_rgba_texture(colormap, opacity, cost_range) return rr.Image(np.flipud(rgba), color_model="RGBA") # Grayscale visualization (no colormap) @@ -618,9 +630,9 @@ def _to_rerun_mesh( # type: ignore[no-untyped-def] self, colormap: str | None = None, z_offset: float = 0.01, - brightness: float = 1.0, + opacity: float = 1.0, cost_range: tuple[int, int] | None = None, - free_color: str | None = None, + background: str | None = None, ): """Convert to 3D textured mesh overlay on floor plane. 
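The hunks above replace the old brightness multiplier with a linear blend of the cell colour towards a configurable background colour, applied the same way in both the colormap branch and the default blue-purple palette branch. As a reference for the per-cell math only, here is a minimal standalone sketch assuming just numpy; the helper name and the sample values are illustrative and are not part of the patch:

import numpy as np

def blend_to_background(fg_rgb, background: str, opacity: float) -> np.ndarray:
    """Blend a foreground RGB colour (0-255) towards a hex background colour."""
    bg = background.lstrip("#")
    bg_rgb = np.array([int(bg[i:i + 2], 16) for i in (0, 2, 4)], dtype=np.float32)
    fg = np.asarray(fg_rgb, dtype=np.float32)
    # Same interpolation as the hunks above: fg * opacity + bg * (1 - opacity)
    return (fg * opacity + bg_rgb * (1 - opacity)).astype(np.uint8)

# A free-space cell (blue-purple #484981) drawn at opacity=0.2 over a black background:
print(blend_to_background([72, 73, 129], "#000000", 0.2))  # -> [14 14 25]
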
@@ -632,7 +644,7 @@ def _to_rerun_mesh( # type: ignore[no-untyped-def] # Generate RGBA texture and flip to match world coordinates # Grid row 0 is at world y=origin (bottom), but texture row 0 is at UV v=0 (top) - rgba = np.flipud(self._generate_rgba_texture(colormap, brightness, cost_range, free_color)) + rgba = np.flipud(self._generate_rgba_texture(colormap, opacity, cost_range, background)) # Single quad covering entire grid ox = self.origin.position.x diff --git a/dimos/msgs/nav_msgs/Path.py b/dimos/msgs/nav_msgs/Path.py index e92eab17a..7928bcd3b 100644 --- a/dimos/msgs/nav_msgs/Path.py +++ b/dimos/msgs/nav_msgs/Path.py @@ -235,8 +235,8 @@ def to_ros_msg(self) -> ROSPath: def to_rerun( # type: ignore[no-untyped-def] self, - color: tuple[int, int, int] = (0, 255, 128), - z_offset: float = 0.2, + color: tuple[int, int, int] = (255, 128, 0), + z_offset: float = 0.5, radii: float = 0.05, ): """Convert to rerun LineStrips3D format. diff --git a/dimos/visualization/rerun/bridge.py b/dimos/visualization/rerun/bridge.py index bf25c1eb7..2e14572de 100644 --- a/dimos/visualization/rerun/bridge.py +++ b/dimos/visualization/rerun/bridge.py @@ -252,10 +252,12 @@ def main() -> None: pubsubs=[LCM(autoconf=True)], # custom converters for specific rerun entity paths visual_override={ - Glob("world/debug_navigation"): lambda grid: grid.to_rerun( - colormap="Purples", + "world/global_map": lambda grid: grid.to_rerun(radii=0.05), + "world/debug_navigation": lambda grid: grid.to_rerun( + colormap="Accent", z_offset=0.015, - brightness=0.5, + opacity=0.33, + background="#484981", ), }, ) From fe36ba3ccd86740732abf86d8c7a3259e8de13ce Mon Sep 17 00:00:00 2001 From: Ivan Nikolic Date: Sun, 1 Feb 2026 12:39:14 +0800 Subject: [PATCH 18/36] camerainfo knows how to render itself in the space --- dimos/msgs/nav_msgs/Path.py | 2 +- dimos/msgs/sensor_msgs/CameraInfo.py | 48 ++++++++++++++++++++++++++-- dimos/visualization/rerun/bridge.py | 2 +- 3 files changed, 48 insertions(+), 4 deletions(-) diff --git a/dimos/msgs/nav_msgs/Path.py b/dimos/msgs/nav_msgs/Path.py index 7928bcd3b..c7b438f20 100644 --- a/dimos/msgs/nav_msgs/Path.py +++ b/dimos/msgs/nav_msgs/Path.py @@ -235,7 +235,7 @@ def to_ros_msg(self) -> ROSPath: def to_rerun( # type: ignore[no-untyped-def] self, - color: tuple[int, int, int] = (255, 128, 0), + color: tuple[int, int, int] = (0, 255, 128), z_offset: float = 0.5, radii: float = 0.05, ): diff --git a/dimos/msgs/sensor_msgs/CameraInfo.py b/dimos/msgs/sensor_msgs/CameraInfo.py index 855276b4e..4fab6f136 100644 --- a/dimos/msgs/sensor_msgs/CameraInfo.py +++ b/dimos/msgs/sensor_msgs/CameraInfo.py @@ -395,7 +395,13 @@ def __eq__(self, other) -> bool: # type: ignore[no-untyped-def] and self.frame_id == other.frame_id ) - def to_rerun(self, image_plane_distance: float = 0.5): # type: ignore[no-untyped-def] + def to_rerun( + self, + image_plane_distance: float = 1.0, + # These are defaults for a typical RGB camera with a known transform + image_topic: str | None = "color_image", + optical_transform: str | None = "camera_optical", + ): """Convert to Rerun Pinhole archetype for camera frustum visualization. 
Args: @@ -411,7 +417,7 @@ def to_rerun(self, image_plane_distance: float = 0.5): # type: ignore[no-untype fx, fy = self.K[0], self.K[4] cx, cy = self.K[2], self.K[5] - return rr.Pinhole( + pinhole = rr.Pinhole( focal_length=[fx, fy], principal_point=[cx, cy], width=self.width, @@ -419,6 +425,44 @@ def to_rerun(self, image_plane_distance: float = 0.5): # type: ignore[no-untype image_plane_distance=image_plane_distance, ) + # If no image topic is specified, We don't know which Image this CameraInfo refers to + # return just the pinhole + if not image_topic: + return pinhole + + ret = [] + + # Add pinhole under world/image_topic (we know which Image this CameraInfo refers to) + ret.append( + ( + f"world/{image_topic}", + rr.Pinhole( + focal_length=[fx, fy], + principal_point=[cx, cy], + width=self.width, + height=self.height, + image_plane_distance=image_plane_distance, + ), + ) + ) + + if not optical_transform: + return ret + + # Add 3d transform from optical frame to world/image_topic (We know where the camera is) + ret.append( + ( + "world/color_image", + rr.Transform3D( + translation=[0.0, 0.0, 0.0], + rotation=rr.Quaternion(xyzw=[0.0, 0.0, 0.0, 1.0]), + parent_frame=f"tf#/{optical_transform}", + ), + ) + ) + + return ret + class CalibrationProvider: """Provides lazy-loaded access to camera calibration YAML files in a directory.""" diff --git a/dimos/visualization/rerun/bridge.py b/dimos/visualization/rerun/bridge.py index 2e14572de..7a2c655c3 100644 --- a/dimos/visualization/rerun/bridge.py +++ b/dimos/visualization/rerun/bridge.py @@ -256,7 +256,7 @@ def main() -> None: "world/debug_navigation": lambda grid: grid.to_rerun( colormap="Accent", z_offset=0.015, - opacity=0.33, + opacity=0.2, background="#484981", ), }, From 4eff56c5fc9f0e8292b603c8e63e8e73862ed695 Mon Sep 17 00:00:00 2001 From: Ivan Nikolic Date: Sun, 1 Feb 2026 13:00:07 +0800 Subject: [PATCH 19/36] typing for to_rerun calls --- dimos/core/blueprints.py | 3 +++ dimos/hardware/sensors/camera/module.py | 10 ++++++++-- dimos/msgs/geometry_msgs/PoseStamped.py | 7 +++++-- dimos/msgs/nav_msgs/OccupancyGrid.py | 13 +++++++------ dimos/msgs/nav_msgs/Path.py | 6 ++++-- dimos/msgs/sensor_msgs/CameraInfo.py | 8 ++++++-- dimos/msgs/sensor_msgs/PointCloud2.py | 8 +++++--- dimos/msgs/tf2_msgs/TFMessage.py | 8 +++++--- 8 files changed, 43 insertions(+), 20 deletions(-) diff --git a/dimos/core/blueprints.py b/dimos/core/blueprints.py index 5d0e25df9..1a8f449ea 100644 --- a/dimos/core/blueprints.py +++ b/dimos/core/blueprints.py @@ -468,6 +468,9 @@ def _init_rerun_blueprint(self, module_coordinator: ModuleCoordinator) -> None: # Always include latency panel if we have any panels if side_panels: + import rerun as rr + import rerun.blueprint as rrb + side_panels.append( rrb.TimeSeriesView( name="Latency (ms)", diff --git a/dimos/hardware/sensors/camera/module.py b/dimos/hardware/sensors/camera/module.py index 54c4b338f..4173423cb 100644 --- a/dimos/hardware/sensors/camera/module.py +++ b/dimos/hardware/sensors/camera/module.py @@ -15,7 +15,10 @@ from collections.abc import Callable, Generator from dataclasses import dataclass, field import time -from typing import Any +from typing import TYPE_CHECKING, Any, cast + +if TYPE_CHECKING: + from rerun._baseclasses import Archetype import reactivex as rx from reactivex import operators as ops @@ -105,7 +108,10 @@ def publish_metadata(self) -> None: self.camera_info.publish(camera_info) if self._rerun_enabled: - rr.log("world/robot/camera", camera_info.to_rerun()) + rr.log( + "world/robot/camera", + 
cast("Archetype", camera_info.to_rerun(image_topic=None, optical_transform=None)), + ) if not self.config.transform: return diff --git a/dimos/msgs/geometry_msgs/PoseStamped.py b/dimos/msgs/geometry_msgs/PoseStamped.py index 406c5d7ac..30b39f51a 100644 --- a/dimos/msgs/geometry_msgs/PoseStamped.py +++ b/dimos/msgs/geometry_msgs/PoseStamped.py @@ -16,7 +16,10 @@ import math import time -from typing import BinaryIO, TypeAlias +from typing import TYPE_CHECKING, BinaryIO, TypeAlias + +if TYPE_CHECKING: + from rerun._baseclasses import Archetype from dimos_lcm.geometry_msgs import PoseStamped as LCMPoseStamped @@ -87,7 +90,7 @@ def __str__(self) -> str: f"euler=[{math.degrees(self.roll):.1f}, {math.degrees(self.pitch):.1f}, {math.degrees(self.yaw):.1f}])" ) - def to_rerun(self): # type: ignore[no-untyped-def] + def to_rerun(self) -> Archetype: """Convert to rerun Transform3D format. Returns a Transform3D that can be logged to Rerun to position diff --git a/dimos/msgs/nav_msgs/OccupancyGrid.py b/dimos/msgs/nav_msgs/OccupancyGrid.py index 82fb2ee34..7b37a9fd0 100644 --- a/dimos/msgs/nav_msgs/OccupancyGrid.py +++ b/dimos/msgs/nav_msgs/OccupancyGrid.py @@ -43,6 +43,7 @@ def _get_matplotlib_cmap(name: str): # type: ignore[no-untyped-def] from pathlib import Path from numpy.typing import NDArray + from rerun._baseclasses import Archetype class CostValues(IntEnum): @@ -427,7 +428,7 @@ def cell_value(self, world_position: Vector3) -> int: return int(self.grid[y, x]) - def to_rerun( # type: ignore[no-untyped-def] + def to_rerun( self, colormap: str | None = None, mode: str = "mesh", @@ -436,7 +437,7 @@ def to_rerun( # type: ignore[no-untyped-def] cost_range: tuple[int, int] | None = None, background: str | None = None, **kwargs: Any, - ): # type: ignore[no-untyped-def] + ) -> Archetype: """Convert to Rerun visualization format. Args: @@ -572,12 +573,12 @@ def _generate_rgba_texture( return vis - def _to_rerun_image( # type: ignore[no-untyped-def] + def _to_rerun_image( self, colormap: str | None = None, opacity: float = 1.0, cost_range: tuple[int, int] | None = None, - ): + ) -> Archetype: """Convert to 2D image visualization.""" # Use existing cached visualization functions for supported palettes if colormap in ("turbo", "rainbow"): @@ -626,14 +627,14 @@ def _to_rerun_image( # type: ignore[no-untyped-def] # Flip vertically to match world coordinates (y=0 at bottom) return rr.Image(np.flipud(vis_gray), color_model="L") - def _to_rerun_mesh( # type: ignore[no-untyped-def] + def _to_rerun_mesh( self, colormap: str | None = None, z_offset: float = 0.01, opacity: float = 1.0, cost_range: tuple[int, int] | None = None, background: str | None = None, - ): + ) -> Archetype: """Convert to 3D textured mesh overlay on floor plane. Uses a single quad with the occupancy grid as a texture. diff --git a/dimos/msgs/nav_msgs/Path.py b/dimos/msgs/nav_msgs/Path.py index c7b438f20..434301ac7 100644 --- a/dimos/msgs/nav_msgs/Path.py +++ b/dimos/msgs/nav_msgs/Path.py @@ -38,6 +38,8 @@ if TYPE_CHECKING: from collections.abc import Iterator + from rerun._baseclasses import Archetype + def sec_nsec(ts): # type: ignore[no-untyped-def] s = int(ts) @@ -233,12 +235,12 @@ def to_ros_msg(self) -> ROSPath: return ros_msg - def to_rerun( # type: ignore[no-untyped-def] + def to_rerun( self, color: tuple[int, int, int] = (0, 255, 128), z_offset: float = 0.5, radii: float = 0.05, - ): + ) -> Archetype: """Convert to rerun LineStrips3D format. 
Args: diff --git a/dimos/msgs/sensor_msgs/CameraInfo.py b/dimos/msgs/sensor_msgs/CameraInfo.py index 4fab6f136..59e22d73c 100644 --- a/dimos/msgs/sensor_msgs/CameraInfo.py +++ b/dimos/msgs/sensor_msgs/CameraInfo.py @@ -15,6 +15,10 @@ from __future__ import annotations import time +from typing import TYPE_CHECKING + +if TYPE_CHECKING: + from dimos.visualization.rerun.bridge import RerunData, RerunMulti # Import LCM types from dimos_lcm.sensor_msgs import CameraInfo as LCMCameraInfo @@ -401,7 +405,7 @@ def to_rerun( # These are defaults for a typical RGB camera with a known transform image_topic: str | None = "color_image", optical_transform: str | None = "camera_optical", - ): + ) -> RerunData: """Convert to Rerun Pinhole archetype for camera frustum visualization. Args: @@ -430,7 +434,7 @@ def to_rerun( if not image_topic: return pinhole - ret = [] + ret: RerunMulti = [] # Add pinhole under world/image_topic (we know which Image this CameraInfo refers to) ret.append( diff --git a/dimos/msgs/sensor_msgs/PointCloud2.py b/dimos/msgs/sensor_msgs/PointCloud2.py index 301758e13..56737de29 100644 --- a/dimos/msgs/sensor_msgs/PointCloud2.py +++ b/dimos/msgs/sensor_msgs/PointCloud2.py @@ -46,6 +46,8 @@ from dimos.types.timestamped import Timestamped if TYPE_CHECKING: + from rerun._baseclasses import Archetype + from dimos.msgs.sensor_msgs.CameraInfo import CameraInfo from dimos.msgs.sensor_msgs.Image import Image @@ -618,7 +620,7 @@ def __len__(self) -> int: return 0 return int(self._pcd_tensor.point["positions"].shape[0]) - def to_rerun( # type: ignore[no-untyped-def] + def to_rerun( self, radii: float = 0.025, colormap: str | None = "turbo", @@ -626,8 +628,8 @@ def to_rerun( # type: ignore[no-untyped-def] mode: str = "boxes", size: float | None = None, fill_mode: str = "solid", - **kwargs, # type: ignore[no-untyped-def] - ): # type: ignore[no-untyped-def] + **kwargs: object, + ) -> Archetype: import rerun as rr """Convert to Rerun Points3D or Boxes3D archetype. diff --git a/dimos/msgs/tf2_msgs/TFMessage.py b/dimos/msgs/tf2_msgs/TFMessage.py index c4a160dc5..db5bbed70 100644 --- a/dimos/msgs/tf2_msgs/TFMessage.py +++ b/dimos/msgs/tf2_msgs/TFMessage.py @@ -47,6 +47,8 @@ if TYPE_CHECKING: from collections.abc import Iterator + from dimos.visualization.rerun.bridge import RerunMulti + class TFMessage: """TFMessage that accepts Transform objects and encodes to LCM format.""" @@ -160,7 +162,7 @@ def to_ros_msg(self) -> ROSTFMessage: return ros_msg - def to_rerun(self): # type: ignore[no-untyped-def] + def to_rerun(self) -> RerunMulti: """Convert to a list of rerun Transform3D archetypes. 
Returns a list of tuples (entity_path, Transform3D) for each transform @@ -176,8 +178,8 @@ def to_rerun(self): # type: ignore[no-untyped-def] for path, transform in tf_msg.to_rerun(): rr.log(path, transform) """ - results = [] + results: RerunMulti = [] for transform in self.transforms: entity_path = f"world/{transform.child_frame_id}" - results.append((entity_path, transform.to_rerun())) # type: ignore[no-untyped-call] + results.append((entity_path, transform.to_rerun())) return results From b49b5968647d363559cf1ca8843271846849f1c2 Mon Sep 17 00:00:00 2001 From: Ivan Nikolic Date: Sun, 1 Feb 2026 13:01:29 +0800 Subject: [PATCH 20/36] utils type fix --- dimos/spec/utils.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/dimos/spec/utils.py b/dimos/spec/utils.py index 54b94b657..b9786b91b 100644 --- a/dimos/spec/utils.py +++ b/dimos/spec/utils.py @@ -15,7 +15,7 @@ import inspect from typing import Any, Protocol, runtime_checkable -from annotation_protocol import AnnotationProtocol # type: ignore[import-untyped] +from annotation_protocol import AnnotationProtocol # type: ignore[import-not-found,import-untyped] from typing_extensions import is_protocol From 1c76ebe1fd8ae37080bc473ca625d76c40dc569c Mon Sep 17 00:00:00 2001 From: Ivan Nikolic Date: Sun, 1 Feb 2026 13:04:41 +0800 Subject: [PATCH 21/36] better transport comment --- dimos/core/transport.py | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) diff --git a/dimos/core/transport.py b/dimos/core/transport.py index 9951be965..fe26b5a93 100644 --- a/dimos/core/transport.py +++ b/dimos/core/transport.py @@ -40,7 +40,7 @@ # TODO # Transports need to be rewritten and simplified, # -# there is no need for them to get a reference to "a stream" +# there is no need for them to get a reference to "a stream" on publish/subscribe calls # this is a legacy from dask transports. # # new transport should literally have 2 functions (next to start/stop) @@ -48,6 +48,14 @@ # # we can also consider pubsubs conforming directly to Transport specs # and removing PubSubTransport glue entirely +# +# Why not ONLY pubsubs without Transport abstraction? +# +# General idea for transports (and why they exist at all) +# is that they can be * anything * like +# +# a web camera rtsp stream for Image, audio stream from mic, etc +# http binary streams, tcp connections etc class PubSubTransport(Transport[T]): From 16843de55c34f04387b2114ffc9091853e598633 Mon Sep 17 00:00:00 2001 From: Ivan Nikolic Date: Sun, 1 Feb 2026 13:18:50 +0800 Subject: [PATCH 22/36] base blueprints defined --- dimos/core/base_blueprints.py | 53 +++++++++++++++++++ .../unitree_webrtc/unitree_go2_blueprints.py | 33 +----------- dimos/visualization/rerun/bridge.py | 1 + 3 files changed, 56 insertions(+), 31 deletions(-) create mode 100644 dimos/core/base_blueprints.py diff --git a/dimos/core/base_blueprints.py b/dimos/core/base_blueprints.py new file mode 100644 index 000000000..94f56ceba --- /dev/null +++ b/dimos/core/base_blueprints.py @@ -0,0 +1,53 @@ +# Copyright 2026 Dimensional Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. + +import platform + +from dimos.core.blueprints import autoconnect +from dimos.core.global_config import GlobalConfig + +_config = GlobalConfig() +from dimos.constants import DEFAULT_CAPACITY_COLOR_IMAGE +from dimos.core.transport import ( + pSHMTransport, +) +from dimos.msgs.sensor_msgs import Image + +# Mac has some issue with high bandwidth UDP, so we use pSHMTransport for color_image +# actually we can use pSHMTransport for all platforms, and for all streams +# TODO need a global transport toggle on blueprints/global config +mac_transports: dict[tuple[str, type], pSHMTransport[Image]] = { + ("color_image", Image): pSHMTransport( + "color_image", default_capacity=DEFAULT_CAPACITY_COLOR_IMAGE + ), +} + +mac = autoconnect().transports(mac_transports) +linux = autoconnect() + +base = linux if platform.system() == "Linux" else mac + +if _config.viewer_backend == "foxglove": + base = autoconnect( + base, + foxglove_bridge(shm_channels=["/color_image#sensor_msgs.Image"]), + ) + +if _config.viewer_backend == "rerun": + base = autoconnect(base, rerun_bridge(viewer_mode="native")) +elif _config.viewer_backend == "rerun-web": + base = autoconnect(base, rerun_bridge(viewer_mode="web")) + + +base_blueprint = base diff --git a/dimos/robot/unitree_webrtc/unitree_go2_blueprints.py b/dimos/robot/unitree_webrtc/unitree_go2_blueprints.py index d830b06e4..a06e7b645 100644 --- a/dimos/robot/unitree_webrtc/unitree_go2_blueprints.py +++ b/dimos/robot/unitree_webrtc/unitree_go2_blueprints.py @@ -14,7 +14,6 @@ # See the License for the specific language governing permissions and # limitations under the License. -import platform from dimos_lcm.foxglove_msgs.ImageAnnotations import ( ImageAnnotations, # type: ignore[import-untyped] @@ -31,7 +30,7 @@ from dimos.agents.spec import Provider from dimos.agents.vlm_agent import vlm_agent from dimos.agents.vlm_stream_tester import vlm_stream_tester -from dimos.constants import DEFAULT_CAPACITY_COLOR_IMAGE +from dimos.core.base_blueprints import base_blueprint from dimos.core.blueprints import autoconnect from dimos.core.global_config import GlobalConfig from dimos.core.transport import ( @@ -39,7 +38,6 @@ JpegShmTransport, LCMTransport, ROSTransport, - pSHMTransport, ) from dimos.mapping.costmapper import cost_mapper from dimos.mapping.voxels import voxel_mapper @@ -60,41 +58,14 @@ from dimos.robot.unitree.connection.go2 import GO2Connection, go2_connection from dimos.robot.unitree_webrtc.unitree_skill_container import unitree_skills from dimos.utils.monitoring import utilization -from dimos.visualization.rerun.bridge import rerun_bridge from dimos.web.websocket_vis.websocket_vis_module import websocket_vis _config = GlobalConfig() -# This stuff (also for viewer) needs to be in some shared base blueprints file -# I just want to start from Base here in go2 - -# Mac has some issue with high bandwidth UDP, so we use pSHMTransport for color_image -_mac_transports: dict[tuple[str, type], pSHMTransport[Image]] = { - ("color_image", Image): pSHMTransport( - "color_image", default_capacity=DEFAULT_CAPACITY_COLOR_IMAGE - ), -} - - -if _config.viewer_backend == "foxglove": - _mac = autoconnect( - foxglove_bridge(shm_channels=["/color_image#sensor_msgs.Image"]), - ).transports(_mac_transports) - _linux = autoconnect(foxglove_bridge()) -elif _config.viewer_backend == "rerun": - _mac = autoconnect(rerun_bridge(viewer_mode="native")).transports(_mac_transports) - _linux = 
autoconnect(rerun_bridge(viewer_mode="native")) -elif _config.viewer_backend == "rerun-web": - _mac = autoconnect(rerun_bridge(viewer_mode="web")).transports(_mac_transports) - _linux = autoconnect(rerun_bridge(viewer_mode="web")) -else: # "none" - _mac = autoconnect().transports(_mac_transports) - _linux = autoconnect() - unitree_go2_basic = autoconnect( + base_blueprint, go2_connection(), - _linux if platform.system() == "Linux" else _mac, websocket_vis(), ).global_config(n_dask_workers=4, robot_model="unitree_go2") diff --git a/dimos/visualization/rerun/bridge.py b/dimos/visualization/rerun/bridge.py index 7a2c655c3..77d8243a7 100644 --- a/dimos/visualization/rerun/bridge.py +++ b/dimos/visualization/rerun/bridge.py @@ -88,6 +88,7 @@ def to_rerun(self) -> RerunData: ... # automatically spy an all the transports and read visualization hints # # this is the correct implementation. +# # temporarily we are using these "sideloading" converters={} to define custom to_rerun methods for specific topics # as well as pubsubs to specify which protocols to listen to. From a7f5c84d06238a87491673f5dafafd0d912326a1 Mon Sep 17 00:00:00 2001 From: Ivan Nikolic Date: Sun, 1 Feb 2026 13:55:29 +0800 Subject: [PATCH 23/36] small restructure --- dimos/core/base_blueprints.py | 58 ++++++++++-------- dimos/core/blueprints.py | 60 ------------------- dimos/core/global_config.py | 2 +- dimos/mapping/costmapper.py | 6 -- dimos/navigation/replanning_a_star/module.py | 11 ---- dimos/robot/unitree/connection/go2.py | 8 --- .../unitree_webrtc/unitree_go2_blueprints.py | 21 +------ 7 files changed, 34 insertions(+), 132 deletions(-) diff --git a/dimos/core/base_blueprints.py b/dimos/core/base_blueprints.py index 94f56ceba..987a120b8 100644 --- a/dimos/core/base_blueprints.py +++ b/dimos/core/base_blueprints.py @@ -14,40 +14,46 @@ import platform -from dimos.core.blueprints import autoconnect -from dimos.core.global_config import GlobalConfig - -_config = GlobalConfig() from dimos.constants import DEFAULT_CAPACITY_COLOR_IMAGE +from dimos.core.blueprints import autoconnect from dimos.core.transport import ( pSHMTransport, ) from dimos.msgs.sensor_msgs import Image +from dimos.robot.foxglove_bridge import foxglove_bridge +from dimos.visualization.rerun.bridge import rerun_bridge -# Mac has some issue with high bandwidth UDP, so we use pSHMTransport for color_image -# actually we can use pSHMTransport for all platforms, and for all streams -# TODO need a global transport toggle on blueprints/global config -mac_transports: dict[tuple[str, type], pSHMTransport[Image]] = { - ("color_image", Image): pSHMTransport( - "color_image", default_capacity=DEFAULT_CAPACITY_COLOR_IMAGE - ), -} -mac = autoconnect().transports(mac_transports) -linux = autoconnect() +def base_blueprint(): + from dimos.core.global_config import GlobalConfig -base = linux if platform.system() == "Linux" else mac + _config = GlobalConfig() -if _config.viewer_backend == "foxglove": - base = autoconnect( - base, - foxglove_bridge(shm_channels=["/color_image#sensor_msgs.Image"]), - ) - -if _config.viewer_backend == "rerun": - base = autoconnect(base, rerun_bridge(viewer_mode="native")) -elif _config.viewer_backend == "rerun-web": - base = autoconnect(base, rerun_bridge(viewer_mode="web")) + # Mac has some issue with high bandwidth UDP, so we use pSHMTransport for color_image + # actually we can use pSHMTransport for all platforms, and for all streams + # TODO need a global transport toggle on blueprints/global config + mac_transports: dict[tuple[str, type], 
pSHMTransport[Image]] = { + ("color_image", Image): pSHMTransport( + "color_image", default_capacity=DEFAULT_CAPACITY_COLOR_IMAGE + ), + } + base = ( + autoconnect() if platform.system() == "Linux" else autoconnect().transports(mac_transports) + ) -base_blueprint = base + with_vis = None + + if _config.viewer_backend == "foxglove": + with_vis = autoconnect( + base, + foxglove_bridge(shm_channels=["/color_image#sensor_msgs.Image"]), + ) + elif _config.viewer_backend == "rerun": + with_vis = autoconnect(base, rerun_bridge()) + elif _config.viewer_backend == "rerun-web": + with_vis = autoconnect(base, rerun_bridge(viewer_mode="web")) + else: + with_vis = base + + return with_vis diff --git a/dimos/core/blueprints.py b/dimos/core/blueprints.py index 1a8f449ea..4c1558cfe 100644 --- a/dimos/core/blueprints.py +++ b/dimos/core/blueprints.py @@ -453,65 +453,6 @@ def _connect_rpc_methods(self, module_coordinator: ModuleCoordinator) -> None: requested_method_name, rpc_methods_dot[requested_method_name] ) - def _init_rerun_blueprint(self, module_coordinator: ModuleCoordinator) -> None: - """Compose and send Rerun blueprint from module contributions. - - Collects rerun_views() from all modules and composes them into a unified layout. - """ - # Collect view contributions from all modules - side_panels = [] - for blueprint in self.blueprints: - if hasattr(blueprint.module, "rerun_views"): - views = blueprint.module.rerun_views() - if views: - side_panels.extend(views) - - # Always include latency panel if we have any panels - if side_panels: - import rerun as rr - import rerun.blueprint as rrb - - side_panels.append( - rrb.TimeSeriesView( - name="Latency (ms)", - origin="/metrics", - contents=[ - "+ /metrics/voxel_map/latency_ms", - "+ /metrics/costmap/latency_ms", - ], - ) - ) - - # Compose final layout - if side_panels: - composed_blueprint = rrb.Blueprint( - rrb.Horizontal( - rrb.Spatial3DView( - name="3D View", - origin="world", - background=[0, 0, 0], - ), - rrb.Vertical(*side_panels, row_shares=[2] + [1] * (len(side_panels) - 1)), - column_shares=[3, 1], - ), - rrb.TimePanel(state="collapsed"), - rrb.SelectionPanel(state="collapsed"), - rrb.BlueprintPanel(state="collapsed"), - ) - rr.send_blueprint(composed_blueprint) - - def _start_rerun(self, global_config: GlobalConfig) -> None: - # Initialize Rerun server before deploying modules (if backend is Rerun) - if global_config.rerun_enabled and global_config.viewer_backend.startswith("rerun"): - try: - from dimos.dashboard.rerun_init import init_rerun_server - - server_addr = init_rerun_server(viewer_mode=global_config.viewer_backend) - global_config.model_copy(update={"rerun_server_addr": server_addr}) - logger.info("Rerun server initialized", addr=server_addr) - except Exception as e: - logger.warning(f"Failed to initialize Rerun server: {e}") - def build( self, global_config: GlobalConfig | None = None, @@ -525,7 +466,6 @@ def build( self._check_requirements() self._verify_no_name_conflicts() - self._start_rerun(global_config) module_coordinator = ModuleCoordinator(global_config=global_config) module_coordinator.start() diff --git a/dimos/core/global_config.py b/dimos/core/global_config.py index f1497b213..7bcbf8a6e 100644 --- a/dimos/core/global_config.py +++ b/dimos/core/global_config.py @@ -33,7 +33,7 @@ class GlobalConfig(BaseSettings): replay: bool = False rerun_enabled: bool = True rerun_server_addr: str | None = None - viewer_backend: ViewerBackend = "none" + viewer_backend: ViewerBackend = "rerun" n_dask_workers: int = 2 memory_limit: str 
= "auto" mujoco_camera_position: str | None = None diff --git a/dimos/mapping/costmapper.py b/dimos/mapping/costmapper.py index 1cb028205..287caf290 100644 --- a/dimos/mapping/costmapper.py +++ b/dimos/mapping/costmapper.py @@ -20,7 +20,6 @@ from dimos.core import In, Module, Out, rpc from dimos.core.global_config import GlobalConfig from dimos.core.module import ModuleConfig -from dimos.dashboard.rerun_init import connect_rerun from dimos.mapping.pointclouds.occupancy import ( OCCUPANCY_ALGOS, HeightCostConfig, @@ -54,11 +53,6 @@ def __init__(self, global_config: GlobalConfig | None = None, **kwargs: object) def start(self) -> None: super().start() - # Only start Rerun logging if Rerun backend is selected - if self._global_config.viewer_backend.startswith("rerun"): - connect_rerun(global_config=self._global_config) - logger.info("CostMapper: Rerun logging enabled (sync)") - def _publish_costmap(grid: OccupancyGrid, calc_time_ms: float, rx_monotonic: float) -> None: self.global_costmap.publish(grid) diff --git a/dimos/navigation/replanning_a_star/module.py b/dimos/navigation/replanning_a_star/module.py index 0c11c6d10..afba27915 100644 --- a/dimos/navigation/replanning_a_star/module.py +++ b/dimos/navigation/replanning_a_star/module.py @@ -16,11 +16,9 @@ from dimos_lcm.std_msgs import Bool, String from reactivex.disposable import Disposable -import rerun as rr from dimos.core import In, Module, Out, rpc from dimos.core.global_config import GlobalConfig -from dimos.dashboard.rerun_init import connect_rerun from dimos.msgs.geometry_msgs import PoseStamped, Twist from dimos.msgs.nav_msgs import OccupancyGrid, Path from dimos.navigation.base import NavigationInterface, NavigationState @@ -51,15 +49,6 @@ def __init__(self, global_config: GlobalConfig | None = None) -> None: def start(self) -> None: super().start() - if self._global_config.viewer_backend.startswith("rerun"): - connect_rerun(global_config=self._global_config) - - # Manual Rerun logging for path - def _log_path_to_rerun(path: Path) -> None: - rr.log("world/nav/path", path.to_rerun()) # type: ignore[no-untyped-call] - - self._disposables.add(self._planner.path.subscribe(_log_path_to_rerun)) - self._disposables.add(Disposable(self.odom.subscribe(self._planner.handle_odom))) self._disposables.add( Disposable(self.global_costmap.subscribe(self._planner.handle_global_costmap)) diff --git a/dimos/robot/unitree/connection/go2.py b/dimos/robot/unitree/connection/go2.py index 1a81f7284..245b254a7 100644 --- a/dimos/robot/unitree/connection/go2.py +++ b/dimos/robot/unitree/connection/go2.py @@ -19,14 +19,12 @@ from reactivex.disposable import Disposable from reactivex.observable import Observable -import rerun as rr import rerun.blueprint as rrb from dimos import spec from dimos.core import DimosCluster, In, LCMTransport, Module, Out, pSHMTransport, rpc from dimos.core.global_config import GlobalConfig from dimos.core.skill_module import SkillModule -from dimos.dashboard.rerun_init import connect_rerun from dimos.msgs.geometry_msgs import ( PoseStamped, Quaternion, @@ -210,15 +208,9 @@ def start(self) -> None: self.connection.start() - # Connect this worker process to Rerun if it will log sensor data. 
- if self._global_config.viewer_backend.startswith("rerun"): - connect_rerun(global_config=self._global_config) - def onimage(image: Image) -> None: self.color_image.publish(image) self._latest_video_frame = image - if self._global_config.viewer_backend.startswith("rerun"): - rr.log("world/robot/camera/rgb", image.to_rerun()) self._disposables.add(self.connection.lidar_stream().subscribe(self.lidar.publish)) self._disposables.add(self.connection.odom_stream().subscribe(self._publish_tf)) diff --git a/dimos/robot/unitree_webrtc/unitree_go2_blueprints.py b/dimos/robot/unitree_webrtc/unitree_go2_blueprints.py index a06e7b645..1ea3562a5 100644 --- a/dimos/robot/unitree_webrtc/unitree_go2_blueprints.py +++ b/dimos/robot/unitree_webrtc/unitree_go2_blueprints.py @@ -32,10 +32,8 @@ from dimos.agents.vlm_stream_tester import vlm_stream_tester from dimos.core.base_blueprints import base_blueprint from dimos.core.blueprints import autoconnect -from dimos.core.global_config import GlobalConfig from dimos.core.transport import ( JpegLcmTransport, - JpegShmTransport, LCMTransport, ROSTransport, ) @@ -54,17 +52,13 @@ from dimos.perception.experimental.temporal_memory import temporal_memory from dimos.perception.spatial_perception import spatial_memory from dimos.protocol.mcp.mcp import MCPModule -from dimos.robot.foxglove_bridge import foxglove_bridge from dimos.robot.unitree.connection.go2 import GO2Connection, go2_connection from dimos.robot.unitree_webrtc.unitree_skill_container import unitree_skills from dimos.utils.monitoring import utilization from dimos.web.websocket_vis.websocket_vis_module import websocket_vis -_config = GlobalConfig() - - unitree_go2_basic = autoconnect( - base_blueprint, + base_blueprint(), go2_connection(), websocket_vis(), ).global_config(n_dask_workers=4, robot_model="unitree_go2") @@ -140,19 +134,6 @@ } ) -_with_jpegshm = autoconnect( - unitree_go2.transports( - { - ("color_image", Image): JpegShmTransport("/color_image", quality=75), - } - ), - foxglove_bridge( - jpeg_shm_channels=[ - "/color_image#sensor_msgs.Image", - ] - ), -) - _common_agentic = autoconnect( human_input(), navigation_skill(), From 834fe3bf83db496d2a93c91574bfe6d7f2c85dfb Mon Sep 17 00:00:00 2001 From: Ivan Nikolic Date: Sun, 1 Feb 2026 14:02:34 +0800 Subject: [PATCH 24/36] old rerun kicked out --- dimos/dashboard/__init__.py | 34 --- dimos/dashboard/dimos.rbl | Bin 156991 -> 0 bytes dimos/dashboard/rerun_init.py | 166 ------------- dimos/dashboard/rerun_scene_wiring.py | 152 ------------ dimos/dashboard/tf_rerun_module.py | 172 ------------- dimos/hardware/sensors/camera/module.py | 20 +- dimos/robot/all_blueprints.py | 1 - .../unitree_webrtc/unitree_g1_blueprints.py | 7 - docs/api/visualization.md | 229 ------------------ 9 files changed, 1 insertion(+), 780 deletions(-) delete mode 100644 dimos/dashboard/__init__.py delete mode 100644 dimos/dashboard/dimos.rbl delete mode 100644 dimos/dashboard/rerun_init.py delete mode 100644 dimos/dashboard/rerun_scene_wiring.py delete mode 100644 dimos/dashboard/tf_rerun_module.py diff --git a/dimos/dashboard/__init__.py b/dimos/dashboard/__init__.py deleted file mode 100644 index fc9780593..000000000 --- a/dimos/dashboard/__init__.py +++ /dev/null @@ -1,34 +0,0 @@ -# Copyright 2025 Dimensional Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-"""Dashboard module for visualization and monitoring.
-
-Rerun Initialization:
-    Main process (e.g., blueprints.build) starts Rerun server automatically.
-    Worker modules connect to the server via connect_rerun().
-
-Usage in modules:
-    import rerun as rr
-    from dimos.dashboard.rerun_init import connect_rerun
-
-    class MyModule(Module):
-        def start(self):
-            super().start()
-            connect_rerun()  # Connect to Rerun server
-            rr.log("my/entity", my_data.to_rerun())
-"""
-
-from dimos.dashboard.rerun_init import connect_rerun, init_rerun_server, shutdown_rerun
-
-__all__ = ["connect_rerun", "init_rerun_server", "shutdown_rerun"]
diff --git a/dimos/dashboard/dimos.rbl b/dimos/dashboard/dimos.rbl
deleted file mode 100644
index 160180e27a62916f51989243cdca2a4200c6f13f..0000000000000000000000000000000000000000
GIT binary patch
literal 0
HcmV?d00001
z&I6DXJ_mk$H=nZxlEUX~hNSR0b&wQ3=O2*H;d2h6Ei#`Ya}^4o(}v@^`5X_33+$d) zWL_2E=5w4NZW@s7=5xMs^Eu!%)SMYm_#7|;7vOVztQ~w#AU1IGIeoB~!skRmlKC9I z=!+=xIWl*n@HyAm+VGg0`QyHSn8=R5*Q;d53)Quv&gA<2A> zT(!*S$dbb6yl-pgbM|37h0nohg^hTo3>;50@?<`TIEy0_I##U~X9YqB7hda)a)rFlHG|Hr(>eI8 zBvc+<0;8isL^ZtZ1F`4-FN6+8uuD9wBzen=u`KaWagHWAfHYSjceKJ>jFI_(%Qd9s zh2!Ly-0=!kFhQm|poeh|S14Im%3EKhBvR$X)e1-QKS=82@?N_zsS{0bJQzpf|C#c^ zXDJ8&UnX^66{%Hb-2PuCb=q2Cd_~X>ZpsJY%H-v177la&E7+dHe(=8bA;c+P=1cp` z6zfY3p|nDzT2nN9Rk;E5fQfebQGp2&Q$!f%7x4r&Tg^e0{#0S27CklVsYy?V^>j#2 z2ldpb$C@>L;e^%GEwAw_|^xQ$1rl*HZG*NFJpQ0yBR!q`Uq8<}A z$flM%1q-JpyR-G%b)?b<_f9td5#==A1)1I;f*Y zxMOv+Pe*%o+%vgLM?2w=)gh3Fs&(SRj4)c(*XICV0?5$`= zR4&j80kIKOicm#?it*VxL;?v^BoL;n=A#mUas~eV#|(j{VH;0M!{k)~b9}MzE2UtW zdV!JzO2pUrJo$HyBCvS5z*k1MB1a&66A`Y}Di2!YLFFD)>OuR1sL;b)Jk5hDdr^u9 zX9OcWu(l)ldHhL=ZDNm3ip@5llVWoQT$o}D3p4L-2@*ff2uQ}=_gQ|(?@?@l{6*6n zSA@spgJI*LQ%#IV45l(7MpY!X)~lP^K1Pb?ECtp8yMY!U6x*Y(XfaR;d;l~9y;X2< z0t<6@Pe%pYlKryfZ*arx3en$XP04MMguow6Z2!{iF z60i(d3p4gXsC3@7*)ARlOVjQ;}J0w?$= zAP;y9r~`fi{4fj{0ptRY0x&G}Js?843w}1hT@OU5txF6UI{1ec_5gr3f z1MUH;fG+{i(-Z}y19t-JfzSNaC)yr{H+&F~2HXa$13m^?f&NfsslZZT4X_(%0YahP zCIX9rO5g*a8R!kK_*h^ePyy@!nt-10ieptlnh%r%+kwMC0KDQ7Fb7xx)BuM7UwFln zfSEuUuoXB6_`oxs2+ROVfj59gz!RSFc5itJ1&_R@?x9{~+17S9HuN+~+Grys zatFB+UlkdKylxKi(+M{!4)UGnImnmd+2MbG(m_59{_z)j%Kq^p4W1rovnKLH0JDL> zB|<_+oB2SV`OsuMzNEL1@+q8VB-FqbzzNtzlEN>2Ee?Ea`e+F1rT#!x?xOi~r$;9y z#wCxAzSvxcOlMd3cwj{#o&mDO#o8v5{4MWe>hnKX)?nvh5*PelSrc`i#h^M!lT^6f zgk0Jrbgr#oRo2!hLo3^tS~qljm>~wMNaS_-R@n%wt&!CATV+)wVMhU<$C-0hVkF>p z?f$K@uP)(RWt$mR7N3NAnp-pQ96*ODUhxP#G=ku4Uu7e>1Q!G=WF#@nQ@G^sl_8p5 z{tBo2V`Y7*dMy2?ZRz~1))VixN`&lI*&;kt~P5^J3_q| z2>fGUY5^Yu+oI|o>g^{ltx+Z~txwRHAsWcDez}uAM1$p z{{8jV^|%q7d}WP@j+HfL!qo6LiEt?(im(nkR(7YE?e$K_T1}^8?LCMG{KgN%Vc=}BBT^dD7myUk z8mzP1vDOUf$JXaFl$E#PSaZThsKuDw?N~EGM4&X>Gpr{zRUB&($aFi_5|OC{N{@u3 zIM%M}bgX3}Q*o@#gQPgtmO@e-Yxh7>0;RbtLUF7;4@n7>hRe_$D7_7m;#m6tlI&PR z_oVnJj2C; zfi9l4f8zj(XYC|5P$H!@`IMqWN{b?QtR=0L5Z2rSTf(u16YhsC;a5vVW(+cwKxqdg zB~W@SB*n3I9V9tW`u}V1d*HLW%KL9f+D4kTDMV!*t#8{{V`*hs$`toQ#~eEB_$lkK#u{6uS;xAlS;sooF~>SqnPVHP zSYz}1KIc65+;h)+@B5zjeGerFH=mRHKIb{-Ip;jjbDs0(Irm;V&0Q}KO0Pp^-dKAO zk#b}0BlsmZ)@a93ZmfL)TUzf}k(!V?@2iNz#u{3@55MF=>HYX6HrBAkR$nO(O1}Wk z$c;7ZJn_cbD@Z3c)+%Z9kvG=L@l$N9(V^)Dm2zY4ax|4U*4_?0Z>+tOH`er%(k&=Q z?yM~Xb-A;47k+IEXBF)}BQUxwG~| zV8zZF?Qn^WHTogXN&l=aI46A`s?7Up7vrZmC!M?%y$zArS?j{FqS^SV zj!N^^+Cmg3PfFi`;>6Y(l_GZ5=xjT0t!)DGGu3OL>V5bXJt$4-wR6(fFY~?1Z~DQ; z+V|wfT6j*n4o6WH<3>4`2)vVH)HoA` zLFD}x6WNu5EFYtOd&NvdeZPVbksUYrV(|`H6LpNb6bG)rUs~+`NtbA^-h+0CrT&#I4ga$l+Fhp##_%y zy@Paa+8ZwQ4wOz_xWCjJEcFITJxs;+mU??IAuE-8YT~E2tJK?B>g_0{-H>gi-qupD zztr1Oidm4ixzy{b^fr}x8%pVem0hJ?XQ|g->a~^P9c}n54L;xFEkXRnm{roI%7RkQ zFQxBi-82cqP(}O2SX0p!4!C~F4DTX*Gs~Mp)12>L^fM3pIhkK0tqc}>)xB{?m7dL5 ziS=RzNLPU?u}iTM`)avZi8bc85<7`PsqDQ7y$JPKiES1uvHy8fC16nV>ZZ?}J_~p3 zsCeD$)~`(WG~Jh8-;HfHKQB6$y0Y>=WIrIx-*T3P}i-M-z8mwZ(sS|*R}Q73UB33NN8V)S91GGEXFo1 zqNaTc3yX>tv>;fCwR<0|!n6U8+csb&)=)ax@&Y%4?~!`dUS-Sg zdl{c1#ePVQ*D?gMeVrxveB9*nW|%|IOQ}`XNXdl8V3k!@gPPz|q{ov#McVI8X!g## z$_qY4x_OB=Ipt3%nde=C#d6gpUEcY@cdiyqsCs+KyP%&x3f1o)sbb^GbQrtR+CEbz~-A*be7=3S}Y%NqUtz*nG`?8 zT5GFVYvJow<>rI6!N;L9ziu^o0bbL%jh0#`Jh9Y5pT?K^=qr6o%&+vJM@F8j-B(J_ z)z*u}7S@3MU?U!*=ErKw8Ykd++sf%rYA-U}TBd*9>K3)y+KDH^&DB=)xmv8Zn>~L` z8ZWBE@JF5ejV1nhlS;O!#nvO(ek$<}c=!zcLVBzgYpq?_IV+vKY%@N)EZ17$Q?>Lw zjaMc<%jbKghbENt($}r%?fiIZ2UEvEe3kE5^s9V`rl(m;FYxf@O7W3C)Z*f|j9ulC z9lB6_+4GVMwmFM5uw7J~t)n2jqcB=Aa^cw|S(pmCGfF%S@{3m+I<1M;9 zdi9y@7hFO|C%;o_9-V}(7grJX=#R#mJ(^Q@LtWkVk*|2R)z#O{>n+Y6sjtkP276R> 
z!!B<{llVYqp_b^4(=RHC`zG=^L$vPxTh1AxEQaW~N9*{OJrm{kLcdjZJ2@YP?Rm@X zYrV_ZCgrw68z=5*5q9VeWq}fkCSrXz5zh|Eoo*3Vf>|y8Xi6pj)2Df{Z`+J;Go}*@F!tVnVcdsn zZkm%*uuBa$5j|irtqL?JAq{+7hq1Sh$9242@WJhb-wsPTuMTX5e-*XEnsMLQ z7$!L$vZEbzl~gp+e*~)gG~jEV92*140Drzrhrt&(qCp@0#7%sbWyUIejC- zn|A><(br2w@{ITL$Vul^ROBFNZ$SV%M4jjVIea1bO}wV^+gf{CaTA_~_38EZu5G%f zV{IGaIUTiU28I0UhU0{As|kAAv=VUxOt%`piEkoJF9 z^rho8;mW{2Y6sDwcLmi;kcF5}r=mz~(?F1j>_h{t@N+}if;tdFa{6@Lnln*NDagGf6RewYB zCxtX2eUi3p)RJGnWdTqEcm3kUx4o|^y`d+)mR2P6{bj-9cBX-G5iRn(2d^SbcUSAG z^i@q);mt``eMrdA%uiFlK(AJQGkYIt0e;c9B7Nk%ppSF{SB7$fK5`3wOUL5MRc&(` zX3m*Yo1WF!T3f%mZC-6_V{3iw?AF<{>Q>L4H80&dr|FK4^!;JaSk>9u-Mze{jnr{< zM;cR8u`sJaA32QQeFy=MS3oW{eWZ`m)gv!VOdfT^16@cT*FO$1O{xp&w(K9FUMdal zo=)w%ky?dhp^Wi9BeSp6fsfRl8U%QgyT*}p@NHW4Lh;{yHB zb!jnhQkQ1et-^as(yg_#XRWTUt)JIeUpp^7yPf7el<5ShqZ$&pnz8tN; zl|R&}BcQ(xA<(J419m!fh|?`V-o_&8h^&b^=7V>n52Rlh{#ceu*4@)v)OP{0>Q$dc zhqk%0utn1WRgO2r`7LxRLK;PB^pxg#&p}_d0;lOrWIP@KejjwB3Tas9l&v`koUR`? zAx*)yrmds9tF!e1@nLGv!L-4{OTD~d-1`s$ofvw`PA86YI^vTnJF@`oB^^j1&~9bM zIDD%d1c+DCPAR#mF}ROukRH((9JMP^CdAp4A%lw!c+w{Iq7r(s zA2iz$0=+mqXr~wDTqlwdrl?Y1sYv`z8eisfo+O?tDbvdi*Y&8m4@hRU1zVZ!G^9%* zkS=6zz21jasoo)`L$;4-sU3dvH-v*qn7epVd7xT0c;D`Sr-Yy&mPM7gQ z65;Yew*@8lIx!SUTV=z8@kq~yXD4qRggYrPO_ir(%cfuEuk+aZ{lpmDCZ+S zy)B?WJ)9q@(SYYi%3bOO{&XV5whq3>HKX$Te9iE z5Ywy%z0C0=%9J==AQ=Shtcj_Up$p0oBWbKYTs)NBPK-hqPX1-QK1@Mg-A;sS6&t=A z`jE$3r2= #kSw`N6ttB7VzpTdwEpIh{+L=wq6s6C}eNI$^a|Im9$co`tSeQjmp9 zy}0Jt?0QiJeC2_7y{H3D*Nbrcu+fXc$B%00sf}JN;&eXv9gZJ)*DZ1U*v<3FJ^T;~MS zXhi-T`_-A`ktd|7W<}m>kTuCc*9W2>=)*DKkNzNHzm)%?{QFV+CB`+iwzYbxQ9=HT z(Z7*0&7~*Rs5{A?+B1VfesxRKk4;RMbc6UN^+U;=RvqHY2tZ|BFYnjr6ym=q1OJ2e z<Qhau^qta`k>o~QRu?Sk%)Z|`>ogfUHpFQdq7L$d!at9;1dmL z+UO=5(gp=3-_{U*)Ba9iL#Cj9#FJdQ;T!1!8>03?o5+Tw*R;Y_u&TAQ^MU2*HC;Uq zcpErhj856KEm5b2nJ(>jT#b;$<}6-!|LyIq>(g0vD+M_cA8HWD-ef8OwxaG|-c5vv zr=cz=J@(p|2DR0dey#mmd@up@bp4|7D6lunfL{t-((sinjGv18t-#Zset{+>3r=qnn2~ z-2&ukL@2a=$eO5cKKMv=sYXybulrza_y3tr3UuH)H}?M(OJenFKj*DSKGIWD=hZ0H zJ>6>W|5@k_@mtfK$lm`x;P*l&Xq>JOnKwO?p5wxE8^bfwBhO(}QoqkSRf%vX5;$ zm~Nn7G4@CCCeHR8=KR!NSGMP6$bKj2>H49MZ3lrL`G?5ZCg#3RBc6Cfm#!o41+3|a zjr~v35ivF|m)kT#SL$PQ<-|cdU75=DjnNgGIwtB$57SNRubQqXalc!fB+0HT2beC6 zeODr=%;6e>bS1*0IGw3_LGs6?&RlUQyUtVqU;bjI&b$IWve21g>`H_mv`&oEnHsJK zjgO?eNo%SWP8XvyHg!zYnO#iRwa$3NceJNiooQw|E_G%x^kEz7HU)vlnaDWg13&+8 z{5Z4>INin=E%4CmT3jjGoxkEm2Q~nXc<`s0REbJ*h^Zc}{|D z5xj}BDeau!rGBi3?6c{|3gDOhC|*A{0jKLnxR-3RH&^&xa@RlE>Blxs=Y!YbUUJ@b zOVpE-Oy5FJ^4d#o0bfZ^XpNDNFS3vG(R{Ahz2rwBbDEEk-AEx|ema5bgLfLic3&v` z)ILSa<&uCq*WI-oE7W@^Qr|;N{c|XzR85^TB6oYc5;YW~8&-BiPRQ zVr-;M+Y)vE1k)v5!!hP;bM+g=S0&{(LDqeOX2^Wzc8YzdBpusv#7@VC zjC8g-=AoS=w-_BOY8+n5bS-pDe27$jy)^s$emB!39mA3PxG^}w*Ek(JuC>?Idhk`~ z&koclNym-?f8^gYb?gU-Paz*&S#->%ugBT32rshPu`<+ybb$1p>X9_JpJt@9)v-3t z7o%e~ZA-Lc15DSYj#c6eF!7=o9b3XQsJ*Uq?9P|R#)ux-D7Ir0N#E%Dz)xsAq}@z; zQT^rkE6?8RRCN_3**xui#VPGF&)d%W~bhdq8Q&5mx@K#6B>{bg2(i zBV^fkyIy=NcuDI@B+ELMS3wrUiyDN4u{^@pI6J$PX;6Dz^?{ZDkzK!9fM4{V@%q&X zTo(PZ8SCO~Y=jTl^s60uZ=+xRMmk&l8sdC0`eoC$MEy#EZ&YuW`qjfUi_x!rOv9yq zJq_L2indNch_hdNfZzRJ@%nWDI9c2_19ITFoj#o4bWwef#j7e;^|Mj$--T!1O1-?aNsjzDWfnL29s zjr|UrHCxI;)5Yr@GNtho*coR22s2B~~n`0cwbpN8;Eu|D8*eOUyU^o8QefR|?w z@#Z^Wxy>_((jW2KFJ#@2B{E;2G1z6wVi2r1#%9wT~!%1Vu zG)_l)O=-Qk}EN+5=DAI&x)IRP%&m_7wS2k(gUMhs$0@C<6WHB zr5@l>RyF^~R}aW1j<5WP{b}N>JAEK7WWP{t?Av3$lbZSYRhP6sKjQ}-AKQQEU$;H` z_&Y}4^_8i^4fUUTtmX0V-TEIFUG?1HdF3UOh6c~0Jo~;Q`=9MQuO3Ai^~iH{5NC%G zHX-;3VbR zcsJgM^%cCY_se(>#UJDTCD8dJj1vg^ccLF64F4fsf{$?E3;6uW4y6ABj76WvyRSY6 zyZ2eo>)nRWpge}T{-=@VQ%Lhkyw9m0b@>F+d<!P0}YIFP>mgRl)KG 
z4L-DkCwJkV#=Q-+uq(c#6=UUcyqoJz)OQ)u{RYlmzaP9tr|r7~&($x%w`Fcezrle` zzXfS-MSb6c`rvR?^+M0v(TuX+h5JpoFF@WKQ1%hbL$oEe0Y989#h%y^c z<{ZdnHr~rL3uV`VcQe3;TJUNb=v)gvz&LGs8*pz0t{Qo+Lffwdt+$}eE6~2FNb{>` z*RP|nZJxtBtq&B1I(W{Z`{o(1+YImBsQ%cTA$ zS%+d}vWe-ql*x7Ady-5hwnk-w?wp5AFjK=DSK|7~f}8DS(#2_A%Vdb@7Aup=cgeQ6 zl*tcP!1o7Un|ABJA%7q7bX6k+_C@p$n#I$J%_i{z70pt~esKWlbz3#W^`^d<%T}G> zv?K$V@r?PUvKo25>ts>0d@UgrxpbG|y{!*G&OA8o!%<}cukX6IB2@4A(e*Tjbk z1l=Z0Y>VnYhN-MPCo4qyZt}YR7JGeP!D(IddLPrxm)FNRp9@}pW!2bN3-}Yq>-*AC zUSnF2mDkd`#OwCXh4%a&=CrQ)UGwh3bbAHobHVS+z|$q*V-& z5V0fiPRa~>GK_p%5QuN2^HUbt^LzoPr8Z(Jn9+YVdqVx!>YX{;m?qi9%stoK{?>z> zpV|{QA5#{?@;#1^t(Ls60zK2NtZa|!I@vHA83gvl_*uiG1w|5dly^WW#RF8RNeX%@==5zg<5|DObpmw>O8 zEQ7%Ad<}B;5l2@RyQ4cuGVIPE@@_()x{}@LZ?Wh70H;kMU!lGo@4eY{U9Woly2|JL zu6X_0-N|<5+0Ll0XR$MS#hU^q=y~^>t z-p%=4@%nD?Gs&(rtc~(Ii(SzxPWzbRy_BTOg9tjmJ8!qw?QNVk1sa9&`w-J8l;2aA z$ojhC_X}&{_itW>Tz#~YF0!KuW2fnD%&;XR$g9s`4src$^t=4mWm~Cy;+MJSZp>k< z-to4KY0~@y+hN9;F|#@GI=_QyxRSx0>ymZ;+7Cu`{vD{ZttbqnZJA`oAKdCjBXl`mkqKcf0T<_R6?x6&*T+5KSz z-43{;b}RJP(?+F{=NxN3QHoW(m3+gb|99 zK|B01wtZkLr*$QRBTTOt8PvdsLG^bjgO?vjo(DbgU{nUg3riUUOE97PW?wkY?Xc+! zQ{gY8HoKBTC(|oN4ttn}OF66uua}^ml?cR_U>@{I@X8mk9QU*Fe^ubF3SH`g|0BrX zk5G?5{h|y08R99GL-&9teu{J6!rrh?eLJV6ygB?-dA+ym1kx&}P(5l~ecbl1pOBvk! zJIUktq`qk1$UTk+FNYGYG&P^n<YXn zKN^(-_2oc+4)zSnn{>5^`x z5DGm%T?5}3wT0wCw9NXcoh5mmC~`y_$u+yDQhF~x8p>k|CF1X>97Mb?V6TL3p%2JL zCgDD~DUV~^1{--yfp3J`>slUNOt)Bh>|;7E<*{)~ygb?w?~@=PAo>@M-8S+F_r*`daY9Cqdw%HLDymSQfNs%H!O$2S13d z45o2fl0lCBsJ!0Gx|!)t1>H#q`XOg|{9CslY6spM;CwW86?<;_zE6yeHG>X~4Z1CA zL%feTTtfH5Gt=?>ChJ3E!2yJR1mYFJ?eKTl@_8$#bk+PR6=+bs13LJbu_o}~2ca~sdA*eB=F97T&gX*H z3qKjZ4!axt@rfC@$nFLF5%ZmJuSGv(94^9;o_IctumvH|?`HTiZ1uZ`(^4CA_?WHF zOAj&4YS1j?ymUEyUV$FefJTn_jp7oL#?EcN`>F73g|6p&5${tb*7MM}%=Sc4Zi?_f zuq!6t2f5xhx_*q)y5zeL-wE-pP`>wae$pcse829~q0Z}kpN4oJW#AI_B|OiVo$tDx zF?l~9J_~BQ4e#4ItxMhyFwH`FKg#)C@&3ujLf#Wkz-sY)#tV?KFW{{9CUTB6Tv_ox z5Att9pt_R%+4muP-XGz#Dda2kENKn=BFTELS6#OU;asJi^Sk2pHQVCtOBLdMv=f)m z^DOp7uXN%W>C}hq`MikJrcijHeC}r&@j9(nIiAnMoZl6nKmPlnPV4q$6XJce6IU*K zqE|X_g$W%VL}p#5x4`FPYftuZ+7xIM%Ii_4Q7EtH!zV)Zb;avfKO3$;t3WrfAD5w? 
zzJT@f(HV9-<23O_EX%o|| z0lh-cM-MU$S2B2Ld%V7PBHl+maS447&qn8#LojzS`M(MN8(V$f#c5sgf0St!!+#&X z4r+rd{=fG5un*|=<{09A;y&{~V^1ucH)iw$eGX&F!24}`8BF7}E@jZmG>ehJex~6{ z29NyTaBQuTEEn^bFGAM7fc10H(Er3Up`it$ImW6flJE_#${ zl073`OKJvC+8Re$I&VIF8&v;B1Xt&xU)vG#KBE7K_fZZmVSmCi(b@PP?gg8?Kgo3` z`BEFQ+5T$y%P5UY-fv)#XIoH9vic%~Dl_dW2#*y{Zb zPV18QCzxiTysw9U!nNMN@`vHLtJ|D6p{~AQy|M2eb2uh^$!@q^6Y#$eG{`;@e1hw5 zqx-4fm2IVcA{XbOyO>@L=v|JGwHd(2`OEh*4GZ1RBp;y}`l9apqmci)-DyL-k36`9 z?uWi|*qu!3R%fAm;7_sEe@^R)|HqkLG5oLpn6>_AatMqJWg7DTl`qB5dyXL9C+;)< zi4TFT&cXj++N1U>^*(Tt+hEfNs^NzrdAX8757R3~2K$(X%RaDiSJ(%18}wQ5%18U@ zip)DRrHiK)*!>hveFXnUkhdS99)WDo2KZHor#AfG$!RHX4!=`g=b%TKE{y|)oP(YZ zp9QtSr3_yCawr4c4jn_hkFszHJ4CvZtsf{k$SF{)3`V*BHZrLBJ=s>*GU#Qxh00)% z^ShM6BYzSvgD%ATBmv8@K*@kD5$XL1g5D2$;iIwb2fI0~YZ;WmCqZ>6R0az;ze^dM z|J86jApHc(#Tx7y@W2a3(2YSA ztj<3(T^eVuL{OQvwS7P@_>OoumWJ!H5vJo(9*;d9%0stHn-TA$uDFC<3eP@U$wXME zIC*S?zs9y-?B}$uhL4iIHC2C z$ehXSBhx-(FO#L5*0oIbFx_Hha+2w|l*yC-FB~^?oAm%Jb!K<^jR z5bvWbT%up(TE~Tlwv*31&xgOlRtD{y)};&vm}W6D@IL2W22byeA1n4C-WMW}GN3-3 z-8QMd5V1>UU)axWpgu?P2xwAKrXS}xr*$cZ1@Mhfxy8t#k7>B<3k#nJ{lAop&B) zx};kvghJ0dPx*teUl1*`ero3;kMqti{COx3-8Ky&-bWd@)Er9jF1Jh)=TL{Z4L0&9 zhtGp#=UN^sm~OH1*u`|Hf4Gvz-QS3xLoG$Tk8*ISIaD6RSyzPqAL0Ls%7bPf@rRztP8~q#M<6~De1hvu^mEy% z6nq~+zsRxol-HT(4NP|`>X6Tw=RKT{#;;<}Jl{PK@>;h?OA+sbJ}#mEDg4OFZ*7ms zpHEi{L3#TPHx#aZ`rkgLXr|y(>cfsrDzZK8x{fPI`UR;9L_=H4uUTb?vCa({0 zeQosmB&T)F>lXM+NZ$GKx{vd@;PnmP4%Yz0i@=7gM4Np9%YE#qPT!zrL6L*EY857J z$1w75K_I@7E%D)>u+{HQPD^df;lsB+_dLint3k7nbI;x%%lcD$Y7lbFa}<-1R9n-j z*^u|tV90yjrtC(%PnlTHsVpo0YqrJY{Q%dUWSq;6ALX3H7PAl^qkaS0nkw$+OFnyoQ;-w1z%t=_kBT9>@v$21G&{YlR6iuX@E6Y`$)F|aw$ zL&m;j-VP$O&g;}}dtNW%v?)-%1>G2?f&g(7k`PkZ&eVjH08in$DlxY;o>-q3~ zP<>tT`ql4+`vX;=8`zM`P)}dLo;MTsB-@>F@YAp(Bgm`IT}HY7R93F>v*u4_TgiqK zzsz}p>h1ZQe`cE0PYXW(%rsod;Gw?^WuV)ZPQ?4DCoZAy)9}Mu4%!^XltC~2HMTmx zo71|Kfd`)jm0yeu7BCH0GMMmJ@iI96Y*Ys1YqXYuHkUDFQ1&%@8PsuFmonJQG>ehJ z5YuoagWow2o&~J}p95R;CCJ(ru;j{K_qq)_{@054!UmBq&w~GIf7s;x zDA%3jo74XPndFm8-fv=>h4OxY^Sk8zLw^%L-|0lW4?f|FnOi1lO)%Su-#gd@|BS8f z@8YyB`9I1u3+4ZO_${dZF8Tl3aJWyV+np;=SD(0#i}pQ3y^s5la`gQ30i@S$&Izu! 
zjozpBx!3z1rkO&$s19*0Ky<9V*~5A3fqy%KIS*4iF_z!PUi*WP-@2_?hIk+JaS45o z?n}niIP{a7{BDCk#a7SzIju`QKf*LqXiUERPQf2Rb$7+@7k?OU+Ycb#M>)6zzo}2= z(D8_$$>jM#t~a$emu)Zmrh6S<$~03bKVP12<-D$VzW8rLo>N_dxy*X-$QQ8eOCi5+ zdLT_MrSRQ*`(bKVRZD>`(y@Fokr+StlEP~&}R?jzZT9PxJJ(m4g%`4D* zxzO1Q1pfp+d?)Yz@OdwMCboP&%xR0|bInt-%oOU8FQ3~uuPZ)Z zbugaKm5BFIKU^{M!vsES>!Lwq>_rIpJQaQvTRt~)+G6?K&va6#N4|VMz_+Fhb@wdC_t5T2_vofW*oytg--{6Nx)1&lTVC(ww8ipz zRHK7W)El-#t37aW=lAA+N7mUDzh8YJUayZL-bWp93B8Wa|Dt-W)mpvN!sPV{t~bdv z*VveX?}X%1EU()&I)(6h7w31y>(z%s-PUdO62$vxFD}7rnq%7NcQ6?#Lwb|nZSalQ z>UKY;EtcO0H9Cdxd&+*dy8Zk=hWsYI3v9_Np>Iy?AB4P)IZ=CGM|a1{&a`e!;`lwn zb+^%P4?dGXzex`=<{@{;GO5NyFKHgqtkEGqbaL(EZAl;JZ$#c4`zV>DkteiI;e2G_ zKZQKkb-WSrKJwraI$nbxR(#jyB&Lox!yjU+<2{^~bi5dz4`_4>;rS@%cggdo4#)F+ zH{yM?7nk69)CZj@(xyPywfTw3^8xrvY-NOn)4zEF*LU%-<8G`86G1AR_o%AgVc5nCCwaavb0*vs^ak-H?YZD!^Ln8=V5CS_i0Dp+B432VI z+E>i+4Ai?&gBxW9vd?+u!%suylN{*1z%jj9Z%wQ%>SY=(<#5B%P!4)us6)Judg2nc zC}&^LYtM&d;^eRZz7bnFba7gja@fx_i;+V){0`IxmvVUa-{a-55Ai-pf#om*KkWO2 zv`TSuIKXY7zL#rVdy><-l*6L$7vCTHnTAU_EdFsgHtTk2J$T@QS9C?s62$c6Rly?$ z%AGCdMr4!v5dxdk0>6oEU)aQHDQ}MR?s>h}Vua~dgKi=3wWxwG!gXJG{#Ymj-6riv zypMX~5;iHi-;hlXaxoV416>9qTz`^vu72Q+$hNwc!BVDMs0_AoewY2=13!tEK{Mig zl7Q_TRrQ6aEz)JM6h0T*ez2L-x|YEqrdy~CrowOGx*xpwpW!oSq@Q59c(284khd>j zX-l$>2gHBvq5aSz$@SVH(9vaagxg`$A1ePY><=}dWBP>DDynYO(3wrFR%GzY#C zL1n%{J$4ao71G45-S#jYm;GY>f5nf}D-iFa9=L>U$~jI)TcFl&raXG!r?KrDJ2)-% z4VUsb$#jdAhYuf#>pn8!f5JYZj}^!NJ1URpS=gK!r4HlJm|@DK96knHnbdPy*D~3{ zbc>bA2-9)dM;<#7_7UA~JpuXq;3r*m2s!MQdZ6F5StH1+ukE(|gS|ZVb6P4t$J$P= z>ny(OHwAtW(g*5umm|b#SI`>Z<80M3rr}Z!SG*EGhF2ioM?G)}Ta|O4$#}suQtPUoj{DMZ)34Nao8&yol*2UmFl_t7 zQcg>9p!sK(dDQiy21{C3lIl=9l6-9BL&+hBur;6)XOoUI4VQBG+N<$4X$Rtc)B~5D zO$uwT=1!&z_WZNG3=VNxmolh>uccTS^e_#VGH4hL`+{zhsuAxC5m?$JO$OO)QX~8t zwtb}dRJfXZzMtK*Z;Bf9M8yJ+= z2ERza&z$>FcpJzRXwM{XvUrzO1tdc62k{YzzbOPXaRSM@9MUcWhlsY1fQyP0x*pa= zHa@7!@jU8sC;SCVKrhr~?|i9W_?{tnNc}n2`<0s7Tf3K|3OyaGmbbO`v{L;@#||TG zMIc!c-1l#iN7P0tL&{uc1el=*`#60)a5X{t>8jP!wS9`b-XB$oHWNRmA*2v;wGT;R zZOc-oL+vA4=J>BpspRqgsGq=ZK>a*U9w#AJI-^hZC0nSy4JSha#P4AQo&V*aP3^Yj zeOB3~@fv$ztD&a|iqdi$Eui&nqBTYA^Bm2DNn+d1FW{ zzekX#A0g294KL@%?_Hdp+Lz%og>pSl1Sc|mOhNSuADATM)nnK*ogp2F z>w|`lD|R8d)@M{I?>YDalK6fQ^3CLXUYMI<*~dzhL{D&qALuuLi$d>iHA!0VL`9AY@H-O?;v-gY)59RpwFxNA9w;e$+c|$D@Hx(eW|EH33~kIT?CG}v z7pMF4!80l+lUP$%XX^uE8k3Hh(3ByMsryTh=coI9oZdqBTTqwVF{2XC|4=)U?E4|6 zN%}-4NvT*}}H?Bh3sW}FNLA#>7~*e47VbblD>`?-!JixI9f=^WWN z$)Du8WEtw?TKAVS%@oR~cE+^_(Xl+=%6aR7Cx5tU2bDHkKI3@}_U@DTT?by#o_X{s z3iZTW2FHoN`w=!HkWLW10R9@{pDn+8IDIwp6!Yxc0MkigqrjxRWjHt_&S65UQl=E*_aduMRz7lA!0Aa2q_^f=LRtAd&rEG+x@6z3v}qsG zg!WsN>K$P^F6FTi`};{YX*1-XFeYdo?|4}pL>qLQv;+Q>d}J}q=`C!M^;wW=w3X^W zvP~El!geLdpq**BlmYFKlCQusd<98znF`({$R+fZC9H1`qZC~(jqrcuBbRngPjVr< zlBJJaZ|e^qV47Lv61EFv#`O*Ip_F4bLT#boQa*bj^GeVp3E>jwJYPbbFF@(9Th)H= z+IyC-e4r=2yt`v#dilL;so$0L4fY2-zI$}-`n zq`H$VIIrMCCAdiOmx!5Tx$-x1x#tm%t}^lGd0`vj3F+v0V0l+-PrFQ#_#agp+kiK}9k=}lN-Lp=3Ax`Om3EV0S>TwsVUxazH(@iqDIXIa@9E9nS=>oiOz`X`M$Z$WG z(<=|Hsrn2utne2^=?^lj@H~JAqr^9R- z+-tyt1{_SPgZlYA*$v=c10FPB@1kgVz5(|d@Sp*ULxNg;eV#z5@(p;foE9ZKWN|?R`~S>o?(UWHt-B9e2;-=SmEzA z@C+;beFmOkg}>jxGpz6%3_QaM|A2vKSm8Gsc!m}JK?BdQ!f!J03@iMH3_QaM|B!)a zSm8fx;2Bo;G^+S*J}(I2Q;qW7`m(M)(=R z)g_*{7~x@rM-e`aumj;BLgfU{`!$5M2=^m=1R>bpLDsyk8TaAIlNqz?R;^sMI^9}3 zd)Dgu+WL8o^|kZTvm0vX&YL;6p}uWy{p^NWGwAUGI<7&FDdmw(6p)^?dRAS1>+IUb zm8~ml>sPkTs%>30udQ}wLqlC_+sv75tqm(<3b4v13RtsPnU zt8Hy;t*@QkI(t^#>bbM#rCaC36kwH&4g`q)bgr!sPSjq*pi0Lo?eN(n#Jgdv$H&oK;A^dQQ4+PW{Y=nQf~C zC%e|I@0rolu_g_d=h}3q{?lGQk)x`42q>B_&)SE{t$0JN5smFmccyV>VBK17v3d$n z!qz%mZ2z0`+s|Ei=lO5C`GK-m|LhBMuh=i(^Dcav;crR!hmSJ+9hv@pA7c2s62ADu 
z3_l~`8_OB~3z`39Kf_N;c*8>sKPBO>rx_lU@Rz1B{B43?nDFk6{+iog`Rc$g9s>L; z4bxwIQo!Gu{8Ixy3Al3CnvG2of5Bf0_|CbnGCXA9p9Or$1>2utd?~f>l9oLaH_G&7 zZ^$KxV)-wQ2IMLv3%=O$VNS*6`u;f64emUR@XWheE@!j872E!ga!EDk%OBTWB=0|- zB=1*TChvds7J2{4HS*p&UEbf&An$wLDesln%llh+T|9lcg#Vi7X`lUF34im8@_y4E zdH>tJ@}BSiV%z_T^JIzVUm)+FXZUIZp3Lwge1CxNpW*Z^d><>7bjE%~-ao;7{1(HP zzFEd^WIp$nNq7U_C1n4<*!GuK$g-Nbp1I&4ow{@Wz6&Lta^}m>)eKk5`w;6vAFmh7 zFPHIs<fHOos@5QN~yTN{BW7L7b}N+^A{`sGrj%Z zihMbCdeWPA-&rlrkoKE)-&Olh_x#Vc9p=8HZN6OTdROF1F+T98O8?K4_W!c!e=uLL z)vi;e|AS|_{+}uB|7FqtK<|V5IsLcez^StT=k)*e-Tw*nKDgUS-^=j3+UCodZbi-% z<08SS(tqy^Y5y;a{V!CRPSyO!KSShys`UR%Y5y;a{^x7=gZ^@=^#4q;f2T_S&y@E6 zvgm)H&-va@nf_PiTa(u}eWCWkx|ZX-{el0>x;=R&9Pj&|!THNPn`NJz&#&2Mczsfy za=j0lF;3fms`Wl-{&>~@Q?B;`1JsksD&ma#QLO(zd{X<2YPPCuvF#tP`mY+8<40g8 zgL{tTndsoaMkeO$h&jvKKVI~|VErvpORp_5|CyfsY;V6S{WtYD7d&3{=4_Y$+1~zh z`afQJ;_0c!IsJFz|IKCZP5kMRc4nz>%#1p#V$SyV7i<4bz0L)XJLP(K+||gcLV-Wb zDsi?%XK4Eae^|Dr+2D%Rf15J0r7`3NEJn*x2W8O zXC5!@KVAA?sNNUO@%WsB@zJL&mS0n%o_{-K1z8dvv`ptJ*?4RJ>#hHR-UoMAd@YpD zV4)cc%s1ZTf3~;(Y}bFkKs_!rTcH1i=69XXcTF${z46xm)2078*!23L$BF;*bUpv? zo#pL+z4bpyu}{~0$kqP0S^rX_p~3qvjOf!2p6%^-rT?bCT4|S=L(+o z$4}1k_7|)F#j49`X1~{4Yy%3+AH4ss!0gVl^*W=JAEEKm{?n!Zr(5p>X$=NHUZ=bH z{>#^?*u-K66EUX-G2Yt$dh36n_rd+taM_;fpnXy&DaV`q&-V78?fUOK(Q11lf&Sa) zIJGJLQ%gRB`gm*q>C*oU#+YA{?%#Y%URz3V)egRxt(VAduYFy g0Pg&TY{`RvB5U-ytAG5$;>9=5y7{)-mQc+92j|+ZfB*mh diff --git a/dimos/dashboard/rerun_init.py b/dimos/dashboard/rerun_init.py deleted file mode 100644 index fb0fd5f13..000000000 --- a/dimos/dashboard/rerun_init.py +++ /dev/null @@ -1,166 +0,0 @@ -# Copyright 2025-2026 Dimensional Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -"""Rerun initialization with multi-process support. 
- -Architecture: - - Main process calls init_rerun_server() to start gRPC server + viewer - - Worker processes call connect_rerun() to connect to the server - - All processes share the same Rerun recording stream - -Viewer modes (set via VIEWER_BACKEND config or environment variable): - - "rerun-web": Web viewer on port 9090 - - "rerun" (default): Native Rerun viewer (requires display) - - "foxglove": Use Foxglove instead of Rerun - -Usage: - # Set via environment: - VIEWER_BACKEND=rerun-web # or rerun or foxglove - - # Or via .env file: - viewer_backend=rerun - - # In main process (blueprints.py handles this automatically): - from dimos.dashboard.rerun_init import init_rerun_server - server_addr = init_rerun_server(viewer_mode="rerun-web") - - # In worker modules: - from dimos.dashboard.rerun_init import connect_rerun - connect_rerun() - - # On shutdown: - from dimos.dashboard.rerun_init import shutdown_rerun - shutdown_rerun() -""" - -import atexit -import threading - -import rerun as rr - -from dimos.core.global_config import GlobalConfig -from dimos.utils.logging_config import setup_logger - -logger = setup_logger() - -RERUN_GRPC_PORT = 9876 -RERUN_WEB_PORT = 9090 -RERUN_GRPC_ADDR = f"rerun+http://127.0.0.1:{RERUN_GRPC_PORT}/proxy" - -# Track initialization state -_server_started = False -_connected = False -_rerun_init_lock = threading.Lock() - - -def init_rerun_server(viewer_mode: str, memory_limit: str = "4GB") -> str: - """Initialize Rerun server in the main process. - - Starts the gRPC server and optionally the web/native viewer. - Should only be called once from the main process. - - Args: - viewer_mode: One of "rerun-web", "rerun", or "rerun-grpc-only" - memory_limit: Maximum memory for Rerun viewer (e.g., "16GB", "25%"). Default 16GB. - - Returns: - Server address for workers to connect to. - - Raises: - RuntimeError: If server initialization fails. - """ - global _server_started - - if _server_started: - logger.debug("Rerun server already started") - return RERUN_GRPC_ADDR - - rr.init("dimos") - - if viewer_mode == "rerun": - # Spawn native viewer (requires display) - rr.spawn(port=RERUN_GRPC_PORT, connect=True, memory_limit=memory_limit) - logger.info("Rerun: spawned native viewer", port=RERUN_GRPC_PORT, memory_limit=memory_limit) - elif viewer_mode == "rerun-web": - # Start gRPC + web viewer (headless friendly) - server_uri = rr.serve_grpc(grpc_port=RERUN_GRPC_PORT) - rr.serve_web_viewer(web_port=RERUN_WEB_PORT, open_browser=False, connect_to=server_uri) - logger.info( - "Rerun: web viewer started", - web_port=RERUN_WEB_PORT, - url=f"http://localhost:{RERUN_WEB_PORT}", - ) - else: - # Just gRPC server, no viewer (connect externally) - rr.serve_grpc(grpc_port=RERUN_GRPC_PORT) - logger.info( - "Rerun: gRPC server only", - port=RERUN_GRPC_PORT, - connect_command=f"rerun --connect {RERUN_GRPC_ADDR}", - ) - - _server_started = True - - # Register shutdown handler - atexit.register(shutdown_rerun) - - return RERUN_GRPC_ADDR - - -def connect_rerun( - global_config: GlobalConfig | None = None, - server_addr: str | None = None, -) -> None: - """Connect to Rerun server from a worker process. - - Modules should check global_config.viewer_backend before calling this. - - Args: - global_config: Global configuration (checks viewer_backend) - server_addr: Server address to connect to. Defaults to RERUN_GRPC_ADDR. 
- """ - global _connected - - with _rerun_init_lock: - if _connected: - logger.debug("Already connected to Rerun server") - return - - # Skip if foxglove backend selected - if global_config and global_config.viewer_backend not in ("rerun", "rerun-web"): - logger.debug("Rerun connection skipped", viewer_backend=global_config.viewer_backend) - return - - addr = server_addr or RERUN_GRPC_ADDR - - rr.init("dimos") - rr.connect_grpc(addr) - logger.info("Rerun: connected to server", addr=addr) - - _connected = True - - -def shutdown_rerun() -> None: - """Disconnect from Rerun and cleanup resources.""" - global _server_started, _connected - - if _server_started or _connected: - try: - rr.disconnect() - logger.info("Rerun: disconnected") - except Exception as e: - logger.warning("Rerun: error during disconnect", error=str(e)) - - _server_started = False - _connected = False diff --git a/dimos/dashboard/rerun_scene_wiring.py b/dimos/dashboard/rerun_scene_wiring.py deleted file mode 100644 index 56efe306e..000000000 --- a/dimos/dashboard/rerun_scene_wiring.py +++ /dev/null @@ -1,152 +0,0 @@ -# Copyright 2025-2026 Dimensional Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -"""Rerun scene wiring helpers (static attachments, URDF, pinholes). - -This module is intentionally *not* a TF visualizer. -It only provides static Rerun scene setup: -- view coordinates -- attach semantic entity paths (world/robot/...) under named TF frames (base_link, camera_optical, ...) -- optional URDF logging -- optional axes gizmo + camera pinhole(s) - -Dynamic TF visualization remains the responsibility of `TFRerunModule`. -""" - -from __future__ import annotations - -from pathlib import Path -from typing import TYPE_CHECKING, Any, Protocol - -import rerun as rr - -from dimos.core import Module, rpc -from dimos.core.global_config import GlobalConfig -from dimos.dashboard.rerun_init import connect_rerun - -if TYPE_CHECKING: - from collections.abc import Sequence - - -class _HasToRerun(Protocol): - def to_rerun(self) -> Any: ... - - -def _attach_entity(entity_path: str, parent_frame: str) -> None: - """Attach an entity path's implicit frame (tf#/...) 
under a named frame.""" - rr.log( - entity_path, - rr.Transform3D( - translation=[0.0, 0.0, 0.0], - rotation=rr.Quaternion(xyzw=[0.0, 0.0, 0.0, 1.0]), - parent_frame=parent_frame, # type: ignore[call-arg] - ), - static=True, - ) - - -class RerunSceneWiringModule(Module): - """Static Rerun scene wiring for semantic entity paths.""" - - _global_config: GlobalConfig - - # Semantic entity roots - world_entity: str - robot_entity: str - robot_axes_entity: str - - # Named TF frames to attach to - world_frame: str - robot_frame: str - - # Optional assets - urdf_path: str | Path | None - axes_size: float | None - - # Multi-camera wiring: - # tuple = (camera_entity_path, camera_named_frame, camera_info_static) - cameras: Sequence[tuple[str, str, _HasToRerun]] - camera_rgb_suffix: str - - def __init__( - self, - *, - global_config: GlobalConfig | None = None, - world_entity: str = "world", - robot_entity: str = "world/robot", - robot_axes_entity: str = "world/robot/axes", - world_frame: str = "world", - robot_frame: str = "base_link", - urdf_path: str | Path | None = None, - axes_size: float | None = 0.5, - cameras: Sequence[tuple[str, str, _HasToRerun]] = (), - camera_rgb_suffix: str = "rgb", - **kwargs: Any, - ) -> None: - super().__init__(**kwargs) - self._global_config = global_config or GlobalConfig() - - self.world_entity = world_entity - self.robot_entity = robot_entity - self.robot_axes_entity = robot_axes_entity - - self.world_frame = world_frame - self.robot_frame = robot_frame - - self.urdf_path = urdf_path - self.axes_size = axes_size - - self.cameras = cameras - self.camera_rgb_suffix = camera_rgb_suffix - - @rpc - def start(self) -> None: - super().start() - - if not self._global_config.viewer_backend.startswith("rerun"): - return - - connect_rerun(global_config=self._global_config) - - # Global view coordinates (applies to views at/under this origin). - rr.log(self.world_entity, rr.ViewCoordinates.RIGHT_HAND_Z_UP, static=True) - - # Attach semantic entity paths to named TF frames. - _attach_entity(self.world_entity, self.world_frame) - _attach_entity(self.robot_entity, self.robot_frame) - - if self.axes_size is not None: - rr.log(self.robot_axes_entity, rr.TransformAxes3D(self.axes_size), static=True) # type: ignore[attr-defined] - - # Optional URDF load (purely visual). - if self.urdf_path is not None: - p = Path(self.urdf_path) - if p.exists(): - rr.log_file_from_path( - str(p), - entity_path_prefix=self.robot_entity, - static=True, - ) - - # Multi-camera: attach camera entities + log static pinholes. - for cam_entity, cam_frame, cam_info in self.cameras: - _attach_entity(cam_entity, cam_frame) - rr.log(cam_entity, cam_info.to_rerun(), static=True) # type: ignore[no-untyped-call] - - @rpc - def stop(self) -> None: - super().stop() - - -rerun_scene_wiring = RerunSceneWiringModule.blueprint diff --git a/dimos/dashboard/tf_rerun_module.py b/dimos/dashboard/tf_rerun_module.py deleted file mode 100644 index 25f57a65a..000000000 --- a/dimos/dashboard/tf_rerun_module.py +++ /dev/null @@ -1,172 +0,0 @@ -# Copyright 2025-2026 Dimensional Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-# See the License for the specific language governing permissions and -# limitations under the License. - -"""TF Rerun Module - Snapshot TF visualization in Rerun. - -This module polls the TF buffer at a configurable rate and logs the latest -transform for each edge to Rerun. This provides stable, rate-limited TF -visualization without subscribing to the /tf transport from here. - -Usage: - # In blueprints: - from dimos.dashboard.tf_rerun_module import tf_rerun - - def my_robot(): - return ( - robot_connection() - + tf_rerun() # Add TF visualization - + other_modules() - ) -""" - -from collections.abc import Sequence -import threading -import time -from typing import Any, cast - -import rerun as rr - -from dimos.core import Module, rpc -from dimos.core.blueprints import Blueprint, autoconnect -from dimos.core.global_config import GlobalConfig -from dimos.dashboard.rerun_init import connect_rerun -from dimos.dashboard.rerun_scene_wiring import rerun_scene_wiring -from dimos.utils.logging_config import setup_logger - -logger = setup_logger() - - -class TFRerunModule(Module): - """Polls TF buffer and logs snapshot transforms to Rerun. - - This module automatically visualizes the TF tree in Rerun by: - - Using `self.tf` (the system TF service) to maintain the TF buffer - - Polling at a configurable rate and logging the latest transform per edge - """ - - _global_config: GlobalConfig - _poll_thread: threading.Thread | None = None - _stop_event: threading.Event | None = None - _poll_hz: float - _last_ts_by_edge: dict[tuple[str, str], float] - - def __init__( - self, - global_config: GlobalConfig | None = None, - poll_hz: float = 30.0, - **kwargs: Any, - ) -> None: - """Initialize TFRerunModule. - - Args: - global_config: Optional global configuration for viewer backend settings - **kwargs: Additional arguments passed to parent Module - """ - super().__init__(**kwargs) - self._global_config = global_config or GlobalConfig() - self._poll_hz = poll_hz - self._last_ts_by_edge = {} - - @rpc - def start(self) -> None: - """Start the TF visualization module.""" - super().start() - - # Only connect if Rerun backend is selected - if self._global_config.viewer_backend.startswith("rerun"): - connect_rerun(global_config=self._global_config) - - # Ensure TF transport is started so its internal subscription populates the buffer. - self.tf.start(sub=True) - - self._stop_event = threading.Event() - self._poll_thread = threading.Thread(target=self._poll_loop, daemon=True) - self._poll_thread.start() - logger.info("TFRerunModule: started TF snapshot polling", poll_hz=self._poll_hz) - - def _poll_loop(self) -> None: - assert self._stop_event is not None - period_s = 1.0 / max(self._poll_hz, 0.1) - - while not self._stop_event.is_set(): - # Snapshot keys to avoid concurrent modification while TF buffer updates. - items = list(self.tf.buffers.items()) # type: ignore[attr-defined] - for (parent, child), buffer in items: - latest = buffer.get() - if latest is None: - continue - last_ts = self._last_ts_by_edge.get((parent, child)) - if last_ts is not None and latest.ts == last_ts: - continue - - # Log under `world/tf/...` so it is visible under the default 3D view origin. 
- rr.log(f"world/tf/{child}", latest.to_rerun()) # type: ignore[no-untyped-call] - self._last_ts_by_edge[(parent, child)] = latest.ts - - time.sleep(period_s) - - @rpc - def stop(self) -> None: - """Stop the TF visualization module and cleanup LCM subscription.""" - if self._stop_event is not None: - self._stop_event.set() - self._stop_event = None - - if self._poll_thread is not None and self._poll_thread.is_alive(): - self._poll_thread.join(timeout=1.0) - self._poll_thread = None - - super().stop() - - -def tf_rerun( - *, - poll_hz: float = 30.0, - scene: bool = True, - # Scene wiring kwargs (only used if scene=True) - world_entity: str = "world", - robot_entity: str = "world/robot", - robot_axes_entity: str = "world/robot/axes", - world_frame: str = "world", - robot_frame: str = "base_link", - urdf_path: str | None = None, - axes_size: float | None = 0.5, - cameras: Sequence[tuple[str, str, Any]] = (), - camera_rgb_suffix: str = "rgb", -) -> Blueprint: - """Convenience blueprint: TF snapshot polling + (optional) static scene wiring. - - - TF visualization stays in `TFRerunModule` (poll TF buffer, log to `world/tf/*`). - - Scene wiring is handled by `RerunSceneWiringModule` (view coords, attachments, URDF, pinholes). - """ - tf_bp = cast("Blueprint", TFRerunModule.blueprint(poll_hz=poll_hz)) - if not scene: - return tf_bp - - scene_bp = cast( - "Blueprint", - rerun_scene_wiring( - world_entity=world_entity, - robot_entity=robot_entity, - robot_axes_entity=robot_axes_entity, - world_frame=world_frame, - robot_frame=robot_frame, - urdf_path=urdf_path, - axes_size=axes_size, - cameras=cameras, - camera_rgb_suffix=camera_rgb_suffix, - ), - ) - - return autoconnect(tf_bp, scene_bp) diff --git a/dimos/hardware/sensors/camera/module.py b/dimos/hardware/sensors/camera/module.py index 4173423cb..639c1e045 100644 --- a/dimos/hardware/sensors/camera/module.py +++ b/dimos/hardware/sensors/camera/module.py @@ -15,20 +15,15 @@ from collections.abc import Callable, Generator from dataclasses import dataclass, field import time -from typing import TYPE_CHECKING, Any, cast - -if TYPE_CHECKING: - from rerun._baseclasses import Archetype +from typing import Any import reactivex as rx from reactivex import operators as ops -import rerun as rr from dimos.agents import Output, Reducer, Stream, skill from dimos.core import Module, ModuleConfig, Out, rpc from dimos.core.blueprints import autoconnect from dimos.core.global_config import GlobalConfig -from dimos.dashboard.rerun_init import connect_rerun from dimos.hardware.sensors.camera.spec import CameraHardware from dimos.hardware.sensors.camera.webcam import Webcam from dimos.msgs.geometry_msgs import Quaternion, Transform, Vector3 @@ -80,11 +75,6 @@ def start(self) -> None: else: self.hardware = self.config.hardware - # Connect to Rerun if enabled (cache flag for use in callbacks) - self._rerun_enabled = self._global_config.viewer_backend.startswith("rerun") - if self._rerun_enabled: - connect_rerun(global_config=self._global_config) - stream = self.hardware.image_stream() if self.config.frequency > 0: @@ -92,8 +82,6 @@ def start(self) -> None: def on_image(image: Image) -> None: self.color_image.publish(image) - if self._rerun_enabled: - rr.log("world/robot/camera/rgb", image.to_rerun()) self._disposables.add( stream.subscribe(on_image), @@ -107,12 +95,6 @@ def publish_metadata(self) -> None: camera_info = self.hardware.camera_info.with_ts(time.time()) self.camera_info.publish(camera_info) - if self._rerun_enabled: - rr.log( - "world/robot/camera", - 
cast("Archetype", camera_info.to_rerun(image_topic=None, optical_transform=None)), - ) - if not self.config.transform: return diff --git a/dimos/robot/all_blueprints.py b/dimos/robot/all_blueprints.py index a794f1573..deeb34d17 100644 --- a/dimos/robot/all_blueprints.py +++ b/dimos/robot/all_blueprints.py @@ -92,7 +92,6 @@ "realsense_camera": "dimos.hardware.sensors.camera.realsense.camera", "replanning_a_star_planner": "dimos.navigation.replanning_a_star.module", "rerun_bridge": "dimos.visualization.rerun.bridge", - "rerun_scene_wiring": "dimos.dashboard.rerun_scene_wiring", "ros_nav": "dimos.navigation.rosnav", "spatial_memory": "dimos.perception.spatial_perception", "speak_skill": "dimos.agents.skills.speak_skill", diff --git a/dimos/robot/unitree_webrtc/unitree_g1_blueprints.py b/dimos/robot/unitree_webrtc/unitree_g1_blueprints.py index 0dd003a30..562b65838 100644 --- a/dimos/robot/unitree_webrtc/unitree_g1_blueprints.py +++ b/dimos/robot/unitree_webrtc/unitree_g1_blueprints.py @@ -31,7 +31,6 @@ from dimos.constants import DEFAULT_CAPACITY_COLOR_IMAGE from dimos.core.blueprints import autoconnect from dimos.core.transport import LCMTransport, pSHMTransport -from dimos.dashboard.tf_rerun_module import tf_rerun from dimos.hardware.sensors.camera import zed from dimos.hardware.sensors.camera.module import camera_module # type: ignore[attr-defined] from dimos.hardware.sensors.camera.webcam import Webcam @@ -87,12 +86,6 @@ # Visualization websocket_vis(), foxglove_bridge(), - tf_rerun( - robot_frame="base_link", - cameras=[ - ("world/robot/camera", "camera_optical", zed.CameraInfo.SingleWebcam), - ], - ), ) .global_config(n_dask_workers=4, robot_model="unitree_g1") .transports( diff --git a/docs/api/visualization.md b/docs/api/visualization.md index be41898cc..9259e7e06 100644 --- a/docs/api/visualization.md +++ b/docs/api/visualization.md @@ -90,232 +90,3 @@ voxel_mapper(voxel_size=0.1), # 10cm voxels ## How to use Rerun on `dev` (and the TF/entity nuances) Rerun on `dev` is **module-driven**: modules decide what to log, and `Blueprint.build()` sets up the shared viewer + default layout. - -### Rerun lifecycle (what happens automatically vs what modules must do) - -- **Server/viewer startup happens in the build step** - - [`dimos/core/blueprints.py`](/dimos/core/blueprints.py#L26) calls `init_rerun_server()` (from [`dimos/dashboard/rerun_init.py`](/dimos/dashboard/rerun_init.py#L18)) when `GlobalConfig.viewer_backend` starts with `rerun` and `rerun_enabled=True`. - -- **Worker processes must connect before logging** - - If a module is going to call `rr.log(...)`, it should call `connect_rerun(global_config=...)` first (see examples in: - - [`dimos/robot/unitree/connection/go2.py`](/dimos/robot/unitree/connection/go2.py) - - [`dimos/mapping/costmapper.py`](/dimos/mapping/costmapper.py) - - [`dimos/mapping/voxels.py`](/dimos/mapping/voxels.py) - - [`dimos/navigation/replanning_a_star/module.py`](/dimos/navigation/replanning_a_star/module.py) - ). - -### Panels and how to use `rr.log` in DimOS - -On `dev`, the default layout is composed from modules’ `rerun_views()` contributions: - -- Implement `@classmethod rerun_views()` on your module to add panels (2D/3D/time series). -- The build step aggregates them and sends a composed blueprint (see [`dimos/core/blueprints.py`](/dimos/core/blueprints.py)). - -If you `rr.log("some/new/entity", ...)` but don’t see it where you expect: -- it may not be included by any existing view panel, so add a `rerun_views()` panel that points at your entity path. 
- -### TF visualization + scene wiring: the `tf_rerun()` helper - -The intended pattern for TF visualization on `dev` is: - -- **Publish transforms normally** via `self.tf.publish(...)` (TF is available on every module). -- In blueprints, add `tf_rerun(...)` which composes two modules: - 1. **`TFRerunModule`**: Polls `self.tf.buffers` at a configurable rate and logs the latest transform per edge under `world/tf/{child}` using `Transform.to_rerun()`. - 2. **`RerunSceneWiringModule`**: Logs static scene setup (view coordinates, entity-path attachments, optional URDF, axes, camera pinholes). - -The `tf_rerun()` helper accepts scene configuration: - -```python -from dimos.dashboard.tf_rerun_module import tf_rerun - -# Default: TF polling + minimal scene wiring (world + world/robot attachments) -tf_rerun() - -# With URDF and camera: -tf_rerun( - urdf_path=Path("path/to/robot.urdf"), - cameras=[ - ("world/robot/camera", "camera_optical", GO2Connection.camera_info_static), - ], -) - -# Multi-camera robot: -tf_rerun( - cameras=[ - ("world/robot/cameras/front", "cam_front_optical", front_cam_info), - ("world/robot/cameras/left", "cam_left_optical", left_cam_info), - ("world/robot/cameras/right", "cam_right_optical", right_cam_info), - ("world/robot/cameras/rear", "cam_rear_optical", rear_cam_info), - ], -) - -# Disable scene wiring entirely (TF polling only): -tf_rerun(scene=False) -``` - -### Entity paths vs TF frames - -Rerun has two “spaces” you’re always juggling: - -- **Entity paths**: strings like `world/robot/camera/rgb` (how data is organized/browsed). -- **Transform frames**: names like `base_link`, `camera_optical` (how motion is defined). - -In DimOS on `dev`: -- TF frames come from the TF system (`Transform.frame_id` / `Transform.child_frame_id`) and are logged via `Transform.to_rerun()` (see [`dimos/msgs/geometry_msgs/Transform.py`](/dimos/msgs/geometry_msgs/Transform.py)). -- Visualization entity paths should be treated as **semantic organization** (`world/**`, `metrics/**`, etc). - -**Rule of thumb**: -- Put geometry and sensor data under semantic paths (e.g. `world/robot/**`, `world/nav/**`). -- Drive motion through TF by emitting transforms via `self.tf.publish(...)`. -- If you need a semantic entity to "live in" a particular TF frame (e.g. camera frustum under `camera_optical`), configure it via `tf_rerun(cameras=[...])` which handles the attachment in [`dimos/dashboard/rerun_scene_wiring.py`](/dimos/dashboard/rerun_scene_wiring.py#L20). - -### Cameras: pinhole projection vs lens distortion - -- The camera frustum/projection comes from `CameraInfo.to_rerun()` → `rr.Pinhole(...)` (see [`dimos/msgs/sensor_msgs/CameraInfo.py`](/dimos/msgs/sensor_msgs/CameraInfo.py)). -- Rerun pinholes model **intrinsics** (focal length / principal point / resolution). Lens distortion coefficients are **not** part of the pinhole archetype. If you need distortion-correct visualization, you must undistort upstream and log the undistorted image. - -## Appendix: Where Rerun is used in the codebase - -This appendix is an **inventory of every current Rerun touchpoint** in the repository (as of this doc), grouped by role (good for reference). - -### Rerun lifecycle (server, viewer, client connections) - -- **`GlobalConfig` flags** - - **File**: [`dimos/core/global_config.py`](/dimos/core/global_config.py) - - **What**: Defines `rerun_enabled`, `viewer_backend` (`rerun-web`, `rerun`, `foxglove`), and `rerun_server_addr`. 
- -- **Rerun process lifecycle** - - **File**: [`dimos/dashboard/rerun_init.py`](/dimos/dashboard/rerun_init.py) - - **What**: - - `init_rerun_server()` starts the gRPC server and optionally the native/web viewer (`rr.spawn`, `rr.serve_grpc`, `rr.serve_web_viewer`). - - `connect_rerun()` connects a process to the shared recording (`rr.connect_grpc`). - - `shutdown_rerun()` disconnects (`rr.disconnect`). - -- **Dashboard re-exports** - - **File**: [`dimos/dashboard/__init__.py`](/dimos/dashboard/__init__.py) - - **What**: Re-exports `connect_rerun`, `init_rerun_server`, `shutdown_rerun`. - -### Blueprint/layout composition (Rerun UI) - -- **Blueprint composition and server init during build** - - **File**: [`dimos/core/blueprints.py`](/dimos/core/blueprints.py) - - **What**: - - Calls `init_rerun_server()` during `Blueprint.build()` when backend is Rerun. - - Collects per-module `rerun_views()` panels and composes a default `rrb.Blueprint(...)`. - - Sends the blueprint via `rr.send_blueprint(...)`. - -### TF visualization - -- **TF visualization module (polling snapshot)** - - **File**: [`dimos/dashboard/tf_rerun_module.py`](/dimos/dashboard/tf_rerun_module.py) - - **What**: Polls `self.tf.buffers` at a configurable rate (`poll_hz`) and logs the latest transform per TF edge to `world/tf/{child}` using `Transform.to_rerun()`. - -- **Scene wiring module (static setup)** - - **File**: [`dimos/dashboard/rerun_scene_wiring.py`](/dimos/dashboard/rerun_scene_wiring.py) - - **What**: Logs all static Rerun scene setup once at startup: - - View coordinates (`rr.ViewCoordinates.RIGHT_HAND_Z_UP`) - - Entity-path attachments under named TF frames (`world` → `world`, `world/robot` → `base_link`) - - Optional URDF load under `world/robot` - - Optional axes gizmo at `world/robot/axes` - - Camera entity-path attachments + pinholes (configurable via tuple list) - - **Why**: Keeps robot modules focused on I/O + TF publishing; centralizes visualization scene wiring. - -- **`tf_rerun()` helper (composed blueprint)** - - **File**: [`dimos/dashboard/tf_rerun_module.py`](/dimos/dashboard/tf_rerun_module.py) - - **What**: Returns a `Blueprint` that composes `TFRerunModule` + `RerunSceneWiringModule` via `autoconnect(...)`. Blueprints add one line (`tf_rerun(...)`) to get both TF polling and scene wiring. - -- **TF message → Rerun entity mapping** - - **File**: [`dimos/msgs/tf2_msgs/TFMessage.py`](/dimos/msgs/tf2_msgs/TFMessage.py) - - **What**: `TFMessage.to_rerun()` returns `(entity_path, rr.Transform3D)` pairs for each transform, currently under `world/tf/{child_frame_id}`. - -- **Transform → Rerun transform archetype** - - **File**: [`dimos/msgs/geometry_msgs/Transform.py`](/dimos/msgs/geometry_msgs/Transform.py) - - **What**: `Transform.to_rerun()` produces `rr.Transform3D(parent_frame=..., child_frame=...)`. - -### Robot/device visualization (GO2) - -- **GO2 connection: sensor data logging + TF publishing** - - **File**: [`dimos/robot/unitree/connection/go2.py`](/dimos/robot/unitree/connection/go2.py) - - **What**: - - Connects to Rerun via `connect_rerun()` (only to log sensor data). - - Publishes TF transforms via `self.tf.publish(...)` (base_link, camera_link, camera_optical). - - Logs camera images to `world/robot/camera/rgb`. - - Contributes a camera panel via `rerun_views()` (`rrb.Spatial2DView(origin="world/robot/camera/rgb")`). - - **Note**: Static scene setup (view coordinates, URDF, axes, camera entity attachments, pinholes) is handled by `RerunSceneWiringModule` via `tf_rerun(...)` in the blueprint. 
GO2 no longer does this directly. - -### Mapping/navigation visualization (modules) - -- **Costmap visualization + metrics** - - **File**: [`dimos/mapping/costmapper.py`](/dimos/mapping/costmapper.py) - - **What**: - - Logs 2D costmap image at `world/nav/costmap/image` (`OccupancyGrid.to_rerun(mode="image")`). - - Logs 3D floor overlay at `world/nav/costmap/floor` (`mode="mesh"`). - - Logs time series metrics under `metrics/costmap/*` via `rr.Scalars`. - - Contributes a 2D panel (`rrb.Spatial2DView(origin="world/nav/costmap/image")`) and metrics panel via `rerun_views()`. - -- **Voxel map visualization + metrics** - - **File**: [`dimos/mapping/voxels.py`](/dimos/mapping/voxels.py) - - **What**: - - Logs voxel map at `world/map` via `PointCloud2.to_rerun(mode="boxes", ...)`. - - Logs time series metrics under `metrics/voxel_map/*` via `rr.Scalars`. - - Contributes metrics panels via `rerun_views()`. - -- **Planner debugging path logging** - - **File**: [`dimos/navigation/replanning_a_star/module.py`](/dimos/navigation/replanning_a_star/module.py) - - **What**: Logs navigation path at `world/nav/path` using `Path.to_rerun()` when Rerun backend is active. - -### Metrics helpers - -- **Timing decorator (logs to Rerun scalars)** - - **File**: [`dimos/utils/metrics.py`](/dimos/utils/metrics.py) - - **What**: `log_timing_to_rerun(entity_path)` wraps a function and logs its duration to `rr.Scalars(...)` at the given path. - -### Message-level `to_rerun()` implementations (conversion layer) - -These pull `rerun` into the message layer by returning Rerun archetypes. - -- **Camera intrinsics → pinhole** - - **File**: [`dimos/msgs/sensor_msgs/CameraInfo.py`](/dimos/msgs/sensor_msgs/CameraInfo.py) - - **What**: `CameraInfo.to_rerun()` returns `rr.Pinhole(...)` for frustum/projection (intrinsics only). - -- **PointCloud2 → points/boxes** - - **File**: [`dimos/msgs/sensor_msgs/PointCloud2.py`](/dimos/msgs/sensor_msgs/PointCloud2.py) - - **What**: `PointCloud2.to_rerun()` returns `rr.Points3D(...)` or `rr.Boxes3D(...)` depending on mode. - -- **Image/DepthImage formatting** - - **File**: [`dimos/msgs/sensor_msgs/image_impls/AbstractImage.py`](/dimos/msgs/sensor_msgs/image_impls/AbstractImage.py) - - **What**: Helpers that construct `rr.Image(...)` / `rr.DepthImage(...)` with appropriate color model. - -- **OccupancyGrid → image/mesh/points** - - **File**: [`dimos/msgs/nav_msgs/OccupancyGrid.py`](/dimos/msgs/nav_msgs/OccupancyGrid.py) - - **What**: `OccupancyGrid.to_rerun(mode="image"|"mesh"|"points")` returns `rr.Image`, `rr.Mesh3D`, or `rr.Points3D`. - -- **Path → line strips** - - **File**: [`dimos/msgs/nav_msgs/Path.py`](/dimos/msgs/nav_msgs/Path.py) - - **What**: `Path.to_rerun()` returns `rr.LineStrips3D(...)`. - -- **PoseStamped → transform (and arrows)** - - **File**: [`dimos/msgs/geometry_msgs/PoseStamped.py`](/dimos/msgs/geometry_msgs/PoseStamped.py) - - **What**: `PoseStamped.to_rerun()` returns `rr.Transform3D(...)` (and includes arrow helpers using `rr.Arrows3D`). 
- -### Web UI embedding (split-screen dashboard) - -- **Split-screen dashboard HTML** - - **File**: [`dimos/web/templates/rerun_dashboard.html`](/dimos/web/templates/rerun_dashboard.html) - - **What**: Embeds: - - Rerun web viewer iframe (`http://localhost:9090/?url=...9876/proxy`) - - command center iframe (`http://localhost:7779/command-center`) - -- **Websocket visualization server** - - **File**: [`dimos/web/websocket_vis/websocket_vis_module.py`](/dimos/web/websocket_vis/websocket_vis_module.py) - - **What**: Serves either the split-screen dashboard or command-center-only depending on `viewer_backend`. - -- **Command center client** - - **File**: [`dimos/web/command-center-extension/src/Connection.ts`](/dimos/web/command-center-extension/src/Connection.ts) - - **What**: Connects to the websocket server on port `7779` (not Rerun SDK, but part of the Rerun-web dashboard experience). - -### Related documentation - -- **TF and transforms concepts** - - **File**: [`docs/api/transforms.md`](/docs/api/transforms.md) - - **What**: Explains frames/transforms and how `self.tf` is intended to be used. From dc5604d8e1736e477c75975e32c5c80e34ce60f7 Mon Sep 17 00:00:00 2001 From: Ivan Nikolic Date: Sun, 1 Feb 2026 14:14:48 +0800 Subject: [PATCH 25/36] typing cleanup --- dimos/msgs/geometry_msgs/PoseStamped.py | 5 ++++- dimos/msgs/nav_msgs/OccupancyGrid.py | 7 ++++++- dimos/msgs/nav_msgs/Path.py | 3 ++- dimos/msgs/sensor_msgs/PointCloud2.py | 8 ++++---- dimos/visualization/rerun/bridge.py | 2 +- 5 files changed, 17 insertions(+), 8 deletions(-) diff --git a/dimos/msgs/geometry_msgs/PoseStamped.py b/dimos/msgs/geometry_msgs/PoseStamped.py index 30b39f51a..abdbdc7c9 100644 --- a/dimos/msgs/geometry_msgs/PoseStamped.py +++ b/dimos/msgs/geometry_msgs/PoseStamped.py @@ -30,7 +30,6 @@ except ImportError: ROSPoseStamped = None # type: ignore[assignment, misc] from plum import dispatch -import rerun as rr from dimos.msgs.geometry_msgs.Pose import Pose from dimos.msgs.geometry_msgs.Quaternion import Quaternion, QuaternionConvertable @@ -96,6 +95,8 @@ def to_rerun(self) -> Archetype: Returns a Transform3D that can be logged to Rerun to position child entities in the transform hierarchy. 
""" + import rerun as rr + return rr.Transform3D( translation=[self.x, self.y, self.z], rotation=rr.Quaternion( @@ -110,6 +111,8 @@ def to_rerun(self) -> Archetype: def to_rerun_arrow(self, length: float = 0.5): # type: ignore[no-untyped-def] """Convert to rerun Arrows3D format for visualization.""" + import rerun as rr + origin = [[self.x, self.y, self.z]] forward = self.orientation.rotate_vector(Vector3(length, 0, 0)) vector = [[forward.x, forward.y, forward.z]] diff --git a/dimos/msgs/nav_msgs/OccupancyGrid.py b/dimos/msgs/nav_msgs/OccupancyGrid.py index 7b37a9fd0..4e03ce143 100644 --- a/dimos/msgs/nav_msgs/OccupancyGrid.py +++ b/dimos/msgs/nav_msgs/OccupancyGrid.py @@ -27,7 +27,6 @@ import matplotlib.pyplot as plt import numpy as np from PIL import Image -import rerun as rr from dimos.msgs.geometry_msgs import Pose, Vector3, VectorLike from dimos.types.timestamped import Timestamped @@ -460,6 +459,8 @@ def to_rerun( - Unknown space (value -1): gray/transparent - Occupied space (value > 0): black/red with gradient """ + import rerun as rr + if self.grid.size == 0: if mode == "mesh": return rr.Mesh3D(vertex_positions=[]) @@ -580,6 +581,8 @@ def _to_rerun_image( cost_range: tuple[int, int] | None = None, ) -> Archetype: """Convert to 2D image visualization.""" + import rerun as rr + # Use existing cached visualization functions for supported palettes if colormap in ("turbo", "rainbow"): from dimos.mapping.occupancy.visualizations import rainbow_image, turbo_image @@ -640,6 +643,8 @@ def _to_rerun_mesh( Uses a single quad with the occupancy grid as a texture. Much more efficient than per-cell quads (4 vertices vs n_cells*4). """ + import rerun as rr + if self.grid.size == 0: return rr.Mesh3D(vertex_positions=[]) diff --git a/dimos/msgs/nav_msgs/Path.py b/dimos/msgs/nav_msgs/Path.py index 434301ac7..b25c778cd 100644 --- a/dimos/msgs/nav_msgs/Path.py +++ b/dimos/msgs/nav_msgs/Path.py @@ -30,7 +30,6 @@ from nav_msgs.msg import Path as ROSPath # type: ignore[attr-defined] except ImportError: ROSPath = None # type: ignore[assignment, misc] -import rerun as rr from dimos.msgs.geometry_msgs.PoseStamped import PoseStamped from dimos.types.timestamped import Timestamped @@ -251,6 +250,8 @@ def to_rerun( Returns: rr.LineStrips3D archetype for logging to rerun """ + import rerun as rr + if not self.poses: return rr.LineStrips3D([]) diff --git a/dimos/msgs/sensor_msgs/PointCloud2.py b/dimos/msgs/sensor_msgs/PointCloud2.py index 56737de29..60e5ab767 100644 --- a/dimos/msgs/sensor_msgs/PointCloud2.py +++ b/dimos/msgs/sensor_msgs/PointCloud2.py @@ -622,7 +622,7 @@ def __len__(self) -> int: def to_rerun( self, - radii: float = 0.025, + voxel_size: float = 0.05, colormap: str | None = "turbo", colors: list[int] | None = None, mode: str = "boxes", @@ -635,7 +635,7 @@ def to_rerun( """Convert to Rerun Points3D or Boxes3D archetype. 
Args: - radii: Point radius for visualization (only for mode="points") + voxel_size: size for visualization colormap: Optional colormap name (e.g., "turbo", "viridis") to color by height colors: Optional RGB color [r, g, b] for all points (0-255) mode: Visualization mode - "points" for spheres, "boxes" for cubes (default) @@ -663,7 +663,7 @@ def to_rerun( if mode == "boxes": # Use boxes for voxel visualization - box_size = size if size is not None else radii * 2 + box_size = size if size is not None else voxel_size half = box_size / 2 return rr.Boxes3D( centers=points, @@ -674,7 +674,7 @@ def to_rerun( else: return rr.Points3D( positions=points, - radii=radii, + radii=voxel_size / 2, colors=point_colors, ) diff --git a/dimos/visualization/rerun/bridge.py b/dimos/visualization/rerun/bridge.py index 77d8243a7..7df119dec 100644 --- a/dimos/visualization/rerun/bridge.py +++ b/dimos/visualization/rerun/bridge.py @@ -253,7 +253,7 @@ def main() -> None: pubsubs=[LCM(autoconf=True)], # custom converters for specific rerun entity paths visual_override={ - "world/global_map": lambda grid: grid.to_rerun(radii=0.05), + "world/global_map": lambda grid: grid.to_rerun(voxel_size=0.1), "world/debug_navigation": lambda grid: grid.to_rerun( colormap="Accent", z_offset=0.015, From 93865b8e3c62a7db930b48161cd667985df4e159 Mon Sep 17 00:00:00 2001 From: Ivan Nikolic Date: Sun, 1 Feb 2026 19:27:59 +0800 Subject: [PATCH 26/36] blueprints update --- dimos/robot/all_blueprints.py | 1 - 1 file changed, 1 deletion(-) diff --git a/dimos/robot/all_blueprints.py b/dimos/robot/all_blueprints.py index deeb34d17..a0103ad5a 100644 --- a/dimos/robot/all_blueprints.py +++ b/dimos/robot/all_blueprints.py @@ -49,7 +49,6 @@ "unitree-go2-agentic-mcp": "dimos.robot.unitree_webrtc.unitree_go2_blueprints:unitree_go2_agentic_mcp", "unitree-go2-agentic-ollama": "dimos.robot.unitree_webrtc.unitree_go2_blueprints:unitree_go2_agentic_ollama", "unitree-go2-basic": "dimos.robot.unitree_webrtc.unitree_go2_blueprints:unitree_go2_basic", - "unitree-go2-bridge": "dimos.robot.unitree_webrtc.unitree_go2_blueprints:unitree_go2_bridge", "unitree-go2-detection": "dimos.robot.unitree_webrtc.unitree_go2_blueprints:unitree_go2_detection", "unitree-go2-ros": "dimos.robot.unitree_webrtc.unitree_go2_blueprints:unitree_go2_ros", "unitree-go2-spatial": "dimos.robot.unitree_webrtc.unitree_go2_blueprints:unitree_go2_spatial", From face6e553a68a50d8a2a04debdd34c6bce76468a Mon Sep 17 00:00:00 2001 From: Ivan Nikolic Date: Sun, 1 Feb 2026 19:31:06 +0800 Subject: [PATCH 27/36] type fixes, manip type ignores --- dimos/core/base_blueprints.py | 4 ++-- .../planning/kinematics/drake_optimization_ik.py | 4 ++-- dimos/manipulation/planning/world/drake_world.py | 14 ++++++++------ 3 files changed, 12 insertions(+), 10 deletions(-) diff --git a/dimos/core/base_blueprints.py b/dimos/core/base_blueprints.py index 987a120b8..249cfc191 100644 --- a/dimos/core/base_blueprints.py +++ b/dimos/core/base_blueprints.py @@ -15,7 +15,7 @@ import platform from dimos.constants import DEFAULT_CAPACITY_COLOR_IMAGE -from dimos.core.blueprints import autoconnect +from dimos.core.blueprints import Blueprint, autoconnect from dimos.core.transport import ( pSHMTransport, ) @@ -24,7 +24,7 @@ from dimos.visualization.rerun.bridge import rerun_bridge -def base_blueprint(): +def base_blueprint() -> Blueprint: from dimos.core.global_config import GlobalConfig _config = GlobalConfig() diff --git a/dimos/manipulation/planning/kinematics/drake_optimization_ik.py 
b/dimos/manipulation/planning/kinematics/drake_optimization_ik.py index 453c57138..1e6b1962a 100644 --- a/dimos/manipulation/planning/kinematics/drake_optimization_ik.py +++ b/dimos/manipulation/planning/kinematics/drake_optimization_ik.py @@ -178,7 +178,7 @@ def _solve_single( # Add position constraint ik.AddPositionConstraint( frameB=ee_frame, - p_BQ=np.array([0.0, 0.0, 0.0]), + p_BQ=np.array([0.0, 0.0, 0.0]), # type: ignore[arg-type] frameA=plant.world_frame(), p_AQ_lower=target_transform.translation() - np.array([position_tolerance] * 3), p_AQ_upper=target_transform.translation() + np.array([position_tolerance] * 3), @@ -227,7 +227,7 @@ def _solve_single( position_error, orientation_error = compute_pose_error( pose_to_matrix(actual_pose), - target_transform.GetAsMatrix4(), + target_transform.GetAsMatrix4(), # type: ignore[arg-type] ) return _create_success_result( diff --git a/dimos/manipulation/planning/world/drake_world.py b/dimos/manipulation/planning/world/drake_world.py index ea17684c8..836d3ef4f 100644 --- a/dimos/manipulation/planning/world/drake_world.py +++ b/dimos/manipulation/planning/world/drake_world.py @@ -317,7 +317,7 @@ def _add_obstacle_to_plant(self, obstacle: Obstacle, obstacle_id: str) -> Any: body = self._plant.AddRigidBody( obstacle_id, - self._obstacles_model_instance, + self._obstacles_model_instance, # type: ignore[arg-type] ) transform = self._pose_to_rigid_transform(obstacle.pose) @@ -335,7 +335,7 @@ def _add_obstacle_to_plant(self, obstacle: Obstacle, obstacle_id: str) -> Any: RigidTransform(), shape, obstacle_id + "_visual", - diffuse_color, + diffuse_color, # type: ignore[arg-type] ) self._plant.WeldFrames( @@ -371,7 +371,9 @@ def _add_obstacle_to_scene_graph(self, obstacle: Obstacle, obstacle_id: str) -> shape=shape, name=obstacle_id, ) - geometry_instance.set_illustration_properties(MakePhongIllustrationProperties(rgba_array)) + geometry_instance.set_illustration_properties( + MakePhongIllustrationProperties(rgba_array) # type: ignore[arg-type] + ) geometry_instance.set_proximity_properties(proximity_props) frame_id = self._scene_graph.world_frame_id() @@ -803,8 +805,8 @@ def get_link_pose( X_WL = self._plant.EvalBodyPoseInWorld(plant_ctx, body) - result: NDArray[np.float64] = X_WL.GetAsMatrix4() - return result + result = X_WL.GetAsMatrix4() + return result # type: ignore[return-value] def get_jacobian(self, ctx: Context, robot_id: WorldRobotID) -> NDArray[np.float64]: """Get geometric Jacobian (6 x n_joints). 
@@ -825,7 +827,7 @@ def get_jacobian(self, ctx: Context, robot_id: WorldRobotID) -> NDArray[np.float plant_ctx, JacobianWrtVariable.kQDot, robot_data.ee_frame, - np.array([0.0, 0.0, 0.0]), # Point on end-effector + np.array([0.0, 0.0, 0.0]), # type: ignore[arg-type] # Point on end-effector self._plant.world_frame(), self._plant.world_frame(), ) From 810e79e8700ec986283851dc837aafc8a4e5aa11 Mon Sep 17 00:00:00 2001 From: Ivan Nikolic Date: Sun, 1 Feb 2026 19:34:40 +0800 Subject: [PATCH 28/36] occupancygrid is never an image --- dimos/msgs/nav_msgs/OccupancyGrid.py | 105 +-------------------------- 1 file changed, 2 insertions(+), 103 deletions(-) diff --git a/dimos/msgs/nav_msgs/OccupancyGrid.py b/dimos/msgs/nav_msgs/OccupancyGrid.py index 4e03ce143..d45e1b623 100644 --- a/dimos/msgs/nav_msgs/OccupancyGrid.py +++ b/dimos/msgs/nav_msgs/OccupancyGrid.py @@ -17,7 +17,7 @@ from enum import IntEnum from functools import lru_cache import time -from typing import TYPE_CHECKING, Any, BinaryIO +from typing import TYPE_CHECKING, BinaryIO from dimos_lcm.nav_msgs import ( MapMetaData, @@ -427,51 +427,6 @@ def cell_value(self, world_position: Vector3) -> int: return int(self.grid[y, x]) - def to_rerun( - self, - colormap: str | None = None, - mode: str = "mesh", - z_offset: float = 0.01, - opacity: float = 1.0, - cost_range: tuple[int, int] | None = None, - background: str | None = None, - **kwargs: Any, - ) -> Archetype: - """Convert to Rerun visualization format. - - Args: - colormap: Optional colormap name (e.g., "RdBu_r" for blue=free, red=occupied). - If None, uses grayscale for image mode or default colors for mesh mode. - mode: Visualization mode: - - "image": 2D grayscale/colored image - - "mesh": 3D textured plane overlay on floor (default) - z_offset: Height offset for mesh mode (default 0.01m above floor) - opacity: Blend factor (0.0 to 1.0). Blends costmap colors towards background color. - cost_range: Optional (min, max) cost range to display. Cells outside range use background. - background: Hex color for background/out-of-range cells (e.g. "#484981"). Default is black. 
- **kwargs: Additional args (ignored for compatibility) - - Returns: - Rerun archetype for logging (rr.Image or rr.Mesh3D) - - The visualization uses: - - Free space (value 0): white/blue - - Unknown space (value -1): gray/transparent - - Occupied space (value > 0): black/red with gradient - """ - import rerun as rr - - if self.grid.size == 0: - if mode == "mesh": - return rr.Mesh3D(vertex_positions=[]) - else: - return rr.Image(np.zeros((1, 1), dtype=np.uint8), color_model="L") - - if mode == "mesh": - return self._to_rerun_mesh(colormap, z_offset, opacity, cost_range, background) - else: - return self._to_rerun_image(colormap, opacity, cost_range) - def _generate_rgba_texture( self, colormap: str | None = None, @@ -574,63 +529,7 @@ def _generate_rgba_texture( return vis - def _to_rerun_image( - self, - colormap: str | None = None, - opacity: float = 1.0, - cost_range: tuple[int, int] | None = None, - ) -> Archetype: - """Convert to 2D image visualization.""" - import rerun as rr - - # Use existing cached visualization functions for supported palettes - if colormap in ("turbo", "rainbow"): - from dimos.mapping.occupancy.visualizations import rainbow_image, turbo_image - - if colormap == "turbo": - bgr_image = turbo_image(self.grid) - else: - bgr_image = rainbow_image(self.grid) - - # Convert BGR to RGB, apply opacity, and flip for world coordinates - rgb_image = (np.flipud(bgr_image[:, :, ::-1]) * opacity).astype(np.uint8) - - # Apply cost_range filter for turbo/rainbow - if cost_range is not None: - in_range = (self.grid >= cost_range[0]) & (self.grid <= cost_range[1]) - rgba_image = np.zeros((rgb_image.shape[0], rgb_image.shape[1], 4), dtype=np.uint8) - rgba_image[:, :, :3] = rgb_image - rgba_image[:, :, 3] = 255 - rgba_image[np.flipud(~in_range), 3] = 0 # Flip mask to match flipped image - return rr.Image(rgba_image, color_model="RGBA") - - return rr.Image(rgb_image, color_model="RGB") - - if colormap is not None: - # Use helper and flip for image display - rgba = self._generate_rgba_texture(colormap, opacity, cost_range) - return rr.Image(np.flipud(rgba), color_model="RGBA") - - # Grayscale visualization (no colormap) - vis_gray = np.zeros((self.height, self.width), dtype=np.uint8) - - # Free space = white - vis_gray[self.grid == 0] = 255 - - # Unknown = gray - vis_gray[self.grid == -1] = 128 - - # Occupied (100) = black, costs (1-99) = gradient - occupied_mask = self.grid > 0 - if np.any(occupied_mask): - # Map 1-100 to 127-0 (darker = more occupied) - costs = self.grid[occupied_mask].astype(np.float32) - vis_gray[occupied_mask] = (127 * (1 - costs / 100)).astype(np.uint8) - - # Flip vertically to match world coordinates (y=0 at bottom) - return rr.Image(np.flipud(vis_gray), color_model="L") - - def _to_rerun_mesh( + def to_rerun( self, colormap: str | None = None, z_offset: float = 0.01, From 9d32431788338835e7257f9bfc948a289f197f47 Mon Sep 17 00:00:00 2001 From: Ivan Nikolic Date: Mon, 2 Feb 2026 13:21:03 +0800 Subject: [PATCH 29/36] tf cleanup --- dimos/msgs/sensor_msgs/CameraInfo.py | 21 +++++++++--------- dimos/msgs/tf2_msgs/TFMessage.py | 6 +++-- dimos/msgs/tf2_msgs/test_TFMessage.py | 32 +++++++++++++++++++++++++++ dimos/visualization/rerun/bridge.py | 21 ++++++++++++++++++ 4 files changed, 68 insertions(+), 12 deletions(-) diff --git a/dimos/msgs/sensor_msgs/CameraInfo.py b/dimos/msgs/sensor_msgs/CameraInfo.py index 59e22d73c..b34e923b5 100644 --- a/dimos/msgs/sensor_msgs/CameraInfo.py +++ b/dimos/msgs/sensor_msgs/CameraInfo.py @@ -403,8 +403,13 @@ def to_rerun( self, 
image_plane_distance: float = 1.0, # These are defaults for a typical RGB camera with a known transform - image_topic: str | None = "color_image", - optical_transform: str | None = "camera_optical", + # + # TODO this should be done by the actual emitting modules, + # they know the camera image topic, spatial relationships etc + # + # poor CameraInfo class has no idea on this + image_topic: str | None = None, + optical_frame: str | None = None, ) -> RerunData: """Convert to Rerun Pinhole archetype for camera frustum visualization. @@ -439,7 +444,7 @@ def to_rerun( # Add pinhole under world/image_topic (we know which Image this CameraInfo refers to) ret.append( ( - f"world/{image_topic}", + image_topic, rr.Pinhole( focal_length=[fx, fy], principal_point=[cx, cy], @@ -450,18 +455,14 @@ def to_rerun( ) ) - if not optical_transform: + if not optical_frame: return ret # Add 3d transform from optical frame to world/image_topic (We know where the camera is) ret.append( ( - "world/color_image", - rr.Transform3D( - translation=[0.0, 0.0, 0.0], - rotation=rr.Quaternion(xyzw=[0.0, 0.0, 0.0, 1.0]), - parent_frame=f"tf#/{optical_transform}", - ), + image_topic, + rr.Transform3D(parent_frame=f"tf#/{optical_frame}"), ) ) diff --git a/dimos/msgs/tf2_msgs/TFMessage.py b/dimos/msgs/tf2_msgs/TFMessage.py index db5bbed70..13cf57984 100644 --- a/dimos/msgs/tf2_msgs/TFMessage.py +++ b/dimos/msgs/tf2_msgs/TFMessage.py @@ -127,7 +127,9 @@ def __repr__(self) -> str: def __str__(self) -> str: lines = [f"TFMessage with {len(self.transforms)} transforms:"] for i, transform in enumerate(self.transforms): - lines.append(f" [{i}] {transform.frame_id} @ {transform.ts:.3f}") + lines.append( + f" [{i}] {transform.frame_id} → {transform.child_frame_id} @ {transform.ts:.3f}" + ) return "\n".join(lines) @classmethod @@ -180,6 +182,6 @@ def to_rerun(self) -> RerunMulti: """ results: RerunMulti = [] for transform in self.transforms: - entity_path = f"world/{transform.child_frame_id}" + entity_path = f"world/tf/{transform.child_frame_id}" results.append((entity_path, transform.to_rerun())) return results diff --git a/dimos/msgs/tf2_msgs/test_TFMessage.py b/dimos/msgs/tf2_msgs/test_TFMessage.py index 783692fb3..d58f30093 100644 --- a/dimos/msgs/tf2_msgs/test_TFMessage.py +++ b/dimos/msgs/tf2_msgs/test_TFMessage.py @@ -70,6 +70,38 @@ def test_tfmessage_add_transform() -> None: assert msg[0] == tf +def test_tfmessage_tree() -> None: + """Test adding transforms to TFMessage.""" + msg = TFMessage() + + msg.add_transform( + Transform( + translation=Vector3(1, 2, 0.5), frame_id="world", child_frame_id="robot", ts=200.0 + ) + ) + msg.add_transform( + Transform( + translation=Vector3(0.1, 0, 1.0), frame_id="robot", child_frame_id="camera", ts=200.0 + ) + ) + msg.add_transform( + Transform( + translation=Vector3(0.2, 0, 0.2), frame_id="robot", child_frame_id="lidar", ts=200.0 + ) + ) + msg.add_transform( + Transform( + translation=Vector3(0.05, 0, 0.0), + frame_id="lidar", + child_frame_id="lidar_scanner", + ts=200.0, + ) + ) + + assert len(msg) == 4 + print(msg) + + def test_tfmessage_lcm_encode_decode() -> None: """Test encoding TFMessage to LCM bytes.""" # Create transforms diff --git a/dimos/visualization/rerun/bridge.py b/dimos/visualization/rerun/bridge.py index 7df119dec..0e74bf454 100644 --- a/dimos/visualization/rerun/bridge.py +++ b/dimos/visualization/rerun/bridge.py @@ -103,6 +103,9 @@ class Config(ModuleConfig): visual_override: dict[Glob | str, Callable[[Any], Archetype]] = field(default_factory=dict) + # Static items logged 
once after start. Maps entity_path -> callable(rr) returning Archetype + static: dict[str, Callable[[Any], Archetype]] = field(default_factory=dict) + entity_prefix: str = "world" topic_to_entity: Callable[[Any], str] | None = None viewer_mode: ViewerMode = "native" @@ -221,6 +224,14 @@ def start(self) -> None: if hasattr(pubsub, "stop"): self._disposables.add(Disposable(pubsub.stop)) # type: ignore[union-attr] + self._log_static() + + def _log_static(self) -> None: + import rerun as rr + + for entity_path, factory in self.config.static.items(): + rr.log(entity_path, factory(rr), static=True) + @rpc def stop(self) -> None: super().stop() @@ -253,6 +264,10 @@ def main() -> None: pubsubs=[LCM(autoconf=True)], # custom converters for specific rerun entity paths visual_override={ + "world/camera_info": lambda camera_info: camera_info.to_rerun( + image_topic="world/color_image", + optical_frame="camera_optical", + ), "world/global_map": lambda grid: grid.to_rerun(voxel_size=0.1), "world/debug_navigation": lambda grid: grid.to_rerun( colormap="Accent", @@ -261,6 +276,12 @@ def main() -> None: background="#484981", ), }, + static={ + "world/tf/base_link/box": lambda rr: rr.Boxes3D( + half_sizes=[2.0, 2.0, 1.0], + centers=[0, 0, 0], + ) + }, ) bridge.start() From 7844b002bb99e9d4dd73ab1469ac7daf2bc7e22c Mon Sep 17 00:00:00 2001 From: Ivan Nikolic Date: Mon, 2 Feb 2026 13:54:53 +0800 Subject: [PATCH 30/36] bridge comments, box for go2 --- dimos/visualization/rerun/bridge.py | 46 ++++++++++++++++++++++++----- 1 file changed, 38 insertions(+), 8 deletions(-) diff --git a/dimos/visualization/rerun/bridge.py b/dimos/visualization/rerun/bridge.py index 0e74bf454..5d40ae775 100644 --- a/dimos/visualization/rerun/bridge.py +++ b/dimos/visualization/rerun/bridge.py @@ -75,7 +75,7 @@ def to_rerun(self) -> RerunData: ... ViewerMode = Literal["native", "web", "none"] -# Notes on this system +# TODO on plans with this system # # In the future it would be nice if modules can annotate their individual OUTs with (general or rerun specific) # hints related to their visualization @@ -93,6 +93,22 @@ def to_rerun(self) -> RerunData: ... # as well as pubsubs to specify which protocols to listen to. 
+# TODO Notes for correct TF processing +# +# TF for rerun would ideally follow rerun entity path conventions +# +# /world/robot1/base_link/camera/optical +# +# while we have decoupled entity paths and actual transforms (like ROS TF frames) +# +# tf#/world +# tf#/base_link +# tf#/camera +# +# In order to solve this, bridge needs to own it's own tf service +# and render it's tf tree into correct rerun entity paths + + @dataclass class Config(ModuleConfig): """Configuration for RerunBridgeModule.""" @@ -230,7 +246,12 @@ def _log_static(self) -> None: import rerun as rr for entity_path, factory in self.config.static.items(): - rr.log(entity_path, factory(rr), static=True) + data = factory(rr) + if isinstance(data, list): + for archetype in data: + rr.log(entity_path, archetype, static=True) + else: + rr.log(entity_path, data, static=True) @rpc def stop(self) -> None: @@ -262,10 +283,14 @@ def main() -> None: # any pubsub that supports subscribe_all and topic that supports str(topic) # is acceptable here pubsubs=[LCM(autoconf=True)], - # custom converters for specific rerun entity paths + # Custom converters for specific rerun entity paths + # Normally all these would be specified in their respectative modules + # Until this is implemented we have central overrides here + # + # This is unsustainable once we move to multi robot etc visual_override={ "world/camera_info": lambda camera_info: camera_info.to_rerun( - image_topic="world/color_image", + image_topic="/world/color_image", optical_frame="camera_optical", ), "world/global_map": lambda grid: grid.to_rerun(voxel_size=0.1), @@ -276,11 +301,16 @@ def main() -> None: background="#484981", ), }, + # slapping a go2 shaped box on top of tf/base_link static={ - "world/tf/base_link/box": lambda rr: rr.Boxes3D( - half_sizes=[2.0, 2.0, 1.0], - centers=[0, 0, 0], - ) + "world/tf/base_link": lambda rr: [ + rr.Boxes3D( + half_sizes=[0.35, 0.155, 0.2], + colors=[(0, 255, 127)], + fill_mode="wireframe", + ), + rr.Transform3D(parent_frame="tf#/base_link"), + ] }, ) From 5673c7d0134ee1735f6f2d8f5ebad552850b85dc Mon Sep 17 00:00:00 2001 From: Ivan Nikolic Date: Tue, 3 Feb 2026 11:51:20 +0800 Subject: [PATCH 31/36] small comments --- dimos/msgs/sensor_msgs/CameraInfo.py | 32 +++++++----- dimos/visualization/rerun/bridge.py | 76 ++++++++++++++++------------ 2 files changed, 62 insertions(+), 46 deletions(-) diff --git a/dimos/msgs/sensor_msgs/CameraInfo.py b/dimos/msgs/sensor_msgs/CameraInfo.py index b34e923b5..b1a597fec 100644 --- a/dimos/msgs/sensor_msgs/CameraInfo.py +++ b/dimos/msgs/sensor_msgs/CameraInfo.py @@ -407,7 +407,9 @@ def to_rerun( # TODO this should be done by the actual emitting modules, # they know the camera image topic, spatial relationships etc # - # poor CameraInfo class has no idea on this + # Poor CameraInfo class has no idea on this + # We just provide the parameters here for convenience in case your + # module doesn't implement this correctly image_topic: str | None = None, optical_frame: str | None = None, ) -> RerunData: @@ -441,19 +443,23 @@ def to_rerun( ret: RerunMulti = [] + pinhole = { + "focal_length": [fx, fy], + "principal_point": [cx, cy], + "width": self.width, + "height": self.height, + "image_plane_distance": image_plane_distance, + # This is supposed to work + # https://rerun.io/docs/reference/types/archetypes/pinhole + # But it doesn't + # + # "parent_frame": f"tf#/{optical_frame}" + # + # For now we add the transform separately below + } + # Add pinhole under world/image_topic (we know which Image this CameraInfo 
refers to)
-        ret.append(
-            (
-                image_topic,
-                rr.Pinhole(
-                    focal_length=[fx, fy],
-                    principal_point=[cx, cy],
-                    width=self.width,
-                    height=self.height,
-                    image_plane_distance=image_plane_distance,
-                ),
-            )
-        )
+        ret.append((image_topic, rr.Pinhole(**pinhole)))
 
         if not optical_frame:
             return ret
diff --git a/dimos/visualization/rerun/bridge.py b/dimos/visualization/rerun/bridge.py
index 5d40ae775..3d16270d8 100644
--- a/dimos/visualization/rerun/bridge.py
+++ b/dimos/visualization/rerun/bridge.py
@@ -38,6 +38,49 @@
 from dimos.protocol.pubsub.patterns import Glob, pattern_matches
 from dimos.utils.logging_config import setup_logger
 
+# TODO OUT visual annotations
+#
+# In the future it would be nice if modules could annotate their individual OUTs with (general or rerun specific)
+# hints related to their visualization
+#
+# so stuff like color, update frequency etc (some Image needs to be rendered on the 3d floor like occupancy grid)
+# some other image is meant to be streamed into a specific 2D view etc.
+#
+# To achieve this we'd feed a full blueprint into the rerun bridge.
+#
+# rerun bridge can then inspect all transports used, all modules with their outs,
+# automatically spy on all the transports and read visualization hints
+#
+# Temporarily we are using this "sideloading" visual_override={} dict on the bridge
+# to define custom visualizations for specific topics
+#
+# as well as pubsubs={} to specify which protocols to listen to.
+
+
+# TODO better TF processing
+#
+# this is rerun bridge specific, rerun has its own (better) way of handling TFs
+# using entity path conventions, where each node in the path is a TF frame:
+#
+# /world/robot1/base_link/camera/optical
+#
+# Here, since we are just listening to TFMessage messages, which may contain only
+# a subset of the full TF tree, we don't know the full tree structure needed to build
+# the full entity path for a transform being published
+#
+# This is easy to reconstruct, but service/tf.py already does this, so it should be integrated here
+#
+# we have decoupled entity paths and actual transforms (like ROS TF frames)
+# https://rerun.io/docs/concepts/logging-and-ingestion/transforms
+#
+# tf#/world
+# tf#/base_link
+# tf#/camera
+#
+# In order to solve this, the bridge needs to own its own tf service
+# and render its tf tree into the correct rerun entity paths
+
+
 logger = setup_logger()
 
 if TYPE_CHECKING:
@@ -75,39 +118,6 @@ def to_rerun(self) -> RerunData: ...
 ViewerMode = Literal["native", "web", "none"]
 
-# TODO on plans with this system
-#
-# In the future it would be nice if modules can annotate their individual OUTs with (general or rerun specific)
-# hints related to their visualization
-#
-# so stuff like color, update frequency etc (some Image needs to be rendered on the 3d floor like occupancy grid)
-# some other image is an image to be streamed into a specific 2D view etc.
-#
-# to achieve this we'd feed a full blueprint into the rerun bridge.
-# rerun bridge can then inspect all transports used, all modules with their outs,
-# automatically spy an all the transports and read visualization hints
-#
-# this is the correct implementation.
-#
-# temporarily we are using these "sideloading" converters={} to define custom to_rerun methods for specific topics
-# as well as pubsubs to specify which protocols to listen to.
- - -# TODO Notes for correct TF processing -# -# TF for rerun would ideally follow rerun entity path conventions -# -# /world/robot1/base_link/camera/optical -# -# while we have decoupled entity paths and actual transforms (like ROS TF frames) -# -# tf#/world -# tf#/base_link -# tf#/camera -# -# In order to solve this, bridge needs to own it's own tf service -# and render it's tf tree into correct rerun entity paths - @dataclass class Config(ModuleConfig): From 670c3d3330f86d7de22464038306a76ce4b2c1a7 Mon Sep 17 00:00:00 2001 From: Ivan Nikolic Date: Tue, 3 Feb 2026 14:28:42 +0800 Subject: [PATCH 32/36] turn off local rerun --- dimos/core/global_config.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/dimos/core/global_config.py b/dimos/core/global_config.py index 7bcbf8a6e..f1497b213 100644 --- a/dimos/core/global_config.py +++ b/dimos/core/global_config.py @@ -33,7 +33,7 @@ class GlobalConfig(BaseSettings): replay: bool = False rerun_enabled: bool = True rerun_server_addr: str | None = None - viewer_backend: ViewerBackend = "rerun" + viewer_backend: ViewerBackend = "none" n_dask_workers: int = 2 memory_limit: str = "auto" mujoco_camera_position: str | None = None From eeeea9c8b8630b5166c4bbda5821be70825f2c98 Mon Sep 17 00:00:00 2001 From: Ivan Nikolic Date: Tue, 3 Feb 2026 18:23:47 +0800 Subject: [PATCH 33/36] fix mypy type errors in CameraInfo and drake_world --- .../planning/world/drake_world.py | 2 +- dimos/msgs/sensor_msgs/CameraInfo.py | 31 +++++++++---------- 2 files changed, 16 insertions(+), 17 deletions(-) diff --git a/dimos/manipulation/planning/world/drake_world.py b/dimos/manipulation/planning/world/drake_world.py index 836d3ef4f..60cae3d99 100644 --- a/dimos/manipulation/planning/world/drake_world.py +++ b/dimos/manipulation/planning/world/drake_world.py @@ -806,7 +806,7 @@ def get_link_pose( X_WL = self._plant.EvalBodyPoseInWorld(plant_ctx, body) result = X_WL.GetAsMatrix4() - return result # type: ignore[return-value] + return result # type: ignore[no-any-return] def get_jacobian(self, ctx: Context, robot_id: WorldRobotID) -> NDArray[np.float64]: """Get geometric Jacobian (6 x n_joints). 
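
The "TODO better TF processing" comment above boils down to rebuilding full Rerun entity paths from the parent/child frame pairs carried by TFMessage. A rough standalone sketch of that reconstruction, independent of the bridge code (the frame names and the "world" root below are illustrative assumptions, not taken from the patches):

    def frame_entity_paths(edges: list[tuple[str, str]], root: str = "world") -> dict[str, str]:
        """Map each child frame to a Rerun-style entity path by walking parent links."""
        parent = {child: par for par, child in edges}

        def path(frame: str) -> str:
            # Stop at the root, or at frames whose parent we have not seen yet
            if frame == root or frame not in parent:
                return root if frame == root else f"{root}/{frame}"
            return f"{path(parent[frame])}/{frame}"

        return {child: path(child) for _, child in edges}

    # frame_entity_paths([("world", "base_link"), ("base_link", "camera")])
    # -> {"base_link": "world/base_link", "camera": "world/base_link/camera"}
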
diff --git a/dimos/msgs/sensor_msgs/CameraInfo.py b/dimos/msgs/sensor_msgs/CameraInfo.py index b1a597fec..b296e726c 100644 --- a/dimos/msgs/sensor_msgs/CameraInfo.py +++ b/dimos/msgs/sensor_msgs/CameraInfo.py @@ -443,23 +443,22 @@ def to_rerun( ret: RerunMulti = [] - pinhole = { - "focal_length": [fx, fy], - "principal_point": [cx, cy], - "width": self.width, - "height": self.height, - "image_plane_distance": image_plane_distance, - # This is supposed to work - # https://rerun.io/docs/reference/types/archetypes/pinhole - # But it doesn't - # - # "parent_frame": f"tf#/{optical_frame}" - # - # For now we add the transform separately below - } - # Add pinhole under world/image_topic (we know which Image this CameraInfo refers to) - ret.append((image_topic, rr.Pinhole(**pinhole))) + # Note: parent_frame is supposed to work according to: + # https://rerun.io/docs/reference/types/archetypes/pinhole + # But it doesn't, so we add the transform separately below + ret.append( + ( + image_topic, + rr.Pinhole( + focal_length=[fx, fy], + principal_point=[cx, cy], + width=self.width, + height=self.height, + image_plane_distance=image_plane_distance, + ), + ) + ) if not optical_frame: return ret From 9658646c7b3ef6d907b6fa43537814ae811afb8b Mon Sep 17 00:00:00 2001 From: Ivan Nikolic Date: Thu, 5 Feb 2026 19:25:52 +0800 Subject: [PATCH 34/36] global config singleton, web view works --- dimos/core/base_blueprints.py | 4 +- dimos/core/blueprints.py | 13 ++-- dimos/core/global_config.py | 19 ++++- dimos/core/module_coordinator.py | 4 +- dimos/hardware/sensors/camera/module.py | 4 +- dimos/mapping/costmapper.py | 4 +- dimos/mapping/voxels.py | 4 +- dimos/navigation/replanning_a_star/module.py | 4 +- dimos/robot/cli/dimos.py | 7 +- dimos/robot/unitree/connection/g1.py | 4 +- dimos/robot/unitree/connection/g1sim.py | 4 +- dimos/robot/unitree/connection/go2.py | 4 +- dimos/robot/unitree_webrtc/type/map.py | 4 +- .../unitree_webrtc/unitree_go2_blueprints.py | 77 ++++++++++++++++++- dimos/visualization/rerun/bridge.py | 33 +++++++- .../web/websocket_vis/websocket_vis_module.py | 4 +- 16 files changed, 154 insertions(+), 39 deletions(-) diff --git a/dimos/core/base_blueprints.py b/dimos/core/base_blueprints.py index 249cfc191..d666ede03 100644 --- a/dimos/core/base_blueprints.py +++ b/dimos/core/base_blueprints.py @@ -25,9 +25,9 @@ def base_blueprint() -> Blueprint: - from dimos.core.global_config import GlobalConfig + from dimos.core.global_config import globalconfig - _config = GlobalConfig() + _config = globalconfig # Mac has some issue with high bandwidth UDP, so we use pSHMTransport for color_image # actually we can use pSHMTransport for all platforms, and for all streams diff --git a/dimos/core/blueprints.py b/dimos/core/blueprints.py index 4c1558cfe..862a2a021 100644 --- a/dimos/core/blueprints.py +++ b/dimos/core/blueprints.py @@ -23,7 +23,7 @@ from types import MappingProxyType from typing import Any, Literal, get_args, get_origin, get_type_hints -from dimos.core.global_config import GlobalConfig +from dimos.core.global_config import GlobalConfig, globalconfig from dimos.core.module import Module, is_module_type from dimos.core.module_coordinator import ModuleCoordinator from dimos.core.stream import In, Out @@ -455,23 +455,20 @@ def _connect_rpc_methods(self, module_coordinator: ModuleCoordinator) -> None: def build( self, - global_config: GlobalConfig | None = None, cli_config_overrides: Mapping[str, Any] | None = None, ) -> ModuleCoordinator: - if global_config is None: - global_config = GlobalConfig() 
- global_config = global_config.model_copy(update=dict(self.global_config_overrides)) + globalconfig.update(**dict(self.global_config_overrides)) if cli_config_overrides: - global_config = global_config.model_copy(update=dict(cli_config_overrides)) + globalconfig.update(**dict(cli_config_overrides)) self._check_requirements() self._verify_no_name_conflicts() - module_coordinator = ModuleCoordinator(global_config=global_config) + module_coordinator = ModuleCoordinator(global_config=globalconfig) module_coordinator.start() # all module constructors are called here (each of them setup their own) - self._deploy_all_modules(module_coordinator, global_config) + self._deploy_all_modules(module_coordinator, globalconfig) self._connect_streams(module_coordinator) self._connect_rpc_methods(module_coordinator) self._connect_module_refs(module_coordinator) diff --git a/dimos/core/global_config.py b/dimos/core/global_config.py index f1497b213..38c15897e 100644 --- a/dimos/core/global_config.py +++ b/dimos/core/global_config.py @@ -33,7 +33,7 @@ class GlobalConfig(BaseSettings): replay: bool = False rerun_enabled: bool = True rerun_server_addr: str | None = None - viewer_backend: ViewerBackend = "none" + viewer_backend: ViewerBackend = "rerun" n_dask_workers: int = 2 memory_limit: str = "auto" mujoco_camera_position: str | None = None @@ -54,9 +54,21 @@ class GlobalConfig(BaseSettings): env_file=".env", env_file_encoding="utf-8", extra="ignore", - frozen=True, ) + def update(self, **kwargs: object) -> None: + """Update config fields in place and invalidate cached properties.""" + for key, value in kwargs.items(): + if not hasattr(self, key): + raise AttributeError(f"GlobalConfig has no field '{key}'") + setattr(self, key, value) + for prop in ( + "unitree_connection_type", + "mujoco_start_pos_float", + "mujoco_camera_position_float", + ): + self.__dict__.pop(prop, None) + @cached_property def unitree_connection_type(self) -> str: if self.replay: @@ -75,3 +87,6 @@ def mujoco_camera_position_float(self) -> tuple[float, ...]: if self.mujoco_camera_position is None: return (-0.906, 0.008, 1.101, 4.931, 89.749, -46.378) return tuple(_get_all_numbers(self.mujoco_camera_position)) + + +globalconfig = GlobalConfig() diff --git a/dimos/core/module_coordinator.py b/dimos/core/module_coordinator.py index 573414e62..6699c3e36 100644 --- a/dimos/core/module_coordinator.py +++ b/dimos/core/module_coordinator.py @@ -18,7 +18,7 @@ from dimos import core from dimos.core import DimosCluster -from dimos.core.global_config import GlobalConfig +from dimos.core.global_config import GlobalConfig, globalconfig from dimos.core.module import Module, ModuleT from dimos.core.resource import Resource from dimos.core.worker_manager import WorkerManager @@ -39,7 +39,7 @@ def __init__( n: int | None = None, global_config: GlobalConfig | None = None, ) -> None: - cfg = global_config or GlobalConfig() + cfg = global_config or globalconfig self._n = n if n is not None else cfg.n_dask_workers self._memory_limit = cfg.memory_limit self._global_config = cfg diff --git a/dimos/hardware/sensors/camera/module.py b/dimos/hardware/sensors/camera/module.py index 639c1e045..9538dc90d 100644 --- a/dimos/hardware/sensors/camera/module.py +++ b/dimos/hardware/sensors/camera/module.py @@ -23,7 +23,7 @@ from dimos.agents import Output, Reducer, Stream, skill from dimos.core import Module, ModuleConfig, Out, rpc from dimos.core.blueprints import autoconnect -from dimos.core.global_config import GlobalConfig +from dimos.core.global_config import 
GlobalConfig, globalconfig from dimos.hardware.sensors.camera.spec import CameraHardware from dimos.hardware.sensors.camera.webcam import Webcam from dimos.msgs.geometry_msgs import Quaternion, Transform, Vector3 @@ -63,7 +63,7 @@ class CameraModule(Module[CameraModuleConfig], perception.Camera): def __init__( self, *args: Any, global_config: GlobalConfig | None = None, **kwargs: Any ) -> None: - self._global_config = global_config or GlobalConfig() + self._global_config = global_config or globalconfig super().__init__(*args, **kwargs) @rpc diff --git a/dimos/mapping/costmapper.py b/dimos/mapping/costmapper.py index 287caf290..775680a32 100644 --- a/dimos/mapping/costmapper.py +++ b/dimos/mapping/costmapper.py @@ -18,7 +18,7 @@ from reactivex import operators as ops from dimos.core import In, Module, Out, rpc -from dimos.core.global_config import GlobalConfig +from dimos.core.global_config import GlobalConfig, globalconfig from dimos.core.module import ModuleConfig from dimos.mapping.pointclouds.occupancy import ( OCCUPANCY_ALGOS, @@ -47,7 +47,7 @@ class CostMapper(Module): def __init__(self, global_config: GlobalConfig | None = None, **kwargs: object) -> None: super().__init__(**kwargs) - self._global_config = global_config or GlobalConfig() + self._global_config = global_config or globalconfig @rpc def start(self) -> None: diff --git a/dimos/mapping/voxels.py b/dimos/mapping/voxels.py index 4147f9050..2eac7d86a 100644 --- a/dimos/mapping/voxels.py +++ b/dimos/mapping/voxels.py @@ -23,7 +23,7 @@ from reactivex.subject import Subject from dimos.core import In, Module, Out, rpc -from dimos.core.global_config import GlobalConfig +from dimos.core.global_config import GlobalConfig, globalconfig from dimos.core.module import ModuleConfig from dimos.msgs.sensor_msgs import PointCloud2 from dimos.utils.decorators import simple_mcache @@ -53,7 +53,7 @@ class VoxelGridMapper(Module): def __init__(self, global_config: GlobalConfig | None = None, **kwargs: object) -> None: super().__init__(**kwargs) - self._global_config = global_config or GlobalConfig() + self._global_config = global_config or globalconfig dev = ( o3c.Device(self.config.device) diff --git a/dimos/navigation/replanning_a_star/module.py b/dimos/navigation/replanning_a_star/module.py index afba27915..43aa6914c 100644 --- a/dimos/navigation/replanning_a_star/module.py +++ b/dimos/navigation/replanning_a_star/module.py @@ -18,7 +18,7 @@ from reactivex.disposable import Disposable from dimos.core import In, Module, Out, rpc -from dimos.core.global_config import GlobalConfig +from dimos.core.global_config import GlobalConfig, globalconfig from dimos.msgs.geometry_msgs import PoseStamped, Twist from dimos.msgs.nav_msgs import OccupancyGrid, Path from dimos.navigation.base import NavigationInterface, NavigationState @@ -42,7 +42,7 @@ class ReplanningAStarPlanner(Module, NavigationInterface): def __init__(self, global_config: GlobalConfig | None = None) -> None: super().__init__() - self._global_config = global_config or GlobalConfig() + self._global_config = global_config or globalconfig self._planner = GlobalPlanner(self._global_config) @rpc diff --git a/dimos/robot/cli/dimos.py b/dimos/robot/cli/dimos.py index 5dad9d10a..5552aaa0b 100644 --- a/dimos/robot/cli/dimos.py +++ b/dimos/robot/cli/dimos.py @@ -20,7 +20,7 @@ import typer from dimos.core.blueprints import autoconnect -from dimos.core.global_config import GlobalConfig +from dimos.core.global_config import GlobalConfig, globalconfig from dimos.protocol import pubsub from 
dimos.robot.all_blueprints import all_blueprints from dimos.robot.cli.topic import topic_echo, topic_send @@ -113,6 +113,7 @@ def run( setup_exception_handler() cli_config_overrides: dict[str, Any] = ctx.obj + globalconfig.update(**cli_config_overrides) pubsub.lcm.autoconf() # type: ignore[attr-defined] blueprint = get_blueprint_by_name(robot_type.value) @@ -128,9 +129,9 @@ def run( def show_config(ctx: typer.Context) -> None: """Show current config settings and their values.""" cli_config_overrides: dict[str, Any] = ctx.obj - config = GlobalConfig().model_copy(update=cli_config_overrides) + globalconfig.update(**cli_config_overrides) - for field_name, value in config.model_dump().items(): + for field_name, value in globalconfig.model_dump().items(): typer.echo(f"{field_name}: {value}") diff --git a/dimos/robot/unitree/connection/g1.py b/dimos/robot/unitree/connection/g1.py index 1e1580914..3bc7996c1 100644 --- a/dimos/robot/unitree/connection/g1.py +++ b/dimos/robot/unitree/connection/g1.py @@ -19,7 +19,7 @@ from dimos import spec from dimos.core import DimosCluster, In, Module, rpc -from dimos.core.global_config import GlobalConfig +from dimos.core.global_config import GlobalConfig, globalconfig from dimos.msgs.geometry_msgs import Twist from dimos.robot.unitree.connection.connection import UnitreeWebRTCConnection from dimos.utils.logging_config import setup_logger @@ -43,7 +43,7 @@ def __init__( *args: Any, **kwargs: Any, ) -> None: - self._global_config = global_config or GlobalConfig() + self._global_config = global_config or globalconfig self.ip = ip if ip is not None else self._global_config.robot_ip self.connection_type = connection_type or self._global_config.unitree_connection_type self.connection = None diff --git a/dimos/robot/unitree/connection/g1sim.py b/dimos/robot/unitree/connection/g1sim.py index 196b11f21..df53d7f8b 100644 --- a/dimos/robot/unitree/connection/g1sim.py +++ b/dimos/robot/unitree/connection/g1sim.py @@ -19,7 +19,7 @@ from reactivex.disposable import Disposable from dimos.core import In, Module, Out, rpc -from dimos.core.global_config import GlobalConfig +from dimos.core.global_config import GlobalConfig, globalconfig from dimos.msgs.geometry_msgs import ( PoseStamped, Quaternion, @@ -51,7 +51,7 @@ def __init__( *args: Any, **kwargs: Any, ) -> None: - self._global_config = global_config or GlobalConfig() + self._global_config = global_config or globalconfig self.ip = ip if ip is not None else self._global_config.robot_ip self.connection: MujocoConnection | None = None super().__init__(*args, **kwargs) diff --git a/dimos/robot/unitree/connection/go2.py b/dimos/robot/unitree/connection/go2.py index 3f99c7ca2..cb397acf4 100644 --- a/dimos/robot/unitree/connection/go2.py +++ b/dimos/robot/unitree/connection/go2.py @@ -23,7 +23,7 @@ from dimos import spec from dimos.core import DimosCluster, In, LCMTransport, Module, Out, pSHMTransport, rpc -from dimos.core.global_config import GlobalConfig +from dimos.core.global_config import GlobalConfig, globalconfig from dimos.core.skill_module import SkillModule from dimos.msgs.geometry_msgs import ( PoseStamped, @@ -173,7 +173,7 @@ def __init__( # type: ignore[no-untyped-def] *args, **kwargs, ) -> None: - self._global_config = global_config or GlobalConfig() + self._global_config = global_config or globalconfig ip = ip if ip is not None else self._global_config.robot_ip diff --git a/dimos/robot/unitree_webrtc/type/map.py b/dimos/robot/unitree_webrtc/type/map.py index f9abd96b8..988e9d8e1 100644 --- 
a/dimos/robot/unitree_webrtc/type/map.py +++ b/dimos/robot/unitree_webrtc/type/map.py @@ -21,7 +21,7 @@ from reactivex.disposable import Disposable from dimos.core import DimosCluster, In, LCMTransport, Module, Out, rpc -from dimos.core.global_config import GlobalConfig +from dimos.core.global_config import GlobalConfig, globalconfig from dimos.mapping.pointclouds.accumulators.general import GeneralPointCloudAccumulator from dimos.mapping.pointclouds.accumulators.protocol import PointCloudAccumulator from dimos.mapping.pointclouds.occupancy import general_occupancy @@ -54,7 +54,7 @@ def __init__( # type: ignore[no-untyped-def] self.global_publish_interval = global_publish_interval self.min_height = min_height self.max_height = max_height - self._global_config = global_config or GlobalConfig() + self._global_config = global_config or globalconfig self._point_cloud_accumulator = GeneralPointCloudAccumulator( self.voxel_size, self._global_config ) diff --git a/dimos/robot/unitree_webrtc/unitree_go2_blueprints.py b/dimos/robot/unitree_webrtc/unitree_go2_blueprints.py index 1ea3562a5..d693677bf 100644 --- a/dimos/robot/unitree_webrtc/unitree_go2_blueprints.py +++ b/dimos/robot/unitree_webrtc/unitree_go2_blueprints.py @@ -15,6 +15,8 @@ # limitations under the License. +import platform + from dimos_lcm.foxglove_msgs.ImageAnnotations import ( ImageAnnotations, # type: ignore[import-untyped] ) @@ -30,12 +32,14 @@ from dimos.agents.spec import Provider from dimos.agents.vlm_agent import vlm_agent from dimos.agents.vlm_stream_tester import vlm_stream_tester -from dimos.core.base_blueprints import base_blueprint +from dimos.constants import DEFAULT_CAPACITY_COLOR_IMAGE from dimos.core.blueprints import autoconnect +from dimos.core.global_config import globalconfig from dimos.core.transport import ( JpegLcmTransport, LCMTransport, ROSTransport, + pSHMTransport, ) from dimos.mapping.costmapper import cost_mapper from dimos.mapping.voxels import voxel_mapper @@ -52,13 +56,82 @@ from dimos.perception.experimental.temporal_memory import temporal_memory from dimos.perception.spatial_perception import spatial_memory from dimos.protocol.mcp.mcp import MCPModule +from dimos.protocol.pubsub.impl.lcmpubsub import LCM from dimos.robot.unitree.connection.go2 import GO2Connection, go2_connection from dimos.robot.unitree_webrtc.unitree_skill_container import unitree_skills from dimos.utils.monitoring import utilization from dimos.web.websocket_vis.websocket_vis_module import websocket_vis +# Mac has some issue with high bandwidth UDP, so we use pSHMTransport for color_image +# actually we can use pSHMTransport for all platforms, and for all streams +# TODO need a global transport toggle on blueprints/global config +mac_transports: dict[tuple[str, type], pSHMTransport[Image]] = { + ("color_image", Image): pSHMTransport( + "color_image", default_capacity=DEFAULT_CAPACITY_COLOR_IMAGE + ), +} + +base = autoconnect() if platform.system() == "Linux" else autoconnect().transports(mac_transports) + + +rerun_config = { + # any pubsub that supports subscribe_all and topic that supports str(topic) + # is acceptable here + "pubsubs": [LCM(autoconf=True)], + # Custom converters for specific rerun entity paths + # Normally all these would be specified in their respectative modules + # Until this is implemented we have central overrides here + # + # This is unsustainable once we move to multi robot etc + "visual_override": { + "world/camera_info": lambda camera_info: camera_info.to_rerun( + image_topic="/world/color_image", + 
optical_frame="camera_optical", + ), + "world/global_map": lambda grid: grid.to_rerun(voxel_size=0.1), + "world/debug_navigation": lambda grid: grid.to_rerun( + colormap="Accent", + z_offset=0.015, + opacity=0.2, + background="#484981", + ), + }, + # slapping a go2 shaped box on top of tf/base_link + "static": { + "world/tf/base_link": lambda rr: [ + rr.Boxes3D( + half_sizes=[0.35, 0.155, 0.2], + colors=[(0, 255, 127)], + fill_mode="wireframe", + ), + rr.Transform3D(parent_frame="tf#/base_link"), + ] + }, +} + + +match globalconfig.viewer_backend: + case "foxglove": + from dimos.robot.foxglove_bridge import foxglove_bridge + + with_vis = autoconnect( + base, + foxglove_bridge(shm_channels=["/color_image#sensor_msgs.Image"]), + ) + case "rerun": + from dimos.visualization.rerun.bridge import rerun_bridge + + with_vis = autoconnect(base, rerun_bridge(**rerun_config)) + case "rerun-web": + from dimos.visualization.rerun.bridge import rerun_bridge + + with_vis = autoconnect(base, rerun_bridge(viewer_mode="web", **rerun_config)) + case _: + with_vis = base + + unitree_go2_basic = autoconnect( - base_blueprint(), + with_vis, go2_connection(), websocket_vis(), ).global_config(n_dask_workers=4, robot_model="unitree_go2") diff --git a/dimos/visualization/rerun/bridge.py b/dimos/visualization/rerun/bridge.py index 3d16270d8..0a6ed6fe4 100644 --- a/dimos/visualization/rerun/bridge.py +++ b/dimos/visualization/rerun/bridge.py @@ -85,11 +85,15 @@ if TYPE_CHECKING: from collections.abc import Callable + from types import ModuleType from rerun._baseclasses import Archetype + from rerun.blueprint import Blueprint from dimos.protocol.pubsub.spec import SubscribeAllCapable +BlueprintFactory: TypeAlias = "Callable[[ModuleType], Blueprint]" + # to_rerun() can return a single archetype or a list of (entity_path, archetype) tuples RerunMulti: TypeAlias = "list[tuple[str, Archetype]]" RerunData: TypeAlias = "Archetype | RerunMulti" @@ -119,6 +123,21 @@ def to_rerun(self) -> RerunData: ... ViewerMode = Literal["native", "web", "none"] +def _default_blueprint(rrb: ModuleType) -> Blueprint: + """Default blueprint with black background and raised grid.""" + import rerun as rr + + return rrb.Blueprint( + rrb.Spatial3DView( + origin="world", + background=rrb.Background(kind="SolidColor", color=[0, 0, 0]), + line_grid=rrb.LineGrid3D( + plane=rr.components.Plane3D.XY.with_distance(0.2), + ), + ), + ) + + @dataclass class Config(ModuleConfig): """Configuration for RerunBridgeModule.""" @@ -137,6 +156,10 @@ class Config(ModuleConfig): viewer_mode: ViewerMode = "native" memory_limit: str = "25%" + # Blueprint factory: callable(rrb) -> Blueprint for viewer layout configuration + # Set to None to disable default blueprint + blueprint: BlueprintFactory | None = _default_blueprint + class RerunBridgeModule(Module): """Bridge that logs messages from pubsubs to Rerun. 
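
For illustration, the Config.blueprint hook added here can be pointed at a custom layout factory instead of _default_blueprint. A minimal sketch under stated assumptions: the split 2D/3D layout is invented for the example, rerun_bridge() is assumed to pass blueprint= through to Config like viewer_mode above, and note that patch 36/36 later in the series changes BlueprintFactory to a no-argument callable:

    from types import ModuleType


    def split_view(rrb: ModuleType):  # receives rerun.blueprint, per this patch's BlueprintFactory
        return rrb.Blueprint(
            rrb.Horizontal(
                rrb.Spatial3DView(origin="world"),
                rrb.Spatial2DView(origin="world/color_image"),
            )
        )


    # e.g. autoconnect(base, rerun_bridge(viewer_mode="web", blueprint=split_view, **rerun_config))
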
@@ -230,13 +253,19 @@ def start(self) -> None: super().start() # Initialize and spawn Rerun viewer - rr.init("dimos-bridge") + rr.init("dimos") if self.config.viewer_mode == "native": rr.spawn(connect=True, memory_limit=self.config.memory_limit) elif self.config.viewer_mode == "web": - rr.serve_web_viewer(open_browser=True) + server_uri = rr.serve_grpc() + rr.serve_web_viewer(connect_to=server_uri, open_browser=False) # "none" - just init, no viewer (connect externally) + if self.config.blueprint: + import rerun.blueprint as rrb + + rr.send_blueprint(self.config.blueprint(rrb)) + # Start pubsubs and subscribe to all messages for pubsub in self.config.pubsubs: logger.info(f"bridge listening on {pubsub.__class__.__name__}") diff --git a/dimos/web/websocket_vis/websocket_vis_module.py b/dimos/web/websocket_vis/websocket_vis_module.py index 3ae3d5c4f..59f6a8e71 100644 --- a/dimos/web/websocket_vis/websocket_vis_module.py +++ b/dimos/web/websocket_vis/websocket_vis_module.py @@ -46,7 +46,7 @@ ) from dimos.core import In, Module, Out, rpc -from dimos.core.global_config import GlobalConfig +from dimos.core.global_config import GlobalConfig, globalconfig from dimos.mapping.occupancy.gradient import gradient from dimos.mapping.occupancy.inflation import simple_inflate from dimos.mapping.types import LatLon @@ -108,7 +108,7 @@ def __init__( global_config: Optional global config for viewer backend settings """ super().__init__(**kwargs) - self._global_config = global_config or GlobalConfig() + self._global_config = global_config or globalconfig self.port = port self._uvicorn_server_thread: threading.Thread | None = None From f4f8865b5592bfaad18aa652dd9238821ca3ba26 Mon Sep 17 00:00:00 2001 From: Ivan Nikolic Date: Thu, 5 Feb 2026 19:26:16 +0800 Subject: [PATCH 35/36] deleted base_blueprints --- dimos/core/base_blueprints.py | 59 ----------------------------------- 1 file changed, 59 deletions(-) delete mode 100644 dimos/core/base_blueprints.py diff --git a/dimos/core/base_blueprints.py b/dimos/core/base_blueprints.py deleted file mode 100644 index d666ede03..000000000 --- a/dimos/core/base_blueprints.py +++ /dev/null @@ -1,59 +0,0 @@ -# Copyright 2026 Dimensional Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -import platform - -from dimos.constants import DEFAULT_CAPACITY_COLOR_IMAGE -from dimos.core.blueprints import Blueprint, autoconnect -from dimos.core.transport import ( - pSHMTransport, -) -from dimos.msgs.sensor_msgs import Image -from dimos.robot.foxglove_bridge import foxglove_bridge -from dimos.visualization.rerun.bridge import rerun_bridge - - -def base_blueprint() -> Blueprint: - from dimos.core.global_config import globalconfig - - _config = globalconfig - - # Mac has some issue with high bandwidth UDP, so we use pSHMTransport for color_image - # actually we can use pSHMTransport for all platforms, and for all streams - # TODO need a global transport toggle on blueprints/global config - mac_transports: dict[tuple[str, type], pSHMTransport[Image]] = { - ("color_image", Image): pSHMTransport( - "color_image", default_capacity=DEFAULT_CAPACITY_COLOR_IMAGE - ), - } - - base = ( - autoconnect() if platform.system() == "Linux" else autoconnect().transports(mac_transports) - ) - - with_vis = None - - if _config.viewer_backend == "foxglove": - with_vis = autoconnect( - base, - foxglove_bridge(shm_channels=["/color_image#sensor_msgs.Image"]), - ) - elif _config.viewer_backend == "rerun": - with_vis = autoconnect(base, rerun_bridge()) - elif _config.viewer_backend == "rerun-web": - with_vis = autoconnect(base, rerun_bridge(viewer_mode="web")) - else: - with_vis = base - - return with_vis From ac110f66cd99859bbb514a7c930a6529580a2b65 Mon Sep 17 00:00:00 2001 From: Ivan Nikolic Date: Thu, 5 Feb 2026 19:36:47 +0800 Subject: [PATCH 36/36] type fixes --- dimos/manipulation/planning/world/drake_world.py | 2 +- dimos/visualization/rerun/bridge.py | 12 +++++------- 2 files changed, 6 insertions(+), 8 deletions(-) diff --git a/dimos/manipulation/planning/world/drake_world.py b/dimos/manipulation/planning/world/drake_world.py index 60cae3d99..533919634 100644 --- a/dimos/manipulation/planning/world/drake_world.py +++ b/dimos/manipulation/planning/world/drake_world.py @@ -806,7 +806,7 @@ def get_link_pose( X_WL = self._plant.EvalBodyPoseInWorld(plant_ctx, body) result = X_WL.GetAsMatrix4() - return result # type: ignore[no-any-return] + return result # type: ignore[no-any-return, return-value] def get_jacobian(self, ctx: Context, robot_id: WorldRobotID) -> NDArray[np.float64]: """Get geometric Jacobian (6 x n_joints). diff --git a/dimos/visualization/rerun/bridge.py b/dimos/visualization/rerun/bridge.py index 0a6ed6fe4..82706bced 100644 --- a/dimos/visualization/rerun/bridge.py +++ b/dimos/visualization/rerun/bridge.py @@ -85,14 +85,13 @@ if TYPE_CHECKING: from collections.abc import Callable - from types import ModuleType from rerun._baseclasses import Archetype from rerun.blueprint import Blueprint from dimos.protocol.pubsub.spec import SubscribeAllCapable -BlueprintFactory: TypeAlias = "Callable[[ModuleType], Blueprint]" +BlueprintFactory: TypeAlias = "Callable[[], Blueprint]" # to_rerun() can return a single archetype or a list of (entity_path, archetype) tuples RerunMulti: TypeAlias = "list[tuple[str, Archetype]]" @@ -123,11 +122,12 @@ def to_rerun(self) -> RerunData: ... 
ViewerMode = Literal["native", "web", "none"] -def _default_blueprint(rrb: ModuleType) -> Blueprint: +def _default_blueprint() -> Blueprint: """Default blueprint with black background and raised grid.""" import rerun as rr + import rerun.blueprint as rrb - return rrb.Blueprint( + return rrb.Blueprint( # type: ignore[no-any-return] rrb.Spatial3DView( origin="world", background=rrb.Background(kind="SolidColor", color=[0, 0, 0]), @@ -262,9 +262,7 @@ def start(self) -> None: # "none" - just init, no viewer (connect externally) if self.config.blueprint: - import rerun.blueprint as rrb - - rr.send_blueprint(self.config.blueprint(rrb)) + rr.send_blueprint(self.config.blueprint()) # Start pubsubs and subscribe to all messages for pubsub in self.config.pubsubs:
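
For reference, the convention the bridge relies on throughout this series: a message type only needs a to_rerun() that returns either a single archetype or a list of (entity_path, archetype) pairs (RerunMulti), and the bridge logs whatever arrives on the subscribed pubsubs. A sketch with a hypothetical message type (GoalMarker is not part of these patches):

    from dataclasses import dataclass

    import rerun as rr


    @dataclass
    class GoalMarker:  # hypothetical message type, for illustration only
        x: float
        y: float
        z: float
        label: str

        def to_rerun(self) -> list[tuple[str, object]]:
            # RerunMulti form: explicit (entity_path, archetype) pairs,
            # logged one by one by the bridge
            return [
                ("world/goal", rr.Points3D([[self.x, self.y, self.z]], radii=0.1)),
                ("logs/goal", rr.TextLog(f"new goal: {self.label}")),
            ]
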