diff --git a/config/test-realsense-D400.json b/config/test-realsense-D400.json
index 867d9e2b6..c793b7b88 100644
--- a/config/test-realsense-D400.json
+++ b/config/test-realsense-D400.json
@@ -10,7 +10,9 @@
             "depth_rgb": true,
             "depth_resolution": [640, 360],
             "rgb_resolution": [640, 360],
-            "depth_fps": 30
+            "depth_fps": 30,
+            "rgbd": true,
+            "img_format": "*.jpeg"
           }
       }
     },
diff --git a/config/test-rs-multicam.json b/config/test-rs-multicam.json
new file mode 100644
index 000000000..7016bfb83
--- /dev/null
+++ b/config/test-rs-multicam.json
@@ -0,0 +1,24 @@
+{
+  "version": 2,
+  "robot": {
+    "modules": {
+      "app": {
+          "driver": "realsense_multicam",
+          "init": {
+            "devices": [{"type": "D400", "serial_number": "920312071702", "name": "D435",
+                         "depth_resolution": [640, 360], "rgb_resolution": [640, 360], "depth_fps": 30},
+
+                        {"type": "D400", "serial_number": "934222071502", "name": "D435_B",
+                         "depth_resolution": [1280, 720], "rgb_resolution": [1280, 720], "depth_fps": 6}
+                        ],
+            "depth_subsample": 3,
+            "depth_rgb": true,
+            "depth_infra": true,
+            "rgbd": false,
+            "img_format": "*.png"
+          }
+      }
+    },
+    "links": []
+  }
+}
diff --git a/osgar/drivers/realsense.py b/osgar/drivers/realsense.py
index 2def64135..895d2b2bd 100644
--- a/osgar/drivers/realsense.py
+++ b/osgar/drivers/realsense.py
@@ -26,6 +26,7 @@
 from osgar.node import Node
 from osgar.lib import quaternion
+from osgar.lib.serialize import serialize


 # https://github.com/IntelRealSense/librealsense/blob/master/doc/t265.md#sensor-origin-and-coordinate-system
@@ -58,7 +59,6 @@ def __init__(self, config, bus):
             self.tracking_sensor_yaw_quat = euler_to_quaternion(
                     math.radians(config.get('tracking_sensor_yaw', 0)), 0, 0)
         elif self.device in ['D400', 'L500']:
-            bus.register('depth:gz', 'color', 'infra')
             self.depth_subsample = config.get("depth_subsample", 3)
             self.depth_rgb = config.get("depth_rgb", False)
             self.depth_infra = config.get("depth_infra", False)
@@ -66,10 +66,18 @@ def __init__(self, config, bus):
             self.default_rgb_resolution = config.get("rgb_resolution", [640, 360])
             self.depth_fps = config.get("depth_fps", 30)
             self.disable_emitor = config.get("disable_emitor", False)
+            camera_pose = config.get("camera_pose", [[0, 0, 0], [0, 0, 0, 1]])
+            self.img_format = config.get("img_format", "*.jpeg")
             if self.depth_rgb or self.depth_infra:
                 import cv2
                 global cv2
+
+            self.rgbd = config.get("rgbd", False)
+            if self.rgbd:
+                assert self.depth_rgb, self.depth_rgb
+                self.rgbd_bundle = {"camera_pose": camera_pose}  # TODO add meta data
+            bus.register('depth:gz', 'color', 'infra', 'rgbd_raw:gz')
         else:
             g_logger.warning("Device is not specified in the config!")
@@ -116,7 +124,12 @@ def pose_callback(self, frame):
     def depth_callback(self, frameset):
         try:
             assert frameset.is_frameset()
-            depth_frame = frameset.as_frameset().get_depth_frame()
+            if self.rgbd:
+                frameset_as = self.align.process(frameset.as_frameset())
+            else:
+                frameset_as = frameset.as_frameset()
+            depth_frame = frameset_as.get_depth_frame()
+
             n = depth_frame.get_frame_number()
             if n % self.depth_subsample != 0:
                 return
@@ -124,25 +137,29 @@ def depth_callback(self, frameset):
             depth_image = np.asanyarray(depth_frame.as_depth_frame().get_data())

             if self.depth_rgb:
-                color_frame = frameset.as_frameset().get_color_frame()
+                color_frame = frameset_as.get_color_frame()
                 assert color_frame.is_video_frame()
                 color_image = np.asanyarray(color_frame.as_video_frame().get_data())
-                __, data = cv2.imencode('*.jpeg', color_image)
+                __, data = cv2.imencode(self.img_format, color_image)

             if self.depth_infra:
-                infra_frame = frameset.as_frameset().get_infrared_frame()
+                infra_frame = frameset_as.get_infrared_frame()
                 assert infra_frame.is_video_frame()
                 infra_image = np.asanyarray(infra_frame.as_video_frame().get_data())
-                __, infra_data = cv2.imencode('*.jpeg', infra_image)
+                __, infra_data = cv2.imencode(self.img_format, infra_image)
         except Exception as e:
             print(e)
             self.finished.set()
             return

-        self.publish('depth', depth_image)
-        if self.depth_rgb:
-            self.publish('color', data.tobytes())
+        if self.rgbd:
+            self.rgbd_bundle["data"] = [data.tobytes(), serialize(depth_image)]
+            self.publish('rgbd_raw', self.rgbd_bundle)
+        else:
+            self.publish('depth', depth_image)
+            if self.depth_rgb:
+                self.publish('color', data.tobytes())
         if self.depth_infra:
             self.publish('infra', infra_data.tobytes())
@@ -154,6 +171,8 @@ def start(self):
         info_msg = "Enabling streams: depth"
         if self.depth_rgb:
             info_msg += ", depth_rgb"
+            if self.rgbd:
+                info_msg += " (rgbd allowed)"
         if self.depth_infra:
             info_msg += ", depth_infra"
         g_logger.info(info_msg)
@@ -167,6 +186,9 @@ def start(self):
         if self.depth_rgb:
             w, h = self.default_rgb_resolution
             depth_cfg.enable_stream(rs.stream.color, w, h, rs.format.bgr8, self.depth_fps)
+            if self.rgbd:
+                align_to = rs.stream.color
+                self.align = rs.align(align_to)
         if self.depth_infra:
             w, h = self.default_depth_resolution
             depth_cfg.enable_stream(rs.stream.infrared, w, h, rs.format.y8, self.depth_fps)
@@ -230,12 +252,14 @@ def __init__(self, config, bus):
         self.depth_subsample = config.get('depth_subsample', 3)
         self.depth_rgb = config.get('depth_rgb', False)
         self.depth_infra = config.get('depth_infra', False)
-        self.default_depth_resolution = config.get('depth_resolution', [640, 360])
-        self.default_rgb_resolution = config.get('rgb_resolution', [640, 360])
-        self.depth_fps = config.get('depth_fps', 30)
         self.pose_subsample = config.get('pose_subsample', 20)
         self.tracking_sensor_pitch_quat_inv = {}
         self.tracking_sensor_yaw_quat = {}
+        self.camera_pose = config.get("camera_pose", [[0, 0, 0], [0, 0, 0, 1]])
+        self.img_format = config.get("img_format", "*.jpeg")
+        self.rgbd = config.get("rgbd", False)
+        if self.rgbd:
+            assert self.depth_rgb, self.depth_rgb

         streams = []
         for device in self.devices:
@@ -254,7 +278,11 @@ def __init__(self, config, bus):
                 self.tracking_sensor_yaw_quat[name] = euler_to_quaternion(
                         math.radians(device.get('yaw', 0)), 0, 0)
             elif device_type in ['D400', 'L500']:
-                for stream in ['depth:gz', 'color', 'infra']:
+                if self.rgbd:
+                    streams_to_register = ['rgbd_raw:gz', 'infra']
+                else:
+                    streams_to_register = ['depth:gz', 'color', 'infra']
+                for stream in streams_to_register:
                     streams.append(f'{name}_{stream}')

         if self.depth_rgb or self.depth_infra:
@@ -315,8 +343,13 @@ def pose_callback(self, frame):
     def depth_callback(self, frameset):
         try:
             assert frameset.is_frameset()
-            depth_frame = frameset.as_frameset().get_depth_frame()
-            profile = depth_frame.get_profile()
+            if self.rgbd:
+                frameset_as = self.align.process(frameset.as_frameset())
+            else:
+                frameset_as = frameset.as_frameset()
+            depth_frame = frameset_as.get_depth_frame()
+
+            profile = frameset.get_profile()
             with self.origins_lock:
                 try:
                     name = self.origins[profile.unique_id()]
@@ -330,25 +363,29 @@ def depth_callback(self, frameset):
             depth_image = np.asanyarray(depth_frame.as_depth_frame().get_data())

             if self.depth_rgb:
-                color_frame = frameset.as_frameset().get_color_frame()
+                color_frame = frameset_as.get_color_frame()
                 assert color_frame.is_video_frame()
                 color_image = np.asanyarray(color_frame.as_video_frame().get_data())
-                __, data = cv2.imencode('*.jpeg', color_image)
+                __, data = cv2.imencode(self.img_format, color_image)

             if self.depth_infra:
-                infra_frame = frameset.as_frameset().get_infrared_frame()
+                infra_frame = frameset_as.get_infrared_frame()
                 assert infra_frame.is_video_frame()
                 infra_image = np.asanyarray(infra_frame.as_video_frame().get_data())
-                __, infra_data = cv2.imencode('*.jpeg', infra_image)
+                __, infra_data = cv2.imencode(self.img_format, infra_image)
         except Exception as e:
             print(e)
             self.finished.set()
             return

-        self.publish(f'{name}_depth', depth_image)
-        if self.depth_rgb:
-            self.publish(f'{name}_color', data.tobytes())
+        if self.rgbd:
+            self.publish(f'{name}_rgbd_raw',
+                         [None, self.camera_pose, data.tobytes(), serialize(depth_image)])  # None for robot pose
+        else:
+            self.publish(f'{name}_depth', depth_image)
+            if self.depth_rgb:
+                self.publish(f'{name}_color', data.tobytes())
         if self.depth_infra:
             self.publish(f'{name}_infra', infra_data.tobytes())
@@ -368,6 +405,8 @@ def start(self):
             info_msg = f'Enabling streams: {name}_depth'
             if self.depth_rgb:
                 info_msg += f', {name}_color'
+                if self.rgbd:
+                    info_msg += " (rgbd allowed)"
             if self.depth_infra:
                 info_msg += f', {name}_infra'
             g_logger.info(info_msg)
@@ -375,14 +414,18 @@ def start(self):
             depth_pipeline = rs.pipeline(ctx)
             depth_cfg = rs.config()
             depth_cfg.enable_device(serial_number)
-            w, h = self.default_depth_resolution
-            depth_cfg.enable_stream(rs.stream.depth, w, h, rs.format.z16, self.depth_fps)
+            w, h = device.get('depth_resolution', [640, 360])
+            depth_fps = device.get('depth_fps', 30)
+            depth_cfg.enable_stream(rs.stream.depth, w, h, rs.format.z16, depth_fps)
             if self.depth_rgb:
-                w, h = self.default_rgb_resolution
-                depth_cfg.enable_stream(rs.stream.color, w, h, rs.format.bgr8, self.depth_fps)
+                w, h = device.get('rgb_resolution', [640, 360])
+                depth_cfg.enable_stream(rs.stream.color, w, h, rs.format.bgr8, depth_fps)
+                if self.rgbd:
+                    align_to = rs.stream.color
+                    self.align = rs.align(align_to)
             if self.depth_infra:
-                w, h = self.default_depth_resolution
-                depth_cfg.enable_stream(rs.stream.infrared, w, h, rs.format.y8, self.depth_fps)
+                w, h = device.get('depth_resolution', [640, 360])
+                depth_cfg.enable_stream(rs.stream.infrared, w, h, rs.format.y8, depth_fps)
             if depth_cfg.can_resolve(depth_pipeline):
                 self.pipelines.append(depth_pipeline)
                 configs.append(depth_cfg)
diff --git a/osgar/tools/lidarview.py b/osgar/tools/lidarview.py
index 681210889..95058b0ac 100644
--- a/osgar/tools/lidarview.py
+++ b/osgar/tools/lidarview.py
@@ -221,9 +221,9 @@ def get_image(data):
         image = pygame.image.frombuffer(im_color.tostring(), im_color.shape[1::-1], "RGB")
         g_depth = data
     elif isinstance(data, tuple):
-        img_data, depth_data = data
+        img_data, depth_data, dist_scale = data
         image = pygame.image.load(io.BytesIO(img_data), 'JPG').convert()
-        g_depth = decompress_depth(depth_data)
+        g_depth = decompress_depth(depth_data)*dist_scale
     elif isinstance(data, list):
         # image stereo artefact localization
         # expects localized pair of images [camera_name, [robot_pose, camera_pose, image], [robot_pose, camera_pose, image]]
@@ -398,8 +398,14 @@ def _step(self, direction):
             elif stream_id == self.camera2_id:
                 self.image2, _ = get_image(deserialize(data))
             elif stream_id == self.rgbd_id:
-                _, _, img_data, depth_data = deserialize(data)
-                self.image, self.image2 = get_image((img_data, depth_data))
+                rgbd_data = deserialize(data)
+                if isinstance(rgbd_data, dict):
+                    img_data, depth_data = rgbd_data["data"]
+                    dist_scale = 0.001  # TODO read from bundle stream ?
+                    self.image, self.image2 = get_image((img_data, depth_data, dist_scale))
+                else:
+                    _, _, img_data, depth_data = rgbd_data
+                    self.image, self.image2 = get_image((img_data, depth_data, 1))
             if self.lidar_id is None:
                 keyframe = self.keyframe
                 self.keyframe = False
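
For reference, a minimal standalone sketch of the rs.align flow that both depth callbacks now use: when "rgbd" is enabled, the frameset is passed through rs.align(rs.stream.color), so the depth image is reprojected into the color camera's frame and becomes pixel-aligned with the encoded color image. This is an illustration of the pyrealsense2 API, not driver code; the 640x360 @ 30 fps streams just mirror the defaults in the configs above.

    import pyrealsense2 as rs

    pipeline = rs.pipeline()
    cfg = rs.config()
    cfg.enable_stream(rs.stream.depth, 640, 360, rs.format.z16, 30)
    cfg.enable_stream(rs.stream.color, 640, 360, rs.format.bgr8, 30)
    pipeline.start(cfg)

    align = rs.align(rs.stream.color)  # reproject depth into the color sensor's frame
    try:
        frames = pipeline.wait_for_frames()
        aligned = align.process(frames)
        depth_frame = aligned.get_depth_frame()  # now pixel-aligned with color
        color_frame = aligned.get_color_frame()
    finally:
        pipeline.stop()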
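Note that the rgbd_raw payload differs between the two drivers: RealSense publishes a dict {"camera_pose": ..., "data": [encoded_image, serialized_depth]}, while RealSenseMulticam publishes the flat list [robot_pose, camera_pose, encoded_image, serialized_depth] with robot_pose left as None. A hypothetical consumer helper that accepts both shapes is sketched below; unpack_rgbd_raw and DEPTH_SCALE are illustrative names, and the sketch assumes the depth payload is decoded with osgar.lib.serialize.deserialize (the inverse of the serialize call in the driver) and that depth units are millimeters, matching the hard-coded 0.001 in lidarview.

    import cv2
    import numpy as np

    from osgar.lib.serialize import deserialize

    DEPTH_SCALE = 0.001  # assumption: D400 depth units are millimeters -> meters


    def unpack_rgbd_raw(payload):
        """Hypothetical helper: return (color_image, depth_m, camera_pose) from
        either the dict bundle (RealSense) or the list form (RealSenseMulticam)."""
        if isinstance(payload, dict):
            camera_pose = payload["camera_pose"]
            img_data, depth_data = payload["data"]
        else:
            _, camera_pose, img_data, depth_data = payload  # robot_pose is None
        color = cv2.imdecode(np.frombuffer(img_data, dtype=np.uint8), cv2.IMREAD_COLOR)
        depth_m = deserialize(depth_data) * DEPTH_SCALE
        return color, depth_m, camera_pose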
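To exercise the change end to end, one would record a log with the updated config, e.g. `python -m osgar.record config/test-realsense-D400.json` (or the new `config/test-rs-multicam.json`), and then replay it with `python -m osgar.tools.lidarview <logfile>` with the rgbd_raw stream selected as the viewer's rgbd input; the exact lidarview stream arguments depend on the local setup.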