Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Added Visualization Labels and static publishers for images #211

Open
wants to merge 16 commits into
base: main
Choose a base branch
from
Open
Show file tree
Hide file tree
Changes from 7 commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
Original file line number Diff line number Diff line change
Expand Up @@ -5,6 +5,8 @@
# Reference code Boston Dynamics Spot-sdk: https://github.com/boston-dynamics/spot-sdk/tree/master/python/examples/web_cam_image_service

import logging
import os
import signal
import time
from typing import Any, List

Expand Down Expand Up @@ -211,17 +213,24 @@ def blocking_capture(self):
)
pass
else:
return (
IntelRealSenseCameraInterface.DEPTH_FRAME,
IntelRealSenseCameraInterface.CAPTURE_TIME,
)
try:
return (
IntelRealSenseCameraInterface.DEPTH_FRAME,
IntelRealSenseCameraInterface.CAPTURE_TIME,
)
except Exception as e:
_LOGGER.warning(
f"Unable to display the IntelRealSense images captured.{str(e)}"
)
os.kill(os.getpid(), signal.SIGINT) # Kill the entire code
# pass
Achuthankrishna marked this conversation as resolved.
Show resolved Hide resolved

return color_image, capture_time
except Exception as e:
print(
f"Unsuccessful in getting frames from self.pipeline.wait_for_frames() due to {str(e)}"
)
exit(0)
os.kill(os.getpid(), signal.SIGINT) # Kill the entire code

def image_decode(self, image_data, image_proto, image_req):
pixel_format = image_req.pixel_format
Expand Down
5 changes: 4 additions & 1 deletion spot_rl_experiments/configs/ros_topic_names.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -2,7 +2,10 @@ HEAD_DEPTH: "/raw_head_depth"
HAND_DEPTH: "/raw_hand_depth"
HAND_DEPTH_UNSCALED: "/raw_hand_depth_unscaled"
HAND_RGB: "/hand_rgb"

IRS_RGB: "/irs_rgb"
IRS_DEPTH: "/irs_depth"
GRIPPER_RGB: "/gripper_rgb"
GRIPPER_DEPTH: "/gripper_depth"
COMPRESSED_IMAGES: "/compressed_images"

FILTERED_HEAD_DEPTH: "/filtered_head_depth"
Expand Down
4 changes: 3 additions & 1 deletion spot_rl_experiments/experiments/skill_test/test_sem_place.py
Original file line number Diff line number Diff line change
Expand Up @@ -30,8 +30,10 @@
contnue = True
while contnue:
rospy.set_param("is_gripper_blocked", 0)
spotskillmanager.place(place_target, is_local=is_local, visualize=True)
spotskillmanager.place(place_target, is_local=is_local, visualize=False)
Achuthankrishna marked this conversation as resolved.
Show resolved Hide resolved
contnue = map_user_input_to_boolean("Do you want to do it again ? Y/N ")
if not contnue:
spotskillmanager.spot.sit()

# The following is a helpful tip to debug the arm
# We get Spot class
Expand Down
6 changes: 6 additions & 0 deletions spot_rl_experiments/spot_rl/envs/base_env.py
Original file line number Diff line number Diff line change
Expand Up @@ -48,6 +48,8 @@
except Exception:
pass

import signal

from sensor_msgs.msg import Image
from spot_rl.utils.gripper_t_intel_path import GRIPPER_T_INTEL_PATH
from spot_rl.utils.pose_estimation import pose_estimation
Expand Down Expand Up @@ -698,8 +700,12 @@ def get_gripper_images(self, save_image=False):
self.detections_buffer["filtered_depth"][self.detection_timestamp],
)
arm_depth = self.msg_to_cv2(filtered_hand_depth, "mono8")
if np.all(arm_depth == 0):
Achuthankrishna marked this conversation as resolved.
Show resolved Hide resolved
os.kill(os.getpid(), signal.SIGKILL)
else:
arm_depth = self.msg_to_cv2(self.filtered_hand_depth, "mono8")
if np.all(arm_depth == 0):
os.kill(os.getpid(), signal.SIGKILL)

# Crop out black vertical bars on the left and right edges of aligned depth img
arm_depth = arm_depth[:, LEFT_CROP:-RIGHT_CROP]
Expand Down
108 changes: 102 additions & 6 deletions spot_rl_experiments/spot_rl/ros_img_vis.py
Original file line number Diff line number Diff line change
Expand Up @@ -17,12 +17,13 @@
from spot_rl.utils.utils import ros_topics as rt
from spot_wrapper.utils import resize_to_tallest

RAW_IMG_TOPICS = [rt.HEAD_DEPTH, rt.HAND_DEPTH, rt.HAND_RGB]
RAW_IMG_TOPICS = [rt.HEAD_DEPTH, rt.GRIPPER_DEPTH, rt.GRIPPER_RGB, rt.IRS_RGB]

PROCESSED_IMG_TOPICS = [
rt.FILTERED_HEAD_DEPTH,
rt.FILTERED_HAND_DEPTH,
rt.MASK_RCNN_VIZ_TOPIC,
rt.IRS_DEPTH,
]

FOUR_CC = cv2.VideoWriter_fourcc(*"MP4V")
Expand Down Expand Up @@ -165,11 +166,21 @@ class SpotRosVisualizer(VisualizerMixin, SpotRobotSubscriberMixin):
no_raw = False
proprioception = False

# Define a timeout duration (in seconds)
Achuthankrishna marked this conversation as resolved.
Show resolved Hide resolved
TIMEOUT_DURATION = 5

def __init__(self, *args, **kwargs):
    """Initialize the visualizer and set up per-topic bookkeeping.

    For every subscribed topic, record the current wall-clock time as the
    last-seen timestamp and allocate a rolling window (last 10 samples)
    for FPS estimation.
    """
    super().__init__(*args, **kwargs)
    topics = list(self.msgs.keys())
    now = time.time()
    self.last_seen = {t: now for t in topics}
    self.fps = {t: deque(maxlen=10) for t in topics}

# Checking for empty image to make an overlay text
def is_empty_image(self, img):
    """Return True if *img* carries no meaningful data (all pixels zero).

    Used to decide whether to overlay a "DISCONNECTED" banner on a
    topic's image panel.

    Args:
        img: numpy array of any shape/dtype.

    Returns:
        bool: True when every element of *img* equals zero.
    """
    # np.all returns a numpy bool_; cast so callers get a plain Python bool.
    return bool(np.all(img == 0))

def generate_composite(self):
if not any(self.updated.values()):
# No imgs were refreshed. Skip.
Expand All @@ -182,21 +193,62 @@ def generate_composite(self):
processed_msgs = [self.msgs[i] for i in PROCESSED_IMG_TOPICS]

raw_imgs = [self.msg_to_cv2(i) for i in raw_msgs if i is not None]

# Replace any Nones with black images if raw version exists. We (safely) assume
# here that there is no processed image anyway if the raw image does not exist.
processed_imgs = []
# Handle processed messages and fill with zeros if needed
for idx, raw_msg in enumerate(raw_msgs):
if processed_msgs[idx] is not None:
processed_imgs.append(self.msg_to_cv2(processed_msgs[idx]))
elif processed_msgs[idx] is None and raw_msg is not None:
else:
processed_imgs.append(np.zeros_like(raw_imgs[idx]))

# Crop gripper images
# Crop and process images as needed
if raw_msgs[1] is not None:
for imgs in [raw_imgs, processed_imgs]:
imgs[1] = imgs[1][:, 124:-60]

# Resizing and lowering the contrast in depth image
raw_imgs[1] = cv2.convertScaleAbs(raw_imgs[1], alpha=0.03)
processed_imgs[2] = cv2.resize(processed_imgs[2], (640, 480))
processed_imgs[3] = cv2.convertScaleAbs(processed_imgs[3], alpha=0.03)

# Check for topic in list and call is_empty_image() to check whether the image is empty and then overlay text.
for topic in RAW_IMG_TOPICS:
Achuthankrishna marked this conversation as resolved.
Show resolved Hide resolved
if topic in RAW_IMG_TOPICS:
idx = RAW_IMG_TOPICS.index(topic)
if idx < len(raw_imgs):
if self.is_empty_image(raw_imgs[idx]):
print(f"Image for topic {topic} is empty or disconnected.")
raw_imgs[idx] = self.overlay_text(
raw_imgs[idx],
"DISCONNECTED",
color=(255, 0, 0),
size=2.0,
thickness=4,
)
for topic in PROCESSED_IMG_TOPICS:
Achuthankrishna marked this conversation as resolved.
Show resolved Hide resolved
if topic in PROCESSED_IMG_TOPICS:
idx = PROCESSED_IMG_TOPICS.index(topic)
if idx < len(processed_imgs):
if self.is_empty_image(processed_imgs[idx]):
print(f"Image for topic {topic} is empty or disconnected.")
processed_imgs[idx] = self.overlay_text(
processed_imgs[idx],
"DISCONNECTED",
color=(255, 0, 0),
size=2.0,
thickness=4,
)

# Overlay topic text
raw_imgs = [
self.overlay_topic_text(img, topic)
for img, topic in zip(raw_imgs, RAW_IMG_TOPICS)
]
processed_imgs = [
self.overlay_topic_text(img, topic)
for img, topic in zip(processed_imgs, PROCESSED_IMG_TOPICS)
]

img = np.vstack(
[
resize_to_tallest(bgrify_grayscale_imgs(i), hstack=True)
Expand Down Expand Up @@ -235,6 +287,50 @@ def generate_composite(self):

return img

@staticmethod
# Method to obtain image, add a white strip on top of the image by resizing it and putting text on that white strip
def overlay_topic_text(
    img,
    topic,
    box_color=(0, 0, 0),
    text_color=(0, 0, 0),
    font_size=1.3,
    thickness=2,
):
    """Stack a 50-px white banner on top of *img* and write the topic name on it.

    Args:
        img: source image, grayscale (H, W) or color (H, W, 3), uint8.
        topic: ROS topic string; '/' is stripped and '_' replaced by spaces
            to form the displayed label.
        box_color: unused; kept for interface compatibility.
        text_color: BGR color of the label text.
        font_size: cv2 font scale for the label.
        thickness: stroke thickness of the label text.

    Returns:
        A new image, 50 px taller than *img*, with the label drawn on the
        banner strip.
    """
    label = topic.replace("_", " ").replace("/", "")

    width = img.shape[1]
    strip_height = 50

    # Match the banner's channel count to the input image.
    if img.ndim == 3:
        banner = np.full((strip_height, width, 3), 255, dtype=np.uint8)
    else:
        banner = np.full((strip_height, width), 255, dtype=np.uint8)

    # Grow the canvas by stacking the banner above the original pixels.
    labeled = np.vstack((banner, img))

    font = cv2.FONT_HERSHEY_SIMPLEX
    (_, text_height), _ = cv2.getTextSize(label, font, font_size, thickness)

    # Baseline sits text_height below (strip_height - margin), i.e. near the
    # top of the banner with the chosen 50-px margin.
    margin = 50
    origin = (margin, strip_height - margin + text_height)
    cv2.putText(
        labeled,
        label,
        origin,
        font,
        font_size,
        text_color,
        thickness,
        cv2.LINE_AA,
    )

    return labeled

Achuthankrishna marked this conversation as resolved.
Show resolved Hide resolved

def bgrify_grayscale_imgs(imgs):
return [
Expand Down
Loading