Cyrille Nofficial 2022-08-02 12:27:48 +02:00
parent 2c9c7d9078
commit 32eb2f618f


@@ -6,9 +6,16 @@ import events.events_pb2
import depthai as dai
import cv2
import numpy as np
logger = logging.getLogger(__name__)
# Closer-in minimum depth, disparity range is doubled (from 95 to 190):
extended_disparity = False
# Better accuracy for longer distance, fractional disparity 32-levels:
subpixel = True
# Better handling for occlusions:
lr_check = True
class FramePublisher:
def __init__(self, mqtt_client: mqtt.Client, frame_topic: str, img_width: int, img_height: int):
@@ -16,6 +23,7 @@ class FramePublisher:
self._frame_topic = frame_topic
self._img_width = img_width
self._img_height = img_height
self._depth = None
self._pipeline = self._configure_pipeline()
def _configure_pipeline(self) -> dai.Pipeline:
@@ -27,6 +35,45 @@ class FramePublisher:
xout_rgb.setStreamName("rgb")
monoLeft = pipeline.create(dai.node.MonoCamera)
monoRight = pipeline.create(dai.node.MonoCamera)
depth = pipeline.create(dai.node.StereoDepth)
xout = pipeline.create(dai.node.XLinkOut)
self._depth = depth
xout.setStreamName("disparity")
# Properties
monoLeft.setResolution(dai.MonoCameraProperties.SensorResolution.THE_400_P)
monoLeft.setBoardSocket(dai.CameraBoardSocket.LEFT)
monoRight.setResolution(dai.MonoCameraProperties.SensorResolution.THE_400_P)
monoRight.setBoardSocket(dai.CameraBoardSocket.RIGHT)
# Create a node that will produce the depth map (using disparity output as it's easier to visualize depth this way)
depth.setDefaultProfilePreset(dai.node.StereoDepth.PresetMode.HIGH_DENSITY)
# Options: MEDIAN_OFF, KERNEL_3x3, KERNEL_5x5, KERNEL_7x7 (default)
depth.initialConfig.setMedianFilter(dai.MedianFilter.KERNEL_7x7)
depth.setLeftRightCheck(lr_check)
depth.setExtendedDisparity(extended_disparity)
depth.setSubpixel(subpixel)
config = depth.initialConfig.get()
config.postProcessing.speckleFilter.enable = True
config.postProcessing.speckleFilter.speckleRange = 50
config.postProcessing.temporalFilter.enable = False
config.postProcessing.spatialFilter.enable = False
config.postProcessing.spatialFilter.holeFillingRadius = 2
config.postProcessing.spatialFilter.numIterations = 1
#config.postProcessing.thresholdFilter.minRange = 400
#config.postProcessing.thresholdFilter.maxRange = 15000
config.postProcessing.decimationFilter.decimationFactor = 2
depth.initialConfig.set(config)
# Linking
monoLeft.out.link(depth.left)
monoRight.out.link(depth.right)
depth.disparity.link(xout.input)
# Properties
cam_rgb.setBoardSocket(dai.CameraBoardSocket.RGB)
cam_rgb.setPreviewSize(width=self._img_width, height=self._img_height)
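Note (not part of the diff above): the pipeline links the raw disparity output to the "disparity" stream rather than metric depth. As a rough illustration, disparity can be converted to depth with the standard stereo relation depth = focal_length_px * baseline_m / disparity. The focal length and baseline below are placeholder values, not read from the device calibration, and with subpixel enabled the disparity values carry extra fractional scaling that a real conversion would need to account for.

# Illustrative sketch only -- not part of this commit.
# focal_px and baseline_m are hypothetical placeholders; a real
# conversion should read them from the device calibration data.
import numpy as np

def disparity_to_depth(disparity: np.ndarray, focal_px: float = 440.0, baseline_m: float = 0.075) -> np.ndarray:
    depth = np.zeros_like(disparity, dtype=np.float32)
    valid = disparity > 0  # zero disparity means no measurement
    depth[valid] = focal_px * baseline_m / disparity[valid]
    return depth  # metric depth, in meters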
@@ -53,12 +100,22 @@ class FramePublisher:
queue_size = 4
q_rgb = device.getOutputQueue("rgb", maxSize=queue_size, blocking=False)
# Output queue will be used to get the disparity frames from the outputs defined above
q_disparity = device.getOutputQueue(name="disparity", maxSize=4, blocking=False)
while True:
try:
logger.debug("wait for new frame")
inRgb = q_rgb.get()  # blocking call, will wait until a new data has arrived
inDisparity = q_disparity.get()
# im_resize = inRgb.getCvFrame()
im_resize = inDisparity.getCvFrame()
# Normalization for better visualization
im_resize = (im_resize * (255 / self._depth.initialConfig.getMaxDisparity())).astype(np.uint8)
# Available color maps: https://docs.opencv.org/3.4/d3/d50/group__imgproc__colormap.html
# im_resize = cv2.applyColorMap(im_resize, cv2.COLORMAP_JET)
is_success, im_buf_arr = cv2.imencode(".jpg", im_resize)
byte_im = im_buf_arr.tobytes()
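For context (not part of the diff above), a minimal consumer sketch is shown below. It assumes the topic carries the raw JPEG bytes produced by cv2.imencode; the actual publisher may wrap them in an events_pb2 message, in which case the payload must be unpacked first. Broker address and topic name are placeholders, and the paho-mqtt 1.x callback style is used.

# Minimal subscriber sketch: decode the published JPEG frames.
# Broker address and topic are placeholders; adjust to the real setup.
import cv2
import numpy as np
import paho.mqtt.client as mqtt

def on_message(client, userdata, msg):
    # Assumes msg.payload is the raw JPEG buffer published above.
    buf = np.frombuffer(msg.payload, dtype=np.uint8)
    frame = cv2.imdecode(buf, cv2.IMREAD_GRAYSCALE)
    if frame is not None:
        cv2.imshow("disparity", frame)
        cv2.waitKey(1)

client = mqtt.Client()  # paho-mqtt 1.x style constructor
client.on_message = on_message
client.connect("localhost", 1883)
client.subscribe("camera/disparity")  # placeholder topic name
client.loop_forever()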