Compare commits

2 commits: feat/objec...feat/depth
| Author | SHA1 | Date |
|---|---|---|
| | 32eb2f618f | |
| | 2c9c7d9078 | |

Dockerfile (12 changes)
```diff
@@ -1,12 +1,3 @@
-FROM docker.io/library/python:3.9-slim AS model
-
-RUN python3 -m pip install blobconverter
-
-RUN mkdir -p /models
-
-RUN blobconverter --zoo-name mobile_object_localizer_192x192 --zoo-type depthai --shaves 6 --version 2021.4 --output-dir /models || echo ""
-RUN ls /models
-#######
 FROM docker.io/library/python:3.9-slim
 
 # Configure piwheels repo to use pre-compiled numpy wheels for arm
@@ -16,9 +7,6 @@ RUN apt-get update && apt-get install -y libgl1 libglib2.0-0
 
 RUN pip3 install numpy
 
-RUN mkdir /models
-
-COPY --from=model /models/mobile_object_localizer_192x192_openvino_2021.4_6shave.blob /models/mobile_object_localizer_192x192_openvino_2021.4_6shave.blob
 ADD requirements.txt requirements.txt
 
 RUN pip3 install -r requirements.txt
```
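The deleted build stage fetched the mobile_object_localizer blob with the blobconverter CLI at image-build time; with object detection gone from this branch, both the stage and the COPY of the blob are dropped. For reference, blobconverter also exposes a Python API that mirrors those CLI flags. A minimal sketch (treat the exact keyword names as assumptions to verify against the installed blobconverter release):

```python
# Sketch: fetch the same blob via blobconverter's Python API instead of the
# CLI call in the removed Dockerfile stage. Keyword names mirror the CLI
# flags (--zoo-name / --zoo-type / --shaves / --version).
import shutil

import blobconverter

blob_path = blobconverter.from_zoo(
    name="mobile_object_localizer_192x192",  # zoo model name from the CLI call
    zoo_type="depthai",                      # --zoo-type depthai
    shaves=6,                                # --shaves 6
    version="2021.4",                        # --version 2021.4 (OpenVINO)
)
# Place it where the application image expected the COPY to put it.
shutil.copy(blob_path, "/models/mobile_object_localizer_192x192_openvino_2021.4_6shave.blob")
```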
CLI entrypoint module

```diff
@@ -2,10 +2,8 @@
 Publish data from oak-lite device
 
 Usage: rc-oak-camera [-u USERNAME | --mqtt-username=USERNAME] [--mqtt-password=PASSWORD] [--mqtt-broker=HOSTNAME] \
-    [--mqtt-topic-robocar-oak-camera="TOPIC_CAMERA"] [--mqtt-topic-robocar-objects="TOPIC_OBJECTS"] \
-    [--mqtt-client-id=CLIENT_ID] \
-    [-H IMG_HEIGHT | --image-height=IMG_HEIGHT] [-W IMG_WIDTH | --image-width=IMG_width] \
-    [-t OBJECTS_THRESHOLD | --objects-threshold=OBJECTS_THRESHOLD]
+    [--mqtt-topic-robocar-oak-camera="TOPIC_CAMERA"] [--mqtt-client-id=CLIENT_ID] \
+    [-H IMG_HEIGHT | --image-height=IMG_HEIGHT] [-W IMG_WIDTH | --image-width=IMG_width]
 
 Options:
 -h --help                                               Show this screen.
@@ -14,10 +12,8 @@ Options:
 -b HOSTNAME --mqtt-broker=HOSTNAME                      MQTT broker host
 -C CLIENT_ID --mqtt-client-id=CLIENT_ID                 MQTT client id
 -c TOPIC_CAMERA --mqtt-topic-robocar-oak-camera=TOPIC_CAMERA        MQTT topic where to publish robocar-oak-camera frames
--o TOPIC_OBJECTS --mqtt-topic-robocar-objects=TOPIC_OBJECTS         MQTT topic where to publish objects detection results
 -H IMG_HEIGHT --image-height=IMG_HEIGHT                 IMG_HEIGHT image height
 -W IMG_WIDTH --image-width=IMG_width                    IMG_WIDTH image width
--t OBJECTS_THRESHOLD --objects-threshold=OBJECTS_THRESHOLD    OBJECTS_THRESHOLD threshold to filter objects detected
 """
 import logging
 import os
@@ -54,13 +50,9 @@ def execute_from_command_line():
                                                           default_client_id),
                               )
     frame_topic = get_default_value(args["--mqtt-topic-robocar-oak-camera"], "MQTT_TOPIC_CAMERA", "/oak/camera_rgb")
-    objects_topic = get_default_value(args["--mqtt-topic-robocar-objects"], "MQTT_TOPIC_OBJECTS", "/objects")
 
     frame_processor = cam.FramePublisher(mqtt_client=client,
                                          frame_topic=frame_topic,
-                                         objects_topic=objects_topic,
-                                         objects_threshold=float(get_default_value(args["--objects-threshold"],
-                                                                                   "OBJECTS_THRESHOLD", 0.2)),
                                          img_width=int(get_default_value(args["--image-width"], "IMAGE_WIDTH", 160)),
                                          img_height=int(get_default_value(args["--image-height"], "IMAGE_HEIGHT", 120)))
     frame_processor.run()
```
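The docstring doubles as the CLI definition: docopt (pinned in requirements.txt) parses the arguments against the Usage/Options text, which is why the object-detection flags are deleted from the docstring in the same commit that drops them from `FramePublisher`. A minimal sketch of the mechanism, using a shortened usage text rather than the module's full docstring:

```python
# Sketch: how docopt turns the module docstring into parsed arguments.
# The dict keys are the long option names from the Usage/Options sections.
from docopt import docopt

USAGE = """Publish data from oak-lite device

Usage: rc-oak-camera [--mqtt-broker=HOSTNAME] [-H IMG_HEIGHT | --image-height=IMG_HEIGHT]

Options:
-h --help                                Show this screen.
-b HOSTNAME --mqtt-broker=HOSTNAME       MQTT broker host
-H IMG_HEIGHT --image-height=IMG_HEIGHT  image height
"""

args = docopt(USAGE, argv=["--mqtt-broker=localhost", "--image-height=120"])
assert args["--mqtt-broker"] == "localhost"
assert args["--image-height"] == "120"  # docopt returns strings; the caller casts
```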
Camera pipeline module

```diff
@@ -10,49 +10,69 @@ import numpy as np
 
 logger = logging.getLogger(__name__)
 
-NN_PATH = "/models/mobile_object_localizer_192x192_openvino_2021.4_6shave.blob"
-NN_WIDTH = 192
-NN_HEIGHT = 192
+# Closer-in minimum depth, disparity range is doubled (from 95 to 190):
+extended_disparity = False
+# Better accuracy for longer distance, fractional disparity 32-levels:
+subpixel = True
+# Better handling for occlusions:
+lr_check = True
 
 
 class FramePublisher:
-    def __init__(self, mqtt_client: mqtt.Client, frame_topic: str, objects_topic: str, objects_threshold: float,
-                 img_width: int, img_height: int):
+    def __init__(self, mqtt_client: mqtt.Client, frame_topic: str, img_width: int, img_height: int):
         self._mqtt_client = mqtt_client
         self._frame_topic = frame_topic
-        self._objects_topic = objects_topic
-        self._objects_threshold = objects_threshold
         self._img_width = img_width
         self._img_height = img_height
+        self._depth = None
         self._pipeline = self._configure_pipeline()
 
     def _configure_pipeline(self) -> dai.Pipeline:
         logger.info("configure pipeline")
         pipeline = dai.Pipeline()
 
         pipeline.setOpenVINOVersion(version=dai.OpenVINO.VERSION_2021_4)
 
-        # Define a neural network that will make predictions based on the source frames
-        detection_nn = pipeline.create(dai.node.NeuralNetwork)
-        detection_nn.setBlobPath(NN_PATH)
-        detection_nn.setNumPoolFrames(4)
-        detection_nn.input.setBlocking(False)
-        detection_nn.setNumInferenceThreads(2)
-
-        xout_nn = pipeline.create(dai.node.XLinkOut)
-        xout_nn.setStreamName("nn")
-        xout_nn.input.setBlocking(False)
-
-        # Resize image
-        manip = pipeline.create(dai.node.ImageManip)
-        manip.initialConfig.setResize(NN_WIDTH, NN_HEIGHT)
-        manip.initialConfig.setFrameType(dai.ImgFrame.Type.RGB888p)
-        manip.initialConfig.setKeepAspectRatio(False)
-
         cam_rgb = pipeline.create(dai.node.ColorCamera)
         xout_rgb = pipeline.create(dai.node.XLinkOut)
 
         xout_rgb.setStreamName("rgb")
 
+        monoLeft = pipeline.create(dai.node.MonoCamera)
+        monoRight = pipeline.create(dai.node.MonoCamera)
+        depth = pipeline.create(dai.node.StereoDepth)
+        xout = pipeline.create(dai.node.XLinkOut)
+        self._depth = depth
+
+        xout.setStreamName("disparity")
+
+        # Properties
+        monoLeft.setResolution(dai.MonoCameraProperties.SensorResolution.THE_400_P)
+        monoLeft.setBoardSocket(dai.CameraBoardSocket.LEFT)
+        monoRight.setResolution(dai.MonoCameraProperties.SensorResolution.THE_400_P)
+        monoRight.setBoardSocket(dai.CameraBoardSocket.RIGHT)
+
+        # Create a node that will produce the depth map (using disparity output as it's easier to visualize depth this way)
+        depth.setDefaultProfilePreset(dai.node.StereoDepth.PresetMode.HIGH_DENSITY)
+        # Options: MEDIAN_OFF, KERNEL_3x3, KERNEL_5x5, KERNEL_7x7 (default)
+        depth.initialConfig.setMedianFilter(dai.MedianFilter.KERNEL_7x7)
+        depth.setLeftRightCheck(lr_check)
+        depth.setExtendedDisparity(extended_disparity)
+        depth.setSubpixel(subpixel)
+
+        config = depth.initialConfig.get()
+        config.postProcessing.speckleFilter.enable = True
+        config.postProcessing.speckleFilter.speckleRange = 50
+        config.postProcessing.temporalFilter.enable = False
+        config.postProcessing.spatialFilter.enable = False
+        config.postProcessing.spatialFilter.holeFillingRadius = 2
+        config.postProcessing.spatialFilter.numIterations = 1
+        #config.postProcessing.thresholdFilter.minRange = 400
+        #config.postProcessing.thresholdFilter.maxRange = 15000
+        config.postProcessing.decimationFilter.decimationFactor = 2
+        depth.initialConfig.set(config)
+
+        # Linking
+        monoLeft.out.link(depth.left)
+        monoRight.out.link(depth.right)
+        depth.disparity.link(xout.input)
+
         # Properties
         cam_rgb.setBoardSocket(dai.CameraBoardSocket.RGB)
```
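For orientation: the new StereoDepth node emits raw disparity, not metric depth. Depth follows as depth = focal_length_px × baseline / disparity, and the settings above shape the disparity range, as the source comments note: extended disparity would double the range from 95 to 190 (it stays off here), while subpixel mode adds fractional levels. A small sketch of that arithmetic; the focal length and baseline below are illustrative placeholders, not values from this repository:

```python
# Sketch: disparity-to-depth arithmetic for the stereo pair configured above.
# focal_px and baseline_m are illustrative assumptions; real values come from
# the device calibration.

def depth_from_disparity(disparity: float, focal_px: float = 440.0,
                         baseline_m: float = 0.075) -> float:
    """Metric depth in meters from a disparity value in pixels."""
    if disparity <= 0:
        return float("inf")  # zero disparity corresponds to a point at infinity
    return focal_px * baseline_m / disparity

# The maximum disparity grows with the StereoDepth modes:
extended_disparity = False  # would double the range from 95 to 190
subpixel = True             # fractional disparity levels
max_disparity = 95 * (2 if extended_disparity else 1)
if subpixel:
    max_disparity *= 32     # 32 fractional levels, per the source comment

# The run loop later reads this on-device as
# self._depth.initialConfig.getMaxDisparity() to scale frames into 0..255:
scale = 255 / max_disparity
```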
```diff
@@ -61,14 +81,8 @@ class FramePublisher:
         cam_rgb.setColorOrder(dai.ColorCameraProperties.ColorOrder.RGB)
         cam_rgb.setFps(30)
 
-        # Link preview to manip and manip to nn
-        cam_rgb.preview.link(manip.inputImage)
-        manip.out.link(detection_nn.input)
-
-        # Linking to output
+        # Linking
         cam_rgb.preview.link(xout_rgb.input)
-        detection_nn.out.link(xout_nn.input)
 
         logger.info("pipeline configured")
         return pipeline
```
```diff
@@ -84,15 +98,24 @@ class FramePublisher:
             device.startPipeline()
             # Queues
             queue_size = 4
-            q_rgb = device.getOutputQueue(name="rgb", maxSize=queue_size, blocking=False)
-            q_nn = device.getOutputQueue(name="nn", maxSize=queue_size, blocking=False)
+            q_rgb = device.getOutputQueue("rgb", maxSize=queue_size, blocking=False)
+
+            # Output queue will be used to get the disparity frames from the outputs defined above
+            q_disparity = device.getOutputQueue(name="disparity", maxSize=4, blocking=False)
 
             while True:
                 try:
                     logger.debug("wait for new frame")
                     inRgb = q_rgb.get()  # blocking call, will wait until a new data has arrived
+                    inDisparity = q_disparity.get()
+                    # im_resize = inRgb.getCvFrame()
+                    im_resize = inDisparity.getCvFrame()
 
-                    im_resize = inRgb.getCvFrame()
+                    # Normalization for better visualization
+                    im_resize = (im_resize * (255 / self._depth.initialConfig.getMaxDisparity())).astype(np.uint8)
+
+                    # Available color maps: https://docs.opencv.org/3.4/d3/d50/group__imgproc__colormap.html
+                    # im_resize = cv2.applyColorMap(im_resize, cv2.COLORMAP_JET)
 
                     is_success, im_buf_arr = cv2.imencode(".jpg", im_resize)
                     byte_im = im_buf_arr.tobytes()
```
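The encoded JPEG is then published over MQTT; the tail of that publish call (qos=0, retain=False) opens the next hunk, while the payload construction sits outside the visible context. Assuming the payload is the serialized `events_pb2.FrameMessage`, whose `frame` field carries the JPEG bytes, a minimal consumer sketch with an assumed broker address and the CLI module's default topic:

```python
# Sketch: a consumer for the published camera frames. Assumes the payload is
# the serialized events_pb2.FrameMessage (its `frame` field holding the JPEG
# bytes produced by cv2.imencode above); broker address is an assumption.
import cv2
import numpy as np
import paho.mqtt.client as mqtt

import events.events_pb2 as events_pb2


def on_message(client, userdata, msg):
    frame_msg = events_pb2.FrameMessage()
    frame_msg.ParseFromString(msg.payload)
    # Decode the JPEG back into a numpy image.
    img = cv2.imdecode(np.frombuffer(frame_msg.frame, dtype=np.uint8), cv2.IMREAD_UNCHANGED)
    print(frame_msg.id.name, frame_msg.id.id, img.shape)


client = mqtt.Client(client_id="frame-viewer")  # hypothetical client id
client.on_message = on_message
client.connect("localhost", 1883)               # assumed broker location
client.subscribe("/oak/camera_rgb")             # default topic from the CLI module
client.loop_forever()
```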
```diff
@@ -110,42 +133,5 @@ class FramePublisher:
                                               qos=0,
                                               retain=False)
 
-                    in_nn = q_nn.get()
-
-                    # get outputs
-                    detection_boxes = np.array(in_nn.getLayerFp16("ExpandDims")).reshape((100, 4))
-                    detection_scores = np.array(in_nn.getLayerFp16("ExpandDims_2")).reshape((100,))
-
-                    # keep boxes bigger than threshold
-                    mask = detection_scores >= self._objects_threshold
-                    boxes = detection_boxes[mask]
-                    scores = detection_scores[mask]
-
-                    if boxes.shape[0] > 0:
-                        objects_msg = events.events_pb2.ObjectsMessage()
-                        objs = []
-                        for i in range(boxes.shape[0]):
-                            bbox = boxes[i]
-                            logger.debug("new object detected: %s", str(bbox))
-                            o = events.events_pb2.Object()
-                            o.type = events.events_pb2.TypeObject.ANY
-                            o.top = bbox[0].astype(float)
-                            o.right = bbox[1].astype(float)
-                            o.bottom = bbox[2].astype(float)
-                            o.left = bbox[3].astype(float)
-                            o.confidence = scores[i].astype(float)
-                            objs.append(o)
-                        objects_msg.objects.extend(objs)
-
-                        objects_msg.frame_ref.name = frame_msg.id.name
-                        objects_msg.frame_ref.id = frame_msg.id.id
-                        objects_msg.frame_ref.created_at.FromDatetime(now)
-
-                        logger.debug("publish object event to %s", self._frame_topic)
-                        self._mqtt_client.publish(topic=self._objects_topic,
-                                                  payload=objects_msg.SerializeToString(),
-                                                  qos=0,
-                                                  retain=False)
-
                 except Exception as e:
                     logger.exception("unexpected error: %s", str(e))
```
Generated protobuf module (events.events_pb2)

```diff
@@ -14,7 +14,7 @@ _sym_db = _symbol_database.Default()
 from google.protobuf import timestamp_pb2 as google_dot_protobuf_dot_timestamp__pb2
 
 
-DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\x13\x65vents/events.proto\x12\x0erobocar.events\x1a\x1fgoogle/protobuf/timestamp.proto\"T\n\x08\x46rameRef\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\n\n\x02id\x18\x02 \x01(\t\x12.\n\ncreated_at\x18\x03 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\"C\n\x0c\x46rameMessage\x12$\n\x02id\x18\x01 \x01(\x0b\x32\x18.robocar.events.FrameRef\x12\r\n\x05\x66rame\x18\x02 \x01(\x0c\"d\n\x0fSteeringMessage\x12\x10\n\x08steering\x18\x01 \x01(\x02\x12\x12\n\nconfidence\x18\x02 \x01(\x02\x12+\n\tframe_ref\x18\x03 \x01(\x0b\x32\x18.robocar.events.FrameRef\"d\n\x0fThrottleMessage\x12\x10\n\x08throttle\x18\x01 \x01(\x02\x12\x12\n\nconfidence\x18\x02 \x01(\x02\x12+\n\tframe_ref\x18\x03 \x01(\x0b\x32\x18.robocar.events.FrameRef\"A\n\x10\x44riveModeMessage\x12-\n\ndrive_mode\x18\x01 \x01(\x0e\x32\x19.robocar.events.DriveMode\"f\n\x0eObjectsMessage\x12\'\n\x07objects\x18\x01 \x03(\x0b\x32\x16.robocar.events.Object\x12+\n\tframe_ref\x18\x02 \x01(\x0b\x32\x18.robocar.events.FrameRef\"\x80\x01\n\x06Object\x12(\n\x04type\x18\x01 \x01(\x0e\x32\x1a.robocar.events.TypeObject\x12\x0c\n\x04left\x18\x02 \x01(\x02\x12\x0b\n\x03top\x18\x03 \x01(\x02\x12\r\n\x05right\x18\x04 \x01(\x02\x12\x0e\n\x06\x62ottom\x18\x05 \x01(\x02\x12\x12\n\nconfidence\x18\x06 \x01(\x02\"&\n\x13SwitchRecordMessage\x12\x0f\n\x07\x65nabled\x18\x01 \x01(\x08\"\x8c\x01\n\x0bRoadMessage\x12&\n\x07\x63ontour\x18\x01 \x03(\x0b\x32\x15.robocar.events.Point\x12(\n\x07\x65llipse\x18\x02 \x01(\x0b\x32\x17.robocar.events.Ellipse\x12+\n\tframe_ref\x18\x03 \x01(\x0b\x32\x18.robocar.events.FrameRef\"\x1d\n\x05Point\x12\t\n\x01x\x18\x01 \x01(\x05\x12\t\n\x01y\x18\x02 \x01(\x05\"r\n\x07\x45llipse\x12%\n\x06\x63\x65nter\x18\x01 \x01(\x0b\x32\x15.robocar.events.Point\x12\r\n\x05width\x18\x02 \x01(\x05\x12\x0e\n\x06height\x18\x03 \x01(\x05\x12\r\n\x05\x61ngle\x18\x04 \x01(\x02\x12\x12\n\nconfidence\x18\x05 \x01(\x02\"\x82\x01\n\rRecordMessage\x12+\n\x05\x66rame\x18\x01 \x01(\x0b\x32\x1c.robocar.events.FrameMessage\x12\x31\n\x08steering\x18\x02 \x01(\x0b\x32\x1f.robocar.events.SteeringMessage\x12\x11\n\trecordSet\x18\x03 \x01(\t*-\n\tDriveMode\x12\x0b\n\x07INVALID\x10\x00\x12\x08\n\x04USER\x10\x01\x12\t\n\x05PILOT\x10\x02*2\n\nTypeObject\x12\x07\n\x03\x41NY\x10\x00\x12\x07\n\x03\x43\x41R\x10\x01\x12\x08\n\x04\x42UMP\x10\x02\x12\x08\n\x04PLOT\x10\x03\x42\nZ\x08./eventsb\x06proto3')
+DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\x13\x65vents/events.proto\x12\x0erobocar.events\x1a\x1fgoogle/protobuf/timestamp.proto\"T\n\x08\x46rameRef\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\n\n\x02id\x18\x02 \x01(\t\x12.\n\ncreated_at\x18\x03 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\"C\n\x0c\x46rameMessage\x12$\n\x02id\x18\x01 \x01(\x0b\x32\x18.robocar.events.FrameRef\x12\r\n\x05\x66rame\x18\x02 \x01(\x0c\"d\n\x0fSteeringMessage\x12\x10\n\x08steering\x18\x01 \x01(\x02\x12\x12\n\nconfidence\x18\x02 \x01(\x02\x12+\n\tframe_ref\x18\x03 \x01(\x0b\x32\x18.robocar.events.FrameRef\"d\n\x0fThrottleMessage\x12\x10\n\x08throttle\x18\x01 \x01(\x02\x12\x12\n\nconfidence\x18\x02 \x01(\x02\x12+\n\tframe_ref\x18\x03 \x01(\x0b\x32\x18.robocar.events.FrameRef\"A\n\x10\x44riveModeMessage\x12-\n\ndrive_mode\x18\x01 \x01(\x0e\x32\x19.robocar.events.DriveMode\"f\n\x0eObjectsMessage\x12\'\n\x07objects\x18\x01 \x03(\x0b\x32\x16.robocar.events.Object\x12+\n\tframe_ref\x18\x02 \x01(\x0b\x32\x18.robocar.events.FrameRef\"\x80\x01\n\x06Object\x12(\n\x04type\x18\x01 \x01(\x0e\x32\x1a.robocar.events.TypeObject\x12\x0c\n\x04left\x18\x02 \x01(\x05\x12\x0b\n\x03top\x18\x03 \x01(\x05\x12\r\n\x05right\x18\x04 \x01(\x05\x12\x0e\n\x06\x62ottom\x18\x05 \x01(\x05\x12\x12\n\nconfidence\x18\x06 \x01(\x02\"&\n\x13SwitchRecordMessage\x12\x0f\n\x07\x65nabled\x18\x01 \x01(\x08\"\x8c\x01\n\x0bRoadMessage\x12&\n\x07\x63ontour\x18\x01 \x03(\x0b\x32\x15.robocar.events.Point\x12(\n\x07\x65llipse\x18\x02 \x01(\x0b\x32\x17.robocar.events.Ellipse\x12+\n\tframe_ref\x18\x03 \x01(\x0b\x32\x18.robocar.events.FrameRef\"\x1d\n\x05Point\x12\t\n\x01x\x18\x01 \x01(\x05\x12\t\n\x01y\x18\x02 \x01(\x05\"r\n\x07\x45llipse\x12%\n\x06\x63\x65nter\x18\x01 \x01(\x0b\x32\x15.robocar.events.Point\x12\r\n\x05width\x18\x02 \x01(\x05\x12\x0e\n\x06height\x18\x03 \x01(\x05\x12\r\n\x05\x61ngle\x18\x04 \x01(\x02\x12\x12\n\nconfidence\x18\x05 \x01(\x02\"\x82\x01\n\rRecordMessage\x12+\n\x05\x66rame\x18\x01 \x01(\x0b\x32\x1c.robocar.events.FrameMessage\x12\x31\n\x08steering\x18\x02 \x01(\x0b\x32\x1f.robocar.events.SteeringMessage\x12\x11\n\trecordSet\x18\x03 \x01(\t*-\n\tDriveMode\x12\x0b\n\x07INVALID\x10\x00\x12\x08\n\x04USER\x10\x01\x12\t\n\x05PILOT\x10\x02*2\n\nTypeObject\x12\x07\n\x03\x41NY\x10\x00\x12\x07\n\x03\x43\x41R\x10\x01\x12\x08\n\x04\x42UMP\x10\x02\x12\x08\n\x04PLOT\x10\x03\x42\nZ\x08./eventsb\x06proto3')
 
 _builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, globals())
 _builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'events.events_pb2', globals())
```
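The only change is inside the serialized FileDescriptorProto: the Object message's left, top, right and bottom fields switch from float (type 2 in the encoded descriptor, the `(\x02` bytes) to int32 (type 5, `(\x05`), while confidence stays float. Since this file is generated, the matching edit presumably happened in the source events.proto before regeneration. A sketch for confirming the field types from the generated module (assumes it is importable as `events.events_pb2`):

```python
# Sketch: confirm the field-type change by inspecting the generated module.
from google.protobuf.descriptor import FieldDescriptor

import events.events_pb2 as events_pb2

NAMES = {FieldDescriptor.TYPE_INT32: "int32", FieldDescriptor.TYPE_FLOAT: "float"}

for field in events_pb2.Object.DESCRIPTOR.fields:
    print(field.name, NAMES.get(field.type, field.type))
# Expected after this change: left/top/right/bottom -> int32, confidence -> float
```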
requirements.txt

```diff
@@ -1,8 +1,8 @@
 paho-mqtt~=1.6.1
 docopt~=0.6.2
-depthai==2.17.2.0
-opencv-python==4.6.0.66
+depthai==2.14.1.0
+opencv-python~=4.5.5.62
 google~=3.0.0
 google-api-core~=2.4.0
 setuptools==60.5.0
-blobconverter==1.3.0
+protobuf3
```
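The dependency changes track the Dockerfile: blobconverter drops out of the runtime requirements, depthai is pinned to 2.14.1.0 instead of 2.17.2.0, opencv-python relaxes to ~=4.5.5.62, and protobuf3 is added. A quick sketch for checking an installed environment against such pins (package names taken from the file above):

```python
# Sketch: verify installed versions against the pinned requirements.
from importlib.metadata import PackageNotFoundError, version

pins = {"depthai": "2.14.1.0", "paho-mqtt": "1.6.1"}
for pkg, expected in pins.items():
    try:
        installed = version(pkg)
    except PackageNotFoundError:
        print(f"{pkg}: not installed")
        continue
    status = "ok" if installed == expected else f"differs (expected {expected})"
    print(f"{pkg}: {installed} {status}")
```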