Compare commits: feat/depth...v0.2.0

4 commits: 3766531936, fe63597ba4, 52c3808d83, 33c16699ae

Dockerfile (+12)
@@ -1,3 +1,12 @@
+FROM docker.io/library/python:3.9-slim AS model
+
+RUN python3 -m pip install blobconverter
+
+RUN mkdir -p /models
+
+RUN blobconverter --zoo-name mobile_object_localizer_192x192 --zoo-type depthai --shaves 6 --version 2021.4 --output-dir /models || echo ""
+RUN ls /models
+#######
 FROM docker.io/library/python:3.9-slim
 
 # Configure piwheels repo to use pre-compiled numpy wheels for arm
@@ -7,6 +16,9 @@ RUN apt-get update && apt-get install -y libgl1 libglib2.0-0
 RUN pip3 install numpy
 
+RUN mkdir /models
+COPY --from=model /models/mobile_object_localizer_192x192_openvino_2021.4_6shave.blob /models/mobile_object_localizer_192x192_openvino_2021.4_6shave.blob
+
 ADD requirements.txt requirements.txt
 
 RUN pip3 install -r requirements.txt
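The new build stage fetches the model blob at image-build time. Note that `|| echo ""` masks a failed conversion, so the following `RUN ls /models` is what actually surfaces a missing blob in the build log. The same download can be scripted with the blobconverter Python package — a minimal sketch, assuming the packaged `from_zoo` helper accepts the same zoo name, version, and output directory as the CLI invocation above:

import blobconverter

# Python equivalent of the `RUN blobconverter ...` line above;
# returns the path of the downloaded .blob file.
blob_path = blobconverter.from_zoo(
    name="mobile_object_localizer_192x192",
    zoo_type="depthai",
    shaves=6,
    version="2021.4",      # OpenVINO version; must match the pipeline
    output_dir="/models",  # same target directory as the Dockerfile stage
)
print(blob_path)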
@@ -1,19 +1,25 @@
 """
 Publish data from oak-lite device
 
-Usage: rc-oak-camera [-u USERNAME | --mqtt-username=USERNAME] [--mqtt-password=PASSWORD] [--mqtt-broker=HOSTNAME] \
-    [--mqtt-topic-robocar-oak-camera="TOPIC_CAMERA"] [--mqtt-client-id=CLIENT_ID] \
-    [-H IMG_HEIGHT | --image-height=IMG_HEIGHT] [-W IMG_WIDTH | --image-width=IMG_width]
+Usage: rc-oak-camera [-u USERNAME | --mqtt-username=USERNAME] [--mqtt-password=PASSWORD] \
+    [--mqtt-broker-host=HOSTNAME] [--mqtt-broker-port=PORT] \
+    [--mqtt-topic-robocar-oak-camera="TOPIC_CAMERA"] [--mqtt-topic-robocar-objects="TOPIC_OBJECTS"] \
+    [--mqtt-client-id=CLIENT_ID] \
+    [-H IMG_HEIGHT | --image-height=IMG_HEIGHT] [-W IMG_WIDTH | --image-width=IMG_width] \
+    [-t OBJECTS_THRESHOLD | --objects-threshold=OBJECTS_THRESHOLD]
 
 Options:
 -h --help                                                     Show this screen.
 -u USERID --mqtt-username=USERNAME                            MQTT user
 -p PASSWORD --mqtt-password=PASSWORD                          MQTT password
--b HOSTNAME --mqtt-broker=HOSTNAME                            MQTT broker host
+-b HOSTNAME --mqtt-broker-host=HOSTNAME                       MQTT broker host
+-P PORT --mqtt-broker-port=PORT                               MQTT broker port
 -C CLIENT_ID --mqtt-client-id=CLIENT_ID                       MQTT client id
 -c TOPIC_CAMERA --mqtt-topic-robocar-oak-camera=TOPIC_CAMERA  MQTT topic where to publish robocar-oak-camera frames
+-o TOPIC_OBJECTS --mqtt-topic-robocar-objects=TOPIC_OBJECTS   MQTT topic where to publish objects detection results
 -H IMG_HEIGHT --image-height=IMG_HEIGHT                       IMG_HEIGHT image height
 -W IMG_WIDTH --image-width=IMG_width                          IMG_WIDTH image width
+-t OBJECTS_THRESHOLD --objects-threshold=OBJECTS_THRESHOLD    OBJECTS_THRESHOLD threshold to filter objects detected
 """
 import logging
 import os
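Each setting is resolved with the repo's get_default_value helper, whose definition sits outside this diff: the CLI argument wins, then an environment variable, then a hard-coded default. A minimal sketch of that precedence, inferred from the call sites below (the implementation is an assumption, not the repo's actual code):

import os

def get_default_value(arg_value, env_var: str, default):
    # Assumed behavior, inferred from call sites in this diff:
    # CLI argument first, then the environment variable, then the default.
    if arg_value:
        return arg_value
    return os.environ.get(env_var, default)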
@@ -27,13 +33,13 @@ logging.basicConfig(level=logging.INFO)
 default_client_id = "robocar-depthai"
 
 
-def init_mqtt_client(broker_host: str, user: str, password: str, client_id: str) -> mqtt.Client:
+def init_mqtt_client(broker_host: str, broker_port: int, user: str, password: str, client_id: str) -> mqtt.Client:
     logger.info("Start part.py-robocar-oak-camera")
     client = mqtt.Client(client_id=client_id, clean_session=True, userdata=None, protocol=mqtt.MQTTv311)
 
     client.username_pw_set(user, password)
     logger.info("Connect to mqtt broker " + broker_host)
-    client.connect(host=broker_host, port=1883, keepalive=60)
+    client.connect(host=broker_host, port=broker_port, keepalive=60)
     logger.info("Connected to mqtt broker")
     return client
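With the port no longer hard-coded, the function can target any broker. A minimal usage sketch (host, port, and credentials are placeholders, not repo defaults):

# Hypothetical wiring, mirroring what execute_from_command_line() does below.
client = init_mqtt_client(
    broker_host="localhost",  # placeholder broker
    broker_port=1883,
    user="",
    password="",
    client_id="robocar-depthai",
)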
@@ -43,16 +49,21 @@ def execute_from_command_line():
 
     args = docopt(__doc__)
 
-    client = init_mqtt_client(broker_host=get_default_value(args["--mqtt-broker"], "MQTT_BROKER", "localhost"),
+    client = init_mqtt_client(broker_host=get_default_value(args["--mqtt-broker-host"], "MQTT_BROKER_HOST", "localhost"),
+                              broker_port=int(get_default_value(args["--mqtt-broker-port"], "MQTT_BROKER_PORT", "1883")),
                               user=get_default_value(args["--mqtt-username"], "MQTT_USERNAME", ""),
                               password=get_default_value(args["--mqtt-password"], "MQTT_PASSWORD", ""),
                               client_id=get_default_value(args["--mqtt-client-id"], "MQTT_CLIENT_ID",
                                                           default_client_id),
                               )
     frame_topic = get_default_value(args["--mqtt-topic-robocar-oak-camera"], "MQTT_TOPIC_CAMERA", "/oak/camera_rgb")
+    objects_topic = get_default_value(args["--mqtt-topic-robocar-objects"], "MQTT_TOPIC_OBJECTS", "/objects")
 
     frame_processor = cam.FramePublisher(mqtt_client=client,
                                          frame_topic=frame_topic,
+                                         objects_topic=objects_topic,
+                                         objects_threshold=float(get_default_value(args["--objects-threshold"],
+                                                                                   "OBJECTS_THRESHOLD", 0.2)),
                                          img_width=int(get_default_value(args["--image-width"], "IMAGE_WIDTH", 160)),
                                          img_height=int(get_default_value(args["--image-height"], "IMAGE_HEIGHT", 120)))
     frame_processor.run()
@@ -10,69 +10,49 @@ import numpy as np
 
 logger = logging.getLogger(__name__)
 
-# Closer-in minimum depth, disparity range is doubled (from 95 to 190):
-extended_disparity = False
-# Better accuracy for longer distance, fractional disparity 32-levels:
-subpixel = True
-# Better handling for occlusions:
-lr_check = True
+NN_PATH = "/models/mobile_object_localizer_192x192_openvino_2021.4_6shave.blob"
+NN_WIDTH = 192
+NN_HEIGHT = 192
 
 
 class FramePublisher:
-    def __init__(self, mqtt_client: mqtt.Client, frame_topic: str, img_width: int, img_height: int):
+    def __init__(self, mqtt_client: mqtt.Client, frame_topic: str, objects_topic: str, objects_threshold: float,
+                 img_width: int, img_height: int):
         self._mqtt_client = mqtt_client
         self._frame_topic = frame_topic
+        self._objects_topic = objects_topic
+        self._objects_threshold = objects_threshold
         self._img_width = img_width
         self._img_height = img_height
-        self._depth = None
         self._pipeline = self._configure_pipeline()
     def _configure_pipeline(self) -> dai.Pipeline:
         logger.info("configure pipeline")
         pipeline = dai.Pipeline()
 
+        pipeline.setOpenVINOVersion(version=dai.OpenVINO.VERSION_2021_4)
+
+        # Define a neural network that will make predictions based on the source frames
+        detection_nn = pipeline.create(dai.node.NeuralNetwork)
+        detection_nn.setBlobPath(NN_PATH)
+        detection_nn.setNumPoolFrames(4)
+        detection_nn.input.setBlocking(False)
+        detection_nn.setNumInferenceThreads(2)
+
+        xout_nn = pipeline.create(dai.node.XLinkOut)
+        xout_nn.setStreamName("nn")
+        xout_nn.input.setBlocking(False)
+
+        # Resize image
+        manip = pipeline.create(dai.node.ImageManip)
+        manip.initialConfig.setResize(NN_WIDTH, NN_HEIGHT)
+        manip.initialConfig.setFrameType(dai.ImgFrame.Type.RGB888p)
+        manip.initialConfig.setKeepAspectRatio(False)
+
         cam_rgb = pipeline.create(dai.node.ColorCamera)
         xout_rgb = pipeline.create(dai.node.XLinkOut)
 
         xout_rgb.setStreamName("rgb")
 
-        monoLeft = pipeline.create(dai.node.MonoCamera)
-        monoRight = pipeline.create(dai.node.MonoCamera)
-        depth = pipeline.create(dai.node.StereoDepth)
-        xout = pipeline.create(dai.node.XLinkOut)
-        self._depth = depth
-
-        xout.setStreamName("disparity")
-
-        # Properties
-        monoLeft.setResolution(dai.MonoCameraProperties.SensorResolution.THE_400_P)
-        monoLeft.setBoardSocket(dai.CameraBoardSocket.LEFT)
-        monoRight.setResolution(dai.MonoCameraProperties.SensorResolution.THE_400_P)
-        monoRight.setBoardSocket(dai.CameraBoardSocket.RIGHT)
-
-        # Create a node that will produce the depth map (using disparity output as it's easier to visualize depth this way)
-        depth.setDefaultProfilePreset(dai.node.StereoDepth.PresetMode.HIGH_DENSITY)
-        # Options: MEDIAN_OFF, KERNEL_3x3, KERNEL_5x5, KERNEL_7x7 (default)
-        depth.initialConfig.setMedianFilter(dai.MedianFilter.KERNEL_7x7)
-        depth.setLeftRightCheck(lr_check)
-        depth.setExtendedDisparity(extended_disparity)
-        depth.setSubpixel(subpixel)
-
-        config = depth.initialConfig.get()
-        config.postProcessing.speckleFilter.enable = True
-        config.postProcessing.speckleFilter.speckleRange = 50
-        config.postProcessing.temporalFilter.enable = False
-        config.postProcessing.spatialFilter.enable = False
-        config.postProcessing.spatialFilter.holeFillingRadius = 2
-        config.postProcessing.spatialFilter.numIterations = 1
-        # config.postProcessing.thresholdFilter.minRange = 400
-        # config.postProcessing.thresholdFilter.maxRange = 15000
-        config.postProcessing.decimationFilter.decimationFactor = 2
-        depth.initialConfig.set(config)
-
-        # Linking
-        monoLeft.out.link(depth.left)
-        monoRight.out.link(depth.right)
-        depth.disparity.link(xout.input)
-
         # Properties
         cam_rgb.setBoardSocket(dai.CameraBoardSocket.RGB)
@@ -81,8 +61,14 @@ class FramePublisher:
         cam_rgb.setColorOrder(dai.ColorCameraProperties.ColorOrder.RGB)
         cam_rgb.setFps(30)
 
-        # Linking
+        # Link preview to manip and manip to nn
+        cam_rgb.preview.link(manip.inputImage)
+        manip.out.link(detection_nn.input)
+
+        # Linking to output
         cam_rgb.preview.link(xout_rgb.input)
+        detection_nn.out.link(xout_nn.input)
 
         logger.info("pipeline configured")
         return pipeline
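Condensed, the new on-device graph is: ColorCamera preview → ImageManip (resize to 192×192 RGB planar) → NeuralNetwork → XLinkOut "nn", plus a direct preview → XLinkOut "rgb" for the frames; the stereo-depth branch is gone. A minimal standalone sketch of the same topology (node and stream names match the diff; pool sizes, blocking flags, and error handling omitted):

import depthai as dai

# Hypothetical default path; same blob the Dockerfile stage downloads.
BLOB = "/models/mobile_object_localizer_192x192_openvino_2021.4_6shave.blob"

def build_pipeline(blob_path: str = BLOB) -> dai.Pipeline:
    pipeline = dai.Pipeline()
    pipeline.setOpenVINOVersion(version=dai.OpenVINO.VERSION_2021_4)

    cam = pipeline.create(dai.node.ColorCamera)    # RGB sensor
    manip = pipeline.create(dai.node.ImageManip)   # resize for the NN
    nn = pipeline.create(dai.node.NeuralNetwork)   # object localizer blob
    xout_rgb = pipeline.create(dai.node.XLinkOut)
    xout_nn = pipeline.create(dai.node.XLinkOut)

    xout_rgb.setStreamName("rgb")
    xout_nn.setStreamName("nn")
    manip.initialConfig.setResize(192, 192)
    manip.initialConfig.setFrameType(dai.ImgFrame.Type.RGB888p)
    nn.setBlobPath(blob_path)

    cam.preview.link(manip.inputImage)  # camera -> resizer
    manip.out.link(nn.input)            # resizer -> NN
    cam.preview.link(xout_rgb.input)    # raw preview out
    nn.out.link(xout_nn.input)          # NN results out
    return pipeline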
@@ -98,24 +84,15 @@ class FramePublisher:
         device.startPipeline()
         # Queues
         queue_size = 4
-        q_rgb = device.getOutputQueue("rgb", maxSize=queue_size, blocking=False)
-        # Output queue will be used to get the disparity frames from the outputs defined above
-        q_disparity = device.getOutputQueue(name="disparity", maxSize=4, blocking=False)
+        q_rgb = device.getOutputQueue(name="rgb", maxSize=queue_size, blocking=False)
+        q_nn = device.getOutputQueue(name="nn", maxSize=queue_size, blocking=False)
 
         while True:
             try:
                 logger.debug("wait for new frame")
                 inRgb = q_rgb.get()  # blocking call, will wait until a new data has arrived
-                inDisparity = q_disparity.get()
-                # im_resize = inRgb.getCvFrame()
-                im_resize = inDisparity.getCvFrame()
-
-                # Normalization for better visualization
-                im_resize = (im_resize * (255 / self._depth.initialConfig.getMaxDisparity())).astype(np.uint8)
-
-                # Available color maps: https://docs.opencv.org/3.4/d3/d50/group__imgproc__colormap.html
-                # im_resize = cv2.applyColorMap(im_resize, cv2.COLORMAP_JET)
+                im_resize = inRgb.getCvFrame()
 
                 is_success, im_buf_arr = cv2.imencode(".jpg", im_resize)
                 byte_im = im_buf_arr.tobytes()
@@ -133,5 +110,42 @@ class FramePublisher:
                                           qos=0,
                                           retain=False)
 
+                in_nn = q_nn.get()
+
+                # get outputs
+                detection_boxes = np.array(in_nn.getLayerFp16("ExpandDims")).reshape((100, 4))
+                detection_scores = np.array(in_nn.getLayerFp16("ExpandDims_2")).reshape((100,))
+
+                # keep boxes bigger than threshold
+                mask = detection_scores >= self._objects_threshold
+                boxes = detection_boxes[mask]
+                scores = detection_scores[mask]
+
+                if boxes.shape[0] > 0:
+                    objects_msg = events.events_pb2.ObjectsMessage()
+                    objs = []
+                    for i in range(boxes.shape[0]):
+                        bbox = boxes[i]
+                        logger.debug("new object detected: %s", str(bbox))
+                        o = events.events_pb2.Object()
+                        o.type = events.events_pb2.TypeObject.ANY
+                        o.top = bbox[0].astype(float)
+                        o.right = bbox[3].astype(float)
+                        o.bottom = bbox[2].astype(float)
+                        o.left = bbox[1].astype(float)
+                        o.confidence = scores[i].astype(float)
+                        objs.append(o)
+                    objects_msg.objects.extend(objs)
+
+                    objects_msg.frame_ref.name = frame_msg.id.name
+                    objects_msg.frame_ref.id = frame_msg.id.id
+                    objects_msg.frame_ref.created_at.FromDatetime(now)
+
+                    logger.debug("publish object event to %s", self._objects_topic)
+                    self._mqtt_client.publish(topic=self._objects_topic,
+                                              payload=objects_msg.SerializeToString(),
+                                              qos=0,
+                                              retain=False)
+
             except Exception as e:
                 logger.exception("unexpected error: %s", str(e))
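On the other side of the broker, a consumer of the new objects topic only needs paho-mqtt and the generated protobuf module. A minimal subscriber sketch (broker address and topic default are placeholders, assuming `events.events_pb2` is importable on the subscriber side):

import paho.mqtt.client as mqtt

import events.events_pb2


def on_message(client, userdata, msg):
    # Decode the ObjectsMessage payload published by FramePublisher
    objects_msg = events.events_pb2.ObjectsMessage()
    objects_msg.ParseFromString(msg.payload)
    for obj in objects_msg.objects:
        print(f"object conf={obj.confidence:.2f} "
              f"box=({obj.left:.2f}, {obj.top:.2f}, {obj.right:.2f}, {obj.bottom:.2f})")


client = mqtt.Client(client_id="objects-listener", protocol=mqtt.MQTTv311)
client.on_message = on_message
client.connect("localhost", 1883, keepalive=60)  # placeholder broker
client.subscribe("/objects", qos=0)              # default topic from the CLI
client.loop_forever()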
@@ -14,7 +14,7 @@ _sym_db = _symbol_database.Default()
 from google.protobuf import timestamp_pb2 as google_dot_protobuf_dot_timestamp__pb2
 
 
-DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\x13\x65vents/events.proto\x12\x0erobocar.events\x1a\x1fgoogle/protobuf/timestamp.proto\"T\n\x08\x46rameRef\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\n\n\x02id\x18\x02 \x01(\t\x12.\n\ncreated_at\x18\x03 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\"C\n\x0c\x46rameMessage\x12$\n\x02id\x18\x01 \x01(\x0b\x32\x18.robocar.events.FrameRef\x12\r\n\x05\x66rame\x18\x02 \x01(\x0c\"d\n\x0fSteeringMessage\x12\x10\n\x08steering\x18\x01 \x01(\x02\x12\x12\n\nconfidence\x18\x02 \x01(\x02\x12+\n\tframe_ref\x18\x03 \x01(\x0b\x32\x18.robocar.events.FrameRef\"d\n\x0fThrottleMessage\x12\x10\n\x08throttle\x18\x01 \x01(\x02\x12\x12\n\nconfidence\x18\x02 \x01(\x02\x12+\n\tframe_ref\x18\x03 \x01(\x0b\x32\x18.robocar.events.FrameRef\"A\n\x10\x44riveModeMessage\x12-\n\ndrive_mode\x18\x01 \x01(\x0e\x32\x19.robocar.events.DriveMode\"f\n\x0eObjectsMessage\x12\'\n\x07objects\x18\x01 \x03(\x0b\x32\x16.robocar.events.Object\x12+\n\tframe_ref\x18\x02 \x01(\x0b\x32\x18.robocar.events.FrameRef\"\x80\x01\n\x06Object\x12(\n\x04type\x18\x01 \x01(\x0e\x32\x1a.robocar.events.TypeObject\x12\x0c\n\x04left\x18\x02 \x01(\x05\x12\x0b\n\x03top\x18\x03 \x01(\x05\x12\r\n\x05right\x18\x04 \x01(\x05\x12\x0e\n\x06\x62ottom\x18\x05 \x01(\x05\x12\x12\n\nconfidence\x18\x06 \x01(\x02\"&\n\x13SwitchRecordMessage\x12\x0f\n\x07\x65nabled\x18\x01 \x01(\x08\"\x8c\x01\n\x0bRoadMessage\x12&\n\x07\x63ontour\x18\x01 \x03(\x0b\x32\x15.robocar.events.Point\x12(\n\x07\x65llipse\x18\x02 \x01(\x0b\x32\x17.robocar.events.Ellipse\x12+\n\tframe_ref\x18\x03 \x01(\x0b\x32\x18.robocar.events.FrameRef\"\x1d\n\x05Point\x12\t\n\x01x\x18\x01 \x01(\x05\x12\t\n\x01y\x18\x02 \x01(\x05\"r\n\x07\x45llipse\x12%\n\x06\x63\x65nter\x18\x01 \x01(\x0b\x32\x15.robocar.events.Point\x12\r\n\x05width\x18\x02 \x01(\x05\x12\x0e\n\x06height\x18\x03 \x01(\x05\x12\r\n\x05\x61ngle\x18\x04 \x01(\x02\x12\x12\n\nconfidence\x18\x05 \x01(\x02\"\x82\x01\n\rRecordMessage\x12+\n\x05\x66rame\x18\x01 \x01(\x0b\x32\x1c.robocar.events.FrameMessage\x12\x31\n\x08steering\x18\x02 \x01(\x0b\x32\x1f.robocar.events.SteeringMessage\x12\x11\n\trecordSet\x18\x03 \x01(\t*-\n\tDriveMode\x12\x0b\n\x07INVALID\x10\x00\x12\x08\n\x04USER\x10\x01\x12\t\n\x05PILOT\x10\x02*2\n\nTypeObject\x12\x07\n\x03\x41NY\x10\x00\x12\x07\n\x03\x43\x41R\x10\x01\x12\x08\n\x04\x42UMP\x10\x02\x12\x08\n\x04PLOT\x10\x03\x42\nZ\x08./eventsb\x06proto3')
+DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\x13\x65vents/events.proto\x12\x0erobocar.events\x1a\x1fgoogle/protobuf/timestamp.proto\"T\n\x08\x46rameRef\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\n\n\x02id\x18\x02 \x01(\t\x12.\n\ncreated_at\x18\x03 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\"C\n\x0c\x46rameMessage\x12$\n\x02id\x18\x01 \x01(\x0b\x32\x18.robocar.events.FrameRef\x12\r\n\x05\x66rame\x18\x02 \x01(\x0c\"d\n\x0fSteeringMessage\x12\x10\n\x08steering\x18\x01 \x01(\x02\x12\x12\n\nconfidence\x18\x02 \x01(\x02\x12+\n\tframe_ref\x18\x03 \x01(\x0b\x32\x18.robocar.events.FrameRef\"d\n\x0fThrottleMessage\x12\x10\n\x08throttle\x18\x01 \x01(\x02\x12\x12\n\nconfidence\x18\x02 \x01(\x02\x12+\n\tframe_ref\x18\x03 \x01(\x0b\x32\x18.robocar.events.FrameRef\"A\n\x10\x44riveModeMessage\x12-\n\ndrive_mode\x18\x01 \x01(\x0e\x32\x19.robocar.events.DriveMode\"f\n\x0eObjectsMessage\x12\'\n\x07objects\x18\x01 \x03(\x0b\x32\x16.robocar.events.Object\x12+\n\tframe_ref\x18\x02 \x01(\x0b\x32\x18.robocar.events.FrameRef\"\x80\x01\n\x06Object\x12(\n\x04type\x18\x01 \x01(\x0e\x32\x1a.robocar.events.TypeObject\x12\x0c\n\x04left\x18\x02 \x01(\x02\x12\x0b\n\x03top\x18\x03 \x01(\x02\x12\r\n\x05right\x18\x04 \x01(\x02\x12\x0e\n\x06\x62ottom\x18\x05 \x01(\x02\x12\x12\n\nconfidence\x18\x06 \x01(\x02\"&\n\x13SwitchRecordMessage\x12\x0f\n\x07\x65nabled\x18\x01 \x01(\x08\"\x8c\x01\n\x0bRoadMessage\x12&\n\x07\x63ontour\x18\x01 \x03(\x0b\x32\x15.robocar.events.Point\x12(\n\x07\x65llipse\x18\x02 \x01(\x0b\x32\x17.robocar.events.Ellipse\x12+\n\tframe_ref\x18\x03 \x01(\x0b\x32\x18.robocar.events.FrameRef\"\x1d\n\x05Point\x12\t\n\x01x\x18\x01 \x01(\x05\x12\t\n\x01y\x18\x02 \x01(\x05\"r\n\x07\x45llipse\x12%\n\x06\x63\x65nter\x18\x01 \x01(\x0b\x32\x15.robocar.events.Point\x12\r\n\x05width\x18\x02 \x01(\x05\x12\x0e\n\x06height\x18\x03 \x01(\x05\x12\r\n\x05\x61ngle\x18\x04 \x01(\x02\x12\x12\n\nconfidence\x18\x05 \x01(\x02\"\x82\x01\n\rRecordMessage\x12+\n\x05\x66rame\x18\x01 \x01(\x0b\x32\x1c.robocar.events.FrameMessage\x12\x31\n\x08steering\x18\x02 \x01(\x0b\x32\x1f.robocar.events.SteeringMessage\x12\x11\n\trecordSet\x18\x03 \x01(\t*-\n\tDriveMode\x12\x0b\n\x07INVALID\x10\x00\x12\x08\n\x04USER\x10\x01\x12\t\n\x05PILOT\x10\x02*2\n\nTypeObject\x12\x07\n\x03\x41NY\x10\x00\x12\x07\n\x03\x43\x41R\x10\x01\x12\x08\n\x04\x42UMP\x10\x02\x12\x08\n\x04PLOT\x10\x03\x42\nZ\x08./eventsb\x06proto3')
 
 _builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, globals())
 _builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'events.events_pb2', globals())
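The only change in the regenerated descriptor is the wire type of Object's left, top, right, and bottom fields, from int32 (`\x05`) to float (`\x02`), matching the normalized box coordinates the localizer emits. A quick illustration of the practical effect (a sketch, assuming the regenerated module is on the path):

import events.events_pb2 as pb

o = pb.Object()
o.left = 0.25  # valid now that left/top/right/bottom are floats;
o.top = 0.10   # under the old int32 schema this assignment raised TypeError
o.right = 0.75
o.bottom = 0.90
o.confidence = 0.5
print(o.SerializeToString())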
@@ -1,8 +1,8 @@
 paho-mqtt~=1.6.1
 docopt~=0.6.2
-depthai==2.14.1.0
-opencv-python~=4.5.5.62
+depthai==2.17.2.0
+opencv-python==4.6.0.66
 google~=3.0.0
 google-api-core~=2.4.0
 setuptools==60.5.0
-protobuf3
+blobconverter==1.3.0