Compare commits
17 Commits
| SHA1 |
|---|
| 863cddbc17 |
| 35dd8d957a |
| 5948706773 |
| 30cb017455 |
| c8372239d3 |
| 000dd65054 |
| b39ec8ec98 |
| b2e4ca1c28 |
| fe9bb60fd0 |
| 1f302934bd |
| b9f554d4ba |
| 3d5ac90a57 |
| 1715c122e2 |
| 327d122721 |
| 9ca77f526a |
| 17730a4994 |
| 4844669191 |
4 .gitignore (vendored)
@@ -1 +1,5 @@
/venv/
.eggs
*.egg-info
.idea
*/__pycache__/
17 Dockerfile
@@ -3,24 +3,21 @@ FROM docker.io/library/python:3.9-slim
# Configure piwheels repo to use pre-compiled numpy wheels for arm
RUN echo -n "[global]\nextra-index-url=https://www.piwheels.org/simple\n" >> /etc/pip.conf

RUN apt-get update && apt-get install -y libusb-1.0-0

RUN apt-get update && apt-get install -y libgl1 libglib2.0-0

RUN pip3 install numpy

ADD requirements.txt .
ADD requirements.txt requirements.txt

RUN pip3 install -r requirements.txt

ADD events .
ADD camera .
ADD setup.cfg .
ADD setup.py .

ADD events events
ADD camera camera
ADD setup.cfg setup.cfg
ADD setup.py setup.py

ENV PYTHON_EGG_CACHE=/tmp/cache
RUN python3 setup.py install && rm -rf /src
#RUN mkdir -p ${PYTHON_EGG_CACHE}
RUN python3 setup.py install

WORKDIR /tmp
USER 1234
@@ -3,7 +3,10 @@
IMAGE_NAME=robocar-oak-camera
TAG=$(git describe)
FULL_IMAGE_NAME=docker.io/cyrilix/${IMAGE_NAME}:${TAG}
PLATFORM="linux/amd64,linux/arm64"
#PLATFORM="linux/amd64,linux/arm64,linux/arm/v7"

podman build . --platform "${PLATFORM}" --manifest "${IMAGE_NAME}:${TAG}"
podman manifest push --format v2s2 "localhost/${IMAGE_NAME}:${TAG}" "docker://${FULL_IMAGE_NAME}"

podman build . --platform linux/amd64,linux/arm64,linux/arm/v7 --manifest ${IMAGE_NAME}
podman manifest push --format v2s2 --rm "localhost/${IMAGE_NAME}" "docker://${FULL_IMAGE_NAME}"
printf "\nImage %s published" "docker://${FULL_IMAGE_NAME}"
@@ -1,8 +1,9 @@
"""
Publish data from oak-lite device

Usage: rc-oak-robocar-oak-camera [-u USERNAME | --mqtt-username=USERNAME] [--mqtt-password=PASSWORD] [--mqtt-broker=HOSTNAME]
          [--mqtt-topic-robocar-oak-camera="TOPIC_CAMERA"] [--mqtt-client-id=CLIENT_ID]
Usage: rc-oak-camera [-u USERNAME | --mqtt-username=USERNAME] [--mqtt-password=PASSWORD] [--mqtt-broker=HOSTNAME] \
          [--mqtt-topic-robocar-oak-camera="TOPIC_CAMERA"] [--mqtt-client-id=CLIENT_ID] \
          [-H IMG_HEIGHT | --image-height=IMG_HEIGHT] [-W IMG_WIDTH | --image-width=IMG_width]

Options:
    -h --help    Show this screen.
@@ -16,7 +17,7 @@ Options:
"""
import logging
import os
import camera.depthai as cam
from . import depthai as cam
from docopt import docopt
import paho.mqtt.client as mqtt

@@ -31,7 +32,7 @@ def init_mqtt_client(broker_host: str, user: str, password: str, client_id: str)
    client = mqtt.Client(client_id=client_id, clean_session=True, userdata=None, protocol=mqtt.MQTTv311)

    client.username_pw_set(user, password)
    logger.info("Connect to mqtt broker")
    logger.info("Connect to mqtt broker "+ broker_host)
    client.connect(host=broker_host, port=1883, keepalive=60)
    logger.info("Connected to mqtt broker")
    return client
@@ -54,9 +55,7 @@ def execute_from_command_line():
        frame_topic=frame_topic,
        img_width=int(get_default_value(args["--image-width"], "IMAGE_WIDTH", 160)),
        img_height=int(get_default_value(args["--image-height"], "IMAGE_HEIGHT", 120)))
    frame_processor.start()

    client.loop_forever()
    frame_processor.run()


def get_default_value(value, env_var: str, default_value) -> str:
@@ -1,36 +1,17 @@
import datetime
import logging
import paho.mqtt.client as mqtt

import events.events_pb2
from google.protobuf.timestamp_pb2 import Timestamp

import depthai as dai
import cv2


from threading import Thread

logger = logging.getLogger(__name__)

"""
This example shows usage of Camera Control message as well as ColorCamera configInput to change crop x and y
Uses 'WASD' controls to move the crop window, 'C' to capture a still image, 'T' to trigger autofocus, 'IOKL,.[]'
for manual exposure/focus/white-balance:
Control: key[dec/inc] min..max
exposure time: I O 1..33000 [us]
sensitivity iso: K L 100..1600
focus: , . 0..255 [far..near]
white balance: [ ] 1000..12000 (light color temperature K)
To go back to auto controls:
'E' - autoexposure
'F' - autofocus (continuous)
'B' - auto white-balance
"""


class FramePublisher(Thread):
class FramePublisher:
    def __init__(self, mqtt_client: mqtt.Client, frame_topic: str, img_width: int, img_height: int):
        super().__init__(name="FrameProcessor")
        self._mqtt_client = mqtt_client
        self._frame_topic = frame_topic
        self._img_width = img_width
@@ -40,59 +21,60 @@ class FramePublisher(Thread):
    def _configure_pipeline(self) -> dai.Pipeline:
        logger.info("configure pipeline")
        pipeline = dai.Pipeline()

        cam_rgb = pipeline.create(dai.node.ColorCamera)
        cam_rgb.setResolution(dai.ColorCameraProperties.SensorResolution.THE_1080_P)
        cam_rgb.setInterleaved(False)
        cam_rgb.setColorOrder(dai.ColorCameraProperties.ColorOrder.RGB)
        xout_rgb = pipeline.create(dai.node.XLinkOut)

        # Define sources and outputs
        manip = pipeline.create(dai.node.ImageManip)

        manip_out = pipeline.create(dai.node.XLinkOut)

        manip_out.setStreamName("manip")
        xout_rgb.setStreamName("rgb")

        # Properties
        cam_rgb.setBoardSocket(dai.CameraBoardSocket.RGB)
        cam_rgb.setResolution(dai.ColorCameraProperties.SensorResolution.THE_1080_P)

        manip.initialConfig.setResize(self._img_width, self._img_height)
        cam_rgb.setPreviewSize(width=self._img_width, height=self._img_height)
        cam_rgb.setInterleaved(False)
        cam_rgb.setColorOrder(dai.ColorCameraProperties.ColorOrder.RGB)
        cam_rgb.setFps(30)

        # Linking
        cam_rgb.video.link(manip.inputImage)
        manip.out.link(manip_out.input)
        cam_rgb.preview.link(xout_rgb.input)
        logger.info("pipeline configured")
        return pipeline

    def run(self):
        # Connect to device and start pipeline
        with dai.Device(self._pipeline) as device:
            logger.info('MxId: %s', device.getDeviceInfo().getMxId())
            logger.info('USB speed: %s', device.getUsbSpeed())
            logger.info('Connected cameras: %s', device.getConnectedCameras())

            logger.info("output queues found: %s", device.getOutputQueueNames())

            device.startPipeline()
            # Queues
            queue_size = 8
            queue_manip = device.getOutputQueue("manip", queue_size)
            queue_size = 4
            q_rgb = device.getOutputQueue("rgb", maxSize=queue_size, blocking=False)

            while True:
                try:
                    while queue_manip.has():
                        im_resize = queue_manip.get().getData().getCvFrame()
                    logger.debug("wait for new frame")
                    inRgb = q_rgb.get()  # blocking call, will wait until a new data has arrived

                    im_resize = inRgb.getCvFrame()

                    is_success, im_buf_arr = cv2.imencode(".jpg", im_resize)
                    byte_im = im_buf_arr.tobytes()

                    timestamp = Timestamp()
                    now = datetime.datetime.now()
                    frame_msg = events.events_pb2.FrameMessage()
                    frame_msg.id = events.events_pb2.FrameRef()
                    frame_msg.id.name = "robocar-oak-camera-oak"
                    frame_msg.id.id = timestamp.ToMilliseconds()
                    frame_msg.id.created_at = timestamp.GetCurrentTime()
                    frame_msg.id.id = str(int(now.timestamp() * 1000))
                    frame_msg.id.created_at.FromDatetime(now)
                    frame_msg.frame = byte_im

                    logger.debug("publish frame event to %s", self._frame_topic)
                    self._mqtt_client.publish(topic=self._frame_topic,
                                              payload=frame_msg.SerializeToString(),
                                              qos=0,
                                              retain=False)

                except Exception as e:
                    logger.exception("unexpected error")


                    logger.exception("unexpected error: %s", str(e))
@@ -3,4 +3,5 @@ docopt~=0.6.2
depthai==2.14.1.0
opencv-python~=4.5.5.62
google~=3.0.0
google-api-core~=2.4.0
setuptools==60.5.0