17 Commits

Author SHA1 Message Date
552f69e46e feat: tune camera exposition 2024-01-15 19:51:13 +01:00
4ec2aef409 feat: display fps 2024-01-14 10:36:40 +01:00
54977ee4e3 build: upgrade dependencies 2024-01-13 18:30:19 +01:00
87c1ee96e3 feat(object-detection): apply detection on big image 2024-01-13 18:28:21 +01:00
d4f8a12577 feat: refactor and option to configure camera fps 2023-10-01 17:24:05 +02:00
2593d5d953 chore: dependencies upgrade 2023-10-01 11:57:28 +02:00
c6f955a50c fix(camera): bad img height configuration 2022-12-25 11:22:06 +01:00
7ebd9093d9 fix(object_detection): add link from image_manip to nn node 2022-11-11 17:16:38 +01:00
642df5b927 add debug logs 2022-11-09 21:04:32 +01:00
c755d019e8 feat(cli): add flag to configure log level 2022-11-09 20:37:20 +01:00
befb4bacb3 fix: bad pipeline configuration 2022-11-05 16:09:30 +01:00
30f9876c1d build: fix docker build (pip index missing) 2022-11-02 16:31:46 +01:00
df8676ae5c fix(dependency): upgrade robocar-protobuf 2022-11-02 16:08:33 +01:00
9c07826898 build: upgrade dependencies 2022-11-02 15:51:16 +01:00
2149a01dd6 build: fix docker build 2022-11-02 15:35:47 +01:00
0db958e936 build: limit files to include into docker image 2022-11-02 13:56:41 +01:00
4faf3c2fee updaqte dockerignore 2022-11-02 13:55:52 +01:00
9 changed files with 1463 additions and 1446 deletions

View File

@@ -1,5 +1,5 @@
venv
dist/
dist/*
build-docker.sh
Dockerfile

View File

@@ -1,7 +1,8 @@
FROM docker.io/library/python:3.10-slim as base
FROM docker.io/library/python:3.11-slim as base
# Configure piwheels repo to use pre-compiled numpy wheels for arm
RUN echo -n "[global]\nextra-index-url=https://www.piwheels.org/simple\n" >> /etc/pip.conf
RUN echo -n "[global]\n" > /etc/pip.conf &&\
echo -n "extra-index-url = https://www.piwheels.org/simple https://git.cyrilix.bzh/api/packages/robocars/pypi/simple \n" >> /etc/pip.conf
RUN apt-get update && apt-get install -y libgl1 libglib2.0-0
@@ -18,20 +19,29 @@ RUN blobconverter --zoo-name mobile_object_localizer_192x192 --zoo-type depthai
FROM base as builder
RUN apt-get install -y git && \
pip3 install poetry==1.2.0 && \
pip3 install poetry && \
poetry self add "poetry-dynamic-versioning[plugin]"
ADD . .
ADD poetry.lock .
ADD pyproject.toml .
ADD camera camera
ADD README.md .
# Poetry expect to found a git project
ADD .git .git
RUN poetry build
#################
FROM base
COPY camera_tunning /camera_tuning
RUN mkdir /models
COPY --from=model-builder /models/mobile_object_localizer_192x192_openvino_2021.4_6shave.blob /models/mobile_object_localizer_192x192_openvino_2021.4_6shave.blob
COPY --from=builder dist/*.whl /tmp/
RUN pip3 install /tmp/*whl
RUN pip3 install /tmp/*.whl
WORKDIR /tmp
USER 1234

View File

@@ -7,6 +7,6 @@ PLATFORM="linux/amd64,linux/arm64"
#PLATFORM="linux/amd64,linux/arm64,linux/arm/v7"
podman build . --platform "${PLATFORM}" --manifest "${IMAGE_NAME}:${TAG}"
podman manifest push --all --format v2s2 "localhost/${IMAGE_NAME}:${TAG}" "docker://${FULL_IMAGE_NAME}"
podman manifest push --all "localhost/${IMAGE_NAME}:${TAG}" "docker://${FULL_IMAGE_NAME}"
printf "\nImage %s published" "docker://${FULL_IMAGE_NAME}"

View File

@@ -10,10 +10,13 @@ import typing, types
import depthai as dai
import paho.mqtt.client as mqtt
from . import depthai as cam # pylint: disable=reimported
from camera import oak_pipeline as cam
CAMERA_EXPOSITION_DEFAULT = "default"
CAMERA_EXPOSITION_8300US = "8300us"
CAMERA_EXPOSITION_500US = "500us"
logger = logging.getLogger(__name__)
logging.basicConfig(level=logging.INFO)
_DEFAULT_CLIENT_ID = "robocar-depthai"
@@ -46,12 +49,24 @@ def _parse_args_cli() -> argparse.Namespace:
help="threshold to filter detected objects",
type=float,
default=_get_env_float_value("OBJECTS_THRESHOLD", 0.2))
parser.add_argument("-f", "--camera-fps",
help="set rate at which camera should produce frames",
type=int,
default=30)
parser.add_argument("--camera-tuning-exposition", type=str,
default=CAMERA_EXPOSITION_DEFAULT,
help="override camera exposition configuration",
choices=[CAMERA_EXPOSITION_DEFAULT, CAMERA_EXPOSITION_500US, CAMERA_EXPOSITION_8300US])
parser.add_argument("-H", "--image-height", help="image height",
type=int,
default=_get_env_int_value("IMAGE_HEIGHT", 120))
parser.add_argument("-W", "--image-width", help="image width",
type=int,
default=_get_env_int_value("IMAGE_WIDTH", 126))
parser.add_argument("--log", help="Log level",
type=str,
default="info",
choices=["info", "debug"])
args = parser.parse_args()
return args
@@ -72,9 +87,12 @@ def execute_from_command_line() -> None:
Cli entrypoint
:return:
"""
logging.basicConfig(level=logging.INFO)
args = _parse_args_cli()
if args.log == "info":
logging.basicConfig(level=logging.INFO)
elif args.log == "debug":
logging.basicConfig(level=logging.DEBUG)
client = _init_mqtt_client(broker_host=args.mqtt_broker_host,
broker_port=args.mqtt_broker_port,
@@ -88,12 +106,20 @@ def execute_from_command_line() -> None:
objects_threshold=args.objects_threshold)
pipeline = dai.Pipeline()
pipeline_controller = cam.PipelineController(frame_processor=frame_processor,
if args.camera_tuning_exposition == CAMERA_EXPOSITION_500US:
pipeline.setCameraTuningBlobPath('/camera_tuning/tuning_exp_limit_500us.bin')
elif args.camera_tuning_exposition == CAMERA_EXPOSITION_8300US:
pipeline.setCameraTuningBlobPath('/camera_tuning/tuning_exp_limit_8300us.bin')
pipeline_controller = cam.PipelineController(pipeline=pipeline,
frame_processor=frame_processor,
object_processor=object_processor,
object_node=cam.ObjectDetectionNN(pipeline=pipeline),
camera=cam.CameraSource(pipeline=pipeline,
img_width=args.image_width,
img_height=args.image_width,
img_height=args.image_height,
fps=args.camera_fps,
))
def sigterm_handler(signum: int, frame: typing.Optional[

View File

@@ -5,6 +5,7 @@ import abc
import datetime
import logging
import pathlib
import time
import typing
from dataclasses import dataclass
@@ -22,6 +23,9 @@ _NN_PATH = "/models/mobile_object_localizer_192x192_openvino_2021.4_6shave.blob"
_NN_WIDTH = 192
_NN_HEIGHT = 192
_PREVIEW_WIDTH = 640
_PREVIEW_HEIGHT = 480
class ObjectProcessor:
"""
@@ -159,6 +163,7 @@ class ObjectDetectionNN:
self._xout = self._configure_xout_nn(pipeline)
self._detection_nn.out.link(self._xout.input)
self._manip_image = self._configure_manip(pipeline)
self._manip_image.out.link(self._detection_nn.input)
@staticmethod
def _configure_manip(pipeline: dai.Pipeline) -> dai.node.ImageManip:
@@ -195,23 +200,22 @@ class ObjectDetectionNN:
class CameraSource(Source):
"""Image source based on camera preview"""
def __init__(self, pipeline: dai.Pipeline, img_width: int, img_height: int):
cam_rgb = pipeline.createColorCamera()
xout_rgb = pipeline.createXLinkOut()
xout_rgb.setStreamName("rgb")
self._cam_rgb = cam_rgb
self._xout_rgb = xout_rgb
def __init__(self, pipeline: dai.Pipeline, img_width: int, img_height: int, fps: int):
self._cam_rgb = pipeline.createColorCamera()
self._xout_rgb = pipeline.createXLinkOut()
self._xout_rgb.setStreamName("rgb")
# Properties
cam_rgb.setBoardSocket(dai.CameraBoardSocket.RGB)
cam_rgb.setPreviewSize(width=img_width, height=img_height)
cam_rgb.setInterleaved(False)
cam_rgb.setColorOrder(dai.ColorCameraProperties.ColorOrder.RGB)
cam_rgb.setFps(30)
self._cam_rgb.setBoardSocket(dai.CameraBoardSocket.RGB)
self._cam_rgb.setPreviewSize(width=_PREVIEW_WIDTH, height=_PREVIEW_HEIGHT)
self._cam_rgb.setInterleaved(False)
self._cam_rgb.setColorOrder(dai.ColorCameraProperties.ColorOrder.RGB)
self._cam_rgb.setFps(fps)
self._resize_manip = self._configure_manip(pipeline=pipeline, img_width=img_width, img_height=img_height)
# link camera preview to output
cam_rgb.preview.link(xout_rgb.input)
self._cam_rgb.preview.link(self._resize_manip.inputImage)
self._resize_manip.out.link(self._xout_rgb.input)
def link(self, input_node: dai.Node.Input) -> None:
self._cam_rgb.preview.link(input_node)
@@ -219,6 +223,15 @@ class CameraSource(Source):
def get_stream_name(self) -> str:
return self._xout_rgb.getStreamName()
@staticmethod
def _configure_manip(pipeline: dai.Pipeline, img_width: int, img_height: int) -> dai.node.ImageManip:
# Resize image
manip = pipeline.createImageManip()
manip.initialConfig.setResize(img_width, img_height)
manip.initialConfig.setFrameType(dai.ImgFrame.Type.RGB888p)
manip.initialConfig.setKeepAspectRatio(False)
return manip
@dataclass
class MqttConfig:
@@ -292,25 +305,25 @@ class PipelineController:
"""
def __init__(self, frame_processor: FrameProcessor,
object_processor: ObjectProcessor, camera: Source, object_node: ObjectDetectionNN):
self._pipeline = self._configure_pipeline()
object_processor: ObjectProcessor, camera: Source, object_node: ObjectDetectionNN,
pipeline: dai.Pipeline):
self._frame_processor = frame_processor
self._object_processor = object_processor
self._camera = camera
self._object_node = object_node
self._stop = False
self._pipeline = pipeline
self._configure_pipeline()
def _configure_pipeline(self) -> dai.Pipeline:
def _configure_pipeline(self) -> None:
logger.info("configure pipeline")
pipeline = dai.Pipeline()
pipeline.setOpenVINOVersion(version=dai.OpenVINO.VERSION_2021_4)
self._pipeline.setOpenVINOVersion(version=dai.OpenVINO.VERSION_2021_4)
# Link preview to manip and manip to nn
self._camera.link(self._object_node.get_input())
logger.info("pipeline configured")
return pipeline
def run(self) -> None:
"""
@@ -332,6 +345,10 @@ class PipelineController:
q_nn = dev.getOutputQueue(name=self._object_node.get_stream_name(), maxSize=queue_size, # type: ignore
blocking=False)
start_time = time.time()
counter = 0
fps = 0
display_time = time.time()
self._stop = False
while True:
if self._stop:
@@ -343,19 +360,36 @@ class PipelineController:
except Exception as ex:
logger.exception("unexpected error: %s", str(ex))
counter += 1
if (time.time() - start_time) > 1:
fps = counter / (time.time() - start_time)
counter = 0
start_time = time.time()
if (time.time() - display_time) >= 10:
display_time = time.time()
logger.info("fps: %s", fps)
def _loop_on_camera_events(self, q_nn: dai.DataOutputQueue, q_rgb: dai.DataOutputQueue) -> None:
logger.debug("wait for new frame")
# Wait for frame
in_rgb: dai.ImgFrame = q_rgb.get() # type: ignore # blocking call, will wait until a new data has arrived
try:
logger.debug("process frame")
frame_ref = self._frame_processor.process(in_rgb)
except FrameProcessError as ex:
logger.error("unable to process frame: %s", str(ex))
return
logger.debug("frame processed")
logger.debug("wait for nn response")
# Read NN result
in_nn: dai.NNData = q_nn.get() # type: ignore
logger.debug("process objects")
self._object_processor.process(in_nn, frame_ref)
logger.debug("objects processed")
def stop(self) -> None:
"""

Binary file not shown.

Binary file not shown.

2767
poetry.lock generated

File diff suppressed because it is too large Load Diff

View File

@@ -9,15 +9,14 @@ packages = [
]
[tool.poetry.dependencies]
python = "^3.10"
python = "^3.11"
paho-mqtt = "^1.6.1"
depthai = "^2.17.4.0"
depthai = "^2.22.0"
protobuf3 = "^0.2.1"
google = "^3.0.0"
blobconverter = "^1.3.0"
protobuf = "^4.21.8"
opencv-python-headless = "^4.6.0.66"
robocar-protobuf = { version = "^1.1.1", source = "robocar" }
robocar-protobuf = {version = "^1.3.0", source = "robocar"}
[tool.poetry.group.test.dependencies]
@@ -35,8 +34,7 @@ types-protobuf = "^3.20.4.2"
[[tool.poetry.source]]
name = "robocar"
url = "https://git.cyrilix.bzh/api/packages/robocars/pypi/simple"
default = false
secondary = false
priority = "explicit"
[build-system]
requires = ["poetry-core>=1.0.0", "poetry-dynamic-versioning"]