13 Commits

7 changed files with 1441 additions and 1423 deletions

View File

@@ -1,5 +1,5 @@
venv
dist/
dist/*
build-docker.sh
Dockerfile

View File

@@ -1,7 +1,8 @@
FROM docker.io/library/python:3.10-slim as base
FROM docker.io/library/python:3.11-slim as base
# Configure piwheels repo to use pre-compiled numpy wheels for arm
RUN echo -n "[global]\nextra-index-url=https://www.piwheels.org/simple\n" >> /etc/pip.conf
RUN echo -n "[global]\n" > /etc/pip.conf &&\
echo -n "extra-index-url = https://www.piwheels.org/simple https://git.cyrilix.bzh/api/packages/robocars/pypi/simple \n" >> /etc/pip.conf
RUN apt-get update && apt-get install -y libgl1 libglib2.0-0
@@ -18,9 +19,16 @@ RUN blobconverter --zoo-name mobile_object_localizer_192x192 --zoo-type depthai
FROM base as builder
RUN apt-get install -y git && \
pip3 install poetry==1.2.0 && \
pip3 install poetry && \
poetry self add "poetry-dynamic-versioning[plugin]"
ADD . .
ADD poetry.lock .
ADD pyproject.toml .
ADD camera camera
ADD README.md .
# Poetry expects to find a git project
ADD .git .git
RUN poetry build
@@ -31,7 +39,7 @@ RUN mkdir /models
COPY --from=model-builder /models/mobile_object_localizer_192x192_openvino_2021.4_6shave.blob /models/mobile_object_localizer_192x192_openvino_2021.4_6shave.blob
COPY --from=builder dist/*.whl /tmp/
RUN pip3 install /tmp/*whl
RUN pip3 install /tmp/*.whl
WORKDIR /tmp
USER 1234
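
For reference, the rewritten RUN echo lines are intended to leave /etc/pip.conf with a single [global] section whose extra-index-url points pip at both piwheels and the robocars package registry. A minimal sketch of that expectation (the content below is an assumption mirroring the echo commands, not read from the image, and it presumes the image shell's echo expands \n as dash's builtin does):

import configparser

# Hypothetical check: mirror the pip.conf the Dockerfile is expected to write
# and confirm it parses as the INI format pip reads.
expected_pip_conf = (
    "[global]\n"
    "extra-index-url = https://www.piwheels.org/simple "
    "https://git.cyrilix.bzh/api/packages/robocars/pypi/simple\n"
)
parser = configparser.ConfigParser()
parser.read_string(expected_pip_conf)
print(parser["global"]["extra-index-url"])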

View File

@@ -7,6 +7,6 @@ PLATFORM="linux/amd64,linux/arm64"
#PLATFORM="linux/amd64,linux/arm64,linux/arm/v7"
podman build . --platform "${PLATFORM}" --manifest "${IMAGE_NAME}:${TAG}"
podman manifest push --all --format v2s2 "localhost/${IMAGE_NAME}:${TAG}" "docker://${FULL_IMAGE_NAME}"
podman manifest push --all "localhost/${IMAGE_NAME}:${TAG}" "docker://${FULL_IMAGE_NAME}"
printf "\nImage %s published" "docker://${FULL_IMAGE_NAME}"

View File

@@ -10,10 +10,9 @@ import typing, types
import depthai as dai
import paho.mqtt.client as mqtt
from . import depthai as cam # pylint: disable=reimported
from camera import oak_pipeline as cam
logger = logging.getLogger(__name__)
logging.basicConfig(level=logging.INFO)
_DEFAULT_CLIENT_ID = "robocar-depthai"
@@ -46,12 +45,20 @@ def _parse_args_cli() -> argparse.Namespace:
help="threshold to filter detected objects",
type=float,
default=_get_env_float_value("OBJECTS_THRESHOLD", 0.2))
parser.add_argument("-f", "--camera-fps",
help="set rate at which camera should produce frames",
type=int,
default=30)
parser.add_argument("-H", "--image-height", help="image height",
type=int,
default=_get_env_int_value("IMAGE_HEIGHT", 120))
parser.add_argument("-W", "--image-width", help="image width",
type=int,
default=_get_env_int_value("IMAGE_WIDTH", 126))
parser.add_argument("--log", help="Log level",
type=str,
default="info",
choices=["info", "debug"])
args = parser.parse_args()
return args
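
The --image-height, --image-width and --objects-threshold defaults above rely on the _get_env_int_value and _get_env_float_value helpers that this hunk references but does not show. A minimal sketch of what such helpers could look like, assuming they simply fall back to the supplied default when the environment variable is unset:

import os

def _get_env_int_value(env_var_name: str, default_value: int) -> int:
    # Assumed behaviour: prefer the environment variable when set,
    # otherwise fall back to the supplied default.
    value = os.environ.get(env_var_name)
    return int(value) if value is not None else default_value

def _get_env_float_value(env_var_name: str, default_value: float) -> float:
    # Same fallback logic for float-valued settings such as OBJECTS_THRESHOLD.
    value = os.environ.get(env_var_name)
    return float(value) if value is not None else default_value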
@@ -72,9 +79,12 @@ def execute_from_command_line() -> None:
Cli entrypoint
:return:
"""
logging.basicConfig(level=logging.INFO)
args = _parse_args_cli()
if args.log == "info":
logging.basicConfig(level=logging.INFO)
elif args.log == "debug":
logging.basicConfig(level=logging.DEBUG)
client = _init_mqtt_client(broker_host=args.mqtt_broker_host,
broker_port=args.mqtt_broker_port,
@@ -88,12 +98,14 @@ def execute_from_command_line() -> None:
objects_threshold=args.objects_threshold)
pipeline = dai.Pipeline()
pipeline_controller = cam.PipelineController(frame_processor=frame_processor,
pipeline_controller = cam.PipelineController(pipeline=pipeline,
frame_processor=frame_processor,
object_processor=object_processor,
object_node=cam.ObjectDetectionNN(pipeline=pipeline),
camera=cam.CameraSource(pipeline=pipeline,
img_width=args.image_width,
img_height=args.image_width,
img_height=args.image_height,
fps=args.camera_fps,
))
def sigterm_handler(signum: int, frame: typing.Optional[

View File

@@ -159,6 +159,7 @@ class ObjectDetectionNN:
self._xout = self._configure_xout_nn(pipeline)
self._detection_nn.out.link(self._xout.input)
self._manip_image = self._configure_manip(pipeline)
self._manip_image.out.link(self._detection_nn.input)
@staticmethod
def _configure_manip(pipeline: dai.Pipeline) -> dai.node.ImageManip:
@@ -195,23 +196,20 @@ class ObjectDetectionNN:
class CameraSource(Source):
"""Image source based on camera preview"""
def __init__(self, pipeline: dai.Pipeline, img_width: int, img_height: int):
cam_rgb = pipeline.createColorCamera()
xout_rgb = pipeline.createXLinkOut()
xout_rgb.setStreamName("rgb")
self._cam_rgb = cam_rgb
self._xout_rgb = xout_rgb
def __init__(self, pipeline: dai.Pipeline, img_width: int, img_height: int, fps: int):
self._cam_rgb = pipeline.createColorCamera()
self._xout_rgb = pipeline.createXLinkOut()
self._xout_rgb.setStreamName("rgb")
# Properties
cam_rgb.setBoardSocket(dai.CameraBoardSocket.RGB)
cam_rgb.setPreviewSize(width=img_width, height=img_height)
cam_rgb.setInterleaved(False)
cam_rgb.setColorOrder(dai.ColorCameraProperties.ColorOrder.RGB)
cam_rgb.setFps(30)
self._cam_rgb.setBoardSocket(dai.CameraBoardSocket.RGB)
self._cam_rgb.setPreviewSize(width=img_width, height=img_height)
self._cam_rgb.setInterleaved(False)
self._cam_rgb.setColorOrder(dai.ColorCameraProperties.ColorOrder.RGB)
self._cam_rgb.setFps(fps)
# link camera preview to output
cam_rgb.preview.link(xout_rgb.input)
self._cam_rgb.preview.link(self._xout_rgb.input)
def link(self, input_node: dai.Node.Input) -> None:
self._cam_rgb.preview.link(input_node)
@@ -292,25 +290,25 @@ class PipelineController:
"""
def __init__(self, frame_processor: FrameProcessor,
object_processor: ObjectProcessor, camera: Source, object_node: ObjectDetectionNN):
self._pipeline = self._configure_pipeline()
object_processor: ObjectProcessor, camera: Source, object_node: ObjectDetectionNN,
pipeline: dai.Pipeline):
self._frame_processor = frame_processor
self._object_processor = object_processor
self._camera = camera
self._object_node = object_node
self._stop = False
self._pipeline = pipeline
self._configure_pipeline()
def _configure_pipeline(self) -> dai.Pipeline:
def _configure_pipeline(self) -> None:
logger.info("configure pipeline")
pipeline = dai.Pipeline()
pipeline.setOpenVINOVersion(version=dai.OpenVINO.VERSION_2021_4)
self._pipeline.setOpenVINOVersion(version=dai.OpenVINO.VERSION_2021_4)
# Link preview to manip and manip to nn
self._camera.link(self._object_node.get_input())
logger.info("pipeline configured")
return pipeline
def run(self) -> None:
"""
@@ -349,13 +347,19 @@ class PipelineController:
# Wait for frame
in_rgb: dai.ImgFrame = q_rgb.get() # type: ignore # blocking call, will wait until new data has arrived
try:
logger.debug("process frame")
frame_ref = self._frame_processor.process(in_rgb)
except FrameProcessError as ex:
logger.error("unable to process frame: %s", str(ex))
return
logger.debug("frame processed")
logger.debug("wait for nn response")
# Read NN result
in_nn: dai.NNData = q_nn.get() # type: ignore
logger.debug("process objects")
self._object_processor.process(in_nn, frame_ref)
logger.debug("objects processed")
def stop(self) -> None:
"""

poetry.lock (generated)

File diff suppressed because it is too large

View File

@@ -9,15 +9,14 @@ packages = [
]
[tool.poetry.dependencies]
python = "^3.10"
python = "^3.11"
paho-mqtt = "^1.6.1"
depthai = "^2.17.4.0"
depthai = "^2.22.0"
protobuf3 = "^0.2.1"
google = "^3.0.0"
blobconverter = "^1.3.0"
protobuf = "^4.21.8"
opencv-python-headless = "^4.6.0.66"
robocar-protobuf = { version = "^1.1.1", source = "robocar" }
robocar-protobuf = {version = "^1.3.0", source = "robocar"}
[tool.poetry.group.test.dependencies]
@@ -35,8 +34,7 @@ types-protobuf = "^3.20.4.2"
[[tool.poetry.source]]
name = "robocar"
url = "https://git.cyrilix.bzh/api/packages/robocars/pypi/simple"
default = false
secondary = false
priority = "explicit"
[build-system]
requires = ["poetry-core>=1.0.0", "poetry-dynamic-versioning"]