Compare commits: feat/simul ... v0.4.0 (13 commits)

SHA1:
d4f8a12577
2593d5d953
c6f955a50c
7ebd9093d9
642df5b927
c755d019e8
befb4bacb3
30f9876c1d
df8676ae5c
9c07826898
2149a01dd6
0db958e936
4faf3c2fee
@@ -1,5 +1,5 @@
 venv
-dist/
+dist/*
 build-docker.sh
 Dockerfile
 
Dockerfile — 18 changes
@@ -1,7 +1,8 @@
-FROM docker.io/library/python:3.10-slim as base
+FROM docker.io/library/python:3.11-slim as base
 
 # Configure piwheels repo to use pre-compiled numpy wheels for arm
-RUN echo -n "[global]\nextra-index-url=https://www.piwheels.org/simple\n" >> /etc/pip.conf
+RUN echo -n "[global]\n" > /etc/pip.conf &&\
+    echo -n "extra-index-url = https://www.piwheels.org/simple https://git.cyrilix.bzh/api/packages/robocars/pypi/simple \n" >> /etc/pip.conf
 
 RUN apt-get update && apt-get install -y libgl1 libglib2.0-0
 
@@ -18,9 +19,16 @@ RUN blobconverter --zoo-name mobile_object_localizer_192x192 --zoo-type depthai
 FROM base as builder
 
 RUN apt-get install -y git && \
-    pip3 install poetry==1.2.0 && \
+    pip3 install poetry && \
     poetry self add "poetry-dynamic-versioning[plugin]"
-ADD . .
+ADD poetry.lock .
+ADD pyproject.toml .
+ADD camera camera
+ADD README.md .
+
+# Poetry expect to found a git project
+ADD .git .git
+
 
 RUN poetry build
 
@@ -31,7 +39,7 @@ RUN mkdir /models
 COPY --from=model-builder /models/mobile_object_localizer_192x192_openvino_2021.4_6shave.blob /models/mobile_object_localizer_192x192_openvino_2021.4_6shave.blob
 
 COPY --from=builder dist/*.whl /tmp/
-RUN pip3 install /tmp/*whl
+RUN pip3 install /tmp/*.whl
 
 WORKDIR /tmp
 USER 1234
@@ -7,6 +7,6 @@ PLATFORM="linux/amd64,linux/arm64"
 #PLATFORM="linux/amd64,linux/arm64,linux/arm/v7"
 
 podman build . --platform "${PLATFORM}" --manifest "${IMAGE_NAME}:${TAG}"
-podman manifest push --all --format v2s2 "localhost/${IMAGE_NAME}:${TAG}" "docker://${FULL_IMAGE_NAME}"
+podman manifest push --all "localhost/${IMAGE_NAME}:${TAG}" "docker://${FULL_IMAGE_NAME}"
 
 printf "\nImage %s published" "docker://${FULL_IMAGE_NAME}"
@@ -10,10 +10,9 @@ import typing, types
 import depthai as dai
 import paho.mqtt.client as mqtt
 
-from . import depthai as cam  # pylint: disable=reimported
+from camera import oak_pipeline as cam
 
 logger = logging.getLogger(__name__)
-logging.basicConfig(level=logging.INFO)
 
 _DEFAULT_CLIENT_ID = "robocar-depthai"
 
@@ -46,12 +45,20 @@ def _parse_args_cli() -> argparse.Namespace:
                         help="threshold to filter detected objects",
                         type=float,
                         default=_get_env_float_value("OBJECTS_THRESHOLD", 0.2))
+    parser.add_argument("-f", "--camera-fps",
+                        help="set rate at which camera should produce frames",
+                        type=int,
+                        default=30)
     parser.add_argument("-H", "--image-height", help="image height",
                         type=int,
                         default=_get_env_int_value("IMAGE_HEIGHT", 120))
     parser.add_argument("-W", "--image-width", help="image width",
                         type=int,
                         default=_get_env_int_value("IMAGE_WIDTH", 126))
+    parser.add_argument("--log", help="Log level",
+                        type=str,
+                        default="info",
+                        choices=["info", "debug"])
     args = parser.parse_args()
     return args
 
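Several of these defaults are read from environment variables through helpers that do not appear in this diff. A minimal sketch of what such helpers and the two new flags look like in isolation; the helper bodies are assumptions, only the flag names, types and defaults come from the hunk above:

    import argparse
    import os

    def _get_env_int_value(env_var: str, default_value: int) -> int:
        # Assumed helper: use the environment variable when set, the default otherwise.
        return int(os.environ.get(env_var, default_value))

    def _get_env_float_value(env_var: str, default_value: float) -> float:
        return float(os.environ.get(env_var, default_value))

    parser = argparse.ArgumentParser()
    parser.add_argument("-f", "--camera-fps",
                        help="set rate at which camera should produce frames",
                        type=int, default=30)
    parser.add_argument("--log", help="Log level",
                        type=str, default="info", choices=["info", "debug"])
    args = parser.parse_args(["-f", "15", "--log", "debug"])
    print(args.camera_fps, args.log)  # 15 debug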
@@ -72,9 +79,12 @@ def execute_from_command_line() -> None:
     Cli entrypoint
     :return:
     """
-    logging.basicConfig(level=logging.INFO)
 
     args = _parse_args_cli()
+    if args.log == "info":
+        logging.basicConfig(level=logging.INFO)
+    elif args.log == "debug":
+        logging.basicConfig(level=logging.DEBUG)
 
     client = _init_mqtt_client(broker_host=args.mqtt_broker_host,
                                broker_port=args.mqtt_broker_port,
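Moving basicConfig out of the import path and behind the parsed --log option means the level is chosen exactly once, after argument parsing, and applies to every module logger. An equivalent sketch using a lookup table instead of the if/elif above; the dict form is an illustration, not the repository's code:

    import logging

    _LOG_LEVELS = {"info": logging.INFO, "debug": logging.DEBUG}

    def configure_logging(level_name: str) -> None:
        # One basicConfig call, made after the CLI arguments are known.
        logging.basicConfig(level=_LOG_LEVELS[level_name])

    configure_logging("debug")
    logging.getLogger("camera").debug("debug output is now visible")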
@@ -88,12 +98,14 @@ def execute_from_command_line() -> None:
                                        objects_threshold=args.objects_threshold)
 
     pipeline = dai.Pipeline()
-    pipeline_controller = cam.PipelineController(frame_processor=frame_processor,
+    pipeline_controller = cam.PipelineController(pipeline=pipeline,
+                                                 frame_processor=frame_processor,
                                                  object_processor=object_processor,
                                                  object_node=cam.ObjectDetectionNN(pipeline=pipeline),
                                                  camera=cam.CameraSource(pipeline=pipeline,
                                                                          img_width=args.image_width,
-                                                                         img_height=args.image_width,
+                                                                         img_height=args.image_height,
+                                                                         fps=args.camera_fps,
                                                                          ))
 
     def sigterm_handler(signum: int, frame: typing.Optional[
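The hunk stops at the first line of a SIGTERM handler whose body is cut off here. A minimal sketch of the usual pattern for such a handler, reusing the logger and pipeline_controller defined in this entrypoint; the handler body and the signal registration are assumptions, not the repository's exact code:

    import signal
    import types
    import typing

    def sigterm_handler(signum: int,
                        frame: typing.Optional[types.FrameType]) -> None:
        # Assumed behaviour: ask the controller to stop so run() can return cleanly.
        logger.info("SIGTERM received (%d), stopping pipeline", signum)
        pipeline_controller.stop()

    signal.signal(signal.SIGTERM, sigterm_handler)
    pipeline_controller.run()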
@@ -159,6 +159,7 @@ class ObjectDetectionNN:
         self._xout = self._configure_xout_nn(pipeline)
         self._detection_nn.out.link(self._xout.input)
         self._manip_image = self._configure_manip(pipeline)
+        self._manip_image.out.link(self._detection_nn.input)
 
     @staticmethod
     def _configure_manip(pipeline: dai.Pipeline) -> dai.node.ImageManip:
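The added line closes the gap in the on-device node chain: the ImageManip output now feeds the detection network, whose output was already linked to its XLinkOut stream. A self-contained sketch of that chain with the DepthAI API; the node configuration, stream name and blob path (the file copied to /models in the Dockerfile above) are assumptions standing in for the repository's _configure_* helpers:

    import depthai as dai

    pipeline = dai.Pipeline()

    # ImageManip resizes the camera preview to the network input size (assumed 192x192).
    manip = pipeline.createImageManip()
    manip.initialConfig.setResize(192, 192)

    # Detection network; the blob path matches the file copied in the Dockerfile stage.
    nn = pipeline.createNeuralNetwork()
    nn.setBlobPath("/models/mobile_object_localizer_192x192_openvino_2021.4_6shave.blob")

    # Host-facing output stream for the NN results.
    xout_nn = pipeline.createXLinkOut()
    xout_nn.setStreamName("nn")

    # The chain this commit completes: manip -> nn -> xout.
    manip.out.link(nn.input)
    nn.out.link(xout_nn.input)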
@@ -195,23 +196,20 @@ class ObjectDetectionNN:
 class CameraSource(Source):
     """Image source based on camera preview"""
 
-    def __init__(self, pipeline: dai.Pipeline, img_width: int, img_height: int):
-        cam_rgb = pipeline.createColorCamera()
-        xout_rgb = pipeline.createXLinkOut()
-        xout_rgb.setStreamName("rgb")
-
-        self._cam_rgb = cam_rgb
-        self._xout_rgb = xout_rgb
+    def __init__(self, pipeline: dai.Pipeline, img_width: int, img_height: int, fps: int):
+        self._cam_rgb = pipeline.createColorCamera()
+        self._xout_rgb = pipeline.createXLinkOut()
+        self._xout_rgb.setStreamName("rgb")
 
         # Properties
-        cam_rgb.setBoardSocket(dai.CameraBoardSocket.RGB)
-        cam_rgb.setPreviewSize(width=img_width, height=img_height)
-        cam_rgb.setInterleaved(False)
-        cam_rgb.setColorOrder(dai.ColorCameraProperties.ColorOrder.RGB)
-        cam_rgb.setFps(30)
+        self._cam_rgb.setBoardSocket(dai.CameraBoardSocket.RGB)
+        self._cam_rgb.setPreviewSize(width=img_width, height=img_height)
+        self._cam_rgb.setInterleaved(False)
+        self._cam_rgb.setColorOrder(dai.ColorCameraProperties.ColorOrder.RGB)
+        self._cam_rgb.setFps(fps)
 
         # link camera preview to output
-        cam_rgb.preview.link(xout_rgb.input)
+        self._cam_rgb.preview.link(self._xout_rgb.input)
 
     def link(self, input_node: dai.Node.Input) -> None:
         self._cam_rgb.preview.link(input_node)
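A minimal host-side sketch of the refactored CameraSource in use, with the frame rate coming from the new --camera-fps flag instead of the hard-coded 30. The import path follows the updated CLI module and the sizes mirror the CLI defaults; running CameraSource as the only node in the pipeline is an illustrative assumption:

    import depthai as dai

    from camera import oak_pipeline as cam

    pipeline = dai.Pipeline()
    camera = cam.CameraSource(pipeline=pipeline, img_width=126, img_height=120, fps=15)

    with dai.Device(pipeline) as device:
        # "rgb" is the stream name CameraSource sets on its XLinkOut node.
        q_rgb = device.getOutputQueue(name="rgb", maxSize=4, blocking=True)
        frame: dai.ImgFrame = q_rgb.get()  # preview frames now arrive at ~15 fps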
@@ -292,25 +290,25 @@ class PipelineController:
     """
 
     def __init__(self, frame_processor: FrameProcessor,
-                 object_processor: ObjectProcessor, camera: Source, object_node: ObjectDetectionNN):
-        self._pipeline = self._configure_pipeline()
+                 object_processor: ObjectProcessor, camera: Source, object_node: ObjectDetectionNN,
+                 pipeline: dai.Pipeline):
         self._frame_processor = frame_processor
         self._object_processor = object_processor
         self._camera = camera
         self._object_node = object_node
         self._stop = False
+        self._pipeline = pipeline
+        self._configure_pipeline()
 
-    def _configure_pipeline(self) -> dai.Pipeline:
+    def _configure_pipeline(self) -> None:
         logger.info("configure pipeline")
-        pipeline = dai.Pipeline()
 
-        pipeline.setOpenVINOVersion(version=dai.OpenVINO.VERSION_2021_4)
+        self._pipeline.setOpenVINOVersion(version=dai.OpenVINO.VERSION_2021_4)
 
         # Link preview to manip and manip to nn
         self._camera.link(self._object_node.get_input())
 
         logger.info("pipeline configured")
-        return pipeline
 
     def run(self) -> None:
         """
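Injecting the dai.Pipeline instead of building it inside the controller means CameraSource, ObjectDetectionNN and PipelineController all configure one shared graph, and that graph can be inspected before any device is opened. A small sketch of that behaviour; the standalone assertion is an illustration, the setOpenVINOVersion call is the one from the hunk above:

    import depthai as dai

    pipeline = dai.Pipeline()  # created once by the caller (the CLI entrypoint)

    # What _configure_pipeline now applies to the injected pipeline rather than a local one:
    pipeline.setOpenVINOVersion(version=dai.OpenVINO.VERSION_2021_4)

    # Because the caller owns the pipeline, it can be checked up front, e.g. in a test,
    # without opening a device.
    assert pipeline.getOpenVINOVersion() == dai.OpenVINO.VERSION_2021_4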
@@ -349,13 +347,19 @@ class PipelineController:
             # Wait for frame
             in_rgb: dai.ImgFrame = q_rgb.get()  # type: ignore # blocking call, will wait until a new data has arrived
             try:
+                logger.debug("process frame")
                 frame_ref = self._frame_processor.process(in_rgb)
             except FrameProcessError as ex:
                 logger.error("unable to process frame: %s", str(ex))
                 return
+            logger.debug("frame processed")
+
+            logger.debug("wait for nn response")
             # Read NN result
             in_nn: dai.NNData = q_nn.get()  # type: ignore
+            logger.debug("process objects")
             self._object_processor.process(in_nn, frame_ref)
+            logger.debug("objects processed")
 
     def stop(self) -> None:
         """
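Combined with the new --log flag, these logger.debug calls make the per-frame path observable without changing the default output. A self-contained sketch of the same loop shape reading only the rgb stream (the NN queue is omitted); the sizes mirror the CLI defaults and the loop bound is arbitrary:

    import logging

    import depthai as dai

    logging.basicConfig(level=logging.DEBUG)  # what --log debug now enables
    logger = logging.getLogger("frame-loop")

    pipeline = dai.Pipeline()
    cam_rgb = pipeline.createColorCamera()
    cam_rgb.setPreviewSize(width=126, height=120)
    cam_rgb.setInterleaved(False)
    xout = pipeline.createXLinkOut()
    xout.setStreamName("rgb")
    cam_rgb.preview.link(xout.input)

    with dai.Device(pipeline) as device:
        q_rgb = device.getOutputQueue(name="rgb", maxSize=1, blocking=True)
        for _ in range(5):
            logger.debug("process frame")
            in_rgb: dai.ImgFrame = q_rgb.get()  # blocking call, waits for the next frame
            logger.debug("frame processed: %dx%d", in_rgb.getWidth(), in_rgb.getHeight())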
poetry.lock (generated) — 2768 changes
File diff suppressed because it is too large.
@@ -9,15 +9,14 @@ packages = [
 ]
 
 [tool.poetry.dependencies]
-python = "^3.10"
+python = "^3.11"
 paho-mqtt = "^1.6.1"
-depthai = "^2.17.4.0"
+depthai = "^2.22.0"
 protobuf3 = "^0.2.1"
 google = "^3.0.0"
-blobconverter = "^1.3.0"
 protobuf = "^4.21.8"
 opencv-python-headless = "^4.6.0.66"
-robocar-protobuf = { version = "^1.1.1", source = "robocar" }
+robocar-protobuf = {version = "^1.3.0", source = "robocar"}
 
 
 [tool.poetry.group.test.dependencies]
@@ -35,8 +34,7 @@ types-protobuf = "^3.20.4.2"
 [[tool.poetry.source]]
 name = "robocar"
 url = "https://git.cyrilix.bzh/api/packages/robocars/pypi/simple"
-default = false
-secondary = false
+priority = "explicit"
 
 [build-system]
 requires = ["poetry-core>=1.0.0", "poetry-dynamic-versioning"]