Compare commits


28 Commits

SHA1 Message Date
7ebd9093d9 fix(object_detection): add link from image_manip to nn node 2022-11-11 17:16:38 +01:00
642df5b927 add debug logs 2022-11-09 21:04:32 +01:00
c755d019e8 feat(cli): add flag to configure log level 2022-11-09 20:37:20 +01:00
befb4bacb3 fix: bad pipeline configuration 2022-11-05 16:09:30 +01:00
30f9876c1d build: fix docker build (pip index missing) 2022-11-02 16:31:46 +01:00
df8676ae5c fix(dependency): upgrade robocar-protobuf 2022-11-02 16:08:33 +01:00
9c07826898 build: upgrade dependencies 2022-11-02 15:51:16 +01:00
2149a01dd6 build: fix docker build 2022-11-02 15:35:47 +01:00
0db958e936 build: limit files to include into docker image 2022-11-02 13:56:41 +01:00
4faf3c2fee updaqte dockerignore 2022-11-02 13:55:52 +01:00
7c65f87d58 build: merge mypy.ini with pyproject.toml 2022-10-27 16:04:32 +02:00
55d8ce06c6 build: fix all pylint/mypy errors 2022-10-27 16:03:23 +02:00
4daf4d3c23 build: upgrade dependencies 2022-10-27 09:13:41 +02:00
aed8e9f8c2 feat: check typing with mypy 2022-10-27 09:09:04 +02:00
7670b8b01a refactor: fix pylint 2022-10-26 17:32:35 +02:00
6db1afce75 build: use opencv headless flavor 2022-10-26 10:48:13 +02:00
667c6903ef feat(simulator): add simulator source 2022-10-25 16:59:18 +02:00
24e4410c25 refactor: rewrite depthai node management 2022-10-25 16:44:16 +02:00
0c5e8e93ac test: add unit tests and fix error 2022-10-24 12:09:37 +02:00
9b0b772786 refactor: minor clean 2022-10-20 21:00:59 +02:00
c2875df304 docker: update registry to push 2022-10-20 17:45:05 +02:00
49ab38d66c feat(k8s): implement gracefull sigterm 2022-10-20 17:06:55 +02:00
9918c8c413 refactor: split pipeline code 2022-10-20 16:57:33 +02:00
b50b54be34 refactor: split event loop process 2022-10-20 16:02:24 +02:00
c3396b13ac quality: run and fix pylint check 2022-10-20 15:29:38 +02:00
fa4c7eef03 refactor(cli): remove docopt dependency 2022-10-20 15:29:38 +02:00
3919519e50 build: upgrade to python 3.10 2022-10-20 15:29:38 +02:00
bbc0c3b976 build: move to poetry 2022-10-20 15:29:38 +02:00
16 changed files with 6678 additions and 311 deletions

.dockerignore
@@ -1,2 +1,6 @@
 venv
+dist/*
+build-docker.sh
+Dockerfile

.gitignore (vendored)
@@ -3,3 +3,6 @@
 *.egg-info
 .idea
 */__pycache__/
+/dist/
+build
+__pycache__

.pylintrc (new file)
@@ -0,0 +1,618 @@
[MAIN]
# Analyse import fallback blocks. This can be used to support both Python 2 and
# 3 compatible code, which means that the block might have code that exists
# only in one or another interpreter, leading to false positives when analysed.
analyse-fallback-blocks=no
# Load and enable all available extensions. Use --list-extensions to see a list
# all available extensions.
#enable-all-extensions=
# In error mode, messages with a category besides ERROR or FATAL are
# suppressed, and no reports are done by default. Error mode is compatible with
# disabling specific errors.
#errors-only=
# Always return a 0 (non-error) status code, even if lint errors are found.
# This is primarily useful in continuous integration scripts.
#exit-zero=
# A comma-separated list of package or module names from where C extensions may
# be loaded. Extensions are loading into the active Python interpreter and may
# run arbitrary code.
extension-pkg-allow-list=depthai,node,cv2,events.*
# A comma-separated list of package or module names from where C extensions may
# be loaded. Extensions are loading into the active Python interpreter and may
# run arbitrary code. (This is an alternative name to extension-pkg-allow-list
# for backward compatibility.)
extension-pkg-whitelist=
# Return non-zero exit code if any of these messages/categories are detected,
# even if score is above --fail-under value. Syntax same as enable. Messages
# specified are enabled, while categories only check already-enabled messages.
fail-on=
# Specify a score threshold under which the program will exit with error.
fail-under=10
# Interpret the stdin as a python script, whose filename needs to be passed as
# the module_or_package argument.
#from-stdin=
# Files or directories to be skipped. They should be base names, not paths.
ignore=CVS
# Add files or directories matching the regular expressions patterns to the
# ignore-list. The regex matches against paths and can be in Posix or Windows
# format. Because '\' represents the directory delimiter on Windows systems, it
# can't be used as an escape character.
ignore-paths=
# Files or directories matching the regular expression patterns are skipped.
# The regex matches against base names, not paths. The default value ignores
# Emacs file locks
ignore-patterns=^\.#,test_.*?py
# List of module names for which member attributes should not be checked
# (useful for modules/projects where namespaces are manipulated during runtime
# and thus existing member attributes cannot be deduced by static analysis). It
# supports qualified module names, as well as Unix pattern matching.
ignored-modules=
# Python code to execute, usually for sys.path manipulation such as
# pygtk.require().
#init-hook=
# Use multiple processes to speed up Pylint. Specifying 0 will auto-detect the
# number of processors available to use, and will cap the count on Windows to
# avoid hangs.
jobs=1
# Control the amount of potential inferred values when inferring a single
# object. This can help the performance when dealing with large functions or
# complex, nested conditions.
limit-inference-results=100
# List of plugins (as comma separated values of python module names) to load,
# usually to register additional checkers.
load-plugins=
# Pickle collected data for later comparisons.
persistent=yes
# Minimum Python version to use for version dependent checks. Will default to
# the version used to run pylint.
py-version=3.10
# Discover python modules and packages in the file system subtree.
recursive=yes
# When enabled, pylint would attempt to guess common misconfiguration and emit
# user-friendly hints instead of false-positive error messages.
suggestion-mode=yes
# Allow loading of arbitrary C extensions. Extensions are imported into the
# active Python interpreter and may run arbitrary code.
unsafe-load-any-extension=no
# In verbose mode, extra non-checker-related info will be displayed.
#verbose=
[REPORTS]
# Python expression which should return a score less than or equal to 10. You
# have access to the variables 'fatal', 'error', 'warning', 'refactor',
# 'convention', and 'info' which contain the number of messages in each
# category, as well as 'statement' which is the total number of statements
# analyzed. This score is used by the global evaluation report (RP0004).
evaluation=max(0, 0 if fatal else 10.0 - ((float(5 * error + warning + refactor + convention) / statement) * 10))
# Template used to display messages. This is a python new-style format string
# used to format the message information. See doc for all details.
msg-template=
# Set the output format. Available formats are text, parseable, colorized, json
# and msvs (visual studio). You can also give a reporter class, e.g.
# mypackage.mymodule.MyReporterClass.
#output-format=
# Tells whether to display a full report or only the messages.
reports=no
# Activate the evaluation score.
score=yes
[MESSAGES CONTROL]
# Only show warnings with the listed confidence levels. Leave empty to show
# all. Valid levels: HIGH, CONTROL_FLOW, INFERENCE, INFERENCE_FAILURE,
# UNDEFINED.
confidence=HIGH,
CONTROL_FLOW,
INFERENCE,
INFERENCE_FAILURE,
UNDEFINED
# Disable the message, report, category or checker with the given id(s). You
# can either give multiple identifiers separated by comma (,) or put this
# option multiple times (only on the command line, not in the configuration
# file where it should appear only once). You can also use "--disable=all" to
# disable everything first and then re-enable specific checks. For example, if
# you want to run only the similarities checker, you can use "--disable=all
# --enable=similarities". If you want to run only the classes checker, but have
# no Warning level messages displayed, use "--disable=all --enable=classes
# --disable=W".
disable=raw-checker-failed,
bad-inline-option,
locally-disabled,
file-ignored,
suppressed-message,
useless-suppression,
deprecated-pragma,
use-symbolic-message-instead
# Enable the message, report, category or checker with the given id(s). You can
# either give multiple identifier separated by comma (,) or put this option
# multiple time (only on the command line, not in the configuration file where
# it should appear only once). See also the "--disable" option for examples.
enable=c-extension-no-member
[LOGGING]
# The type of string formatting that logging methods do. `old` means using %
# formatting, `new` is for `{}` formatting.
logging-format-style=old
# Logging modules to check that the string format arguments are in logging
# function parameter format.
logging-modules=logging
[SPELLING]
# Limits count of emitted suggestions for spelling mistakes.
max-spelling-suggestions=4
# Spelling dictionary name. Available dictionaries: none. To make it work,
# install the 'python-enchant' package.
spelling-dict=
# List of comma separated words that should be considered directives if they
# appear at the beginning of a comment and should not be checked.
spelling-ignore-comment-directives=fmt: on,fmt: off,noqa:,noqa,nosec,isort:skip,mypy:
# List of comma separated words that should not be checked.
spelling-ignore-words=
# A path to a file that contains the private dictionary; one word per line.
spelling-private-dict-file=
# Tells whether to store unknown words to the private dictionary (see the
# --spelling-private-dict-file option) instead of raising a message.
spelling-store-unknown-words=no
[MISCELLANEOUS]
# List of note tags to take in consideration, separated by a comma.
notes=FIXME,
XXX,
TODO
# Regular expression of note tags to take in consideration.
notes-rgx=
[TYPECHECK]
# List of decorators that produce context managers, such as
# contextlib.contextmanager. Add to this list to register other decorators that
# produce valid context managers.
contextmanager-decorators=contextlib.contextmanager
# List of members which are set dynamically and missed by pylint inference
# system, and so shouldn't trigger E1101 when accessed. Python regular
# expressions are accepted.
generated-members=cv2,events.events_pb2,depthai.*,dai.*
# Tells whether to warn about missing members when the owner of the attribute
# is inferred to be None.
ignore-none=yes
# This flag controls whether pylint should warn about no-member and similar
# checks whenever an opaque object is returned when inferring. The inference
# can return multiple potential results while evaluating a Python object, but
# some branches might not be evaluated, which results in partial inference. In
# that case, it might be useful to still emit no-member and other checks for
# the rest of the inferred objects.
ignore-on-opaque-inference=yes
# List of symbolic message names to ignore for Mixin members.
ignored-checks-for-mixins=no-member,
not-async-context-manager,
not-context-manager,
attribute-defined-outside-init
# List of class names for which member attributes should not be checked (useful
# for classes with dynamically set attributes). This supports the use of
# qualified names.
ignored-classes=optparse.Values,thread._local,_thread._local,argparse.Namespace
# Show a hint with possible names when a member name was not found. The aspect
# of finding the hint is based on edit distance.
missing-member-hint=yes
# The minimum edit distance a name should have in order to be considered a
# similar match for a missing member name.
missing-member-hint-distance=1
# The total number of similar names that should be taken in consideration when
# showing a hint for a missing member.
missing-member-max-choices=1
# Regex pattern to define which classes are considered mixins.
mixin-class-rgx=.*[Mm]ixin
# List of decorators that change the signature of a decorated function.
signature-mutators=
[CLASSES]
# Warn about protected attribute access inside special methods
check-protected-access-in-special-methods=no
# List of method names used to declare (i.e. assign) instance attributes.
defining-attr-methods=__init__,
__new__,
setUp,
__post_init__
# List of member names, which should be excluded from the protected access
# warning.
exclude-protected=_asdict,
_fields,
_replace,
_source,
_make
# List of valid names for the first argument in a class method.
valid-classmethod-first-arg=cls
# List of valid names for the first argument in a metaclass class method.
valid-metaclass-classmethod-first-arg=cls
[VARIABLES]
# List of additional names supposed to be defined in builtins. Remember that
# you should avoid defining new builtins when possible.
additional-builtins=
# Tells whether unused global variables should be treated as a violation.
allow-global-unused-variables=yes
# List of names allowed to shadow builtins
allowed-redefined-builtins=
# List of strings which can identify a callback function by name. A callback
# name must start or end with one of those strings.
callbacks=cb_,
_cb
# A regular expression matching the name of dummy variables (i.e. expected to
# not be used).
dummy-variables-rgx=_+$|(_[a-zA-Z0-9_]*[a-zA-Z0-9]+?$)|dummy|^ignored_|^unused_
# Argument names that match this expression will be ignored.
ignored-argument-names=_.*|^ignored_|^unused_
# Tells whether we should check for unused import in __init__ files.
init-import=no
# List of qualified module names which can have objects that can redefine
# builtins.
redefining-builtins-modules=six.moves,past.builtins,future.builtins,builtins,io
[FORMAT]
# Expected format of line ending, e.g. empty (any line ending), LF or CRLF.
expected-line-ending-format=
# Regexp for a line that is allowed to be longer than the limit.
ignore-long-lines=^\s*(# )?<?https?://\S+>?$
# Number of spaces of indent required inside a hanging or continued line.
indent-after-paren=4
# String used as indentation unit. This is usually " " (4 spaces) or "\t" (1
# tab).
indent-string=' '
# Maximum number of characters on a single line.
max-line-length=120
# Maximum number of lines in a module.
max-module-lines=1000
# Allow the body of a class to be on the same line as the declaration if body
# contains single statement.
single-line-class-stmt=no
# Allow the body of an if to be on the same line as the test if there is no
# else.
single-line-if-stmt=no
[IMPORTS]
# List of modules that can be imported at any level, not just the top level
# one.
allow-any-import-level=
# Allow wildcard imports from modules that define __all__.
allow-wildcard-with-all=no
# Deprecated modules which should not be used, separated by a comma.
deprecated-modules=
# Output a graph (.gv or any supported image format) of external dependencies
# to the given file (report RP0402 must not be disabled).
ext-import-graph=
# Output a graph (.gv or any supported image format) of all (i.e. internal and
# external) dependencies to the given file (report RP0402 must not be
# disabled).
import-graph=
# Output a graph (.gv or any supported image format) of internal dependencies
# to the given file (report RP0402 must not be disabled).
int-import-graph=
# Force import order to recognize a module as part of the standard
# compatibility libraries.
known-standard-library=
# Force import order to recognize a module as part of a third party library.
known-third-party=enchant
# Couples of modules and preferred modules, separated by a comma.
preferred-modules=
[METHOD_ARGS]
# List of qualified names (i.e., library.method) which require a timeout
# parameter e.g. 'requests.api.get,requests.api.post'
timeout-methods=requests.api.delete,requests.api.get,requests.api.head,requests.api.options,requests.api.patch,requests.api.post,requests.api.put,requests.api.request
[EXCEPTIONS]
# Exceptions that will emit a warning when caught.
overgeneral-exceptions=BaseException,
Exception
[REFACTORING]
# Maximum number of nested blocks for function / method body
max-nested-blocks=5
# Complete name of functions that never returns. When checking for
# inconsistent-return-statements if a never returning function is called then
# it will be considered as an explicit return statement and no message will be
# printed.
never-returning-functions=sys.exit,argparse.parse_error
[SIMILARITIES]
# Comments are removed from the similarity computation
ignore-comments=yes
# Docstrings are removed from the similarity computation
ignore-docstrings=yes
# Imports are removed from the similarity computation
ignore-imports=yes
# Signatures are removed from the similarity computation
ignore-signatures=yes
# Minimum lines number of a similarity.
min-similarity-lines=4
[DESIGN]
# List of regular expressions of class ancestor names to ignore when counting
# public methods (see R0903)
exclude-too-few-public-methods=
# List of qualified class names to ignore when counting class parents (see
# R0901)
ignored-parents=
# Maximum number of arguments for function / method.
max-args=5
# Maximum number of attributes for a class (see R0902).
max-attributes=7
# Maximum number of boolean expressions in an if statement (see R0916).
max-bool-expr=5
# Maximum number of branch for function / method body.
max-branches=12
# Maximum number of locals for function / method body.
max-locals=15
# Maximum number of parents for a class (see R0901).
max-parents=7
# Maximum number of public methods for a class (see R0904).
max-public-methods=20
# Maximum number of return / yield for function / method body.
max-returns=6
# Maximum number of statements in function / method body.
max-statements=50
# Minimum number of public methods for a class (see R0903).
min-public-methods=1
[STRING]
# This flag controls whether inconsistent-quotes generates a warning when the
# character used as a quote delimiter is used inconsistently within a module.
check-quote-consistency=no
# This flag controls whether the implicit-str-concat should generate a warning
# on implicit string concatenation in sequences defined over several lines.
check-str-concat-over-line-jumps=no
[BASIC]
# Naming style matching correct argument names.
argument-naming-style=snake_case
# Regular expression matching correct argument names. Overrides argument-
# naming-style. If left empty, argument names will be checked with the set
# naming style.
#argument-rgx=
# Naming style matching correct attribute names.
attr-naming-style=snake_case
# Regular expression matching correct attribute names. Overrides attr-naming-
# style. If left empty, attribute names will be checked with the set naming
# style.
#attr-rgx=
# Bad variable names which should always be refused, separated by a comma.
bad-names=foo,
bar,
baz,
toto,
tutu,
tata
# Bad variable names regexes, separated by a comma. If names match any regex,
# they will always be refused
bad-names-rgxs=
# Naming style matching correct class attribute names.
class-attribute-naming-style=any
# Regular expression matching correct class attribute names. Overrides class-
# attribute-naming-style. If left empty, class attribute names will be checked
# with the set naming style.
#class-attribute-rgx=
# Naming style matching correct class constant names.
class-const-naming-style=UPPER_CASE
# Regular expression matching correct class constant names. Overrides class-
# const-naming-style. If left empty, class constant names will be checked with
# the set naming style.
#class-const-rgx=
# Naming style matching correct class names.
class-naming-style=PascalCase
# Regular expression matching correct class names. Overrides class-naming-
# style. If left empty, class names will be checked with the set naming style.
#class-rgx=
# Naming style matching correct constant names.
const-naming-style=UPPER_CASE
# Regular expression matching correct constant names. Overrides const-naming-
# style. If left empty, constant names will be checked with the set naming
# style.
#const-rgx=
# Minimum line length for functions/classes that require docstrings, shorter
# ones are exempt.
docstring-min-length=-1
# Naming style matching correct function names.
function-naming-style=snake_case
# Regular expression matching correct function names. Overrides function-
# naming-style. If left empty, function names will be checked with the set
# naming style.
#function-rgx=
# Good variable names which should always be accepted, separated by a comma.
good-names=i,
j,
k,
ex,
Run,
_
# Good variable names regexes, separated by a comma. If names match any regex,
# they will always be accepted
good-names-rgxs=
# Include a hint for the correct naming format with invalid-name.
include-naming-hint=no
# Naming style matching correct inline iteration names.
inlinevar-naming-style=any
# Regular expression matching correct inline iteration names. Overrides
# inlinevar-naming-style. If left empty, inline iteration names will be checked
# with the set naming style.
#inlinevar-rgx=
# Naming style matching correct method names.
method-naming-style=snake_case
# Regular expression matching correct method names. Overrides method-naming-
# style. If left empty, method names will be checked with the set naming style.
#method-rgx=
# Naming style matching correct module names.
module-naming-style=snake_case
# Regular expression matching correct module names. Overrides module-naming-
# style. If left empty, module names will be checked with the set naming style.
#module-rgx=
# Colon-delimited sets of names that determine each other's naming style when
# the name regexes allow several styles.
name-group=
# Regular expression which should only match function or class names that do
# not require a docstring.
no-docstring-rgx=^_
# List of decorators that produce properties, such as abc.abstractproperty. Add
# to this list to register other decorators that produce valid properties.
# These decorators are taken in consideration only for invalid-name.
property-classes=abc.abstractproperty
# Regular expression matching correct type variable names. If left empty, type
# variable names will be checked with the set naming style.
#typevar-rgx=
# Naming style matching correct variable names.
variable-naming-style=snake_case
# Regular expression matching correct variable names. Overrides variable-
# naming-style. If left empty, variable names will be checked with the set
# naming style.
#variable-rgx=
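
The options above are consumed by the pylint check added in commit c3396b13ac. A minimal sketch of driving it from Python, assuming pylint 2.15 as pinned in pyproject.toml and execution from the repository root (pylint.lint.Run is pylint's documented programmatic entry point):

from pylint.lint import Run

# Lint the `camera` package with the repository's .pylintrc.
# exit=False keeps the interpreter alive so the result can be inspected.
results = Run(["--rcfile=.pylintrc", "camera"], exit=False)
print(results.linter.stats.global_note)  # global score, compared against fail-under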

Dockerfile
@@ -1,35 +1,45 @@
-FROM docker.io/library/python:3.9-slim AS model
+FROM docker.io/library/python:3.10-slim as base
+# Configure piwheels repo to use pre-compiled numpy wheels for arm
+RUN echo -n "[global]\n" > /etc/pip.conf &&\
+    echo -n "extra-index-url = https://www.piwheels.org/simple https://git.cyrilix.bzh/api/packages/robocars/pypi/simple \n" >> /etc/pip.conf
+RUN apt-get update && apt-get install -y libgl1 libglib2.0-0
+#################
+FROM base as model-builder
 RUN python3 -m pip install blobconverter
 RUN mkdir -p /models
 RUN blobconverter --zoo-name mobile_object_localizer_192x192 --zoo-type depthai --shaves 6 --version 2021.4 --output-dir /models || echo ""
-RUN ls /models
-#######
-FROM docker.io/library/python:3.9-slim
-# Configure piwheels repo to use pre-compiled numpy wheels for arm
-RUN echo -n "[global]\nextra-index-url=https://www.piwheels.org/simple\n" >> /etc/pip.conf
-RUN apt-get update && apt-get install -y libgl1 libglib2.0-0
-RUN pip3 install numpy
+#################
+FROM base as builder
+RUN apt-get install -y git && \
+    pip3 install poetry==1.2.0 && \
+    poetry self add "poetry-dynamic-versioning[plugin]"
+ADD poetry.lock .
+ADD pyproject.toml .
+ADD camera camera
+ADD README.md .
+# Poetry expect to found a git project
+ADD .git .git
+RUN poetry build
+#################
+FROM base
 RUN mkdir /models
-COPY --from=model /models/mobile_object_localizer_192x192_openvino_2021.4_6shave.blob /models/mobile_object_localizer_192x192_openvino_2021.4_6shave.blob
-ADD requirements.txt requirements.txt
-RUN pip3 install -r requirements.txt
-ADD events events
-ADD camera camera
-ADD setup.cfg setup.cfg
-ADD setup.py setup.py
-ENV PYTHON_EGG_CACHE=/tmp/cache
-RUN python3 setup.py install
+COPY --from=model-builder /models/mobile_object_localizer_192x192_openvino_2021.4_6shave.blob /models/mobile_object_localizer_192x192_openvino_2021.4_6shave.blob
+COPY --from=builder dist/*.whl /tmp/
+RUN pip3 install /tmp/*.whl
 WORKDIR /tmp
 USER 1234

build-docker.sh
@@ -2,11 +2,11 @@
 IMAGE_NAME=robocar-oak-camera
 TAG=$(git describe)
-FULL_IMAGE_NAME=docker.io/cyrilix/${IMAGE_NAME}:${TAG}
+FULL_IMAGE_NAME=git.cyrilix.bzh/robocars/${IMAGE_NAME}:${TAG}
 PLATFORM="linux/amd64,linux/arm64"
 #PLATFORM="linux/amd64,linux/arm64,linux/arm/v7"
 podman build . --platform "${PLATFORM}" --manifest "${IMAGE_NAME}:${TAG}"
-podman manifest push --format v2s2 "localhost/${IMAGE_NAME}:${TAG}" "docker://${FULL_IMAGE_NAME}"
+podman manifest push --all "localhost/${IMAGE_NAME}:${TAG}" "docker://${FULL_IMAGE_NAME}"
 printf "\nImage %s published" "docker://${FULL_IMAGE_NAME}"

camera/cli.py
@@ -1,77 +1,128 @@
 """
-Publish data from oak-lite device
-Usage: rc-oak-camera [-u USERNAME | --mqtt-username=USERNAME] [--mqtt-password=PASSWORD] \
-[--mqtt-broker-host=HOSTNAME] [--mqtt-broker-port=PORT] \
-[--mqtt-topic-robocar-oak-camera="TOPIC_CAMERA"] [--mqtt-topic-robocar-objects="TOPIC_OBJECTS"] \
-[--mqtt-client-id=CLIENT_ID] \
-[-H IMG_HEIGHT | --image-height=IMG_HEIGHT] [-W IMG_WIDTH | --image-width=IMG_width] \
-[-t OBJECTS_THRESHOLD | --objects-threshold=OBJECTS_THRESHOLD]
-Options:
--h --help                                        Show this screen.
--u USERID --mqtt-username=USERNAME               MQTT user
--p PASSWORD --mqtt-password=PASSWORD             MQTT password
--b HOSTNAME --mqtt-broker-host=HOSTNAME          MQTT broker host
--P HOSTNAME --mqtt-broker-port=PORT              MQTT broker port
--C CLIENT_ID --mqtt-client-id=CLIENT_ID          MQTT client id
--c TOPIC_CAMERA --mqtt-topic-robocar-oak-camera=TOPIC_CAMERA    MQTT topic where to publish robocar-oak-camera frames
--o TOPIC_OBJECTS --mqtt-topic-robocar-objects=TOPIC_OBJECTS     MQTT topic where to publish objects detection results
--H IMG_HEIGHT --image-height=IMG_HEIGHT          IMG_HEIGHT image height
--W IMG_WIDTH --image-width=IMG_width             IMG_WIDTH image width
--t OBJECTS_THRESHOLD --objects-threshold=OBJECTS_THRESHOLD      OBJECTS_THRESHOLD threshold to filter objects detected
+Mqtt gateway for oak-lite device
 """
+import argparse
 import logging
 import os
-from . import depthai as cam
-from docopt import docopt
+import signal
+import typing, types
+import depthai as dai
 import paho.mqtt.client as mqtt
+from . import depthai as cam  # pylint: disable=reimported
 logger = logging.getLogger(__name__)
-logging.basicConfig(level=logging.INFO)
-default_client_id = "robocar-depthai"
+_DEFAULT_CLIENT_ID = "robocar-depthai"
-def init_mqtt_client(broker_host: str, broker_port, user: str, password: str, client_id: str) -> mqtt.Client:
+def _parse_args_cli() -> argparse.Namespace:
+    parser = argparse.ArgumentParser()
+    parser.add_argument("-u", "--mqtt-username",
+                        help="MQTT user",
+                        default=_get_env_value("MQTT_USERNAME", ""))
+    parser.add_argument("-p", "--mqtt-password",
+                        help="MQTT password",
+                        default=_get_env_value("MQTT_PASSWORD", ""))
+    parser.add_argument("-b", "--mqtt-broker-host",
+                        help="MQTT broker host",
+                        default=_get_env_value("MQTT_BROKER_HOST", "localhost"))
+    parser.add_argument("-P", "--mqtt-broker-port",
+                        help="MQTT broker port",
+                        type=int,
+                        default=_get_env_int_value("MQTT_BROKER_PORT", 1883))
+    parser.add_argument("-C", "--mqtt-client-id",
+                        help="MQTT client id",
+                        default=_get_env_value("MQTT_CLIENT_ID", _DEFAULT_CLIENT_ID))
+    parser.add_argument("-c", "--mqtt-topic-robocar-oak-camera",
+                        help="MQTT topic where to publish robocar-oak-camera frames",
+                        default=_get_env_value("MQTT_TOPIC_CAMERA", "/oak/camera_rgb"))
+    parser.add_argument("-o", "---mqtt-topic-robocar-objects",
+                        help="MQTT topic where to publish objects detection results",
+                        default=_get_env_value("MQTT_TOPIC_OBJECTS", "/objects"))
+    parser.add_argument("-t", "--objects-threshold",
+                        help="threshold to filter detected objects",
+                        type=float,
+                        default=_get_env_float_value("OBJECTS_THRESHOLD", 0.2))
+    parser.add_argument("-H", "--image-height", help="image height",
+                        type=int,
+                        default=_get_env_int_value("IMAGE_HEIGHT", 120))
+    parser.add_argument("-W", "--image-width", help="image width",
+                        type=int,
+                        default=_get_env_int_value("IMAGE_WIDTH", 126))
+    parser.add_argument("--log", help="Log level",
+                        type=str,
+                        default="info",
+                        choices=["info", "debug"])
+    args = parser.parse_args()
+    return args
+def _init_mqtt_client(broker_host: str, broker_port: int, user: str, password: str, client_id: str) -> mqtt.Client:
     logger.info("Start part.py-robocar-oak-camera")
     client = mqtt.Client(client_id=client_id, clean_session=True, userdata=None, protocol=mqtt.MQTTv311)
     client.username_pw_set(user, password)
-    logger.info("Connect to mqtt broker "+ broker_host)
+    logger.info("Connect to mqtt broker %s", broker_host)
     client.connect(host=broker_host, port=broker_port, keepalive=60)
     logger.info("Connected to mqtt broker")
     return client
-def execute_from_command_line():
-    logging.basicConfig(level=logging.INFO)
-    args = docopt(__doc__)
+def execute_from_command_line() -> None:
+    """
+    Cli entrypoint
+    :return:
+    """
+    args = _parse_args_cli()
+    if args.log == "info":
+        logging.basicConfig(level=logging.INFO)
+    elif args.log == "debug":
+        logging.basicConfig(level=logging.DEBUG)
-    client = init_mqtt_client(broker_host=get_default_value(args["--mqtt-broker-host"], "MQTT_BROKER_HOST", "localhost"),
-                              broker_port=int(get_default_value(args["--mqtt-broker-port"], "MQTT_BROKER_PORT", "1883")),
-                              user=get_default_value(args["--mqtt-username"], "MQTT_USERNAME", ""),
-                              password=get_default_value(args["--mqtt-password"], "MQTT_PASSWORD", ""),
-                              client_id=get_default_value(args["--mqtt-client-id"], "MQTT_CLIENT_ID",
-                                                          default_client_id),
-                              )
-    frame_topic = get_default_value(args["--mqtt-topic-robocar-oak-camera"], "MQTT_TOPIC_CAMERA", "/oak/camera_rgb")
-    objects_topic = get_default_value(args["--mqtt-topic-robocar-objects"], "MQTT_TOPIC_OBJECTS", "/objects")
-    frame_processor = cam.FramePublisher(mqtt_client=client,
-                                         frame_topic=frame_topic,
-                                         objects_topic=objects_topic,
-                                         objects_threshold=float(get_default_value(args["--objects-threshold"],
-                                                                                   "OBJECTS_THRESHOLD", 0.2)),
-                                         img_width=int(get_default_value(args["--image-width"], "IMAGE_WIDTH", 160)),
-                                         img_height=int(get_default_value(args["--image-height"], "IMAGE_HEIGHT", 120)))
-    frame_processor.run()
+    client = _init_mqtt_client(broker_host=args.mqtt_broker_host,
+                               broker_port=args.mqtt_broker_port,
+                               user=args.mqtt_username,
+                               password=args.mqtt_password,
+                               client_id=args.mqtt_client_id,
+                               )
+    frame_processor = cam.FrameProcessor(mqtt_client=client, frame_topic=args.mqtt_topic_robocar_oak_camera)
+    object_processor = cam.ObjectProcessor(mqtt_client=client,
+                                           objects_topic=args.mqtt_topic_robocar_objects,
+                                           objects_threshold=args.objects_threshold)
+    pipeline = dai.Pipeline()
+    pipeline_controller = cam.PipelineController(pipeline=pipeline,
+                                                 frame_processor=frame_processor,
+                                                 object_processor=object_processor,
+                                                 object_node=cam.ObjectDetectionNN(pipeline=pipeline),
+                                                 camera=cam.CameraSource(pipeline=pipeline,
+                                                                         img_width=args.image_width,
+                                                                         img_height=args.image_width,
+                                                                         ))
+    def sigterm_handler(signum: int, frame: typing.Optional[
+        types.FrameType]) -> None:  # pylint: disable=unused-argument # need to implement handler signature
+        logger.info("exit on SIGTERM")
+        pipeline_controller.stop()
+    signal.signal(signal.SIGTERM, sigterm_handler)
+    pipeline_controller.run()
-def get_default_value(value, env_var: str, default_value) -> str:
-    if value:
-        return value
+def _get_env_value(env_var: str, default_value: str) -> str:
     if env_var in os.environ:
         return os.environ[env_var]
     return default_value
+def _get_env_int_value(env_var: str, default_value: int) -> int:
+    value = _get_env_value(env_var, str(default_value))
+    return int(value)
+def _get_env_float_value(env_var: str, default_value: float) -> float:
+    value = _get_env_value(env_var, str(default_value))
+    return float(value)
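
A self-contained sketch of the fallback pattern used by _parse_args_cli and the _get_env_* helpers above, where each CLI flag defaults to an environment variable before a hard-coded value (the two flags below are only illustrative):

import argparse
import os


def _env(name: str, default: str) -> str:
    # Environment variable when set, otherwise the hard-coded default.
    return os.environ.get(name, default)


parser = argparse.ArgumentParser()
parser.add_argument("-b", "--mqtt-broker-host", default=_env("MQTT_BROKER_HOST", "localhost"))
parser.add_argument("-P", "--mqtt-broker-port", type=int, default=int(_env("MQTT_BROKER_PORT", "1883")))

# An empty argv keeps the defaults; on a real command line, explicit flags win.
args = parser.parse_args([])
print(args.mqtt_broker_host, args.mqtt_broker_port)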

camera/depthai.py
@@ -1,151 +1,380 @@
+"""
+Camera event loop
+"""
+import abc
 import datetime
 import logging
-import paho.mqtt.client as mqtt
+import pathlib
+import typing
+from dataclasses import dataclass
-import events.events_pb2
-import depthai as dai
 import cv2
+import depthai as dai
+import events.events_pb2 as evt
 import numpy as np
+import numpy.typing as npt
+import paho.mqtt.client as mqtt
+from depthai import Device
 logger = logging.getLogger(__name__)
-NN_PATH = "/models/mobile_object_localizer_192x192_openvino_2021.4_6shave.blob"
-NN_WIDTH = 192
-NN_HEIGHT = 192
+_NN_PATH = "/models/mobile_object_localizer_192x192_openvino_2021.4_6shave.blob"
+_NN_WIDTH = 192
+_NN_HEIGHT = 192
-class FramePublisher:
-    def __init__(self, mqtt_client: mqtt.Client, frame_topic: str, objects_topic: str, objects_threshold: float,
-                 img_width: int, img_height: int):
+class ObjectProcessor:
+    """
+    Processor for Object detection
+    """
+    def __init__(self, mqtt_client: mqtt.Client, objects_topic: str, objects_threshold: float):
         self._mqtt_client = mqtt_client
-        self._frame_topic = frame_topic
         self._objects_topic = objects_topic
         self._objects_threshold = objects_threshold
-        self._img_width = img_width
-        self._img_height = img_height
-        self._pipeline = self._configure_pipeline()
-    def _configure_pipeline(self) -> dai.Pipeline:
-        logger.info("configure pipeline")
-        pipeline = dai.Pipeline()
-        pipeline.setOpenVINOVersion(version=dai.OpenVINO.VERSION_2021_4)
+    def process(self, in_nn: dai.NNData, frame_ref: evt.FrameRef) -> None:
+        """
+        Parse and publish result of NeuralNetwork result
+        :param in_nn: NeuralNetwork result read from device
+        :param frame_ref: Id of the frame where objects are been detected
+        :return:
+        """
+        detection_boxes = np.array(in_nn.getLayerFp16("ExpandDims")).reshape((100, 4))
+        detection_scores = np.array(in_nn.getLayerFp16("ExpandDims_2")).reshape((100,))
+        # keep boxes bigger than threshold
+        mask = detection_scores >= self._objects_threshold
+        boxes = detection_boxes[mask]
+        scores = detection_scores[mask]
+        if boxes.shape[0] > 0:
+            self._publish_objects(boxes, frame_ref, scores)
+    def _publish_objects(self, boxes: npt.NDArray[np.float64], frame_ref: evt.FrameRef, scores: npt.NDArray[np.float64]) -> None:
+        objects_msg = evt.ObjectsMessage()
+        objs = []
+        for i in range(boxes.shape[0]):
+            logger.debug("new object detected: %s", str(boxes[i]))
+            objs.append(_bbox_to_object(boxes[i], scores[i].astype(float)))
+        objects_msg.objects.extend(objs)
+        objects_msg.frame_ref.name = frame_ref.name
+        objects_msg.frame_ref.id = frame_ref.id
+        objects_msg.frame_ref.created_at.FromDatetime(frame_ref.created_at.ToDatetime())
+        logger.debug("publish object event to %s", self._objects_topic)
+        self._mqtt_client.publish(topic=self._objects_topic,
+                                  payload=objects_msg.SerializeToString(),
+                                  qos=0,
+                                  retain=False)
+class FrameProcessError(Exception):
+    """
+    Error base for invalid frame processing
+    Attributes:
+        message -- explanation of the error
+    """
+    def __init__(self, message: str):
+        """
+        :param message: explanation of the error
+        """
+        self.message = message
+class FrameProcessor:
+    """
+    Processor for camera frames
+    """
+    def __init__(self, mqtt_client: mqtt.Client, frame_topic: str):
+        self._mqtt_client = mqtt_client
+        self._frame_topic = frame_topic
+    def process(self, img: dai.ImgFrame) -> typing.Any:
+        """
+        Publish camera frames
+        :param img: image read from camera
+        :return:
+            id frame reference
+        :raise:
+            FrameProcessError if frame can't be processed
+        """
+        im_resize = img.getCvFrame()
+        is_success, im_buf_arr = cv2.imencode(".jpg", im_resize)
+        if not is_success:
+            raise FrameProcessError("unable to process to encode frame to jpg")
+        byte_im = im_buf_arr.tobytes()
+        now = datetime.datetime.now()
+        frame_msg = evt.FrameMessage()
+        frame_msg.id.name = "robocar-oak-camera-oak"
+        frame_msg.id.id = str(int(now.timestamp() * 1000))
+        frame_msg.id.created_at.FromDatetime(now)
+        frame_msg.frame = byte_im
+        logger.debug("publish frame event to %s", self._frame_topic)
+        self._mqtt_client.publish(topic=self._frame_topic,
+                                  payload=frame_msg.SerializeToString(),
+                                  qos=0,
+                                  retain=False)
+        return frame_msg.id
+class Source(abc.ABC):
+    """Base class for image source"""
+    @abc.abstractmethod
+    def get_stream_name(self) -> str:
+        """
+        Queue/stream name to use to get data
+        :return: steam name
+        """
+    @abc.abstractmethod
+    def link(self, input_node: dai.Node.Input) -> None:
+        """
+        Link this source to the input node
+        :param: input_node: input node to link
+        """
+class ObjectDetectionNN:
+    """
+    Node to detect objects into image
+    Read image as input and apply resize transformation before to run NN on it
+    Result is available with 'get_stream_name()' stream
+    """
+    def __init__(self, pipeline: dai.Pipeline):
         # Define a neural network that will make predictions based on the source frames
-        detection_nn = pipeline.create(dai.node.NeuralNetwork)
-        detection_nn.setBlobPath(NN_PATH)
+        detection_nn = pipeline.createNeuralNetwork()
+        detection_nn.setBlobPath(pathlib.Path(_NN_PATH))
         detection_nn.setNumPoolFrames(4)
         detection_nn.input.setBlocking(False)
         detection_nn.setNumInferenceThreads(2)
-        xout_nn = pipeline.create(dai.node.XLinkOut)
-        xout_nn.setStreamName("nn")
-        xout_nn.input.setBlocking(False)
+        self._detection_nn = detection_nn
+        self._xout = self._configure_xout_nn(pipeline)
+        self._detection_nn.out.link(self._xout.input)
+        self._manip_image = self._configure_manip(pipeline)
+        self._manip_image.out.link(self._detection_nn.input)
+    @staticmethod
+    def _configure_manip(pipeline: dai.Pipeline) -> dai.node.ImageManip:
         # Resize image
-        manip = pipeline.create(dai.node.ImageManip)
-        manip.initialConfig.setResize(NN_WIDTH, NN_HEIGHT)
+        manip = pipeline.createImageManip()
+        manip.initialConfig.setResize(_NN_WIDTH, _NN_HEIGHT)
         manip.initialConfig.setFrameType(dai.ImgFrame.Type.RGB888p)
         manip.initialConfig.setKeepAspectRatio(False)
-        cam_rgb = pipeline.create(dai.node.ColorCamera)
-        xout_rgb = pipeline.create(dai.node.XLinkOut)
-        xout_rgb.setStreamName("rgb")
+        return manip
+    @staticmethod
+    def _configure_xout_nn(pipeline: dai.Pipeline) -> dai.node.XLinkOut:
+        xout_nn = pipeline.createXLinkOut()
+        xout_nn.setStreamName("nn")
+        xout_nn.input.setBlocking(False)
+        return xout_nn
+    def get_stream_name(self) -> str:
+        """
+        Queue/stream name to use to get data
+        :return: stream name
+        """
+        return self._xout.getStreamName()
+    def get_input(self) -> dai.Node.Input:
+        """
+        Get input node to use to link with source node
+        :return: input to link with source output, see Source.link()
+        """
+        return self._manip_image.inputImage
+class CameraSource(Source):
+    """Image source based on camera preview"""
+    def __init__(self, pipeline: dai.Pipeline, img_width: int, img_height: int):
+        self._cam_rgb = pipeline.createColorCamera()
+        self._xout_rgb = pipeline.createXLinkOut()
+        self._xout_rgb.setStreamName("rgb")
         # Properties
-        cam_rgb.setBoardSocket(dai.CameraBoardSocket.RGB)
-        cam_rgb.setPreviewSize(width=self._img_width, height=self._img_height)
-        cam_rgb.setInterleaved(False)
-        cam_rgb.setColorOrder(dai.ColorCameraProperties.ColorOrder.RGB)
-        cam_rgb.setFps(30)
+        self._cam_rgb.setBoardSocket(dai.CameraBoardSocket.RGB)
+        self._cam_rgb.setPreviewSize(width=img_width, height=img_height)
+        self._cam_rgb.setInterleaved(False)
+        self._cam_rgb.setColorOrder(dai.ColorCameraProperties.ColorOrder.RGB)
+        self._cam_rgb.setFps(30)
+        # link camera preview to output
+        self._cam_rgb.preview.link(self._xout_rgb.input)
+    def link(self, input_node: dai.Node.Input) -> None:
+        self._cam_rgb.preview.link(input_node)
+    def get_stream_name(self) -> str:
+        return self._xout_rgb.getStreamName()
+@dataclass
+class MqttConfig:
+    """MQTT configuration"""
+    host: str
+    topic: str
+    port: int = 1883
+    qos: int = 0
+class MqttSource(Source):
+    """Image source based onto mqtt stream"""
+    def __init__(self, device: Device, pipeline: dai.Pipeline, mqtt_config: MqttConfig):
+        self._mqtt_config = mqtt_config
+        self._client = mqtt.Client()
+        self._client.user_data_set(mqtt_config)
+        self._client.on_connect = self._on_connect
+        self._client.on_message = self._on_message
+        self._img_in = pipeline.createXLinkIn()
+        self._img_in.setStreamName("img_input")
+        self._img_out = pipeline.createXLinkOut()
+        self._img_out.setStreamName("img_output")
+        self._img_in.out.link(self._img_out.input)
+        self._img_in_queue = device.getInputQueue(self._img_in.getStreamName())
+    def run(self) -> None:
+        """ Connect and start mqtt loop """
+        self._client.connect(host=self._mqtt_config.host, port=self._mqtt_config.port)
+        self._client.loop_start()
+    def stop(self) -> None:
+        """Stop and disconnect mqtt loop"""
+        self._client.loop_stop()
+        self._client.disconnect()
+    @staticmethod
+    # pylint: disable=unused-argument
+    def _on_connect(client: mqtt.Client, userdata: MqttConfig, flags: typing.Any,
+                    result_connection: typing.Any) -> None:
+        # if we lose the connection and reconnect then subscriptions will be renewed.
+        client.subscribe(topic=userdata.topic, qos=userdata.qos)
+    # pylint: disable=unused-argument
+    def _on_message(self, _: mqtt.Client, user_data: MqttConfig, msg: mqtt.MQTTMessage) -> None:
+        frame_msg = evt.FrameMessage()
+        frame_msg.ParseFromString(msg.payload)
+        frame = np.asarray(frame_msg.frame, dtype="uint8")
+        frame = cv2.imdecode(frame, cv2.IMREAD_COLOR)
+        nn_data = dai.NNData()
+        nn_data.setLayer("data", _to_planar(frame, (300, 300)))
+        self._img_in_queue.send(nn_data)
+    def get_stream_name(self) -> str:
+        return self._img_out.getStreamName()
+    def link(self, input_node: dai.Node.Input) -> None:
+        self._img_in.out.link(input_node)
+def _to_planar(arr: npt.NDArray[np.uint8], shape: tuple[int, int]) -> list[int]:
+    return [val for channel in cv2.resize(arr, shape).transpose(2, 0, 1) for y_col in channel for val in y_col]
+class PipelineController:
+    """
+    Pipeline controller that drive camera device
+    """
+    def __init__(self, frame_processor: FrameProcessor,
+                 object_processor: ObjectProcessor, camera: Source, object_node: ObjectDetectionNN,
+                 pipeline: dai.Pipeline):
+        self._frame_processor = frame_processor
+        self._object_processor = object_processor
+        self._camera = camera
+        self._object_node = object_node
+        self._stop = False
+        self._pipeline = pipeline
+        self._configure_pipeline()
+    def _configure_pipeline(self) -> None:
+        logger.info("configure pipeline")
+        self._pipeline.setOpenVINOVersion(version=dai.OpenVINO.VERSION_2021_4)
         # Link preview to manip and manip to nn
-        cam_rgb.preview.link(manip.inputImage)
-        manip.out.link(detection_nn.input)
-        # Linking to output
-        cam_rgb.preview.link(xout_rgb.input)
-        detection_nn.out.link(xout_nn.input)
+        self._camera.link(self._object_node.get_input())
         logger.info("pipeline configured")
-        return pipeline
-    def run(self):
+    def run(self) -> None:
+        """
+        Start event loop
+        :return:
+        """
         # Connect to device and start pipeline
-        with dai.Device(self._pipeline) as device:
-            logger.info('MxId: %s', device.getDeviceInfo().getMxId())
-            logger.info('USB speed: %s', device.getUsbSpeed())
-            logger.info('Connected cameras: %s', device.getConnectedCameras())
-            logger.info("output queues found: %s", device.getOutputQueueNames())
-            device.startPipeline()
+        with Device(pipeline=self._pipeline) as dev:
+            logger.info('MxId: %s', dev.getDeviceInfo().getMxId())
+            logger.info('USB speed: %s', dev.getUsbSpeed())
+            logger.info('Connected cameras: %s', str(dev.getConnectedCameras()))
+            logger.info("output queues found: %s", str(''.join(dev.getOutputQueueNames())))  # type: ignore
+            dev.startPipeline()
             # Queues
             queue_size = 4
-            q_rgb = device.getOutputQueue(name="rgb", maxSize=queue_size, blocking=False)
-            q_nn = device.getOutputQueue(name="nn", maxSize=queue_size, blocking=False)
+            q_rgb = dev.getOutputQueue(name=self._camera.get_stream_name(), maxSize=queue_size,  # type: ignore
+                                       blocking=False)
+            q_nn = dev.getOutputQueue(name=self._object_node.get_stream_name(), maxSize=queue_size,  # type: ignore
+                                      blocking=False)
+            self._stop = False
             while True:
+                if self._stop:
+                    logger.info("stop loop event")
+                    return
                 try:
-                    logger.debug("wait for new frame")
-                    inRgb = q_rgb.get()  # blocking call, will wait until a new data has arrived
-                    im_resize = inRgb.getCvFrame()
-                    is_success, im_buf_arr = cv2.imencode(".jpg", im_resize)
-                    byte_im = im_buf_arr.tobytes()
-                    now = datetime.datetime.now()
-                    frame_msg = events.events_pb2.FrameMessage()
-                    frame_msg.id.name = "robocar-oak-camera-oak"
-                    frame_msg.id.id = str(int(now.timestamp() * 1000))
-                    frame_msg.id.created_at.FromDatetime(now)
-                    frame_msg.frame = byte_im
-                    logger.debug("publish frame event to %s", self._frame_topic)
-                    self._mqtt_client.publish(topic=self._frame_topic,
-                                              payload=frame_msg.SerializeToString(),
-                                              qos=0,
-                                              retain=False)
-                    in_nn = q_nn.get()
-                    # get outputs
-                    detection_boxes = np.array(in_nn.getLayerFp16("ExpandDims")).reshape((100, 4))
-                    detection_scores = np.array(in_nn.getLayerFp16("ExpandDims_2")).reshape((100,))
-                    # keep boxes bigger than threshold
-                    mask = detection_scores >= self._objects_threshold
-                    boxes = detection_boxes[mask]
-                    scores = detection_scores[mask]
-                    if boxes.shape[0] > 0:
-                        objects_msg = events.events_pb2.ObjectsMessage()
-                        objs = []
-                        for i in range(boxes.shape[0]):
-                            bbox = boxes[i]
-                            logger.debug("new object detected: %s", str(bbox))
-                            o = events.events_pb2.Object()
-                            o.type = events.events_pb2.TypeObject.ANY
-                            o.top = bbox[0].astype(float)
-                            o.right = bbox[3].astype(float)
-                            o.bottom = bbox[2].astype(float)
-                            o.left = bbox[1].astype(float)
-                            o.confidence = scores[i].astype(float)
-                            objs.append(o)
-                        objects_msg.objects.extend(objs)
-                        objects_msg.frame_ref.name = frame_msg.id.name
-                        objects_msg.frame_ref.id = frame_msg.id.id
-                        objects_msg.frame_ref.created_at.FromDatetime(now)
-                        logger.debug("publish object event to %s", self._frame_topic)
-                        self._mqtt_client.publish(topic=self._objects_topic,
-                                                  payload=objects_msg.SerializeToString(),
-                                                  qos=0,
-                                                  retain=False)
-                except Exception as e:
-                    logger.exception("unexpected error: %s", str(e))
+                    self._loop_on_camera_events(q_nn, q_rgb)
+                # pylint: disable=broad-except # bad frame or event must not stop loop
+                except Exception as ex:
+                    logger.exception("unexpected error: %s", str(ex))
+    def _loop_on_camera_events(self, q_nn: dai.DataOutputQueue, q_rgb: dai.DataOutputQueue) -> None:
+        logger.debug("wait for new frame")
+        # Wait for frame
+        in_rgb: dai.ImgFrame = q_rgb.get()  # type: ignore # blocking call, will wait until a new data has arrived
+        try:
+            logger.debug("process frame")
+            frame_ref = self._frame_processor.process(in_rgb)
+        except FrameProcessError as ex:
+            logger.error("unable to process frame: %s", str(ex))
+            return
+        logger.debug("frame processed")
+        logger.debug("wait for nn response")
+        # Read NN result
+        in_nn: dai.NNData = q_nn.get()  # type: ignore
+        logger.debug("process objects")
+        self._object_processor.process(in_nn, frame_ref)
+        logger.debug("objects processed")
+    def stop(self) -> None:
+        """
+        Stop event loop, if loop is not running, do nothing
+        :return:
+        """
+        self._stop = True
+def _bbox_to_object(bbox: npt.NDArray[np.float64], score: float) -> evt.Object:
+    obj = evt.Object()
+    obj.type = evt.TypeObject.ANY
+    obj.top = bbox[0].astype(float)
+    obj.right = bbox[3].astype(float)
+    obj.bottom = bbox[2].astype(float)
+    obj.left = bbox[1].astype(float)
+    obj.confidence = score
+    return obj
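
A hedged sketch of how the refactored classes above are meant to be wired together, mirroring the wiring already visible in camera/cli.py; the broker address, topics and image size are placeholders, and an attached OAK device, a reachable MQTT broker and the model blob under /models are assumed:

import signal

import depthai as dai
import paho.mqtt.client as mqtt

from camera import depthai as cam

client = mqtt.Client(client_id="robocar-depthai", protocol=mqtt.MQTTv311)
client.connect(host="localhost", port=1883, keepalive=60)  # placeholder broker

pipeline = dai.Pipeline()
controller = cam.PipelineController(
    pipeline=pipeline,
    frame_processor=cam.FrameProcessor(mqtt_client=client, frame_topic="/oak/camera_rgb"),
    object_processor=cam.ObjectProcessor(mqtt_client=client, objects_topic="/objects",
                                         objects_threshold=0.2),
    object_node=cam.ObjectDetectionNN(pipeline=pipeline),
    camera=cam.CameraSource(pipeline=pipeline, img_width=126, img_height=126),
)

# Graceful shutdown on SIGTERM: stop() flips the flag checked by the event loop.
signal.signal(signal.SIGTERM, lambda signum, frame: controller.stop())
controller.run()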

Unit tests for camera.depthai (new file)
@@ -0,0 +1,156 @@
import datetime
import typing
import unittest.mock
import depthai as dai
import events.events_pb2
import numpy as np
import numpy.typing as npt
import paho.mqtt.client as mqtt
import pytest
import pytest_mock
import camera.depthai
Object = dict[str, float]
@pytest.fixture
def mqtt_client(mocker: pytest_mock.MockerFixture) -> mqtt.Client:
    return mocker.MagicMock()  # type: ignore
class TestObjectProcessor:
    @pytest.fixture
    def frame_ref(self) -> events.events_pb2.FrameRef:
        now = datetime.datetime.now()
        frame_msg = events.events_pb2.FrameMessage()
        frame_msg.id.name = "robocar-oak-camera-oak"
        frame_msg.id.id = str(int(now.timestamp() * 1000))
        frame_msg.id.created_at.FromDatetime(now)
        return frame_msg.id
    @pytest.fixture
    def object1(self) -> Object:
        return {
            "left": 0.3,
            "right": 0.7,
            "top": 0.1,
            "bottom": 0.6,
            "score": 0.8,
        }
    @pytest.fixture
    def raw_objects_empty(self, mocker: pytest_mock.MockerFixture) -> dai.NNData:
        raw_objects = mocker.MagicMock()
        def mock_return(name: str) -> typing.List[typing.Union[int, typing.List[int]]]:
            if name == "ExpandDims":
                return [[0] * 4] * 100
            elif name == "ExpandDims_2":
                return [0] * 100
            else:
                raise ValueError(f"{name} is not a valid arg")
        m = mocker.patch(target='depthai.NNData.getLayerFp16', autospec=True)
        m.getLayerFp16 = mock_return
        return m
    @pytest.fixture
    def raw_objects_one(self, mocker: pytest_mock.MockerFixture, object1: Object) -> dai.NNData:
        def mock_return(name: str) -> typing.Union[npt.NDArray[np.int64], typing.List[float]]:
            if name == "ExpandDims":  # Detection boxes
                boxes: list[list[float]] = [[0.] * 4] * 100
                boxes[0] = [object1["top"], object1["left"], object1["bottom"], object1["right"]]
                return np.array(boxes)
            elif name == "ExpandDims_2":  # Detection scores
                scores: list[float] = [0.] * 100
                scores[0] = object1["score"]
                return scores
            else:
                raise ValueError(f"{name} is not a valid arg")
        m = mocker.patch(target='depthai.NNData.getLayerFp16', autospec=True)
        m.getLayerFp16 = mock_return
        return m
    @pytest.fixture
    def object_processor(self, mqtt_client: mqtt.Client) -> camera.depthai.ObjectProcessor:
        return camera.depthai.ObjectProcessor(mqtt_client, "topic/object", 0.2)
    def test_process_without_object(self, object_processor: camera.depthai.ObjectProcessor, mqtt_client: mqtt.Client,
                                    raw_objects_empty: dai.NNData, frame_ref: events.events_pb2.FrameRef) -> None:
        object_processor.process(raw_objects_empty, frame_ref)
        publish_mock: unittest.mock.MagicMock = mqtt_client.publish  # type: ignore
        publish_mock.assert_not_called()
    def test_process_with_object_with_low_score(self, object_processor: camera.depthai.ObjectProcessor,
                                                mqtt_client: mqtt.Client, raw_objects_one: dai.NNData,
                                                frame_ref: events.events_pb2.FrameRef) -> None:
        object_processor._objects_threshold = 0.9
        object_processor.process(raw_objects_one, frame_ref)
        publish_mock: unittest.mock.MagicMock = mqtt_client.publish  # type: ignore
        publish_mock.assert_not_called()
    def test_process_with_one_object(self,
                                     object_processor: camera.depthai.ObjectProcessor, mqtt_client: mqtt.Client,
                                     raw_objects_one: dai.NNData, frame_ref: events.events_pb2.FrameRef,
                                     object1: Object) -> None:
        object_processor.process(raw_objects_one, frame_ref)
        left = object1["left"]
        right = object1["right"]
        top = object1["top"]
        bottom = object1["bottom"]
        score = object1["score"]
        pub_mock: unittest.mock.MagicMock = mqtt_client.publish  # type: ignore
        pub_mock.assert_called_once_with(payload=unittest.mock.ANY, qos=0, retain=False, topic="topic/object")
        payload = pub_mock.call_args.kwargs['payload']
        objects_msg = events.events_pb2.ObjectsMessage()
        objects_msg.ParseFromString(payload)
        assert len(objects_msg.objects) == 1
        assert left - 0.0001 < objects_msg.objects[0].left < left + 0.0001
        assert right - 0.0001 < objects_msg.objects[0].right < right + 0.0001
        assert top - 0.0001 < objects_msg.objects[0].top < top + 0.0001
        assert bottom - 0.0001 < objects_msg.objects[0].bottom < bottom + 0.0001
        assert score - 0.0001 < objects_msg.objects[0].confidence < score + 0.0001
        assert objects_msg.frame_ref == frame_ref
class TestFrameProcessor:
    @pytest.fixture
    def frame_processor(self, mqtt_client: mqtt.Client) -> camera.depthai.FrameProcessor:
        return camera.depthai.FrameProcessor(mqtt_client, "topic/frame")
    def test_process(self, frame_processor: camera.depthai.FrameProcessor, mocker: pytest_mock.MockerFixture,
                     mqtt_client: mqtt.Client) -> None:
        img: dai.ImgFrame = mocker.MagicMock()
        mocker.patch(target="cv2.imencode").return_value = (True, np.array(b"img content"))
        frame_ref = frame_processor.process(img)
        pub_mock: unittest.mock.MagicMock = mqtt_client.publish  # type: ignore
        pub_mock.assert_called_once_with(payload=unittest.mock.ANY, qos=0, retain=False, topic="topic/frame")
        payload = pub_mock.call_args.kwargs['payload']
        frame_msg = events.events_pb2.FrameMessage()
        frame_msg.ParseFromString(payload)
        assert frame_msg.id == frame_ref
        assert frame_msg.frame == b"img content"
        assert frame_msg.id.name == "robocar-oak-camera-oak"
        assert len(frame_msg.id.id) is 13
        now = datetime.datetime.now()
        assert now - datetime.timedelta(
            milliseconds=10) < frame_msg.id.created_at.ToDatetime() < now + datetime.timedelta(milliseconds=10)
    def test_process_error(self, frame_processor: camera.depthai.FrameProcessor, mocker: pytest_mock.MockerFixture,
                           mqtt_client: mqtt.Client) -> None:
        img: dai.ImgFrame = mocker.MagicMock()
        mocker.patch(target="cv2.imencode").return_value = (False, None)
        with pytest.raises(camera.depthai.FrameProcessError) as ex:
            _ = frame_processor.process(img)
        exception_raised = ex.value
        assert exception_raised.message == "unable to process to encode frame to jpg"
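
The assertion style above, decoding the payload handed to the mocked MQTT client back into a protobuf message, can be reproduced in isolation. A minimal sketch, assuming the robocar-protobuf package providing events.events_pb2 is installed:

import unittest.mock

import events.events_pb2 as evt

client = unittest.mock.MagicMock()

# Publish a protobuf-encoded frame through the mock, as FrameProcessor does.
msg = evt.FrameMessage()
msg.id.name = "robocar-oak-camera-oak"
client.publish(topic="/oak/camera_rgb", payload=msg.SerializeToString(), qos=0, retain=False)

# Recover and decode the captured payload to assert on its fields.
payload = client.publish.call_args.kwargs["payload"]
decoded = evt.FrameMessage()
decoded.ParseFromString(payload)
assert decoded.id.name == "robocar-oak-camera-oak"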

cv2-stubs/__init__.pyi (new file, 3627 lines)
File diff suppressed because one or more lines are too long

events/events_pb2.py (deleted)
@@ -1,53 +0,0 @@
# -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: events/events.proto
"""Generated protocol buffer code."""
from google.protobuf.internal import builder as _builder
from google.protobuf import descriptor as _descriptor
from google.protobuf import descriptor_pool as _descriptor_pool
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from google.protobuf import timestamp_pb2 as google_dot_protobuf_dot_timestamp__pb2
DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\x13\x65vents/events.proto\x12\x0erobocar.events\x1a\x1fgoogle/protobuf/timestamp.proto\"T\n\x08\x46rameRef\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\n\n\x02id\x18\x02 \x01(\t\x12.\n\ncreated_at\x18\x03 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\"C\n\x0c\x46rameMessage\x12$\n\x02id\x18\x01 \x01(\x0b\x32\x18.robocar.events.FrameRef\x12\r\n\x05\x66rame\x18\x02 \x01(\x0c\"d\n\x0fSteeringMessage\x12\x10\n\x08steering\x18\x01 \x01(\x02\x12\x12\n\nconfidence\x18\x02 \x01(\x02\x12+\n\tframe_ref\x18\x03 \x01(\x0b\x32\x18.robocar.events.FrameRef\"d\n\x0fThrottleMessage\x12\x10\n\x08throttle\x18\x01 \x01(\x02\x12\x12\n\nconfidence\x18\x02 \x01(\x02\x12+\n\tframe_ref\x18\x03 \x01(\x0b\x32\x18.robocar.events.FrameRef\"A\n\x10\x44riveModeMessage\x12-\n\ndrive_mode\x18\x01 \x01(\x0e\x32\x19.robocar.events.DriveMode\"f\n\x0eObjectsMessage\x12\'\n\x07objects\x18\x01 \x03(\x0b\x32\x16.robocar.events.Object\x12+\n\tframe_ref\x18\x02 \x01(\x0b\x32\x18.robocar.events.FrameRef\"\x80\x01\n\x06Object\x12(\n\x04type\x18\x01 \x01(\x0e\x32\x1a.robocar.events.TypeObject\x12\x0c\n\x04left\x18\x02 \x01(\x02\x12\x0b\n\x03top\x18\x03 \x01(\x02\x12\r\n\x05right\x18\x04 \x01(\x02\x12\x0e\n\x06\x62ottom\x18\x05 \x01(\x02\x12\x12\n\nconfidence\x18\x06 \x01(\x02\"&\n\x13SwitchRecordMessage\x12\x0f\n\x07\x65nabled\x18\x01 \x01(\x08\"\x8c\x01\n\x0bRoadMessage\x12&\n\x07\x63ontour\x18\x01 \x03(\x0b\x32\x15.robocar.events.Point\x12(\n\x07\x65llipse\x18\x02 \x01(\x0b\x32\x17.robocar.events.Ellipse\x12+\n\tframe_ref\x18\x03 \x01(\x0b\x32\x18.robocar.events.FrameRef\"\x1d\n\x05Point\x12\t\n\x01x\x18\x01 \x01(\x05\x12\t\n\x01y\x18\x02 \x01(\x05\"r\n\x07\x45llipse\x12%\n\x06\x63\x65nter\x18\x01 \x01(\x0b\x32\x15.robocar.events.Point\x12\r\n\x05width\x18\x02 \x01(\x05\x12\x0e\n\x06height\x18\x03 \x01(\x05\x12\r\n\x05\x61ngle\x18\x04 \x01(\x02\x12\x12\n\nconfidence\x18\x05 \x01(\x02\"\x82\x01\n\rRecordMessage\x12+\n\x05\x66rame\x18\x01 \x01(\x0b\x32\x1c.robocar.events.FrameMessage\x12\x31\n\x08steering\x18\x02 \x01(\x0b\x32\x1f.robocar.events.SteeringMessage\x12\x11\n\trecordSet\x18\x03 \x01(\t*-\n\tDriveMode\x12\x0b\n\x07INVALID\x10\x00\x12\x08\n\x04USER\x10\x01\x12\t\n\x05PILOT\x10\x02*2\n\nTypeObject\x12\x07\n\x03\x41NY\x10\x00\x12\x07\n\x03\x43\x41R\x10\x01\x12\x08\n\x04\x42UMP\x10\x02\x12\x08\n\x04PLOT\x10\x03\x42\nZ\x08./eventsb\x06proto3')
_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, globals())
_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'events.events_pb2', globals())
if _descriptor._USE_C_DESCRIPTORS == False:
DESCRIPTOR._options = None
DESCRIPTOR._serialized_options = b'Z\010./events'
_DRIVEMODE._serialized_start=1196
_DRIVEMODE._serialized_end=1241
_TYPEOBJECT._serialized_start=1243
_TYPEOBJECT._serialized_end=1293
_FRAMEREF._serialized_start=72
_FRAMEREF._serialized_end=156
_FRAMEMESSAGE._serialized_start=158
_FRAMEMESSAGE._serialized_end=225
_STEERINGMESSAGE._serialized_start=227
_STEERINGMESSAGE._serialized_end=327
_THROTTLEMESSAGE._serialized_start=329
_THROTTLEMESSAGE._serialized_end=429
_DRIVEMODEMESSAGE._serialized_start=431
_DRIVEMODEMESSAGE._serialized_end=496
_OBJECTSMESSAGE._serialized_start=498
_OBJECTSMESSAGE._serialized_end=600
_OBJECT._serialized_start=603
_OBJECT._serialized_end=731
_SWITCHRECORDMESSAGE._serialized_start=733
_SWITCHRECORDMESSAGE._serialized_end=771
_ROADMESSAGE._serialized_start=774
_ROADMESSAGE._serialized_end=914
_POINT._serialized_start=916
_POINT._serialized_end=945
_ELLIPSE._serialized_start=947
_ELLIPSE._serialized_end=1061
_RECORDMESSAGE._serialized_start=1064
_RECORDMESSAGE._serialized_end=1194
# @@protoc_insertion_point(module_scope)

poetry.lock (generated, new file, 1745 lines)
File diff suppressed because it is too large

pyproject.toml (new file)
@@ -0,0 +1,58 @@
[tool.poetry]
name = "robocar-oak-camera"
version = "0.0.0"
description = "Mqtt gateway for oak-lite device"
authors = ["Cyrille Nofficial <cynoffic@cyrilix.fr>"]
readme = "README.md"
packages = [
{ include = "camera" },
]
[tool.poetry.dependencies]
python = "^3.10"
paho-mqtt = "^1.6.1"
depthai = "^2.19.0"
protobuf3 = "^0.2.1"
google = "^3.0.0"
blobconverter = "^1.3.0"
protobuf = "^4.21.8"
opencv-python-headless = "^4.6.0.66"
robocar-protobuf = {version = "^1.1.2", source = "robocar"}
[tool.poetry.group.test.dependencies]
pytest = "^7.1.3"
pytest-mock = "^3.10.0"
[tool.poetry.group.dev.dependencies]
pylint = "^2.15.4"
mypy = "^0.982"
types-paho-mqtt = "^1.6.0.1"
types-protobuf = "^3.20.4.2"
[[tool.poetry.source]]
name = "robocar"
url = "https://git.cyrilix.bzh/api/packages/robocars/pypi/simple"
default = false
secondary = false
[build-system]
requires = ["poetry-core>=1.0.0", "poetry-dynamic-versioning"]
build-backend = "poetry_dynamic_versioning.backend"
[tool.poetry.scripts]
rc-oak-camera = 'camera.cli:execute_from_command_line'
[tool.poetry-dynamic-versioning]
enable = true
style = 'semver'
vcs = 'git'
dirty = true
bump = true
[tool.mypy]
strict = true
warn_unused_configs = true
plugins = 'numpy.typing.mypy_plugin'
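
For reference, the rc-oak-camera console script declared above in [tool.poetry.scripts] resolves to the same entry point as this minimal sketch (assuming the package is installed in the current environment):

from camera.cli import execute_from_command_line

if __name__ == "__main__":
    # Same code path as invoking the installed `rc-oak-camera` script.
    execute_from_command_line()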

requirements.txt (deleted)
@@ -1,8 +0,0 @@
paho-mqtt~=1.6.1
docopt~=0.6.2
depthai==2.17.2.0
opencv-python==4.6.0.66
google~=3.0.0
google-api-core~=2.4.0
setuptools==60.5.0
blobconverter==1.3.0

setup.cfg (deleted)
@@ -1,5 +0,0 @@
[metadata]
description-file = README.md
[aliases]
test = pytest

setup.py (deleted)
@@ -1,68 +0,0 @@
import os
from setuptools import setup, find_packages
# include the non python files
def package_files(directory, strip_leading):
    paths = []
    for (path, directories, filenames) in os.walk(directory):
        for filename in filenames:
            package_file = os.path.join(path, filename)
            paths.append(package_file[len(strip_leading):])
    return paths
tests_require = ['pytest',
                 ]
setup(name='robocar-oak-camera',
      version='0.1',
      description='Mqtt gateway for oak-lite device.',
      url='https://github.com/cyrilix/robocar-oak-camera',
      license='Apache2',
      entry_points={
          'console_scripts': [
              'rc-oak-camera=camera.cli:execute_from_command_line',
          ],
      },
      setup_requires=['pytest-runner'],
      install_requires=['depthai',
                        'docopt',
                        'paho-mqtt',
                        'protobuf3',
                        'google',
                        'numpy',
                        'opencv-python',
                        'blobconverter',
                        ],
      tests_require=tests_require,
      extras_require={
          'tests': tests_require
      },
      include_package_data=True,
      classifiers=[
          # How mature is this project? Common values are
          # 3 - Alpha
          # 4 - Beta
          # 5 - Production/Stable
          'Development Status :: 3 - Alpha',
          # Indicate who your project is intended for
          'Intended Audience :: Developers',
          'Topic :: Scientific/Engineering :: Artificial Intelligence',
          # Pick your license as you wish (should match "license" above)
          'License :: OSI Approved :: Apache 2 License',
          # Specify the Python versions you support here. In particular, ensure
          # that you indicate whether you support Python 2, Python 3 or both.
          'Programming Language :: Python :: 3.7',
      ],
      keywords='selfdriving cars drive',
      packages=find_packages(exclude=(['tests', 'env'])),
      )
)