build: upgrade to go 1.17 and upgrade dependencies
This commit is contained in:
1
vendor/gocv.io/x/gocv/.dockerignore
generated
vendored
1
vendor/gocv.io/x/gocv/.dockerignore
generated
vendored
@@ -1 +0,0 @@
|
||||
**
|
||||
60
vendor/gocv.io/x/gocv/.travis.yml
generated
vendored
60
vendor/gocv.io/x/gocv/.travis.yml
generated
vendored
@@ -1,60 +0,0 @@
|
||||
# Use new container infrastructure to enable caching
|
||||
sudo: required
|
||||
dist: trusty
|
||||
|
||||
# language is go
|
||||
language: go
|
||||
go:
|
||||
- "1.13"
|
||||
go_import_path: gocv.io/x/gocv
|
||||
|
||||
addons:
|
||||
apt:
|
||||
packages:
|
||||
- libgmp-dev
|
||||
- build-essential
|
||||
- cmake
|
||||
- git
|
||||
- libgtk2.0-dev
|
||||
- pkg-config
|
||||
- libavcodec-dev
|
||||
- libavformat-dev
|
||||
- libswscale-dev
|
||||
- libtbb2
|
||||
- libtbb-dev
|
||||
- libjpeg-dev
|
||||
- libpng-dev
|
||||
- libtiff-dev
|
||||
- libjasper-dev
|
||||
- libdc1394-22-dev
|
||||
- xvfb
|
||||
|
||||
before_install:
|
||||
- ./travis_build_opencv.sh
|
||||
- export PKG_CONFIG_PATH=$(pkg-config --variable pc_path pkg-config):$HOME/usr/lib/pkgconfig
|
||||
- export INCLUDE_PATH=$HOME/usr/include:${INCLUDE_PATH}
|
||||
- export LD_LIBRARY_PATH=$HOME/usr/lib:${LD_LIBRARY_PATH}
|
||||
- sudo ln /dev/null /dev/raw1394
|
||||
- export DISPLAY=:99.0
|
||||
- sh -e /etc/init.d/xvfb start
|
||||
|
||||
before_cache:
|
||||
- rm -f $HOME/fresh-cache
|
||||
|
||||
script:
|
||||
- export GOCV_CAFFE_TEST_FILES="${HOME}/testdata"
|
||||
- export GOCV_TENSORFLOW_TEST_FILES="${HOME}/testdata"
|
||||
- export OPENCV_ENABLE_NONFREE=ON
|
||||
- echo "Ensuring code is well formatted"; ! gofmt -s -d . | read
|
||||
- go test -v -coverprofile=coverage.txt -covermode=atomic -tags matprofile .
|
||||
- go test -tags matprofile ./contrib -coverprofile=contrib.txt -covermode=atomic; cat contrib.txt >> coverage.txt; rm contrib.txt;
|
||||
|
||||
after_success:
|
||||
- bash <(curl -s https://codecov.io/bash)
|
||||
|
||||
# Caching so the next build will be fast as possible.
|
||||
cache:
|
||||
timeout: 1000
|
||||
directories:
|
||||
- $HOME/usr
|
||||
- $HOME/testdata
|
||||
228
vendor/gocv.io/x/gocv/CHANGELOG.md
generated
vendored
228
vendor/gocv.io/x/gocv/CHANGELOG.md
generated
vendored
@@ -1,3 +1,231 @@
|
||||
0.28.0
|
||||
---
|
||||
* **all**
|
||||
* update to OpenCV 4.5.3
|
||||
* make task and build tag for static build of OpenCV/GoCV on Linux
|
||||
* add Makefile tasks for OpenCV install on Nvidia Jetson
|
||||
* add gotest for more colorful test output running tests from containers
|
||||
* **build**
|
||||
* correcting output format for code coverage report
|
||||
* enforce rule that all Go code is correctly formatted
|
||||
* remove codecov
|
||||
* **core**
|
||||
* add NewPointVectorFromMat() and NewPoint2fVectorFromMat() functions
|
||||
* Fix possible MatProfile race by ordering remove before free.
|
||||
* **cuda**
|
||||
* add core functions for GpuMat like Cols(), Rows(), and Type()
|
||||
* initial implementation for the Flip function
|
||||
* **docs**
|
||||
* update ROADMAP from recent contributions
|
||||
* **examples**
|
||||
* correct list of examples and fix comment
|
||||
* **features2d**
|
||||
* Add NewORBWithParams
|
||||
* **tracking**
|
||||
* change MOSSE to KCF
|
||||
* **highgui**
|
||||
* Add function CreateTrackbarWithValue to Window type.
|
||||
* **imgcodec**
|
||||
* optimize IMEncode avoiding multiple data copies.
|
||||
* **imgproc**
|
||||
* Add CircleWithParams function
|
||||
* Add DilateWithParams() function (#827)
|
||||
* Add EllipseWithParams function
|
||||
* Add FillPolyWithParams function
|
||||
* Add PointPolygonTest function
|
||||
* Add RectangleWithParams function
|
||||
* **photo**
|
||||
* add MergeMertens, AlignMTB and Denoising function (#848)
|
||||
* **xphoto**
|
||||
* Add Xphoto contrib (#844)
|
||||
|
||||
0.27.0
|
||||
---
|
||||
* **all**
|
||||
* update to OpenCV 4.5.2
|
||||
* **core**
|
||||
* add Append() to PointsVector/PointVector
|
||||
* add cv::RNG
|
||||
* add implementation for Point2fVector
|
||||
* add rand functions
|
||||
* add test coverage for PointsVector
|
||||
* create new PointsVector/PointVector wrappers to avoid repetitive memory copying for seeming innocent operations involving slices of image.Point
|
||||
* test coverage for Point2f
|
||||
* use PointVector for everything that we can to speed up pipeline when passing around Point vectors
|
||||
* use enum instead of int for Invert Method
|
||||
* **cuda**
|
||||
* adding HoughLinesDetector and HoughSegmentDetector
|
||||
* adding tests for the CannyEdgeDetector
|
||||
* some refactoring of the API
|
||||
* adding dockerfiles for OpenCV 4.5.2 with CUDA 11.2
|
||||
* add GaussianFilter
|
||||
* correct signature and test for Threshold
|
||||
* implement SobelFilter
|
||||
* move arithm module functions into correct location
|
||||
* rename files to get rid of so many cudas
|
||||
* add abs function implementation
|
||||
* **dnn**
|
||||
* increase test coverage
|
||||
* **docker**
|
||||
* make all Dockerfiles names/tags more consistent
|
||||
* **docs**
|
||||
* add CUDA functions that need implementation to ROADMAP
|
||||
* remove invalid sections and add some missing functions from ROADMAP
|
||||
* **imgproc**
|
||||
* Add FindContoursWithParams function
|
||||
* Add ToImageYUV and ToImageYUVWithParams
|
||||
* **make**
|
||||
* add make task to show changelog for next release
|
||||
* **wechat_qrcode**
|
||||
* disable module in Windows due to linker error
|
||||
|
||||
0.26.0
|
||||
---
|
||||
* **all**
|
||||
* update to OpenCV 4.5.1
|
||||
* **core**
|
||||
* add Matrix initializers: eye, ones, zeros (#758)
|
||||
* add multidimensional mat creation
|
||||
* add ndim mat constructor
|
||||
* added accumulators
|
||||
* added norm call with two mats (#600)
|
||||
* keep a reference to a []byte that backs a Mat. (#755)
|
||||
* remove guard for DataPtrUint8 since any Mat can be treated an Uint8
|
||||
* add Mat IsContinuous() function, and ensure that any Mat data pointers used to create Go slices only apply to continuous Mats
|
||||
* fix buffer size for Go strings for 32-bit operating systems
|
||||
* **build**
|
||||
* bring back codecov.io
|
||||
* **calib3d**
|
||||
* correctly close mat after test
|
||||
* **dnn**
|
||||
* add ReadNetFromONNX and ReadNetFromONNXBytes (#760)
|
||||
* increase test coverage
|
||||
* **docker**
|
||||
* dockerfiles for opencv gpu builds
|
||||
* **docs**
|
||||
* corrected links to CUDA and OpenVINO
|
||||
* list all unimplemented functions in photo module
|
||||
* replace GoDocs with pkg docs
|
||||
* update ROADMAP from recent contributions
|
||||
* **imgproc**
|
||||
* add test coverage for GetTextSizeWithBaseline()
|
||||
* close all Mats even those based on memory slices
|
||||
* close Mat to avoid memory leak in ToImage()
|
||||
* refactoring of ToImage and ImageToMatXX functions
|
||||
* **openvino**
|
||||
* fix dldt repo in makefile for openvino
|
||||
* **os**
|
||||
* adding gcc-c++ package to rpm deps
|
||||
* **photo**
|
||||
* add SeamlessClone function
|
||||
* **profile**
|
||||
* add created mats in Split and ForwardLayers to profile (#780)
|
||||
|
||||
0.25.0
|
||||
---
|
||||
* **all**
|
||||
* update to opencv release 4.5.0
|
||||
* **build**
|
||||
* add file dependencies needed for DNN tests
|
||||
* add verbose output for tests on CircleCI
|
||||
* also run unit tests on non-free algorithms. YMMV.
|
||||
* fix build with cuda
|
||||
* remove Travis and switch to CircleCI using Docker based builds
|
||||
* update CI builds to Go 1.15
|
||||
* **core**
|
||||
* add mixChannels() method to Mat (#746)
|
||||
* Add toGoStrings helper
|
||||
* support ConvertToWithParams method
|
||||
* **dnn**
|
||||
* Add NMSBoxes function (#736)
|
||||
* Added ability to load Torch file. Tested features for extracting 128d vectors
|
||||
* fix using wrong type for unconnectedlayertype
|
||||
* use default ddepth for conversions to blob from image as recommended by @berak
|
||||
* **docker**
|
||||
* use separate dockerfile for opencv to avoid massive rebuild
|
||||
* **docs**
|
||||
* add recent contributions to ROADMAP and also add cuda functions still in need of implementation
|
||||
* display CircleCI badge in README
|
||||
* minor improvements to CUDA docs in READMEs
|
||||
* **features2d**
|
||||
* add FlannBasedMatcher
|
||||
* add drawmatches (#720)
|
||||
* fix memory leak in SIFT
|
||||
* **highgui**
|
||||
* refactored ROI methods
|
||||
* **imgproc**
|
||||
* Add option to return baseline with GetTextSizeWithBaseline
|
||||
* **objdetect**
|
||||
* Add QRCode DetectAndDecodeMulti
|
||||
* **videoio**
|
||||
* Add video capture properties and set preferred api backend (#739)
|
||||
* fix needed as discussed in golang/go issue #32479
|
||||
|
||||
0.24.0
|
||||
---
|
||||
* **all**
|
||||
* update Makefile and READMEChange constants and corresponding function signatures to have the correct types (#689)
|
||||
* replace master branch terminology with release
|
||||
* update to OpenCV 4.4.0
|
||||
* **calib3d**
|
||||
* add FindHomography()
|
||||
* add function EstimateAffinePartial2D()
|
||||
* add GetAffineTransform() and GetAffineTransform2f()
|
||||
* add UndistortPoints(), FisheyeUndistortPoints() and EstimateNewCameraMatrixForUndistortRectify()
|
||||
* **core**
|
||||
* add MultiplyWithParams
|
||||
* **docs**
|
||||
* add recent contributions to ROADMAP
|
||||
* create CODE_OF_CONDUCT.md
|
||||
* update copyright year
|
||||
* **features2d**
|
||||
* close returned Mat from SIFT algorithm
|
||||
* fix issue 707 with DrawKeyPoints
|
||||
* SIFT patent now expired so is part of main OpenCV modules
|
||||
* **imgproc**
|
||||
* change struct to remove GNU old-style field designator extension warning
|
||||
|
||||
0.23.0
|
||||
---
|
||||
* **build**
|
||||
* update Makefile and README
|
||||
* update to use go1.14
|
||||
* **calib3d**
|
||||
* add draw chessboard
|
||||
* **core**
|
||||
* fix memory leak in Mat.Size() and Mat.Split() (#580)
|
||||
* **cuda**
|
||||
* add build support
|
||||
* add cuda backend/target
|
||||
* add support for:
|
||||
* cv::cuda::CannyEdgeDetector
|
||||
* cv::cuda::CascadeClassifier Class
|
||||
* cv::cuda::HOG Class
|
||||
* remove breaking case statement
|
||||
* **dnn**
|
||||
* avoid parallel test runs
|
||||
* remove attempt at providing grayscale image blog conversion that uses mean adjustment
|
||||
* **docker**
|
||||
* docker file last command change (#505)
|
||||
* **docs**
|
||||
* add recent contributions to ROADMAP
|
||||
* **imgproc**
|
||||
* add ErodeWithParams function
|
||||
* add getGaussianKernel function
|
||||
* add Go Point2f type and update GetPerspectiveTransform() (#589)
|
||||
* add PhaseCorrelate binding (#626)
|
||||
* added Polylines feature
|
||||
* do not free contours data until after we have drawn the needed contours
|
||||
* Threshold() should return a value (#620)
|
||||
* **make**
|
||||
* added raspberry pi zero support to the makefile
|
||||
* **opencv**
|
||||
* update to OpenCV 4.3.0
|
||||
* **openvino**
|
||||
* add build support
|
||||
* **windows**
|
||||
* add cmake flag for allocator stats counter type to avoid opencv issue #16398
|
||||
|
||||
0.22.0
|
||||
---
|
||||
* **bgsegm**
|
||||
|
||||
76
vendor/gocv.io/x/gocv/CODE_OF_CONDUCT.md
generated
vendored
Normal file
76
vendor/gocv.io/x/gocv/CODE_OF_CONDUCT.md
generated
vendored
Normal file
@@ -0,0 +1,76 @@
|
||||
# Contributor Covenant Code of Conduct
|
||||
|
||||
## Our Pledge
|
||||
|
||||
In the interest of fostering an open and welcoming environment, we as
|
||||
contributors and maintainers pledge to making participation in our project and
|
||||
our community a harassment-free experience for everyone, regardless of age, body
|
||||
size, disability, ethnicity, sex characteristics, gender identity and expression,
|
||||
level of experience, education, socio-economic status, nationality, personal
|
||||
appearance, race, religion, or sexual identity and orientation.
|
||||
|
||||
## Our Standards
|
||||
|
||||
Examples of behavior that contributes to creating a positive environment
|
||||
include:
|
||||
|
||||
* Using welcoming and inclusive language
|
||||
* Being respectful of differing viewpoints and experiences
|
||||
* Gracefully accepting constructive criticism
|
||||
* Focusing on what is best for the community
|
||||
* Showing empathy towards other community members
|
||||
|
||||
Examples of unacceptable behavior by participants include:
|
||||
|
||||
* The use of sexualized language or imagery and unwelcome sexual attention or
|
||||
advances
|
||||
* Trolling, insulting/derogatory comments, and personal or political attacks
|
||||
* Public or private harassment
|
||||
* Publishing others' private information, such as a physical or electronic
|
||||
address, without explicit permission
|
||||
* Other conduct which could reasonably be considered inappropriate in a
|
||||
professional setting
|
||||
|
||||
## Our Responsibilities
|
||||
|
||||
Project maintainers are responsible for clarifying the standards of acceptable
|
||||
behavior and are expected to take appropriate and fair corrective action in
|
||||
response to any instances of unacceptable behavior.
|
||||
|
||||
Project maintainers have the right and responsibility to remove, edit, or
|
||||
reject comments, commits, code, wiki edits, issues, and other contributions
|
||||
that are not aligned to this Code of Conduct, or to ban temporarily or
|
||||
permanently any contributor for other behaviors that they deem inappropriate,
|
||||
threatening, offensive, or harmful.
|
||||
|
||||
## Scope
|
||||
|
||||
This Code of Conduct applies both within project spaces and in public spaces
|
||||
when an individual is representing the project or its community. Examples of
|
||||
representing a project or community include using an official project e-mail
|
||||
address, posting via an official social media account, or acting as an appointed
|
||||
representative at an online or offline event. Representation of a project may be
|
||||
further defined and clarified by project maintainers.
|
||||
|
||||
## Enforcement
|
||||
|
||||
Instances of abusive, harassing, or otherwise unacceptable behavior may be
|
||||
reported by contacting the project team at info@hybridgroup.com. All
|
||||
complaints will be reviewed and investigated and will result in a response that
|
||||
is deemed necessary and appropriate to the circumstances. The project team is
|
||||
obligated to maintain confidentiality with regard to the reporter of an incident.
|
||||
Further details of specific enforcement policies may be posted separately.
|
||||
|
||||
Project maintainers who do not follow or enforce the Code of Conduct in good
|
||||
faith may face temporary or permanent repercussions as determined by other
|
||||
members of the project's leadership.
|
||||
|
||||
## Attribution
|
||||
|
||||
This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4,
|
||||
available at https://www.contributor-covenant.org/version/1/4/code-of-conduct.html
|
||||
|
||||
[homepage]: https://www.contributor-covenant.org
|
||||
|
||||
For answers to common questions about this code of conduct, see
|
||||
https://www.contributor-covenant.org/faq
|
||||
2
vendor/gocv.io/x/gocv/CONTRIBUTING.md
generated
vendored
2
vendor/gocv.io/x/gocv/CONTRIBUTING.md
generated
vendored
@@ -22,7 +22,7 @@ Please open a Github issue with your needs, and we can see what we can do.
|
||||
|
||||
## How to use our Github repository
|
||||
|
||||
The `master` branch of this repo will always have the latest released version of GoCV. All of the active development work for the next release will take place in the `dev` branch. GoCV will use semantic versioning and will create a tag/release for each release.
|
||||
The `release` branch of this repo will always have the latest released version of GoCV. All of the active development work for the next release will take place in the `dev` branch. GoCV will use semantic versioning and will create a tag/release for each release.
|
||||
|
||||
Here is how to contribute back some code or documentation:
|
||||
|
||||
|
||||
64
vendor/gocv.io/x/gocv/Dockerfile
generated
vendored
64
vendor/gocv.io/x/gocv/Dockerfile
generated
vendored
@@ -1,60 +1,12 @@
|
||||
FROM ubuntu:16.04 AS opencv
|
||||
LABEL maintainer="hybridgroup"
|
||||
|
||||
RUN apt-get update && apt-get install -y --no-install-recommends \
|
||||
git build-essential cmake pkg-config unzip libgtk2.0-dev \
|
||||
curl ca-certificates libcurl4-openssl-dev libssl-dev \
|
||||
libavcodec-dev libavformat-dev libswscale-dev libtbb2 libtbb-dev \
|
||||
libjpeg-dev libpng-dev libtiff-dev libdc1394-22-dev && \
|
||||
rm -rf /var/lib/apt/lists/*
|
||||
|
||||
ARG OPENCV_VERSION="4.2.0"
|
||||
ENV OPENCV_VERSION $OPENCV_VERSION
|
||||
|
||||
RUN curl -Lo opencv.zip https://github.com/opencv/opencv/archive/${OPENCV_VERSION}.zip && \
|
||||
unzip -q opencv.zip && \
|
||||
curl -Lo opencv_contrib.zip https://github.com/opencv/opencv_contrib/archive/${OPENCV_VERSION}.zip && \
|
||||
unzip -q opencv_contrib.zip && \
|
||||
rm opencv.zip opencv_contrib.zip && \
|
||||
cd opencv-${OPENCV_VERSION} && \
|
||||
mkdir build && cd build && \
|
||||
cmake -D CMAKE_BUILD_TYPE=RELEASE \
|
||||
-D CMAKE_INSTALL_PREFIX=/usr/local \
|
||||
-D OPENCV_EXTRA_MODULES_PATH=../../opencv_contrib-${OPENCV_VERSION}/modules \
|
||||
-D WITH_JASPER=OFF \
|
||||
-D BUILD_DOCS=OFF \
|
||||
-D BUILD_EXAMPLES=OFF \
|
||||
-D BUILD_TESTS=OFF \
|
||||
-D BUILD_PERF_TESTS=OFF \
|
||||
-D BUILD_opencv_java=NO \
|
||||
-D BUILD_opencv_python=NO \
|
||||
-D BUILD_opencv_python2=NO \
|
||||
-D BUILD_opencv_python3=NO \
|
||||
-D OPENCV_GENERATE_PKGCONFIG=ON .. && \
|
||||
make -j $(nproc --all) && \
|
||||
make preinstall && make install && ldconfig && \
|
||||
cd / && rm -rf opencv*
|
||||
|
||||
#################
|
||||
# Go + OpenCV #
|
||||
#################
|
||||
FROM opencv AS gocv
|
||||
LABEL maintainer="hybridgroup"
|
||||
|
||||
ARG GOVERSION="1.13.5"
|
||||
ENV GOVERSION $GOVERSION
|
||||
|
||||
RUN apt-get update && apt-get install -y --no-install-recommends \
|
||||
git software-properties-common && \
|
||||
curl -Lo go${GOVERSION}.linux-amd64.tar.gz https://dl.google.com/go/go${GOVERSION}.linux-amd64.tar.gz && \
|
||||
tar -C /usr/local -xzf go${GOVERSION}.linux-amd64.tar.gz && \
|
||||
rm go${GOVERSION}.linux-amd64.tar.gz && \
|
||||
rm -rf /var/lib/apt/lists/*
|
||||
# to build this docker image:
|
||||
# docker build .
|
||||
FROM gocv/opencv:4.5.3
|
||||
|
||||
ENV GOPATH /go
|
||||
ENV PATH $GOPATH/bin:/usr/local/go/bin:$PATH
|
||||
|
||||
RUN mkdir -p "$GOPATH/src" "$GOPATH/bin" && chmod -R 777 "$GOPATH"
|
||||
WORKDIR $GOPATH
|
||||
COPY . /go/src/gocv.io/x/gocv/
|
||||
|
||||
RUN go get -u -d gocv.io/x/gocv && go run ${GOPATH}/src/gocv.io/x/gocv/cmd/version/main.go
|
||||
WORKDIR /go/src/gocv.io/x/gocv
|
||||
RUN go build -tags example -o /build/gocv_version -i ./cmd/version/
|
||||
|
||||
CMD ["/build/gocv_version"]
|
||||
|
||||
19
vendor/gocv.io/x/gocv/Dockerfile-test
generated
vendored
Normal file
19
vendor/gocv.io/x/gocv/Dockerfile-test
generated
vendored
Normal file
@@ -0,0 +1,19 @@
|
||||
# To build:
|
||||
# docker build -f Dockerfile-test -t gocv-test .
|
||||
#
|
||||
# To run tests:
|
||||
# xhost +
|
||||
# docker run -it --rm -e DISPLAY=$DISPLAY -v /tmp/.X11-unix:/tmp/.X11-unix gocv-test
|
||||
# xhost -
|
||||
#
|
||||
FROM gocv/opencv:4.5.3 AS gocv-test
|
||||
|
||||
ENV GOPATH /go
|
||||
|
||||
COPY . /go/src/gocv.io/x/gocv/
|
||||
|
||||
WORKDIR /go/src/gocv.io/x/gocv
|
||||
|
||||
RUN go get -u github.com/rakyll/gotest
|
||||
|
||||
ENTRYPOINT ["gotest", "-v", ".", "./contrib/..."]
|
||||
18
vendor/gocv.io/x/gocv/Dockerfile-test.gpu-cuda-10
generated
vendored
Normal file
18
vendor/gocv.io/x/gocv/Dockerfile-test.gpu-cuda-10
generated
vendored
Normal file
@@ -0,0 +1,18 @@
|
||||
# To build:
|
||||
# docker build -f Dockerfile-test.gpu-cuda-10 -t gocv-test-gpu-cuda-10 .
|
||||
#
|
||||
# To run tests:
|
||||
# docker run -it --rm --gpus all gocv-test-gpu-cuda-10
|
||||
#
|
||||
FROM gocv/opencv:4.5.3-gpu-cuda-10 AS gocv-gpu-test-cuda-10
|
||||
|
||||
ENV GOPATH /go
|
||||
ENV PATH="${PATH}:/go/bin"
|
||||
|
||||
COPY . /go/src/gocv.io/x/gocv/
|
||||
|
||||
WORKDIR /go/src/gocv.io/x/gocv
|
||||
|
||||
RUN go get -u github.com/rakyll/gotest
|
||||
|
||||
ENTRYPOINT ["gotest", "-v", "./cuda/..."]
|
||||
18
vendor/gocv.io/x/gocv/Dockerfile-test.gpu-cuda-11
generated
vendored
Normal file
18
vendor/gocv.io/x/gocv/Dockerfile-test.gpu-cuda-11
generated
vendored
Normal file
@@ -0,0 +1,18 @@
|
||||
# To build:
|
||||
# docker build -f Dockerfile-test.gpu-cuda-11 -t gocv-test-gpu-cuda-11 .
|
||||
#
|
||||
# To run tests:
|
||||
# docker run -it --rm --gpus all gocv-test-gpu-cuda-11
|
||||
#
|
||||
FROM gocv/opencv:4.5.3-gpu-cuda-11 AS gocv-gpu-test-cuda-11
|
||||
|
||||
ENV GOPATH /go
|
||||
ENV PATH="${PATH}:/go/bin"
|
||||
|
||||
COPY . /go/src/gocv.io/x/gocv/
|
||||
|
||||
WORKDIR /go/src/gocv.io/x/gocv
|
||||
|
||||
RUN go get -u github.com/rakyll/gotest
|
||||
|
||||
ENTRYPOINT ["gotest", "-v", "./cuda/..."]
|
||||
12
vendor/gocv.io/x/gocv/Dockerfile.gpu
generated
vendored
Normal file
12
vendor/gocv.io/x/gocv/Dockerfile.gpu
generated
vendored
Normal file
@@ -0,0 +1,12 @@
|
||||
# to build this docker image:
|
||||
# docker build -f Dockerfile.gpu .
|
||||
FROM gocv/opencv:4.5.3-gpu AS gocv-gpu
|
||||
|
||||
ENV GOPATH /go
|
||||
|
||||
COPY . /go/src/gocv.io/x/gocv/
|
||||
|
||||
WORKDIR /go/src/gocv.io/x/gocv
|
||||
RUN go build -tags example -o /build/gocv_cuda_version ./cmd/cuda/
|
||||
|
||||
CMD ["/build/gocv_cuda_version"]
|
||||
44
vendor/gocv.io/x/gocv/Dockerfile.opencv
generated
vendored
Normal file
44
vendor/gocv.io/x/gocv/Dockerfile.opencv
generated
vendored
Normal file
@@ -0,0 +1,44 @@
|
||||
# to build this docker image:
|
||||
# docker build -f Dockerfile.opencv -t gocv/opencv:4.5.3 .
|
||||
FROM golang:1.16-buster AS opencv
|
||||
LABEL maintainer="hybridgroup"
|
||||
|
||||
RUN apt-get update && apt-get install -y --no-install-recommends \
|
||||
git build-essential cmake pkg-config unzip libgtk2.0-dev \
|
||||
curl ca-certificates libcurl4-openssl-dev libssl-dev \
|
||||
libavcodec-dev libavformat-dev libswscale-dev libtbb2 libtbb-dev \
|
||||
libjpeg-dev libpng-dev libtiff-dev libdc1394-22-dev && \
|
||||
rm -rf /var/lib/apt/lists/*
|
||||
|
||||
ARG OPENCV_VERSION="4.5.3"
|
||||
ENV OPENCV_VERSION $OPENCV_VERSION
|
||||
|
||||
RUN curl -Lo opencv.zip https://github.com/opencv/opencv/archive/${OPENCV_VERSION}.zip && \
|
||||
unzip -q opencv.zip && \
|
||||
curl -Lo opencv_contrib.zip https://github.com/opencv/opencv_contrib/archive/${OPENCV_VERSION}.zip && \
|
||||
unzip -q opencv_contrib.zip && \
|
||||
rm opencv.zip opencv_contrib.zip && \
|
||||
cd opencv-${OPENCV_VERSION} && \
|
||||
mkdir build && cd build && \
|
||||
cmake -D CMAKE_BUILD_TYPE=RELEASE \
|
||||
-D WITH_IPP=OFF \
|
||||
-D WITH_OPENGL=OFF \
|
||||
-D WITH_QT=OFF \
|
||||
-D CMAKE_INSTALL_PREFIX=/usr/local \
|
||||
-D OPENCV_EXTRA_MODULES_PATH=../../opencv_contrib-${OPENCV_VERSION}/modules \
|
||||
-D OPENCV_ENABLE_NONFREE=ON \
|
||||
-D WITH_JASPER=OFF \
|
||||
-D BUILD_DOCS=OFF \
|
||||
-D BUILD_EXAMPLES=OFF \
|
||||
-D BUILD_TESTS=OFF \
|
||||
-D BUILD_PERF_TESTS=OFF \
|
||||
-D BUILD_opencv_java=NO \
|
||||
-D BUILD_opencv_python=NO \
|
||||
-D BUILD_opencv_python2=NO \
|
||||
-D BUILD_opencv_python3=NO \
|
||||
-D OPENCV_GENERATE_PKGCONFIG=ON .. && \
|
||||
make -j $(nproc --all) && \
|
||||
make preinstall && make install && ldconfig && \
|
||||
cd / && rm -rf opencv*
|
||||
|
||||
CMD ["go version"]
|
||||
62
vendor/gocv.io/x/gocv/Dockerfile.opencv-gpu-cuda-10
generated
vendored
Normal file
62
vendor/gocv.io/x/gocv/Dockerfile.opencv-gpu-cuda-10
generated
vendored
Normal file
@@ -0,0 +1,62 @@
|
||||
# to build this docker image:
|
||||
# docker build -f Dockerfile.opencv-gpu-cuda-10 -t gocv/opencv:4.5.3-gpu-cuda-10 .
|
||||
FROM nvidia/cuda:10.2-cudnn7-devel AS opencv-gpu-base
|
||||
LABEL maintainer="hybridgroup"
|
||||
|
||||
RUN apt-get update && apt-get install -y --no-install-recommends \
|
||||
git build-essential cmake pkg-config unzip libgtk2.0-dev \
|
||||
wget curl ca-certificates libcurl4-openssl-dev libssl-dev \
|
||||
libavcodec-dev libavformat-dev libswscale-dev libtbb2 libtbb-dev \
|
||||
libjpeg-dev libpng-dev libtiff-dev libdc1394-22-dev && \
|
||||
rm -rf /var/lib/apt/lists/*
|
||||
|
||||
ARG OPENCV_VERSION="4.5.3"
|
||||
ENV OPENCV_VERSION $OPENCV_VERSION
|
||||
|
||||
RUN curl -Lo opencv.zip https://github.com/opencv/opencv/archive/${OPENCV_VERSION}.zip && \
|
||||
unzip -q opencv.zip && \
|
||||
curl -Lo opencv_contrib.zip https://github.com/opencv/opencv_contrib/archive/${OPENCV_VERSION}.zip && \
|
||||
unzip -q opencv_contrib.zip && \
|
||||
rm opencv.zip opencv_contrib.zip && \
|
||||
cd opencv-${OPENCV_VERSION} && \
|
||||
mkdir build && cd build && \
|
||||
cmake -D CMAKE_BUILD_TYPE=RELEASE \
|
||||
-D WITH_IPP=OFF \
|
||||
-D WITH_OPENGL=OFF \
|
||||
-D WITH_QT=OFF \
|
||||
-D CMAKE_INSTALL_PREFIX=/usr/local \
|
||||
-D OPENCV_EXTRA_MODULES_PATH=../../opencv_contrib-${OPENCV_VERSION}/modules \
|
||||
-D OPENCV_ENABLE_NONFREE=ON \
|
||||
-D WITH_JASPER=OFF \
|
||||
-D BUILD_DOCS=OFF \
|
||||
-D BUILD_EXAMPLES=OFF \
|
||||
-D BUILD_TESTS=OFF \
|
||||
-D BUILD_PERF_TESTS=OFF \
|
||||
-D BUILD_opencv_java=NO \
|
||||
-D BUILD_opencv_python=NO \
|
||||
-D BUILD_opencv_python2=NO \
|
||||
-D BUILD_opencv_python3=NO \
|
||||
-D WITH_CUDA=ON \
|
||||
-D ENABLE_FAST_MATH=1 \
|
||||
-D CUDA_FAST_MATH=1 \
|
||||
-D WITH_CUBLAS=1 \
|
||||
-D CUDA_TOOLKIT_ROOT_DIR=/usr/local/cuda/ \
|
||||
-D BUILD_opencv_cudacodec=OFF \
|
||||
-D WITH_CUDNN=ON \
|
||||
-D OPENCV_DNN_CUDA=ON \
|
||||
-D CUDA_GENERATION=Auto \
|
||||
-D OPENCV_GENERATE_PKGCONFIG=ON .. && \
|
||||
make -j $(nproc --all) && \
|
||||
make preinstall && make install && ldconfig && \
|
||||
cd / && rm -rf opencv*
|
||||
|
||||
# install golang here
|
||||
FROM opencv-gpu-base AS opencv-gpu-golang
|
||||
|
||||
ENV GO_RELEASE=1.16.5
|
||||
RUN wget https://dl.google.com/go/go${GO_RELEASE}.linux-amd64.tar.gz && \
|
||||
tar xfv go${GO_RELEASE}.linux-amd64.tar.gz -C /usr/local && \
|
||||
rm go${GO_RELEASE}.linux-amd64.tar.gz
|
||||
ENV PATH="${PATH}:/usr/local/go/bin"
|
||||
|
||||
CMD ["go version"]
|
||||
63
vendor/gocv.io/x/gocv/Dockerfile.opencv-gpu-cuda-11
generated
vendored
Normal file
63
vendor/gocv.io/x/gocv/Dockerfile.opencv-gpu-cuda-11
generated
vendored
Normal file
@@ -0,0 +1,63 @@
|
||||
# to build this docker image:
|
||||
# docker build -f Dockerfile.opencv-gpu-cuda-11 -t gocv/opencv:4.5.3-gpu-cuda-11 .
|
||||
FROM nvidia/cuda:11.2.2-cudnn8-devel AS opencv-gpu-cuda-11-base
|
||||
LABEL maintainer="hybridgroup"
|
||||
ENV DEBIAN_FRONTEND=noninteractive
|
||||
|
||||
RUN apt-get update && apt-get install -y --no-install-recommends \
|
||||
git build-essential cmake pkg-config unzip libgtk2.0-dev \
|
||||
wget curl ca-certificates libcurl4-openssl-dev libssl-dev \
|
||||
libavcodec-dev libavformat-dev libswscale-dev libtbb2 libtbb-dev \
|
||||
libjpeg-dev libpng-dev libtiff-dev libdc1394-22-dev && \
|
||||
rm -rf /var/lib/apt/lists/*
|
||||
|
||||
ARG OPENCV_VERSION="4.5.3"
|
||||
ENV OPENCV_VERSION $OPENCV_VERSION
|
||||
|
||||
RUN curl -Lo opencv.zip https://github.com/opencv/opencv/archive/${OPENCV_VERSION}.zip && \
|
||||
unzip -q opencv.zip && \
|
||||
curl -Lo opencv_contrib.zip https://github.com/opencv/opencv_contrib/archive/${OPENCV_VERSION}.zip && \
|
||||
unzip -q opencv_contrib.zip && \
|
||||
rm opencv.zip opencv_contrib.zip && \
|
||||
cd opencv-${OPENCV_VERSION} && \
|
||||
mkdir build && cd build && \
|
||||
cmake -D CMAKE_BUILD_TYPE=RELEASE \
|
||||
-D WITH_IPP=OFF \
|
||||
-D WITH_OPENGL=OFF \
|
||||
-D WITH_QT=OFF \
|
||||
-D CMAKE_INSTALL_PREFIX=/usr/local \
|
||||
-D OPENCV_EXTRA_MODULES_PATH=../../opencv_contrib-${OPENCV_VERSION}/modules \
|
||||
-D OPENCV_ENABLE_NONFREE=ON \
|
||||
-D WITH_JASPER=OFF \
|
||||
-D BUILD_DOCS=OFF \
|
||||
-D BUILD_EXAMPLES=OFF \
|
||||
-D BUILD_TESTS=OFF \
|
||||
-D BUILD_PERF_TESTS=OFF \
|
||||
-D BUILD_opencv_java=NO \
|
||||
-D BUILD_opencv_python=NO \
|
||||
-D BUILD_opencv_python2=NO \
|
||||
-D BUILD_opencv_python3=NO \
|
||||
-D WITH_CUDA=ON \
|
||||
-D ENABLE_FAST_MATH=1 \
|
||||
-D CUDA_FAST_MATH=1 \
|
||||
-D WITH_CUBLAS=1 \
|
||||
-D CUDA_TOOLKIT_ROOT_DIR=/usr/local/cuda/ \
|
||||
-D BUILD_opencv_cudacodec=OFF \
|
||||
-D WITH_CUDNN=ON \
|
||||
-D OPENCV_DNN_CUDA=ON \
|
||||
-D CUDA_GENERATION=Auto \
|
||||
-D OPENCV_GENERATE_PKGCONFIG=ON .. && \
|
||||
make -j $(nproc --all) && \
|
||||
make preinstall && make install && ldconfig && \
|
||||
cd / && rm -rf opencv*
|
||||
|
||||
# install golang here
|
||||
FROM opencv-gpu-cuda-11-base AS opencv-gpu-cuda-11-golang
|
||||
|
||||
ENV GO_RELEASE=1.16.5
|
||||
RUN wget https://dl.google.com/go/go${GO_RELEASE}.linux-amd64.tar.gz && \
|
||||
tar xfv go${GO_RELEASE}.linux-amd64.tar.gz -C /usr/local && \
|
||||
rm go${GO_RELEASE}.linux-amd64.tar.gz
|
||||
ENV PATH="${PATH}:/usr/local/go/bin"
|
||||
|
||||
CMD ["go version"]
|
||||
191
vendor/gocv.io/x/gocv/LICENSE.txt
generated
vendored
191
vendor/gocv.io/x/gocv/LICENSE.txt
generated
vendored
@@ -1,193 +1,4 @@
|
||||
|
||||
Apache License
|
||||
Version 2.0, January 2004
|
||||
http://www.apache.org/licenses/
|
||||
|
||||
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
|
||||
|
||||
1. Definitions.
|
||||
|
||||
"License" shall mean the terms and conditions for use, reproduction,
|
||||
and distribution as defined by Sections 1 through 9 of this document.
|
||||
|
||||
"Licensor" shall mean the copyright owner or entity authorized by
|
||||
the copyright owner that is granting the License.
|
||||
|
||||
"Legal Entity" shall mean the union of the acting entity and all
|
||||
other entities that control, are controlled by, or are under common
|
||||
control with that entity. For the purposes of this definition,
|
||||
"control" means (i) the power, direct or indirect, to cause the
|
||||
direction or management of such entity, whether by contract or
|
||||
otherwise, or (ii) ownership of fifty percent (50%) or more of the
|
||||
outstanding shares, or (iii) beneficial ownership of such entity.
|
||||
|
||||
"You" (or "Your") shall mean an individual or Legal Entity
|
||||
exercising permissions granted by this License.
|
||||
|
||||
"Source" form shall mean the preferred form for making modifications,
|
||||
including but not limited to software source code, documentation
|
||||
source, and configuration files.
|
||||
|
||||
"Object" form shall mean any form resulting from mechanical
|
||||
transformation or translation of a Source form, including but
|
||||
not limited to compiled object code, generated documentation,
|
||||
and conversions to other media types.
|
||||
|
||||
"Work" shall mean the work of authorship, whether in Source or
|
||||
Object form, made available under the License, as indicated by a
|
||||
copyright notice that is included in or attached to the work
|
||||
(an example is provided in the Appendix below).
|
||||
|
||||
"Derivative Works" shall mean any work, whether in Source or Object
|
||||
form, that is based on (or derived from) the Work and for which the
|
||||
editorial revisions, annotations, elaborations, or other modifications
|
||||
represent, as a whole, an original work of authorship. For the purposes
|
||||
of this License, Derivative Works shall not include works that remain
|
||||
separable from, or merely link (or bind by name) to the interfaces of,
|
||||
the Work and Derivative Works thereof.
|
||||
|
||||
"Contribution" shall mean any work of authorship, including
|
||||
the original version of the Work and any modifications or additions
|
||||
to that Work or Derivative Works thereof, that is intentionally
|
||||
submitted to Licensor for inclusion in the Work by the copyright owner
|
||||
or by an individual or Legal Entity authorized to submit on behalf of
|
||||
the copyright owner. For the purposes of this definition, "submitted"
|
||||
means any form of electronic, verbal, or written communication sent
|
||||
to the Licensor or its representatives, including but not limited to
|
||||
communication on electronic mailing lists, source code control systems,
|
||||
and issue tracking systems that are managed by, or on behalf of, the
|
||||
Licensor for the purpose of discussing and improving the Work, but
|
||||
excluding communication that is conspicuously marked or otherwise
|
||||
designated in writing by the copyright owner as "Not a Contribution."
|
||||
|
||||
"Contributor" shall mean Licensor and any individual or Legal Entity
|
||||
on behalf of whom a Contribution has been received by Licensor and
|
||||
subsequently incorporated within the Work.
|
||||
|
||||
2. Grant of Copyright License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
copyright license to reproduce, prepare Derivative Works of,
|
||||
publicly display, publicly perform, sublicense, and distribute the
|
||||
Work and such Derivative Works in Source or Object form.
|
||||
|
||||
3. Grant of Patent License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
(except as stated in this section) patent license to make, have made,
|
||||
use, offer to sell, sell, import, and otherwise transfer the Work,
|
||||
where such license applies only to those patent claims licensable
|
||||
by such Contributor that are necessarily infringed by their
|
||||
Contribution(s) alone or by combination of their Contribution(s)
|
||||
with the Work to which such Contribution(s) was submitted. If You
|
||||
institute patent litigation against any entity (including a
|
||||
cross-claim or counterclaim in a lawsuit) alleging that the Work
|
||||
or a Contribution incorporated within the Work constitutes direct
|
||||
or contributory patent infringement, then any patent licenses
|
||||
granted to You under this License for that Work shall terminate
|
||||
as of the date such litigation is filed.
|
||||
|
||||
4. Redistribution. You may reproduce and distribute copies of the
|
||||
Work or Derivative Works thereof in any medium, with or without
|
||||
modifications, and in Source or Object form, provided that You
|
||||
meet the following conditions:
|
||||
|
||||
(a) You must give any other recipients of the Work or
|
||||
Derivative Works a copy of this License; and
|
||||
|
||||
(b) You must cause any modified files to carry prominent notices
|
||||
stating that You changed the files; and
|
||||
|
||||
(c) You must retain, in the Source form of any Derivative Works
|
||||
that You distribute, all copyright, patent, trademark, and
|
||||
attribution notices from the Source form of the Work,
|
||||
excluding those notices that do not pertain to any part of
|
||||
the Derivative Works; and
|
||||
|
||||
(d) If the Work includes a "NOTICE" text file as part of its
|
||||
distribution, then any Derivative Works that You distribute must
|
||||
include a readable copy of the attribution notices contained
|
||||
within such NOTICE file, excluding those notices that do not
|
||||
pertain to any part of the Derivative Works, in at least one
|
||||
of the following places: within a NOTICE text file distributed
|
||||
as part of the Derivative Works; within the Source form or
|
||||
documentation, if provided along with the Derivative Works; or,
|
||||
within a display generated by the Derivative Works, if and
|
||||
wherever such third-party notices normally appear. The contents
|
||||
of the NOTICE file are for informational purposes only and
|
||||
do not modify the License. You may add Your own attribution
|
||||
notices within Derivative Works that You distribute, alongside
|
||||
or as an addendum to the NOTICE text from the Work, provided
|
||||
that such additional attribution notices cannot be construed
|
||||
as modifying the License.
|
||||
|
||||
You may add Your own copyright statement to Your modifications and
|
||||
may provide additional or different license terms and conditions
|
||||
for use, reproduction, or distribution of Your modifications, or
|
||||
for any such Derivative Works as a whole, provided Your use,
|
||||
reproduction, and distribution of the Work otherwise complies with
|
||||
the conditions stated in this License.
|
||||
|
||||
5. Submission of Contributions. Unless You explicitly state otherwise,
|
||||
any Contribution intentionally submitted for inclusion in the Work
|
||||
by You to the Licensor shall be under the terms and conditions of
|
||||
this License, without any additional terms or conditions.
|
||||
Notwithstanding the above, nothing herein shall supersede or modify
|
||||
the terms of any separate license agreement you may have executed
|
||||
with Licensor regarding such Contributions.
|
||||
|
||||
6. Trademarks. This License does not grant permission to use the trade
|
||||
names, trademarks, service marks, or product names of the Licensor,
|
||||
except as required for reasonable and customary use in describing the
|
||||
origin of the Work and reproducing the content of the NOTICE file.
|
||||
|
||||
7. Disclaimer of Warranty. Unless required by applicable law or
|
||||
agreed to in writing, Licensor provides the Work (and each
|
||||
Contributor provides its Contributions) on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||
implied, including, without limitation, any warranties or conditions
|
||||
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
|
||||
PARTICULAR PURPOSE. You are solely responsible for determining the
|
||||
appropriateness of using or redistributing the Work and assume any
|
||||
risks associated with Your exercise of permissions under this License.
|
||||
|
||||
8. Limitation of Liability. In no event and under no legal theory,
|
||||
whether in tort (including negligence), contract, or otherwise,
|
||||
unless required by applicable law (such as deliberate and grossly
|
||||
negligent acts) or agreed to in writing, shall any Contributor be
|
||||
liable to You for damages, including any direct, indirect, special,
|
||||
incidental, or consequential damages of any character arising as a
|
||||
result of this License or out of the use or inability to use the
|
||||
Work (including but not limited to damages for loss of goodwill,
|
||||
work stoppage, computer failure or malfunction, or any and all
|
||||
other commercial damages or losses), even if such Contributor
|
||||
has been advised of the possibility of such damages.
|
||||
|
||||
9. Accepting Warranty or Additional Liability. While redistributing
|
||||
the Work or Derivative Works thereof, You may choose to offer,
|
||||
and charge a fee for, acceptance of support, warranty, indemnity,
|
||||
or other liability obligations and/or rights consistent with this
|
||||
License. However, in accepting such obligations, You may act only
|
||||
on Your own behalf and on Your sole responsibility, not on behalf
|
||||
of any other Contributor, and only if You agree to indemnify,
|
||||
defend, and hold each Contributor harmless for any liability
|
||||
incurred by, or claims asserted against, such Contributor by reason
|
||||
of your accepting any such warranty or additional liability.
|
||||
|
||||
END OF TERMS AND CONDITIONS
|
||||
|
||||
APPENDIX: How to apply the Apache License to your work.
|
||||
|
||||
To apply the Apache License to your work, attach the following
|
||||
boilerplate notice, with the fields enclosed by brackets "[]"
|
||||
replaced with your own identifying information. (Don't include
|
||||
the brackets!) The text should be enclosed in the appropriate
|
||||
comment syntax for the file format. We also recommend that a
|
||||
file or class name and description of purpose be included on the
|
||||
same "printed page" as the copyright notice for easier
|
||||
identification within third-party archives.
|
||||
|
||||
Copyright (c) 2017-2019 The Hybrid Group
|
||||
Copyright (c) 2017-2021 The Hybrid Group
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
|
||||
190
vendor/gocv.io/x/gocv/Makefile
generated
vendored
190
vendor/gocv.io/x/gocv/Makefile
generated
vendored
@@ -1,18 +1,28 @@
|
||||
.ONESHELL:
|
||||
.PHONY: test deps download build clean astyle cmds docker
|
||||
|
||||
# GoCV version to use.
|
||||
GOCV_VERSION?="v0.26.0"
|
||||
|
||||
# OpenCV version to use.
|
||||
OPENCV_VERSION?=4.2.0
|
||||
OPENCV_VERSION?=4.5.3
|
||||
|
||||
# Go version to use when building Docker image
|
||||
GOVERSION?=1.13.1
|
||||
GOVERSION?=1.16.2
|
||||
|
||||
# Temporary directory to put files into.
|
||||
TMP_DIR?=/tmp/
|
||||
|
||||
# Build shared or static library
|
||||
BUILD_SHARED_LIBS?=ON
|
||||
|
||||
# Package list for each well-known Linux distribution
|
||||
RPMS=cmake curl git gtk2-devel libpng-devel libjpeg-devel libtiff-devel tbb tbb-devel libdc1394-devel unzip
|
||||
DEBS=unzip build-essential cmake curl git libgtk2.0-dev pkg-config libavcodec-dev libavformat-dev libswscale-dev libtbb2 libtbb-dev libjpeg-dev libpng-dev libtiff-dev libdc1394-22-dev
|
||||
RPMS=cmake curl wget git gtk2-devel libpng-devel libjpeg-devel libtiff-devel tbb tbb-devel libdc1394-devel unzip gcc-c++
|
||||
DEBS=unzip wget build-essential cmake curl git libgtk2.0-dev pkg-config libavcodec-dev libavformat-dev libswscale-dev libtbb2 libtbb-dev libjpeg-dev libpng-dev libtiff-dev libdc1394-22-dev
|
||||
JETSON=build-essential cmake git unzip pkg-config libjpeg-dev libpng-dev libtiff-dev libavcodec-dev libavformat-dev libswscale-dev libgtk2.0-dev libcanberra-gtk* libxvidcore-dev libx264-dev libgtk-3-dev libtbb2 libtbb-dev libdc1394-22-dev libv4l-dev v4l-utils libgstreamer1.0-dev libgstreamer-plugins-base1.0-dev libavresample-dev libvorbis-dev libxine2-dev libfaac-dev libmp3lame-dev libtheora-dev libopencore-amrnb-dev libopencore-amrwb-dev libopenblas-dev libatlas-base-dev libblas-dev liblapack-dev libeigen3-dev gfortran libhdf5-dev protobuf-compiler libprotobuf-dev libgoogle-glog-dev libgflags-dev
|
||||
|
||||
explain:
|
||||
@echo "For quick install with typical defaults of both OpenCV and GoCV, run 'make install'"
|
||||
|
||||
# Detect Linux distribution
|
||||
distro_deps=
|
||||
@@ -41,6 +51,11 @@ deps_debian:
|
||||
sudo apt-get -y update
|
||||
sudo apt-get -y install $(DEBS)
|
||||
|
||||
deps_jetson:
|
||||
sudo sh -c "echo '/usr/local/cuda/lib64' >> /etc/ld.so.conf.d/nvidia-tegra.conf"
|
||||
sudo ldconfig
|
||||
sudo apt-get -y update
|
||||
sudo apt-get -y install $(JETSON)
|
||||
|
||||
# Download OpenCV source tarballs.
|
||||
download:
|
||||
@@ -54,12 +69,36 @@ download:
|
||||
rm opencv.zip opencv_contrib.zip
|
||||
cd -
|
||||
|
||||
# Download openvino source tarballs.
|
||||
download_openvino:
|
||||
sudo rm -rf /usr/local/dldt/
|
||||
sudo rm -rf /usr/local/openvino/
|
||||
sudo git clone https://github.com/openvinotoolkit/openvino -b 2019_R3.1 /usr/local/openvino/
|
||||
|
||||
# Build openvino.
|
||||
build_openvino_package:
|
||||
cd /usr/local/openvino/inference-engine
|
||||
sudo git submodule init
|
||||
sudo git submodule update --recursive
|
||||
sudo ./install_dependencies.sh
|
||||
sudo mv -f thirdparty/clDNN/common/intel_ocl_icd/6.3/linux/Release thirdparty/clDNN/common/intel_ocl_icd/6.3/linux/RELEASE
|
||||
sudo mkdir build
|
||||
cd build
|
||||
sudo rm -rf *
|
||||
sudo cmake -D CMAKE_BUILD_TYPE=RELEASE -D CMAKE_INSTALL_PREFIX=/usr/local -D BUILD_SHARED_LIBS=${BUILD_SHARED_LIBS} -D ENABLE_VPU=ON -D ENABLE_MKL_DNN=ON -D ENABLE_CLDNN=ON ..
|
||||
sudo $(MAKE) -j $(shell nproc --all)
|
||||
sudo touch VERSION
|
||||
sudo mkdir -p src/ngraph
|
||||
sudo cp thirdparty/ngraph/src/ngraph/version.hpp src/ngraph
|
||||
cd -
|
||||
|
||||
# Build OpenCV.
|
||||
build:
|
||||
cd $(TMP_DIR)opencv/opencv-$(OPENCV_VERSION)
|
||||
mkdir build
|
||||
cd build
|
||||
cmake -D CMAKE_BUILD_TYPE=RELEASE -D CMAKE_INSTALL_PREFIX=/usr/local -D OPENCV_EXTRA_MODULES_PATH=$(TMP_DIR)opencv/opencv_contrib-$(OPENCV_VERSION)/modules -D BUILD_DOCS=OFF -D BUILD_EXAMPLES=OFF -D BUILD_TESTS=OFF -D BUILD_PERF_TESTS=OFF -D BUILD_opencv_java=NO -D BUILD_opencv_python=NO -D BUILD_opencv_python2=NO -D BUILD_opencv_python3=NO -D WITH_JASPER=OFF -DOPENCV_GENERATE_PKGCONFIG=ON ..
|
||||
rm -rf *
|
||||
cmake -D CMAKE_BUILD_TYPE=RELEASE -D CMAKE_INSTALL_PREFIX=/usr/local -D BUILD_SHARED_LIBS=${BUILD_SHARED_LIBS} -D OPENCV_EXTRA_MODULES_PATH=$(TMP_DIR)opencv/opencv_contrib-$(OPENCV_VERSION)/modules -D BUILD_DOCS=OFF -D BUILD_EXAMPLES=OFF -D BUILD_TESTS=OFF -D BUILD_PERF_TESTS=OFF -D BUILD_opencv_java=NO -D BUILD_opencv_python=NO -D BUILD_opencv_python2=NO -D BUILD_opencv_python3=NO -D WITH_JASPER=OFF -DOPENCV_GENERATE_PKGCONFIG=ON ..
|
||||
$(MAKE) -j $(shell nproc --all)
|
||||
$(MAKE) preinstall
|
||||
cd -
|
||||
@@ -69,7 +108,58 @@ build_raspi:
|
||||
cd $(TMP_DIR)opencv/opencv-$(OPENCV_VERSION)
|
||||
mkdir build
|
||||
cd build
|
||||
cmake -D CMAKE_BUILD_TYPE=RELEASE -D CMAKE_INSTALL_PREFIX=/usr/local -D OPENCV_EXTRA_MODULES_PATH=$(TMP_DIR)opencv/opencv_contrib-$(OPENCV_VERSION)/modules -D BUILD_DOCS=OFF -D BUILD_EXAMPLES=OFF -D BUILD_TESTS=OFF -D BUILD_PERF_TESTS=OFF -D BUILD_opencv_java=OFF -D BUILD_opencv_python=NO -D BUILD_opencv_python2=NO -D BUILD_opencv_python3=NO -D ENABLE_NEON=ON -D ENABLE_VFPV3=ON -D WITH_JASPER=OFF -D OPENCV_GENERATE_PKGCONFIG=ON ..
|
||||
rm -rf *
|
||||
cmake -D CMAKE_BUILD_TYPE=RELEASE -D CMAKE_INSTALL_PREFIX=/usr/local -D BUILD_SHARED_LIBS=${BUILD_SHARED_LIBS} -D OPENCV_EXTRA_MODULES_PATH=$(TMP_DIR)opencv/opencv_contrib-$(OPENCV_VERSION)/modules -D BUILD_DOCS=OFF -D BUILD_EXAMPLES=OFF -D BUILD_TESTS=OFF -D BUILD_PERF_TESTS=OFF -D BUILD_opencv_java=OFF -D BUILD_opencv_python=NO -D BUILD_opencv_python2=NO -D BUILD_opencv_python3=NO -D ENABLE_NEON=ON -D ENABLE_VFPV3=ON -D WITH_JASPER=OFF -D OPENCV_GENERATE_PKGCONFIG=ON ..
|
||||
$(MAKE) -j $(shell nproc --all)
|
||||
$(MAKE) preinstall
|
||||
cd -
|
||||
|
||||
# Build OpenCV on Raspberry pi zero which has ARMv6.
|
||||
build_raspi_zero:
|
||||
cd $(TMP_DIR)opencv/opencv-$(OPENCV_VERSION)
|
||||
mkdir build
|
||||
cd build
|
||||
rm -rf *
|
||||
cmake -D CMAKE_BUILD_TYPE=RELEASE -D CMAKE_INSTALL_PREFIX=/usr/local -D BUILD_SHARED_LIBS=${BUILD_SHARED_LIBS} -D OPENCV_EXTRA_MODULES_PATH=$(TMP_DIR)opencv/opencv_contrib-$(OPENCV_VERSION)/modules -D BUILD_DOCS=OFF -D BUILD_EXAMPLES=OFF -D BUILD_TESTS=OFF -D BUILD_PERF_TESTS=OFF -D BUILD_opencv_java=OFF -D BUILD_opencv_python=NO -D BUILD_opencv_python2=NO -D BUILD_opencv_python3=NO -D ENABLE_VFPV2=ON -D WITH_JASPER=OFF -D OPENCV_GENERATE_PKGCONFIG=ON ..
|
||||
$(MAKE) -j $(shell nproc --all)
|
||||
$(MAKE) preinstall
|
||||
cd -
|
||||
|
||||
# Build OpenCV for NVidia Jetson with CUDA.
|
||||
build_jetson:
|
||||
cd $(TMP_DIR)opencv/opencv-$(OPENCV_VERSION)
|
||||
mkdir build
|
||||
cd build
|
||||
rm -rf *
|
||||
cmake -D CMAKE_BUILD_TYPE=RELEASE \
|
||||
-D CMAKE_INSTALL_PREFIX=/usr/local \
|
||||
-D EIGEN_INCLUDE_PATH=/usr/include/eigen3 \
|
||||
-D BUILD_SHARED_LIBS=${BUILD_SHARED_LIBS} \
|
||||
-D OPENCV_EXTRA_MODULES_PATH=$(TMP_DIR)opencv/opencv_contrib-$(OPENCV_VERSION)/modules \
|
||||
-D BUILD_DOCS=OFF -D BUILD_EXAMPLES=OFF -D BUILD_TESTS=OFF -D BUILD_PERF_TESTS=OFF -D BUILD_opencv_java=OFF -D BUILD_opencv_python=NO -D BUILD_opencv_python2=NO -D BUILD_opencv_python3=NO \
|
||||
-D WITH_OPENCL=OFF \
|
||||
-D WITH_CUDA=ON \
|
||||
-D CUDA_ARCH_BIN=5.3 \
|
||||
-D CUDA_ARCH_PTX="" \
|
||||
-D WITH_CUDNN=ON \
|
||||
-D WITH_CUBLAS=ON \
|
||||
-D ENABLE_FAST_MATH=ON \
|
||||
-D CUDA_FAST_MATH=ON \
|
||||
-D OPENCV_DNN_CUDA=ON \
|
||||
-D ENABLE_NEON=ON \
|
||||
-D WITH_QT=OFF \
|
||||
-D WITH_OPENMP=ON \
|
||||
-D WITH_OPENGL=ON \
|
||||
-D BUILD_TIFF=ON \
|
||||
-D WITH_FFMPEG=ON \
|
||||
-D WITH_GSTREAMER=ON \
|
||||
-D WITH_TBB=ON \
|
||||
-D BUILD_TBB=ON \
|
||||
-D BUILD_TESTS=OFF \
|
||||
-D WITH_EIGEN=ON \
|
||||
-D WITH_V4L=ON \
|
||||
-D WITH_LIBV4L=ON \
|
||||
-D OPENCV_GENERATE_PKGCONFIG=ON ..
|
||||
$(MAKE) -j $(shell nproc --all)
|
||||
$(MAKE) preinstall
|
||||
cd -
|
||||
@@ -79,7 +169,19 @@ build_nonfree:
|
||||
cd $(TMP_DIR)opencv/opencv-$(OPENCV_VERSION)
|
||||
mkdir build
|
||||
cd build
|
||||
cmake -D CMAKE_BUILD_TYPE=RELEASE -D CMAKE_INSTALL_PREFIX=/usr/local -D OPENCV_EXTRA_MODULES_PATH=$(TMP_DIR)opencv/opencv_contrib-$(OPENCV_VERSION)/modules -D BUILD_DOCS=OFF -D BUILD_EXAMPLES=OFF -D BUILD_TESTS=OFF -D BUILD_PERF_TESTS=OFF -D BUILD_opencv_java=NO -D BUILD_opencv_python=NO -D BUILD_opencv_python2=NO -D BUILD_opencv_python3=NO -D WITH_JASPER=OFF -DOPENCV_GENERATE_PKGCONFIG=ON -DOPENCV_ENABLE_NONFREE=ON ..
|
||||
rm -rf *
|
||||
cmake -D CMAKE_BUILD_TYPE=RELEASE -D CMAKE_INSTALL_PREFIX=/usr/local -D BUILD_SHARED_LIBS=${BUILD_SHARED_LIBS} -D OPENCV_EXTRA_MODULES_PATH=$(TMP_DIR)opencv/opencv_contrib-$(OPENCV_VERSION)/modules -D BUILD_DOCS=OFF -D BUILD_EXAMPLES=OFF -D BUILD_TESTS=OFF -D BUILD_PERF_TESTS=OFF -D BUILD_opencv_java=NO -D BUILD_opencv_python=NO -D BUILD_opencv_python2=NO -D BUILD_opencv_python3=NO -D WITH_JASPER=OFF -DOPENCV_GENERATE_PKGCONFIG=ON -DOPENCV_ENABLE_NONFREE=ON ..
|
||||
$(MAKE) -j $(shell nproc --all)
|
||||
$(MAKE) preinstall
|
||||
cd -
|
||||
|
||||
# Build OpenCV with openvino.
|
||||
build_openvino:
|
||||
cd $(TMP_DIR)opencv/opencv-$(OPENCV_VERSION)
|
||||
mkdir build
|
||||
cd build
|
||||
rm -rf *
|
||||
cmake -D CMAKE_BUILD_TYPE=RELEASE -D CMAKE_INSTALL_PREFIX=/usr/local -D BUILD_SHARED_LIBS=${BUILD_SHARED_LIBS} -D ENABLE_CXX11=ON -D OPENCV_EXTRA_MODULES_PATH=$(TMP_DIR)opencv/opencv_contrib-$(OPENCV_VERSION)/modules -D WITH_INF_ENGINE=ON -D InferenceEngine_DIR=/usr/local/dldt/inference-engine/build -D BUILD_DOCS=OFF -D BUILD_EXAMPLES=OFF -D BUILD_TESTS=OFF -D BUILD_PERF_TESTS=OFF -D BUILD_opencv_java=NO -D BUILD_opencv_python=NO -D BUILD_opencv_python2=NO -D BUILD_opencv_python3=NO -D WITH_JASPER=OFF -DOPENCV_GENERATE_PKGCONFIG=ON -DOPENCV_ENABLE_NONFREE=ON ..
|
||||
$(MAKE) -j $(shell nproc --all)
|
||||
$(MAKE) preinstall
|
||||
cd -
|
||||
@@ -89,7 +191,30 @@ build_cuda:
|
||||
cd $(TMP_DIR)opencv/opencv-$(OPENCV_VERSION)
|
||||
mkdir build
|
||||
cd build
|
||||
cmake -j $(shell nproc --all) -D CMAKE_BUILD_TYPE=RELEASE -D CMAKE_INSTALL_PREFIX=/usr/local -D OPENCV_EXTRA_MODULES_PATH=$(TMP_DIR)opencv/opencv_contrib-$(OPENCV_VERSION)/modules -D BUILD_DOCS=OFF -D BUILD_EXAMPLES=OFF -D BUILD_TESTS=OFF -D BUILD_PERF_TESTS=OFF -D BUILD_opencv_java=NO -D BUILD_opencv_python=NO -D BUILD_opencv_python2=NO -D BUILD_opencv_python3=NO -D WITH_JASPER=OFF -DOPENCV_GENERATE_PKGCONFIG=ON -DWITH_CUDA=ON -DENABLE_FAST_MATH=1 -DCUDA_FAST_MATH=1 -DWITH_CUBLAS=1 -DCUDA_TOOLKIT_ROOT_DIR=/usr/local/cuda/ -DBUILD_opencv_cudacodec=OFF ..
|
||||
rm -rf *
|
||||
cmake -j $(shell nproc --all) -D CMAKE_BUILD_TYPE=RELEASE -D CMAKE_INSTALL_PREFIX=/usr/local -D BUILD_SHARED_LIBS=${BUILD_SHARED_LIBS} -D OPENCV_EXTRA_MODULES_PATH=$(TMP_DIR)opencv/opencv_contrib-$(OPENCV_VERSION)/modules -D BUILD_DOCS=OFF -D BUILD_EXAMPLES=OFF -D BUILD_TESTS=OFF -D BUILD_PERF_TESTS=OFF -D BUILD_opencv_java=NO -D BUILD_opencv_python=NO -D BUILD_opencv_python2=NO -D BUILD_opencv_python3=NO -D WITH_JASPER=OFF -DOPENCV_GENERATE_PKGCONFIG=ON -DWITH_CUDA=ON -DENABLE_FAST_MATH=1 -DCUDA_FAST_MATH=1 -DWITH_CUBLAS=1 -DCUDA_TOOLKIT_ROOT_DIR=/usr/local/cuda/ -DBUILD_opencv_cudacodec=OFF -D WITH_CUDNN=ON -D OPENCV_DNN_CUDA=ON -D CUDA_GENERATION=Auto ..
|
||||
$(MAKE) -j $(shell nproc --all)
|
||||
$(MAKE) preinstall
|
||||
cd -
|
||||
|
||||
# Build OpenCV staticly linked
|
||||
build_static:
|
||||
cd $(TMP_DIR)opencv/opencv-$(OPENCV_VERSION)
|
||||
mkdir build
|
||||
cd build
|
||||
rm -rf *
|
||||
cmake -D CMAKE_BUILD_TYPE=RELEASE -D CMAKE_INSTALL_PREFIX=/usr/local -D BUILD_SHARED_LIBS=OFF -D OPENCV_EXTRA_MODULES_PATH=$(TMP_DIR)opencv/opencv_contrib-$(OPENCV_VERSION)/modules -D BUILD_DOCS=OFF -D BUILD_EXAMPLES=OFF -D BUILD_TESTS=OFF -D BUILD_PERF_TESTS=OFF -D BUILD_opencv_java=NO -D BUILD_opencv_python=NO -D BUILD_opencv_python2=NO -D BUILD_opencv_python3=NO -DWITH_JASPER=OFF -DWITH_QT=OFF -DWITH_GTK=OFF -DWITH_FFMPEG=OFF -DWITH_TIFF=OFF -DWITH_WEBP=OFF -DWITH_PNG=OFF -DWITH_1394=OFF -DWITH_OPENJPEG=OFF -DOPENCV_GENERATE_PKGCONFIG=ON ..
|
||||
$(MAKE) -j $(shell nproc --all)
|
||||
$(MAKE) preinstall
|
||||
cd -
|
||||
|
||||
# Build OpenCV with cuda.
|
||||
build_all:
|
||||
cd $(TMP_DIR)opencv/opencv-$(OPENCV_VERSION)
|
||||
mkdir build
|
||||
cd build
|
||||
rm -rf *
|
||||
cmake -j $(shell nproc --all) -D CMAKE_BUILD_TYPE=RELEASE -D CMAKE_INSTALL_PREFIX=/usr/local -D BUILD_SHARED_LIBS=${BUILD_SHARED_LIBS} -D ENABLE_CXX11=ON -D OPENCV_EXTRA_MODULES_PATH=$(TMP_DIR)opencv/opencv_contrib-$(OPENCV_VERSION)/modules -D WITH_INF_ENGINE=ON -D InferenceEngine_DIR=/usr/local/dldt/inference-engine/build -D BUILD_DOCS=OFF -D BUILD_EXAMPLES=OFF -D BUILD_TESTS=OFF -D BUILD_PERF_TESTS=OFF -D BUILD_opencv_java=NO -D BUILD_opencv_python=NO -D BUILD_opencv_python2=NO -D BUILD_opencv_python3=NO -D WITH_JASPER=OFF -DOPENCV_GENERATE_PKGCONFIG=ON -DWITH_CUDA=ON -DENABLE_FAST_MATH=1 -DCUDA_FAST_MATH=1 -DWITH_CUBLAS=1 -DCUDA_TOOLKIT_ROOT_DIR=/usr/local/cuda/ -DBUILD_opencv_cudacodec=OFF -D WITH_CUDNN=ON -D OPENCV_DNN_CUDA=ON -D CUDA_GENERATION=Auto ..
|
||||
$(MAKE) -j $(shell nproc --all)
|
||||
$(MAKE) preinstall
|
||||
cd -
|
||||
@@ -99,14 +224,36 @@ clean:
|
||||
go clean --cache
|
||||
rm -rf $(TMP_DIR)opencv
|
||||
|
||||
# Cleanup old library files.
|
||||
sudo_pre_install_clean:
|
||||
sudo rm -rf /usr/local/lib/cmake/opencv4/
|
||||
sudo rm -rf /usr/local/lib/libopencv*
|
||||
sudo rm -rf /usr/local/lib/pkgconfig/opencv*
|
||||
sudo rm -rf /usr/local/include/opencv*
|
||||
|
||||
# Do everything.
|
||||
install: deps download build sudo_install clean verify
|
||||
install: deps download sudo_pre_install_clean build sudo_install clean verify
|
||||
|
||||
# Do everything on Raspbian.
|
||||
install_raspi: deps download build_raspi sudo_install clean verify
|
||||
|
||||
# Do everything on the raspberry pi zero.
|
||||
install_raspi_zero: deps download build_raspi_zero sudo_install clean verify
|
||||
|
||||
# Do everything on Jetson.
|
||||
install_jetson: deps download build_jetson sudo_install clean verify
|
||||
|
||||
# Do everything with cuda.
|
||||
install_cuda: deps download build_cuda sudo_install clean verify
|
||||
install_cuda: deps download sudo_pre_install_clean build_cuda sudo_install clean verify verify_cuda
|
||||
|
||||
# Do everything with openvino.
|
||||
install_openvino: deps download download_openvino sudo_pre_install_clean build_openvino_package sudo_install_openvino build_openvino sudo_install clean verify_openvino
|
||||
|
||||
# Do everything statically.
|
||||
install_static: deps download sudo_pre_install_clean build_static sudo_install clean verify
|
||||
|
||||
# Do everything with openvino and cuda.
|
||||
install_all: deps download download_openvino sudo_pre_install_clean build_openvino_package sudo_install_openvino build_all sudo_install clean verify_openvino verify_cuda
|
||||
|
||||
# Install system wide.
|
||||
sudo_install:
|
||||
@@ -115,15 +262,30 @@ sudo_install:
|
||||
sudo ldconfig
|
||||
cd -
|
||||
|
||||
# Install system wide.
|
||||
sudo_install_openvino:
|
||||
cd /usr/local/openvino/inference-engine/build
|
||||
sudo $(MAKE) install
|
||||
sudo ldconfig
|
||||
cd -
|
||||
|
||||
# Build a minimal Go app to confirm gocv works.
|
||||
verify:
|
||||
go run ./cmd/version/main.go
|
||||
|
||||
# Build a minimal Go app to confirm gocv cuda works.
|
||||
verify_cuda:
|
||||
go run ./cmd/cuda/main.go
|
||||
|
||||
# Build a minimal Go app to confirm gocv openvino works.
|
||||
verify_openvino:
|
||||
go run -tags openvino ./cmd/version/main.go
|
||||
|
||||
# Runs tests.
|
||||
# This assumes env.sh was already sourced.
|
||||
# pvt is not tested here since it requires additional depenedences.
|
||||
test:
|
||||
go test . ./contrib
|
||||
go test -tags matprofile . ./contrib
|
||||
|
||||
docker:
|
||||
docker build --build-arg OPENCV_VERSION=$(OPENCV_VERSION) --build-arg GOVERSION=$(GOVERSION) .
|
||||
@@ -131,7 +293,11 @@ docker:
|
||||
astyle:
|
||||
astyle --project=.astylerc --recursive *.cpp,*.h
|
||||
|
||||
CMDS=basic-drawing caffe-classifier captest capwindow counter faceblur facedetect find-circles hand-gestures img-similarity mjpeg-streamer motion-detect pose saveimage savevideo showimage ssd-facedetect tf-classifier tracking version
|
||||
|
||||
releaselog:
|
||||
git log --pretty=format:"%s" $(GOCV_VERSION)..HEAD
|
||||
|
||||
CMDS=basic-drawing caffe-classifier captest capwindow counter dnn-detection dnn-pose-detection dnn-style-transfer faceblur facedetect facedetect-from-url feature-matching find-chessboard find-circles find-lines hand-gestures hello img-similarity mjpeg-streamer motion-detect saveimage savevideo showimage ssd-facedetect tf-classifier tracking version xphoto
|
||||
cmds:
|
||||
for cmd in $(CMDS) ; do \
|
||||
go build -o build/$$cmd cmd/$$cmd/main.go ;
|
||||
|
||||
162
vendor/gocv.io/x/gocv/README.md
generated
vendored
162
vendor/gocv.io/x/gocv/README.md
generated
vendored
@@ -1,17 +1,18 @@
|
||||
# GoCV
|
||||
|
||||
[](http://gocv.io/)
|
||||
[](http://gocv.io/)
|
||||
|
||||
[](https://godoc.org/github.com/hybridgroup/gocv)
|
||||
[](https://travis-ci.org/hybridgroup/gocv)
|
||||
[](https://pkg.go.dev/gocv.io/x/gocv)
|
||||
[](https://circleci.com/gh/hybridgroup/gocv/tree/dev)
|
||||
[](https://ci.appveyor.com/project/deadprogram/gocv/branch/dev)
|
||||
[](https://codecov.io/gh/hybridgroup/gocv)
|
||||
[](https://goreportcard.com/report/github.com/hybridgroup/gocv)
|
||||
[](https://github.com/hybridgroup/gocv/blob/master/LICENSE.txt)
|
||||
[](https://github.com/hybridgroup/gocv/blob/release/LICENSE.txt)
|
||||
|
||||
The GoCV package provides Go language bindings for the [OpenCV 4](http://opencv.org/) computer vision library.
|
||||
|
||||
The GoCV package supports the latest releases of Go and OpenCV (v4.2.0) on Linux, macOS, and Windows. We intend to make the Go language a "first-class" client compatible with the latest developments in the OpenCV ecosystem.
|
||||
The GoCV package supports the latest releases of Go and OpenCV (v4.5.3) on Linux, macOS, and Windows. We intend to make the Go language a "first-class" client compatible with the latest developments in the OpenCV ecosystem.
|
||||
|
||||
GoCV supports [CUDA](https://en.wikipedia.org/wiki/CUDA) for hardware acceleration using Nvidia GPUs. Check out the [CUDA README](./cuda/README.md) for more info on how to use GoCV with OpenCV/CUDA.
|
||||
|
||||
GoCV also supports [Intel OpenVINO](https://software.intel.com/en-us/openvino-toolkit). Check out the [OpenVINO README](./openvino/README.md) for more info on how to use GoCV with the Intel OpenVINO toolkit.
|
||||
|
||||
@@ -43,7 +44,7 @@ func main() {
|
||||
|
||||
### Face detect
|
||||
|
||||

|
||||

|
||||
|
||||
This is a more complete example that opens a video capture device using device "0". It also uses the CascadeClassifier class to load an external data file containing the classifier data. The program grabs each frame from the video, then uses the classifier to detect faces. If any faces are found, it draws a green rectangle around each one, then displays the video in an output window:
|
||||
|
||||
@@ -121,43 +122,83 @@ There are examples in the [cmd directory](./cmd) of this repo in the form of var
|
||||
|
||||
## How to install
|
||||
|
||||
To install GoCV, run the following command:
|
||||
To install GoCV, you must first have the matching version of OpenCV installed on your system. The current release of GoCV requires OpenCV 4.5.3.
|
||||
|
||||
```
|
||||
go get -u -d gocv.io/x/gocv
|
||||
```
|
||||
|
||||
To run code that uses the GoCV package, you must also install OpenCV 4.2.0 on your system. Here are instructions for Ubuntu, Raspian, macOS, and Windows.
|
||||
Here are instructions for Ubuntu, Raspian, macOS, and Windows.
|
||||
|
||||
## Ubuntu/Linux
|
||||
|
||||
### Installation
|
||||
|
||||
You can use `make` to install OpenCV 4.2.0 with the handy `Makefile` included with this repo. If you already have installed OpenCV, you do not need to do so again. The installation performed by the `Makefile` is minimal, so it may remove OpenCV options such as Python or Java wrappers if you have already installed OpenCV some other way.
|
||||
You can use `make` to install OpenCV 4.5.3 with the handy `Makefile` included with this repo. If you already have installed OpenCV, you do not need to do so again. The installation performed by the `Makefile` is minimal, so it may remove OpenCV options such as Python or Java wrappers if you have already installed OpenCV some other way.
|
||||
|
||||
#### Quick Install
|
||||
|
||||
The following commands should do everything to download and install OpenCV 4.2.0 on Linux:
|
||||
First, change directories to where you want to install GoCV, and then use git to clone the repository to your local machine like this:
|
||||
|
||||
cd $GOPATH/src/gocv.io/x/gocv
|
||||
cd $HOME/folder/with/your/src/
|
||||
git clone https://github.com/hybridgroup/gocv.git
|
||||
|
||||
Make sure to change `$HOME/folder/with/your/src/` to where you actually want to save the code.
|
||||
|
||||
Once you have cloned the repo, the following commands should do everything to download and install OpenCV 4.5.3 on Linux:
|
||||
|
||||
cd gocv
|
||||
make install
|
||||
|
||||
If you need static opencv libraries
|
||||
|
||||
make install BUILD_SHARED_LIBS=OFF
|
||||
|
||||
If it works correctly, at the end of the entire process, the following message should be displayed:
|
||||
|
||||
gocv version: 0.22.0
|
||||
opencv lib version: 4.2.0
|
||||
gocv version: 0.28.0
|
||||
opencv lib version: 4.5.3
|
||||
|
||||
That's it, now you are ready to use GoCV.
|
||||
|
||||
#### Using CUDA with GoCV
|
||||
|
||||
See the [cuda directory](./cuda) for information.
|
||||
|
||||
#### Using OpenVINO with GoCV
|
||||
|
||||
See the [openvino directory](./openvino) for information.
|
||||
|
||||
#### Make Install for OpenVINO and Cuda
|
||||
|
||||
The following commands should do everything to download and install OpenCV 4.5.3 with CUDA and OpenVINO on Linux. Make sure to change `$HOME/folder/with/your/src/` to the directory you used to clone GoCV:
|
||||
|
||||
cd $HOME/folder/with/gocv/
|
||||
make install_all
|
||||
|
||||
If you need static opencv libraries
|
||||
|
||||
make install_all BUILD_SHARED_LIBS=OFF
|
||||
|
||||
If it works correctly, at the end of the entire process, the following message should be displayed:
|
||||
|
||||
gocv version: 0.28.0
|
||||
opencv lib version: 4.5.3-openvino
|
||||
cuda information:
|
||||
Device 0: "GeForce MX150" 2003Mb, sm_61, Driver/Runtime ver.10.0/10.0
|
||||
|
||||
#### Complete Install
|
||||
|
||||
If you have already done the "Quick Install" as described above, you do not need to run any further commands. For the curious, or for custom installations, here are the details for each of the steps that are performed when you run `make install`.
|
||||
|
||||
First, change directories to where you want to install GoCV, and then use git to clone the repository to your local machine like this:
|
||||
|
||||
cd $HOME/folder/with/your/src/
|
||||
git clone https://github.com/hybridgroup/gocv.git
|
||||
|
||||
Make sure to change `$HOME/folder/with/your/src/` to where you actually want to save the code.
|
||||
|
||||
##### Install required packages
|
||||
|
||||
First, you need to change the current directory to the location of the GoCV repo, so you can access the `Makefile`:
|
||||
First, you need to change the current directory to the location where you cloned the GoCV repo, so you can access the `Makefile`:
|
||||
|
||||
cd $GOPATH/src/gocv.io/x/gocv
|
||||
cd $HOME/folder/with/your/src/gocv
|
||||
|
||||
Next, you need to update the system, and install any required packages:
|
||||
|
||||
@@ -165,7 +206,7 @@ Next, you need to update the system, and install any required packages:
|
||||
|
||||
#### Download source
|
||||
|
||||
Now, download the OpenCV 4.2.0 and OpenCV Contrib source code:
|
||||
Now, download the OpenCV 4.5.3 and OpenCV Contrib source code:
|
||||
|
||||
make download
|
||||
|
||||
@@ -175,6 +216,10 @@ Build everything. This will take quite a while:
|
||||
|
||||
make build
|
||||
|
||||
If you need static opencv libraries
|
||||
|
||||
make build BUILD_SHARED_LIBS=OFF
|
||||
|
||||
#### Install
|
||||
|
||||
Once the code is built, you are ready to install:
|
||||
@@ -187,7 +232,7 @@ To verify your installation you can run one of the included examples.
|
||||
|
||||
First, change the current directory to the location of the GoCV repo:
|
||||
|
||||
cd $GOPATH/src/gocv.io/x/gocv
|
||||
cd $HOME/src/gocv.io/x/gocv
|
||||
|
||||
Now you should be able to build or run any of the examples:
|
||||
|
||||
@@ -195,8 +240,8 @@ Now you should be able to build or run any of the examples:
|
||||
|
||||
The version program should output the following:
|
||||
|
||||
gocv version: 0.22.0
|
||||
opencv lib version: 4.2.0
|
||||
gocv version: 0.28.0
|
||||
opencv lib version: 4.5.3
|
||||
|
||||
#### Cleanup extra files
|
||||
|
||||
@@ -204,12 +249,6 @@ After the installation is complete, you can remove the extra files and folders:
|
||||
|
||||
make clean
|
||||
|
||||
### Cache builds
|
||||
|
||||
If you are running a version of Go older than v1.10 and not modifying GoCV source, precompile the GoCV package to significantly decrease your build times:
|
||||
|
||||
go install gocv.io/x/gocv
|
||||
|
||||
### Custom Environment
|
||||
|
||||
By default, pkg-config is used to determine the correct flags for compiling and linking OpenCV. This behavior can be disabled by supplying `-tags customenv` when building/running your application. When building with this tag you will need to supply the CGO environment variables yourself.
|
||||
@@ -231,10 +270,10 @@ The project now provides `Dockerfile` which lets you build [GoCV](https://gocv.i
|
||||
make docker
|
||||
```
|
||||
|
||||
By default Docker image built by running the command above ships [Go](https://golang.org/) version `1.13.5`, but if you would like to build an image which uses different version of `Go` you can override the default value when running the target command:
|
||||
By default Docker image built by running the command above ships [Go](https://golang.org/) version `1.16.5`, but if you would like to build an image which uses different version of `Go` you can override the default value when running the target command:
|
||||
|
||||
```
|
||||
make docker GOVERSION='1.13.5'
|
||||
make docker GOVERSION='1.15'
|
||||
```
|
||||
|
||||
#### Running GUI programs in Docker on macOS
|
||||
@@ -281,19 +320,26 @@ There is a Docker image with Alpine 3.7 that has been created by project contrib
|
||||
|
||||
### Installation
|
||||
|
||||
We have a special installation for the Raspberry Pi that includes some hardware optimizations. You use `make` to install OpenCV 4.2.0 with the handy `Makefile` included with this repo. If you already have installed OpenCV, you do not need to do so again. The installation performed by the `Makefile` is minimal, so it may remove OpenCV options such as Python or Java wrappers if you have already installed OpenCV some other way.
|
||||
We have a special installation for the Raspberry Pi that includes some hardware optimizations. You use `make` to install OpenCV 4.5.3 with the handy `Makefile` included with this repo. If you already have installed OpenCV, you do not need to do so again. The installation performed by the `Makefile` is minimal, so it may remove OpenCV options such as Python or Java wrappers if you have already installed OpenCV some other way.
|
||||
|
||||
#### Quick Install
|
||||
|
||||
The following commands should do everything to download and install OpenCV 4.2.0 on Raspbian:
|
||||
First, change directories to where you want to install GoCV, and then use git to clone the repository to your local machine like this:
|
||||
|
||||
cd $GOPATH/src/gocv.io/x/gocv
|
||||
cd $HOME/folder/with/your/src/
|
||||
git clone https://github.com/hybridgroup/gocv.git
|
||||
|
||||
Make sure to change `$HOME/folder/with/your/src/` to where you actually want to save the code.
|
||||
|
||||
The following make command should do everything to download and install OpenCV 4.5.3 on Raspbian:
|
||||
|
||||
cd $HOME/folder/with/your/src/gocv
|
||||
make install_raspi
|
||||
|
||||
If it works correctly, at the end of the entire process, the following message should be displayed:
|
||||
|
||||
gocv version: 0.22.0
|
||||
opencv lib version: 4.2.0
|
||||
gocv version: 0.28.0
|
||||
opencv lib version: 4.5.3
|
||||
|
||||
That's it, now you are ready to use GoCV.
|
||||
|
||||
@@ -301,13 +347,13 @@ That's it, now you are ready to use GoCV.
|
||||
|
||||
### Installation
|
||||
|
||||
You can install OpenCV 4.2.0 using Homebrew.
|
||||
You can install OpenCV 4.5.3 using Homebrew.
|
||||
|
||||
If you already have an earlier version of OpenCV (3.4.x) installed, you should probably remove it before installing the new version:
|
||||
|
||||
brew uninstall opencv
|
||||
|
||||
You can then install OpenCV 4.2.0:
|
||||
You can then install OpenCV 4.5.3:
|
||||
|
||||
brew install opencv
|
||||
|
||||
@@ -323,7 +369,7 @@ To verify your installation you can run one of the included examples.
|
||||
|
||||
First, change the current directory to the location of the GoCV repo:
|
||||
|
||||
cd $GOPATH/src/gocv.io/x/gocv
|
||||
cd $HOME/folder/with/your/src/gocv
|
||||
|
||||
Now you should be able to build or run any of the examples:
|
||||
|
||||
@@ -331,14 +377,8 @@ Now you should be able to build or run any of the examples:
|
||||
|
||||
The version program should output the following:
|
||||
|
||||
gocv version: 0.22.0
|
||||
opencv lib version: 4.2.0
|
||||
|
||||
### Cache builds
|
||||
|
||||
If you are running a version of Go older than v1.10 and not modifying GoCV source, precompile the GoCV package to significantly decrease your build times:
|
||||
|
||||
go install gocv.io/x/gocv
|
||||
gocv version: 0.28.0
|
||||
opencv lib version: 4.5.3
|
||||
|
||||
### Custom Environment
|
||||
|
||||
@@ -347,8 +387,8 @@ By default, pkg-config is used to determine the correct flags for compiling and
|
||||
For example:
|
||||
|
||||
export CGO_CXXFLAGS="--std=c++11"
|
||||
export CGO_CPPFLAGS="-I/usr/local/Cellar/opencv/4.2.0/include"
|
||||
export CGO_LDFLAGS="-L/usr/local/Cellar/opencv/4.2.0/lib -lopencv_stitching -lopencv_superres -lopencv_videostab -lopencv_aruco -lopencv_bgsegm -lopencv_bioinspired -lopencv_ccalib -lopencv_dnn_objdetect -lopencv_dpm -lopencv_face -lopencv_photo -lopencv_fuzzy -lopencv_hfs -lopencv_img_hash -lopencv_line_descriptor -lopencv_optflow -lopencv_reg -lopencv_rgbd -lopencv_saliency -lopencv_stereo -lopencv_structured_light -lopencv_phase_unwrapping -lopencv_surface_matching -lopencv_tracking -lopencv_datasets -lopencv_dnn -lopencv_plot -lopencv_xfeatures2d -lopencv_shape -lopencv_video -lopencv_ml -lopencv_ximgproc -lopencv_calib3d -lopencv_features2d -lopencv_highgui -lopencv_videoio -lopencv_flann -lopencv_xobjdetect -lopencv_imgcodecs -lopencv_objdetect -lopencv_xphoto -lopencv_imgproc -lopencv_core"
|
||||
export CGO_CPPFLAGS="-I/usr/local/Cellar/opencv/4.5.3/include"
|
||||
export CGO_LDFLAGS="-L/usr/local/Cellar/opencv/4.5.3/lib -lopencv_stitching -lopencv_superres -lopencv_videostab -lopencv_aruco -lopencv_bgsegm -lopencv_bioinspired -lopencv_ccalib -lopencv_dnn_objdetect -lopencv_dpm -lopencv_face -lopencv_photo -lopencv_fuzzy -lopencv_hfs -lopencv_img_hash -lopencv_line_descriptor -lopencv_optflow -lopencv_reg -lopencv_rgbd -lopencv_saliency -lopencv_stereo -lopencv_structured_light -lopencv_phase_unwrapping -lopencv_surface_matching -lopencv_tracking -lopencv_datasets -lopencv_dnn -lopencv_plot -lopencv_xfeatures2d -lopencv_shape -lopencv_video -lopencv_ml -lopencv_ximgproc -lopencv_calib3d -lopencv_features2d -lopencv_highgui -lopencv_videoio -lopencv_flann -lopencv_xobjdetect -lopencv_imgcodecs -lopencv_objdetect -lopencv_xphoto -lopencv_imgproc -lopencv_core"
|
||||
|
||||
Please note that you will need to run these 3 lines of code one time in your current session in order to build or run the code, in order to setup the needed ENV variables. Once you have done so, you can execute code that uses GoCV with your custom environment like this:
|
||||
|
||||
@@ -360,11 +400,11 @@ Please note that you will need to run these 3 lines of code one time in your cur
|
||||
|
||||
The following assumes that you are running a 64-bit version of Windows 10.
|
||||
|
||||
In order to build and install OpenCV 4.2.0 on Windows, you must first download and install MinGW-W64 and CMake, as follows.
|
||||
In order to build and install OpenCV 4.5.3 on Windows, you must first download and install MinGW-W64 and CMake, as follows.
|
||||
|
||||
#### MinGW-W64
|
||||
|
||||
Download and run the MinGW-W64 compiler installer from [https://sourceforge.net/projects/mingw-w64/?source=typ_redirect](https://sourceforge.net/projects/mingw-w64/?source=typ_redirect).
|
||||
Download and run the MinGW-W64 compiler installer from [https://sourceforge.net/projects/mingw-w64/files/Toolchains%20targetting%20Win32/Personal%20Builds/mingw-builds/7.3.0/](https://sourceforge.net/projects/mingw-w64/files/Toolchains%20targetting%20Win32/Personal%20Builds/mingw-builds/7.3.0/).
|
||||
|
||||
The latest version of the MinGW-W64 toolchain is `7.3.0`, but any version from `7.X` on should work.
|
||||
|
||||
@@ -376,9 +416,9 @@ Add the `C:\Program Files\mingw-w64\x86_64-7.3.0-posix-seh-rt_v5-rev2\mingw64\bi
|
||||
|
||||
Download and install CMake [https://cmake.org/download/](https://cmake.org/download/) to the default location. CMake installer will add CMake to your system path.
|
||||
|
||||
#### OpenCV 4.2.0 and OpenCV Contrib Modules
|
||||
#### OpenCV 4.5.3 and OpenCV Contrib Modules
|
||||
|
||||
The following commands should do everything to download and install OpenCV 4.2.0 on Windows:
|
||||
The following commands should do everything to download and install OpenCV 4.5.3 on Windows:
|
||||
|
||||
chdir %GOPATH%\src\gocv.io\x\gocv
|
||||
win_build_opencv.cmd
|
||||
@@ -399,17 +439,11 @@ Now you should be able to build or run any of the command examples:
|
||||
|
||||
The version program should output the following:
|
||||
|
||||
gocv version: 0.22.0
|
||||
opencv lib version: 4.2.0
|
||||
gocv version: 0.28.0
|
||||
opencv lib version: 4.5.3
|
||||
|
||||
That's it, now you are ready to use GoCV.
|
||||
|
||||
### Cache builds
|
||||
|
||||
If you are running a version of Go older than v1.10 and not modifying GoCV source, precompile the GoCV package to significantly decrease your build times:
|
||||
|
||||
go install gocv.io/x/gocv
|
||||
|
||||
### Custom Environment
|
||||
|
||||
By default, OpenCV is expected to be in `C:\opencv\build\install\include`. This behavior can be disabled by supplying `-tags customenv` when building/running your application. When building with this tag you will need to supply the CGO environment variables yourself.
|
||||
@@ -420,7 +454,7 @@ For example:
|
||||
|
||||
set CGO_CXXFLAGS="--std=c++11"
|
||||
set CGO_CPPFLAGS=-IC:\opencv\build\install\include
|
||||
set CGO_LDFLAGS=-LC:\opencv\build\install\x64\mingw\lib -lopencv_core412 -lopencv_face412 -lopencv_videoio412 -lopencv_imgproc412 -lopencv_highgui412 -lopencv_imgcodecs412 -lopencv_objdetect412 -lopencv_features2d412 -lopencv_video412 -lopencv_dnn412 -lopencv_xfeatures2d412 -lopencv_plot412 -lopencv_tracking412 -lopencv_img_hash412
|
||||
set CGO_LDFLAGS=-LC:\opencv\build\install\x64\mingw\lib -lopencv_core453 -lopencv_face453 -lopencv_videoio453 -lopencv_imgproc453 -lopencv_highgui453 -lopencv_imgcodecs453 -lopencv_objdetect453 -lopencv_features2d453 -lopencv_video453 -lopencv_dnn453 -lopencv_xfeatures2d453 -lopencv_plot453 -lopencv_tracking453 -lopencv_img_hash453
|
||||
|
||||
Please note that you will need to run these 3 lines of code one time in your current session in order to build or run the code, in order to setup the needed ENV variables. Once you have done so, you can execute code that uses GoCV with your custom environment like this:
|
||||
|
||||
@@ -554,6 +588,6 @@ This package was inspired by the original https://github.com/go-opencv/go-opencv
|
||||
|
||||
## License
|
||||
|
||||
Licensed under the Apache 2.0 license. Copyright (c) 2017-2019 The Hybrid Group.
|
||||
Licensed under the Apache 2.0 license. Copyright (c) 2017-2021 The Hybrid Group.
|
||||
|
||||
Logo generated by GopherizeMe - https://gopherize.me
|
||||
|
||||
224
vendor/gocv.io/x/gocv/ROADMAP.md
generated
vendored
224
vendor/gocv.io/x/gocv/ROADMAP.md
generated
vendored
@@ -13,10 +13,9 @@ Your pull requests will be greatly appreciated!
|
||||
## Modules list
|
||||
|
||||
- [ ] **core. Core functionality - WORK STARTED**
|
||||
- [ ] **Basic structures - WORK STARTED**
|
||||
- [X] **Basic structures**
|
||||
- [ ] **Operations on arrays - WORK STARTED**. The following functions still need implementation:
|
||||
- [ ] [Mahalanobis](https://docs.opencv.org/master/d2/de8/group__core__array.html#ga4493aee129179459cbfc6064f051aa7d)
|
||||
- [ ] [mixChannels](https://docs.opencv.org/master/d2/de8/group__core__array.html#ga51d768c270a1cdd3497255017c4504be)
|
||||
- [ ] [mulTransposed](https://docs.opencv.org/master/d2/de8/group__core__array.html#gadc4e49f8f7a155044e3be1b9e3b270ab)
|
||||
- [ ] [PCABackProject](https://docs.opencv.org/master/d2/de8/group__core__array.html#gab26049f30ee8e94f7d69d82c124faafc)
|
||||
- [ ] [PCACompute](https://docs.opencv.org/master/d2/de8/group__core__array.html#ga4e2073c7311f292a0648f04c37b73781)
|
||||
@@ -25,51 +24,46 @@ Your pull requests will be greatly appreciated!
|
||||
- [ ] [randn](https://docs.opencv.org/master/d2/de8/group__core__array.html#gaeff1f61e972d133a04ce3a5f81cf6808)
|
||||
- [ ] [randShuffle](https://docs.opencv.org/master/d2/de8/group__core__array.html#ga6a789c8a5cb56c6dd62506179808f763)
|
||||
- [ ] [randu](https://docs.opencv.org/master/d2/de8/group__core__array.html#ga1ba1026dca0807b27057ba6a49d258c0)
|
||||
- [x] [setIdentity](https://docs.opencv.org/master/d2/de8/group__core__array.html#ga388d7575224a4a277ceb98ccaa327c99)
|
||||
- [ ] [setRNGSeed](https://docs.opencv.org/master/d2/de8/group__core__array.html#ga757e657c037410d9e19e819569e7de0f)
|
||||
- [ ] [SVBackSubst](https://docs.opencv.org/master/d2/de8/group__core__array.html#gab4e620e6fc6c8a27bb2be3d50a840c0b)
|
||||
- [ ] [SVDecomp](https://docs.opencv.org/master/d2/de8/group__core__array.html#gab477b5b7b39b370bb03e75b19d2d5109)
|
||||
- [ ] [theRNG](https://docs.opencv.org/master/d2/de8/group__core__array.html#ga75843061d150ad6564b5447e38e57722)
|
||||
|
||||
- [ ] XML/YAML Persistence
|
||||
- [ ] [FileStorage](https://docs.opencv.org/master/da/d56/classcv_1_1FileStorage.html)
|
||||
|
||||
- [ ] **Clustering - WORK STARTED**. The following functions still need implementation:
|
||||
- [ ] [partition](https://docs.opencv.org/master/d5/d38/group__core__cluster.html#ga2037c989e69b499c1aa271419f3a9b34)
|
||||
|
||||
- [ ] Utility and system functions and macros
|
||||
- [ ] OpenGL interoperability
|
||||
- [ ] Intel IPP Asynchronous C/C++ Converters
|
||||
- [ ] Optimization Algorithms
|
||||
- [ ] OpenCL support
|
||||
- [ ] [ConjGradSolver](https://docs.opencv.org/master/d0/d21/classcv_1_1ConjGradSolver.html)
|
||||
- [ ] [DownhillSolver](https://docs.opencv.org/master/d4/d43/classcv_1_1DownhillSolver.html)
|
||||
- [ ] [solveLP](https://docs.opencv.org/master/da/d01/group__core__optim.html#ga9a06d237a9d38ace891efa1ca1b5d00a)
|
||||
|
||||
- [ ] **imgproc. Image processing - WORK STARTED**
|
||||
- [ ] **Image Filtering - WORK STARTED** The following functions still need implementation:
|
||||
- [ ] [buildPyramid](https://docs.opencv.org/master/d4/d86/group__imgproc__filter.html#gacfdda2bc1ac55e96de7e9f0bce7238c0)
|
||||
- [ ] [getDerivKernels](https://docs.opencv.org/master/d4/d86/group__imgproc__filter.html#ga6d6c23f7bd3f5836c31cfae994fc4aea)
|
||||
- [ ] [getGaborKernel](https://docs.opencv.org/master/d4/d86/group__imgproc__filter.html#gae84c92d248183bd92fa713ce51cc3599)
|
||||
- [ ] [getGaussianKernel](https://docs.opencv.org/master/d4/d86/group__imgproc__filter.html#gac05a120c1ae92a6060dd0db190a61afa)
|
||||
- [ ] [morphologyExWithParams](https://docs.opencv.org/master/d4/d86/group__imgproc__filter.html#ga67493776e3ad1a3df63883829375201f)
|
||||
- [ ] [pyrMeanShiftFiltering](https://docs.opencv.org/master/d4/d86/group__imgproc__filter.html#ga9fabdce9543bd602445f5db3827e4cc0)
|
||||
|
||||
- [ ] **Geometric Image Transformations - WORK STARTED** The following functions still need implementation:
|
||||
- [ ] [convertMaps](https://docs.opencv.org/master/da/d54/group__imgproc__transform.html#ga9156732fa8f01be9ebd1a194f2728b7f)
|
||||
- [ ] [getAffineTransform](https://docs.opencv.org/master/da/d54/group__imgproc__transform.html#ga8f6d378f9f8eebb5cb55cd3ae295a999)
|
||||
- [ ] [getDefaultNewCameraMatrix](https://docs.opencv.org/master/da/d54/group__imgproc__transform.html#ga744529385e88ef7bc841cbe04b35bfbf)
|
||||
- [X] [getRectSubPix](https://docs.opencv.org/master/da/d54/group__imgproc__transform.html#ga77576d06075c1a4b6ba1a608850cd614)
|
||||
- [ ] [initUndistortRectifyMap](https://docs.opencv.org/master/da/d54/group__imgproc__transform.html#ga7dfb72c9cf9780a347fbe3d1c47e5d5a)
|
||||
- [ ] [initWideAngleProjMap](https://docs.opencv.org/master/da/d54/group__imgproc__transform.html#gaceb049ec48898d1dadd5b50c604429c8)
|
||||
- [ ] [undistort](https://docs.opencv.org/master/da/d54/group__imgproc__transform.html#ga69f2545a8b62a6b0fc2ee060dc30559d)
|
||||
- [ ] [undistortPoints](https://docs.opencv.org/master/da/d54/group__imgproc__transform.html#ga55c716492470bfe86b0ee9bf3a1f0f7e)
|
||||
|
||||
- [ ] **Miscellaneous Image Transformations - WORK STARTED** The following functions still need implementation:
|
||||
- [ ] [cvtColorTwoPlane](https://docs.opencv.org/master/d7/d1b/group__imgproc__misc.html#ga8e873314e72a1a6c0252375538fbf753)
|
||||
- [ ] [floodFill](https://docs.opencv.org/master/d7/d1b/group__imgproc__misc.html#gaf1f55a048f8a45bc3383586e80b1f0d0)
|
||||
|
||||
- [ ] **Drawing Functions - WORK STARTED** The following functions still need implementation:
|
||||
- [X] [clipLine](https://docs.opencv.org/master/d6/d6e/group__imgproc__draw.html#gaf483cb46ad6b049bc35ec67052ef1c2c)
|
||||
- [ ] [drawMarker](https://docs.opencv.org/master/d6/d6e/group__imgproc__draw.html#ga482fa7b0f578fcdd8a174904592a6250)
|
||||
- [ ] [ellipse2Poly](https://docs.opencv.org/master/d6/d6e/group__imgproc__draw.html#ga727a72a3f6a625a2ae035f957c61051f)
|
||||
- [ ] [fillConvexPoly](https://docs.opencv.org/master/d6/d6e/group__imgproc__draw.html#ga906aae1606ea4ed2f27bec1537f6c5c2)
|
||||
- [ ] [getFontScaleFromHeight](https://docs.opencv.org/master/d6/d6e/group__imgproc__draw.html#ga442ff925c1a957794a1309e0ed3ba2c3)
|
||||
- [ ] [polylines](https://docs.opencv.org/master/d6/d6e/group__imgproc__draw.html#ga444cb8a2666320f47f09d5af08d91ffb)
|
||||
|
||||
- [ ] ColorMaps in OpenCV
|
||||
- [ ] Planar Subdivision
|
||||
@@ -86,10 +80,12 @@ Your pull requests will be greatly appreciated!
|
||||
- [ ] [isContourConvex](https://docs.opencv.org/master/d3/dc0/group__imgproc__shape.html#ga8abf8010377b58cbc16db6734d92941b)
|
||||
- [ ] [matchShapes](https://docs.opencv.org/master/d3/dc0/group__imgproc__shape.html#gaadc90cb16e2362c9bd6e7363e6e4c317)
|
||||
- [ ] [minEnclosingTriangle](https://docs.opencv.org/master/d3/dc0/group__imgproc__shape.html#ga1513e72f6bbdfc370563664f71e0542f)
|
||||
- [ ] [pointPolygonTest](https://docs.opencv.org/master/d3/dc0/group__imgproc__shape.html#ga1a539e8db2135af2566103705d7a5722)
|
||||
- [ ] [rotatedRectangleIntersection](https://docs.opencv.org/master/d3/dc0/group__imgproc__shape.html#ga8740e7645628c59d238b0b22c2abe2d4)
|
||||
|
||||
- [ ] Motion Analysis and Object Tracking
|
||||
- [ ] **Motion Analysis and Object Tracking - WORK STARTED** The following functions still need implementation:
|
||||
- [ ] [createHanningWindow](https://docs.opencv.org/master/d7/df3/group__imgproc__motion.html#ga80e5c3de52f6bab3a7c1e60e89308e1b)
|
||||
- [ ] [phaseCorrelate](https://docs.opencv.org/master/d7/df3/group__imgproc__motion.html#ga552420a2ace9ef3fb053cd630fdb4952)
|
||||
|
||||
- [ ] **Feature Detection - WORK STARTED** The following functions still need implementation:
|
||||
- [ ] [cornerEigenValsAndVecs](https://docs.opencv.org/master/dd/d1a/group__imgproc__feature.html#ga4055896d9ef77dd3cacf2c5f60e13f1c)
|
||||
- [ ] [cornerHarris](https://docs.opencv.org/master/dd/d1a/group__imgproc__feature.html#gac1fc3598018010880e370e2f709b4345)
|
||||
@@ -114,6 +110,7 @@ Your pull requests will be greatly appreciated!
|
||||
- [ ] [FarnebackOpticalFlow](https://docs.opencv.org/master/de/d9e/classcv_1_1FarnebackOpticalFlow.html)
|
||||
- [ ] [KalmanFilter](https://docs.opencv.org/master/dd/d6a/classcv_1_1KalmanFilter.html)
|
||||
- [ ] [SparsePyrLKOpticalFlow](https://docs.opencv.org/master/d7/d08/classcv_1_1SparsePyrLKOpticalFlow.html)
|
||||
- [ ] [GOTURN](https://docs.opencv.org/master/d7/d4c/classcv_1_1TrackerGOTURN.html)
|
||||
|
||||
- [ ] **calib3d. Camera Calibration and 3D Reconstruction - WORK STARTED**. The following functions still need implementation:
|
||||
- [ ] **Camera Calibration - WORK STARTED** The following functions still need implementation:
|
||||
@@ -135,7 +132,6 @@ Your pull requests will be greatly appreciated!
|
||||
- [ ] [drawFrameAxes](https://docs.opencv.org/master/d9/d0c/group__calib3d.html)
|
||||
- [ ] [estimateAffine2D](https://docs.opencv.org/master/d9/d0c/group__calib3d.html)
|
||||
- [ ] [estimateAffine3D](https://docs.opencv.org/master/d9/d0c/group__calib3d.html)
|
||||
- [ ] [estimateAffinePartial2D](https://docs.opencv.org/master/d9/d0c/group__calib3d.html)
|
||||
- [ ] [filterHomographyDecompByVisibleRefpoints](https://docs.opencv.org/master/d9/d0c/group__calib3d.html)
|
||||
- [ ] [filterSpeckles](https://docs.opencv.org/master/d9/d0c/group__calib3d.html)
|
||||
- [ ] [find4QuadCornerSubpix](https://docs.opencv.org/master/d9/d0c/group__calib3d.html)
|
||||
@@ -144,7 +140,6 @@ Your pull requests will be greatly appreciated!
|
||||
- [ ] [findCirclesGrid](https://docs.opencv.org/master/d9/d0c/group__calib3d.html)
|
||||
- [ ] [findEssentialMat](https://docs.opencv.org/master/d9/d0c/group__calib3d.html)
|
||||
- [ ] [findFundamentalMat](https://docs.opencv.org/master/d9/d0c/group__calib3d.html)
|
||||
- [ ] [findHomography](https://docs.opencv.org/master/d9/d0c/group__calib3d.html)
|
||||
- [ ] [getDefaultNewCameraMatrix](https://docs.opencv.org/master/d9/d0c/group__calib3d.html)
|
||||
- [ ] [getOptimalNewCameraMatrix](https://docs.opencv.org/master/d9/d0c/group__calib3d.html)
|
||||
- [ ] [getValidDisparityROI](https://docs.opencv.org/master/d9/d0c/group__calib3d.html)
|
||||
@@ -169,46 +164,171 @@ Your pull requests will be greatly appreciated!
|
||||
- [ ] [stereoRectify](https://docs.opencv.org/master/d9/d0c/group__calib3d.html)
|
||||
- [ ] [stereoRectifyUncalibrated](https://docs.opencv.org/master/d9/d0c/group__calib3d.html)
|
||||
- [ ] [triangulatePoints](https://docs.opencv.org/master/d9/d0c/group__calib3d.html)
|
||||
- [x] [undistort](https://docs.opencv.org/master/d9/d0c/group__calib3d.html)
|
||||
- [ ] [undistortPoints](https://docs.opencv.org/master/d9/d0c/group__calib3d.html)
|
||||
- [ ] [validateDisparity](https://docs.opencv.org/master/d9/d0c/group__calib3d.html)
|
||||
|
||||
- [ ] **Fisheye - WORK STARTED** The following functions still need implementation:
|
||||
- [ ] [calibrate](https://docs.opencv.org/master/db/d58/group__calib3d__fisheye.html#gad626a78de2b1dae7489e152a5a5a89e1)
|
||||
- [ ] [distortPoints](https://docs.opencv.org/master/db/d58/group__calib3d__fisheye.html#ga75d8877a98e38d0b29b6892c5f8d7765)
|
||||
- [ ] [estimateNewCameraMatrixForUndistortRectify](https://docs.opencv.org/master/db/d58/group__calib3d__fisheye.html#ga384940fdf04c03e362e94b6eb9b673c9)
|
||||
- [ ] [projectPoints](https://docs.opencv.org/master/db/d58/group__calib3d__fisheye.html#gab1ad1dc30c42ee1a50ce570019baf2c4)
|
||||
- [ ] [stereoCalibrate](https://docs.opencv.org/master/db/d58/group__calib3d__fisheye.html#gadbb3a6ca6429528ef302c784df47949b)
|
||||
- [ ] [stereoRectify](https://docs.opencv.org/master/db/d58/group__calib3d__fisheye.html#gac1af58774006689056b0f2ef1db55ecc)
|
||||
- [ ] [undistortPoints](https://docs.opencv.org/master/db/d58/group__calib3d__fisheye.html#gab738cdf90ceee97b2b52b0d0e7511541)
|
||||
|
||||
- [ ] **features2d. 2D Features Framework - WORK STARTED**
|
||||
- [X] **Feature Detection and Description**
|
||||
- [ ] **Descriptor Matchers - WORK STARTED** The following functions still need implementation:
|
||||
- [ ] [FlannBasedMatcher](https://docs.opencv.org/master/dc/de2/classcv_1_1FlannBasedMatcher.html)
|
||||
- [ ] **Drawing Function of Keypoints and Matches - WORK STARTED** The following function still needs implementation:
|
||||
- [ ] [drawMatches](https://docs.opencv.org/master/d4/d5d/group__features2d__draw.html#ga7421b3941617d7267e3f2311582f49e1)
|
||||
- [X] **Descriptor Matchers**
|
||||
- [X] **Drawing Function of Keypoints and Matches**
|
||||
- [ ] Object Categorization
|
||||
- [ ] [BOWImgDescriptorExtractor](https://docs.opencv.org/master/d2/d6b/classcv_1_1BOWImgDescriptorExtractor.html)
|
||||
- [ ] [BOWKMeansTrainer](https://docs.opencv.org/master/d4/d72/classcv_1_1BOWKMeansTrainer.html)
|
||||
|
||||
- [X] **objdetect. Object Detection**
|
||||
- [ ] **dnn. Deep Neural Network module - WORK STARTED** The following functions still need implementation:
|
||||
- [ ] [NMSBoxes](https://docs.opencv.org/master/d6/d0f/group__dnn.html#ga9d118d70a1659af729d01b10233213ee)
|
||||
|
||||
- [X] **dnn. Deep Neural Network module**
|
||||
- [ ] ml. Machine Learning
|
||||
- [ ] flann. Clustering and Search in Multi-Dimensional Spaces
|
||||
- [ ] photo. Computational Photography
|
||||
- [ ] **photo. Computational Photography - WORK STARTED** The following functions still need implementation:
|
||||
- [ ] [inpaint](https://docs.opencv.org/master/d7/d8b/group__photo__inpaint.html#gaedd30dfa0214fec4c88138b51d678085)
|
||||
- [ ] [denoise_TVL1](https://docs.opencv.org/master/d1/d79/group__photo__denoise.html#ga7602ed5ae17b7de40152b922227c4e4f)
|
||||
- [ ] [fastNlMeansDenoising](https://docs.opencv.org/master/d1/d79/group__photo__denoise.html#ga4c6b0031f56ea3f98f768881279ffe93)
|
||||
- [ ] [fastNlMeansDenoisingColored](https://docs.opencv.org/master/d1/d79/group__photo__denoise.html#ga03aa4189fc3e31dafd638d90de335617)
|
||||
- [ ] [fastNlMeansDenoisingMulti](https://docs.opencv.org/master/d1/d79/group__photo__denoise.html#gaf4421bf068c4d632ea7f0aa38e0bf172)
|
||||
- [ ] [createCalibrateDebevec](https://docs.opencv.org/master/d6/df5/group__photo__hdr.html#ga7fed9707ad5f2cc0e633888867109f90)
|
||||
- [ ] [createCalibrateRobertson](https://docs.opencv.org/master/d6/df5/group__photo__hdr.html#gae77813a21cd351a596619e5ff013be5d)
|
||||
- [ ] [createMergeDebevec](https://docs.opencv.org/master/d6/df5/group__photo__hdr.html#gaa8eab36bc764abb2a225db7c945f87f9)
|
||||
- [ ] [createMergeRobertson](https://docs.opencv.org/master/d6/df5/group__photo__hdr.html#ga460d4a1df1a7e8cdcf7445bb87a8fb78)
|
||||
- [ ] [createTonemap](https://docs.opencv.org/master/d6/df5/group__photo__hdr.html#gabcbd653140b93a1fa87ccce94548cd0d)
|
||||
- [ ] [createTonemapDrago](https://docs.opencv.org/master/d6/df5/group__photo__hdr.html#ga72bf92bb6b8653ee4be650ac01cf50b6)
|
||||
- [ ] [createTonemapMantiuk](https://docs.opencv.org/master/d6/df5/group__photo__hdr.html#ga3b3f3bf083b7515802f039a6a70f2d21)
|
||||
- [ ] [createTonemapReinhard](https://docs.opencv.org/master/d6/df5/group__photo__hdr.html#gadabe7f6bf1fa96ad0fd644df9182c2fb)
|
||||
- [ ] [decolor](https://docs.opencv.org/master/d4/d32/group__photo__decolor.html#ga4864d4c007bda5dacdc5e9d4ed7e222c)
|
||||
- [ ] [detailEnhance](https://docs.opencv.org/master/df/dac/group__photo__render.html#ga0de660cb6f371a464a74c7b651415975)
|
||||
- [ ] [edgePreservingFilter](https://docs.opencv.org/master/df/dac/group__photo__render.html#gafaee2977597029bc8e35da6e67bd31f7)
|
||||
- [ ] [pencilSketch](https://docs.opencv.org/master/df/dac/group__photo__render.html#gae5930dd822c713b36f8529b21ddebd0c)
|
||||
- [ ] [stylization](https://docs.opencv.org/master/df/dac/group__photo__render.html#gacb0f7324017df153d7b5d095aed53206)
|
||||
|
||||
- [ ] stitching. Images stitching
|
||||
- [ ] cudaarithm. Operations on Matrices
|
||||
- [ ] cudabgsegm. Background Segmentation
|
||||
- [ ] cudacodec. Video Encoding/Decoding
|
||||
- [ ] cudafeatures2d. Feature Detection and Description
|
||||
- [ ] cudafilters. Image Filtering
|
||||
- [ ] cudaimgproc. Image Processing
|
||||
- [ ] cudalegacy. Legacy support
|
||||
- [ ] cudaobjdetect. Object Detection
|
||||
- [ ] **cudaoptflow. Optical Flow - WORK STARTED**
|
||||
|
||||
## CUDA
|
||||
|
||||
- [ ] **core. - WORK STARTED** The following functions still need implementation:
|
||||
- [ ] [cv::cuda::convertFp16](https://docs.opencv.org/master/d8/d40/group__cudacore__init.html#gaa1c52258763197958eb9e6681917f723)
|
||||
- [ ] [cv::cuda::deviceSupports](https://docs.opencv.org/master/d8/d40/group__cudacore__init.html#ga170b10cc9af4aa8cce8c0afdb4b1d08c)
|
||||
- [ ] [cv::cuda::getDevice](https://docs.opencv.org/master/d8/d40/group__cudacore__init.html#ga6ded4ed8e4fc483a9863d31f34ec9c0e)
|
||||
- [ ] [cv::cuda::resetDevice](https://docs.opencv.org/master/d8/d40/group__cudacore__init.html#ga6153b6f461101374e655a54fc77e725e)
|
||||
- [ ] [cv::cuda::setDevice](https://docs.opencv.org/master/d8/d40/group__cudacore__init.html#gaefa34186b185de47851836dba537828b)
|
||||
|
||||
- [ ] **cudaarithm. Operations on Matrices - WORK STARTED** The following functions still need implementation:
|
||||
- [ ] **core** The following functions still need implementation:
|
||||
- [ ] [cv::cuda::copyMakeBorder](https://docs.opencv.org/master/de/d09/group__cudaarithm__core.html#ga5368db7656eacf846b40089c98053a49)
|
||||
- [ ] [cv::cuda::createLookUpTable](https://docs.opencv.org/master/de/d09/group__cudaarithm__core.html#ga2d9d9780dea8c5cd85d3c19b7e01979c)
|
||||
- [ ] [cv::cuda::merge](https://docs.opencv.org/master/de/d09/group__cudaarithm__core.html#gaac939dc3b178ee92fb6e7078f342622c)
|
||||
- [ ] [cv::cuda::split](https://docs.opencv.org/master/de/d09/group__cudaarithm__core.html#gabe5013d55d4ff586b20393913726179e)
|
||||
- [ ] [cv::cuda::transpose](https://docs.opencv.org/master/de/d09/group__cudaarithm__core.html#ga327b71c3cb811a904ccf5fba37fc29f2)
|
||||
|
||||
- [ ] **per-element operations - WORK STARTED** The following functions still need implementation:
|
||||
- [ ] [cv::cuda::absdiff](https://docs.opencv.org/master/d8/d34/group__cudaarithm__elem.html#gac062b283cf46ee90f74a773d3382ab54)
|
||||
- [ ] [cv::cuda::add](https://docs.opencv.org/master/d8/d34/group__cudaarithm__elem.html#ga5d9794bde97ed23d1c1485249074a8b1)
|
||||
- [ ] [cv::cuda::addWeighted](https://docs.opencv.org/master/d8/d34/group__cudaarithm__elem.html#ga2cd14a684ea70c6ab2a63ee90ffe6201)
|
||||
- [ ] [cv::cuda::bitwise_and](https://docs.opencv.org/master/d8/d34/group__cudaarithm__elem.html#ga78d7c1a013877abd4237fbfc4e13bd76)
|
||||
- [ ] [cv::cuda::bitwise_not](https://docs.opencv.org/master/d8/d34/group__cudaarithm__elem.html#gae58159a2259ae1acc76b531c171cf06a)
|
||||
- [ ] [cv::cuda::bitwise_or](https://docs.opencv.org/master/d8/d34/group__cudaarithm__elem.html#gafd098ee3e51c68daa793999c1da3dfb7)
|
||||
- [ ] [cv::cuda::bitwise_xor](https://docs.opencv.org/master/d8/d34/group__cudaarithm__elem.html#ga3d95d4faafb099aacf18e8b915a4ad8d)
|
||||
- [ ] [cv::cuda::cartToPolar](https://docs.opencv.org/master/d8/d34/group__cudaarithm__elem.html#ga82210c7d1c1d42e616e554bf75a53480)
|
||||
- [ ] [cv::cuda::compare](https://docs.opencv.org/master/d8/d34/group__cudaarithm__elem.html#ga4d41cd679f4a83862a3de71a6057db54)
|
||||
- [ ] [cv::cuda::divide](https://docs.opencv.org/master/d8/d34/group__cudaarithm__elem.html#ga124315aa226260841e25cc0b9ea99dc3)
|
||||
- [ ] [cv::cuda::exp](https://docs.opencv.org/master/d8/d34/group__cudaarithm__elem.html#gac6e51541d3bb0a7a396128e4d5919b61)
|
||||
- [ ] [cv::cuda::log](https://docs.opencv.org/master/d8/d34/group__cudaarithm__elem.html#gaae9c60739e2d1a977b4d3250a0be42ca)
|
||||
- [ ] [cv::cuda::lshift](https://docs.opencv.org/master/d8/d34/group__cudaarithm__elem.html#gafd072accecb14c9adccdad45e3bf2300)
|
||||
- [ ] [cv::cuda::magnitude](https://docs.opencv.org/master/d8/d34/group__cudaarithm__elem.html#ga3d17f4fcd79d7c01fadd217969009463)
|
||||
- [ ] [cv::cuda::magnitudeSqr](https://docs.opencv.org/master/d8/d34/group__cudaarithm__elem.html#ga7613e382d257e150033d0ce4d6098f6a)
|
||||
- [ ] [cv::cuda::max](https://docs.opencv.org/master/d8/d34/group__cudaarithm__elem.html#gadb5dd3d870f10c0866035755b929b1e7)
|
||||
- [ ] [cv::cuda::min](https://docs.opencv.org/master/d8/d34/group__cudaarithm__elem.html#ga74f0b05a65b3d949c237abb5e6c60867)
|
||||
- [ ] [cv::cuda::multiply](https://docs.opencv.org/master/d8/d34/group__cudaarithm__elem.html#ga497cc0615bf717e1e615143b56f00591)
|
||||
- [ ] [cv::cuda::phase](https://docs.opencv.org/master/d8/d34/group__cudaarithm__elem.html#ga5b75ec01be06dcd6e27ada09a0d4656a)
|
||||
- [ ] [cv::cuda::polarToCart](https://docs.opencv.org/master/d8/d34/group__cudaarithm__elem.html#ga01516a286a329c303c2db746513dd9df)
|
||||
- [ ] [cv::cuda::pow](https://docs.opencv.org/master/d8/d34/group__cudaarithm__elem.html#ga82d04ef4bcc4dfa9bfbe76488007c6c4)
|
||||
- [ ] [cv::cuda::rshift](https://docs.opencv.org/master/d8/d34/group__cudaarithm__elem.html#ga87af0b66358cc302676f35c1fd56c2ed)
|
||||
- [ ] [cv::cuda::sqr](https://docs.opencv.org/master/d8/d34/group__cudaarithm__elem.html#ga8aae233da90ce0ffe309ab8004342acb)
|
||||
- [ ] [cv::cuda::sqrt](https://docs.opencv.org/master/d8/d34/group__cudaarithm__elem.html#ga09303680cb1a5521a922b6d392028d8c)
|
||||
- [ ] [cv::cuda::subtract](https://docs.opencv.org/master/d8/d34/group__cudaarithm__elem.html#ga6eab60fc250059e2fda79c5636bd067f)
|
||||
|
||||
- [ ] **matrix reductions** The following functions still need implementation:
|
||||
- [ ] [cv::cuda::absSum](https://docs.opencv.org/master/d5/de6/group__cudaarithm__reduce.html#ga690fa79ba4426c53f7d2bebf3d37a32a)
|
||||
- [ ] [cv::cuda::calcAbsSum](https://docs.opencv.org/master/d5/de6/group__cudaarithm__reduce.html#ga15c403b76ab2c4d7ed0f5edc09891b7e)
|
||||
- [ ] [cv::cuda::calcNorm](https://docs.opencv.org/master/d5/de6/group__cudaarithm__reduce.html#ga39d2826990d29b7e4b69dbe02bdae2e1)
|
||||
- [ ] [cv::cuda::calcNormDiff](https://docs.opencv.org/master/d5/de6/group__cudaarithm__reduce.html#ga9be3d9a7b6c5760955f37d1039d01265)
|
||||
- [ ] [cv::cuda::calcSqrSum](https://docs.opencv.org/master/d5/de6/group__cudaarithm__reduce.html#gac998c83597f6c206c78cee16aa87946f)
|
||||
- [ ] [cv::cuda::calcSum](https://docs.opencv.org/master/d5/de6/group__cudaarithm__reduce.html#ga98a09144047f09f5cb1d6b6ea8e0856f)
|
||||
- [ ] [cv::cuda::countNonZero](https://docs.opencv.org/master/d5/de6/group__cudaarithm__reduce.html#ga98a09144047f09f5cb1d6b6ea8e0856f)
|
||||
- [ ] [cv::cuda::findMinMax](https://docs.opencv.org/master/d5/de6/group__cudaarithm__reduce.html#gae7f5f2aa9f65314470a76fccdff887f2)
|
||||
- [ ] [cv::cuda::findMinMaxLoc](https://docs.opencv.org/master/d5/de6/group__cudaarithm__reduce.html#ga93916bc473a62d215d1130fab84d090a)
|
||||
- [ ] [cv::cuda::integral](https://docs.opencv.org/master/d5/de6/group__cudaarithm__reduce.html#ga07e5104eba4bf45212ac9dbc5bf72ba6)
|
||||
- [ ] [cv::cuda::meanStdDev](https://docs.opencv.org/master/d5/de6/group__cudaarithm__reduce.html#ga990a4db4c6d7e8f0f3a6685ba48fbddc)
|
||||
- [ ] [cv::cuda::minMax](https://docs.opencv.org/master/d5/de6/group__cudaarithm__reduce.html#ga8d7de68c10717cf25e787e3c20d2dfee)
|
||||
- [ ] [cv::cuda::minMaxLoc](https://docs.opencv.org/master/d5/de6/group__cudaarithm__reduce.html#ga5cacbc2a2323c4eaa81e7390c5d9f530)
|
||||
- [ ] [cv::cuda::norm](https://docs.opencv.org/master/d5/de6/group__cudaarithm__reduce.html#ga6c01988a58d92126a7c60a4ab76d8324)
|
||||
- [ ] [cv::cuda::normalize](https://docs.opencv.org/master/d5/de6/group__cudaarithm__reduce.html#ga4da4738b9956a5baaa2f5f8c2fba438a)
|
||||
- [ ] [cv::cuda::rectStdDev](https://docs.opencv.org/master/d5/de6/group__cudaarithm__reduce.html#gac311484a4e57cab2ce2cfdc195fda7ee)
|
||||
- [ ] [cv::cuda::reduce](https://docs.opencv.org/master/d5/de6/group__cudaarithm__reduce.html#ga21d57f661db7be093caf2c4378be2007)
|
||||
- [ ] [cv::cuda::sqrIntegral](https://docs.opencv.org/master/d5/de6/group__cudaarithm__reduce.html#ga40c75196202706399a60bf6ba7a052ac)
|
||||
- [ ] [cv::cuda::sqrSum](https://docs.opencv.org/master/d5/de6/group__cudaarithm__reduce.html#ga056c804ebf5d2eb9f6f35e3dcb01524c)
|
||||
- [ ] [cv::cuda::sum](https://docs.opencv.org/master/d5/de6/group__cudaarithm__reduce.html#ga1f582844670199281e8012733b50c582)
|
||||
|
||||
- [ ] **Operations on matrices** The following functions still need implementation:
|
||||
- [ ] [cv::cuda::createConvolution](https://docs.opencv.org/4.5.0/d9/d88/group__cudaarithm__arithm.html#ga2695e05ef624bf3ce03cfbda383a821d)
|
||||
- [ ] [cv::cuda::createDFT](https://docs.opencv.org/4.5.0/d9/d88/group__cudaarithm__arithm.html#ga0f72d063b73c8bb995678525eb076f10)
|
||||
- [ ] [cv::cuda::dft](https://docs.opencv.org/4.5.0/d9/d88/group__cudaarithm__arithm.html#gadea99cb15a715c983bcc2870d65a2e78)
|
||||
- [ ] [cv::cuda::gemm](https://docs.opencv.org/4.5.0/d9/d88/group__cudaarithm__arithm.html#ga42efe211d7a43bbc922da044c4f17130)
|
||||
- [ ] [cv::cuda::mulAndScaleSpectrums](https://docs.opencv.org/4.5.0/d9/d88/group__cudaarithm__arithm.html#ga5704c25b8be4f19da812e6d98c8ee464)
|
||||
- [ ] [cv::cuda::mulSpectrums](https://docs.opencv.org/4.5.0/d9/d88/group__cudaarithm__arithm.html#gab3e8900d67c4f59bdc137a0495206cd8)
|
||||
|
||||
- [X] **cudabgsegm. Background Segmentation**
|
||||
|
||||
- [ ] **cudacodec** Video Encoding/Decoding. The following functions still need implementation:
|
||||
- [ ] [cv::cuda::VideoReader](https://docs.opencv.org/master/db/ded/classcv_1_1cudacodec_1_1VideoReader.html)
|
||||
- [ ] [cv::cuda::VideoWriter](https://docs.opencv.org/master/df/dde/classcv_1_1cudacodec_1_1VideoWriter.html)
|
||||
|
||||
- [ ] **cudafeatures2d** Feature Detection and Description. The following functions still need implementation:
|
||||
- [ ] [cv::cuda::FastFeatureDetector](https://docs.opencv.org/master/d4/d6a/classcv_1_1cuda_1_1FastFeatureDetector.html)
|
||||
- [ ] [cv::cuda::ORB](https://docs.opencv.org/master/da/d44/classcv_1_1cuda_1_1ORB.html)
|
||||
|
||||
- [ ] **cudafilters. Image Filtering - WORK STARTED** The following functions still need implementation:
|
||||
- [ ] [cv::cuda::createBoxFilter](https://docs.opencv.org/master/dc/d66/group__cudafilters.html#ga3113b66e289bad7caef412e6e13ec2be)
|
||||
- [ ] [cv::cuda::createBoxMaxFilter](https://docs.opencv.org/master/dc/d66/group__cudafilters.html#gaaf4740c51128d23a37f6f1b22cee49e8)
|
||||
- [ ] [cv::cuda::createBoxMinFilter](https://docs.opencv.org/master/dc/d66/group__cudafilters.html#ga77fd36949bc8d92aabc120b4b1cfaafa)
|
||||
- [ ] [cv::cuda::createColumnSumFilter](https://docs.opencv.org/master/dc/d66/group__cudafilters.html#gac13bf7c41a34bfde2a7f33ad8caacfdf)
|
||||
- [ ] [cv::cuda::createDerivFilter](https://docs.opencv.org/master/dc/d66/group__cudafilters.html#ga14d76dc6982ce739c67198f52bc16ee1)
|
||||
- [ ] [cv::cuda::createLaplacianFilter](https://docs.opencv.org/master/dc/d66/group__cudafilters.html#ga53126e88bb7e6185dcd5628e28e42cd2)
|
||||
- [ ] [cv::cuda::createLinearFilter](https://docs.opencv.org/master/dc/d66/group__cudafilters.html#ga57cb1804ad9d1280bf86433858daabf9)
|
||||
- [ ] [cv::cuda::createMorphologyFilter](https://docs.opencv.org/master/dc/d66/group__cudafilters.html#gae58694e07be6bdbae126f36c75c08ee6)
|
||||
- [ ] [cv::cuda::createRowSumFilter](https://docs.opencv.org/master/dc/d66/group__cudafilters.html#gaf735de273ccb5072f3c27816fb97a53a)
|
||||
- [ ] [cv::cuda::createScharrFilter](https://docs.opencv.org/master/dc/d66/group__cudafilters.html#ga4ac8df158e5771ddb0bd5c9091188ce6)
|
||||
- [ ] [cv::cuda::createSeparableLinearFilter](https://docs.opencv.org/master/dc/d66/group__cudafilters.html#gaf7b79a9a92992044f328dad07a52c4bf)
|
||||
|
||||
- [ ] **cudaimgproc. Image Processing - WORK STARTED** The following functions still need implementation:
|
||||
- [ ] [cv::cuda::TemplateMatching](https://docs.opencv.org/master/d2/d58/classcv_1_1cuda_1_1TemplateMatching.html)
|
||||
- [ ] [cv::cuda::alphaComp](https://docs.opencv.org/master/db/d8c/group__cudaimgproc__color.html#ga08a698700458d9311390997b57fbf8dc)
|
||||
- [ ] [cv::cuda::demosaicing](https://docs.opencv.org/master/db/d8c/group__cudaimgproc__color.html#ga7fb153572b573ebd2d7610fcbe64166e)
|
||||
- [ ] [cv::cuda::gammaCorrection](https://docs.opencv.org/master/db/d8c/group__cudaimgproc__color.html#gaf4195a8409c3b8fbfa37295c2b2c4729)
|
||||
- [ ] [cv::cuda::swapChannels](https://docs.opencv.org/master/db/d8c/group__cudaimgproc__color.html#ga75a29cc4a97cde0d43ea066b01de927e)
|
||||
- [ ] [cv::cuda::calcHist](https://docs.opencv.org/master/d8/d0e/group__cudaimgproc__hist.html#gaaf3944106890947020bb4522a7619c26)
|
||||
- [ ] [cv::cuda::CLAHE](https://docs.opencv.org/master/db/d79/classcv_1_1cuda_1_1CLAHE.html)
|
||||
- [ ] [cv::cuda::equalizeHist](https://docs.opencv.org/master/d8/d0e/group__cudaimgproc__hist.html#ga2384be74bd2feba7e6c46815513f0060)
|
||||
- [ ] [cv::cuda::evenLevels](https://docs.opencv.org/master/d8/d0e/group__cudaimgproc__hist.html#ga2f2cbd21dc6d7367a7c4ee1a826f389d)
|
||||
- [ ] [cv::cuda::histEven](https://docs.opencv.org/master/d8/d0e/group__cudaimgproc__hist.html#gacd3b14279fb77a57a510cb8c89a1856f)
|
||||
- [ ] [cv::cuda::histRange](https://docs.opencv.org/master/d8/d0e/group__cudaimgproc__hist.html#ga87819085c1059186d9cdeacd92cea783)
|
||||
- [ ] [cv::cuda::HoughCirclesDetector](https://docs.opencv.org/master/da/d80/classcv_1_1cuda_1_1HoughCirclesDetector.html)
|
||||
- [ ] [cv::cuda::createGoodFeaturesToTrackDetector](https://docs.opencv.org/master/dc/d6d/group__cudaimgproc__feature.html#ga478b474a598ece101f7e706fee2c8e91)
|
||||
- [ ] [cv::cuda::createHarrisCorner](https://docs.opencv.org/master/dc/d6d/group__cudaimgproc__feature.html#ga3e5878a803e9bba51added0c10101979)
|
||||
- [ ] [cv::cuda::createMinEigenValCorner](https://docs.opencv.org/master/dc/d6d/group__cudaimgproc__feature.html#ga7457fd4b53b025f990b1c1dd1b749915)
|
||||
- [ ] [cv::cuda::bilateralFilter](https://docs.opencv.org/master/d0/d05/group__cudaimgproc.html#ga6abeaecdd4e7edc0bd1393a04f4f20bd)
|
||||
- [ ] [cv::cuda::blendLinear](https://docs.opencv.org/master/d0/d05/group__cudaimgproc.html#ga4793607e5729bcc15b27ea33d9fe335e)
|
||||
- [ ] [cv::cuda::meanShiftFiltering](https://docs.opencv.org/master/d0/d05/group__cudaimgproc.html#gae13b3035bc6df0e512d876dbb8c00555)
|
||||
- [ ] [cv::cuda::meanShiftProc](https://docs.opencv.org/master/d0/d05/group__cudaimgproc.html#ga6039dc8ecbe2f912bc83fcc9b3bcca39)
|
||||
- [ ] [cv::cuda::meanShiftSegmentation](https://docs.opencv.org/master/d0/d05/group__cudaimgproc.html#ga70ed80533a448829dc48cf22b1845c16)
|
||||
|
||||
- [X] **cudaobjdetect. Object Detection**
|
||||
|
||||
- [ ] **cudaoptflow. Optical Flow - WORK STARTED** The following functions still need implementation:
|
||||
- [ ] [BroxOpticalFlow](https://docs.opencv.org/master/d7/d18/classcv_1_1cuda_1_1BroxOpticalFlow.html)
|
||||
- [ ] [DenseOpticalFlow](https://docs.opencv.org/master/d6/d4a/classcv_1_1cuda_1_1DenseOpticalFlow.html)
|
||||
- [ ] [DensePyrLKOpticalFlow](https://docs.opencv.org/master/d0/da4/classcv_1_1cuda_1_1DensePyrLKOpticalFlow.html)
|
||||
@@ -218,16 +338,20 @@ Your pull requests will be greatly appreciated!
|
||||
- [ ] [SparseOpticalFlow](https://docs.opencv.org/master/d5/dcf/classcv_1_1cuda_1_1SparseOpticalFlow.html)
|
||||
- [ ] **[SparsePyrLKOpticalFlow](https://docs.opencv.org/master/d7/d05/classcv_1_1cuda_1_1SparsePyrLKOpticalFlow.html) - WORK STARTED**
|
||||
|
||||
- [ ] cudastereo. Stereo Correspondence
|
||||
- [ ] **cudastereo** Stereo Correspondence
|
||||
- [ ] [cv::cuda::createDisparityBilateralFilter](https://docs.opencv.org/master/dd/d47/group__cudastereo.html#gaafb5f9902f7a9e74cb2cd4e680569590)
|
||||
- [ ] [cv::cuda::createStereoBeliefPropagation](https://docs.opencv.org/master/dd/d47/group__cudastereo.html#ga8d22dd80bdfb4e3d7d2ac09e8a07c22b)
|
||||
- [ ] [cv::cuda::createStereoBM](https://docs.opencv.org/master/dd/d47/group__cudastereo.html#ga77edc901350dd0a7f46ec5aca4138039)
|
||||
- [ ] [cv::cuda::createStereoConstantSpaceBP](https://docs.opencv.org/master/dd/d47/group__cudastereo.html#gaec3b49c7cf9f7701a6f549a227be4df2)
|
||||
- [ ] [cv::cuda::createStereoSGM](https://docs.opencv.org/master/dd/d47/group__cudastereo.html#gafb7e5284de5f488d664c3155acb12c93)
|
||||
- [ ] [cv::cuda::drawColorDisp](https://docs.opencv.org/master/dd/d47/group__cudastereo.html#ga469b23a77938dd7c06861e59cecc08c5)
|
||||
- [ ] [cv::cuda::reprojectImageTo3D](https://docs.opencv.org/master/dd/d47/group__cudastereo.html#gaff851e3932da0f3e74d1be1d8855f094)
|
||||
|
||||
- [X] **cudawarping. Image Warping**
|
||||
- [ ] cudev. Device layer
|
||||
- [ ] shape. Shape Distance and Matching
|
||||
- [ ] superres. Super Resolution
|
||||
- [ ] videostab. Video Stabilization
|
||||
- [ ] viz. 3D Visualizer
|
||||
|
||||
## Contrib modules list
|
||||
|
||||
- [ ] alphamat. Alpha Matting
|
||||
- [ ] aruco. ArUco Marker Detection
|
||||
- [X] **bgsegm. Improved Background-Foreground Segmentation Methods - WORK STARTED**
|
||||
- [ ] bioinspired. Biologically inspired vision models and derived tools
|
||||
@@ -236,27 +360,37 @@ Your pull requests will be greatly appreciated!
|
||||
- [ ] cvv. GUI for Interactive Visual Debugging of Computer Vision Programs
|
||||
- [ ] datasets. Framework for working with different datasets
|
||||
- [ ] dnn_modern. Deep Learning Modern Module
|
||||
- [ ] dnn_objdetect. DNN used for object detection
|
||||
- [ ] dnn_superres. DNN used for super resolution
|
||||
- [ ] dpm. Deformable Part-based Models
|
||||
- [ ] **face. Face Recognition - WORK STARTED**
|
||||
- [ ] freetype. Drawing UTF-8 strings with freetype/harfbuzz
|
||||
- [ ] fuzzy. Image processing based on fuzzy mathematics
|
||||
- [ ] hdf. Hierarchical Data Format I/O routines
|
||||
- [ ] hfs. Hierarchical Feature Selection for Efficient Image Segmentation
|
||||
- [X] **img_hash. The module brings implementations of different image hashing algorithms.**
|
||||
- [ ] intensity_transform. The module brings implementations of intensity transformation algorithms to adjust image contrast.
|
||||
- [ ] line_descriptor. Binary descriptors for lines extracted from an image
|
||||
- [ ] mcc. Macbeth Chart module
|
||||
- [ ] matlab. MATLAB Bridge
|
||||
- [ ] optflow. Optical Flow Algorithms
|
||||
- [ ] ovis. OGRE 3D Visualiser
|
||||
- [ ] phase_unwrapping. Phase Unwrapping API
|
||||
- [ ] plot. Plot function for Mat data
|
||||
- [ ] reg. Image Registration
|
||||
- [ ] rgbd. RGB-Depth Processing
|
||||
- [ ] saliency. Saliency API
|
||||
- [ ] sfm. Structure From Motion
|
||||
- [ ] shape. Shape Distance and Matching
|
||||
- [ ] stereo. Stereo Correspondence Algorithms
|
||||
- [ ] structured_light. Structured Light API
|
||||
- [ ] superres. Super Resolution
|
||||
- [ ] surface_matching. Surface Matching
|
||||
- [ ] text. Scene Text Detection and Recognition
|
||||
- [ ] **tracking. Tracking API - WORK STARTED**
|
||||
- [ ] videostab. Video Stabilization
|
||||
- [ ] viz. 3D Visualizer
|
||||
- [ ] **xfeatures2d. Extra 2D Features Framework - WORK STARTED**
|
||||
- [ ] ximgproc. Extended Image Processing
|
||||
- [ ] xobjdetect. Extended object detection
|
||||
- [ ] xphoto. Additional photo processing algorithms
|
||||
- [ ] **xphoto. Additional photo processing algorithms - WORK STARTED**
|
||||
|
||||
5
vendor/gocv.io/x/gocv/appveyor.yml
generated
vendored
5
vendor/gocv.io/x/gocv/appveyor.yml
generated
vendored
@@ -8,7 +8,7 @@ platform:
|
||||
environment:
|
||||
GOPATH: c:\gopath
|
||||
GOROOT: c:\go
|
||||
GOVERSION: 1.13
|
||||
GOVERSION: 1.16
|
||||
TEST_EXTERNAL: 1
|
||||
APPVEYOR_SAVE_CACHE_ON_ERROR: true
|
||||
|
||||
@@ -18,7 +18,7 @@ cache:
|
||||
install:
|
||||
- if not exist "C:\opencv" appveyor_build_opencv.cmd
|
||||
- set PATH=C:\Perl\site\bin;C:\Perl\bin;C:\Windows\system32;C:\Windows;C:\Windows\System32\Wbem;C:\Windows\System32\WindowsPowerShell\v1.0\;C:\Program Files\7-Zip;C:\Program Files\Microsoft\Web Platform Installer\;C:\Tools\PsTools;C:\Program Files (x86)\CMake\bin;C:\go\bin;C:\Tools\NuGet;C:\Program Files\LLVM\bin;C:\Tools\curl\bin;C:\ProgramData\chocolatey\bin;C:\Program Files (x86)\Yarn\bin;C:\Users\appveyor\AppData\Local\Yarn\bin;C:\Program Files\AppVeyor\BuildAgent\
|
||||
- set PATH=%PATH%;C:\mingw-w64\x86_64-6.3.0-posix-seh-rt_v5-rev1\mingw64\bin
|
||||
- set PATH=%PATH%;C:\mingw-w64\x86_64-7.3.0-posix-seh-rt_v5-rev0\mingw64\bin
|
||||
- set PATH=%PATH%;C:\Tools\GitVersion;C:\Program Files\Git LFS;C:\Program Files\Git\cmd;C:\Program Files\Git\usr\bin;C:\opencv\build\install\x64\mingw\bin;
|
||||
- echo %PATH%
|
||||
- echo %GOPATH%
|
||||
@@ -27,6 +27,7 @@ install:
|
||||
- go get -d .
|
||||
- set GOCV_CAFFE_TEST_FILES=C:\opencv\testdata
|
||||
- set GOCV_TENSORFLOW_TEST_FILES=C:\opencv\testdata
|
||||
- set GOCV_ONNX_TEST_FILES=C:\opencv\testdata
|
||||
- set OPENCV_ENABLE_NONFREE=ON
|
||||
- go env
|
||||
|
||||
|
||||
22
vendor/gocv.io/x/gocv/appveyor_build_opencv.cmd
generated
vendored
22
vendor/gocv.io/x/gocv/appveyor_build_opencv.cmd
generated
vendored
@@ -1,23 +1,25 @@
|
||||
|
||||
if not exist "C:\opencv" mkdir "C:\opencv"
|
||||
if not exist "C:\opencv\build" mkdir "C:\opencv\build"
|
||||
if not exist "C:\opencv\testdata" mkdir "C:\opencv\testdata"
|
||||
|
||||
appveyor DownloadFile https://github.com/opencv/opencv/archive/4.2.0.zip -FileName c:\opencv\opencv-4.2.0.zip
|
||||
7z x c:\opencv\opencv-4.2.0.zip -oc:\opencv -y
|
||||
del c:\opencv\opencv-4.2.0.zip /q
|
||||
appveyor DownloadFile https://github.com/opencv/opencv_contrib/archive/4.2.0.zip -FileName c:\opencv\opencv_contrib-4.2.0.zip
|
||||
7z x c:\opencv\opencv_contrib-4.2.0.zip -oc:\opencv -y
|
||||
del c:\opencv\opencv_contrib-4.2.0.zip /q
|
||||
appveyor DownloadFile https://github.com/opencv/opencv/archive/4.5.3.zip -FileName c:\opencv\opencv-4.5.3.zip
|
||||
7z x c:\opencv\opencv-4.5.3.zip -oc:\opencv -y
|
||||
del c:\opencv\opencv-4.5.3.zip /q
|
||||
appveyor DownloadFile https://github.com/opencv/opencv_contrib/archive/4.5.3.zip -FileName c:\opencv\opencv_contrib-4.5.3.zip
|
||||
7z x c:\opencv\opencv_contrib-4.5.3.zip -oc:\opencv -y
|
||||
del c:\opencv\opencv_contrib-4.5.3.zip /q
|
||||
cd C:\opencv\build
|
||||
set PATH=C:\Perl\site\bin;C:\Perl\bin;C:\Windows\system32;C:\Windows;C:\Windows\System32\Wbem;C:\Windows\System32\WindowsPowerShell\v1.0\;C:\Program Files\7-Zip;C:\Program Files\Microsoft\Web Platform Installer\;C:\Tools\PsTools;C:\Program Files (x86)\CMake\bin;C:\go\bin;C:\Tools\NuGet;C:\Program Files\LLVM\bin;C:\Tools\curl\bin;C:\ProgramData\chocolatey\bin;C:\Program Files (x86)\Yarn\bin;C:\Users\appveyor\AppData\Local\Yarn\bin;C:\Program Files\AppVeyor\BuildAgent\
|
||||
set PATH=%PATH%;C:\mingw-w64\x86_64-6.3.0-posix-seh-rt_v5-rev1\mingw64\bin
|
||||
set PATH=%PATH%;C:\mingw-w64\x86_64-7.3.0-posix-seh-rt_v5-rev0\mingw64\bin
|
||||
dir C:\opencv
|
||||
cmake C:\opencv\opencv-4.2.0 -G "MinGW Makefiles" -BC:\opencv\build -DENABLE_CXX11=ON -DOPENCV_EXTRA_MODULES_PATH=C:\opencv\opencv_contrib-4.2.0\modules -DBUILD_SHARED_LIBS=ON -DWITH_IPP=OFF -DWITH_MSMF=OFF -DBUILD_EXAMPLES=OFF -DBUILD_TESTS=OFF -DBUILD_PERF_TESTS=OFF -DBUILD_opencv_java=OFF -DBUILD_opencv_python=OFF -DBUILD_opencv_python2=OFF -DBUILD_opencv_python3=OFF -DBUILD_DOCS=OFF -DENABLE_PRECOMPILED_HEADERS=OFF -DBUILD_opencv_saliency=OFF -DCPU_DISPATCH= -DBUILD_opencv_gapi=OFF -DOPENCV_GENERATE_PKGCONFIG=ON -DOPENCV_ENABLE_NONFREE=ON -DWITH_OPENCL_D3D11_NV=OFF -Wno-dev
|
||||
cmake C:\opencv\opencv-4.5.3 -G "MinGW Makefiles" -BC:\opencv\build -DENABLE_CXX11=ON -DOPENCV_EXTRA_MODULES_PATH=C:\opencv\opencv_contrib-4.5.3\modules -DBUILD_SHARED_LIBS=ON -DWITH_IPP=OFF -DWITH_MSMF=OFF -DBUILD_EXAMPLES=OFF -DBUILD_TESTS=OFF -DBUILD_PERF_TESTS=OFF -DBUILD_opencv_java=OFF -DBUILD_opencv_python=OFF -DBUILD_opencv_python2=OFF -DBUILD_opencv_python3=OFF -DBUILD_DOCS=OFF -DENABLE_PRECOMPILED_HEADERS=OFF -DBUILD_opencv_saliency=OFF -DBUILD_opencv_wechat_qrcode=OFF -DCPU_DISPATCH= -DBUILD_opencv_gapi=OFF -DOPENCV_GENERATE_PKGCONFIG=ON -DOPENCV_ENABLE_NONFREE=ON -DWITH_OPENCL_D3D11_NV=OFF -DOPENCV_ALLOCATOR_STATS_COUNTER_TYPE=int64_t -Wno-dev
|
||||
mingw32-make -j%NUMBER_OF_PROCESSORS%
|
||||
mingw32-make install
|
||||
appveyor DownloadFile https://raw.githubusercontent.com/opencv/opencv_extra/master/testdata/dnn/bvlc_googlenet.prototxt -FileName C:\opencv\testdata\bvlc_googlenet.prototxt
|
||||
appveyor DownloadFile http://dl.caffe.berkeleyvision.org/bvlc_googlenet.caffemodel -FileName C:\opencv\testdata\bvlc_googlenet.caffemodel
|
||||
appveyor DownloadFile https://storage.googleapis.com/download.tensorflow.org/models/inception5h.zip -FileName C:\opencv\testdata\inception5h.zip
|
||||
appveyor DownloadFile https://github.com/onnx/models/raw/master/vision/classification/inception_and_googlenet/googlenet/model/googlenet-9.onnx -FileName C:\opencv\testdata\googlenet-9.onnx
|
||||
7z x C:\opencv\testdata\inception5h.zip -oC:\opencv\testdata tensorflow_inception_graph.pb -y
|
||||
rmdir c:\opencv\opencv-4.2.0 /s /q
|
||||
rmdir c:\opencv\opencv_contrib-4.2.0 /s /q
|
||||
rmdir c:\opencv\opencv-4.5.3 /s /q
|
||||
rmdir c:\opencv\opencv_contrib-4.5.3 /s /q
|
||||
|
||||
27
vendor/gocv.io/x/gocv/calib3d.cpp
generated
vendored
27
vendor/gocv.io/x/gocv/calib3d.cpp
generated
vendored
@@ -10,6 +10,16 @@ void Fisheye_UndistortImageWithParams(Mat distorted, Mat undistorted, Mat k, Mat
|
||||
cv::fisheye::undistortImage(*distorted, *undistorted, *k, *d, *knew, sz);
|
||||
}
|
||||
|
||||
void Fisheye_UndistortPoints(Mat distorted, Mat undistorted, Mat k, Mat d, Mat r, Mat p) {
|
||||
cv::fisheye::undistortPoints(*distorted, *undistorted, *k, *d, *r, *p);
|
||||
}
|
||||
|
||||
void Fisheye_EstimateNewCameraMatrixForUndistortRectify(Mat k, Mat d, Size imgSize, Mat r, Mat p, double balance, Size newSize, double fovScale) {
|
||||
cv::Size newSz(newSize.width, newSize.height);
|
||||
cv::Size imgSz(imgSize.width, imgSize.height);
|
||||
cv::fisheye::estimateNewCameraMatrixForUndistortRectify(*k, *d, imgSz, *r, *p, balance, newSz, fovScale);
|
||||
}
|
||||
|
||||
void InitUndistortRectifyMap(Mat cameraMatrix,Mat distCoeffs,Mat r,Mat newCameraMatrix,Size size,int m1type,Mat map1,Mat map2) {
|
||||
cv::Size sz(size.width, size.height);
|
||||
cv::initUndistortRectifyMap(*cameraMatrix,*distCoeffs,*r,*newCameraMatrix,sz,m1type,*map1,*map2);
|
||||
@@ -31,3 +41,20 @@ void Undistort(Mat src, Mat dst, Mat cameraMatrix, Mat distCoeffs, Mat newCamera
|
||||
cv::undistort(*src, *dst, *cameraMatrix, *distCoeffs, *newCameraMatrix);
|
||||
}
|
||||
|
||||
void UndistortPoints(Mat distorted, Mat undistorted, Mat k, Mat d, Mat r, Mat p) {
|
||||
cv::undistortPoints(*distorted, *undistorted, *k, *d, *r, *p);
|
||||
}
|
||||
|
||||
bool FindChessboardCorners(Mat image, Size patternSize, Mat corners, int flags) {
|
||||
cv::Size sz(patternSize.width, patternSize.height);
|
||||
return cv::findChessboardCorners(*image, sz, *corners, flags);
|
||||
}
|
||||
|
||||
void DrawChessboardCorners(Mat image, Size patternSize, Mat corners, bool patternWasFound) {
|
||||
cv::Size sz(patternSize.width, patternSize.height);
|
||||
cv::drawChessboardCorners(*image, sz, *corners, patternWasFound);
|
||||
}
|
||||
|
||||
Mat EstimateAffinePartial2D(Point2fVector from, Point2fVector to) {
|
||||
return new cv::Mat(cv::estimateAffinePartial2D(*from, *to));
|
||||
}
|
||||
|
||||
88
vendor/gocv.io/x/gocv/calib3d.go
generated
vendored
88
vendor/gocv.io/x/gocv/calib3d.go
generated
vendored
@@ -67,6 +67,30 @@ func FisheyeUndistortImageWithParams(distorted Mat, undistorted *Mat, k, d, knew
|
||||
C.Fisheye_UndistortImageWithParams(distorted.Ptr(), undistorted.Ptr(), k.Ptr(), d.Ptr(), knew.Ptr(), sz)
|
||||
}
|
||||
|
||||
// FisheyeUndistortPoints transforms points to compensate for fisheye lens distortion
|
||||
//
|
||||
// For further details, please see:
|
||||
// https://docs.opencv.org/master/db/d58/group__calib3d__fisheye.html#gab738cdf90ceee97b2b52b0d0e7511541
|
||||
func FisheyeUndistortPoints(distorted Mat, undistorted *Mat, k, d, r, p Mat) {
|
||||
C.Fisheye_UndistortPoints(distorted.Ptr(), undistorted.Ptr(), k.Ptr(), d.Ptr(), r.Ptr(), p.Ptr())
|
||||
}
|
||||
|
||||
// EstimateNewCameraMatrixForUndistortRectify estimates new camera matrix for undistortion or rectification.
|
||||
//
|
||||
// For further details, please see:
|
||||
// https://docs.opencv.org/master/db/d58/group__calib3d__fisheye.html#ga384940fdf04c03e362e94b6eb9b673c9
|
||||
func EstimateNewCameraMatrixForUndistortRectify(k, d Mat, imgSize image.Point, r Mat, p *Mat, balance float64, newSize image.Point, fovScale float64) {
|
||||
imgSz := C.struct_Size{
|
||||
width: C.int(imgSize.X),
|
||||
height: C.int(imgSize.Y),
|
||||
}
|
||||
newSz := C.struct_Size{
|
||||
width: C.int(newSize.X),
|
||||
height: C.int(newSize.Y),
|
||||
}
|
||||
C.Fisheye_EstimateNewCameraMatrixForUndistortRectify(k.Ptr(), d.Ptr(), imgSz, r.Ptr(), p.Ptr(), C.double(balance), newSz, C.double(fovScale))
|
||||
}
|
||||
|
||||
// InitUndistortRectifyMap computes the joint undistortion and rectification transformation and represents the result in the form of maps for remap
|
||||
//
|
||||
// For further details, please see:
|
||||
@@ -101,3 +125,67 @@ func GetOptimalNewCameraMatrixWithParams(cameraMatrix Mat, distCoeffs Mat, image
|
||||
func Undistort(src Mat, dst *Mat, cameraMatrix Mat, distCoeffs Mat, newCameraMatrix Mat) {
|
||||
C.Undistort(src.Ptr(), dst.Ptr(), cameraMatrix.Ptr(), distCoeffs.Ptr(), newCameraMatrix.Ptr())
|
||||
}
|
||||
|
||||
// UndistortPoints transforms points to compensate for lens distortion
|
||||
//
|
||||
// For further details, please see:
|
||||
// https://docs.opencv.org/master/d9/d0c/group__calib3d.html#ga55c716492470bfe86b0ee9bf3a1f0f7e
|
||||
func UndistortPoints(src Mat, dst *Mat, cameraMatrix, distCoeffs, rectificationTransform, newCameraMatrix Mat) {
|
||||
C.UndistortPoints(src.Ptr(), dst.Ptr(), cameraMatrix.Ptr(), distCoeffs.Ptr(), rectificationTransform.Ptr(), newCameraMatrix.Ptr())
|
||||
}
|
||||
|
||||
// CalibCBFlag value for chessboard calibration
|
||||
// For more details, please see:
|
||||
// https://docs.opencv.org/master/d9/d0c/group__calib3d.html#ga93efa9b0aa890de240ca32b11253dd4a
|
||||
type CalibCBFlag int
|
||||
|
||||
const (
|
||||
// Various operation flags that can be zero or a combination of the following values:
|
||||
// Use adaptive thresholding to convert the image to black and white, rather than a fixed threshold level (computed from the average image brightness).
|
||||
CalibCBAdaptiveThresh CalibCBFlag = 1 << iota
|
||||
// Normalize the image gamma with equalizeHist before applying fixed or adaptive thresholding.
|
||||
CalibCBNormalizeImage
|
||||
// Use additional criteria (like contour area, perimeter, square-like shape) to filter out false quads extracted at the contour retrieval stage.
|
||||
CalibCBFilterQuads
|
||||
// Run a fast check on the image that looks for chessboard corners, and shortcut the call if none is found. This can drastically speed up the call in the degenerate condition when no chessboard is observed.
|
||||
CalibCBFastCheck
|
||||
CalibCBExhaustive
|
||||
CalibCBAccuracy
|
||||
CalibCBLarger
|
||||
CalibCBMarker
|
||||
)
|
||||
|
||||
// FindChessboardCorners finds the positions of internal corners of the chessboard.
|
||||
//
|
||||
// For further details, please see:
|
||||
// https://docs.opencv.org/master/d9/d0c/group__calib3d.html#ga93efa9b0aa890de240ca32b11253dd4a
|
||||
//
|
||||
func FindChessboardCorners(image Mat, patternSize image.Point, corners *Mat, flags CalibCBFlag) bool {
|
||||
sz := C.struct_Size{
|
||||
width: C.int(patternSize.X),
|
||||
height: C.int(patternSize.Y),
|
||||
}
|
||||
return bool(C.FindChessboardCorners(image.Ptr(), sz, corners.Ptr(), C.int(flags)))
|
||||
}
|
||||
|
||||
// DrawChessboardCorners renders the detected chessboard corners.
|
||||
//
|
||||
// For further details, please see:
|
||||
// https://docs.opencv.org/master/d9/d0c/group__calib3d.html#ga6a10b0bb120c4907e5eabbcd22319022
|
||||
//
|
||||
func DrawChessboardCorners(image *Mat, patternSize image.Point, corners Mat, patternWasFound bool) {
|
||||
sz := C.struct_Size{
|
||||
width: C.int(patternSize.X),
|
||||
height: C.int(patternSize.Y),
|
||||
}
|
||||
C.DrawChessboardCorners(image.Ptr(), sz, corners.Ptr(), C.bool(patternWasFound))
|
||||
}
|
||||
|
||||
// EstimateAffinePartial2D computes an optimal limited affine transformation
|
||||
// with 4 degrees of freedom between two 2D point sets.
|
||||
//
|
||||
// For further details, please see:
|
||||
// https://docs.opencv.org/master/d9/d0c/group__calib3d.html#gad767faff73e9cbd8b9d92b955b50062d
|
||||
func EstimateAffinePartial2D(from, to Point2fVector) Mat {
|
||||
return newMat(C.EstimateAffinePartial2D(from.p, to.p))
|
||||
}
|
||||
|
||||
8
vendor/gocv.io/x/gocv/calib3d.h
generated
vendored
8
vendor/gocv.io/x/gocv/calib3d.h
generated
vendored
@@ -14,12 +14,18 @@ extern "C" {
|
||||
//Calib
|
||||
void Fisheye_UndistortImage(Mat distorted, Mat undistorted, Mat k, Mat d);
|
||||
void Fisheye_UndistortImageWithParams(Mat distorted, Mat undistorted, Mat k, Mat d, Mat knew, Size size);
|
||||
void Fisheye_UndistortPoints(Mat distorted, Mat undistorted, Mat k, Mat d, Mat R, Mat P);
|
||||
void Fisheye_EstimateNewCameraMatrixForUndistortRectify(Mat k, Mat d, Size imgSize, Mat r, Mat p, double balance, Size newSize, double fovScale);
|
||||
|
||||
void InitUndistortRectifyMap(Mat cameraMatrix,Mat distCoeffs,Mat r,Mat newCameraMatrix,Size size,int m1type,Mat map1,Mat map2);
|
||||
Mat GetOptimalNewCameraMatrixWithParams(Mat cameraMatrix,Mat distCoeffs,Size size,double alpha,Size newImgSize,Rect* validPixROI,bool centerPrincipalPoint);
|
||||
void Undistort(Mat src, Mat dst, Mat cameraMatrix, Mat distCoeffs, Mat newCameraMatrix);
|
||||
void UndistortPoints(Mat distorted, Mat undistorted, Mat k, Mat d, Mat r, Mat p);
|
||||
bool FindChessboardCorners(Mat image, Size patternSize, Mat corners, int flags);
|
||||
void DrawChessboardCorners(Mat image, Size patternSize, Mat corners, bool patternWasFound);
|
||||
Mat EstimateAffinePartial2D(Point2fVector from, Point2fVector to);
|
||||
#ifdef __cplusplus
|
||||
}
|
||||
#endif
|
||||
|
||||
#endif //_OPENCV3_CALIB_H
|
||||
#endif //_OPENCV3_CALIB_H
|
||||
22
vendor/gocv.io/x/gocv/calib3d_string.go
generated
vendored
22
vendor/gocv.io/x/gocv/calib3d_string.go
generated
vendored
@@ -25,3 +25,25 @@ func (c CalibFlag) String() string {
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func (c CalibCBFlag) String() string {
|
||||
switch c {
|
||||
case CalibCBAdaptiveThresh:
|
||||
return "calib-cb-adaptive-thresh"
|
||||
case CalibCBNormalizeImage:
|
||||
return "calib-cb-normalize-image"
|
||||
case CalibCBFilterQuads:
|
||||
return "calib-cb-filter-quads"
|
||||
case CalibCBFastCheck:
|
||||
return "calib-cb-fast-check"
|
||||
case CalibCBExhaustive:
|
||||
return "calib-cb-exhaustive"
|
||||
case CalibCBAccuracy:
|
||||
return "calib-cb-accuracy"
|
||||
case CalibCBLarger:
|
||||
return "calib-cb-larger"
|
||||
case CalibCBMarker:
|
||||
return "calib-cb-marker"
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
4
vendor/gocv.io/x/gocv/cgo.go
generated
vendored
4
vendor/gocv.io/x/gocv/cgo.go
generated
vendored
@@ -1,4 +1,4 @@
|
||||
// +build !customenv,!openvino
|
||||
// +build !customenv,!static
|
||||
|
||||
package gocv
|
||||
|
||||
@@ -8,6 +8,6 @@ package gocv
|
||||
#cgo !windows pkg-config: opencv4
|
||||
#cgo CXXFLAGS: --std=c++11
|
||||
#cgo windows CPPFLAGS: -IC:/opencv/build/install/include
|
||||
#cgo windows LDFLAGS: -LC:/opencv/build/install/x64/mingw/lib -lopencv_core420 -lopencv_face420 -lopencv_videoio420 -lopencv_imgproc420 -lopencv_highgui420 -lopencv_imgcodecs420 -lopencv_objdetect420 -lopencv_features2d420 -lopencv_video420 -lopencv_dnn420 -lopencv_xfeatures2d420 -lopencv_plot420 -lopencv_tracking420 -lopencv_img_hash420 -lopencv_calib3d420
|
||||
#cgo windows LDFLAGS: -LC:/opencv/build/install/x64/mingw/lib -lopencv_core453 -lopencv_face453 -lopencv_videoio453 -lopencv_imgproc453 -lopencv_highgui453 -lopencv_imgcodecs453 -lopencv_objdetect453 -lopencv_features2d453 -lopencv_video453 -lopencv_dnn453 -lopencv_xfeatures2d453 -lopencv_plot453 -lopencv_tracking453 -lopencv_img_hash453 -lopencv_calib3d453 -lopencv_bgsegm453 -lopencv_photo453
|
||||
*/
|
||||
import "C"
|
||||
|
||||
12
vendor/gocv.io/x/gocv/cgo_static.go
generated
vendored
Normal file
12
vendor/gocv.io/x/gocv/cgo_static.go
generated
vendored
Normal file
@@ -0,0 +1,12 @@
|
||||
// +build !customenv,static,!windows
|
||||
|
||||
package gocv
|
||||
|
||||
// Changes here should be mirrored in contrib/cgo.go and cuda/cgo.go.
|
||||
|
||||
/*
|
||||
#cgo CXXFLAGS: --std=c++11
|
||||
#cgo CPPFLAGS: -I/usr/local/include -I/usr/local/include/opencv4
|
||||
#cgo LDFLAGS: -L/usr/local/lib -L/usr/local/lib/opencv4/3rdparty -lopencv_gapi -lopencv_stitching -lopencv_aruco -lopencv_bgsegm -lopencv_bioinspired -lopencv_ccalib -lopencv_dnn_objdetect -lopencv_dpm -lopencv_face -lopencv_fuzzy -lopencv_hfs -lopencv_img_hash -lopencv_line_descriptor -lopencv_quality -lopencv_reg -lopencv_rgbd -lopencv_saliency -lopencv_stereo -lopencv_structured_light -lopencv_phase_unwrapping -lopencv_superres -lopencv_optflow -lopencv_surface_matching -lopencv_tracking -lopencv_datasets -lopencv_text -lopencv_highgui -lopencv_dnn -lopencv_plot -lopencv_videostab -lopencv_video -lopencv_videoio -lopencv_xfeatures2d -lopencv_shape -lopencv_ml -lopencv_ximgproc -lopencv_xobjdetect -lopencv_objdetect -lopencv_calib3d -lopencv_imgcodecs -lopencv_features2d -lopencv_flann -lopencv_xphoto -lopencv_photo -lopencv_imgproc -lopencv_core -littnotify -llibprotobuf -lIlmImf -lquirc -lippiw -lippicv -lade -lz -ljpeg -ldl -lm -lpthread -lrt -lquadmath
|
||||
*/
|
||||
import "C"
|
||||
275
vendor/gocv.io/x/gocv/core.cpp
generated
vendored
275
vendor/gocv.io/x/gocv/core.cpp
generated
vendored
@@ -11,6 +11,15 @@ Mat Mat_NewWithSize(int rows, int cols, int type) {
|
||||
return new cv::Mat(rows, cols, type, 0.0);
|
||||
}
|
||||
|
||||
// Mat_NewWithSizes creates a new Mat with specific dimension sizes and number of channels.
|
||||
Mat Mat_NewWithSizes(struct IntVector sizes, int type) {
|
||||
std::vector<int> sizess;
|
||||
for (int i = 0; i < sizes.length; ++i) {
|
||||
sizess.push_back(sizes.val[i]);
|
||||
}
|
||||
return new cv::Mat(sizess, type);
|
||||
}
|
||||
|
||||
// Mat_NewFromScalar creates a new Mat from a Scalar. Intended to be used
|
||||
// for Mat comparison operation such as InRange.
|
||||
Mat Mat_NewFromScalar(Scalar ar, int type) {
|
||||
@@ -28,6 +37,42 @@ Mat Mat_NewFromBytes(int rows, int cols, int type, struct ByteArray buf) {
|
||||
return new cv::Mat(rows, cols, type, buf.data);
|
||||
}
|
||||
|
||||
// Mat_NewWithSizesFromScalar creates multidimensional Mat from a scalar
|
||||
Mat Mat_NewWithSizesFromScalar(IntVector sizes, int type, Scalar ar) {
|
||||
std::vector<int> _sizes;
|
||||
for (int i = 0, *v = sizes.val; i < sizes.length; ++v, ++i) {
|
||||
_sizes.push_back(*v);
|
||||
}
|
||||
|
||||
cv::Scalar c = cv::Scalar(ar.val1, ar.val2, ar.val3, ar.val4);
|
||||
return new cv::Mat(_sizes, type, c);
|
||||
}
|
||||
|
||||
// Mat_NewWithSizesFromBytes creates multidimensional Mat from a bytes
|
||||
Mat Mat_NewWithSizesFromBytes(IntVector sizes, int type, struct ByteArray buf) {
|
||||
std::vector<int> _sizes;
|
||||
for (int i = 0, *v = sizes.val; i < sizes.length; ++v, ++i) {
|
||||
_sizes.push_back(*v);
|
||||
}
|
||||
|
||||
return new cv::Mat(_sizes, type, buf.data);
|
||||
}
|
||||
|
||||
Mat Eye(int rows, int cols, int type) {
|
||||
cv::Mat temp = cv::Mat::eye(rows, cols, type);
|
||||
return new cv::Mat(rows, cols, type, temp.data);
|
||||
}
|
||||
|
||||
Mat Zeros(int rows, int cols, int type) {
|
||||
cv::Mat temp = cv::Mat::zeros(rows, cols, type);
|
||||
return new cv::Mat(rows, cols, type, temp.data);
|
||||
}
|
||||
|
||||
Mat Ones(int rows, int cols, int type) {
|
||||
cv::Mat temp = cv::Mat::ones(rows, cols, type);
|
||||
return new cv::Mat(rows, cols, type, temp.data);
|
||||
}
|
||||
|
||||
Mat Mat_FromPtr(Mat m, int rows, int cols, int type, int prow, int pcol) {
|
||||
return new cv::Mat(rows, cols, type, m->ptr(prow, pcol));
|
||||
}
|
||||
@@ -42,6 +87,11 @@ int Mat_Empty(Mat m) {
|
||||
return m->empty();
|
||||
}
|
||||
|
||||
// Mat_IsContinuous tests if a Mat is continuous
|
||||
bool Mat_IsContinuous(Mat m) {
|
||||
return m->isContinuous();
|
||||
}
|
||||
|
||||
// Mat_Clone returns a clone of this Mat
|
||||
Mat Mat_Clone(Mat m) {
|
||||
return new cv::Mat(m->clone());
|
||||
@@ -61,6 +111,10 @@ void Mat_ConvertTo(Mat m, Mat dst, int type) {
|
||||
m->convertTo(*dst, type);
|
||||
}
|
||||
|
||||
void Mat_ConvertToWithParams(Mat m, Mat dst, int type, float alpha, float beta) {
|
||||
m->convertTo(*dst, type, alpha, beta);
|
||||
}
|
||||
|
||||
// Mat_ToBytes returns the bytes representation of the underlying data.
|
||||
struct ByteArray Mat_ToBytes(Mat m) {
|
||||
return toByteArray(reinterpret_cast<const char*>(m->data), m->total() * m->elemSize());
|
||||
@@ -511,12 +565,9 @@ double KMeans(Mat data, int k, Mat bestLabels, TermCriteria criteria, int attemp
|
||||
return ret;
|
||||
}
|
||||
|
||||
double KMeansPoints(Contour points, int k, Mat bestLabels, TermCriteria criteria, int attempts, int flags, Mat centers) {
|
||||
double KMeansPoints(PointVector points, int k, Mat bestLabels, TermCriteria criteria, int attempts, int flags, Mat centers) {
|
||||
std::vector<cv::Point2f> pts;
|
||||
|
||||
for (size_t i = 0; i < points.length; i++) {
|
||||
pts.push_back(cv::Point2f(points.points[i].x, points.points[i].y));
|
||||
}
|
||||
copyPointVectorToPoint2fVector(points, &pts);
|
||||
double ret = cv::kmeans(pts, k, *bestLabels, *criteria, attempts, flags, *centers);
|
||||
return ret;
|
||||
}
|
||||
@@ -566,6 +617,28 @@ void Mat_MinMaxLoc(Mat m, double* minVal, double* maxVal, Point* minLoc, Point*
|
||||
maxLoc->y = cMaxLoc.y;
|
||||
}
|
||||
|
||||
void Mat_MixChannels(struct Mats src, struct Mats dst, struct IntVector fromTo) {
|
||||
std::vector<cv::Mat> srcMats;
|
||||
|
||||
for (int i = 0; i < src.length; ++i) {
|
||||
srcMats.push_back(*src.mats[i]);
|
||||
}
|
||||
|
||||
std::vector<cv::Mat> dstMats;
|
||||
|
||||
for (int i = 0; i < dst.length; ++i) {
|
||||
dstMats.push_back(*dst.mats[i]);
|
||||
}
|
||||
|
||||
std::vector<int> fromTos;
|
||||
|
||||
for (int i = 0; i < fromTo.length; ++i) {
|
||||
fromTos.push_back(fromTo.val[i]);
|
||||
}
|
||||
|
||||
cv::mixChannels(srcMats, dstMats, fromTos);
|
||||
}
|
||||
|
||||
void Mat_MulSpectrums(Mat a, Mat b, Mat c, int flags) {
|
||||
cv::mulSpectrums(*a, *b, *c, flags);
|
||||
}
|
||||
@@ -574,6 +647,10 @@ void Mat_Multiply(Mat src1, Mat src2, Mat dst) {
|
||||
cv::multiply(*src1, *src2, *dst);
|
||||
}
|
||||
|
||||
void Mat_MultiplyWithParams(Mat src1, Mat src2, Mat dst, double scale, int dtype) {
|
||||
cv::multiply(*src1, *src2, *dst, scale, dtype);
|
||||
}
|
||||
|
||||
void Mat_Normalize(Mat src, Mat dst, double alpha, double beta, int typ) {
|
||||
cv::normalize(*src, *dst, alpha, beta, typ);
|
||||
}
|
||||
@@ -582,6 +659,10 @@ double Norm(Mat src1, int normType) {
|
||||
return cv::norm(*src1, normType);
|
||||
}
|
||||
|
||||
double NormWithMats(Mat src1, Mat src2, int normType) {
|
||||
return cv::norm(*src1, *src2, normType);
|
||||
}
|
||||
|
||||
void Mat_PerspectiveTransform(Mat src, Mat dst, Mat tm) {
|
||||
cv::perspectiveTransform(*src, *dst, *tm);
|
||||
}
|
||||
@@ -692,6 +773,13 @@ void Contours_Close(struct Contours cs) {
|
||||
delete[] cs.contours;
|
||||
}
|
||||
|
||||
void CStrings_Close(struct CStrings cstrs) {
|
||||
for ( int i = 0; i < cstrs.length; i++ ) {
|
||||
delete [] cstrs.strs[i];
|
||||
}
|
||||
delete [] cstrs.strs;
|
||||
}
|
||||
|
||||
void KeyPoints_Close(struct KeyPoints ks) {
|
||||
delete[] ks.keypoints;
|
||||
}
|
||||
@@ -761,3 +849,180 @@ Mat Mat_colRange(Mat m,int startrow,int endrow) {
|
||||
return new cv::Mat(m->colRange(startrow,endrow));
|
||||
}
|
||||
|
||||
PointVector PointVector_New() {
|
||||
return new std::vector< cv::Point >;
|
||||
}
|
||||
|
||||
PointVector PointVector_NewFromPoints(Contour points) {
|
||||
std::vector<cv::Point>* cntr = new std::vector<cv::Point>;
|
||||
|
||||
for (size_t i = 0; i < points.length; i++) {
|
||||
cntr->push_back(cv::Point(points.points[i].x, points.points[i].y));
|
||||
}
|
||||
|
||||
return cntr;
|
||||
}
|
||||
|
||||
PointVector PointVector_NewFromMat(Mat mat) {
|
||||
std::vector<cv::Point>* pts = new std::vector<cv::Point>;
|
||||
*pts = (std::vector<cv::Point>) *mat;
|
||||
return pts;
|
||||
}
|
||||
|
||||
Point PointVector_At(PointVector pv, int idx) {
|
||||
cv::Point p = pv->at(idx);
|
||||
return Point{.x = p.x, .y = p.y};
|
||||
}
|
||||
|
||||
void PointVector_Append(PointVector pv, Point p) {
|
||||
pv->push_back(cv::Point(p.x, p.y));
|
||||
}
|
||||
|
||||
int PointVector_Size(PointVector p) {
|
||||
return p->size();
|
||||
}
|
||||
|
||||
void PointVector_Close(PointVector p) {
|
||||
p->clear();
|
||||
delete p;
|
||||
}
|
||||
|
||||
PointsVector PointsVector_New() {
|
||||
return new std::vector< std::vector< cv::Point > >;
|
||||
}
|
||||
|
||||
PointsVector PointsVector_NewFromPoints(Contours points) {
|
||||
std::vector< std::vector< cv::Point > >* pv = new std::vector< std::vector< cv::Point > >;
|
||||
|
||||
for (size_t i = 0; i < points.length; i++) {
|
||||
Contour contour = points.contours[i];
|
||||
|
||||
std::vector<cv::Point> cntr;
|
||||
|
||||
for (size_t i = 0; i < contour.length; i++) {
|
||||
cntr.push_back(cv::Point(contour.points[i].x, contour.points[i].y));
|
||||
}
|
||||
|
||||
pv->push_back(cntr);
|
||||
}
|
||||
|
||||
return pv;
|
||||
}
|
||||
|
||||
int PointsVector_Size(PointsVector ps) {
|
||||
return ps->size();
|
||||
}
|
||||
|
||||
PointVector PointsVector_At(PointsVector ps, int idx) {
|
||||
std::vector< cv::Point >* p = &(ps->at(idx));
|
||||
return p;
|
||||
}
|
||||
|
||||
void PointsVector_Append(PointsVector psv, PointVector pv) {
|
||||
psv->push_back(*pv);
|
||||
}
|
||||
|
||||
void PointsVector_Close(PointsVector ps) {
|
||||
ps->clear();
|
||||
delete ps;
|
||||
}
|
||||
|
||||
Point2fVector Point2fVector_New() {
|
||||
return new std::vector< cv::Point2f >;
|
||||
}
|
||||
|
||||
Point2fVector Point2fVector_NewFromPoints(Contour2f points) {
|
||||
std::vector<cv::Point2f>* cntr = new std::vector<cv::Point2f>;
|
||||
|
||||
for (size_t i = 0; i < points.length; i++) {
|
||||
cntr->push_back(cv::Point2f(points.points[i].x, points.points[i].y));
|
||||
}
|
||||
|
||||
return cntr;
|
||||
}
|
||||
|
||||
Point2fVector Point2fVector_NewFromMat(Mat mat) {
|
||||
std::vector<cv::Point2f>* pts = new std::vector<cv::Point2f>;
|
||||
*pts = (std::vector<cv::Point2f>) *mat;
|
||||
return pts;
|
||||
}
|
||||
|
||||
Point2f Point2fVector_At(Point2fVector pfv, int idx) {
|
||||
cv::Point2f p = pfv->at(idx);
|
||||
return Point2f{.x = p.x, .y = p.y};
|
||||
}
|
||||
|
||||
int Point2fVector_Size(Point2fVector pfv) {
|
||||
return pfv->size();
|
||||
}
|
||||
|
||||
void Point2fVector_Close(Point2fVector pv) {
|
||||
pv->clear();
|
||||
delete pv;
|
||||
}
|
||||
|
||||
void IntVector_Close(struct IntVector ivec) {
|
||||
delete[] ivec.val;
|
||||
}
|
||||
|
||||
RNG TheRNG() {
|
||||
return &cv::theRNG();
|
||||
}
|
||||
|
||||
void SetRNGSeed(int seed) {
|
||||
cv::setRNGSeed(seed);
|
||||
}
|
||||
|
||||
void RNG_Fill(RNG rng, Mat mat, int distType, double a, double b, bool saturateRange) {
|
||||
rng->fill(*mat, distType, a, b, saturateRange);
|
||||
}
|
||||
|
||||
double RNG_Gaussian(RNG rng, double sigma) {
|
||||
return rng->gaussian(sigma);
|
||||
}
|
||||
|
||||
unsigned int RNG_Next(RNG rng) {
|
||||
return rng->next();
|
||||
}
|
||||
|
||||
void RandN(Mat mat, Scalar mean, Scalar stddev) {
|
||||
cv::Scalar m = cv::Scalar(mean.val1, mean.val2, mean.val3, mean.val4);
|
||||
cv::Scalar s = cv::Scalar(stddev.val1, stddev.val2, stddev.val3, stddev.val4);
|
||||
cv::randn(*mat, m, s);
|
||||
}
|
||||
|
||||
void RandShuffle(Mat mat) {
|
||||
cv::randShuffle(*mat);
|
||||
}
|
||||
|
||||
void RandShuffleWithParams(Mat mat, double iterFactor, RNG rng) {
|
||||
cv::randShuffle(*mat, iterFactor, rng);
|
||||
}
|
||||
|
||||
void RandU(Mat mat, Scalar low, Scalar high) {
|
||||
cv::Scalar l = cv::Scalar(low.val1, low.val2, low.val3, low.val4);
|
||||
cv::Scalar h = cv::Scalar(high.val1, high.val2, high.val3, high.val4);
|
||||
cv::randn(*mat, l, h);
|
||||
}
|
||||
|
||||
void copyPointVectorToPoint2fVector(PointVector src, Point2fVector dest) {
|
||||
for (size_t i = 0; i < src->size(); i++) {
|
||||
dest->push_back(cv::Point2f(src->at(i).x, src->at(i).y));
|
||||
}
|
||||
}
|
||||
|
||||
void StdByteVectorInitialize(void* data) {
|
||||
new (data) std::vector<uchar>();
|
||||
}
|
||||
|
||||
void StdByteVectorFree(void *data) {
|
||||
reinterpret_cast<std::vector<uchar> *>(data)->~vector<uchar>();
|
||||
}
|
||||
|
||||
size_t StdByteVectorLen(void *data) {
|
||||
return reinterpret_cast<std::vector<uchar> *>(data)->size();
|
||||
}
|
||||
|
||||
uint8_t* StdByteVectorData(void *data) {
|
||||
return reinterpret_cast<std::vector<uchar> *>(data)->data();
|
||||
}
|
||||
|
||||
909
vendor/gocv.io/x/gocv/core.go
generated
vendored
909
vendor/gocv.io/x/gocv/core.go
generated
vendored
File diff suppressed because it is too large
Load Diff
82
vendor/gocv.io/x/gocv/core.h
generated
vendored
82
vendor/gocv.io/x/gocv/core.h
generated
vendored
@@ -56,9 +56,18 @@ typedef struct Points {
|
||||
int length;
|
||||
} Points;
|
||||
|
||||
// Wrapper for the vector of Point2f structs aka std::vector<Point2f>
|
||||
typedef struct Points2f {
|
||||
Point2f* points;
|
||||
int length;
|
||||
} Points2f;
|
||||
|
||||
// Contour is alias for Points
|
||||
typedef Points Contour;
|
||||
|
||||
// Contour2f is alias for Points2f
|
||||
typedef Points2f Contour2f;
|
||||
|
||||
// Wrapper for the vector of Points vectors aka std::vector< std::vector<Point> >
|
||||
typedef struct Contours {
|
||||
Contour* contours;
|
||||
@@ -87,7 +96,7 @@ typedef struct Size {
|
||||
|
||||
// Wrapper for an individual cv::RotatedRect
|
||||
typedef struct RotatedRect {
|
||||
Contour pts;
|
||||
Points pts;
|
||||
Rect boundingRect;
|
||||
Point center;
|
||||
Size size;
|
||||
@@ -195,9 +204,17 @@ typedef struct Moment {
|
||||
#ifdef __cplusplus
|
||||
typedef cv::Mat* Mat;
|
||||
typedef cv::TermCriteria* TermCriteria;
|
||||
typedef cv::RNG* RNG;
|
||||
typedef std::vector< cv::Point >* PointVector;
|
||||
typedef std::vector< std::vector< cv::Point > >* PointsVector;
|
||||
typedef std::vector< cv::Point2f >* Point2fVector;
|
||||
#else
|
||||
typedef void* Mat;
|
||||
typedef void* TermCriteria;
|
||||
typedef void* RNG;
|
||||
typedef void* PointVector;
|
||||
typedef void* PointsVector;
|
||||
typedef void* Point2fVector;
|
||||
#endif
|
||||
|
||||
// Wrapper for the vector of Mat aka std::vector<Mat>
|
||||
@@ -223,18 +240,23 @@ void MultiDMatches_Close(struct MultiDMatches mds);
|
||||
|
||||
Mat Mat_New();
|
||||
Mat Mat_NewWithSize(int rows, int cols, int type);
|
||||
Mat Mat_NewWithSizes(struct IntVector sizes, int type);
|
||||
Mat Mat_NewWithSizesFromScalar(IntVector sizes, int type, Scalar ar);
|
||||
Mat Mat_NewWithSizesFromBytes(IntVector sizes, int type, struct ByteArray buf);
|
||||
Mat Mat_NewFromScalar(const Scalar ar, int type);
|
||||
Mat Mat_NewWithSizeFromScalar(const Scalar ar, int rows, int cols, int type);
|
||||
Mat Mat_NewFromBytes(int rows, int cols, int type, struct ByteArray buf);
|
||||
Mat Mat_FromPtr(Mat m, int rows, int cols, int type, int prows, int pcols);
|
||||
void Mat_Close(Mat m);
|
||||
int Mat_Empty(Mat m);
|
||||
bool Mat_IsContinuous(Mat m);
|
||||
Mat Mat_Clone(Mat m);
|
||||
void Mat_CopyTo(Mat m, Mat dst);
|
||||
int Mat_Total(Mat m);
|
||||
void Mat_Size(Mat m, IntVector* res);
|
||||
void Mat_CopyToWithMask(Mat m, Mat dst, Mat mask);
|
||||
void Mat_ConvertTo(Mat m, Mat dst, int type);
|
||||
void Mat_ConvertToWithParams(Mat m, Mat dst, int type, float alpha, float beta);
|
||||
struct ByteArray Mat_ToBytes(Mat m);
|
||||
struct ByteArray Mat_DataPtr(Mat m);
|
||||
Mat Mat_Region(Mat m, Rect r);
|
||||
@@ -249,6 +271,9 @@ int Mat_Cols(Mat m);
|
||||
int Mat_Channels(Mat m);
|
||||
int Mat_Type(Mat m);
|
||||
int Mat_Step(Mat m);
|
||||
Mat Eye(int rows, int cols, int type);
|
||||
Mat Zeros(int rows, int cols, int type);
|
||||
Mat Ones(int rows, int cols, int type);
|
||||
|
||||
uint8_t Mat_GetUChar(Mat m, int row, int col);
|
||||
uint8_t Mat_GetUChar3(Mat m, int x, int y, int z);
|
||||
@@ -336,7 +361,7 @@ void Mat_InRangeWithScalar(Mat src, const Scalar lowerb, const Scalar upperb, Ma
|
||||
void Mat_InsertChannel(Mat src, Mat dst, int coi);
|
||||
double Mat_Invert(Mat src, Mat dst, int flags);
|
||||
double KMeans(Mat data, int k, Mat bestLabels, TermCriteria criteria, int attempts, int flags, Mat centers);
|
||||
double KMeansPoints(Contour points, int k, Mat bestLabels, TermCriteria criteria, int attempts, int flags, Mat centers);
|
||||
double KMeansPoints(PointVector pts, int k, Mat bestLabels, TermCriteria criteria, int attempts, int flags, Mat centers);
|
||||
void Mat_Log(Mat src, Mat dst);
|
||||
void Mat_Magnitude(Mat x, Mat y, Mat magnitude);
|
||||
void Mat_Max(Mat src1, Mat src2, Mat dst);
|
||||
@@ -345,11 +370,14 @@ void Mat_Merge(struct Mats mats, Mat dst);
|
||||
void Mat_Min(Mat src1, Mat src2, Mat dst);
|
||||
void Mat_MinMaxIdx(Mat m, double* minVal, double* maxVal, int* minIdx, int* maxIdx);
|
||||
void Mat_MinMaxLoc(Mat m, double* minVal, double* maxVal, Point* minLoc, Point* maxLoc);
|
||||
void Mat_MixChannels(struct Mats src, struct Mats dst, struct IntVector fromTo);
|
||||
void Mat_MulSpectrums(Mat a, Mat b, Mat c, int flags);
|
||||
void Mat_Multiply(Mat src1, Mat src2, Mat dst);
|
||||
void Mat_MultiplyWithParams(Mat src1, Mat src2, Mat dst, double scale, int dtype);
|
||||
void Mat_Subtract(Mat src1, Mat src2, Mat dst);
|
||||
void Mat_Normalize(Mat src, Mat dst, double alpha, double beta, int typ);
|
||||
double Norm(Mat src1, int normType);
|
||||
double NormWithMats(Mat src1, Mat src2, int normType);
|
||||
void Mat_PerspectiveTransform(Mat src, Mat dst, Mat tm);
|
||||
bool Mat_Solve(Mat src1, Mat src2, Mat dst, int flags);
|
||||
int Mat_SolveCubic(Mat coeffs, Mat roots);
|
||||
@@ -378,6 +406,56 @@ double GetTickFrequency();
|
||||
Mat Mat_rowRange(Mat m,int startrow,int endrow);
|
||||
Mat Mat_colRange(Mat m,int startrow,int endrow);
|
||||
|
||||
PointVector PointVector_New();
|
||||
PointVector PointVector_NewFromPoints(Contour points);
|
||||
PointVector PointVector_NewFromMat(Mat mat);
|
||||
Point PointVector_At(PointVector pv, int idx);
|
||||
void PointVector_Append(PointVector pv, Point p);
|
||||
int PointVector_Size(PointVector pv);
|
||||
void PointVector_Close(PointVector pv);
|
||||
|
||||
PointsVector PointsVector_New();
|
||||
PointsVector PointsVector_NewFromPoints(Contours points);
|
||||
PointVector PointsVector_At(PointsVector psv, int idx);
|
||||
void PointsVector_Append(PointsVector psv, PointVector pv);
|
||||
int PointsVector_Size(PointsVector psv);
|
||||
void PointsVector_Close(PointsVector psv);
|
||||
|
||||
Point2fVector Point2fVector_New();
|
||||
void Point2fVector_Close(Point2fVector pfv);
|
||||
Point2fVector Point2fVector_NewFromPoints(Contour2f pts);
|
||||
Point2fVector Point2fVector_NewFromMat(Mat mat);
|
||||
Point2f Point2fVector_At(Point2fVector pfv, int idx);
|
||||
int Point2fVector_Size(Point2fVector pfv);
|
||||
|
||||
void IntVector_Close(struct IntVector ivec);
|
||||
|
||||
void CStrings_Close(struct CStrings cstrs);
|
||||
|
||||
RNG TheRNG();
|
||||
|
||||
void SetRNGSeed(int seed);
|
||||
|
||||
void RNG_Fill(RNG rng, Mat mat, int distType, double a, double b, bool saturateRange);
|
||||
|
||||
double RNG_Gaussian(RNG rng, double sigma);
|
||||
|
||||
unsigned int RNG_Next(RNG rng);
|
||||
|
||||
void RandN(Mat mat, Scalar mean, Scalar stddev);
|
||||
|
||||
void RandShuffle(Mat mat);
|
||||
|
||||
void RandShuffleWithParams(Mat mat, double iterFactor, RNG rng);
|
||||
|
||||
void RandU(Mat mat, Scalar low, Scalar high);
|
||||
|
||||
void copyPointVectorToPoint2fVector(PointVector src, Point2fVector dest);
|
||||
|
||||
void StdByteVectorInitialize(void* data);
|
||||
void StdByteVectorFree(void *data);
|
||||
size_t StdByteVectorLen(void *data);
|
||||
uint8_t* StdByteVectorData(void *data);
|
||||
#ifdef __cplusplus
|
||||
}
|
||||
#endif
|
||||
|
||||
101
vendor/gocv.io/x/gocv/dnn.cpp
generated
vendored
101
vendor/gocv.io/x/gocv/dnn.cpp
generated
vendored
@@ -33,6 +33,21 @@ Net Net_ReadNetFromTensorflowBytes(struct ByteArray model) {
|
||||
return n;
|
||||
}
|
||||
|
||||
Net Net_ReadNetFromTorch(const char* model) {
|
||||
Net n = new cv::dnn::Net(cv::dnn::readNetFromTorch(model));
|
||||
return n;
|
||||
}
|
||||
|
||||
Net Net_ReadNetFromONNX(const char* model) {
|
||||
Net n = new cv::dnn::Net(cv::dnn::readNetFromONNX(model));
|
||||
return n;
|
||||
}
|
||||
|
||||
Net Net_ReadNetFromONNXBytes(struct ByteArray model) {
|
||||
Net n = new cv::dnn::Net(cv::dnn::readNetFromONNX(model.data, model.length));
|
||||
return n;
|
||||
}
|
||||
|
||||
void Net_Close(Net net) {
|
||||
delete net;
|
||||
}
|
||||
@@ -110,17 +125,9 @@ void Net_GetLayerNames(Net net, CStrings* names) {
|
||||
Mat Net_BlobFromImage(Mat image, double scalefactor, Size size, Scalar mean, bool swapRB,
|
||||
bool crop) {
|
||||
cv::Size sz(size.width, size.height);
|
||||
|
||||
// set the output ddepth to the input image depth
|
||||
int ddepth = image->depth();
|
||||
if (ddepth == CV_8U)
|
||||
{
|
||||
// no scalar mean adjustment allowed, so ignore
|
||||
return new cv::Mat(cv::dnn::blobFromImage(*image, scalefactor, sz, NULL, swapRB, crop, ddepth));
|
||||
}
|
||||
|
||||
cv::Scalar cm(mean.val1, mean.val2, mean.val3, mean.val4);
|
||||
return new cv::Mat(cv::dnn::blobFromImage(*image, scalefactor, sz, cm, swapRB, crop, ddepth));
|
||||
// use the default target ddepth here.
|
||||
return new cv::Mat(cv::dnn::blobFromImage(*image, scalefactor, sz, cm, swapRB, crop));
|
||||
}
|
||||
|
||||
void Net_BlobFromImages(struct Mats images, Mat blob, double scalefactor, Size size,
|
||||
@@ -134,8 +141,8 @@ void Net_BlobFromImages(struct Mats images, Mat blob, double scalefactor, Size s
|
||||
cv::Size sz(size.width, size.height);
|
||||
cv::Scalar cm = cv::Scalar(mean.val1, mean.val2, mean.val3, mean.val4);
|
||||
|
||||
// TODO: handle different version signatures of this function v2 vs v3.
|
||||
cv::dnn::blobFromImages(imgs, *blob, scalefactor, sz, cm, swapRB, crop, ddepth);
|
||||
// ignore the passed in ddepth, just use default.
|
||||
cv::dnn::blobFromImages(imgs, *blob, scalefactor, sz, cm, swapRB, crop);
|
||||
}
|
||||
|
||||
void Net_ImagesFromBlob(Mat blob_, struct Mats* images_) {
|
||||
@@ -187,3 +194,73 @@ const char* Layer_GetName(Layer layer) {
|
||||
const char* Layer_GetType(Layer layer) {
|
||||
return (*layer)->type.c_str();
|
||||
}
|
||||
|
||||
void NMSBoxes(struct Rects bboxes, FloatVector scores, float score_threshold, float nms_threshold, IntVector* indices) {
|
||||
std::vector<cv::Rect> _bboxes;
|
||||
|
||||
for (int i = 0; i < bboxes.length; ++i) {
|
||||
_bboxes.push_back(cv::Rect(
|
||||
bboxes.rects[i].x,
|
||||
bboxes.rects[i].y,
|
||||
bboxes.rects[i].width,
|
||||
bboxes.rects[i].height
|
||||
));
|
||||
}
|
||||
|
||||
std::vector<float> _scores;
|
||||
|
||||
float* f;
|
||||
int i;
|
||||
for (i = 0, f = scores.val; i < scores.length; ++f, ++i) {
|
||||
_scores.push_back(*f);
|
||||
}
|
||||
|
||||
std::vector<int> _indices(indices->length);
|
||||
|
||||
cv::dnn::NMSBoxes(_bboxes, _scores, score_threshold, nms_threshold, _indices, 1.f, 0);
|
||||
|
||||
int* ptr = new int[_indices.size()];
|
||||
|
||||
for (size_t i=0; i<_indices.size(); ++i) {
|
||||
ptr[i] = _indices[i];
|
||||
}
|
||||
|
||||
indices->length = _indices.size();
|
||||
indices->val = ptr;
|
||||
return;
|
||||
}
|
||||
|
||||
void NMSBoxesWithParams(struct Rects bboxes, FloatVector scores, const float score_threshold, const float nms_threshold, IntVector* indices, const float eta, const int top_k) {
|
||||
std::vector<cv::Rect> _bboxes;
|
||||
|
||||
for (int i = 0; i < bboxes.length; ++i) {
|
||||
_bboxes.push_back(cv::Rect(
|
||||
bboxes.rects[i].x,
|
||||
bboxes.rects[i].y,
|
||||
bboxes.rects[i].width,
|
||||
bboxes.rects[i].height
|
||||
));
|
||||
}
|
||||
|
||||
std::vector<float> _scores;
|
||||
|
||||
float* f;
|
||||
int i;
|
||||
for (i = 0, f = scores.val; i < scores.length; ++f, ++i) {
|
||||
_scores.push_back(*f);
|
||||
}
|
||||
|
||||
std::vector<int> _indices(indices->length);
|
||||
|
||||
cv::dnn::NMSBoxes(_bboxes, _scores, score_threshold, nms_threshold, _indices, eta, top_k);
|
||||
|
||||
int* ptr = new int[_indices.size()];
|
||||
|
||||
for (size_t i=0; i<_indices.size(); ++i) {
|
||||
ptr[i] = _indices[i];
|
||||
}
|
||||
|
||||
indices->length = _indices.size();
|
||||
indices->val = ptr;
|
||||
return;
|
||||
}
|
||||
171
vendor/gocv.io/x/gocv/dnn.go
generated
vendored
171
vendor/gocv.io/x/gocv/dnn.go
generated
vendored
@@ -39,6 +39,9 @@ const (
|
||||
|
||||
// NetBackendVKCOM is the Vulkan backend.
|
||||
NetBackendVKCOM NetBackendType = 4
|
||||
|
||||
// NetBackendCUDA is the Cuda backend.
|
||||
NetBackendCUDA NetBackendType = 5
|
||||
)
|
||||
|
||||
// ParseNetBackend returns a valid NetBackendType given a string. Valid values are:
|
||||
@@ -46,6 +49,7 @@ const (
|
||||
// - openvino
|
||||
// - opencv
|
||||
// - vulkan
|
||||
// - cuda
|
||||
// - default
|
||||
func ParseNetBackend(backend string) NetBackendType {
|
||||
switch backend {
|
||||
@@ -57,6 +61,8 @@ func ParseNetBackend(backend string) NetBackendType {
|
||||
return NetBackendOpenCV
|
||||
case "vulkan":
|
||||
return NetBackendVKCOM
|
||||
case "cuda":
|
||||
return NetBackendCUDA
|
||||
default:
|
||||
return NetBackendDefault
|
||||
}
|
||||
@@ -83,6 +89,12 @@ const (
|
||||
|
||||
// NetTargetFPGA is the FPGA target.
|
||||
NetTargetFPGA NetTargetType = 5
|
||||
|
||||
// NetTargetCUDA is the CUDA target.
|
||||
NetTargetCUDA NetTargetType = 6
|
||||
|
||||
// NetTargetCUDAFP16 is the CUDA target.
|
||||
NetTargetCUDAFP16 NetTargetType = 7
|
||||
)
|
||||
|
||||
// ParseNetTarget returns a valid NetTargetType given a string. Valid values are:
|
||||
@@ -92,6 +104,8 @@ const (
|
||||
// - vpu
|
||||
// - vulkan
|
||||
// - fpga
|
||||
// - cuda
|
||||
// - cudafp16
|
||||
func ParseNetTarget(target string) NetTargetType {
|
||||
switch target {
|
||||
case "cpu":
|
||||
@@ -106,6 +120,10 @@ func ParseNetTarget(target string) NetTargetType {
|
||||
return NetTargetVulkan
|
||||
case "fpga":
|
||||
return NetTargetFPGA
|
||||
case "cuda":
|
||||
return NetTargetCUDA
|
||||
case "cudafp16":
|
||||
return NetTargetCUDAFP16
|
||||
default:
|
||||
return NetTargetCPU
|
||||
}
|
||||
@@ -162,6 +180,7 @@ func (net *Net) ForwardLayers(outBlobNames []string) (blobs []Mat) {
|
||||
blobs = make([]Mat, cMats.length)
|
||||
for i := C.int(0); i < cMats.length; i++ {
|
||||
blobs[i].p = C.Mats_get(cMats, i)
|
||||
addMatToProfile(blobs[i].p)
|
||||
}
|
||||
return
|
||||
}
|
||||
@@ -274,6 +293,43 @@ func ReadNetFromTensorflowBytes(model []byte) (Net, error) {
|
||||
return Net{p: unsafe.Pointer(C.Net_ReadNetFromTensorflowBytes(*bModel))}, nil
|
||||
}
|
||||
|
||||
// ReadNetFromTorch reads a network model stored in Torch framework's format (t7).
|
||||
// check net.Empty() for read failure
|
||||
//
|
||||
// For further details, please see:
|
||||
// https://docs.opencv.org/master/d6/d0f/group__dnn.html#gaaaed8c8530e9e92fe6647700c13d961e
|
||||
//
|
||||
func ReadNetFromTorch(model string) Net {
|
||||
cmodel := C.CString(model)
|
||||
defer C.free(unsafe.Pointer(cmodel))
|
||||
return Net{p: unsafe.Pointer(C.Net_ReadNetFromTorch(cmodel))}
|
||||
}
|
||||
|
||||
// ReadNetFromONNX reads a network model stored in ONNX framework's format.
|
||||
// check net.Empty() for read failure
|
||||
//
|
||||
// For further details, please see:
|
||||
// https://docs.opencv.org/master/d6/d0f/group__dnn.html#ga7faea56041d10c71dbbd6746ca854197
|
||||
//
|
||||
func ReadNetFromONNX(model string) Net {
|
||||
cmodel := C.CString(model)
|
||||
defer C.free(unsafe.Pointer(cmodel))
|
||||
return Net{p: unsafe.Pointer(C.Net_ReadNetFromONNX(cmodel))}
|
||||
}
|
||||
|
||||
// ReadNetFromONNXBytes reads a network model stored in ONNX framework's format.
|
||||
//
|
||||
// For further details, please see:
|
||||
// https://docs.opencv.org/master/d6/d0f/group__dnn.html#ga9198ecaac7c32ddf0aa7a1bcbd359567
|
||||
//
|
||||
func ReadNetFromONNXBytes(model []byte) (Net, error) {
|
||||
bModel, err := toByteArray(model)
|
||||
if err != nil {
|
||||
return Net{}, err
|
||||
}
|
||||
return Net{p: unsafe.Pointer(C.Net_ReadNetFromONNXBytes(*bModel))}, nil
|
||||
}
|
||||
|
||||
// BlobFromImage creates 4-dimensional blob from image. Optionally resizes and crops
|
||||
// image from center, subtract mean values, scales values by scalefactor,
|
||||
// swap Blue and Red channels.
|
||||
@@ -307,7 +363,7 @@ func BlobFromImage(img Mat, scaleFactor float64, size image.Point, mean Scalar,
|
||||
// https://docs.opencv.org/master/d6/d0f/group__dnn.html#ga2b89ed84432e4395f5a1412c2926293c
|
||||
//
|
||||
func BlobFromImages(imgs []Mat, blob *Mat, scaleFactor float64, size image.Point, mean Scalar,
|
||||
swapRB bool, crop bool, ddepth int) {
|
||||
swapRB bool, crop bool, ddepth MatType) {
|
||||
|
||||
cMatArray := make([]C.Mat, len(imgs))
|
||||
for i, r := range imgs {
|
||||
@@ -396,13 +452,14 @@ func (net *Net) GetPerfProfile() float64 {
|
||||
func (net *Net) GetUnconnectedOutLayers() (ids []int) {
|
||||
cids := C.IntVector{}
|
||||
C.Net_GetUnconnectedOutLayers((C.Net)(net.p), &cids)
|
||||
defer C.free(unsafe.Pointer(cids.val))
|
||||
|
||||
h := &reflect.SliceHeader{
|
||||
Data: uintptr(unsafe.Pointer(cids.val)),
|
||||
Len: int(cids.length),
|
||||
Cap: int(cids.length),
|
||||
}
|
||||
pcids := *(*[]int)(unsafe.Pointer(h))
|
||||
pcids := *(*[]C.int)(unsafe.Pointer(h))
|
||||
|
||||
for i := 0; i < int(cids.length); i++ {
|
||||
ids = append(ids, int(pcids[i]))
|
||||
@@ -417,19 +474,9 @@ func (net *Net) GetUnconnectedOutLayers() (ids []int) {
|
||||
//
|
||||
func (net *Net) GetLayerNames() (names []string) {
|
||||
cstrs := C.CStrings{}
|
||||
defer C.CStrings_Close(cstrs)
|
||||
C.Net_GetLayerNames((C.Net)(net.p), &cstrs)
|
||||
|
||||
h := &reflect.SliceHeader{
|
||||
Data: uintptr(unsafe.Pointer(cstrs.strs)),
|
||||
Len: int(cstrs.length),
|
||||
Cap: int(cstrs.length),
|
||||
}
|
||||
pcstrs := *(*[]string)(unsafe.Pointer(h))
|
||||
|
||||
for i := 0; i < int(cstrs.length); i++ {
|
||||
names = append(names, string(pcstrs[i]))
|
||||
}
|
||||
return
|
||||
return toGoStrings(cstrs)
|
||||
}
|
||||
|
||||
// Close Layer
|
||||
@@ -470,3 +517,99 @@ func (l *Layer) OutputNameToIndex(name string) int {
|
||||
defer C.free(unsafe.Pointer(cName))
|
||||
return int(C.Layer_OutputNameToIndex((C.Layer)(l.p), cName))
|
||||
}
|
||||
|
||||
// NMSBoxes performs non maximum suppression given boxes and corresponding scores.
|
||||
//
|
||||
// For futher details, please see:
|
||||
// https://docs.opencv.org/4.4.0/d6/d0f/group__dnn.html#ga9d118d70a1659af729d01b10233213ee
|
||||
func NMSBoxes(bboxes []image.Rectangle, scores []float32, scoreThreshold float32, nmsThreshold float32, indices []int) {
|
||||
bboxesRectArr := []C.struct_Rect{}
|
||||
for _, v := range bboxes {
|
||||
bbox := C.struct_Rect{
|
||||
x: C.int(v.Min.X),
|
||||
y: C.int(v.Min.Y),
|
||||
width: C.int(v.Size().X),
|
||||
height: C.int(v.Size().Y),
|
||||
}
|
||||
bboxesRectArr = append(bboxesRectArr, bbox)
|
||||
}
|
||||
|
||||
bboxesRects := C.Rects{
|
||||
rects: (*C.Rect)(&bboxesRectArr[0]),
|
||||
length: C.int(len(bboxes)),
|
||||
}
|
||||
|
||||
scoresFloats := []C.float{}
|
||||
for _, v := range scores {
|
||||
scoresFloats = append(scoresFloats, C.float(v))
|
||||
}
|
||||
scoresVector := C.struct_FloatVector{}
|
||||
scoresVector.val = (*C.float)(&scoresFloats[0])
|
||||
scoresVector.length = (C.int)(len(scoresFloats))
|
||||
|
||||
indicesVector := C.IntVector{}
|
||||
|
||||
C.NMSBoxes(bboxesRects, scoresVector, C.float(scoreThreshold), C.float(nmsThreshold), &indicesVector)
|
||||
defer C.free(unsafe.Pointer(indicesVector.val))
|
||||
|
||||
h := &reflect.SliceHeader{
|
||||
Data: uintptr(unsafe.Pointer(indicesVector.val)),
|
||||
Len: int(indicesVector.length),
|
||||
Cap: int(indicesVector.length),
|
||||
}
|
||||
|
||||
ptr := *(*[]C.int)(unsafe.Pointer(h))
|
||||
|
||||
for i := 0; i < int(indicesVector.length); i++ {
|
||||
indices[i] = int(ptr[i])
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// NMSBoxesWithParams performs non maximum suppression given boxes and corresponding scores.
|
||||
//
|
||||
// For futher details, please see:
|
||||
// https://docs.opencv.org/4.4.0/d6/d0f/group__dnn.html#ga9d118d70a1659af729d01b10233213ee
|
||||
func NMSBoxesWithParams(bboxes []image.Rectangle, scores []float32, scoreThreshold float32, nmsThreshold float32, indices []int, eta float32, topK int) {
|
||||
bboxesRectArr := []C.struct_Rect{}
|
||||
for _, v := range bboxes {
|
||||
bbox := C.struct_Rect{
|
||||
x: C.int(v.Min.X),
|
||||
y: C.int(v.Min.Y),
|
||||
width: C.int(v.Size().X),
|
||||
height: C.int(v.Size().Y),
|
||||
}
|
||||
bboxesRectArr = append(bboxesRectArr, bbox)
|
||||
}
|
||||
|
||||
bboxesRects := C.Rects{
|
||||
rects: (*C.Rect)(&bboxesRectArr[0]),
|
||||
length: C.int(len(bboxes)),
|
||||
}
|
||||
|
||||
scoresFloats := []C.float{}
|
||||
for _, v := range scores {
|
||||
scoresFloats = append(scoresFloats, C.float(v))
|
||||
}
|
||||
scoresVector := C.struct_FloatVector{}
|
||||
scoresVector.val = (*C.float)(&scoresFloats[0])
|
||||
scoresVector.length = (C.int)(len(scoresFloats))
|
||||
|
||||
indicesVector := C.IntVector{}
|
||||
|
||||
C.NMSBoxesWithParams(bboxesRects, scoresVector, C.float(scoreThreshold), C.float(nmsThreshold), &indicesVector, C.float(eta), C.int(topK))
|
||||
defer C.free(unsafe.Pointer(indicesVector.val))
|
||||
|
||||
h := &reflect.SliceHeader{
|
||||
Data: uintptr(unsafe.Pointer(indicesVector.val)),
|
||||
Len: int(indicesVector.length),
|
||||
Cap: int(indicesVector.length),
|
||||
}
|
||||
|
||||
ptr := *(*[]C.int)(unsafe.Pointer(h))
|
||||
|
||||
for i := 0; i < int(indicesVector.length); i++ {
|
||||
indices[i] = int(ptr[i])
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
6
vendor/gocv.io/x/gocv/dnn.h
generated
vendored
6
vendor/gocv.io/x/gocv/dnn.h
generated
vendored
@@ -25,6 +25,9 @@ Net Net_ReadNetFromCaffe(const char* prototxt, const char* caffeModel);
|
||||
Net Net_ReadNetFromCaffeBytes(struct ByteArray prototxt, struct ByteArray caffeModel);
|
||||
Net Net_ReadNetFromTensorflow(const char* model);
|
||||
Net Net_ReadNetFromTensorflowBytes(struct ByteArray model);
|
||||
Net Net_ReadNetFromTorch(const char* model);
|
||||
Net Net_ReadNetFromONNX(const char* model);
|
||||
Net Net_ReadNetFromONNXBytes(struct ByteArray model);
|
||||
Mat Net_BlobFromImage(Mat image, double scalefactor, Size size, Scalar mean, bool swapRB,
|
||||
bool crop);
|
||||
void Net_BlobFromImages(struct Mats images, Mat blob, double scalefactor, Size size,
|
||||
@@ -51,6 +54,9 @@ int Layer_OutputNameToIndex(Layer layer, const char* name);
|
||||
const char* Layer_GetName(Layer layer);
|
||||
const char* Layer_GetType(Layer layer);
|
||||
|
||||
void NMSBoxes(struct Rects bboxes, FloatVector scores, float score_threshold, float nms_threshold, IntVector* indices);
|
||||
void NMSBoxesWithParams(struct Rects bboxes, FloatVector scores, const float score_threshold, const float nms_threshold, IntVector* indices, const float eta, const int top_k);
|
||||
|
||||
#ifdef __cplusplus
|
||||
}
|
||||
#endif
|
||||
|
||||
6
vendor/gocv.io/x/gocv/dnn_string.go
generated
vendored
6
vendor/gocv.io/x/gocv/dnn_string.go
generated
vendored
@@ -12,6 +12,8 @@ func (c NetBackendType) String() string {
|
||||
return "opencv"
|
||||
case NetBackendVKCOM:
|
||||
return "vulkan"
|
||||
case NetBackendCUDA:
|
||||
return "cuda"
|
||||
}
|
||||
return ""
|
||||
}
|
||||
@@ -30,6 +32,10 @@ func (c NetTargetType) String() string {
|
||||
return "vulkan"
|
||||
case NetTargetFPGA:
|
||||
return "fpga"
|
||||
case NetTargetCUDA:
|
||||
return "cuda"
|
||||
case NetTargetCUDAFP16:
|
||||
return "cudafp16"
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
131
vendor/gocv.io/x/gocv/features2d.cpp
generated
vendored
131
vendor/gocv.io/x/gocv/features2d.cpp
generated
vendored
@@ -237,10 +237,13 @@ struct KeyPoints FastFeatureDetector_Detect(FastFeatureDetector f, Mat src) {
|
||||
}
|
||||
|
||||
ORB ORB_Create() {
|
||||
// TODO: params
|
||||
return new cv::Ptr<cv::ORB>(cv::ORB::create());
|
||||
}
|
||||
|
||||
ORB ORB_CreateWithParams(int nfeatures, float scaleFactor, int nlevels, int edgeThreshold, int firstLevel, int WTA_K, int scoreType, int patchSize, int fastThreshold) {
|
||||
return new cv::Ptr<cv::ORB>(cv::ORB::create(nfeatures, scaleFactor, nlevels, edgeThreshold, firstLevel, WTA_K, static_cast<cv::ORB::ScoreType>(scoreType), patchSize, fastThreshold));
|
||||
}
|
||||
|
||||
void ORB_Close(ORB o) {
|
||||
delete o;
|
||||
}
|
||||
@@ -413,6 +416,50 @@ struct MultiDMatches BFMatcher_KnnMatchWithParams(BFMatcher b, Mat query, Mat tr
|
||||
return ret;
|
||||
}
|
||||
|
||||
FlannBasedMatcher FlannBasedMatcher_Create() {
|
||||
return new cv::Ptr<cv::FlannBasedMatcher>(cv::FlannBasedMatcher::create());
|
||||
}
|
||||
|
||||
void FlannBasedMatcher_Close(FlannBasedMatcher f) {
|
||||
delete f;
|
||||
}
|
||||
|
||||
struct MultiDMatches FlannBasedMatcher_KnnMatch(FlannBasedMatcher f, Mat query, Mat train, int k) {
|
||||
std::vector< std::vector<cv::DMatch> > matches;
|
||||
(*f)->knnMatch(*query, *train, matches, k);
|
||||
|
||||
DMatches *dms = new DMatches[matches.size()];
|
||||
for (size_t i = 0; i < matches.size(); ++i) {
|
||||
DMatch *dmatches = new DMatch[matches[i].size()];
|
||||
for (size_t j = 0; j < matches[i].size(); ++j) {
|
||||
DMatch dmatch = {matches[i][j].queryIdx, matches[i][j].trainIdx, matches[i][j].imgIdx,
|
||||
matches[i][j].distance};
|
||||
dmatches[j] = dmatch;
|
||||
}
|
||||
dms[i] = {dmatches, (int) matches[i].size()};
|
||||
}
|
||||
MultiDMatches ret = {dms, (int) matches.size()};
|
||||
return ret;
|
||||
}
|
||||
|
||||
struct MultiDMatches FlannBasedMatcher_KnnMatchWithParams(FlannBasedMatcher f, Mat query, Mat train, int k, Mat mask, bool compactResult) {
|
||||
std::vector< std::vector<cv::DMatch> > matches;
|
||||
(*f)->knnMatch(*query, *train, matches, k, *mask, compactResult);
|
||||
|
||||
DMatches *dms = new DMatches[matches.size()];
|
||||
for (size_t i = 0; i < matches.size(); ++i) {
|
||||
DMatch *dmatches = new DMatch[matches[i].size()];
|
||||
for (size_t j = 0; j < matches[i].size(); ++j) {
|
||||
DMatch dmatch = {matches[i][j].queryIdx, matches[i][j].trainIdx, matches[i][j].imgIdx,
|
||||
matches[i][j].distance};
|
||||
dmatches[j] = dmatch;
|
||||
}
|
||||
dms[i] = {dmatches, (int) matches[i].size()};
|
||||
}
|
||||
MultiDMatches ret = {dms, (int) matches.size()};
|
||||
return ret;
|
||||
}
|
||||
|
||||
void DrawKeyPoints(Mat src, struct KeyPoints kp, Mat dst, Scalar s, int flags) {
|
||||
std::vector<cv::KeyPoint> keypts;
|
||||
cv::KeyPoint keypt;
|
||||
@@ -428,3 +475,85 @@ void DrawKeyPoints(Mat src, struct KeyPoints kp, Mat dst, Scalar s, int flags) {
|
||||
|
||||
cv::drawKeypoints(*src, keypts, *dst, color, static_cast<cv::DrawMatchesFlags>(flags));
|
||||
}
|
||||
|
||||
SIFT SIFT_Create() {
|
||||
// TODO: params
|
||||
return new cv::Ptr<cv::SIFT>(cv::SIFT::create());
|
||||
}
|
||||
|
||||
void SIFT_Close(SIFT d) {
|
||||
delete d;
|
||||
}
|
||||
|
||||
struct KeyPoints SIFT_Detect(SIFT d, Mat src) {
|
||||
std::vector<cv::KeyPoint> detected;
|
||||
(*d)->detect(*src, detected);
|
||||
|
||||
KeyPoint* kps = new KeyPoint[detected.size()];
|
||||
|
||||
for (size_t i = 0; i < detected.size(); ++i) {
|
||||
KeyPoint k = {detected[i].pt.x, detected[i].pt.y, detected[i].size, detected[i].angle,
|
||||
detected[i].response, detected[i].octave, detected[i].class_id
|
||||
};
|
||||
kps[i] = k;
|
||||
}
|
||||
|
||||
KeyPoints ret = {kps, (int)detected.size()};
|
||||
return ret;
|
||||
}
|
||||
|
||||
struct KeyPoints SIFT_DetectAndCompute(SIFT d, Mat src, Mat mask, Mat desc) {
|
||||
std::vector<cv::KeyPoint> detected;
|
||||
(*d)->detectAndCompute(*src, *mask, detected, *desc);
|
||||
|
||||
KeyPoint* kps = new KeyPoint[detected.size()];
|
||||
|
||||
for (size_t i = 0; i < detected.size(); ++i) {
|
||||
KeyPoint k = {detected[i].pt.x, detected[i].pt.y, detected[i].size, detected[i].angle,
|
||||
detected[i].response, detected[i].octave, detected[i].class_id
|
||||
};
|
||||
kps[i] = k;
|
||||
}
|
||||
|
||||
KeyPoints ret = {kps, (int)detected.size()};
|
||||
return ret;
|
||||
}
|
||||
|
||||
void DrawMatches(Mat img1, struct KeyPoints kp1, Mat img2, struct KeyPoints kp2, struct DMatches matches1to2, Mat outImg, const Scalar matchesColor, const Scalar pointColor, struct ByteArray matchesMask, int flags) {
|
||||
std::vector<cv::KeyPoint> kp1vec, kp2vec;
|
||||
cv::KeyPoint keypt;
|
||||
|
||||
for (int i = 0; i < kp1.length; ++i) {
|
||||
keypt = cv::KeyPoint(kp1.keypoints[i].x, kp1.keypoints[i].y,
|
||||
kp1.keypoints[i].size, kp1.keypoints[i].angle, kp1.keypoints[i].response,
|
||||
kp1.keypoints[i].octave, kp1.keypoints[i].classID);
|
||||
kp1vec.push_back(keypt);
|
||||
}
|
||||
|
||||
for (int i = 0; i < kp2.length; ++i) {
|
||||
keypt = cv::KeyPoint(kp2.keypoints[i].x, kp2.keypoints[i].y,
|
||||
kp2.keypoints[i].size, kp2.keypoints[i].angle, kp2.keypoints[i].response,
|
||||
kp2.keypoints[i].octave, kp2.keypoints[i].classID);
|
||||
kp2vec.push_back(keypt);
|
||||
}
|
||||
|
||||
cv::Scalar cvmatchescolor = cv::Scalar(matchesColor.val1, matchesColor.val2, matchesColor.val3, matchesColor.val4);
|
||||
cv::Scalar cvpointcolor = cv::Scalar(pointColor.val1, pointColor.val2, pointColor.val3, pointColor.val4);
|
||||
|
||||
std::vector<cv::DMatch> dmatchvec;
|
||||
cv::DMatch dm;
|
||||
|
||||
for (int i = 0; i < matches1to2.length; i++) {
|
||||
dm = cv::DMatch(matches1to2.dmatches[i].queryIdx, matches1to2.dmatches[i].trainIdx,
|
||||
matches1to2.dmatches[i].imgIdx, matches1to2.dmatches[i].distance);
|
||||
dmatchvec.push_back(dm);
|
||||
}
|
||||
|
||||
std::vector<char> maskvec;
|
||||
|
||||
for (int i = 0; i < matchesMask.length; i++) {
|
||||
maskvec.push_back(matchesMask.data[i]);
|
||||
}
|
||||
|
||||
cv::drawMatches(*img1, kp1vec, *img2, kp2vec, dmatchvec, *outImg, cvmatchescolor, cvpointcolor, maskvec, static_cast<cv::DrawMatchesFlags>(flags));
|
||||
}
|
||||
|
||||
207
vendor/gocv.io/x/gocv/features2d.go
generated
vendored
207
vendor/gocv.io/x/gocv/features2d.go
generated
vendored
@@ -149,9 +149,9 @@ const (
|
||||
//FastFeatureDetectorType58 is an alias of FastFeatureDetector::TYPE_5_8
|
||||
FastFeatureDetectorType58 FastFeatureDetectorType = 0
|
||||
//FastFeatureDetectorType712 is an alias of FastFeatureDetector::TYPE_7_12
|
||||
FastFeatureDetectorType712 = 1
|
||||
FastFeatureDetectorType712 FastFeatureDetectorType = 1
|
||||
//FastFeatureDetectorType916 is an alias of FastFeatureDetector::TYPE_9_16
|
||||
FastFeatureDetectorType916 = 2
|
||||
FastFeatureDetectorType916 FastFeatureDetectorType = 2
|
||||
)
|
||||
|
||||
// FastFeatureDetector is a wrapper around the cv::FastFeatureDetector.
|
||||
@@ -321,12 +321,38 @@ type ORB struct {
|
||||
// NewORB returns a new ORB algorithm
|
||||
//
|
||||
// For further details, please see:
|
||||
// https://docs.opencv.org/master/d7/d19/classcv_1_1AgastFeatureDetector.html
|
||||
// https://docs.opencv.org/master/db/d95/classcv_1_1ORB.html
|
||||
//
|
||||
func NewORB() ORB {
|
||||
return ORB{p: unsafe.Pointer(C.ORB_Create())}
|
||||
}
|
||||
|
||||
// NewORBWithParams returns a new ORB algorithm with parameters
|
||||
//
|
||||
// For further details, please see:
|
||||
// https://docs.opencv.org/master/db/d95/classcv_1_1ORB.html#aeff0cbe668659b7ca14bb85ff1c4073b
|
||||
//
|
||||
func NewORBWithParams(nFeatures int, scaleFactor float32, nLevels int, edgeThreshold int, firstLevel int, WTAK int, scoreType ORBScoreType, patchSize int, fastThreshold int) ORB {
|
||||
return ORB{p: unsafe.Pointer(C.ORB_CreateWithParams(
|
||||
C.int(nFeatures),
|
||||
C.float(scaleFactor),
|
||||
C.int(nLevels),
|
||||
C.int(edgeThreshold),
|
||||
C.int(firstLevel),
|
||||
C.int(WTAK),
|
||||
C.int(scoreType),
|
||||
C.int(patchSize),
|
||||
C.int(fastThreshold),
|
||||
))}
|
||||
}
|
||||
|
||||
type ORBScoreType int
|
||||
|
||||
const (
|
||||
ORBScoreTypeHarris ORBScoreType = 0
|
||||
ORBScoreTypeFAST ORBScoreType = 1
|
||||
)
|
||||
|
||||
// Close ORB.
|
||||
func (o *ORB) Close() error {
|
||||
C.ORB_Close((C.ORB)(o.p))
|
||||
@@ -665,6 +691,40 @@ func (b *BFMatcher) KnnMatch(query, train Mat, k int) [][]DMatch {
|
||||
return getMultiDMatches(ret)
|
||||
}
|
||||
|
||||
// FlannBasedMatcher is a wrapper around the the cv::FlannBasedMatcher algorithm
|
||||
type FlannBasedMatcher struct {
|
||||
// C.FlannBasedMatcher
|
||||
p unsafe.Pointer
|
||||
}
|
||||
|
||||
// NewFlannBasedMatcher returns a new FlannBasedMatcher
|
||||
//
|
||||
// For further details, please see:
|
||||
// https://docs.opencv.org/master/dc/de2/classcv_1_1FlannBasedMatcher.html#ab9114a6471e364ad221f89068ca21382
|
||||
//
|
||||
func NewFlannBasedMatcher() FlannBasedMatcher {
|
||||
return FlannBasedMatcher{p: unsafe.Pointer(C.FlannBasedMatcher_Create())}
|
||||
}
|
||||
|
||||
// Close FlannBasedMatcher
|
||||
func (f *FlannBasedMatcher) Close() error {
|
||||
C.FlannBasedMatcher_Close((C.FlannBasedMatcher)(f.p))
|
||||
f.p = nil
|
||||
return nil
|
||||
}
|
||||
|
||||
// KnnMatch Finds the k best matches for each descriptor from a query set.
|
||||
//
|
||||
// For further details, please see:
|
||||
// https://docs.opencv.org/master/db/d39/classcv_1_1DescriptorMatcher.html#aa880f9353cdf185ccf3013e08210483a
|
||||
//
|
||||
func (f *FlannBasedMatcher) KnnMatch(query, train Mat, k int) [][]DMatch {
|
||||
ret := C.FlannBasedMatcher_KnnMatch((C.FlannBasedMatcher)(f.p), query.p, train.p, C.int(k))
|
||||
defer C.MultiDMatches_Close(ret)
|
||||
|
||||
return getMultiDMatches(ret)
|
||||
}
|
||||
|
||||
func getMultiDMatches(ret C.MultiDMatches) [][]DMatch {
|
||||
cArray := ret.dmatches
|
||||
length := int(ret.length)
|
||||
@@ -710,11 +770,11 @@ const (
|
||||
// DrawDefault creates new image and for each keypoint only the center point will be drawn
|
||||
DrawDefault DrawMatchesFlag = 0
|
||||
// DrawOverOutImg draws matches on existing content of image
|
||||
DrawOverOutImg = 1
|
||||
DrawOverOutImg DrawMatchesFlag = 1
|
||||
// NotDrawSinglePoints will not draw single points
|
||||
NotDrawSinglePoints = 2
|
||||
NotDrawSinglePoints DrawMatchesFlag = 2
|
||||
// DrawRichKeyPoints draws the circle around each keypoint with keypoint size and orientation
|
||||
DrawRichKeyPoints = 3
|
||||
DrawRichKeyPoints DrawMatchesFlag = 3
|
||||
)
|
||||
|
||||
// DrawKeyPoints draws keypoints
|
||||
@@ -740,11 +800,142 @@ func DrawKeyPoints(src Mat, keyPoints []KeyPoint, dst *Mat, color color.RGBA, fl
|
||||
}
|
||||
|
||||
scalar := C.struct_Scalar{
|
||||
val1: C.double(color.R),
|
||||
val1: C.double(color.B),
|
||||
val2: C.double(color.G),
|
||||
val3: C.double(color.B),
|
||||
val3: C.double(color.R),
|
||||
val4: C.double(color.A),
|
||||
}
|
||||
|
||||
C.DrawKeyPoints(src.p, cKeyPoints, dst.p, scalar, C.int(flag))
|
||||
}
|
||||
|
||||
// SIFT is a wrapper around the cv::SIFT algorithm.
|
||||
// Due to the patent having expired, this is now in the main OpenCV code modules.
|
||||
type SIFT struct {
|
||||
// C.SIFT
|
||||
p unsafe.Pointer
|
||||
}
|
||||
|
||||
// NewSIFT returns a new SIFT algorithm.
|
||||
//
|
||||
// For further details, please see:
|
||||
// https://docs.opencv.org/master/d5/d3c/classcv_1_1xfeatures2d_1_1SIFT.html
|
||||
//
|
||||
func NewSIFT() SIFT {
|
||||
return SIFT{p: unsafe.Pointer(C.SIFT_Create())}
|
||||
}
|
||||
|
||||
// Close SIFT.
|
||||
func (d *SIFT) Close() error {
|
||||
C.SIFT_Close((C.SIFT)(d.p))
|
||||
d.p = nil
|
||||
return nil
|
||||
}
|
||||
|
||||
// Detect keypoints in an image using SIFT.
|
||||
//
|
||||
// For further details, please see:
|
||||
// https://docs.opencv.org/master/d0/d13/classcv_1_1Feature2D.html#aa4e9a7082ec61ebc108806704fbd7887
|
||||
//
|
||||
func (d *SIFT) Detect(src Mat) []KeyPoint {
|
||||
ret := C.SIFT_Detect((C.SIFT)(d.p), C.Mat(src.Ptr()))
|
||||
defer C.KeyPoints_Close(ret)
|
||||
|
||||
return getKeyPoints(ret)
|
||||
}
|
||||
|
||||
// DetectAndCompute detects and computes keypoints in an image using SIFT.
|
||||
//
|
||||
// For further details, please see:
|
||||
// https://docs.opencv.org/master/d0/d13/classcv_1_1Feature2D.html#a8be0d1c20b08eb867184b8d74c15a677
|
||||
//
|
||||
func (d *SIFT) DetectAndCompute(src Mat, mask Mat) ([]KeyPoint, Mat) {
|
||||
desc := NewMat()
|
||||
ret := C.SIFT_DetectAndCompute((C.SIFT)(d.p), C.Mat(src.Ptr()), C.Mat(mask.Ptr()),
|
||||
C.Mat(desc.Ptr()))
|
||||
defer C.KeyPoints_Close(ret)
|
||||
|
||||
return getKeyPoints(ret), desc
|
||||
}
|
||||
|
||||
// DrawMatches draws matches on combined train and querry images.
|
||||
//
|
||||
// For further details, please see:
|
||||
// https://docs.opencv.org/master/d4/d5d/group__features2d__draw.html#gad8f463ccaf0dc6f61083abd8717c261a
|
||||
func DrawMatches(img1 Mat, kp1 []KeyPoint, img2 Mat, kp2 []KeyPoint, matches1to2 []DMatch, outImg *Mat, matchColor color.RGBA, singlePointColor color.RGBA, matchesMask []byte, flags DrawMatchesFlag) {
|
||||
kp1arr := make([]C.struct_KeyPoint, len(kp1))
|
||||
kp2arr := make([]C.struct_KeyPoint, len(kp2))
|
||||
|
||||
for i, kp := range kp1 {
|
||||
kp1arr[i].x = C.double(kp.X)
|
||||
kp1arr[i].y = C.double(kp.Y)
|
||||
kp1arr[i].size = C.double(kp.Size)
|
||||
kp1arr[i].angle = C.double(kp.Angle)
|
||||
kp1arr[i].response = C.double(kp.Response)
|
||||
kp1arr[i].octave = C.int(kp.Octave)
|
||||
kp1arr[i].classID = C.int(kp.ClassID)
|
||||
}
|
||||
|
||||
for i, kp := range kp2 {
|
||||
kp2arr[i].x = C.double(kp.X)
|
||||
kp2arr[i].y = C.double(kp.Y)
|
||||
kp2arr[i].size = C.double(kp.Size)
|
||||
kp2arr[i].angle = C.double(kp.Angle)
|
||||
kp2arr[i].response = C.double(kp.Response)
|
||||
kp2arr[i].octave = C.int(kp.Octave)
|
||||
kp2arr[i].classID = C.int(kp.ClassID)
|
||||
}
|
||||
|
||||
cKeyPoints1 := C.struct_KeyPoints{
|
||||
keypoints: (*C.struct_KeyPoint)(&kp1arr[0]),
|
||||
length: (C.int)(len(kp1)),
|
||||
}
|
||||
|
||||
cKeyPoints2 := C.struct_KeyPoints{
|
||||
keypoints: (*C.struct_KeyPoint)(&kp2arr[0]),
|
||||
length: (C.int)(len(kp2)),
|
||||
}
|
||||
|
||||
dMatchArr := make([]C.struct_DMatch, len(matches1to2))
|
||||
|
||||
for i, dm := range matches1to2 {
|
||||
dMatchArr[i].queryIdx = C.int(dm.QueryIdx)
|
||||
dMatchArr[i].trainIdx = C.int(dm.TrainIdx)
|
||||
dMatchArr[i].imgIdx = C.int(dm.ImgIdx)
|
||||
dMatchArr[i].distance = C.float(dm.Distance)
|
||||
}
|
||||
|
||||
cDMatches := C.struct_DMatches{
|
||||
dmatches: (*C.struct_DMatch)(&dMatchArr[0]),
|
||||
length: (C.int)(len(matches1to2)),
|
||||
}
|
||||
|
||||
scalarMatchColor := C.struct_Scalar{
|
||||
val1: C.double(matchColor.R),
|
||||
val2: C.double(matchColor.G),
|
||||
val3: C.double(matchColor.B),
|
||||
val4: C.double(matchColor.A),
|
||||
}
|
||||
|
||||
scalarPointColor := C.struct_Scalar{
|
||||
val1: C.double(singlePointColor.B),
|
||||
val2: C.double(singlePointColor.G),
|
||||
val3: C.double(singlePointColor.R),
|
||||
val4: C.double(singlePointColor.A),
|
||||
}
|
||||
|
||||
mask := make([]C.char, len(matchesMask))
|
||||
|
||||
cByteArray := C.struct_ByteArray{
|
||||
length: (C.int)(len(matchesMask)),
|
||||
}
|
||||
|
||||
if len(matchesMask) > 0 {
|
||||
cByteArray = C.struct_ByteArray{
|
||||
data: (*C.char)(&mask[0]),
|
||||
length: (C.int)(len(matchesMask)),
|
||||
}
|
||||
}
|
||||
|
||||
C.DrawMatches(img1.p, cKeyPoints1, img2.p, cKeyPoints2, cDMatches, outImg.p, scalarMatchColor, scalarPointColor, cByteArray, C.int(flags))
|
||||
}
|
||||
|
||||
16
vendor/gocv.io/x/gocv/features2d.h
generated
vendored
16
vendor/gocv.io/x/gocv/features2d.h
generated
vendored
@@ -19,6 +19,8 @@ typedef cv::Ptr<cv::MSER>* MSER;
|
||||
typedef cv::Ptr<cv::ORB>* ORB;
|
||||
typedef cv::Ptr<cv::SimpleBlobDetector>* SimpleBlobDetector;
|
||||
typedef cv::Ptr<cv::BFMatcher>* BFMatcher;
|
||||
typedef cv::Ptr<cv::FlannBasedMatcher>* FlannBasedMatcher;
|
||||
typedef cv::Ptr<cv::SIFT>* SIFT;
|
||||
#else
|
||||
typedef void* AKAZE;
|
||||
typedef void* AgastFeatureDetector;
|
||||
@@ -30,6 +32,8 @@ typedef void* MSER;
|
||||
typedef void* ORB;
|
||||
typedef void* SimpleBlobDetector;
|
||||
typedef void* BFMatcher;
|
||||
typedef void* FlannBasedMatcher;
|
||||
typedef void* SIFT;
|
||||
#endif
|
||||
|
||||
AKAZE AKAZE_Create();
|
||||
@@ -65,6 +69,7 @@ void MSER_Close(MSER a);
|
||||
struct KeyPoints MSER_Detect(MSER a, Mat src);
|
||||
|
||||
ORB ORB_Create();
|
||||
ORB ORB_CreateWithParams(int nfeatures, float scaleFactor, int nlevels, int edgeThreshold, int firstLevel, int WTA_K, int scoreType, int patchSize, int fastThreshold);
|
||||
void ORB_Close(ORB o);
|
||||
struct KeyPoints ORB_Detect(ORB o, Mat src);
|
||||
struct KeyPoints ORB_DetectAndCompute(ORB o, Mat src, Mat mask, Mat desc);
|
||||
@@ -80,8 +85,19 @@ BFMatcher BFMatcher_CreateWithParams(int normType, bool crossCheck);
|
||||
void BFMatcher_Close(BFMatcher b);
|
||||
struct MultiDMatches BFMatcher_KnnMatch(BFMatcher b, Mat query, Mat train, int k);
|
||||
|
||||
FlannBasedMatcher FlannBasedMatcher_Create();
|
||||
void FlannBasedMatcher_Close(FlannBasedMatcher f);
|
||||
struct MultiDMatches FlannBasedMatcher_KnnMatch(FlannBasedMatcher f, Mat query, Mat train, int k);
|
||||
|
||||
void DrawKeyPoints(Mat src, struct KeyPoints kp, Mat dst, const Scalar s, int flags);
|
||||
|
||||
SIFT SIFT_Create();
|
||||
void SIFT_Close(SIFT f);
|
||||
struct KeyPoints SIFT_Detect(SIFT f, Mat src);
|
||||
struct KeyPoints SIFT_DetectAndCompute(SIFT f, Mat src, Mat mask, Mat desc);
|
||||
|
||||
void DrawMatches(Mat img1, struct KeyPoints kp1, Mat img2, struct KeyPoints kp2, struct DMatches matches1to2, Mat outImg, const Scalar matchesColor, const Scalar pointColor, struct ByteArray matchesMask, int flags);
|
||||
|
||||
#ifdef __cplusplus
|
||||
}
|
||||
#endif
|
||||
|
||||
3
vendor/gocv.io/x/gocv/go.mod
generated
vendored
3
vendor/gocv.io/x/gocv/go.mod
generated
vendored
@@ -1,3 +0,0 @@
|
||||
module gocv.io/x/gocv
|
||||
|
||||
go 1.13
|
||||
4
vendor/gocv.io/x/gocv/highgui.cpp
generated
vendored
4
vendor/gocv.io/x/gocv/highgui.cpp
generated
vendored
@@ -62,6 +62,10 @@ void Trackbar_Create(const char* winname, const char* trackname, int max) {
|
||||
cv::createTrackbar(trackname, winname, NULL, max);
|
||||
}
|
||||
|
||||
void Trackbar_CreateWithValue(const char* winname, const char* trackname, int* value, int max) {
|
||||
cv::createTrackbar(trackname, winname, value, max);
|
||||
}
|
||||
|
||||
int Trackbar_GetPos(const char* winname, const char* trackname) {
|
||||
return cv::getTrackbarPos(trackname, winname);
|
||||
}
|
||||
|
||||
64
vendor/gocv.io/x/gocv/highgui.go
generated
vendored
64
vendor/gocv.io/x/gocv/highgui.go
generated
vendored
@@ -67,19 +67,19 @@ type WindowFlag float32
|
||||
|
||||
const (
|
||||
// WindowNormal indicates a normal window.
|
||||
WindowNormal WindowFlag = 0
|
||||
|
||||
// WindowFullscreen indicates a full-screen window.
|
||||
WindowFullscreen = 1
|
||||
WindowNormal WindowFlag = 0x00000000
|
||||
|
||||
// WindowAutosize indicates a window sized based on the contents.
|
||||
WindowAutosize = 1
|
||||
WindowAutosize WindowFlag = 0x00000001
|
||||
|
||||
// WindowFullscreen indicates a full-screen window.
|
||||
WindowFullscreen WindowFlag = 1
|
||||
|
||||
// WindowFreeRatio indicates allow the user to resize without maintaining aspect ratio.
|
||||
WindowFreeRatio = 0x00000100
|
||||
WindowFreeRatio WindowFlag = 0x00000100
|
||||
|
||||
// WindowKeepRatio indicates always maintain an aspect ratio that matches the contents.
|
||||
WindowKeepRatio = 0
|
||||
WindowKeepRatio WindowFlag = 0x00000000
|
||||
)
|
||||
|
||||
// WindowPropertyFlag flags for SetWindowProperty / GetWindowProperty.
|
||||
@@ -92,17 +92,17 @@ const (
|
||||
|
||||
// WindowPropertyAutosize is autosize property
|
||||
// (can be WINDOW_NORMAL or WINDOW_AUTOSIZE).
|
||||
WindowPropertyAutosize = 1
|
||||
WindowPropertyAutosize WindowPropertyFlag = 1
|
||||
|
||||
// WindowPropertyAspectRatio window's aspect ration
|
||||
// (can be set to WINDOW_FREERATIO or WINDOW_KEEPRATIO).
|
||||
WindowPropertyAspectRatio = 2
|
||||
WindowPropertyAspectRatio WindowPropertyFlag = 2
|
||||
|
||||
// WindowPropertyOpenGL opengl support.
|
||||
WindowPropertyOpenGL = 3
|
||||
WindowPropertyOpenGL WindowPropertyFlag = 3
|
||||
|
||||
// WindowPropertyVisible or not.
|
||||
WindowPropertyVisible = 4
|
||||
WindowPropertyVisible WindowPropertyFlag = 4
|
||||
)
|
||||
|
||||
// GetWindowProperty returns properties of a window.
|
||||
@@ -204,8 +204,8 @@ func (w *Window) ResizeWindow(width, height int) {
|
||||
// For further details, please see:
|
||||
// https://docs.opencv.org/master/d7/dfc/group__highgui.html#ga8daf4730d3adf7035b6de9be4c469af5
|
||||
//
|
||||
func SelectROI(name string, img Mat) image.Rectangle {
|
||||
cName := C.CString(name)
|
||||
func (w *Window) SelectROI(img Mat) image.Rectangle {
|
||||
cName := C.CString(w.name)
|
||||
defer C.free(unsafe.Pointer(cName))
|
||||
|
||||
r := C.Window_SelectROI(cName, img.p)
|
||||
@@ -223,6 +223,27 @@ func SelectROI(name string, img Mat) image.Rectangle {
|
||||
// For further details, please see:
|
||||
// https://docs.opencv.org/master/d7/dfc/group__highgui.html#ga0f11fad74a6432b8055fb21621a0f893
|
||||
//
|
||||
func (w *Window) SelectROIs(img Mat) []image.Rectangle {
|
||||
cName := C.CString(w.name)
|
||||
defer C.free(unsafe.Pointer(cName))
|
||||
|
||||
ret := C.Window_SelectROIs(cName, img.p)
|
||||
defer C.Rects_Close(ret)
|
||||
|
||||
return toRectangles(ret)
|
||||
}
|
||||
|
||||
// Deprecated: use Window.SelectROI instead
|
||||
func SelectROI(name string, img Mat) image.Rectangle {
|
||||
cName := C.CString(name)
|
||||
defer C.free(unsafe.Pointer(cName))
|
||||
|
||||
r := C.Window_SelectROI(cName, img.p)
|
||||
rect := image.Rect(int(r.x), int(r.y), int(r.x+r.width), int(r.y+r.height))
|
||||
return rect
|
||||
}
|
||||
|
||||
// Deprecated: use Window.SelectROIs instead
|
||||
func SelectROIs(name string, img Mat) []image.Rectangle {
|
||||
cName := C.CString(name)
|
||||
defer C.free(unsafe.Pointer(cName))
|
||||
@@ -262,6 +283,23 @@ func (w *Window) CreateTrackbar(name string, max int) *Trackbar {
|
||||
return &Trackbar{name: name, parent: w}
|
||||
}
|
||||
|
||||
// CreateTrackbarWithValue works like CreateTrackbar but also assigns a
|
||||
// variable value to be a position synchronized with the trackbar.
|
||||
//
|
||||
// For further details, please see:
|
||||
// https://docs.opencv.org/master/d7/dfc/group__highgui.html#gaf78d2155d30b728fc413803745b67a9b
|
||||
//
|
||||
func (w *Window) CreateTrackbarWithValue(name string, value *int, max int) *Trackbar {
|
||||
cName := C.CString(w.name)
|
||||
defer C.free(unsafe.Pointer(cName))
|
||||
|
||||
tName := C.CString(name)
|
||||
defer C.free(unsafe.Pointer(tName))
|
||||
|
||||
C.Trackbar_CreateWithValue(cName, tName, (*C.int)(unsafe.Pointer(value)), C.int(max))
|
||||
return &Trackbar{name: name, parent: w}
|
||||
}
|
||||
|
||||
// GetPos returns the trackbar position.
|
||||
//
|
||||
// For further details, please see:
|
||||
|
||||
1
vendor/gocv.io/x/gocv/highgui_gocv.h
generated
vendored
1
vendor/gocv.io/x/gocv/highgui_gocv.h
generated
vendored
@@ -23,6 +23,7 @@ struct Rects Window_SelectROIs(const char* winname, Mat img);
|
||||
|
||||
// Trackbar
|
||||
void Trackbar_Create(const char* winname, const char* trackname, int max);
|
||||
void Trackbar_CreateWithValue(const char* winname, const char* trackname, int* value, int max);
|
||||
int Trackbar_GetPos(const char* winname, const char* trackname);
|
||||
void Trackbar_SetPos(const char* winname, const char* trackname, int pos);
|
||||
void Trackbar_SetMin(const char* winname, const char* trackname, int pos);
|
||||
|
||||
14
vendor/gocv.io/x/gocv/imgcodecs.cpp
generated
vendored
14
vendor/gocv.io/x/gocv/imgcodecs.cpp
generated
vendored
@@ -21,22 +21,20 @@ bool Image_IMWrite_WithParams(const char* filename, Mat img, IntVector params) {
|
||||
return cv::imwrite(filename, *img, compression_params);
|
||||
}
|
||||
|
||||
struct ByteArray Image_IMEncode(const char* fileExt, Mat img) {
|
||||
std::vector<uchar> data;
|
||||
cv::imencode(fileExt, *img, data);
|
||||
return toByteArray(reinterpret_cast<const char*>(&data[0]), data.size());
|
||||
void Image_IMEncode(const char* fileExt, Mat img, void* vector) {
|
||||
auto vectorPtr = reinterpret_cast<std::vector<uchar> *>(vector);
|
||||
cv::imencode(fileExt, *img, *vectorPtr);
|
||||
}
|
||||
|
||||
struct ByteArray Image_IMEncode_WithParams(const char* fileExt, Mat img, IntVector params) {
|
||||
std::vector<uchar> data;
|
||||
void Image_IMEncode_WithParams(const char* fileExt, Mat img, IntVector params, void* vector) {
|
||||
auto vectorPtr = reinterpret_cast<std::vector<uchar> *>(vector);
|
||||
std::vector<int> compression_params;
|
||||
|
||||
for (int i = 0, *v = params.val; i < params.length; ++v, ++i) {
|
||||
compression_params.push_back(*v);
|
||||
}
|
||||
|
||||
cv::imencode(fileExt, *img, data, compression_params);
|
||||
return toByteArray(reinterpret_cast<const char*>(&data[0]), data.size());
|
||||
cv::imencode(fileExt, *img, *vectorPtr, compression_params);
|
||||
}
|
||||
|
||||
Mat Image_IMDecode(ByteArray buf, int flags) {
|
||||
|
||||
44
vendor/gocv.io/x/gocv/imgcodecs.go
generated
vendored
44
vendor/gocv.io/x/gocv/imgcodecs.go
generated
vendored
@@ -19,48 +19,52 @@ const (
|
||||
|
||||
// IMReadGrayScale always convert image to the single channel
|
||||
// grayscale image.
|
||||
IMReadGrayScale = 0
|
||||
IMReadGrayScale IMReadFlag = 0
|
||||
|
||||
// IMReadColor always converts image to the 3 channel BGR color image.
|
||||
IMReadColor = 1
|
||||
IMReadColor IMReadFlag = 1
|
||||
|
||||
// IMReadAnyDepth returns 16-bit/32-bit image when the input has the corresponding
|
||||
// depth, otherwise convert it to 8-bit.
|
||||
IMReadAnyDepth = 2
|
||||
IMReadAnyDepth IMReadFlag = 2
|
||||
|
||||
// IMReadAnyColor the image is read in any possible color format.
|
||||
IMReadAnyColor = 4
|
||||
IMReadAnyColor IMReadFlag = 4
|
||||
|
||||
// IMReadLoadGDAL uses the gdal driver for loading the image.
|
||||
IMReadLoadGDAL = 8
|
||||
IMReadLoadGDAL IMReadFlag = 8
|
||||
|
||||
// IMReadReducedGrayscale2 always converts image to the single channel grayscale image
|
||||
// and the image size reduced 1/2.
|
||||
IMReadReducedGrayscale2 = 16
|
||||
IMReadReducedGrayscale2 IMReadFlag = 16
|
||||
|
||||
// IMReadReducedColor2 always converts image to the 3 channel BGR color image and the
|
||||
// image size reduced 1/2.
|
||||
IMReadReducedColor2 = 17
|
||||
IMReadReducedColor2 IMReadFlag = 17
|
||||
|
||||
// IMReadReducedGrayscale4 always converts image to the single channel grayscale image and
|
||||
// the image size reduced 1/4.
|
||||
IMReadReducedGrayscale4 = 32
|
||||
IMReadReducedGrayscale4 IMReadFlag = 32
|
||||
|
||||
// IMReadReducedColor4 always converts image to the 3 channel BGR color image and
|
||||
// the image size reduced 1/4.
|
||||
IMReadReducedColor4 = 33
|
||||
IMReadReducedColor4 IMReadFlag = 33
|
||||
|
||||
// IMReadReducedGrayscale8 always convert image to the single channel grayscale image and
|
||||
// the image size reduced 1/8.
|
||||
IMReadReducedGrayscale8 = 64
|
||||
IMReadReducedGrayscale8 IMReadFlag = 64
|
||||
|
||||
// IMReadReducedColor8 always convert image to the 3 channel BGR color image and the
|
||||
// image size reduced 1/8.
|
||||
IMReadReducedColor8 = 65
|
||||
IMReadReducedColor8 IMReadFlag = 65
|
||||
|
||||
// IMReadIgnoreOrientation do not rotate the image according to EXIF's orientation flag.
|
||||
IMReadIgnoreOrientation = 128
|
||||
IMReadIgnoreOrientation IMReadFlag = 128
|
||||
)
|
||||
|
||||
// TODO: Define IMWriteFlag type?
|
||||
|
||||
const (
|
||||
//IMWriteJpegQuality is the quality from 0 to 100 for JPEG (the higher is the better). Default value is 95.
|
||||
IMWriteJpegQuality = 1
|
||||
|
||||
@@ -193,13 +197,13 @@ const (
|
||||
// For further details, please see:
|
||||
// http://docs.opencv.org/master/d4/da8/group__imgcodecs.html#ga461f9ac09887e47797a54567df3b8b63
|
||||
//
|
||||
func IMEncode(fileExt FileExt, img Mat) (buf []byte, err error) {
|
||||
func IMEncode(fileExt FileExt, img Mat) (buf *NativeByteBuffer, err error) {
|
||||
cfileExt := C.CString(string(fileExt))
|
||||
defer C.free(unsafe.Pointer(cfileExt))
|
||||
|
||||
b := C.Image_IMEncode(cfileExt, img.Ptr())
|
||||
defer C.ByteArray_Release(b)
|
||||
return toGoBytes(b), nil
|
||||
buffer := newNativeByteBuffer()
|
||||
C.Image_IMEncode(cfileExt, img.Ptr(), buffer.nativePointer())
|
||||
return buffer, nil
|
||||
}
|
||||
|
||||
// IMEncodeWithParams encodes an image Mat into a memory buffer.
|
||||
@@ -212,7 +216,7 @@ func IMEncode(fileExt FileExt, img Mat) (buf []byte, err error) {
|
||||
// For further details, please see:
|
||||
// http://docs.opencv.org/master/d4/da8/group__imgcodecs.html#ga461f9ac09887e47797a54567df3b8b63
|
||||
//
|
||||
func IMEncodeWithParams(fileExt FileExt, img Mat, params []int) (buf []byte, err error) {
|
||||
func IMEncodeWithParams(fileExt FileExt, img Mat, params []int) (buf *NativeByteBuffer, err error) {
|
||||
cfileExt := C.CString(string(fileExt))
|
||||
defer C.free(unsafe.Pointer(cfileExt))
|
||||
|
||||
@@ -226,9 +230,9 @@ func IMEncodeWithParams(fileExt FileExt, img Mat, params []int) (buf []byte, err
|
||||
paramsVector.val = (*C.int)(&cparams[0])
|
||||
paramsVector.length = (C.int)(len(cparams))
|
||||
|
||||
b := C.Image_IMEncode_WithParams(cfileExt, img.Ptr(), paramsVector)
|
||||
defer C.ByteArray_Release(b)
|
||||
return toGoBytes(b), nil
|
||||
b := newNativeByteBuffer()
|
||||
C.Image_IMEncode_WithParams(cfileExt, img.Ptr(), paramsVector, b.nativePointer())
|
||||
return b, nil
|
||||
}
|
||||
|
||||
// IMDecode reads an image from a buffer in memory.
|
||||
|
||||
5
vendor/gocv.io/x/gocv/imgcodecs.h
generated
vendored
5
vendor/gocv.io/x/gocv/imgcodecs.h
generated
vendored
@@ -13,8 +13,9 @@ extern "C" {
|
||||
Mat Image_IMRead(const char* filename, int flags);
|
||||
bool Image_IMWrite(const char* filename, Mat img);
|
||||
bool Image_IMWrite_WithParams(const char* filename, Mat img, IntVector params);
|
||||
struct ByteArray Image_IMEncode(const char* fileExt, Mat img);
|
||||
struct ByteArray Image_IMEncode_WithParams(const char* fileExt, Mat img, IntVector params);
|
||||
void Image_IMEncode(const char* fileExt, Mat img, void* vector);
|
||||
|
||||
void Image_IMEncode_WithParams(const char* fileExt, Mat img, IntVector params, void* vector);
|
||||
Mat Image_IMDecode(ByteArray buf, int flags);
|
||||
|
||||
#ifdef __cplusplus
|
||||
|
||||
324
vendor/gocv.io/x/gocv/imgproc.cpp
generated
vendored
324
vendor/gocv.io/x/gocv/imgproc.cpp
generated
vendored
@@ -1,33 +1,14 @@
|
||||
#include "imgproc.h"
|
||||
|
||||
double ArcLength(Contour curve, bool is_closed) {
|
||||
std::vector<cv::Point> pts;
|
||||
|
||||
for (size_t i = 0; i < curve.length; i++) {
|
||||
pts.push_back(cv::Point(curve.points[i].x, curve.points[i].y));
|
||||
}
|
||||
|
||||
return cv::arcLength(pts, is_closed);
|
||||
double ArcLength(PointVector curve, bool is_closed) {
|
||||
return cv::arcLength(*curve, is_closed);
|
||||
}
|
||||
|
||||
Contour ApproxPolyDP(Contour curve, double epsilon, bool closed) {
|
||||
std::vector<cv::Point> curvePts;
|
||||
PointVector ApproxPolyDP(PointVector curve, double epsilon, bool closed) {
|
||||
PointVector approxCurvePts = new std::vector<cv::Point>;
|
||||
cv::approxPolyDP(*curve, *approxCurvePts, epsilon, closed);
|
||||
|
||||
for (size_t i = 0; i < curve.length; i++) {
|
||||
curvePts.push_back(cv::Point(curve.points[i].x, curve.points[i].y));
|
||||
}
|
||||
|
||||
std::vector<cv::Point> approxCurvePts;
|
||||
cv::approxPolyDP(curvePts, approxCurvePts, epsilon, closed);
|
||||
|
||||
int length = approxCurvePts.size();
|
||||
Point* points = new Point[length];
|
||||
|
||||
for (size_t i = 0; i < length; i++) {
|
||||
points[i] = (Point){approxCurvePts[i].x, approxCurvePts[i].y};
|
||||
}
|
||||
|
||||
return (Contour){points, length};
|
||||
return approxCurvePts;
|
||||
}
|
||||
|
||||
void CvtColor(Mat src, Mat dst, int code) {
|
||||
@@ -95,46 +76,34 @@ double CompareHist(Mat hist1, Mat hist2, int method) {
|
||||
return cv::compareHist(*hist1, *hist2, method);
|
||||
}
|
||||
|
||||
struct RotatedRect FitEllipse(Points points)
|
||||
struct RotatedRect FitEllipse(PointVector pts)
|
||||
{
|
||||
Point *rpts = new Point[points.length];
|
||||
std::vector<cv::Point> pts;
|
||||
cv::RotatedRect bRect = cv::fitEllipse(*pts);
|
||||
|
||||
for (size_t i = 0; i < points.length; i++)
|
||||
{
|
||||
pts.push_back(cv::Point(points.points[i].x, points.points[i].y));
|
||||
Point pt = {points.points[i].x, points.points[i].y};
|
||||
rpts[i] = pt;
|
||||
}
|
||||
Rect r = {bRect.boundingRect().x, bRect.boundingRect().y, bRect.boundingRect().width, bRect.boundingRect().height};
|
||||
Point centrpt = {int(lroundf(bRect.center.x)), int(lroundf(bRect.center.y))};
|
||||
Size szsz = {int(lroundf(bRect.size.width)), int(lroundf(bRect.size.height))};
|
||||
|
||||
cv::RotatedRect bRect = cv::fitEllipse(pts);
|
||||
|
||||
Rect r = {bRect.boundingRect().x, bRect.boundingRect().y, bRect.boundingRect().width, bRect.boundingRect().height};
|
||||
Point centrpt = {int(lroundf(bRect.center.x)), int(lroundf(bRect.center.y))};
|
||||
Size szsz = {int(lroundf(bRect.size.width)), int(lroundf(bRect.size.height))};
|
||||
|
||||
RotatedRect rotRect = {(Contour){rpts, 4}, r, centrpt, szsz, bRect.angle};
|
||||
return rotRect;
|
||||
}
|
||||
|
||||
void ConvexHull(Contour points, Mat hull, bool clockwise, bool returnPoints) {
|
||||
std::vector<cv::Point> pts;
|
||||
|
||||
for (size_t i = 0; i < points.length; i++) {
|
||||
pts.push_back(cv::Point(points.points[i].x, points.points[i].y));
|
||||
cv::Point2f* pts4 = new cv::Point2f[4];
|
||||
bRect.points(pts4);
|
||||
Point* rpts = new Point[4];
|
||||
for (size_t j = 0; j < 4; j++) {
|
||||
Point pt = {int(lroundf(pts4[j].x)), int(lroundf(pts4[j].y))};
|
||||
rpts[j] = pt;
|
||||
}
|
||||
|
||||
cv::convexHull(pts, *hull, clockwise, returnPoints);
|
||||
delete[] pts4;
|
||||
|
||||
RotatedRect rotRect = {Points{rpts, 4}, r, centrpt, szsz, bRect.angle};
|
||||
return rotRect;
|
||||
}
|
||||
|
||||
void ConvexityDefects(Contour points, Mat hull, Mat result) {
|
||||
std::vector<cv::Point> pts;
|
||||
void ConvexHull(PointVector points, Mat hull, bool clockwise, bool returnPoints) {
|
||||
cv::convexHull(*points, *hull, clockwise, returnPoints);
|
||||
}
|
||||
|
||||
for (size_t i = 0; i < points.length; i++) {
|
||||
pts.push_back(cv::Point(points.points[i].x, points.points[i].y));
|
||||
}
|
||||
|
||||
cv::convexityDefects(pts, *hull, *result);
|
||||
void ConvexityDefects(PointVector points, Mat hull, Mat result) {
|
||||
cv::convexityDefects(*points, *hull, *result);
|
||||
}
|
||||
|
||||
void BilateralFilter(Mat src, Mat dst, int d, double sc, double ss) {
|
||||
@@ -160,6 +129,13 @@ void Dilate(Mat src, Mat dst, Mat kernel) {
|
||||
cv::dilate(*src, *dst, *kernel);
|
||||
}
|
||||
|
||||
void DilateWithParams(Mat src, Mat dst, Mat kernel, Point anchor, int iterations, int borderType, Scalar borderValue) {
|
||||
cv::Point pt1(anchor.x, anchor.y);
|
||||
cv::Scalar c = cv::Scalar(borderValue.val1, borderValue.val2, borderValue.val3, borderValue.val4);
|
||||
|
||||
cv::dilate(*src, *dst, *kernel, pt1, iterations, borderType, c);
|
||||
}
|
||||
|
||||
void DistanceTransform(Mat src, Mat dst, Mat labels, int distanceType, int maskSize, int labelType) {
|
||||
cv::distanceTransform(*src, *dst, *labels, distanceType, maskSize, labelType);
|
||||
}
|
||||
@@ -168,6 +144,12 @@ void Erode(Mat src, Mat dst, Mat kernel) {
|
||||
cv::erode(*src, *dst, *kernel);
|
||||
}
|
||||
|
||||
void ErodeWithParams(Mat src, Mat dst, Mat kernel, Point anchor, int iterations, int borderType) {
|
||||
cv::Point pt1(anchor.x, anchor.y);
|
||||
|
||||
cv::erode(*src, *dst, *kernel, pt1, iterations, borderType, cv::morphologyDefaultBorderValue());
|
||||
}
|
||||
|
||||
void MatchTemplate(Mat image, Mat templ, Mat result, int method, Mat mask) {
|
||||
cv::matchTemplate(*image, *templ, *result, method, *mask);
|
||||
}
|
||||
@@ -191,14 +173,8 @@ void PyrUp(Mat src, Mat dst, Size size, int borderType) {
|
||||
cv::pyrUp(*src, *dst, cvSize, borderType);
|
||||
}
|
||||
|
||||
struct Rect BoundingRect(Contour con) {
|
||||
std::vector<cv::Point> pts;
|
||||
|
||||
for (size_t i = 0; i < con.length; i++) {
|
||||
pts.push_back(cv::Point(con.points[i].x, con.points[i].y));
|
||||
}
|
||||
|
||||
cv::Rect bRect = cv::boundingRect(pts);
|
||||
struct Rect BoundingRect(PointVector pts) {
|
||||
cv::Rect bRect = cv::boundingRect(*pts);
|
||||
Rect r = {bRect.x, bRect.y, bRect.width, bRect.height};
|
||||
return r;
|
||||
}
|
||||
@@ -207,27 +183,15 @@ void BoxPoints(RotatedRect rect, Mat boxPts){
|
||||
cv::Point2f centerPt(rect.center.x , rect.center.y);
|
||||
cv::Size2f rSize(rect.size.width, rect.size.height);
|
||||
cv::RotatedRect rotatedRectangle(centerPt, rSize, rect.angle);
|
||||
cv::boxPoints(rotatedRectangle, *boxPts);
|
||||
cv::boxPoints(rotatedRectangle, *boxPts);
|
||||
}
|
||||
|
||||
double ContourArea(Contour con) {
|
||||
std::vector<cv::Point> pts;
|
||||
|
||||
for (size_t i = 0; i < con.length; i++) {
|
||||
pts.push_back(cv::Point(con.points[i].x, con.points[i].y));
|
||||
}
|
||||
|
||||
return cv::contourArea(pts);
|
||||
double ContourArea(PointVector pts) {
|
||||
return cv::contourArea(*pts);
|
||||
}
|
||||
|
||||
struct RotatedRect MinAreaRect(Points points){
|
||||
std::vector<cv::Point> pts;
|
||||
|
||||
for (size_t i = 0; i < points.length; i++) {
|
||||
pts.push_back(cv::Point(points.points[i].x, points.points[i].y));
|
||||
}
|
||||
|
||||
cv::RotatedRect cvrect = cv::minAreaRect(pts);
|
||||
struct RotatedRect MinAreaRect(PointVector pts){
|
||||
cv::RotatedRect cvrect = cv::minAreaRect(*pts);
|
||||
|
||||
Point* rpts = new Point[4];
|
||||
cv::Point2f* pts4 = new cv::Point2f[4];
|
||||
@@ -249,38 +213,24 @@ struct RotatedRect MinAreaRect(Points points){
|
||||
return retrect;
|
||||
}
|
||||
|
||||
void MinEnclosingCircle(Points points, Point2f* center, float* radius){
|
||||
std::vector<cv::Point> pts;
|
||||
|
||||
for (size_t i = 0; i < points.length; i++) {
|
||||
pts.push_back(cv::Point(points.points[i].x, points.points[i].y));
|
||||
}
|
||||
|
||||
void MinEnclosingCircle(PointVector pts, Point2f* center, float* radius){
|
||||
cv::Point2f center2f;
|
||||
cv::minEnclosingCircle(pts, center2f, *radius);
|
||||
cv::minEnclosingCircle(*pts, center2f, *radius);
|
||||
center->x = center2f.x;
|
||||
center->y = center2f.y;
|
||||
}
|
||||
|
||||
struct Contours FindContours(Mat src, int mode, int method) {
|
||||
std::vector<std::vector<cv::Point> > contours;
|
||||
cv::findContours(*src, contours, mode, method);
|
||||
PointsVector FindContours(Mat src, Mat hierarchy, int mode, int method) {
|
||||
PointsVector contours = new std::vector<std::vector<cv::Point> >;
|
||||
cv::findContours(*src, *contours, *hierarchy, mode, method);
|
||||
|
||||
Contour* points = new Contour[contours.size()];
|
||||
return contours;
|
||||
}
|
||||
|
||||
for (size_t i = 0; i < contours.size(); i++) {
|
||||
Point* pts = new Point[contours[i].size()];
|
||||
double PointPolygonTest(PointVector pts, Point pt, bool measureDist) {
|
||||
cv::Point2f pt1(pt.x, pt.y);
|
||||
|
||||
for (size_t j = 0; j < contours[i].size(); j++) {
|
||||
Point pt = {contours[i][j].x, contours[i][j].y};
|
||||
pts[j] = pt;
|
||||
}
|
||||
|
||||
points[i] = (Contour){pts, (int)contours[i].size()};
|
||||
}
|
||||
|
||||
Contours cons = {points, (int)contours.size()};
|
||||
return cons;
|
||||
return cv::pointPolygonTest(*pts, pt1, measureDist);
|
||||
}
|
||||
|
||||
int ConnectedComponents(Mat src, Mat labels, int connectivity, int ltype, int ccltype){
|
||||
@@ -317,6 +267,10 @@ void GaussianBlur(Mat src, Mat dst, Size ps, double sX, double sY, int bt) {
|
||||
cv::GaussianBlur(*src, *dst, sz, sX, sY, bt);
|
||||
}
|
||||
|
||||
Mat GetGaussianKernel(int ksize, double sigma, int ktype){
|
||||
return new cv::Mat(cv::getGaussianKernel(ksize, sigma, ktype));
|
||||
}
|
||||
|
||||
void Laplacian(Mat src, Mat dst, int dDepth, int kSize, double scale, double delta,
|
||||
int borderType) {
|
||||
cv::Laplacian(*src, *dst, dDepth, kSize, scale, delta, borderType);
|
||||
@@ -382,8 +336,8 @@ void Integral(Mat src, Mat sum, Mat sqsum, Mat tilted) {
|
||||
cv::integral(*src, *sum, *sqsum, *tilted);
|
||||
}
|
||||
|
||||
void Threshold(Mat src, Mat dst, double thresh, double maxvalue, int typ) {
|
||||
cv::threshold(*src, *dst, thresh, maxvalue, typ);
|
||||
double Threshold(Mat src, Mat dst, double thresh, double maxvalue, int typ) {
|
||||
return cv::threshold(*src, *dst, thresh, maxvalue, typ);
|
||||
}
|
||||
|
||||
void AdaptiveThreshold(Mat src, Mat dst, double maxValue, int adaptiveMethod, int thresholdType,
|
||||
@@ -414,6 +368,13 @@ void Circle(Mat img, Point center, int radius, Scalar color, int thickness) {
|
||||
cv::circle(*img, p1, radius, c, thickness);
|
||||
}
|
||||
|
||||
void CircleWithParams(Mat img, Point center, int radius, Scalar color, int thickness, int lineType, int shift) {
|
||||
cv::Point p1(center.x, center.y);
|
||||
cv::Scalar c = cv::Scalar(color.val1, color.val2, color.val3, color.val4);
|
||||
|
||||
cv::circle(*img, p1, radius, c, thickness, lineType, shift);
|
||||
}
|
||||
|
||||
void Ellipse(Mat img, Point center, Point axes, double angle, double
|
||||
startAngle, double endAngle, Scalar color, int thickness) {
|
||||
cv::Point p1(center.x, center.y);
|
||||
@@ -423,6 +384,15 @@ void Ellipse(Mat img, Point center, Point axes, double angle, double
|
||||
cv::ellipse(*img, p1, p2, angle, startAngle, endAngle, c, thickness);
|
||||
}
|
||||
|
||||
void EllipseWithParams(Mat img, Point center, Point axes, double angle, double
|
||||
startAngle, double endAngle, Scalar color, int thickness, int lineType, int shift) {
|
||||
cv::Point p1(center.x, center.y);
|
||||
cv::Point p2(axes.x, axes.y);
|
||||
cv::Scalar c = cv::Scalar(color.val1, color.val2, color.val3, color.val4);
|
||||
|
||||
cv::ellipse(*img, p1, p2, angle, startAngle, endAngle, c, thickness, lineType, shift);
|
||||
}
|
||||
|
||||
void Line(Mat img, Point pt1, Point pt2, Scalar color, int thickness) {
|
||||
cv::Point p1(pt1.x, pt1.y);
|
||||
cv::Point p2(pt2.x, pt2.y);
|
||||
@@ -443,28 +413,43 @@ void Rectangle(Mat img, Rect r, Scalar color, int thickness) {
|
||||
);
|
||||
}
|
||||
|
||||
void FillPoly(Mat img, Contours points, Scalar color) {
|
||||
std::vector<std::vector<cv::Point> > pts;
|
||||
|
||||
for (size_t i = 0; i < points.length; i++) {
|
||||
Contour contour = points.contours[i];
|
||||
|
||||
std::vector<cv::Point> cntr;
|
||||
|
||||
for (size_t i = 0; i < contour.length; i++) {
|
||||
cntr.push_back(cv::Point(contour.points[i].x, contour.points[i].y));
|
||||
}
|
||||
|
||||
pts.push_back(cntr);
|
||||
}
|
||||
void RectangleWithParams(Mat img, Rect r, Scalar color, int thickness, int lineType, int shift) {
|
||||
cv::Scalar c = cv::Scalar(color.val1, color.val2, color.val3, color.val4);
|
||||
cv::rectangle(
|
||||
*img,
|
||||
cv::Point(r.x, r.y),
|
||||
cv::Point(r.x + r.width, r.y + r.height),
|
||||
c,
|
||||
thickness,
|
||||
lineType,
|
||||
shift
|
||||
);
|
||||
}
|
||||
|
||||
void FillPoly(Mat img, PointsVector pts, Scalar color) {
|
||||
cv::Scalar c = cv::Scalar(color.val1, color.val2, color.val3, color.val4);
|
||||
|
||||
cv::fillPoly(*img, pts, c);
|
||||
cv::fillPoly(*img, *pts, c);
|
||||
}
|
||||
|
||||
void FillPolyWithParams(Mat img, PointsVector pts, Scalar color, int lineType, int shift, Point offset) {
|
||||
cv::Scalar c = cv::Scalar(color.val1, color.val2, color.val3, color.val4);
|
||||
|
||||
cv::fillPoly(*img, *pts, c, lineType, shift, cv::Point(offset.x, offset.y));
|
||||
}
|
||||
|
||||
void Polylines(Mat img, PointsVector pts, bool isClosed, Scalar color,int thickness) {
|
||||
cv::Scalar c = cv::Scalar(color.val1, color.val2, color.val3, color.val4);
|
||||
|
||||
cv::polylines(*img, *pts, isClosed, c, thickness);
|
||||
}
|
||||
|
||||
struct Size GetTextSize(const char* text, int fontFace, double fontScale, int thickness) {
|
||||
cv::Size sz = cv::getTextSize(text, fontFace, fontScale, thickness, NULL);
|
||||
return GetTextSizeWithBaseline(text, fontFace, fontScale, thickness, NULL);
|
||||
}
|
||||
|
||||
struct Size GetTextSizeWithBaseline(const char* text, int fontFace, double fontScale, int thickness, int* baesline) {
|
||||
cv::Size sz = cv::getTextSize(text, fontFace, fontScale, thickness, baesline);
|
||||
Size size = {sz.width, sz.height};
|
||||
return size;
|
||||
}
|
||||
@@ -528,39 +513,41 @@ void ApplyCustomColorMap(Mat src, Mat dst, Mat colormap) {
|
||||
cv::applyColorMap(*src, *dst, *colormap);
|
||||
}
|
||||
|
||||
Mat GetPerspectiveTransform(Contour src, Contour dst) {
|
||||
Mat GetPerspectiveTransform(PointVector src, PointVector dst) {
|
||||
std::vector<cv::Point2f> src_pts;
|
||||
|
||||
for (size_t i = 0; i < src.length; i++) {
|
||||
src_pts.push_back(cv::Point2f(src.points[i].x, src.points[i].y));
|
||||
}
|
||||
copyPointVectorToPoint2fVector(src, &src_pts);
|
||||
|
||||
std::vector<cv::Point2f> dst_pts;
|
||||
|
||||
for (size_t i = 0; i < dst.length; i++) {
|
||||
dst_pts.push_back(cv::Point2f(dst.points[i].x, dst.points[i].y));
|
||||
}
|
||||
copyPointVectorToPoint2fVector(dst, &dst_pts);
|
||||
|
||||
return new cv::Mat(cv::getPerspectiveTransform(src_pts, dst_pts));
|
||||
}
|
||||
|
||||
void DrawContours(Mat src, Contours contours, int contourIdx, Scalar color, int thickness) {
|
||||
std::vector<std::vector<cv::Point> > cntrs;
|
||||
Mat GetPerspectiveTransform2f(Point2fVector src, Point2fVector dst) {
|
||||
return new cv::Mat(cv::getPerspectiveTransform(*src, *dst));
|
||||
}
|
||||
|
||||
for (size_t i = 0; i < contours.length; i++) {
|
||||
Contour contour = contours.contours[i];
|
||||
Mat GetAffineTransform(PointVector src, PointVector dst) {
|
||||
std::vector<cv::Point2f> src_pts;
|
||||
copyPointVectorToPoint2fVector(src, &src_pts);
|
||||
|
||||
std::vector<cv::Point> cntr;
|
||||
std::vector<cv::Point2f> dst_pts;
|
||||
copyPointVectorToPoint2fVector(dst, &dst_pts);
|
||||
|
||||
for (size_t i = 0; i < contour.length; i++) {
|
||||
cntr.push_back(cv::Point(contour.points[i].x, contour.points[i].y));
|
||||
}
|
||||
return new cv::Mat(cv::getAffineTransform(src_pts, dst_pts));
|
||||
}
|
||||
|
||||
cntrs.push_back(cntr);
|
||||
}
|
||||
Mat GetAffineTransform2f(Point2fVector src, Point2fVector dst) {
|
||||
return new cv::Mat(cv::getAffineTransform(*src, *dst));
|
||||
}
|
||||
|
||||
Mat FindHomography(Mat src, Mat dst, int method, double ransacReprojThreshold, Mat mask, const int maxIters, const double confidence) {
|
||||
return new cv::Mat(cv::findHomography(*src, *dst, method, ransacReprojThreshold, *mask, maxIters, confidence));
|
||||
}
|
||||
|
||||
void DrawContours(Mat src, PointsVector contours, int contourIdx, Scalar color, int thickness) {
|
||||
cv::Scalar c = cv::Scalar(color.val1, color.val2, color.val3, color.val4);
|
||||
cv::drawContours(*src, cntrs, contourIdx, c, thickness);
|
||||
cv::drawContours(*src, *contours, contourIdx, c, thickness);
|
||||
}
|
||||
|
||||
void Sobel(Mat src, Mat dst, int ddepth, int dx, int dy, int ksize, double scale, double delta, int borderType) {
|
||||
@@ -592,12 +579,8 @@ void LogPolar(Mat src, Mat dst, Point center, double m, int flags) {
|
||||
cv::logPolar(*src, *dst, centerPt, m, flags);
|
||||
}
|
||||
|
||||
void FitLine(Contour points, Mat line, int distType, double param, double reps, double aeps) {
|
||||
std::vector<cv::Point> pts;
|
||||
for (size_t i = 0; i < points.length; i++) {
|
||||
pts.push_back(cv::Point(points.points[i].x, points.points[i].y));
|
||||
}
|
||||
cv::fitLine(pts, *line, distType, param, reps, aeps);
|
||||
void FitLine(PointVector pts, Mat line, int distType, double param, double reps, double aeps) {
|
||||
cv::fitLine(*pts, *line, distType, param, reps, aeps);
|
||||
}
|
||||
|
||||
void LinearPolar(Mat src, Mat dst, Point center, double maxRadius, int flags) {
|
||||
@@ -625,3 +608,44 @@ void CLAHE_Apply(CLAHE c, Mat src, Mat dst) {
|
||||
void InvertAffineTransform(Mat src, Mat dst) {
|
||||
cv::invertAffineTransform(*src, *dst);
|
||||
}
|
||||
|
||||
Point2f PhaseCorrelate(Mat src1, Mat src2, Mat window, double* response) {
|
||||
cv::Point2d result = cv::phaseCorrelate(*src1, *src2, *window, response);
|
||||
|
||||
Point2f result2f = {
|
||||
.x = float(result.x),
|
||||
.y = float(result.y),
|
||||
};
|
||||
return result2f;
|
||||
}
|
||||
|
||||
void Mat_Accumulate(Mat src, Mat dst) {
|
||||
cv::accumulate(*src, *dst);
|
||||
}
|
||||
void Mat_AccumulateWithMask(Mat src, Mat dst, Mat mask) {
|
||||
cv::accumulate(*src, *dst, *mask);
|
||||
}
|
||||
|
||||
void Mat_AccumulateSquare(Mat src, Mat dst) {
|
||||
cv::accumulateSquare(*src, *dst);
|
||||
}
|
||||
|
||||
void Mat_AccumulateSquareWithMask(Mat src, Mat dst, Mat mask) {
|
||||
cv::accumulateSquare(*src, *dst, *mask);
|
||||
}
|
||||
|
||||
void Mat_AccumulateProduct(Mat src1, Mat src2, Mat dst) {
|
||||
cv::accumulateProduct(*src1, *src2, *dst);
|
||||
}
|
||||
|
||||
void Mat_AccumulateProductWithMask(Mat src1, Mat src2, Mat dst, Mat mask) {
|
||||
cv::accumulateProduct(*src1, *src2, *dst, *mask);
|
||||
}
|
||||
|
||||
void Mat_AccumulatedWeighted(Mat src, Mat dst, double alpha) {
|
||||
cv::accumulateWeighted(*src, *dst, alpha);
|
||||
}
|
||||
|
||||
void Mat_AccumulatedWeightedWithMask(Mat src, Mat dst, double alpha, Mat mask) {
|
||||
cv::accumulateWeighted(*src, *dst, alpha, *mask);
|
||||
}
|
||||
|
||||
901
vendor/gocv.io/x/gocv/imgproc.go
generated
vendored
901
vendor/gocv.io/x/gocv/imgproc.go
generated
vendored
File diff suppressed because it is too large
Load Diff
55
vendor/gocv.io/x/gocv/imgproc.h
generated
vendored
55
vendor/gocv.io/x/gocv/imgproc.h
generated
vendored
@@ -16,37 +16,41 @@ typedef void* CLAHE;
|
||||
|
||||
#include "core.h"
|
||||
|
||||
double ArcLength(Contour curve, bool is_closed);
|
||||
Contour ApproxPolyDP(Contour curve, double epsilon, bool closed);
|
||||
double ArcLength(PointVector curve, bool is_closed);
|
||||
PointVector ApproxPolyDP(PointVector curve, double epsilon, bool closed);
|
||||
void CvtColor(Mat src, Mat dst, int code);
|
||||
void EqualizeHist(Mat src, Mat dst);
|
||||
void CalcHist(struct Mats mats, IntVector chans, Mat mask, Mat hist, IntVector sz, FloatVector rng, bool acc);
|
||||
void CalcBackProject(struct Mats mats, IntVector chans, Mat hist, Mat backProject, FloatVector rng, bool uniform);
|
||||
double CompareHist(Mat hist1, Mat hist2, int method);
|
||||
void ConvexHull(Contour points, Mat hull, bool clockwise, bool returnPoints);
|
||||
void ConvexityDefects(Contour points, Mat hull, Mat result);
|
||||
void ConvexHull(PointVector points, Mat hull, bool clockwise, bool returnPoints);
|
||||
void ConvexityDefects(PointVector points, Mat hull, Mat result);
|
||||
void BilateralFilter(Mat src, Mat dst, int d, double sc, double ss);
|
||||
void Blur(Mat src, Mat dst, Size ps);
|
||||
void BoxFilter(Mat src, Mat dst, int ddepth, Size ps);
|
||||
void SqBoxFilter(Mat src, Mat dst, int ddepth, Size ps);
|
||||
void Dilate(Mat src, Mat dst, Mat kernel);
|
||||
void DilateWithParams(Mat src, Mat dst, Mat kernel, Point anchor, int iterations, int borderType, Scalar borderValue);
|
||||
void DistanceTransform(Mat src, Mat dst, Mat labels, int distanceType, int maskSize, int labelType);
|
||||
void Erode(Mat src, Mat dst, Mat kernel);
|
||||
void ErodeWithParams(Mat src, Mat dst, Mat kernel, Point anchor, int iterations, int borderType);
|
||||
void MatchTemplate(Mat image, Mat templ, Mat result, int method, Mat mask);
|
||||
struct Moment Moments(Mat src, bool binaryImage);
|
||||
void PyrDown(Mat src, Mat dst, Size dstsize, int borderType);
|
||||
void PyrUp(Mat src, Mat dst, Size dstsize, int borderType);
|
||||
struct Rect BoundingRect(Contour con);
|
||||
struct Rect BoundingRect(PointVector pts);
|
||||
void BoxPoints(RotatedRect rect, Mat boxPts);
|
||||
double ContourArea(Contour con);
|
||||
struct RotatedRect MinAreaRect(Points points);
|
||||
struct RotatedRect FitEllipse(Points points);
|
||||
void MinEnclosingCircle(Points points, Point2f* center, float* radius);
|
||||
struct Contours FindContours(Mat src, int mode, int method);
|
||||
double ContourArea(PointVector pts);
|
||||
struct RotatedRect MinAreaRect(PointVector pts);
|
||||
struct RotatedRect FitEllipse(PointVector pts);
|
||||
void MinEnclosingCircle(PointVector pts, Point2f* center, float* radius);
|
||||
PointsVector FindContours(Mat src, Mat hierarchy, int mode, int method);
|
||||
double PointPolygonTest(PointVector pts, Point pt, bool measureDist);
|
||||
int ConnectedComponents(Mat src, Mat dst, int connectivity, int ltype, int ccltype);
|
||||
int ConnectedComponentsWithStats(Mat src, Mat labels, Mat stats, Mat centroids, int connectivity, int ltype, int ccltype);
|
||||
|
||||
void GaussianBlur(Mat src, Mat dst, Size ps, double sX, double sY, int bt);
|
||||
Mat GetGaussianKernel(int ksize, double sigma, int ktype);
|
||||
void Laplacian(Mat src, Mat dst, int dDepth, int kSize, double scale, double delta, int borderType);
|
||||
void Scharr(Mat src, Mat dst, int dDepth, int dx, int dy, double scale, double delta,
|
||||
int borderType);
|
||||
@@ -70,18 +74,25 @@ void HoughLinesPointSet(Mat points, Mat lines, int lines_max, int threshold,
|
||||
double min_rho, double max_rho, double rho_step,
|
||||
double min_theta, double max_theta, double theta_step);
|
||||
void Integral(Mat src, Mat sum, Mat sqsum, Mat tilted);
|
||||
void Threshold(Mat src, Mat dst, double thresh, double maxvalue, int typ);
|
||||
double Threshold(Mat src, Mat dst, double thresh, double maxvalue, int typ);
|
||||
void AdaptiveThreshold(Mat src, Mat dst, double maxValue, int adaptiveTyp, int typ, int blockSize,
|
||||
double c);
|
||||
|
||||
void ArrowedLine(Mat img, Point pt1, Point pt2, Scalar color, int thickness);
|
||||
void Circle(Mat img, Point center, int radius, Scalar color, int thickness);
|
||||
void CircleWithParams(Mat img, Point center, int radius, Scalar color, int thickness, int lineType, int shift);
|
||||
void Ellipse(Mat img, Point center, Point axes, double angle, double
|
||||
startAngle, double endAngle, Scalar color, int thickness);
|
||||
void EllipseWithParams(Mat img, Point center, Point axes, double angle, double
|
||||
startAngle, double endAngle, Scalar color, int thickness, int lineType, int shift);
|
||||
void Line(Mat img, Point pt1, Point pt2, Scalar color, int thickness);
|
||||
void Rectangle(Mat img, Rect rect, Scalar color, int thickness);
|
||||
void FillPoly(Mat img, Contours points, Scalar color);
|
||||
void RectangleWithParams(Mat img, Rect rect, Scalar color, int thickness, int lineType, int shift);
|
||||
void FillPoly(Mat img, PointsVector points, Scalar color);
|
||||
void FillPolyWithParams(Mat img, PointsVector points, Scalar color, int lineType, int shift, Point offset);
|
||||
void Polylines(Mat img, PointsVector points, bool isClosed, Scalar color, int thickness);
|
||||
struct Size GetTextSize(const char* text, int fontFace, double fontScale, int thickness);
|
||||
struct Size GetTextSizeWithBaseline(const char* text, int fontFace, double fontScale, int thickness, int* baseline);
|
||||
void PutText(Mat img, const char* text, Point org, int fontFace, double fontScale,
|
||||
Scalar color, int thickness);
|
||||
void PutTextWithParams(Mat img, const char* text, Point org, int fontFace, double fontScale,
|
||||
@@ -96,15 +107,19 @@ void WarpPerspective(Mat src, Mat dst, Mat m, Size dsize);
|
||||
void Watershed(Mat image, Mat markers);
|
||||
void ApplyColorMap(Mat src, Mat dst, int colormap);
|
||||
void ApplyCustomColorMap(Mat src, Mat dst, Mat colormap);
|
||||
Mat GetPerspectiveTransform(Contour src, Contour dst);
|
||||
void DrawContours(Mat src, Contours contours, int contourIdx, Scalar color, int thickness);
|
||||
Mat GetPerspectiveTransform(PointVector src, PointVector dst);
|
||||
Mat GetPerspectiveTransform2f(Point2fVector src, Point2fVector dst);
|
||||
Mat GetAffineTransform(PointVector src, PointVector dst);
|
||||
Mat GetAffineTransform2f(Point2fVector src, Point2fVector dst);
|
||||
Mat FindHomography(Mat src, Mat dst, int method, double ransacReprojThreshold, Mat mask, const int maxIters, const double confidence) ;
|
||||
void DrawContours(Mat src, PointsVector contours, int contourIdx, Scalar color, int thickness);
|
||||
void Sobel(Mat src, Mat dst, int ddepth, int dx, int dy, int ksize, double scale, double delta, int borderType);
|
||||
void SpatialGradient(Mat src, Mat dx, Mat dy, int ksize, int borderType);
|
||||
void Remap(Mat src, Mat dst, Mat map1, Mat map2, int interpolation, int borderMode, Scalar borderValue);
|
||||
void Filter2D(Mat src, Mat dst, int ddepth, Mat kernel, Point anchor, double delta, int borderType);
|
||||
void SepFilter2D(Mat src, Mat dst, int ddepth, Mat kernelX, Mat kernelY, Point anchor, double delta, int borderType);
|
||||
void LogPolar(Mat src, Mat dst, Point center, double m, int flags);
|
||||
void FitLine(Contour points, Mat line, int distType, double param, double reps, double aeps);
|
||||
void FitLine(PointVector pts, Mat line, int distType, double param, double reps, double aeps);
|
||||
void LinearPolar(Mat src, Mat dst, Point center, double maxRadius, int flags);
|
||||
bool ClipLine(Size imgSize, Point pt1, Point pt2);
|
||||
CLAHE CLAHE_Create();
|
||||
@@ -112,7 +127,15 @@ CLAHE CLAHE_CreateWithParams(double clipLimit, Size tileGridSize);
|
||||
void CLAHE_Close(CLAHE c);
|
||||
void CLAHE_Apply(CLAHE c, Mat src, Mat dst);
|
||||
void InvertAffineTransform(Mat src, Mat dst);
|
||||
|
||||
Point2f PhaseCorrelate(Mat src1, Mat src2, Mat window, double* response);
|
||||
void Mat_Accumulate(Mat src, Mat dst);
|
||||
void Mat_AccumulateWithMask(Mat src, Mat dst, Mat mask);
|
||||
void Mat_AccumulateSquare(Mat src, Mat dst);
|
||||
void Mat_AccumulateSquareWithMask(Mat src, Mat dst, Mat mask);
|
||||
void Mat_AccumulateProduct(Mat src1, Mat src2, Mat dst);
|
||||
void Mat_AccumulateProductWithMask(Mat src1, Mat src2, Mat dst, Mat mask);
|
||||
void Mat_AccumulatedWeighted(Mat src, Mat dst, double alpha);
|
||||
void Mat_AccumulatedWeightedWithMask(Mat src, Mat dst, double alpha, Mat mask);
|
||||
#ifdef __cplusplus
|
||||
}
|
||||
#endif
|
||||
|
||||
274
vendor/gocv.io/x/gocv/imgproc_colorcodes.go
generated
vendored
274
vendor/gocv.io/x/gocv/imgproc_colorcodes.go
generated
vendored
@@ -12,340 +12,340 @@ const (
|
||||
ColorBGRToBGRA ColorConversionCode = 0
|
||||
|
||||
// ColorBGRAToBGR removes alpha channel from BGR image.
|
||||
ColorBGRAToBGR = 1
|
||||
ColorBGRAToBGR ColorConversionCode = 1
|
||||
|
||||
// ColorBGRToRGBA converts from BGR to RGB with alpha channel.
|
||||
ColorBGRToRGBA = 2
|
||||
ColorBGRToRGBA ColorConversionCode = 2
|
||||
|
||||
// ColorRGBAToBGR converts from RGB with alpha to BGR color space.
|
||||
ColorRGBAToBGR = 3
|
||||
ColorRGBAToBGR ColorConversionCode = 3
|
||||
|
||||
// ColorBGRToRGB converts from BGR to RGB without alpha channel.
|
||||
ColorBGRToRGB = 4
|
||||
ColorBGRToRGB ColorConversionCode = 4
|
||||
|
||||
// ColorBGRAToRGBA converts from BGR with alpha channel
|
||||
// to RGB with alpha channel.
|
||||
ColorBGRAToRGBA = 5
|
||||
ColorBGRAToRGBA ColorConversionCode = 5
|
||||
|
||||
// ColorBGRToGray converts from BGR to grayscale.
|
||||
ColorBGRToGray = 6
|
||||
ColorBGRToGray ColorConversionCode = 6
|
||||
|
||||
// ColorRGBToGray converts from RGB to grayscale.
|
||||
ColorRGBToGray = 7
|
||||
ColorRGBToGray ColorConversionCode = 7
|
||||
|
||||
// ColorGrayToBGR converts from grayscale to BGR.
|
||||
ColorGrayToBGR = 8
|
||||
ColorGrayToBGR ColorConversionCode = 8
|
||||
|
||||
// ColorGrayToBGRA converts from grayscale to BGR with alpha channel.
|
||||
ColorGrayToBGRA = 9
|
||||
ColorGrayToBGRA ColorConversionCode = 9
|
||||
|
||||
// ColorBGRAToGray converts from BGR with alpha channel to grayscale.
|
||||
ColorBGRAToGray = 10
|
||||
ColorBGRAToGray ColorConversionCode = 10
|
||||
|
||||
// ColorRGBAToGray converts from RGB with alpha channel to grayscale.
|
||||
ColorRGBAToGray = 11
|
||||
ColorRGBAToGray ColorConversionCode = 11
|
||||
|
||||
// ColorBGRToBGR565 converts from BGR to BGR565 (16-bit images).
|
||||
ColorBGRToBGR565 = 12
|
||||
ColorBGRToBGR565 ColorConversionCode = 12
|
||||
|
||||
// ColorRGBToBGR565 converts from RGB to BGR565 (16-bit images).
|
||||
ColorRGBToBGR565 = 13
|
||||
ColorRGBToBGR565 ColorConversionCode = 13
|
||||
|
||||
// ColorBGR565ToBGR converts from BGR565 (16-bit images) to BGR.
|
||||
ColorBGR565ToBGR = 14
|
||||
ColorBGR565ToBGR ColorConversionCode = 14
|
||||
|
||||
// ColorBGR565ToRGB converts from BGR565 (16-bit images) to RGB.
|
||||
ColorBGR565ToRGB = 15
|
||||
ColorBGR565ToRGB ColorConversionCode = 15
|
||||
|
||||
// ColorBGRAToBGR565 converts from BGRA (with alpha channel)
|
||||
// to BGR565 (16-bit images).
|
||||
ColorBGRAToBGR565 = 16
|
||||
ColorBGRAToBGR565 ColorConversionCode = 16
|
||||
|
||||
// ColorRGBAToBGR565 converts from RGBA (with alpha channel)
|
||||
// to BGR565 (16-bit images).
|
||||
ColorRGBAToBGR565 = 17
|
||||
ColorRGBAToBGR565 ColorConversionCode = 17
|
||||
|
||||
// ColorBGR565ToBGRA converts from BGR565 (16-bit images)
|
||||
// to BGRA (with alpha channel).
|
||||
ColorBGR565ToBGRA = 18
|
||||
ColorBGR565ToBGRA ColorConversionCode = 18
|
||||
|
||||
// ColorBGR565ToRGBA converts from BGR565 (16-bit images)
|
||||
// to RGBA (with alpha channel).
|
||||
ColorBGR565ToRGBA = 19
|
||||
ColorBGR565ToRGBA ColorConversionCode = 19
|
||||
|
||||
// ColorGrayToBGR565 converts from grayscale
|
||||
// to BGR565 (16-bit images).
|
||||
ColorGrayToBGR565 = 20
|
||||
ColorGrayToBGR565 ColorConversionCode = 20
|
||||
|
||||
// ColorBGR565ToGray converts from BGR565 (16-bit images)
|
||||
// to grayscale.
|
||||
ColorBGR565ToGray = 21
|
||||
ColorBGR565ToGray ColorConversionCode = 21
|
||||
|
||||
// ColorBGRToBGR555 converts from BGR to BGR555 (16-bit images).
|
||||
ColorBGRToBGR555 = 22
|
||||
ColorBGRToBGR555 ColorConversionCode = 22
|
||||
|
||||
// ColorRGBToBGR555 converts from RGB to BGR555 (16-bit images).
|
||||
ColorRGBToBGR555 = 23
|
||||
ColorRGBToBGR555 ColorConversionCode = 23
|
||||
|
||||
// ColorBGR555ToBGR converts from BGR555 (16-bit images) to BGR.
|
||||
ColorBGR555ToBGR = 24
|
||||
ColorBGR555ToBGR ColorConversionCode = 24
|
||||
|
||||
// ColorBGR555ToRGB converts from BGR555 (16-bit images) to RGB.
|
||||
ColorBGR555ToRGB = 25
|
||||
ColorBGR555ToRGB ColorConversionCode = 25
|
||||
|
||||
// ColorBGRAToBGR555 converts from BGRA (with alpha channel)
|
||||
// to BGR555 (16-bit images).
|
||||
ColorBGRAToBGR555 = 26
|
||||
ColorBGRAToBGR555 ColorConversionCode = 26
|
||||
|
||||
// ColorRGBAToBGR555 converts from RGBA (with alpha channel)
|
||||
// to BGR555 (16-bit images).
|
||||
ColorRGBAToBGR555 = 27
|
||||
ColorRGBAToBGR555 ColorConversionCode = 27
|
||||
|
||||
// ColorBGR555ToBGRA converts from BGR555 (16-bit images)
|
||||
// to BGRA (with alpha channel).
|
||||
ColorBGR555ToBGRA = 28
|
||||
ColorBGR555ToBGRA ColorConversionCode = 28
|
||||
|
||||
// ColorBGR555ToRGBA converts from BGR555 (16-bit images)
|
||||
// to RGBA (with alpha channel).
|
||||
ColorBGR555ToRGBA = 29
|
||||
ColorBGR555ToRGBA ColorConversionCode = 29
|
||||
|
||||
// ColorGrayToBGR555 converts from grayscale to BGR555 (16-bit images).
|
||||
ColorGrayToBGR555 = 30
|
||||
ColorGrayToBGR555 ColorConversionCode = 30
|
||||
|
||||
// ColorBGR555ToGRAY converts from BGR555 (16-bit images) to grayscale.
|
||||
ColorBGR555ToGRAY = 31
|
||||
ColorBGR555ToGRAY ColorConversionCode = 31
|
||||
|
||||
// ColorBGRToXYZ converts from BGR to CIE XYZ.
|
||||
ColorBGRToXYZ = 32
|
||||
ColorBGRToXYZ ColorConversionCode = 32
|
||||
|
||||
// ColorRGBToXYZ converts from RGB to CIE XYZ.
|
||||
ColorRGBToXYZ = 33
|
||||
ColorRGBToXYZ ColorConversionCode = 33
|
||||
|
||||
// ColorXYZToBGR converts from CIE XYZ to BGR.
|
||||
ColorXYZToBGR = 34
|
||||
ColorXYZToBGR ColorConversionCode = 34
|
||||
|
||||
// ColorXYZToRGB converts from CIE XYZ to RGB.
|
||||
ColorXYZToRGB = 35
|
||||
ColorXYZToRGB ColorConversionCode = 35
|
||||
|
||||
// ColorBGRToYCrCb converts from BGR to luma-chroma (aka YCC).
|
||||
ColorBGRToYCrCb = 36
|
||||
ColorBGRToYCrCb ColorConversionCode = 36
|
||||
|
||||
// ColorRGBToYCrCb converts from RGB to luma-chroma (aka YCC).
|
||||
ColorRGBToYCrCb = 37
|
||||
ColorRGBToYCrCb ColorConversionCode = 37
|
||||
|
||||
// ColorYCrCbToBGR converts from luma-chroma (aka YCC) to BGR.
|
||||
ColorYCrCbToBGR = 38
|
||||
ColorYCrCbToBGR ColorConversionCode = 38
|
||||
|
||||
// ColorYCrCbToRGB converts from luma-chroma (aka YCC) to RGB.
|
||||
ColorYCrCbToRGB = 39
|
||||
ColorYCrCbToRGB ColorConversionCode = 39
|
||||
|
||||
// ColorBGRToHSV converts from BGR to HSV (hue saturation value).
|
||||
ColorBGRToHSV = 40
|
||||
ColorBGRToHSV ColorConversionCode = 40
|
||||
|
||||
// ColorRGBToHSV converts from RGB to HSV (hue saturation value).
|
||||
ColorRGBToHSV = 41
|
||||
ColorRGBToHSV ColorConversionCode = 41
|
||||
|
||||
// ColorBGRToLab converts from BGR to CIE Lab.
|
||||
ColorBGRToLab = 44
|
||||
ColorBGRToLab ColorConversionCode = 44
|
||||
|
||||
// ColorRGBToLab converts from RGB to CIE Lab.
|
||||
ColorRGBToLab = 45
|
||||
ColorRGBToLab ColorConversionCode = 45
|
||||
|
||||
// ColorBGRToLuv converts from BGR to CIE Luv.
|
||||
ColorBGRToLuv = 50
|
||||
ColorBGRToLuv ColorConversionCode = 50
|
||||
|
||||
// ColorRGBToLuv converts from RGB to CIE Luv.
|
||||
ColorRGBToLuv = 51
|
||||
ColorRGBToLuv ColorConversionCode = 51
|
||||
|
||||
// ColorBGRToHLS converts from BGR to HLS (hue lightness saturation).
|
||||
ColorBGRToHLS = 52
|
||||
ColorBGRToHLS ColorConversionCode = 52
|
||||
|
||||
// ColorRGBToHLS converts from RGB to HLS (hue lightness saturation).
|
||||
ColorRGBToHLS = 53
|
||||
ColorRGBToHLS ColorConversionCode = 53
|
||||
|
||||
// ColorHSVToBGR converts from HSV (hue saturation value) to BGR.
|
||||
ColorHSVToBGR = 54
|
||||
ColorHSVToBGR ColorConversionCode = 54
|
||||
|
||||
// ColorHSVToRGB converts from HSV (hue saturation value) to RGB.
|
||||
ColorHSVToRGB = 55
|
||||
ColorHSVToRGB ColorConversionCode = 55
|
||||
|
||||
// ColorLabToBGR converts from CIE Lab to BGR.
|
||||
ColorLabToBGR = 56
|
||||
ColorLabToBGR ColorConversionCode = 56
|
||||
|
||||
// ColorLabToRGB converts from CIE Lab to RGB.
|
||||
ColorLabToRGB = 57
|
||||
ColorLabToRGB ColorConversionCode = 57
|
||||
|
||||
// ColorLuvToBGR converts from CIE Luv to BGR.
|
||||
ColorLuvToBGR = 58
|
||||
ColorLuvToBGR ColorConversionCode = 58
|
||||
|
||||
// ColorLuvToRGB converts from CIE Luv to RGB.
|
||||
ColorLuvToRGB = 59
|
||||
ColorLuvToRGB ColorConversionCode = 59
|
||||
|
||||
// ColorHLSToBGR converts from HLS (hue lightness saturation) to BGR.
|
||||
ColorHLSToBGR = 60
|
||||
ColorHLSToBGR ColorConversionCode = 60
|
||||
|
||||
// ColorHLSToRGB converts from HLS (hue lightness saturation) to RGB.
|
||||
ColorHLSToRGB = 61
|
||||
ColorHLSToRGB ColorConversionCode = 61
|
||||
|
||||
// ColorBGRToHSVFull converts from BGR to HSV (hue saturation value) full.
|
||||
ColorBGRToHSVFull = 66
|
||||
ColorBGRToHSVFull ColorConversionCode = 66
|
||||
|
||||
// ColorRGBToHSVFull converts from RGB to HSV (hue saturation value) full.
|
||||
ColorRGBToHSVFull = 67
|
||||
ColorRGBToHSVFull ColorConversionCode = 67
|
||||
|
||||
// ColorBGRToHLSFull converts from BGR to HLS (hue lightness saturation) full.
|
||||
ColorBGRToHLSFull = 68
|
||||
ColorBGRToHLSFull ColorConversionCode = 68
|
||||
|
||||
// ColorRGBToHLSFull converts from RGB to HLS (hue lightness saturation) full.
|
||||
ColorRGBToHLSFull = 69
|
||||
ColorRGBToHLSFull ColorConversionCode = 69
|
||||
|
||||
// ColorHSVToBGRFull converts from HSV (hue saturation value) to BGR full.
|
||||
ColorHSVToBGRFull = 70
|
||||
ColorHSVToBGRFull ColorConversionCode = 70
|
||||
|
||||
// ColorHSVToRGBFull converts from HSV (hue saturation value) to RGB full.
|
||||
ColorHSVToRGBFull = 71
|
||||
ColorHSVToRGBFull ColorConversionCode = 71
|
||||
|
||||
// ColorHLSToBGRFull converts from HLS (hue lightness saturation) to BGR full.
|
||||
ColorHLSToBGRFull = 72
|
||||
ColorHLSToBGRFull ColorConversionCode = 72
|
||||
|
||||
// ColorHLSToRGBFull converts from HLS (hue lightness saturation) to RGB full.
|
||||
ColorHLSToRGBFull = 73
|
||||
ColorHLSToRGBFull ColorConversionCode = 73
|
||||
|
||||
// ColorLBGRToLab converts from LBGR to CIE Lab.
|
||||
ColorLBGRToLab = 74
|
||||
ColorLBGRToLab ColorConversionCode = 74
|
||||
|
||||
// ColorLRGBToLab converts from LRGB to CIE Lab.
|
||||
ColorLRGBToLab = 75
|
||||
ColorLRGBToLab ColorConversionCode = 75
|
||||
|
||||
// ColorLBGRToLuv converts from LBGR to CIE Luv.
|
||||
ColorLBGRToLuv = 76
|
||||
ColorLBGRToLuv ColorConversionCode = 76
|
||||
|
||||
// ColorLRGBToLuv converts from LRGB to CIE Luv.
|
||||
ColorLRGBToLuv = 77
|
||||
ColorLRGBToLuv ColorConversionCode = 77
|
||||
|
||||
// ColorLabToLBGR converts from CIE Lab to LBGR.
|
||||
ColorLabToLBGR = 78
|
||||
ColorLabToLBGR ColorConversionCode = 78
|
||||
|
||||
// ColorLabToLRGB converts from CIE Lab to LRGB.
|
||||
ColorLabToLRGB = 79
|
||||
ColorLabToLRGB ColorConversionCode = 79
|
||||
|
||||
// ColorLuvToLBGR converts from CIE Luv to LBGR.
|
||||
ColorLuvToLBGR = 80
|
||||
ColorLuvToLBGR ColorConversionCode = 80
|
||||
|
||||
// ColorLuvToLRGB converts from CIE Luv to LRGB.
|
||||
ColorLuvToLRGB = 81
|
||||
ColorLuvToLRGB ColorConversionCode = 81
|
||||
|
||||
// ColorBGRToYUV converts from BGR to YUV.
|
||||
ColorBGRToYUV = 82
|
||||
ColorBGRToYUV ColorConversionCode = 82
|
||||
|
||||
// ColorRGBToYUV converts from RGB to YUV.
|
||||
ColorRGBToYUV = 83
|
||||
ColorRGBToYUV ColorConversionCode = 83
|
||||
|
||||
// ColorYUVToBGR converts from YUV to BGR.
|
||||
ColorYUVToBGR = 84
|
||||
ColorYUVToBGR ColorConversionCode = 84
|
||||
|
||||
// ColorYUVToRGB converts from YUV to RGB.
|
||||
ColorYUVToRGB = 85
|
||||
ColorYUVToRGB ColorConversionCode = 85
|
||||
|
||||
// ColorYUVToRGBNV12 converts from YUV 4:2:0 to RGB NV12.
|
||||
ColorYUVToRGBNV12 = 90
|
||||
ColorYUVToRGBNV12 ColorConversionCode = 90
|
||||
|
||||
// ColorYUVToBGRNV12 converts from YUV 4:2:0 to BGR NV12.
|
||||
ColorYUVToBGRNV12 = 91
|
||||
ColorYUVToBGRNV12 ColorConversionCode = 91
|
||||
|
||||
// ColorYUVToRGBNV21 converts from YUV 4:2:0 to RGB NV21.
|
||||
ColorYUVToRGBNV21 = 92
|
||||
ColorYUVToRGBNV21 ColorConversionCode = 92
|
||||
|
||||
// ColorYUVToBGRNV21 converts from YUV 4:2:0 to BGR NV21.
|
||||
ColorYUVToBGRNV21 = 93
|
||||
ColorYUVToBGRNV21 ColorConversionCode = 93
|
||||
|
||||
// ColorYUVToRGBANV12 converts from YUV 4:2:0 to RGBA NV12.
|
||||
ColorYUVToRGBANV12 = 94
|
||||
ColorYUVToRGBANV12 ColorConversionCode = 94
|
||||
|
||||
// ColorYUVToBGRANV12 converts from YUV 4:2:0 to BGRA NV12.
|
||||
ColorYUVToBGRANV12 = 95
|
||||
ColorYUVToBGRANV12 ColorConversionCode = 95
|
||||
|
||||
// ColorYUVToRGBANV21 converts from YUV 4:2:0 to RGBA NV21.
|
||||
ColorYUVToRGBANV21 = 96
|
||||
ColorYUVToRGBANV21 ColorConversionCode = 96
|
||||
|
||||
// ColorYUVToBGRANV21 converts from YUV 4:2:0 to BGRA NV21.
|
||||
ColorYUVToBGRANV21 = 97
|
||||
ColorYUVToBGRANV21 ColorConversionCode = 97
|
||||
|
||||
ColorYUVToRGBYV12 = 98
|
||||
ColorYUVToBGRYV12 = 99
|
||||
ColorYUVToRGBIYUV = 100
|
||||
ColorYUVToBGRIYUV = 101
|
||||
ColorYUVToRGBYV12 ColorConversionCode = 98
|
||||
ColorYUVToBGRYV12 ColorConversionCode = 99
|
||||
ColorYUVToRGBIYUV ColorConversionCode = 100
|
||||
ColorYUVToBGRIYUV ColorConversionCode = 101
|
||||
|
||||
ColorYUVToRGBAYV12 = 102
|
||||
ColorYUVToBGRAYV12 = 103
|
||||
ColorYUVToRGBAIYUV = 104
|
||||
ColorYUVToBGRAIYUV = 105
|
||||
ColorYUVToRGBAYV12 ColorConversionCode = 102
|
||||
ColorYUVToBGRAYV12 ColorConversionCode = 103
|
||||
ColorYUVToRGBAIYUV ColorConversionCode = 104
|
||||
ColorYUVToBGRAIYUV ColorConversionCode = 105
|
||||
|
||||
ColorYUVToGRAY420 = 106
|
||||
ColorYUVToGRAY420 ColorConversionCode = 106
|
||||
|
||||
// YUV 4:2:2 family to RGB
|
||||
ColorYUVToRGBUYVY = 107
|
||||
ColorYUVToBGRUYVY = 108
|
||||
ColorYUVToRGBUYVY ColorConversionCode = 107
|
||||
ColorYUVToBGRUYVY ColorConversionCode = 108
|
||||
|
||||
ColorYUVToRGBAUYVY = 111
|
||||
ColorYUVToBGRAUYVY = 112
|
||||
ColorYUVToRGBAUYVY ColorConversionCode = 111
|
||||
ColorYUVToBGRAUYVY ColorConversionCode = 112
|
||||
|
||||
ColorYUVToRGBYUY2 = 115
|
||||
ColorYUVToBGRYUY2 = 116
|
||||
ColorYUVToRGBYVYU = 117
|
||||
ColorYUVToBGRYVYU = 118
|
||||
ColorYUVToRGBYUY2 ColorConversionCode = 115
|
||||
ColorYUVToBGRYUY2 ColorConversionCode = 116
|
||||
ColorYUVToRGBYVYU ColorConversionCode = 117
|
||||
ColorYUVToBGRYVYU ColorConversionCode = 118
|
||||
|
||||
ColorYUVToRGBAYUY2 = 119
|
||||
ColorYUVToBGRAYUY2 = 120
|
||||
ColorYUVToRGBAYVYU = 121
|
||||
ColorYUVToBGRAYVYU = 122
|
||||
ColorYUVToRGBAYUY2 ColorConversionCode = 119
|
||||
ColorYUVToBGRAYUY2 ColorConversionCode = 120
|
||||
ColorYUVToRGBAYVYU ColorConversionCode = 121
|
||||
ColorYUVToBGRAYVYU ColorConversionCode = 122
|
||||
|
||||
ColorYUVToGRAYUYVY = 123
|
||||
ColorYUVToGRAYYUY2 = 124
|
||||
ColorYUVToGRAYUYVY ColorConversionCode = 123
|
||||
ColorYUVToGRAYYUY2 ColorConversionCode = 124
|
||||
|
||||
// alpha premultiplication
|
||||
ColorRGBATomRGBA = 125
|
||||
ColormRGBAToRGBA = 126
|
||||
ColorRGBATomRGBA ColorConversionCode = 125
|
||||
ColormRGBAToRGBA ColorConversionCode = 126
|
||||
|
||||
// RGB to YUV 4:2:0 family
|
||||
ColorRGBToYUVI420 = 127
|
||||
ColorBGRToYUVI420 = 128
|
||||
ColorRGBToYUVI420 ColorConversionCode = 127
|
||||
ColorBGRToYUVI420 ColorConversionCode = 128
|
||||
|
||||
ColorRGBAToYUVI420 = 129
|
||||
ColorBGRAToYUVI420 = 130
|
||||
ColorRGBToYUVYV12 = 131
|
||||
ColorBGRToYUVYV12 = 132
|
||||
ColorRGBAToYUVYV12 = 133
|
||||
ColorBGRAToYUVYV12 = 134
|
||||
ColorRGBAToYUVI420 ColorConversionCode = 129
|
||||
ColorBGRAToYUVI420 ColorConversionCode = 130
|
||||
ColorRGBToYUVYV12 ColorConversionCode = 131
|
||||
ColorBGRToYUVYV12 ColorConversionCode = 132
|
||||
ColorRGBAToYUVYV12 ColorConversionCode = 133
|
||||
ColorBGRAToYUVYV12 ColorConversionCode = 134
|
||||
|
||||
// Demosaicing
|
||||
ColorBayerBGToBGR = 46
|
||||
ColorBayerGBToBGR = 47
|
||||
ColorBayerRGToBGR = 48
|
||||
ColorBayerGRToBGR = 49
|
||||
ColorBayerBGToBGR ColorConversionCode = 46
|
||||
ColorBayerGBToBGR ColorConversionCode = 47
|
||||
ColorBayerRGToBGR ColorConversionCode = 48
|
||||
ColorBayerGRToBGR ColorConversionCode = 49
|
||||
|
||||
ColorBayerBGToGRAY = 86
|
||||
ColorBayerGBToGRAY = 87
|
||||
ColorBayerRGToGRAY = 88
|
||||
ColorBayerGRToGRAY = 89
|
||||
ColorBayerBGToGRAY ColorConversionCode = 86
|
||||
ColorBayerGBToGRAY ColorConversionCode = 87
|
||||
ColorBayerRGToGRAY ColorConversionCode = 88
|
||||
ColorBayerGRToGRAY ColorConversionCode = 89
|
||||
|
||||
// Demosaicing using Variable Number of Gradients
|
||||
ColorBayerBGToBGRVNG = 62
|
||||
ColorBayerGBToBGRVNG = 63
|
||||
ColorBayerRGToBGRVNG = 64
|
||||
ColorBayerGRToBGRVNG = 65
|
||||
ColorBayerBGToBGRVNG ColorConversionCode = 62
|
||||
ColorBayerGBToBGRVNG ColorConversionCode = 63
|
||||
ColorBayerRGToBGRVNG ColorConversionCode = 64
|
||||
ColorBayerGRToBGRVNG ColorConversionCode = 65
|
||||
|
||||
// Edge-Aware Demosaicing
|
||||
ColorBayerBGToBGREA = 135
|
||||
ColorBayerGBToBGREA = 136
|
||||
ColorBayerRGToBGREA = 137
|
||||
ColorBayerGRToBGREA = 138
|
||||
ColorBayerBGToBGREA ColorConversionCode = 135
|
||||
ColorBayerGBToBGREA ColorConversionCode = 136
|
||||
ColorBayerRGToBGREA ColorConversionCode = 137
|
||||
ColorBayerGRToBGREA ColorConversionCode = 138
|
||||
|
||||
// Demosaicing with alpha channel
|
||||
ColorBayerBGToBGRA = 139
|
||||
ColorBayerGBToBGRA = 140
|
||||
ColorBayerRGToBGRA = 141
|
||||
ColorBayerGRToBGRA = 142
|
||||
ColorBayerBGToBGRA ColorConversionCode = 139
|
||||
ColorBayerGBToBGRA ColorConversionCode = 140
|
||||
ColorBayerRGToBGRA ColorConversionCode = 141
|
||||
ColorBayerGRToBGRA ColorConversionCode = 142
|
||||
|
||||
ColorCOLORCVTMAX = 143
|
||||
ColorCOLORCVTMAX ColorConversionCode = 143
|
||||
)
|
||||
|
||||
6
vendor/gocv.io/x/gocv/mat_noprofile.go
generated
vendored
6
vendor/gocv.io/x/gocv/mat_noprofile.go
generated
vendored
@@ -8,6 +8,11 @@ package gocv
|
||||
*/
|
||||
import "C"
|
||||
|
||||
// addMatToProfile does nothing if matprofile tag is not set.
|
||||
func addMatToProfile(p C.Mat) {
|
||||
return
|
||||
}
|
||||
|
||||
// newMat returns a new Mat from a C Mat
|
||||
func newMat(p C.Mat) Mat {
|
||||
return Mat{p: p}
|
||||
@@ -17,5 +22,6 @@ func newMat(p C.Mat) Mat {
|
||||
func (m *Mat) Close() error {
|
||||
C.Mat_Close(m.p)
|
||||
m.p = nil
|
||||
m.d = nil
|
||||
return nil
|
||||
}
|
||||
|
||||
11
vendor/gocv.io/x/gocv/mat_profile.go
generated
vendored
11
vendor/gocv.io/x/gocv/mat_profile.go
generated
vendored
@@ -58,6 +58,12 @@ func init() {
|
||||
}
|
||||
}
|
||||
|
||||
// addMatToProfile records Mat to the MatProfile.
|
||||
func addMatToProfile(p C.Mat) {
|
||||
MatProfile.Add(p, 1)
|
||||
return
|
||||
}
|
||||
|
||||
// newMat returns a new Mat from a C Mat and records it to the MatProfile.
|
||||
func newMat(p C.Mat) Mat {
|
||||
m := Mat{p: p}
|
||||
@@ -67,8 +73,11 @@ func newMat(p C.Mat) Mat {
|
||||
|
||||
// Close the Mat object.
|
||||
func (m *Mat) Close() error {
|
||||
C.Mat_Close(m.p)
|
||||
// NOTE: The pointer must be removed from the profile before it is deleted to
|
||||
// avoid a data race.
|
||||
MatProfile.Remove(m.p)
|
||||
C.Mat_Close(m.p)
|
||||
m.p = nil
|
||||
m.d = nil
|
||||
return nil
|
||||
}
|
||||
|
||||
27
vendor/gocv.io/x/gocv/objdetect.cpp
generated
vendored
27
vendor/gocv.io/x/gocv/objdetect.cpp
generated
vendored
@@ -149,3 +149,30 @@ const char* QRCodeDetector_Decode(QRCodeDetector qr, Mat input,Mat inputPoints,M
|
||||
cv::String *str = new cv::String(qr->detectAndDecode(*input,*inputPoints,*straight_qrcode));
|
||||
return str->c_str();
|
||||
}
|
||||
|
||||
bool QRCodeDetector_DetectMulti(QRCodeDetector qr, Mat input, Mat points) {
|
||||
return qr->detectMulti(*input,*points);
|
||||
}
|
||||
|
||||
bool QRCodeDetector_DetectAndDecodeMulti(QRCodeDetector qr, Mat input, CStrings* decoded, Mat points, struct Mats* qrCodes) {
|
||||
std::vector<cv::String> decodedCodes;
|
||||
std::vector<cv::Mat> straightQrCodes;
|
||||
bool res = qr->detectAndDecodeMulti(*input, decodedCodes, *points, straightQrCodes);
|
||||
if (!res) {
|
||||
return res;
|
||||
}
|
||||
|
||||
qrCodes->mats = new Mat[straightQrCodes.size()];
|
||||
qrCodes->length = straightQrCodes.size();
|
||||
for (size_t i = 0; i < straightQrCodes.size(); i++) {
|
||||
qrCodes->mats[i] = new cv::Mat(straightQrCodes[i]);
|
||||
}
|
||||
|
||||
const char **strs = new const char*[decodedCodes.size()];
|
||||
for (size_t i = 0; i < decodedCodes.size(); ++i) {
|
||||
strs[i] = decodedCodes[i].c_str();
|
||||
}
|
||||
decoded->length = decodedCodes.size();
|
||||
decoded->strs = strs;
|
||||
return res;
|
||||
}
|
||||
45
vendor/gocv.io/x/gocv/objdetect.go
generated
vendored
45
vendor/gocv.io/x/gocv/objdetect.go
generated
vendored
@@ -211,6 +211,7 @@ func (a *QRCodeDetector) Close() error {
|
||||
|
||||
// DetectAndDecode Both detects and decodes QR code.
|
||||
//
|
||||
// Returns true as long as some QR code was detected even in case where the decoding failed
|
||||
// For further details, please see:
|
||||
// https://docs.opencv.org/master/de/dc3/classcv_1_1QRCodeDetector.html#a7290bd6a5d59b14a37979c3a14fbf394
|
||||
//
|
||||
@@ -238,3 +239,47 @@ func (a *QRCodeDetector) Decode(input Mat, points Mat, straight_qrcode *Mat) str
|
||||
goResult := C.GoString(C.QRCodeDetector_DetectAndDecode(a.p, input.p, points.p, straight_qrcode.p))
|
||||
return string(goResult)
|
||||
}
|
||||
|
||||
// Detects QR codes in image and finds of the quadrangles containing the codes.
|
||||
//
|
||||
// Each quadrangle would be returned as a row in the `points` Mat and each point is a Vecf.
|
||||
// Returns true if QR code was detected
|
||||
// For usage please see TestQRCodeDetector
|
||||
// For further details, please see:
|
||||
// https://docs.opencv.org/master/de/dc3/classcv_1_1QRCodeDetector.html#aaf2b6b2115b8e8fbc9acf3a8f68872b6
|
||||
func (a *QRCodeDetector) DetectMulti(input Mat, points *Mat) bool {
|
||||
result := C.QRCodeDetector_DetectMulti(a.p, input.p, points.p)
|
||||
return bool(result)
|
||||
}
|
||||
|
||||
// Detects QR codes in image and finds of the quadrangles containing the codes and decode the decode the QRCodes to strings.
|
||||
//
|
||||
// Each quadrangle would be returned as a row in the `points` Mat and each point is a Vecf.
|
||||
// Returns true as long as some QR code was detected even in case where the decoding failed
|
||||
// For usage please see TestQRCodeDetector
|
||||
// For further details, please see:
|
||||
//https://docs.opencv.org/master/de/dc3/classcv_1_1QRCodeDetector.html#a188b63ffa17922b2c65d8a0ab7b70775
|
||||
func (a *QRCodeDetector) DetectAndDecodeMulti(input Mat, decoded *[]string, points *Mat, qrCodes *[]Mat) bool {
|
||||
cDecoded := C.CStrings{}
|
||||
defer C.CStrings_Close(cDecoded)
|
||||
cQrCodes := C.struct_Mats{}
|
||||
defer C.Mats_Close(cQrCodes)
|
||||
success := C.QRCodeDetector_DetectAndDecodeMulti(a.p, input.p, &cDecoded, points.p, &cQrCodes)
|
||||
if !success {
|
||||
return bool(success)
|
||||
}
|
||||
|
||||
tmpCodes := make([]Mat, cQrCodes.length)
|
||||
for i := C.int(0); i < cQrCodes.length; i++ {
|
||||
tmpCodes[i].p = C.Mats_get(cQrCodes, i)
|
||||
}
|
||||
|
||||
for _, qr := range tmpCodes {
|
||||
*qrCodes = append(*qrCodes, qr)
|
||||
}
|
||||
|
||||
for _, s := range toGoStrings(cDecoded) {
|
||||
*decoded = append(*decoded, s)
|
||||
}
|
||||
return bool(success)
|
||||
}
|
||||
|
||||
2
vendor/gocv.io/x/gocv/objdetect.h
generated
vendored
2
vendor/gocv.io/x/gocv/objdetect.h
generated
vendored
@@ -45,6 +45,8 @@ const char* QRCodeDetector_DetectAndDecode(QRCodeDetector qr, Mat input,Mat poin
|
||||
bool QRCodeDetector_Detect(QRCodeDetector qr, Mat input,Mat points);
|
||||
const char* QRCodeDetector_Decode(QRCodeDetector qr, Mat input,Mat inputPoints,Mat straight_qrcode);
|
||||
void QRCodeDetector_Close(QRCodeDetector qr);
|
||||
bool QRCodeDetector_DetectMulti(QRCodeDetector qr, Mat input, Mat points);
|
||||
bool QRCodeDetector_DetectAndDecodeMulti(QRCodeDetector qr, Mat input, CStrings* decoded ,Mat points, struct Mats* mats);
|
||||
|
||||
#ifdef __cplusplus
|
||||
}
|
||||
|
||||
86
vendor/gocv.io/x/gocv/photo.cpp
generated
vendored
Normal file
86
vendor/gocv.io/x/gocv/photo.cpp
generated
vendored
Normal file
@@ -0,0 +1,86 @@
|
||||
#include "photo.h"
|
||||
|
||||
void ColorChange(Mat src, Mat mask, Mat dst, float red_mul, float green_mul, float blue_mul) {
|
||||
cv::colorChange(*src, *mask, *dst, red_mul, green_mul, blue_mul);
|
||||
}
|
||||
|
||||
void IlluminationChange(Mat src, Mat mask, Mat dst, float alpha, float beta) {
|
||||
cv::illuminationChange(*src, *mask, *dst, alpha, beta);
|
||||
}
|
||||
|
||||
void SeamlessClone(Mat src, Mat dst, Mat mask, Point p, Mat blend, int flags) {
|
||||
cv::Point pt(p.x, p.y);
|
||||
cv::seamlessClone(*src, *dst, *mask, pt, *blend, flags);
|
||||
}
|
||||
|
||||
void TextureFlattening(Mat src, Mat mask, Mat dst, float low_threshold, float high_threshold, int kernel_size) {
|
||||
cv::textureFlattening(*src, *mask, *dst, low_threshold, high_threshold, kernel_size);
|
||||
}
|
||||
|
||||
|
||||
void FastNlMeansDenoisingColoredMulti( struct Mats src, Mat dst, int imgToDenoiseIndex, int temporalWindowSize){
|
||||
std::vector<cv::Mat> images;
|
||||
for (int i = 0; i < src.length; ++i) {
|
||||
images.push_back(*src.mats[i]);
|
||||
}
|
||||
cv::fastNlMeansDenoisingColoredMulti( images, *dst, imgToDenoiseIndex, temporalWindowSize );
|
||||
}
|
||||
|
||||
void FastNlMeansDenoisingColoredMultiWithParams( struct Mats src, Mat dst, int imgToDenoiseIndex, int temporalWindowSize, float h, float hColor, int templateWindowSize, int searchWindowSize ){
|
||||
std::vector<cv::Mat> images;
|
||||
for (int i = 0; i < src.length; ++i) {
|
||||
images.push_back(*src.mats[i]);
|
||||
}
|
||||
cv::fastNlMeansDenoisingColoredMulti( images, *dst, imgToDenoiseIndex, temporalWindowSize, h, hColor, templateWindowSize, searchWindowSize );
|
||||
}
|
||||
|
||||
MergeMertens MergeMertens_Create() {
|
||||
return new cv::Ptr<cv::MergeMertens>(cv::createMergeMertens());
|
||||
}
|
||||
|
||||
MergeMertens MergeMertens_CreateWithParams(float contrast_weight,
|
||||
float saturation_weight,
|
||||
float exposure_weight) {
|
||||
return new cv::Ptr<cv::MergeMertens>(cv::createMergeMertens(
|
||||
contrast_weight, saturation_weight, exposure_weight));
|
||||
}
|
||||
|
||||
void MergeMertens_Close(MergeMertens b) {
|
||||
delete b;
|
||||
}
|
||||
|
||||
void MergeMertens_Process(MergeMertens b, struct Mats src, Mat dst) {
|
||||
std::vector<cv::Mat> images;
|
||||
for (int i = 0; i < src.length; ++i) {
|
||||
images.push_back(*src.mats[i]);
|
||||
}
|
||||
(*b)->process(images, *dst);
|
||||
}
|
||||
|
||||
AlignMTB AlignMTB_Create() {
|
||||
return new cv::Ptr<cv::AlignMTB>(cv::createAlignMTB(6,4,false));
|
||||
}
|
||||
|
||||
AlignMTB AlignMTB_CreateWithParams(int max_bits, int exclude_range, bool cut) {
|
||||
return new cv::Ptr<cv::AlignMTB>(
|
||||
cv::createAlignMTB(max_bits, exclude_range, cut));
|
||||
}
|
||||
|
||||
void AlignMTB_Close(AlignMTB b) { delete b; }
|
||||
|
||||
void AlignMTB_Process(AlignMTB b, struct Mats src, struct Mats *dst) {
|
||||
|
||||
std::vector<cv::Mat> srcMats;
|
||||
for (int i = 0; i < src.length; ++i) {
|
||||
srcMats.push_back(*src.mats[i]);
|
||||
}
|
||||
|
||||
std::vector<cv::Mat> dstMats;
|
||||
(*b)->process(srcMats, dstMats);
|
||||
|
||||
dst->mats = new Mat[dstMats.size()];
|
||||
for (size_t i = 0; i < dstMats.size() ; ++i) {
|
||||
dst->mats[i] = new cv::Mat( dstMats[i] );
|
||||
}
|
||||
dst->length = (int)dstMats.size();
|
||||
}
|
||||
227
vendor/gocv.io/x/gocv/photo.go
generated
vendored
Normal file
227
vendor/gocv.io/x/gocv/photo.go
generated
vendored
Normal file
@@ -0,0 +1,227 @@
|
||||
package gocv
|
||||
|
||||
/*
|
||||
#include <stdlib.h>
|
||||
#include "photo.h"
|
||||
*/
|
||||
import "C"
|
||||
import (
|
||||
"image"
|
||||
"unsafe"
|
||||
)
|
||||
|
||||
//SeamlessCloneFlags seamlessClone algorithm flags
|
||||
type SeamlessCloneFlags int
|
||||
|
||||
// MergeMertens is a wrapper around the cv::MergeMertens.
|
||||
type MergeMertens struct {
|
||||
p unsafe.Pointer // This unsafe pointer will in fact be a C.MergeMertens
|
||||
}
|
||||
|
||||
// AlignMTB is a wrapper around the cv::AlignMTB.
|
||||
type AlignMTB struct {
|
||||
p unsafe.Pointer // This unsafe pointer will in fact be a C.AlignMTB
|
||||
}
|
||||
|
||||
const (
|
||||
// NormalClone The power of the method is fully expressed when inserting objects with complex outlines into a new background.
|
||||
NormalClone SeamlessCloneFlags = iota
|
||||
|
||||
// MixedClone The classic method, color-based selection and alpha masking might be time consuming and often leaves an undesirable halo. Seamless cloning, even averaged with the original image, is not effective. Mixed seamless cloning based on a loose selection proves effective.
|
||||
MixedClone
|
||||
|
||||
// MonochromeTransfer Monochrome transfer allows the user to easily replace certain features of one object by alternative features.
|
||||
MonochromeTransfer
|
||||
)
|
||||
|
||||
// ColorChange mix two differently colored versions of an image seamlessly.
|
||||
//
|
||||
// For further details, please see:
|
||||
// https://docs.opencv.org/master/df/da0/group__photo__clone.html#ga6684f35dc669ff6196a7c340dc73b98e
|
||||
//
|
||||
func ColorChange(src, mask Mat, dst *Mat, red_mul, green_mul, blue_mul float32) {
|
||||
C.ColorChange(src.p, mask.p, dst.p, C.float(red_mul), C.float(green_mul), C.float(blue_mul))
|
||||
}
|
||||
|
||||
// SeamlessClone blend two image by Poisson Blending.
|
||||
//
|
||||
// For further details, please see:
|
||||
// https://docs.opencv.org/master/df/da0/group__photo__clone.html#ga2bf426e4c93a6b1f21705513dfeca49d
|
||||
//
|
||||
func SeamlessClone(src, dst, mask Mat, p image.Point, blend *Mat, flags SeamlessCloneFlags) {
|
||||
cp := C.struct_Point{
|
||||
x: C.int(p.X),
|
||||
y: C.int(p.Y),
|
||||
}
|
||||
|
||||
C.SeamlessClone(src.p, dst.p, mask.p, cp, blend.p, C.int(flags))
|
||||
}
|
||||
|
||||
// IlluminationChange modifies locally the apparent illumination of an image.
|
||||
//
|
||||
// For further details, please see:
|
||||
// https://docs.opencv.org/master/df/da0/group__photo__clone.html#gac5025767cf2febd8029d474278e886c7
|
||||
//
|
||||
func IlluminationChange(src, mask Mat, dst *Mat, alpha, beta float32) {
|
||||
C.IlluminationChange(src.p, mask.p, dst.p, C.float(alpha), C.float(beta))
|
||||
}
|
||||
|
||||
// TextureFlattening washes out the texture of the selected region, giving its contents a flat aspect.
|
||||
//
|
||||
// For further details, please see:
|
||||
// https://docs.opencv.org/master/df/da0/group__photo__clone.html#gad55df6aa53797365fa7cc23959a54004
|
||||
//
|
||||
func TextureFlattening(src, mask Mat, dst *Mat, lowThreshold, highThreshold float32, kernelSize int) {
|
||||
C.TextureFlattening(src.p, mask.p, dst.p, C.float(lowThreshold), C.float(highThreshold), C.int(kernelSize))
|
||||
}
|
||||
|
||||
// FastNlMeansDenoisingColoredMulti denoises the selected images.
|
||||
//
|
||||
// For further details, please see:
|
||||
// https://docs.opencv.org/master/d1/d79/group__photo__denoise.html#gaa501e71f52fb2dc17ff8ca5e7d2d3619
|
||||
//
|
||||
func FastNlMeansDenoisingColoredMulti(src []Mat, dst *Mat, imgToDenoiseIndex int, temporalWindowSize int) {
|
||||
cMatArray := make([]C.Mat, len(src))
|
||||
for i, r := range src {
|
||||
cMatArray[i] = (C.Mat)(r.p)
|
||||
}
|
||||
matsVector := C.struct_Mats{
|
||||
mats: (*C.Mat)(&cMatArray[0]),
|
||||
length: C.int(len(src)),
|
||||
}
|
||||
C.FastNlMeansDenoisingColoredMulti(matsVector, dst.p, C.int(imgToDenoiseIndex), C.int(temporalWindowSize))
|
||||
}
|
||||
|
||||
// FastNlMeansDenoisingColoredMulti denoises the selected images.
|
||||
//
|
||||
// For further details, please see:
|
||||
// https://docs.opencv.org/master/d1/d79/group__photo__denoise.html#gaa501e71f52fb2dc17ff8ca5e7d2d3619
|
||||
//
|
||||
func FastNlMeansDenoisingColoredMultiWithParams(src []Mat, dst *Mat, imgToDenoiseIndex int, temporalWindowSize int, h float32, hColor float32, templateWindowSize int, searchWindowSize int) {
|
||||
cMatArray := make([]C.Mat, len(src))
|
||||
for i, r := range src {
|
||||
cMatArray[i] = (C.Mat)(r.p)
|
||||
}
|
||||
matsVector := C.struct_Mats{
|
||||
mats: (*C.Mat)(&cMatArray[0]),
|
||||
length: C.int(len(src)),
|
||||
}
|
||||
C.FastNlMeansDenoisingColoredMultiWithParams(matsVector, dst.p, C.int(imgToDenoiseIndex), C.int(temporalWindowSize), C.float(h), C.float(hColor), C.int(templateWindowSize), C.int(searchWindowSize))
|
||||
}
|
||||
|
||||
// NewMergeMertens returns returns a new MergeMertens white LDR merge algorithm.
|
||||
// of type MergeMertens with default parameters.
|
||||
// MergeMertens algorithm merge the ldr image should result in a HDR image.
|
||||
//
|
||||
// For further details, please see:
|
||||
// https://docs.opencv.org/master/d6/df5/group__photo__hdr.html
|
||||
// https://docs.opencv.org/master/d7/dd6/classcv_1_1MergeMertens.html
|
||||
// https://docs.opencv.org/master/d6/df5/group__photo__hdr.html#ga79d59aa3cb3a7c664e59a4b5acc1ccb6
|
||||
//
|
||||
func NewMergeMertens() MergeMertens {
|
||||
return MergeMertens{p: unsafe.Pointer(C.MergeMertens_Create())}
|
||||
}
|
||||
|
||||
// NewMergeMertensWithParams returns a new MergeMertens white LDR merge algorithm
|
||||
// of type MergeMertens with customized parameters.
|
||||
// MergeMertens algorithm merge the ldr image should result in a HDR image.
|
||||
//
|
||||
// For further details, please see:
|
||||
// https://docs.opencv.org/master/d6/df5/group__photo__hdr.html
|
||||
// https://docs.opencv.org/master/d7/dd6/classcv_1_1MergeMertens.html
|
||||
// https://docs.opencv.org/master/d6/df5/group__photo__hdr.html#ga79d59aa3cb3a7c664e59a4b5acc1ccb6
|
||||
//
|
||||
func NewMergeMertensWithParams(contrast_weight float32, saturation_weight float32, exposure_weight float32) MergeMertens {
|
||||
return MergeMertens{p: unsafe.Pointer(C.MergeMertens_CreateWithParams(C.float(contrast_weight), C.float(saturation_weight), C.float(exposure_weight)))}
|
||||
}
|
||||
|
||||
// Close MergeMertens.
|
||||
func (b *MergeMertens) Close() error {
|
||||
C.MergeMertens_Close((C.MergeMertens)(b.p)) // Here the unsafe pointer is cast into the right type
|
||||
b.p = nil
|
||||
return nil
|
||||
}
|
||||
|
||||
// BalanceWhite computes merge LDR images using the current MergeMertens.
|
||||
// Return a image MAT : 8bits 3 channel image ( RGB 8 bits )
|
||||
// For further details, please see:
|
||||
// https://docs.opencv.org/master/d7/dd6/classcv_1_1MergeMertens.html#a2d2254b2aab722c16954de13a663644d
|
||||
//
|
||||
func (b *MergeMertens) Process(src []Mat, dst *Mat) {
|
||||
cMatArray := make([]C.Mat, len(src))
|
||||
for i, r := range src {
|
||||
cMatArray[i] = (C.Mat)(r.p)
|
||||
}
|
||||
// Conversion function from a Golang slice into an array of matrices that are understood by OpenCV
|
||||
matsVector := C.struct_Mats{
|
||||
mats: (*C.Mat)(&cMatArray[0]),
|
||||
length: C.int(len(src)),
|
||||
}
|
||||
C.MergeMertens_Process((C.MergeMertens)(b.p), matsVector, dst.p)
|
||||
// Convert a series of double [0.0,1.0] to [0,255] with Golang
|
||||
dst.ConvertToWithParams(dst, MatTypeCV8UC3, 255.0, 0.0)
|
||||
}
|
||||
|
||||
// NewAlignMTB returns an AlignMTB for converts images to median threshold bitmaps.
|
||||
// of type AlignMTB converts images to median threshold bitmaps (1 for pixels
|
||||
// brighter than median luminance and 0 otherwise) and than aligns the resulting
|
||||
// bitmaps using bit operations.
|
||||
|
||||
// For further details, please see:
|
||||
// https://docs.opencv.org/master/d6/df5/group__photo__hdr.html
|
||||
// https://docs.opencv.org/master/d7/db6/classcv_1_1AlignMTB.html
|
||||
// https://docs.opencv.org/master/d6/df5/group__photo__hdr.html#ga2f1fafc885a5d79dbfb3542e08db0244
|
||||
//
|
||||
func NewAlignMTB() AlignMTB {
|
||||
return AlignMTB{p: unsafe.Pointer(C.AlignMTB_Create())}
|
||||
}
|
||||
|
||||
// NewAlignMTBWithParams returns an AlignMTB for converts images to median threshold bitmaps.
|
||||
// of type AlignMTB converts images to median threshold bitmaps (1 for pixels
|
||||
// brighter than median luminance and 0 otherwise) and than aligns the resulting
|
||||
// bitmaps using bit operations.
|
||||
|
||||
// For further details, please see:
|
||||
// https://docs.opencv.org/master/d6/df5/group__photo__hdr.html
|
||||
// https://docs.opencv.org/master/d7/db6/classcv_1_1AlignMTB.html
|
||||
// https://docs.opencv.org/master/d6/df5/group__photo__hdr.html#ga2f1fafc885a5d79dbfb3542e08db0244
|
||||
//
|
||||
func NewAlignMTBWithParams(max_bits int, exclude_range int, cut bool) AlignMTB {
|
||||
return AlignMTB{p: unsafe.Pointer(C.AlignMTB_CreateWithParams(C.int(max_bits), C.int(exclude_range), C.bool(cut)))}
|
||||
}
|
||||
|
||||
// Close AlignMTB.
|
||||
func (b *AlignMTB) Close() error {
|
||||
C.AlignMTB_Close((C.AlignMTB)(b.p))
|
||||
b.p = nil
|
||||
return nil
|
||||
}
|
||||
|
||||
// Process computes an alignment using the current AlignMTB.
|
||||
//
|
||||
// For further details, please see:
|
||||
// https://docs.opencv.org/master/d7/db6/classcv_1_1AlignMTB.html#a37b3417d844f362d781f34155cbcb201
|
||||
//
|
||||
func (b *AlignMTB) Process(src []Mat, dst *[]Mat) {
|
||||
|
||||
cSrcArray := make([]C.Mat, len(src))
|
||||
for i, r := range src {
|
||||
cSrcArray[i] = r.p
|
||||
}
|
||||
cSrcMats := C.struct_Mats{
|
||||
mats: (*C.Mat)(&cSrcArray[0]),
|
||||
length: C.int(len(src)),
|
||||
}
|
||||
|
||||
cDstMats := C.struct_Mats{}
|
||||
|
||||
C.AlignMTB_Process((C.AlignMTB)(b.p), cSrcMats, &cDstMats)
|
||||
|
||||
// Pass the matrices by reference from an OpenCV/C++ to a GoCV::Mat object
|
||||
for i := C.int(0); i < cDstMats.length; i++ {
|
||||
var tempdst Mat
|
||||
tempdst.p = C.Mats_get(cDstMats, i)
|
||||
*dst = append(*dst, tempdst)
|
||||
}
|
||||
return
|
||||
}
|
||||
49
vendor/gocv.io/x/gocv/photo.h
generated
vendored
Normal file
49
vendor/gocv.io/x/gocv/photo.h
generated
vendored
Normal file
@@ -0,0 +1,49 @@
|
||||
#ifndef _OPENCV3_PHOTO_H_
|
||||
#define _OPENCV3_PHOTO_H_
|
||||
|
||||
#ifdef __cplusplus
|
||||
#include <opencv2/opencv.hpp>
|
||||
#include <opencv2/photo.hpp>
|
||||
|
||||
extern "C" {
|
||||
#endif
|
||||
|
||||
#include "core.h"
|
||||
|
||||
#ifdef __cplusplus
|
||||
// see : https://docs.opencv.org/3.4/d7/dd6/classcv_1_1MergeMertens.html
|
||||
typedef cv::Ptr<cv::MergeMertens> *MergeMertens;
|
||||
// see : https://docs.opencv.org/master/d7/db6/classcv_1_1AlignMTB.html
|
||||
typedef cv::Ptr<cv::AlignMTB> *AlignMTB;
|
||||
#else
|
||||
typedef void *MergeMertens;
|
||||
typedef void *AlignMTB;
|
||||
#endif
|
||||
|
||||
void ColorChange(Mat src, Mat mask, Mat dst, float red_mul, float green_mul, float blue_mul);
|
||||
|
||||
void SeamlessClone(Mat src, Mat dst, Mat mask, Point p, Mat blend, int flags);
|
||||
|
||||
void IlluminationChange(Mat src, Mat mask, Mat dst, float alpha, float beta);
|
||||
|
||||
void TextureFlattening(Mat src, Mat mask, Mat dst, float low_threshold, float high_threshold, int kernel_size);
|
||||
|
||||
void FastNlMeansDenoisingColoredMulti(struct Mats src, Mat dst, int imgToDenoiseIndex, int temporalWindowSize);
|
||||
|
||||
void FastNlMeansDenoisingColoredMultiWithParams(struct Mats src, Mat dst, int imgToDenoiseIndex, int temporalWindowSize, float h, float hColor, int templateWindowSize, int searchWindowSize );
|
||||
|
||||
MergeMertens MergeMertens_Create();
|
||||
MergeMertens MergeMertens_CreateWithParams(float contrast_weight, float saturation_weight, float exposure_weight);
|
||||
void MergeMertens_Process(MergeMertens b, struct Mats src, Mat dst);
|
||||
void MergeMertens_Close(MergeMertens b);
|
||||
|
||||
AlignMTB AlignMTB_Create();
|
||||
AlignMTB AlignMTB_CreateWithParams(int max_bits, int exclude_range, bool cut);
|
||||
void AlignMTB_Process(AlignMTB b, struct Mats src, struct Mats *dst);
|
||||
void AlignMTB_Close(AlignMTB b);
|
||||
|
||||
#ifdef __cplusplus
|
||||
}
|
||||
#endif
|
||||
|
||||
#endif //_OPENCV3_PHOTO_H
|
||||
13
vendor/gocv.io/x/gocv/photo_string.go
generated
vendored
Normal file
13
vendor/gocv.io/x/gocv/photo_string.go
generated
vendored
Normal file
@@ -0,0 +1,13 @@
|
||||
package gocv
|
||||
|
||||
func (c SeamlessCloneFlags) String() string {
|
||||
switch c {
|
||||
case NormalClone:
|
||||
return "normal-clone"
|
||||
case MixedClone:
|
||||
return "mixed-clone"
|
||||
case MonochromeTransfer:
|
||||
return "monochrome-transfer"
|
||||
}
|
||||
return ""
|
||||
}
|
||||
79
vendor/gocv.io/x/gocv/travis_build_opencv.sh
generated
vendored
79
vendor/gocv.io/x/gocv/travis_build_opencv.sh
generated
vendored
@@ -1,79 +0,0 @@
|
||||
#!/bin/bash
|
||||
set -eux -o pipefail
|
||||
|
||||
OPENCV_VERSION=${OPENCV_VERSION:-4.2.0}
|
||||
|
||||
#GRAPHICAL=ON
|
||||
GRAPHICAL=${GRAPHICAL:-OFF}
|
||||
|
||||
# OpenCV looks for libjpeg in /usr/lib/libjpeg.so, for some reason. However,
|
||||
# it does not seem to be there in 14.04. Create a link
|
||||
|
||||
mkdir -p $HOME/usr/lib
|
||||
|
||||
if [[ ! -f "$HOME/usr/lib/libjpeg.so" ]]; then
|
||||
ln -s /usr/lib/x86_64-linux-gnu/libjpeg.so $HOME/usr/lib/libjpeg.so
|
||||
fi
|
||||
|
||||
# Same for libpng.so
|
||||
|
||||
if [[ ! -f "$HOME/usr/lib/libpng.so" ]]; then
|
||||
ln -s /usr/lib/x86_64-linux-gnu/libpng.so $HOME/usr/lib/libpng.so
|
||||
fi
|
||||
|
||||
# Build OpenCV
|
||||
if [[ ! -e "$HOME/usr/installed-${OPENCV_VERSION}" ]]; then
|
||||
TMP=$(mktemp -d)
|
||||
if [[ ! -d "opencv-${OPENCV_VERSION}/build" ]]; then
|
||||
curl -sL https://github.com/opencv/opencv/archive/${OPENCV_VERSION}.zip > ${TMP}/opencv.zip
|
||||
unzip -q ${TMP}/opencv.zip
|
||||
mkdir opencv-${OPENCV_VERSION}/build
|
||||
rm ${TMP}/opencv.zip
|
||||
fi
|
||||
|
||||
if [[ ! -d "opencv_contrib-${OPENCV_VERSION}/modules" ]]; then
|
||||
curl -sL https://github.com/opencv/opencv_contrib/archive/${OPENCV_VERSION}.zip > ${TMP}/opencv-contrib.zip
|
||||
unzip -q ${TMP}/opencv-contrib.zip
|
||||
rm ${TMP}/opencv-contrib.zip
|
||||
fi
|
||||
rmdir ${TMP}
|
||||
|
||||
cd opencv-${OPENCV_VERSION}/build
|
||||
cmake -D WITH_IPP=${GRAPHICAL} \
|
||||
-D WITH_OPENGL=${GRAPHICAL} \
|
||||
-D WITH_QT=${GRAPHICAL} \
|
||||
-D BUILD_EXAMPLES=OFF \
|
||||
-D BUILD_TESTS=OFF \
|
||||
-D BUILD_PERF_TESTS=OFF \
|
||||
-D BUILD_opencv_java=OFF \
|
||||
-D BUILD_opencv_python=OFF \
|
||||
-D BUILD_opencv_python2=OFF \
|
||||
-D BUILD_opencv_python3=OFF \
|
||||
-D OPENCV_GENERATE_PKGCONFIG=ON \
|
||||
-D CMAKE_INSTALL_PREFIX=$HOME/usr \
|
||||
-D OPENCV_ENABLE_NONFREE=ON \
|
||||
-D OPENCV_EXTRA_MODULES_PATH=../../opencv_contrib-${OPENCV_VERSION}/modules ..
|
||||
make -j8
|
||||
make install && touch $HOME/usr/installed-${OPENCV_VERSION}
|
||||
|
||||
# caffe test data
|
||||
if [[ ! -d "${HOME}/testdata" ]]; then
|
||||
mkdir ${HOME}/testdata
|
||||
fi
|
||||
|
||||
#if [[ ! -f "${HOME}/testdata/bvlc_googlenet.prototxt" ]]; then
|
||||
curl -sL https://raw.githubusercontent.com/opencv/opencv_extra/master/testdata/dnn/bvlc_googlenet.prototxt > ${HOME}/testdata/bvlc_googlenet.prototxt
|
||||
#fi
|
||||
|
||||
#if [[ ! -f "${HOME}/testdata/bvlc_googlenet.caffemodel" ]]; then
|
||||
curl -sL http://dl.caffe.berkeleyvision.org/bvlc_googlenet.caffemodel > ${HOME}/testdata/bvlc_googlenet.caffemodel
|
||||
#fi
|
||||
|
||||
#if [[ ! -f "${HOME}/testdata/tensorflow_inception_graph.pb" ]]; then
|
||||
curl -sL https://storage.googleapis.com/download.tensorflow.org/models/inception5h.zip > ${HOME}/testdata/inception5h.zip
|
||||
unzip -o ${HOME}/testdata/inception5h.zip tensorflow_inception_graph.pb -d ${HOME}/testdata
|
||||
#fi
|
||||
|
||||
cd ../..
|
||||
touch $HOME/fresh-cache
|
||||
fi
|
||||
2
vendor/gocv.io/x/gocv/version.go
generated
vendored
2
vendor/gocv.io/x/gocv/version.go
generated
vendored
@@ -7,7 +7,7 @@ package gocv
|
||||
import "C"
|
||||
|
||||
// GoCVVersion of this package, for display purposes.
|
||||
const GoCVVersion = "0.22.0"
|
||||
const GoCVVersion = "0.28.0"
|
||||
|
||||
// Version returns the current golang package version
|
||||
func Version() string {
|
||||
|
||||
24
vendor/gocv.io/x/gocv/video.cpp
generated
vendored
24
vendor/gocv.io/x/gocv/video.cpp
generated
vendored
@@ -47,3 +47,27 @@ void CalcOpticalFlowPyrLKWithParams(Mat prevImg, Mat nextImg, Mat prevPts, Mat n
|
||||
cv::calcOpticalFlowPyrLK(*prevImg, *nextImg, *prevPts, *nextPts, *status, *err, sz, maxLevel, *criteria, flags, minEigThreshold);
|
||||
}
|
||||
|
||||
bool Tracker_Init(Tracker self, Mat image, Rect boundingBox) {
|
||||
cv::Rect bb(boundingBox.x, boundingBox.y, boundingBox.width, boundingBox.height);
|
||||
|
||||
(*self)->init(*image, bb);
|
||||
return true;
|
||||
}
|
||||
|
||||
bool Tracker_Update(Tracker self, Mat image, Rect* boundingBox) {
|
||||
cv::Rect bb;
|
||||
bool ret = (*self)->update(*image, bb);
|
||||
boundingBox->x = int(bb.x);
|
||||
boundingBox->y = int(bb.y);
|
||||
boundingBox->width = int(bb.width);
|
||||
boundingBox->height = int(bb.height);
|
||||
return ret;
|
||||
}
|
||||
|
||||
TrackerMIL TrackerMIL_Create() {
|
||||
return new cv::Ptr<cv::TrackerMIL>(cv::TrackerMIL::create());
|
||||
}
|
||||
|
||||
void TrackerMIL_Close(TrackerMIL self) {
|
||||
delete self;
|
||||
}
|
||||
|
||||
78
vendor/gocv.io/x/gocv/video.go
generated
vendored
78
vendor/gocv.io/x/gocv/video.go
generated
vendored
@@ -155,3 +155,81 @@ func CalcOpticalFlowPyrLKWithParams(prevImg Mat, nextImg Mat, prevPts Mat, nextP
|
||||
C.CalcOpticalFlowPyrLKWithParams(prevImg.p, nextImg.p, prevPts.p, nextPts.p, status.p, err.p, winSz, C.int(maxLevel), criteria.p, C.int(flags), C.double(minEigThreshold))
|
||||
return
|
||||
}
|
||||
|
||||
// Tracker is the base interface for object tracking.
|
||||
//
|
||||
// see: https://docs.opencv.org/master/d0/d0a/classcv_1_1Tracker.html
|
||||
//
|
||||
type Tracker interface {
|
||||
// Close closes, as Trackers need to be Closed manually.
|
||||
//
|
||||
Close() error
|
||||
|
||||
// Init initializes the tracker with a known bounding box that surrounded the target.
|
||||
// Note: this can only be called once. If you lose the object, you have to Close() the instance,
|
||||
// create a new one, and call Init() on it again.
|
||||
//
|
||||
// see: https://docs.opencv.org/master/d0/d0a/classcv_1_1Tracker.html#a4d285747589b1bdd16d2e4f00c3255dc
|
||||
//
|
||||
Init(image Mat, boundingBox image.Rectangle) bool
|
||||
|
||||
// Update updates the tracker, returns a new bounding box and a boolean determining whether the tracker lost the target.
|
||||
//
|
||||
// see: https://docs.opencv.org/master/d0/d0a/classcv_1_1Tracker.html#a549159bd0553e6a8de356f3866df1f18
|
||||
//
|
||||
Update(image Mat) (image.Rectangle, bool)
|
||||
}
|
||||
|
||||
func trackerInit(trk C.Tracker, img Mat, boundingBox image.Rectangle) bool {
|
||||
cBox := C.struct_Rect{
|
||||
x: C.int(boundingBox.Min.X),
|
||||
y: C.int(boundingBox.Min.Y),
|
||||
width: C.int(boundingBox.Size().X),
|
||||
height: C.int(boundingBox.Size().Y),
|
||||
}
|
||||
|
||||
ret := C.Tracker_Init(trk, C.Mat(img.Ptr()), cBox)
|
||||
return bool(ret)
|
||||
}
|
||||
|
||||
func trackerUpdate(trk C.Tracker, img Mat) (image.Rectangle, bool) {
|
||||
cBox := C.struct_Rect{}
|
||||
|
||||
ret := C.Tracker_Update(trk, C.Mat(img.Ptr()), &cBox)
|
||||
|
||||
rect := image.Rect(int(cBox.x), int(cBox.y), int(cBox.x+cBox.width), int(cBox.y+cBox.height))
|
||||
return rect, bool(ret)
|
||||
}
|
||||
|
||||
// TrackerMIL is a Tracker that uses the MIL algorithm. MIL trains a classifier in an online manner
|
||||
// to separate the object from the background.
|
||||
// Multiple Instance Learning avoids the drift problem for a robust tracking.
|
||||
//
|
||||
// For further details, please see:
|
||||
// https://docs.opencv.org/master/d0/d26/classcv_1_1TrackerMIL.html
|
||||
//
|
||||
type TrackerMIL struct {
|
||||
p C.TrackerMIL
|
||||
}
|
||||
|
||||
// NewTrackerMIL returns a new TrackerMIL.
|
||||
func NewTrackerMIL() Tracker {
|
||||
return TrackerMIL{p: C.TrackerMIL_Create()}
|
||||
}
|
||||
|
||||
// Close closes the TrackerMIL.
|
||||
func (trk TrackerMIL) Close() error {
|
||||
C.TrackerMIL_Close(trk.p)
|
||||
trk.p = nil
|
||||
return nil
|
||||
}
|
||||
|
||||
// Init initializes the TrackerMIL.
|
||||
func (trk TrackerMIL) Init(img Mat, boundingBox image.Rectangle) bool {
|
||||
return trackerInit(C.Tracker(trk.p), img, boundingBox)
|
||||
}
|
||||
|
||||
// Update updates the TrackerMIL.
|
||||
func (trk TrackerMIL) Update(img Mat) (image.Rectangle, bool) {
|
||||
return trackerUpdate(C.Tracker(trk.p), img)
|
||||
}
|
||||
|
||||
14
vendor/gocv.io/x/gocv/video.h
generated
vendored
14
vendor/gocv.io/x/gocv/video.h
generated
vendored
@@ -3,6 +3,7 @@
|
||||
|
||||
#ifdef __cplusplus
|
||||
#include <opencv2/opencv.hpp>
|
||||
#include <opencv2/video.hpp>
|
||||
extern "C" {
|
||||
#endif
|
||||
|
||||
@@ -11,9 +12,15 @@ extern "C" {
|
||||
#ifdef __cplusplus
|
||||
typedef cv::Ptr<cv::BackgroundSubtractorMOG2>* BackgroundSubtractorMOG2;
|
||||
typedef cv::Ptr<cv::BackgroundSubtractorKNN>* BackgroundSubtractorKNN;
|
||||
typedef cv::Ptr<cv::Tracker>* Tracker;
|
||||
typedef cv::Ptr<cv::TrackerMIL>* TrackerMIL;
|
||||
typedef cv::Ptr<cv::TrackerGOTURN>* TrackerGOTURN;
|
||||
#else
|
||||
typedef void* BackgroundSubtractorMOG2;
|
||||
typedef void* BackgroundSubtractorKNN;
|
||||
typedef void* Tracker;
|
||||
typedef void* TrackerMIL;
|
||||
typedef void* TrackerGOTURN;
|
||||
#endif
|
||||
|
||||
BackgroundSubtractorMOG2 BackgroundSubtractorMOG2_Create();
|
||||
@@ -31,6 +38,13 @@ void CalcOpticalFlowPyrLK(Mat prevImg, Mat nextImg, Mat prevPts, Mat nextPts, Ma
|
||||
void CalcOpticalFlowPyrLKWithParams(Mat prevImg, Mat nextImg, Mat prevPts, Mat nextPts, Mat status, Mat err, Size winSize, int maxLevel, TermCriteria criteria, int flags, double minEigThreshold);
|
||||
void CalcOpticalFlowFarneback(Mat prevImg, Mat nextImg, Mat flow, double pyrScale, int levels,
|
||||
int winsize, int iterations, int polyN, double polySigma, int flags);
|
||||
|
||||
bool Tracker_Init(Tracker self, Mat image, Rect boundingBox);
|
||||
bool Tracker_Update(Tracker self, Mat image, Rect* boundingBox);
|
||||
|
||||
TrackerMIL TrackerMIL_Create();
|
||||
void TrackerMIL_Close(TrackerMIL self);
|
||||
|
||||
#ifdef __cplusplus
|
||||
}
|
||||
#endif
|
||||
|
||||
8
vendor/gocv.io/x/gocv/videoio.cpp
generated
vendored
8
vendor/gocv.io/x/gocv/videoio.cpp
generated
vendored
@@ -13,10 +13,18 @@ bool VideoCapture_Open(VideoCapture v, const char* uri) {
|
||||
return v->open(uri);
|
||||
}
|
||||
|
||||
bool VideoCapture_OpenWithAPI(VideoCapture v, const char* uri, int apiPreference) {
|
||||
return v->open(uri, apiPreference);
|
||||
}
|
||||
|
||||
bool VideoCapture_OpenDevice(VideoCapture v, int device) {
|
||||
return v->open(device);
|
||||
}
|
||||
|
||||
bool VideoCapture_OpenDeviceWithAPI(VideoCapture v, int device, int apiPreference) {
|
||||
return v->open(device, apiPreference);
|
||||
}
|
||||
|
||||
void VideoCapture_Set(VideoCapture v, int prop, double param) {
|
||||
v->set(prop, param);
|
||||
}
|
||||
|
||||
250
vendor/gocv.io/x/gocv/videoio.go
generated
vendored
250
vendor/gocv.io/x/gocv/videoio.go
generated
vendored
@@ -13,6 +13,111 @@ import (
|
||||
"unsafe"
|
||||
)
|
||||
|
||||
// Select preferred API for a capture object.
|
||||
// Note: Backends are available only if they have been built with your OpenCV binaries
|
||||
type VideoCaptureAPI int
|
||||
|
||||
const (
|
||||
// Auto detect == 0
|
||||
VideoCaptureAny VideoCaptureAPI = 0
|
||||
|
||||
// Video For Windows (obsolete, removed)
|
||||
VideoCaptureVFW VideoCaptureAPI = 200
|
||||
|
||||
// V4L/V4L2 capturing support
|
||||
VideoCaptureV4L VideoCaptureAPI = 200
|
||||
|
||||
// Same as VideoCaptureV4L
|
||||
VideoCaptureV4L2 VideoCaptureAPI = 200
|
||||
|
||||
// IEEE 1394 drivers
|
||||
VideoCaptureFirewire VideoCaptureAPI = 300
|
||||
|
||||
// Same value as VideoCaptureFirewire
|
||||
VideoCaptureFireware VideoCaptureAPI = 300
|
||||
|
||||
// Same value as VideoCaptureFirewire
|
||||
VideoCaptureIEEE1394 VideoCaptureAPI = 300
|
||||
|
||||
// Same value as VideoCaptureFirewire
|
||||
VideoCaptureDC1394 VideoCaptureAPI = 300
|
||||
|
||||
// Same value as VideoCaptureFirewire
|
||||
VideoCaptureCMU1394 VideoCaptureAPI = 300
|
||||
|
||||
// QuickTime (obsolete, removed)
|
||||
VideoCaptureQT VideoCaptureAPI = 500
|
||||
|
||||
// Unicap drivers (obsolete, removed)
|
||||
VideoCaptureUnicap VideoCaptureAPI = 600
|
||||
|
||||
// DirectShow (via videoInput)
|
||||
VideoCaptureDshow VideoCaptureAPI = 700
|
||||
|
||||
// PvAPI, Prosilica GigE SDK
|
||||
VideoCapturePvAPI VideoCaptureAPI = 800
|
||||
|
||||
// OpenNI (for Kinect)
|
||||
VideoCaptureOpenNI VideoCaptureAPI = 900
|
||||
|
||||
// OpenNI (for Asus Xtion)
|
||||
VideoCaptureOpenNIAsus VideoCaptureAPI = 910
|
||||
|
||||
// Android - not used
|
||||
VideoCaptureAndroid VideoCaptureAPI = 1000
|
||||
|
||||
// XIMEA Camera API
|
||||
VideoCaptureXiAPI VideoCaptureAPI = 1100
|
||||
|
||||
// AVFoundation framework for iOS (OS X Lion will have the same API)
|
||||
VideoCaptureAVFoundation VideoCaptureAPI = 1200
|
||||
|
||||
// Smartek Giganetix GigEVisionSDK
|
||||
VideoCaptureGiganetix VideoCaptureAPI = 1300
|
||||
|
||||
// Microsoft Media Foundation (via videoInput)
|
||||
VideoCaptureMSMF VideoCaptureAPI = 1400
|
||||
|
||||
// Microsoft Windows Runtime using Media Foundation
|
||||
VideoCaptureWinRT VideoCaptureAPI = 1410
|
||||
|
||||
// RealSense (former Intel Perceptual Computing SDK)
|
||||
VideoCaptureIntelPerc VideoCaptureAPI = 1500
|
||||
|
||||
// Synonym for VideoCaptureIntelPerc
|
||||
VideoCaptureRealsense VideoCaptureAPI = 1500
|
||||
|
||||
// OpenNI2 (for Kinect)
|
||||
VideoCaptureOpenNI2 VideoCaptureAPI = 1600
|
||||
|
||||
// OpenNI2 (for Asus Xtion and Occipital Structure sensors)
|
||||
VideoCaptureOpenNI2Asus VideoCaptureAPI = 1610
|
||||
|
||||
// gPhoto2 connection
|
||||
VideoCaptureGPhoto2 VideoCaptureAPI = 1700
|
||||
|
||||
// GStreamer
|
||||
VideoCaptureGstreamer VideoCaptureAPI = 1800
|
||||
|
||||
// Open and record video file or stream using the FFMPEG library
|
||||
VideoCaptureFFmpeg VideoCaptureAPI = 1900
|
||||
|
||||
// OpenCV Image Sequence (e.g. img_%02d.jpg)
|
||||
VideoCaptureImages VideoCaptureAPI = 2000
|
||||
|
||||
// Aravis SDK
|
||||
VideoCaptureAravis VideoCaptureAPI = 2100
|
||||
|
||||
// Built-in OpenCV MotionJPEG codec
|
||||
VideoCaptureOpencvMjpeg VideoCaptureAPI = 2200
|
||||
|
||||
// Intel MediaSDK
|
||||
VideoCaptureIntelMFX VideoCaptureAPI = 2300
|
||||
|
||||
// XINE engine (Linux)
|
||||
VideoCaptureXINE VideoCaptureAPI = 2400
|
||||
)
|
||||
|
||||
// VideoCaptureProperties are the properties used for VideoCapture operations.
|
||||
type VideoCaptureProperties int
|
||||
|
||||
@@ -23,133 +128,158 @@ const (
|
||||
|
||||
// VideoCapturePosFrames 0-based index of the frame to be
|
||||
// decoded/captured next.
|
||||
VideoCapturePosFrames = 1
|
||||
VideoCapturePosFrames VideoCaptureProperties = 1
|
||||
|
||||
// VideoCapturePosAVIRatio relative position of the video file:
|
||||
// 0=start of the film, 1=end of the film.
|
||||
VideoCapturePosAVIRatio = 2
|
||||
VideoCapturePosAVIRatio VideoCaptureProperties = 2
|
||||
|
||||
// VideoCaptureFrameWidth is width of the frames in the video stream.
|
||||
VideoCaptureFrameWidth = 3
|
||||
VideoCaptureFrameWidth VideoCaptureProperties = 3
|
||||
|
||||
// VideoCaptureFrameHeight controls height of frames in the video stream.
|
||||
VideoCaptureFrameHeight = 4
|
||||
VideoCaptureFrameHeight VideoCaptureProperties = 4
|
||||
|
||||
// VideoCaptureFPS controls capture frame rate.
|
||||
VideoCaptureFPS = 5
|
||||
VideoCaptureFPS VideoCaptureProperties = 5
|
||||
|
||||
// VideoCaptureFOURCC contains the 4-character code of codec.
|
||||
// see VideoWriter::fourcc for details.
|
||||
VideoCaptureFOURCC = 6
|
||||
VideoCaptureFOURCC VideoCaptureProperties = 6
|
||||
|
||||
// VideoCaptureFrameCount contains number of frames in the video file.
|
||||
VideoCaptureFrameCount = 7
|
||||
VideoCaptureFrameCount VideoCaptureProperties = 7
|
||||
|
||||
// VideoCaptureFormat format of the Mat objects returned by
|
||||
// VideoCapture::retrieve().
|
||||
VideoCaptureFormat = 8
|
||||
VideoCaptureFormat VideoCaptureProperties = 8
|
||||
|
||||
// VideoCaptureMode contains backend-specific value indicating
|
||||
// the current capture mode.
|
||||
VideoCaptureMode = 9
|
||||
VideoCaptureMode VideoCaptureProperties = 9
|
||||
|
||||
// VideoCaptureBrightness is brightness of the image
|
||||
// (only for those cameras that support).
|
||||
VideoCaptureBrightness = 10
|
||||
VideoCaptureBrightness VideoCaptureProperties = 10
|
||||
|
||||
// VideoCaptureContrast is contrast of the image
|
||||
// (only for cameras that support it).
|
||||
VideoCaptureContrast = 11
|
||||
VideoCaptureContrast VideoCaptureProperties = 11
|
||||
|
||||
// VideoCaptureSaturation saturation of the image
|
||||
// (only for cameras that support).
|
||||
VideoCaptureSaturation = 12
|
||||
VideoCaptureSaturation VideoCaptureProperties = 12
|
||||
|
||||
// VideoCaptureHue hue of the image (only for cameras that support).
|
||||
VideoCaptureHue = 13
|
||||
VideoCaptureHue VideoCaptureProperties = 13
|
||||
|
||||
// VideoCaptureGain is the gain of the capture image.
|
||||
// (only for those cameras that support).
|
||||
VideoCaptureGain = 14
|
||||
VideoCaptureGain VideoCaptureProperties = 14
|
||||
|
||||
// VideoCaptureExposure is the exposure of the capture image.
|
||||
// (only for those cameras that support).
|
||||
VideoCaptureExposure = 15
|
||||
VideoCaptureExposure VideoCaptureProperties = 15
|
||||
|
||||
// VideoCaptureConvertRGB is a boolean flags indicating whether
|
||||
// images should be converted to RGB.
|
||||
VideoCaptureConvertRGB = 16
|
||||
VideoCaptureConvertRGB VideoCaptureProperties = 16
|
||||
|
||||
// VideoCaptureWhiteBalanceBlueU is currently unsupported.
|
||||
VideoCaptureWhiteBalanceBlueU = 17
|
||||
VideoCaptureWhiteBalanceBlueU VideoCaptureProperties = 17
|
||||
|
||||
// VideoCaptureRectification is the rectification flag for stereo cameras.
|
||||
// Note: only supported by DC1394 v 2.x backend currently.
|
||||
VideoCaptureRectification = 18
|
||||
VideoCaptureRectification VideoCaptureProperties = 18
|
||||
|
||||
// VideoCaptureMonochrome indicates whether images should be
|
||||
// converted to monochrome.
|
||||
VideoCaptureMonochrome = 19
|
||||
VideoCaptureMonochrome VideoCaptureProperties = 19
|
||||
|
||||
// VideoCaptureSharpness controls image capture sharpness.
|
||||
VideoCaptureSharpness = 20
|
||||
VideoCaptureSharpness VideoCaptureProperties = 20
|
||||
|
||||
// VideoCaptureAutoExposure controls the DC1394 exposure control
|
||||
// done by camera, user can adjust reference level using this feature.
|
||||
VideoCaptureAutoExposure = 21
|
||||
VideoCaptureAutoExposure VideoCaptureProperties = 21
|
||||
|
||||
// VideoCaptureGamma controls video capture gamma.
|
||||
VideoCaptureGamma = 22
|
||||
VideoCaptureGamma VideoCaptureProperties = 22
|
||||
|
||||
// VideoCaptureTemperature controls video capture temperature.
|
||||
VideoCaptureTemperature = 23
|
||||
VideoCaptureTemperature VideoCaptureProperties = 23
|
||||
|
||||
// VideoCaptureTrigger controls video capture trigger.
|
||||
VideoCaptureTrigger = 24
|
||||
VideoCaptureTrigger VideoCaptureProperties = 24
|
||||
|
||||
// VideoCaptureTriggerDelay controls video capture trigger delay.
|
||||
VideoCaptureTriggerDelay = 25
|
||||
VideoCaptureTriggerDelay VideoCaptureProperties = 25
|
||||
|
||||
// VideoCaptureWhiteBalanceRedV controls video capture setting for
|
||||
// white balance.
|
||||
VideoCaptureWhiteBalanceRedV = 26
|
||||
VideoCaptureWhiteBalanceRedV VideoCaptureProperties = 26
|
||||
|
||||
// VideoCaptureZoom controls video capture zoom.
|
||||
VideoCaptureZoom = 27
|
||||
VideoCaptureZoom VideoCaptureProperties = 27
|
||||
|
||||
// VideoCaptureFocus controls video capture focus.
|
||||
VideoCaptureFocus = 28
|
||||
VideoCaptureFocus VideoCaptureProperties = 28
|
||||
|
||||
// VideoCaptureGUID controls video capture GUID.
|
||||
VideoCaptureGUID = 29
|
||||
VideoCaptureGUID VideoCaptureProperties = 29
|
||||
|
||||
// VideoCaptureISOSpeed controls video capture ISO speed.
|
||||
VideoCaptureISOSpeed = 30
|
||||
VideoCaptureISOSpeed VideoCaptureProperties = 30
|
||||
|
||||
// VideoCaptureBacklight controls video capture backlight.
|
||||
VideoCaptureBacklight = 32
|
||||
VideoCaptureBacklight VideoCaptureProperties = 32
|
||||
|
||||
// VideoCapturePan controls video capture pan.
|
||||
VideoCapturePan = 33
|
||||
VideoCapturePan VideoCaptureProperties = 33
|
||||
|
||||
// VideoCaptureTilt controls video capture tilt.
|
||||
VideoCaptureTilt = 34
|
||||
VideoCaptureTilt VideoCaptureProperties = 34
|
||||
|
||||
// VideoCaptureRoll controls video capture roll.
|
||||
VideoCaptureRoll = 35
|
||||
VideoCaptureRoll VideoCaptureProperties = 35
|
||||
|
||||
// VideoCaptureIris controls video capture iris.
|
||||
VideoCaptureIris = 36
|
||||
VideoCaptureIris VideoCaptureProperties = 36
|
||||
|
||||
// VideoCaptureSettings is the pop up video/camera filter dialog. Note:
|
||||
// only supported by DSHOW backend currently. The property value is ignored.
|
||||
VideoCaptureSettings = 37
|
||||
VideoCaptureSettings VideoCaptureProperties = 37
|
||||
|
||||
// VideoCaptureBufferSize controls video capture buffer size.
|
||||
VideoCaptureBufferSize = 38
|
||||
VideoCaptureBufferSize VideoCaptureProperties = 38
|
||||
|
||||
// VideoCaptureAutoFocus controls video capture auto focus..
|
||||
VideoCaptureAutoFocus = 39
|
||||
VideoCaptureAutoFocus VideoCaptureProperties = 39
|
||||
|
||||
// VideoCaptureSarNumerator controls the sample aspect ratio: num/den (num)
|
||||
VideoCaptureSarNumerator VideoCaptureProperties = 40
|
||||
|
||||
// VideoCaptureSarDenominator controls the sample aspect ratio: num/den (den)
|
||||
VideoCaptureSarDenominator VideoCaptureProperties = 41
|
||||
|
||||
// VideoCaptureBackend is the current api backend (VideoCaptureAPI). Read-only property.
|
||||
VideoCaptureBackend VideoCaptureProperties = 42
|
||||
|
||||
// VideoCaptureChannel controls the video input or channel number (only for those cameras that support).
|
||||
VideoCaptureChannel VideoCaptureProperties = 43
|
||||
|
||||
// VideoCaptureAutoWB controls the auto white-balance.
|
||||
VideoCaptureAutoWB VideoCaptureProperties = 44
|
||||
|
||||
// VideoCaptureWBTemperature controls the white-balance color temperature
|
||||
VideoCaptureWBTemperature VideoCaptureProperties = 45
|
||||
|
||||
// VideoCaptureCodecPixelFormat shows the the codec's pixel format (4-character code). Read-only property.
|
||||
// Subset of AV_PIX_FMT_* or -1 if unknown.
|
||||
VideoCaptureCodecPixelFormat VideoCaptureProperties = 46
|
||||
|
||||
// VideoCaptureBitrate displays the video bitrate in kbits/s. Read-only property.
|
||||
VideoCaptureBitrate VideoCaptureProperties = 47
|
||||
)
|
||||
|
||||
// VideoCapture is a wrapper around the OpenCV VideoCapture class.
|
||||
@@ -176,6 +306,21 @@ func VideoCaptureFile(uri string) (vc *VideoCapture, err error) {
|
||||
return
|
||||
}
|
||||
|
||||
// VideoCaptureFile opens a VideoCapture from a file and prepares
|
||||
// to start capturing. It returns error if it fails to open the file stored in uri path.
|
||||
func VideoCaptureFileWithAPI(uri string, apiPreference VideoCaptureAPI) (vc *VideoCapture, err error) {
|
||||
vc = &VideoCapture{p: C.VideoCapture_New()}
|
||||
|
||||
cURI := C.CString(uri)
|
||||
defer C.free(unsafe.Pointer(cURI))
|
||||
|
||||
if !C.VideoCapture_OpenWithAPI(vc.p, cURI, C.int(apiPreference)) {
|
||||
err = fmt.Errorf("Error opening file: %s with api backend: %d", uri, apiPreference)
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
// VideoCaptureDevice opens a VideoCapture from a device and prepares
|
||||
// to start capturing. It returns error if it fails to open the video device.
|
||||
func VideoCaptureDevice(device int) (vc *VideoCapture, err error) {
|
||||
@@ -188,6 +333,18 @@ func VideoCaptureDevice(device int) (vc *VideoCapture, err error) {
|
||||
return
|
||||
}
|
||||
|
||||
// VideoCaptureDevice opens a VideoCapture from a device with the api preference.
|
||||
// It returns error if it fails to open the video device.
|
||||
func VideoCaptureDeviceWithAPI(device int, apiPreference VideoCaptureAPI) (vc *VideoCapture, err error) {
|
||||
vc = &VideoCapture{p: C.VideoCapture_New()}
|
||||
|
||||
if !C.VideoCapture_OpenDeviceWithAPI(vc.p, C.int(device), C.int(apiPreference)) {
|
||||
err = fmt.Errorf("Error opening device: %d with api backend: %d", device, apiPreference)
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
// Close VideoCapture object.
|
||||
func (v *VideoCapture) Close() error {
|
||||
C.VideoCapture_Close(v.p)
|
||||
@@ -228,7 +385,7 @@ func (v *VideoCapture) CodecString() string {
|
||||
res := ""
|
||||
hexes := []int64{0xff, 0xff00, 0xff0000, 0xff000000}
|
||||
for i, h := range hexes {
|
||||
res += string(int64(v.Get(VideoCaptureFOURCC)) & h >> (uint(i * 8)))
|
||||
res += string(rune(int64(v.Get(VideoCaptureFOURCC)) & h >> (uint(i * 8))))
|
||||
}
|
||||
return res
|
||||
}
|
||||
@@ -330,3 +487,18 @@ func OpenVideoCapture(v interface{}) (*VideoCapture, error) {
|
||||
return nil, errors.New("argument must be int or string")
|
||||
}
|
||||
}
|
||||
|
||||
func OpenVideoCaptureWithAPI(v interface{}, apiPreference VideoCaptureAPI) (*VideoCapture, error) {
|
||||
switch vv := v.(type) {
|
||||
case int:
|
||||
return VideoCaptureDeviceWithAPI(vv, apiPreference)
|
||||
case string:
|
||||
id, err := strconv.Atoi(vv)
|
||||
if err == nil {
|
||||
return VideoCaptureDeviceWithAPI(id, apiPreference)
|
||||
}
|
||||
return VideoCaptureFileWithAPI(vv, apiPreference)
|
||||
default:
|
||||
return nil, errors.New("argument must be int or string")
|
||||
}
|
||||
}
|
||||
|
||||
2
vendor/gocv.io/x/gocv/videoio.h
generated
vendored
2
vendor/gocv.io/x/gocv/videoio.h
generated
vendored
@@ -20,7 +20,9 @@ typedef void* VideoWriter;
|
||||
VideoCapture VideoCapture_New();
|
||||
void VideoCapture_Close(VideoCapture v);
|
||||
bool VideoCapture_Open(VideoCapture v, const char* uri);
|
||||
bool VideoCapture_OpenWithAPI(VideoCapture v, const char* uri, int apiPreference);
|
||||
bool VideoCapture_OpenDevice(VideoCapture v, int device);
|
||||
bool VideoCapture_OpenDeviceWithAPI(VideoCapture v, int device, int apiPreference);
|
||||
void VideoCapture_Set(VideoCapture v, int prop, double param);
|
||||
double VideoCapture_Get(VideoCapture v, int prop);
|
||||
int VideoCapture_IsOpened(VideoCapture v);
|
||||
|
||||
74
vendor/gocv.io/x/gocv/videoio_string.go
generated
vendored
74
vendor/gocv.io/x/gocv/videoio_string.go
generated
vendored
@@ -1,5 +1,63 @@
|
||||
package gocv
|
||||
|
||||
func (c VideoCaptureAPI) String() string {
|
||||
switch c {
|
||||
case VideoCaptureAny:
|
||||
return "video-capture-any"
|
||||
case VideoCaptureV4L2:
|
||||
return "video-capture-v4l2"
|
||||
case VideoCaptureFirewire:
|
||||
return "video-capture-firewire"
|
||||
case VideoCaptureQT:
|
||||
return "video-capture-qt"
|
||||
case VideoCaptureUnicap:
|
||||
return "video-capture-unicap"
|
||||
case VideoCaptureDshow:
|
||||
return "video-capture-dshow"
|
||||
case VideoCapturePvAPI:
|
||||
return "video-capture-pvapi"
|
||||
case VideoCaptureOpenNI:
|
||||
return "video-capture-openni"
|
||||
case VideoCaptureOpenNIAsus:
|
||||
return "video-capture-openni-asus"
|
||||
case VideoCaptureAndroid:
|
||||
return "video-capture-android"
|
||||
case VideoCaptureXiAPI:
|
||||
return "video-capture-xiapi"
|
||||
case VideoCaptureAVFoundation:
|
||||
return "video-capture-av-foundation"
|
||||
case VideoCaptureGiganetix:
|
||||
return "video-capture-giganetix"
|
||||
case VideoCaptureMSMF:
|
||||
return "video-capture-msmf"
|
||||
case VideoCaptureWinRT:
|
||||
return "video-capture-winrt"
|
||||
case VideoCaptureIntelPerc:
|
||||
return "video-capture-intel-perc"
|
||||
case VideoCaptureOpenNI2:
|
||||
return "video-capture-openni2"
|
||||
case VideoCaptureOpenNI2Asus:
|
||||
return "video-capture-openni2-asus"
|
||||
case VideoCaptureGPhoto2:
|
||||
return "video-capture-gphoto2"
|
||||
case VideoCaptureGstreamer:
|
||||
return "video-capture-gstreamer"
|
||||
case VideoCaptureFFmpeg:
|
||||
return "video-capture-ffmpeg"
|
||||
case VideoCaptureImages:
|
||||
return "video-capture-images"
|
||||
case VideoCaptureAravis:
|
||||
return "video-capture-aravis"
|
||||
case VideoCaptureOpencvMjpeg:
|
||||
return "video-capture-opencv-mjpeg"
|
||||
case VideoCaptureIntelMFX:
|
||||
return "video-capture-intel-mfx"
|
||||
case VideoCaptureXINE:
|
||||
return "video-capture-xine"
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func (c VideoCaptureProperties) String() string {
|
||||
switch c {
|
||||
case VideoCapturePosMsec:
|
||||
@@ -80,6 +138,22 @@ func (c VideoCaptureProperties) String() string {
|
||||
return "video-capture-buffer-size"
|
||||
case VideoCaptureAutoFocus:
|
||||
return "video-capture-auto-focus"
|
||||
case VideoCaptureSarNumerator:
|
||||
return "video-capture-sar-numerator"
|
||||
case VideoCaptureSarDenominator:
|
||||
return "video-capture-sar-denominator"
|
||||
case VideoCaptureBackend:
|
||||
return "video-capture-backend"
|
||||
case VideoCaptureChannel:
|
||||
return "video-capture-channel"
|
||||
case VideoCaptureAutoWB:
|
||||
return "video-capture-auto-wb"
|
||||
case VideoCaptureWBTemperature:
|
||||
return "video-capture-wb-temperature"
|
||||
case VideoCaptureCodecPixelFormat:
|
||||
return "video-capture-pixel-format"
|
||||
case VideoCaptureBitrate:
|
||||
return "video-capture-bitrate"
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
22
vendor/gocv.io/x/gocv/win_build_opencv.cmd
generated
vendored
22
vendor/gocv.io/x/gocv/win_build_opencv.cmd
generated
vendored
@@ -11,18 +11,18 @@ echo.
|
||||
REM This is why there is no progress bar:
|
||||
REM https://github.com/PowerShell/PowerShell/issues/2138
|
||||
|
||||
echo Downloading: opencv-4.2.0.zip [91MB]
|
||||
powershell -command "[Net.ServicePointManager]::SecurityProtocol = [Net.SecurityProtocolType]::Tls12; $ProgressPreference = 'SilentlyContinue'; Invoke-WebRequest -Uri https://github.com/opencv/opencv/archive/4.2.0.zip -OutFile c:\opencv\opencv-4.2.0.zip"
|
||||
echo Downloading: opencv-4.5.3.zip [91MB]
|
||||
powershell -command "[Net.ServicePointManager]::SecurityProtocol = [Net.SecurityProtocolType]::Tls12; $ProgressPreference = 'SilentlyContinue'; Invoke-WebRequest -Uri https://github.com/opencv/opencv/archive/4.5.3.zip -OutFile c:\opencv\opencv-4.5.3.zip"
|
||||
echo Extracting...
|
||||
powershell -command "$ProgressPreference = 'SilentlyContinue'; Expand-Archive -Path c:\opencv\opencv-4.2.0.zip -DestinationPath c:\opencv"
|
||||
del c:\opencv\opencv-4.2.0.zip /q
|
||||
powershell -command "$ProgressPreference = 'SilentlyContinue'; Expand-Archive -Path c:\opencv\opencv-4.5.3.zip -DestinationPath c:\opencv"
|
||||
del c:\opencv\opencv-4.5.3.zip /q
|
||||
echo.
|
||||
|
||||
echo Downloading: opencv_contrib-4.2.0.zip [58MB]
|
||||
powershell -command "[Net.ServicePointManager]::SecurityProtocol = [Net.SecurityProtocolType]::Tls12; $ProgressPreference = 'SilentlyContinue'; Invoke-WebRequest -Uri https://github.com/opencv/opencv_contrib/archive/4.2.0.zip -OutFile c:\opencv\opencv_contrib-4.2.0.zip"
|
||||
echo Downloading: opencv_contrib-4.5.3.zip [58MB]
|
||||
powershell -command "[Net.ServicePointManager]::SecurityProtocol = [Net.SecurityProtocolType]::Tls12; $ProgressPreference = 'SilentlyContinue'; Invoke-WebRequest -Uri https://github.com/opencv/opencv_contrib/archive/4.5.3.zip -OutFile c:\opencv\opencv_contrib-4.5.3.zip"
|
||||
echo Extracting...
|
||||
powershell -command "$ProgressPreference = 'SilentlyContinue'; Expand-Archive -Path c:\opencv\opencv_contrib-4.2.0.zip -DestinationPath c:\opencv"
|
||||
del c:\opencv\opencv_contrib-4.2.0.zip /q
|
||||
powershell -command "$ProgressPreference = 'SilentlyContinue'; Expand-Archive -Path c:\opencv\opencv_contrib-4.5.3.zip -DestinationPath c:\opencv"
|
||||
del c:\opencv\opencv_contrib-4.5.3.zip /q
|
||||
echo.
|
||||
|
||||
echo Done with downloading and extracting sources.
|
||||
@@ -32,9 +32,9 @@ echo on
|
||||
|
||||
cd /D C:\opencv\build
|
||||
set PATH=%PATH%;C:\Program Files (x86)\CMake\bin;C:\mingw-w64\x86_64-6.3.0-posix-seh-rt_v5-rev1\mingw64\bin
|
||||
cmake C:\opencv\opencv-4.2.0 -G "MinGW Makefiles" -BC:\opencv\build -DENABLE_CXX11=ON -DOPENCV_EXTRA_MODULES_PATH=C:\opencv\opencv_contrib-4.2.0\modules -DBUILD_SHARED_LIBS=ON -DWITH_IPP=OFF -DWITH_MSMF=OFF -DBUILD_EXAMPLES=OFF -DBUILD_TESTS=OFF -DBUILD_PERF_TESTS=OFF -DBUILD_opencv_java=OFF -DBUILD_opencv_python=OFF -DBUILD_opencv_python2=OFF -DBUILD_opencv_python3=OFF -DBUILD_DOCS=OFF -DENABLE_PRECOMPILED_HEADERS=OFF -DBUILD_opencv_saliency=OFF -DCPU_DISPATCH= -DOPENCV_GENERATE_PKGCONFIG=ON -DWITH_OPENCL_D3D11_NV=OFF -Wno-dev
|
||||
cmake C:\opencv\opencv-4.5.3 -G "MinGW Makefiles" -BC:\opencv\build -DENABLE_CXX11=ON -DOPENCV_EXTRA_MODULES_PATH=C:\opencv\opencv_contrib-4.5.3\modules -DBUILD_SHARED_LIBS=ON -DWITH_IPP=OFF -DWITH_MSMF=OFF -DBUILD_EXAMPLES=OFF -DBUILD_TESTS=OFF -DBUILD_PERF_TESTS=OFF -DBUILD_opencv_java=OFF -DBUILD_opencv_python=OFF -DBUILD_opencv_python2=OFF -DBUILD_opencv_python3=OFF -DBUILD_DOCS=OFF -DENABLE_PRECOMPILED_HEADERS=OFF -DBUILD_opencv_saliency=OFF -DBUILD_opencv_wechat_qrcode=OFF -DCPU_DISPATCH= -DOPENCV_GENERATE_PKGCONFIG=ON -DWITH_OPENCL_D3D11_NV=OFF -DOPENCV_ALLOCATOR_STATS_COUNTER_TYPE=int64_t -Wno-dev
|
||||
mingw32-make -j%NUMBER_OF_PROCESSORS%
|
||||
mingw32-make install
|
||||
rmdir c:\opencv\opencv-4.2.0 /s /q
|
||||
rmdir c:\opencv\opencv_contrib-4.2.0 /s /q
|
||||
rmdir c:\opencv\opencv-4.5.3 /s /q
|
||||
rmdir c:\opencv\opencv_contrib-4.5.3 /s /q
|
||||
chdir /D %GOPATH%\src\gocv.io\x\gocv
|
||||
|
||||
Reference in New Issue
Block a user