Upgrade dependencies

2021-01-17 19:07:37 +01:00
parent 7f955dd0ad
commit 44faf5fa5d
156 changed files with 24494 additions and 2850 deletions


@ -1 +0,0 @@
**

vendor/gocv.io/x/gocv/.travis.yml generated vendored (60 changed lines)

@ -1,60 +0,0 @@
# Use new container infrastructure to enable caching
sudo: required
dist: trusty
# language is go
language: go
go:
- "1.14"
go_import_path: gocv.io/x/gocv
addons:
apt:
packages:
- libgmp-dev
- build-essential
- cmake
- git
- libgtk2.0-dev
- pkg-config
- libavcodec-dev
- libavformat-dev
- libswscale-dev
- libtbb2
- libtbb-dev
- libjpeg-dev
- libpng-dev
- libtiff-dev
- libjasper-dev
- libdc1394-22-dev
- xvfb
before_install:
- ./travis_build_opencv.sh
- export PKG_CONFIG_PATH=$(pkg-config --variable pc_path pkg-config):$HOME/usr/lib/pkgconfig
- export INCLUDE_PATH=$HOME/usr/include:${INCLUDE_PATH}
- export LD_LIBRARY_PATH=$HOME/usr/lib:${LD_LIBRARY_PATH}
- sudo ln /dev/null /dev/raw1394
- export DISPLAY=:99.0
- sh -e /etc/init.d/xvfb start
before_cache:
- rm -f $HOME/fresh-cache
script:
- export GOCV_CAFFE_TEST_FILES="${HOME}/testdata"
- export GOCV_TENSORFLOW_TEST_FILES="${HOME}/testdata"
- export OPENCV_ENABLE_NONFREE=ON
- echo "Ensuring code is well formatted"; ! gofmt -s -d . | read
- go test -v -coverprofile=coverage.txt -covermode=atomic -tags matprofile .
- go test -tags matprofile ./contrib -coverprofile=contrib.txt -covermode=atomic; cat contrib.txt >> coverage.txt; rm contrib.txt;
after_success:
- bash <(curl -s https://codecov.io/bash)
# Caching so the next build will be fast as possible.
cache:
timeout: 1000
directories:
- $HOME/usr
- $HOME/testdata

vendor/gocv.io/x/gocv/CHANGELOG.md generated vendored (82 changed lines)

@ -1,3 +1,85 @@
0.26.0
---
* **all**
* update to OpenCV 4.5.1
* **core**
* add Matrix initializers: eye, ones, zeros (#758)
* add multidimensional mat creation
* add ndim mat constructor
* added accumulators
* added norm call with two mats (#600)
* keep a reference to a []byte that backs a Mat. (#755)
* remove guard for DataPtrUint8 since any Mat can be treated an Uint8
* add Mat IsContinuous() function, and ensure that any Mat data pointers used to create Go slices only apply to continuous Mats
* fix buffer size for Go strings for 32-bit operating systems
* **build**
* bring back codecov.io
* **calib3d**
* correctly close mat after test
* **dnn**
* add ReadNetFromONNX and ReadNetFromONNXBytes (#760)
* increase test coverage
* **docker**
* dockerfiles for opencv gpu builds
* **docs**
* corrected links to CUDA and OpenVINO
* list all unimplemented functions in photo module
* replace GoDocs with pkg docs
* update ROADMAP from recent contributions
* **imgproc**
* add test coverage for GetTextSizeWithBaseline()
* close all Mats even those based on memory slices
* close Mat to avoid memory leak in ToImage()
* refactoring of ToImage and ImageToMatXX functions
* **openvino**
* fix dldt repo in makefile for openvino
* **os**
* adding gcc-c++ package to rpm deps
* **photo**
* add SeamlessClone function
* **profile**
* add created mats in Split and ForwardLayers to profile (#780)
0.25.0
---
* **all**
* update to opencv release 4.5.0
* **build**
* add file dependencies needed for DNN tests
* add verbose output for tests on CircleCI
* also run unit tests on non-free algorithms. YMMV.
* fix build with cuda
* remove Travis and switch to CircleCI using Docker based builds
* update CI builds to Go 1.15
* **core**
* add mixChannels() method to Mat (#746)
* Add toGoStrings helper
* support ConvertToWithParams method
* **dnn**
* Add NMSBoxes function (#736)
* Added ability to load Torch file. Tested features for extracting 128d vectors
* fix using wrong type for unconnectedlayertype
* use default ddepth for conversions to blob from image as recommended by @berak
* **docker**
* use separate dockerfile for opencv to avoid massive rebuild
* **docs**
* add recent contributions to ROADMAP and also add cuda functions still in need of implementation
* display CircleCI badge in README
* minor improvements to CUDA docs in READMEs
* **features2d**
* add FlannBasedMatcher
* add drawmatches (#720)
* fix memory leak in SIFT
* **highgui**
* refactored ROI methods
* **imgproc**
* Add option to return baseline with GetTextSizeWithBaseline
* **objdetect**
* Add QRCode DetectAndDecodeMulti
* **videoio**
* Add video capture properties and set preferred api backend (#739)
* fix needed as discussed in golang/go issue #32479
0.24.0
---
* **all**
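
The 0.26.0 entry at the top of this changelog diff adds Matlab-style Mat initializers (eye, ones, zeros) to the core package. A minimal sketch of calling them from Go, assuming gocv 0.26.0 is on the import path; the sizes and Mat type are arbitrary choices for illustration:

```go
package main

import (
	"fmt"

	"gocv.io/x/gocv"
)

func main() {
	// The three Matlab-style initializers added in 0.26.0.
	eye := gocv.Eye(3, 3, gocv.MatTypeCV32F)
	defer eye.Close()

	ones := gocv.Ones(3, 3, gocv.MatTypeCV32F)
	defer ones.Close()

	zeros := gocv.Zeros(3, 3, gocv.MatTypeCV32F)
	defer zeros.Close()

	fmt.Println("eye size:", eye.Rows(), "x", eye.Cols())
}
```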

vendor/gocv.io/x/gocv/Dockerfile generated vendored (68 changed lines)

@ -1,66 +1,12 @@
FROM ubuntu:16.04 AS opencv
LABEL maintainer="hybridgroup"
RUN apt-get update && apt-get install -y --no-install-recommends \
git build-essential cmake pkg-config unzip libgtk2.0-dev \
curl ca-certificates libcurl4-openssl-dev libssl-dev \
libavcodec-dev libavformat-dev libswscale-dev libtbb2 libtbb-dev \
libjpeg-dev libpng-dev libtiff-dev libdc1394-22-dev && \
rm -rf /var/lib/apt/lists/*
ARG OPENCV_VERSION="4.4.0"
ENV OPENCV_VERSION $OPENCV_VERSION
RUN curl -Lo opencv.zip https://github.com/opencv/opencv/archive/${OPENCV_VERSION}.zip && \
unzip -q opencv.zip && \
curl -Lo opencv_contrib.zip https://github.com/opencv/opencv_contrib/archive/${OPENCV_VERSION}.zip && \
unzip -q opencv_contrib.zip && \
rm opencv.zip opencv_contrib.zip && \
cd opencv-${OPENCV_VERSION} && \
mkdir build && cd build && \
cmake -D CMAKE_BUILD_TYPE=RELEASE \
-D CMAKE_INSTALL_PREFIX=/usr/local \
-D OPENCV_EXTRA_MODULES_PATH=../../opencv_contrib-${OPENCV_VERSION}/modules \
-D WITH_JASPER=OFF \
-D BUILD_DOCS=OFF \
-D BUILD_EXAMPLES=OFF \
-D BUILD_TESTS=OFF \
-D BUILD_PERF_TESTS=OFF \
-D BUILD_opencv_java=NO \
-D BUILD_opencv_python=NO \
-D BUILD_opencv_python2=NO \
-D BUILD_opencv_python3=NO \
-D OPENCV_GENERATE_PKGCONFIG=ON .. && \
make -j $(nproc --all) && \
make preinstall && make install && ldconfig && \
cd / && rm -rf opencv*
#################
# Go + OpenCV #
#################
FROM opencv AS gocv
LABEL maintainer="hybridgroup"
ARG GOVERSION="1.14.1"
ENV GOVERSION $GOVERSION
RUN apt-get update && apt-get install -y --no-install-recommends \
git software-properties-common && \
curl -Lo go${GOVERSION}.linux-amd64.tar.gz https://dl.google.com/go/go${GOVERSION}.linux-amd64.tar.gz && \
tar -C /usr/local -xzf go${GOVERSION}.linux-amd64.tar.gz && \
rm go${GOVERSION}.linux-amd64.tar.gz && \
rm -rf /var/lib/apt/lists/*
# to build this docker image:
# docker build .
FROM gocv/opencv:4.5.1
ENV GOPATH /go
ENV PATH $GOPATH/bin:/usr/local/go/bin:$PATH
RUN mkdir -p "$GOPATH/src" "$GOPATH/bin" && chmod -R 777 "$GOPATH"
WORKDIR $GOPATH
COPY . /go/src/gocv.io/x/gocv/
RUN go get -u -d gocv.io/x/gocv
WORKDIR /go/src/gocv.io/x/gocv
RUN go build -tags example -o /build/gocv_version -i ./cmd/version/
WORKDIR ${GOPATH}/src/gocv.io/x/gocv/cmd/version/
RUN go build -o gocv_version -i main.go
CMD ["./gocv_version"]
CMD ["/build/gocv_version"]
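
The rewritten Dockerfile now starts from the prebuilt gocv/opencv:4.5.1 base image and only compiles and runs cmd/version. As a rough idea of what such a program boils down to (this is an illustrative sketch, not the upstream cmd/version source):

```go
package main

import (
	"fmt"

	"gocv.io/x/gocv"
)

func main() {
	// Version() reports the gocv release, OpenCVVersion() the linked OpenCV build.
	fmt.Printf("gocv version: %s\n", gocv.Version())
	fmt.Printf("opencv lib version: %s\n", gocv.OpenCVVersion())
}
```

Its output is the "gocv version / opencv lib version" pair that the README below uses to verify an install.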

vendor/gocv.io/x/gocv/Dockerfile.gpu generated vendored Normal file (12 changed lines)

@ -0,0 +1,12 @@
# to build this docker image:
# docker build -f Dockerfile.gpu .
FROM gocv/opencv:4.5.1-gpu AS gocv-gpu-test
ENV GOPATH /go
COPY . /go/src/gocv.io/x/gocv/
WORKDIR /go/src/gocv.io/x/gocv
RUN go build -tags example -o /build/gocv_cuda_version ./cmd/cuda/
CMD ["/build/gocv_cuda_version"]
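
Dockerfile.gpu builds and runs cmd/cuda against the gocv/opencv:4.5.1-gpu image. A hedged sketch of a CUDA smoke test in that spirit, assuming the gocv.io/x/gocv/cuda subpackage and a CUDA-enabled OpenCV build are available; it is not the upstream cmd/cuda source:

```go
package main

import (
	"fmt"

	"gocv.io/x/gocv"
	"gocv.io/x/gocv/cuda"
)

func main() {
	fmt.Printf("gocv version: %s\n", gocv.Version())
	fmt.Printf("opencv lib version: %s\n", gocv.OpenCVVersion())

	// How many CUDA devices the linked OpenCV build can see (0 on CPU-only hosts).
	fmt.Println("cuda devices:", cuda.GetCudaEnabledDeviceCount())
}
```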

vendor/gocv.io/x/gocv/Dockerfile.opencv generated vendored Normal file (44 changed lines)

@ -0,0 +1,44 @@
# to build this docker image:
# docker build -f Dockerfile.opencv -t gocv/opencv:4.5.1 .
FROM golang:1.15-buster AS opencv
LABEL maintainer="hybridgroup"
RUN apt-get update && apt-get install -y --no-install-recommends \
git build-essential cmake pkg-config unzip libgtk2.0-dev \
curl ca-certificates libcurl4-openssl-dev libssl-dev \
libavcodec-dev libavformat-dev libswscale-dev libtbb2 libtbb-dev \
libjpeg-dev libpng-dev libtiff-dev libdc1394-22-dev && \
rm -rf /var/lib/apt/lists/*
ARG OPENCV_VERSION="4.5.1"
ENV OPENCV_VERSION $OPENCV_VERSION
RUN curl -Lo opencv.zip https://github.com/opencv/opencv/archive/${OPENCV_VERSION}.zip && \
unzip -q opencv.zip && \
curl -Lo opencv_contrib.zip https://github.com/opencv/opencv_contrib/archive/${OPENCV_VERSION}.zip && \
unzip -q opencv_contrib.zip && \
rm opencv.zip opencv_contrib.zip && \
cd opencv-${OPENCV_VERSION} && \
mkdir build && cd build && \
cmake -D CMAKE_BUILD_TYPE=RELEASE \
-D WITH_IPP=OFF \
-D WITH_OPENGL=OFF \
-D WITH_QT=OFF \
-D CMAKE_INSTALL_PREFIX=/usr/local \
-D OPENCV_EXTRA_MODULES_PATH=../../opencv_contrib-${OPENCV_VERSION}/modules \
-D OPENCV_ENABLE_NONFREE=ON \
-D WITH_JASPER=OFF \
-D BUILD_DOCS=OFF \
-D BUILD_EXAMPLES=OFF \
-D BUILD_TESTS=OFF \
-D BUILD_PERF_TESTS=OFF \
-D BUILD_opencv_java=NO \
-D BUILD_opencv_python=NO \
-D BUILD_opencv_python2=NO \
-D BUILD_opencv_python3=NO \
-D OPENCV_GENERATE_PKGCONFIG=ON .. && \
make -j $(nproc --all) && \
make preinstall && make install && ldconfig && \
cd / && rm -rf opencv*
CMD ["go version"]

vendor/gocv.io/x/gocv/Dockerfile.opencv-gpu generated vendored Normal file (62 changed lines)

@ -0,0 +1,62 @@
# to build this docker image:
# docker build -f Dockerfile.opencv-gpu -t gocv/opencv:4.5.1-gpu .
FROM nvidia/cuda:10.2-cudnn7-devel AS opencv-gpu-base
LABEL maintainer="hybridgroup"
RUN apt-get update && apt-get install -y --no-install-recommends \
git build-essential cmake pkg-config unzip libgtk2.0-dev \
wget curl ca-certificates libcurl4-openssl-dev libssl-dev \
libavcodec-dev libavformat-dev libswscale-dev libtbb2 libtbb-dev \
libjpeg-dev libpng-dev libtiff-dev libdc1394-22-dev && \
rm -rf /var/lib/apt/lists/*
ARG OPENCV_VERSION="4.5.1"
ENV OPENCV_VERSION $OPENCV_VERSION
RUN curl -Lo opencv.zip https://github.com/opencv/opencv/archive/${OPENCV_VERSION}.zip && \
unzip -q opencv.zip && \
curl -Lo opencv_contrib.zip https://github.com/opencv/opencv_contrib/archive/${OPENCV_VERSION}.zip && \
unzip -q opencv_contrib.zip && \
rm opencv.zip opencv_contrib.zip && \
cd opencv-${OPENCV_VERSION} && \
mkdir build && cd build && \
cmake -D CMAKE_BUILD_TYPE=RELEASE \
-D WITH_IPP=OFF \
-D WITH_OPENGL=OFF \
-D WITH_QT=OFF \
-D CMAKE_INSTALL_PREFIX=/usr/local \
-D OPENCV_EXTRA_MODULES_PATH=../../opencv_contrib-${OPENCV_VERSION}/modules \
-D OPENCV_ENABLE_NONFREE=ON \
-D WITH_JASPER=OFF \
-D BUILD_DOCS=OFF \
-D BUILD_EXAMPLES=OFF \
-D BUILD_TESTS=OFF \
-D BUILD_PERF_TESTS=OFF \
-D BUILD_opencv_java=NO \
-D BUILD_opencv_python=NO \
-D BUILD_opencv_python2=NO \
-D BUILD_opencv_python3=NO \
-D WITH_CUDA=ON \
-D ENABLE_FAST_MATH=1 \
-D CUDA_FAST_MATH=1 \
-D WITH_CUBLAS=1 \
-D CUDA_TOOLKIT_ROOT_DIR=/usr/local/cuda/ \
-D BUILD_opencv_cudacodec=OFF \
-D WITH_CUDNN=ON \
-D OPENCV_DNN_CUDA=ON \
-D CUDA_GENERATION=Auto \
-D OPENCV_GENERATE_PKGCONFIG=ON .. && \
make -j $(nproc --all) && \
make preinstall && make install && ldconfig && \
cd / && rm -rf opencv*
# install golang here
FROM opencv-gpu-base AS opencv-gpu-golang
ENV GO_RELEASE=1.15.5
RUN wget https://dl.google.com/go/go${GO_RELEASE}.linux-amd64.tar.gz && \
tar xfv go${GO_RELEASE}.linux-amd64.tar.gz -C /usr/local && \
rm go${GO_RELEASE}.linux-amd64.tar.gz
ENV PATH="${PATH}:/usr/local/go/bin"
CMD ["go version"]

vendor/gocv.io/x/gocv/Makefile generated vendored (31 changed lines)

@ -2,10 +2,10 @@
.PHONY: test deps download build clean astyle cmds docker
# OpenCV version to use.
OPENCV_VERSION?=4.4.0
OPENCV_VERSION?=4.5.1
# Go version to use when building Docker image
GOVERSION?=1.14.4
GOVERSION?=1.15.3
# Temporary directory to put files into.
TMP_DIR?=/tmp/
@ -14,7 +14,7 @@ TMP_DIR?=/tmp/
BUILD_SHARED_LIBS?=ON
# Package list for each well-known Linux distribution
RPMS=cmake curl wget git gtk2-devel libpng-devel libjpeg-devel libtiff-devel tbb tbb-devel libdc1394-devel unzip
RPMS=cmake curl wget git gtk2-devel libpng-devel libjpeg-devel libtiff-devel tbb tbb-devel libdc1394-devel unzip gcc-c++
DEBS=unzip wget build-essential cmake curl git libgtk2.0-dev pkg-config libavcodec-dev libavformat-dev libswscale-dev libtbb2 libtbb-dev libjpeg-dev libpng-dev libtiff-dev libdc1394-22-dev
explain:
@ -60,14 +60,15 @@ download:
rm opencv.zip opencv_contrib.zip
cd -
# Download dldt source tarballs.
download_dldt:
# Download openvino source tarballs.
download_openvino:
sudo rm -rf /usr/local/dldt/
sudo git clone https://github.com/opencv/dldt -b 2019 /usr/local/dldt/
sudo rm -rf /usr/local/openvino/
sudo git clone https://github.com/openvinotoolkit/openvino -b 2019_R3.1 /usr/local/openvino/
# Build dldt.
build_dldt:
cd /usr/local/dldt/inference-engine
# Build openvino.
build_openvino_package:
cd /usr/local/openvino/inference-engine
sudo git submodule init
sudo git submodule update --recursive
sudo ./install_dependencies.sh
@ -184,10 +185,10 @@ install_raspi_zero: deps download build_raspi_zero sudo_install clean verify
install_cuda: deps download sudo_pre_install_clean build_cuda sudo_install clean verify verify_cuda
# Do everything with openvino.
install_openvino: deps download download_dldt sudo_pre_install_clean build_dldt sudo_install_dldt build_openvino sudo_install clean verify_openvino
install_openvino: deps download download_openvino sudo_pre_install_clean build_openvino_package sudo_install_openvino build_openvino sudo_install clean verify_openvino
# Do everything with openvino and cuda.
install_all: deps download download_dldt sudo_pre_install_clean build_dldt sudo_install_dldt build_all sudo_install clean verify_openvino verify_cuda
install_all: deps download download_openvino sudo_pre_install_clean build_openvino_package sudo_install_openvino build_all sudo_install clean verify_openvino verify_cuda
# Install system wide.
sudo_install:
@ -197,8 +198,8 @@ sudo_install:
cd -
# Install system wide.
sudo_install_dldt:
cd /usr/local/dldt/inference-engine/build
sudo_install_openvino:
cd /usr/local/openvino/inference-engine/build
sudo $(MAKE) install
sudo ldconfig
cd -
@ -219,7 +220,7 @@ verify_openvino:
# This assumes env.sh was already sourced.
# pvt is not tested here since it requires additional depenedences.
test:
go test . ./contrib
go test -tags matprofile . ./contrib
docker:
docker build --build-arg OPENCV_VERSION=$(OPENCV_VERSION) --build-arg GOVERSION=$(GOVERSION) .
@ -227,7 +228,7 @@ docker:
astyle:
astyle --project=.astylerc --recursive *.cpp,*.h
CMDS=basic-drawing caffe-classifier captest capwindow counter faceblur facedetect find-circles hand-gestures img-similarity mjpeg-streamer motion-detect pose saveimage savevideo showimage ssd-facedetect tf-classifier tracking version
CMDS=basic-drawing caffe-classifier captest capwindow counter faceblur facedetect find-circles hand-gestures hello-sift img-similarity mjpeg-streamer motion-detect pose saveimage savevideo showimage ssd-facedetect tf-classifier tracking version
cmds:
for cmd in $(CMDS) ; do \
go build -o build/$$cmd cmd/$$cmd/main.go ;
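
The test target above now runs with -tags matprofile, the build tag under which gocv tracks live Mats in a pprof profile. A sketch of the kind of leak check that enables; the package name and sizes here are illustrative, and gocv.MatProfile only exists when the tag is set:

```go
// +build matprofile

package gocv_test

import (
	"testing"

	"gocv.io/x/gocv"
)

func TestNoMatLeak(t *testing.T) {
	before := gocv.MatProfile.Count()

	m := gocv.NewMatWithSize(10, 10, gocv.MatTypeCV8UC1)
	m.Close()

	// Every Mat created under the matprofile tag stays in the profile until Close.
	if after := gocv.MatProfile.Count(); after != before {
		t.Errorf("leaked %d Mat(s)", after-before)
	}
}
```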

vendor/gocv.io/x/gocv/README.md generated vendored (70 changed lines)

@ -2,8 +2,8 @@
[![GoCV](https://raw.githubusercontent.com/hybridgroup/gocv/release/images/gocvlogo.jpg)](http://gocv.io/)
[![GoDoc](https://godoc.org/gocv.io/x/gocv?status.svg)](https://godoc.org/github.com/hybridgroup/gocv)
[![Travis Build Status](https://travis-ci.org/hybridgroup/gocv.svg?branch=dev)](https://travis-ci.org/hybridgroup/gocv)
[![Go Reference](https://pkg.go.dev/badge/gocv.io/x/gocv.svg)](https://pkg.go.dev/gocv.io/x/gocv)
[![CircleCI Build status](https://circleci.com/gh/hybridgroup/gocv/tree/dev.svg?style=svg)](https://circleci.com/gh/hybridgroup/gocv/tree/dev)
[![AppVeyor Build status](https://ci.appveyor.com/api/projects/status/9asd5foet54ru69q/branch/dev?svg=true)](https://ci.appveyor.com/project/deadprogram/gocv/branch/dev)
[![codecov](https://codecov.io/gh/hybridgroup/gocv/branch/dev/graph/badge.svg)](https://codecov.io/gh/hybridgroup/gocv)
[![Go Report Card](https://goreportcard.com/badge/github.com/hybridgroup/gocv)](https://goreportcard.com/report/github.com/hybridgroup/gocv)
@ -11,7 +11,9 @@
The GoCV package provides Go language bindings for the [OpenCV 4](http://opencv.org/) computer vision library.
The GoCV package supports the latest releases of Go and OpenCV (v4.4.0) on Linux, macOS, and Windows. We intend to make the Go language a "first-class" client compatible with the latest developments in the OpenCV ecosystem.
The GoCV package supports the latest releases of Go and OpenCV (v4.5.1) on Linux, macOS, and Windows. We intend to make the Go language a "first-class" client compatible with the latest developments in the OpenCV ecosystem.
GoCV supports [CUDA](https://en.wikipedia.org/wiki/CUDA) for hardware acceleration using Nvidia GPUs. Check out the [CUDA README](./cuda/README.md) for more info on how to use GoCV with OpenCV/CUDA.
GoCV also supports [Intel OpenVINO](https://software.intel.com/en-us/openvino-toolkit). Check out the [OpenVINO README](./openvino/README.md) for more info on how to use GoCV with the Intel OpenVINO toolkit.
@ -127,17 +129,17 @@ To install GoCV, run the following command:
go get -u -d gocv.io/x/gocv
```
To run code that uses the GoCV package, you must also install OpenCV 4.4.0 on your system. Here are instructions for Ubuntu, Raspian, macOS, and Windows.
To run code that uses the GoCV package, you must also install OpenCV 4.5.1 on your system. Here are instructions for Ubuntu, Raspian, macOS, and Windows.
## Ubuntu/Linux
### Installation
You can use `make` to install OpenCV 4.4.0 with the handy `Makefile` included with this repo. If you already have installed OpenCV, you do not need to do so again. The installation performed by the `Makefile` is minimal, so it may remove OpenCV options such as Python or Java wrappers if you have already installed OpenCV some other way.
You can use `make` to install OpenCV 4.5.1 with the handy `Makefile` included with this repo. If you already have installed OpenCV, you do not need to do so again. The installation performed by the `Makefile` is minimal, so it may remove OpenCV options such as Python or Java wrappers if you have already installed OpenCV some other way.
#### Quick Install
The following commands should do everything to download and install OpenCV 4.4.0 on Linux:
The following commands should do everything to download and install OpenCV 4.5.1 on Linux:
cd $GOPATH/src/gocv.io/x/gocv
make install
@ -148,22 +150,22 @@ If you need static opencv libraries
If it works correctly, at the end of the entire process, the following message should be displayed:
gocv version: 0.22.0
opencv lib version: 4.4.0
gocv version: 0.26.0
opencv lib version: 4.5.1
That's it, now you are ready to use GoCV.
#### Install Cuda
#### Using CUDA with GoCV
[cuda directory](./cuda)
See the [cuda directory](./cuda) for information.
#### Install OpenVINO
#### Using OpenVINO with GoCV
[openvino directory](./openvino)
See the [openvino directory](./openvino) for information.
#### Install OpenVINO and Cuda
#### Make Install for OpenVINO and Cuda
The following commands should do everything to download and install OpenCV 4.4.0 with Cuda and OpenVINO on Linux:
The following commands should do everything to download and install OpenCV 4.5.1 with CUDA and OpenVINO on Linux:
cd $GOPATH/src/gocv.io/x/gocv
make install_all
@ -174,8 +176,8 @@ If you need static opencv libraries
If it works correctly, at the end of the entire process, the following message should be displayed:
gocv version: 0.22.0
opencv lib version: 4.4.0-openvino
gocv version: 0.26.0
opencv lib version: 4.5.1-openvino
cuda information:
Device 0: "GeForce MX150" 2003Mb, sm_61, Driver/Runtime ver.10.0/10.0
@ -195,7 +197,7 @@ Next, you need to update the system, and install any required packages:
#### Download source
Now, download the OpenCV 4.4.0 and OpenCV Contrib source code:
Now, download the OpenCV 4.5.1 and OpenCV Contrib source code:
make download
@ -229,8 +231,8 @@ Now you should be able to build or run any of the examples:
The version program should output the following:
gocv version: 0.22.0
opencv lib version: 4.4.0
gocv version: 0.26.0
opencv lib version: 4.5.1
#### Cleanup extra files
@ -315,19 +317,19 @@ There is a Docker image with Alpine 3.7 that has been created by project contrib
### Installation
We have a special installation for the Raspberry Pi that includes some hardware optimizations. You use `make` to install OpenCV 4.4.0 with the handy `Makefile` included with this repo. If you already have installed OpenCV, you do not need to do so again. The installation performed by the `Makefile` is minimal, so it may remove OpenCV options such as Python or Java wrappers if you have already installed OpenCV some other way.
We have a special installation for the Raspberry Pi that includes some hardware optimizations. You use `make` to install OpenCV 4.5.1 with the handy `Makefile` included with this repo. If you already have installed OpenCV, you do not need to do so again. The installation performed by the `Makefile` is minimal, so it may remove OpenCV options such as Python or Java wrappers if you have already installed OpenCV some other way.
#### Quick Install
The following commands should do everything to download and install OpenCV 4.4.0 on Raspbian:
The following commands should do everything to download and install OpenCV 4.5.1 on Raspbian:
cd $GOPATH/src/gocv.io/x/gocv
make install_raspi
If it works correctly, at the end of the entire process, the following message should be displayed:
gocv version: 0.22.0
opencv lib version: 4.4.0
gocv version: 0.26.0
opencv lib version: 4.5.1
That's it, now you are ready to use GoCV.
@ -335,13 +337,13 @@ That's it, now you are ready to use GoCV.
### Installation
You can install OpenCV 4.4.0 using Homebrew.
You can install OpenCV 4.5.1 using Homebrew.
If you already have an earlier version of OpenCV (3.4.x) installed, you should probably remove it before installing the new version:
brew uninstall opencv
You can then install OpenCV 4.4.0:
You can then install OpenCV 4.5.1:
brew install opencv
@ -365,8 +367,8 @@ Now you should be able to build or run any of the examples:
The version program should output the following:
gocv version: 0.22.0
opencv lib version: 4.4.0
gocv version: 0.26.0
opencv lib version: 4.5.1
### Cache builds
@ -381,8 +383,8 @@ By default, pkg-config is used to determine the correct flags for compiling and
For example:
export CGO_CXXFLAGS="--std=c++11"
export CGO_CPPFLAGS="-I/usr/local/Cellar/opencv/4.4.0/include"
export CGO_LDFLAGS="-L/usr/local/Cellar/opencv/4.4.0/lib -lopencv_stitching -lopencv_superres -lopencv_videostab -lopencv_aruco -lopencv_bgsegm -lopencv_bioinspired -lopencv_ccalib -lopencv_dnn_objdetect -lopencv_dpm -lopencv_face -lopencv_photo -lopencv_fuzzy -lopencv_hfs -lopencv_img_hash -lopencv_line_descriptor -lopencv_optflow -lopencv_reg -lopencv_rgbd -lopencv_saliency -lopencv_stereo -lopencv_structured_light -lopencv_phase_unwrapping -lopencv_surface_matching -lopencv_tracking -lopencv_datasets -lopencv_dnn -lopencv_plot -lopencv_xfeatures2d -lopencv_shape -lopencv_video -lopencv_ml -lopencv_ximgproc -lopencv_calib3d -lopencv_features2d -lopencv_highgui -lopencv_videoio -lopencv_flann -lopencv_xobjdetect -lopencv_imgcodecs -lopencv_objdetect -lopencv_xphoto -lopencv_imgproc -lopencv_core"
export CGO_CPPFLAGS="-I/usr/local/Cellar/opencv/4.5.1/include"
export CGO_LDFLAGS="-L/usr/local/Cellar/opencv/4.5.1/lib -lopencv_stitching -lopencv_superres -lopencv_videostab -lopencv_aruco -lopencv_bgsegm -lopencv_bioinspired -lopencv_ccalib -lopencv_dnn_objdetect -lopencv_dpm -lopencv_face -lopencv_photo -lopencv_fuzzy -lopencv_hfs -lopencv_img_hash -lopencv_line_descriptor -lopencv_optflow -lopencv_reg -lopencv_rgbd -lopencv_saliency -lopencv_stereo -lopencv_structured_light -lopencv_phase_unwrapping -lopencv_surface_matching -lopencv_tracking -lopencv_datasets -lopencv_dnn -lopencv_plot -lopencv_xfeatures2d -lopencv_shape -lopencv_video -lopencv_ml -lopencv_ximgproc -lopencv_calib3d -lopencv_features2d -lopencv_highgui -lopencv_videoio -lopencv_flann -lopencv_xobjdetect -lopencv_imgcodecs -lopencv_objdetect -lopencv_xphoto -lopencv_imgproc -lopencv_core"
Please note that you will need to run these 3 lines of code one time in your current session in order to build or run the code, in order to setup the needed ENV variables. Once you have done so, you can execute code that uses GoCV with your custom environment like this:
@ -394,7 +396,7 @@ Please note that you will need to run these 3 lines of code one time in your cur
The following assumes that you are running a 64-bit version of Windows 10.
In order to build and install OpenCV 4.4.0 on Windows, you must first download and install MinGW-W64 and CMake, as follows.
In order to build and install OpenCV 4.5.1 on Windows, you must first download and install MinGW-W64 and CMake, as follows.
#### MinGW-W64
@ -410,9 +412,9 @@ Add the `C:\Program Files\mingw-w64\x86_64-7.3.0-posix-seh-rt_v5-rev2\mingw64\bi
Download and install CMake [https://cmake.org/download/](https://cmake.org/download/) to the default location. CMake installer will add CMake to your system path.
#### OpenCV 4.4.0 and OpenCV Contrib Modules
#### OpenCV 4.5.1 and OpenCV Contrib Modules
The following commands should do everything to download and install OpenCV 4.4.0 on Windows:
The following commands should do everything to download and install OpenCV 4.5.1 on Windows:
chdir %GOPATH%\src\gocv.io\x\gocv
win_build_opencv.cmd
@ -433,8 +435,8 @@ Now you should be able to build or run any of the command examples:
The version program should output the following:
gocv version: 0.22.0
opencv lib version: 4.4.0
gocv version: 0.26.0
opencv lib version: 4.5.1
That's it, now you are ready to use GoCV.
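
The README hunks above are mostly version bumps from 4.4.0 to 4.5.1 in the install instructions. For context, the kind of first program those instructions lead up to is reading and displaying an image; a minimal sketch, where the default file name is only a placeholder:

```go
package main

import (
	"fmt"
	"os"

	"gocv.io/x/gocv"
)

func main() {
	// Placeholder default; pass an image path as the first argument.
	file := "input.jpg"
	if len(os.Args) > 1 {
		file = os.Args[1]
	}

	img := gocv.IMRead(file, gocv.IMReadColor)
	defer img.Close()
	if img.Empty() {
		fmt.Println("could not read", file)
		return
	}

	window := gocv.NewWindow("gocv")
	defer window.Close()

	window.IMShow(img)
	window.WaitKey(0)
}
```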

vendor/gocv.io/x/gocv/ROADMAP.md generated vendored (119 changed lines)

@ -16,7 +16,6 @@ Your pull requests will be greatly appreciated!
- [ ] **Basic structures - WORK STARTED**
- [ ] **Operations on arrays - WORK STARTED**. The following functions still need implementation:
- [ ] [Mahalanobis](https://docs.opencv.org/master/d2/de8/group__core__array.html#ga4493aee129179459cbfc6064f051aa7d)
- [ ] [mixChannels](https://docs.opencv.org/master/d2/de8/group__core__array.html#ga51d768c270a1cdd3497255017c4504be)
- [ ] [mulTransposed](https://docs.opencv.org/master/d2/de8/group__core__array.html#gadc4e49f8f7a155044e3be1b9e3b270ab)
- [ ] [PCABackProject](https://docs.opencv.org/master/d2/de8/group__core__array.html#gab26049f30ee8e94f7d69d82c124faafc)
- [ ] [PCACompute](https://docs.opencv.org/master/d2/de8/group__core__array.html#ga4e2073c7311f292a0648f04c37b73781)
@ -82,7 +81,10 @@ Your pull requests will be greatly appreciated!
- [ ] [pointPolygonTest](https://docs.opencv.org/master/d3/dc0/group__imgproc__shape.html#ga1a539e8db2135af2566103705d7a5722)
- [ ] [rotatedRectangleIntersection](https://docs.opencv.org/master/d3/dc0/group__imgproc__shape.html#ga8740e7645628c59d238b0b22c2abe2d4)
- [ ] Motion Analysis and Object Tracking
- [ ] **Motion Analysis and Object Tracking - WORK STARTED** The following functions still need implementation:
- [ ] [createHanningWindow](https://docs.opencv.org/master/d7/df3/group__imgproc__motion.html#ga80e5c3de52f6bab3a7c1e60e89308e1b)
- [ ] [phaseCorrelate](https://docs.opencv.org/master/d7/df3/group__imgproc__motion.html#ga552420a2ace9ef3fb053cd630fdb4952)
- [ ] **Feature Detection - WORK STARTED** The following functions still need implementation:
- [ ] [cornerEigenValsAndVecs](https://docs.opencv.org/master/dd/d1a/group__imgproc__feature.html#ga4055896d9ef77dd3cacf2c5f60e13f1c)
- [ ] [cornerHarris](https://docs.opencv.org/master/dd/d1a/group__imgproc__feature.html#gac1fc3598018010880e370e2f709b4345)
@ -107,6 +109,7 @@ Your pull requests will be greatly appreciated!
- [ ] [FarnebackOpticalFlow](https://docs.opencv.org/master/de/d9e/classcv_1_1FarnebackOpticalFlow.html)
- [ ] [KalmanFilter](https://docs.opencv.org/master/dd/d6a/classcv_1_1KalmanFilter.html)
- [ ] [SparsePyrLKOpticalFlow](https://docs.opencv.org/master/d7/d08/classcv_1_1SparsePyrLKOpticalFlow.html)
- [ ] [GOTURN](https://docs.opencv.org/master/d7/d4c/classcv_1_1TrackerGOTURN.html)
- [ ] **calib3d. Camera Calibration and 3D Reconstruction - WORK STARTED**. The following functions still need implementation:
- [ ] **Camera Calibration - WORK STARTED** The following functions still need implementation:
@ -171,31 +174,102 @@ Your pull requests will be greatly appreciated!
- [ ] **features2d. 2D Features Framework - WORK STARTED**
- [X] **Feature Detection and Description**
- [ ] **Descriptor Matchers - WORK STARTED** The following functions still need implementation:
- [ ] [FlannBasedMatcher](https://docs.opencv.org/master/dc/de2/classcv_1_1FlannBasedMatcher.html)
- [ ] **Drawing Function of Keypoints and Matches - WORK STARTED** The following function still needs implementation:
- [ ] [drawMatches](https://docs.opencv.org/master/d4/d5d/group__features2d__draw.html#ga7421b3941617d7267e3f2311582f49e1)
- [X] **Descriptor Matchers**
- [X] **Drawing Function of Keypoints and Matches**
- [ ] Object Categorization
- [ ] [BOWImgDescriptorExtractor](https://docs.opencv.org/master/d2/d6b/classcv_1_1BOWImgDescriptorExtractor.html)
- [ ] [BOWKMeansTrainer](https://docs.opencv.org/master/d4/d72/classcv_1_1BOWKMeansTrainer.html)
- [X] **objdetect. Object Detection**
- [ ] **dnn. Deep Neural Network module - WORK STARTED** The following functions still need implementation:
- [ ] [NMSBoxes](https://docs.opencv.org/master/d6/d0f/group__dnn.html#ga9d118d70a1659af729d01b10233213ee)
- [X] **dnn. Deep Neural Network module**
- [ ] ml. Machine Learning
- [ ] flann. Clustering and Search in Multi-Dimensional Spaces
- [ ] photo. Computational Photography
- [ ] **photo. Computational Photography - WORK STARTED** The following functions still need implementation:
- [ ] [inpaint](https://docs.opencv.org/master/d7/d8b/group__photo__inpaint.html#gaedd30dfa0214fec4c88138b51d678085)
- [ ] [denoise_TVL1](https://docs.opencv.org/master/d1/d79/group__photo__denoise.html#ga7602ed5ae17b7de40152b922227c4e4f)
- [ ] [fastNlMeansDenoising](https://docs.opencv.org/master/d1/d79/group__photo__denoise.html#ga4c6b0031f56ea3f98f768881279ffe93)
- [ ] [fastNlMeansDenoisingColored](https://docs.opencv.org/master/d1/d79/group__photo__denoise.html#ga03aa4189fc3e31dafd638d90de335617)
- [ ] [fastNlMeansDenoisingColoredMulti](https://docs.opencv.org/master/d1/d79/group__photo__denoise.html#gaa501e71f52fb2dc17ff8ca5e7d2d3619)
- [ ] [fastNlMeansDenoisingMulti](https://docs.opencv.org/master/d1/d79/group__photo__denoise.html#gaf4421bf068c4d632ea7f0aa38e0bf172)
- [ ] [createAlignMTB](https://docs.opencv.org/master/d6/df5/group__photo__hdr.html#ga2f1fafc885a5d79dbfb3542e08db0244)
- [ ] [createCalibrateDebevec](https://docs.opencv.org/master/d6/df5/group__photo__hdr.html#ga7fed9707ad5f2cc0e633888867109f90)
- [ ] [createCalibrateRobertson](https://docs.opencv.org/master/d6/df5/group__photo__hdr.html#gae77813a21cd351a596619e5ff013be5d)
- [ ] [createMergeDebevec](https://docs.opencv.org/master/d6/df5/group__photo__hdr.html#gaa8eab36bc764abb2a225db7c945f87f9)
- [ ] [createMergeMertens](https://docs.opencv.org/master/d6/df5/group__photo__hdr.html#ga79d59aa3cb3a7c664e59a4b5acc1ccb6)
- [ ] [createMergeRobertson](https://docs.opencv.org/master/d6/df5/group__photo__hdr.html#ga460d4a1df1a7e8cdcf7445bb87a8fb78)
- [ ] [createTonemap](https://docs.opencv.org/master/d6/df5/group__photo__hdr.html#gabcbd653140b93a1fa87ccce94548cd0d)
- [ ] [createTonemapDrago](https://docs.opencv.org/master/d6/df5/group__photo__hdr.html#ga72bf92bb6b8653ee4be650ac01cf50b6)
- [ ] [createTonemapMantiuk](https://docs.opencv.org/master/d6/df5/group__photo__hdr.html#ga3b3f3bf083b7515802f039a6a70f2d21)
- [ ] [createTonemapReinhard](https://docs.opencv.org/master/d6/df5/group__photo__hdr.html#gadabe7f6bf1fa96ad0fd644df9182c2fb)
- [ ] [decolor](https://docs.opencv.org/master/d4/d32/group__photo__decolor.html#ga4864d4c007bda5dacdc5e9d4ed7e222c)
- [ ] [detailEnhance](https://docs.opencv.org/master/df/dac/group__photo__render.html#ga0de660cb6f371a464a74c7b651415975)
- [ ] [edgePreservingFilter](https://docs.opencv.org/master/df/dac/group__photo__render.html#gafaee2977597029bc8e35da6e67bd31f7)
- [ ] [pencilSketch](https://docs.opencv.org/master/df/dac/group__photo__render.html#gae5930dd822c713b36f8529b21ddebd0c)
- [ ] [stylization](https://docs.opencv.org/master/df/dac/group__photo__render.html#gacb0f7324017df153d7b5d095aed53206)
- [ ] stitching. Images stitching
- [ ] cudaarithm. Operations on Matrices
- [ ] cudabgsegm. Background Segmentation
## CUDA
- [ ] **cudaarithm. Operations on Matrices - WORK STARTED** The following functions still need implementation:
- [ ] [cv::cuda::abs](https://docs.opencv.org/master/d8/d34/group__cudaarithm__elem.html#ga54a72bd772494ab34d05406fd76df2b6)
- [ ] [cv::cuda::absdiff](https://docs.opencv.org/master/d8/d34/group__cudaarithm__elem.html#gac062b283cf46ee90f74a773d3382ab54)
- [ ] [cv::cuda::add](https://docs.opencv.org/master/d8/d34/group__cudaarithm__elem.html#ga5d9794bde97ed23d1c1485249074a8b1)
- [ ] [cv::cuda::addWeighted](https://docs.opencv.org/master/d8/d34/group__cudaarithm__elem.html#ga2cd14a684ea70c6ab2a63ee90ffe6201)
- [ ] [cv::cuda::bitwise_and](https://docs.opencv.org/master/d8/d34/group__cudaarithm__elem.html#ga78d7c1a013877abd4237fbfc4e13bd76)
- [ ] [cv::cuda::bitwise_not](https://docs.opencv.org/master/d8/d34/group__cudaarithm__elem.html#gae58159a2259ae1acc76b531c171cf06a)
- [ ] [cv::cuda::bitwise_or](https://docs.opencv.org/master/d8/d34/group__cudaarithm__elem.html#gafd098ee3e51c68daa793999c1da3dfb7)
- [ ] [cv::cuda::bitwise_xor](https://docs.opencv.org/master/d8/d34/group__cudaarithm__elem.html#ga3d95d4faafb099aacf18e8b915a4ad8d)
- [ ] [cv::cuda::cartToPolar](https://docs.opencv.org/master/d8/d34/group__cudaarithm__elem.html#ga82210c7d1c1d42e616e554bf75a53480)
- [ ] [cv::cuda::compare](https://docs.opencv.org/master/d8/d34/group__cudaarithm__elem.html#ga4d41cd679f4a83862a3de71a6057db54)
- [ ] [cv::cuda::divide](https://docs.opencv.org/master/d8/d34/group__cudaarithm__elem.html#ga124315aa226260841e25cc0b9ea99dc3)
- [ ] [cv::cuda::exp](https://docs.opencv.org/master/d8/d34/group__cudaarithm__elem.html#gac6e51541d3bb0a7a396128e4d5919b61)
- [ ] [cv::cuda::log](https://docs.opencv.org/master/d8/d34/group__cudaarithm__elem.html#gaae9c60739e2d1a977b4d3250a0be42ca)
- [ ] [cv::cuda::lshift](https://docs.opencv.org/master/d8/d34/group__cudaarithm__elem.html#gafd072accecb14c9adccdad45e3bf2300)
- [ ] [cv::cuda::magnitude](https://docs.opencv.org/master/d8/d34/group__cudaarithm__elem.html#ga3d17f4fcd79d7c01fadd217969009463)
- [ ] [cv::cuda::magnitudeSqr](https://docs.opencv.org/master/d8/d34/group__cudaarithm__elem.html#ga7613e382d257e150033d0ce4d6098f6a)
- [ ] [cv::cuda::max](https://docs.opencv.org/master/d8/d34/group__cudaarithm__elem.html#gadb5dd3d870f10c0866035755b929b1e7)
- [ ] [cv::cuda::min](https://docs.opencv.org/master/d8/d34/group__cudaarithm__elem.html#ga74f0b05a65b3d949c237abb5e6c60867)
- [ ] [cv::cuda::multiply](https://docs.opencv.org/master/d8/d34/group__cudaarithm__elem.html#ga497cc0615bf717e1e615143b56f00591)
- [ ] [cv::cuda::phase](https://docs.opencv.org/master/d8/d34/group__cudaarithm__elem.html#ga5b75ec01be06dcd6e27ada09a0d4656a)
- [ ] [cv::cuda::polarToCart](https://docs.opencv.org/master/d8/d34/group__cudaarithm__elem.html#ga01516a286a329c303c2db746513dd9df)
- [ ] [cv::cuda::pow](https://docs.opencv.org/master/d8/d34/group__cudaarithm__elem.html#ga82d04ef4bcc4dfa9bfbe76488007c6c4)
- [ ] [cv::cuda::rshift](https://docs.opencv.org/master/d8/d34/group__cudaarithm__elem.html#ga87af0b66358cc302676f35c1fd56c2ed)
- [ ] [cv::cuda::sqr](https://docs.opencv.org/master/d8/d34/group__cudaarithm__elem.html#ga8aae233da90ce0ffe309ab8004342acb)
- [ ] [cv::cuda::sqrt](https://docs.opencv.org/master/d8/d34/group__cudaarithm__elem.html#ga09303680cb1a5521a922b6d392028d8c)
- [ ] [cv::cuda::subtract](https://docs.opencv.org/master/d8/d34/group__cudaarithm__elem.html#ga6eab60fc250059e2fda79c5636bd067f)
- [X] **cudabgsegm. Background Segmentation**
- [ ] cudacodec. Video Encoding/Decoding
- [ ] cudafeatures2d. Feature Detection and Description
- [ ] cudafilters. Image Filtering
- [ ] cudaimgproc. Image Processing
- [ ] **cudaimgproc. Image Processing - WORK STARTED** The following functions still need implementation:
- [ ] [cv::cuda::TemplateMatching](https://docs.opencv.org/master/d2/d58/classcv_1_1cuda_1_1TemplateMatching.html)
- [ ] [cv::cuda::alphaComp](https://docs.opencv.org/master/db/d8c/group__cudaimgproc__color.html#ga08a698700458d9311390997b57fbf8dc)
- [ ] [cv::cuda::demosaicing](https://docs.opencv.org/master/db/d8c/group__cudaimgproc__color.html#ga7fb153572b573ebd2d7610fcbe64166e)
- [ ] [cv::cuda::gammaCorrection](https://docs.opencv.org/master/db/d8c/group__cudaimgproc__color.html#gaf4195a8409c3b8fbfa37295c2b2c4729)
- [ ] [cv::cuda::swapChannels](https://docs.opencv.org/master/db/d8c/group__cudaimgproc__color.html#ga75a29cc4a97cde0d43ea066b01de927e)
- [ ] [cv::cuda::calcHist](https://docs.opencv.org/master/d8/d0e/group__cudaimgproc__hist.html#gaaf3944106890947020bb4522a7619c26)
- [ ] [cv::cuda::CLAHE](https://docs.opencv.org/master/db/d79/classcv_1_1cuda_1_1CLAHE.html)
- [ ] [cv::cuda::equalizeHist](https://docs.opencv.org/master/d8/d0e/group__cudaimgproc__hist.html#ga2384be74bd2feba7e6c46815513f0060)
- [ ] [cv::cuda::evenLevels](https://docs.opencv.org/master/d8/d0e/group__cudaimgproc__hist.html#ga2f2cbd21dc6d7367a7c4ee1a826f389d)
- [ ] [cv::cuda::histEven](https://docs.opencv.org/master/d8/d0e/group__cudaimgproc__hist.html#gacd3b14279fb77a57a510cb8c89a1856f)
- [ ] [cv::cuda::histRange](https://docs.opencv.org/master/d8/d0e/group__cudaimgproc__hist.html#ga87819085c1059186d9cdeacd92cea783)
- [ ] [cv::cuda::HoughCirclesDetector](https://docs.opencv.org/master/da/d80/classcv_1_1cuda_1_1HoughCirclesDetector.html)
- [ ] [cv::cuda::HoughLinesDetector](https://docs.opencv.org/master/d2/dcd/classcv_1_1cuda_1_1HoughLinesDetector.html)
- [ ] [cv::cuda::HoughSegmentDetector](https://docs.opencv.org/master/d6/df9/classcv_1_1cuda_1_1HoughSegmentDetector.html)
- [ ] [cv::cuda::createGoodFeaturesToTrackDetector](https://docs.opencv.org/master/dc/d6d/group__cudaimgproc__feature.html#ga478b474a598ece101f7e706fee2c8e91)
- [ ] [cv::cuda::createHarrisCorner](https://docs.opencv.org/master/dc/d6d/group__cudaimgproc__feature.html#ga3e5878a803e9bba51added0c10101979)
- [ ] [cv::cuda::createMinEigenValCorner](https://docs.opencv.org/master/dc/d6d/group__cudaimgproc__feature.html#ga7457fd4b53b025f990b1c1dd1b749915)
- [ ] [cv::cuda::bilateralFilter](https://docs.opencv.org/master/d0/d05/group__cudaimgproc.html#ga6abeaecdd4e7edc0bd1393a04f4f20bd)
- [ ] [cv::cuda::blendLinear](https://docs.opencv.org/master/d0/d05/group__cudaimgproc.html#ga4793607e5729bcc15b27ea33d9fe335e)
- [ ] [cv::cuda::meanShiftFiltering](https://docs.opencv.org/master/d0/d05/group__cudaimgproc.html#gae13b3035bc6df0e512d876dbb8c00555)
- [ ] [cv::cuda::meanShiftProc](https://docs.opencv.org/master/d0/d05/group__cudaimgproc.html#ga6039dc8ecbe2f912bc83fcc9b3bcca39)
- [ ] [cv::cuda::meanShiftSegmentation](https://docs.opencv.org/master/d0/d05/group__cudaimgproc.html#ga70ed80533a448829dc48cf22b1845c16)
- [ ] cudalegacy. Legacy support
- [ ] cudaobjdetect. Object Detection
- [ ] **cudaoptflow. Optical Flow - WORK STARTED**
- [X] **cudaobjdetect. Object Detection**
- [ ] **cudaoptflow. Optical Flow - WORK STARTED** The following functions still need implementation:
- [ ] [BroxOpticalFlow](https://docs.opencv.org/master/d7/d18/classcv_1_1cuda_1_1BroxOpticalFlow.html)
- [ ] [DenseOpticalFlow](https://docs.opencv.org/master/d6/d4a/classcv_1_1cuda_1_1DenseOpticalFlow.html)
- [ ] [DensePyrLKOpticalFlow](https://docs.opencv.org/master/d0/da4/classcv_1_1cuda_1_1DensePyrLKOpticalFlow.html)
@ -208,13 +282,10 @@ Your pull requests will be greatly appreciated!
- [ ] cudastereo. Stereo Correspondence
- [X] **cudawarping. Image Warping**
- [ ] cudev. Device layer
- [ ] shape. Shape Distance and Matching
- [ ] superres. Super Resolution
- [ ] videostab. Video Stabilization
- [ ] viz. 3D Visualizer
## Contrib modules list
- [ ] alphamat. Alpha Matting
- [ ] aruco. ArUco Marker Detection
- [X] **bgsegm. Improved Background-Foreground Segmentation Methods - WORK STARTED**
- [ ] bioinspired. Biologically inspired vision models and derivated tools
@ -223,26 +294,36 @@ Your pull requests will be greatly appreciated!
- [ ] cvv. GUI for Interactive Visual Debugging of Computer Vision Programs
- [ ] datasets. Framework for working with different datasets
- [ ] dnn_modern. Deep Learning Modern Module
- [ ] dnn_objdetect. DNN used for object detection
- [ ] dnn_superres. DNN used for super resolution
- [ ] dpm. Deformable Part-based Models
- [ ] **face. Face Recognition - WORK STARTED**
- [ ] freetype. Drawing UTF-8 strings with freetype/harfbuzz
- [ ] fuzzy. Image processing based on fuzzy mathematics
- [ ] hdf. Hierarchical Data Format I/O routines
- [ ] hfs. Hierarchical Feature Selection for Efficient Image Segmentation
- [X] **img_hash. The module brings implementations of different image hashing algorithms.**
- [ ] intensity_transform. The module brings implementations of intensity transformation algorithms to adjust image contrast.
- [ ] line_descriptor. Binary descriptors for lines extracted from an image
- [ ] mcc. Macbeth Chart module
- [ ] matlab. MATLAB Bridge
- [ ] optflow. Optical Flow Algorithms
- [ ] ovis. OGRE 3D Visualiser
- [ ] phase_unwrapping. Phase Unwrapping API
- [ ] plot. Plot function for Mat data
- [ ] reg. Image Registration
- [ ] rgbd. RGB-Depth Processing
- [ ] saliency. Saliency API
- [ ] sfm. Structure From Motion
- [ ] shape. Shape Distance and Matching
- [ ] stereo. Stereo Correspondance Algorithms
- [ ] structured_light. Structured Light API
- [ ] superres. Super Resolution
- [ ] surface_matching. Surface Matching
- [ ] text. Scene Text Detection and Recognition
- [ ] **tracking. Tracking API - WORK STARTED**
- [ ] videostab. Video Stabilization
- [ ] viz. 3D Visualizer
- [ ] **xfeatures2d. Extra 2D Features Framework - WORK STARTED**
- [ ] ximgproc. Extended Image Processing
- [ ] xobjdetect. Extended object detection

vendor/gocv.io/x/gocv/appveyor.yml generated vendored (3 changed lines)

@ -8,7 +8,7 @@ platform:
environment:
GOPATH: c:\gopath
GOROOT: c:\go
GOVERSION: 1.14
GOVERSION: 1.15
TEST_EXTERNAL: 1
APPVEYOR_SAVE_CACHE_ON_ERROR: true
@ -27,6 +27,7 @@ install:
- go get -d .
- set GOCV_CAFFE_TEST_FILES=C:\opencv\testdata
- set GOCV_TENSORFLOW_TEST_FILES=C:\opencv\testdata
- set GOCV_ONNX_TEST_FILES=C:\opencv\testdata
- set OPENCV_ENABLE_NONFREE=ON
- go env


@ -1,23 +1,25 @@
if not exist "C:\opencv" mkdir "C:\opencv"
if not exist "C:\opencv\build" mkdir "C:\opencv\build"
if not exist "C:\opencv\testdata" mkdir "C:\opencv\testdata"
appveyor DownloadFile https://github.com/opencv/opencv/archive/4.4.0.zip -FileName c:\opencv\opencv-4.4.0.zip
7z x c:\opencv\opencv-4.4.0.zip -oc:\opencv -y
del c:\opencv\opencv-4.4.0.zip /q
appveyor DownloadFile https://github.com/opencv/opencv_contrib/archive/4.4.0.zip -FileName c:\opencv\opencv_contrib-4.4.0.zip
7z x c:\opencv\opencv_contrib-4.4.0.zip -oc:\opencv -y
del c:\opencv\opencv_contrib-4.4.0.zip /q
appveyor DownloadFile https://github.com/opencv/opencv/archive/4.5.1.zip -FileName c:\opencv\opencv-4.5.1.zip
7z x c:\opencv\opencv-4.5.1.zip -oc:\opencv -y
del c:\opencv\opencv-4.5.1.zip /q
appveyor DownloadFile https://github.com/opencv/opencv_contrib/archive/4.5.1.zip -FileName c:\opencv\opencv_contrib-4.5.1.zip
7z x c:\opencv\opencv_contrib-4.5.1.zip -oc:\opencv -y
del c:\opencv\opencv_contrib-4.5.1.zip /q
cd C:\opencv\build
set PATH=C:\Perl\site\bin;C:\Perl\bin;C:\Windows\system32;C:\Windows;C:\Windows\System32\Wbem;C:\Windows\System32\WindowsPowerShell\v1.0\;C:\Program Files\7-Zip;C:\Program Files\Microsoft\Web Platform Installer\;C:\Tools\PsTools;C:\Program Files (x86)\CMake\bin;C:\go\bin;C:\Tools\NuGet;C:\Program Files\LLVM\bin;C:\Tools\curl\bin;C:\ProgramData\chocolatey\bin;C:\Program Files (x86)\Yarn\bin;C:\Users\appveyor\AppData\Local\Yarn\bin;C:\Program Files\AppVeyor\BuildAgent\
set PATH=%PATH%;C:\mingw-w64\x86_64-7.3.0-posix-seh-rt_v5-rev0\mingw64\bin
dir C:\opencv
cmake C:\opencv\opencv-4.4.0 -G "MinGW Makefiles" -BC:\opencv\build -DENABLE_CXX11=ON -DOPENCV_EXTRA_MODULES_PATH=C:\opencv\opencv_contrib-4.4.0\modules -DBUILD_SHARED_LIBS=ON -DWITH_IPP=OFF -DWITH_MSMF=OFF -DBUILD_EXAMPLES=OFF -DBUILD_TESTS=OFF -DBUILD_PERF_TESTS=OFF -DBUILD_opencv_java=OFF -DBUILD_opencv_python=OFF -DBUILD_opencv_python2=OFF -DBUILD_opencv_python3=OFF -DBUILD_DOCS=OFF -DENABLE_PRECOMPILED_HEADERS=OFF -DBUILD_opencv_saliency=OFF -DCPU_DISPATCH= -DBUILD_opencv_gapi=OFF -DOPENCV_GENERATE_PKGCONFIG=ON -DOPENCV_ENABLE_NONFREE=ON -DWITH_OPENCL_D3D11_NV=OFF -DOPENCV_ALLOCATOR_STATS_COUNTER_TYPE=int64_t -Wno-dev
cmake C:\opencv\opencv-4.5.1 -G "MinGW Makefiles" -BC:\opencv\build -DENABLE_CXX11=ON -DOPENCV_EXTRA_MODULES_PATH=C:\opencv\opencv_contrib-4.5.1\modules -DBUILD_SHARED_LIBS=ON -DWITH_IPP=OFF -DWITH_MSMF=OFF -DBUILD_EXAMPLES=OFF -DBUILD_TESTS=OFF -DBUILD_PERF_TESTS=OFF -DBUILD_opencv_java=OFF -DBUILD_opencv_python=OFF -DBUILD_opencv_python2=OFF -DBUILD_opencv_python3=OFF -DBUILD_DOCS=OFF -DENABLE_PRECOMPILED_HEADERS=OFF -DBUILD_opencv_saliency=OFF -DCPU_DISPATCH= -DBUILD_opencv_gapi=OFF -DOPENCV_GENERATE_PKGCONFIG=ON -DOPENCV_ENABLE_NONFREE=ON -DWITH_OPENCL_D3D11_NV=OFF -DOPENCV_ALLOCATOR_STATS_COUNTER_TYPE=int64_t -Wno-dev
mingw32-make -j%NUMBER_OF_PROCESSORS%
mingw32-make install
appveyor DownloadFile https://raw.githubusercontent.com/opencv/opencv_extra/master/testdata/dnn/bvlc_googlenet.prototxt -FileName C:\opencv\testdata\bvlc_googlenet.prototxt
appveyor DownloadFile http://dl.caffe.berkeleyvision.org/bvlc_googlenet.caffemodel -FileName C:\opencv\testdata\bvlc_googlenet.caffemodel
appveyor DownloadFile https://storage.googleapis.com/download.tensorflow.org/models/inception5h.zip -FileName C:\opencv\testdata\inception5h.zip
appveyor DownloadFile https://github.com/onnx/models/raw/master/vision/classification/inception_and_googlenet/googlenet/model/googlenet-9.onnx -FileName C:\opencv\testdata\googlenet-9.onnx
7z x C:\opencv\testdata\inception5h.zip -oC:\opencv\testdata tensorflow_inception_graph.pb -y
rmdir c:\opencv\opencv-4.4.0 /s /q
rmdir c:\opencv\opencv_contrib-4.4.0 /s /q
rmdir c:\opencv\opencv-4.5.1 /s /q
rmdir c:\opencv\opencv_contrib-4.5.1 /s /q

vendor/gocv.io/x/gocv/calib3d.go generated vendored (10 changed lines)

@ -155,6 +155,11 @@ const (
CalibCBMarker
)
// FindChessboardCorners finds the positions of internal corners of the chessboard.
//
// For further details, please see:
// https://docs.opencv.org/master/d9/d0c/group__calib3d.html#ga93efa9b0aa890de240ca32b11253dd4a
//
func FindChessboardCorners(image Mat, patternSize image.Point, corners *Mat, flags CalibCBFlag) bool {
sz := C.struct_Size{
width: C.int(patternSize.X),
@ -163,6 +168,11 @@ func FindChessboardCorners(image Mat, patternSize image.Point, corners *Mat, fla
return bool(C.FindChessboardCorners(image.Ptr(), sz, corners.Ptr(), C.int(flags)))
}
// DrawChessboardCorners renders the detected chessboard corners.
//
// For further details, please see:
// https://docs.opencv.org/master/d9/d0c/group__calib3d.html#ga6a10b0bb120c4907e5eabbcd22319022
//
func DrawChessboardCorners(image *Mat, patternSize image.Point, corners Mat, patternWasFound bool) {
sz := C.struct_Size{
width: C.int(patternSize.X),

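The calib3d.go hunks only add documentation comments for FindChessboardCorners and DrawChessboardCorners. A short usage sketch built on the signatures shown above; the file name, 9x6 pattern size, and flag choice are assumptions for illustration:

```go
package main

import (
	"image"

	"gocv.io/x/gocv"
)

func main() {
	img := gocv.IMRead("chessboard.png", gocv.IMReadColor)
	defer img.Close()

	corners := gocv.NewMat()
	defer corners.Close()

	// Pattern size is the number of inner corners per row and column.
	size := image.Pt(9, 6)
	found := gocv.FindChessboardCorners(img, size, &corners, gocv.CalibCBAdaptiveThresh)

	// Renders the detected corners onto the image in place.
	gocv.DrawChessboardCorners(&img, size, corners, found)
}
```
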
vendor/gocv.io/x/gocv/cgo.go generated vendored (2 changed lines)

@ -8,6 +8,6 @@ package gocv
#cgo !windows pkg-config: opencv4
#cgo CXXFLAGS: --std=c++11
#cgo windows CPPFLAGS: -IC:/opencv/build/install/include
#cgo windows LDFLAGS: -LC:/opencv/build/install/x64/mingw/lib -lopencv_core440 -lopencv_face440 -lopencv_videoio440 -lopencv_imgproc440 -lopencv_highgui440 -lopencv_imgcodecs440 -lopencv_objdetect440 -lopencv_features2d440 -lopencv_video440 -lopencv_dnn440 -lopencv_xfeatures2d440 -lopencv_plot440 -lopencv_tracking440 -lopencv_img_hash440 -lopencv_calib3d440 -lopencv_bgsegm440
#cgo windows LDFLAGS: -LC:/opencv/build/install/x64/mingw/lib -lopencv_core451 -lopencv_face451 -lopencv_videoio451 -lopencv_imgproc451 -lopencv_highgui451 -lopencv_imgcodecs451 -lopencv_objdetect451 -lopencv_features2d451 -lopencv_video451 -lopencv_dnn451 -lopencv_xfeatures2d451 -lopencv_plot451 -lopencv_tracking451 -lopencv_img_hash451 -lopencv_calib3d451 -lopencv_bgsegm451 -lopencv_photo451
*/
import "C"

vendor/gocv.io/x/gocv/core.cpp generated vendored (87 changed lines)

@ -11,6 +11,15 @@ Mat Mat_NewWithSize(int rows, int cols, int type) {
return new cv::Mat(rows, cols, type, 0.0);
}
// Mat_NewWithSizes creates a new Mat with specific dimension sizes and number of channels.
Mat Mat_NewWithSizes(struct IntVector sizes, int type) {
std::vector<int> sizess;
for (int i = 0; i < sizes.length; ++i) {
sizess.push_back(sizes.val[i]);
}
return new cv::Mat(sizess, type);
}
// Mat_NewFromScalar creates a new Mat from a Scalar. Intended to be used
// for Mat comparison operation such as InRange.
Mat Mat_NewFromScalar(Scalar ar, int type) {
@ -28,6 +37,42 @@ Mat Mat_NewFromBytes(int rows, int cols, int type, struct ByteArray buf) {
return new cv::Mat(rows, cols, type, buf.data);
}
// Mat_NewWithSizesFromScalar creates multidimensional Mat from a scalar
Mat Mat_NewWithSizesFromScalar(IntVector sizes, int type, Scalar ar) {
std::vector<int> _sizes;
for (int i = 0, *v = sizes.val; i < sizes.length; ++v, ++i) {
_sizes.push_back(*v);
}
cv::Scalar c = cv::Scalar(ar.val1, ar.val2, ar.val3, ar.val4);
return new cv::Mat(_sizes, type, c);
}
// Mat_NewWithSizesFromBytes creates multidimensional Mat from a bytes
Mat Mat_NewWithSizesFromBytes(IntVector sizes, int type, struct ByteArray buf) {
std::vector<int> _sizes;
for (int i = 0, *v = sizes.val; i < sizes.length; ++v, ++i) {
_sizes.push_back(*v);
}
return new cv::Mat(_sizes, type, buf.data);
}
Mat Eye(int rows, int cols, int type) {
cv::Mat temp = cv::Mat::eye(rows, cols, type);
return new cv::Mat(rows, cols, type, temp.data);
}
Mat Zeros(int rows, int cols, int type) {
cv::Mat temp = cv::Mat::zeros(rows, cols, type);
return new cv::Mat(rows, cols, type, temp.data);
}
Mat Ones(int rows, int cols, int type) {
cv::Mat temp = cv::Mat::ones(rows, cols, type);
return new cv::Mat(rows, cols, type, temp.data);
}
Mat Mat_FromPtr(Mat m, int rows, int cols, int type, int prow, int pcol) {
return new cv::Mat(rows, cols, type, m->ptr(prow, pcol));
}
@ -42,6 +87,11 @@ int Mat_Empty(Mat m) {
return m->empty();
}
// Mat_IsContinuous tests if a Mat is continuous
bool Mat_IsContinuous(Mat m) {
return m->isContinuous();
}
// Mat_Clone returns a clone of this Mat
Mat Mat_Clone(Mat m) {
return new cv::Mat(m->clone());
@ -61,6 +111,10 @@ void Mat_ConvertTo(Mat m, Mat dst, int type) {
m->convertTo(*dst, type);
}
void Mat_ConvertToWithParams(Mat m, Mat dst, int type, float alpha, float beta) {
m->convertTo(*dst, type, alpha, beta);
}
// Mat_ToBytes returns the bytes representation of the underlying data.
struct ByteArray Mat_ToBytes(Mat m) {
return toByteArray(reinterpret_cast<const char*>(m->data), m->total() * m->elemSize());
@ -566,6 +620,28 @@ void Mat_MinMaxLoc(Mat m, double* minVal, double* maxVal, Point* minLoc, Point*
maxLoc->y = cMaxLoc.y;
}
void Mat_MixChannels(struct Mats src, struct Mats dst, struct IntVector fromTo) {
std::vector<cv::Mat> srcMats;
for (int i = 0; i < src.length; ++i) {
srcMats.push_back(*src.mats[i]);
}
std::vector<cv::Mat> dstMats;
for (int i = 0; i < dst.length; ++i) {
dstMats.push_back(*dst.mats[i]);
}
std::vector<int> fromTos;
for (int i = 0; i < fromTo.length; ++i) {
fromTos.push_back(fromTo.val[i]);
}
cv::mixChannels(srcMats, dstMats, fromTos);
}
void Mat_MulSpectrums(Mat a, Mat b, Mat c, int flags) {
cv::mulSpectrums(*a, *b, *c, flags);
}
@ -586,6 +662,10 @@ double Norm(Mat src1, int normType) {
return cv::norm(*src1, normType);
}
double NormWithMats(Mat src1, Mat src2, int normType) {
return cv::norm(*src1, *src2, normType);
}
void Mat_PerspectiveTransform(Mat src, Mat dst, Mat tm) {
cv::perspectiveTransform(*src, *dst, *tm);
}
@ -696,6 +776,13 @@ void Contours_Close(struct Contours cs) {
delete[] cs.contours;
}
void CStrings_Close(struct CStrings cstrs) {
for ( int i = 0; i < cstrs.length; i++ ) {
delete [] cstrs.strs[i];
}
delete [] cstrs.strs;
}
void KeyPoints_Close(struct KeyPoints ks) {
delete[] ks.keypoints;
}
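
The core.cpp additions above are the C side of the new multidimensional Mat constructors. A brief sketch of the corresponding Go call, matching the NewMatWithSizes signature added in core.go further down; the dimensions are arbitrary:

```go
package main

import (
	"fmt"

	"gocv.io/x/gocv"
)

func main() {
	// A 3-dimensional 2x3x4 Mat of unsigned 8-bit elements.
	m := gocv.NewMatWithSizes([]int{2, 3, 4}, gocv.MatTypeCV8U)
	defer m.Close()

	fmt.Println("total elements:", m.Total())
}
```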

vendor/gocv.io/x/gocv/core.go generated vendored (307 changed lines)

@ -178,6 +178,9 @@ var ErrEmptyByteSlice = errors.New("empty byte array")
//
type Mat struct {
p C.Mat
// Non-nil if Mat was created with a []byte (using NewMatFromBytes()). Nil otherwise.
d []byte
}
// NewMat returns a new empty Mat.
@ -190,6 +193,58 @@ func NewMatWithSize(rows int, cols int, mt MatType) Mat {
return newMat(C.Mat_NewWithSize(C.int(rows), C.int(cols), C.int(mt)))
}
// NewMatWithSizes returns a new multidimensional Mat with a specific size and type.
func NewMatWithSizes(sizes []int, mt MatType) Mat {
sizesArray := make([]C.int, len(sizes))
for i, s := range sizes {
sizesArray[i] = C.int(s)
}
sizesIntVector := C.IntVector{
val: (*C.int)(&sizesArray[0]),
length: C.int(len(sizes)),
}
return newMat(C.Mat_NewWithSizes(sizesIntVector, C.int(mt)))
}
// NewMatWithSizesWithScalar returns a new multidimensional Mat with a specific size, type and scalar value.
func NewMatWithSizesWithScalar(sizes []int, mt MatType, s Scalar) Mat {
csizes := []C.int{}
for _, v := range sizes {
csizes = append(csizes, C.int(v))
}
sizesVector := C.struct_IntVector{}
sizesVector.val = (*C.int)(&csizes[0])
sizesVector.length = (C.int)(len(csizes))
sVal := C.struct_Scalar{
val1: C.double(s.Val1),
val2: C.double(s.Val2),
val3: C.double(s.Val3),
val4: C.double(s.Val4),
}
return newMat(C.Mat_NewWithSizesFromScalar(sizesVector, C.int(mt), sVal))
}
// NewMatWithSizesWithScalar returns a new multidimensional Mat with a specific size, type and preexisting data.
func NewMatWithSizesFromBytes(sizes []int, mt MatType, data []byte) (Mat, error) {
cBytes, err := toByteArray(data)
if err != nil {
return Mat{}, err
}
csizes := []C.int{}
for _, v := range sizes {
csizes = append(csizes, C.int(v))
}
sizesVector := C.struct_IntVector{}
sizesVector.val = (*C.int)(&csizes[0])
sizesVector.length = (C.int)(len(csizes))
return newMat(C.Mat_NewWithSizesFromBytes(sizesVector, C.int(mt), *cBytes)), nil
}
// NewMatFromScalar returns a new Mat for a specific Scalar value
func NewMatFromScalar(s Scalar, mt MatType) Mat {
sVal := C.struct_Scalar{
@ -221,7 +276,44 @@ func NewMatFromBytes(rows int, cols int, mt MatType, data []byte) (Mat, error) {
if err != nil {
return Mat{}, err
}
return newMat(C.Mat_NewFromBytes(C.int(rows), C.int(cols), C.int(mt), *cBytes)), nil
mat := newMat(C.Mat_NewFromBytes(C.int(rows), C.int(cols), C.int(mt), *cBytes))
// Store a reference to the backing data slice. This is needed because we pass the backing
// array directly to C code and without keeping a Go reference to it, it might end up
// garbage collected which would result in crashes.
//
// TODO(bga): This could live in newMat() but I wanted to reduce the change surface.
// TODO(bga): Code that needs access to the array from Go could use this directly.
mat.d = data
return mat, nil
}
// Returns an identity matrix of the specified size and type.
//
// The method returns a Matlab-style identity matrix initializer, similarly to Mat::zeros. Similarly to Mat::ones.
// For further details, please see:
// https://docs.opencv.org/master/d3/d63/classcv_1_1Mat.html#a2cf9b9acde7a9852542bbc20ef851ed2
func Eye(rows int, cols int, mt MatType) Mat {
return newMat(C.Eye(C.int(rows), C.int(cols), C.int(mt)))
}
// Returns a zero array of the specified size and type.
//
// The method returns a Matlab-style zero array initializer.
// For further details, please see:
// https://docs.opencv.org/master/d3/d63/classcv_1_1Mat.html#a0b57b6a326c8876d944d188a46e0f556
func Zeros(rows int, cols int, mt MatType) Mat {
return newMat(C.Zeros(C.int(rows), C.int(cols), C.int(mt)))
}
// Returns an array of all 1's of the specified size and type.
//
// The method returns a Matlab-style 1's array initializer
// For further details, please see:
// https://docs.opencv.org/master/d3/d63/classcv_1_1Mat.html#a69ae0402d116fc9c71908d8508dc2f09
func Ones(rows int, cols int, mt MatType) Mat {
return newMat(C.Ones(C.int(rows), C.int(cols), C.int(mt)))
}
// FromPtr returns a new Mat with a specific size and type, initialized from a Mat Ptr.
@ -240,6 +332,15 @@ func (m *Mat) Empty() bool {
return isEmpty != 0
}
// IsContinuous determines if the Mat is continuous.
//
// For further details, please see:
// https://docs.opencv.org/master/d3/d63/classcv_1_1Mat.html#aa90cea495029c7d1ee0a41361ccecdf3
//
func (m *Mat) IsContinuous() bool {
return bool(C.Mat_IsContinuous(m.p))
}
// Clone returns a cloned full copy of the Mat.
func (m *Mat) Clone() Mat {
return newMat(C.Mat_Clone(m.p))
@ -275,6 +376,11 @@ func (m *Mat) ConvertTo(dst *Mat, mt MatType) {
return
}
func (m *Mat) ConvertToWithParams(dst *Mat, mt MatType, alpha, beta float32) {
C.Mat_ConvertToWithParams(m.p, dst.p, C.int(mt), C.float(alpha), C.float(beta))
return
}
// Total returns the total number of array elements.
//
// For further details, please see:
@ -320,28 +426,40 @@ func (m *Mat) ToBytes() []byte {
//
// The data is no longer valid once the Mat has been closed. Any data that
// needs to be accessed after the Mat is closed must be copied into Go memory.
func (m *Mat) DataPtrUint8() []uint8 {
func (m *Mat) DataPtrUint8() ([]uint8, error) {
if !m.IsContinuous() {
return nil, errors.New("DataPtrUint8 requires continuous Mat")
}
p := C.Mat_DataPtr(m.p)
h := &reflect.SliceHeader{
Data: uintptr(unsafe.Pointer(p.data)),
Len: int(p.length),
Cap: int(p.length),
}
return *(*[]uint8)(unsafe.Pointer(h))
return *(*[]uint8)(unsafe.Pointer(h)), nil
}
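// Usage sketch (illustrative only): with the new signature, callers are
// expected to check the returned error and keep the Mat open while the slice
// is in use. The Mat below is hypothetical.
//
//	m := gocv.NewMatWithSize(4, 4, gocv.MatTypeCV8UC1)
//	defer m.Close()
//	if m.IsContinuous() {
//		data, err := m.DataPtrUint8()
//		if err == nil {
//			data[0] = 255 // writes directly into the OpenCV buffer
//		}
//	}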
// DataPtrInt8 returns a slice that references the OpenCV allocated data.
//
// The data is no longer valid once the Mat has been closed. Any data that
// needs to be accessed after the Mat is closed must be copied into Go memory.
func (m *Mat) DataPtrInt8() []int8 {
func (m *Mat) DataPtrInt8() ([]int8, error) {
if m.Type()&MatTypeCV8S != MatTypeCV8S {
return nil, errors.New("DataPtrInt8 only supports MatTypeCV8S")
}
if !m.IsContinuous() {
return nil, errors.New("DataPtrInt8 requires continuous Mat")
}
p := C.Mat_DataPtr(m.p)
h := &reflect.SliceHeader{
Data: uintptr(unsafe.Pointer(p.data)),
Len: int(p.length),
Cap: int(p.length),
}
return *(*[]int8)(unsafe.Pointer(h))
return *(*[]int8)(unsafe.Pointer(h)), nil
}
// DataPtrUint16 returns a slice that references the OpenCV allocated data.
@ -353,6 +471,10 @@ func (m *Mat) DataPtrUint16() ([]uint16, error) {
return nil, errors.New("DataPtrUint16 only supports MatTypeCV16U")
}
if !m.IsContinuous() {
return nil, errors.New("DataPtrUint16 requires continuous Mat")
}
p := C.Mat_DataPtr(m.p)
h := &reflect.SliceHeader{
Data: uintptr(unsafe.Pointer(p.data)),
@ -371,6 +493,10 @@ func (m *Mat) DataPtrInt16() ([]int16, error) {
return nil, errors.New("DataPtrInt16 only supports MatTypeCV16S")
}
if !m.IsContinuous() {
return nil, errors.New("DataPtrInt16 requires continuous Mat")
}
p := C.Mat_DataPtr(m.p)
h := &reflect.SliceHeader{
Data: uintptr(unsafe.Pointer(p.data)),
@ -389,6 +515,10 @@ func (m *Mat) DataPtrFloat32() ([]float32, error) {
return nil, errors.New("DataPtrFloat32 only supports MatTypeCV32F")
}
if !m.IsContinuous() {
return nil, errors.New("DataPtrFloat32 requires continuous Mat")
}
p := C.Mat_DataPtr(m.p)
h := &reflect.SliceHeader{
Data: uintptr(unsafe.Pointer(p.data)),
@ -407,6 +537,10 @@ func (m *Mat) DataPtrFloat64() ([]float64, error) {
return nil, errors.New("DataPtrFloat64 only supports MatTypeCV64F")
}
if !m.IsContinuous() {
return nil, errors.New("DataPtrFloat64 requires continuous Mat")
}
p := C.Mat_DataPtr(m.p)
h := &reflect.SliceHeader{
Data: uintptr(unsafe.Pointer(p.data)),
@ -745,106 +879,6 @@ func (m *Mat) T() Mat {
return newMat(C.Mat_T(m.p))
}
// ToImage converts a Mat to a image.Image.
func (m *Mat) ToImage() (image.Image, error) {
t := m.Type()
if t != MatTypeCV8UC1 && t != MatTypeCV8UC3 && t != MatTypeCV8UC4 {
return nil, errors.New("ToImage supports only MatType CV8UC1, CV8UC3 and CV8UC4")
}
width := m.Cols()
height := m.Rows()
step := m.Step()
data := m.ToBytes()
channels := m.Channels()
if t == MatTypeCV8UC1 {
img := image.NewGray(image.Rect(0, 0, width, height))
c := color.Gray{Y: uint8(0)}
for y := 0; y < height; y++ {
for x := 0; x < width; x++ {
c.Y = uint8(data[y*step+x])
img.SetGray(x, y, c)
}
}
return img, nil
}
img := image.NewRGBA(image.Rect(0, 0, width, height))
c := color.RGBA{
R: uint8(0),
G: uint8(0),
B: uint8(0),
A: uint8(255),
}
for y := 0; y < height; y++ {
for x := 0; x < step; x = x + channels {
c.B = uint8(data[y*step+x])
c.G = uint8(data[y*step+x+1])
c.R = uint8(data[y*step+x+2])
if channels == 4 {
c.A = uint8(data[y*step+x+3])
}
img.SetRGBA(int(x/channels), y, c)
}
}
return img, nil
}
//ImageToMatRGBA converts image.Image to gocv.Mat,
//which represents RGBA image having 8bit for each component.
//Type of Mat is gocv.MatTypeCV8UC4.
func ImageToMatRGBA(img image.Image) (Mat, error) {
bounds := img.Bounds()
x := bounds.Dx()
y := bounds.Dy()
data := make([]byte, 0, x*y*4)
for j := bounds.Min.Y; j < bounds.Max.Y; j++ {
for i := bounds.Min.X; i < bounds.Max.X; i++ {
r, g, b, a := img.At(i, j).RGBA()
data = append(data, byte(b>>8), byte(g>>8), byte(r>>8), byte(a>>8))
}
}
return NewMatFromBytes(y, x, MatTypeCV8UC4, data)
}
//ImageToMatRGB converts image.Image to gocv.Mat,
//which represents RGB image having 8bit for each component.
//Type of Mat is gocv.MatTypeCV8UC3.
func ImageToMatRGB(img image.Image) (Mat, error) {
bounds := img.Bounds()
x := bounds.Dx()
y := bounds.Dy()
data := make([]byte, 0, x*y*3)
for j := bounds.Min.Y; j < bounds.Max.Y; j++ {
for i := bounds.Min.X; i < bounds.Max.X; i++ {
r, g, b, _ := img.At(i, j).RGBA()
data = append(data, byte(b>>8), byte(g>>8), byte(r>>8))
}
}
return NewMatFromBytes(y, x, MatTypeCV8UC3, data)
}
//ImageGrayToMatGray converts image.Gray to gocv.Mat,
//which represents grayscale image 8bit.
//Type of Mat is gocv.MatTypeCV8UC1.
func ImageGrayToMatGray(img *image.Gray) (Mat, error) {
bounds := img.Bounds()
x := bounds.Dx()
y := bounds.Dy()
data := make([]byte, 0, x*y)
for j := bounds.Min.Y; j < bounds.Max.Y; j++ {
for i := bounds.Min.X; i < bounds.Max.X; i++ {
data = append(data, img.GrayAt(i, j).Y)
}
}
return NewMatFromBytes(y, x, MatTypeCV8UC1, data)
}
// AbsDiff calculates the per-element absolute difference between two arrays
// or between an array and a scalar.
//
@ -1478,6 +1512,47 @@ func MinMaxLoc(input Mat) (minVal, maxVal float32, minLoc, maxLoc image.Point) {
return float32(cMinVal), float32(cMaxVal), minLoc, maxLoc
}
// Copies specified channels from input arrays to the specified channels of output arrays.
//
// For further details, please see:
// https://docs.opencv.org/master/d2/de8/group__core__array.html#ga51d768c270a1cdd3497255017c4504be
//
func MixChannels(src []Mat, dst []Mat, fromTo []int) {
cSrcArray := make([]C.Mat, len(src))
for i, r := range src {
cSrcArray[i] = r.p
}
cSrcMats := C.struct_Mats{
mats: (*C.Mat)(&cSrcArray[0]),
length: C.int(len(src)),
}
cDstArray := make([]C.Mat, len(dst))
for i, r := range dst {
cDstArray[i] = r.p
}
cDstMats := C.struct_Mats{
mats: (*C.Mat)(&cDstArray[0]),
length: C.int(len(dst)),
}
cFromToArray := make([]C.int, len(fromTo))
for i, ft := range fromTo {
cFromToArray[i] = C.int(ft)
}
cFromToIntVector := C.IntVector{
val: (*C.int)(&cFromToArray[0]),
length: C.int(len(fromTo)),
}
C.Mat_MixChannels(cSrcMats, cDstMats, cFromToIntVector)
for i := C.int(0); i < cDstMats.length; i++ {
dst[i].p = C.Mats_get(cDstMats, i)
}
}
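// Usage sketch (illustrative only, adapted from the OpenCV documentation
// example): splitting a BGRA image into a BGR image plus a separate alpha
// channel. The destination Mats must be allocated before the call.
//
//	bgra := gocv.NewMatWithSizeFromScalar(gocv.NewScalar(255, 0, 0, 255), 10, 10, gocv.MatTypeCV8UC4)
//	defer bgra.Close()
//	bgr := gocv.NewMatWithSize(bgra.Rows(), bgra.Cols(), gocv.MatTypeCV8UC3)
//	defer bgr.Close()
//	alpha := gocv.NewMatWithSize(bgra.Rows(), bgra.Cols(), gocv.MatTypeCV8UC1)
//	defer alpha.Close()
//	// bgra[0]->bgr[2], bgra[1]->bgr[1], bgra[2]->bgr[0], bgra[3]->alpha[0]
//	gocv.MixChannels([]gocv.Mat{bgra}, []gocv.Mat{bgr, alpha}, []int{0, 2, 1, 1, 2, 0, 3, 3})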
// MulSpectrums performs the per-element multiplication of two Fourier spectrums.
//
// For further details, please see:
@ -1561,6 +1636,15 @@ func Norm(src1 Mat, normType NormType) float64 {
return float64(C.Norm(src1.p, C.int(normType)))
}
// NormWithMats calculates the absolute difference/relative norm of two arrays.
//
// For further details, please see:
// https://docs.opencv.org/master/d2/de8/group__core__array.html#ga7c331fb8dd951707e184ef4e3f21dd33
//
func NormWithMats(src1 Mat, src2 Mat, normType NormType) float64 {
return float64(C.NormWithMats(src1.p, src2.p, C.int(normType)))
}
// PerspectiveTransform performs the perspective matrix transformation of vectors.
//
// For further details, please see:
@ -1727,6 +1811,7 @@ func SortIdx(src Mat, dst *Mat, flags SortFlags) {
}
// Split creates an array of single-channel images from a multi-channel image.
// Created images should be closed manually to avoid memory leaks.
//
// For further details, please see:
// https://docs.opencv.org/master/d2/de8/group__core__array.html#ga0547c7fed86152d7e9d0096029c8518a
@ -1738,6 +1823,7 @@ func Split(src Mat) (mv []Mat) {
mv = make([]Mat, cMats.length)
for i := C.int(0); i < cMats.length; i++ {
mv[i].p = C.Mats_get(cMats, i)
addMatToProfile(mv[i].p)
}
return
}
@ -1957,6 +2043,17 @@ func toGoBytes(b C.struct_ByteArray) []byte {
return C.GoBytes(unsafe.Pointer(b.data), b.length)
}
// toGoStrings converts CStrings to a slice of Go strings, even when the C strings are not contiguous in memory.
func toGoStrings(strs C.CStrings) []string {
length := int(strs.length)
tmpslice := (*[1 << 20]*C.char)(unsafe.Pointer(strs.strs))[:length:length]
gostrings := make([]string, length)
for i, s := range tmpslice {
gostrings[i] = C.GoString(s)
}
return gostrings
}
func toRectangles(ret C.Rects) []image.Rectangle {
cArray := ret.rects
length := int(ret.length)

12
vendor/gocv.io/x/gocv/core.h generated vendored
View File

@ -232,18 +232,23 @@ void MultiDMatches_Close(struct MultiDMatches mds);
Mat Mat_New();
Mat Mat_NewWithSize(int rows, int cols, int type);
Mat Mat_NewWithSizes(struct IntVector sizes, int type);
Mat Mat_NewWithSizesFromScalar(IntVector sizes, int type, Scalar ar);
Mat Mat_NewWithSizesFromBytes(IntVector sizes, int type, struct ByteArray buf);
Mat Mat_NewFromScalar(const Scalar ar, int type);
Mat Mat_NewWithSizeFromScalar(const Scalar ar, int rows, int cols, int type);
Mat Mat_NewFromBytes(int rows, int cols, int type, struct ByteArray buf);
Mat Mat_FromPtr(Mat m, int rows, int cols, int type, int prows, int pcols);
void Mat_Close(Mat m);
int Mat_Empty(Mat m);
bool Mat_IsContinuous(Mat m);
Mat Mat_Clone(Mat m);
void Mat_CopyTo(Mat m, Mat dst);
int Mat_Total(Mat m);
void Mat_Size(Mat m, IntVector* res);
void Mat_CopyToWithMask(Mat m, Mat dst, Mat mask);
void Mat_ConvertTo(Mat m, Mat dst, int type);
void Mat_ConvertToWithParams(Mat m, Mat dst, int type, float alpha, float beta);
struct ByteArray Mat_ToBytes(Mat m);
struct ByteArray Mat_DataPtr(Mat m);
Mat Mat_Region(Mat m, Rect r);
@ -258,6 +263,9 @@ int Mat_Cols(Mat m);
int Mat_Channels(Mat m);
int Mat_Type(Mat m);
int Mat_Step(Mat m);
Mat Eye(int rows, int cols, int type);
Mat Zeros(int rows, int cols, int type);
Mat Ones(int rows, int cols, int type);
uint8_t Mat_GetUChar(Mat m, int row, int col);
uint8_t Mat_GetUChar3(Mat m, int x, int y, int z);
@ -354,12 +362,14 @@ void Mat_Merge(struct Mats mats, Mat dst);
void Mat_Min(Mat src1, Mat src2, Mat dst);
void Mat_MinMaxIdx(Mat m, double* minVal, double* maxVal, int* minIdx, int* maxIdx);
void Mat_MinMaxLoc(Mat m, double* minVal, double* maxVal, Point* minLoc, Point* maxLoc);
void Mat_MixChannels(struct Mats src, struct Mats dst, struct IntVector fromTo);
void Mat_MulSpectrums(Mat a, Mat b, Mat c, int flags);
void Mat_Multiply(Mat src1, Mat src2, Mat dst);
void Mat_MultiplyWithParams(Mat src1, Mat src2, Mat dst, double scale, int dtype);
void Mat_Subtract(Mat src1, Mat src2, Mat dst);
void Mat_Normalize(Mat src, Mat dst, double alpha, double beta, int typ);
double Norm(Mat src1, int normType);
double NormWithMats(Mat src1, Mat src2, int normType);
void Mat_PerspectiveTransform(Mat src, Mat dst, Mat tm);
bool Mat_Solve(Mat src1, Mat src2, Mat dst, int flags);
int Mat_SolveCubic(Mat coeffs, Mat roots);
@ -390,6 +400,8 @@ Mat Mat_colRange(Mat m,int startrow,int endrow);
void IntVector_Close(struct IntVector ivec);
void CStrings_Close(struct CStrings cstrs);
#ifdef __cplusplus
}
#endif

95
vendor/gocv.io/x/gocv/dnn.cpp generated vendored
View File

@ -33,6 +33,21 @@ Net Net_ReadNetFromTensorflowBytes(struct ByteArray model) {
return n;
}
Net Net_ReadNetFromTorch(const char* model) {
Net n = new cv::dnn::Net(cv::dnn::readNetFromTorch(model));
return n;
}
Net Net_ReadNetFromONNX(const char* model) {
Net n = new cv::dnn::Net(cv::dnn::readNetFromONNX(model));
return n;
}
Net Net_ReadNetFromONNXBytes(struct ByteArray model) {
Net n = new cv::dnn::Net(cv::dnn::readNetFromONNX(model.data, model.length));
return n;
}
void Net_Close(Net net) {
delete net;
}
@ -110,11 +125,9 @@ void Net_GetLayerNames(Net net, CStrings* names) {
Mat Net_BlobFromImage(Mat image, double scalefactor, Size size, Scalar mean, bool swapRB,
bool crop) {
cv::Size sz(size.width, size.height);
// set the output ddepth to the input image depth
int ddepth = image->depth();
cv::Scalar cm(mean.val1, mean.val2, mean.val3, mean.val4);
return new cv::Mat(cv::dnn::blobFromImage(*image, scalefactor, sz, cm, swapRB, crop, ddepth));
// use the default target ddepth here.
return new cv::Mat(cv::dnn::blobFromImage(*image, scalefactor, sz, cm, swapRB, crop));
}
void Net_BlobFromImages(struct Mats images, Mat blob, double scalefactor, Size size,
@ -128,8 +141,8 @@ void Net_BlobFromImages(struct Mats images, Mat blob, double scalefactor, Size s
cv::Size sz(size.width, size.height);
cv::Scalar cm = cv::Scalar(mean.val1, mean.val2, mean.val3, mean.val4);
// TODO: handle different version signatures of this function v2 vs v3.
cv::dnn::blobFromImages(imgs, *blob, scalefactor, sz, cm, swapRB, crop, ddepth);
// ignore the passed in ddepth, just use default.
cv::dnn::blobFromImages(imgs, *blob, scalefactor, sz, cm, swapRB, crop);
}
void Net_ImagesFromBlob(Mat blob_, struct Mats* images_) {
@ -181,3 +194,73 @@ const char* Layer_GetName(Layer layer) {
const char* Layer_GetType(Layer layer) {
return (*layer)->type.c_str();
}
void NMSBoxes(struct Rects bboxes, FloatVector scores, float score_threshold, float nms_threshold, IntVector* indices) {
std::vector<cv::Rect> _bboxes;
for (int i = 0; i < bboxes.length; ++i) {
_bboxes.push_back(cv::Rect(
bboxes.rects[i].x,
bboxes.rects[i].y,
bboxes.rects[i].width,
bboxes.rects[i].height
));
}
std::vector<float> _scores;
float* f;
int i;
for (i = 0, f = scores.val; i < scores.length; ++f, ++i) {
_scores.push_back(*f);
}
std::vector<int> _indices(indices->length);
cv::dnn::NMSBoxes(_bboxes, _scores, score_threshold, nms_threshold, _indices, 1.f, 0);
int* ptr = new int[_indices.size()];
for (size_t i=0; i<_indices.size(); ++i) {
ptr[i] = _indices[i];
}
indices->length = _indices.size();
indices->val = ptr;
return;
}
void NMSBoxesWithParams(struct Rects bboxes, FloatVector scores, const float score_threshold, const float nms_threshold, IntVector* indices, const float eta, const int top_k) {
std::vector<cv::Rect> _bboxes;
for (int i = 0; i < bboxes.length; ++i) {
_bboxes.push_back(cv::Rect(
bboxes.rects[i].x,
bboxes.rects[i].y,
bboxes.rects[i].width,
bboxes.rects[i].height
));
}
std::vector<float> _scores;
float* f;
int i;
for (i = 0, f = scores.val; i < scores.length; ++f, ++i) {
_scores.push_back(*f);
}
std::vector<int> _indices(indices->length);
cv::dnn::NMSBoxes(_bboxes, _scores, score_threshold, nms_threshold, _indices, eta, top_k);
int* ptr = new int[_indices.size()];
for (size_t i=0; i<_indices.size(); ++i) {
ptr[i] = _indices[i];
}
indices->length = _indices.size();
indices->val = ptr;
return;
}

151
vendor/gocv.io/x/gocv/dnn.go generated vendored
View File

@ -180,6 +180,7 @@ func (net *Net) ForwardLayers(outBlobNames []string) (blobs []Mat) {
blobs = make([]Mat, cMats.length)
for i := C.int(0); i < cMats.length; i++ {
blobs[i].p = C.Mats_get(cMats, i)
addMatToProfile(blobs[i].p)
}
return
}
@ -292,6 +293,43 @@ func ReadNetFromTensorflowBytes(model []byte) (Net, error) {
return Net{p: unsafe.Pointer(C.Net_ReadNetFromTensorflowBytes(*bModel))}, nil
}
// ReadNetFromTorch reads a network model stored in Torch framework's format (t7).
// Check net.Empty() for read failure.
//
// For further details, please see:
// https://docs.opencv.org/master/d6/d0f/group__dnn.html#gaaaed8c8530e9e92fe6647700c13d961e
//
func ReadNetFromTorch(model string) Net {
cmodel := C.CString(model)
defer C.free(unsafe.Pointer(cmodel))
return Net{p: unsafe.Pointer(C.Net_ReadNetFromTorch(cmodel))}
}
// ReadNetFromONNX reads a network model stored in ONNX framework's format.
// Check net.Empty() for read failure.
//
// For further details, please see:
// https://docs.opencv.org/master/d6/d0f/group__dnn.html#ga7faea56041d10c71dbbd6746ca854197
//
func ReadNetFromONNX(model string) Net {
cmodel := C.CString(model)
defer C.free(unsafe.Pointer(cmodel))
return Net{p: unsafe.Pointer(C.Net_ReadNetFromONNX(cmodel))}
}
// ReadNetFromONNXBytes reads a network model stored in ONNX framework's format.
//
// For further details, please see:
// https://docs.opencv.org/master/d6/d0f/group__dnn.html#ga9198ecaac7c32ddf0aa7a1bcbd359567
//
func ReadNetFromONNXBytes(model []byte) (Net, error) {
bModel, err := toByteArray(model)
if err != nil {
return Net{}, err
}
return Net{p: unsafe.Pointer(C.Net_ReadNetFromONNXBytes(*bModel))}, nil
}
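// Usage sketch (illustrative only; the model path is hypothetical):
//
//	net := gocv.ReadNetFromONNX("model.onnx")
//	defer net.Close()
//	if net.Empty() {
//		// the model could not be read
//	}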
// BlobFromImage creates 4-dimensional blob from image. Optionally resizes and crops
// image from center, subtracts mean values, scales values by scalefactor,
// and swaps Blue and Red channels.
@ -414,13 +452,14 @@ func (net *Net) GetPerfProfile() float64 {
func (net *Net) GetUnconnectedOutLayers() (ids []int) {
cids := C.IntVector{}
C.Net_GetUnconnectedOutLayers((C.Net)(net.p), &cids)
defer C.free(unsafe.Pointer(cids.val))
h := &reflect.SliceHeader{
Data: uintptr(unsafe.Pointer(cids.val)),
Len: int(cids.length),
Cap: int(cids.length),
}
pcids := *(*[]int)(unsafe.Pointer(h))
pcids := *(*[]C.int)(unsafe.Pointer(h))
for i := 0; i < int(cids.length); i++ {
ids = append(ids, int(pcids[i]))
@ -435,19 +474,9 @@ func (net *Net) GetUnconnectedOutLayers() (ids []int) {
//
func (net *Net) GetLayerNames() (names []string) {
cstrs := C.CStrings{}
defer C.CStrings_Close(cstrs)
C.Net_GetLayerNames((C.Net)(net.p), &cstrs)
h := &reflect.SliceHeader{
Data: uintptr(unsafe.Pointer(cstrs.strs)),
Len: int(cstrs.length),
Cap: int(cstrs.length),
}
pcstrs := *(*[]string)(unsafe.Pointer(h))
for i := 0; i < int(cstrs.length); i++ {
names = append(names, string(pcstrs[i]))
}
return
return toGoStrings(cstrs)
}
// Close Layer
@ -488,3 +517,99 @@ func (l *Layer) OutputNameToIndex(name string) int {
defer C.free(unsafe.Pointer(cName))
return int(C.Layer_OutputNameToIndex((C.Layer)(l.p), cName))
}
// NMSBoxes performs non-maximum suppression given boxes and corresponding scores.
//
// For further details, please see:
// https://docs.opencv.org/4.4.0/d6/d0f/group__dnn.html#ga9d118d70a1659af729d01b10233213ee
func NMSBoxes(bboxes []image.Rectangle, scores []float32, scoreThreshold float32, nmsThreshold float32, indices []int) {
bboxesRectArr := []C.struct_Rect{}
for _, v := range bboxes {
bbox := C.struct_Rect{
x: C.int(v.Min.X),
y: C.int(v.Min.Y),
width: C.int(v.Size().X),
height: C.int(v.Size().Y),
}
bboxesRectArr = append(bboxesRectArr, bbox)
}
bboxesRects := C.Rects{
rects: (*C.Rect)(&bboxesRectArr[0]),
length: C.int(len(bboxes)),
}
scoresFloats := []C.float{}
for _, v := range scores {
scoresFloats = append(scoresFloats, C.float(v))
}
scoresVector := C.struct_FloatVector{}
scoresVector.val = (*C.float)(&scoresFloats[0])
scoresVector.length = (C.int)(len(scoresFloats))
indicesVector := C.IntVector{}
C.NMSBoxes(bboxesRects, scoresVector, C.float(scoreThreshold), C.float(nmsThreshold), &indicesVector)
defer C.free(unsafe.Pointer(indicesVector.val))
h := &reflect.SliceHeader{
Data: uintptr(unsafe.Pointer(indicesVector.val)),
Len: int(indicesVector.length),
Cap: int(indicesVector.length),
}
ptr := *(*[]C.int)(unsafe.Pointer(h))
for i := 0; i < int(indicesVector.length); i++ {
indices[i] = int(ptr[i])
}
return
}
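// Usage sketch (illustrative only): indices must be pre-allocated by the
// caller; entries beyond the number of kept boxes keep their zero value.
//
//	boxes := []image.Rectangle{image.Rect(0, 0, 10, 10), image.Rect(1, 1, 11, 11)}
//	scores := []float32{0.9, 0.8}
//	indices := make([]int, len(boxes))
//	gocv.NMSBoxes(boxes, scores, 0.5, 0.4, indices)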
// NMSBoxesWithParams performs non-maximum suppression given boxes and corresponding scores.
//
// For further details, please see:
// https://docs.opencv.org/4.4.0/d6/d0f/group__dnn.html#ga9d118d70a1659af729d01b10233213ee
func NMSBoxesWithParams(bboxes []image.Rectangle, scores []float32, scoreThreshold float32, nmsThreshold float32, indices []int, eta float32, topK int) {
bboxesRectArr := []C.struct_Rect{}
for _, v := range bboxes {
bbox := C.struct_Rect{
x: C.int(v.Min.X),
y: C.int(v.Min.Y),
width: C.int(v.Size().X),
height: C.int(v.Size().Y),
}
bboxesRectArr = append(bboxesRectArr, bbox)
}
bboxesRects := C.Rects{
rects: (*C.Rect)(&bboxesRectArr[0]),
length: C.int(len(bboxes)),
}
scoresFloats := []C.float{}
for _, v := range scores {
scoresFloats = append(scoresFloats, C.float(v))
}
scoresVector := C.struct_FloatVector{}
scoresVector.val = (*C.float)(&scoresFloats[0])
scoresVector.length = (C.int)(len(scoresFloats))
indicesVector := C.IntVector{}
C.NMSBoxesWithParams(bboxesRects, scoresVector, C.float(scoreThreshold), C.float(nmsThreshold), &indicesVector, C.float(eta), C.int(topK))
defer C.free(unsafe.Pointer(indicesVector.val))
h := &reflect.SliceHeader{
Data: uintptr(unsafe.Pointer(indicesVector.val)),
Len: int(indicesVector.length),
Cap: int(indicesVector.length),
}
ptr := *(*[]C.int)(unsafe.Pointer(h))
for i := 0; i < int(indicesVector.length); i++ {
indices[i] = int(ptr[i])
}
return
}

6
vendor/gocv.io/x/gocv/dnn.h generated vendored
View File

@ -25,6 +25,9 @@ Net Net_ReadNetFromCaffe(const char* prototxt, const char* caffeModel);
Net Net_ReadNetFromCaffeBytes(struct ByteArray prototxt, struct ByteArray caffeModel);
Net Net_ReadNetFromTensorflow(const char* model);
Net Net_ReadNetFromTensorflowBytes(struct ByteArray model);
Net Net_ReadNetFromTorch(const char* model);
Net Net_ReadNetFromONNX(const char* model);
Net Net_ReadNetFromONNXBytes(struct ByteArray model);
Mat Net_BlobFromImage(Mat image, double scalefactor, Size size, Scalar mean, bool swapRB,
bool crop);
void Net_BlobFromImages(struct Mats images, Mat blob, double scalefactor, Size size,
@ -51,6 +54,9 @@ int Layer_OutputNameToIndex(Layer layer, const char* name);
const char* Layer_GetName(Layer layer);
const char* Layer_GetType(Layer layer);
void NMSBoxes(struct Rects bboxes, FloatVector scores, float score_threshold, float nms_threshold, IntVector* indices);
void NMSBoxesWithParams(struct Rects bboxes, FloatVector scores, const float score_threshold, const float nms_threshold, IntVector* indices, const float eta, const int top_k);
#ifdef __cplusplus
}
#endif

83
vendor/gocv.io/x/gocv/features2d.cpp generated vendored
View File

@ -413,6 +413,50 @@ struct MultiDMatches BFMatcher_KnnMatchWithParams(BFMatcher b, Mat query, Mat tr
return ret;
}
FlannBasedMatcher FlannBasedMatcher_Create() {
return new cv::Ptr<cv::FlannBasedMatcher>(cv::FlannBasedMatcher::create());
}
void FlannBasedMatcher_Close(FlannBasedMatcher f) {
delete f;
}
struct MultiDMatches FlannBasedMatcher_KnnMatch(FlannBasedMatcher f, Mat query, Mat train, int k) {
std::vector< std::vector<cv::DMatch> > matches;
(*f)->knnMatch(*query, *train, matches, k);
DMatches *dms = new DMatches[matches.size()];
for (size_t i = 0; i < matches.size(); ++i) {
DMatch *dmatches = new DMatch[matches[i].size()];
for (size_t j = 0; j < matches[i].size(); ++j) {
DMatch dmatch = {matches[i][j].queryIdx, matches[i][j].trainIdx, matches[i][j].imgIdx,
matches[i][j].distance};
dmatches[j] = dmatch;
}
dms[i] = {dmatches, (int) matches[i].size()};
}
MultiDMatches ret = {dms, (int) matches.size()};
return ret;
}
struct MultiDMatches FlannBasedMatcher_KnnMatchWithParams(FlannBasedMatcher f, Mat query, Mat train, int k, Mat mask, bool compactResult) {
std::vector< std::vector<cv::DMatch> > matches;
(*f)->knnMatch(*query, *train, matches, k, *mask, compactResult);
DMatches *dms = new DMatches[matches.size()];
for (size_t i = 0; i < matches.size(); ++i) {
DMatch *dmatches = new DMatch[matches[i].size()];
for (size_t j = 0; j < matches[i].size(); ++j) {
DMatch dmatch = {matches[i][j].queryIdx, matches[i][j].trainIdx, matches[i][j].imgIdx,
matches[i][j].distance};
dmatches[j] = dmatch;
}
dms[i] = {dmatches, (int) matches[i].size()};
}
MultiDMatches ret = {dms, (int) matches.size()};
return ret;
}
void DrawKeyPoints(Mat src, struct KeyPoints kp, Mat dst, Scalar s, int flags) {
std::vector<cv::KeyPoint> keypts;
cv::KeyPoint keypt;
@ -471,3 +515,42 @@ struct KeyPoints SIFT_DetectAndCompute(SIFT d, Mat src, Mat mask, Mat desc) {
KeyPoints ret = {kps, (int)detected.size()};
return ret;
}
void DrawMatches(Mat img1, struct KeyPoints kp1, Mat img2, struct KeyPoints kp2, struct DMatches matches1to2, Mat outImg, const Scalar matchesColor, const Scalar pointColor, struct ByteArray matchesMask, int flags) {
std::vector<cv::KeyPoint> kp1vec, kp2vec;
cv::KeyPoint keypt;
for (int i = 0; i < kp1.length; ++i) {
keypt = cv::KeyPoint(kp1.keypoints[i].x, kp1.keypoints[i].y,
kp1.keypoints[i].size, kp1.keypoints[i].angle, kp1.keypoints[i].response,
kp1.keypoints[i].octave, kp1.keypoints[i].classID);
kp1vec.push_back(keypt);
}
for (int i = 0; i < kp2.length; ++i) {
keypt = cv::KeyPoint(kp2.keypoints[i].x, kp2.keypoints[i].y,
kp2.keypoints[i].size, kp2.keypoints[i].angle, kp2.keypoints[i].response,
kp2.keypoints[i].octave, kp2.keypoints[i].classID);
kp2vec.push_back(keypt);
}
cv::Scalar cvmatchescolor = cv::Scalar(matchesColor.val1, matchesColor.val2, matchesColor.val3, matchesColor.val4);
cv::Scalar cvpointcolor = cv::Scalar(pointColor.val1, pointColor.val2, pointColor.val3, pointColor.val4);
std::vector<cv::DMatch> dmatchvec;
cv::DMatch dm;
for (int i = 0; i < matches1to2.length; i++) {
dm = cv::DMatch(matches1to2.dmatches[i].queryIdx, matches1to2.dmatches[i].trainIdx,
matches1to2.dmatches[i].imgIdx, matches1to2.dmatches[i].distance);
dmatchvec.push_back(dm);
}
std::vector<char> maskvec;
for (int i = 0; i < matchesMask.length; i++) {
maskvec.push_back(matchesMask.data[i]);
}
cv::drawMatches(*img1, kp1vec, *img2, kp2vec, dmatchvec, *outImg, cvmatchescolor, cvpointcolor, maskvec, static_cast<cv::DrawMatchesFlags>(flags));
}

118
vendor/gocv.io/x/gocv/features2d.go generated vendored
View File

@ -665,6 +665,40 @@ func (b *BFMatcher) KnnMatch(query, train Mat, k int) [][]DMatch {
return getMultiDMatches(ret)
}
// FlannBasedMatcher is a wrapper around the cv::FlannBasedMatcher algorithm.
type FlannBasedMatcher struct {
// C.FlannBasedMatcher
p unsafe.Pointer
}
// NewFlannBasedMatcher returns a new FlannBasedMatcher
//
// For further details, please see:
// https://docs.opencv.org/master/dc/de2/classcv_1_1FlannBasedMatcher.html#ab9114a6471e364ad221f89068ca21382
//
func NewFlannBasedMatcher() FlannBasedMatcher {
return FlannBasedMatcher{p: unsafe.Pointer(C.FlannBasedMatcher_Create())}
}
// Close FlannBasedMatcher
func (f *FlannBasedMatcher) Close() error {
C.FlannBasedMatcher_Close((C.FlannBasedMatcher)(f.p))
f.p = nil
return nil
}
// KnnMatch finds the k best matches for each descriptor from a query set.
//
// For further details, please see:
// https://docs.opencv.org/master/db/d39/classcv_1_1DescriptorMatcher.html#aa880f9353cdf185ccf3013e08210483a
//
func (f *FlannBasedMatcher) KnnMatch(query, train Mat, k int) [][]DMatch {
ret := C.FlannBasedMatcher_KnnMatch((C.FlannBasedMatcher)(f.p), query.p, train.p, C.int(k))
defer C.MultiDMatches_Close(ret)
return getMultiDMatches(ret)
}
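// Usage sketch (illustrative only): matching SIFT descriptors from two
// hypothetical grayscale Mats img1 and img2 with k=2, as typically done
// before a ratio test. FLANN expects float descriptors such as SIFT's.
//
//	sift := gocv.NewSIFT()
//	defer sift.Close()
//	mask := gocv.NewMat()
//	defer mask.Close()
//	_, desc1 := sift.DetectAndCompute(img1, mask)
//	defer desc1.Close()
//	_, desc2 := sift.DetectAndCompute(img2, mask)
//	defer desc2.Close()
//	fb := gocv.NewFlannBasedMatcher()
//	defer fb.Close()
//	matches := fb.KnnMatch(desc1, desc2, 2)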
func getMultiDMatches(ret C.MultiDMatches) [][]DMatch {
cArray := ret.dmatches
length := int(ret.length)
@ -779,6 +813,7 @@ func (d *SIFT) Close() error {
//
func (d *SIFT) Detect(src Mat) []KeyPoint {
ret := C.SIFT_Detect((C.SIFT)(d.p), C.Mat(src.Ptr()))
defer C.KeyPoints_Close(ret)
return getKeyPoints(ret)
}
@ -792,6 +827,89 @@ func (d *SIFT) DetectAndCompute(src Mat, mask Mat) ([]KeyPoint, Mat) {
desc := NewMat()
ret := C.SIFT_DetectAndCompute((C.SIFT)(d.p), C.Mat(src.Ptr()), C.Mat(mask.Ptr()),
C.Mat(desc.Ptr()))
defer C.KeyPoints_Close(ret)
return getKeyPoints(ret), desc
}
// DrawMatches draws matches on combined train and query images.
//
// For further details, please see:
// https://docs.opencv.org/master/d4/d5d/group__features2d__draw.html#gad8f463ccaf0dc6f61083abd8717c261a
func DrawMatches(img1 Mat, kp1 []KeyPoint, img2 Mat, kp2 []KeyPoint, matches1to2 []DMatch, outImg *Mat, matchColor color.RGBA, singlePointColor color.RGBA, matchesMask []byte, flags DrawMatchesFlag) {
kp1arr := make([]C.struct_KeyPoint, len(kp1))
kp2arr := make([]C.struct_KeyPoint, len(kp2))
for i, kp := range kp1 {
kp1arr[i].x = C.double(kp.X)
kp1arr[i].y = C.double(kp.Y)
kp1arr[i].size = C.double(kp.Size)
kp1arr[i].angle = C.double(kp.Angle)
kp1arr[i].response = C.double(kp.Response)
kp1arr[i].octave = C.int(kp.Octave)
kp1arr[i].classID = C.int(kp.ClassID)
}
for i, kp := range kp2 {
kp2arr[i].x = C.double(kp.X)
kp2arr[i].y = C.double(kp.Y)
kp2arr[i].size = C.double(kp.Size)
kp2arr[i].angle = C.double(kp.Angle)
kp2arr[i].response = C.double(kp.Response)
kp2arr[i].octave = C.int(kp.Octave)
kp2arr[i].classID = C.int(kp.ClassID)
}
cKeyPoints1 := C.struct_KeyPoints{
keypoints: (*C.struct_KeyPoint)(&kp1arr[0]),
length: (C.int)(len(kp1)),
}
cKeyPoints2 := C.struct_KeyPoints{
keypoints: (*C.struct_KeyPoint)(&kp2arr[0]),
length: (C.int)(len(kp2)),
}
dMatchArr := make([]C.struct_DMatch, len(matches1to2))
for i, dm := range matches1to2 {
dMatchArr[i].queryIdx = C.int(dm.QueryIdx)
dMatchArr[i].trainIdx = C.int(dm.TrainIdx)
dMatchArr[i].imgIdx = C.int(dm.ImgIdx)
dMatchArr[i].distance = C.float(dm.Distance)
}
cDMatches := C.struct_DMatches{
dmatches: (*C.struct_DMatch)(&dMatchArr[0]),
length: (C.int)(len(matches1to2)),
}
scalarMatchColor := C.struct_Scalar{
val1: C.double(matchColor.R),
val2: C.double(matchColor.G),
val3: C.double(matchColor.B),
val4: C.double(matchColor.A),
}
scalarPointColor := C.struct_Scalar{
val1: C.double(singlePointColor.B),
val2: C.double(singlePointColor.G),
val3: C.double(singlePointColor.R),
val4: C.double(singlePointColor.A),
}
mask := make([]C.char, len(matchesMask))
cByteArray := C.struct_ByteArray{
length: (C.int)(len(matchesMask)),
}
if len(matchesMask) > 0 {
cByteArray = C.struct_ByteArray{
data: (*C.char)(&mask[0]),
length: (C.int)(len(matchesMask)),
}
}
C.DrawMatches(img1.p, cKeyPoints1, img2.p, cKeyPoints2, cDMatches, outImg.p, scalarMatchColor, scalarPointColor, cByteArray, C.int(flags))
}

8
vendor/gocv.io/x/gocv/features2d.h generated vendored
View File

@ -19,6 +19,7 @@ typedef cv::Ptr<cv::MSER>* MSER;
typedef cv::Ptr<cv::ORB>* ORB;
typedef cv::Ptr<cv::SimpleBlobDetector>* SimpleBlobDetector;
typedef cv::Ptr<cv::BFMatcher>* BFMatcher;
typedef cv::Ptr<cv::FlannBasedMatcher>* FlannBasedMatcher;
typedef cv::Ptr<cv::SIFT>* SIFT;
#else
typedef void* AKAZE;
@ -31,6 +32,7 @@ typedef void* MSER;
typedef void* ORB;
typedef void* SimpleBlobDetector;
typedef void* BFMatcher;
typedef void* FlannBasedMatcher;
typedef void* SIFT;
#endif
@ -82,6 +84,10 @@ BFMatcher BFMatcher_CreateWithParams(int normType, bool crossCheck);
void BFMatcher_Close(BFMatcher b);
struct MultiDMatches BFMatcher_KnnMatch(BFMatcher b, Mat query, Mat train, int k);
FlannBasedMatcher FlannBasedMatcher_Create();
void FlannBasedMatcher_Close(FlannBasedMatcher f);
struct MultiDMatches FlannBasedMatcher_KnnMatch(FlannBasedMatcher f, Mat query, Mat train, int k);
void DrawKeyPoints(Mat src, struct KeyPoints kp, Mat dst, const Scalar s, int flags);
SIFT SIFT_Create();
@ -89,6 +95,8 @@ void SIFT_Close(SIFT f);
struct KeyPoints SIFT_Detect(SIFT f, Mat src);
struct KeyPoints SIFT_DetectAndCompute(SIFT f, Mat src, Mat mask, Mat desc);
void DrawMatches(Mat img1, struct KeyPoints kp1, Mat img2, struct KeyPoints kp2, struct DMatches matches1to2, Mat outImg, const Scalar matchesColor, const Scalar pointColor, struct ByteArray matchesMask, int flags);
#ifdef __cplusplus
}
#endif

2
vendor/gocv.io/x/gocv/go.mod generated vendored
View File

@ -1,3 +1,3 @@
module gocv.io/x/gocv
go 1.13
go 1.13

25
vendor/gocv.io/x/gocv/highgui.go generated vendored
View File

@ -204,8 +204,8 @@ func (w *Window) ResizeWindow(width, height int) {
// For further details, please see:
// https://docs.opencv.org/master/d7/dfc/group__highgui.html#ga8daf4730d3adf7035b6de9be4c469af5
//
func SelectROI(name string, img Mat) image.Rectangle {
cName := C.CString(name)
func (w *Window) SelectROI(img Mat) image.Rectangle {
cName := C.CString(w.name)
defer C.free(unsafe.Pointer(cName))
r := C.Window_SelectROI(cName, img.p)
@ -223,6 +223,27 @@ func SelectROI(name string, img Mat) image.Rectangle {
// For further details, please see:
// https://docs.opencv.org/master/d7/dfc/group__highgui.html#ga0f11fad74a6432b8055fb21621a0f893
//
func (w *Window) SelectROIs(img Mat) []image.Rectangle {
cName := C.CString(w.name)
defer C.free(unsafe.Pointer(cName))
ret := C.Window_SelectROIs(cName, img.p)
defer C.Rects_Close(ret)
return toRectangles(ret)
}
// Deprecated: use Window.SelectROI instead
func SelectROI(name string, img Mat) image.Rectangle {
cName := C.CString(name)
defer C.free(unsafe.Pointer(cName))
r := C.Window_SelectROI(cName, img.p)
rect := image.Rect(int(r.x), int(r.y), int(r.x+r.width), int(r.y+r.height))
return rect
}
// Deprecated: use Window.SelectROIs instead
func SelectROIs(name string, img Mat) []image.Rectangle {
cName := C.CString(name)
defer C.free(unsafe.Pointer(cName))

37
vendor/gocv.io/x/gocv/imgproc.cpp generated vendored
View File

@ -494,7 +494,11 @@ void Polylines(Mat img, Contours points, bool isClosed, Scalar color,int thickne
}
struct Size GetTextSize(const char* text, int fontFace, double fontScale, int thickness) {
cv::Size sz = cv::getTextSize(text, fontFace, fontScale, thickness, NULL);
return GetTextSizeWithBaseline(text, fontFace, fontScale, thickness, NULL);
}
struct Size GetTextSizeWithBaseline(const char* text, int fontFace, double fontScale, int thickness, int* baseline) {
cv::Size sz = cv::getTextSize(text, fontFace, fontScale, thickness, baseline);
Size size = {sz.width, sz.height};
return size;
}
@ -711,3 +715,34 @@ Point2f PhaseCorrelate(Mat src1, Mat src2, Mat window, double* response) {
};
return result2f;
}
void Mat_Accumulate(Mat src, Mat dst) {
cv::accumulate(*src, *dst);
}
void Mat_AccumulateWithMask(Mat src, Mat dst, Mat mask) {
cv::accumulate(*src, *dst, *mask);
}
void Mat_AccumulateSquare(Mat src, Mat dst) {
cv::accumulateSquare(*src, *dst);
}
void Mat_AccumulateSquareWithMask(Mat src, Mat dst, Mat mask) {
cv::accumulateSquare(*src, *dst, *mask);
}
void Mat_AccumulateProduct(Mat src1, Mat src2, Mat dst) {
cv::accumulateProduct(*src1, *src2, *dst);
}
void Mat_AccumulateProductWithMask(Mat src1, Mat src2, Mat dst, Mat mask) {
cv::accumulateProduct(*src1, *src2, *dst, *mask);
}
void Mat_AccumulatedWeighted(Mat src, Mat dst, double alpha) {
cv::accumulateWeighted(*src, *dst, alpha);
}
void Mat_AccumulatedWeightedWithMask(Mat src, Mat dst, double alpha, Mat mask) {
cv::accumulateWeighted(*src, *dst, alpha, *mask);
}

237
vendor/gocv.io/x/gocv/imgproc.go generated vendored
View File

@ -6,6 +6,7 @@ package gocv
*/
import "C"
import (
"errors"
"image"
"image/color"
"reflect"
@ -1447,6 +1448,22 @@ func GetTextSize(text string, fontFace HersheyFont, fontScale float64, thickness
return image.Pt(int(sz.width), int(sz.height))
}
// GetTextSizeWithBaseline calculates the width and height of a text string including the baseline of the text.
// It returns an image.Point with the size required to draw text using
// a specific font face, scale, and thickness as well as its baseline.
//
// For further details, please see:
// http://docs.opencv.org/master/d6/d6e/group__imgproc__draw.html#ga3d2abfcb995fd2db908c8288199dba82
//
func GetTextSizeWithBaseline(text string, fontFace HersheyFont, fontScale float64, thickness int) (image.Point, int) {
cText := C.CString(text)
defer C.free(unsafe.Pointer(cText))
cBaseline := C.int(0)
sz := C.GetTextSizeWithBaseline(cText, C.int(fontFace), C.double(fontScale), C.int(thickness), &cBaseline)
return image.Pt(int(sz.width), int(sz.height)), int(cBaseline)
}
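// Usage sketch (illustrative only): measuring a label before drawing it so
// the text box can be positioned fully inside a hypothetical Mat img.
//
//	sz, baseline := gocv.GetTextSizeWithBaseline("hello", gocv.FontHersheySimplex, 1.0, 2)
//	org := image.Pt(10, 10+sz.Y+baseline)
//	gocv.PutText(&img, "hello", org, gocv.FontHersheySimplex, 1.0, color.RGBA{R: 255, G: 255, B: 255, A: 0}, 2)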
// PutText draws a text string.
// It renders the specified text string into the img Mat at the location
// passed in the "org" param, using the desired font face, font scale,
@ -1930,3 +1947,223 @@ func PhaseCorrelate(src1, src2, window Mat) (phaseShift Point2f, response float6
Y: float32(result.y),
}, float64(responseDouble)
}
// ToImage converts a Mat to a image.Image.
func (m *Mat) ToImage() (image.Image, error) {
switch m.Type() {
case MatTypeCV8UC1:
img := image.NewGray(image.Rect(0, 0, m.Cols(), m.Rows()))
data, err := m.DataPtrUint8()
if err != nil {
return nil, err
}
copy(img.Pix, data[0:])
return img, nil
case MatTypeCV8UC3:
dst := NewMat()
defer dst.Close()
C.CvtColor(m.p, dst.p, C.int(ColorBGRToRGBA))
img := image.NewRGBA(image.Rect(0, 0, m.Cols(), m.Rows()))
data, err := dst.DataPtrUint8()
if err != nil {
return nil, err
}
copy(img.Pix, data[0:])
return img, nil
case MatTypeCV8UC4:
dst := NewMat()
defer dst.Close()
C.CvtColor(m.p, dst.p, C.int(ColorBGRAToRGBA))
img := image.NewNRGBA(image.Rect(0, 0, m.Cols(), m.Rows()))
data, err := dst.DataPtrUint8()
if err != nil {
return nil, err
}
copy(img.Pix, data[0:])
return img, nil
default:
return nil, errors.New("ToImage supports only MatType CV8UC1, CV8UC3 and CV8UC4")
}
}
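// Usage sketch (illustrative only): converting a hypothetical BGR Mat to an
// image.Image and encoding it as PNG with the standard library (os, image/png).
//
//	img, err := mat.ToImage()
//	if err != nil {
//		return err
//	}
//	f, err := os.Create("out.png")
//	if err != nil {
//		return err
//	}
//	defer f.Close()
//	return png.Encode(f, img)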
// ImageToMatRGBA converts image.Image to gocv.Mat,
// which represents RGBA image having 8bit for each component.
// Type of Mat is gocv.MatTypeCV8UC4.
func ImageToMatRGBA(img image.Image) (Mat, error) {
bounds := img.Bounds()
x := bounds.Dx()
y := bounds.Dy()
var data []uint8
switch img.ColorModel() {
case color.RGBAModel:
m, res := img.(*image.RGBA)
if !res {
return NewMat(), errors.New("Image color format error")
}
data = m.Pix
case color.NRGBAModel:
m, res := img.(*image.NRGBA)
if !res {
return NewMat(), errors.New("Image color format error")
}
data = m.Pix
default:
data := make([]byte, 0, x*y*3)
for j := bounds.Min.Y; j < bounds.Max.Y; j++ {
for i := bounds.Min.X; i < bounds.Max.X; i++ {
r, g, b, _ := img.At(i, j).RGBA()
data = append(data, byte(b>>8), byte(g>>8), byte(r>>8))
}
}
return NewMatFromBytes(y, x, MatTypeCV8UC3, data)
}
// speed up the conversion process of RGBA format
cvt, err := NewMatFromBytes(y, x, MatTypeCV8UC4, data)
if err != nil {
return NewMat(), err
}
defer cvt.Close()
dst := NewMat()
C.CvtColor(cvt.p, dst.p, C.int(ColorBGRAToRGBA))
return dst, nil
}
// ImageToMatRGB converts image.Image to gocv.Mat,
// which represents RGB image having 8bit for each component.
// Type of Mat is gocv.MatTypeCV8UC3.
func ImageToMatRGB(img image.Image) (Mat, error) {
bounds := img.Bounds()
x := bounds.Dx()
y := bounds.Dy()
var data []uint8
switch img.ColorModel() {
case color.RGBAModel:
m, res := img.(*image.RGBA)
if true != res {
return NewMat(), errors.New("Image color format error")
}
data = m.Pix
// speed up the conversion process of RGBA format
src, err := NewMatFromBytes(y, x, MatTypeCV8UC4, data)
if err != nil {
return NewMat(), err
}
defer src.Close()
dst := NewMat()
CvtColor(src, &dst, ColorRGBAToBGR)
return dst, nil
default:
data := make([]byte, 0, x*y*3)
for j := bounds.Min.Y; j < bounds.Max.Y; j++ {
for i := bounds.Min.X; i < bounds.Max.X; i++ {
r, g, b, _ := img.At(i, j).RGBA()
data = append(data, byte(b>>8), byte(g>>8), byte(r>>8))
}
}
return NewMatFromBytes(y, x, MatTypeCV8UC3, data)
}
}
// ImageGrayToMatGray converts image.Gray to gocv.Mat,
// which represents grayscale image 8bit.
// Type of Mat is gocv.MatTypeCV8UC1.
func ImageGrayToMatGray(img *image.Gray) (Mat, error) {
bounds := img.Bounds()
x := bounds.Dx()
y := bounds.Dy()
m, err := NewMatFromBytes(y, x, MatTypeCV8UC1, img.Pix)
if err != nil {
return NewMat(), err
}
return m, nil
}
// Adds an image to the accumulator image.
//
// For further details, please see:
// https://docs.opencv.org/master/d7/df3/group__imgproc__motion.html#ga1a567a79901513811ff3b9976923b199
//
func Accumulate(src Mat, dst *Mat) {
C.Mat_Accumulate(src.p, dst.p)
}
// Adds an image to the accumulator image with mask.
//
// For further details, please see:
// https://docs.opencv.org/master/d7/df3/group__imgproc__motion.html#ga1a567a79901513811ff3b9976923b199
//
func AccumulateWithMask(src Mat, dst *Mat, mask Mat) {
C.Mat_AccumulateWithMask(src.p, dst.p, mask.p)
}
// Adds the square of a source image to the accumulator image.
//
// For further details, please see:
// https://docs.opencv.org/master/d7/df3/group__imgproc__motion.html#gacb75e7ffb573227088cef9ceaf80be8c
//
func AccumulateSquare(src Mat, dst *Mat) {
C.Mat_AccumulateSquare(src.p, dst.p)
}
// Adds the square of a source image to the accumulator image with mask.
//
// For further details, please see:
// https://docs.opencv.org/master/d7/df3/group__imgproc__motion.html#gacb75e7ffb573227088cef9ceaf80be8c
//
func AccumulateSquareWithMask(src Mat, dst *Mat, mask Mat) {
C.Mat_AccumulateSquareWithMask(src.p, dst.p, mask.p)
}
// Adds the per-element product of two input images to the accumulator image.
//
// For further details, please see:
// https://docs.opencv.org/master/d7/df3/group__imgproc__motion.html#ga82518a940ecfda49460f66117ac82520
//
func AccumulateProduct(src1 Mat, src2 Mat, dst *Mat) {
C.Mat_AccumulateProduct(src1.p, src2.p, dst.p)
}
// Adds the per-element product of two input images to the accumulator image with mask.
//
// For further details, please see:
// https://docs.opencv.org/master/d7/df3/group__imgproc__motion.html#ga82518a940ecfda49460f66117ac82520
//
func AccumulateProductWithMask(src1 Mat, src2 Mat, dst *Mat, mask Mat) {
C.Mat_AccumulateProductWithMask(src1.p, src2.p, dst.p, mask.p)
}
// Updates a running average.
//
// For further details, please see:
// https://docs.opencv.org/master/d7/df3/group__imgproc__motion.html#ga4f9552b541187f61f6818e8d2d826bc7
//
func AccumulatedWeighted(src Mat, dst *Mat, alpha float64) {
C.Mat_AccumulatedWeighted(src.p, dst.p, C.double(alpha))
}
// Updates a running average with mask.
//
// For further details, please see:
// https://docs.opencv.org/master/d7/df3/group__imgproc__motion.html#ga4f9552b541187f61f6818e8d2d826bc7
//
func AccumulatedWeightedWithMask(src Mat, dst *Mat, alpha float64, mask Mat) {
C.Mat_AccumulatedWeightedWithMask(src.p, dst.p, C.double(alpha), mask.p)
}
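// Usage sketch (illustrative only): maintaining a running-average background
// model over hypothetical 8-bit BGR frames. The accumulator must be a float
// Mat of the same size and channel count as the frames.
//
//	acc := gocv.NewMatWithSize(frame.Rows(), frame.Cols(), gocv.MatTypeCV32FC3)
//	defer acc.Close()
//	for i := 0; i < 100; i++ {
//		// ... read the next frame into `frame` ...
//		gocv.AccumulatedWeighted(frame, &acc, 0.05)
//	}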

10
vendor/gocv.io/x/gocv/imgproc.h generated vendored
View File

@ -85,6 +85,7 @@ void Rectangle(Mat img, Rect rect, Scalar color, int thickness);
void FillPoly(Mat img, Contours points, Scalar color);
void Polylines(Mat img, Contours points, bool isClosed, Scalar color, int thickness);
struct Size GetTextSize(const char* text, int fontFace, double fontScale, int thickness);
struct Size GetTextSizeWithBaseline(const char* text, int fontFace, double fontScale, int thickness, int* baseline);
void PutText(Mat img, const char* text, Point org, int fontFace, double fontScale,
Scalar color, int thickness);
void PutTextWithParams(Mat img, const char* text, Point org, int fontFace, double fontScale,
@ -120,7 +121,14 @@ void CLAHE_Close(CLAHE c);
void CLAHE_Apply(CLAHE c, Mat src, Mat dst);
void InvertAffineTransform(Mat src, Mat dst);
Point2f PhaseCorrelate(Mat src1, Mat src2, Mat window, double* response);
void Mat_Accumulate(Mat src, Mat dst);
void Mat_AccumulateWithMask(Mat src, Mat dst, Mat mask);
void Mat_AccumulateSquare(Mat src, Mat dst);
void Mat_AccumulateSquareWithMask(Mat src, Mat dst, Mat mask);
void Mat_AccumulateProduct(Mat src1, Mat src2, Mat dst);
void Mat_AccumulateProductWithMask(Mat src1, Mat src2, Mat dst, Mat mask);
void Mat_AccumulatedWeighted(Mat src, Mat dst, double alpha);
void Mat_AccumulatedWeightedWithMask(Mat src, Mat dst, double alpha, Mat mask);
#ifdef __cplusplus
}
#endif

View File

@ -8,6 +8,11 @@ package gocv
*/
import "C"
// addMatToProfile does nothing if matprofile tag is not set.
func addMatToProfile(p C.Mat) {
return
}
// newMat returns a new Mat from a C Mat
func newMat(p C.Mat) Mat {
return Mat{p: p}
@ -17,5 +22,6 @@ func newMat(p C.Mat) Mat {
func (m *Mat) Close() error {
C.Mat_Close(m.p)
m.p = nil
m.d = nil
return nil
}

View File

@ -58,6 +58,12 @@ func init() {
}
}
// addMatToProfile records Mat to the MatProfile.
func addMatToProfile(p C.Mat) {
MatProfile.Add(p, 1)
return
}
// newMat returns a new Mat from a C Mat and records it to the MatProfile.
func newMat(p C.Mat) Mat {
m := Mat{p: p}
@ -70,5 +76,6 @@ func (m *Mat) Close() error {
C.Mat_Close(m.p)
MatProfile.Remove(m.p)
m.p = nil
m.d = nil
return nil
}

27
vendor/gocv.io/x/gocv/objdetect.cpp generated vendored
View File

@ -149,3 +149,30 @@ const char* QRCodeDetector_Decode(QRCodeDetector qr, Mat input,Mat inputPoints,M
cv::String *str = new cv::String(qr->detectAndDecode(*input,*inputPoints,*straight_qrcode));
return str->c_str();
}
bool QRCodeDetector_DetectMulti(QRCodeDetector qr, Mat input, Mat points) {
return qr->detectMulti(*input,*points);
}
bool QRCodeDetector_DetectAndDecodeMulti(QRCodeDetector qr, Mat input, CStrings* decoded, Mat points, struct Mats* qrCodes) {
std::vector<cv::String> decodedCodes;
std::vector<cv::Mat> straightQrCodes;
bool res = qr->detectAndDecodeMulti(*input, decodedCodes, *points, straightQrCodes);
if (!res) {
return res;
}
qrCodes->mats = new Mat[straightQrCodes.size()];
qrCodes->length = straightQrCodes.size();
for (size_t i = 0; i < straightQrCodes.size(); i++) {
qrCodes->mats[i] = new cv::Mat(straightQrCodes[i]);
}
const char **strs = new const char*[decodedCodes.size()];
for (size_t i = 0; i < decodedCodes.size(); ++i) {
strs[i] = decodedCodes[i].c_str();
}
decoded->length = decodedCodes.size();
decoded->strs = strs;
return res;
}

45
vendor/gocv.io/x/gocv/objdetect.go generated vendored
View File

@ -211,6 +211,7 @@ func (a *QRCodeDetector) Close() error {
// DetectAndDecode both detects and decodes a QR code.
//
// Returns true as long as some QR code was detected, even if the decoding failed.
// For further details, please see:
// https://docs.opencv.org/master/de/dc3/classcv_1_1QRCodeDetector.html#a7290bd6a5d59b14a37979c3a14fbf394
//
@ -238,3 +239,47 @@ func (a *QRCodeDetector) Decode(input Mat, points Mat, straight_qrcode *Mat) str
goResult := C.GoString(C.QRCodeDetector_DetectAndDecode(a.p, input.p, points.p, straight_qrcode.p))
return string(goResult)
}
// DetectMulti detects QR codes in an image and finds the quadrangles containing the codes.
//
// Each quadrangle is returned as a row in the `points` Mat and each point is a Vecf.
// Returns true if a QR code was detected.
// For usage please see TestQRCodeDetector.
// For further details, please see:
// https://docs.opencv.org/master/de/dc3/classcv_1_1QRCodeDetector.html#aaf2b6b2115b8e8fbc9acf3a8f68872b6
func (a *QRCodeDetector) DetectMulti(input Mat, points *Mat) bool {
result := C.QRCodeDetector_DetectMulti(a.p, input.p, points.p)
return bool(result)
}
// DetectAndDecodeMulti detects QR codes in an image, finds the quadrangles containing the codes, and decodes the QR codes to strings.
//
// Each quadrangle is returned as a row in the `points` Mat and each point is a Vecf.
// Returns true as long as some QR code was detected, even if the decoding failed.
// For usage please see TestQRCodeDetector.
// For further details, please see:
// https://docs.opencv.org/master/de/dc3/classcv_1_1QRCodeDetector.html#a188b63ffa17922b2c65d8a0ab7b70775
func (a *QRCodeDetector) DetectAndDecodeMulti(input Mat, decoded *[]string, points *Mat, qrCodes *[]Mat) bool {
cDecoded := C.CStrings{}
defer C.CStrings_Close(cDecoded)
cQrCodes := C.struct_Mats{}
defer C.Mats_Close(cQrCodes)
success := C.QRCodeDetector_DetectAndDecodeMulti(a.p, input.p, &cDecoded, points.p, &cQrCodes)
if !success {
return bool(success)
}
tmpCodes := make([]Mat, cQrCodes.length)
for i := C.int(0); i < cQrCodes.length; i++ {
tmpCodes[i].p = C.Mats_get(cQrCodes, i)
}
for _, qr := range tmpCodes {
*qrCodes = append(*qrCodes, qr)
}
for _, s := range toGoStrings(cDecoded) {
*decoded = append(*decoded, s)
}
return bool(success)
}
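// Usage sketch (illustrative only, on a hypothetical Mat img):
//
//	detector := gocv.NewQRCodeDetector()
//	defer detector.Close()
//	points := gocv.NewMat()
//	defer points.Close()
//	var decoded []string
//	var codes []gocv.Mat
//	if detector.DetectAndDecodeMulti(img, &decoded, &points, &codes) {
//		// decoded[i] holds the text of the i-th detected QR code
//	}
//	for _, c := range codes {
//		c.Close()
//	}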

2
vendor/gocv.io/x/gocv/objdetect.h generated vendored
View File

@ -45,6 +45,8 @@ const char* QRCodeDetector_DetectAndDecode(QRCodeDetector qr, Mat input,Mat poin
bool QRCodeDetector_Detect(QRCodeDetector qr, Mat input,Mat points);
const char* QRCodeDetector_Decode(QRCodeDetector qr, Mat input,Mat inputPoints,Mat straight_qrcode);
void QRCodeDetector_Close(QRCodeDetector qr);
bool QRCodeDetector_DetectMulti(QRCodeDetector qr, Mat input, Mat points);
bool QRCodeDetector_DetectAndDecodeMulti(QRCodeDetector qr, Mat input, CStrings* decoded ,Mat points, struct Mats* mats);
#ifdef __cplusplus
}

18
vendor/gocv.io/x/gocv/photo.cpp generated vendored Normal file
View File

@ -0,0 +1,18 @@
#include "photo.h"
void ColorChange(Mat src, Mat mask, Mat dst, float red_mul, float green_mul, float blue_mul) {
cv::colorChange(*src, *mask, *dst, red_mul, green_mul, blue_mul);
}
void IlluminationChange(Mat src, Mat mask, Mat dst, float alpha, float beta) {
cv::illuminationChange(*src, *mask, *dst, alpha, beta);
}
void SeamlessClone(Mat src, Mat dst, Mat mask, Point p, Mat blend, int flags) {
cv::Point pt(p.x, p.y);
cv::seamlessClone(*src, *dst, *mask, pt, *blend, flags);
}
void TextureFlattening(Mat src, Mat mask, Mat dst, float low_threshold, float high_threshold, int kernel_size) {
cv::textureFlattening(*src, *mask, *dst, low_threshold, high_threshold, kernel_size);
}

63
vendor/gocv.io/x/gocv/photo.go generated vendored Normal file
View File

@ -0,0 +1,63 @@
package gocv
/*
#include <stdlib.h>
#include "photo.h"
*/
import "C"
import "image"
// SeamlessCloneFlags defines the flags for the seamlessClone algorithm.
type SeamlessCloneFlags int
const (
// NormalClone The power of the method is fully expressed when inserting objects with complex outlines into a new background.
NormalClone SeamlessCloneFlags = iota
// MixedClone The classic method, color-based selection and alpha masking might be time consuming and often leaves an undesirable halo. Seamless cloning, even averaged with the original image, is not effective. Mixed seamless cloning based on a loose selection proves effective.
MixedClone
// MonochromeTransfer Monochrome transfer allows the user to easily replace certain features of one object by alternative features.
MonochromeTransfer
)
// ColorChange mixes two differently colored versions of an image seamlessly.
//
// For further details, please see:
// https://docs.opencv.org/master/df/da0/group__photo__clone.html#ga6684f35dc669ff6196a7c340dc73b98e
//
func ColorChange(src, mask Mat, dst *Mat, red_mul, green_mul, blue_mul float32) {
C.ColorChange(src.p, mask.p, dst.p, C.float(red_mul), C.float(green_mul), C.float(blue_mul))
}
// SeamlessClone blends two images using Poisson blending.
//
// For further details, please see:
// https://docs.opencv.org/master/df/da0/group__photo__clone.html#ga2bf426e4c93a6b1f21705513dfeca49d
//
func SeamlessClone(src, dst, mask Mat, p image.Point, blend *Mat, flags SeamlessCloneFlags) {
cp := C.struct_Point{
x: C.int(p.X),
y: C.int(p.Y),
}
C.SeamlessClone(src.p, dst.p, mask.p, cp, blend.p, C.int(flags))
}
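// Usage sketch (illustrative only, on hypothetical Mats src and dst): cloning
// src into the center of dst with a full white mask over the source region.
//
//	mask := gocv.NewMatWithSizeFromScalar(gocv.NewScalar(255, 255, 255, 255), src.Rows(), src.Cols(), gocv.MatTypeCV8UC3)
//	defer mask.Close()
//	blend := gocv.NewMat()
//	defer blend.Close()
//	center := image.Pt(dst.Cols()/2, dst.Rows()/2)
//	gocv.SeamlessClone(src, dst, mask, center, &blend, gocv.NormalClone)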
// IlluminationChange modifies locally the apparent illumination of an image.
//
// For further details, please see:
// https://docs.opencv.org/master/df/da0/group__photo__clone.html#gac5025767cf2febd8029d474278e886c7
//
func IlluminationChange(src, mask Mat, dst *Mat, alpha, beta float32) {
C.IlluminationChange(src.p, mask.p, dst.p, C.float(alpha), C.float(beta))
}
// TextureFlattening washes out the texture of the selected region, giving its contents a flat aspect.
//
// For further details, please see:
// https://docs.opencv.org/master/df/da0/group__photo__clone.html#gad55df6aa53797365fa7cc23959a54004
//
func TextureFlattening(src, mask Mat, dst *Mat, lowThreshold, highThreshold float32, kernelSize int) {
C.TextureFlattening(src.p, mask.p, dst.p, C.float(lowThreshold), C.float(highThreshold), C.int(kernelSize))
}

24
vendor/gocv.io/x/gocv/photo.h generated vendored Normal file
View File

@ -0,0 +1,24 @@
#ifndef _OPENCV3_PHOTO_H_
#define _OPENCV3_PHOTO_H_
#ifdef __cplusplus
#include <opencv2/opencv.hpp>
extern "C" {
#endif
#include "core.h"
void ColorChange(Mat src, Mat mask, Mat dst, float red_mul, float green_mul, float blue_mul);
void SeamlessClone(Mat src, Mat dst, Mat mask, Point p, Mat blend, int flags);
void IlluminationChange(Mat src, Mat mask, Mat dst, float alpha, float beta);
void TextureFlattening(Mat src, Mat mask, Mat dst, float low_threshold, float high_threshold, int kernel_size);
#ifdef __cplusplus
}
#endif
#endif //_OPENCV3_PHOTO_H

13
vendor/gocv.io/x/gocv/photo_string.go generated vendored Normal file
View File

@ -0,0 +1,13 @@
package gocv
func (c SeamlessCloneFlags) String() string {
switch c {
case NormalClone:
return "normal-clone"
case MixedClone:
return "mixed-clone"
case MonochromeTransfer:
return "monochrome-transfer"
}
return ""
}

View File

@ -1,79 +0,0 @@
#!/bin/bash
set -eux -o pipefail
OPENCV_VERSION=${OPENCV_VERSION:-4.4.0}
#GRAPHICAL=ON
GRAPHICAL=${GRAPHICAL:-OFF}
# OpenCV looks for libjpeg in /usr/lib/libjpeg.so, for some reason. However,
# it does not seem to be there in 14.04. Create a link
mkdir -p $HOME/usr/lib
if [[ ! -f "$HOME/usr/lib/libjpeg.so" ]]; then
ln -s /usr/lib/x86_64-linux-gnu/libjpeg.so $HOME/usr/lib/libjpeg.so
fi
# Same for libpng.so
if [[ ! -f "$HOME/usr/lib/libpng.so" ]]; then
ln -s /usr/lib/x86_64-linux-gnu/libpng.so $HOME/usr/lib/libpng.so
fi
# Build OpenCV
if [[ ! -e "$HOME/usr/installed-${OPENCV_VERSION}" ]]; then
TMP=$(mktemp -d)
if [[ ! -d "opencv-${OPENCV_VERSION}/build" ]]; then
curl -sL https://github.com/opencv/opencv/archive/${OPENCV_VERSION}.zip > ${TMP}/opencv.zip
unzip -q ${TMP}/opencv.zip
mkdir opencv-${OPENCV_VERSION}/build
rm ${TMP}/opencv.zip
fi
if [[ ! -d "opencv_contrib-${OPENCV_VERSION}/modules" ]]; then
curl -sL https://github.com/opencv/opencv_contrib/archive/${OPENCV_VERSION}.zip > ${TMP}/opencv-contrib.zip
unzip -q ${TMP}/opencv-contrib.zip
rm ${TMP}/opencv-contrib.zip
fi
rmdir ${TMP}
cd opencv-${OPENCV_VERSION}/build
cmake -D WITH_IPP=${GRAPHICAL} \
-D WITH_OPENGL=${GRAPHICAL} \
-D WITH_QT=${GRAPHICAL} \
-D BUILD_EXAMPLES=OFF \
-D BUILD_TESTS=OFF \
-D BUILD_PERF_TESTS=OFF \
-D BUILD_opencv_java=OFF \
-D BUILD_opencv_python=OFF \
-D BUILD_opencv_python2=OFF \
-D BUILD_opencv_python3=OFF \
-D OPENCV_GENERATE_PKGCONFIG=ON \
-D CMAKE_INSTALL_PREFIX=$HOME/usr \
-D OPENCV_ENABLE_NONFREE=ON \
-D OPENCV_EXTRA_MODULES_PATH=../../opencv_contrib-${OPENCV_VERSION}/modules ..
make -j8
make install && touch $HOME/usr/installed-${OPENCV_VERSION}
# caffe test data
if [[ ! -d "${HOME}/testdata" ]]; then
mkdir ${HOME}/testdata
fi
#if [[ ! -f "${HOME}/testdata/bvlc_googlenet.prototxt" ]]; then
curl -sL https://raw.githubusercontent.com/opencv/opencv_extra/master/testdata/dnn/bvlc_googlenet.prototxt > ${HOME}/testdata/bvlc_googlenet.prototxt
#fi
#if [[ ! -f "${HOME}/testdata/bvlc_googlenet.caffemodel" ]]; then
curl -sL http://dl.caffe.berkeleyvision.org/bvlc_googlenet.caffemodel > ${HOME}/testdata/bvlc_googlenet.caffemodel
#fi
#if [[ ! -f "${HOME}/testdata/tensorflow_inception_graph.pb" ]]; then
curl -sL https://storage.googleapis.com/download.tensorflow.org/models/inception5h.zip > ${HOME}/testdata/inception5h.zip
unzip -o ${HOME}/testdata/inception5h.zip tensorflow_inception_graph.pb -d ${HOME}/testdata
#fi
cd ../..
touch $HOME/fresh-cache
fi

2
vendor/gocv.io/x/gocv/version.go generated vendored
View File

@ -7,7 +7,7 @@ package gocv
import "C"
// GoCVVersion of this package, for display purposes.
const GoCVVersion = "0.24.0"
const GoCVVersion = "0.26.0"
// Version returns the current golang package version
func Version() string {

24
vendor/gocv.io/x/gocv/video.cpp generated vendored
View File

@ -47,3 +47,27 @@ void CalcOpticalFlowPyrLKWithParams(Mat prevImg, Mat nextImg, Mat prevPts, Mat n
cv::calcOpticalFlowPyrLK(*prevImg, *nextImg, *prevPts, *nextPts, *status, *err, sz, maxLevel, *criteria, flags, minEigThreshold);
}
bool Tracker_Init(Tracker self, Mat image, Rect boundingBox) {
cv::Rect bb(boundingBox.x, boundingBox.y, boundingBox.width, boundingBox.height);
(*self)->init(*image, bb);
return true;
}
bool Tracker_Update(Tracker self, Mat image, Rect* boundingBox) {
cv::Rect bb;
bool ret = (*self)->update(*image, bb);
boundingBox->x = int(bb.x);
boundingBox->y = int(bb.y);
boundingBox->width = int(bb.width);
boundingBox->height = int(bb.height);
return ret;
}
TrackerMIL TrackerMIL_Create() {
return new cv::Ptr<cv::TrackerMIL>(cv::TrackerMIL::create());
}
void TrackerMIL_Close(TrackerMIL self) {
delete self;
}

78
vendor/gocv.io/x/gocv/video.go generated vendored
View File

@ -155,3 +155,81 @@ func CalcOpticalFlowPyrLKWithParams(prevImg Mat, nextImg Mat, prevPts Mat, nextP
C.CalcOpticalFlowPyrLKWithParams(prevImg.p, nextImg.p, prevPts.p, nextPts.p, status.p, err.p, winSz, C.int(maxLevel), criteria.p, C.int(flags), C.double(minEigThreshold))
return
}
// Tracker is the base interface for object tracking.
//
// see: https://docs.opencv.org/master/d0/d0a/classcv_1_1Tracker.html
//
type Tracker interface {
// Close closes the tracker, as Trackers need to be closed manually.
//
Close() error
// Init initializes the tracker with a known bounding box that surrounded the target.
// Note: this can only be called once. If you lose the object, you have to Close() the instance,
// create a new one, and call Init() on it again.
//
// see: https://docs.opencv.org/master/d0/d0a/classcv_1_1Tracker.html#a4d285747589b1bdd16d2e4f00c3255dc
//
Init(image Mat, boundingBox image.Rectangle) bool
// Update updates the tracker, returning a new bounding box and a boolean indicating whether the target was located.
//
// see: https://docs.opencv.org/master/d0/d0a/classcv_1_1Tracker.html#a549159bd0553e6a8de356f3866df1f18
//
Update(image Mat) (image.Rectangle, bool)
}
func trackerInit(trk C.Tracker, img Mat, boundingBox image.Rectangle) bool {
cBox := C.struct_Rect{
x: C.int(boundingBox.Min.X),
y: C.int(boundingBox.Min.Y),
width: C.int(boundingBox.Size().X),
height: C.int(boundingBox.Size().Y),
}
ret := C.Tracker_Init(trk, C.Mat(img.Ptr()), cBox)
return bool(ret)
}
func trackerUpdate(trk C.Tracker, img Mat) (image.Rectangle, bool) {
cBox := C.struct_Rect{}
ret := C.Tracker_Update(trk, C.Mat(img.Ptr()), &cBox)
rect := image.Rect(int(cBox.x), int(cBox.y), int(cBox.x+cBox.width), int(cBox.y+cBox.height))
return rect, bool(ret)
}
// TrackerMIL is a Tracker that uses the MIL algorithm. MIL trains a classifier in an online manner
// to separate the object from the background.
// Multiple Instance Learning avoids the drift problem, making for robust tracking.
//
// For further details, please see:
// https://docs.opencv.org/master/d0/d26/classcv_1_1TrackerMIL.html
//
type TrackerMIL struct {
p C.TrackerMIL
}
// NewTrackerMIL returns a new TrackerMIL.
func NewTrackerMIL() Tracker {
return TrackerMIL{p: C.TrackerMIL_Create()}
}
// Close closes the TrackerMIL.
func (trk TrackerMIL) Close() error {
C.TrackerMIL_Close(trk.p)
trk.p = nil
return nil
}
// Init initializes the TrackerMIL.
func (trk TrackerMIL) Init(img Mat, boundingBox image.Rectangle) bool {
return trackerInit(C.Tracker(trk.p), img, boundingBox)
}
// Update updates the TrackerMIL.
func (trk TrackerMIL) Update(img Mat) (image.Rectangle, bool) {
return trackerUpdate(C.Tracker(trk.p), img)
}
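
For orientation, here is a minimal usage sketch of the new tracker API added above; it is not part of the vendored diff, and the camera index 0 and the initial 100x100 region are assumptions you would replace with a real detection.

package main

import (
	"fmt"
	"image"

	"gocv.io/x/gocv"
)

func main() {
	webcam, err := gocv.OpenVideoCapture(0) // assumed device index
	if err != nil {
		fmt.Println(err)
		return
	}
	defer webcam.Close()

	img := gocv.NewMat()
	defer img.Close()

	tracker := gocv.NewTrackerMIL()
	defer tracker.Close()

	// Read one frame and initialize the tracker on an assumed region of interest.
	if ok := webcam.Read(&img); !ok || img.Empty() {
		return
	}
	if !tracker.Init(img, image.Rect(100, 100, 200, 200)) {
		fmt.Println("tracker init failed")
		return
	}

	// Update returns false when the target could not be located in the frame.
	for webcam.Read(&img) && !img.Empty() {
		rect, ok := tracker.Update(img)
		fmt.Println(rect, ok)
	}
}

As the Init documentation notes, a lost target means closing the tracker and creating a new one before calling Init again.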

14
vendor/gocv.io/x/gocv/video.h generated vendored
View File

@ -3,6 +3,7 @@
#ifdef __cplusplus
#include <opencv2/opencv.hpp>
#include <opencv2/video.hpp>
extern "C" {
#endif
@ -11,9 +12,15 @@ extern "C" {
#ifdef __cplusplus
typedef cv::Ptr<cv::BackgroundSubtractorMOG2>* BackgroundSubtractorMOG2;
typedef cv::Ptr<cv::BackgroundSubtractorKNN>* BackgroundSubtractorKNN;
typedef cv::Ptr<cv::Tracker>* Tracker;
typedef cv::Ptr<cv::TrackerMIL>* TrackerMIL;
typedef cv::Ptr<cv::TrackerGOTURN>* TrackerGOTURN;
#else
typedef void* BackgroundSubtractorMOG2;
typedef void* BackgroundSubtractorKNN;
typedef void* Tracker;
typedef void* TrackerMIL;
typedef void* TrackerGOTURN;
#endif
BackgroundSubtractorMOG2 BackgroundSubtractorMOG2_Create();
@ -31,6 +38,13 @@ void CalcOpticalFlowPyrLK(Mat prevImg, Mat nextImg, Mat prevPts, Mat nextPts, Ma
void CalcOpticalFlowPyrLKWithParams(Mat prevImg, Mat nextImg, Mat prevPts, Mat nextPts, Mat status, Mat err, Size winSize, int maxLevel, TermCriteria criteria, int flags, double minEigThreshold);
void CalcOpticalFlowFarneback(Mat prevImg, Mat nextImg, Mat flow, double pyrScale, int levels,
int winsize, int iterations, int polyN, double polySigma, int flags);
bool Tracker_Init(Tracker self, Mat image, Rect boundingBox);
bool Tracker_Update(Tracker self, Mat image, Rect* boundingBox);
TrackerMIL TrackerMIL_Create();
void TrackerMIL_Close(TrackerMIL self);
#ifdef __cplusplus
}
#endif

8
vendor/gocv.io/x/gocv/videoio.cpp generated vendored
View File

@ -13,10 +13,18 @@ bool VideoCapture_Open(VideoCapture v, const char* uri) {
return v->open(uri);
}
bool VideoCapture_OpenWithAPI(VideoCapture v, const char* uri, int apiPreference) {
return v->open(uri, apiPreference);
}
bool VideoCapture_OpenDevice(VideoCapture v, int device) {
return v->open(device);
}
bool VideoCapture_OpenDeviceWithAPI(VideoCapture v, int device, int apiPreference) {
return v->open(device, apiPreference);
}
void VideoCapture_Set(VideoCapture v, int prop, double param) {
v->set(prop, param);
}

174
vendor/gocv.io/x/gocv/videoio.go generated vendored
View File

@ -13,6 +13,111 @@ import (
"unsafe"
)
// VideoCaptureAPI selects the preferred API backend for a capture object.
// Note: backends are available only if they have been built with your OpenCV binaries.
type VideoCaptureAPI int
const (
// Auto detect == 0
VideoCaptureAny VideoCaptureAPI = 0
// Video For Windows (obsolete, removed)
VideoCaptureVFW VideoCaptureAPI = 200
// V4L/V4L2 capturing support
VideoCaptureV4L VideoCaptureAPI = 200
// Same as VideoCaptureV4L
VideoCaptureV4L2 VideoCaptureAPI = 200
// IEEE 1394 drivers
VideoCaptureFirewire VideoCaptureAPI = 300
// Same value as VideoCaptureFirewire
VideoCaptureFireware VideoCaptureAPI = 300
// Same value as VideoCaptureFirewire
VideoCaptureIEEE1394 VideoCaptureAPI = 300
// Same value as VideoCaptureFirewire
VideoCaptureDC1394 VideoCaptureAPI = 300
// Same value as VideoCaptureFirewire
VideoCaptureCMU1394 VideoCaptureAPI = 300
// QuickTime (obsolete, removed)
VideoCaptureQT VideoCaptureAPI = 500
// Unicap drivers (obsolete, removed)
VideoCaptureUnicap VideoCaptureAPI = 600
// DirectShow (via videoInput)
VideoCaptureDshow VideoCaptureAPI = 700
// PvAPI, Prosilica GigE SDK
VideoCapturePvAPI VideoCaptureAPI = 800
// OpenNI (for Kinect)
VideoCaptureOpenNI VideoCaptureAPI = 900
// OpenNI (for Asus Xtion)
VideoCaptureOpenNIAsus VideoCaptureAPI = 910
// Android - not used
VideoCaptureAndroid VideoCaptureAPI = 1000
// XIMEA Camera API
VideoCaptureXiAPI VideoCaptureAPI = 1100
// AVFoundation framework for iOS (OS X Lion will have the same API)
VideoCaptureAVFoundation VideoCaptureAPI = 1200
// Smartek Giganetix GigEVisionSDK
VideoCaptureGiganetix VideoCaptureAPI = 1300
// Microsoft Media Foundation (via videoInput)
VideoCaptureMSMF VideoCaptureAPI = 1400
// Microsoft Windows Runtime using Media Foundation
VideoCaptureWinRT VideoCaptureAPI = 1410
// RealSense (former Intel Perceptual Computing SDK)
VideoCaptureIntelPerc VideoCaptureAPI = 1500
// Synonym for VideoCaptureIntelPerc
VideoCaptureRealsense VideoCaptureAPI = 1500
// OpenNI2 (for Kinect)
VideoCaptureOpenNI2 VideoCaptureAPI = 1600
// OpenNI2 (for Asus Xtion and Occipital Structure sensors)
VideoCaptureOpenNI2Asus VideoCaptureAPI = 1610
// gPhoto2 connection
VideoCaptureGPhoto2 VideoCaptureAPI = 1700
// GStreamer
VideoCaptureGstreamer VideoCaptureAPI = 1800
// Open and record video file or stream using the FFMPEG library
VideoCaptureFFmpeg VideoCaptureAPI = 1900
// OpenCV Image Sequence (e.g. img_%02d.jpg)
VideoCaptureImages VideoCaptureAPI = 2000
// Aravis SDK
VideoCaptureAravis VideoCaptureAPI = 2100
// Built-in OpenCV MotionJPEG codec
VideoCaptureOpencvMjpeg VideoCaptureAPI = 2200
// Intel MediaSDK
VideoCaptureIntelMFX VideoCaptureAPI = 2300
// XINE engine (Linux)
VideoCaptureXINE VideoCaptureAPI = 2400
)
// VideoCaptureProperties are the properties used for VideoCapture operations.
type VideoCaptureProperties int
@ -150,6 +255,31 @@ const (
// VideoCaptureAutoFocus controls video capture auto focus.
VideoCaptureAutoFocus VideoCaptureProperties = 39
// VideoCaptureSarNumerator controls the sample aspect ratio: num/den (num)
VideoCaptureSarNumerator VideoCaptureProperties = 40
// VideoCaptureSarDenominator controls the sample aspect ratio: num/den (den)
VideoCaptureSarDenominator VideoCaptureProperties = 41
// VideoCaptureBackend is the current api backend (VideoCaptureAPI). Read-only property.
VideoCaptureBackend VideoCaptureProperties = 42
// VideoCaptureChannel controls the video input or channel number (only for cameras that support it).
VideoCaptureChannel VideoCaptureProperties = 43
// VideoCaptureAutoWB controls the auto white-balance.
VideoCaptureAutoWB VideoCaptureProperties = 44
// VideoCaptureWBTemperature controls the white-balance color temperature.
VideoCaptureWBTemperature VideoCaptureProperties = 45
// VideoCaptureCodecPixelFormat shows the codec's pixel format (4-character code). Read-only property.
// Subset of AV_PIX_FMT_* or -1 if unknown.
VideoCaptureCodecPixelFormat VideoCaptureProperties = 46
// VideoCaptureBitrate displays the video bitrate in kbits/s. Read-only property.
VideoCaptureBitrate VideoCaptureProperties = 47
)
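
As a sketch only (not part of the vendored file), the new properties are driven through the existing Set and Get calls; whether a given property takes effect depends on the backend and camera, and the device index 0 is an assumption.

package main

import (
	"fmt"

	"gocv.io/x/gocv"
)

func main() {
	vc, err := gocv.OpenVideoCapture(0) // assumed device index
	if err != nil {
		fmt.Println(err)
		return
	}
	defer vc.Close()

	// Request auto white-balance where the backend supports it.
	vc.Set(gocv.VideoCaptureAutoWB, 1)

	// Read-only properties: active backend and reported bitrate.
	fmt.Println("backend:", gocv.VideoCaptureAPI(vc.Get(gocv.VideoCaptureBackend)))
	fmt.Println("bitrate (kbit/s):", vc.Get(gocv.VideoCaptureBitrate))
}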
// VideoCapture is a wrapper around the OpenCV VideoCapture class.
@ -176,6 +306,21 @@ func VideoCaptureFile(uri string) (vc *VideoCapture, err error) {
return
}
// VideoCaptureFileWithAPI opens a VideoCapture from a file with the given API preference and prepares
// to start capturing. It returns an error if it fails to open the file stored in the uri path.
func VideoCaptureFileWithAPI(uri string, apiPreference VideoCaptureAPI) (vc *VideoCapture, err error) {
vc = &VideoCapture{p: C.VideoCapture_New()}
cURI := C.CString(uri)
defer C.free(unsafe.Pointer(cURI))
if !C.VideoCapture_OpenWithAPI(vc.p, cURI, C.int(apiPreference)) {
err = fmt.Errorf("Error opening file: %s with api backend: %d", uri, apiPreference)
}
return
}
// VideoCaptureDevice opens a VideoCapture from a device and prepares
// to start capturing. It returns error if it fails to open the video device.
func VideoCaptureDevice(device int) (vc *VideoCapture, err error) {
@ -188,6 +333,18 @@ func VideoCaptureDevice(device int) (vc *VideoCapture, err error) {
return
}
// VideoCaptureDeviceWithAPI opens a VideoCapture from a device with the given API preference.
// It returns an error if it fails to open the video device.
func VideoCaptureDeviceWithAPI(device int, apiPreference VideoCaptureAPI) (vc *VideoCapture, err error) {
vc = &VideoCapture{p: C.VideoCapture_New()}
if !C.VideoCapture_OpenDeviceWithAPI(vc.p, C.int(device), C.int(apiPreference)) {
err = fmt.Errorf("Error opening device: %d with api backend: %d", device, apiPreference)
}
return
}
// Close VideoCapture object.
func (v *VideoCapture) Close() error {
C.VideoCapture_Close(v.p)
@ -228,7 +385,7 @@ func (v *VideoCapture) CodecString() string {
res := ""
hexes := []int64{0xff, 0xff00, 0xff0000, 0xff000000}
for i, h := range hexes {
res += string(int64(v.Get(VideoCaptureFOURCC)) & h >> (uint(i * 8)))
res += string(rune(int64(v.Get(VideoCaptureFOURCC)) & h >> (uint(i * 8))))
}
return res
}
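
The hunk above swaps string(int64(...)) for string(rune(...)): newer Go toolchains flag integer-to-string conversions via go vet, and the behavior is unchanged because each masked byte is a single ASCII character. A standalone sketch of the same decoding, using an assumed FOURCC value:

package main

import "fmt"

func main() {
	// Assumed FOURCC as returned by Get(VideoCaptureFOURCC): "MJPG" packed little-endian.
	fourcc := int64(0x47504A4D)

	codec := ""
	for i := 0; i < 4; i++ {
		codec += string(rune((fourcc >> uint(i*8)) & 0xff))
	}
	fmt.Println(codec) // MJPG
}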
@ -330,3 +487,18 @@ func OpenVideoCapture(v interface{}) (*VideoCapture, error) {
return nil, errors.New("argument must be int or string")
}
}
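// OpenVideoCaptureWithAPI opens a VideoCapture from a device index (int) or a
// file/stream path (string), like OpenVideoCapture, but using the given API preference.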
func OpenVideoCaptureWithAPI(v interface{}, apiPreference VideoCaptureAPI) (*VideoCapture, error) {
switch vv := v.(type) {
case int:
return VideoCaptureDeviceWithAPI(vv, apiPreference)
case string:
id, err := strconv.Atoi(vv)
if err == nil {
return VideoCaptureDeviceWithAPI(id, apiPreference)
}
return VideoCaptureFileWithAPI(vv, apiPreference)
default:
return nil, errors.New("argument must be int or string")
}
}
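
And a brief usage sketch for the new API-preference constructors (again not part of the diff); the V4L2 backend and device index are assumptions, and opening fails if that backend was not compiled into your OpenCV build.

package main

import (
	"fmt"

	"gocv.io/x/gocv"
)

func main() {
	// Open device 0 explicitly through the V4L2 backend (Linux); any of the
	// VideoCaptureAPI constants above can be substituted.
	vc, err := gocv.OpenVideoCaptureWithAPI(0, gocv.VideoCaptureV4L2)
	if err != nil {
		fmt.Println(err)
		return
	}
	defer vc.Close()

	img := gocv.NewMat()
	defer img.Close()
	if vc.Read(&img) {
		fmt.Println("frame:", img.Cols(), "x", img.Rows(), "codec:", vc.CodecString())
	}
}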

2
vendor/gocv.io/x/gocv/videoio.h generated vendored
View File

@ -20,7 +20,9 @@ typedef void* VideoWriter;
VideoCapture VideoCapture_New();
void VideoCapture_Close(VideoCapture v);
bool VideoCapture_Open(VideoCapture v, const char* uri);
bool VideoCapture_OpenWithAPI(VideoCapture v, const char* uri, int apiPreference);
bool VideoCapture_OpenDevice(VideoCapture v, int device);
bool VideoCapture_OpenDeviceWithAPI(VideoCapture v, int device, int apiPreference);
void VideoCapture_Set(VideoCapture v, int prop, double param);
double VideoCapture_Get(VideoCapture v, int prop);
int VideoCapture_IsOpened(VideoCapture v);

View File

@ -1,5 +1,63 @@
package gocv
func (c VideoCaptureAPI) String() string {
switch c {
case VideoCaptureAny:
return "video-capture-any"
case VideoCaptureV4L2:
return "video-capture-v4l2"
case VideoCaptureFirewire:
return "video-capture-firewire"
case VideoCaptureQT:
return "video-capture-qt"
case VideoCaptureUnicap:
return "video-capture-unicap"
case VideoCaptureDshow:
return "video-capture-dshow"
case VideoCapturePvAPI:
return "video-capture-pvapi"
case VideoCaptureOpenNI:
return "video-capture-openni"
case VideoCaptureOpenNIAsus:
return "video-capture-openni-asus"
case VideoCaptureAndroid:
return "video-capture-android"
case VideoCaptureXiAPI:
return "video-capture-xiapi"
case VideoCaptureAVFoundation:
return "video-capture-av-foundation"
case VideoCaptureGiganetix:
return "video-capture-giganetix"
case VideoCaptureMSMF:
return "video-capture-msmf"
case VideoCaptureWinRT:
return "video-capture-winrt"
case VideoCaptureIntelPerc:
return "video-capture-intel-perc"
case VideoCaptureOpenNI2:
return "video-capture-openni2"
case VideoCaptureOpenNI2Asus:
return "video-capture-openni2-asus"
case VideoCaptureGPhoto2:
return "video-capture-gphoto2"
case VideoCaptureGstreamer:
return "video-capture-gstreamer"
case VideoCaptureFFmpeg:
return "video-capture-ffmpeg"
case VideoCaptureImages:
return "video-capture-images"
case VideoCaptureAravis:
return "video-capture-aravis"
case VideoCaptureOpencvMjpeg:
return "video-capture-opencv-mjpeg"
case VideoCaptureIntelMFX:
return "video-capture-intel-mfx"
case VideoCaptureXINE:
return "video-capture-xine"
}
return ""
}
func (c VideoCaptureProperties) String() string {
switch c {
case VideoCapturePosMsec:
@ -80,6 +138,22 @@ func (c VideoCaptureProperties) String() string {
return "video-capture-buffer-size"
case VideoCaptureAutoFocus:
return "video-capture-auto-focus"
case VideoCaptureSarNumerator:
return "video-capture-sar-numerator"
case VideoCaptureSarDenominator:
return "video-capture-sar-denominator"
case VideoCaptureBackend:
return "video-capture-backend"
case VideoCaptureChannel:
return "video-capture-channel"
case VideoCaptureAutoWB:
return "video-capture-auto-wb"
case VideoCaptureWBTemperature:
return "video-capture-wb-temperature"
case VideoCaptureCodecPixelFormat:
return "video-capture-pixel-format"
case VideoCaptureBitrate:
return "video-capture-bitrate"
}
return ""
}

View File

@ -11,18 +11,18 @@ echo.
REM This is why there is no progress bar:
REM https://github.com/PowerShell/PowerShell/issues/2138
echo Downloading: opencv-4.4.0.zip [91MB]
powershell -command "[Net.ServicePointManager]::SecurityProtocol = [Net.SecurityProtocolType]::Tls12; $ProgressPreference = 'SilentlyContinue'; Invoke-WebRequest -Uri https://github.com/opencv/opencv/archive/4.4.0.zip -OutFile c:\opencv\opencv-4.4.0.zip"
echo Downloading: opencv-4.5.1.zip [91MB]
powershell -command "[Net.ServicePointManager]::SecurityProtocol = [Net.SecurityProtocolType]::Tls12; $ProgressPreference = 'SilentlyContinue'; Invoke-WebRequest -Uri https://github.com/opencv/opencv/archive/4.5.1.zip -OutFile c:\opencv\opencv-4.5.1.zip"
echo Extracting...
powershell -command "$ProgressPreference = 'SilentlyContinue'; Expand-Archive -Path c:\opencv\opencv-4.4.0.zip -DestinationPath c:\opencv"
del c:\opencv\opencv-4.4.0.zip /q
powershell -command "$ProgressPreference = 'SilentlyContinue'; Expand-Archive -Path c:\opencv\opencv-4.5.1.zip -DestinationPath c:\opencv"
del c:\opencv\opencv-4.5.1.zip /q
echo.
echo Downloading: opencv_contrib-4.4.0.zip [58MB]
powershell -command "[Net.ServicePointManager]::SecurityProtocol = [Net.SecurityProtocolType]::Tls12; $ProgressPreference = 'SilentlyContinue'; Invoke-WebRequest -Uri https://github.com/opencv/opencv_contrib/archive/4.4.0.zip -OutFile c:\opencv\opencv_contrib-4.4.0.zip"
echo Downloading: opencv_contrib-4.5.1.zip [58MB]
powershell -command "[Net.ServicePointManager]::SecurityProtocol = [Net.SecurityProtocolType]::Tls12; $ProgressPreference = 'SilentlyContinue'; Invoke-WebRequest -Uri https://github.com/opencv/opencv_contrib/archive/4.5.1.zip -OutFile c:\opencv\opencv_contrib-4.5.1.zip"
echo Extracting...
powershell -command "$ProgressPreference = 'SilentlyContinue'; Expand-Archive -Path c:\opencv\opencv_contrib-4.4.0.zip -DestinationPath c:\opencv"
del c:\opencv\opencv_contrib-4.4.0.zip /q
powershell -command "$ProgressPreference = 'SilentlyContinue'; Expand-Archive -Path c:\opencv\opencv_contrib-4.5.1.zip -DestinationPath c:\opencv"
del c:\opencv\opencv_contrib-4.5.1.zip /q
echo.
echo Done with downloading and extracting sources.
@ -32,9 +32,9 @@ echo on
cd /D C:\opencv\build
set PATH=%PATH%;C:\Program Files (x86)\CMake\bin;C:\mingw-w64\x86_64-6.3.0-posix-seh-rt_v5-rev1\mingw64\bin
cmake C:\opencv\opencv-4.4.0 -G "MinGW Makefiles" -BC:\opencv\build -DENABLE_CXX11=ON -DOPENCV_EXTRA_MODULES_PATH=C:\opencv\opencv_contrib-4.4.0\modules -DBUILD_SHARED_LIBS=ON -DWITH_IPP=OFF -DWITH_MSMF=OFF -DBUILD_EXAMPLES=OFF -DBUILD_TESTS=OFF -DBUILD_PERF_TESTS=OFF -DBUILD_opencv_java=OFF -DBUILD_opencv_python=OFF -DBUILD_opencv_python2=OFF -DBUILD_opencv_python3=OFF -DBUILD_DOCS=OFF -DENABLE_PRECOMPILED_HEADERS=OFF -DBUILD_opencv_saliency=OFF -DCPU_DISPATCH= -DOPENCV_GENERATE_PKGCONFIG=ON -DWITH_OPENCL_D3D11_NV=OFF -DOPENCV_ALLOCATOR_STATS_COUNTER_TYPE=int64_t -Wno-dev
cmake C:\opencv\opencv-4.5.1 -G "MinGW Makefiles" -BC:\opencv\build -DENABLE_CXX11=ON -DOPENCV_EXTRA_MODULES_PATH=C:\opencv\opencv_contrib-4.5.1\modules -DBUILD_SHARED_LIBS=ON -DWITH_IPP=OFF -DWITH_MSMF=OFF -DBUILD_EXAMPLES=OFF -DBUILD_TESTS=OFF -DBUILD_PERF_TESTS=OFF -DBUILD_opencv_java=OFF -DBUILD_opencv_python=OFF -DBUILD_opencv_python2=OFF -DBUILD_opencv_python3=OFF -DBUILD_DOCS=OFF -DENABLE_PRECOMPILED_HEADERS=OFF -DBUILD_opencv_saliency=OFF -DCPU_DISPATCH= -DOPENCV_GENERATE_PKGCONFIG=ON -DWITH_OPENCL_D3D11_NV=OFF -DOPENCV_ALLOCATOR_STATS_COUNTER_TYPE=int64_t -Wno-dev
mingw32-make -j%NUMBER_OF_PROCESSORS%
mingw32-make install
rmdir c:\opencv\opencv-4.4.0 /s /q
rmdir c:\opencv\opencv_contrib-4.4.0 /s /q
rmdir c:\opencv\opencv-4.5.1 /s /q
rmdir c:\opencv\opencv_contrib-4.5.1 /s /q
chdir /D %GOPATH%\src\gocv.io\x\gocv