This commit is contained in:
Cyrille Nofficial 2022-08-21 22:31:19 +02:00
parent 4be2f5e6e2
commit 3ae2986580
95 changed files with 18467 additions and 0 deletions

1
go.mod
View File

@@ -7,6 +7,7 @@ require (
github.com/cyrilix/robocar-protobuf/go v1.0.5
github.com/eclipse/paho.mqtt.golang v1.4.1
go.uber.org/zap v1.21.0
gocv.io/x/gocv v0.31.0
google.golang.org/protobuf v1.28.0
)

4
go.sum
View File

@@ -16,9 +16,11 @@ github.com/google/go-cmp v0.5.5 h1:Khx7svrCpmxxtHBq5j2mp/xVjsi8hQMfNLvJFAlrGgU=
github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/gorilla/websocket v1.4.2 h1:+/TMaTYc4QFitKJxsQ7Yye35DkWvkdLcvGKqM+x0Ufc=
github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
github.com/hybridgroup/mjpeg v0.0.0-20140228234708-4680f319790e/go.mod h1:eagM805MRKrioHYuU7iKLUyFPVKqVV6um5DAvCkUtXs=
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
github.com/pascaldekloe/goe v0.1.0/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc=
github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
@@ -36,6 +38,8 @@ go.uber.org/multierr v1.6.0 h1:y6IPFStTAIT5Ytl7/XYmHvzXQ7S3g/IeZW9hyZ5thw4=
go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU=
go.uber.org/zap v1.21.0 h1:WefMeulhovoZ2sYXz7st6K0sLj7bBhpiFaud4r4zST8=
go.uber.org/zap v1.21.0/go.mod h1:wjWOCqI0f2ZZrJF/UufIOkiC8ii6tm1iqIsLo76RfJw=
gocv.io/x/gocv v0.31.0 h1:BHDtK8v+YPvoSPQTTiZB2fM/7BLg6511JqkruY2z6LQ=
gocv.io/x/gocv v0.31.0/go.mod h1:oc6FvfYqfBp99p+yOEzs9tbYF9gOrAQSeL/dyIPefJU=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=

11
pkg/steering/bbox.go Normal file
View File

@@ -0,0 +1,11 @@
package steering
import (
"image"
)
// GroupBBoxes is meant to group overlapping bounding boxes; for now it only
// returns a copy of its input.
func GroupBBoxes(bboxes []*image.Rectangle) []*image.Rectangle {
resp := make([]*image.Rectangle, len(bboxes))
// copy takes (dst, src): fill resp from the input boxes
copy(resp, bboxes)
return resp
}
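The grouping itself is left unimplemented here; as a rough sketch (an editor's assumption, not part of this commit), overlapping detections could be merged with the standard library's `Rectangle.Overlaps` and `Rectangle.Union`, repeating until no two boxes intersect:

```go
package steering

import "image"

// mergeOverlapping is a hypothetical helper: it repeatedly unions any two
// rectangles that overlap, so each returned box covers one cluster of
// detections.
func mergeOverlapping(bboxes []*image.Rectangle) []*image.Rectangle {
	merged := append([]*image.Rectangle(nil), bboxes...)
	for changed := true; changed; {
		changed = false
		for i := 0; i < len(merged) && !changed; i++ {
			for j := i + 1; j < len(merged); j++ {
				if merged[i].Overlaps(*merged[j]) {
					u := merged[i].Union(*merged[j])
					merged[i] = &u
					// drop the absorbed rectangle and rescan
					merged = append(merged[:j], merged[j+1:]...)
					changed = true
					break
				}
			}
		}
	}
	return merged
}
```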

163
pkg/steering/bbox_test.go Normal file
View File

@@ -0,0 +1,163 @@
package steering
import (
"encoding/json"
"fmt"
"gocv.io/x/gocv"
"image"
"image/color"
_ "image/jpeg"
"os"
"reflect"
"testing"
)
type ObjectsList struct {
BBoxes []BBox `json:"bboxes"`
}
type BBox struct {
Left float32 `json:"left"`
Top float32 `json:"top"`
Bottom float32 `json:"bottom"`
Right float32 `json:"right"`
Confidence float32 `json:"confidence"`
}
// toRect converts the box's normalized [0, 1] coordinates into pixel
// coordinates for an image of the given size.
func (bb *BBox) toRect(imgWidth, imgHeight int) image.Rectangle {
return image.Rect(
int(bb.Left*float32(imgWidth)),
int(bb.Top*float32(imgHeight)),
int(bb.Right*float32(imgWidth)),
int(bb.Bottom*float32(imgHeight)),
)
}
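// Worked example (hypothetical numbers): on a 160x120 frame, toRect maps
// Left=0.25, Top=0.5, Right=0.75, Bottom=1.0 to image.Rect(40, 60, 120, 120).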
// loadData reads the bounding-boxes JSON file and the matching JPEG image
// for the given dataset name from test_data/.
func loadData(dataName string) (*gocv.Mat, []BBox, error) {
contentBBoxes, err := os.ReadFile(fmt.Sprintf("test_data/bboxes-%s.json", dataName))
if err != nil {
return nil, []BBox{}, fmt.Errorf("unable to load json file for bbox of '%v': %w", dataName, err)
}
var obj ObjectsList
err = json.Unmarshal(contentBBoxes, &obj)
if err != nil {
return nil, []BBox{}, fmt.Errorf("unable to unmarsh json file for bbox of '%v': %w", dataName, err)
}
imgContent, err := os.ReadFile(fmt.Sprintf("test_data/img-%s.jpg", dataName))
if err != nil {
return nil, []BBox{}, fmt.Errorf("unable to load jpg file of '%v': %w", dataName, err)
}
img, err := gocv.IMDecode(imgContent, gocv.IMReadUnchanged)
if err != nil {
return nil, []BBox{}, fmt.Errorf("unable to load jpg of '%v': %w", dataName, err)
}
return &img, obj.BBoxes, nil
}
// drawImage draws each bounding box and its confidence score onto the image.
func drawImage(img *gocv.Mat, bboxes []BBox) {
for _, bb := range bboxes {
gocv.Rectangle(img, bb.toRect(img.Cols(), img.Rows()), color.RGBA{R: 0, G: 255, B: 0, A: 0}, 2)
gocv.PutText(
img,
fmt.Sprintf("%.2f", bb.Confidence),
image.Point{
X: int(bb.Left*float32(img.Cols()) + 10.),
Y: int(bb.Top*float32(img.Rows()) + 10.),
},
gocv.FontHersheyTriplex,
0.4,
color.RGBA{R: 0, G: 0, B: 0, A: 0},
1)
}
}
// saveImage writes the annotated image as a JPEG under test_result/.
func saveImage(name string, img *gocv.Mat) error {
err := os.MkdirAll("test_result", os.ModePerm)
if err != nil {
return fmt.Errorf("unable to create directory for test result: %w", err)
}
jpg, err := gocv.IMEncode(gocv.JPEGFileExt, *img)
if err != nil {
return fmt.Errorf("unable to encode jpg image: %w", err)
}
defer jpg.Close()
err = os.WriteFile(fmt.Sprintf("test_result/%s.jpg", name), jpg.GetBytes(), os.ModePerm)
if err != nil {
return fmt.Errorf("unable to write jpeg file: %w", err)
}
return nil
}
func DisplayImageAndBBoxes(dataName string) error {
img, bboxes, err := loadData(dataName)
if err != nil {
return fmt.Errorf("unable to load image and bboxes: %w", err)
}
drawImage(img, bboxes)
err = saveImage(dataName, img)
if err != nil {
return fmt.Errorf("unable to save image: %w", err)
}
return nil
}
func TestDisplayBBox(t *testing.T) {
type args struct {
dataName string
}
tests := []struct {
name string
args args
//want []*image.Rectangle
}{
{
name: "default",
args: args{dataName: "01"},
},
{
name: "02",
args: args{dataName: "02"},
},
{
name: "03",
args: args{dataName: "03"},
},
{
name: "04",
args: args{dataName: "04"},
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
err := DisplayImageAndBBoxes(tt.args.dataName)
if err != nil {
t.Errorf("unable to draw image: %v", err)
}
})
}
}
func TestGroupBBoxes(t *testing.T) {
type args struct {
bboxes []*image.Rectangle
}
tests := []struct {
name string
args args
want []*image.Rectangle
}{
// TODO: Add test cases.
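// An illustrative case (an editor's sketch, not from this commit): with a
// copy-through implementation, the output mirrors the input.
{
name: "single box",
args: args{bboxes: []*image.Rectangle{{Min: image.Point{X: 0, Y: 0}, Max: image.Point{X: 10, Y: 10}}}},
want: []*image.Rectangle{{Min: image.Point{X: 0, Y: 0}, Max: image.Point{X: 10, Y: 10}}},
},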
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
if got := GroupBBoxes(tt.args.bboxes); !reflect.DeepEqual(got, tt.want) {
t.Errorf("GroupBBoxes() = %v, want %v", got, tt.want)
}
})
}
}

12
pkg/steering/test_data/bboxes-01.json Normal file
View File

@@ -0,0 +1,12 @@
{
"bboxes": [
{
"right": 0.5258789,
"top": 0.1706543,
"left": 0.26660156,
"bottom": 0.47583008,
"confidence": 0.4482422
}
]
}

25
pkg/steering/test_data/bboxes-02.json Normal file
View File

@@ -0,0 +1,25 @@
{
"bboxes": [
{
"right": 0.6879883,
"top": 0.115234375,
"left": 0.1586914,
"bottom": 0.66796875,
"confidence": 0.82714844
},
{
"right": 0.6894531,
"top": 0.111816406,
"left": 0.15698242,
"bottom": 0.66748047,
"confidence": 0.83447266
},
{
"right": 0.6875,
"top": 0.11328125,
"left": 0.15673828,
"bottom": 0.66748047,
"confidence": 0.85253906
}
]
}

27
pkg/steering/test_data/bboxes-03.json Normal file
View File

@@ -0,0 +1,27 @@
{
"bboxes": [
{
"right": 0.2211914,
"top": 0.14953613,
"left": 0.0015258789,
"bottom": 0.64941406,
"confidence": 0.5595703
},
{
"right": 0.22192383,
"top": 0.14819336,
"left": 0.0014038086,
"bottom": 0.64941406,
"confidence": 0.5493164
},
{
"right": 0.21948242,
"top": 0.1459961,
"left": 0.0015258789,
"bottom": 0.65185547,
"confidence": 0.5595703
}
]
}

25
pkg/steering/test_data/bboxes-04.json Normal file
View File

@@ -0,0 +1,25 @@
{
"bboxes": [
{
"right": 0.99902344,
"top": 0.08947754,
"left": 0.8095703,
"bottom": 0.54296875,
"confidence": 0.4741211
},
{
"right": 0.99902344,
"top": 0.08666992,
"left": 0.80859375,
"bottom": 0.54003906,
"confidence": 0.453125
},
{
"right": 0.99902344,
"top": 0.09423828,
"left": 0.8095703,
"bottom": 0.54345703,
"confidence": 0.44995117
}
]
}

BIN
pkg/steering/test_data/img-01.jpg Executable file

Binary file not shown.

Size: 5.6 KiB

BIN
pkg/steering/test_data/img-02.jpg Executable file

Binary file not shown.

Size: 6.4 KiB

BIN
pkg/steering/test_data/img-03.jpg Executable file

Binary file not shown.

Size: 6.1 KiB

BIN
pkg/steering/test_data/img-04.jpg Executable file

Binary file not shown.

Size: 5.0 KiB

28
vendor/gocv.io/x/gocv/.astylerc generated vendored Normal file
View File

@@ -0,0 +1,28 @@
--lineend=linux
--style=google
--indent=spaces=4
--indent-col1-comments
--convert-tabs
--attach-return-type
--attach-namespaces
--attach-classes
--attach-inlines
--add-brackets
--add-braces
--align-pointer=type
--align-reference=type
--max-code-length=100
--break-after-logical
--pad-comma
--pad-oper
--unpad-paren
--break-blocks
--pad-header

12
vendor/gocv.io/x/gocv/.gitignore generated vendored Normal file
View File

@@ -0,0 +1,12 @@
profile.cov
count.out
*.swp
*.snap
/parts
/prime
/stage
.vscode/
/build
.idea/
contrib/data.yaml
contrib/testOilPainting.png

1019
vendor/gocv.io/x/gocv/CHANGELOG.md generated vendored Normal file

File diff suppressed because it is too large

76
vendor/gocv.io/x/gocv/CODE_OF_CONDUCT.md generated vendored Normal file
View File

@@ -0,0 +1,76 @@
# Contributor Covenant Code of Conduct
## Our Pledge
In the interest of fostering an open and welcoming environment, we as
contributors and maintainers pledge to making participation in our project and
our community a harassment-free experience for everyone, regardless of age, body
size, disability, ethnicity, sex characteristics, gender identity and expression,
level of experience, education, socio-economic status, nationality, personal
appearance, race, religion, or sexual identity and orientation.
## Our Standards
Examples of behavior that contributes to creating a positive environment
include:
* Using welcoming and inclusive language
* Being respectful of differing viewpoints and experiences
* Gracefully accepting constructive criticism
* Focusing on what is best for the community
* Showing empathy towards other community members
Examples of unacceptable behavior by participants include:
* The use of sexualized language or imagery and unwelcome sexual attention or
advances
* Trolling, insulting/derogatory comments, and personal or political attacks
* Public or private harassment
* Publishing others' private information, such as a physical or electronic
address, without explicit permission
* Other conduct which could reasonably be considered inappropriate in a
professional setting
## Our Responsibilities
Project maintainers are responsible for clarifying the standards of acceptable
behavior and are expected to take appropriate and fair corrective action in
response to any instances of unacceptable behavior.
Project maintainers have the right and responsibility to remove, edit, or
reject comments, commits, code, wiki edits, issues, and other contributions
that are not aligned to this Code of Conduct, or to ban temporarily or
permanently any contributor for other behaviors that they deem inappropriate,
threatening, offensive, or harmful.
## Scope
This Code of Conduct applies both within project spaces and in public spaces
when an individual is representing the project or its community. Examples of
representing a project or community include using an official project e-mail
address, posting via an official social media account, or acting as an appointed
representative at an online or offline event. Representation of a project may be
further defined and clarified by project maintainers.
## Enforcement
Instances of abusive, harassing, or otherwise unacceptable behavior may be
reported by contacting the project team at info@hybridgroup.com. All
complaints will be reviewed and investigated and will result in a response that
is deemed necessary and appropriate to the circumstances. The project team is
obligated to maintain confidentiality with regard to the reporter of an incident.
Further details of specific enforcement policies may be posted separately.
Project maintainers who do not follow or enforce the Code of Conduct in good
faith may face temporary or permanent repercussions as determined by other
members of the project's leadership.
## Attribution
This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4,
available at https://www.contributor-covenant.org/version/1/4/code-of-conduct.html
[homepage]: https://www.contributor-covenant.org
For answers to common questions about this code of conduct, see
https://www.contributor-covenant.org/faq

136
vendor/gocv.io/x/gocv/CONTRIBUTING.md generated vendored Normal file
View File

@@ -0,0 +1,136 @@
# How to contribute
Thank you for your interest in improving GoCV.
We would like your help to make this project better, so we appreciate any contributions. See if one of the following descriptions matches your situation:
### Newcomer to GoCV, to OpenCV, or to computer vision in general
We'd love to get your feedback on getting started with GoCV. Run into any difficulty, confusion, or anything else? You are not alone. We want to know about your experience, so we can help the next people. Please open a Github issue with your questions, or get in touch directly with us.
### Something in GoCV is not working as you expect
Please open a Github issue with your problem, and we will be happy to assist.
### Something you want/need from OpenCV does not appear to be in GoCV
We probably have not implemented it yet. Please take a look at our [ROADMAP.md](ROADMAP.md). Your pull request adding the functionality to GoCV would be greatly appreciated.
### You found some Python code on the Internet that performs some computer vision task, and you want to do it using GoCV
Please open a Github issue with your needs, and we can see what we can do.
## How to use our Github repository
The `release` branch of this repo will always have the latest released version of GoCV. All of the active development work for the next release will take place in the `dev` branch. GoCV will use semantic versioning and will create a tag/release for each release.
Here is how to contribute back some code or documentation:
- Fork repo
- Create a feature branch off of the `dev` branch
- Make some useful change
- Submit a pull request against the `dev` branch.
- Be kind
## How to add a function from OpenCV to GoCV
Here are a few basic guidelines on how to add a function from OpenCV to GoCV:
- Please open a Github issue. We want to help, and also make sure that there is no duplication of effort. Sometimes what you need is already being worked on by someone else.
- Use the proper Go style naming `MissingFunction()` for the Go wrapper.
- Make any output parameters `Mat*` to indicate to developers that the underlying OpenCV data will be changed by the function.
- Use Go types when possible as parameters for example `image.Point` and then convert to the appropriate OpenCV struct. Also define a new type based on `int` and `const` values instead of just passing "magic numbers" as params. For example, the `VideoCaptureProperties` type used in `videoio.go`.
- Always add the function to the GoCV file named the same as the OpenCV module to which the function belongs.
- If the new function is in a module that is not yet implemented by GoCV, a new set of files for that module will need to be added.
- Always add a "smoke" test for the new function being added. We are not testing OpenCV itself, but just the GoCV wrapper, so all that is needed generally is just exercising the new function.
- If OpenCV has any default params for a function, we have been implementing 2 versions of the function since Go does not support overloading. For example, with an OpenCV function:
```c
opencv::xYZ(int p1, int p2, int p3=2, int p4=3);
```
We would define 2 functions in GoCV:
```go
// uses default param values
XYZ(p1, p2)
// sets each param
XYZWithParams(p1, p2, p3, p4)
```
## How to run tests
To run the tests:
```
go test .
go test ./contrib/.
```
If you want to run an individual test, you can provide a RegExp to the `-run` argument:
```
go test -run TestMat
```
If you are using Intel OpenVINO, you can run those tests using:
```
go test ./openvino/...
```
## Contributing workflow
This section provides a short description of one of many possible workflows you can follow to contribute to `GoCV`. This workflow is based on multiple [git remotes](https://git-scm.com/docs/git-remote) and it's by no means the only workflow you can use to contribute to `GoCV`. However, it's an option that might help you get started quickly without too much hassle as this workflow lets you work off the `gocv` repo directory path!
Assuming you have already forked the `gocv` repo, you need to add a new `git remote` which will point to your GitHub fork. Notice below that you **must** `cd` to the `gocv` repo directory before you add the new `git remote`:
```shell
cd $GOPATH/src/gocv.io/x/gocv
git remote add gocv-fork https://github.com/YOUR_GH_HANDLE/gocv.git
```
Note that in the command above we named our new `git remote` **gocv-fork** for convenience, so we can easily recognize it. You are free to choose any remote name of your liking.
You should now see your new `git remote` when running the command below:
```shell
git remote -v
gocv-fork https://github.com/YOUR_GH_HANDLE/gocv.git (fetch)
gocv-fork https://github.com/YOUR_GH_HANDLE/gocv.git (push)
origin https://github.com/hybridgroup/gocv (fetch)
origin https://github.com/hybridgroup/gocv (push)
```
Before you create a new branch from `dev` you should fetch the latest commits from the `dev` branch:
```shell
git fetch origin dev
```
You want the `dev` branch in your `gocv` fork to be in sync with the `dev` branch of `gocv`, so push the commits you just fetched to your GitHub fork as shown below. Note, the `-f` force switch might not be needed:
```shell
git push gocv-fork dev -f
```
Create a new feature branch from `dev`:
```shell
git checkout -b new-feature
```
After you've made your changes you can run the tests using the `make` command listed below. Note, you're still working off the `gocv` project root directory, hence running the command below does not require complicated `$GOPATH` rewrites or whatnot:
```shell
make test
```
Once the tests have passed, commit your new code to the `new-feature` branch and push it to your fork running the command below:
```shell
git push gocv-fork new-feature
```
You can now open a new PR from `new-feature` branch in your forked repo against the `dev` branch of `gocv`.

12
vendor/gocv.io/x/gocv/Dockerfile generated vendored Normal file
View File

@@ -0,0 +1,12 @@
# to build this docker image:
# docker build .
FROM gocv/opencv:4.6.0
ENV GOPATH /go
COPY . /go/src/gocv.io/x/gocv/
WORKDIR /go/src/gocv.io/x/gocv
RUN go build -tags example -o /build/gocv_version -i ./cmd/version/
CMD ["/build/gocv_version"]

19
vendor/gocv.io/x/gocv/Dockerfile-test generated vendored Normal file
View File

@@ -0,0 +1,19 @@
# To build:
# docker build -f Dockerfile-test -t gocv-test .
#
# To run tests:
# xhost +
# docker run -it --rm -e DISPLAY=$DISPLAY -v /tmp/.X11-unix:/tmp/.X11-unix gocv-test
# xhost -
#
FROM gocv/opencv:4.6.0 AS gocv-test
ENV GOPATH /go
COPY . /go/src/gocv.io/x/gocv/
WORKDIR /go/src/gocv.io/x/gocv
RUN go get -u github.com/rakyll/gotest
ENTRYPOINT ["gotest", "-v", ".", "./contrib/..."]

18
vendor/gocv.io/x/gocv/Dockerfile-test.gpu-cuda-10 generated vendored Normal file
View File

@@ -0,0 +1,18 @@
# To build:
# docker build -f Dockerfile-test.gpu-cuda-10 -t gocv-test-gpu-cuda-10 .
#
# To run tests:
# docker run -it --rm --gpus all gocv-test-gpu-cuda-10
#
FROM gocv/opencv:4.6.0-gpu-cuda-10 AS gocv-gpu-test-cuda-10
ENV GOPATH /go
ENV PATH="${PATH}:/go/bin"
COPY . /go/src/gocv.io/x/gocv/
WORKDIR /go/src/gocv.io/x/gocv
RUN go get -u github.com/rakyll/gotest
ENTRYPOINT ["gotest", "-v", "./cuda/..."]

18
vendor/gocv.io/x/gocv/Dockerfile-test.gpu-cuda-11 generated vendored Normal file
View File

@@ -0,0 +1,18 @@
# To build:
# docker build -f Dockerfile-test.gpu-cuda-11 -t gocv-test-gpu-cuda-11 .
#
# To run tests:
# docker run -it --rm --gpus all gocv-test-gpu-cuda-11
#
FROM gocv/opencv:4.6.0-gpu-cuda-11 AS gocv-gpu-test-cuda-11
ENV GOPATH /go
ENV PATH="${PATH}:/go/bin"
COPY . /go/src/gocv.io/x/gocv/
WORKDIR /go/src/gocv.io/x/gocv
RUN go get -u github.com/rakyll/gotest
ENTRYPOINT ["gotest", "-v", "./cuda/..."]

12
vendor/gocv.io/x/gocv/Dockerfile.gpu generated vendored Normal file
View File

@@ -0,0 +1,12 @@
# to build this docker image:
# docker build -f Dockerfile.gpu .
FROM gocv/opencv:4.6.0-gpu-cuda-11 AS gocv-gpu
ENV GOPATH /go
COPY . /go/src/gocv.io/x/gocv/
WORKDIR /go/src/gocv.io/x/gocv
RUN go build -tags cuda -o /build/gocv_cuda_version ./cmd/cuda/
CMD ["/build/gocv_cuda_version"]

45
vendor/gocv.io/x/gocv/Dockerfile.opencv generated vendored Normal file
View File

@@ -0,0 +1,45 @@
# to build this docker image:
# docker build -f Dockerfile.opencv -t gocv/opencv:4.6.0 .
FROM golang:1.18-buster AS opencv
LABEL maintainer="hybridgroup"
RUN apt-get update && apt-get install -y --no-install-recommends \
git build-essential cmake pkg-config unzip libgtk2.0-dev \
curl ca-certificates libcurl4-openssl-dev libssl-dev \
libavcodec-dev libavformat-dev libswscale-dev libtbb2 libtbb-dev \
libjpeg-dev libpng-dev libtiff-dev libdc1394-22-dev && \
rm -rf /var/lib/apt/lists/*
ARG OPENCV_VERSION="4.6.0"
ENV OPENCV_VERSION $OPENCV_VERSION
RUN curl -Lo opencv.zip https://github.com/opencv/opencv/archive/${OPENCV_VERSION}.zip && \
unzip -q opencv.zip && \
curl -Lo opencv_contrib.zip https://github.com/opencv/opencv_contrib/archive/${OPENCV_VERSION}.zip && \
unzip -q opencv_contrib.zip && \
rm opencv.zip opencv_contrib.zip && \
cd opencv-${OPENCV_VERSION} && \
mkdir build && cd build && \
cmake -D CMAKE_BUILD_TYPE=RELEASE \
-D WITH_IPP=OFF \
-D WITH_OPENGL=OFF \
-D WITH_QT=OFF \
-D CMAKE_INSTALL_PREFIX=/usr/local \
-D OPENCV_EXTRA_MODULES_PATH=../../opencv_contrib-${OPENCV_VERSION}/modules \
-D OPENCV_ENABLE_NONFREE=ON \
-D WITH_JASPER=OFF \
-D WITH_TBB=ON \
-D BUILD_DOCS=OFF \
-D BUILD_EXAMPLES=OFF \
-D BUILD_TESTS=OFF \
-D BUILD_PERF_TESTS=OFF \
-D BUILD_opencv_java=NO \
-D BUILD_opencv_python=NO \
-D BUILD_opencv_python2=NO \
-D BUILD_opencv_python3=NO \
-D OPENCV_GENERATE_PKGCONFIG=ON .. && \
make -j $(nproc --all) && \
make preinstall && make install && ldconfig && \
cd / && rm -rf opencv*
CMD ["go version"]

68
vendor/gocv.io/x/gocv/Dockerfile.opencv-gpu-cuda-10 generated vendored Normal file
View File

@@ -0,0 +1,68 @@
# to build this docker image:
# docker build -f Dockerfile.opencv-gpu-cuda-10 -t gocv/opencv:4.6.0-gpu-cuda-10 .
FROM nvidia/cuda:10.2-cudnn8-devel AS opencv-gpu-base
LABEL maintainer="hybridgroup"
# needed for cuda repo key rotation. see:
# https://forums.developer.nvidia.com/t/notice-cuda-linux-repository-key-rotation/212771
#
RUN apt-key adv --fetch-keys http://developer.download.nvidia.com/compute/cuda/repos/ubuntu1804/x86_64/3bf863cc.pub
RUN apt-get update && apt-get install -y --no-install-recommends \
git build-essential cmake pkg-config unzip libgtk2.0-dev \
wget curl ca-certificates libcurl4-openssl-dev libssl-dev \
libavcodec-dev libavformat-dev libswscale-dev libtbb2 libtbb-dev \
libjpeg-dev libpng-dev libtiff-dev libdc1394-22-dev && \
rm -rf /var/lib/apt/lists/*
ARG OPENCV_VERSION="4.6.0"
ENV OPENCV_VERSION $OPENCV_VERSION
RUN curl -Lo opencv.zip https://github.com/opencv/opencv/archive/${OPENCV_VERSION}.zip && \
unzip -q opencv.zip && \
curl -Lo opencv_contrib.zip https://github.com/opencv/opencv_contrib/archive/${OPENCV_VERSION}.zip && \
unzip -q opencv_contrib.zip && \
rm opencv.zip opencv_contrib.zip && \
cd opencv-${OPENCV_VERSION} && \
mkdir build && cd build && \
cmake -D CMAKE_BUILD_TYPE=RELEASE \
-D WITH_IPP=OFF \
-D WITH_OPENGL=OFF \
-D WITH_QT=OFF \
-D CMAKE_INSTALL_PREFIX=/usr/local \
-D OPENCV_EXTRA_MODULES_PATH=../../opencv_contrib-${OPENCV_VERSION}/modules \
-D OPENCV_ENABLE_NONFREE=ON \
-D WITH_JASPER=OFF \
-D BUILD_DOCS=OFF \
-D BUILD_EXAMPLES=OFF \
-D BUILD_TESTS=OFF \
-D BUILD_PERF_TESTS=OFF \
-D BUILD_opencv_java=NO \
-D BUILD_opencv_python=NO \
-D BUILD_opencv_python2=NO \
-D BUILD_opencv_python3=NO \
-D WITH_TBB=ON \
-D WITH_CUDA=ON \
-D ENABLE_FAST_MATH=1 \
-D CUDA_FAST_MATH=1 \
-D WITH_CUBLAS=1 \
-D CUDA_TOOLKIT_ROOT_DIR=/usr/local/cuda/ \
-D BUILD_opencv_cudacodec=OFF \
-D WITH_CUDNN=ON \
-D OPENCV_DNN_CUDA=ON \
-D CUDA_GENERATION=Auto \
-D OPENCV_GENERATE_PKGCONFIG=ON .. && \
make -j $(nproc --all) && \
make preinstall && make install && ldconfig && \
cd / && rm -rf opencv*
# install golang here
FROM opencv-gpu-base AS opencv-gpu-golang
ENV GO_RELEASE=1.18.3
RUN wget https://dl.google.com/go/go${GO_RELEASE}.linux-amd64.tar.gz && \
tar xfv go${GO_RELEASE}.linux-amd64.tar.gz -C /usr/local && \
rm go${GO_RELEASE}.linux-amd64.tar.gz
ENV PATH="${PATH}:/usr/local/go/bin"
CMD ["go version"]

64
vendor/gocv.io/x/gocv/Dockerfile.opencv-gpu-cuda-11 generated vendored Normal file
View File

@@ -0,0 +1,64 @@
# to build this docker image:
# docker build -f Dockerfile.opencv-gpu-cuda-11 -t gocv/opencv:4.6.0-gpu-cuda-11 .
FROM nvidia/cuda:11.5.2-cudnn8-devel-ubuntu20.04 AS opencv-gpu-cuda-11-base
LABEL maintainer="hybridgroup"
ENV DEBIAN_FRONTEND=noninteractive
RUN apt-get update && apt-get install -y --no-install-recommends \
git build-essential cmake pkg-config unzip libgtk2.0-dev \
wget curl ca-certificates libcurl4-openssl-dev libssl-dev \
libavcodec-dev libavformat-dev libswscale-dev libtbb2 libtbb-dev \
libjpeg-dev libpng-dev libtiff-dev libdc1394-22-dev && \
rm -rf /var/lib/apt/lists/*
ARG OPENCV_VERSION="4.6.0"
ENV OPENCV_VERSION $OPENCV_VERSION
RUN curl -Lo opencv.zip https://github.com/opencv/opencv/archive/${OPENCV_VERSION}.zip && \
unzip -q opencv.zip && \
curl -Lo opencv_contrib.zip https://github.com/opencv/opencv_contrib/archive/${OPENCV_VERSION}.zip && \
unzip -q opencv_contrib.zip && \
rm opencv.zip opencv_contrib.zip && \
cd opencv-${OPENCV_VERSION} && \
mkdir build && cd build && \
cmake -D CMAKE_BUILD_TYPE=RELEASE \
-D WITH_IPP=OFF \
-D WITH_OPENGL=OFF \
-D WITH_QT=OFF \
-D CMAKE_INSTALL_PREFIX=/usr/local \
-D OPENCV_EXTRA_MODULES_PATH=../../opencv_contrib-${OPENCV_VERSION}/modules \
-D OPENCV_ENABLE_NONFREE=ON \
-D WITH_JASPER=OFF \
-D BUILD_DOCS=OFF \
-D BUILD_EXAMPLES=OFF \
-D BUILD_TESTS=OFF \
-D BUILD_PERF_TESTS=OFF \
-D BUILD_opencv_java=NO \
-D BUILD_opencv_python=NO \
-D BUILD_opencv_python2=NO \
-D BUILD_opencv_python3=NO \
-D WITH_TBB=ON \
-D WITH_CUDA=ON \
-D ENABLE_FAST_MATH=1 \
-D CUDA_FAST_MATH=1 \
-D WITH_CUBLAS=1 \
-D CUDA_TOOLKIT_ROOT_DIR=/usr/local/cuda/ \
-D BUILD_opencv_cudacodec=OFF \
-D WITH_CUDNN=ON \
-D OPENCV_DNN_CUDA=ON \
-D CUDA_GENERATION=Auto \
-D OPENCV_GENERATE_PKGCONFIG=ON .. && \
make -j $(nproc --all) && \
make preinstall && make install && ldconfig && \
cd / && rm -rf opencv*
# install golang here
FROM opencv-gpu-cuda-11-base AS opencv-gpu-cuda-11-golang
ENV GO_RELEASE=1.18.3
RUN wget https://dl.google.com/go/go${GO_RELEASE}.linux-amd64.tar.gz && \
tar xfv go${GO_RELEASE}.linux-amd64.tar.gz -C /usr/local && \
rm go${GO_RELEASE}.linux-amd64.tar.gz
ENV PATH="${PATH}:/usr/local/go/bin"
CMD ["go version"]

13
vendor/gocv.io/x/gocv/LICENSE.txt generated vendored Normal file
View File

@@ -0,0 +1,13 @@
Copyright (c) 2017-2022 The Hybrid Group and friends
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

311
vendor/gocv.io/x/gocv/Makefile generated vendored Normal file
View File

@@ -0,0 +1,311 @@
.ONESHELL:
.PHONY: test deps download build clean astyle cmds docker
# GoCV version to use.
GOCV_VERSION?="v0.31.0"
# OpenCV version to use.
OPENCV_VERSION?=4.6.0
# Go version to use when building Docker image
GOVERSION?=1.16.2
# Temporary directory to put files into.
TMP_DIR?=/tmp/
# Build shared or static library
BUILD_SHARED_LIBS?=ON
# Package list for each well-known Linux distribution
RPMS=cmake curl wget git gtk2-devel libpng-devel libjpeg-devel libtiff-devel tbb tbb-devel libdc1394-devel unzip gcc-c++
DEBS=unzip wget build-essential cmake curl git libgtk2.0-dev pkg-config libavcodec-dev libavformat-dev libswscale-dev libtbb2 libtbb-dev libjpeg-dev libpng-dev libtiff-dev libdc1394-22-dev
JETSON=build-essential cmake git unzip pkg-config libjpeg-dev libpng-dev libtiff-dev libavcodec-dev libavformat-dev libswscale-dev libgtk2.0-dev libcanberra-gtk* libxvidcore-dev libx264-dev libgtk-3-dev libtbb2 libtbb-dev libdc1394-22-dev libv4l-dev v4l-utils libgstreamer1.0-dev libgstreamer-plugins-base1.0-dev libavresample-dev libvorbis-dev libxine2-dev libfaac-dev libmp3lame-dev libtheora-dev libopencore-amrnb-dev libopencore-amrwb-dev libopenblas-dev libatlas-base-dev libblas-dev liblapack-dev libeigen3-dev gfortran libhdf5-dev protobuf-compiler libprotobuf-dev libgoogle-glog-dev libgflags-dev
explain:
@echo "For quick install with typical defaults of both OpenCV and GoCV, run 'make install'"
# Detect Linux distribution
distro_deps=
ifneq ($(shell which dnf 2>/dev/null),)
distro_deps=deps_fedora
else
ifneq ($(shell which apt-get 2>/dev/null),)
distro_deps=deps_debian
else
ifneq ($(shell which yum 2>/dev/null),)
distro_deps=deps_rh_centos
endif
endif
endif
# Install all necessary dependencies.
deps: $(distro_deps)
deps_rh_centos:
sudo yum -y install pkgconfig $(RPMS)
deps_fedora:
sudo dnf -y install pkgconf-pkg-config $(RPMS)
deps_debian:
sudo apt-get -y update
sudo apt-get -y install $(DEBS)
deps_jetson:
sudo sh -c "echo '/usr/local/cuda/lib64' >> /etc/ld.so.conf.d/nvidia-tegra.conf"
sudo ldconfig
sudo apt-get -y update
sudo apt-get -y install $(JETSON)
# Download OpenCV source tarballs.
download:
rm -rf $(TMP_DIR)opencv
mkdir $(TMP_DIR)opencv
cd $(TMP_DIR)opencv
curl -Lo opencv.zip https://github.com/opencv/opencv/archive/$(OPENCV_VERSION).zip
unzip -q opencv.zip
curl -Lo opencv_contrib.zip https://github.com/opencv/opencv_contrib/archive/$(OPENCV_VERSION).zip
unzip -q opencv_contrib.zip
rm opencv.zip opencv_contrib.zip
cd -
# Download openvino source tarballs.
download_openvino:
sudo rm -rf /usr/local/dldt/
sudo rm -rf /usr/local/openvino/
sudo git clone https://github.com/openvinotoolkit/openvino -b 2019_R3.1 /usr/local/openvino/
# Build openvino.
build_openvino_package:
cd /usr/local/openvino/inference-engine
sudo git submodule init
sudo git submodule update --recursive
sudo ./install_dependencies.sh
sudo mv -f thirdparty/clDNN/common/intel_ocl_icd/6.3/linux/Release thirdparty/clDNN/common/intel_ocl_icd/6.3/linux/RELEASE
sudo mkdir build
cd build
sudo rm -rf *
sudo cmake -D CMAKE_BUILD_TYPE=RELEASE -D CMAKE_INSTALL_PREFIX=/usr/local -D BUILD_SHARED_LIBS=${BUILD_SHARED_LIBS} -D ENABLE_VPU=ON -D ENABLE_MKL_DNN=ON -D ENABLE_CLDNN=ON ..
sudo $(MAKE) -j $(shell nproc --all)
sudo touch VERSION
sudo mkdir -p src/ngraph
sudo cp thirdparty/ngraph/src/ngraph/version.hpp src/ngraph
cd -
# Build OpenCV.
build:
cd $(TMP_DIR)opencv/opencv-$(OPENCV_VERSION)
mkdir build
cd build
rm -rf *
cmake -D CMAKE_BUILD_TYPE=RELEASE -D CMAKE_INSTALL_PREFIX=/usr/local -D BUILD_SHARED_LIBS=${BUILD_SHARED_LIBS} -D OPENCV_EXTRA_MODULES_PATH=$(TMP_DIR)opencv/opencv_contrib-$(OPENCV_VERSION)/modules -D BUILD_DOCS=OFF -D BUILD_EXAMPLES=OFF -D BUILD_TESTS=OFF -D BUILD_PERF_TESTS=OFF -D BUILD_opencv_java=NO -D BUILD_opencv_python=NO -D BUILD_opencv_python2=NO -D BUILD_opencv_python3=NO -D WITH_JASPER=OFF -D WITH_TBB=ON -DOPENCV_GENERATE_PKGCONFIG=ON ..
$(MAKE) -j $(shell nproc --all)
$(MAKE) preinstall
cd -
# Build OpenCV on Raspbian with ARM hardware optimizations.
build_raspi:
cd $(TMP_DIR)opencv/opencv-$(OPENCV_VERSION)
mkdir build
cd build
rm -rf *
cmake -D CMAKE_BUILD_TYPE=RELEASE -D CMAKE_INSTALL_PREFIX=/usr/local -D BUILD_SHARED_LIBS=${BUILD_SHARED_LIBS} -D OPENCV_EXTRA_MODULES_PATH=$(TMP_DIR)opencv/opencv_contrib-$(OPENCV_VERSION)/modules -D BUILD_DOCS=OFF -D BUILD_EXAMPLES=OFF -D BUILD_TESTS=OFF -D BUILD_PERF_TESTS=OFF -D BUILD_opencv_java=OFF -D BUILD_opencv_python=NO -D BUILD_opencv_python2=NO -D BUILD_opencv_python3=NO -D ENABLE_NEON=ON -D ENABLE_VFPV3=ON -D WITH_JASPER=OFF -D OPENCV_GENERATE_PKGCONFIG=ON ..
$(MAKE) -j $(shell nproc --all)
$(MAKE) preinstall
cd -
# Build OpenCV on Raspberry pi zero which has ARMv6.
build_raspi_zero:
cd $(TMP_DIR)opencv/opencv-$(OPENCV_VERSION)
mkdir build
cd build
rm -rf *
cmake -D CMAKE_BUILD_TYPE=RELEASE -D CMAKE_INSTALL_PREFIX=/usr/local -D BUILD_SHARED_LIBS=${BUILD_SHARED_LIBS} -D OPENCV_EXTRA_MODULES_PATH=$(TMP_DIR)opencv/opencv_contrib-$(OPENCV_VERSION)/modules -D BUILD_DOCS=OFF -D BUILD_EXAMPLES=OFF -D BUILD_TESTS=OFF -D BUILD_PERF_TESTS=OFF -D BUILD_opencv_java=OFF -D BUILD_opencv_python=NO -D BUILD_opencv_python2=NO -D BUILD_opencv_python3=NO -D ENABLE_VFPV2=ON -D WITH_JASPER=OFF -D OPENCV_GENERATE_PKGCONFIG=ON ..
$(MAKE) -j $(shell nproc --all)
$(MAKE) preinstall
cd -
# Build OpenCV for NVidia Jetson with CUDA.
build_jetson:
cd $(TMP_DIR)opencv/opencv-$(OPENCV_VERSION)
mkdir build
cd build
rm -rf *
cmake -D CMAKE_BUILD_TYPE=RELEASE \
-D CMAKE_INSTALL_PREFIX=/usr/local \
-D EIGEN_INCLUDE_PATH=/usr/include/eigen3 \
-D BUILD_SHARED_LIBS=${BUILD_SHARED_LIBS} \
-D OPENCV_EXTRA_MODULES_PATH=$(TMP_DIR)opencv/opencv_contrib-$(OPENCV_VERSION)/modules \
-D BUILD_DOCS=OFF -D BUILD_EXAMPLES=OFF -D BUILD_TESTS=OFF -D BUILD_PERF_TESTS=OFF -D BUILD_opencv_java=OFF -D BUILD_opencv_python=NO -D BUILD_opencv_python2=NO -D BUILD_opencv_python3=NO \
-D WITH_OPENCL=OFF \
-D WITH_CUDA=ON \
-D CUDA_ARCH_BIN=5.3 \
-D CUDA_ARCH_PTX="" \
-D WITH_CUDNN=ON \
-D WITH_CUBLAS=ON \
-D ENABLE_FAST_MATH=ON \
-D CUDA_FAST_MATH=ON \
-D OPENCV_DNN_CUDA=ON \
-D ENABLE_NEON=ON \
-D WITH_QT=OFF \
-D WITH_OPENMP=ON \
-D WITH_OPENGL=ON \
-D BUILD_TIFF=ON \
-D WITH_FFMPEG=ON \
-D WITH_GSTREAMER=ON \
-D WITH_TBB=ON \
-D BUILD_TBB=ON \
-D BUILD_TESTS=OFF \
-D WITH_EIGEN=ON \
-D WITH_V4L=ON \
-D WITH_LIBV4L=ON \
-D OPENCV_GENERATE_PKGCONFIG=ON ..
$(MAKE) -j $(shell nproc --all)
$(MAKE) preinstall
cd -
# Build OpenCV with non-free contrib modules.
build_nonfree:
cd $(TMP_DIR)opencv/opencv-$(OPENCV_VERSION)
mkdir build
cd build
rm -rf *
cmake -D CMAKE_BUILD_TYPE=RELEASE -D CMAKE_INSTALL_PREFIX=/usr/local -D BUILD_SHARED_LIBS=${BUILD_SHARED_LIBS} -D OPENCV_EXTRA_MODULES_PATH=$(TMP_DIR)opencv/opencv_contrib-$(OPENCV_VERSION)/modules -D BUILD_DOCS=OFF -D BUILD_EXAMPLES=OFF -D BUILD_TESTS=OFF -D BUILD_PERF_TESTS=OFF -D BUILD_opencv_java=NO -D BUILD_opencv_python=NO -D BUILD_opencv_python2=NO -D BUILD_opencv_python3=NO -D WITH_JASPER=OFF -D WITH_TBB=ON -DOPENCV_GENERATE_PKGCONFIG=ON -DOPENCV_ENABLE_NONFREE=ON ..
$(MAKE) -j $(shell nproc --all)
$(MAKE) preinstall
cd -
# Build OpenCV with openvino.
build_openvino:
cd $(TMP_DIR)opencv/opencv-$(OPENCV_VERSION)
mkdir build
cd build
rm -rf *
cmake -D CMAKE_BUILD_TYPE=RELEASE -D CMAKE_INSTALL_PREFIX=/usr/local -D BUILD_SHARED_LIBS=${BUILD_SHARED_LIBS} -D ENABLE_CXX11=ON -D OPENCV_EXTRA_MODULES_PATH=$(TMP_DIR)opencv/opencv_contrib-$(OPENCV_VERSION)/modules -D WITH_INF_ENGINE=ON -D InferenceEngine_DIR=/usr/local/dldt/inference-engine/build -D BUILD_DOCS=OFF -D BUILD_EXAMPLES=OFF -D BUILD_TESTS=OFF -D BUILD_PERF_TESTS=OFF -D BUILD_opencv_java=NO -D BUILD_opencv_python=NO -D BUILD_opencv_python2=NO -D BUILD_opencv_python3=NO -D WITH_JASPER=OFF -D WITH_TBB=ON -DOPENCV_GENERATE_PKGCONFIG=ON -DOPENCV_ENABLE_NONFREE=ON ..
$(MAKE) -j $(shell nproc --all)
$(MAKE) preinstall
cd -
# Build OpenCV with cuda.
build_cuda:
cd $(TMP_DIR)opencv/opencv-$(OPENCV_VERSION)
mkdir build
cd build
rm -rf *
cmake -j $(shell nproc --all) -D CMAKE_BUILD_TYPE=RELEASE -D CMAKE_INSTALL_PREFIX=/usr/local -D BUILD_SHARED_LIBS=${BUILD_SHARED_LIBS} -D OPENCV_EXTRA_MODULES_PATH=$(TMP_DIR)opencv/opencv_contrib-$(OPENCV_VERSION)/modules -D BUILD_DOCS=OFF -D BUILD_EXAMPLES=OFF -D BUILD_TESTS=OFF -D BUILD_PERF_TESTS=OFF -D BUILD_opencv_java=NO -D BUILD_opencv_python=NO -D BUILD_opencv_python2=NO -D BUILD_opencv_python3=NO -D WITH_JASPER=OFF -D WITH_TBB=ON -DOPENCV_GENERATE_PKGCONFIG=ON -DWITH_CUDA=ON -DENABLE_FAST_MATH=1 -DCUDA_FAST_MATH=1 -DWITH_CUBLAS=1 -DCUDA_TOOLKIT_ROOT_DIR=/usr/local/cuda/ -DBUILD_opencv_cudacodec=OFF -D WITH_CUDNN=ON -D OPENCV_DNN_CUDA=ON -D CUDA_GENERATION=Auto ..
$(MAKE) -j $(shell nproc --all)
$(MAKE) preinstall
cd -
# Build OpenCV statically linked
build_static:
cd $(TMP_DIR)opencv/opencv-$(OPENCV_VERSION)
mkdir build
cd build
rm -rf *
cmake -D CMAKE_BUILD_TYPE=RELEASE -D CMAKE_INSTALL_PREFIX=/usr/local -D BUILD_SHARED_LIBS=OFF -D OPENCV_EXTRA_MODULES_PATH=$(TMP_DIR)opencv/opencv_contrib-$(OPENCV_VERSION)/modules -D BUILD_DOCS=OFF -D BUILD_EXAMPLES=OFF -D BUILD_TESTS=OFF -D BUILD_PERF_TESTS=OFF -D BUILD_opencv_java=NO -D BUILD_opencv_python=NO -D BUILD_opencv_python2=NO -D BUILD_opencv_python3=NO -DWITH_JASPER=OFF -DWITH_QT=OFF -DWITH_GTK=OFF -DWITH_FFMPEG=OFF -DWITH_TIFF=OFF -DWITH_WEBP=OFF -DWITH_PNG=OFF -DWITH_1394=OFF -DWITH_OPENJPEG=OFF -DOPENCV_GENERATE_PKGCONFIG=ON ..
$(MAKE) -j $(shell nproc --all)
$(MAKE) preinstall
cd -
# Build OpenCV with openvino and cuda.
build_all:
cd $(TMP_DIR)opencv/opencv-$(OPENCV_VERSION)
mkdir build
cd build
rm -rf *
cmake -j $(shell nproc --all) -D CMAKE_BUILD_TYPE=RELEASE -D CMAKE_INSTALL_PREFIX=/usr/local -D BUILD_SHARED_LIBS=${BUILD_SHARED_LIBS} -D ENABLE_CXX11=ON -D OPENCV_EXTRA_MODULES_PATH=$(TMP_DIR)opencv/opencv_contrib-$(OPENCV_VERSION)/modules -D WITH_INF_ENGINE=ON -D InferenceEngine_DIR=/usr/local/dldt/inference-engine/build -D BUILD_DOCS=OFF -D BUILD_EXAMPLES=OFF -D BUILD_TESTS=OFF -D BUILD_PERF_TESTS=OFF -D BUILD_opencv_java=NO -D BUILD_opencv_python=NO -D BUILD_opencv_python2=NO -D BUILD_opencv_python3=NO -D WITH_JASPER=OFF -D WITH_TBB=ON -DOPENCV_GENERATE_PKGCONFIG=ON -DWITH_CUDA=ON -DENABLE_FAST_MATH=1 -DCUDA_FAST_MATH=1 -DWITH_CUBLAS=1 -DCUDA_TOOLKIT_ROOT_DIR=/usr/local/cuda/ -DBUILD_opencv_cudacodec=OFF -D WITH_CUDNN=ON -D OPENCV_DNN_CUDA=ON -D CUDA_GENERATION=Auto ..
$(MAKE) -j $(shell nproc --all)
$(MAKE) preinstall
cd -
# Cleanup temporary build files.
clean:
go clean --cache
rm -rf $(TMP_DIR)opencv
# Cleanup old library files.
sudo_pre_install_clean:
sudo rm -rf /usr/local/lib/cmake/opencv4/
sudo rm -rf /usr/local/lib/libopencv*
sudo rm -rf /usr/local/lib/pkgconfig/opencv*
sudo rm -rf /usr/local/include/opencv*
# Do everything.
install: deps download sudo_pre_install_clean build sudo_install clean verify
# Do everything on Raspbian.
install_raspi: deps download build_raspi sudo_install clean verify
# Do everything on the raspberry pi zero.
install_raspi_zero: deps download build_raspi_zero sudo_install clean verify
# Do everything on Jetson.
install_jetson: deps download build_jetson sudo_install clean verify
# Do everything with cuda.
install_cuda: deps download sudo_pre_install_clean build_cuda sudo_install clean verify verify_cuda
# Do everything with openvino.
install_openvino: deps download download_openvino sudo_pre_install_clean build_openvino_package sudo_install_openvino build_openvino sudo_install clean verify_openvino
# Do everything statically.
install_static: deps download sudo_pre_install_clean build_static sudo_install clean verify_static
# Do everything with non-free modules from opencv_contrib.
install_nonfree: deps download sudo_pre_install_clean build_nonfree sudo_install clean verify
# Do everything with openvino and cuda.
install_all: deps download download_openvino sudo_pre_install_clean build_openvino_package sudo_install_openvino build_all sudo_install clean verify_openvino verify_cuda
# Install system wide.
sudo_install:
cd $(TMP_DIR)opencv/opencv-$(OPENCV_VERSION)/build
sudo $(MAKE) install
sudo ldconfig
cd -
# Install system wide.
sudo_install_openvino:
cd /usr/local/openvino/inference-engine/build
sudo $(MAKE) install
sudo ldconfig
cd -
# Build a minimal Go app to confirm gocv works.
verify:
go run ./cmd/version/main.go
# Build a minimal Go app to confirm gocv works with statically built OpenCV.
verify_static:
go run -tags static ./cmd/version/main.go
# Build a minimal Go app to confirm gocv cuda works.
verify_cuda:
go run ./cmd/cuda/main.go
# Build a minimal Go app to confirm gocv openvino works.
verify_openvino:
go run -tags openvino ./cmd/version/main.go
# Runs tests.
# This assumes env.sh was already sourced.
# pvt is not tested here since it requires additional dependencies.
test:
go test -tags matprofile . ./contrib
docker:
docker build --build-arg OPENCV_VERSION=$(OPENCV_VERSION) --build-arg GOVERSION=$(GOVERSION) .
astyle:
astyle --project=.astylerc --recursive *.cpp,*.h
releaselog:
git log --pretty=format:"%s" $(GOCV_VERSION)..HEAD
CMDS=basic-drawing caffe-classifier captest capwindow counter dnn-detection dnn-pose-detection dnn-style-transfer faceblur facedetect facedetect-from-url feature-matching find-chessboard find-circles find-lines hand-gestures hello img-similarity mjpeg-streamer motion-detect saveimage savevideo showimage ssd-facedetect tf-classifier tracking version xphoto
cmds:
for cmd in $(CMDS) ; do \
go build -o build/$$cmd cmd/$$cmd/main.go ;
done ; \

593
vendor/gocv.io/x/gocv/README.md generated vendored Normal file
View File

@@ -0,0 +1,593 @@
# GoCV
[![GoCV](https://raw.githubusercontent.com/hybridgroup/gocv/release/images/gocvlogo.jpg)](http://gocv.io/)
[![Go Reference](https://pkg.go.dev/badge/gocv.io/x/gocv.svg)](https://pkg.go.dev/gocv.io/x/gocv)
[![Linux](https://github.com/hybridgroup/gocv/actions/workflows/linux.yml/badge.svg?branch=dev)](https://github.com/hybridgroup/gocv/actions/workflows/linux.yml)
[![Windows](https://ci.appveyor.com/api/projects/status/9asd5foet54ru69q/branch/dev?svg=true)](https://ci.appveyor.com/project/deadprogram/gocv/branch/dev)
[![Go Report Card](https://goreportcard.com/badge/github.com/hybridgroup/gocv)](https://goreportcard.com/report/github.com/hybridgroup/gocv)
[![License](https://img.shields.io/badge/License-Apache%202.0-blue.svg)](https://github.com/hybridgroup/gocv/blob/release/LICENSE.txt)
The GoCV package provides Go language bindings for the [OpenCV 4](http://opencv.org/) computer vision library.
The GoCV package supports the latest releases of Go and OpenCV (v4.6.0) on Linux, macOS, and Windows. We intend to make the Go language a "first-class" client compatible with the latest developments in the OpenCV ecosystem.
GoCV supports [CUDA](https://en.wikipedia.org/wiki/CUDA) for hardware acceleration using Nvidia GPUs. Check out the [CUDA README](./cuda/README.md) for more info on how to use GoCV with OpenCV/CUDA.
GoCV also supports [Intel OpenVINO](https://software.intel.com/en-us/openvino-toolkit). Check out the [OpenVINO README](./openvino/README.md) for more info on how to use GoCV with the Intel OpenVINO toolkit.
## How to use
### Hello, video
This example opens a video capture device using device "0", reads frames, and shows the video in a GUI window:
```go
package main
import (
"gocv.io/x/gocv"
)
func main() {
webcam, _ := gocv.OpenVideoCapture(0)
window := gocv.NewWindow("Hello")
img := gocv.NewMat()
for {
webcam.Read(&img)
window.IMShow(img)
window.WaitKey(1)
}
}
```
### Face detect
![GoCV](https://raw.githubusercontent.com/hybridgroup/gocv/release/images/face-detect.jpg)
This is a more complete example that opens a video capture device using device "0". It also uses the CascadeClassifier class to load an external data file containing the classifier data. The program grabs each frame from the video, then uses the classifier to detect faces. If any faces are found, it draws a green rectangle around each one, then displays the video in an output window:
```go
package main
import (
"fmt"
"image/color"
"gocv.io/x/gocv"
)
func main() {
// set to use a video capture device 0
deviceID := 0
// open webcam
webcam, err := gocv.OpenVideoCapture(deviceID)
if err != nil {
fmt.Println(err)
return
}
defer webcam.Close()
// open display window
window := gocv.NewWindow("Face Detect")
defer window.Close()
// prepare image matrix
img := gocv.NewMat()
defer img.Close()
// color for the rect when faces detected
blue := color.RGBA{0, 0, 255, 0}
// load classifier to recognize faces
classifier := gocv.NewCascadeClassifier()
defer classifier.Close()
if !classifier.Load("data/haarcascade_frontalface_default.xml") {
fmt.Println("Error reading cascade file: data/haarcascade_frontalface_default.xml")
return
}
fmt.Printf("start reading camera device: %v\n", deviceID)
for {
if ok := webcam.Read(&img); !ok {
fmt.Printf("cannot read device %v\n", deviceID)
return
}
if img.Empty() {
continue
}
// detect faces
rects := classifier.DetectMultiScale(img)
fmt.Printf("found %d faces\n", len(rects))
// draw a rectangle around each face on the original image
for _, r := range rects {
gocv.Rectangle(&img, r, blue, 3)
}
// show the image in the window, and wait 1 millisecond
window.IMShow(img)
window.WaitKey(1)
}
}
```
### More examples
There are examples in the [cmd directory](./cmd) of this repo in the form of various useful command line utilities, such as [capturing an image file](./cmd/saveimage), [streaming mjpeg video](./cmd/mjpeg-streamer), [counting objects that cross a line](./cmd/counter), and [using OpenCV with Tensorflow for object classification](./cmd/tf-classifier).
## How to install
To install GoCV, you must first have the matching version of OpenCV installed on your system. The current release of GoCV requires OpenCV 4.6.0.
Here are instructions for Ubuntu, Raspbian, macOS, and Windows.
## Ubuntu/Linux
### Installation
You can use `make` to install OpenCV 4.6.0 with the handy `Makefile` included with this repo. If you already have installed OpenCV, you do not need to do so again. The installation performed by the `Makefile` is minimal, so it may remove OpenCV options such as Python or Java wrappers if you have already installed OpenCV some other way.
#### Quick Install
First, change directories to where you want to install GoCV, and then use git to clone the repository to your local machine like this:
cd $HOME/folder/with/your/src/
git clone https://github.com/hybridgroup/gocv.git
Make sure to change `$HOME/folder/with/your/src/` to where you actually want to save the code.
Once you have cloned the repo, the following commands should do everything to download and install OpenCV 4.6.0 on Linux:
cd gocv
make install
If you need static opencv libraries
make install BUILD_SHARED_LIBS=OFF
If it works correctly, at the end of the entire process, the following message should be displayed:
gocv version: 0.31.0
opencv lib version: 4.6.0
That's it, now you are ready to use GoCV.
#### Using CUDA with GoCV
See the [cuda directory](./cuda) for information.
#### Using OpenVINO with GoCV
See the [openvino directory](./openvino) for information.
#### Make Install for OpenVINO and Cuda
The following commands should do everything to download and install OpenCV 4.6.0 with CUDA and OpenVINO on Linux. Make sure to change `$HOME/folder/with/your/src/` to the directory you used to clone GoCV:
cd $HOME/folder/with/gocv/
make install_all
If you need static opencv libraries
make install_all BUILD_SHARED_LIBS=OFF
If it works correctly, at the end of the entire process, the following message should be displayed:
gocv version: 0.31.0
opencv lib version: 4.6.0-openvino
cuda information:
Device 0: "GeForce MX150" 2003Mb, sm_61, Driver/Runtime ver.10.0/10.0
#### Complete Install
If you have already done the "Quick Install" as described above, you do not need to run any further commands. For the curious, or for custom installations, here are the details for each of the steps that are performed when you run `make install`.
First, change directories to where you want to install GoCV, and then use git to clone the repository to your local machine like this:
cd $HOME/folder/with/your/src/
git clone https://github.com/hybridgroup/gocv.git
Make sure to change `$HOME/folder/with/your/src/` to where you actually want to save the code.
##### Install required packages
First, you need to change the current directory to the location where you cloned the GoCV repo, so you can access the `Makefile`:
cd $HOME/folder/with/your/src/gocv
Next, you need to update the system, and install any required packages:
make deps
#### Download source
Now, download the OpenCV 4.6.0 and OpenCV Contrib source code:
make download
#### Build
Build everything. This will take quite a while:
make build
If you need static opencv libraries
make build BUILD_SHARED_LIBS=OFF
#### Install
Once the code is built, you are ready to install:
make sudo_install
### Verifying the installation
To verify your installation you can run one of the included examples.
First, change the current directory to the location of the GoCV repo:
cd $HOME/src/gocv.io/x/gocv
Now you should be able to build or run any of the examples:
go run ./cmd/version/main.go
The version program should output the following:
gocv version: 0.31.0
opencv lib version: 4.6.0
#### Cleanup extra files
After the installation is complete, you can remove the extra files and folders:
make clean
### Custom Environment
By default, pkg-config is used to determine the correct flags for compiling and linking OpenCV. This behavior can be disabled by supplying `-tags customenv` when building/running your application. When building with this tag you will need to supply the CGO environment variables yourself.
For example:
export CGO_CPPFLAGS="-I/usr/local/include"
export CGO_LDFLAGS="-L/usr/local/lib -lopencv_core -lopencv_face -lopencv_videoio -lopencv_imgproc -lopencv_highgui -lopencv_imgcodecs -lopencv_objdetect -lopencv_features2d -lopencv_video -lopencv_dnn -lopencv_xfeatures2d"
Please note that you will need to run these 2 lines once in your current session to set up the ENV variables needed to build or run the code. Once you have done so, you can execute code that uses GoCV with your custom environment like this:
go run -tags customenv ./cmd/version/main.go
### Docker
The project now provides a `Dockerfile` which lets you build a [GoCV](https://gocv.io/) Docker image which you can then use to build and run `GoCV` applications in Docker containers. The `Makefile` contains a `docker` target which lets you build the Docker image with a single command:
```
make docker
```
By default the Docker image built by running the command above ships [Go](https://golang.org/) version `1.16.5`, but if you would like to build an image which uses a different version of `Go` you can override the default value when running the target command:
```
make docker GOVERSION='1.15'
```
#### Running GUI programs in Docker on macOS
Sometimes your `GoCV` programs create graphical interfaces like windows, e.g. when you use the `gocv.Window` type to display an image or video stream. Running programs that create graphical interfaces in a Docker container on macOS is unfortunately a bit elaborate, but not impossible. First you need to satisfy the following prerequisites:
* install [xquartz](https://www.xquartz.org/). You can also install xquartz using [homebrew](https://brew.sh/) by running `brew cask install xquartz`
* install [socat](https://linux.die.net/man/1/socat) `brew install socat`
Note, you will have to log out and log back in to your machine once you have installed `xquartz`. This is so the X window system is reloaded.
Once you have installed all the prerequisites you need to allow connections from network clients to `xquartz`. Here is how you do that. First run the following command to open `xquartz` so you can configure it:
```shell
open -a xquartz
```
Click on *Security* tab in preferences and check the "Allow connections" box:
![app image](./images/xquartz.png)
Next, you need to create a TCP proxy using `socat` which will stream [X Window](https://en.wikipedia.org/wiki/X_Window_System) data into `xquartz`. Before you start the proxy you need to make sure that there is no process listening on port `6000`. The following command should **not** return any results:
```shell
lsof -i TCP:6000
```
Now you can start a local proxy which will proxy the X Window traffic into xquartz, which acts as your local X server:
```shell
socat TCP-LISTEN:6000,reuseaddr,fork UNIX-CLIENT:\"$DISPLAY\"
```
You are now finally ready to run your `GoCV` GUI programs in Docker containers. In order to make everything work you must set the `DISPLAY` environment variable as shown in the sample command below:
```shell
docker run -it --rm -e DISPLAY=docker.for.mac.host.internal:0 your-gocv-app
```
**Note, since Docker for macOS does not provide any video device support, you won't be able to run GoCV apps which require a camera.**
### Alpine 3.7 Docker image
There is a Docker image with Alpine 3.7 that has been created by project contributor [@denismakogon](https://github.com/denismakogon). You can find it located at [https://github.com/denismakogon/gocv-alpine](https://github.com/denismakogon/gocv-alpine).
## Raspbian
### Installation
We have a special installation for the Raspberry Pi that includes some hardware optimizations. You use `make` to install OpenCV 4.6.0 with the handy `Makefile` included with this repo. If you already have installed OpenCV, you do not need to do so again. The installation performed by the `Makefile` is minimal, so it may remove OpenCV options such as Python or Java wrappers if you have already installed OpenCV some other way.
#### Quick Install
First, change directories to where you want to install GoCV, and then use git to clone the repository to your local machine like this:
cd $HOME/folder/with/your/src/
git clone https://github.com/hybridgroup/gocv.git
Make sure to change `$HOME/folder/with/your/src/` to where you actually want to save the code.
The following make command should do everything to download and install OpenCV 4.6.0 on Raspbian:
cd $HOME/folder/with/your/src/gocv
make install_raspi
If it works correctly, at the end of the entire process, the following message should be displayed:
gocv version: 0.31.0
opencv lib version: 4.6.0
That's it, now you are ready to use GoCV.
## macOS
### Installation
You can install OpenCV 4.6.0 using Homebrew.
If you already have an earlier version of OpenCV (3.4.x) installed, you should probably remove it before installing the new version:
brew uninstall opencv
You can then install OpenCV 4.6.0:
brew install opencv
### pkgconfig Installation
pkg-config is used to determine the correct flags for compiling and linking OpenCV.
You can install it by using Homebrew:
brew install pkgconfig
### Verifying the installation
To verify your installation you can run one of the included examples.
First, change the current directory to the location of the GoCV repo:
cd $HOME/folder/with/your/src/gocv
Now you should be able to build or run any of the examples:
go run ./cmd/version/main.go
The version program should output the following:
gocv version: 0.31.0
opencv lib version: 4.6.0
### Custom Environment
By default, pkg-config is used to determine the correct flags for compiling and linking OpenCV. This behavior can be disabled by supplying `-tags customenv` when building/running your application. When building with this tag you will need to supply the CGO environment variables yourself.
For example:
export CGO_CXXFLAGS="--std=c++11"
export CGO_CPPFLAGS="-I/usr/local/Cellar/opencv/4.6.0/include"
export CGO_LDFLAGS="-L/usr/local/Cellar/opencv/4.6.0/lib -lopencv_stitching -lopencv_superres -lopencv_videostab -lopencv_aruco -lopencv_bgsegm -lopencv_bioinspired -lopencv_ccalib -lopencv_dnn_objdetect -lopencv_dpm -lopencv_face -lopencv_photo -lopencv_fuzzy -lopencv_hfs -lopencv_img_hash -lopencv_line_descriptor -lopencv_optflow -lopencv_reg -lopencv_rgbd -lopencv_saliency -lopencv_stereo -lopencv_structured_light -lopencv_phase_unwrapping -lopencv_surface_matching -lopencv_tracking -lopencv_datasets -lopencv_dnn -lopencv_plot -lopencv_xfeatures2d -lopencv_shape -lopencv_video -lopencv_ml -lopencv_ximgproc -lopencv_calib3d -lopencv_features2d -lopencv_highgui -lopencv_videoio -lopencv_flann -lopencv_xobjdetect -lopencv_imgcodecs -lopencv_objdetect -lopencv_xphoto -lopencv_imgproc -lopencv_core"
Please note that you will need to run these 3 lines once in your current session to set up the ENV variables needed to build or run the code. Once you have done so, you can execute code that uses GoCV with your custom environment like this:
go run -tags customenv ./cmd/version/main.go
## Windows
### Installation
The following assumes that you are running a 64-bit version of Windows 10.
In order to build and install OpenCV 4.6.0 on Windows, you must first download and install MinGW-W64 and CMake, as follows.
#### MinGW-W64
Download and run the MinGW-W64 compiler installer from [https://sourceforge.net/projects/mingw-w64/files/Toolchains%20targetting%20Win32/Personal%20Builds/mingw-builds/8.1.0/](https://sourceforge.net/projects/mingw-w64/files/Toolchains%20targetting%20Win32/Personal%20Builds/mingw-builds/8.1.0/).
The latest version of the MinGW-W64 toolchain is `8.1.0`, but any version from `8.X` on should work.
Choose the options for "posix" threads, and for "seh" exceptions handling, then install to the default location `c:\Program Files\mingw-w64\x86_64-8.1.0-posix-seh-rt_v6-rev0`.
Add the `C:\Program Files\mingw-w64\x86_64-8.1.0-posix-seh-rt_v6-rev0\mingw64\bin` path to your System Path.
#### CMake
Download and install CMake from [https://cmake.org/download/](https://cmake.org/download/) to the default location. The CMake installer will add CMake to your system path.
#### OpenCV 4.6.0 and OpenCV Contrib Modules
The following commands should do everything to download and install OpenCV 4.6.0 on Windows:
```
chdir %GOPATH%\src\gocv.io\x\gocv
win_build_opencv.cmd
```
The build might take up to one hour. Finally, add `C:\opencv\build\install\x64\mingw\bin` to your System Path.
### Verifying the installation
Change the current directory to the location of the GoCV repo:
```
chdir %GOPATH%\src\gocv.io\x\gocv
```
Now you should be able to build or run any of the command examples:
```
go run cmd\version\main.go
```
The version program should output the following:
```
gocv version: 0.31.0
opencv lib version: 4.6.0
```
That's it, now you are ready to use GoCV.
### Custom Environment
By default, OpenCV is expected to be in `C:\opencv\build\install\include`. This behavior can be disabled by supplying `-tags customenv` when building/running your application. When building with this tag you will need to supply the CGO environment variables yourself.
Because OpenCV embeds the version number in the names of the DLLs it produces, using this method is required if you're using a different version of OpenCV.
For example:
```
set CGO_CXXFLAGS="--std=c++11"
set CGO_CPPFLAGS=-IC:\opencv\build\install\include
set CGO_LDFLAGS=-LC:\opencv\build\install\x64\mingw\lib -lopencv_core460 -lopencv_face460 -lopencv_videoio460 -lopencv_imgproc460 -lopencv_highgui460 -lopencv_imgcodecs460 -lopencv_objdetect460 -lopencv_features2d460 -lopencv_video460 -lopencv_dnn460 -lopencv_xfeatures2d460 -lopencv_plot460 -lopencv_tracking460 -lopencv_img_hash460
```
Please note that you only need to run these 3 lines once in your current session to set up the needed environment variables. Once you have done so, you can execute code that uses GoCV with your custom environment like this:
```
go run -tags customenv cmd\version\main.go
```
## Android
There is some work in progress for running GoCV on Android using Gomobile. For information on how to install OpenCV/GoCV for Android, please see:
https://gist.github.com/ogero/c19458cf64bd3e91faae85c3ac887481
See original discussion here:
https://github.com/hybridgroup/gocv/issues/235
## Profiling
Since memory allocations for images in GoCV are done through C-based code, the Go garbage collector will not clean up all resources associated with a `Mat`. As a result, any `Mat` created *must* be closed to avoid memory leaks.
To ease the detection and repair of the resource leaks, GoCV provides a `Mat` profiler that records when each `Mat` is created and closed. Each time a `Mat` is allocated, the stack trace is added to the profile. When it is closed, the stack trace is removed. See the [runtime/pprof documentation](https://golang.org/pkg/runtime/pprof/#Profile).
In order to include the MatProfile custom profiler, you MUST build or run your application or tests using the `-tags matprofile` build tag. For example:
```
go run -tags matprofile cmd/version/main.go
```
You can get the profile's count at any time using:
```go
gocv.MatProfile.Count()
```
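For instance, the count can serve as a cheap leak check in a test (a sketch; `TestNoMatLeaks` is a hypothetical test name, and the file only builds when you run `go test -tags matprofile`):

```go
// +build matprofile

package main

import (
	"testing"

	"gocv.io/x/gocv"
)

func TestNoMatLeaks(t *testing.T) {
	before := gocv.MatProfile.Count()

	// Exercise the code under test.
	m := gocv.NewMat()
	m.Close()

	if after := gocv.MatProfile.Count(); after != before {
		t.Errorf("leaked %d Mat(s)", after-before)
	}
}
```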
You can display the current entries (the stack traces) with:
```go
var b bytes.Buffer
gocv.MatProfile.WriteTo(&b, 1)
fmt.Print(b.String())
```
This can be very helpful to track down a leak. For example, suppose you have
the following nonsense program:
```go
package main
import (
"bytes"
"fmt"
"gocv.io/x/gocv"
)
func leak() {
gocv.NewMat()
}
func main() {
fmt.Printf("initial MatProfile count: %v\n", gocv.MatProfile.Count())
leak()
fmt.Printf("final MatProfile count: %v\n", gocv.MatProfile.Count())
var b bytes.Buffer
gocv.MatProfile.WriteTo(&b, 1)
fmt.Print(b.String())
}
```
Running this program produces the following output:
```
initial MatProfile count: 0
final MatProfile count: 1
gocv.io/x/gocv.Mat profile: total 1
1 @ 0x40b936c 0x40b93b7 0x40b94e2 0x40b95af 0x402cd87 0x40558e1
# 0x40b936b gocv.io/x/gocv.newMat+0x4b /go/src/gocv.io/x/gocv/core.go:153
# 0x40b93b6 gocv.io/x/gocv.NewMat+0x26 /go/src/gocv.io/x/gocv/core.go:159
# 0x40b94e1 main.leak+0x21 /go/src/github.com/dougnd/gocvprofexample/main.go:11
# 0x40b95ae main.main+0xae /go/src/github.com/dougnd/gocvprofexample/main.go:16
# 0x402cd86 runtime.main+0x206 /usr/local/Cellar/go/1.11.1/libexec/src/runtime/proc.go:201
```
We can see that this program would leak memory. As it exited, it had one `Mat` that was never closed. The stack trace points to exactly which line the allocation happened on (line 11, the `gocv.NewMat()`).
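The fix is to close the `Mat` once it is no longer needed, typically with `defer` right at the allocation site. A corrected version of `leak` (renamed here for clarity) might look like this:

```go
func noLeak() {
	m := gocv.NewMat()
	// Closing the Mat frees the C-side allocation and removes its
	// stack trace from MatProfile, so the count returns to zero.
	defer m.Close()
}
```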
Furthermore, if the program is a long-running process or if GoCV is being used on a web server, it may be helpful to install the HTTP interface (see the [net/http/pprof](https://golang.org/pkg/net/http/pprof/) package). For example:
```go
package main
import (
"net/http"
_ "net/http/pprof"
"time"
"gocv.io/x/gocv"
)
func leak() {
gocv.NewMat()
}
func main() {
go func() {
ticker := time.NewTicker(time.Second)
for {
<-ticker.C
leak()
}
}()
http.ListenAndServe("localhost:6060", nil)
}
```
This will leak a `Mat` once per second. You can see the current profile count and stack traces by going to the installed HTTP debug interface: [http://localhost:6060/debug/pprof/gocv.io/x/gocv.Mat](http://localhost:6060/debug/pprof/gocv.io/x/gocv.Mat?debug=1).
## How to contribute
Please take a look at our [CONTRIBUTING.md](./CONTRIBUTING.md) document to understand our contribution guidelines.
Then check out our [ROADMAP.md](./ROADMAP.md) document to know what to work on next.
## Why this project exists
The [https://github.com/go-opencv/go-opencv](https://github.com/go-opencv/go-opencv) package for Go and OpenCV does not support any version above OpenCV 2.x, and work on adding support for OpenCV 3 had stalled for over a year, mostly due to the complexity of [SWIG](http://swig.org/). That is why we started this project.
The GoCV package uses a C-style wrapper around the OpenCV 4 C++ classes to avoid having to deal with applying SWIG to a huge existing codebase. The mappings are intended to match as closely as possible to the original OpenCV project structure, to make it easier to find things, and to be able to figure out where to add support to GoCV for additional OpenCV image filters, algorithms, and other features.
For example, the [OpenCV `videoio` module](https://github.com/opencv/opencv/tree/master/modules/videoio) wrappers can be found in the GoCV package in the `videoio.*` files.
This package was inspired by the original https://github.com/go-opencv/go-opencv project, the blog post https://medium.com/@peterleyssens/using-opencv-3-from-golang-5510c312a3c and the repo at https://github.com/sensorbee/opencv. Thank you all!
## License
Licensed under the Apache 2.0 license. Copyright (c) 2017-2021 The Hybrid Group.
Logo generated by GopherizeMe - https://gopherize.me
vendor/gocv.io/x/gocv/ROADMAP.md generated vendored Normal file
@@ -0,0 +1,400 @@
# Roadmap
This is a list of all of the functionality areas within OpenCV and OpenCV Contrib.
Any section listed with an "X" means that all of the relevant OpenCV functionality has been wrapped for use within GoCV.
Any section listed with **WORK STARTED** indicates that some work has been done, but not all functionality in that module has been completed. Any function listed under a section marked **WORK STARTED** still needs a wrapper implemented.
Any section that is simply listed indicates that no work has been done on that module so far.
Your pull requests will be greatly appreciated!
## Modules list
- [ ] **core. Core functionality - WORK STARTED**
- [X] **Basic structures**
- [ ] **Operations on arrays - WORK STARTED**. The following functions still need implementation:
- [ ] [Mahalanobis](https://docs.opencv.org/master/d2/de8/group__core__array.html#ga4493aee129179459cbfc6064f051aa7d)
- [ ] [mulTransposed](https://docs.opencv.org/master/d2/de8/group__core__array.html#gadc4e49f8f7a155044e3be1b9e3b270ab)
- [ ] [PCABackProject](https://docs.opencv.org/master/d2/de8/group__core__array.html#gab26049f30ee8e94f7d69d82c124faafc)
- [ ] [PCACompute](https://docs.opencv.org/master/d2/de8/group__core__array.html#ga4e2073c7311f292a0648f04c37b73781)
- [ ] [PCAProject](https://docs.opencv.org/master/d2/de8/group__core__array.html#ga6b9fbc7b3a99ebfd441bbec0a6bc4f88)
- [ ] [PSNR](https://docs.opencv.org/master/d2/de8/group__core__array.html#ga07aaf34ae31d226b1b847d8bcff3698f)
- [ ] [randn](https://docs.opencv.org/master/d2/de8/group__core__array.html#gaeff1f61e972d133a04ce3a5f81cf6808)
- [ ] [randShuffle](https://docs.opencv.org/master/d2/de8/group__core__array.html#ga6a789c8a5cb56c6dd62506179808f763)
- [ ] [randu](https://docs.opencv.org/master/d2/de8/group__core__array.html#ga1ba1026dca0807b27057ba6a49d258c0)
- [ ] [setRNGSeed](https://docs.opencv.org/master/d2/de8/group__core__array.html#ga757e657c037410d9e19e819569e7de0f)
- [ ] [SVBackSubst](https://docs.opencv.org/master/d2/de8/group__core__array.html#gab4e620e6fc6c8a27bb2be3d50a840c0b)
- [ ] [SVDecomp](https://docs.opencv.org/master/d2/de8/group__core__array.html#gab477b5b7b39b370bb03e75b19d2d5109)
- [ ] [theRNG](https://docs.opencv.org/master/d2/de8/group__core__array.html#ga75843061d150ad6564b5447e38e57722)
- [ ] XML/YAML Persistence
- [ ] [FileStorage](https://docs.opencv.org/master/da/d56/classcv_1_1FileStorage.html)
- [ ] **Clustering - WORK STARTED**. The following functions still need implementation:
- [ ] [partition](https://docs.opencv.org/master/d5/d38/group__core__cluster.html#ga2037c989e69b499c1aa271419f3a9b34)
- [ ] Optimization Algorithms
- [ ] [ConjGradSolver](https://docs.opencv.org/master/d0/d21/classcv_1_1ConjGradSolver.html)
- [ ] [DownhillSolver](https://docs.opencv.org/master/d4/d43/classcv_1_1DownhillSolver.html)
- [ ] [solveLP](https://docs.opencv.org/master/da/d01/group__core__optim.html#ga9a06d237a9d38ace891efa1ca1b5d00a)
- [ ] **imgproc. Image processing - WORK STARTED**
- [ ] **Image Filtering - WORK STARTED** The following functions still need implementation:
- [ ] [buildPyramid](https://docs.opencv.org/master/d4/d86/group__imgproc__filter.html#gacfdda2bc1ac55e96de7e9f0bce7238c0)
- [ ] [getDerivKernels](https://docs.opencv.org/master/d4/d86/group__imgproc__filter.html#ga6d6c23f7bd3f5836c31cfae994fc4aea)
- [ ] [getGaborKernel](https://docs.opencv.org/master/d4/d86/group__imgproc__filter.html#gae84c92d248183bd92fa713ce51cc3599)
- [ ] [morphologyExWithParams](https://docs.opencv.org/master/d4/d86/group__imgproc__filter.html#ga67493776e3ad1a3df63883829375201f)
- [ ] [pyrMeanShiftFiltering](https://docs.opencv.org/master/d4/d86/group__imgproc__filter.html#ga9fabdce9543bd602445f5db3827e4cc0)
- [ ] **Geometric Image Transformations - WORK STARTED** The following functions still need implementation:
- [ ] [convertMaps](https://docs.opencv.org/master/da/d54/group__imgproc__transform.html#ga9156732fa8f01be9ebd1a194f2728b7f)
- [ ] [getDefaultNewCameraMatrix](https://docs.opencv.org/master/da/d54/group__imgproc__transform.html#ga744529385e88ef7bc841cbe04b35bfbf)
- [ ] [initUndistortRectifyMap](https://docs.opencv.org/master/da/d54/group__imgproc__transform.html#ga7dfb72c9cf9780a347fbe3d1c47e5d5a)
- [ ] [initWideAngleProjMap](https://docs.opencv.org/master/da/d54/group__imgproc__transform.html#gaceb049ec48898d1dadd5b50c604429c8)
- [ ] [undistort](https://docs.opencv.org/master/da/d54/group__imgproc__transform.html#ga69f2545a8b62a6b0fc2ee060dc30559d)
- [ ] **Miscellaneous Image Transformations - WORK STARTED** The following functions still need implementation:
- [ ] [cvtColorTwoPlane](https://docs.opencv.org/master/d7/d1b/group__imgproc__misc.html#ga8e873314e72a1a6c0252375538fbf753)
- [ ] [floodFill](https://docs.opencv.org/master/d7/d1b/group__imgproc__misc.html#gaf1f55a048f8a45bc3383586e80b1f0d0)
- [ ] **Drawing Functions - WORK STARTED** The following functions still need implementation:
- [ ] [drawMarker](https://docs.opencv.org/master/d6/d6e/group__imgproc__draw.html#ga482fa7b0f578fcdd8a174904592a6250)
- [ ] [ellipse2Poly](https://docs.opencv.org/master/d6/d6e/group__imgproc__draw.html#ga727a72a3f6a625a2ae035f957c61051f)
- [ ] [fillConvexPoly](https://docs.opencv.org/master/d6/d6e/group__imgproc__draw.html#ga906aae1606ea4ed2f27bec1537f6c5c2)
- [ ] [getFontScaleFromHeight](https://docs.opencv.org/master/d6/d6e/group__imgproc__draw.html#ga442ff925c1a957794a1309e0ed3ba2c3)
- [ ] ColorMaps in OpenCV
- [ ] Planar Subdivision
- [ ] **Histograms - WORK STARTED** The following functions still need implementation:
- [ ] [EMD](https://docs.opencv.org/master/d6/dc7/group__imgproc__hist.html#ga902b8e60cc7075c8947345489221e0e0)
- [ ] [wrapperEMD](https://docs.opencv.org/master/d6/dc7/group__imgproc__hist.html#ga31fdda0864e64ca6b9de252a2611758b)
- [ ] **Structural Analysis and Shape Descriptors - WORK STARTED** The following functions still need implementation:
- [ ] [fitEllipse](https://docs.opencv.org/master/d3/dc0/group__imgproc__shape.html#gaf259efaad93098103d6c27b9e4900ffa)
- [ ] [fitEllipseAMS](https://docs.opencv.org/master/d3/dc0/group__imgproc__shape.html#ga69e90cda55c4e192a8caa0b99c3e4550)
- [ ] [fitEllipseDirect](https://docs.opencv.org/master/d3/dc0/group__imgproc__shape.html#ga6421884fd411923a74891998bbe9e813)
- [ ] [HuMoments](https://docs.opencv.org/master/d3/dc0/group__imgproc__shape.html#gab001db45c1f1af6cbdbe64df04c4e944)
- [ ] [intersectConvexConvex](https://docs.opencv.org/master/d3/dc0/group__imgproc__shape.html#ga8e840f3f3695613d32c052bec89e782c)
- [ ] [isContourConvex](https://docs.opencv.org/master/d3/dc0/group__imgproc__shape.html#ga8abf8010377b58cbc16db6734d92941b)
- [ ] [matchShapes](https://docs.opencv.org/master/d3/dc0/group__imgproc__shape.html#gaadc90cb16e2362c9bd6e7363e6e4c317)
- [ ] [minEnclosingTriangle](https://docs.opencv.org/master/d3/dc0/group__imgproc__shape.html#ga1513e72f6bbdfc370563664f71e0542f)
- [ ] [rotatedRectangleIntersection](https://docs.opencv.org/master/d3/dc0/group__imgproc__shape.html#ga8740e7645628c59d238b0b22c2abe2d4)
- [ ] **Motion Analysis and Object Tracking - WORK STARTED** The following functions still need implementation:
- [ ] [createHanningWindow](https://docs.opencv.org/master/d7/df3/group__imgproc__motion.html#ga80e5c3de52f6bab3a7c1e60e89308e1b)
- [ ] [phaseCorrelate](https://docs.opencv.org/master/d7/df3/group__imgproc__motion.html#ga552420a2ace9ef3fb053cd630fdb4952)
- [ ] **Feature Detection - WORK STARTED** The following functions still need implementation:
- [ ] [cornerEigenValsAndVecs](https://docs.opencv.org/master/dd/d1a/group__imgproc__feature.html#ga4055896d9ef77dd3cacf2c5f60e13f1c)
- [ ] [cornerHarris](https://docs.opencv.org/master/dd/d1a/group__imgproc__feature.html#gac1fc3598018010880e370e2f709b4345)
- [ ] [cornerMinEigenVal](https://docs.opencv.org/master/dd/d1a/group__imgproc__feature.html#ga3dbce297c1feb859ee36707e1003e0a8)
- [ ] [createLineSegmentDetector](https://docs.opencv.org/master/dd/d1a/group__imgproc__feature.html#ga6b2ad2353c337c42551b521a73eeae7d)
- [ ] [preCornerDetect](https://docs.opencv.org/master/dd/d1a/group__imgproc__feature.html#gaa819f39b5c994871774081803ae22586)
- [X] **Object Detection**
- [X] **imgcodecs. Image file reading and writing.**
- [X] **videoio. Video I/O**
- [X] **highgui. High-level GUI**
- [ ] **video. Video Analysis - WORK STARTED**
- [X] **Motion Analysis**
- [ ] **Object Tracking - WORK STARTED** The following functions still need implementation:
- [ ] [buildOpticalFlowPyramid](https://docs.opencv.org/master/dc/d6b/group__video__track.html#ga86640c1c470f87b2660c096d2b22b2ce)
- [ ] [estimateRigidTransform](https://docs.opencv.org/master/dc/d6b/group__video__track.html#ga762cbe5efd52cf078950196f3c616d48)
- [ ] [findTransformECC](https://docs.opencv.org/master/dc/d6b/group__video__track.html#ga7ded46f9a55c0364c92ccd2019d43e3a)
- [ ] [meanShift](https://docs.opencv.org/master/dc/d6b/group__video__track.html#ga7ded46f9a55c0364c92ccd2019d43e3a)
- [ ] [CamShift](https://docs.opencv.org/master/dc/d6b/group__video__track.html#gaef2bd39c8356f423124f1fe7c44d54a1)
- [ ] [DualTVL1OpticalFlow](https://docs.opencv.org/master/dc/d47/classcv_1_1DualTVL1OpticalFlow.html)
- [ ] [FarnebackOpticalFlow](https://docs.opencv.org/master/de/d9e/classcv_1_1FarnebackOpticalFlow.html)
- [ ] [KalmanFilter](https://docs.opencv.org/master/dd/d6a/classcv_1_1KalmanFilter.html)
- [ ] [SparsePyrLKOpticalFlow](https://docs.opencv.org/master/d7/d08/classcv_1_1SparsePyrLKOpticalFlow.html)
- [ ] [GOTURN](https://docs.opencv.org/master/d7/d4c/classcv_1_1TrackerGOTURN.html)
- [ ] **calib3d. Camera Calibration and 3D Reconstruction - WORK STARTED**. The following functions still need implementation:
- [ ] **Camera Calibration - WORK STARTED** The following functions still need implementation:
- [X] [calibrateCamera](https://docs.opencv.org/master/d9/d0c/group__calib3d.html)
- [ ] [calibrateCameraRO](https://docs.opencv.org/master/d9/d0c/group__calib3d.html)
- [ ] [calibrateHandEye](https://docs.opencv.org/master/d9/d0c/group__calib3d.html)
- [ ] [calibrationMatrixValues](https://docs.opencv.org/master/d9/d0c/group__calib3d.html)
- [ ] [checkChessboard](https://docs.opencv.org/master/d9/d0c/group__calib3d.html)
- [ ] [composeRT](https://docs.opencv.org/master/d9/d0c/group__calib3d.html)
- [ ] [computeCorrespondEpilines](https://docs.opencv.org/master/d9/d0c/group__calib3d.html)
- [ ] [convertPointsFromHomogeneous](https://docs.opencv.org/master/d9/d0c/group__calib3d.html)
- [ ] [convertPointsHomogeneous](https://docs.opencv.org/master/d9/d0c/group__calib3d.html)
- [ ] [convertPointsToHomogeneous](https://docs.opencv.org/master/d9/d0c/group__calib3d.html)
- [ ] [correctMatches](https://docs.opencv.org/master/d9/d0c/group__calib3d.html)
- [ ] [decomposeEssentialMat](https://docs.opencv.org/master/d9/d0c/group__calib3d.html)
- [ ] [decomposeHomographyMat](https://docs.opencv.org/master/d9/d0c/group__calib3d.html)
- [ ] [decomposeProjectionMatrix](https://docs.opencv.org/master/d9/d0c/group__calib3d.html)
- [ ] [drawChessboardCorners](https://docs.opencv.org/master/d9/d0c/group__calib3d.html)
- [ ] [drawFrameAxes](https://docs.opencv.org/master/d9/d0c/group__calib3d.html)
- [X] [estimateAffine2D](https://docs.opencv.org/master/d9/d0c/group__calib3d.html)
- [ ] [estimateAffine3D](https://docs.opencv.org/master/d9/d0c/group__calib3d.html)
- [ ] [filterHomographyDecompByVisibleRefpoints](https://docs.opencv.org/master/d9/d0c/group__calib3d.html)
- [ ] [filterSpeckles](https://docs.opencv.org/master/d9/d0c/group__calib3d.html)
- [ ] [find4QuadCornerSubpix](https://docs.opencv.org/master/d9/d0c/group__calib3d.html)
- [X] [findChessboardCorners](https://docs.opencv.org/master/d9/d0c/group__calib3d.html)
- [X] [findChessboardCornersSB](https://docs.opencv.org/master/d9/d0c/group__calib3d.html)
- [ ] [findCirclesGrid](https://docs.opencv.org/master/d9/d0c/group__calib3d.html)
- [ ] [findEssentialMat](https://docs.opencv.org/master/d9/d0c/group__calib3d.html)
- [ ] [findFundamentalMat](https://docs.opencv.org/master/d9/d0c/group__calib3d.html)
- [ ] [getDefaultNewCameraMatrix](https://docs.opencv.org/master/d9/d0c/group__calib3d.html)
- [ ] [getOptimalNewCameraMatrix](https://docs.opencv.org/master/d9/d0c/group__calib3d.html)
- [ ] [getValidDisparityROI](https://docs.opencv.org/master/d9/d0c/group__calib3d.html)
- [ ] [initCameraMatrix2D](https://docs.opencv.org/master/d9/d0c/group__calib3d.html)
- [ ] [initUndistortRectifyMap](https://docs.opencv.org/master/d9/d0c/group__calib3d.html)
- [ ] [initWideAngleProjMap](https://docs.opencv.org/master/d9/d0c/group__calib3d.html)
- [ ] [matMulDeriv](https://docs.opencv.org/master/d9/d0c/group__calib3d.html)
- [ ] [projectPoints](https://docs.opencv.org/master/d9/d0c/group__calib3d.html)
- [ ] [recoverPose](https://docs.opencv.org/master/d9/d0c/group__calib3d.html)
- [ ] [rectify3Collinear](https://docs.opencv.org/master/d9/d0c/group__calib3d.html)
- [ ] [reprojectImageTo3D](https://docs.opencv.org/master/d9/d0c/group__calib3d.html)
- [ ] [Rodrigues](https://docs.opencv.org/master/d9/d0c/group__calib3d.html)
- [ ] [RQDecomp3x3](https://docs.opencv.org/master/d9/d0c/group__calib3d.html)
- [ ] [sampsonDistance](https://docs.opencv.org/master/d9/d0c/group__calib3d.html)
- [ ] [solveP3P](https://docs.opencv.org/master/d9/d0c/group__calib3d.html)
- [ ] [solvePnP](https://docs.opencv.org/master/d9/d0c/group__calib3d.html)
- [ ] [solvePnPGeneric](https://docs.opencv.org/master/d9/d0c/group__calib3d.html)
- [ ] [solvePnPRansac](https://docs.opencv.org/master/d9/d0c/group__calib3d.html)
- [ ] [solvePnPRefineLM](https://docs.opencv.org/master/d9/d0c/group__calib3d.html)
- [ ] [solvePnPRefineVVS](https://docs.opencv.org/master/d9/d0c/group__calib3d.html)
- [ ] [stereoCalibrate](https://docs.opencv.org/master/d9/d0c/group__calib3d.html)
- [ ] [stereoRectify](https://docs.opencv.org/master/d9/d0c/group__calib3d.html)
- [ ] [stereoRectifyUncalibrated](https://docs.opencv.org/master/d9/d0c/group__calib3d.html)
- [ ] [triangulatePoints](https://docs.opencv.org/master/d9/d0c/group__calib3d.html)
- [ ] [validateDisparity](https://docs.opencv.org/master/d9/d0c/group__calib3d.html)
- [ ] **Fisheye - WORK STARTED** The following functions still need implementation:
- [ ] [calibrate](https://docs.opencv.org/master/db/d58/group__calib3d__fisheye.html#gad626a78de2b1dae7489e152a5a5a89e1)
- [ ] [distortPoints](https://docs.opencv.org/master/db/d58/group__calib3d__fisheye.html#ga75d8877a98e38d0b29b6892c5f8d7765)
- [ ] [projectPoints](https://docs.opencv.org/master/db/d58/group__calib3d__fisheye.html#gab1ad1dc30c42ee1a50ce570019baf2c4)
- [ ] [stereoCalibrate](https://docs.opencv.org/master/db/d58/group__calib3d__fisheye.html#gadbb3a6ca6429528ef302c784df47949b)
- [ ] [stereoRectify](https://docs.opencv.org/master/db/d58/group__calib3d__fisheye.html#gac1af58774006689056b0f2ef1db55ecc)
- [ ] **features2d. 2D Features Framework - WORK STARTED**
- [X] **Feature Detection and Description**
- [X] **Descriptor Matchers**
- [X] **Drawing Function of Keypoints and Matches**
- [ ] Object Categorization
- [ ] [BOWImgDescriptorExtractor](https://docs.opencv.org/master/d2/d6b/classcv_1_1BOWImgDescriptorExtractor.html)
- [ ] [BOWKMeansTrainer](https://docs.opencv.org/master/d4/d72/classcv_1_1BOWKMeansTrainer.html)
- [X] **objdetect. Object Detection**
- [X] **dnn. Deep Neural Network module**
- [ ] ml. Machine Learning
- [ ] flann. Clustering and Search in Multi-Dimensional Spaces
- [ ] **photo. Computational Photography - WORK STARTED** The following functions still need implementation:
- [ ] [inpaint](https://docs.opencv.org/master/d7/d8b/group__photo__inpaint.html#gaedd30dfa0214fec4c88138b51d678085)
- [ ] [denoise_TVL1](https://docs.opencv.org/master/d1/d79/group__photo__denoise.html#ga7602ed5ae17b7de40152b922227c4e4f)
- [X] [fastNlMeansDenoising](https://docs.opencv.org/master/d1/d79/group__photo__denoise.html#ga4c6b0031f56ea3f98f768881279ffe93)
- [X] [fastNlMeansDenoisingColored](https://docs.opencv.org/master/d1/d79/group__photo__denoise.html#ga03aa4189fc3e31dafd638d90de335617)
- [X] [fastNlMeansDenoisingMulti](https://docs.opencv.org/master/d1/d79/group__photo__denoise.html#gaf4421bf068c4d632ea7f0aa38e0bf172)
- [ ] [createCalibrateDebevec](https://docs.opencv.org/master/d6/df5/group__photo__hdr.html#ga7fed9707ad5f2cc0e633888867109f90)
- [ ] [createCalibrateRobertson](https://docs.opencv.org/master/d6/df5/group__photo__hdr.html#gae77813a21cd351a596619e5ff013be5d)
- [ ] [createMergeDebevec](https://docs.opencv.org/master/d6/df5/group__photo__hdr.html#gaa8eab36bc764abb2a225db7c945f87f9)
- [ ] [createMergeRobertson](https://docs.opencv.org/master/d6/df5/group__photo__hdr.html#ga460d4a1df1a7e8cdcf7445bb87a8fb78)
- [ ] [createTonemap](https://docs.opencv.org/master/d6/df5/group__photo__hdr.html#gabcbd653140b93a1fa87ccce94548cd0d)
- [ ] [createTonemapDrago](https://docs.opencv.org/master/d6/df5/group__photo__hdr.html#ga72bf92bb6b8653ee4be650ac01cf50b6)
- [ ] [createTonemapMantiuk](https://docs.opencv.org/master/d6/df5/group__photo__hdr.html#ga3b3f3bf083b7515802f039a6a70f2d21)
- [ ] [createTonemapReinhard](https://docs.opencv.org/master/d6/df5/group__photo__hdr.html#gadabe7f6bf1fa96ad0fd644df9182c2fb)
- [ ] [decolor](https://docs.opencv.org/master/d4/d32/group__photo__decolor.html#ga4864d4c007bda5dacdc5e9d4ed7e222c)
- [X] [detailEnhance](https://docs.opencv.org/master/df/dac/group__photo__render.html#ga0de660cb6f371a464a74c7b651415975)
- [X] [edgePreservingFilter](https://docs.opencv.org/master/df/dac/group__photo__render.html#gafaee2977597029bc8e35da6e67bd31f7)
- [X] [pencilSketch](https://docs.opencv.org/master/df/dac/group__photo__render.html#gae5930dd822c713b36f8529b21ddebd0c)
- [X] [stylization](https://docs.opencv.org/master/df/dac/group__photo__render.html#gacb0f7324017df153d7b5d095aed53206)
- [ ] stitching. Images stitching
## CUDA
- [ ] **core. - WORK STARTED** The following functions still need implementation:
- [ ] [cv::cuda::convertFp16](https://docs.opencv.org/master/d8/d40/group__cudacore__init.html#gaa1c52258763197958eb9e6681917f723)
- [ ] [cv::cuda::deviceSupports](https://docs.opencv.org/master/d8/d40/group__cudacore__init.html#ga170b10cc9af4aa8cce8c0afdb4b1d08c)
- [X] [cv::cuda::getDevice](https://docs.opencv.org/master/d8/d40/group__cudacore__init.html#ga6ded4ed8e4fc483a9863d31f34ec9c0e)
- [X] [cv::cuda::resetDevice](https://docs.opencv.org/master/d8/d40/group__cudacore__init.html#ga6153b6f461101374e655a54fc77e725e)
- [X] [cv::cuda::setDevice](https://docs.opencv.org/master/d8/d40/group__cudacore__init.html#gaefa34186b185de47851836dba537828b)
- [ ] **cudaarithm. Operations on Matrices - WORK STARTED** The following functions still need implementation:
- [ ] **core** The following functions still need implementation:
- [ ] [cv::cuda::copyMakeBorder](https://docs.opencv.org/master/de/d09/group__cudaarithm__core.html#ga5368db7656eacf846b40089c98053a49)
- [ ] [cv::cuda::createLookUpTable](https://docs.opencv.org/master/de/d09/group__cudaarithm__core.html#ga2d9d9780dea8c5cd85d3c19b7e01979c)
- [ ] [cv::cuda::merge](https://docs.opencv.org/master/de/d09/group__cudaarithm__core.html#gaac939dc3b178ee92fb6e7078f342622c)
- [ ] [cv::cuda::split](https://docs.opencv.org/master/de/d09/group__cudaarithm__core.html#gabe5013d55d4ff586b20393913726179e)
- [ ] [cv::cuda::transpose](https://docs.opencv.org/master/de/d09/group__cudaarithm__core.html#ga327b71c3cb811a904ccf5fba37fc29f2)
- [ ] **per-element operations - WORK STARTED** The following functions still need implementation:
- [X] [cv::cuda::absdiff](https://docs.opencv.org/master/d8/d34/group__cudaarithm__elem.html#gac062b283cf46ee90f74a773d3382ab54)
- [X] [cv::cuda::add](https://docs.opencv.org/master/d8/d34/group__cudaarithm__elem.html#ga5d9794bde97ed23d1c1485249074a8b1)
- [ ] [cv::cuda::addWeighted](https://docs.opencv.org/master/d8/d34/group__cudaarithm__elem.html#ga2cd14a684ea70c6ab2a63ee90ffe6201)
- [X] [cv::cuda::bitwise_and](https://docs.opencv.org/master/d8/d34/group__cudaarithm__elem.html#ga78d7c1a013877abd4237fbfc4e13bd76)
- [X] [cv::cuda::bitwise_not](https://docs.opencv.org/master/d8/d34/group__cudaarithm__elem.html#gae58159a2259ae1acc76b531c171cf06a)
- [X] [cv::cuda::bitwise_or](https://docs.opencv.org/master/d8/d34/group__cudaarithm__elem.html#gafd098ee3e51c68daa793999c1da3dfb7)
- [X] [cv::cuda::bitwise_xor](https://docs.opencv.org/master/d8/d34/group__cudaarithm__elem.html#ga3d95d4faafb099aacf18e8b915a4ad8d)
- [ ] [cv::cuda::cartToPolar](https://docs.opencv.org/master/d8/d34/group__cudaarithm__elem.html#ga82210c7d1c1d42e616e554bf75a53480)
- [ ] [cv::cuda::compare](https://docs.opencv.org/master/d8/d34/group__cudaarithm__elem.html#ga4d41cd679f4a83862a3de71a6057db54)
- [X] [cv::cuda::divide](https://docs.opencv.org/master/d8/d34/group__cudaarithm__elem.html#ga124315aa226260841e25cc0b9ea99dc3)
- [X] [cv::cuda::exp](https://docs.opencv.org/master/d8/d34/group__cudaarithm__elem.html#gac6e51541d3bb0a7a396128e4d5919b61)
- [ ] [cv::cuda::inRange](https://docs.opencv.org/master/d8/d34/group__cudaarithm__elem.html#gaf611ab6b1d85e951feb6f485b1ed9672)
- [X] [cv::cuda::log](https://docs.opencv.org/master/d8/d34/group__cudaarithm__elem.html#gaae9c60739e2d1a977b4d3250a0be42ca)
- [ ] [cv::cuda::lshift](https://docs.opencv.org/master/d8/d34/group__cudaarithm__elem.html#gafd072accecb14c9adccdad45e3bf2300)
- [ ] [cv::cuda::magnitude](https://docs.opencv.org/master/d8/d34/group__cudaarithm__elem.html#ga3d17f4fcd79d7c01fadd217969009463)
- [ ] [cv::cuda::magnitudeSqr](https://docs.opencv.org/master/d8/d34/group__cudaarithm__elem.html#ga7613e382d257e150033d0ce4d6098f6a)
- [X] [cv::cuda::max](https://docs.opencv.org/master/d8/d34/group__cudaarithm__elem.html#gadb5dd3d870f10c0866035755b929b1e7)
- [X] [cv::cuda::min](https://docs.opencv.org/master/d8/d34/group__cudaarithm__elem.html#ga74f0b05a65b3d949c237abb5e6c60867)
- [X] [cv::cuda::multiply](https://docs.opencv.org/master/d8/d34/group__cudaarithm__elem.html#ga497cc0615bf717e1e615143b56f00591)
- [ ] [cv::cuda::phase](https://docs.opencv.org/master/d8/d34/group__cudaarithm__elem.html#ga5b75ec01be06dcd6e27ada09a0d4656a)
- [ ] [cv::cuda::polarToCart](https://docs.opencv.org/master/d8/d34/group__cudaarithm__elem.html#ga01516a286a329c303c2db746513dd9df)
- [ ] [cv::cuda::pow](https://docs.opencv.org/master/d8/d34/group__cudaarithm__elem.html#ga82d04ef4bcc4dfa9bfbe76488007c6c4)
- [ ] [cv::cuda::rshift](https://docs.opencv.org/master/d8/d34/group__cudaarithm__elem.html#ga87af0b66358cc302676f35c1fd56c2ed)
- [X] [cv::cuda::sqr](https://docs.opencv.org/master/d8/d34/group__cudaarithm__elem.html#ga8aae233da90ce0ffe309ab8004342acb)
- [X] [cv::cuda::sqrt](https://docs.opencv.org/master/d8/d34/group__cudaarithm__elem.html#ga09303680cb1a5521a922b6d392028d8c)
- [X] [cv::cuda::subtract](https://docs.opencv.org/master/d8/d34/group__cudaarithm__elem.html#ga6eab60fc250059e2fda79c5636bd067f)
- [ ] **matrix reductions** The following functions still need implementation:
- [ ] [cv::cuda::absSum](https://docs.opencv.org/master/d5/de6/group__cudaarithm__reduce.html#ga690fa79ba4426c53f7d2bebf3d37a32a)
- [ ] [cv::cuda::calcAbsSum](https://docs.opencv.org/master/d5/de6/group__cudaarithm__reduce.html#ga15c403b76ab2c4d7ed0f5edc09891b7e)
- [ ] [cv::cuda::calcNorm](https://docs.opencv.org/master/d5/de6/group__cudaarithm__reduce.html#ga39d2826990d29b7e4b69dbe02bdae2e1)
- [ ] [cv::cuda::calcNormDiff](https://docs.opencv.org/master/d5/de6/group__cudaarithm__reduce.html#ga9be3d9a7b6c5760955f37d1039d01265)
- [ ] [cv::cuda::calcSqrSum](https://docs.opencv.org/master/d5/de6/group__cudaarithm__reduce.html#gac998c83597f6c206c78cee16aa87946f)
- [ ] [cv::cuda::calcSum](https://docs.opencv.org/master/d5/de6/group__cudaarithm__reduce.html#ga98a09144047f09f5cb1d6b6ea8e0856f)
- [ ] [cv::cuda::countNonZero](https://docs.opencv.org/master/d5/de6/group__cudaarithm__reduce.html#ga98a09144047f09f5cb1d6b6ea8e0856f)
- [ ] [cv::cuda::findMinMax](https://docs.opencv.org/master/d5/de6/group__cudaarithm__reduce.html#gae7f5f2aa9f65314470a76fccdff887f2)
- [ ] [cv::cuda::findMinMaxLoc](https://docs.opencv.org/master/d5/de6/group__cudaarithm__reduce.html#ga93916bc473a62d215d1130fab84d090a)
- [ ] [cv::cuda::integral](https://docs.opencv.org/master/d5/de6/group__cudaarithm__reduce.html#ga07e5104eba4bf45212ac9dbc5bf72ba6)
- [ ] [cv::cuda::meanStdDev](https://docs.opencv.org/master/d5/de6/group__cudaarithm__reduce.html#ga990a4db4c6d7e8f0f3a6685ba48fbddc)
- [ ] [cv::cuda::minMax](https://docs.opencv.org/master/d5/de6/group__cudaarithm__reduce.html#ga8d7de68c10717cf25e787e3c20d2dfee)
- [ ] [cv::cuda::minMaxLoc](https://docs.opencv.org/master/d5/de6/group__cudaarithm__reduce.html#ga5cacbc2a2323c4eaa81e7390c5d9f530)
- [ ] [cv::cuda::norm](https://docs.opencv.org/master/d5/de6/group__cudaarithm__reduce.html#ga6c01988a58d92126a7c60a4ab76d8324)
- [ ] [cv::cuda::normalize](https://docs.opencv.org/master/d5/de6/group__cudaarithm__reduce.html#ga4da4738b9956a5baaa2f5f8c2fba438a)
- [ ] [cv::cuda::rectStdDev](https://docs.opencv.org/master/d5/de6/group__cudaarithm__reduce.html#gac311484a4e57cab2ce2cfdc195fda7ee)
- [ ] [cv::cuda::reduce](https://docs.opencv.org/master/d5/de6/group__cudaarithm__reduce.html#ga21d57f661db7be093caf2c4378be2007)
- [ ] [cv::cuda::sqrIntegral](https://docs.opencv.org/master/d5/de6/group__cudaarithm__reduce.html#ga40c75196202706399a60bf6ba7a052ac)
- [ ] [cv::cuda::sqrSum](https://docs.opencv.org/master/d5/de6/group__cudaarithm__reduce.html#ga056c804ebf5d2eb9f6f35e3dcb01524c)
- [ ] [cv::cuda::sum](https://docs.opencv.org/master/d5/de6/group__cudaarithm__reduce.html#ga1f582844670199281e8012733b50c582)
- [ ] **Operations on matrices** The following functions still need implementation:
- [ ] [cv::cuda::createConvolution](https://docs.opencv.org/4.5.0/d9/d88/group__cudaarithm__arithm.html#ga2695e05ef624bf3ce03cfbda383a821d)
- [ ] [cv::cuda::createDFT](https://docs.opencv.org/4.5.0/d9/d88/group__cudaarithm__arithm.html#ga0f72d063b73c8bb995678525eb076f10)
- [ ] [cv::cuda::dft](https://docs.opencv.org/4.5.0/d9/d88/group__cudaarithm__arithm.html#gadea99cb15a715c983bcc2870d65a2e78)
- [ ] [cv::cuda::gemm](https://docs.opencv.org/4.5.0/d9/d88/group__cudaarithm__arithm.html#ga42efe211d7a43bbc922da044c4f17130)
- [ ] [cv::cuda::mulAndScaleSpectrums](https://docs.opencv.org/4.5.0/d9/d88/group__cudaarithm__arithm.html#ga5704c25b8be4f19da812e6d98c8ee464)
- [ ] [cv::cuda::mulSpectrums](https://docs.opencv.org/4.5.0/d9/d88/group__cudaarithm__arithm.html#gab3e8900d67c4f59bdc137a0495206cd8)
- [X] **cudabgsegm. Background Segmentation**
- [ ] **cudacodec** Video Encoding/Decoding. The following functions still need implementation:
- [ ] [cv::cuda::VideoReader](https://docs.opencv.org/master/db/ded/classcv_1_1cudacodec_1_1VideoReader.html)
- [ ] [cv::cuda::VideoWriter](https://docs.opencv.org/master/df/dde/classcv_1_1cudacodec_1_1VideoWriter.html)
- [ ] **cudafeatures2d** Feature Detection and Description. The following functions still need implementation:
- [ ] [cv::cuda::FastFeatureDetector](https://docs.opencv.org/master/d4/d6a/classcv_1_1cuda_1_1FastFeatureDetector.html)
- [ ] [cv::cuda::ORB](https://docs.opencv.org/master/da/d44/classcv_1_1cuda_1_1ORB.html)
- [ ] **cudafilters. Image Filtering - WORK STARTED** The following functions still need implementation:
- [ ] [cv::cuda::createBoxFilter](https://docs.opencv.org/master/dc/d66/group__cudafilters.html#ga3113b66e289bad7caef412e6e13ec2be)
- [ ] [cv::cuda::createBoxMaxFilter](https://docs.opencv.org/master/dc/d66/group__cudafilters.html#gaaf4740c51128d23a37f6f1b22cee49e8)
- [ ] [cv::cuda::createBoxMinFilter](https://docs.opencv.org/master/dc/d66/group__cudafilters.html#ga77fd36949bc8d92aabc120b4b1cfaafa)
- [ ] [cv::cuda::createColumnSumFilter](https://docs.opencv.org/master/dc/d66/group__cudafilters.html#gac13bf7c41a34bfde2a7f33ad8caacfdf)
- [ ] [cv::cuda::createDerivFilter](https://docs.opencv.org/master/dc/d66/group__cudafilters.html#ga14d76dc6982ce739c67198f52bc16ee1)
- [ ] [cv::cuda::createLaplacianFilter](https://docs.opencv.org/master/dc/d66/group__cudafilters.html#ga53126e88bb7e6185dcd5628e28e42cd2)
- [ ] [cv::cuda::createLinearFilter](https://docs.opencv.org/master/dc/d66/group__cudafilters.html#ga57cb1804ad9d1280bf86433858daabf9)
- [ ] [cv::cuda::createMorphologyFilter](https://docs.opencv.org/master/dc/d66/group__cudafilters.html#gae58694e07be6bdbae126f36c75c08ee6)
- [ ] [cv::cuda::createRowSumFilter](https://docs.opencv.org/master/dc/d66/group__cudafilters.html#gaf735de273ccb5072f3c27816fb97a53a)
- [ ] [cv::cuda::createScharrFilter](https://docs.opencv.org/master/dc/d66/group__cudafilters.html#ga4ac8df158e5771ddb0bd5c9091188ce6)
- [ ] [cv::cuda::createSeparableLinearFilter](https://docs.opencv.org/master/dc/d66/group__cudafilters.html#gaf7b79a9a92992044f328dad07a52c4bf)
- [ ] **cudaimgproc. Image Processing - WORK STARTED** The following functions still need implementation:
- [ ] [cv::cuda::TemplateMatching](https://docs.opencv.org/master/d2/d58/classcv_1_1cuda_1_1TemplateMatching.html)
- [ ] [cv::cuda::alphaComp](https://docs.opencv.org/master/db/d8c/group__cudaimgproc__color.html#ga08a698700458d9311390997b57fbf8dc)
- [ ] [cv::cuda::demosaicing](https://docs.opencv.org/master/db/d8c/group__cudaimgproc__color.html#ga7fb153572b573ebd2d7610fcbe64166e)
- [ ] [cv::cuda::gammaCorrection](https://docs.opencv.org/master/db/d8c/group__cudaimgproc__color.html#gaf4195a8409c3b8fbfa37295c2b2c4729)
- [ ] [cv::cuda::swapChannels](https://docs.opencv.org/master/db/d8c/group__cudaimgproc__color.html#ga75a29cc4a97cde0d43ea066b01de927e)
- [ ] [cv::cuda::calcHist](https://docs.opencv.org/master/d8/d0e/group__cudaimgproc__hist.html#gaaf3944106890947020bb4522a7619c26)
- [ ] [cv::cuda::CLAHE](https://docs.opencv.org/master/db/d79/classcv_1_1cuda_1_1CLAHE.html)
- [ ] [cv::cuda::equalizeHist](https://docs.opencv.org/master/d8/d0e/group__cudaimgproc__hist.html#ga2384be74bd2feba7e6c46815513f0060)
- [ ] [cv::cuda::evenLevels](https://docs.opencv.org/master/d8/d0e/group__cudaimgproc__hist.html#ga2f2cbd21dc6d7367a7c4ee1a826f389d)
- [ ] [cv::cuda::histEven](https://docs.opencv.org/master/d8/d0e/group__cudaimgproc__hist.html#gacd3b14279fb77a57a510cb8c89a1856f)
- [ ] [cv::cuda::histRange](https://docs.opencv.org/master/d8/d0e/group__cudaimgproc__hist.html#ga87819085c1059186d9cdeacd92cea783)
- [ ] [cv::cuda::HoughCirclesDetector](https://docs.opencv.org/master/da/d80/classcv_1_1cuda_1_1HoughCirclesDetector.html)
- [ ] [cv::cuda::createGoodFeaturesToTrackDetector](https://docs.opencv.org/master/dc/d6d/group__cudaimgproc__feature.html#ga478b474a598ece101f7e706fee2c8e91)
- [ ] [cv::cuda::createHarrisCorner](https://docs.opencv.org/master/dc/d6d/group__cudaimgproc__feature.html#ga3e5878a803e9bba51added0c10101979)
- [ ] [cv::cuda::createMinEigenValCorner](https://docs.opencv.org/master/dc/d6d/group__cudaimgproc__feature.html#ga7457fd4b53b025f990b1c1dd1b749915)
- [ ] [cv::cuda::bilateralFilter](https://docs.opencv.org/master/d0/d05/group__cudaimgproc.html#ga6abeaecdd4e7edc0bd1393a04f4f20bd)
- [ ] [cv::cuda::blendLinear](https://docs.opencv.org/master/d0/d05/group__cudaimgproc.html#ga4793607e5729bcc15b27ea33d9fe335e)
- [ ] [cv::cuda::meanShiftFiltering](https://docs.opencv.org/master/d0/d05/group__cudaimgproc.html#gae13b3035bc6df0e512d876dbb8c00555)
- [ ] [cv::cuda::meanShiftProc](https://docs.opencv.org/master/d0/d05/group__cudaimgproc.html#ga6039dc8ecbe2f912bc83fcc9b3bcca39)
- [ ] [cv::cuda::meanShiftSegmentation](https://docs.opencv.org/master/d0/d05/group__cudaimgproc.html#ga70ed80533a448829dc48cf22b1845c16)
- [X] **cudaobjdetect. Object Detection**
- [ ] **cudaoptflow. Optical Flow - WORK STARTED** The following functions still need implementation:
- [ ] [BroxOpticalFlow](https://docs.opencv.org/master/d7/d18/classcv_1_1cuda_1_1BroxOpticalFlow.html)
- [ ] [DenseOpticalFlow](https://docs.opencv.org/master/d6/d4a/classcv_1_1cuda_1_1DenseOpticalFlow.html)
- [ ] [DensePyrLKOpticalFlow](https://docs.opencv.org/master/d0/da4/classcv_1_1cuda_1_1DensePyrLKOpticalFlow.html)
- [ ] [FarnebackOpticalFlow](https://docs.opencv.org/master/d9/d30/classcv_1_1cuda_1_1FarnebackOpticalFlow.html)
- [ ] [NvidiaHWOpticalFlow](https://docs.opencv.org/master/d5/d26/classcv_1_1cuda_1_1NvidiaHWOpticalFlow.html)
- [ ] [NvidiaOpticalFlow_1_0](https://docs.opencv.org/master/dc/d9d/classcv_1_1cuda_1_1NvidiaOpticalFlow__1__0.html)
- [ ] [SparseOpticalFlow](https://docs.opencv.org/master/d5/dcf/classcv_1_1cuda_1_1SparseOpticalFlow.html)
- [ ] **[SparsePyrLKOpticalFlow](https://docs.opencv.org/master/d7/d05/classcv_1_1cuda_1_1SparsePyrLKOpticalFlow.html) - WORK STARTED**
- [ ] **cudastereo** Stereo Correspondence
- [ ] [cv::cuda::createDisparityBilateralFilter](https://docs.opencv.org/master/dd/d47/group__cudastereo.html#gaafb5f9902f7a9e74cb2cd4e680569590)
- [ ] [cv::cuda::createStereoBeliefPropagation](https://docs.opencv.org/master/dd/d47/group__cudastereo.html#ga8d22dd80bdfb4e3d7d2ac09e8a07c22b)
- [ ] [cv::cuda::createStereoBM](https://docs.opencv.org/master/dd/d47/group__cudastereo.html#ga77edc901350dd0a7f46ec5aca4138039)
- [ ] [cv::cuda::createStereoConstantSpaceBP](https://docs.opencv.org/master/dd/d47/group__cudastereo.html#gaec3b49c7cf9f7701a6f549a227be4df2)
- [ ] [cv::cuda::createStereoSGM](https://docs.opencv.org/master/dd/d47/group__cudastereo.html#gafb7e5284de5f488d664c3155acb12c93)
- [ ] [cv::cuda::drawColorDisp](https://docs.opencv.org/master/dd/d47/group__cudastereo.html#ga469b23a77938dd7c06861e59cecc08c5)
- [ ] [cv::cuda::reprojectImageTo3D](https://docs.opencv.org/master/dd/d47/group__cudastereo.html#gaff851e3932da0f3e74d1be1d8855f094)
- [X] **cudawarping. Image Warping**
## Contrib modules list
- [ ] alphamat. Alpha Matting
- [X] **aruco. ArUco Marker Detection - WORK STARTED**
- [ ] barcode. Barcode detecting and decoding methods
- [X] **bgsegm. Improved Background-Foreground Segmentation Methods - WORK STARTED**
- [ ] bioinspired. Biologically inspired vision models and derived tools
- [ ] ccalib. Custom Calibration Pattern for 3D reconstruction
- [ ] cnn_3dobj. 3D object recognition and pose estimation API
- [ ] cvv. GUI for Interactive Visual Debugging of Computer Vision Programs
- [ ] datasets. Framework for working with different datasets
- [ ] dnn_modern. Deep Learning Modern Module
- [ ] dnn_objdetect. DNN used for object detection
- [ ] dnn_superres. DNN used for super resolution
- [ ] dpm. Deformable Part-based Models
- [ ] **face. Face Recognition - WORK STARTED**
- [ ] freetype. Drawing UTF-8 strings with freetype/harfbuzz
- [ ] fuzzy. Image processing based on fuzzy mathematics
- [ ] hdf. Hierarchical Data Format I/O routines
- [ ] hfs. Hierarchical Feature Selection for Efficient Image Segmentation
- [X] **img_hash. The module brings implementations of different image hashing algorithms.**
- [ ] intensity_transform. The module brings implementations of intensity transformation algorithms to adjust image contrast.
- [ ] line_descriptor. Binary descriptors for lines extracted from an image
- [ ] mcc. Macbeth Chart module
- [ ] optflow. Optical Flow Algorithms
- [ ] ovis. OGRE 3D Visualiser
- [ ] phase_unwrapping. Phase Unwrapping API
- [ ] plot. Plot function for Mat data
- [ ] quality. Image Quality Analysis (IQA) API
- [ ] rapid. silhouette based 3D object tracking
- [ ] reg. Image Registration
- [ ] rgbd. RGB-Depth Processing
- [ ] saliency. Saliency API
- [ ] sfm. Structure From Motion
- [ ] shape. Shape Distance and Matching
- [ ] stereo. Stereo Correspondence Algorithms
- [ ] structured_light. Structured Light API
- [ ] superres. Super Resolution
- [ ] surface_matching. Surface Matching
- [ ] text. Scene Text Detection and Recognition
- [ ] **tracking. Tracking API - WORK STARTED**
- [ ] videostab. Video Stabilization
- [ ] viz. 3D Visualizer
- [X] **wechat_qrcode. WeChat QR code detector for detecting and parsing QR code**
- [ ] **xfeatures2d. Extra 2D Features Framework - WORK STARTED**
- [ ] **ximgproc. Extended Image Processing - WORK STARTED**
- [ ] xobjdetect. Extended object detection
- [X] **xphoto. Additional photo processing algorithms**
vendor/gocv.io/x/gocv/appveyor.yml generated vendored Normal file
@@ -0,0 +1,36 @@
version: "{build}"
clone_folder: c:\gopath\src\gocv.io\x\gocv
platform:
  - MinGW_x64
environment:
  GOPATH: c:\gopath
  GOROOT: c:\go
  GOVERSION: 1.16
  TEST_EXTERNAL: 1
  APPVEYOR_SAVE_CACHE_ON_ERROR: true
cache:
  - C:\opencv -> appveyor_build_opencv.cmd
install:
  - if not exist "C:\opencv" appveyor_build_opencv.cmd
  - set PATH=C:\Perl\site\bin;C:\Perl\bin;C:\Windows\system32;C:\Windows;C:\Windows\System32\Wbem;C:\Windows\System32\WindowsPowerShell\v1.0\;C:\Program Files\7-Zip;C:\Program Files\Microsoft\Web Platform Installer\;C:\Tools\PsTools;C:\Program Files (x86)\CMake\bin;C:\go\bin;C:\Tools\NuGet;C:\Program Files\LLVM\bin;C:\Tools\curl\bin;C:\ProgramData\chocolatey\bin;C:\Program Files (x86)\Yarn\bin;C:\Users\appveyor\AppData\Local\Yarn\bin;C:\Program Files\AppVeyor\BuildAgent\
  - set PATH=%PATH%;C:\mingw-w64\x86_64-7.3.0-posix-seh-rt_v5-rev0\mingw64\bin
  - set PATH=%PATH%;C:\Tools\GitVersion;C:\Program Files\Git LFS;C:\Program Files\Git\cmd;C:\Program Files\Git\usr\bin;C:\opencv\build\install\x64\mingw\bin;
  - echo %PATH%
  - echo %GOPATH%
  - go version
  - cd c:\gopath\src\gocv.io\x\gocv
  - go get -d .
  - set GOCV_CAFFE_TEST_FILES=C:\opencv\testdata
  - set GOCV_TENSORFLOW_TEST_FILES=C:\opencv\testdata
  - set GOCV_ONNX_TEST_FILES=C:\opencv\testdata
  - set OPENCV_ENABLE_NONFREE=ON
  - go env
build_script:
  - go test -tags matprofile -v .
  - go test -tags matprofile -v ./contrib
vendor/gocv.io/x/gocv/appveyor_build_opencv.cmd generated vendored Normal file
@@ -0,0 +1,29 @@
if not exist "C:\opencv" mkdir "C:\opencv"
if not exist "C:\opencv\build" mkdir "C:\opencv\build"
if not exist "C:\opencv\testdata" mkdir "C:\opencv\testdata"
appveyor DownloadFile https://github.com/opencv/opencv/archive/4.6.0.zip -FileName c:\opencv\opencv-4.6.0.zip
7z x c:\opencv\opencv-4.6.0.zip -oc:\opencv -y
del c:\opencv\opencv-4.6.0.zip /q
appveyor DownloadFile https://github.com/opencv/opencv_contrib/archive/4.6.0.zip -FileName c:\opencv\opencv_contrib-4.6.0.zip
7z x c:\opencv\opencv_contrib-4.6.0.zip -oc:\opencv -y
del c:\opencv\opencv_contrib-4.6.0.zip /q
cd C:\opencv\build
set PATH=C:\Perl\site\bin;C:\Perl\bin;C:\Windows\system32;C:\Windows;C:\Windows\System32\Wbem;C:\Windows\System32\WindowsPowerShell\v1.0\;C:\Program Files\7-Zip;C:\Program Files\Microsoft\Web Platform Installer\;C:\Tools\PsTools;C:\Program Files (x86)\CMake\bin;C:\go\bin;C:\Tools\NuGet;C:\Program Files\LLVM\bin;C:\Tools\curl\bin;C:\ProgramData\chocolatey\bin;C:\Program Files (x86)\Yarn\bin;C:\Users\appveyor\AppData\Local\Yarn\bin;C:\Program Files\AppVeyor\BuildAgent\
set PATH=%PATH%;C:\mingw-w64\x86_64-8.1.0-posix-seh-rt_v6-rev0\mingw64\bin
dir C:\opencv
cmake C:\opencv\opencv-4.6.0 -G "MinGW Makefiles" -BC:\opencv\build -DENABLE_CXX11=ON -DOPENCV_EXTRA_MODULES_PATH=C:\opencv\opencv_contrib-4.6.0\modules -DBUILD_SHARED_LIBS=ON -DWITH_IPP=OFF -DWITH_MSMF=OFF -DBUILD_EXAMPLES=OFF -DBUILD_TESTS=OFF -DBUILD_PERF_TESTS=OFF -DBUILD_opencv_java=OFF -DBUILD_opencv_python=OFF -DBUILD_opencv_python2=OFF -DBUILD_opencv_python3=OFF -DBUILD_DOCS=OFF -DENABLE_PRECOMPILED_HEADERS=OFF -DBUILD_opencv_saliency=OFF -DBUILD_opencv_wechat_qrcode=ON -DCPU_DISPATCH= -DBUILD_opencv_gapi=OFF -DOPENCV_GENERATE_PKGCONFIG=ON -DOPENCV_ENABLE_NONFREE=ON -DWITH_OPENCL_D3D11_NV=OFF -DOPENCV_ALLOCATOR_STATS_COUNTER_TYPE=int64_t -DWITH_TBB=ON -Wno-dev
mingw32-make -j%NUMBER_OF_PROCESSORS%
mingw32-make install
appveyor DownloadFile https://raw.githubusercontent.com/opencv/opencv_extra/master/testdata/dnn/bvlc_googlenet.prototxt -FileName C:\opencv\testdata\bvlc_googlenet.prototxt
appveyor DownloadFile https://raw.githubusercontent.com/WeChatCV/opencv_3rdparty/wechat_qrcode/detect.caffemodel -FileName C:\opencv\testdata\detect.caffemodel
appveyor DownloadFile https://raw.githubusercontent.com/WeChatCV/opencv_3rdparty/wechat_qrcode/detect.prototxt -FileName C:\opencv\testdata\detect.prototxt
appveyor DownloadFile https://raw.githubusercontent.com/WeChatCV/opencv_3rdparty/wechat_qrcode/sr.caffemodel -FileName C:\opencv\testdata\sr.caffemodel
appveyor DownloadFile https://raw.githubusercontent.com/WeChatCV/opencv_3rdparty/wechat_qrcode/sr.prototxt -FileName C:\opencv\testdata\sr.prototxt
appveyor DownloadFile http://dl.caffe.berkeleyvision.org/bvlc_googlenet.caffemodel -FileName C:\opencv\testdata\bvlc_googlenet.caffemodel
appveyor DownloadFile https://storage.googleapis.com/download.tensorflow.org/models/inception5h.zip -FileName C:\opencv\testdata\inception5h.zip
appveyor DownloadFile https://github.com/onnx/models/raw/main/vision/classification/inception_and_googlenet/googlenet/model/googlenet-9.onnx -FileName C:\opencv\testdata\googlenet-9.onnx
7z x C:\opencv\testdata\inception5h.zip -oC:\opencv\testdata tensorflow_inception_graph.pb -y
rmdir c:\opencv\opencv-4.6.0 /s /q
rmdir c:\opencv\opencv_contrib-4.6.0 /s /q
vendor/gocv.io/x/gocv/asyncarray.cpp generated vendored Normal file
@@ -0,0 +1,28 @@
// +build openvino
#include <string.h>
#include "asyncarray.h"
// AsyncArray_New creates a new empty AsyncArray
AsyncArray AsyncArray_New() {
return new cv::AsyncArray();
}
// AsyncArray_Close deletes an existing AsyncArray
void AsyncArray_Close(AsyncArray a) {
delete a;
}
const char* AsyncArray_GetAsync(AsyncArray async_out,Mat out) {
try {
async_out->get(*out);
} catch(cv::Exception ex) {
return ex.err.c_str();
}
return "";
}
AsyncArray Net_forwardAsync(Net net, const char* outputName) {
return new cv::AsyncArray(net->forwardAsync(outputName));
}
vendor/gocv.io/x/gocv/asyncarray.go generated vendored Normal file
@@ -0,0 +1,52 @@
// +build openvino
package gocv
import (
"errors"
)
/*
#include <stdlib.h>
#include "dnn.h"
#include "asyncarray.h"
#include "core.h"
*/
import "C"
type AsyncArray struct {
p C.AsyncArray
}
// NewAsyncArray returns a new empty AsyncArray.
func NewAsyncArray() AsyncArray {
return newAsyncArray(C.AsyncArray_New())
}
// Ptr returns the AsyncArray's underlying object pointer.
func (a *AsyncArray) Ptr() C.AsyncArray {
return a.p
}
// Get copies the asynchronous result into the provided Mat.
func (m *AsyncArray) Get(mat *Mat) error {
result := C.AsyncArray_GetAsync(m.p, mat.p)
err := C.GoString(result)
if len(err) > 0 {
return errors.New(err)
}
return nil
}
// newAsyncArray returns a new AsyncArray from a C AsyncArray
func newAsyncArray(p C.AsyncArray) AsyncArray {
return AsyncArray{p: p}
}
// Close the AsyncArray object.
func (a *AsyncArray) Close() error {
C.AsyncArray_Close(a.p)
a.p = nil
return nil
}
vendor/gocv.io/x/gocv/asyncarray.h generated vendored Normal file
@@ -0,0 +1,23 @@
#ifdef __cplusplus
#include <opencv2/opencv.hpp>
extern "C" {
#endif
#include "core.h"
#include "dnn.h"
#ifdef __cplusplus
typedef cv::AsyncArray* AsyncArray;
#else
typedef void* AsyncArray;
#endif
AsyncArray AsyncArray_New();
const char* AsyncArray_GetAsync(AsyncArray async_out,Mat out);
void AsyncArray_Close(AsyncArray a);
AsyncArray Net_forwardAsync(Net net, const char* outputName);
#ifdef __cplusplus
}
#endif
vendor/gocv.io/x/gocv/calib3d.cpp generated vendored Normal file
@@ -0,0 +1,82 @@
#include "calib3d.h"
void Fisheye_UndistortImage(Mat distorted, Mat undistorted, Mat k, Mat d) {
cv::fisheye::undistortImage(*distorted, *undistorted, *k, *d);
}
void Fisheye_UndistortImageWithParams(Mat distorted, Mat undistorted, Mat k, Mat d, Mat knew, Size size) {
cv::Size sz(size.width, size.height);
cv::fisheye::undistortImage(*distorted, *undistorted, *k, *d, *knew, sz);
}
void Fisheye_UndistortPoints(Mat distorted, Mat undistorted, Mat k, Mat d, Mat r, Mat p) {
cv::fisheye::undistortPoints(*distorted, *undistorted, *k, *d, *r, *p);
}
void Fisheye_EstimateNewCameraMatrixForUndistortRectify(Mat k, Mat d, Size imgSize, Mat r, Mat p, double balance, Size newSize, double fovScale) {
cv::Size newSz(newSize.width, newSize.height);
cv::Size imgSz(imgSize.width, imgSize.height);
cv::fisheye::estimateNewCameraMatrixForUndistortRectify(*k, *d, imgSz, *r, *p, balance, newSz, fovScale);
}
void InitUndistortRectifyMap(Mat cameraMatrix,Mat distCoeffs,Mat r,Mat newCameraMatrix,Size size,int m1type,Mat map1,Mat map2) {
cv::Size sz(size.width, size.height);
cv::initUndistortRectifyMap(*cameraMatrix,*distCoeffs,*r,*newCameraMatrix,sz,m1type,*map1,*map2);
}
Mat GetOptimalNewCameraMatrixWithParams(Mat cameraMatrix,Mat distCoeffs,Size size,double alpha,Size newImgSize,Rect* validPixROI,bool centerPrincipalPoint) {
cv::Size sz(size.width, size.height);
cv::Size newSize(newImgSize.width, newImgSize.height);
cv::Rect rect(validPixROI->x,validPixROI->y,validPixROI->width,validPixROI->height);
cv::Mat* mat = new cv::Mat(cv::getOptimalNewCameraMatrix(*cameraMatrix,*distCoeffs,sz,alpha,newSize,&rect,centerPrincipalPoint));
validPixROI->x = rect.x;
validPixROI->y = rect.y;
validPixROI->width = rect.width;
validPixROI->height = rect.height;
return mat;
}
double CalibrateCamera(Points3fVector objectPoints, Points2fVector imagePoints, Size imageSize, Mat cameraMatrix, Mat distCoeffs, Mat rvecs, Mat tvecs, int flag) {
return cv::calibrateCamera(*objectPoints, *imagePoints, cv::Size(imageSize.width, imageSize.height), *cameraMatrix, *distCoeffs, *rvecs, *tvecs, flag);
}
void Undistort(Mat src, Mat dst, Mat cameraMatrix, Mat distCoeffs, Mat newCameraMatrix) {
cv::undistort(*src, *dst, *cameraMatrix, *distCoeffs, *newCameraMatrix);
}
void UndistortPoints(Mat distorted, Mat undistorted, Mat k, Mat d, Mat r, Mat p) {
cv::undistortPoints(*distorted, *undistorted, *k, *d, *r, *p);
}
bool FindChessboardCorners(Mat image, Size patternSize, Mat corners, int flags) {
cv::Size sz(patternSize.width, patternSize.height);
return cv::findChessboardCorners(*image, sz, *corners, flags);
}
bool FindChessboardCornersSB(Mat image, Size patternSize, Mat corners, int flags) {
cv::Size sz(patternSize.width, patternSize.height);
return cv::findChessboardCornersSB(*image, sz, *corners, flags);
}
bool FindChessboardCornersSBWithMeta(Mat image, Size patternSize, Mat corners, int flags, Mat meta) {
cv::Size sz(patternSize.width, patternSize.height);
return cv::findChessboardCornersSB(*image, sz, *corners, flags, *meta);
}
void DrawChessboardCorners(Mat image, Size patternSize, Mat corners, bool patternWasFound) {
cv::Size sz(patternSize.width, patternSize.height);
cv::drawChessboardCorners(*image, sz, *corners, patternWasFound);
}
Mat EstimateAffinePartial2D(Point2fVector from, Point2fVector to) {
return new cv::Mat(cv::estimateAffinePartial2D(*from, *to));
}
Mat EstimateAffine2D(Point2fVector from, Point2fVector to) {
return new cv::Mat(cv::estimateAffine2D(*from, *to));
}
Mat EstimateAffine2DWithParams(Point2fVector from, Point2fVector to, Mat inliers, int method, double ransacReprojThreshold, size_t maxIters, double confidence, size_t refineIters) {
return new cv::Mat(cv::estimateAffine2D(*from, *to, *inliers, method, ransacReprojThreshold, maxIters, confidence, refineIters));
}
vendor/gocv.io/x/gocv/calib3d.go generated vendored Normal file
@@ -0,0 +1,256 @@
package gocv
/*
#include <stdlib.h>
#include "calib3d.h"
*/
import "C"
import (
"image"
)
// Calib is a wrapper around OpenCV's "Camera Calibration and 3D Reconstruction" of
// Fisheye Camera model
//
// For more details, please see:
// https://docs.opencv.org/trunk/db/d58/group__calib3d__fisheye.html
// CalibFlag value for calibration
type CalibFlag int32
const (
// CalibUseIntrinsicGuess indicates that cameraMatrix contains valid initial values
// of fx, fy, cx, cy that are optimized further. Otherwise, (cx, cy) is initially
// set to the image center ( imageSize is used), and focal distances are computed
// in a least-squares fashion.
CalibUseIntrinsicGuess CalibFlag = 1 << iota
// CalibRecomputeExtrinsic indicates that extrinsic will be recomputed after each
// iteration of intrinsic optimization.
CalibRecomputeExtrinsic
// CalibCheckCond indicates that the functions will check validity of condition number
CalibCheckCond
// CalibFixSkew indicates that skew coefficient (alpha) is set to zero and stay zero
CalibFixSkew
// CalibFixK1 indicates that selected distortion coefficients are set to zeros and stay zero
CalibFixK1
// CalibFixK2 indicates that selected distortion coefficients are set to zeros and stay zero
CalibFixK2
// CalibFixK3 indicates that selected distortion coefficients are set to zeros and stay zero
CalibFixK3
// CalibFixK4 indicates that selected distortion coefficients are set to zeros and stay zero
CalibFixK4
// CalibFixIntrinsic indicates that K1, K2 and D1, D2 are fixed so that only the R and T matrices are estimated
CalibFixIntrinsic
// CalibFixPrincipalPoint indicates that the principal point is not changed during the global optimization.
// It stays at the center or at a different location specified when CalibUseIntrinsicGuess is set too.
CalibFixPrincipalPoint
)
// FisheyeUndistortImage transforms an image to compensate for fisheye lens distortion
func FisheyeUndistortImage(distorted Mat, undistorted *Mat, k, d Mat) {
C.Fisheye_UndistortImage(distorted.Ptr(), undistorted.Ptr(), k.Ptr(), d.Ptr())
}
// FisheyeUndistortImageWithParams transforms an image to compensate for fisheye lens distortion with Knew matrix
func FisheyeUndistortImageWithParams(distorted Mat, undistorted *Mat, k, d, knew Mat, size image.Point) {
sz := C.struct_Size{
width: C.int(size.X),
height: C.int(size.Y),
}
C.Fisheye_UndistortImageWithParams(distorted.Ptr(), undistorted.Ptr(), k.Ptr(), d.Ptr(), knew.Ptr(), sz)
}
// FisheyeUndistortPoints transforms points to compensate for fisheye lens distortion
//
// For further details, please see:
// https://docs.opencv.org/master/db/d58/group__calib3d__fisheye.html#gab738cdf90ceee97b2b52b0d0e7511541
func FisheyeUndistortPoints(distorted Mat, undistorted *Mat, k, d, r, p Mat) {
C.Fisheye_UndistortPoints(distorted.Ptr(), undistorted.Ptr(), k.Ptr(), d.Ptr(), r.Ptr(), p.Ptr())
}
// EstimateNewCameraMatrixForUndistortRectify estimates new camera matrix for undistortion or rectification.
//
// For further details, please see:
// https://docs.opencv.org/master/db/d58/group__calib3d__fisheye.html#ga384940fdf04c03e362e94b6eb9b673c9
func EstimateNewCameraMatrixForUndistortRectify(k, d Mat, imgSize image.Point, r Mat, p *Mat, balance float64, newSize image.Point, fovScale float64) {
imgSz := C.struct_Size{
width: C.int(imgSize.X),
height: C.int(imgSize.Y),
}
newSz := C.struct_Size{
width: C.int(newSize.X),
height: C.int(newSize.Y),
}
C.Fisheye_EstimateNewCameraMatrixForUndistortRectify(k.Ptr(), d.Ptr(), imgSz, r.Ptr(), p.Ptr(), C.double(balance), newSz, C.double(fovScale))
}
// InitUndistortRectifyMap computes the joint undistortion and rectification transformation and represents the result in the form of maps for remap
//
// For further details, please see:
// https://docs.opencv.org/master/d9/d0c/group__calib3d.html#ga7dfb72c9cf9780a347fbe3d1c47e5d5a
//
func InitUndistortRectifyMap(cameraMatrix Mat, distCoeffs Mat, r Mat, newCameraMatrix Mat, size image.Point, m1type int, map1 Mat, map2 Mat) {
sz := C.struct_Size{
width: C.int(size.X),
height: C.int(size.Y),
}
C.InitUndistortRectifyMap(cameraMatrix.Ptr(), distCoeffs.Ptr(), r.Ptr(), newCameraMatrix.Ptr(), sz, C.int(m1type), map1.Ptr(), map2.Ptr())
}
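// A client-side sketch of precomputing the undistortion maps once and reusing
// them for every frame; the CV32F map type, the 640x480 size and the
// rectification matrix r are assumptions for a simple monocular setup:
//
//    m1 := gocv.NewMat()
//    m2 := gocv.NewMat()
//    gocv.InitUndistortRectifyMap(camMat, dist, r, newCamMat,
//        image.Pt(640, 480), int(gocv.MatTypeCV32F), m1, m2)
//    // m1 and m2 can then be fed to gocv.Remap for each incoming frame.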
// GetOptimalNewCameraMatrixWithParams computes and returns the optimal new camera matrix based on the free scaling parameter.
//
// For further details, please see:
// https://docs.opencv.org/master/d9/d0c/group__calib3d.html#ga7a6c4e032c97f03ba747966e6ad862b1
//
func GetOptimalNewCameraMatrixWithParams(cameraMatrix Mat, distCoeffs Mat, imageSize image.Point, alpha float64, newImgSize image.Point, centerPrincipalPoint bool) (Mat, image.Rectangle) {
sz := C.struct_Size{
width: C.int(imageSize.X),
height: C.int(imageSize.Y),
}
newSize := C.struct_Size{
width: C.int(newImgSize.X),
height: C.int(newImgSize.Y),
}
rt := C.struct_Rect{}
return newMat(C.GetOptimalNewCameraMatrixWithParams(cameraMatrix.Ptr(), distCoeffs.Ptr(), sz, C.double(alpha), newSize, &rt, C.bool(centerPrincipalPoint))), toRect(rt)
}
// CalibrateCamera finds the camera intrinsic and extrinsic parameters from several views of a calibration pattern.
//
// For further details, please see:
// https://docs.opencv.org/master/d9/d0c/group__calib3d.html#ga3207604e4b1a1758aa66acb6ed5aa65d
//
func CalibrateCamera(objectPoints Points3fVector, imagePoints Points2fVector, imageSize image.Point,
cameraMatrix *Mat, distCoeffs *Mat, rvecs *Mat, tvecs *Mat, calibFlag CalibFlag) float64 {
sz := C.struct_Size{
width: C.int(imageSize.X),
height: C.int(imageSize.Y),
}
res := C.CalibrateCamera(objectPoints.p, imagePoints.p, sz, cameraMatrix.p, distCoeffs.p, rvecs.p, tvecs.p, C.int(calibFlag))
return float64(res)
}
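// A client-side calibration sketch: obj and imgPts are assumed to be a
// Points3fVector/Points2fVector pair filled with matching 3D board corners
// and 2D detections collected over several views:
//
//    camMat := gocv.NewMat()
//    dist := gocv.NewMat()
//    rvecs := gocv.NewMat()
//    tvecs := gocv.NewMat()
//    rms := gocv.CalibrateCamera(obj, imgPts, image.Pt(640, 480),
//        &camMat, &dist, &rvecs, &tvecs, 0)
//    _ = rms // RMS reprojection error; lower is better
//
// Undistort transforms an image to compensate for lens distortion.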
func Undistort(src Mat, dst *Mat, cameraMatrix Mat, distCoeffs Mat, newCameraMatrix Mat) {
C.Undistort(src.Ptr(), dst.Ptr(), cameraMatrix.Ptr(), distCoeffs.Ptr(), newCameraMatrix.Ptr())
}
// UndistortPoints transforms points to compensate for lens distortion
//
// For further details, please see:
// https://docs.opencv.org/master/d9/d0c/group__calib3d.html#ga55c716492470bfe86b0ee9bf3a1f0f7e
func UndistortPoints(src Mat, dst *Mat, cameraMatrix, distCoeffs, rectificationTransform, newCameraMatrix Mat) {
C.UndistortPoints(src.Ptr(), dst.Ptr(), cameraMatrix.Ptr(), distCoeffs.Ptr(), rectificationTransform.Ptr(), newCameraMatrix.Ptr())
}
// CalibCBFlag value for chessboard calibration
// For more details, please see:
// https://docs.opencv.org/master/d9/d0c/group__calib3d.html#ga93efa9b0aa890de240ca32b11253dd4a
type CalibCBFlag int
const (
// Various operation flags that can be zero or a combination of the following values:
// CalibCBAdaptiveThresh: use adaptive thresholding to convert the image to black and white, rather than a fixed threshold level (computed from the average image brightness).
CalibCBAdaptiveThresh CalibCBFlag = 1 << iota
// CalibCBNormalizeImage: normalize the image gamma with equalizeHist before applying fixed or adaptive thresholding.
CalibCBNormalizeImage
// CalibCBFilterQuads: use additional criteria (like contour area, perimeter, square-like shape) to filter out false quads extracted at the contour retrieval stage.
CalibCBFilterQuads
// CalibCBFastCheck: run a fast check on the image that looks for chessboard corners, and shortcut the call if none is found. This can drastically speed up the call in the degenerate condition when no chessboard is observed.
CalibCBFastCheck
// CalibCBExhaustive: run an exhaustive search to improve detection rate.
CalibCBExhaustive
// CalibCBAccuracy: up-sample the input image to improve sub-pixel accuracy due to aliasing effects.
CalibCBAccuracy
// CalibCBLarger: the detected pattern is allowed to be larger than patternSize (see description).
CalibCBLarger
// CalibCBMarker: the detected pattern must have a marker (see description). This should be used if an accurate camera calibration is required.
CalibCBMarker
)
// FindChessboardCorners finds the positions of internal corners of the chessboard.
//
// For further details, please see:
// https://docs.opencv.org/master/d9/d0c/group__calib3d.html#ga93efa9b0aa890de240ca32b11253dd4a
//
func FindChessboardCorners(image Mat, patternSize image.Point, corners *Mat, flags CalibCBFlag) bool {
sz := C.struct_Size{
width: C.int(patternSize.X),
height: C.int(patternSize.Y),
}
return bool(C.FindChessboardCorners(image.Ptr(), sz, corners.Ptr(), C.int(flags)))
}
// FindChessboardCornersSB finds the positions of internal corners of the chessboard using a sector based approach.
//
// For further details, please see:
// https://docs.opencv.org/master/d9/d0c/group__calib3d.html#gadc5bcb05cb21cf1e50963df26986d7c9
//
func FindChessboardCornersSB(image Mat, patternSize image.Point, corners *Mat, flags CalibCBFlag) bool {
sz := C.struct_Size{
width: C.int(patternSize.X),
height: C.int(patternSize.Y),
}
return bool(C.FindChessboardCornersSB(image.Ptr(), sz, corners.Ptr(), C.int(flags)))
}
// FindChessboardCornersSBWithMeta finds the positions of internal corners of the chessboard using a sector based approach.
//
// For further details, please see:
// https://docs.opencv.org/master/d9/d0c/group__calib3d.html#ga93efa9b0aa890de240ca32b11253dd4a
//
func FindChessboardCornersSBWithMeta(image Mat, patternSize image.Point, corners *Mat, flags CalibCBFlag, meta *Mat) bool {
sz := C.struct_Size{
width: C.int(patternSize.X),
height: C.int(patternSize.Y),
}
return bool(C.FindChessboardCornersSBWithMeta(image.Ptr(), sz, corners.Ptr(), C.int(flags), meta.Ptr()))
}
// DrawChessboardCorners renders the detected chessboard corners.
//
// For further details, please see:
// https://docs.opencv.org/master/d9/d0c/group__calib3d.html#ga6a10b0bb120c4907e5eabbcd22319022
//
func DrawChessboardCorners(image *Mat, patternSize image.Point, corners Mat, patternWasFound bool) {
sz := C.struct_Size{
width: C.int(patternSize.X),
height: C.int(patternSize.Y),
}
C.DrawChessboardCorners(image.Ptr(), sz, corners.Ptr(), C.bool(patternWasFound))
}
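// A client-side detection-and-visualisation sketch; the 7x6 inner-corner
// pattern size is an assumption about the printed board:
//
//    corners := gocv.NewMat()
//    defer corners.Close()
//    found := gocv.FindChessboardCorners(img, image.Pt(7, 6), &corners,
//        gocv.CalibCBAdaptiveThresh|gocv.CalibCBNormalizeImage)
//    gocv.DrawChessboardCorners(&img, image.Pt(7, 6), corners, found)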
// EstimateAffinePartial2D computes an optimal limited affine transformation
// with 4 degrees of freedom between two 2D point sets.
//
// For further details, please see:
// https://docs.opencv.org/master/d9/d0c/group__calib3d.html#gad767faff73e9cbd8b9d92b955b50062d
func EstimateAffinePartial2D(from, to Point2fVector) Mat {
return newMat(C.EstimateAffinePartial2D(from.p, to.p))
}
// EstimateAffine2D Computes an optimal affine transformation between two 2D point sets.
//
// For further details, please see:
// https://docs.opencv.org/4.0.0/d9/d0c/group__calib3d.html#ga27865b1d26bac9ce91efaee83e94d4dd
func EstimateAffine2D(from, to Point2fVector) Mat {
return newMat(C.EstimateAffine2D(from.p, to.p))
}
// EstimateAffine2DWithParams Computes an optimal affine transformation between two 2D point sets
// with additional optional parameters.
//
// For further details, please see:
// https://docs.opencv.org/4.0.0/d9/d0c/group__calib3d.html#ga27865b1d26bac9ce91efaee83e94d4dd
func EstimateAffine2DWithParams(from Point2fVector, to Point2fVector, inliers Mat, method int, ransacReprojThreshold float64, maxIters uint, confidence float64, refineIters uint) Mat {
return newMat(C.EstimateAffine2DWithParams(from.p, to.p, inliers.p, C.int(method), C.double(ransacReprojThreshold), C.size_t(maxIters), C.double(confidence), C.size_t(refineIters)))
}
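// A client-side estimation sketch, assuming this gocv version's
// NewPoint2fVectorFromPoints helper and two equal-length []gocv.Point2f
// slices src and dst of corresponding points:
//
//    from := gocv.NewPoint2fVectorFromPoints(src)
//    defer from.Close()
//    to := gocv.NewPoint2fVectorFromPoints(dst)
//    defer to.Close()
//    m := gocv.EstimateAffine2D(from, to) // 2x3 affine transform
//    defer m.Close()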

36
vendor/gocv.io/x/gocv/calib3d.h generated vendored Normal file

@ -0,0 +1,36 @@
#ifndef _OPENCV3_CALIB_H_
#define _OPENCV3_CALIB_H_
#ifdef __cplusplus
#include <opencv2/opencv.hpp>
#include <opencv2/calib3d.hpp>
extern "C" {
#endif
#include "core.h"
//Calib
void Fisheye_UndistortImage(Mat distorted, Mat undistorted, Mat k, Mat d);
void Fisheye_UndistortImageWithParams(Mat distorted, Mat undistorted, Mat k, Mat d, Mat knew, Size size);
void Fisheye_UndistortPoints(Mat distorted, Mat undistorted, Mat k, Mat d, Mat R, Mat P);
void Fisheye_EstimateNewCameraMatrixForUndistortRectify(Mat k, Mat d, Size imgSize, Mat r, Mat p, double balance, Size newSize, double fovScale);
void InitUndistortRectifyMap(Mat cameraMatrix,Mat distCoeffs,Mat r,Mat newCameraMatrix,Size size,int m1type,Mat map1,Mat map2);
Mat GetOptimalNewCameraMatrixWithParams(Mat cameraMatrix,Mat distCoeffs,Size size,double alpha,Size newImgSize,Rect* validPixROI,bool centerPrincipalPoint);
double CalibrateCamera(Points3fVector objectPoints, Points2fVector imagePoints, Size imageSize, Mat cameraMatrix, Mat distCoeffs, Mat rvecs, Mat tvecs, int flag);
void Undistort(Mat src, Mat dst, Mat cameraMatrix, Mat distCoeffs, Mat newCameraMatrix);
void UndistortPoints(Mat distorted, Mat undistorted, Mat k, Mat d, Mat r, Mat p);
bool FindChessboardCorners(Mat image, Size patternSize, Mat corners, int flags);
bool FindChessboardCornersSB(Mat image, Size patternSize, Mat corners, int flags);
bool FindChessboardCornersSBWithMeta(Mat image, Size patternSize, Mat corners, int flags, Mat meta);
void DrawChessboardCorners(Mat image, Size patternSize, Mat corners, bool patternWasFound);
Mat EstimateAffinePartial2D(Point2fVector from, Point2fVector to);
Mat EstimateAffine2D(Point2fVector from, Point2fVector to);
Mat EstimateAffine2DWithParams(Point2fVector from, Point2fVector to, Mat inliers, int method, double ransacReprojThreshold, size_t maxIters, double confidence, size_t refineIters);
#ifdef __cplusplus
}
#endif
#endif //_OPENCV3_CALIB_H_

49
vendor/gocv.io/x/gocv/calib3d_string.go generated vendored Normal file

@ -0,0 +1,49 @@
package gocv
func (c CalibFlag) String() string {
switch c {
case CalibUseIntrinsicGuess:
return "calib-use-intrinsec-guess"
case CalibRecomputeExtrinsic:
return "calib-recompute-extrinsic"
case CalibCheckCond:
return "calib-check-cond"
case CalibFixSkew:
return "calib-fix-skew"
case CalibFixK1:
return "calib-fix-k1"
case CalibFixK2:
return "calib-fix-k2"
case CalibFixK3:
return "calib-fix-k3"
case CalibFixK4:
return "calib-fix-k4"
case CalibFixIntrinsic:
return "calib-fix-intrinsic"
case CalibFixPrincipalPoint:
return "calib-fix-principal-point"
}
return ""
}
func (c CalibCBFlag) String() string {
switch c {
case CalibCBAdaptiveThresh:
return "calib-cb-adaptive-thresh"
case CalibCBNormalizeImage:
return "calib-cb-normalize-image"
case CalibCBFilterQuads:
return "calib-cb-filter-quads"
case CalibCBFastCheck:
return "calib-cb-fast-check"
case CalibCBExhaustive:
return "calib-cb-exhaustive"
case CalibCBAccuracy:
return "calib-cb-accuracy"
case CalibCBLarger:
return "calib-cb-larger"
case CalibCBMarker:
return "calib-cb-marker"
}
return ""
}

14
vendor/gocv.io/x/gocv/cgo.go generated vendored Normal file

@ -0,0 +1,14 @@
//go:build !customenv && !static
// +build !customenv,!static
package gocv
// Changes here should be mirrored in contrib/cgo.go and cuda/cgo.go.
/*
#cgo !windows pkg-config: opencv4
#cgo CXXFLAGS: --std=c++11
#cgo windows CPPFLAGS: -IC:/opencv/build/install/include
#cgo windows LDFLAGS: -LC:/opencv/build/install/x64/mingw/lib -lopencv_core460 -lopencv_face460 -lopencv_videoio460 -lopencv_imgproc460 -lopencv_highgui460 -lopencv_imgcodecs460 -lopencv_objdetect460 -lopencv_features2d460 -lopencv_video460 -lopencv_dnn460 -lopencv_xfeatures2d460 -lopencv_plot460 -lopencv_tracking460 -lopencv_img_hash460 -lopencv_calib3d460 -lopencv_bgsegm460 -lopencv_photo460 -lopencv_aruco460 -lopencv_wechat_qrcode460 -lopencv_ximgproc460
*/
import "C"

15
vendor/gocv.io/x/gocv/cgo_static.go generated vendored Normal file

@ -0,0 +1,15 @@
//go:build !customenv && static
// +build !customenv,static
package gocv
// Changes here should be mirrored in contrib/cgo_static.go and cuda/cgo_static.go.
/*
#cgo CXXFLAGS: --std=c++11
#cgo !windows CPPFLAGS: -I/usr/local/include -I/usr/local/include/opencv4
#cgo !windows LDFLAGS: -L/usr/local/lib -L/usr/local/lib/opencv4/3rdparty -lopencv_gapi -lopencv_stitching -lopencv_aruco -lopencv_bgsegm -lopencv_bioinspired -lopencv_ccalib -lopencv_dnn_objdetect -lopencv_dpm -lopencv_face -lopencv_fuzzy -lopencv_hfs -lopencv_img_hash -lopencv_line_descriptor -lopencv_quality -lopencv_reg -lopencv_rgbd -lopencv_saliency -lopencv_stereo -lopencv_structured_light -lopencv_phase_unwrapping -lopencv_superres -lopencv_optflow -lopencv_surface_matching -lopencv_tracking -lopencv_datasets -lopencv_text -lopencv_highgui -lopencv_dnn -lopencv_plot -lopencv_videostab -lopencv_video -lopencv_videoio -lopencv_xfeatures2d -lopencv_shape -lopencv_ml -lopencv_ximgproc -lopencv_xobjdetect -lopencv_objdetect -lopencv_calib3d -lopencv_imgcodecs -lopencv_features2d -lopencv_flann -lopencv_xphoto -lopencv_wechat_qrcode -lopencv_photo -lopencv_imgproc -lopencv_core -littnotify -llibprotobuf -lIlmImf -lquirc -lippiw -lippicv -lade -lz -ljpeg -ldl -lm -lpthread -lrt -lquadmath
#cgo windows CPPFLAGS: -IC:/opencv/build/install/include
#cgo windows LDFLAGS: -LC:/opencv/build/install/x64/mingw/staticlib -lopencv_stereo460 -lopencv_tracking460 -lopencv_superres460 -lopencv_stitching460 -lopencv_optflow460 -lopencv_gapi460 -lopencv_face460 -lopencv_dpm460 -lopencv_dnn_objdetect460 -lopencv_ccalib460 -lopencv_bioinspired460 -lopencv_bgsegm460 -lopencv_aruco460 -lopencv_xobjdetect460 -lopencv_ximgproc460 -lopencv_xfeatures2d460 -lopencv_videostab460 -lopencv_video460 -lopencv_structured_light460 -lopencv_shape460 -lopencv_rgbd460 -lopencv_rapid460 -lopencv_objdetect460 -lopencv_mcc460 -lopencv_highgui460 -lopencv_datasets460 -lopencv_calib3d460 -lopencv_videoio460 -lopencv_text460 -lopencv_line_descriptor460 -lopencv_imgcodecs460 -lopencv_img_hash460 -lopencv_hfs460 -lopencv_fuzzy460 -lopencv_features2d460 -lopencv_dnn_superres460 -lopencv_dnn460 -lopencv_xphoto460 -lopencv_wechat_qrcode460 -lopencv_surface_matching460 -lopencv_reg460 -lopencv_quality460 -lopencv_plot460 -lopencv_photo460 -lopencv_phase_unwrapping460 -lopencv_ml460 -lopencv_intensity_transform460 -lopencv_imgproc460 -lopencv_flann460 -lopencv_core460 -lade -lquirc -llibprotobuf -lIlmImf -llibpng -llibopenjp2 -llibwebp -llibtiff -llibjpeg-turbo -lzlib -lkernel32 -lgdi32 -lwinspool -lshell32 -lole32 -loleaut32 -luuid -lcomdlg32 -ladvapi32 -luser32
*/
import "C"

3
vendor/gocv.io/x/gocv/codecov.yml generated vendored Normal file

@ -0,0 +1,3 @@
ignore:
- "*_string.go"
- "*/*_string.go"

1147
vendor/gocv.io/x/gocv/core.cpp generated vendored Normal file

File diff suppressed because it is too large

2817
vendor/gocv.io/x/gocv/core.go generated vendored Normal file

File diff suppressed because it is too large

519
vendor/gocv.io/x/gocv/core.h generated vendored Normal file

@ -0,0 +1,519 @@
#ifndef _OPENCV3_CORE_H_
#define _OPENCV3_CORE_H_
#include <stdint.h>
#include <stdbool.h>
// Wrapper for std::vector<string>
typedef struct CStrings {
const char** strs;
int length;
} CStrings;
typedef struct ByteArray {
char* data;
int length;
} ByteArray;
// Wrapper for std::vector<int>
typedef struct IntVector {
int* val;
int length;
} IntVector;
// Wrapper for std::vector<float>
typedef struct FloatVector {
float* val;
int length;
} FloatVector;
#ifdef __cplusplus
#include <opencv2/opencv.hpp>
extern "C" {
#endif
typedef struct RawData {
int width;
int height;
struct ByteArray data;
} RawData;
// Wrapper for an individual cv::Point2f
typedef struct Point2f {
float x;
float y;
} Point2f;
typedef struct Point3f {
float x;
float y;
float z;
} Point3f;
// Wrapper for an individual cv::cvPoint
typedef struct Point {
int x;
int y;
} Point;
// Wrapper for the vector of Point structs aka std::vector<Point>
typedef struct Points {
Point* points;
int length;
} Points;
// Wrapper for the vector of Point2f structs aka std::vector<Point2f>
typedef struct Points2f {
Point2f* points;
int length;
} Points2f;
typedef struct Points3f {
Point3f *points;
int length;
} Points3f;
// Contour is alias for Points
typedef Points Contour;
// Contour2f is alias for Points2f
typedef Points2f Contour2f;
typedef struct Contours2f {
Contour2f *contours;
int length;
} Contours2f;
// Contour3f is alias for Points3f
typedef Points3f Contour3f;
// Wrapper for the vector of Points3f vectors aka std::vector< std::vector<Point3f> >
typedef struct Contours3f {
Contour3f *contours;
int length;
} Contours3f;
// Wrapper for the vector of Points vectors aka std::vector< std::vector<Point> >
typedef struct Contours {
Contour* contours;
int length;
} Contours;
// Wrapper for an individual cv::cvRect
typedef struct Rect {
int x;
int y;
int width;
int height;
} Rect;
// Wrapper for the vector of Rect struct aka std::vector<Rect>
typedef struct Rects {
Rect* rects;
int length;
} Rects;
// Wrapper for an individual cv::cvSize
typedef struct Size {
int width;
int height;
} Size;
// Wrapper for an individual cv::RotatedRect
typedef struct RotatedRect {
Points pts;
Rect boundingRect;
Point center;
Size size;
double angle;
} RotatedRect;
// Wrapper for an individual cv::cvScalar
typedef struct Scalar {
double val1;
double val2;
double val3;
double val4;
} Scalar;
// Wrapper for an individual cv::KeyPoint
typedef struct KeyPoint {
double x;
double y;
double size;
double angle;
double response;
int octave;
int classID;
} KeyPoint;
// Wrapper for the vector of KeyPoint struct aka std::vector<KeyPoint>
typedef struct KeyPoints {
KeyPoint* keypoints;
int length;
} KeyPoints;
// Wrapper for SimpleBlobDetectorParams aka SimpleBlobDetector::Params
typedef struct SimpleBlobDetectorParams {
unsigned char blobColor;
bool filterByArea;
bool filterByCircularity;
bool filterByColor;
bool filterByConvexity;
bool filterByInertia;
float maxArea;
float maxCircularity;
float maxConvexity;
float maxInertiaRatio;
float maxThreshold;
float minArea;
float minCircularity;
float minConvexity;
float minDistBetweenBlobs;
float minInertiaRatio;
size_t minRepeatability;
float minThreshold;
float thresholdStep;
} SimpleBlobDetectorParams;
// Wrapper for an individual cv::DMatch
typedef struct DMatch {
int queryIdx;
int trainIdx;
int imgIdx;
float distance;
} DMatch;
// Wrapper for the vector of DMatch struct aka std::vector<DMatch>
typedef struct DMatches {
DMatch* dmatches;
int length;
} DMatches;
// Wrapper for the vector vector of DMatch struct aka std::vector<std::vector<DMatch>>
typedef struct MultiDMatches {
DMatches* dmatches;
int length;
} MultiDMatches;
// Wrapper for an individual cv::Moment
typedef struct Moment {
double m00;
double m10;
double m01;
double m20;
double m11;
double m02;
double m30;
double m21;
double m12;
double m03;
double mu20;
double mu11;
double mu02;
double mu30;
double mu21;
double mu12;
double mu03;
double nu20;
double nu11;
double nu02;
double nu30;
double nu21;
double nu12;
double nu03;
} Moment;
#ifdef __cplusplus
typedef cv::Mat* Mat;
typedef cv::TermCriteria* TermCriteria;
typedef cv::RNG* RNG;
typedef std::vector< cv::Point >* PointVector;
typedef std::vector< std::vector< cv::Point > >* PointsVector;
typedef std::vector< cv::Point2f >* Point2fVector;
typedef std::vector< std::vector< cv::Point2f> >* Points2fVector;
typedef std::vector< cv::Point3f >* Point3fVector;
typedef std::vector< std::vector< cv::Point3f > >* Points3fVector;
#else
typedef void* Mat;
typedef void* TermCriteria;
typedef void* RNG;
typedef void* PointVector;
typedef void* PointsVector;
typedef void* Point2fVector;
typedef void* Points2fVector;
typedef void* Point3fVector;
typedef void* Points3fVector;
#endif
// Wrapper for the vector of Mat aka std::vector<Mat>
typedef struct Mats {
Mat* mats;
int length;
} Mats;
Mat Mats_get(struct Mats mats, int i);
struct DMatches MultiDMatches_get(struct MultiDMatches mds, int index);
struct ByteArray toByteArray(const char* buf, int len);
void ByteArray_Release(struct ByteArray buf);
void Contours_Close(struct Contours cs);
void KeyPoints_Close(struct KeyPoints ks);
void Rects_Close(struct Rects rs);
void Mats_Close(struct Mats mats);
void Point_Close(struct Point p);
void Points_Close(struct Points ps);
void DMatches_Close(struct DMatches ds);
void MultiDMatches_Close(struct MultiDMatches mds);
Mat Mat_New();
Mat Mat_NewWithSize(int rows, int cols, int type);
Mat Mat_NewWithSizes(struct IntVector sizes, int type);
Mat Mat_NewWithSizesFromScalar(IntVector sizes, int type, Scalar ar);
Mat Mat_NewWithSizesFromBytes(IntVector sizes, int type, struct ByteArray buf);
Mat Mat_NewFromScalar(const Scalar ar, int type);
Mat Mat_NewWithSizeFromScalar(const Scalar ar, int rows, int cols, int type);
Mat Mat_NewFromBytes(int rows, int cols, int type, struct ByteArray buf);
Mat Mat_FromPtr(Mat m, int rows, int cols, int type, int prows, int pcols);
void Mat_Close(Mat m);
int Mat_Empty(Mat m);
bool Mat_IsContinuous(Mat m);
Mat Mat_Clone(Mat m);
void Mat_CopyTo(Mat m, Mat dst);
int Mat_Total(Mat m);
void Mat_Size(Mat m, IntVector* res);
void Mat_CopyToWithMask(Mat m, Mat dst, Mat mask);
void Mat_ConvertTo(Mat m, Mat dst, int type);
void Mat_ConvertToWithParams(Mat m, Mat dst, int type, float alpha, float beta);
struct ByteArray Mat_ToBytes(Mat m);
struct ByteArray Mat_DataPtr(Mat m);
Mat Mat_Region(Mat m, Rect r);
Mat Mat_Reshape(Mat m, int cn, int rows);
void Mat_PatchNaNs(Mat m);
Mat Mat_ConvertFp16(Mat m);
Scalar Mat_Mean(Mat m);
Scalar Mat_MeanWithMask(Mat m, Mat mask);
Mat Mat_Sqrt(Mat m);
int Mat_Rows(Mat m);
int Mat_Cols(Mat m);
int Mat_Channels(Mat m);
int Mat_Type(Mat m);
int Mat_Step(Mat m);
int Mat_ElemSize(Mat m);
Mat Eye(int rows, int cols, int type);
Mat Zeros(int rows, int cols, int type);
Mat Ones(int rows, int cols, int type);
uint8_t Mat_GetUChar(Mat m, int row, int col);
uint8_t Mat_GetUChar3(Mat m, int x, int y, int z);
int8_t Mat_GetSChar(Mat m, int row, int col);
int8_t Mat_GetSChar3(Mat m, int x, int y, int z);
int16_t Mat_GetShort(Mat m, int row, int col);
int16_t Mat_GetShort3(Mat m, int x, int y, int z);
int32_t Mat_GetInt(Mat m, int row, int col);
int32_t Mat_GetInt3(Mat m, int x, int y, int z);
float Mat_GetFloat(Mat m, int row, int col);
float Mat_GetFloat3(Mat m, int x, int y, int z);
double Mat_GetDouble(Mat m, int row, int col);
double Mat_GetDouble3(Mat m, int x, int y, int z);
void Mat_SetTo(Mat m, Scalar value);
void Mat_SetUChar(Mat m, int row, int col, uint8_t val);
void Mat_SetUChar3(Mat m, int x, int y, int z, uint8_t val);
void Mat_SetSChar(Mat m, int row, int col, int8_t val);
void Mat_SetSChar3(Mat m, int x, int y, int z, int8_t val);
void Mat_SetShort(Mat m, int row, int col, int16_t val);
void Mat_SetShort3(Mat m, int x, int y, int z, int16_t val);
void Mat_SetInt(Mat m, int row, int col, int32_t val);
void Mat_SetInt3(Mat m, int x, int y, int z, int32_t val);
void Mat_SetFloat(Mat m, int row, int col, float val);
void Mat_SetFloat3(Mat m, int x, int y, int z, float val);
void Mat_SetDouble(Mat m, int row, int col, double val);
void Mat_SetDouble3(Mat m, int x, int y, int z, double val);
void Mat_AddUChar(Mat m, uint8_t val);
void Mat_SubtractUChar(Mat m, uint8_t val);
void Mat_MultiplyUChar(Mat m, uint8_t val);
void Mat_DivideUChar(Mat m, uint8_t val);
void Mat_AddFloat(Mat m, float val);
void Mat_SubtractFloat(Mat m, float val);
void Mat_MultiplyFloat(Mat m, float val);
void Mat_DivideFloat(Mat m, float val);
Mat Mat_MultiplyMatrix(Mat x, Mat y);
Mat Mat_T(Mat x);
void LUT(Mat src, Mat lut, Mat dst);
void Mat_AbsDiff(Mat src1, Mat src2, Mat dst);
void Mat_Add(Mat src1, Mat src2, Mat dst);
void Mat_AddWeighted(Mat src1, double alpha, Mat src2, double beta, double gamma, Mat dst);
void Mat_BitwiseAnd(Mat src1, Mat src2, Mat dst);
void Mat_BitwiseAndWithMask(Mat src1, Mat src2, Mat dst, Mat mask);
void Mat_BitwiseNot(Mat src1, Mat dst);
void Mat_BitwiseNotWithMask(Mat src1, Mat dst, Mat mask);
void Mat_BitwiseOr(Mat src1, Mat src2, Mat dst);
void Mat_BitwiseOrWithMask(Mat src1, Mat src2, Mat dst, Mat mask);
void Mat_BitwiseXor(Mat src1, Mat src2, Mat dst);
void Mat_BitwiseXorWithMask(Mat src1, Mat src2, Mat dst, Mat mask);
void Mat_Compare(Mat src1, Mat src2, Mat dst, int ct);
void Mat_BatchDistance(Mat src1, Mat src2, Mat dist, int dtype, Mat nidx, int normType, int K,
Mat mask, int update, bool crosscheck);
int Mat_BorderInterpolate(int p, int len, int borderType);
void Mat_CalcCovarMatrix(Mat samples, Mat covar, Mat mean, int flags, int ctype);
void Mat_CartToPolar(Mat x, Mat y, Mat magnitude, Mat angle, bool angleInDegrees);
bool Mat_CheckRange(Mat m);
void Mat_CompleteSymm(Mat m, bool lowerToUpper);
void Mat_ConvertScaleAbs(Mat src, Mat dst, double alpha, double beta);
void Mat_CopyMakeBorder(Mat src, Mat dst, int top, int bottom, int left, int right, int borderType,
Scalar value);
int Mat_CountNonZero(Mat src);
void Mat_DCT(Mat src, Mat dst, int flags);
double Mat_Determinant(Mat m);
void Mat_DFT(Mat m, Mat dst, int flags);
void Mat_Divide(Mat src1, Mat src2, Mat dst);
bool Mat_Eigen(Mat src, Mat eigenvalues, Mat eigenvectors);
void Mat_EigenNonSymmetric(Mat src, Mat eigenvalues, Mat eigenvectors);
void Mat_Exp(Mat src, Mat dst);
void Mat_ExtractChannel(Mat src, Mat dst, int coi);
void Mat_FindNonZero(Mat src, Mat idx);
void Mat_Flip(Mat src, Mat dst, int flipCode);
void Mat_Gemm(Mat src1, Mat src2, double alpha, Mat src3, double beta, Mat dst, int flags);
int Mat_GetOptimalDFTSize(int vecsize);
void Mat_Hconcat(Mat src1, Mat src2, Mat dst);
void Mat_Vconcat(Mat src1, Mat src2, Mat dst);
void Rotate(Mat src, Mat dst, int rotationCode);
void Mat_Idct(Mat src, Mat dst, int flags);
void Mat_Idft(Mat src, Mat dst, int flags, int nonzeroRows);
void Mat_InRange(Mat src, Mat lowerb, Mat upperb, Mat dst);
void Mat_InRangeWithScalar(Mat src, const Scalar lowerb, const Scalar upperb, Mat dst);
void Mat_InsertChannel(Mat src, Mat dst, int coi);
double Mat_Invert(Mat src, Mat dst, int flags);
double KMeans(Mat data, int k, Mat bestLabels, TermCriteria criteria, int attempts, int flags, Mat centers);
double KMeansPoints(PointVector pts, int k, Mat bestLabels, TermCriteria criteria, int attempts, int flags, Mat centers);
void Mat_Log(Mat src, Mat dst);
void Mat_Magnitude(Mat x, Mat y, Mat magnitude);
void Mat_Max(Mat src1, Mat src2, Mat dst);
void Mat_MeanStdDev(Mat src, Mat dstMean, Mat dstStdDev);
void Mat_Merge(struct Mats mats, Mat dst);
void Mat_Min(Mat src1, Mat src2, Mat dst);
void Mat_MinMaxIdx(Mat m, double* minVal, double* maxVal, int* minIdx, int* maxIdx);
void Mat_MinMaxLoc(Mat m, double* minVal, double* maxVal, Point* minLoc, Point* maxLoc);
void Mat_MixChannels(struct Mats src, struct Mats dst, struct IntVector fromTo);
void Mat_MulSpectrums(Mat a, Mat b, Mat c, int flags);
void Mat_Multiply(Mat src1, Mat src2, Mat dst);
void Mat_MultiplyWithParams(Mat src1, Mat src2, Mat dst, double scale, int dtype);
void Mat_Subtract(Mat src1, Mat src2, Mat dst);
void Mat_Normalize(Mat src, Mat dst, double alpha, double beta, int typ);
double Norm(Mat src1, int normType);
double NormWithMats(Mat src1, Mat src2, int normType);
void Mat_PerspectiveTransform(Mat src, Mat dst, Mat tm);
bool Mat_Solve(Mat src1, Mat src2, Mat dst, int flags);
int Mat_SolveCubic(Mat coeffs, Mat roots);
double Mat_SolvePoly(Mat coeffs, Mat roots, int maxIters);
void Mat_Reduce(Mat src, Mat dst, int dim, int rType, int dType);
void Mat_Repeat(Mat src, int nY, int nX, Mat dst);
void Mat_ScaleAdd(Mat src1, double alpha, Mat src2, Mat dst);
void Mat_SetIdentity(Mat src, double scalar);
void Mat_Sort(Mat src, Mat dst, int flags);
void Mat_SortIdx(Mat src, Mat dst, int flags);
void Mat_Split(Mat src, struct Mats* mats);
void Mat_Subtract(Mat src1, Mat src2, Mat dst);
Scalar Mat_Trace(Mat src);
void Mat_Transform(Mat src, Mat dst, Mat tm);
void Mat_Transpose(Mat src, Mat dst);
void Mat_PolarToCart(Mat magnitude, Mat degree, Mat x, Mat y, bool angleInDegrees);
void Mat_Pow(Mat src, double power, Mat dst);
void Mat_Phase(Mat x, Mat y, Mat angle, bool angleInDegrees);
Scalar Mat_Sum(Mat src1);
TermCriteria TermCriteria_New(int typ, int maxCount, double epsilon);
int64_t GetCVTickCount();
double GetTickFrequency();
Mat Mat_rowRange(Mat m,int startrow,int endrow);
Mat Mat_colRange(Mat m,int startrow,int endrow);
PointVector PointVector_New();
PointVector PointVector_NewFromPoints(Contour points);
PointVector PointVector_NewFromMat(Mat mat);
Point PointVector_At(PointVector pv, int idx);
void PointVector_Append(PointVector pv, Point p);
int PointVector_Size(PointVector pv);
void PointVector_Close(PointVector pv);
PointsVector PointsVector_New();
PointsVector PointsVector_NewFromPoints(Contours points);
PointVector PointsVector_At(PointsVector psv, int idx);
void PointsVector_Append(PointsVector psv, PointVector pv);
int PointsVector_Size(PointsVector psv);
void PointsVector_Close(PointsVector psv);
Point2fVector Point2fVector_New();
void Point2fVector_Close(Point2fVector pfv);
Point2fVector Point2fVector_NewFromPoints(Contour2f pts);
Point2fVector Point2fVector_NewFromMat(Mat mat);
Point2f Point2fVector_At(Point2fVector pfv, int idx);
int Point2fVector_Size(Point2fVector pfv);
void IntVector_Close(struct IntVector ivec);
void CStrings_Close(struct CStrings cstrs);
RNG TheRNG();
void SetRNGSeed(int seed);
void RNG_Fill(RNG rng, Mat mat, int distType, double a, double b, bool saturateRange);
double RNG_Gaussian(RNG rng, double sigma);
unsigned int RNG_Next(RNG rng);
void RandN(Mat mat, Scalar mean, Scalar stddev);
void RandShuffle(Mat mat);
void RandShuffleWithParams(Mat mat, double iterFactor, RNG rng);
void RandU(Mat mat, Scalar low, Scalar high);
void copyPointVectorToPoint2fVector(PointVector src, Point2fVector dest);
void StdByteVectorInitialize(void* data);
void StdByteVectorFree(void *data);
size_t StdByteVectorLen(void *data);
uint8_t* StdByteVectorData(void *data);
Points2fVector Points2fVector_New();
Points2fVector Points2fVector_NewFromPoints(Contours2f points);
int Points2fVector_Size(Points2fVector ps);
Point2fVector Points2fVector_At(Points2fVector ps, int idx);
void Points2fVector_Append(Points2fVector psv, Point2fVector pv);
void Points2fVector_Close(Points2fVector ps);
Point3fVector Point3fVector_New();
Point3fVector Point3fVector_NewFromPoints(Contour3f points);
Point3fVector Point3fVector_NewFromMat(Mat mat);
void Point3fVector_Append(Point3fVector pfv, Point3f point);
Point3f Point3fVector_At(Point3fVector pfv, int idx);
int Point3fVector_Size(Point3fVector pfv);
void Point3fVector_Close(Point3fVector pv);
Points3fVector Points3fVector_New();
Points3fVector Points3fVector_NewFromPoints(Contours3f points);
int Points3fVector_Size(Points3fVector ps);
Point3fVector Points3fVector_At(Points3fVector ps, int idx);
void Points3fVector_Append(Points3fVector psv, Point3fVector pv);
void Points3fVector_Close(Points3fVector ps);
#ifdef __cplusplus
}
#endif
#endif //_OPENCV3_CORE_H_

211
vendor/gocv.io/x/gocv/core_string.go generated vendored Normal file

@ -0,0 +1,211 @@
package gocv
func (c MatType) String() string {
switch c {
case MatTypeCV8U:
return "CV8U"
case MatTypeCV8UC2:
return "CV8UC2"
case MatTypeCV8UC3:
return "CV8UC3"
case MatTypeCV8UC4:
return "CV8UC4"
case MatTypeCV16U:
return "CV16U"
case MatTypeCV16UC2:
return "CV16UC2"
case MatTypeCV16UC3:
return "CV16UC3"
case MatTypeCV16UC4:
return "CV16UC4"
case MatTypeCV16S:
return "CV16S"
case MatTypeCV16SC2:
return "CV16SC2"
case MatTypeCV16SC3:
return "CV16SC3"
case MatTypeCV16SC4:
return "CV16SC4"
case MatTypeCV32S:
return "CV32S"
case MatTypeCV32SC2:
return "CV32SC2"
case MatTypeCV32SC3:
return "CV32SC3"
case MatTypeCV32SC4:
return "CV32SC4"
case MatTypeCV32F:
return "CV32F"
case MatTypeCV32FC2:
return "CV32FC2"
case MatTypeCV32FC3:
return "CV32FC3"
case MatTypeCV32FC4:
return "CV32FC4"
case MatTypeCV64F:
return "CV64F"
case MatTypeCV64FC2:
return "CV64FC2"
case MatTypeCV64FC3:
return "CV64FC3"
case MatTypeCV64FC4:
return "CV64FC4"
}
return ""
}
func (c CompareType) String() string {
switch c {
case CompareEQ:
return "eq"
case CompareGT:
return "gt"
case CompareGE:
return "ge"
case CompareLT:
return "lt"
case CompareLE:
return "le"
case CompareNE:
return "ne"
}
return ""
}
func (c CovarFlags) String() string {
switch c {
case CovarScrambled:
return "covar-scrambled"
case CovarNormal:
return "covar-normal"
case CovarUseAvg:
return "covar-use-avg"
case CovarScale:
return "covar-scale"
case CovarRows:
return "covar-rows"
case CovarCols:
return "covar-cols"
}
return ""
}
func (c DftFlags) String() string {
switch c {
case DftForward:
return "dft-forward"
case DftInverse:
return "dft-inverse"
case DftScale:
return "dft-scale"
case DftRows:
return "dft-rows"
case DftComplexOutput:
return "dft-complex-output"
case DftRealOutput:
return "dft-real-output"
case DftComplexInput:
return "dft-complex-input"
}
return ""
}
func (c RotateFlag) String() string {
switch c {
case Rotate90Clockwise:
return "rotate-90-clockwise"
case Rotate180Clockwise:
return "rotate-180-clockwise"
case Rotate90CounterClockwise:
return "rotate-90-counter-clockwise"
}
return ""
}
func (c KMeansFlags) String() string {
switch c {
case KMeansRandomCenters:
return "kmeans-random-centers"
case KMeansPPCenters:
return "kmeans-pp-centers"
case KMeansUseInitialLabels:
return "kmeans-use-initial-labels"
}
return ""
}
func (c NormType) String() string {
switch c {
case NormInf:
return "norm-inf"
case NormL1:
return "norm-l1"
case NormL2:
return "norm-l2"
case NormL2Sqr:
return "norm-l2-sqr"
case NormHamming:
return "norm-hamming"
case NormHamming2:
return "norm-hamming2"
case NormRelative:
return "norm-relative"
case NormMinMax:
return "norm-minmax"
}
return ""
}
func (c TermCriteriaType) String() string {
switch c {
case Count:
return "count"
case EPS:
return "eps"
}
return ""
}
func (c SolveDecompositionFlags) String() string {
switch c {
case SolveDecompositionLu:
return "solve-decomposition-lu"
case SolveDecompositionSvd:
return "solve-decomposition-svd"
case SolveDecompositionEing:
return "solve-decomposition-eing"
case SolveDecompositionCholesky:
return "solve-decomposition-cholesky"
case SolveDecompositionQr:
return "solve-decomposition-qr"
case SolveDecompositionNormal:
return "solve-decomposition-normal"
}
return ""
}
func (c ReduceTypes) String() string {
switch c {
case ReduceSum:
return "reduce-sum"
case ReduceAvg:
return "reduce-avg"
case ReduceMax:
return "reduce-max"
case ReduceMin:
return "reduce-min"
}
return ""
}
func (c SortFlags) String() string {
switch c {
case SortEveryRow:
return "sort-every-row"
case SortEveryColumn:
return "sort-every-column"
case SortDescending:
return "sort-descending"
}
return ""
}

266
vendor/gocv.io/x/gocv/dnn.cpp generated vendored Normal file

@ -0,0 +1,266 @@
#include "dnn.h"
Net Net_ReadNet(const char* model, const char* config) {
Net n = new cv::dnn::Net(cv::dnn::readNet(model, config));
return n;
}
Net Net_ReadNetBytes(const char* framework, struct ByteArray model, struct ByteArray config) {
std::vector<uchar> modelv(model.data, model.data + model.length);
std::vector<uchar> configv(config.data, config.data + config.length);
Net n = new cv::dnn::Net(cv::dnn::readNet(framework, modelv, configv));
return n;
}
Net Net_ReadNetFromCaffe(const char* prototxt, const char* caffeModel) {
Net n = new cv::dnn::Net(cv::dnn::readNetFromCaffe(prototxt, caffeModel));
return n;
}
Net Net_ReadNetFromCaffeBytes(struct ByteArray prototxt, struct ByteArray caffeModel) {
Net n = new cv::dnn::Net(cv::dnn::readNetFromCaffe(prototxt.data, prototxt.length,
caffeModel.data, caffeModel.length));
return n;
}
Net Net_ReadNetFromTensorflow(const char* model) {
Net n = new cv::dnn::Net(cv::dnn::readNetFromTensorflow(model));
return n;
}
Net Net_ReadNetFromTensorflowBytes(struct ByteArray model) {
Net n = new cv::dnn::Net(cv::dnn::readNetFromTensorflow(model.data, model.length));
return n;
}
Net Net_ReadNetFromTorch(const char* model) {
Net n = new cv::dnn::Net(cv::dnn::readNetFromTorch(model));
return n;
}
Net Net_ReadNetFromONNX(const char* model) {
Net n = new cv::dnn::Net(cv::dnn::readNetFromONNX(model));
return n;
}
Net Net_ReadNetFromONNXBytes(struct ByteArray model) {
Net n = new cv::dnn::Net(cv::dnn::readNetFromONNX(model.data, model.length));
return n;
}
void Net_Close(Net net) {
delete net;
}
bool Net_Empty(Net net) {
return net->empty();
}
void Net_SetInput(Net net, Mat blob, const char* name) {
net->setInput(*blob, name);
}
Mat Net_Forward(Net net, const char* outputName) {
return new cv::Mat(net->forward(outputName));
}
void Net_ForwardLayers(Net net, struct Mats* outputBlobs, struct CStrings outBlobNames) {
std::vector< cv::Mat > blobs;
std::vector< cv::String > names;
for (int i = 0; i < outBlobNames.length; ++i) {
names.push_back(cv::String(outBlobNames.strs[i]));
}
net->forward(blobs, names);
// copy blobs into outputBlobs
outputBlobs->mats = new Mat[blobs.size()];
for (size_t i = 0; i < blobs.size(); ++i) {
outputBlobs->mats[i] = new cv::Mat(blobs[i]);
}
outputBlobs->length = (int)blobs.size();
}
void Net_SetPreferableBackend(Net net, int backend) {
net->setPreferableBackend(backend);
}
void Net_SetPreferableTarget(Net net, int target) {
net->setPreferableTarget(target);
}
int64_t Net_GetPerfProfile(Net net) {
std::vector<double> layersTimes;
return net->getPerfProfile(layersTimes);
}
void Net_GetUnconnectedOutLayers(Net net, IntVector* res) {
std::vector< int > cids(net->getUnconnectedOutLayers());
int* ids = new int[cids.size()];
for (size_t i = 0; i < cids.size(); ++i) {
ids[i] = cids[i];
}
res->length = cids.size();
res->val = ids;
return;
}
void Net_GetLayerNames(Net net, CStrings* names) {
std::vector< cv::String > cstrs(net->getLayerNames());
const char **strs = new const char*[cstrs.size()];
for (size_t i = 0; i < cstrs.size(); ++i) {
strs[i] = cstrs[i].c_str();
}
names->length = cstrs.size();
names->strs = strs;
return;
}
Mat Net_BlobFromImage(Mat image, double scalefactor, Size size, Scalar mean, bool swapRB,
bool crop) {
cv::Size sz(size.width, size.height);
cv::Scalar cm(mean.val1, mean.val2, mean.val3, mean.val4);
// use the default target ddepth here.
return new cv::Mat(cv::dnn::blobFromImage(*image, scalefactor, sz, cm, swapRB, crop));
}
void Net_BlobFromImages(struct Mats images, Mat blob, double scalefactor, Size size,
Scalar mean, bool swapRB, bool crop, int ddepth) {
std::vector<cv::Mat> imgs;
for (int i = 0; i < images.length; ++i) {
imgs.push_back(*images.mats[i]);
}
cv::Size sz(size.width, size.height);
cv::Scalar cm = cv::Scalar(mean.val1, mean.val2, mean.val3, mean.val4);
// ignore the passed in ddepth, just use default.
cv::dnn::blobFromImages(imgs, *blob, scalefactor, sz, cm, swapRB, crop);
}
void Net_ImagesFromBlob(Mat blob_, struct Mats* images_) {
std::vector<cv::Mat> imgs;
cv::dnn::imagesFromBlob(*blob_, imgs);
images_->mats = new Mat[imgs.size()];
for (size_t i = 0; i < imgs.size(); ++i) {
images_->mats[i] = new cv::Mat(imgs[i]);
}
images_->length = (int) imgs.size();
}
Mat Net_GetBlobChannel(Mat blob, int imgidx, int chnidx) {
size_t w = blob->size[3];
size_t h = blob->size[2];
return new cv::Mat(h, w, CV_32F, blob->ptr<float>(imgidx, chnidx));
}
Scalar Net_GetBlobSize(Mat blob) {
Scalar scal = Scalar();
scal.val1 = blob->size[0];
scal.val2 = blob->size[1];
scal.val3 = blob->size[2];
scal.val4 = blob->size[3];
return scal;
}
Layer Net_GetLayer(Net net, int layerid) {
return new cv::Ptr<cv::dnn::Layer>(net->getLayer(layerid));
}
void Layer_Close(Layer layer) {
delete layer;
}
int Layer_InputNameToIndex(Layer layer, const char* name) {
return (*layer)->inputNameToIndex(name);
}
int Layer_OutputNameToIndex(Layer layer, const char* name) {
return (*layer)->outputNameToIndex(name);
}
const char* Layer_GetName(Layer layer) {
return (*layer)->name.c_str();
}
const char* Layer_GetType(Layer layer) {
return (*layer)->type.c_str();
}
void NMSBoxes(struct Rects bboxes, FloatVector scores, float score_threshold, float nms_threshold, IntVector* indices) {
std::vector<cv::Rect> _bboxes;
for (int i = 0; i < bboxes.length; ++i) {
_bboxes.push_back(cv::Rect(
bboxes.rects[i].x,
bboxes.rects[i].y,
bboxes.rects[i].width,
bboxes.rects[i].height
));
}
std::vector<float> _scores;
float* f;
int i;
for (i = 0, f = scores.val; i < scores.length; ++f, ++i) {
_scores.push_back(*f);
}
std::vector<int> _indices(indices->length);
cv::dnn::NMSBoxes(_bboxes, _scores, score_threshold, nms_threshold, _indices, 1.f, 0);
int* ptr = new int[_indices.size()];
for (size_t i=0; i<_indices.size(); ++i) {
ptr[i] = _indices[i];
}
indices->length = _indices.size();
indices->val = ptr;
return;
}
void NMSBoxesWithParams(struct Rects bboxes, FloatVector scores, const float score_threshold, const float nms_threshold, IntVector* indices, const float eta, const int top_k) {
std::vector<cv::Rect> _bboxes;
for (int i = 0; i < bboxes.length; ++i) {
_bboxes.push_back(cv::Rect(
bboxes.rects[i].x,
bboxes.rects[i].y,
bboxes.rects[i].width,
bboxes.rects[i].height
));
}
std::vector<float> _scores;
float* f;
int i;
for (i = 0, f = scores.val; i < scores.length; ++f, ++i) {
_scores.push_back(*f);
}
std::vector<int> _indices(indices->length);
cv::dnn::NMSBoxes(_bboxes, _scores, score_threshold, nms_threshold, _indices, eta, top_k);
int* ptr = new int[_indices.size()];
for (size_t i=0; i<_indices.size(); ++i) {
ptr[i] = _indices[i];
}
indices->length = _indices.size();
indices->val = ptr;
return;
}

615
vendor/gocv.io/x/gocv/dnn.go generated vendored Normal file

@ -0,0 +1,615 @@
package gocv
/*
#include <stdlib.h>
#include "dnn.h"
*/
import "C"
import (
"image"
"reflect"
"unsafe"
)
// Net allows you to create and manipulate comprehensive artificial neural networks.
//
// For further details, please see:
// https://docs.opencv.org/master/db/d30/classcv_1_1dnn_1_1Net.html
//
type Net struct {
// C.Net
p unsafe.Pointer
}
// NetBackendType is the type for the various different kinds of DNN backends.
type NetBackendType int
const (
// NetBackendDefault is the default backend.
NetBackendDefault NetBackendType = 0
// NetBackendHalide is the Halide backend.
NetBackendHalide NetBackendType = 1
// NetBackendOpenVINO is the OpenVINO backend.
NetBackendOpenVINO NetBackendType = 2
// NetBackendOpenCV is the OpenCV backend.
NetBackendOpenCV NetBackendType = 3
// NetBackendVKCOM is the Vulkan backend.
NetBackendVKCOM NetBackendType = 4
// NetBackendCUDA is the Cuda backend.
NetBackendCUDA NetBackendType = 5
)
// ParseNetBackend returns a valid NetBackendType given a string. Valid values are:
// - halide
// - openvino
// - opencv
// - vulkan
// - cuda
// - default
func ParseNetBackend(backend string) NetBackendType {
switch backend {
case "halide":
return NetBackendHalide
case "openvino":
return NetBackendOpenVINO
case "opencv":
return NetBackendOpenCV
case "vulkan":
return NetBackendVKCOM
case "cuda":
return NetBackendCUDA
default:
return NetBackendDefault
}
}
// NetTargetType is the type for the various different kinds of DNN device targets.
type NetTargetType int
const (
// NetTargetCPU is the default CPU device target.
NetTargetCPU NetTargetType = 0
// NetTargetFP32 is the 32-bit OpenCL target.
NetTargetFP32 NetTargetType = 1
// NetTargetFP16 is the 16-bit OpenCL target.
NetTargetFP16 NetTargetType = 2
// NetTargetVPU is the Movidius VPU target.
NetTargetVPU NetTargetType = 3
// NetTargetVulkan is the NVIDIA Vulkan target.
NetTargetVulkan NetTargetType = 4
// NetTargetFPGA is the FPGA target.
NetTargetFPGA NetTargetType = 5
// NetTargetCUDA is the CUDA target.
NetTargetCUDA NetTargetType = 6
// NetTargetCUDAFP16 is the CUDA FP16 target.
NetTargetCUDAFP16 NetTargetType = 7
)
// ParseNetTarget returns a valid NetTargetType given a string. Valid values are:
// - cpu
// - fp32
// - fp16
// - vpu
// - vulkan
// - fpga
// - cuda
// - cudafp16
func ParseNetTarget(target string) NetTargetType {
switch target {
case "cpu":
return NetTargetCPU
case "fp32":
return NetTargetFP32
case "fp16":
return NetTargetFP16
case "vpu":
return NetTargetVPU
case "vulkan":
return NetTargetVulkan
case "fpga":
return NetTargetFPGA
case "cuda":
return NetTargetCUDA
case "cudafp16":
return NetTargetCUDAFP16
default:
return NetTargetCPU
}
}
// Close Net
func (net *Net) Close() error {
C.Net_Close((C.Net)(net.p))
net.p = nil
return nil
}
// Empty returns true if there are no layers in the network.
//
// For further details, please see:
// https://docs.opencv.org/master/db/d30/classcv_1_1dnn_1_1Net.html#a6a5778787d5b8770deab5eda6968e66c
//
func (net *Net) Empty() bool {
return bool(C.Net_Empty((C.Net)(net.p)))
}
// SetInput sets the new value for the layer output blob.
//
// For further details, please see:
// https://docs.opencv.org/trunk/db/d30/classcv_1_1dnn_1_1Net.html#a672a08ae76444d75d05d7bfea3e4a328
//
func (net *Net) SetInput(blob Mat, name string) {
cName := C.CString(name)
defer C.free(unsafe.Pointer(cName))
C.Net_SetInput((C.Net)(net.p), blob.p, cName)
}
// Forward runs forward pass to compute output of layer with name outputName.
//
// For further details, please see:
// https://docs.opencv.org/trunk/db/d30/classcv_1_1dnn_1_1Net.html#a98ed94cb6ef7063d3697259566da310b
//
func (net *Net) Forward(outputName string) Mat {
cName := C.CString(outputName)
defer C.free(unsafe.Pointer(cName))
return newMat(C.Net_Forward((C.Net)(net.p), cName))
}
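// A minimal client-side inference sketch; the model path and the empty
// input/output names (meaning the defaults) depend entirely on the network
// being loaded:
//
//    net := gocv.ReadNet("model.onnx", "")
//    defer net.Close()
//    net.SetInput(blob, "")
//    out := net.Forward("")
//    defer out.Close()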
// ForwardLayers runs a forward pass to compute the outputs of the layers listed in outBlobNames.
//
// For further details, please see:
// https://docs.opencv.org/3.4.1/db/d30/classcv_1_1dnn_1_1Net.html#adb34d7650e555264c7da3b47d967311b
//
func (net *Net) ForwardLayers(outBlobNames []string) (blobs []Mat) {
cMats := C.struct_Mats{}
C.Net_ForwardLayers((C.Net)(net.p), &(cMats), toCStrings(outBlobNames))
blobs = make([]Mat, cMats.length)
for i := C.int(0); i < cMats.length; i++ {
blobs[i].p = C.Mats_get(cMats, i)
addMatToProfile(blobs[i].p)
}
return
}
// SetPreferableBackend asks the network to use a specific computation backend.
//
// For further details, please see:
// https://docs.opencv.org/3.4/db/d30/classcv_1_1dnn_1_1Net.html#a7f767df11386d39374db49cd8df8f59e
//
func (net *Net) SetPreferableBackend(backend NetBackendType) error {
C.Net_SetPreferableBackend((C.Net)(net.p), C.int(backend))
return nil
}
// SetPreferableTarget asks the network to make computations on a specific target device.
//
// For further details, please see:
// https://docs.opencv.org/3.4/db/d30/classcv_1_1dnn_1_1Net.html#a9dddbefbc7f3defbe3eeb5dc3d3483f4
//
func (net *Net) SetPreferableTarget(target NetTargetType) error {
C.Net_SetPreferableTarget((C.Net)(net.p), C.int(target))
return nil
}
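// A client-side backend/target selection sketch; both Parse helpers fall
// back to the defaults when the string is not recognised:
//
//    net.SetPreferableBackend(gocv.ParseNetBackend("cuda"))
//    net.SetPreferableTarget(gocv.ParseNetTarget("cudafp16"))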
// ReadNet reads a deep learning network represented in one of the supported formats.
//
// For further details, please see:
// https://docs.opencv.org/3.4/d6/d0f/group__dnn.html#ga3b34fe7a29494a6a4295c169a7d32422
//
func ReadNet(model string, config string) Net {
cModel := C.CString(model)
defer C.free(unsafe.Pointer(cModel))
cConfig := C.CString(config)
defer C.free(unsafe.Pointer(cConfig))
return Net{p: unsafe.Pointer(C.Net_ReadNet(cModel, cConfig))}
}
// ReadNetBytes reads a deep learning network represented in one of the supported formats.
//
// For further details, please see:
// https://docs.opencv.org/master/d6/d0f/group__dnn.html#ga138439da76f26266fdefec9723f6c5cd
//
func ReadNetBytes(framework string, model []byte, config []byte) (Net, error) {
cFramework := C.CString(framework)
defer C.free(unsafe.Pointer(cFramework))
bModel, err := toByteArray(model)
if err != nil {
return Net{}, err
}
bConfig, err := toByteArray(config)
if err != nil {
return Net{}, err
}
return Net{p: unsafe.Pointer(C.Net_ReadNetBytes(cFramework, *bModel, *bConfig))}, nil
}
// ReadNetFromCaffe reads a network model stored in Caffe framework's format.
//
// For further details, please see:
// https://docs.opencv.org/master/d6/d0f/group__dnn.html#ga29d0ea5e52b1d1a6c2681e3f7d68473a
//
func ReadNetFromCaffe(prototxt string, caffeModel string) Net {
cprototxt := C.CString(prototxt)
defer C.free(unsafe.Pointer(cprototxt))
cmodel := C.CString(caffeModel)
defer C.free(unsafe.Pointer(cmodel))
return Net{p: unsafe.Pointer(C.Net_ReadNetFromCaffe(cprototxt, cmodel))}
}
// ReadNetFromCaffeBytes reads a network model stored in Caffe model in memory.
//
// For further details, please see:
// https://docs.opencv.org/master/d6/d0f/group__dnn.html#ga946b342af1355185a7107640f868b64a
//
func ReadNetFromCaffeBytes(prototxt []byte, caffeModel []byte) (Net, error) {
bPrototxt, err := toByteArray(prototxt)
if err != nil {
return Net{}, err
}
bCaffeModel, err := toByteArray(caffeModel)
if err != nil {
return Net{}, err
}
return Net{p: unsafe.Pointer(C.Net_ReadNetFromCaffeBytes(*bPrototxt, *bCaffeModel))}, nil
}
// ReadNetFromTensorflow reads a network model stored in Tensorflow framework's format.
//
// For further details, please see:
// https://docs.opencv.org/master/d6/d0f/group__dnn.html#gad820b280978d06773234ba6841e77e8d
//
func ReadNetFromTensorflow(model string) Net {
cmodel := C.CString(model)
defer C.free(unsafe.Pointer(cmodel))
return Net{p: unsafe.Pointer(C.Net_ReadNetFromTensorflow(cmodel))}
}
// ReadNetFromTensorflowBytes reads a network model stored in Tensorflow framework's format.
//
// For further details, please see:
// https://docs.opencv.org/master/d6/d0f/group__dnn.html#gacdba30a7c20db2788efbf5bb16a7884d
//
func ReadNetFromTensorflowBytes(model []byte) (Net, error) {
bModel, err := toByteArray(model)
if err != nil {
return Net{}, err
}
return Net{p: unsafe.Pointer(C.Net_ReadNetFromTensorflowBytes(*bModel))}, nil
}
// ReadNetFromTorch reads a network model stored in Torch framework's format (t7).
// check net.Empty() for read failure
//
// For further details, please see:
// https://docs.opencv.org/master/d6/d0f/group__dnn.html#gaaaed8c8530e9e92fe6647700c13d961e
//
func ReadNetFromTorch(model string) Net {
cmodel := C.CString(model)
defer C.free(unsafe.Pointer(cmodel))
return Net{p: unsafe.Pointer(C.Net_ReadNetFromTorch(cmodel))}
}
// ReadNetFromONNX reads a network model stored in ONNX framework's format.
// check net.Empty() for read failure
//
// For further details, please see:
// https://docs.opencv.org/master/d6/d0f/group__dnn.html#ga7faea56041d10c71dbbd6746ca854197
//
func ReadNetFromONNX(model string) Net {
cmodel := C.CString(model)
defer C.free(unsafe.Pointer(cmodel))
return Net{p: unsafe.Pointer(C.Net_ReadNetFromONNX(cmodel))}
}
// ReadNetFromONNXBytes reads a network model stored in ONNX framework's format.
//
// For further details, please see:
// https://docs.opencv.org/master/d6/d0f/group__dnn.html#ga9198ecaac7c32ddf0aa7a1bcbd359567
//
func ReadNetFromONNXBytes(model []byte) (Net, error) {
bModel, err := toByteArray(model)
if err != nil {
return Net{}, err
}
return Net{p: unsafe.Pointer(C.Net_ReadNetFromONNXBytes(*bModel))}, nil
}
// BlobFromImage creates a 4-dimensional blob from an image. Optionally resizes
// and crops the image from its center, subtracts mean values, scales values by
// scalefactor, and swaps Blue and Red channels.
//
// For further details, please see:
// https://docs.opencv.org/trunk/d6/d0f/group__dnn.html#ga152367f253c81b53fe6862b299f5c5cd
//
func BlobFromImage(img Mat, scaleFactor float64, size image.Point, mean Scalar,
swapRB bool, crop bool) Mat {
sz := C.struct_Size{
width: C.int(size.X),
height: C.int(size.Y),
}
sMean := C.struct_Scalar{
val1: C.double(mean.Val1),
val2: C.double(mean.Val2),
val3: C.double(mean.Val3),
val4: C.double(mean.Val4),
}
return newMat(C.Net_BlobFromImage(img.p, C.double(scaleFactor), sz, sMean, C.bool(swapRB), C.bool(crop)))
}
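// A client-side preprocessing sketch; the 300x300 size, 1/255 scale factor
// and zero mean are typical SSD-style values, not requirements of the API:
//
//    blob := gocv.BlobFromImage(img, 1.0/255.0, image.Pt(300, 300),
//        gocv.NewScalar(0, 0, 0, 0), true, false)
//    defer blob.Close()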
// BlobFromImages creates a 4-dimensional blob from a series of images.
// Optionally resizes and crops the images from their centers, subtracts mean
// values, scales values by scalefactor, and swaps Blue and Red channels.
//
// For further details, please see:
// https://docs.opencv.org/master/d6/d0f/group__dnn.html#ga2b89ed84432e4395f5a1412c2926293c
//
func BlobFromImages(imgs []Mat, blob *Mat, scaleFactor float64, size image.Point, mean Scalar,
swapRB bool, crop bool, ddepth MatType) {
cMatArray := make([]C.Mat, len(imgs))
for i, r := range imgs {
cMatArray[i] = r.p
}
cMats := C.struct_Mats{
mats: (*C.Mat)(&cMatArray[0]),
length: C.int(len(imgs)),
}
sz := C.struct_Size{
width: C.int(size.X),
height: C.int(size.Y),
}
sMean := C.struct_Scalar{
val1: C.double(mean.Val1),
val2: C.double(mean.Val2),
val3: C.double(mean.Val3),
val4: C.double(mean.Val4),
}
C.Net_BlobFromImages(cMats, blob.p, C.double(scaleFactor), sz, sMean, C.bool(swapRB), C.bool(crop), C.int(ddepth))
}
// ImagesFromBlob parses a 4D blob and outputs the images it contains as
// 2D arrays through a simpler data structure (std::vector<cv::Mat>).
//
// For further details, please see:
// https://docs.opencv.org/master/d6/d0f/group__dnn.html#ga4051b5fa2ed5f54b76c059a8625df9f5
//
func ImagesFromBlob(blob Mat, imgs []Mat) {
cMats := C.struct_Mats{}
C.Net_ImagesFromBlob(blob.p, &(cMats))
// mv = make([]Mat, cMats.length)
for i := C.int(0); i < cMats.length; i++ {
imgs[i].p = C.Mats_get(cMats, i)
}
}
// GetBlobChannel extracts a single (2D) channel from a 4-dimensional blob structure
// (this might e.g. contain the results of an SSD or YOLO detection,
// a bone structure from pose detection, or a color plane from colorization).
//
func GetBlobChannel(blob Mat, imgidx int, chnidx int) Mat {
return newMat(C.Net_GetBlobChannel(blob.p, C.int(imgidx), C.int(chnidx)))
}
// GetBlobSize retrieves the 4-dimensional size information in (N,C,H,W) order
//
func GetBlobSize(blob Mat) Scalar {
s := C.Net_GetBlobSize(blob.p)
return NewScalar(float64(s.val1), float64(s.val2), float64(s.val3), float64(s.val4))
}
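// A shape-inspection sketch: for a 4D blob the four scalar fields map to
// (N, C, H, W):
//
//    sz := gocv.GetBlobSize(blob)
//    n, c, h, w := int(sz.Val1), int(sz.Val2), int(sz.Val3), int(sz.Val4)
//    _, _, _, _ = n, c, h, w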
// Layer is a wrapper around the cv::dnn::Layer algorithm.
type Layer struct {
// C.Layer
p unsafe.Pointer
}
// GetLayer returns pointer to layer with specified id from the network.
//
// For further details, please see:
// https://docs.opencv.org/master/db/d30/classcv_1_1dnn_1_1Net.html#a70aec7f768f38c32b1ee25f3a56526df
//
func (net *Net) GetLayer(layer int) Layer {
return Layer{p: unsafe.Pointer(C.Net_GetLayer((C.Net)(net.p), C.int(layer)))}
}
// GetPerfProfile returns overall time for inference and timings (in ticks) for layers
//
// For further details, please see:
// https://docs.opencv.org/master/db/d30/classcv_1_1dnn_1_1Net.html#a06ce946f675f75d1c020c5ddbc78aedc
//
func (net *Net) GetPerfProfile() float64 {
return float64(C.Net_GetPerfProfile((C.Net)(net.p)))
}
// GetUnconnectedOutLayers returns indexes of layers with unconnected outputs.
//
// For further details, please see:
// https://docs.opencv.org/master/db/d30/classcv_1_1dnn_1_1Net.html#ae62a73984f62c49fd3e8e689405b056a
//
func (net *Net) GetUnconnectedOutLayers() (ids []int) {
cids := C.IntVector{}
C.Net_GetUnconnectedOutLayers((C.Net)(net.p), &cids)
defer C.free(unsafe.Pointer(cids.val))
h := &reflect.SliceHeader{
Data: uintptr(unsafe.Pointer(cids.val)),
Len: int(cids.length),
Cap: int(cids.length),
}
pcids := *(*[]C.int)(unsafe.Pointer(h))
for i := 0; i < int(cids.length); i++ {
ids = append(ids, int(pcids[i]))
}
return
}
// GetLayerNames returns all layer names.
//
// For further details, please see:
// https://docs.opencv.org/master/db/d30/classcv_1_1dnn_1_1Net.html#ae8be9806024a0d1d41aba687cce99e6b
//
func (net *Net) GetLayerNames() (names []string) {
cstrs := C.CStrings{}
defer C.CStrings_Close(cstrs)
C.Net_GetLayerNames((C.Net)(net.p), &cstrs)
return toGoStrings(cstrs)
}
// Close Layer
func (l *Layer) Close() error {
C.Layer_Close((C.Layer)(l.p))
l.p = nil
return nil
}
// GetName returns name for this layer.
func (l *Layer) GetName() string {
return C.GoString(C.Layer_GetName((C.Layer)(l.p)))
}
// GetType returns type for this layer.
func (l *Layer) GetType() string {
return C.GoString(C.Layer_GetType((C.Layer)(l.p)))
}
// InputNameToIndex returns index of input blob in input array.
//
// For further details, please see:
// https://docs.opencv.org/master/d3/d6c/classcv_1_1dnn_1_1Layer.html#a60ffc8238f3fa26cd3f49daa7ac0884b
//
func (l *Layer) InputNameToIndex(name string) int {
cName := C.CString(name)
defer C.free(unsafe.Pointer(cName))
return int(C.Layer_InputNameToIndex((C.Layer)(l.p), cName))
}
// OutputNameToIndex returns index of output blob in output array.
//
// For further details, please see:
// https://docs.opencv.org/master/d3/d6c/classcv_1_1dnn_1_1Layer.html#a60ffc8238f3fa26cd3f49daa7ac0884b
//
func (l *Layer) OutputNameToIndex(name string) int {
cName := C.CString(name)
defer C.free(unsafe.Pointer(cName))
return int(C.Layer_OutputNameToIndex((C.Layer)(l.p), cName))
}
// NMSBoxes performs non-maximum suppression given boxes and corresponding scores.
// The indices slice must be pre-allocated by the caller; the kept box indexes
// are written into its leading elements.
//
// For further details, please see:
// https://docs.opencv.org/4.4.0/d6/d0f/group__dnn.html#ga9d118d70a1659af729d01b10233213ee
func NMSBoxes(bboxes []image.Rectangle, scores []float32, scoreThreshold float32, nmsThreshold float32, indices []int) {
bboxesRectArr := []C.struct_Rect{}
for _, v := range bboxes {
bbox := C.struct_Rect{
x: C.int(v.Min.X),
y: C.int(v.Min.Y),
width: C.int(v.Size().X),
height: C.int(v.Size().Y),
}
bboxesRectArr = append(bboxesRectArr, bbox)
}
bboxesRects := C.Rects{
rects: (*C.Rect)(&bboxesRectArr[0]),
length: C.int(len(bboxes)),
}
scoresFloats := []C.float{}
for _, v := range scores {
scoresFloats = append(scoresFloats, C.float(v))
}
scoresVector := C.struct_FloatVector{}
scoresVector.val = (*C.float)(&scoresFloats[0])
scoresVector.length = (C.int)(len(scoresFloats))
indicesVector := C.IntVector{}
C.NMSBoxes(bboxesRects, scoresVector, C.float(scoreThreshold), C.float(nmsThreshold), &indicesVector)
defer C.free(unsafe.Pointer(indicesVector.val))
h := &reflect.SliceHeader{
Data: uintptr(unsafe.Pointer(indicesVector.val)),
Len: int(indicesVector.length),
Cap: int(indicesVector.length),
}
ptr := *(*[]C.int)(unsafe.Pointer(h))
for i := 0; i < int(indicesVector.length); i++ {
indices[i] = int(ptr[i])
}
return
}
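A sketch of the usual post-detection filtering pass; boxes and confidences are assumed to come from decoding the network output, and the 0.5/0.4 thresholds are illustrative, not defaults of this package:
indices := make([]int, len(boxes)) // must be pre-allocated; unwritten tail entries stay zero
gocv.NMSBoxes(boxes, confidences, 0.5, 0.4, indices)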
// NMSBoxesWithParams performs non-maximum suppression given boxes and corresponding scores, honoring the additional eta and topK parameters.
//
// For further details, please see:
// https://docs.opencv.org/4.4.0/d6/d0f/group__dnn.html#ga9d118d70a1659af729d01b10233213ee
func NMSBoxesWithParams(bboxes []image.Rectangle, scores []float32, scoreThreshold float32, nmsThreshold float32, indices []int, eta float32, topK int) {
bboxesRectArr := []C.struct_Rect{}
for _, v := range bboxes {
bbox := C.struct_Rect{
x: C.int(v.Min.X),
y: C.int(v.Min.Y),
width: C.int(v.Size().X),
height: C.int(v.Size().Y),
}
bboxesRectArr = append(bboxesRectArr, bbox)
}
bboxesRects := C.Rects{
rects: (*C.Rect)(&bboxesRectArr[0]),
length: C.int(len(bboxes)),
}
scoresFloats := []C.float{}
for _, v := range scores {
scoresFloats = append(scoresFloats, C.float(v))
}
scoresVector := C.struct_FloatVector{}
scoresVector.val = (*C.float)(&scoresFloats[0])
scoresVector.length = (C.int)(len(scoresFloats))
indicesVector := C.IntVector{}
C.NMSBoxesWithParams(bboxesRects, scoresVector, C.float(scoreThreshold), C.float(nmsThreshold), &indicesVector, C.float(eta), C.int(topK))
defer C.free(unsafe.Pointer(indicesVector.val))
h := &reflect.SliceHeader{
Data: uintptr(unsafe.Pointer(indicesVector.val)),
Len: int(indicesVector.length),
Cap: int(indicesVector.length),
}
ptr := *(*[]C.int)(unsafe.Pointer(h))
for i := 0; i < int(indicesVector.length); i++ {
indices[i] = int(ptr[i])
}
return
}

64
vendor/gocv.io/x/gocv/dnn.h generated vendored Normal file
View File

@ -0,0 +1,64 @@
#ifndef _OPENCV3_DNN_H_
#define _OPENCV3_DNN_H_
#include <stdbool.h>
#ifdef __cplusplus
#include <opencv2/opencv.hpp>
#include <opencv2/dnn.hpp>
extern "C" {
#endif
#include "core.h"
#ifdef __cplusplus
typedef cv::dnn::Net* Net;
typedef cv::Ptr<cv::dnn::Layer>* Layer;
#else
typedef void* Net;
typedef void* Layer;
#endif
Net Net_ReadNet(const char* model, const char* config);
Net Net_ReadNetBytes(const char* framework, struct ByteArray model, struct ByteArray config);
Net Net_ReadNetFromCaffe(const char* prototxt, const char* caffeModel);
Net Net_ReadNetFromCaffeBytes(struct ByteArray prototxt, struct ByteArray caffeModel);
Net Net_ReadNetFromTensorflow(const char* model);
Net Net_ReadNetFromTensorflowBytes(struct ByteArray model);
Net Net_ReadNetFromTorch(const char* model);
Net Net_ReadNetFromONNX(const char* model);
Net Net_ReadNetFromONNXBytes(struct ByteArray model);
Mat Net_BlobFromImage(Mat image, double scalefactor, Size size, Scalar mean, bool swapRB,
bool crop);
void Net_BlobFromImages(struct Mats images, Mat blob, double scalefactor, Size size,
Scalar mean, bool swapRB, bool crop, int ddepth);
void Net_ImagesFromBlob(Mat blob_, struct Mats* images_);
void Net_Close(Net net);
bool Net_Empty(Net net);
void Net_SetInput(Net net, Mat blob, const char* name);
Mat Net_Forward(Net net, const char* outputName);
void Net_ForwardLayers(Net net, struct Mats* outputBlobs, struct CStrings outBlobNames);
void Net_SetPreferableBackend(Net net, int backend);
void Net_SetPreferableTarget(Net net, int target);
int64_t Net_GetPerfProfile(Net net);
void Net_GetUnconnectedOutLayers(Net net, IntVector* res);
void Net_GetLayerNames(Net net, CStrings* names);
Mat Net_GetBlobChannel(Mat blob, int imgidx, int chnidx);
Scalar Net_GetBlobSize(Mat blob);
Layer Net_GetLayer(Net net, int layerid);
void Layer_Close(Layer layer);
int Layer_InputNameToIndex(Layer layer, const char* name);
int Layer_OutputNameToIndex(Layer layer, const char* name);
const char* Layer_GetName(Layer layer);
const char* Layer_GetType(Layer layer);
void NMSBoxes(struct Rects bboxes, FloatVector scores, float score_threshold, float nms_threshold, IntVector* indices);
void NMSBoxesWithParams(struct Rects bboxes, FloatVector scores, const float score_threshold, const float nms_threshold, IntVector* indices, const float eta, const int top_k);
#ifdef __cplusplus
}
#endif
#endif //_OPENCV3_DNN_H_

26
vendor/gocv.io/x/gocv/dnn_async_openvino.go generated vendored Normal file
View File

@ -0,0 +1,26 @@
// +build openvino
package gocv
import (
"unsafe"
)
/*
#include <stdlib.h>
#include "dnn.h"
#include "asyncarray.h"
*/
import "C"
// ForwardAsync runs forward pass to compute output of layer with name outputName.
//
// For further details, please see:
// https://docs.opencv.org/trunk/db/d30/classcv_1_1dnn_1_1Net.html#a814890154ea9e10b132fec00b6f6ba30
//
func (net *Net) ForwardAsync(outputName string) AsyncArray {
cName := C.CString(outputName)
defer C.free(unsafe.Pointer(cName))
return newAsyncArray(C.Net_forwardAsync((C.Net)(net.p), cName))
}
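A sketch of consuming the async result, assuming the openvino build tag is active and that AsyncArray exposes Get(*Mat) and Close as elsewhere in this vendored package; the layer name is hypothetical:
async := net.ForwardAsync("detection_out")
defer async.Close()
out := gocv.NewMat()
defer out.Close()
if err := async.Get(&out); err != nil {
	// handle a failed async fetch
}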

67
vendor/gocv.io/x/gocv/dnn_ext.go generated vendored Normal file
View File

@ -0,0 +1,67 @@
package gocv
import (
"image"
)
// FP16BlobFromImage is an extended helper function to convert an Image to a half-float blob, as used by
// the Movidius Neural Compute Stick.
func FP16BlobFromImage(img Mat, scaleFactor float32, size image.Point, mean float32,
swapRB bool, crop bool) []byte {
// resizes image so it maintains aspect ratio
width := float32(img.Cols())
height := float32(img.Rows())
square := NewMatWithSize(size.Y, size.X, img.Type())
defer square.Close()
maxDim := height
var scale float32 = 1.0
if width > height {
maxDim = width
scale = float32(size.X) / float32(maxDim)
}
if width < height {
scale = float32(size.Y) / float32(maxDim)
}
var roi image.Rectangle
if width >= height {
roi.Min.X = 0
roi.Min.Y = int(float32(size.Y)-height*scale) / 2
roi.Max.X = size.X
roi.Max.Y = int(height * scale)
} else {
roi.Min.X = int(float32(size.X)-width*scale) / 2
roi.Min.Y = 0
roi.Max.X = int(width * scale)
roi.Max.Y = size.Y
}
Resize(img, &square, roi.Max, 0, 0, InterpolationDefault)
if swapRB {
CvtColor(square, &square, ColorBGRToRGB)
}
fp32Image := NewMat()
defer fp32Image.Close()
square.ConvertTo(&fp32Image, MatTypeCV32F)
if mean != 0 {
// subtract mean
fp32Image.SubtractFloat(mean)
}
if scaleFactor != 1.0 {
// multiply by scale factor
fp32Image.MultiplyFloat(scaleFactor)
}
fp16Blob := fp32Image.ConvertFp16()
defer fp16Blob.Close()
return fp16Blob.ToBytes()
}
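Usage sketch with illustrative preprocessing constants for a 224x224 half-float model:
fp16 := gocv.FP16BlobFromImage(img, 1.0/255.0, image.Pt(224, 224), 0, true, false)
// fp16 is a raw FP16 byte buffer, ready to hand to an NCS-style device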

41
vendor/gocv.io/x/gocv/dnn_string.go generated vendored Normal file
View File

@ -0,0 +1,41 @@
package gocv
func (c NetBackendType) String() string {
switch c {
case NetBackendDefault:
return ""
case NetBackendHalide:
return "halide"
case NetBackendOpenVINO:
return "openvino"
case NetBackendOpenCV:
return "opencv"
case NetBackendVKCOM:
return "vulkan"
case NetBackendCUDA:
return "cuda"
}
return ""
}
func (c NetTargetType) String() string {
switch c {
case NetTargetCPU:
return "cpu"
case NetTargetFP32:
return "fp32"
case NetTargetFP16:
return "fp16"
case NetTargetVPU:
return "vpu"
case NetTargetVulkan:
return "vulkan"
case NetTargetFPGA:
return "fpga"
case NetTargetCUDA:
return "cuda"
case NetTargetCUDAFP16:
return "cudafp16"
}
return ""
}
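These String forms make the chosen backend and target easy to log, e.g.:
fmt.Printf("dnn backend=%s target=%s\n", gocv.NetBackendCUDA, gocv.NetTargetCPU)
// prints: dnn backend=cuda target=cpu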

2
vendor/gocv.io/x/gocv/env.cmd generated vendored Normal file
View File

@ -0,0 +1,2 @@
ECHO This script is no longer necessary and has been deprecated.
ECHO See the Custom Environment section of the README if you need to customize your environment.

2
vendor/gocv.io/x/gocv/env.sh generated vendored Normal file
View File

@ -0,0 +1,2 @@
echo "This script is no longer necessary and has been deprecated."
echo "See the Custom Environment section of the README if you need to customize your environment."

559
vendor/gocv.io/x/gocv/features2d.cpp generated vendored Normal file
View File

@ -0,0 +1,559 @@
#include "features2d.h"
AKAZE AKAZE_Create() {
// TODO: params
return new cv::Ptr<cv::AKAZE>(cv::AKAZE::create());
}
void AKAZE_Close(AKAZE a) {
delete a;
}
struct KeyPoints AKAZE_Detect(AKAZE a, Mat src) {
std::vector<cv::KeyPoint> detected;
(*a)->detect(*src, detected);
KeyPoint* kps = new KeyPoint[detected.size()];
for (size_t i = 0; i < detected.size(); ++i) {
KeyPoint k = {detected[i].pt.x, detected[i].pt.y, detected[i].size, detected[i].angle,
detected[i].response, detected[i].octave, detected[i].class_id
};
kps[i] = k;
}
KeyPoints ret = {kps, (int)detected.size()};
return ret;
}
struct KeyPoints AKAZE_DetectAndCompute(AKAZE a, Mat src, Mat mask, Mat desc) {
std::vector<cv::KeyPoint> detected;
(*a)->detectAndCompute(*src, *mask, detected, *desc);
KeyPoint* kps = new KeyPoint[detected.size()];
for (size_t i = 0; i < detected.size(); ++i) {
KeyPoint k = {detected[i].pt.x, detected[i].pt.y, detected[i].size, detected[i].angle,
detected[i].response, detected[i].octave, detected[i].class_id
};
kps[i] = k;
}
KeyPoints ret = {kps, (int)detected.size()};
return ret;
}
AgastFeatureDetector AgastFeatureDetector_Create() {
// TODO: params
return new cv::Ptr<cv::AgastFeatureDetector>(cv::AgastFeatureDetector::create());
}
void AgastFeatureDetector_Close(AgastFeatureDetector a) {
delete a;
}
struct KeyPoints AgastFeatureDetector_Detect(AgastFeatureDetector a, Mat src) {
std::vector<cv::KeyPoint> detected;
(*a)->detect(*src, detected);
KeyPoint* kps = new KeyPoint[detected.size()];
for (size_t i = 0; i < detected.size(); ++i) {
KeyPoint k = {detected[i].pt.x, detected[i].pt.y, detected[i].size, detected[i].angle,
detected[i].response, detected[i].octave, detected[i].class_id
};
kps[i] = k;
}
KeyPoints ret = {kps, (int)detected.size()};
return ret;
}
BRISK BRISK_Create() {
// TODO: params
return new cv::Ptr<cv::BRISK>(cv::BRISK::create());
}
void BRISK_Close(BRISK b) {
delete b;
}
struct KeyPoints BRISK_Detect(BRISK b, Mat src) {
std::vector<cv::KeyPoint> detected;
(*b)->detect(*src, detected);
KeyPoint* kps = new KeyPoint[detected.size()];
for (size_t i = 0; i < detected.size(); ++i) {
KeyPoint k = {detected[i].pt.x, detected[i].pt.y, detected[i].size, detected[i].angle,
detected[i].response, detected[i].octave, detected[i].class_id
};
kps[i] = k;
}
KeyPoints ret = {kps, (int)detected.size()};
return ret;
}
struct KeyPoints BRISK_DetectAndCompute(BRISK b, Mat src, Mat mask, Mat desc) {
std::vector<cv::KeyPoint> detected;
(*b)->detectAndCompute(*src, *mask, detected, *desc);
KeyPoint* kps = new KeyPoint[detected.size()];
for (size_t i = 0; i < detected.size(); ++i) {
KeyPoint k = {detected[i].pt.x, detected[i].pt.y, detected[i].size, detected[i].angle,
detected[i].response, detected[i].octave, detected[i].class_id
};
kps[i] = k;
}
KeyPoints ret = {kps, (int)detected.size()};
return ret;
}
GFTTDetector GFTTDetector_Create() {
// TODO: params
return new cv::Ptr<cv::GFTTDetector>(cv::GFTTDetector::create());
}
void GFTTDetector_Close(GFTTDetector a) {
delete a;
}
struct KeyPoints GFTTDetector_Detect(GFTTDetector a, Mat src) {
std::vector<cv::KeyPoint> detected;
(*a)->detect(*src, detected);
KeyPoint* kps = new KeyPoint[detected.size()];
for (size_t i = 0; i < detected.size(); ++i) {
KeyPoint k = {detected[i].pt.x, detected[i].pt.y, detected[i].size, detected[i].angle,
detected[i].response, detected[i].octave, detected[i].class_id
};
kps[i] = k;
}
KeyPoints ret = {kps, (int)detected.size()};
return ret;
}
KAZE KAZE_Create() {
// TODO: params
return new cv::Ptr<cv::KAZE>(cv::KAZE::create());
}
void KAZE_Close(KAZE a) {
delete a;
}
struct KeyPoints KAZE_Detect(KAZE a, Mat src) {
std::vector<cv::KeyPoint> detected;
(*a)->detect(*src, detected);
KeyPoint* kps = new KeyPoint[detected.size()];
for (size_t i = 0; i < detected.size(); ++i) {
KeyPoint k = {detected[i].pt.x, detected[i].pt.y, detected[i].size, detected[i].angle,
detected[i].response, detected[i].octave, detected[i].class_id
};
kps[i] = k;
}
KeyPoints ret = {kps, (int)detected.size()};
return ret;
}
struct KeyPoints KAZE_DetectAndCompute(KAZE a, Mat src, Mat mask, Mat desc) {
std::vector<cv::KeyPoint> detected;
(*a)->detectAndCompute(*src, *mask, detected, *desc);
KeyPoint* kps = new KeyPoint[detected.size()];
for (size_t i = 0; i < detected.size(); ++i) {
KeyPoint k = {detected[i].pt.x, detected[i].pt.y, detected[i].size, detected[i].angle,
detected[i].response, detected[i].octave, detected[i].class_id
};
kps[i] = k;
}
KeyPoints ret = {kps, (int)detected.size()};
return ret;
}
MSER MSER_Create() {
// TODO: params
return new cv::Ptr<cv::MSER>(cv::MSER::create());
}
void MSER_Close(MSER a) {
delete a;
}
struct KeyPoints MSER_Detect(MSER a, Mat src) {
std::vector<cv::KeyPoint> detected;
(*a)->detect(*src, detected);
KeyPoint* kps = new KeyPoint[detected.size()];
for (size_t i = 0; i < detected.size(); ++i) {
KeyPoint k = {detected[i].pt.x, detected[i].pt.y, detected[i].size, detected[i].angle,
detected[i].response, detected[i].octave, detected[i].class_id
};
kps[i] = k;
}
KeyPoints ret = {kps, (int)detected.size()};
return ret;
}
FastFeatureDetector FastFeatureDetector_Create() {
return new cv::Ptr<cv::FastFeatureDetector>(cv::FastFeatureDetector::create());
}
void FastFeatureDetector_Close(FastFeatureDetector f) {
delete f;
}
FastFeatureDetector FastFeatureDetector_CreateWithParams(int threshold, bool nonmaxSuppression, int type) {
return new cv::Ptr<cv::FastFeatureDetector>(cv::FastFeatureDetector::create(threshold,nonmaxSuppression,static_cast<cv::FastFeatureDetector::DetectorType>(type)));
}
struct KeyPoints FastFeatureDetector_Detect(FastFeatureDetector f, Mat src) {
std::vector<cv::KeyPoint> detected;
(*f)->detect(*src, detected);
KeyPoint* kps = new KeyPoint[detected.size()];
for (size_t i = 0; i < detected.size(); ++i) {
KeyPoint k = {detected[i].pt.x, detected[i].pt.y, detected[i].size, detected[i].angle,
detected[i].response, detected[i].octave, detected[i].class_id
};
kps[i] = k;
}
KeyPoints ret = {kps, (int)detected.size()};
return ret;
}
ORB ORB_Create() {
return new cv::Ptr<cv::ORB>(cv::ORB::create());
}
ORB ORB_CreateWithParams(int nfeatures, float scaleFactor, int nlevels, int edgeThreshold, int firstLevel, int WTA_K, int scoreType, int patchSize, int fastThreshold) {
return new cv::Ptr<cv::ORB>(cv::ORB::create(nfeatures, scaleFactor, nlevels, edgeThreshold, firstLevel, WTA_K, static_cast<cv::ORB::ScoreType>(scoreType), patchSize, fastThreshold));
}
void ORB_Close(ORB o) {
delete o;
}
struct KeyPoints ORB_Detect(ORB o, Mat src) {
std::vector<cv::KeyPoint> detected;
(*o)->detect(*src, detected);
KeyPoint* kps = new KeyPoint[detected.size()];
for (size_t i = 0; i < detected.size(); ++i) {
KeyPoint k = {detected[i].pt.x, detected[i].pt.y, detected[i].size, detected[i].angle,
detected[i].response, detected[i].octave, detected[i].class_id
};
kps[i] = k;
}
KeyPoints ret = {kps, (int)detected.size()};
return ret;
}
struct KeyPoints ORB_DetectAndCompute(ORB o, Mat src, Mat mask, Mat desc) {
std::vector<cv::KeyPoint> detected;
(*o)->detectAndCompute(*src, *mask, detected, *desc);
KeyPoint* kps = new KeyPoint[detected.size()];
for (size_t i = 0; i < detected.size(); ++i) {
KeyPoint k = {detected[i].pt.x, detected[i].pt.y, detected[i].size, detected[i].angle,
detected[i].response, detected[i].octave, detected[i].class_id
};
kps[i] = k;
}
KeyPoints ret = {kps, (int)detected.size()};
return ret;
}
cv::SimpleBlobDetector::Params ConvertCParamsToCPPParams(SimpleBlobDetectorParams params) {
cv::SimpleBlobDetector::Params converted;
converted.blobColor = params.blobColor;
converted.filterByArea = params.filterByArea;
converted.filterByCircularity = params.filterByCircularity;
converted.filterByColor = params.filterByColor;
converted.filterByConvexity = params.filterByConvexity;
converted.filterByInertia = params.filterByInertia;
converted.maxArea = params.maxArea;
converted.maxCircularity = params.maxCircularity;
converted.maxConvexity = params.maxConvexity;
converted.maxInertiaRatio = params.maxInertiaRatio;
converted.maxThreshold = params.maxThreshold;
converted.minArea = params.minArea;
converted.minCircularity = params.minCircularity;
converted.minConvexity = params.minConvexity;
converted.minDistBetweenBlobs = params.minDistBetweenBlobs;
converted.minInertiaRatio = params.minInertiaRatio;
converted.minRepeatability = params.minRepeatability;
converted.minThreshold = params.minThreshold;
converted.thresholdStep = params.thresholdStep;
return converted;
}
SimpleBlobDetectorParams ConvertCPPParamsToCParams(cv::SimpleBlobDetector::Params params) {
SimpleBlobDetectorParams converted;
converted.blobColor = params.blobColor;
converted.filterByArea = params.filterByArea;
converted.filterByCircularity = params.filterByCircularity;
converted.filterByColor = params.filterByColor;
converted.filterByConvexity = params.filterByConvexity;
converted.filterByInertia = params.filterByInertia;
converted.maxArea = params.maxArea;
converted.maxCircularity = params.maxCircularity;
converted.maxConvexity = params.maxConvexity;
converted.maxInertiaRatio = params.maxInertiaRatio;
converted.maxThreshold = params.maxThreshold;
converted.minArea = params.minArea;
converted.minCircularity = params.minCircularity;
converted.minConvexity = params.minConvexity;
converted.minDistBetweenBlobs = params.minDistBetweenBlobs;
converted.minInertiaRatio = params.minInertiaRatio;
converted.minRepeatability = params.minRepeatability;
converted.minThreshold = params.minThreshold;
converted.thresholdStep = params.thresholdStep;
return converted;
}
SimpleBlobDetector SimpleBlobDetector_Create_WithParams(SimpleBlobDetectorParams params){
return new cv::Ptr<cv::SimpleBlobDetector>(cv::SimpleBlobDetector::create(ConvertCParamsToCPPParams(params)));
}
SimpleBlobDetector SimpleBlobDetector_Create() {
return new cv::Ptr<cv::SimpleBlobDetector>(cv::SimpleBlobDetector::create());
}
SimpleBlobDetectorParams SimpleBlobDetectorParams_Create() {
return ConvertCPPParamsToCParams(cv::SimpleBlobDetector::Params());
}
void SimpleBlobDetector_Close(SimpleBlobDetector b) {
delete b;
}
struct KeyPoints SimpleBlobDetector_Detect(SimpleBlobDetector b, Mat src) {
std::vector<cv::KeyPoint> detected;
(*b)->detect(*src, detected);
KeyPoint* kps = new KeyPoint[detected.size()];
for (size_t i = 0; i < detected.size(); ++i) {
KeyPoint k = {detected[i].pt.x, detected[i].pt.y, detected[i].size, detected[i].angle,
detected[i].response, detected[i].octave, detected[i].class_id
};
kps[i] = k;
}
KeyPoints ret = {kps, (int)detected.size()};
return ret;
}
BFMatcher BFMatcher_Create() {
return new cv::Ptr<cv::BFMatcher>(cv::BFMatcher::create());
}
BFMatcher BFMatcher_CreateWithParams(int normType, bool crossCheck) {
return new cv::Ptr<cv::BFMatcher>(cv::BFMatcher::create(normType, crossCheck));
}
void BFMatcher_Close(BFMatcher b) {
delete b;
}
struct MultiDMatches BFMatcher_KnnMatch(BFMatcher b, Mat query, Mat train, int k) {
std::vector< std::vector<cv::DMatch> > matches;
(*b)->knnMatch(*query, *train, matches, k);
DMatches *dms = new DMatches[matches.size()];
for (size_t i = 0; i < matches.size(); ++i) {
DMatch *dmatches = new DMatch[matches[i].size()];
for (size_t j = 0; j < matches[i].size(); ++j) {
DMatch dmatch = {matches[i][j].queryIdx, matches[i][j].trainIdx, matches[i][j].imgIdx,
matches[i][j].distance};
dmatches[j] = dmatch;
}
dms[i] = {dmatches, (int) matches[i].size()};
}
MultiDMatches ret = {dms, (int) matches.size()};
return ret;
}
struct MultiDMatches BFMatcher_KnnMatchWithParams(BFMatcher b, Mat query, Mat train, int k, Mat mask, bool compactResult) {
std::vector< std::vector<cv::DMatch> > matches;
(*b)->knnMatch(*query, *train, matches, k, *mask, compactResult);
DMatches *dms = new DMatches[matches.size()];
for (size_t i = 0; i < matches.size(); ++i) {
DMatch *dmatches = new DMatch[matches[i].size()];
for (size_t j = 0; j < matches[i].size(); ++j) {
DMatch dmatch = {matches[i][j].queryIdx, matches[i][j].trainIdx, matches[i][j].imgIdx,
matches[i][j].distance};
dmatches[j] = dmatch;
}
dms[i] = {dmatches, (int) matches[i].size()};
}
MultiDMatches ret = {dms, (int) matches.size()};
return ret;
}
FlannBasedMatcher FlannBasedMatcher_Create() {
return new cv::Ptr<cv::FlannBasedMatcher>(cv::FlannBasedMatcher::create());
}
void FlannBasedMatcher_Close(FlannBasedMatcher f) {
delete f;
}
struct MultiDMatches FlannBasedMatcher_KnnMatch(FlannBasedMatcher f, Mat query, Mat train, int k) {
std::vector< std::vector<cv::DMatch> > matches;
(*f)->knnMatch(*query, *train, matches, k);
DMatches *dms = new DMatches[matches.size()];
for (size_t i = 0; i < matches.size(); ++i) {
DMatch *dmatches = new DMatch[matches[i].size()];
for (size_t j = 0; j < matches[i].size(); ++j) {
DMatch dmatch = {matches[i][j].queryIdx, matches[i][j].trainIdx, matches[i][j].imgIdx,
matches[i][j].distance};
dmatches[j] = dmatch;
}
dms[i] = {dmatches, (int) matches[i].size()};
}
MultiDMatches ret = {dms, (int) matches.size()};
return ret;
}
struct MultiDMatches FlannBasedMatcher_KnnMatchWithParams(FlannBasedMatcher f, Mat query, Mat train, int k, Mat mask, bool compactResult) {
std::vector< std::vector<cv::DMatch> > matches;
(*f)->knnMatch(*query, *train, matches, k, *mask, compactResult);
DMatches *dms = new DMatches[matches.size()];
for (size_t i = 0; i < matches.size(); ++i) {
DMatch *dmatches = new DMatch[matches[i].size()];
for (size_t j = 0; j < matches[i].size(); ++j) {
DMatch dmatch = {matches[i][j].queryIdx, matches[i][j].trainIdx, matches[i][j].imgIdx,
matches[i][j].distance};
dmatches[j] = dmatch;
}
dms[i] = {dmatches, (int) matches[i].size()};
}
MultiDMatches ret = {dms, (int) matches.size()};
return ret;
}
void DrawKeyPoints(Mat src, struct KeyPoints kp, Mat dst, Scalar s, int flags) {
std::vector<cv::KeyPoint> keypts;
cv::KeyPoint keypt;
for (int i = 0; i < kp.length; ++i) {
keypt = cv::KeyPoint(kp.keypoints[i].x, kp.keypoints[i].y,
kp.keypoints[i].size, kp.keypoints[i].angle, kp.keypoints[i].response,
kp.keypoints[i].octave, kp.keypoints[i].classID);
keypts.push_back(keypt);
}
cv::Scalar color = cv::Scalar(s.val1, s.val2, s.val3, s.val4);
cv::drawKeypoints(*src, keypts, *dst, color, static_cast<cv::DrawMatchesFlags>(flags));
}
SIFT SIFT_Create() {
// TODO: params
return new cv::Ptr<cv::SIFT>(cv::SIFT::create());
}
void SIFT_Close(SIFT d) {
delete d;
}
struct KeyPoints SIFT_Detect(SIFT d, Mat src) {
std::vector<cv::KeyPoint> detected;
(*d)->detect(*src, detected);
KeyPoint* kps = new KeyPoint[detected.size()];
for (size_t i = 0; i < detected.size(); ++i) {
KeyPoint k = {detected[i].pt.x, detected[i].pt.y, detected[i].size, detected[i].angle,
detected[i].response, detected[i].octave, detected[i].class_id
};
kps[i] = k;
}
KeyPoints ret = {kps, (int)detected.size()};
return ret;
}
struct KeyPoints SIFT_DetectAndCompute(SIFT d, Mat src, Mat mask, Mat desc) {
std::vector<cv::KeyPoint> detected;
(*d)->detectAndCompute(*src, *mask, detected, *desc);
KeyPoint* kps = new KeyPoint[detected.size()];
for (size_t i = 0; i < detected.size(); ++i) {
KeyPoint k = {detected[i].pt.x, detected[i].pt.y, detected[i].size, detected[i].angle,
detected[i].response, detected[i].octave, detected[i].class_id
};
kps[i] = k;
}
KeyPoints ret = {kps, (int)detected.size()};
return ret;
}
void DrawMatches(Mat img1, struct KeyPoints kp1, Mat img2, struct KeyPoints kp2, struct DMatches matches1to2, Mat outImg, const Scalar matchesColor, const Scalar pointColor, struct ByteArray matchesMask, int flags) {
std::vector<cv::KeyPoint> kp1vec, kp2vec;
cv::KeyPoint keypt;
for (int i = 0; i < kp1.length; ++i) {
keypt = cv::KeyPoint(kp1.keypoints[i].x, kp1.keypoints[i].y,
kp1.keypoints[i].size, kp1.keypoints[i].angle, kp1.keypoints[i].response,
kp1.keypoints[i].octave, kp1.keypoints[i].classID);
kp1vec.push_back(keypt);
}
for (int i = 0; i < kp2.length; ++i) {
keypt = cv::KeyPoint(kp2.keypoints[i].x, kp2.keypoints[i].y,
kp2.keypoints[i].size, kp2.keypoints[i].angle, kp2.keypoints[i].response,
kp2.keypoints[i].octave, kp2.keypoints[i].classID);
kp2vec.push_back(keypt);
}
cv::Scalar cvmatchescolor = cv::Scalar(matchesColor.val1, matchesColor.val2, matchesColor.val3, matchesColor.val4);
cv::Scalar cvpointcolor = cv::Scalar(pointColor.val1, pointColor.val2, pointColor.val3, pointColor.val4);
std::vector<cv::DMatch> dmatchvec;
cv::DMatch dm;
for (int i = 0; i < matches1to2.length; i++) {
dm = cv::DMatch(matches1to2.dmatches[i].queryIdx, matches1to2.dmatches[i].trainIdx,
matches1to2.dmatches[i].imgIdx, matches1to2.dmatches[i].distance);
dmatchvec.push_back(dm);
}
std::vector<char> maskvec;
for (int i = 0; i < matchesMask.length; i++) {
maskvec.push_back(matchesMask.data[i]);
}
cv::drawMatches(*img1, kp1vec, *img2, kp2vec, dmatchvec, *outImg, cvmatchescolor, cvpointcolor, maskvec, static_cast<cv::DrawMatchesFlags>(flags));
}

941
vendor/gocv.io/x/gocv/features2d.go generated vendored Normal file
View File

@ -0,0 +1,941 @@
package gocv
/*
#include <stdlib.h>
#include "features2d.h"
*/
import "C"
import (
"image/color"
"reflect"
"unsafe"
)
// AKAZE is a wrapper around the cv::AKAZE algorithm.
type AKAZE struct {
// C.AKAZE
p unsafe.Pointer
}
// NewAKAZE returns a new AKAZE algorithm
//
// For further details, please see:
// https://docs.opencv.org/master/d8/d30/classcv_1_1AKAZE.html
//
func NewAKAZE() AKAZE {
return AKAZE{p: unsafe.Pointer(C.AKAZE_Create())}
}
// Close AKAZE.
func (a *AKAZE) Close() error {
C.AKAZE_Close((C.AKAZE)(a.p))
a.p = nil
return nil
}
// Detect keypoints in an image using AKAZE.
//
// For further details, please see:
// https://docs.opencv.org/master/d0/d13/classcv_1_1Feature2D.html#aa4e9a7082ec61ebc108806704fbd7887
//
func (a *AKAZE) Detect(src Mat) []KeyPoint {
ret := C.AKAZE_Detect((C.AKAZE)(a.p), src.p)
defer C.KeyPoints_Close(ret)
return getKeyPoints(ret)
}
// DetectAndCompute keypoints and compute in an image using AKAZE.
//
// For further details, please see:
// https://docs.opencv.org/master/d0/d13/classcv_1_1Feature2D.html#a8be0d1c20b08eb867184b8d74c15a677
//
func (a *AKAZE) DetectAndCompute(src Mat, mask Mat) ([]KeyPoint, Mat) {
desc := NewMat()
ret := C.AKAZE_DetectAndCompute((C.AKAZE)(a.p), src.p, mask.p, desc.p)
defer C.KeyPoints_Close(ret)
return getKeyPoints(ret), desc
}
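An end-to-end AKAZE sketch; the empty mask means the whole image is used, img is assumed non-empty, and both the descriptor Mat and the detector need closing:
ak := gocv.NewAKAZE()
defer ak.Close()
mask := gocv.NewMat()
defer mask.Close()
kps, desc := ak.DetectAndCompute(img, mask)
defer desc.Close()
fmt.Println("akaze keypoints:", len(kps))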
// AgastFeatureDetector is a wrapper around the cv::AgastFeatureDetector.
type AgastFeatureDetector struct {
// C.AgastFeatureDetector
p unsafe.Pointer
}
// NewAgastFeatureDetector returns a new AgastFeatureDetector algorithm
//
// For further details, please see:
// https://docs.opencv.org/master/d7/d19/classcv_1_1AgastFeatureDetector.html
//
func NewAgastFeatureDetector() AgastFeatureDetector {
return AgastFeatureDetector{p: unsafe.Pointer(C.AgastFeatureDetector_Create())}
}
// Close AgastFeatureDetector.
func (a *AgastFeatureDetector) Close() error {
C.AgastFeatureDetector_Close((C.AgastFeatureDetector)(a.p))
a.p = nil
return nil
}
// Detect keypoints in an image using AgastFeatureDetector.
//
// For further details, please see:
// https://docs.opencv.org/master/d0/d13/classcv_1_1Feature2D.html#aa4e9a7082ec61ebc108806704fbd7887
//
func (a *AgastFeatureDetector) Detect(src Mat) []KeyPoint {
ret := C.AgastFeatureDetector_Detect((C.AgastFeatureDetector)(a.p), src.p)
defer C.KeyPoints_Close(ret)
return getKeyPoints(ret)
}
// BRISK is a wrapper around the cv::BRISK algorithm.
type BRISK struct {
// C.BRISK
p unsafe.Pointer
}
// NewBRISK returns a new BRISK algorithm
//
// For further details, please see:
// https://docs.opencv.org/master/d8/d30/classcv_1_1AKAZE.html
//
func NewBRISK() BRISK {
return BRISK{p: unsafe.Pointer(C.BRISK_Create())}
}
// Close BRISK.
func (b *BRISK) Close() error {
C.BRISK_Close((C.BRISK)(b.p))
b.p = nil
return nil
}
// Detect keypoints in an image using BRISK.
//
// For further details, please see:
// https://docs.opencv.org/master/d0/d13/classcv_1_1Feature2D.html#aa4e9a7082ec61ebc108806704fbd7887
//
func (b *BRISK) Detect(src Mat) []KeyPoint {
ret := C.BRISK_Detect((C.BRISK)(b.p), src.p)
defer C.KeyPoints_Close(ret)
return getKeyPoints(ret)
}
// DetectAndCompute keypoints and compute in an image using BRISK.
//
// For further details, please see:
// https://docs.opencv.org/master/d0/d13/classcv_1_1Feature2D.html#a8be0d1c20b08eb867184b8d74c15a677
//
func (b *BRISK) DetectAndCompute(src Mat, mask Mat) ([]KeyPoint, Mat) {
desc := NewMat()
ret := C.BRISK_DetectAndCompute((C.BRISK)(b.p), src.p, mask.p, desc.p)
defer C.KeyPoints_Close(ret)
return getKeyPoints(ret), desc
}
// FastFeatureDetectorType defines the detector type
//
// For further details, please see:
// https://docs.opencv.org/master/df/d74/classcv_1_1FastFeatureDetector.html#a4654f6fb0aa4b8e9123b223bfa0e2a08
type FastFeatureDetectorType int
const (
//FastFeatureDetectorType58 is an alias of FastFeatureDetector::TYPE_5_8
FastFeatureDetectorType58 FastFeatureDetectorType = 0
//FastFeatureDetectorType712 is an alias of FastFeatureDetector::TYPE_7_12
FastFeatureDetectorType712 FastFeatureDetectorType = 1
//FastFeatureDetectorType916 is an alias of FastFeatureDetector::TYPE_9_16
FastFeatureDetectorType916 FastFeatureDetectorType = 2
)
// FastFeatureDetector is a wrapper around the cv::FastFeatureDetector.
type FastFeatureDetector struct {
// C.FastFeatureDetector
p unsafe.Pointer
}
// NewFastFeatureDetector returns a new FastFeatureDetector algorithm
//
// For further details, please see:
// https://docs.opencv.org/master/df/d74/classcv_1_1FastFeatureDetector.html
//
func NewFastFeatureDetector() FastFeatureDetector {
return FastFeatureDetector{p: unsafe.Pointer(C.FastFeatureDetector_Create())}
}
// NewFastFeatureDetectorWithParams returns a new FastFeatureDetector algorithm with parameters
//
// For further details, please see:
// https://docs.opencv.org/master/df/d74/classcv_1_1FastFeatureDetector.html#ab986f2ff8f8778aab1707e2642bc7f8e
//
func NewFastFeatureDetectorWithParams(threshold int, nonmaxSuppression bool, typ FastFeatureDetectorType) FastFeatureDetector {
return FastFeatureDetector{p: unsafe.Pointer(C.FastFeatureDetector_CreateWithParams(C.int(threshold), C.bool(nonmaxSuppression), C.int(typ)))}
}
// Close FastFeatureDetector.
func (f *FastFeatureDetector) Close() error {
C.FastFeatureDetector_Close((C.FastFeatureDetector)(f.p))
f.p = nil
return nil
}
// Detect keypoints in an image using FastFeatureDetector.
//
// For further details, please see:
// https://docs.opencv.org/master/d0/d13/classcv_1_1Feature2D.html#aa4e9a7082ec61ebc108806704fbd7887
//
func (f *FastFeatureDetector) Detect(src Mat) []KeyPoint {
ret := C.FastFeatureDetector_Detect((C.FastFeatureDetector)(f.p), src.p)
defer C.KeyPoints_Close(ret)
return getKeyPoints(ret)
}
// GFTTDetector is a wrapper around the cv::GFTTDetector algorithm.
type GFTTDetector struct {
// C.GFTTDetector
p unsafe.Pointer
}
// NewGFTTDetector returns a new GFTTDetector algorithm
//
// For further details, please see:
// https://docs.opencv.org/master/df/d21/classcv_1_1GFTTDetector.html
//
func NewGFTTDetector() GFTTDetector {
return GFTTDetector{p: unsafe.Pointer(C.GFTTDetector_Create())}
}
// Close GFTTDetector.
func (a *GFTTDetector) Close() error {
C.GFTTDetector_Close((C.GFTTDetector)(a.p))
a.p = nil
return nil
}
// Detect keypoints in an image using GFTTDetector.
//
// For further details, please see:
// https://docs.opencv.org/master/d0/d13/classcv_1_1Feature2D.html#aa4e9a7082ec61ebc108806704fbd7887
//
func (a *GFTTDetector) Detect(src Mat) []KeyPoint {
ret := C.GFTTDetector_Detect((C.GFTTDetector)(a.p), src.p)
defer C.KeyPoints_Close(ret)
return getKeyPoints(ret)
}
// KAZE is a wrapper around the cv::KAZE algorithm.
type KAZE struct {
// C.KAZE
p unsafe.Pointer
}
// NewKAZE returns a new KAZE algorithm
//
// For further details, please see:
// https://docs.opencv.org/master/d3/d61/classcv_1_1KAZE.html
//
func NewKAZE() KAZE {
return KAZE{p: unsafe.Pointer(C.KAZE_Create())}
}
// Close KAZE.
func (a *KAZE) Close() error {
C.KAZE_Close((C.KAZE)(a.p))
a.p = nil
return nil
}
// Detect keypoints in an image using KAZE.
//
// For further details, please see:
// https://docs.opencv.org/master/d0/d13/classcv_1_1Feature2D.html#aa4e9a7082ec61ebc108806704fbd7887
//
func (a *KAZE) Detect(src Mat) []KeyPoint {
ret := C.KAZE_Detect((C.KAZE)(a.p), src.p)
defer C.KeyPoints_Close(ret)
return getKeyPoints(ret)
}
// DetectAndCompute keypoints and compute in an image using KAZE.
//
// For further details, please see:
// https://docs.opencv.org/master/d0/d13/classcv_1_1Feature2D.html#a8be0d1c20b08eb867184b8d74c15a677
//
func (a *KAZE) DetectAndCompute(src Mat, mask Mat) ([]KeyPoint, Mat) {
desc := NewMat()
ret := C.KAZE_DetectAndCompute((C.KAZE)(a.p), src.p, mask.p, desc.p)
defer C.KeyPoints_Close(ret)
return getKeyPoints(ret), desc
}
// MSER is a wrapper around the cv::MSER algorithm.
type MSER struct {
// C.MSER
p unsafe.Pointer
}
// NewMSER returns a new MSER algorithm
//
// For further details, please see:
// https://docs.opencv.org/master/d3/d28/classcv_1_1MSER.html
//
func NewMSER() MSER {
return MSER{p: unsafe.Pointer(C.MSER_Create())}
}
// Close MSER.
func (a *MSER) Close() error {
C.MSER_Close((C.MSER)(a.p))
a.p = nil
return nil
}
// Detect keypoints in an image using MSER.
//
// For further details, please see:
// https://docs.opencv.org/master/d0/d13/classcv_1_1Feature2D.html#aa4e9a7082ec61ebc108806704fbd7887
//
func (a *MSER) Detect(src Mat) []KeyPoint {
ret := C.MSER_Detect((C.MSER)(a.p), src.p)
defer C.KeyPoints_Close(ret)
return getKeyPoints(ret)
}
// ORB is a wrapper around the cv::ORB.
type ORB struct {
// C.ORB
p unsafe.Pointer
}
// NewORB returns a new ORB algorithm
//
// For further details, please see:
// https://docs.opencv.org/master/db/d95/classcv_1_1ORB.html
//
func NewORB() ORB {
return ORB{p: unsafe.Pointer(C.ORB_Create())}
}
// NewORBWithParams returns a new ORB algorithm with parameters
//
// For further details, please see:
// https://docs.opencv.org/master/db/d95/classcv_1_1ORB.html#aeff0cbe668659b7ca14bb85ff1c4073b
//
func NewORBWithParams(nFeatures int, scaleFactor float32, nLevels int, edgeThreshold int, firstLevel int, WTAK int, scoreType ORBScoreType, patchSize int, fastThreshold int) ORB {
return ORB{p: unsafe.Pointer(C.ORB_CreateWithParams(
C.int(nFeatures),
C.float(scaleFactor),
C.int(nLevels),
C.int(edgeThreshold),
C.int(firstLevel),
C.int(WTAK),
C.int(scoreType),
C.int(patchSize),
C.int(fastThreshold),
))}
}
type ORBScoreType int
const (
ORBScoreTypeHarris ORBScoreType = 0
ORBScoreTypeFAST ORBScoreType = 1
)
// Close ORB.
func (o *ORB) Close() error {
C.ORB_Close((C.ORB)(o.p))
o.p = nil
return nil
}
// Detect keypoints in an image using ORB.
//
// For further details, please see:
// https://docs.opencv.org/master/d0/d13/classcv_1_1Feature2D.html#aa4e9a7082ec61ebc108806704fbd7887
//
func (o *ORB) Detect(src Mat) []KeyPoint {
ret := C.ORB_Detect((C.ORB)(o.p), src.p)
defer C.KeyPoints_Close(ret)
return getKeyPoints(ret)
}
// DetectAndCompute detects keypoints and computes from an image using ORB.
//
// For further details, please see:
// https://docs.opencv.org/master/d0/d13/classcv_1_1Feature2D.html#a8be0d1c20b08eb867184b8d74c15a677
//
func (o *ORB) DetectAndCompute(src Mat, mask Mat) ([]KeyPoint, Mat) {
desc := NewMat()
ret := C.ORB_DetectAndCompute((C.ORB)(o.p), src.p, mask.p, desc.p)
defer C.KeyPoints_Close(ret)
return getKeyPoints(ret), desc
}
// SimpleBlobDetector is a wrapper around the cv::SimpleBlobDetector.
type SimpleBlobDetector struct {
// C.SimpleBlobDetector
p unsafe.Pointer
}
// SimpleBlobDetectorParams is a wrapper around the cv::SimpleBlobDetector::Params
type SimpleBlobDetectorParams struct {
p C.SimpleBlobDetectorParams
}
// NewSimpleBlobDetector returns a new SimpleBlobDetector algorithm
//
// For further details, please see:
// https://docs.opencv.org/master/d0/d7a/classcv_1_1SimpleBlobDetector.html
//
func NewSimpleBlobDetector() SimpleBlobDetector {
return SimpleBlobDetector{p: unsafe.Pointer(C.SimpleBlobDetector_Create())}
}
// NewSimpleBlobDetectorWithParams returns a new SimpleBlobDetector with custom parameters
//
// For further details, please see:
// https://docs.opencv.org/master/d0/d7a/classcv_1_1SimpleBlobDetector.html
//
func NewSimpleBlobDetectorWithParams(params SimpleBlobDetectorParams) SimpleBlobDetector {
return SimpleBlobDetector{p: unsafe.Pointer(C.SimpleBlobDetector_Create_WithParams(params.p))}
}
// Close SimpleBlobDetector.
func (b *SimpleBlobDetector) Close() error {
C.SimpleBlobDetector_Close((C.SimpleBlobDetector)(b.p))
b.p = nil
return nil
}
// NewSimpleBlobDetectorParams returns the default parameters for the SimpleBlobDetector
func NewSimpleBlobDetectorParams() SimpleBlobDetectorParams {
return SimpleBlobDetectorParams{p: C.SimpleBlobDetectorParams_Create()}
}
// SetBlobColor sets the blobColor field
func (p *SimpleBlobDetectorParams) SetBlobColor(blobColor int) {
p.p.blobColor = C.uchar(blobColor)
}
// GetBlobColor gets the blobColor field
func (p *SimpleBlobDetectorParams) GetBlobColor() int {
return int(p.p.blobColor)
}
// SetFilterByArea sets the filterByArea field
func (p *SimpleBlobDetectorParams) SetFilterByArea(filterByArea bool) {
p.p.filterByArea = C.bool(filterByArea)
}
// GetFilterByArea gets the filterByArea field
func (p *SimpleBlobDetectorParams) GetFilterByArea() bool {
return bool(p.p.filterByArea)
}
// SetFilterByCircularity sets the filterByCircularity field
func (p *SimpleBlobDetectorParams) SetFilterByCircularity(filterByCircularity bool) {
p.p.filterByCircularity = C.bool(filterByCircularity)
}
// GetFilterByCircularity gets the filterByCircularity field
func (p *SimpleBlobDetectorParams) GetFilterByCircularity() bool {
return bool(p.p.filterByCircularity)
}
// SetFilterByColor sets the filterByColor field
func (p *SimpleBlobDetectorParams) SetFilterByColor(filterByColor bool) {
p.p.filterByColor = C.bool(filterByColor)
}
// GetFilterByColor gets the filterByColor field
func (p *SimpleBlobDetectorParams) GetFilterByColor() bool {
return bool(p.p.filterByColor)
}
// SetFilterByConvexity sets the filterByConvexity field
func (p *SimpleBlobDetectorParams) SetFilterByConvexity(filterByConvexity bool) {
p.p.filterByConvexity = C.bool(filterByConvexity)
}
// GetFilterByConvexity gets the filterByConvexity field
func (p *SimpleBlobDetectorParams) GetFilterByConvexity() bool {
return bool(p.p.filterByConvexity)
}
// SetFilterByInertia sets the filterByInertia field
func (p *SimpleBlobDetectorParams) SetFilterByInertia(filterByInertia bool) {
p.p.filterByInertia = C.bool(filterByInertia)
}
// GetFilterByInertia gets the filterByInertia field
func (p *SimpleBlobDetectorParams) GetFilterByInertia() bool {
return bool(p.p.filterByInertia)
}
// SetMaxArea sets the maxArea parameter for SimpleBlobDetector_Params
func (p *SimpleBlobDetectorParams) SetMaxArea(maxArea float64) {
p.p.maxArea = C.float(maxArea)
}
// GetMaxArea gets the maxArea parameter for SimpleBlobDetector_Params
func (p *SimpleBlobDetectorParams) GetMaxArea() float64 {
return float64(p.p.maxArea)
}
// SetMaxCircularity sets the maxCircularity parameter for SimpleBlobDetector_Params
func (p *SimpleBlobDetectorParams) SetMaxCircularity(maxCircularity float64) {
p.p.maxCircularity = C.float(maxCircularity)
}
// GetMaxCircularity gets the maxCircularity parameter for SimpleBlobDetector_Params
func (p *SimpleBlobDetectorParams) GetMaxCircularity() float64 {
return float64(p.p.maxCircularity)
}
// SetMaxConvexity sets the maxConvexity parameter for SimpleBlobDetector_Params
func (p *SimpleBlobDetectorParams) SetMaxConvexity(maxConvexity float64) {
p.p.maxConvexity = C.float(maxConvexity)
}
// GetMaxConvexity gets the maxConvexity parameter for SimpleBlobDetector_Params
func (p *SimpleBlobDetectorParams) GetMaxConvexity() float64 {
return float64(p.p.maxConvexity)
}
// SetMaxInertiaRatio sets the maxInertiaRatio parameter for SimpleBlobDetector_Params
func (p *SimpleBlobDetectorParams) SetMaxInertiaRatio(maxInertiaRatio float64) {
p.p.maxInertiaRatio = C.float(maxInertiaRatio)
}
// GetMaxInertiaRatio gets the maxInertiaRatio parameter for SimpleBlobDetector_Params
func (p *SimpleBlobDetectorParams) GetMaxInertiaRatio() float64 {
return float64(p.p.maxInertiaRatio)
}
// SetMaxThreshold sets the maxThreshold parameter for SimpleBlobDetector_Params
func (p *SimpleBlobDetectorParams) SetMaxThreshold(maxThreshold float64) {
p.p.maxThreshold = C.float(maxThreshold)
}
// GetMaxThreshold gets the maxThreshold parameter for SimpleBlobDetector_Params
func (p *SimpleBlobDetectorParams) GetMaxThreshold() float64 {
return float64(p.p.maxThreshold)
}
// SetMinArea sets the minArea parameter for SimpleBlobDetector_Params
func (p *SimpleBlobDetectorParams) SetMinArea(minArea float64) {
p.p.minArea = C.float(minArea)
}
// GetMinArea gets the minArea parameter for SimpleBlobDetector_Params
func (p *SimpleBlobDetectorParams) GetMinArea() float64 {
return float64(p.p.minArea)
}
// SetMinCircularity sets the minCircularity parameter for SimpleBlobDetector_Params
func (p *SimpleBlobDetectorParams) SetMinCircularity(minCircularity float64) {
p.p.minCircularity = C.float(minCircularity)
}
// GetMinCircularity gets the minCircularity parameter for SimpleBlobDetector_Params
func (p *SimpleBlobDetectorParams) GetMinCircularity() float64 {
return float64(p.p.minCircularity)
}
// SetMinConvexity sets the minConvexity parameter for SimpleBlobDetector_Params
func (p *SimpleBlobDetectorParams) SetMinConvexity(minConvexity float64) {
p.p.minConvexity = C.float(minConvexity)
}
// GetMinConvexity gets the minConvexity parameter for SimpleBlobDetector_Params
func (p *SimpleBlobDetectorParams) GetMinConvexity() float64 {
return float64(p.p.minConvexity)
}
// SetMinDistBetweenBlobs sets the minDistBetweenBlobs parameter for SimpleBlobDetector_Params
func (p *SimpleBlobDetectorParams) SetMinDistBetweenBlobs(minDistBetweenBlobs float64) {
p.p.minDistBetweenBlobs = C.float(minDistBetweenBlobs)
}
// GetMinDistBetweenBlobs gets the minDistBetweenBlobs parameter for SimpleBlobDetector_Params
func (p *SimpleBlobDetectorParams) GetMinDistBetweenBlobs() float64 {
return float64(p.p.minDistBetweenBlobs)
}
// SetMinInertiaRatio sets the minInertiaRatio parameter for SimpleBlobDetector_Params
func (p *SimpleBlobDetectorParams) SetMinInertiaRatio(minInertiaRatio float64) {
p.p.minInertiaRatio = C.float(minInertiaRatio)
}
// GetMinInertiaRatio gets the minInertiaRatio parameter for SimpleBlobDetector_Params
func (p *SimpleBlobDetectorParams) GetMinInertiaRatio() float64 {
return float64(p.p.minInertiaRatio)
}
// SetMinRepeatability sets the minRepeatability parameter for SimpleBlobDetector_Params
func (p *SimpleBlobDetectorParams) SetMinRepeatability(minRepeatability int) {
p.p.minRepeatability = C.size_t(minRepeatability)
}
// GetMinRepeatability gets the minRepeatability parameter for SimpleBlobDetector_Params
func (p *SimpleBlobDetectorParams) GetMinRepeatability() int {
return int(p.p.minRepeatability)
}
// SetMinThreshold sets the minThreshold parameter for SimpleBlobDetector_Params
func (p *SimpleBlobDetectorParams) SetMinThreshold(minThreshold float64) {
p.p.minThreshold = C.float(minThreshold)
}
// GetMinThreshold gets the minThreshold parameter for SimpleBlobDetector_Params
func (p *SimpleBlobDetectorParams) GetMinThreshold() float64 {
return float64(p.p.minThreshold)
}
// SetThresholdStep sets the thresholdStep parameter for SimpleBlobDetector_Params
func (p *SimpleBlobDetectorParams) SetThresholdStep(thresholdStep float64) {
p.p.thresholdStep = C.float(thresholdStep)
}
// GetThresholdStep gets the thresholdStep parameter for SimpleBlobDetector_Params
func (p *SimpleBlobDetectorParams) GetThresholdStep() float64 {
return float64(p.p.thresholdStep)
}
// Detect keypoints in an image using SimpleBlobDetector.
//
// For further details, please see:
// https://docs.opencv.org/master/d0/d13/classcv_1_1Feature2D.html#aa4e9a7082ec61ebc108806704fbd7887
//
func (b *SimpleBlobDetector) Detect(src Mat) []KeyPoint {
ret := C.SimpleBlobDetector_Detect((C.SimpleBlobDetector)(b.p), src.p)
defer C.KeyPoints_Close(ret)
return getKeyPoints(ret)
}
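Putting the params plumbing together, a sketch that keeps only blobs within an illustrative area range:
params := gocv.NewSimpleBlobDetectorParams()
params.SetFilterByArea(true)
params.SetMinArea(50)
params.SetMaxArea(5000)
det := gocv.NewSimpleBlobDetectorWithParams(params)
defer det.Close()
blobs := det.Detect(img)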
// getKeyPoints returns a slice of KeyPoint given a pointer to a C.KeyPoints
func getKeyPoints(ret C.KeyPoints) []KeyPoint {
cArray := ret.keypoints
length := int(ret.length)
hdr := reflect.SliceHeader{
Data: uintptr(unsafe.Pointer(cArray)),
Len: length,
Cap: length,
}
s := *(*[]C.KeyPoint)(unsafe.Pointer(&hdr))
keys := make([]KeyPoint, length)
for i, r := range s {
keys[i] = KeyPoint{float64(r.x), float64(r.y), float64(r.size), float64(r.angle), float64(r.response),
int(r.octave), int(r.classID)}
}
return keys
}
// BFMatcher is a wrapper around the cv::BFMatcher algorithm
type BFMatcher struct {
// C.BFMatcher
p unsafe.Pointer
}
// NewBFMatcher returns a new BFMatcher
//
// For further details, please see:
// https://docs.opencv.org/master/d3/da1/classcv_1_1BFMatcher.html#abe0bb11749b30d97f60d6ade665617bd
//
func NewBFMatcher() BFMatcher {
return BFMatcher{p: unsafe.Pointer(C.BFMatcher_Create())}
}
// NewBFMatcherWithParams creates a new BFMatchers but allows setting parameters
// to values other than just the defaults.
//
// For further details, please see:
// https://docs.opencv.org/master/d3/da1/classcv_1_1BFMatcher.html#abe0bb11749b30d97f60d6ade665617bd
//
func NewBFMatcherWithParams(normType NormType, crossCheck bool) BFMatcher {
return BFMatcher{p: unsafe.Pointer(C.BFMatcher_CreateWithParams(C.int(normType), C.bool(crossCheck)))}
}
// Close BFMatcher
func (b *BFMatcher) Close() error {
C.BFMatcher_Close((C.BFMatcher)(b.p))
b.p = nil
return nil
}
// KnnMatch Finds the k best matches for each descriptor from a query set.
//
// For further details, please see:
// https://docs.opencv.org/master/db/d39/classcv_1_1DescriptorMatcher.html#aa880f9353cdf185ccf3013e08210483a
//
func (b *BFMatcher) KnnMatch(query, train Mat, k int) [][]DMatch {
ret := C.BFMatcher_KnnMatch((C.BFMatcher)(b.p), query.p, train.p, C.int(k))
defer C.MultiDMatches_Close(ret)
return getMultiDMatches(ret)
}
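A sketch of the conventional k=2 ratio test on top of KnnMatch, assuming binary (e.g. ORB) descriptors in des1 and des2, hence the Hamming norm; the 0.75 ratio is Lowe's customary value, not something this package prescribes:
bf := gocv.NewBFMatcherWithParams(gocv.NormHamming, false)
defer bf.Close()
good := make([]gocv.DMatch, 0)
for _, pair := range bf.KnnMatch(des1, des2, 2) {
	if len(pair) == 2 && pair[0].Distance < 0.75*pair[1].Distance {
		good = append(good, pair[0]) // keep only clearly best matches
	}
}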
// FlannBasedMatcher is a wrapper around the cv::FlannBasedMatcher algorithm
type FlannBasedMatcher struct {
// C.FlannBasedMatcher
p unsafe.Pointer
}
// NewFlannBasedMatcher returns a new FlannBasedMatcher
//
// For further details, please see:
// https://docs.opencv.org/master/dc/de2/classcv_1_1FlannBasedMatcher.html#ab9114a6471e364ad221f89068ca21382
//
func NewFlannBasedMatcher() FlannBasedMatcher {
return FlannBasedMatcher{p: unsafe.Pointer(C.FlannBasedMatcher_Create())}
}
// Close FlannBasedMatcher
func (f *FlannBasedMatcher) Close() error {
C.FlannBasedMatcher_Close((C.FlannBasedMatcher)(f.p))
f.p = nil
return nil
}
// KnnMatch Finds the k best matches for each descriptor from a query set.
//
// For further details, please see:
// https://docs.opencv.org/master/db/d39/classcv_1_1DescriptorMatcher.html#aa880f9353cdf185ccf3013e08210483a
//
func (f *FlannBasedMatcher) KnnMatch(query, train Mat, k int) [][]DMatch {
ret := C.FlannBasedMatcher_KnnMatch((C.FlannBasedMatcher)(f.p), query.p, train.p, C.int(k))
defer C.MultiDMatches_Close(ret)
return getMultiDMatches(ret)
}
func getMultiDMatches(ret C.MultiDMatches) [][]DMatch {
cArray := ret.dmatches
length := int(ret.length)
hdr := reflect.SliceHeader{
Data: uintptr(unsafe.Pointer(cArray)),
Len: length,
Cap: length,
}
s := *(*[]C.DMatches)(unsafe.Pointer(&hdr))
keys := make([][]DMatch, length)
for i := range s {
keys[i] = getDMatches(C.MultiDMatches_get(ret, C.int(i)))
}
return keys
}
func getDMatches(ret C.DMatches) []DMatch {
cArray := ret.dmatches
length := int(ret.length)
hdr := reflect.SliceHeader{
Data: uintptr(unsafe.Pointer(cArray)),
Len: length,
Cap: length,
}
s := *(*[]C.DMatch)(unsafe.Pointer(&hdr))
keys := make([]DMatch, length)
for i, r := range s {
keys[i] = DMatch{int(r.queryIdx), int(r.trainIdx), int(r.imgIdx),
float64(r.distance)}
}
return keys
}
// DrawMatchesFlag are the flags that control how matches and keypoints are drawn
//
// For further details please see:
// https://docs.opencv.org/master/de/d30/structcv_1_1DrawMatchesFlags.html
type DrawMatchesFlag int
const (
// DrawDefault creates a new output image and draws only the center point of each keypoint
DrawDefault DrawMatchesFlag = 0
// DrawOverOutImg draws matches over the existing content of the output image
DrawOverOutImg DrawMatchesFlag = 1
// NotDrawSinglePoints omits keypoints that have no match
NotDrawSinglePoints DrawMatchesFlag = 2
// DrawRichKeyPoints draws a circle around each keypoint reflecting its size and orientation
DrawRichKeyPoints DrawMatchesFlag = 3
)
// DrawKeyPoints draws keypoints
//
// For further details please see:
// https://docs.opencv.org/master/d4/d5d/group__features2d__draw.html#gab958f8900dd10f14316521c149a60433
func DrawKeyPoints(src Mat, keyPoints []KeyPoint, dst *Mat, color color.RGBA, flag DrawMatchesFlag) {
cKeyPointArray := make([]C.struct_KeyPoint, len(keyPoints))
for i, kp := range keyPoints {
cKeyPointArray[i].x = C.double(kp.X)
cKeyPointArray[i].y = C.double(kp.Y)
cKeyPointArray[i].size = C.double(kp.Size)
cKeyPointArray[i].angle = C.double(kp.Angle)
cKeyPointArray[i].response = C.double(kp.Response)
cKeyPointArray[i].octave = C.int(kp.Octave)
cKeyPointArray[i].classID = C.int(kp.ClassID)
}
cKeyPoints := C.struct_KeyPoints{
keypoints: (*C.struct_KeyPoint)(&cKeyPointArray[0]),
length: (C.int)(len(keyPoints)),
}
scalar := C.struct_Scalar{
val1: C.double(color.B),
val2: C.double(color.G),
val3: C.double(color.R),
val4: C.double(color.A),
}
C.DrawKeyPoints(src.p, cKeyPoints, dst.p, scalar, C.int(flag))
}
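Sketch: render a non-empty keypoint slice in green; the RGBA-to-BGR scalar conversion happens inside the wrapper above:
out := gocv.NewMat()
defer out.Close()
gocv.DrawKeyPoints(img, kps, &out, color.RGBA{G: 255, A: 255}, gocv.DrawRichKeyPoints)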
// SIFT is a wrapper around the cv::SIFT algorithm.
// Due to the patent having expired, this is now in the main OpenCV code modules.
type SIFT struct {
// C.SIFT
p unsafe.Pointer
}
// NewSIFT returns a new SIFT algorithm.
//
// For further details, please see:
// https://docs.opencv.org/master/d5/d3c/classcv_1_1xfeatures2d_1_1SIFT.html
//
func NewSIFT() SIFT {
return SIFT{p: unsafe.Pointer(C.SIFT_Create())}
}
// Close SIFT.
func (d *SIFT) Close() error {
C.SIFT_Close((C.SIFT)(d.p))
d.p = nil
return nil
}
// Detect keypoints in an image using SIFT.
//
// For further details, please see:
// https://docs.opencv.org/master/d0/d13/classcv_1_1Feature2D.html#aa4e9a7082ec61ebc108806704fbd7887
//
func (d *SIFT) Detect(src Mat) []KeyPoint {
ret := C.SIFT_Detect((C.SIFT)(d.p), C.Mat(src.Ptr()))
defer C.KeyPoints_Close(ret)
return getKeyPoints(ret)
}
// DetectAndCompute detects and computes keypoints in an image using SIFT.
//
// For further details, please see:
// https://docs.opencv.org/master/d0/d13/classcv_1_1Feature2D.html#a8be0d1c20b08eb867184b8d74c15a677
//
func (d *SIFT) DetectAndCompute(src Mat, mask Mat) ([]KeyPoint, Mat) {
desc := NewMat()
ret := C.SIFT_DetectAndCompute((C.SIFT)(d.p), C.Mat(src.Ptr()), C.Mat(mask.Ptr()),
C.Mat(desc.Ptr()))
defer C.KeyPoints_Close(ret)
return getKeyPoints(ret), desc
}
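SIFT yields float descriptors, which pair naturally with the FlannBasedMatcher above; a sketch:
sift := gocv.NewSIFT()
defer sift.Close()
mask := gocv.NewMat()
defer mask.Close()
kps, desc := sift.DetectAndCompute(img, mask)
defer desc.Close()
// desc can now be matched against another image's descriptors via FlannBasedMatcher.KnnMatch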
// DrawMatches draws matches on combined train and query images.
//
// For further details, please see:
// https://docs.opencv.org/master/d4/d5d/group__features2d__draw.html#gad8f463ccaf0dc6f61083abd8717c261a
func DrawMatches(img1 Mat, kp1 []KeyPoint, img2 Mat, kp2 []KeyPoint, matches1to2 []DMatch, outImg *Mat, matchColor color.RGBA, singlePointColor color.RGBA, matchesMask []byte, flags DrawMatchesFlag) {
kp1arr := make([]C.struct_KeyPoint, len(kp1))
kp2arr := make([]C.struct_KeyPoint, len(kp2))
for i, kp := range kp1 {
kp1arr[i].x = C.double(kp.X)
kp1arr[i].y = C.double(kp.Y)
kp1arr[i].size = C.double(kp.Size)
kp1arr[i].angle = C.double(kp.Angle)
kp1arr[i].response = C.double(kp.Response)
kp1arr[i].octave = C.int(kp.Octave)
kp1arr[i].classID = C.int(kp.ClassID)
}
for i, kp := range kp2 {
kp2arr[i].x = C.double(kp.X)
kp2arr[i].y = C.double(kp.Y)
kp2arr[i].size = C.double(kp.Size)
kp2arr[i].angle = C.double(kp.Angle)
kp2arr[i].response = C.double(kp.Response)
kp2arr[i].octave = C.int(kp.Octave)
kp2arr[i].classID = C.int(kp.ClassID)
}
cKeyPoints1 := C.struct_KeyPoints{
keypoints: (*C.struct_KeyPoint)(&kp1arr[0]),
length: (C.int)(len(kp1)),
}
cKeyPoints2 := C.struct_KeyPoints{
keypoints: (*C.struct_KeyPoint)(&kp2arr[0]),
length: (C.int)(len(kp2)),
}
dMatchArr := make([]C.struct_DMatch, len(matches1to2))
for i, dm := range matches1to2 {
dMatchArr[i].queryIdx = C.int(dm.QueryIdx)
dMatchArr[i].trainIdx = C.int(dm.TrainIdx)
dMatchArr[i].imgIdx = C.int(dm.ImgIdx)
dMatchArr[i].distance = C.float(dm.Distance)
}
cDMatches := C.struct_DMatches{
dmatches: (*C.struct_DMatch)(&dMatchArr[0]),
length: (C.int)(len(matches1to2)),
}
// OpenCV scalar channel order is B, G, R, A, matching scalarPointColor below
scalarMatchColor := C.struct_Scalar{
val1: C.double(matchColor.B),
val2: C.double(matchColor.G),
val3: C.double(matchColor.R),
val4: C.double(matchColor.A),
}
scalarPointColor := C.struct_Scalar{
val1: C.double(singlePointColor.B),
val2: C.double(singlePointColor.G),
val3: C.double(singlePointColor.R),
val4: C.double(singlePointColor.A),
}
mask := make([]C.char, len(matchesMask))
// copy the match mask bytes into C-visible memory so OpenCV sees the caller's values
for i, v := range matchesMask {
mask[i] = C.char(v)
}
cByteArray := C.struct_ByteArray{
length: (C.int)(len(matchesMask)),
}
if len(matchesMask) > 0 {
cByteArray = C.struct_ByteArray{
data: (*C.char)(&mask[0]),
length: (C.int)(len(matchesMask)),
}
}
C.DrawMatches(img1.p, cKeyPoints1, img2.p, cKeyPoints2, cDMatches, outImg.p, scalarMatchColor, scalarPointColor, cByteArray, C.int(flags))
}
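A closing sketch tying the matcher output to visualization; kp1, kp2 and good are assumed to come from a DetectAndCompute plus ratio-test pass as sketched earlier, all non-empty, and a nil mask draws every match:
vis := gocv.NewMat()
defer vis.Close()
gocv.DrawMatches(img1, kp1, img2, kp2, good, &vis,
	color.RGBA{G: 255, A: 255}, color.RGBA{R: 255, A: 255}, nil, gocv.DrawDefault)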

105
vendor/gocv.io/x/gocv/features2d.h generated vendored Normal file
View File

@ -0,0 +1,105 @@
#ifndef _OPENCV3_FEATURES2D_H_
#define _OPENCV3_FEATURES2D_H_
#ifdef __cplusplus
#include <opencv2/opencv.hpp>
extern "C" {
#endif
#include "core.h"
#ifdef __cplusplus
typedef cv::Ptr<cv::AKAZE>* AKAZE;
typedef cv::Ptr<cv::AgastFeatureDetector>* AgastFeatureDetector;
typedef cv::Ptr<cv::BRISK>* BRISK;
typedef cv::Ptr<cv::FastFeatureDetector>* FastFeatureDetector;
typedef cv::Ptr<cv::GFTTDetector>* GFTTDetector;
typedef cv::Ptr<cv::KAZE>* KAZE;
typedef cv::Ptr<cv::MSER>* MSER;
typedef cv::Ptr<cv::ORB>* ORB;
typedef cv::Ptr<cv::SimpleBlobDetector>* SimpleBlobDetector;
typedef cv::Ptr<cv::BFMatcher>* BFMatcher;
typedef cv::Ptr<cv::FlannBasedMatcher>* FlannBasedMatcher;
typedef cv::Ptr<cv::SIFT>* SIFT;
#else
typedef void* AKAZE;
typedef void* AgastFeatureDetector;
typedef void* BRISK;
typedef void* FastFeatureDetector;
typedef void* GFTTDetector;
typedef void* KAZE;
typedef void* MSER;
typedef void* ORB;
typedef void* SimpleBlobDetector;
typedef void* BFMatcher;
typedef void* FlannBasedMatcher;
typedef void* SIFT;
#endif
AKAZE AKAZE_Create();
void AKAZE_Close(AKAZE a);
struct KeyPoints AKAZE_Detect(AKAZE a, Mat src);
struct KeyPoints AKAZE_DetectAndCompute(AKAZE a, Mat src, Mat mask, Mat desc);
AgastFeatureDetector AgastFeatureDetector_Create();
void AgastFeatureDetector_Close(AgastFeatureDetector a);
struct KeyPoints AgastFeatureDetector_Detect(AgastFeatureDetector a, Mat src);
BRISK BRISK_Create();
void BRISK_Close(BRISK b);
struct KeyPoints BRISK_Detect(BRISK b, Mat src);
struct KeyPoints BRISK_DetectAndCompute(BRISK b, Mat src, Mat mask, Mat desc);
FastFeatureDetector FastFeatureDetector_Create();
FastFeatureDetector FastFeatureDetector_CreateWithParams(int threshold, bool nonmaxSuppression, int type);
void FastFeatureDetector_Close(FastFeatureDetector f);
struct KeyPoints FastFeatureDetector_Detect(FastFeatureDetector f, Mat src);
GFTTDetector GFTTDetector_Create();
void GFTTDetector_Close(GFTTDetector a);
struct KeyPoints GFTTDetector_Detect(GFTTDetector a, Mat src);
KAZE KAZE_Create();
void KAZE_Close(KAZE a);
struct KeyPoints KAZE_Detect(KAZE a, Mat src);
struct KeyPoints KAZE_DetectAndCompute(KAZE a, Mat src, Mat mask, Mat desc);
MSER MSER_Create();
void MSER_Close(MSER a);
struct KeyPoints MSER_Detect(MSER a, Mat src);
ORB ORB_Create();
ORB ORB_CreateWithParams(int nfeatures, float scaleFactor, int nlevels, int edgeThreshold, int firstLevel, int WTA_K, int scoreType, int patchSize, int fastThreshold);
void ORB_Close(ORB o);
struct KeyPoints ORB_Detect(ORB o, Mat src);
struct KeyPoints ORB_DetectAndCompute(ORB o, Mat src, Mat mask, Mat desc);
SimpleBlobDetector SimpleBlobDetector_Create();
SimpleBlobDetector SimpleBlobDetector_Create_WithParams(SimpleBlobDetectorParams params);
void SimpleBlobDetector_Close(SimpleBlobDetector b);
struct KeyPoints SimpleBlobDetector_Detect(SimpleBlobDetector b, Mat src);
SimpleBlobDetectorParams SimpleBlobDetectorParams_Create();
BFMatcher BFMatcher_Create();
BFMatcher BFMatcher_CreateWithParams(int normType, bool crossCheck);
void BFMatcher_Close(BFMatcher b);
struct MultiDMatches BFMatcher_KnnMatch(BFMatcher b, Mat query, Mat train, int k);
FlannBasedMatcher FlannBasedMatcher_Create();
void FlannBasedMatcher_Close(FlannBasedMatcher f);
struct MultiDMatches FlannBasedMatcher_KnnMatch(FlannBasedMatcher f, Mat query, Mat train, int k);
void DrawKeyPoints(Mat src, struct KeyPoints kp, Mat dst, const Scalar s, int flags);
SIFT SIFT_Create();
void SIFT_Close(SIFT f);
struct KeyPoints SIFT_Detect(SIFT f, Mat src);
struct KeyPoints SIFT_DetectAndCompute(SIFT f, Mat src, Mat mask, Mat desc);
void DrawMatches(Mat img1, struct KeyPoints kp1, Mat img2, struct KeyPoints kp2, struct DMatches matches1to2, Mat outImg, const Scalar matchesColor, const Scalar pointColor, struct ByteArray matchesMask, int flags);
#ifdef __cplusplus
}
#endif
#endif //_OPENCV3_FEATURES2D_H_
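
Taken together, the declarations above expose detector construction, keypoint extraction, descriptor matching, and result drawing as plain C calls. Below is a minimal sketch of how the Go bindings typically chain them, pairing ORB with a brute-force matcher; the file names are placeholders and the signatures are assumed from the vendored gocv package.

package main

import (
	"image/color"

	"gocv.io/x/gocv"
)

func main() {
	// Load the two images to match (paths are placeholders).
	img1 := gocv.IMRead("left.jpg", gocv.IMReadGrayScale)
	defer img1.Close()
	img2 := gocv.IMRead("right.jpg", gocv.IMReadGrayScale)
	defer img2.Close()

	// Detect keypoints and compute descriptors with ORB.
	orb := gocv.NewORB()
	defer orb.Close()
	noMask := gocv.NewMat()
	defer noMask.Close()
	kp1, desc1 := orb.DetectAndCompute(img1, noMask)
	defer desc1.Close()
	kp2, desc2 := orb.DetectAndCompute(img2, noMask)
	defer desc2.Close()

	// Brute-force matching, keeping the two nearest neighbours per query.
	bf := gocv.NewBFMatcher()
	defer bf.Close()
	knn := bf.KnnMatch(desc1, desc2, 2)

	// Keep the best candidate of each pair and draw the result.
	best := make([]gocv.DMatch, 0, len(knn))
	for _, pair := range knn {
		if len(pair) > 0 {
			best = append(best, pair[0])
		}
	}
	out := gocv.NewMat()
	defer out.Close()
	gocv.DrawMatches(img1, kp1, img2, kp2, best, &out,
		color.RGBA{G: 255}, color.RGBA{R: 255}, nil, gocv.DrawDefault)
	gocv.IMWrite("matches.jpg", out)
}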

33
vendor/gocv.io/x/gocv/features2d_string.go generated vendored Normal file
View File

@ -0,0 +1,33 @@
package gocv
/*
#include <stdlib.h>
#include "features2d.h"
*/
import "C"
func (c FastFeatureDetectorType) String() string {
switch c {
case FastFeatureDetectorType58:
return "fast-feature-detector-type-58"
case FastFeatureDetectorType712:
return "fast-feature-detector-type-712"
case FastFeatureDetectorType916:
return "fast-feature-detector-type-916"
}
return ""
}
func (c DrawMatchesFlag) String() string {
switch c {
case DrawDefault:
return "draw-default"
case DrawOverOutImg:
return "draw-over-out-imt"
case NotDrawSinglePoints:
return "draw-single-points"
case DrawRichKeyPoints:
return "draw-rich-key-points"
}
return ""
}

11
vendor/gocv.io/x/gocv/gocv.go generated vendored Normal file
View File

@ -0,0 +1,11 @@
// Package gocv is a wrapper around the OpenCV 4.x computer vision library.
// It provides a Go language interface to the latest version of OpenCV.
//
// OpenCV (Open Source Computer Vision Library: http://opencv.org) is an
// open-source BSD-licensed library that includes several hundred
// computer vision algorithms.
//
// For further details, please see:
// http://docs.opencv.org/master/d1/dfb/intro.html
//
package gocv // import "gocv.io/x/gocv"
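
As a bare-bones orientation to the package, a minimal program can simply load an image and inspect it; the file path is a placeholder, and the empty-Mat check follows the IMRead contract documented later in this commit.

package main

import (
	"fmt"

	"gocv.io/x/gocv"
)

func main() {
	// Read an image from disk (path is a placeholder).
	img := gocv.IMRead("input.jpg", gocv.IMReadColor)
	defer img.Close()

	// IMRead returns an empty Mat when the file cannot be decoded.
	if img.Empty() {
		fmt.Println("could not read input.jpg")
		return
	}
	fmt.Printf("loaded %dx%d image with %d channels\n",
		img.Cols(), img.Rows(), img.Channels())
}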

83
vendor/gocv.io/x/gocv/highgui.cpp generated vendored Normal file
View File

@ -0,0 +1,83 @@
#include "highgui_gocv.h"
// Window
void Window_New(const char* winname, int flags) {
cv::namedWindow(winname, flags);
}
void Window_Close(const char* winname) {
cv::destroyWindow(winname);
}
void Window_IMShow(const char* winname, Mat mat) {
cv::imshow(winname, *mat);
}
double Window_GetProperty(const char* winname, int flag) {
return cv::getWindowProperty(winname, flag);
}
void Window_SetProperty(const char* winname, int flag, double value) {
cv::setWindowProperty(winname, flag, value);
}
void Window_SetTitle(const char* winname, const char* title) {
cv::setWindowTitle(winname, title);
}
int Window_WaitKey(int delay = 0) {
return cv::waitKey(delay);
}
void Window_Move(const char* winname, int x, int y) {
cv::moveWindow(winname, x, y);
}
void Window_Resize(const char* winname, int width, int height) {
cv::resizeWindow(winname, width, height);
}
struct Rect Window_SelectROI(const char* winname, Mat img) {
cv::Rect bRect = cv::selectROI(winname, *img);
Rect r = {bRect.x, bRect.y, bRect.width, bRect.height};
return r;
}
struct Rects Window_SelectROIs(const char* winname, Mat img) {
std::vector<cv::Rect> rois;
cv::selectROIs(winname, *img, rois);
Rect* rects = new Rect[rois.size()];
for (size_t i = 0; i < rois.size(); ++i) {
Rect r = {rois[i].x, rois[i].y, rois[i].width, rois[i].height};
rects[i] = r;
}
Rects ret = {rects, (int)rois.size()};
return ret;
}
// Trackbar
void Trackbar_Create(const char* winname, const char* trackname, int max) {
cv::createTrackbar(trackname, winname, NULL, max);
}
void Trackbar_CreateWithValue(const char* winname, const char* trackname, int* value, int max) {
cv::createTrackbar(trackname, winname, value, max);
}
int Trackbar_GetPos(const char* winname, const char* trackname) {
return cv::getTrackbarPos(trackname, winname);
}
void Trackbar_SetPos(const char* winname, const char* trackname, int pos) {
cv::setTrackbarPos(trackname, winname, pos);
}
void Trackbar_SetMin(const char* winname, const char* trackname, int pos) {
cv::setTrackbarMin(trackname, winname, pos);
}
void Trackbar_SetMax(const char* winname, const char* trackname, int pos) {
cv::setTrackbarMax(trackname, winname, pos);
}

361
vendor/gocv.io/x/gocv/highgui.go generated vendored Normal file
View File

@ -0,0 +1,361 @@
package gocv
/*
#include <stdlib.h>
#include "highgui_gocv.h"
*/
import "C"
import (
"image"
"runtime"
"unsafe"
)
// Window is a wrapper around OpenCV's "HighGUI" named windows.
// While OpenCV was designed for use in full-scale applications and can be used
// within functionally rich UI frameworks (such as Qt*, WinForms*, or Cocoa*)
// or without any UI at all, it is sometimes required to try functionality
// quickly and visualize the results. This is what the HighGUI module has been designed for.
//
// For further details, please see:
// http://docs.opencv.org/master/d7/dfc/group__highgui.html
//
type Window struct {
name string
open bool
}
// NewWindow creates a new named OpenCV window
//
// For further details, please see:
// http://docs.opencv.org/master/d7/dfc/group__highgui.html#ga5afdf8410934fd099df85c75b2e0888b
//
func NewWindow(name string) *Window {
runtime.LockOSThread()
cName := C.CString(name)
defer C.free(unsafe.Pointer(cName))
C.Window_New(cName, 0)
return &Window{name: name, open: true}
}
// Close closes and deletes a named OpenCV Window.
//
// For further details, please see:
// http://docs.opencv.org/master/d7/dfc/group__highgui.html#ga851ccdd6961022d1d5b4c4f255dbab34
//
func (w *Window) Close() error {
cName := C.CString(w.name)
defer C.free(unsafe.Pointer(cName))
C.Window_Close(cName)
w.open = false
runtime.UnlockOSThread()
return nil
}
// IsOpen checks to see if the Window seems to be open.
func (w *Window) IsOpen() bool {
return w.open
}
// WindowFlag value for SetWindowProperty / GetWindowProperty.
type WindowFlag float32
const (
// WindowNormal indicates a normal window.
WindowNormal WindowFlag = 0x00000000
// WindowAutosize indicates a window sized based on the contents.
WindowAutosize WindowFlag = 0x00000001
// WindowFullscreen indicates a full-screen window.
WindowFullscreen WindowFlag = 1
// WindowFreeRatio indicates allow the user to resize without maintaining aspect ratio.
WindowFreeRatio WindowFlag = 0x00000100
// WindowKeepRatio indicates always maintain an aspect ratio that matches the contents.
WindowKeepRatio WindowFlag = 0x00000000
)
// WindowPropertyFlag flags for SetWindowProperty / GetWindowProperty.
type WindowPropertyFlag int
const (
// WindowPropertyFullscreen fullscreen property
// (can be WINDOW_NORMAL or WINDOW_FULLSCREEN).
WindowPropertyFullscreen WindowPropertyFlag = 0
// WindowPropertyAutosize is autosize property
// (can be WINDOW_NORMAL or WINDOW_AUTOSIZE).
WindowPropertyAutosize WindowPropertyFlag = 1
// WindowPropertyAspectRatio is the window's aspect ratio
// (can be set to WINDOW_FREERATIO or WINDOW_KEEPRATIO).
WindowPropertyAspectRatio WindowPropertyFlag = 2
// WindowPropertyOpenGL opengl support.
WindowPropertyOpenGL WindowPropertyFlag = 3
// WindowPropertyVisible indicates whether the window is visible.
WindowPropertyVisible WindowPropertyFlag = 4
)
// GetWindowProperty returns properties of a window.
//
// For further details, please see:
// https://docs.opencv.org/master/d7/dfc/group__highgui.html#gaaf9504b8f9cf19024d9d44a14e461656
//
func (w *Window) GetWindowProperty(flag WindowPropertyFlag) float64 {
cName := C.CString(w.name)
defer C.free(unsafe.Pointer(cName))
return float64(C.Window_GetProperty(cName, C.int(flag)))
}
// SetWindowProperty changes parameters of a window dynamically.
//
// For further details, please see:
// https://docs.opencv.org/master/d7/dfc/group__highgui.html#ga66e4a6db4d4e06148bcdfe0d70a5df27
//
func (w *Window) SetWindowProperty(flag WindowPropertyFlag, value WindowFlag) {
cName := C.CString(w.name)
defer C.free(unsafe.Pointer(cName))
C.Window_SetProperty(cName, C.int(flag), C.double(value))
}
// SetWindowTitle updates window title.
//
// For further details, please see:
// https://docs.opencv.org/master/d7/dfc/group__highgui.html#ga56f8849295fd10d0c319724ddb773d96
//
func (w *Window) SetWindowTitle(title string) {
cName := C.CString(w.name)
defer C.free(unsafe.Pointer(cName))
cTitle := C.CString(title)
defer C.free(unsafe.Pointer(cTitle))
C.Window_SetTitle(cName, cTitle)
}
// IMShow displays an image Mat in the specified window.
// This function should be followed by a call to WaitKey, which displays
// the image for the specified milliseconds. Otherwise, the image won't be displayed.
//
// For further details, please see:
// http://docs.opencv.org/master/d7/dfc/group__highgui.html#ga453d42fe4cb60e5723281a89973ee563
//
func (w *Window) IMShow(img Mat) {
cName := C.CString(w.name)
defer C.free(unsafe.Pointer(cName))
C.Window_IMShow(cName, img.p)
}
// WaitKey waits for a pressed key.
// This function is the only method in OpenCV's HighGUI that can fetch
// and handle events, so it needs to be called periodically
// for normal event processing.
//
// For further details, please see:
// http://docs.opencv.org/master/d7/dfc/group__highgui.html#ga5628525ad33f52eab17feebcfba38bd7
//
func (w *Window) WaitKey(delay int) int {
return int(C.Window_WaitKey(C.int(delay)))
}
// MoveWindow moves window to the specified position.
//
// For further details, please see:
// https://docs.opencv.org/master/d7/dfc/group__highgui.html#ga8d86b207f7211250dbe6e28f76307ffb
//
func (w *Window) MoveWindow(x, y int) {
cName := C.CString(w.name)
defer C.free(unsafe.Pointer(cName))
C.Window_Move(cName, C.int(x), C.int(y))
}
// ResizeWindow resizes window to the specified size.
//
// For further details, please see:
// https://docs.opencv.org/master/d7/dfc/group__highgui.html#ga9e80e080f7ef33f897e415358aee7f7e
//
func (w *Window) ResizeWindow(width, height int) {
cName := C.CString(w.name)
defer C.free(unsafe.Pointer(cName))
C.Window_Resize(cName, C.int(width), C.int(height))
}
// SelectROI selects a Region Of Interest (ROI) on the given image.
// It creates a window and allows the user to select a ROI using the mouse.
//
// Controls:
// use space or enter to finish the selection,
// use the c key to cancel the selection (the function will return a zero Rect).
//
// For further details, please see:
// https://docs.opencv.org/master/d7/dfc/group__highgui.html#ga8daf4730d3adf7035b6de9be4c469af5
//
func (w *Window) SelectROI(img Mat) image.Rectangle {
cName := C.CString(w.name)
defer C.free(unsafe.Pointer(cName))
r := C.Window_SelectROI(cName, img.p)
rect := image.Rect(int(r.x), int(r.y), int(r.x+r.width), int(r.y+r.height))
return rect
}
// SelectROIs selects multiple Regions Of Interest (ROI) on the given image.
// It creates a window and allows the user to select ROIs using the mouse.
//
// Controls:
// use space or enter to finish the current selection and start a new one,
// use esc to terminate the multiple ROI selection process.
//
// For further details, please see:
// https://docs.opencv.org/master/d7/dfc/group__highgui.html#ga0f11fad74a6432b8055fb21621a0f893
//
func (w *Window) SelectROIs(img Mat) []image.Rectangle {
cName := C.CString(w.name)
defer C.free(unsafe.Pointer(cName))
ret := C.Window_SelectROIs(cName, img.p)
defer C.Rects_Close(ret)
return toRectangles(ret)
}
// Deprecated: use Window.SelectROI instead
func SelectROI(name string, img Mat) image.Rectangle {
cName := C.CString(name)
defer C.free(unsafe.Pointer(cName))
r := C.Window_SelectROI(cName, img.p)
rect := image.Rect(int(r.x), int(r.y), int(r.x+r.width), int(r.y+r.height))
return rect
}
// Deprecated: use Window.SelectROIs instead
func SelectROIs(name string, img Mat) []image.Rectangle {
cName := C.CString(name)
defer C.free(unsafe.Pointer(cName))
ret := C.Window_SelectROIs(cName, img.p)
defer C.Rects_Close(ret)
return toRectangles(ret)
}
// WaitKey that is not attached to a specific Window.
// Only use this when no Window exists in your application, e.g. in a command-line app.
//
func WaitKey(delay int) int {
return int(C.Window_WaitKey(C.int(delay)))
}
// Trackbar is a wrapper around OpenCV's "HighGUI" window Trackbars.
type Trackbar struct {
name string
parent *Window
}
// CreateTrackbar creates a trackbar and attaches it to the specified window.
//
// For further details, please see:
// https://docs.opencv.org/master/d7/dfc/group__highgui.html#gaf78d2155d30b728fc413803745b67a9b
//
func (w *Window) CreateTrackbar(name string, max int) *Trackbar {
cName := C.CString(w.name)
defer C.free(unsafe.Pointer(cName))
tName := C.CString(name)
defer C.free(unsafe.Pointer(tName))
C.Trackbar_Create(cName, tName, C.int(max))
return &Trackbar{name: name, parent: w}
}
// CreateTrackbarWithValue works like CreateTrackbar but also assigns a
// variable value to be a position synchronized with the trackbar.
//
// For further details, please see:
// https://docs.opencv.org/master/d7/dfc/group__highgui.html#gaf78d2155d30b728fc413803745b67a9b
//
func (w *Window) CreateTrackbarWithValue(name string, value *int, max int) *Trackbar {
cName := C.CString(w.name)
defer C.free(unsafe.Pointer(cName))
tName := C.CString(name)
defer C.free(unsafe.Pointer(tName))
C.Trackbar_CreateWithValue(cName, tName, (*C.int)(unsafe.Pointer(value)), C.int(max))
return &Trackbar{name: name, parent: w}
}
// GetPos returns the trackbar position.
//
// For further details, please see:
// https://docs.opencv.org/master/d7/dfc/group__highgui.html#ga122632e9e91b9ec06943472c55d9cda8
//
func (t *Trackbar) GetPos() int {
cName := C.CString(t.parent.name)
defer C.free(unsafe.Pointer(cName))
tName := C.CString(t.name)
defer C.free(unsafe.Pointer(tName))
return int(C.Trackbar_GetPos(cName, tName))
}
// SetPos sets the trackbar position.
//
// For further details, please see:
// https://docs.opencv.org/master/d7/dfc/group__highgui.html#ga67d73c4c9430f13481fd58410d01bd8d
//
func (t *Trackbar) SetPos(pos int) {
cName := C.CString(t.parent.name)
defer C.free(unsafe.Pointer(cName))
tName := C.CString(t.name)
defer C.free(unsafe.Pointer(tName))
C.Trackbar_SetPos(cName, tName, C.int(pos))
}
// SetMin sets the trackbar minimum position.
//
// For further details, please see:
// https://docs.opencv.org/master/d7/dfc/group__highgui.html#gabe26ffe8d2b60cc678895595a581b7aa
//
func (t *Trackbar) SetMin(pos int) {
cName := C.CString(t.parent.name)
defer C.free(unsafe.Pointer(cName))
tName := C.CString(t.name)
defer C.free(unsafe.Pointer(tName))
C.Trackbar_SetMin(cName, tName, C.int(pos))
}
// SetMax sets the trackbar maximum position.
//
// For further details, please see:
// https://docs.opencv.org/master/d7/dfc/group__highgui.html#ga7e5437ccba37f1154b65210902fc4480
//
func (t *Trackbar) SetMax(pos int) {
cName := C.CString(t.parent.name)
defer C.free(unsafe.Pointer(cName))
tName := C.CString(t.name)
defer C.free(unsafe.Pointer(tName))
C.Trackbar_SetMax(cName, tName, C.int(pos))
}
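
Putting the window and trackbar APIs above together: IMShow only renders once WaitKey pumps the event loop, and CreateTrackbarWithValue mirrors the slider position into a Go int. A rough sketch assuming the vendored gocv API; the input path and threshold range are placeholders.

package main

import "gocv.io/x/gocv"

func main() {
	img := gocv.IMRead("input.jpg", gocv.IMReadGrayScale)
	defer img.Close()

	win := gocv.NewWindow("demo")
	defer win.Close()

	// The slider position is mirrored into `threshold` by the binding.
	threshold := 128
	win.CreateTrackbarWithValue("threshold", &threshold, 255)

	dst := gocv.NewMat()
	defer dst.Close()
	for {
		gocv.Threshold(img, &dst, float32(threshold), 255, gocv.ThresholdBinary)
		win.IMShow(dst)
		// WaitKey pumps the GUI event loop; without it nothing is drawn.
		if win.WaitKey(50) == 27 { // esc quits
			break
		}
	}
}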

36
vendor/gocv.io/x/gocv/highgui_gocv.h generated vendored Normal file
View File

@ -0,0 +1,36 @@
#ifndef _OPENCV3_HIGHGUI_H_
#define _OPENCV3_HIGHGUI_H_
#ifdef __cplusplus
#include <opencv2/opencv.hpp>
extern "C" {
#endif
#include "core.h"
// Window
void Window_New(const char* winname, int flags);
void Window_Close(const char* winname);
void Window_IMShow(const char* winname, Mat mat);
double Window_GetProperty(const char* winname, int flag);
void Window_SetProperty(const char* winname, int flag, double value);
void Window_SetTitle(const char* winname, const char* title);
int Window_WaitKey(int);
void Window_Move(const char* winname, int x, int y);
void Window_Resize(const char* winname, int width, int height);
struct Rect Window_SelectROI(const char* winname, Mat img);
struct Rects Window_SelectROIs(const char* winname, Mat img);
// Trackbar
void Trackbar_Create(const char* winname, const char* trackname, int max);
void Trackbar_CreateWithValue(const char* winname, const char* trackname, int* value, int max);
int Trackbar_GetPos(const char* winname, const char* trackname);
void Trackbar_SetPos(const char* winname, const char* trackname, int pos);
void Trackbar_SetMin(const char* winname, const char* trackname, int pos);
void Trackbar_SetMax(const char* winname, const char* trackname, int pos);
#ifdef __cplusplus
}
#endif
#endif //_OPENCV3_HIGHGUI_H_

35
vendor/gocv.io/x/gocv/highgui_string.go generated vendored Normal file
View File

@ -0,0 +1,35 @@
package gocv
/*
#include <stdlib.h>
#include "highgui_gocv.h"
*/
import "C"
func (c WindowFlag) String() string {
switch c {
case WindowNormal:
return "window-normal"
case WindowFullscreen:
return "window-fullscreen"
case WindowFreeRatio:
return "window-free-ratio"
}
return ""
}
func (c WindowPropertyFlag) String() string {
switch c {
case WindowPropertyFullscreen:
return "window-property-fullscreen"
case WindowPropertyAutosize:
return "window-property-autosize"
case WindowPropertyAspectRatio:
return "window-property-aspect-ratio"
case WindowPropertyOpenGL:
return "window-property-opengl"
case WindowPropertyVisible:
return "window-property-visible"
}
return ""
}

44
vendor/gocv.io/x/gocv/imgcodecs.cpp generated vendored Normal file
View File

@ -0,0 +1,44 @@
#include "imgcodecs.h"
// Image
Mat Image_IMRead(const char* filename, int flags) {
cv::Mat img = cv::imread(filename, flags);
return new cv::Mat(img);
}
bool Image_IMWrite(const char* filename, Mat img) {
return cv::imwrite(filename, *img);
}
bool Image_IMWrite_WithParams(const char* filename, Mat img, IntVector params) {
std::vector<int> compression_params;
for (int i = 0, *v = params.val; i < params.length; ++v, ++i) {
compression_params.push_back(*v);
}
return cv::imwrite(filename, *img, compression_params);
}
void Image_IMEncode(const char* fileExt, Mat img, void* vector) {
auto vectorPtr = reinterpret_cast<std::vector<uchar> *>(vector);
cv::imencode(fileExt, *img, *vectorPtr);
}
void Image_IMEncode_WithParams(const char* fileExt, Mat img, IntVector params, void* vector) {
auto vectorPtr = reinterpret_cast<std::vector<uchar> *>(vector);
std::vector<int> compression_params;
for (int i = 0, *v = params.val; i < params.length; ++v, ++i) {
compression_params.push_back(*v);
}
cv::imencode(fileExt, *img, *vectorPtr, compression_params);
}
Mat Image_IMDecode(ByteArray buf, int flags) {
std::vector<uchar> data(buf.data, buf.data + buf.length);
cv::Mat img = cv::imdecode(data, flags);
return new cv::Mat(img);
}

252
vendor/gocv.io/x/gocv/imgcodecs.go generated vendored Normal file
View File

@ -0,0 +1,252 @@
package gocv
/*
#include <stdlib.h>
#include "imgcodecs.h"
*/
import "C"
import (
"unsafe"
)
// IMReadFlag is one of the valid flags to use for the IMRead function.
type IMReadFlag int
const (
// IMReadUnchanged returns the loaded image as is (with alpha channel,
// otherwise it gets cropped).
IMReadUnchanged IMReadFlag = -1
// IMReadGrayScale always converts the image to the single channel
// grayscale image.
IMReadGrayScale IMReadFlag = 0
// IMReadColor always converts image to the 3 channel BGR color image.
IMReadColor IMReadFlag = 1
// IMReadAnyDepth returns 16-bit/32-bit image when the input has the corresponding
// depth, otherwise convert it to 8-bit.
IMReadAnyDepth IMReadFlag = 2
// IMReadAnyColor the image is read in any possible color format.
IMReadAnyColor IMReadFlag = 4
// IMReadLoadGDAL uses the gdal driver for loading the image.
IMReadLoadGDAL IMReadFlag = 8
// IMReadReducedGrayscale2 always converts image to the single channel grayscale image
// and the image size reduced 1/2.
IMReadReducedGrayscale2 IMReadFlag = 16
// IMReadReducedColor2 always converts image to the 3 channel BGR color image and the
// image size reduced 1/2.
IMReadReducedColor2 IMReadFlag = 17
// IMReadReducedGrayscale4 always converts image to the single channel grayscale image and
// the image size reduced 1/4.
IMReadReducedGrayscale4 IMReadFlag = 32
// IMReadReducedColor4 always converts image to the 3 channel BGR color image and
// the image size reduced 1/4.
IMReadReducedColor4 IMReadFlag = 33
// IMReadReducedGrayscale8 always converts the image to the single channel grayscale image and
// the image size reduced 1/8.
IMReadReducedGrayscale8 IMReadFlag = 64
// IMReadReducedColor8 always converts the image to the 3 channel BGR color image and the
// image size reduced 1/8.
IMReadReducedColor8 IMReadFlag = 65
// IMReadIgnoreOrientation do not rotate the image according to EXIF's orientation flag.
IMReadIgnoreOrientation IMReadFlag = 128
)
// TODO: Define IMWriteFlag type?
const (
// IMWriteJpegQuality is the quality from 0 to 100 for JPEG (the higher the better). Default value is 95.
IMWriteJpegQuality = 1
// IMWriteJpegProgressive enables JPEG progressive feature, 0 or 1, default is False.
IMWriteJpegProgressive = 2
// IMWriteJpegOptimize enables JPEG optimization, 0 or 1, default is False.
IMWriteJpegOptimize = 3
// IMWriteJpegRstInterval is the JPEG restart interval, 0 - 65535, default is 0 - no restart.
IMWriteJpegRstInterval = 4
// IMWriteJpegLumaQuality separates luma quality level, 0 - 100, default is 0 - don't use.
IMWriteJpegLumaQuality = 5
// IMWriteJpegChromaQuality separates chroma quality level, 0 - 100, default is 0 - don't use.
IMWriteJpegChromaQuality = 6
// IMWritePngCompression is the compression level from 0 to 9 for PNG. A
// higher value means a smaller size and longer compression time.
// If specified, strategy is changed to IMWRITE_PNG_STRATEGY_DEFAULT (Z_DEFAULT_STRATEGY).
// Default value is 1 (best speed setting).
IMWritePngCompression = 16
// IMWritePngStrategy is one of cv::IMWritePNGFlags, default is IMWRITE_PNG_STRATEGY_RLE.
IMWritePngStrategy = 17
// IMWritePngBilevel is the binary level PNG, 0 or 1, default is 0.
IMWritePngBilevel = 18
// IMWritePxmBinary for PPM, PGM, or PBM can be a binary format flag, 0 or 1. Default value is 1.
IMWritePxmBinary = 32
// IMWriteWebpQuality is the quality from 1 to 100 for WEBP (the higher the
// better). By default (without any parameter) and for quality above
// 100, lossless compression is used.
IMWriteWebpQuality = 64
// IMWritePamTupletype sets the TUPLETYPE field to the corresponding string
// value that is defined for the format.
IMWritePamTupletype = 128
// IMWritePngStrategyDefault is the value to use for normal data.
IMWritePngStrategyDefault = 0
// IMWritePngStrategyFiltered is the value to use for data produced by a
// filter (or predictor). Filtered data consists mostly of small values
// with a somewhat random distribution. In this case, the compression
// algorithm is tuned to compress them better.
IMWritePngStrategyFiltered = 1
// IMWritePngStrategyHuffmanOnly forces Huffman encoding only (no string match).
IMWritePngStrategyHuffmanOnly = 2
// IMWritePngStrategyRle is the value to use to limit match distances to
// one (run-length encoding).
IMWritePngStrategyRle = 3
// IMWritePngStrategyFixed is the value to prevent the use of dynamic
// Huffman codes, allowing for a simpler decoder for special applications.
IMWritePngStrategyFixed = 4
)
// IMRead reads an image from a file into a Mat.
// The flags param is one of the IMReadFlag flags.
// If the image cannot be read (because of a missing file, improper permissions,
// unsupported or invalid format), the function returns an empty Mat.
//
// For further details, please see:
// http://docs.opencv.org/master/d4/da8/group__imgcodecs.html#ga288b8b3da0892bd651fce07b3bbd3a56
//
func IMRead(name string, flags IMReadFlag) Mat {
cName := C.CString(name)
defer C.free(unsafe.Pointer(cName))
return newMat(C.Image_IMRead(cName, C.int(flags)))
}
// IMWrite writes a Mat to an image file.
//
// For further details, please see:
// http://docs.opencv.org/master/d4/da8/group__imgcodecs.html#gabbc7ef1aa2edfaa87772f1202d67e0ce
//
func IMWrite(name string, img Mat) bool {
cName := C.CString(name)
defer C.free(unsafe.Pointer(cName))
return bool(C.Image_IMWrite(cName, img.p))
}
// IMWriteWithParams writes a Mat to an image file. With this function you can
// pass compression parameters.
//
// For further details, please see:
// http://docs.opencv.org/master/d4/da8/group__imgcodecs.html#gabbc7ef1aa2edfaa87772f1202d67e0ce
//
func IMWriteWithParams(name string, img Mat, params []int) bool {
cName := C.CString(name)
defer C.free(unsafe.Pointer(cName))
cparams := []C.int{}
for _, v := range params {
cparams = append(cparams, C.int(v))
}
paramsVector := C.struct_IntVector{}
paramsVector.val = (*C.int)(&cparams[0])
paramsVector.length = (C.int)(len(cparams))
return bool(C.Image_IMWrite_WithParams(cName, img.p, paramsVector))
}
// FileExt represents a file extension.
type FileExt string
const (
// PNGFileExt is the file extension for PNG.
PNGFileExt FileExt = ".png"
// JPEGFileExt is the file extension for JPEG.
JPEGFileExt FileExt = ".jpg"
// GIFFileExt is the file extension for GIF.
GIFFileExt FileExt = ".gif"
)
// IMEncode encodes an image Mat into a memory buffer.
// This function compresses the image and stores it in the returned memory buffer,
// using the image format passed in, in the form of a file extension string.
//
// For further details, please see:
// http://docs.opencv.org/master/d4/da8/group__imgcodecs.html#ga461f9ac09887e47797a54567df3b8b63
//
func IMEncode(fileExt FileExt, img Mat) (buf *NativeByteBuffer, err error) {
cfileExt := C.CString(string(fileExt))
defer C.free(unsafe.Pointer(cfileExt))
buffer := newNativeByteBuffer()
C.Image_IMEncode(cfileExt, img.Ptr(), buffer.nativePointer())
return buffer, nil
}
// IMEncodeWithParams encodes an image Mat into a memory buffer.
// This function compresses the image and stores it in the returned memory buffer,
// using the image format passed in, in the form of a file extension string.
//
// Usage example:
// buffer, err := gocv.IMEncodeWithParams(gocv.JPEGFileExt, img, []int{gocv.IMWriteJpegQuality, quality})
//
// For further details, please see:
// http://docs.opencv.org/master/d4/da8/group__imgcodecs.html#ga461f9ac09887e47797a54567df3b8b63
//
func IMEncodeWithParams(fileExt FileExt, img Mat, params []int) (buf *NativeByteBuffer, err error) {
cfileExt := C.CString(string(fileExt))
defer C.free(unsafe.Pointer(cfileExt))
cparams := []C.int{}
for _, v := range params {
cparams = append(cparams, C.int(v))
}
paramsVector := C.struct_IntVector{}
paramsVector.val = (*C.int)(&cparams[0])
paramsVector.length = (C.int)(len(cparams))
b := newNativeByteBuffer()
C.Image_IMEncode_WithParams(cfileExt, img.Ptr(), paramsVector, b.nativePointer())
return b, nil
}
// IMDecode reads an image from the specified buffer in memory.
// If the buffer is too short or contains invalid data, the function
// returns an empty matrix.
//
// For further details, please see:
// https://docs.opencv.org/master/d4/da8/group__imgcodecs.html#ga26a67788faa58ade337f8d28ba0eb19e
//
func IMDecode(buf []byte, flags IMReadFlag) (Mat, error) {
data, err := toByteArray(buf)
if err != nil {
return Mat{}, err
}
return newMat(C.Image_IMDecode(*data, C.int(flags))), nil
}
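
A sketch of the in-memory round trip these functions make possible, encoding to JPEG and decoding straight back; the quality value is arbitrary and the NativeByteBuffer accessors are assumed from the vendored package.

package main

import (
	"fmt"

	"gocv.io/x/gocv"
)

func main() {
	img := gocv.IMRead("input.jpg", gocv.IMReadColor)
	defer img.Close()

	// Encode to an in-memory JPEG at quality 80.
	buf, err := gocv.IMEncodeWithParams(gocv.JPEGFileExt, img,
		[]int{gocv.IMWriteJpegQuality, 80})
	if err != nil {
		panic(err)
	}
	defer buf.Close()

	// Decode the bytes straight back into a Mat.
	decoded, err := gocv.IMDecode(buf.GetBytes(), gocv.IMReadColor)
	if err != nil {
		panic(err)
	}
	defer decoded.Close()
	fmt.Printf("round trip: %d bytes -> %dx%d\n",
		buf.Len(), decoded.Cols(), decoded.Rows())
}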

25
vendor/gocv.io/x/gocv/imgcodecs.h generated vendored Normal file
View File

@ -0,0 +1,25 @@
#ifndef _OPENCV3_IMGCODECS_H_
#define _OPENCV3_IMGCODECS_H_
#include <stdbool.h>
#ifdef __cplusplus
#include <opencv2/opencv.hpp>
extern "C" {
#endif
#include "core.h"
Mat Image_IMRead(const char* filename, int flags);
bool Image_IMWrite(const char* filename, Mat img);
bool Image_IMWrite_WithParams(const char* filename, Mat img, IntVector params);
void Image_IMEncode(const char* fileExt, Mat img, void* vector);
void Image_IMEncode_WithParams(const char* fileExt, Mat img, IntVector params, void* vector);
Mat Image_IMDecode(ByteArray buf, int flags);
#ifdef __cplusplus
}
#endif
#endif //_OPENCV3_IMGCODECS_H_

671
vendor/gocv.io/x/gocv/imgproc.cpp generated vendored Normal file
View File

@ -0,0 +1,671 @@
#include "imgproc.h"
double ArcLength(PointVector curve, bool is_closed) {
return cv::arcLength(*curve, is_closed);
}
PointVector ApproxPolyDP(PointVector curve, double epsilon, bool closed) {
PointVector approxCurvePts = new std::vector<cv::Point>;
cv::approxPolyDP(*curve, *approxCurvePts, epsilon, closed);
return approxCurvePts;
}
void CvtColor(Mat src, Mat dst, int code) {
cv::cvtColor(*src, *dst, code);
}
void EqualizeHist(Mat src, Mat dst) {
cv::equalizeHist(*src, *dst);
}
void CalcHist(struct Mats mats, IntVector chans, Mat mask, Mat hist, IntVector sz, FloatVector rng, bool acc) {
std::vector<cv::Mat> images;
for (int i = 0; i < mats.length; ++i) {
images.push_back(*mats.mats[i]);
}
std::vector<int> channels;
for (int i = 0, *v = chans.val; i < chans.length; ++v, ++i) {
channels.push_back(*v);
}
std::vector<int> histSize;
for (int i = 0, *v = sz.val; i < sz.length; ++v, ++i) {
histSize.push_back(*v);
}
std::vector<float> ranges;
float* f;
int i;
for (i = 0, f = rng.val; i < rng.length; ++f, ++i) {
ranges.push_back(*f);
}
cv::calcHist(images, channels, *mask, *hist, histSize, ranges, acc);
}
void CalcBackProject(struct Mats mats, IntVector chans, Mat hist, Mat backProject, FloatVector rng, bool uniform){
std::vector<cv::Mat> images;
for (int i = 0; i < mats.length; ++i) {
images.push_back(*mats.mats[i]);
}
std::vector<int> channels;
for (int i = 0, *v = chans.val; i < chans.length; ++v, ++i) {
channels.push_back(*v);
}
std::vector<float> ranges;
float* f;
int i;
for (i = 0, f = rng.val; i < rng.length; ++f, ++i) {
ranges.push_back(*f);
}
cv::calcBackProject(images, channels, *hist, *backProject, ranges, uniform);
}
double CompareHist(Mat hist1, Mat hist2, int method) {
return cv::compareHist(*hist1, *hist2, method);
}
struct RotatedRect FitEllipse(PointVector pts)
{
cv::RotatedRect bRect = cv::fitEllipse(*pts);
Rect r = {bRect.boundingRect().x, bRect.boundingRect().y, bRect.boundingRect().width, bRect.boundingRect().height};
Point centrpt = {int(lroundf(bRect.center.x)), int(lroundf(bRect.center.y))};
Size szsz = {int(lroundf(bRect.size.width)), int(lroundf(bRect.size.height))};
cv::Point2f* pts4 = new cv::Point2f[4];
bRect.points(pts4);
Point* rpts = new Point[4];
for (size_t j = 0; j < 4; j++) {
Point pt = {int(lroundf(pts4[j].x)), int(lroundf(pts4[j].y))};
rpts[j] = pt;
}
delete[] pts4;
RotatedRect rotRect = {Points{rpts, 4}, r, centrpt, szsz, bRect.angle};
return rotRect;
}
void ConvexHull(PointVector points, Mat hull, bool clockwise, bool returnPoints) {
cv::convexHull(*points, *hull, clockwise, returnPoints);
}
void ConvexityDefects(PointVector points, Mat hull, Mat result) {
cv::convexityDefects(*points, *hull, *result);
}
void BilateralFilter(Mat src, Mat dst, int d, double sc, double ss) {
cv::bilateralFilter(*src, *dst, d, sc, ss);
}
void Blur(Mat src, Mat dst, Size ps) {
cv::Size sz(ps.width, ps.height);
cv::blur(*src, *dst, sz);
}
void BoxFilter(Mat src, Mat dst, int ddepth, Size ps) {
cv::Size sz(ps.width, ps.height);
cv::boxFilter(*src, *dst, ddepth, sz);
}
void SqBoxFilter(Mat src, Mat dst, int ddepth, Size ps) {
cv::Size sz(ps.width, ps.height);
cv::sqrBoxFilter(*src, *dst, ddepth, sz);
}
void Dilate(Mat src, Mat dst, Mat kernel) {
cv::dilate(*src, *dst, *kernel);
}
void DilateWithParams(Mat src, Mat dst, Mat kernel, Point anchor, int iterations, int borderType, Scalar borderValue) {
cv::Point pt1(anchor.x, anchor.y);
cv::Scalar c = cv::Scalar(borderValue.val1, borderValue.val2, borderValue.val3, borderValue.val4);
cv::dilate(*src, *dst, *kernel, pt1, iterations, borderType, c);
}
void DistanceTransform(Mat src, Mat dst, Mat labels, int distanceType, int maskSize, int labelType) {
cv::distanceTransform(*src, *dst, *labels, distanceType, maskSize, labelType);
}
void Erode(Mat src, Mat dst, Mat kernel) {
cv::erode(*src, *dst, *kernel);
}
void ErodeWithParams(Mat src, Mat dst, Mat kernel, Point anchor, int iterations, int borderType) {
cv::Point pt1(anchor.x, anchor.y);
cv::erode(*src, *dst, *kernel, pt1, iterations, borderType, cv::morphologyDefaultBorderValue());
}
void MatchTemplate(Mat image, Mat templ, Mat result, int method, Mat mask) {
cv::matchTemplate(*image, *templ, *result, method, *mask);
}
struct Moment Moments(Mat src, bool binaryImage) {
cv::Moments m = cv::moments(*src, binaryImage);
Moment mom = {m.m00, m.m10, m.m01, m.m20, m.m11, m.m02, m.m30, m.m21, m.m12, m.m03,
m.mu20, m.mu11, m.mu02, m.mu30, m.mu21, m.mu12, m.mu03,
m.nu20, m.nu11, m.nu02, m.nu30, m.nu21, m.nu12, m.nu03
};
return mom;
}
void PyrDown(Mat src, Mat dst, Size size, int borderType) {
cv::Size cvSize(size.width, size.height);
cv::pyrDown(*src, *dst, cvSize, borderType);
}
void PyrUp(Mat src, Mat dst, Size size, int borderType) {
cv::Size cvSize(size.width, size.height);
cv::pyrUp(*src, *dst, cvSize, borderType);
}
struct Rect BoundingRect(PointVector pts) {
cv::Rect bRect = cv::boundingRect(*pts);
Rect r = {bRect.x, bRect.y, bRect.width, bRect.height};
return r;
}
void BoxPoints(RotatedRect rect, Mat boxPts){
cv::Point2f centerPt(rect.center.x , rect.center.y);
cv::Size2f rSize(rect.size.width, rect.size.height);
cv::RotatedRect rotatedRectangle(centerPt, rSize, rect.angle);
cv::boxPoints(rotatedRectangle, *boxPts);
}
double ContourArea(PointVector pts) {
return cv::contourArea(*pts);
}
struct RotatedRect MinAreaRect(PointVector pts){
cv::RotatedRect cvrect = cv::minAreaRect(*pts);
Point* rpts = new Point[4];
cv::Point2f* pts4 = new cv::Point2f[4];
cvrect.points(pts4);
for (size_t j = 0; j < 4; j++) {
Point pt = {int(lroundf(pts4[j].x)), int(lroundf(pts4[j].y))};
rpts[j] = pt;
}
delete[] pts4;
cv::Rect bRect = cvrect.boundingRect();
Rect r = {bRect.x, bRect.y, bRect.width, bRect.height};
Point centrpt = {int(lroundf(cvrect.center.x)), int(lroundf(cvrect.center.y))};
Size szsz = {int(lroundf(cvrect.size.width)), int(lroundf(cvrect.size.height))};
RotatedRect retrect = {(Contour){rpts, 4}, r, centrpt, szsz, cvrect.angle};
return retrect;
}
void MinEnclosingCircle(PointVector pts, Point2f* center, float* radius){
cv::Point2f center2f;
cv::minEnclosingCircle(*pts, center2f, *radius);
center->x = center2f.x;
center->y = center2f.y;
}
PointsVector FindContours(Mat src, Mat hierarchy, int mode, int method) {
PointsVector contours = new std::vector<std::vector<cv::Point> >;
cv::findContours(*src, *contours, *hierarchy, mode, method);
return contours;
}
double PointPolygonTest(PointVector pts, Point pt, bool measureDist) {
cv::Point2f pt1(pt.x, pt.y);
return cv::pointPolygonTest(*pts, pt1, measureDist);
}
int ConnectedComponents(Mat src, Mat labels, int connectivity, int ltype, int ccltype){
return cv::connectedComponents(*src, *labels, connectivity, ltype, ccltype);
}
int ConnectedComponentsWithStats(Mat src, Mat labels, Mat stats, Mat centroids,
int connectivity, int ltype, int ccltype){
return cv::connectedComponentsWithStats(*src, *labels, *stats, *centroids, connectivity, ltype, ccltype);
}
Mat GetStructuringElement(int shape, Size ksize) {
cv::Size sz(ksize.width, ksize.height);
return new cv::Mat(cv::getStructuringElement(shape, sz));
}
Scalar MorphologyDefaultBorderValue(){
cv::Scalar cs = cv::morphologyDefaultBorderValue();
return (Scalar){cs[0],cs[1],cs[2],cs[3]};
}
void MorphologyEx(Mat src, Mat dst, int op, Mat kernel) {
cv::morphologyEx(*src, *dst, op, *kernel);
}
void MorphologyExWithParams(Mat src, Mat dst, int op, Mat kernel, Point pt, int iterations, int borderType) {
cv::Point pt1(pt.x, pt.y);
cv::morphologyEx(*src, *dst, op, *kernel, pt1, iterations, borderType);
}
void GaussianBlur(Mat src, Mat dst, Size ps, double sX, double sY, int bt) {
cv::Size sz(ps.width, ps.height);
cv::GaussianBlur(*src, *dst, sz, sX, sY, bt);
}
Mat GetGaussianKernel(int ksize, double sigma, int ktype){
return new cv::Mat(cv::getGaussianKernel(ksize, sigma, ktype));
}
void Laplacian(Mat src, Mat dst, int dDepth, int kSize, double scale, double delta,
int borderType) {
cv::Laplacian(*src, *dst, dDepth, kSize, scale, delta, borderType);
}
void Scharr(Mat src, Mat dst, int dDepth, int dx, int dy, double scale, double delta,
int borderType) {
cv::Scharr(*src, *dst, dDepth, dx, dy, scale, delta, borderType);
}
void MedianBlur(Mat src, Mat dst, int ksize) {
cv::medianBlur(*src, *dst, ksize);
}
void Canny(Mat src, Mat edges, double t1, double t2) {
cv::Canny(*src, *edges, t1, t2);
}
void CornerSubPix(Mat img, Mat corners, Size winSize, Size zeroZone, TermCriteria criteria) {
cv::Size wsz(winSize.width, winSize.height);
cv::Size zsz(zeroZone.width, zeroZone.height);
cv::cornerSubPix(*img, *corners, wsz, zsz, *criteria);
}
void GoodFeaturesToTrack(Mat img, Mat corners, int maxCorners, double quality, double minDist) {
cv::goodFeaturesToTrack(*img, *corners, maxCorners, quality, minDist);
}
void GrabCut(Mat img, Mat mask, Rect r, Mat bgdModel, Mat fgdModel, int iterCount, int mode) {
cv::Rect cvRect = cv::Rect(r.x, r.y, r.width, r.height);
cv::grabCut(*img, *mask, cvRect, *bgdModel, *fgdModel, iterCount, mode);
}
void HoughCircles(Mat src, Mat circles, int method, double dp, double minDist) {
cv::HoughCircles(*src, *circles, method, dp, minDist);
}
void HoughCirclesWithParams(Mat src, Mat circles, int method, double dp, double minDist,
double param1, double param2, int minRadius, int maxRadius) {
cv::HoughCircles(*src, *circles, method, dp, minDist, param1, param2, minRadius, maxRadius);
}
void HoughLines(Mat src, Mat lines, double rho, double theta, int threshold) {
cv::HoughLines(*src, *lines, rho, theta, threshold);
}
void HoughLinesP(Mat src, Mat lines, double rho, double theta, int threshold) {
cv::HoughLinesP(*src, *lines, rho, theta, threshold);
}
void HoughLinesPWithParams(Mat src, Mat lines, double rho, double theta, int threshold, double minLineLength, double maxLineGap) {
cv::HoughLinesP(*src, *lines, rho, theta, threshold, minLineLength, maxLineGap);
}
void HoughLinesPointSet(Mat points, Mat lines, int linesMax, int threshold,
double minRho, double maxRho, double rhoStep,
double minTheta, double maxTheta, double thetaStep) {
cv::HoughLinesPointSet(*points, *lines, linesMax, threshold,
minRho, maxRho, rhoStep, minTheta, maxTheta, thetaStep );
}
void Integral(Mat src, Mat sum, Mat sqsum, Mat tilted) {
cv::integral(*src, *sum, *sqsum, *tilted);
}
double Threshold(Mat src, Mat dst, double thresh, double maxvalue, int typ) {
return cv::threshold(*src, *dst, thresh, maxvalue, typ);
}
void AdaptiveThreshold(Mat src, Mat dst, double maxValue, int adaptiveMethod, int thresholdType,
int blockSize, double c) {
cv::adaptiveThreshold(*src, *dst, maxValue, adaptiveMethod, thresholdType, blockSize, c);
}
void ArrowedLine(Mat img, Point pt1, Point pt2, Scalar color, int thickness) {
cv::Point p1(pt1.x, pt1.y);
cv::Point p2(pt2.x, pt2.y);
cv::Scalar c = cv::Scalar(color.val1, color.val2, color.val3, color.val4);
cv::arrowedLine(*img, p1, p2, c, thickness);
}
bool ClipLine(Size imgSize, Point pt1, Point pt2) {
cv::Size sz(imgSize.width, imgSize.height);
cv::Point p1(pt1.x, pt1.y);
cv::Point p2(pt2.x, pt2.y);
return cv::clipLine(sz, p1, p2);
}
void Circle(Mat img, Point center, int radius, Scalar color, int thickness) {
cv::Point p1(center.x, center.y);
cv::Scalar c = cv::Scalar(color.val1, color.val2, color.val3, color.val4);
cv::circle(*img, p1, radius, c, thickness);
}
void CircleWithParams(Mat img, Point center, int radius, Scalar color, int thickness, int lineType, int shift) {
cv::Point p1(center.x, center.y);
cv::Scalar c = cv::Scalar(color.val1, color.val2, color.val3, color.val4);
cv::circle(*img, p1, radius, c, thickness, lineType, shift);
}
void Ellipse(Mat img, Point center, Point axes, double angle, double
startAngle, double endAngle, Scalar color, int thickness) {
cv::Point p1(center.x, center.y);
cv::Point p2(axes.x, axes.y);
cv::Scalar c = cv::Scalar(color.val1, color.val2, color.val3, color.val4);
cv::ellipse(*img, p1, p2, angle, startAngle, endAngle, c, thickness);
}
void EllipseWithParams(Mat img, Point center, Point axes, double angle, double
startAngle, double endAngle, Scalar color, int thickness, int lineType, int shift) {
cv::Point p1(center.x, center.y);
cv::Point p2(axes.x, axes.y);
cv::Scalar c = cv::Scalar(color.val1, color.val2, color.val3, color.val4);
cv::ellipse(*img, p1, p2, angle, startAngle, endAngle, c, thickness, lineType, shift);
}
void Line(Mat img, Point pt1, Point pt2, Scalar color, int thickness) {
cv::Point p1(pt1.x, pt1.y);
cv::Point p2(pt2.x, pt2.y);
cv::Scalar c = cv::Scalar(color.val1, color.val2, color.val3, color.val4);
cv::line(*img, p1, p2, c, thickness);
}
void Rectangle(Mat img, Rect r, Scalar color, int thickness) {
cv::Scalar c = cv::Scalar(color.val1, color.val2, color.val3, color.val4);
cv::rectangle(
*img,
cv::Point(r.x, r.y),
cv::Point(r.x + r.width, r.y + r.height),
c,
thickness,
cv::LINE_AA
);
}
void RectangleWithParams(Mat img, Rect r, Scalar color, int thickness, int lineType, int shift) {
cv::Scalar c = cv::Scalar(color.val1, color.val2, color.val3, color.val4);
cv::rectangle(
*img,
cv::Point(r.x, r.y),
cv::Point(r.x + r.width, r.y + r.height),
c,
thickness,
lineType,
shift
);
}
void FillPoly(Mat img, PointsVector pts, Scalar color) {
cv::Scalar c = cv::Scalar(color.val1, color.val2, color.val3, color.val4);
cv::fillPoly(*img, *pts, c);
}
void FillPolyWithParams(Mat img, PointsVector pts, Scalar color, int lineType, int shift, Point offset) {
cv::Scalar c = cv::Scalar(color.val1, color.val2, color.val3, color.val4);
cv::fillPoly(*img, *pts, c, lineType, shift, cv::Point(offset.x, offset.y));
}
void Polylines(Mat img, PointsVector pts, bool isClosed, Scalar color,int thickness) {
cv::Scalar c = cv::Scalar(color.val1, color.val2, color.val3, color.val4);
cv::polylines(*img, *pts, isClosed, c, thickness);
}
struct Size GetTextSize(const char* text, int fontFace, double fontScale, int thickness) {
return GetTextSizeWithBaseline(text, fontFace, fontScale, thickness, NULL);
}
struct Size GetTextSizeWithBaseline(const char* text, int fontFace, double fontScale, int thickness, int* baseline) {
cv::Size sz = cv::getTextSize(text, fontFace, fontScale, thickness, baseline);
Size size = {sz.width, sz.height};
return size;
}
void PutText(Mat img, const char* text, Point org, int fontFace, double fontScale,
Scalar color, int thickness) {
cv::Point pt(org.x, org.y);
cv::Scalar c = cv::Scalar(color.val1, color.val2, color.val3, color.val4);
cv::putText(*img, text, pt, fontFace, fontScale, c, thickness);
}
void PutTextWithParams(Mat img, const char* text, Point org, int fontFace, double fontScale,
Scalar color, int thickness, int lineType, bool bottomLeftOrigin) {
cv::Point pt(org.x, org.y);
cv::Scalar c = cv::Scalar(color.val1, color.val2, color.val3, color.val4);
cv::putText(*img, text, pt, fontFace, fontScale, c, thickness, lineType, bottomLeftOrigin);
}
void Resize(Mat src, Mat dst, Size dsize, double fx, double fy, int interp) {
cv::Size sz(dsize.width, dsize.height);
cv::resize(*src, *dst, sz, fx, fy, interp);
}
void GetRectSubPix(Mat src, Size patchSize, Point center, Mat dst) {
cv::Size sz(patchSize.width, patchSize.height);
cv::Point pt(center.x, center.y);
cv::getRectSubPix(*src, sz, pt, *dst);
}
Mat GetRotationMatrix2D(Point center, double angle, double scale) {
cv::Point pt(center.x, center.y);
return new cv::Mat(cv::getRotationMatrix2D(pt, angle, scale));
}
void WarpAffine(Mat src, Mat dst, Mat m, Size dsize) {
cv::Size sz(dsize.width, dsize.height);
cv::warpAffine(*src, *dst, *m, sz);
}
void WarpAffineWithParams(Mat src, Mat dst, Mat rot_mat, Size dsize, int flags, int borderMode,
Scalar borderValue) {
cv::Size sz(dsize.width, dsize.height);
cv::Scalar c = cv::Scalar(borderValue.val1, borderValue.val2, borderValue.val3, borderValue.val4);
cv::warpAffine(*src, *dst, *rot_mat, sz, flags, borderMode, c);
}
void WarpPerspective(Mat src, Mat dst, Mat m, Size dsize) {
cv::Size sz(dsize.width, dsize.height);
cv::warpPerspective(*src, *dst, *m, sz);
}
void WarpPerspectiveWithParams(Mat src, Mat dst, Mat rot_mat, Size dsize, int flags, int borderMode,
Scalar borderValue) {
cv::Size sz(dsize.width, dsize.height);
cv::Scalar c = cv::Scalar(borderValue.val1, borderValue.val2, borderValue.val3, borderValue.val4);
cv::warpPerspective(*src, *dst, *rot_mat, sz, flags, borderMode, c);
}
void Watershed(Mat image, Mat markers) {
cv::watershed(*image, *markers);
}
void ApplyColorMap(Mat src, Mat dst, int colormap) {
cv::applyColorMap(*src, *dst, colormap);
}
void ApplyCustomColorMap(Mat src, Mat dst, Mat colormap) {
cv::applyColorMap(*src, *dst, *colormap);
}
Mat GetPerspectiveTransform(PointVector src, PointVector dst) {
std::vector<cv::Point2f> src_pts;
copyPointVectorToPoint2fVector(src, &src_pts);
std::vector<cv::Point2f> dst_pts;
copyPointVectorToPoint2fVector(dst, &dst_pts);
return new cv::Mat(cv::getPerspectiveTransform(src_pts, dst_pts));
}
Mat GetPerspectiveTransform2f(Point2fVector src, Point2fVector dst) {
return new cv::Mat(cv::getPerspectiveTransform(*src, *dst));
}
Mat GetAffineTransform(PointVector src, PointVector dst) {
std::vector<cv::Point2f> src_pts;
copyPointVectorToPoint2fVector(src, &src_pts);
std::vector<cv::Point2f> dst_pts;
copyPointVectorToPoint2fVector(dst, &dst_pts);
return new cv::Mat(cv::getAffineTransform(src_pts, dst_pts));
}
Mat GetAffineTransform2f(Point2fVector src, Point2fVector dst) {
return new cv::Mat(cv::getAffineTransform(*src, *dst));
}
Mat FindHomography(Mat src, Mat dst, int method, double ransacReprojThreshold, Mat mask, const int maxIters, const double confidence) {
return new cv::Mat(cv::findHomography(*src, *dst, method, ransacReprojThreshold, *mask, maxIters, confidence));
}
void DrawContours(Mat src, PointsVector contours, int contourIdx, Scalar color, int thickness) {
cv::Scalar c = cv::Scalar(color.val1, color.val2, color.val3, color.val4);
cv::drawContours(*src, *contours, contourIdx, c, thickness);
}
void DrawContoursWithParams(Mat src, PointsVector contours, int contourIdx, Scalar color, int thickness, int lineType, Mat hierarchy, int maxLevel, Point offset) {
cv::Scalar c = cv::Scalar(color.val1, color.val2, color.val3, color.val4);
cv::Point offsetPt(offset.x, offset.y);
std::vector<cv::Vec4i> vecHierarchy;
if (hierarchy->empty() == 0) {
for (int j = 0; j < hierarchy->cols; ++j) {
vecHierarchy.push_back(hierarchy->at<cv::Vec4i>(0, j));
}
}
cv::drawContours(*src, *contours, contourIdx, c, thickness, lineType, vecHierarchy, maxLevel, offsetPt);
}
void Sobel(Mat src, Mat dst, int ddepth, int dx, int dy, int ksize, double scale, double delta, int borderType) {
cv::Sobel(*src, *dst, ddepth, dx, dy, ksize, scale, delta, borderType);
}
void SpatialGradient(Mat src, Mat dx, Mat dy, int ksize, int borderType) {
cv::spatialGradient(*src, *dx, *dy, ksize, borderType);
}
void Remap(Mat src, Mat dst, Mat map1, Mat map2, int interpolation, int borderMode, Scalar borderValue) {
cv::Scalar c = cv::Scalar(borderValue.val1, borderValue.val2, borderValue.val3, borderValue.val4);
cv::remap(*src, *dst, *map1, *map2, interpolation, borderMode, c);
}
void Filter2D(Mat src, Mat dst, int ddepth, Mat kernel, Point anchor, double delta, int borderType) {
cv::Point anchorPt(anchor.x, anchor.y);
cv::filter2D(*src, *dst, ddepth, *kernel, anchorPt, delta, borderType);
}
void SepFilter2D(Mat src, Mat dst, int ddepth, Mat kernelX, Mat kernelY, Point anchor, double delta, int borderType) {
cv::Point anchorPt(anchor.x, anchor.y);
cv::sepFilter2D(*src, *dst, ddepth, *kernelX, *kernelY, anchorPt, delta, borderType);
}
void LogPolar(Mat src, Mat dst, Point center, double m, int flags) {
cv::Point2f centerPt(center.x, center.y);
cv::logPolar(*src, *dst, centerPt, m, flags);
}
void FitLine(PointVector pts, Mat line, int distType, double param, double reps, double aeps) {
cv::fitLine(*pts, *line, distType, param, reps, aeps);
}
void LinearPolar(Mat src, Mat dst, Point center, double maxRadius, int flags) {
cv::Point2f centerPt(center.x, center.y);
cv::linearPolar(*src, *dst, centerPt, maxRadius, flags);
}
CLAHE CLAHE_Create() {
return new cv::Ptr<cv::CLAHE>(cv::createCLAHE());
}
CLAHE CLAHE_CreateWithParams(double clipLimit, Size tileGridSize) {
cv::Size sz(tileGridSize.width, tileGridSize.height);
return new cv::Ptr<cv::CLAHE>(cv::createCLAHE(clipLimit, sz));
}
void CLAHE_Close(CLAHE c) {
delete c;
}
void CLAHE_Apply(CLAHE c, Mat src, Mat dst) {
(*c)->apply(*src, *dst);
}
void InvertAffineTransform(Mat src, Mat dst) {
cv::invertAffineTransform(*src, *dst);
}
Point2f PhaseCorrelate(Mat src1, Mat src2, Mat window, double* response) {
cv::Point2d result = cv::phaseCorrelate(*src1, *src2, *window, response);
Point2f result2f = {
.x = float(result.x),
.y = float(result.y),
};
return result2f;
}
void Mat_Accumulate(Mat src, Mat dst) {
cv::accumulate(*src, *dst);
}
void Mat_AccumulateWithMask(Mat src, Mat dst, Mat mask) {
cv::accumulate(*src, *dst, *mask);
}
void Mat_AccumulateSquare(Mat src, Mat dst) {
cv::accumulateSquare(*src, *dst);
}
void Mat_AccumulateSquareWithMask(Mat src, Mat dst, Mat mask) {
cv::accumulateSquare(*src, *dst, *mask);
}
void Mat_AccumulateProduct(Mat src1, Mat src2, Mat dst) {
cv::accumulateProduct(*src1, *src2, *dst);
}
void Mat_AccumulateProductWithMask(Mat src1, Mat src2, Mat dst, Mat mask) {
cv::accumulateProduct(*src1, *src2, *dst, *mask);
}
void Mat_AccumulatedWeighted(Mat src, Mat dst, double alpha) {
cv::accumulateWeighted(*src, *dst, alpha);
}
void Mat_AccumulatedWeightedWithMask(Mat src, Mat dst, double alpha, Mat mask) {
cv::accumulateWeighted(*src, *dst, alpha, *mask);
}
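
From Go, these shims back the usual preprocessing chain. Below is a rough blur/edge/contour pipeline under the vendored gocv API; all parameter values are arbitrary.

package main

import (
	"fmt"
	"image"

	"gocv.io/x/gocv"
)

func main() {
	src := gocv.IMRead("input.jpg", gocv.IMReadGrayScale)
	defer src.Close()

	// Smooth first to suppress noise, then detect edges.
	blurred := gocv.NewMat()
	defer blurred.Close()
	gocv.GaussianBlur(src, &blurred, image.Pt(5, 5), 0, 0, gocv.BorderDefault)

	edges := gocv.NewMat()
	defer edges.Close()
	gocv.Canny(blurred, &edges, 50, 150)

	// Extract external contours and report the largest area.
	contours := gocv.FindContours(edges, gocv.RetrievalExternal, gocv.ChainApproxSimple)
	defer contours.Close()
	var maxArea float64
	for i := 0; i < contours.Size(); i++ {
		if a := gocv.ContourArea(contours.At(i)); a > maxArea {
			maxArea = a
		}
	}
	fmt.Printf("%d contours, largest area %.1f\n", contours.Size(), maxArea)
}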

2240
vendor/gocv.io/x/gocv/imgproc.go generated vendored Normal file

File diff suppressed because it is too large

146
vendor/gocv.io/x/gocv/imgproc.h generated vendored Normal file
View File

@ -0,0 +1,146 @@
#ifndef _OPENCV3_IMGPROC_H_
#define _OPENCV3_IMGPROC_H_
#include <stdbool.h>
#ifdef __cplusplus
#include <opencv2/opencv.hpp>
extern "C" {
#endif
#ifdef __cplusplus
typedef cv::Ptr<cv::CLAHE>* CLAHE;
#else
typedef void* CLAHE;
#endif
#include "core.h"
double ArcLength(PointVector curve, bool is_closed);
PointVector ApproxPolyDP(PointVector curve, double epsilon, bool closed);
void CvtColor(Mat src, Mat dst, int code);
void EqualizeHist(Mat src, Mat dst);
void CalcHist(struct Mats mats, IntVector chans, Mat mask, Mat hist, IntVector sz, FloatVector rng, bool acc);
void CalcBackProject(struct Mats mats, IntVector chans, Mat hist, Mat backProject, FloatVector rng, bool uniform);
double CompareHist(Mat hist1, Mat hist2, int method);
void ConvexHull(PointVector points, Mat hull, bool clockwise, bool returnPoints);
void ConvexityDefects(PointVector points, Mat hull, Mat result);
void BilateralFilter(Mat src, Mat dst, int d, double sc, double ss);
void Blur(Mat src, Mat dst, Size ps);
void BoxFilter(Mat src, Mat dst, int ddepth, Size ps);
void SqBoxFilter(Mat src, Mat dst, int ddepth, Size ps);
void Dilate(Mat src, Mat dst, Mat kernel);
void DilateWithParams(Mat src, Mat dst, Mat kernel, Point anchor, int iterations, int borderType, Scalar borderValue);
void DistanceTransform(Mat src, Mat dst, Mat labels, int distanceType, int maskSize, int labelType);
void Erode(Mat src, Mat dst, Mat kernel);
void ErodeWithParams(Mat src, Mat dst, Mat kernel, Point anchor, int iterations, int borderType);
void MatchTemplate(Mat image, Mat templ, Mat result, int method, Mat mask);
struct Moment Moments(Mat src, bool binaryImage);
void PyrDown(Mat src, Mat dst, Size dstsize, int borderType);
void PyrUp(Mat src, Mat dst, Size dstsize, int borderType);
struct Rect BoundingRect(PointVector pts);
void BoxPoints(RotatedRect rect, Mat boxPts);
double ContourArea(PointVector pts);
struct RotatedRect MinAreaRect(PointVector pts);
struct RotatedRect FitEllipse(PointVector pts);
void MinEnclosingCircle(PointVector pts, Point2f* center, float* radius);
PointsVector FindContours(Mat src, Mat hierarchy, int mode, int method);
double PointPolygonTest(PointVector pts, Point pt, bool measureDist);
int ConnectedComponents(Mat src, Mat dst, int connectivity, int ltype, int ccltype);
int ConnectedComponentsWithStats(Mat src, Mat labels, Mat stats, Mat centroids, int connectivity, int ltype, int ccltype);
void GaussianBlur(Mat src, Mat dst, Size ps, double sX, double sY, int bt);
Mat GetGaussianKernel(int ksize, double sigma, int ktype);
void Laplacian(Mat src, Mat dst, int dDepth, int kSize, double scale, double delta, int borderType);
void Scharr(Mat src, Mat dst, int dDepth, int dx, int dy, double scale, double delta,
int borderType);
Mat GetStructuringElement(int shape, Size ksize);
Scalar MorphologyDefaultBorderValue();
void MorphologyEx(Mat src, Mat dst, int op, Mat kernel);
void MorphologyExWithParams(Mat src, Mat dst, int op, Mat kernel, Point pt, int iterations, int borderType);
void MedianBlur(Mat src, Mat dst, int ksize);
void Canny(Mat src, Mat edges, double t1, double t2);
void CornerSubPix(Mat img, Mat corners, Size winSize, Size zeroZone, TermCriteria criteria);
void GoodFeaturesToTrack(Mat img, Mat corners, int maxCorners, double quality, double minDist);
void GrabCut(Mat img, Mat mask, Rect rect, Mat bgdModel, Mat fgdModel, int iterCount, int mode);
void HoughCircles(Mat src, Mat circles, int method, double dp, double minDist);
void HoughCirclesWithParams(Mat src, Mat circles, int method, double dp, double minDist,
double param1, double param2, int minRadius, int maxRadius);
void HoughLines(Mat src, Mat lines, double rho, double theta, int threshold);
void HoughLinesP(Mat src, Mat lines, double rho, double theta, int threshold);
void HoughLinesPWithParams(Mat src, Mat lines, double rho, double theta, int threshold, double minLineLength, double maxLineGap);
void HoughLinesPointSet(Mat points, Mat lines, int lines_max, int threshold,
double min_rho, double max_rho, double rho_step,
double min_theta, double max_theta, double theta_step);
void Integral(Mat src, Mat sum, Mat sqsum, Mat tilted);
double Threshold(Mat src, Mat dst, double thresh, double maxvalue, int typ);
void AdaptiveThreshold(Mat src, Mat dst, double maxValue, int adaptiveTyp, int typ, int blockSize,
double c);
void ArrowedLine(Mat img, Point pt1, Point pt2, Scalar color, int thickness);
void Circle(Mat img, Point center, int radius, Scalar color, int thickness);
void CircleWithParams(Mat img, Point center, int radius, Scalar color, int thickness, int lineType, int shift);
void Ellipse(Mat img, Point center, Point axes, double angle, double
startAngle, double endAngle, Scalar color, int thickness);
void EllipseWithParams(Mat img, Point center, Point axes, double angle, double
startAngle, double endAngle, Scalar color, int thickness, int lineType, int shift);
void Line(Mat img, Point pt1, Point pt2, Scalar color, int thickness);
void Rectangle(Mat img, Rect rect, Scalar color, int thickness);
void RectangleWithParams(Mat img, Rect rect, Scalar color, int thickness, int lineType, int shift);
void FillPoly(Mat img, PointsVector points, Scalar color);
void FillPolyWithParams(Mat img, PointsVector points, Scalar color, int lineType, int shift, Point offset);
void Polylines(Mat img, PointsVector points, bool isClosed, Scalar color, int thickness);
struct Size GetTextSize(const char* text, int fontFace, double fontScale, int thickness);
struct Size GetTextSizeWithBaseline(const char* text, int fontFace, double fontScale, int thickness, int* baseline);
void PutText(Mat img, const char* text, Point org, int fontFace, double fontScale,
Scalar color, int thickness);
void PutTextWithParams(Mat img, const char* text, Point org, int fontFace, double fontScale,
Scalar color, int thickness, int lineType, bool bottomLeftOrigin);
void Resize(Mat src, Mat dst, Size sz, double fx, double fy, int interp);
void GetRectSubPix(Mat src, Size patchSize, Point center, Mat dst);
Mat GetRotationMatrix2D(Point center, double angle, double scale);
void WarpAffine(Mat src, Mat dst, Mat rot_mat, Size dsize);
void WarpAffineWithParams(Mat src, Mat dst, Mat rot_mat, Size dsize, int flags, int borderMode,
Scalar borderValue);
void WarpPerspective(Mat src, Mat dst, Mat m, Size dsize);
void WarpPerspectiveWithParams(Mat src, Mat dst, Mat rot_mat, Size dsize, int flags, int borderMode,
Scalar borderValue);
void Watershed(Mat image, Mat markers);
void ApplyColorMap(Mat src, Mat dst, int colormap);
void ApplyCustomColorMap(Mat src, Mat dst, Mat colormap);
Mat GetPerspectiveTransform(PointVector src, PointVector dst);
Mat GetPerspectiveTransform2f(Point2fVector src, Point2fVector dst);
Mat GetAffineTransform(PointVector src, PointVector dst);
Mat GetAffineTransform2f(Point2fVector src, Point2fVector dst);
Mat FindHomography(Mat src, Mat dst, int method, double ransacReprojThreshold, Mat mask, const int maxIters, const double confidence) ;
void DrawContours(Mat src, PointsVector contours, int contourIdx, Scalar color, int thickness);
void DrawContoursWithParams(Mat src, PointsVector contours, int contourIdx, Scalar color, int thickness, int lineType, Mat hierarchy, int maxLevel, Point offset);
void Sobel(Mat src, Mat dst, int ddepth, int dx, int dy, int ksize, double scale, double delta, int borderType);
void SpatialGradient(Mat src, Mat dx, Mat dy, int ksize, int borderType);
void Remap(Mat src, Mat dst, Mat map1, Mat map2, int interpolation, int borderMode, Scalar borderValue);
void Filter2D(Mat src, Mat dst, int ddepth, Mat kernel, Point anchor, double delta, int borderType);
void SepFilter2D(Mat src, Mat dst, int ddepth, Mat kernelX, Mat kernelY, Point anchor, double delta, int borderType);
void LogPolar(Mat src, Mat dst, Point center, double m, int flags);
void FitLine(PointVector pts, Mat line, int distType, double param, double reps, double aeps);
void LinearPolar(Mat src, Mat dst, Point center, double maxRadius, int flags);
bool ClipLine(Size imgSize, Point pt1, Point pt2);
CLAHE CLAHE_Create();
CLAHE CLAHE_CreateWithParams(double clipLimit, Size tileGridSize);
void CLAHE_Close(CLAHE c);
void CLAHE_Apply(CLAHE c, Mat src, Mat dst);
void InvertAffineTransform(Mat src, Mat dst);
Point2f PhaseCorrelate(Mat src1, Mat src2, Mat window, double* response);
void Mat_Accumulate(Mat src, Mat dst);
void Mat_AccumulateWithMask(Mat src, Mat dst, Mat mask);
void Mat_AccumulateSquare(Mat src, Mat dst);
void Mat_AccumulateSquareWithMask(Mat src, Mat dst, Mat mask);
void Mat_AccumulateProduct(Mat src1, Mat src2, Mat dst);
void Mat_AccumulateProductWithMask(Mat src1, Mat src2, Mat dst, Mat mask);
void Mat_AccumulatedWeighted(Mat src, Mat dst, double alpha);
void Mat_AccumulatedWeightedWithMask(Mat src, Mat dst, double alpha, Mat mask);
#ifdef __cplusplus
}
#endif
#endif //_OPENCV3_IMGPROC_H_

351
vendor/gocv.io/x/gocv/imgproc_colorcodes.go generated vendored Normal file
View File

@ -0,0 +1,351 @@
package gocv
// ColorConversionCode is a color conversion code used on Mat.
//
// For further details, please see:
// http://docs.opencv.org/master/d7/d1b/group__imgproc__misc.html#ga4e0972be5de079fed4e3a10e24ef5ef0
//
type ColorConversionCode int
const (
// ColorBGRToBGRA adds alpha channel to BGR image.
ColorBGRToBGRA ColorConversionCode = 0
// ColorBGRAToBGR removes alpha channel from BGR image.
ColorBGRAToBGR ColorConversionCode = 1
// ColorBGRToRGBA converts from BGR to RGB with alpha channel.
ColorBGRToRGBA ColorConversionCode = 2
// ColorRGBAToBGR converts from RGB with alpha to BGR color space.
ColorRGBAToBGR ColorConversionCode = 3
// ColorBGRToRGB converts from BGR to RGB without alpha channel.
ColorBGRToRGB ColorConversionCode = 4
// ColorBGRAToRGBA converts from BGR with alpha channel
// to RGB with alpha channel.
ColorBGRAToRGBA ColorConversionCode = 5
// ColorBGRToGray converts from BGR to grayscale.
ColorBGRToGray ColorConversionCode = 6
// ColorRGBToGray converts from RGB to grayscale.
ColorRGBToGray ColorConversionCode = 7
// ColorGrayToBGR converts from grayscale to BGR.
ColorGrayToBGR ColorConversionCode = 8
// ColorGrayToBGRA converts from grayscale to BGR with alpha channel.
ColorGrayToBGRA ColorConversionCode = 9
// ColorBGRAToGray converts from BGR with alpha channel to grayscale.
ColorBGRAToGray ColorConversionCode = 10
// ColorRGBAToGray converts from RGB with alpha channel to grayscale.
ColorRGBAToGray ColorConversionCode = 11
// ColorBGRToBGR565 converts from BGR to BGR565 (16-bit images).
ColorBGRToBGR565 ColorConversionCode = 12
// ColorRGBToBGR565 converts from RGB to BGR565 (16-bit images).
ColorRGBToBGR565 ColorConversionCode = 13
// ColorBGR565ToBGR converts from BGR565 (16-bit images) to BGR.
ColorBGR565ToBGR ColorConversionCode = 14
// ColorBGR565ToRGB converts from BGR565 (16-bit images) to RGB.
ColorBGR565ToRGB ColorConversionCode = 15
// ColorBGRAToBGR565 converts from BGRA (with alpha channel)
// to BGR565 (16-bit images).
ColorBGRAToBGR565 ColorConversionCode = 16
// ColorRGBAToBGR565 converts from RGBA (with alpha channel)
// to BGR565 (16-bit images).
ColorRGBAToBGR565 ColorConversionCode = 17
// ColorBGR565ToBGRA converts from BGR565 (16-bit images)
// to BGRA (with alpha channel).
ColorBGR565ToBGRA ColorConversionCode = 18
// ColorBGR565ToRGBA converts from BGR565 (16-bit images)
// to RGBA (with alpha channel).
ColorBGR565ToRGBA ColorConversionCode = 19
// ColorGrayToBGR565 converts from grayscale
// to BGR565 (16-bit images).
ColorGrayToBGR565 ColorConversionCode = 20
// ColorBGR565ToGray converts from BGR565 (16-bit images)
// to grayscale.
ColorBGR565ToGray ColorConversionCode = 21
// ColorBGRToBGR555 converts from BGR to BGR555 (16-bit images).
ColorBGRToBGR555 ColorConversionCode = 22
// ColorRGBToBGR555 converts from RGB to BGR555 (16-bit images).
ColorRGBToBGR555 ColorConversionCode = 23
// ColorBGR555ToBGR converts from BGR555 (16-bit images) to BGR.
ColorBGR555ToBGR ColorConversionCode = 24
// ColorBGR555ToRGB converts from BGR555 (16-bit images) to RGB.
ColorBGR555ToRGB ColorConversionCode = 25
// ColorBGRAToBGR555 converts from BGRA (with alpha channel)
// to BGR555 (16-bit images).
ColorBGRAToBGR555 ColorConversionCode = 26
// ColorRGBAToBGR555 converts from RGBA (with alpha channel)
// to BGR555 (16-bit images).
ColorRGBAToBGR555 ColorConversionCode = 27
// ColorBGR555ToBGRA converts from BGR555 (16-bit images)
// to BGRA (with alpha channel).
ColorBGR555ToBGRA ColorConversionCode = 28
// ColorBGR555ToRGBA converts from BGR555 (16-bit images)
// to RGBA (with alpha channel).
ColorBGR555ToRGBA ColorConversionCode = 29
// ColorGrayToBGR555 converts from grayscale to BGR555 (16-bit images).
ColorGrayToBGR555 ColorConversionCode = 30
// ColorBGR555ToGRAY converts from BGR555 (16-bit images) to grayscale.
ColorBGR555ToGRAY ColorConversionCode = 31
// ColorBGRToXYZ converts from BGR to CIE XYZ.
ColorBGRToXYZ ColorConversionCode = 32
// ColorRGBToXYZ converts from RGB to CIE XYZ.
ColorRGBToXYZ ColorConversionCode = 33
// ColorXYZToBGR converts from CIE XYZ to BGR.
ColorXYZToBGR ColorConversionCode = 34
// ColorXYZToRGB converts from CIE XYZ to RGB.
ColorXYZToRGB ColorConversionCode = 35
// ColorBGRToYCrCb converts from BGR to luma-chroma (aka YCC).
ColorBGRToYCrCb ColorConversionCode = 36
// ColorRGBToYCrCb converts from RGB to luma-chroma (aka YCC).
ColorRGBToYCrCb ColorConversionCode = 37
// ColorYCrCbToBGR converts from luma-chroma (aka YCC) to BGR.
ColorYCrCbToBGR ColorConversionCode = 38
// ColorYCrCbToRGB converts from luma-chroma (aka YCC) to RGB.
ColorYCrCbToRGB ColorConversionCode = 39
// ColorBGRToHSV converts from BGR to HSV (hue saturation value).
ColorBGRToHSV ColorConversionCode = 40
// ColorRGBToHSV converts from RGB to HSV (hue saturation value).
ColorRGBToHSV ColorConversionCode = 41
// ColorBGRToLab converts from BGR to CIE Lab.
ColorBGRToLab ColorConversionCode = 44
// ColorRGBToLab converts from RGB to CIE Lab.
ColorRGBToLab ColorConversionCode = 45
// ColorBGRToLuv converts from BGR to CIE Luv.
ColorBGRToLuv ColorConversionCode = 50
// ColorRGBToLuv converts from RGB to CIE Luv.
ColorRGBToLuv ColorConversionCode = 51
// ColorBGRToHLS converts from BGR to HLS (hue lightness saturation).
ColorBGRToHLS ColorConversionCode = 52
// ColorRGBToHLS converts from RGB to HLS (hue lightness saturation).
ColorRGBToHLS ColorConversionCode = 53
// ColorHSVToBGR converts from HSV (hue saturation value) to BGR.
ColorHSVToBGR ColorConversionCode = 54
// ColorHSVToRGB converts from HSV (hue saturation value) to RGB.
ColorHSVToRGB ColorConversionCode = 55
// ColorLabToBGR converts from CIE Lab to BGR.
ColorLabToBGR ColorConversionCode = 56
// ColorLabToRGB converts from CIE Lab to RGB.
ColorLabToRGB ColorConversionCode = 57
// ColorLuvToBGR converts from CIE Luv to BGR.
ColorLuvToBGR ColorConversionCode = 58
// ColorLuvToRGB converts from CIE Luv to RGB.
ColorLuvToRGB ColorConversionCode = 59
// ColorHLSToBGR converts from HLS (hue lightness saturation) to BGR.
ColorHLSToBGR ColorConversionCode = 60
// ColorHLSToRGB converts from HLS (hue lightness saturation) to RGB.
ColorHLSToRGB ColorConversionCode = 61
// ColorBGRToHSVFull converts from BGR to HSV (hue saturation value) full.
ColorBGRToHSVFull ColorConversionCode = 66
// ColorRGBToHSVFull converts from RGB to HSV (hue saturation value) full.
ColorRGBToHSVFull ColorConversionCode = 67
// ColorBGRToHLSFull converts from BGR to HLS (hue lightness saturation) full.
ColorBGRToHLSFull ColorConversionCode = 68
// ColorRGBToHLSFull converts from RGB to HLS (hue lightness saturation) full.
ColorRGBToHLSFull ColorConversionCode = 69
// ColorHSVToBGRFull converts from HSV (hue saturation value) to BGR full.
ColorHSVToBGRFull ColorConversionCode = 70
// ColorHSVToRGBFull converts from HSV (hue saturation value) to RGB full.
ColorHSVToRGBFull ColorConversionCode = 71
// ColorHLSToBGRFull converts from HLS (hue lightness saturation) to BGR full.
ColorHLSToBGRFull ColorConversionCode = 72
// ColorHLSToRGBFull converts from HLS (hue lightness saturation) to RGB full.
ColorHLSToRGBFull ColorConversionCode = 73
// ColorLBGRToLab converts from LBGR to CIE Lab.
ColorLBGRToLab ColorConversionCode = 74
// ColorLRGBToLab converts from LRGB to CIE Lab.
ColorLRGBToLab ColorConversionCode = 75
// ColorLBGRToLuv converts from LBGR to CIE Luv.
ColorLBGRToLuv ColorConversionCode = 76
// ColorLRGBToLuv converts from LRGB to CIE Luv.
ColorLRGBToLuv ColorConversionCode = 77
// ColorLabToLBGR converts from CIE Lab to LBGR.
ColorLabToLBGR ColorConversionCode = 78
// ColorLabToLRGB converts from CIE Lab to LRGB.
ColorLabToLRGB ColorConversionCode = 79
// ColorLuvToLBGR converts from CIE Luv to LBGR.
ColorLuvToLBGR ColorConversionCode = 80
// ColorLuvToLRGB converts from CIE Luv to LRGB.
ColorLuvToLRGB ColorConversionCode = 81
// ColorBGRToYUV converts from BGR to YUV.
ColorBGRToYUV ColorConversionCode = 82
// ColorRGBToYUV converts from RGB to YUV.
ColorRGBToYUV ColorConversionCode = 83
// ColorYUVToBGR converts from YUV to BGR.
ColorYUVToBGR ColorConversionCode = 84
// ColorYUVToRGB converts from YUV to RGB.
ColorYUVToRGB ColorConversionCode = 85
// ColorYUVToRGBNV12 converts from YUV 4:2:0 to RGB NV12.
ColorYUVToRGBNV12 ColorConversionCode = 90
// ColorYUVToBGRNV12 converts from YUV 4:2:0 to BGR NV12.
ColorYUVToBGRNV12 ColorConversionCode = 91
// ColorYUVToRGBNV21 converts from YUV 4:2:0 to RGB NV21.
ColorYUVToRGBNV21 ColorConversionCode = 92
// ColorYUVToBGRNV21 converts from YUV 4:2:0 to BGR NV21.
ColorYUVToBGRNV21 ColorConversionCode = 93
// ColorYUVToRGBANV12 converts from YUV 4:2:0 to RGBA NV12.
ColorYUVToRGBANV12 ColorConversionCode = 94
// ColorYUVToBGRANV12 converts from YUV 4:2:0 to BGRA NV12.
ColorYUVToBGRANV12 ColorConversionCode = 95
// ColorYUVToRGBANV21 converts from YUV 4:2:0 to RGBA NV21.
ColorYUVToRGBANV21 ColorConversionCode = 96
// ColorYUVToBGRANV21 converts from YUV 4:2:0 to BGRA NV21.
ColorYUVToBGRANV21 ColorConversionCode = 97
ColorYUVToRGBYV12 ColorConversionCode = 98
ColorYUVToBGRYV12 ColorConversionCode = 99
ColorYUVToRGBIYUV ColorConversionCode = 100
ColorYUVToBGRIYUV ColorConversionCode = 101
ColorYUVToRGBAYV12 ColorConversionCode = 102
ColorYUVToBGRAYV12 ColorConversionCode = 103
ColorYUVToRGBAIYUV ColorConversionCode = 104
ColorYUVToBGRAIYUV ColorConversionCode = 105
ColorYUVToGRAY420 ColorConversionCode = 106
// YUV 4:2:2 family to RGB
ColorYUVToRGBUYVY ColorConversionCode = 107
ColorYUVToBGRUYVY ColorConversionCode = 108
ColorYUVToRGBAUYVY ColorConversionCode = 111
ColorYUVToBGRAUYVY ColorConversionCode = 112
ColorYUVToRGBYUY2 ColorConversionCode = 115
ColorYUVToBGRYUY2 ColorConversionCode = 116
ColorYUVToRGBYVYU ColorConversionCode = 117
ColorYUVToBGRYVYU ColorConversionCode = 118
ColorYUVToRGBAYUY2 ColorConversionCode = 119
ColorYUVToBGRAYUY2 ColorConversionCode = 120
ColorYUVToRGBAYVYU ColorConversionCode = 121
ColorYUVToBGRAYVYU ColorConversionCode = 122
ColorYUVToGRAYUYVY ColorConversionCode = 123
ColorYUVToGRAYYUY2 ColorConversionCode = 124
// alpha premultiplication
ColorRGBATomRGBA ColorConversionCode = 125
ColormRGBAToRGBA ColorConversionCode = 126
// RGB to YUV 4:2:0 family
ColorRGBToYUVI420 ColorConversionCode = 127
ColorBGRToYUVI420 ColorConversionCode = 128
ColorRGBAToYUVI420 ColorConversionCode = 129
ColorBGRAToYUVI420 ColorConversionCode = 130
ColorRGBToYUVYV12 ColorConversionCode = 131
ColorBGRToYUVYV12 ColorConversionCode = 132
ColorRGBAToYUVYV12 ColorConversionCode = 133
ColorBGRAToYUVYV12 ColorConversionCode = 134
// Demosaicing
ColorBayerBGToBGR ColorConversionCode = 46
ColorBayerGBToBGR ColorConversionCode = 47
ColorBayerRGToBGR ColorConversionCode = 48
ColorBayerGRToBGR ColorConversionCode = 49
ColorBayerBGToGRAY ColorConversionCode = 86
ColorBayerGBToGRAY ColorConversionCode = 87
ColorBayerRGToGRAY ColorConversionCode = 88
ColorBayerGRToGRAY ColorConversionCode = 89
// Demosaicing using Variable Number of Gradients
ColorBayerBGToBGRVNG ColorConversionCode = 62
ColorBayerGBToBGRVNG ColorConversionCode = 63
ColorBayerRGToBGRVNG ColorConversionCode = 64
ColorBayerGRToBGRVNG ColorConversionCode = 65
// Edge-Aware Demosaicing
ColorBayerBGToBGREA ColorConversionCode = 135
ColorBayerGBToBGREA ColorConversionCode = 136
ColorBayerRGToBGREA ColorConversionCode = 137
ColorBayerGRToBGREA ColorConversionCode = 138
// Demosaicing with alpha channel
ColorBayerBGToBGRA ColorConversionCode = 139
ColorBayerGBToBGRA ColorConversionCode = 140
ColorBayerRGToBGRA ColorConversionCode = 141
ColorBayerGRToBGRA ColorConversionCode = 142
ColorCOLORCVTMAX ColorConversionCode = 143
)
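// A minimal usage sketch: a ColorConversionCode is passed to gocv.CvtColor.
// Assuming an image file "input.jpg" exists (the path is illustrative):
//
// img := gocv.IMRead("input.jpg", gocv.IMReadColor)
// defer img.Close()
// gray := gocv.NewMat()
// defer gray.Close()
// gocv.CvtColor(img, &gray, gocv.ColorBGRToGray)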

303
vendor/gocv.io/x/gocv/imgproc_colorcodes_string.go generated vendored Normal file
View File

@ -0,0 +1,303 @@
package gocv
func (c ColorConversionCode) String() string {
switch c {
case ColorBGRToBGRA:
return "color-bgr-to-bgra"
case ColorBGRAToBGR:
return "color-bgra-to-bgr"
case ColorBGRToRGBA:
return "color-bgr-to-rgba"
case ColorRGBAToBGR:
return "color-rgba-to-bgr"
case ColorBGRToRGB:
return "color-bgr-to-rgb"
case ColorBGRAToRGBA:
return "color-bgra-to-rgba"
case ColorBGRToGray:
return "color-bgr-to-gray"
case ColorRGBToGray:
return "color-rgb-to-gray"
case ColorGrayToBGR:
return "color-gray-to-bgr"
case ColorGrayToBGRA:
return "color-gray-to-bgra"
case ColorBGRAToGray:
return "color-bgra-to-gray"
case ColorRGBAToGray:
return "color-rgba-to-gray"
case ColorBGRToBGR565:
return "color-bgr-to-bgr565"
case ColorRGBToBGR565:
return "color-rgb-to-bgr565"
case ColorBGR565ToBGR:
return "color-bgr565-to-bgr"
case ColorBGR565ToRGB:
return "color-bgr565-to-rgb"
case ColorBGRAToBGR565:
return "color-bgra-to-bgr565"
case ColorRGBAToBGR565:
return "color-rgba-to-bgr565"
case ColorBGR565ToBGRA:
return "color-bgr565-to-bgra"
case ColorBGR565ToRGBA:
return "color-bgr565-to-rgba"
case ColorGrayToBGR565:
return "color-gray-to-bgr565"
case ColorBGR565ToGray:
return "color-bgr565-to-gray"
case ColorBGRToBGR555:
return "color-bgr-to-bgr555"
case ColorRGBToBGR555:
return "color-rgb-to-bgr555"
case ColorBGR555ToBGR:
return "color-bgr555-to-bgr"
case ColorBGR555ToRGB:
return "color-bgr555-to-rgb"
case ColorBGRAToBGR555:
return "color-bgra-to-bgr555"
case ColorRGBAToBGR555:
return "color-rgba-to-bgr555"
case ColorBGR555ToBGRA:
return "color-bgr555-to-bgra"
case ColorBGR555ToRGBA:
return "color-bgr555-to-rgba"
case ColorGrayToBGR555:
return "color-gray-to-bgr555"
case ColorBGR555ToGRAY:
return "color-bgr555-to-gray"
case ColorBGRToXYZ:
return "color-bgr-to-xyz"
case ColorRGBToXYZ:
return "color-rgb-to-xyz"
case ColorXYZToBGR:
return "color-xyz-to-bgr"
case ColorXYZToRGB:
return "color-xyz-to-rgb"
case ColorBGRToYCrCb:
return "color-bgr-to-ycrcb"
case ColorRGBToYCrCb:
return "color-rgb-to-ycrcb"
case ColorYCrCbToBGR:
return "color-ycrcb-to-bgr"
case ColorYCrCbToRGB:
return "color-ycrcb-to-rgb"
case ColorBGRToHSV:
return "color-bgr-to-hsv"
case ColorRGBToHSV:
return "color-rgb-to-hsv"
case ColorBGRToLab:
return "color-bgr-to-lab"
case ColorRGBToLab:
return "color-rgb-to-lab"
case ColorBGRToLuv:
return "color-bgr-to-luv"
case ColorRGBToLuv:
return "color-rgb-to-luv"
case ColorBGRToHLS:
return "color-bgr-to-hls"
case ColorRGBToHLS:
return "color-rgb-to-hls"
case ColorHSVToBGR:
return "color-hsv-to-bgr"
case ColorHSVToRGB:
return "color-hsv-to-rgb"
case ColorLabToBGR:
return "color-lab-to-bgr"
case ColorLabToRGB:
return "color-lab-to-rgb"
case ColorLuvToBGR:
return "color-luv-to-bgr"
case ColorLuvToRGB:
return "color-luv-to-rgb"
case ColorHLSToBGR:
return "color-hls-to-bgr"
case ColorHLSToRGB:
return "color-hls-to-rgb"
case ColorBGRToHSVFull:
return "color-bgr-to-hsv-full"
case ColorRGBToHSVFull:
return "color-rgb-to-hsv-full"
case ColorBGRToHLSFull:
return "color-bgr-to-hls-full"
case ColorRGBToHLSFull:
return "color-rgb-to-hls-full"
case ColorHSVToBGRFull:
return "color-hsv-to-bgr-full"
case ColorHSVToRGBFull:
return "color-hsv-to-rgb-full"
case ColorHLSToBGRFull:
return "color-hls-to-bgr-full"
case ColorHLSToRGBFull:
return "color-hls-to-rgb-full"
case ColorLBGRToLab:
return "color-lbgr-to-lab"
case ColorLRGBToLab:
return "color-lrgb-to-lab"
case ColorLBGRToLuv:
return "color-lbgr-to-luv"
case ColorLRGBToLuv:
return "color-lrgb-to-luv"
case ColorLabToLBGR:
return "color-lab-to-lbgr"
case ColorLabToLRGB:
return "color-lab-to-lrgb"
case ColorLuvToLBGR:
return "color-luv-to-lbgr"
case ColorLuvToLRGB:
return "color-luv-to-lrgb"
case ColorBGRToYUV:
return "color-bgr-to-yuv"
case ColorRGBToYUV:
return "color-rgb-to-yuv"
case ColorYUVToBGR:
return "color-yuv-to-bgr"
case ColorYUVToRGB:
return "color-yuv-to-rgb"
case ColorYUVToRGBNV12:
return "color-yuv-to-rgbnv12"
case ColorYUVToBGRNV12:
return "color-yuv-to-bgrnv12"
case ColorYUVToRGBNV21:
return "color-yuv-to-rgbnv21"
case ColorYUVToBGRNV21:
return "color-yuv-to-bgrnv21"
case ColorYUVToRGBANV12:
return "color-yuv-to-rgbanv12"
case ColorYUVToBGRANV12:
return "color-yuv-to-bgranv12"
case ColorYUVToRGBANV21:
return "color-yuv-to-rgbanv21"
case ColorYUVToBGRANV21:
return "color-yuv-to-bgranv21"
case ColorYUVToRGBYV12:
return "color-yuv-to-rgbyv12"
case ColorYUVToBGRYV12:
return "color-yuv-to-bgryv12"
case ColorYUVToRGBIYUV:
return "color-yuv-to-rgbiyuv"
case ColorYUVToBGRIYUV:
return "color-yuv-to-bgriyuv"
case ColorYUVToRGBAYV12:
return "color-yuv-to-rgbayv12"
case ColorYUVToBGRAYV12:
return "color-yuv-to-bgrayv12"
case ColorYUVToRGBAIYUV:
return "color-yuv-to-rgbaiyuv"
case ColorYUVToBGRAIYUV:
return "color-yuv-to-bgraiyuv"
case ColorYUVToGRAY420:
return "color-yuv-to-gray420"
case ColorYUVToRGBUYVY:
return "color-yuv-to-rgbuyvy"
case ColorYUVToBGRUYVY:
return "color-yuv-to-bgruyvy"
case ColorYUVToRGBAUYVY:
return "color-yuv-to-rgbauyvy"
case ColorYUVToBGRAUYVY:
return "color-yuv-to-bgrauyvy"
case ColorYUVToRGBYUY2:
return "color-yuv-to-rgbyuy2"
case ColorYUVToBGRYUY2:
return "color-yuv-to-bgryuy2"
case ColorYUVToRGBYVYU:
return "color-yuv-to-rgbyvyu"
case ColorYUVToBGRYVYU:
return "color-yuv-to-bgryvyu"
case ColorYUVToRGBAYUY2:
return "color-yuv-to-rgbayuy2"
case ColorYUVToBGRAYUY2:
return "color-yuv-to-bgrayuy2"
case ColorYUVToRGBAYVYU:
return "color-yuv-to-rgbayvyu"
case ColorYUVToBGRAYVYU:
return "color-yuv-to-bgrayvyu"
case ColorYUVToGRAYUYVY:
return "color-yuv-to-grayuyvy"
case ColorYUVToGRAYYUY2:
return "color-yuv-to-grayyuy2"
case ColorRGBATomRGBA:
return "color-rgba-to-mrgba"
case ColormRGBAToRGBA:
return "color-mrgba-to-rgba"
case ColorRGBToYUVI420:
return "color-rgb-to-yuvi420"
case ColorBGRToYUVI420:
return "color-bgr-to-yuvi420"
case ColorRGBAToYUVI420:
return "color-rgba-to-yuvi420"
case ColorBGRAToYUVI420:
return "color-bgra-to-yuvi420"
case ColorRGBToYUVYV12:
return "color-rgb-to-yuvyv12"
case ColorBGRToYUVYV12:
return "color-bgr-to-yuvyv12"
case ColorRGBAToYUVYV12:
return "color-rgba-to-yuvyv12"
case ColorBGRAToYUVYV12:
return "color-bgra-to-yuvyv12"
case ColorBayerBGToBGR:
return "color-bayer-bgt-to-bgr"
case ColorBayerGBToBGR:
return "color-bayer-gbt-to-bgr"
case ColorBayerRGToBGR:
return "color-bayer-rgt-to-bgr"
case ColorBayerGRToBGR:
return "color-bayer-grt-to-bgr"
case ColorBayerBGToGRAY:
return "color-bayer-bgt-to-gray"
case ColorBayerGBToGRAY:
return "color-bayer-gbt-to-gray"
case ColorBayerRGToGRAY:
return "color-bayer-rgt-to-gray"
case ColorBayerGRToGRAY:
return "color-bayer-grt-to-gray"
case ColorBayerBGToBGRVNG:
return "color-bayer-bgt-to-bgrvng"
case ColorBayerGBToBGRVNG:
return "color-bayer-gbt-to-bgrvng"
case ColorBayerRGToBGRVNG:
return "color-bayer-rgt-to-bgrvng"
case ColorBayerGRToBGRVNG:
return "color-bayer-grt-to-bgrvng"
case ColorBayerBGToBGREA:
return "color-bayer-bgt-to-bgrea"
case ColorBayerGBToBGREA:
return "color-bayer-gbt-to-bgrea"
case ColorBayerRGToBGREA:
return "color-bayer-rgt-to-bgrea"
case ColorBayerGRToBGREA:
return "color-bayer-grt-to-bgrea"
case ColorBayerBGToBGRA:
return "color-bayer-bgt-to-bgra"
case ColorBayerGBToBGRA:
return "color-bayer-gbt-to-bgra"
case ColorBayerRGToBGRA:
return "color-bayer-rgt-to-bgra"
case ColorBayerGRToBGRA:
return "color-bayer-grt-to-bgra"
case ColorCOLORCVTMAX:
return "color-color-cvt-max"
}
return ""
}

333
vendor/gocv.io/x/gocv/imgproc_string.go generated vendored Normal file
View File

@ -0,0 +1,333 @@
package gocv
func (c HistCompMethod) String() string {
switch c {
case HistCmpCorrel:
return "hist-cmp-correl"
case HistCmpChiSqr:
return "hist-cmp-chi-sqr"
case HistCmpIntersect:
return "hist-cmp-intersect"
case HistCmpBhattacharya:
return "hist-cmp-bhattacharya"
case HistCmpChiSqrAlt:
return "hist-cmp-chi-sqr-alt"
case HistCmpKlDiv:
return "hist-cmp-kl-div"
}
return ""
}
func (c DistanceTransformLabelTypes) String() string {
switch c {
case DistanceLabelCComp:
return "distance-label-ccomp"
}
return ""
}
func (c DistanceTransformMasks) String() string {
switch c {
case DistanceMask3:
return "distance-mask3"
}
return ""
}
func (c RetrievalMode) String() string {
switch c {
case RetrievalExternal:
return "retrieval-external"
case RetrievalList:
return "retrieval-list"
case RetrievalCComp:
return "retrieval-ccomp"
case RetrievalTree:
return "retrieval-tree"
case RetrievalFloodfill:
return "retrieval-floodfill"
}
return ""
}
func (c ContourApproximationMode) String() string {
switch c {
case ChainApproxNone:
return "chain-approx-none"
case ChainApproxSimple:
return "chain-approx-simple"
case ChainApproxTC89L1:
return "chain-approx-tc89l1"
case ChainApproxTC89KCOS:
return "chain-approx-tc89kcos"
}
return ""
}
func (c ConnectedComponentsAlgorithmType) String() string {
switch c {
case CCL_WU:
return "ccl-wu"
case CCL_DEFAULT:
return "ccl-default"
case CCL_GRANA:
return "ccl-grana"
}
return ""
}
func (c ConnectedComponentsTypes) String() string {
switch c {
case CC_STAT_LEFT:
return "cc-stat-left"
case CC_STAT_TOP:
return "cc-stat-top"
case CC_STAT_WIDTH:
return "cc-stat-width"
case CC_STAT_AREA:
return "cc-stat-area"
case CC_STAT_MAX:
return "cc-stat-max"
case CC_STAT_HEIGHT:
return "cc-stat-height"
}
return ""
}
func (c TemplateMatchMode) String() string {
switch c {
case TmSqdiff:
return "tm-sq-diff"
case TmSqdiffNormed:
return "tm-sq-diff-normed"
case TmCcorr:
return "tm-ccorr"
case TmCcorrNormed:
return "tm-ccorr-normed"
case TmCcoeff:
return "tm-ccoeff"
case TmCcoeffNormed:
return "tm-ccoeff-normed"
}
return ""
}
func (c MorphShape) String() string {
switch c {
case MorphRect:
return "morph-rect"
case MorphCross:
return "morph-cross"
case MorphEllipse:
return "morph-ellispe"
}
return ""
}
func (c MorphType) String() string {
switch c {
case MorphErode:
return "morph-erode"
case MorphDilate:
return "morph-dilate"
case MorphOpen:
return "morph-open"
case MorphClose:
return "morph-close"
case MorphGradient:
return "morph-gradient"
case MorphTophat:
return "morph-tophat"
case MorphBlackhat:
return "morph-blackhat"
case MorphHitmiss:
return "morph-hitmiss"
}
return ""
}
func (c BorderType) String() string {
switch c {
case BorderConstant:
return "border-constant"
case BorderReplicate:
return "border-replicate"
case BorderReflect:
return "border-reflect"
case BorderWrap:
return "border-wrap"
case BorderTransparent:
return "border-transparent"
case BorderDefault:
return "border-default"
case BorderIsolated:
return "border-isolated"
}
return ""
}
func (c GrabCutMode) String() string {
switch c {
case GCInitWithRect:
return "gc-init-with-rect"
case GCInitWithMask:
return "gc-init-with-mask"
case GCEval:
return "gc-eval"
case GCEvalFreezeModel:
return "gc-eval-freeze-model"
}
return ""
}
func (c HoughMode) String() string {
switch c {
case HoughStandard:
return "hough-standard"
case HoughProbabilistic:
return "hough-probabilistic"
case HoughMultiScale:
return "hough-multi-scale"
case HoughGradient:
return "hough-gradient"
}
return ""
}
func (c ThresholdType) String() string {
switch c {
case ThresholdBinary:
return "threshold-binary"
case ThresholdBinaryInv:
return "threshold-binary-inv"
case ThresholdTrunc:
return "threshold-trunc"
case ThresholdToZero:
return "threshold-to-zero"
case ThresholdToZeroInv:
return "threshold-to-zero-inv"
case ThresholdMask:
return "threshold-mask"
case ThresholdOtsu:
return "threshold-otsu"
case ThresholdTriangle:
return "threshold-triangle"
}
return ""
}
func (c AdaptiveThresholdType) String() string {
switch c {
case AdaptiveThresholdMean:
return "adaptive-threshold-mean"
case AdaptiveThresholdGaussian:
return "adaptive-threshold-gaussian"
}
return ""
}
func (c HersheyFont) String() string {
switch c {
case FontHersheySimplex:
return "font-hershey-simplex"
case FontHersheyPlain:
return "font-hershey-plain"
case FontHersheyDuplex:
return "font-hershey-duplex"
case FontHersheyComplex:
return "font-hershey-complex"
case FontHersheyTriplex:
return "font-hershey-triplex"
case FontHersheyComplexSmall:
return "font-hershey-complex-small"
case FontHersheyScriptSimplex:
return "font-hershey-script-simplex"
case FontHersheyScriptComplex:
return "font-hershey-scipt-complex"
case FontItalic:
return "font-italic"
}
return ""
}
func (c LineType) String() string {
switch c {
case Filled:
return "filled"
case Line4:
return "line4"
case Line8:
return "line8"
case LineAA:
return "line-aa"
}
return ""
}
func (c InterpolationFlags) String() string {
switch c {
case InterpolationNearestNeighbor:
return "interpolation-nearest-neighbor"
case InterpolationLinear:
return "interpolation-linear"
case InterpolationCubic:
return "interpolation-cubic"
case InterpolationArea:
return "interpolation-area"
case InterpolationLanczos4:
return "interpolation-lanczos4"
case InterpolationMax:
return "interpolation-max"
}
return ""
}
func (c ColormapTypes) String() string {
switch c {
case ColormapAutumn:
return "colormap-autumn"
case ColormapBone:
return "colormap-bone"
case ColormapJet:
return "colormap-jet"
case ColormapWinter:
return "colormap-winter"
case ColormapRainbow:
return "colormap-rainbow"
case ColormapOcean:
return "colormap-ocean"
case ColormapSummer:
return "colormap-summer"
case ColormapSpring:
return "colormap-spring"
case ColormapCool:
return "colormap-cool"
case ColormapHsv:
return "colormap-hsv"
case ColormapPink:
return "colormap-pink"
case ColormapParula:
return "colormap-parula"
}
return ""
}
func (c DistanceTypes) String() string {
switch c {
case DistUser:
return "dist-user"
case DistL1:
return "dist-l1"
case DistL2:
return "dist-l2"
case DistL12:
return "dist-l12"
case DistFair:
return "dist-fair"
case DistWelsch:
return "dist-welsch"
case DistHuber:
return "dist-huber"
}
return ""
}

27
vendor/gocv.io/x/gocv/mat_noprofile.go generated vendored Normal file
View File

@ -0,0 +1,27 @@
// +build !matprofile
package gocv
/*
#include <stdlib.h>
#include "core.h"
*/
import "C"
// addMatToProfile does nothing if matprofile tag is not set.
func addMatToProfile(p C.Mat) {
return
}
// newMat returns a new Mat from a C Mat
func newMat(p C.Mat) Mat {
return Mat{p: p}
}
// Close the Mat object.
func (m *Mat) Close() error {
C.Mat_Close(m.p)
m.p = nil
m.d = nil
return nil
}
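// A typical lifecycle sketch: every created Mat should eventually be closed,
// usually with defer:
//
// m := gocv.NewMat()
// defer m.Close()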

83
vendor/gocv.io/x/gocv/mat_profile.go generated vendored Normal file
View File

@ -0,0 +1,83 @@
// +build matprofile
package gocv
/*
#include <stdlib.h>
#include "core.h"
*/
import (
"C"
)
import (
"runtime/pprof"
)
// MatProfile is a pprof.Profile that contains the stack traces of the creation
// sites of currently unclosed Mats. Every time a Mat is created, its stack
// trace is added to this profile, and every time the Mat is closed the trace is removed.
// In a program that is not leaking, this profile's count should not
// continuously increase and ideally when a program is terminated the count
// should be zero. You can get the count at any time with:
//
// gocv.MatProfile.Count()
//
// and you can display the current entries with:
//
// var b bytes.Buffer
// gocv.MatProfile.WriteTo(&b, 1)
// fmt.Print(b.String())
//
// This will display stack traces of where the unclosed Mats were instantiated.
// For example, the results could look something like this:
//
// 1 @ 0x4146a0c 0x4146a57 0x4119666 0x40bb18f 0x405a841
// # 0x4146a0b gocv.io/x/gocv.newMat+0x4b /go/src/gocv.io/x/gocv/core.go:120
// # 0x4146a56 gocv.io/x/gocv.NewMat+0x26 /go/src/gocv.io/x/gocv/core.go:126
// # 0x4119665 gocv.io/x/gocv.TestMat+0x25 /go/src/gocv.io/x/gocv/core_test.go:29
// # 0x40bb18e testing.tRunner+0xbe /usr/local/Cellar/go/1.11/libexec/src/testing/testing.go:827
//
// Furthermore, if the program is a long running process or if gocv is being used on a
// web server, it may be helpful to install the HTTP interface using:
//
// import _ "net/http/pprof"
//
// In order to include the MatProfile custom profiler, you MUST build or run your application
// or tests using the following build tag:
// -tags matprofile
//
// For more information, see the runtime/pprof package documentation.
var MatProfile *pprof.Profile
func init() {
profName := "gocv.io/x/gocv.Mat"
MatProfile = pprof.Lookup(profName)
if MatProfile == nil {
MatProfile = pprof.NewProfile(profName)
}
}
// addMatToProfile records Mat to the MatProfile.
func addMatToProfile(p C.Mat) {
MatProfile.Add(p, 1)
return
}
// newMat returns a new Mat from a C Mat and records it to the MatProfile.
func newMat(p C.Mat) Mat {
m := Mat{p: p}
MatProfile.Add(p, 1)
return m
}
// Close the Mat object.
func (m *Mat) Close() error {
// NOTE: The pointer must be removed from the profile before it is deleted to
// avoid a data race.
MatProfile.Remove(m.p)
C.Mat_Close(m.p)
m.p = nil
m.d = nil
return nil
}
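// A hedged leak-check sketch for tests (requires building with -tags matprofile):
//
// before := gocv.MatProfile.Count()
// m := gocv.NewMat()
// m.Close()
// if leaked := gocv.MatProfile.Count() - before; leaked != 0 {
// log.Printf("possible Mat leak: %d unclosed Mat(s)", leaked)
// }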

178
vendor/gocv.io/x/gocv/objdetect.cpp generated vendored Normal file
View File

@ -0,0 +1,178 @@
#include "objdetect.h"
// CascadeClassifier
CascadeClassifier CascadeClassifier_New() {
return new cv::CascadeClassifier();
}
void CascadeClassifier_Close(CascadeClassifier cs) {
delete cs;
}
int CascadeClassifier_Load(CascadeClassifier cs, const char* name) {
return cs->load(name);
}
struct Rects CascadeClassifier_DetectMultiScale(CascadeClassifier cs, Mat img) {
std::vector<cv::Rect> detected;
cs->detectMultiScale(*img, detected); // uses all default parameters
Rect* rects = new Rect[detected.size()];
for (size_t i = 0; i < detected.size(); ++i) {
Rect r = {detected[i].x, detected[i].y, detected[i].width, detected[i].height};
rects[i] = r;
}
Rects ret = {rects, (int)detected.size()};
return ret;
}
struct Rects CascadeClassifier_DetectMultiScaleWithParams(CascadeClassifier cs, Mat img,
double scale, int minNeighbors, int flags, Size minSize, Size maxSize) {
cv::Size minSz(minSize.width, minSize.height);
cv::Size maxSz(maxSize.width, maxSize.height);
std::vector<cv::Rect> detected;
cs->detectMultiScale(*img, detected, scale, minNeighbors, flags, minSz, maxSz);
Rect* rects = new Rect[detected.size()];
for (size_t i = 0; i < detected.size(); ++i) {
Rect r = {detected[i].x, detected[i].y, detected[i].width, detected[i].height};
rects[i] = r;
}
Rects ret = {rects, (int)detected.size()};
return ret;
}
// HOGDescriptor
HOGDescriptor HOGDescriptor_New() {
return new cv::HOGDescriptor();
}
void HOGDescriptor_Close(HOGDescriptor hog) {
delete hog;
}
int HOGDescriptor_Load(HOGDescriptor hog, const char* name) {
return hog->load(name);
}
struct Rects HOGDescriptor_DetectMultiScale(HOGDescriptor hog, Mat img) {
std::vector<cv::Rect> detected;
hog->detectMultiScale(*img, detected);
Rect* rects = new Rect[detected.size()];
for (size_t i = 0; i < detected.size(); ++i) {
Rect r = {detected[i].x, detected[i].y, detected[i].width, detected[i].height};
rects[i] = r;
}
Rects ret = {rects, (int)detected.size()};
return ret;
}
struct Rects HOGDescriptor_DetectMultiScaleWithParams(HOGDescriptor hog, Mat img,
double hitThresh, Size winStride, Size padding, double scale, double finalThresh,
bool useMeanshiftGrouping) {
cv::Size wSz(winStride.width, winStride.height);
cv::Size pSz(padding.width, padding.height);
std::vector<cv::Rect> detected;
hog->detectMultiScale(*img, detected, hitThresh, wSz, pSz, scale, finalThresh,
useMeanshiftGrouping);
Rect* rects = new Rect[detected.size()];
for (size_t i = 0; i < detected.size(); ++i) {
Rect r = {detected[i].x, detected[i].y, detected[i].width, detected[i].height};
rects[i] = r;
}
Rects ret = {rects, (int)detected.size()};
return ret;
}
Mat HOG_GetDefaultPeopleDetector() {
return new cv::Mat(cv::HOGDescriptor::getDefaultPeopleDetector());
}
void HOGDescriptor_SetSVMDetector(HOGDescriptor hog, Mat det) {
hog->setSVMDetector(*det);
}
struct Rects GroupRectangles(struct Rects rects, int groupThreshold, double eps) {
std::vector<cv::Rect> vRect;
for (int i = 0; i < rects.length; ++i) {
cv::Rect r = cv::Rect(rects.rects[i].x, rects.rects[i].y, rects.rects[i].width,
rects.rects[i].height);
vRect.push_back(r);
}
cv::groupRectangles(vRect, groupThreshold, eps);
Rect* results = new Rect[vRect.size()];
for (size_t i = 0; i < vRect.size(); ++i) {
Rect r = {vRect[i].x, vRect[i].y, vRect[i].width, vRect[i].height};
results[i] = r;
}
Rects ret = {results, (int)vRect.size()};
return ret;
}
// QRCodeDetector
QRCodeDetector QRCodeDetector_New() {
return new cv::QRCodeDetector();
}
void QRCodeDetector_Close(QRCodeDetector qr) {
delete qr;
}
const char* QRCodeDetector_DetectAndDecode(QRCodeDetector qr, Mat input,Mat points,Mat straight_qrcode) {
cv::String *str = new cv::String(qr->detectAndDecode(*input,*points,*straight_qrcode));
return str->c_str();
}
bool QRCodeDetector_Detect(QRCodeDetector qr, Mat input,Mat points) {
return qr->detect(*input,*points);
}
const char* QRCodeDetector_Decode(QRCodeDetector qr, Mat input, Mat inputPoints, Mat straight_qrcode) {
// Use decode() rather than detectAndDecode(): detection has already been
// done and the code's location is supplied via inputPoints.
cv::String *str = new cv::String(qr->decode(*input, *inputPoints, *straight_qrcode));
return str->c_str();
}
bool QRCodeDetector_DetectMulti(QRCodeDetector qr, Mat input, Mat points) {
return qr->detectMulti(*input,*points);
}
bool QRCodeDetector_DetectAndDecodeMulti(QRCodeDetector qr, Mat input, CStrings* decoded, Mat points, struct Mats* qrCodes) {
std::vector<cv::String> decodedCodes;
std::vector<cv::Mat> straightQrCodes;
bool res = qr->detectAndDecodeMulti(*input, decodedCodes, *points, straightQrCodes);
if (!res) {
return res;
}
qrCodes->mats = new Mat[straightQrCodes.size()];
qrCodes->length = straightQrCodes.size();
for (size_t i = 0; i < straightQrCodes.size(); i++) {
qrCodes->mats[i] = new cv::Mat(straightQrCodes[i]);
}
const char **strs = new const char*[decodedCodes.size()];
for (size_t i = 0; i < decodedCodes.size(); ++i) {
strs[i] = decodedCodes[i].c_str();
}
decoded->length = decodedCodes.size();
decoded->strs = strs;
return res;
}

285
vendor/gocv.io/x/gocv/objdetect.go generated vendored Normal file
View File

@ -0,0 +1,285 @@
package gocv
/*
#include <stdlib.h>
#include "objdetect.h"
*/
import "C"
import (
"image"
"unsafe"
)
// CascadeClassifier is a cascade classifier class for object detection.
//
// For further details, please see:
// http://docs.opencv.org/master/d1/de5/classcv_1_1CascadeClassifier.html
//
type CascadeClassifier struct {
p C.CascadeClassifier
}
// NewCascadeClassifier returns a new CascadeClassifier.
func NewCascadeClassifier() CascadeClassifier {
return CascadeClassifier{p: C.CascadeClassifier_New()}
}
// Close deletes the CascadeClassifier's pointer.
func (c *CascadeClassifier) Close() error {
C.CascadeClassifier_Close(c.p)
c.p = nil
return nil
}
// Load cascade classifier from a file.
//
// For further details, please see:
// http://docs.opencv.org/master/d1/de5/classcv_1_1CascadeClassifier.html#a1a5884c8cc749422f9eb77c2471958bc
//
func (c *CascadeClassifier) Load(name string) bool {
cName := C.CString(name)
defer C.free(unsafe.Pointer(cName))
return C.CascadeClassifier_Load(c.p, cName) != 0
}
// DetectMultiScale detects objects of different sizes in the input Mat image.
// The detected objects are returned as a slice of image.Rectangle structs.
//
// For further details, please see:
// http://docs.opencv.org/master/d1/de5/classcv_1_1CascadeClassifier.html#aaf8181cb63968136476ec4204ffca498
//
func (c *CascadeClassifier) DetectMultiScale(img Mat) []image.Rectangle {
ret := C.CascadeClassifier_DetectMultiScale(c.p, img.p)
defer C.Rects_Close(ret)
return toRectangles(ret)
}
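// A minimal detection sketch; the cascade file path below is illustrative
// and must point to a real OpenCV cascade file:
//
// classifier := gocv.NewCascadeClassifier()
// defer classifier.Close()
// if classifier.Load("haarcascade_frontalface_default.xml") {
// rects := classifier.DetectMultiScale(img)
// fmt.Printf("found %d object(s)\n", len(rects))
// }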
// DetectMultiScaleWithParams calls DetectMultiScale but allows setting parameters
// to values other than just the defaults.
//
// For further details, please see:
// http://docs.opencv.org/master/d1/de5/classcv_1_1CascadeClassifier.html#aaf8181cb63968136476ec4204ffca498
//
func (c *CascadeClassifier) DetectMultiScaleWithParams(img Mat, scale float64,
minNeighbors, flags int, minSize, maxSize image.Point) []image.Rectangle {
minSz := C.struct_Size{
width: C.int(minSize.X),
height: C.int(minSize.Y),
}
maxSz := C.struct_Size{
width: C.int(maxSize.X),
height: C.int(maxSize.Y),
}
ret := C.CascadeClassifier_DetectMultiScaleWithParams(c.p, img.p, C.double(scale),
C.int(minNeighbors), C.int(flags), minSz, maxSz)
defer C.Rects_Close(ret)
return toRectangles(ret)
}
// HOGDescriptor is a Histogram of Oriented Gradients (HOG) descriptor used for object detection.
//
// For further details, please see:
// https://docs.opencv.org/master/d5/d33/structcv_1_1HOGDescriptor.html#a723b95b709cfd3f95cf9e616de988fc8
//
type HOGDescriptor struct {
p C.HOGDescriptor
}
// NewHOGDescriptor returns a new HOGDescriptor.
func NewHOGDescriptor() HOGDescriptor {
return HOGDescriptor{p: C.HOGDescriptor_New()}
}
// Close deletes the HOGDescriptor's pointer.
func (h *HOGDescriptor) Close() error {
C.HOGDescriptor_Close(h.p)
h.p = nil
return nil
}
// DetectMultiScale detects objects in the input Mat image.
// The detected objects are returned as a slice of image.Rectangle structs.
//
// For further details, please see:
// https://docs.opencv.org/master/d5/d33/structcv_1_1HOGDescriptor.html#a660e5cd036fd5ddf0f5767b352acd948
//
func (h *HOGDescriptor) DetectMultiScale(img Mat) []image.Rectangle {
ret := C.HOGDescriptor_DetectMultiScale(h.p, img.p)
defer C.Rects_Close(ret)
return toRectangles(ret)
}
// DetectMultiScaleWithParams calls DetectMultiScale but allows setting parameters
// to values other than just the defaults.
//
// For further details, please see:
// https://docs.opencv.org/master/d5/d33/structcv_1_1HOGDescriptor.html#a660e5cd036fd5ddf0f5767b352acd948
//
func (h *HOGDescriptor) DetectMultiScaleWithParams(img Mat, hitThresh float64,
winStride, padding image.Point, scale, finalThreshold float64, useMeanshiftGrouping bool) []image.Rectangle {
wSz := C.struct_Size{
width: C.int(winStride.X),
height: C.int(winStride.Y),
}
pSz := C.struct_Size{
width: C.int(padding.X),
height: C.int(padding.Y),
}
ret := C.HOGDescriptor_DetectMultiScaleWithParams(h.p, img.p, C.double(hitThresh),
wSz, pSz, C.double(scale), C.double(finalThreshold), C.bool(useMeanshiftGrouping))
defer C.Rects_Close(ret)
return toRectangles(ret)
}
// HOGDefaultPeopleDetector returns a new Mat with the HOG DefaultPeopleDetector.
//
// For further details, please see:
// https://docs.opencv.org/master/d5/d33/structcv_1_1HOGDescriptor.html#a660e5cd036fd5ddf0f5767b352acd948
//
func HOGDefaultPeopleDetector() Mat {
return newMat(C.HOG_GetDefaultPeopleDetector())
}
// SetSVMDetector sets the data for the HOGDescriptor.
//
// For further details, please see:
// https://docs.opencv.org/master/d5/d33/structcv_1_1HOGDescriptor.html#a09e354ad701f56f9c550dc0385dc36f1
//
func (h *HOGDescriptor) SetSVMDetector(det Mat) error {
C.HOGDescriptor_SetSVMDetector(h.p, det.p)
return nil
}
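// A people-detection sketch combining the HOG calls above (img is assumed
// to be a valid BGR Mat):
//
// hog := gocv.NewHOGDescriptor()
// defer hog.Close()
// detector := gocv.HOGDefaultPeopleDetector()
// defer detector.Close()
// hog.SetSVMDetector(detector)
// people := hog.DetectMultiScale(img)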
// GroupRectangles groups the object candidate rectangles.
//
// For further details, please see:
// https://docs.opencv.org/master/d5/d54/group__objdetect.html#ga3dba897ade8aa8227edda66508e16ab9
//
func GroupRectangles(rects []image.Rectangle, groupThreshold int, eps float64) []image.Rectangle {
cRectArray := make([]C.struct_Rect, len(rects))
for i, r := range rects {
cRect := C.struct_Rect{
x: C.int(r.Min.X),
y: C.int(r.Min.Y),
width: C.int(r.Size().X),
height: C.int(r.Size().Y),
}
cRectArray[i] = cRect
}
cRects := C.struct_Rects{
rects: (*C.Rect)(&cRectArray[0]),
length: C.int(len(rects)),
}
ret := C.GroupRectangles(cRects, C.int(groupThreshold), C.double(eps))
return toRectangles(ret)
}
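// A grouping sketch: overlapping candidates that occur at least
// groupThreshold+1 times are merged into a single rectangle:
//
// candidates := []image.Rectangle{
// image.Rect(10, 10, 50, 50),
// image.Rect(12, 12, 52, 52),
// }
// grouped := gocv.GroupRectangles(candidates, 1, 0.2)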
// QRCodeDetector is used to detect and decode QR codes.
//
// For further details, please see:
// https://docs.opencv.org/master/de/dc3/classcv_1_1QRCodeDetector.html
//
type QRCodeDetector struct {
p C.QRCodeDetector
}
// newQRCodeDetector returns a new QRCodeDetector from a C QRCodeDetector
func newQRCodeDetector(p C.QRCodeDetector) QRCodeDetector {
return QRCodeDetector{p: p}
}
func NewQRCodeDetector() QRCodeDetector {
return newQRCodeDetector(C.QRCodeDetector_New())
}
func (a *QRCodeDetector) Close() error {
C.QRCodeDetector_Close(a.p)
a.p = nil
return nil
}
// DetectAndDecode both detects and decodes a QR code.
//
// Returns the decoded string, which is empty when decoding fails, even if a code was detected.
// For further details, please see:
// https://docs.opencv.org/master/de/dc3/classcv_1_1QRCodeDetector.html#a7290bd6a5d59b14a37979c3a14fbf394
//
func (a *QRCodeDetector) DetectAndDecode(input Mat, points *Mat, straight_qrcode *Mat) string {
goResult := C.GoString(C.QRCodeDetector_DetectAndDecode(a.p, input.p, points.p, straight_qrcode.p))
return string(goResult)
}
// Detect detects QR code in image and returns the quadrangle containing the code.
//
// For further details, please see:
// https://docs.opencv.org/master/de/dc3/classcv_1_1QRCodeDetector.html#a64373f7d877d27473f64fe04bb57d22b
//
func (a *QRCodeDetector) Detect(input Mat, points *Mat) bool {
result := C.QRCodeDetector_Detect(a.p, input.p, points.p)
return bool(result)
}
// Decode decodes QR code in image once it's found by the detect() method. Returns UTF8-encoded output string or empty string if the code cannot be decoded.
//
// For further details, please see:
// https://docs.opencv.org/master/de/dc3/classcv_1_1QRCodeDetector.html#a4172c2eb4825c844fb1b0ae67202d329
//
func (a *QRCodeDetector) Decode(input Mat, points Mat, straight_qrcode *Mat) string {
goResult := C.GoString(C.QRCodeDetector_Decode(a.p, input.p, points.p, straight_qrcode.p))
return string(goResult)
}
// DetectMulti detects QR codes in an image and finds the quadrangles containing the codes.
//
// Each quadrangle is returned as a row in the `points` Mat, and each point is a Vecf.
// Returns true if at least one QR code was detected.
// For usage, please see TestQRCodeDetector.
// For further details, please see:
// https://docs.opencv.org/master/de/dc3/classcv_1_1QRCodeDetector.html#aaf2b6b2115b8e8fbc9acf3a8f68872b6
func (a *QRCodeDetector) DetectMulti(input Mat, points *Mat) bool {
result := C.QRCodeDetector_DetectMulti(a.p, input.p, points.p)
return bool(result)
}
// DetectAndDecodeMulti detects QR codes in an image, finds the quadrangles
// containing the codes, and decodes the QR codes to strings.
//
// Each quadrangle is returned as a row in the `points` Mat, and each point is a Vecf.
// Returns true as long as at least one QR code was detected, even if decoding failed.
// For usage, please see TestQRCodeDetector.
// For further details, please see:
// https://docs.opencv.org/master/de/dc3/classcv_1_1QRCodeDetector.html#a188b63ffa17922b2c65d8a0ab7b70775
func (a *QRCodeDetector) DetectAndDecodeMulti(input Mat, decoded *[]string, points *Mat, qrCodes *[]Mat) bool {
cDecoded := C.CStrings{}
defer C.CStrings_Close(cDecoded)
cQrCodes := C.struct_Mats{}
defer C.Mats_Close(cQrCodes)
success := C.QRCodeDetector_DetectAndDecodeMulti(a.p, input.p, &cDecoded, points.p, &cQrCodes)
if !success {
return bool(success)
}
tmpCodes := make([]Mat, cQrCodes.length)
for i := C.int(0); i < cQrCodes.length; i++ {
tmpCodes[i].p = C.Mats_get(cQrCodes, i)
}
for _, qr := range tmpCodes {
*qrCodes = append(*qrCodes, qr)
}
for _, s := range toGoStrings(cDecoded) {
*decoded = append(*decoded, s)
}
return bool(success)
}
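// A multi-code sketch (img is assumed to be a valid Mat containing QR codes):
//
// qr := gocv.NewQRCodeDetector()
// defer qr.Close()
// points := gocv.NewMat()
// defer points.Close()
// var decoded []string
// var codes []gocv.Mat
// if qr.DetectAndDecodeMulti(img, &decoded, &points, &codes) {
// fmt.Println(decoded)
// }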

55
vendor/gocv.io/x/gocv/objdetect.h generated vendored Normal file
View File

@ -0,0 +1,55 @@
#ifndef _OPENCV3_OBJDETECT_H_
#define _OPENCV3_OBJDETECT_H_
#include <stdbool.h>
#ifdef __cplusplus
#include <opencv2/opencv.hpp>
extern "C" {
#endif
#include "core.h"
#ifdef __cplusplus
typedef cv::CascadeClassifier* CascadeClassifier;
typedef cv::HOGDescriptor* HOGDescriptor;
typedef cv::QRCodeDetector* QRCodeDetector;
#else
typedef void* CascadeClassifier;
typedef void* HOGDescriptor;
typedef void* QRCodeDetector;
#endif
// CascadeClassifier
CascadeClassifier CascadeClassifier_New();
void CascadeClassifier_Close(CascadeClassifier cs);
int CascadeClassifier_Load(CascadeClassifier cs, const char* name);
struct Rects CascadeClassifier_DetectMultiScale(CascadeClassifier cs, Mat img);
struct Rects CascadeClassifier_DetectMultiScaleWithParams(CascadeClassifier cs, Mat img,
double scale, int minNeighbors, int flags, Size minSize, Size maxSize);
HOGDescriptor HOGDescriptor_New();
void HOGDescriptor_Close(HOGDescriptor hog);
int HOGDescriptor_Load(HOGDescriptor hog, const char* name);
struct Rects HOGDescriptor_DetectMultiScale(HOGDescriptor hog, Mat img);
struct Rects HOGDescriptor_DetectMultiScaleWithParams(HOGDescriptor hog, Mat img,
double hitThresh, Size winStride, Size padding, double scale, double finalThreshold,
bool useMeanshiftGrouping);
Mat HOG_GetDefaultPeopleDetector();
void HOGDescriptor_SetSVMDetector(HOGDescriptor hog, Mat det);
struct Rects GroupRectangles(struct Rects rects, int groupThreshold, double eps);
QRCodeDetector QRCodeDetector_New();
const char* QRCodeDetector_DetectAndDecode(QRCodeDetector qr, Mat input,Mat points,Mat straight_qrcode);
bool QRCodeDetector_Detect(QRCodeDetector qr, Mat input,Mat points);
const char* QRCodeDetector_Decode(QRCodeDetector qr, Mat input,Mat inputPoints,Mat straight_qrcode);
void QRCodeDetector_Close(QRCodeDetector qr);
bool QRCodeDetector_DetectMulti(QRCodeDetector qr, Mat input, Mat points);
bool QRCodeDetector_DetectAndDecodeMulti(QRCodeDetector qr, Mat input, CStrings* decoded, Mat points, struct Mats* mats);
#ifdef __cplusplus
}
#endif
#endif //_OPENCV3_OBJDETECT_H_

118
vendor/gocv.io/x/gocv/photo.cpp generated vendored Normal file
View File

@ -0,0 +1,118 @@
#include "photo.h"
void ColorChange(Mat src, Mat mask, Mat dst, float red_mul, float green_mul, float blue_mul) {
cv::colorChange(*src, *mask, *dst, red_mul, green_mul, blue_mul);
}
void IlluminationChange(Mat src, Mat mask, Mat dst, float alpha, float beta) {
cv::illuminationChange(*src, *mask, *dst, alpha, beta);
}
void SeamlessClone(Mat src, Mat dst, Mat mask, Point p, Mat blend, int flags) {
cv::Point pt(p.x, p.y);
cv::seamlessClone(*src, *dst, *mask, pt, *blend, flags);
}
void TextureFlattening(Mat src, Mat mask, Mat dst, float low_threshold, float high_threshold, int kernel_size) {
cv::textureFlattening(*src, *mask, *dst, low_threshold, high_threshold, kernel_size);
}
void FastNlMeansDenoisingColoredMulti( struct Mats src, Mat dst, int imgToDenoiseIndex, int temporalWindowSize){
std::vector<cv::Mat> images;
for (int i = 0; i < src.length; ++i) {
images.push_back(*src.mats[i]);
}
cv::fastNlMeansDenoisingColoredMulti( images, *dst, imgToDenoiseIndex, temporalWindowSize );
}
void FastNlMeansDenoisingColoredMultiWithParams( struct Mats src, Mat dst, int imgToDenoiseIndex, int temporalWindowSize, float h, float hColor, int templateWindowSize, int searchWindowSize ){
std::vector<cv::Mat> images;
for (int i = 0; i < src.length; ++i) {
images.push_back(*src.mats[i]);
}
cv::fastNlMeansDenoisingColoredMulti( images, *dst, imgToDenoiseIndex, temporalWindowSize, h, hColor, templateWindowSize, searchWindowSize );
}
MergeMertens MergeMertens_Create() {
return new cv::Ptr<cv::MergeMertens>(cv::createMergeMertens());
}
MergeMertens MergeMertens_CreateWithParams(float contrast_weight,
float saturation_weight,
float exposure_weight) {
return new cv::Ptr<cv::MergeMertens>(cv::createMergeMertens(
contrast_weight, saturation_weight, exposure_weight));
}
void MergeMertens_Close(MergeMertens b) {
delete b;
}
void MergeMertens_Process(MergeMertens b, struct Mats src, Mat dst) {
std::vector<cv::Mat> images;
for (int i = 0; i < src.length; ++i) {
images.push_back(*src.mats[i]);
}
(*b)->process(images, *dst);
}
AlignMTB AlignMTB_Create() {
return new cv::Ptr<cv::AlignMTB>(cv::createAlignMTB(6,4,false));
}
AlignMTB AlignMTB_CreateWithParams(int max_bits, int exclude_range, bool cut) {
return new cv::Ptr<cv::AlignMTB>(
cv::createAlignMTB(max_bits, exclude_range, cut));
}
void AlignMTB_Close(AlignMTB b) { delete b; }
void AlignMTB_Process(AlignMTB b, struct Mats src, struct Mats *dst) {
std::vector<cv::Mat> srcMats;
for (int i = 0; i < src.length; ++i) {
srcMats.push_back(*src.mats[i]);
}
std::vector<cv::Mat> dstMats;
(*b)->process(srcMats, dstMats);
dst->mats = new Mat[dstMats.size()];
for (size_t i = 0; i < dstMats.size() ; ++i) {
dst->mats[i] = new cv::Mat( dstMats[i] );
}
dst->length = (int)dstMats.size();
}
void FastNlMeansDenoising(Mat src, Mat dst) {
cv::fastNlMeansDenoising(*src, *dst);
}
void FastNlMeansDenoisingWithParams(Mat src, Mat dst, float h, int templateWindowSize, int searchWindowSize) {
cv::fastNlMeansDenoising(*src, *dst, h, templateWindowSize, searchWindowSize);
}
void FastNlMeansDenoisingColored(Mat src, Mat dst) {
cv::fastNlMeansDenoisingColored(*src, *dst);
}
void FastNlMeansDenoisingColoredWithParams(Mat src, Mat dst, float h, float hColor, int templateWindowSize, int searchWindowSize) {
cv::fastNlMeansDenoisingColored(*src, *dst, h, hColor, templateWindowSize, searchWindowSize);
}
void EdgePreservingFilter(Mat src, Mat dst, int filter, float sigma_s, float sigma_r) {
cv::edgePreservingFilter(*src, *dst, filter, sigma_s, sigma_r);
}
void DetailEnhance(Mat src, Mat dst, float sigma_s, float sigma_r) {
cv::detailEnhance(*src, *dst, sigma_s, sigma_r);
}
void PencilSketch(Mat src, Mat dst1, Mat dst2, float sigma_s, float sigma_r, float shade_factor) {
cv::pencilSketch(*src, *dst1, *dst2, sigma_s, sigma_r, shade_factor);
}
void Stylization(Mat src, Mat dst, float sigma_s, float sigma_r) {
cv::stylization(*src, *dst, sigma_s, sigma_r);
}

316
vendor/gocv.io/x/gocv/photo.go generated vendored Normal file
View File

@ -0,0 +1,316 @@
package gocv
/*
#include <stdlib.h>
#include "photo.h"
*/
import "C"
import (
"image"
"unsafe"
)
// SeamlessCloneFlags are the seamlessClone algorithm flags.
type SeamlessCloneFlags int
// MergeMertens is a wrapper around the cv::MergeMertens.
type MergeMertens struct {
p unsafe.Pointer // This unsafe pointer will in fact be a C.MergeMertens
}
// AlignMTB is a wrapper around the cv::AlignMTB.
type AlignMTB struct {
p unsafe.Pointer // This unsafe pointer will in fact be a C.AlignMTB
}
const (
// NormalClone The power of the method is fully expressed when inserting objects with complex outlines into a new background.
NormalClone SeamlessCloneFlags = iota
// MixedClone The classic method, color-based selection and alpha masking might be time consuming and often leaves an undesirable halo. Seamless cloning, even averaged with the original image, is not effective. Mixed seamless cloning based on a loose selection proves effective.
MixedClone
// MonochromeTransfer Monochrome transfer allows the user to easily replace certain features of one object by alternative features.
MonochromeTransfer
)
// ColorChange mixes two differently colored versions of an image seamlessly.
//
// For further details, please see:
// https://docs.opencv.org/master/df/da0/group__photo__clone.html#ga6684f35dc669ff6196a7c340dc73b98e
//
func ColorChange(src, mask Mat, dst *Mat, red_mul, green_mul, blue_mul float32) {
C.ColorChange(src.p, mask.p, dst.p, C.float(red_mul), C.float(green_mul), C.float(blue_mul))
}
// SeamlessClone blends two images by Poisson blending.
//
// For further details, please see:
// https://docs.opencv.org/master/df/da0/group__photo__clone.html#ga2bf426e4c93a6b1f21705513dfeca49d
//
func SeamlessClone(src, dst, mask Mat, p image.Point, blend *Mat, flags SeamlessCloneFlags) {
cp := C.struct_Point{
x: C.int(p.X),
y: C.int(p.Y),
}
C.SeamlessClone(src.p, dst.p, mask.p, cp, blend.p, C.int(flags))
}
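// A hedged blending sketch; src, dst and mask are assumed to be valid Mats
// of compatible sizes:
//
// blend := gocv.NewMat()
// defer blend.Close()
// center := image.Pt(dst.Cols()/2, dst.Rows()/2)
// gocv.SeamlessClone(src, dst, mask, center, &blend, gocv.NormalClone)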
// IlluminationChange modifies locally the apparent illumination of an image.
//
// For further details, please see:
// https://docs.opencv.org/master/df/da0/group__photo__clone.html#gac5025767cf2febd8029d474278e886c7
//
func IlluminationChange(src, mask Mat, dst *Mat, alpha, beta float32) {
C.IlluminationChange(src.p, mask.p, dst.p, C.float(alpha), C.float(beta))
}
// TextureFlattening washes out the texture of the selected region, giving its contents a flat aspect.
//
// For further details, please see:
// https://docs.opencv.org/master/df/da0/group__photo__clone.html#gad55df6aa53797365fa7cc23959a54004
//
func TextureFlattening(src, mask Mat, dst *Mat, lowThreshold, highThreshold float32, kernelSize int) {
C.TextureFlattening(src.p, mask.p, dst.p, C.float(lowThreshold), C.float(highThreshold), C.int(kernelSize))
}
// FastNlMeansDenoisingColoredMulti denoises the selected images.
//
// For further details, please see:
// https://docs.opencv.org/master/d1/d79/group__photo__denoise.html#gaa501e71f52fb2dc17ff8ca5e7d2d3619
//
func FastNlMeansDenoisingColoredMulti(src []Mat, dst *Mat, imgToDenoiseIndex int, temporalWindowSize int) {
cMatArray := make([]C.Mat, len(src))
for i, r := range src {
cMatArray[i] = (C.Mat)(r.p)
}
matsVector := C.struct_Mats{
mats: (*C.Mat)(&cMatArray[0]),
length: C.int(len(src)),
}
C.FastNlMeansDenoisingColoredMulti(matsVector, dst.p, C.int(imgToDenoiseIndex), C.int(temporalWindowSize))
}
// FastNlMeansDenoisingColoredMultiWithParams denoises the selected images, allowing the filter parameters to be set explicitly.
//
// For further details, please see:
// https://docs.opencv.org/master/d1/d79/group__photo__denoise.html#gaa501e71f52fb2dc17ff8ca5e7d2d3619
//
func FastNlMeansDenoisingColoredMultiWithParams(src []Mat, dst *Mat, imgToDenoiseIndex int, temporalWindowSize int, h float32, hColor float32, templateWindowSize int, searchWindowSize int) {
cMatArray := make([]C.Mat, len(src))
for i, r := range src {
cMatArray[i] = (C.Mat)(r.p)
}
matsVector := C.struct_Mats{
mats: (*C.Mat)(&cMatArray[0]),
length: C.int(len(src)),
}
C.FastNlMeansDenoisingColoredMultiWithParams(matsVector, dst.p, C.int(imgToDenoiseIndex), C.int(temporalWindowSize), C.float(h), C.float(hColor), C.int(templateWindowSize), C.int(searchWindowSize))
}
// NewMergeMertens returns a new MergeMertens LDR merge algorithm with default parameters.
// The MergeMertens algorithm merges a set of LDR images into an HDR-like result.
//
// For further details, please see:
// https://docs.opencv.org/master/d6/df5/group__photo__hdr.html
// https://docs.opencv.org/master/d7/dd6/classcv_1_1MergeMertens.html
// https://docs.opencv.org/master/d6/df5/group__photo__hdr.html#ga79d59aa3cb3a7c664e59a4b5acc1ccb6
//
func NewMergeMertens() MergeMertens {
return MergeMertens{p: unsafe.Pointer(C.MergeMertens_Create())}
}
// NewMergeMertensWithParams returns a new MergeMertens LDR merge algorithm with customized parameters.
// The MergeMertens algorithm merges a set of LDR images into an HDR-like result.
//
// For further details, please see:
// https://docs.opencv.org/master/d6/df5/group__photo__hdr.html
// https://docs.opencv.org/master/d7/dd6/classcv_1_1MergeMertens.html
// https://docs.opencv.org/master/d6/df5/group__photo__hdr.html#ga79d59aa3cb3a7c664e59a4b5acc1ccb6
//
func NewMergeMertensWithParams(contrast_weight float32, saturation_weight float32, exposure_weight float32) MergeMertens {
return MergeMertens{p: unsafe.Pointer(C.MergeMertens_CreateWithParams(C.float(contrast_weight), C.float(saturation_weight), C.float(exposure_weight)))}
}
// Close MergeMertens.
func (b *MergeMertens) Close() error {
C.MergeMertens_Close((C.MergeMertens)(b.p)) // Here the unsafe pointer is cast into the right type
b.p = nil
return nil
}
// Process merges LDR images using the current MergeMertens.
// The result written to dst is an 8-bit, 3-channel RGB image.
// For further details, please see:
// https://docs.opencv.org/master/d7/dd6/classcv_1_1MergeMertens.html#a2d2254b2aab722c16954de13a663644d
//
func (b *MergeMertens) Process(src []Mat, dst *Mat) {
cMatArray := make([]C.Mat, len(src))
for i, r := range src {
cMatArray[i] = (C.Mat)(r.p)
}
// Conversion function from a Golang slice into an array of matrices that are understood by OpenCV
matsVector := C.struct_Mats{
mats: (*C.Mat)(&cMatArray[0]),
length: C.int(len(src)),
}
C.MergeMertens_Process((C.MergeMertens)(b.p), matsVector, dst.p)
// Convert the [0.0,1.0] float result to 8-bit [0,255] values
dst.ConvertToWithParams(dst, MatTypeCV8UC3, 255.0, 0.0)
}
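// An exposure-fusion sketch, assuming exposures is a []gocv.Mat of aligned
// LDR images of the same scene:
//
// merge := gocv.NewMergeMertens()
// defer merge.Close()
// hdr := gocv.NewMat()
// defer hdr.Close()
// merge.Process(exposures, &hdr) // hdr holds the fused 8-bit RGB result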
// NewAlignMTB returns a new AlignMTB with default parameters.
// AlignMTB converts images to median threshold bitmaps (1 for pixels
// brighter than the median luminance and 0 otherwise) and then aligns the
// resulting bitmaps using bit operations.
// For further details, please see:
// https://docs.opencv.org/master/d6/df5/group__photo__hdr.html
// https://docs.opencv.org/master/d7/db6/classcv_1_1AlignMTB.html
// https://docs.opencv.org/master/d6/df5/group__photo__hdr.html#ga2f1fafc885a5d79dbfb3542e08db0244
//
func NewAlignMTB() AlignMTB {
return AlignMTB{p: unsafe.Pointer(C.AlignMTB_Create())}
}
// NewAlignMTBWithParams returns a new AlignMTB with customized parameters.
// AlignMTB converts images to median threshold bitmaps (1 for pixels
// brighter than median luminance and 0 otherwise) and then aligns the
// resulting bitmaps using bit operations.
// For further details, please see:
// https://docs.opencv.org/master/d6/df5/group__photo__hdr.html
// https://docs.opencv.org/master/d7/db6/classcv_1_1AlignMTB.html
// https://docs.opencv.org/master/d6/df5/group__photo__hdr.html#ga2f1fafc885a5d79dbfb3542e08db0244
//
func NewAlignMTBWithParams(max_bits int, exclude_range int, cut bool) AlignMTB {
return AlignMTB{p: unsafe.Pointer(C.AlignMTB_CreateWithParams(C.int(max_bits), C.int(exclude_range), C.bool(cut)))}
}
// Close AlignMTB.
func (b *AlignMTB) Close() error {
C.AlignMTB_Close((C.AlignMTB)(b.p))
b.p = nil
return nil
}
// Process computes an alignment using the current AlignMTB.
//
// For further details, please see:
// https://docs.opencv.org/master/d7/db6/classcv_1_1AlignMTB.html#a37b3417d844f362d781f34155cbcb201
//
func (b *AlignMTB) Process(src []Mat, dst *[]Mat) {
cSrcArray := make([]C.Mat, len(src))
for i, r := range src {
cSrcArray[i] = r.p
}
cSrcMats := C.struct_Mats{
mats: (*C.Mat)(&cSrcArray[0]),
length: C.int(len(src)),
}
cDstMats := C.struct_Mats{}
C.AlignMTB_Process((C.AlignMTB)(b.p), cSrcMats, &cDstMats)
// Pass the matrices by reference from an OpenCV/C++ to a GoCV::Mat object
for i := C.int(0); i < cDstMats.length; i++ {
var tempdst Mat
tempdst.p = C.Mats_get(cDstMats, i)
*dst = append(*dst, tempdst)
}
return
}
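// Usage sketch (illustrative only): aligning hand-held exposures before
// merging them, assuming imgs holds the bracketed gocv.Mat frames:
//
//	align := gocv.NewAlignMTB()
//	defer align.Close()
//	aligned := []gocv.Mat{}
//	align.Process(imgs, &aligned)
//	// aligned can now be passed to MergeMertens.Process.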
// FastNlMeansDenoising performs image denoising using the Non-local Means Denoising algorithm
// http://www.ipol.im/pub/algo/bcm_non_local_means_denoising/
//
// For further details, please see:
// https://docs.opencv.org/4.x/d1/d79/group__photo__denoise.html#ga4c6b0031f56ea3f98f768881279ffe93
//
func FastNlMeansDenoising(src Mat, dst *Mat) {
C.FastNlMeansDenoising(src.p, dst.p)
}
// FastNlMeansDenoisingWithParams performs image denoising using the Non-local Means Denoising algorithm
// http://www.ipol.im/pub/algo/bcm_non_local_means_denoising/
//
// For further details, please see:
// https://docs.opencv.org/4.x/d1/d79/group__photo__denoise.html#ga4c6b0031f56ea3f98f768881279ffe93
//
func FastNlMeansDenoisingWithParams(src Mat, dst *Mat, h float32, templateWindowSize int, searchWindowSize int) {
C.FastNlMeansDenoisingWithParams(src.p, dst.p, C.float(h), C.int(templateWindowSize), C.int(searchWindowSize))
}
// FastNlMeansDenoisingColored is a modification of the fastNlMeansDenoising function for colored images.
//
// For further details, please see:
// https://docs.opencv.org/4.x/d1/d79/group__photo__denoise.html#ga21abc1c8b0e15f78cd3eff672cb6c476
//
func FastNlMeansDenoisingColored(src Mat, dst *Mat) {
C.FastNlMeansDenoisingColored(src.p, dst.p)
}
// FastNlMeansDenoisingColoredWithParams is a modification of the fastNlMeansDenoising function for colored images.
//
// For further details, please see:
// https://docs.opencv.org/4.x/d1/d79/group__photo__denoise.html#ga21abc1c8b0e15f78cd3eff672cb6c476
//
func FastNlMeansDenoisingColoredWithParams(src Mat, dst *Mat, h float32, hColor float32, templateWindowSize int, searchWindowSize int) {
C.FastNlMeansDenoisingColoredWithParams(src.p, dst.p, C.float(h), C.float(hColor), C.int(templateWindowSize), C.int(searchWindowSize))
}
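// Usage sketch (illustrative only): denoising a color image with typical
// parameter values, assuming noisy.jpg exists:
//
//	src := gocv.IMRead("noisy.jpg", gocv.IMReadColor)
//	defer src.Close()
//	dst := gocv.NewMat()
//	defer dst.Close()
//	// h=3 filters luminance, hColor=3 filters color, 7x7 template
//	// patches compared over a 21x21 search window.
//	gocv.FastNlMeansDenoisingColoredWithParams(src, &dst, 3, 3, 7, 21)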
// DetailEnhance filter enhances the details of a particular image.
//
// For further details, please see:
// https://docs.opencv.org/4.x/df/dac/group__photo__render.html#gae5930dd822c713b36f8529b21ddebd0c
//
func DetailEnhance(src Mat, dst *Mat, sigma_s, sigma_r float32) {
C.DetailEnhance(src.p, dst.p, C.float(sigma_s), C.float(sigma_r))
}
type EdgeFilter int
const (
// RecursFilter Recursive Filtering.
RecursFilter EdgeFilter = 1
// NormconvFilter Normalized Convolution Filtering.
NormconvFilter EdgeFilter = 2
)
// EdgePreservingFilter filtering is the fundamental operation in image and video processing.
// Edge-preserving smoothing filters are used in many different applications.
//
// For further details, please see:
// https://docs.opencv.org/4.x/df/dac/group__photo__render.html#gafaee2977597029bc8e35da6e67bd31f7
//
func EdgePreservingFilter(src Mat, dst *Mat, filter EdgeFilter, sigma_s, sigma_r float32) {
C.EdgePreservingFilter(src.p, dst.p, C.int(filter), C.float(sigma_s), C.float(sigma_r))
}
// PencilSketch produces a pencil-like non-photorealistic line drawing.
//
// For further details, please see:
// https://docs.opencv.org/4.x/df/dac/group__photo__render.html#gae5930dd822c713b36f8529b21ddebd0c
//
func PencilSketch(src Mat, dst1, dst2 *Mat, sigma_s, sigma_r, shade_factor float32) {
C.PencilSketch(src.p, dst1.p, dst2.p, C.float(sigma_s), C.float(sigma_r), C.float(shade_factor))
}
// Stylization aims to produce digital imagery with a wide variety of effects
// not focused on photorealism. Edge-aware filters are ideal for stylization,
// as they can abstract regions of low contrast while preserving, or enhancing,
// high-contrast features.
//
// For further details, please see:
// https://docs.opencv.org/4.x/df/dac/group__photo__render.html#gacb0f7324017df153d7b5d095aed53206
//
func Stylization(src Mat, dst *Mat, sigma_s, sigma_r float32) {
C.Stylization(src.p, dst.p, C.float(sigma_s), C.float(sigma_r))
}
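// Usage sketch (illustrative only): applying one of the non-photorealistic
// filters above, assuming input.jpg exists; sigma_s lives in [0,200] and
// sigma_r in [0,1]:
//
//	src := gocv.IMRead("input.jpg", gocv.IMReadColor)
//	defer src.Close()
//	dst := gocv.NewMat()
//	defer dst.Close()
//	gocv.Stylization(src, &dst, 60, 0.45)
//	gocv.IMWrite("stylized.jpg", dst)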

56
vendor/gocv.io/x/gocv/photo.h generated vendored Normal file

@ -0,0 +1,56 @@
#ifndef _OPENCV3_PHOTO_H_
#define _OPENCV3_PHOTO_H_
#ifdef __cplusplus
#include <opencv2/opencv.hpp>
#include <opencv2/photo.hpp>
extern "C" {
#endif
#include "core.h"
#ifdef __cplusplus
// see : https://docs.opencv.org/3.4/d7/dd6/classcv_1_1MergeMertens.html
typedef cv::Ptr<cv::MergeMertens> *MergeMertens;
// see : https://docs.opencv.org/master/d7/db6/classcv_1_1AlignMTB.html
typedef cv::Ptr<cv::AlignMTB> *AlignMTB;
#else
typedef void *MergeMertens;
typedef void *AlignMTB;
#endif
void ColorChange(Mat src, Mat mask, Mat dst, float red_mul, float green_mul, float blue_mul);
void SeamlessClone(Mat src, Mat dst, Mat mask, Point p, Mat blend, int flags);
void IlluminationChange(Mat src, Mat mask, Mat dst, float alpha, float beta);
void TextureFlattening(Mat src, Mat mask, Mat dst, float low_threshold, float high_threshold, int kernel_size);
void FastNlMeansDenoisingColoredMulti(struct Mats src, Mat dst, int imgToDenoiseIndex, int temporalWindowSize);
void FastNlMeansDenoisingColoredMultiWithParams(struct Mats src, Mat dst, int imgToDenoiseIndex, int temporalWindowSize, float h, float hColor, int templateWindowSize, int searchWindowSize );
void FastNlMeansDenoising(Mat src, Mat dst);
void FastNlMeansDenoisingWithParams(Mat src, Mat dst, float h, int templateWindowSize, int searchWindowSize);
void FastNlMeansDenoisingColored(Mat src, Mat dst);
void FastNlMeansDenoisingColoredWithParams(Mat src, Mat dst, float h, float hColor, int templateWindowSize, int searchWindowSize);
MergeMertens MergeMertens_Create();
MergeMertens MergeMertens_CreateWithParams(float contrast_weight, float saturation_weight, float exposure_weight);
void MergeMertens_Process(MergeMertens b, struct Mats src, Mat dst);
void MergeMertens_Close(MergeMertens b);
AlignMTB AlignMTB_Create();
AlignMTB AlignMTB_CreateWithParams(int max_bits, int exclude_range, bool cut);
void AlignMTB_Process(AlignMTB b, struct Mats src, struct Mats *dst);
void AlignMTB_Close(AlignMTB b);
void DetailEnhance(Mat src, Mat dst, float sigma_s, float sigma_r);
void EdgePreservingFilter(Mat src, Mat dst, int filter, float sigma_s, float sigma_r);
void PencilSketch(Mat src, Mat dst1, Mat dst2, float sigma_s, float sigma_r, float shade_factor);
void Stylization(Mat src, Mat dst, float sigma_s, float sigma_r);
#ifdef __cplusplus
}
#endif
#endif //_OPENCV3_PHOTO_H_

13
vendor/gocv.io/x/gocv/photo_string.go generated vendored Normal file
View File

@ -0,0 +1,13 @@
package gocv
func (c SeamlessCloneFlags) String() string {
switch c {
case NormalClone:
return "normal-clone"
case MixedClone:
return "mixed-clone"
case MonochromeTransfer:
return "monochrome-transfer"
}
return ""
}

5
vendor/gocv.io/x/gocv/svd.cpp generated vendored Normal file
View File

@ -0,0 +1,5 @@
#include "svd.h"
void SVD_Compute(Mat src, Mat w, Mat u, Mat vt) {
cv::SVD::compute(*src, *w, *u, *vt, 0);
}

14
vendor/gocv.io/x/gocv/svd.go generated vendored Normal file
View File

@ -0,0 +1,14 @@
package gocv
/*
#include <stdlib.h>
#include "svd.h"
*/
import "C"
// SVDCompute decomposes matrix and stores the results to user-provided matrices
//
// https://docs.opencv.org/4.1.2/df/df7/classcv_1_1SVD.html#a76f0b2044df458160292045a3d3714c6
func SVDCompute(src Mat, w, u, vt *Mat) {
C.SVD_Compute(src.Ptr(), w.Ptr(), u.Ptr(), vt.Ptr())
}
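// Usage sketch (illustrative only): computing the SVD of a 2x2 float32
// matrix and reading the singular values back from w:
//
//	src := gocv.NewMatWithSize(2, 2, gocv.MatTypeCV32F)
//	defer src.Close()
//	src.SetFloatAt(0, 0, 3)
//	src.SetFloatAt(0, 1, 0)
//	src.SetFloatAt(1, 0, 0)
//	src.SetFloatAt(1, 1, 2)
//	w, u, vt := gocv.NewMat(), gocv.NewMat(), gocv.NewMat()
//	defer w.Close()
//	defer u.Close()
//	defer vt.Close()
//	gocv.SVDCompute(src, &w, &u, &vt)
//	// w now holds the singular values, here 3 and 2.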

18
vendor/gocv.io/x/gocv/svd.h generated vendored Normal file
View File

@ -0,0 +1,18 @@
#ifndef _OPENCV3_SVD_H_
#define _OPENCV3_SVD_H_
#ifdef __cplusplus
#include <opencv2/opencv.hpp>
extern "C" {
#endif
#include "core.h"
void SVD_Compute(Mat src, Mat w, Mat u, Mat vt);
#ifdef __cplusplus
}
#endif
#endif //_OPENCV3_SVD_H_

5
vendor/gocv.io/x/gocv/version.cpp generated vendored Normal file
View File

@ -0,0 +1,5 @@
#include "version.h"
const char* openCVVersion() {
return CV_VERSION;
}

20
vendor/gocv.io/x/gocv/version.go generated vendored Normal file
View File

@ -0,0 +1,20 @@
package gocv
/*
#include <stdlib.h>
#include "version.h"
*/
import "C"
// GoCVVersion of this package, for display purposes.
const GoCVVersion = "0.31.0"
// Version returns the current golang package version
func Version() string {
return GoCVVersion
}
// OpenCVVersion returns the current OpenCV lib version
func OpenCVVersion() string {
return C.GoString(C.openCVVersion())
}
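// Usage sketch (illustrative only):
//
//	fmt.Printf("gocv %s, OpenCV %s\n", gocv.Version(), gocv.OpenCVVersion())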

17
vendor/gocv.io/x/gocv/version.h generated vendored Normal file
View File

@ -0,0 +1,17 @@
#ifndef _OPENCV3_VERSION_H_
#define _OPENCV3_VERSION_H_
#ifdef __cplusplus
#include <opencv2/opencv.hpp>
extern "C" {
#endif
#include "core.h"
const char* openCVVersion();
#ifdef __cplusplus
}
#endif
#endif //_OPENCV3_VERSION_H_

77
vendor/gocv.io/x/gocv/video.cpp generated vendored Normal file
View File

@ -0,0 +1,77 @@
#include "video.h"
BackgroundSubtractorMOG2 BackgroundSubtractorMOG2_Create() {
return new cv::Ptr<cv::BackgroundSubtractorMOG2>(cv::createBackgroundSubtractorMOG2());
}
BackgroundSubtractorMOG2 BackgroundSubtractorMOG2_CreateWithParams(int history, double varThreshold, bool detectShadows) {
return new cv::Ptr<cv::BackgroundSubtractorMOG2>(cv::createBackgroundSubtractorMOG2(history,varThreshold,detectShadows));
}
BackgroundSubtractorKNN BackgroundSubtractorKNN_Create() {
return new cv::Ptr<cv::BackgroundSubtractorKNN>(cv::createBackgroundSubtractorKNN());
}
BackgroundSubtractorKNN BackgroundSubtractorKNN_CreateWithParams(int history, double dist2Threshold, bool detectShadows) {
return new cv::Ptr<cv::BackgroundSubtractorKNN>(cv::createBackgroundSubtractorKNN(history,dist2Threshold,detectShadows));
}
void BackgroundSubtractorMOG2_Close(BackgroundSubtractorMOG2 b) {
delete b;
}
void BackgroundSubtractorMOG2_Apply(BackgroundSubtractorMOG2 b, Mat src, Mat dst) {
(*b)->apply(*src, *dst);
}
void BackgroundSubtractorKNN_Close(BackgroundSubtractorKNN k) {
delete k;
}
void BackgroundSubtractorKNN_Apply(BackgroundSubtractorKNN k, Mat src, Mat dst) {
(*k)->apply(*src, *dst);
}
void CalcOpticalFlowFarneback(Mat prevImg, Mat nextImg, Mat flow, double scale, int levels,
int winsize, int iterations, int polyN, double polySigma, int flags) {
cv::calcOpticalFlowFarneback(*prevImg, *nextImg, *flow, scale, levels, winsize, iterations, polyN,
polySigma, flags);
}
void CalcOpticalFlowPyrLK(Mat prevImg, Mat nextImg, Mat prevPts, Mat nextPts, Mat status, Mat err) {
cv::calcOpticalFlowPyrLK(*prevImg, *nextImg, *prevPts, *nextPts, *status, *err);
}
void CalcOpticalFlowPyrLKWithParams(Mat prevImg, Mat nextImg, Mat prevPts, Mat nextPts, Mat status, Mat err, Size winSize, int maxLevel, TermCriteria criteria, int flags, double minEigThreshold){
cv::Size sz(winSize.width, winSize.height);
cv::calcOpticalFlowPyrLK(*prevImg, *nextImg, *prevPts, *nextPts, *status, *err, sz, maxLevel, *criteria, flags, minEigThreshold);
}
double FindTransformECC(Mat templateImage, Mat inputImage, Mat warpMatrix, int motionType, TermCriteria criteria, Mat inputMask, int gaussFiltSize){
return cv::findTransformECC(*templateImage, *inputImage, *warpMatrix, motionType, *criteria, *inputMask, gaussFiltSize);
}
bool Tracker_Init(Tracker self, Mat image, Rect boundingBox) {
cv::Rect bb(boundingBox.x, boundingBox.y, boundingBox.width, boundingBox.height);
(*self)->init(*image, bb);
return true;
}
bool Tracker_Update(Tracker self, Mat image, Rect* boundingBox) {
cv::Rect bb;
bool ret = (*self)->update(*image, bb);
boundingBox->x = int(bb.x);
boundingBox->y = int(bb.y);
boundingBox->width = int(bb.width);
boundingBox->height = int(bb.height);
return ret;
}
TrackerMIL TrackerMIL_Create() {
return new cv::Ptr<cv::TrackerMIL>(cv::TrackerMIL::create());
}
void TrackerMIL_Close(TrackerMIL self) {
delete self;
}

258
vendor/gocv.io/x/gocv/video.go generated vendored Normal file
View File

@ -0,0 +1,258 @@
package gocv
/*
#include <stdlib.h>
#include "video.h"
*/
import "C"
import (
"image"
"unsafe"
)
/**
cv::OPTFLOW_USE_INITIAL_FLOW = 4,
cv::OPTFLOW_LK_GET_MIN_EIGENVALS = 8,
cv::OPTFLOW_FARNEBACK_GAUSSIAN = 256
For further details, please see: https://docs.opencv.org/master/dc/d6b/group__video__track.html#gga2c6cc144c9eee043575d5b311ac8af08a9d4430ac75199af0cf6fcdefba30eafe
*/
const (
OptflowUseInitialFlow = 4
OptflowLkGetMinEigenvals = 8
OptflowFarnebackGaussian = 256
)
/**
cv::MOTION_TRANSLATION = 0,
cv::MOTION_EUCLIDEAN = 1,
cv::MOTION_AFFINE = 2,
cv::MOTION_HOMOGRAPHY = 3
For further details, please see: https://docs.opencv.org/4.x/dc/d6b/group__video__track.html#ggaaedb1f94e6b143cef163622c531afd88a01106d6d20122b782ff25eaeffe9a5be
*/
const (
MotionTranslation = 0
MotionEuclidean = 1
MotionAffine = 2
MotionHomography = 3
)
// BackgroundSubtractorMOG2 is a wrapper around the cv::BackgroundSubtractorMOG2.
type BackgroundSubtractorMOG2 struct {
// C.BackgroundSubtractorMOG2
p unsafe.Pointer
}
// NewBackgroundSubtractorMOG2 returns a new BackgroundSubtractor algorithm
// of type MOG2. MOG2 is a Gaussian Mixture-based Background/Foreground
// Segmentation Algorithm.
//
// For further details, please see:
// https://docs.opencv.org/master/de/de1/group__video__motion.html#ga2beb2dee7a073809ccec60f145b6b29c
// https://docs.opencv.org/master/d7/d7b/classcv_1_1BackgroundSubtractorMOG2.html
//
func NewBackgroundSubtractorMOG2() BackgroundSubtractorMOG2 {
return BackgroundSubtractorMOG2{p: unsafe.Pointer(C.BackgroundSubtractorMOG2_Create())}
}
// NewBackgroundSubtractorMOG2WithParams returns a new BackgroundSubtractor algorithm
// of type MOG2 with customized parameters. MOG2 is a Gaussian Mixture-based Background/Foreground
// Segmentation Algorithm.
//
// For further details, please see:
// https://docs.opencv.org/master/de/de1/group__video__motion.html#ga2beb2dee7a073809ccec60f145b6b29c
// https://docs.opencv.org/master/d7/d7b/classcv_1_1BackgroundSubtractorMOG2.html
//
func NewBackgroundSubtractorMOG2WithParams(history int, varThreshold float64, detectShadows bool) BackgroundSubtractorMOG2 {
return BackgroundSubtractorMOG2{p: unsafe.Pointer(C.BackgroundSubtractorMOG2_CreateWithParams(C.int(history), C.double(varThreshold), C.bool(detectShadows)))}
}
// Close BackgroundSubtractorMOG2.
func (b *BackgroundSubtractorMOG2) Close() error {
C.BackgroundSubtractorMOG2_Close((C.BackgroundSubtractorMOG2)(b.p))
b.p = nil
return nil
}
// Apply computes a foreground mask using the current BackgroundSubtractorMOG2.
//
// For further details, please see:
// https://docs.opencv.org/master/d7/df6/classcv_1_1BackgroundSubtractor.html#aa735e76f7069b3fa9c3f32395f9ccd21
//
func (b *BackgroundSubtractorMOG2) Apply(src Mat, dst *Mat) {
C.BackgroundSubtractorMOG2_Apply((C.BackgroundSubtractorMOG2)(b.p), src.p, dst.p)
return
}
// BackgroundSubtractorKNN is a wrapper around the cv::BackgroundSubtractorKNN.
type BackgroundSubtractorKNN struct {
// C.BackgroundSubtractorKNN
p unsafe.Pointer
}
// NewBackgroundSubtractorKNN returns a new BackgroundSubtractor algorithm
// of type KNN. KNN is a K-Nearest Neighbors based Background/Foreground
// Segmentation Algorithm.
//
// For further details, please see:
// https://docs.opencv.org/master/de/de1/group__video__motion.html#gac9be925771f805b6fdb614ec2292006d
// https://docs.opencv.org/master/db/d88/classcv_1_1BackgroundSubtractorKNN.html
//
func NewBackgroundSubtractorKNN() BackgroundSubtractorKNN {
return BackgroundSubtractorKNN{p: unsafe.Pointer(C.BackgroundSubtractorKNN_Create())}
}
// NewBackgroundSubtractorKNNWithParams returns a new BackgroundSubtractor algorithm
// of type KNN with customized parameters. KNN is a K-Nearest Neighbors
// based Background/Foreground Segmentation Algorithm.
//
// For further details, please see:
// https://docs.opencv.org/master/de/de1/group__video__motion.html#gac9be925771f805b6fdb614ec2292006d
// https://docs.opencv.org/master/db/d88/classcv_1_1BackgroundSubtractorKNN.html
//
func NewBackgroundSubtractorKNNWithParams(history int, dist2Threshold float64, detectShadows bool) BackgroundSubtractorKNN {
return BackgroundSubtractorKNN{p: unsafe.Pointer(C.BackgroundSubtractorKNN_CreateWithParams(C.int(history), C.double(dist2Threshold), C.bool(detectShadows)))}
}
// Close BackgroundSubtractorKNN.
func (k *BackgroundSubtractorKNN) Close() error {
C.BackgroundSubtractorKNN_Close((C.BackgroundSubtractorKNN)(k.p))
k.p = nil
return nil
}
// Apply computes a foreground mask using the current BackgroundSubtractorKNN.
//
// For further details, please see:
// https://docs.opencv.org/master/d7/df6/classcv_1_1BackgroundSubtractor.html#aa735e76f7069b3fa9c3f32395f9ccd21
//
func (k *BackgroundSubtractorKNN) Apply(src Mat, dst *Mat) {
C.BackgroundSubtractorKNN_Apply((C.BackgroundSubtractorKNN)(k.p), src.p, dst.p)
return
}
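// Usage sketch (illustrative only): extracting a foreground mask from a
// camera stream with the MOG2 subtractor, assuming camera 0 is available:
//
//	vc, err := gocv.OpenVideoCapture(0)
//	if err != nil {
//		panic(err)
//	}
//	defer vc.Close()
//	mog2 := gocv.NewBackgroundSubtractorMOG2()
//	defer mog2.Close()
//	frame, mask := gocv.NewMat(), gocv.NewMat()
//	defer frame.Close()
//	defer mask.Close()
//	for vc.Read(&frame) {
//		mog2.Apply(frame, &mask)
//		// mask is now a single-channel foreground mask.
//	}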
// CalcOpticalFlowFarneback computes a dense optical flow using
// Gunnar Farneback's algorithm.
//
// For further details, please see:
// https://docs.opencv.org/master/dc/d6b/group__video__track.html#ga5d10ebbd59fe09c5f650289ec0ece5af
//
func CalcOpticalFlowFarneback(prevImg Mat, nextImg Mat, flow *Mat, pyrScale float64, levels int, winsize int,
iterations int, polyN int, polySigma float64, flags int) {
C.CalcOpticalFlowFarneback(prevImg.p, nextImg.p, flow.p, C.double(pyrScale), C.int(levels), C.int(winsize),
C.int(iterations), C.int(polyN), C.double(polySigma), C.int(flags))
return
}
// CalcOpticalFlowPyrLK calculates an optical flow for a sparse feature set using
// the iterative Lucas-Kanade method with pyramids.
//
// For further details, please see:
// https://docs.opencv.org/master/dc/d6b/group__video__track.html#ga473e4b886d0bcc6b65831eb88ed93323
//
func CalcOpticalFlowPyrLK(prevImg Mat, nextImg Mat, prevPts Mat, nextPts Mat, status *Mat, err *Mat) {
C.CalcOpticalFlowPyrLK(prevImg.p, nextImg.p, prevPts.p, nextPts.p, status.p, err.p)
return
}
// CalcOpticalFlowPyrLKWithParams calculates an optical flow for a sparse feature set using
// the iterative Lucas-Kanade method with pyramids.
//
// For further details, please see:
// https://docs.opencv.org/master/dc/d6b/group__video__track.html#ga473e4b886d0bcc6b65831eb88ed93323
//
func CalcOpticalFlowPyrLKWithParams(prevImg Mat, nextImg Mat, prevPts Mat, nextPts Mat, status *Mat, err *Mat,
winSize image.Point, maxLevel int, criteria TermCriteria, flags int, minEigThreshold float64) {
winSz := C.struct_Size{
width: C.int(winSize.X),
height: C.int(winSize.Y),
}
C.CalcOpticalFlowPyrLKWithParams(prevImg.p, nextImg.p, prevPts.p, nextPts.p, status.p, err.p, winSz, C.int(maxLevel), criteria.p, C.int(flags), C.double(minEigThreshold))
return
}
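// Usage sketch (illustrative only): dense Farneback flow between two
// consecutive grayscale frames prev and next (both gocv.Mat), using the
// parameter values commonly shown in the OpenCV tutorials:
//
//	flow := gocv.NewMat()
//	defer flow.Close()
//	gocv.CalcOpticalFlowFarneback(prev, next, &flow,
//		0.5, // pyramid scale
//		3,   // pyramid levels
//		15,  // averaging window size
//		3,   // iterations per pyramid level
//		5,   // polyN: neighborhood size for polynomial expansion
//		1.2, // polySigma
//		0)   // flags
//	// flow is a 2-channel float Mat of per-pixel (dx, dy) displacements.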
// FindTransformECC finds the geometric transform (warp) between two images in terms of the ECC criterion.
//
// For further details, please see:
// https://docs.opencv.org/4.x/dc/d6b/group__video__track.html#ga1aa357007eaec11e9ed03500ecbcbe47
//
func FindTransformECC(templateImage Mat, inputImage Mat, warpMatrix *Mat, motionType int, criteria TermCriteria, inputMask Mat, gaussFiltSize int) float64 {
return float64(C.FindTransformECC(templateImage.p, inputImage.p, warpMatrix.p, C.int(motionType), criteria.p, inputMask.p, C.int(gaussFiltSize)))
}
// Tracker is the base interface for object tracking.
//
// see: https://docs.opencv.org/master/d0/d0a/classcv_1_1Tracker.html
//
type Tracker interface {
// Close closes the tracker, as Trackers need to be Closed manually.
//
Close() error
// Init initializes the tracker with a known bounding box that surrounds the target.
// Note: this can only be called once. If you lose the object, you have to Close() the instance,
// create a new one, and call Init() on it again.
//
// see: https://docs.opencv.org/master/d0/d0a/classcv_1_1Tracker.html#a4d285747589b1bdd16d2e4f00c3255dc
//
Init(image Mat, boundingBox image.Rectangle) bool
// Update updates the tracker, returning a new bounding box and a boolean reporting whether the target is still being tracked.
//
// see: https://docs.opencv.org/master/d0/d0a/classcv_1_1Tracker.html#a549159bd0553e6a8de356f3866df1f18
//
Update(image Mat) (image.Rectangle, bool)
}
func trackerInit(trk C.Tracker, img Mat, boundingBox image.Rectangle) bool {
cBox := C.struct_Rect{
x: C.int(boundingBox.Min.X),
y: C.int(boundingBox.Min.Y),
width: C.int(boundingBox.Size().X),
height: C.int(boundingBox.Size().Y),
}
ret := C.Tracker_Init(trk, C.Mat(img.Ptr()), cBox)
return bool(ret)
}
func trackerUpdate(trk C.Tracker, img Mat) (image.Rectangle, bool) {
cBox := C.struct_Rect{}
ret := C.Tracker_Update(trk, C.Mat(img.Ptr()), &cBox)
rect := image.Rect(int(cBox.x), int(cBox.y), int(cBox.x+cBox.width), int(cBox.y+cBox.height))
return rect, bool(ret)
}
// TrackerMIL is a Tracker that uses the MIL algorithm. MIL trains a classifier in an online manner
// to separate the object from the background.
// Multiple Instance Learning avoids the drift problem for robust tracking.
//
// For further details, please see:
// https://docs.opencv.org/master/d0/d26/classcv_1_1TrackerMIL.html
//
type TrackerMIL struct {
p C.TrackerMIL
}
// NewTrackerMIL returns a new TrackerMIL.
func NewTrackerMIL() Tracker {
return TrackerMIL{p: C.TrackerMIL_Create()}
}
// Close closes the TrackerMIL.
func (trk TrackerMIL) Close() error {
C.TrackerMIL_Close(trk.p)
trk.p = nil
return nil
}
// Init initializes the TrackerMIL.
func (trk TrackerMIL) Init(img Mat, boundingBox image.Rectangle) bool {
return trackerInit(C.Tracker(trk.p), img, boundingBox)
}
// Update updates the TrackerMIL.
func (trk TrackerMIL) Update(img Mat) (image.Rectangle, bool) {
return trackerUpdate(C.Tracker(trk.p), img)
}
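// Usage sketch (illustrative only): tracking an object, assuming firstFrame
// holds the initial frame, initialBox its bounding box, and nextFrame each
// following frame:
//
//	trk := gocv.NewTrackerMIL()
//	defer trk.Close()
//	trk.Init(firstFrame, initialBox)
//	// ...for each following frame:
//	box, ok := trk.Update(nextFrame)
//	if ok {
//		gocv.Rectangle(&nextFrame, box, color.RGBA{G: 255}, 2)
//	}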

54
vendor/gocv.io/x/gocv/video.h generated vendored Normal file
View File

@ -0,0 +1,54 @@
#ifndef _OPENCV3_VIDEO_H_
#define _OPENCV3_VIDEO_H_
#ifdef __cplusplus
#include <opencv2/opencv.hpp>
#include <opencv2/video.hpp>
extern "C" {
#endif
#include "core.h"
#ifdef __cplusplus
typedef cv::Ptr<cv::BackgroundSubtractorMOG2>* BackgroundSubtractorMOG2;
typedef cv::Ptr<cv::BackgroundSubtractorKNN>* BackgroundSubtractorKNN;
typedef cv::Ptr<cv::Tracker>* Tracker;
typedef cv::Ptr<cv::TrackerMIL>* TrackerMIL;
typedef cv::Ptr<cv::TrackerGOTURN>* TrackerGOTURN;
#else
typedef void* BackgroundSubtractorMOG2;
typedef void* BackgroundSubtractorKNN;
typedef void* Tracker;
typedef void* TrackerMIL;
typedef void* TrackerGOTURN;
#endif
BackgroundSubtractorMOG2 BackgroundSubtractorMOG2_Create();
BackgroundSubtractorMOG2 BackgroundSubtractorMOG2_CreateWithParams(int history, double varThreshold, bool detectShadows);
void BackgroundSubtractorMOG2_Close(BackgroundSubtractorMOG2 b);
void BackgroundSubtractorMOG2_Apply(BackgroundSubtractorMOG2 b, Mat src, Mat dst);
BackgroundSubtractorKNN BackgroundSubtractorKNN_Create();
BackgroundSubtractorKNN BackgroundSubtractorKNN_CreateWithParams(int history, double dist2Threshold, bool detectShadows);
void BackgroundSubtractorKNN_Close(BackgroundSubtractorKNN b);
void BackgroundSubtractorKNN_Apply(BackgroundSubtractorKNN b, Mat src, Mat dst);
void CalcOpticalFlowPyrLK(Mat prevImg, Mat nextImg, Mat prevPts, Mat nextPts, Mat status, Mat err);
void CalcOpticalFlowPyrLKWithParams(Mat prevImg, Mat nextImg, Mat prevPts, Mat nextPts, Mat status, Mat err, Size winSize, int maxLevel, TermCriteria criteria, int flags, double minEigThreshold);
void CalcOpticalFlowFarneback(Mat prevImg, Mat nextImg, Mat flow, double pyrScale, int levels,
int winsize, int iterations, int polyN, double polySigma, int flags);
double FindTransformECC(Mat templateImage, Mat inputImage, Mat warpMatrix, int motionType, TermCriteria criteria, Mat inputMask, int gaussFiltSize);
bool Tracker_Init(Tracker self, Mat image, Rect boundingBox);
bool Tracker_Update(Tracker self, Mat image, Rect* boundingBox);
TrackerMIL TrackerMIL_Create();
void TrackerMIL_Close(TrackerMIL self);
#ifdef __cplusplus
}
#endif
#endif //_OPENCV3_VIDEO_H_

71
vendor/gocv.io/x/gocv/videoio.cpp generated vendored Normal file
View File

@ -0,0 +1,71 @@
#include "videoio.h"
// VideoCapture
VideoCapture VideoCapture_New() {
return new cv::VideoCapture();
}
void VideoCapture_Close(VideoCapture v) {
delete v;
}
bool VideoCapture_Open(VideoCapture v, const char* uri) {
return v->open(uri);
}
bool VideoCapture_OpenWithAPI(VideoCapture v, const char* uri, int apiPreference) {
return v->open(uri, apiPreference);
}
bool VideoCapture_OpenDevice(VideoCapture v, int device) {
return v->open(device);
}
bool VideoCapture_OpenDeviceWithAPI(VideoCapture v, int device, int apiPreference) {
return v->open(device, apiPreference);
}
void VideoCapture_Set(VideoCapture v, int prop, double param) {
v->set(prop, param);
}
double VideoCapture_Get(VideoCapture v, int prop) {
return v->get(prop);
}
int VideoCapture_IsOpened(VideoCapture v) {
return v->isOpened();
}
int VideoCapture_Read(VideoCapture v, Mat buf) {
return v->read(*buf);
}
void VideoCapture_Grab(VideoCapture v, int skip) {
for (int i = 0; i < skip; i++) {
v->grab();
}
}
// VideoWriter
VideoWriter VideoWriter_New() {
return new cv::VideoWriter();
}
void VideoWriter_Close(VideoWriter vw) {
delete vw;
}
void VideoWriter_Open(VideoWriter vw, const char* name, const char* codec, double fps, int width,
int height, bool isColor) {
int codecCode = cv::VideoWriter::fourcc(codec[0], codec[1], codec[2], codec[3]);
vw->open(name, codecCode, fps, cv::Size(width, height), isColor);
}
int VideoWriter_IsOpened(VideoWriter vw) {
return vw->isOpened();
}
void VideoWriter_Write(VideoWriter vw, Mat img) {
*vw << *img;
}

504
vendor/gocv.io/x/gocv/videoio.go generated vendored Normal file
View File

@ -0,0 +1,504 @@
package gocv
/*
#include <stdlib.h>
#include "videoio.h"
*/
import "C"
import (
"errors"
"fmt"
"strconv"
"sync"
"unsafe"
)
// Select preferred API for a capture object.
// Note: Backends are available only if they have been built with your OpenCV binaries.
type VideoCaptureAPI int
const (
// Auto detect == 0
VideoCaptureAny VideoCaptureAPI = 0
// Video For Windows (obsolete, removed)
VideoCaptureVFW VideoCaptureAPI = 200
// V4L/V4L2 capturing support
VideoCaptureV4L VideoCaptureAPI = 200
// Same as VideoCaptureV4L
VideoCaptureV4L2 VideoCaptureAPI = 200
// IEEE 1394 drivers
VideoCaptureFirewire VideoCaptureAPI = 300
// Same value as VideoCaptureFirewire
VideoCaptureFireware VideoCaptureAPI = 300
// Same value as VideoCaptureFirewire
VideoCaptureIEEE1394 VideoCaptureAPI = 300
// Same value as VideoCaptureFirewire
VideoCaptureDC1394 VideoCaptureAPI = 300
// Same value as VideoCaptureFirewire
VideoCaptureCMU1394 VideoCaptureAPI = 300
// QuickTime (obsolete, removed)
VideoCaptureQT VideoCaptureAPI = 500
// Unicap drivers (obsolete, removed)
VideoCaptureUnicap VideoCaptureAPI = 600
// DirectShow (via videoInput)
VideoCaptureDshow VideoCaptureAPI = 700
// PvAPI, Prosilica GigE SDK
VideoCapturePvAPI VideoCaptureAPI = 800
// OpenNI (for Kinect)
VideoCaptureOpenNI VideoCaptureAPI = 900
// OpenNI (for Asus Xtion)
VideoCaptureOpenNIAsus VideoCaptureAPI = 910
// Android - not used
VideoCaptureAndroid VideoCaptureAPI = 1000
// XIMEA Camera API
VideoCaptureXiAPI VideoCaptureAPI = 1100
// AVFoundation framework for iOS (OS X Lion will have the same API)
VideoCaptureAVFoundation VideoCaptureAPI = 1200
// Smartek Giganetix GigEVisionSDK
VideoCaptureGiganetix VideoCaptureAPI = 1300
// Microsoft Media Foundation (via videoInput)
VideoCaptureMSMF VideoCaptureAPI = 1400
// Microsoft Windows Runtime using Media Foundation
VideoCaptureWinRT VideoCaptureAPI = 1410
// RealSense (former Intel Perceptual Computing SDK)
VideoCaptureIntelPerc VideoCaptureAPI = 1500
// Synonym for VideoCaptureIntelPerc
VideoCaptureRealsense VideoCaptureAPI = 1500
// OpenNI2 (for Kinect)
VideoCaptureOpenNI2 VideoCaptureAPI = 1600
// OpenNI2 (for Asus Xtion and Occipital Structure sensors)
VideoCaptureOpenNI2Asus VideoCaptureAPI = 1610
// gPhoto2 connection
VideoCaptureGPhoto2 VideoCaptureAPI = 1700
// GStreamer
VideoCaptureGstreamer VideoCaptureAPI = 1800
// Open and record video file or stream using the FFMPEG library
VideoCaptureFFmpeg VideoCaptureAPI = 1900
// OpenCV Image Sequence (e.g. img_%02d.jpg)
VideoCaptureImages VideoCaptureAPI = 2000
// Aravis SDK
VideoCaptureAravis VideoCaptureAPI = 2100
// Built-in OpenCV MotionJPEG codec
VideoCaptureOpencvMjpeg VideoCaptureAPI = 2200
// Intel MediaSDK
VideoCaptureIntelMFX VideoCaptureAPI = 2300
// XINE engine (Linux)
VideoCaptureXINE VideoCaptureAPI = 2400
)
// VideoCaptureProperties are the properties used for VideoCapture operations.
type VideoCaptureProperties int
const (
// VideoCapturePosMsec contains current position of the
// video file in milliseconds.
VideoCapturePosMsec VideoCaptureProperties = 0
// VideoCapturePosFrames 0-based index of the frame to be
// decoded/captured next.
VideoCapturePosFrames VideoCaptureProperties = 1
// VideoCapturePosAVIRatio relative position of the video file:
// 0=start of the film, 1=end of the film.
VideoCapturePosAVIRatio VideoCaptureProperties = 2
// VideoCaptureFrameWidth is width of the frames in the video stream.
VideoCaptureFrameWidth VideoCaptureProperties = 3
// VideoCaptureFrameHeight controls height of frames in the video stream.
VideoCaptureFrameHeight VideoCaptureProperties = 4
// VideoCaptureFPS controls capture frame rate.
VideoCaptureFPS VideoCaptureProperties = 5
// VideoCaptureFOURCC contains the 4-character code of codec.
// see VideoWriter::fourcc for details.
VideoCaptureFOURCC VideoCaptureProperties = 6
// VideoCaptureFrameCount contains number of frames in the video file.
VideoCaptureFrameCount VideoCaptureProperties = 7
// VideoCaptureFormat format of the Mat objects returned by
// VideoCapture::retrieve().
VideoCaptureFormat VideoCaptureProperties = 8
// VideoCaptureMode contains backend-specific value indicating
// the current capture mode.
VideoCaptureMode VideoCaptureProperties = 9
// VideoCaptureBrightness is brightness of the image
// (only for those cameras that support).
VideoCaptureBrightness VideoCaptureProperties = 10
// VideoCaptureContrast is contrast of the image
// (only for cameras that support it).
VideoCaptureContrast VideoCaptureProperties = 11
// VideoCaptureSaturation saturation of the image
// (only for cameras that support).
VideoCaptureSaturation VideoCaptureProperties = 12
// VideoCaptureHue hue of the image (only for cameras that support).
VideoCaptureHue VideoCaptureProperties = 13
// VideoCaptureGain is the gain of the capture image.
// (only for those cameras that support).
VideoCaptureGain VideoCaptureProperties = 14
// VideoCaptureExposure is the exposure of the capture image.
// (only for those cameras that support).
VideoCaptureExposure VideoCaptureProperties = 15
// VideoCaptureConvertRGB is a boolean flag indicating whether
// images should be converted to RGB.
VideoCaptureConvertRGB VideoCaptureProperties = 16
// VideoCaptureWhiteBalanceBlueU is currently unsupported.
VideoCaptureWhiteBalanceBlueU VideoCaptureProperties = 17
// VideoCaptureRectification is the rectification flag for stereo cameras.
// Note: only supported by DC1394 v 2.x backend currently.
VideoCaptureRectification VideoCaptureProperties = 18
// VideoCaptureMonochrome indicates whether images should be
// converted to monochrome.
VideoCaptureMonochrome VideoCaptureProperties = 19
// VideoCaptureSharpness controls image capture sharpness.
VideoCaptureSharpness VideoCaptureProperties = 20
// VideoCaptureAutoExposure controls the DC1394 exposure control
// done by camera, user can adjust reference level using this feature.
VideoCaptureAutoExposure VideoCaptureProperties = 21
// VideoCaptureGamma controls video capture gamma.
VideoCaptureGamma VideoCaptureProperties = 22
// VideoCaptureTemperature controls video capture temperature.
VideoCaptureTemperature VideoCaptureProperties = 23
// VideoCaptureTrigger controls video capture trigger.
VideoCaptureTrigger VideoCaptureProperties = 24
// VideoCaptureTriggerDelay controls video capture trigger delay.
VideoCaptureTriggerDelay VideoCaptureProperties = 25
// VideoCaptureWhiteBalanceRedV controls video capture setting for
// white balance.
VideoCaptureWhiteBalanceRedV VideoCaptureProperties = 26
// VideoCaptureZoom controls video capture zoom.
VideoCaptureZoom VideoCaptureProperties = 27
// VideoCaptureFocus controls video capture focus.
VideoCaptureFocus VideoCaptureProperties = 28
// VideoCaptureGUID controls video capture GUID.
VideoCaptureGUID VideoCaptureProperties = 29
// VideoCaptureISOSpeed controls video capture ISO speed.
VideoCaptureISOSpeed VideoCaptureProperties = 30
// VideoCaptureBacklight controls video capture backlight.
VideoCaptureBacklight VideoCaptureProperties = 32
// VideoCapturePan controls video capture pan.
VideoCapturePan VideoCaptureProperties = 33
// VideoCaptureTilt controls video capture tilt.
VideoCaptureTilt VideoCaptureProperties = 34
// VideoCaptureRoll controls video capture roll.
VideoCaptureRoll VideoCaptureProperties = 35
// VideoCaptureIris controls video capture iris.
VideoCaptureIris VideoCaptureProperties = 36
// VideoCaptureSettings is the pop-up video/camera filter dialog. Note:
// only supported by DSHOW backend currently. The property value is ignored.
VideoCaptureSettings VideoCaptureProperties = 37
// VideoCaptureBufferSize controls video capture buffer size.
VideoCaptureBufferSize VideoCaptureProperties = 38
// VideoCaptureAutoFocus controls video capture auto focus.
VideoCaptureAutoFocus VideoCaptureProperties = 39
// VideoCaptureSarNumerator controls the sample aspect ratio: num/den (num)
VideoCaptureSarNumerator VideoCaptureProperties = 40
// VideoCaptureSarDenominator controls the sample aspect ratio: num/den (den)
VideoCaptureSarDenominator VideoCaptureProperties = 41
// VideoCaptureBackend is the current api backend (VideoCaptureAPI). Read-only property.
VideoCaptureBackend VideoCaptureProperties = 42
// VideoCaptureChannel controls the video input or channel number (only for those cameras that support).
VideoCaptureChannel VideoCaptureProperties = 43
// VideoCaptureAutoWB controls the auto white-balance.
VideoCaptureAutoWB VideoCaptureProperties = 44
// VideoCaptureWBTemperature controls the white-balance color temperature
VideoCaptureWBTemperature VideoCaptureProperties = 45
// VideoCaptureCodecPixelFormat shows the codec's pixel format (4-character code). Read-only property.
// Subset of AV_PIX_FMT_* or -1 if unknown.
VideoCaptureCodecPixelFormat VideoCaptureProperties = 46
// VideoCaptureBitrate displays the video bitrate in kbits/s. Read-only property.
VideoCaptureBitrate VideoCaptureProperties = 47
)
// VideoCapture is a wrapper around the OpenCV VideoCapture class.
//
// For further details, please see:
// http://docs.opencv.org/master/d8/dfe/classcv_1_1VideoCapture.html
//
type VideoCapture struct {
p C.VideoCapture
}
// VideoCaptureFile opens a VideoCapture from a file and prepares
// to start capturing. It returns error if it fails to open the file stored in uri path.
func VideoCaptureFile(uri string) (vc *VideoCapture, err error) {
vc = &VideoCapture{p: C.VideoCapture_New()}
cURI := C.CString(uri)
defer C.free(unsafe.Pointer(cURI))
if !C.VideoCapture_Open(vc.p, cURI) {
err = fmt.Errorf("Error opening file: %s", uri)
}
return
}
// VideoCaptureFileWithAPI opens a VideoCapture from a file with the given API
// preference and prepares to start capturing. It returns an error if it fails
// to open the file stored in the uri path.
func VideoCaptureFileWithAPI(uri string, apiPreference VideoCaptureAPI) (vc *VideoCapture, err error) {
vc = &VideoCapture{p: C.VideoCapture_New()}
cURI := C.CString(uri)
defer C.free(unsafe.Pointer(cURI))
if !C.VideoCapture_OpenWithAPI(vc.p, cURI, C.int(apiPreference)) {
err = fmt.Errorf("Error opening file: %s with api backend: %d", uri, apiPreference)
}
return
}
// VideoCaptureDevice opens a VideoCapture from a device and prepares
// to start capturing. It returns an error if it fails to open the video device.
func VideoCaptureDevice(device int) (vc *VideoCapture, err error) {
vc = &VideoCapture{p: C.VideoCapture_New()}
if !C.VideoCapture_OpenDevice(vc.p, C.int(device)) {
err = fmt.Errorf("Error opening device: %d", device)
}
return
}
// VideoCaptureDeviceWithAPI opens a VideoCapture from a device with the given
// API preference. It returns an error if it fails to open the video device.
func VideoCaptureDeviceWithAPI(device int, apiPreference VideoCaptureAPI) (vc *VideoCapture, err error) {
vc = &VideoCapture{p: C.VideoCapture_New()}
if !C.VideoCapture_OpenDeviceWithAPI(vc.p, C.int(device), C.int(apiPreference)) {
err = fmt.Errorf("Error opening device: %d with api backend: %d", device, apiPreference)
}
return
}
// Close VideoCapture object.
func (v *VideoCapture) Close() error {
C.VideoCapture_Close(v.p)
v.p = nil
return nil
}
// Set parameter with property (=key).
func (v *VideoCapture) Set(prop VideoCaptureProperties, param float64) {
C.VideoCapture_Set(v.p, C.int(prop), C.double(param))
}
// Get parameter with property (=key).
func (v VideoCapture) Get(prop VideoCaptureProperties) float64 {
return float64(C.VideoCapture_Get(v.p, C.int(prop)))
}
// IsOpened returns whether the VideoCapture has been opened to read from
// a file or capture device.
func (v *VideoCapture) IsOpened() bool {
isOpened := C.VideoCapture_IsOpened(v.p)
return isOpened != 0
}
// Read reads the next frame from the VideoCapture to the Mat passed in
// as the param. It returns false if the VideoCapture cannot read a frame.
func (v *VideoCapture) Read(m *Mat) bool {
return C.VideoCapture_Read(v.p, m.p) != 0
}
// Grab skips a specific number of frames.
func (v *VideoCapture) Grab(skip int) {
C.VideoCapture_Grab(v.p, C.int(skip))
}
// CodecString returns a string representation of FourCC bytes, i.e. the name of a codec
func (v *VideoCapture) CodecString() string {
res := ""
hexes := []int64{0xff, 0xff00, 0xff0000, 0xff000000}
for i, h := range hexes {
res += string(rune(int64(v.Get(VideoCaptureFOURCC)) & h >> (uint(i * 8))))
}
return res
}
// ToCodec returns a float64 representation of FourCC bytes.
func (v *VideoCapture) ToCodec(codec string) float64 {
if len(codec) != 4 {
return -1.0
}
c1 := []rune(string(codec[0]))[0]
c2 := []rune(string(codec[1]))[0]
c3 := []rune(string(codec[2]))[0]
c4 := []rune(string(codec[3]))[0]
return float64((c1 & 255) + ((c2 & 255) << 8) + ((c3 & 255) << 16) + ((c4 & 255) << 24))
}
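// Worked example (illustrative only): FourCC packs the four ASCII bytes
// little-endian, so for "MJPG" ToCodec returns
// 'M' + 'J'<<8 + 'P'<<16 + 'G'<<24 = 0x47504A4D = 1196444237:
//
//	vc.Set(gocv.VideoCaptureFOURCC, vc.ToCodec("MJPG"))
//	fmt.Println(vc.CodecString()) // "MJPG" if the backend accepted it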
// VideoWriter is a wrapper around the OpenCV VideoWriter class.
//
// For further details, please see:
// http://docs.opencv.org/master/dd/d9e/classcv_1_1VideoWriter.html
//
type VideoWriter struct {
mu *sync.RWMutex
p C.VideoWriter
}
// VideoWriterFile opens a VideoWriter with a specific output file.
// The "codec" param should be the four-letter code for the desired output
// codec, for example "MJPG".
//
// For further details, please see:
// http://docs.opencv.org/master/dd/d9e/classcv_1_1VideoWriter.html#a0901c353cd5ea05bba455317dab81130
//
func VideoWriterFile(name string, codec string, fps float64, width int, height int, isColor bool) (vw *VideoWriter, err error) {
if fps == 0 || width == 0 || height == 0 {
return nil, fmt.Errorf("one of the numerical parameters "+
"is equal to zero: FPS: %f, width: %d, height: %d", fps, width, height)
}
vw = &VideoWriter{
p: C.VideoWriter_New(),
mu: &sync.RWMutex{},
}
cName := C.CString(name)
defer C.free(unsafe.Pointer(cName))
cCodec := C.CString(codec)
defer C.free(unsafe.Pointer(cCodec))
C.VideoWriter_Open(vw.p, cName, cCodec, C.double(fps), C.int(width), C.int(height), C.bool(isColor))
return
}
// Close VideoWriter object.
func (vw *VideoWriter) Close() error {
C.VideoWriter_Close(vw.p)
vw.p = nil
return nil
}
// IsOpened checks if the VideoWriter is open and ready to be written to.
//
// For further details, please see:
// http://docs.opencv.org/master/dd/d9e/classcv_1_1VideoWriter.html#a9a40803e5f671968ac9efa877c984d75
//
func (vw *VideoWriter) IsOpened() bool {
isOpened := C.VideoWriter_IsOpened(vw.p)
return isOpened != 0
}
// Write the next video frame from the Mat image to the open VideoWriter.
//
// For further details, please see:
// http://docs.opencv.org/master/dd/d9e/classcv_1_1VideoWriter.html#a3115b679d612a6a0b5864a0c88ed4b39
//
func (vw *VideoWriter) Write(img Mat) error {
vw.mu.Lock()
defer vw.mu.Unlock()
C.VideoWriter_Write(vw.p, img.p)
return nil
}
// OpenVideoCapture returns a VideoCapture specified by device ID if v is a
// number, or a VideoCapture created from a video file, URL, or GStreamer
// pipeline if v is a string.
func OpenVideoCapture(v interface{}) (*VideoCapture, error) {
switch vv := v.(type) {
case int:
return VideoCaptureDevice(vv)
case string:
id, err := strconv.Atoi(vv)
if err == nil {
return VideoCaptureDevice(id)
}
return VideoCaptureFile(vv)
default:
return nil, errors.New("argument must be int or string")
}
}
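// OpenVideoCaptureWithAPI is the same as OpenVideoCapture, but with the given
// API preference.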
func OpenVideoCaptureWithAPI(v interface{}, apiPreference VideoCaptureAPI) (*VideoCapture, error) {
switch vv := v.(type) {
case int:
return VideoCaptureDeviceWithAPI(vv, apiPreference)
case string:
id, err := strconv.Atoi(vv)
if err == nil {
return VideoCaptureDeviceWithAPI(id, apiPreference)
}
return VideoCaptureFileWithAPI(vv, apiPreference)
default:
return nil, errors.New("argument must be int or string")
}
}
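// Usage sketch (illustrative only): re-encoding the first 100 frames of
// input.mp4 to Motion-JPEG, assuming the file exists and an FFmpeg-enabled
// OpenCV build:
//
//	vc, err := gocv.OpenVideoCapture("input.mp4")
//	if err != nil {
//		panic(err)
//	}
//	defer vc.Close()
//	w := int(vc.Get(gocv.VideoCaptureFrameWidth))
//	h := int(vc.Get(gocv.VideoCaptureFrameHeight))
//	vw, err := gocv.VideoWriterFile("out.avi", "MJPG", vc.Get(gocv.VideoCaptureFPS), w, h, true)
//	if err != nil {
//		panic(err)
//	}
//	defer vw.Close()
//	img := gocv.NewMat()
//	defer img.Close()
//	for i := 0; i < 100 && vc.Read(&img); i++ {
//		vw.Write(img)
//	}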

44
vendor/gocv.io/x/gocv/videoio.h generated vendored Normal file
View File

@ -0,0 +1,44 @@
#ifndef _OPENCV3_VIDEOIO_H_
#define _OPENCV3_VIDEOIO_H_
#ifdef __cplusplus
#include <opencv2/opencv.hpp>
extern "C" {
#endif
#include "core.h"
#ifdef __cplusplus
typedef cv::VideoCapture* VideoCapture;
typedef cv::VideoWriter* VideoWriter;
#else
typedef void* VideoCapture;
typedef void* VideoWriter;
#endif
// VideoCapture
VideoCapture VideoCapture_New();
void VideoCapture_Close(VideoCapture v);
bool VideoCapture_Open(VideoCapture v, const char* uri);
bool VideoCapture_OpenWithAPI(VideoCapture v, const char* uri, int apiPreference);
bool VideoCapture_OpenDevice(VideoCapture v, int device);
bool VideoCapture_OpenDeviceWithAPI(VideoCapture v, int device, int apiPreference);
void VideoCapture_Set(VideoCapture v, int prop, double param);
double VideoCapture_Get(VideoCapture v, int prop);
int VideoCapture_IsOpened(VideoCapture v);
int VideoCapture_Read(VideoCapture v, Mat buf);
void VideoCapture_Grab(VideoCapture v, int skip);
// VideoWriter
VideoWriter VideoWriter_New();
void VideoWriter_Close(VideoWriter vw);
void VideoWriter_Open(VideoWriter vw, const char* name, const char* codec, double fps, int width,
int height, bool isColor);
int VideoWriter_IsOpened(VideoWriter vw);
void VideoWriter_Write(VideoWriter vw, Mat img);
#ifdef __cplusplus
}
#endif
#endif //_OPENCV3_VIDEOIO_H_

159
vendor/gocv.io/x/gocv/videoio_string.go generated vendored Normal file
View File

@ -0,0 +1,159 @@
package gocv
func (c VideoCaptureAPI) String() string {
switch c {
case VideoCaptureAny:
return "video-capture-any"
case VideoCaptureV4L2:
return "video-capture-v4l2"
case VideoCaptureFirewire:
return "video-capture-firewire"
case VideoCaptureQT:
return "video-capture-qt"
case VideoCaptureUnicap:
return "video-capture-unicap"
case VideoCaptureDshow:
return "video-capture-dshow"
case VideoCapturePvAPI:
return "video-capture-pvapi"
case VideoCaptureOpenNI:
return "video-capture-openni"
case VideoCaptureOpenNIAsus:
return "video-capture-openni-asus"
case VideoCaptureAndroid:
return "video-capture-android"
case VideoCaptureXiAPI:
return "video-capture-xiapi"
case VideoCaptureAVFoundation:
return "video-capture-av-foundation"
case VideoCaptureGiganetix:
return "video-capture-giganetix"
case VideoCaptureMSMF:
return "video-capture-msmf"
case VideoCaptureWinRT:
return "video-capture-winrt"
case VideoCaptureIntelPerc:
return "video-capture-intel-perc"
case VideoCaptureOpenNI2:
return "video-capture-openni2"
case VideoCaptureOpenNI2Asus:
return "video-capture-openni2-asus"
case VideoCaptureGPhoto2:
return "video-capture-gphoto2"
case VideoCaptureGstreamer:
return "video-capture-gstreamer"
case VideoCaptureFFmpeg:
return "video-capture-ffmpeg"
case VideoCaptureImages:
return "video-capture-images"
case VideoCaptureAravis:
return "video-capture-aravis"
case VideoCaptureOpencvMjpeg:
return "video-capture-opencv-mjpeg"
case VideoCaptureIntelMFX:
return "video-capture-intel-mfx"
case VideoCaptureXINE:
return "video-capture-xine"
}
return ""
}
func (c VideoCaptureProperties) String() string {
switch c {
case VideoCapturePosMsec:
return "video-capture-pos-msec"
case VideoCapturePosFrames:
return "video-capture-pos-frames"
case VideoCapturePosAVIRatio:
return "video-capture-pos-avi-ratio"
case VideoCaptureFrameWidth:
return "video-capture-frame-width"
case VideoCaptureFrameHeight:
return "video-capture-frame-height"
case VideoCaptureFPS:
return "video-capture-fps"
case VideoCaptureFOURCC:
return "video-capture-fourcc"
case VideoCaptureFrameCount:
return "video-capture-frame-count"
case VideoCaptureFormat:
return "video-capture-format"
case VideoCaptureMode:
return "video-capture-mode"
case VideoCaptureBrightness:
return "video-capture-brightness"
case VideoCaptureContrast:
return "video-capture-contrast"
case VideoCaptureSaturation:
return "video-capture-saturation"
case VideoCaptureHue:
return "video-capture-hue"
case VideoCaptureGain:
return "video-capture-gain"
case VideoCaptureExposure:
return "video-capture-exposure"
case VideoCaptureConvertRGB:
return "video-capture-convert-rgb"
case VideoCaptureWhiteBalanceBlueU:
return "video-capture-white-balanced-blue-u"
case VideoCaptureWhiteBalanceRedV:
return "video-capture-white-balanced-red-v"
case VideoCaptureRectification:
return "video-capture-rectification"
case VideoCaptureMonochrome:
return "video-capture-monochrome"
case VideoCaptureSharpness:
return "video-capture-sharpness"
case VideoCaptureAutoExposure:
return "video-capture-auto-exposure"
case VideoCaptureGamma:
return "video-capture-gamma"
case VideoCaptureTemperature:
return "video-capture-temperature"
case VideoCaptureTrigger:
return "video-capture-trigger"
case VideoCaptureTriggerDelay:
return "video-capture-trigger-delay"
case VideoCaptureZoom:
return "video-capture-zoom"
case VideoCaptureFocus:
return "video-capture-focus"
case VideoCaptureGUID:
return "video-capture-guid"
case VideoCaptureISOSpeed:
return "video-capture-iso-speed"
case VideoCaptureBacklight:
return "video-capture-backlight"
case VideoCapturePan:
return "video-capture-pan"
case VideoCaptureTilt:
return "video-capture-tilt"
case VideoCaptureRoll:
return "video-capture-roll"
case VideoCaptureIris:
return "video-capture-iris"
case VideoCaptureSettings:
return "video-capture-settings"
case VideoCaptureBufferSize:
return "video-capture-buffer-size"
case VideoCaptureAutoFocus:
return "video-capture-auto-focus"
case VideoCaptureSarNumerator:
return "video-capture-sar-numerator"
case VideoCaptureSarDenominator:
return "video-capture-sar-denominator"
case VideoCaptureBackend:
return "video-capture-backend"
case VideoCaptureChannel:
return "video-capture-channel"
case VideoCaptureAutoWB:
return "video-capture-auto-wb"
case VideoCaptureWBTemperature:
return "video-capture-wb-temperature"
case VideoCaptureCodecPixelFormat:
return "video-capture-pixel-format"
case VideoCaptureBitrate:
return "video-capture-bitrate"
}
return ""
}

46
vendor/gocv.io/x/gocv/win_build_opencv.cmd generated vendored Normal file
View File

@ -0,0 +1,46 @@
echo off
if not exist "C:\opencv" mkdir "C:\opencv"
if not exist "C:\opencv\build" mkdir "C:\opencv\build"
echo Downloading OpenCV sources
echo.
echo For monitoring the download progress please check the C:\opencv directory.
echo.
REM This is why there is no progress bar:
REM https://github.com/PowerShell/PowerShell/issues/2138
echo Downloading: opencv-4.6.0.zip [91MB]
powershell -command "[Net.ServicePointManager]::SecurityProtocol = [Net.SecurityProtocolType]::Tls12; $ProgressPreference = 'SilentlyContinue'; Invoke-WebRequest -Uri https://github.com/opencv/opencv/archive/4.6.0.zip -OutFile c:\opencv\opencv-4.6.0.zip"
echo Extracting...
powershell -command "$ProgressPreference = 'SilentlyContinue'; Expand-Archive -Path c:\opencv\opencv-4.6.0.zip -DestinationPath c:\opencv"
del c:\opencv\opencv-4.6.0.zip /q
echo.
echo Downloading: opencv_contrib-4.6.0.zip [58MB]
powershell -command "[Net.ServicePointManager]::SecurityProtocol = [Net.SecurityProtocolType]::Tls12; $ProgressPreference = 'SilentlyContinue'; Invoke-WebRequest -Uri https://github.com/opencv/opencv_contrib/archive/4.6.0.zip -OutFile c:\opencv\opencv_contrib-4.6.0.zip"
echo Extracting...
powershell -command "$ProgressPreference = 'SilentlyContinue'; Expand-Archive -Path c:\opencv\opencv_contrib-4.6.0.zip -DestinationPath c:\opencv"
del c:\opencv\opencv_contrib-4.6.0.zip /q
echo.
echo Done with downloading and extracting sources.
echo.
echo on
cd /D C:\opencv\build
set PATH=%PATH%;C:\Program Files (x86)\CMake\bin;C:\mingw-w64\x86_64-8.1.0-posix-seh-rt_v6-rev0\mingw64\bin
if [%1]==[static] (
echo Build static opencv
set enable_shared=OFF
) else (
set enable_shared=ON
)
cmake C:\opencv\opencv-4.6.0 -G "MinGW Makefiles" -BC:\opencv\build -DENABLE_CXX11=ON -DOPENCV_EXTRA_MODULES_PATH=C:\opencv\opencv_contrib-4.6.0\modules -DBUILD_SHARED_LIBS=%enable_shared% -DWITH_IPP=OFF -DWITH_MSMF=OFF -DBUILD_EXAMPLES=OFF -DBUILD_TESTS=OFF -DBUILD_PERF_TESTS=OFF -DBUILD_opencv_java=OFF -DBUILD_opencv_python=OFF -DBUILD_opencv_python2=OFF -DBUILD_opencv_python3=OFF -DBUILD_DOCS=OFF -DENABLE_PRECOMPILED_HEADERS=OFF -DBUILD_opencv_saliency=OFF -DBUILD_opencv_wechat_qrcode=ON -DCPU_DISPATCH= -DOPENCV_GENERATE_PKGCONFIG=ON -DWITH_OPENCL_D3D11_NV=OFF -DOPENCV_ALLOCATOR_STATS_COUNTER_TYPE=int64_t -Wno-dev
mingw32-make -j%NUMBER_OF_PROCESSORS%
mingw32-make install
rmdir c:\opencv\opencv-4.6.0 /s /q
rmdir c:\opencv\opencv_contrib-4.6.0 /s /q
chdir /D %GOPATH%\src\gocv.io\x\gocv

3
vendor/modules.txt vendored
View File

@ -30,6 +30,9 @@ go.uber.org/zap/internal/bufferpool
go.uber.org/zap/internal/color
go.uber.org/zap/internal/exit
go.uber.org/zap/zapcore
# gocv.io/x/gocv v0.31.0
## explicit; go 1.13
gocv.io/x/gocv
# golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4
## explicit; go 1.11
golang.org/x/net/internal/socks