반응형

Build a windows dialog-based OpenCV perspective transform program.


Change Subsystem setting from Console to Windows if necessary.


Add a resource and create a dialog box.


Include and declare necessities.

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
#include <Windows.h>
#include <opencv2/opencv.hpp>
#include "resource.h"   // dialog/control IDs (IDD_DIALOG1, IDC_WIDTH, IDC_HEIGHT)
 
using namespace cv;
 
// Shows the common "Open File" dialog; fills filename on success.
bool getFile(PTCHAR filename);
// Mouse callback for the source-image window; performs the perspective transform.
void onMouse(int event, int x, int y, int flags, void* userdata);
// Dialog procedure for the target-size settings dialog.
INT_PTR MainDlgProc(HWND hDlg, UINT iMessage, WPARAM wParam, LPARAM lParam);
 
HWND hDlgMain;          // handle of the settings dialog, saved in WM_INITDIALOG
 
Mat sourceImage;        // image the user clicks corner points on
Mat targetImage;        // perspective-corrected output image
// TTC card ratio
int width = 850;        // target image width  (editable via the dialog)
int height = 550;       // target image height (editable via the dialog)
Point2f srcQuad[4], dstQuad[4];     // clicked corners / destination rectangle



Define WinMain which calls getFile() and DialogBox().

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
int APIENTRY WinMain(HINSTANCE hInstance, HINSTANCE hPrevInstance, LPSTR lpszCmdParam, int nCmdShow)
{
    TCHAR filename[MAX_PATH] = "";
    if (!getFile(filename)) {
        return -1;
    }
 
    sourceImage = imread(filename);
    if (sourceImage.empty()) {
        MessageBox(NULL, TEXT("Image load failed"), TEXT("No image loaded"), MB_OK);
 
        return -1;
    }
 
    namedWindow("sourceImage");
    setMouseCallback("sourceImage", onMouse);
 
    imshow("sourceImage", sourceImage);
 
    DialogBox(hInstance, MAKEINTRESOURCE(IDD_DIALOG1), HWND_DESKTOP, MainDlgProc);
 
    return 0;
}



getFile() retrieves an image file.

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
bool getFile(PTCHAR filename)
{
    OPENFILENAME ofn;
    memset(&ofn, 0sizeof(OPENFILENAME));
    ofn.lStructSize = sizeof(OPENFILENAME);
    ofn.hwndOwner = NULL;
    ofn.lpstrFilter = TEXT("all(*.*)\0*.*\0jpg(*.jpg)\0*.jpg\0png(*.png)\0*.png\0bmp(*.bmp)\0*.bmp\0");
    ofn.lpstrFile = filename;
    ofn.nMaxFile = MAX_PATH;
 
    if (GetOpenFileName(&ofn) != 0) {
        //MessageBox(NULL, filename, TEXT("File opened."), MB_OK);
 
        return true;
    }
    else {
        MessageBox(NULL, TEXT("File open failed"), TEXT("No file selected"), MB_OK);
 
        return false;
    }
}



Define a mouse callback function that processes perspective transform.

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
void onMouse(int event, int x, int y, int flags, void* userdata)
{
    static int cnt = 0;
 
    if (event == EVENT_LBUTTONDOWN) {
        if (cnt < 4) {
            srcQuad[cnt++= Point2f(x, y);
 
            circle(sourceImage, Point2f(x, y), 3, Scalar(00255), -1);
            imshow("sourceImage", sourceImage);
 
            if (cnt == 4) {
                /*
                width = max(
                    sqrt(pow(srcQuad[0].x - srcQuad[1].x, 2)) + sqrt(pow(srcQuad[0].y - srcQuad[1].y, 2)),
                    sqrt(pow(srcQuad[2].x - srcQuad[3].x, 2)) + sqrt(pow(srcQuad[2].y - srcQuad[3].y, 2))
                );
                height = max(
                    sqrt(pow(srcQuad[1].x - srcQuad[2].x, 2)) + sqrt(pow(srcQuad[1].y - srcQuad[2].y, 2)),
                    sqrt(pow(srcQuad[3].x - srcQuad[0].x, 2)) + sqrt(pow(srcQuad[3].y - srcQuad[0].y, 2))
                );
                */            
 
                dstQuad[0= Point2f(00);
                dstQuad[1= Point2f(width - 10);
                dstQuad[2= Point2f(width - 1, height - 1);
                dstQuad[3= Point2f(0, height - 1);
 
                Mat perspectiveTransform = getPerspectiveTransform(srcQuad, dstQuad);
 
                warpPerspective(sourceImage, targetImage, perspectiveTransform, Size(width, height));
 
                cnt = 0;
                
                imshow("targetImage", targetImage);
            }
        }
    }
}



Dialog box procedure sets and/or retrieves target image width and height value from the dialog. 

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
INT_PTR MainDlgProc(HWND hDlg, UINT iMessage, WPARAM wParam, LPARAM lParam)
{
    switch (iMessage) {
    case WM_INITDIALOG:
        hDlgMain = hDlg;
        SetDlgItemInt(hDlg, IDC_WIDTH, width, FALSE);
        SetDlgItemInt(hDlg, IDC_HEIGHT, height, FALSE);
 
        return TRUE;
 
    case WM_COMMAND:
        switch (LOWORD(wParam)) {
        case IDOK:
            width = GetDlgItemInt(hDlg, IDC_WIDTH, NULL, FALSE);
            height = GetDlgItemInt(hDlg, IDC_HEIGHT, NULL, FALSE);
 
            return TRUE;
 
        case IDCLOSE:
            EndDialog(hDlg, IDOK);
 
            return TRUE;
        }
    }
 
    return FALSE;
}




Run the program and select an image file.


You can change the target image size or just leave it.


Source image window.


Click the four corners of the region, starting at the top-left corner and proceeding clockwise.


Perspective transform processed image.


반응형
Posted by J-sean
:
반응형

Detects motion in the camera feed and saves a snapshot of that moment as a PNG file. Adjust the sensitivity value if needed.


카메라 영상의 움직임을 감시하고 움직임이 감지된 순간의 영상을 저장한다. sensitivity 값으로 감도를 조절한다.


1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
#include <opencv2/opencv.hpp>
 
using namespace std;
using namespace cv;
 
int main(int argc, char** argv)
{
    VideoCapture cap(0);
 
    Mat frameNew;
    Mat frameOld;
    Mat frameDiff;
 
    double min, max;
    int sensitivity = 100;
    int detectionCount = 0;
 
    cap >> frameOld;
 
    while (true)
    {
        cap >> frameNew;
        if (frameNew.empty())
            break;
 
        // Calculates the per-element absolute difference
        // between two arrays or between an array and a scalar.
        absdiff(frameNew, frameOld, frameDiff);
        cvtColor(frameDiff, frameDiff, COLOR_BGR2GRAY);
        minMaxLoc(frameDiff, &min, &max);
 
        if (max > sensitivity)
        {
            cout << "Motion detected. (Max: " << max << ")" << endl;
 
            // For PNG, it can be the compression level from 0 to 9.
            // A higher value means a smaller size and longer compression time.
            vector<int> compression_params;
            compression_params.push_back(IMWRITE_PNG_COMPRESSION);
            compression_params.push_back(3);
            if (imwrite(format("detection_%03d.png", detectionCount++), frameNew, compression_params))
                cout << "Image saved." << endl;
            else
                cout << "Image not saved." << endl;
        }
 
        imshow("Motion Detectoion", frameDiff);
        
        frameNew.copyTo(frameOld);
 
        if (waitKey(10== 27)
            break;
    }
 
    return 0;
}
cs



<frameDiff>


<Detection_XXX.png>




반응형
Posted by J-sean
:
반응형

Function execution time can be measured by counting ticks after a certain event (for example, when the machine was turned on).

Below code shows how to do this.


getTickCount()나 TickMeter 클래스를 이용해 실행 시간을 계산 할 수 있다.


1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
#include <opencv2/opencv.hpp>
 
using namespace std;
using namespace cv;
 
int main(int argc, char** argv)
{
    RNG rng(getTickCount());
    Mat mat(300400, CV_8SC3);
    
    double start =  (double)getTickCount(); // Returns the number of ticks.
    rng.fill(mat, RNG::UNIFORM, Scalar::all(0), Scalar::all(255));
    // getTickFrequency() - Returns the number of ticks per second.
    double duration = ((double)getTickCount() - start) / getTickFrequency();
    cout << "Duration measured by getTickCount(): " << duration << endl;
    imshow("getTickCount", mat);
 
    // TickMeter class computes passing time by counting the number of ticks per second.
    TickMeter tm;
    tm.start(); // Starts counting ticks.
    rng.fill(mat, RNG::UNIFORM, Scalar::all(0), Scalar::all(255));
    tm.stop(); // Stops counting ticks.
    // Returns passed time in seconds.
    cout << "Duration measured by TickMeter: " << tm.getTimeSec() << endl;
    imshow("TickMeter", mat);
 
    waitKey(0);
 
    return 0;
}
cs








반응형
Posted by J-sean
:
반응형

Random number generator. It encapsulates the state (currently, a 64-bit integer) and has methods to return scalar random values and to fill arrays with random values. Currently, it supports uniform and Gaussian (normal) distributions. The generator uses Multiply-With-Carry algorithm, introduced by G. Marsaglia (http://en.wikipedia.org/wiki/Multiply-with-carry). Gaussian-distribution random numbers are generated using the Ziggurat algorithm (http://en.wikipedia.org/wiki/Ziggurat_algorithm), introduced by G. Marsaglia and W. W. Tsang.


아래 코드는 OpenCV에서 지원하는 난수 발생기의 사용 방법 입니다.

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
#include <opencv2/opencv.hpp>
 
using namespace std;
using namespace cv;
 
int main(int argc, char** argv)
{
    RNG rng(getTickCount());    // Constructor sets the state to the specified value.
    Mat mat(300400, CV_8SC3);
    
    // Fills arrays with random numbers.
    rng.fill(mat, RNG::UNIFORM, Scalar::all(0), Scalar::all(255));
    
    // Returns uniformly distributed integer random number from [a,b) range
    cout << "RNG::uniform()" << endl;
    cout << "- rng.uniform(0, 255): " << rng.uniform(0255<< endl;
    cout << "- rng.uniform(0.0f, 255.0f): " << rng.uniform(0.0f, 255.0f) << endl;
    cout << "- rng.uniform(0.0, 255.0): " << rng.uniform(0.0255.0<< endl << endl;
 
    // Returns a random integer sampled uniformly from [0, N).
    cout << "RNG::operator()" << endl;
    for (int i = 0; i < 5; i++)
        cout << "- rng(10): " << rng(10<< endl;
 
    // The method updates the state using the MWC algorithm and returns
    // the next 32-bit random number.
    cout << endl << "RNG::next()" << endl;
    for (int i = 0; i < 5; i++)
        cout << "- rng.next(): " << rng.next() << endl;
 
    imshow("RNG", mat);
 
    waitKey(0);
 
    return 0;
}
cs



Matrix filled by RNG::fill().


Random numbers generated by RNG.



반응형
Posted by J-sean
:
반응형

Below code shows how to use cv::String class.


1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
#include <opencv2/opencv.hpp>
 
using namespace std;
using namespace cv;
 
int main(int argc, char** argv)
{
    const char* str = "Open Source Computer Vision";
    String str1("Hello World");
    String str2(str, str+11);
    String str3 = "Software Engineer";
 
    cout << "str1: " << str1 << endl << "str2: " << str2 << endl
        << "str3: " << str3 << endl << endl;
 
    cout << "*str1.begin(): " << *str1.begin() << endl;
    cout << "str1[1]: " << str1[1<< endl;
    cout << "*(str1.end()-1): " << *(str1.end()-1<< endl << endl;
 
    cout << "str2.size(): " << str2.size() << endl;
    cout << "str2.length(): " << str2.length() << endl;
    cout << "str2.empty(): " << str2.empty() << endl;
    cout << "str3.find(\"ng\"): " << str3.find("ng"<< endl << endl;
 
    cout << "format(\"%s %d\", str3.c_str(), 100): " << format("%s %d", str3.c_str(), 100<< endl;
    cout << "str3.toLowerCase(): " << str3.toLowerCase() << endl;
    cout << "str3.substr(2, 4): " << str3.substr(24<< endl << endl;
 
    str1.swap(str3);
    cout << "str1.swap(str3)" << endl;
    cout << "- str1: " << str1 << endl << "- str3: " << str3 << endl;
    str1.clear();
    cout << "str1.clear()" << endl;
    cout << "- str1: " << endl;
 
    return 0;
}
cs






반응형
Posted by J-sean
:
반응형

Only two matching methods currently accept a mask: CV_TM_SQDIFF and CV_TM_CCORR_NORMED


The mask should have a CV_8U or CV_32F depth and the same number of channels and size as the target image. In CV_8U case, the mask values are treated as binary, i.e. zero and non-zero. In CV_32F case, the values should fall into [0..1] range and the target image pixels will be multiplied by the corresponding mask pixel values.


OpenCV matchTemplate 함수에 마스크를 적용해서 (배경이 다른) 같은 이미지를 모두 찾을 수 있다. 마스크는 CV_8U 아니면 CV_32F의 깊이값을 가져야 하며 target image와 같은 채널 수와 사이즈를 가져야 한다.


2019/07/08 - [Software/OpenCV] - Template Matching(Image Searching) - 부분 이미지 검색

2019/07/10 - [Software/OpenCV] - Template Matching(Image Searching) for multiple objects - 반복되는 이미지 모두 찾기


<Target>


<Mask>


<Source>


There are 3 objects(bones) to find in the source image.

Each of them has a different background as below.


Below code explains how to spot different background multiple objects with a mask.

Adjust threshold value if it doesn't work properly.

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
#include <opencv2/opencv.hpp>
#include <time.h>
 
using namespace cv;
using namespace std;
 
int main()
{
    clock_t start, end;
    double minVal;
    Point minLoc;
    double threshold = 0.001;
    int count = 0;
 
    Mat FinalImage = imread("source.png", IMREAD_COLOR);
    if (FinalImage.empty())
        return -1;
 
    // Grayscale source, target and mask for faster calculation.
    Mat SourceImage;
    cvtColor(FinalImage, SourceImage, CV_BGR2GRAY);
 
    Mat TargetImage = imread("target.png", IMREAD_GRAYSCALE);
    if (TargetImage.empty())
        return -1;
 
    Mat Mask = imread("mask.png", IMREAD_GRAYSCALE);
    if (Mask.empty())
        return -1;
 
    Mat Result;
 
    start = clock();
    // Mask must have the same datatype and size with target image.
    // It is not set by default. Currently, only the TM_SQDIFF and TM_CCORR_NORMED methods are supported.
    matchTemplate(SourceImage, TargetImage, Result, TM_SQDIFF, Mask); // Type of the template matching operation: TM_SQDIFF
    normalize(Result, Result, 01, NORM_MINMAX, -1, Mat());
    minMaxLoc(Result, &minVal, NULL&minLoc, NULL);
 
    for (int i = 0; i < Result.rows; i++)
        for (int j = 0; j < Result.cols; j++)
            if (Result.at<float>(i, j) < threshold)
            {
                rectangle(FinalImage, Point(j, i), Point(j + TargetImage.cols, i + TargetImage.rows), Scalar(00255), 1);
                count++;
            }
    end = clock();
 
    cout << "Searching time: " << difftime(end, start) / CLOCKS_PER_SEC << endl;
    cout << "Minimum Value: " << minVal << " " << minLoc << endl;
    cout << "Threshold: " << threshold << endl;
    cout << "Found: " << count << endl;
 
    imshow("Mask", Mask);
    imshow("TargetImage", TargetImage);
    imshow("Result", Result);
    imshow("FinalImage", FinalImage);
 
    waitKey(0);
 
    return 0;
}
cs




Grayscale target image


Binary mask


Result image


Final image


Found 3 bones in 0.097 secs.



반응형
Posted by J-sean
:
반응형

Template matching is a technique for finding areas of an image that match (are similar) to a template image (patch).


OpenCV matchTemplate 함수와 threshold 값을 이용해 이미지에서 찾고 싶은 부분을 검색해 모두 찾을 수 있다.


2019/07/08 - [Software/OpenCV] - Template Matching(Image Searching) - 부분 이미지 검색

2019/07/12 - [Software/OpenCV] - Template Matching(Image Searching) with a mask for multiple objects - 마스크를 이용해 (배경이 다른) 반복되는 이미지 모두 찾기


<Target>


<Source>


Below code explains how to spot multiple objects with a threshold. Adjust threshold value if it doesn't work properly.

  • Type of the template matching operation: TM_SQDIFF_NORMED

  • Threshold: 0.00015

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
#include <opencv2/opencv.hpp>
#include <time.h>
 
using namespace cv;
using namespace std;
 
int main()
{
    clock_t start, end;
    double minVal;
    Point minLoc;
    double threshold = 0.00015;
    int count = 0;
 
    Mat FinalImage = imread("source.jpg", IMREAD_COLOR);
    if (FinalImage.empty())
        return -1;
 
    // Grayscale source and target for faster calculation.
    Mat SourceImage;
    cvtColor(FinalImage, SourceImage, CV_BGR2GRAY);
 
    Mat TargetImage = imread("target.jpg", IMREAD_GRAYSCALE);
    if (TargetImage.empty())
        return -1;
 
    Mat Result;
 
    start = clock();
    matchTemplate(SourceImage, TargetImage, Result, TM_SQDIFF_NORMED); // Type of the template matching operation: TM_SQDIFF_NORMED
    minMaxLoc(Result, &minVal, NULL&minLoc, NULL);
 
    for (int i = 0; i < Result.rows; i++)
        for (int j = 0; j < Result.cols; j++)
            if (Result.at<float>(i, j) < threshold)
            {
                rectangle(FinalImage, Point(j, i), Point(j + TargetImage.cols, i + TargetImage.rows), Scalar(00255), 1);
                count++;
            }
    end = clock();
 
    cout << "Searching time: " << difftime(end, start) / CLOCKS_PER_SEC << endl;
    cout << "Minimum Value: " << minVal << " " << minLoc << endl;
    cout << "Threshold: " << threshold << endl;
    cout << "Found: " << count << endl;
 
    imshow("TargetImage", TargetImage);
    imshow("Result", Result);
    imshow("FinalImage", FinalImage);
 
    waitKey(0);
 
    return 0;
}
cs




<Result>


Found 4 coins in 0.035 secs.




반응형
Posted by J-sean
:
반응형

Template matching is a technique for finding areas of an image that match (are similar) to a template image (patch).


Python Pillow library로 구현해 봤던 Image searching 기술을 OpenCV matchTemplate 함수로 간단히 만들 수 있다.


2018/11/30 - [Software/Python] - Pillow 이미지 서치(Image Search) 1

2018/12/02 - [Software/Python] - Pillow 이미지 서치(Image Search) 2

2019/07/10 - [Software/OpenCV] - Template Matching(Image Searching) for multiple objects - 반복되는 이미지 모두 찾기

2019/07/12 - [Software/OpenCV] - Template Matching(Image Searching) with a mask for multiple objects - 마스크를 이용해 (배경이 다른) 반복되는 이미지 모두 찾기


<Target>


<Source>




Type of the template matching operation: TM_SQDIFF

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
#include <opencv2/opencv.hpp>
#include <time.h>
 
using namespace cv;
using namespace std;
 
int main()
{
    clock_t start, end;
    double minVal;
    Point minLoc;
 
    Mat FinalImage = imread("source.jpg", IMREAD_COLOR);
    if (FinalImage.empty())
        return -1;
 
    // Grayscale source and target for faster calculation.
    Mat SourceImage;
    cvtColor(FinalImage, SourceImage, CV_BGR2GRAY);
 
    Mat TargetImage = imread("target.jpg", IMREAD_GRAYSCALE);
    if (TargetImage.empty())
        return -1;
 
    Mat Result;
 
    start = clock();
    matchTemplate(SourceImage, TargetImage, Result, TM_SQDIFF); // Type of the template matching operation: TM_SQDIFF
    normalize(Result, Result, 01, NORM_MINMAX, -1, Mat());
    minMaxLoc(Result, &minVal, NULL&minLoc, NULL);
    end = clock();
 
    cout << "Searching time: " << difftime(end, start) / CLOCKS_PER_SEC << endl;
    cout << "Minimum Value: " << minVal << endl << "Location: " << minLoc << endl;
    rectangle(FinalImage, minLoc, Point(minLoc.x + TargetImage.cols, minLoc.y + TargetImage.rows), Scalar(00255), 1);
 
    imshow("TargetImage", TargetImage);
    imshow("Result", Result);
    imshow("FinalImage", FinalImage);
 
    waitKey(0);
 
    return 0;
}
cs


<Result>


Found the target at the husky's front paw in 0.014 secs.



반응형
Posted by J-sean
: