
Hue is the color portion of the model, expressed as a number from 0 to 360 degrees:

Red: falls between 0 and 60 degrees.

Yellow: falls between 61 and 120 degrees.

Green: falls between 121 and 180 degrees.

Cyan: falls between 181 and 240 degrees.

Blue: falls between 241 and 300 degrees.

Magenta: falls between 301 and 360 degrees.


For HSV in OpenCV, the Hue range is [0,179], the Saturation range is [0,255], and the Value range is [0,255]. Different software uses different scales, so if you are comparing OpenCV values with them, you need to normalize these ranges.
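
For example, a color reported by other software as hue in degrees (0-360) and saturation/value in percent (0-100) could be rescaled to OpenCV's ranges like this. A minimal sketch; the degree and percent scales are assumptions about the other software:

#include <opencv2/opencv.hpp>
#include <iostream>

int main()
{
    // A green pixel as another program might report it.
    double hueDegrees = 120.0;    // 0-360 degrees
    double satPercent = 100.0;    // 0-100 %
    double valPercent = 50.0;     // 0-100 %

    // Rescale to OpenCV's H:[0,179], S:[0,255], V:[0,255].
    int h = cvRound(hueDegrees / 2.0);              // 120 degrees -> 60
    int s = cvRound(satPercent * 255.0 / 100.0);    // 100% -> 255
    int v = cvRound(valPercent * 255.0 / 100.0);    // 50%  -> 128

    std::cout << "OpenCV HSV: " << h << ", " << s << ", " << v << std::endl;

    return 0;
}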


Using HSV (Hue) makes it easier to detect a specific color than working directly with RGB. In OpenCV, the Hue value ranges from 0 to 179.



#include <opencv2/opencv.hpp>
 
using namespace std;
using namespace cv;
 
int lowerHue = 40, upperHue = 80;    // Green
Mat src, src_hsv, mask, dst;
 
void OnHueChanged(int pos, void* userdata)
{
    Scalar lowerb(lowerHue, 100, 0);
    Scalar upperb(upperHue, 255, 255);
 
    inRange(src_hsv, lowerb, upperb, mask);
    // Checks if array elements lie between the elements of two other arrays.
 
    dst.setTo(0);    // Reset dst every time; otherwise the result overlaps with the previously selected color.
    src.copyTo(dst, mask);
    // The method copies the matrix data to another matrix. Before copying the data, the method invokes: m.create(this->size(), this->type());
    // so that the destination matrix is reallocated if needed. While m.copyTo(m); works flawlessly, the function does not handle the case of a
    // partial overlap between the source and the destination matrices. When the operation mask is specified, if the Mat::create call shown above
    // reallocates the matrix, the newly allocated matrix is initialized with all zeros before copying the data.
 
    imshow("mask", mask);
    imshow("dst", dst);
}
 
int main(int argc, char** argv)
{
    src = imread("candies.jpg", IMREAD_COLOR);
    if (src.empty()) {
        cerr << "Image load failed." << endl;
        
        return -1;
    }
 
    imshow("src", src);
 
    cvtColor(src, src_hsv, COLOR_BGR2HSV);
 
    namedWindow("mask");
    createTrackbar("Lower Hue""mask"&lowerHue, 179, OnHueChenaged);
    createTrackbar("Upper Hue""mask"&upperHue, 179, OnHueChenaged);
    OnHueChenaged(NULLNULL);
 
    waitKey(0);
    
    return 0;
}




Original candy image.


Mask for green color.


Green candies detected.


Posted by J-sean

This is a Windows-backend-only feature: you can simply copy to the clipboard or save to a file the image displayed with cv::imshow().


When an image is displayed with cv::imshow(), it can simply be copied to the clipboard or saved to a file.


#include <opencv2/opencv.hpp>
 
using namespace std;
using namespace cv;
 
int main(int argc, char** argv)
{
    Mat src = imread("matera.jpg");
    if (src.empty()) {
        cerr << "Image load failed." << endl;
 
        return 0;
    }
 
    imshow("src", src);
    
    waitKey(0);
    
    return 0;
}



[Windows backend only]

  • Pressing Ctrl+C will copy the image to the clipboard.
  • Pressing Ctrl+S will show a dialog to save the image.
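
On other backends these shortcuts are not available, but the displayed image can still be saved in code. A minimal sketch reusing src from the program above ("matera_copy.jpg" is an arbitrary output name):

// cv::imwrite() works on any backend and returns false on failure.
if (!imwrite("matera_copy.jpg", src))
    cerr << "Image save failed." << endl;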




Posted by J-sean

If we pass the sets of matched points from the target and source images, findHomography() will find the perspective transformation of that object. Then we can use perspectiveTransform() to locate the object in the source image. It needs at least four correct point pairs to find the transformation.


<Target>


<Source>



#include <opencv2/opencv.hpp>
 
using namespace std;
using namespace cv;
 
int main(int argc, char** argv)
{
    Mat trg = imread("Target.jpg", IMREAD_GRAYSCALE);
    Mat src = imread("Source.jpg", IMREAD_GRAYSCALE);
    
    if (trg.empty() || src.empty()) {
        cerr << "Image load failed!" << endl;
        
        return -1;
    }
 
    Ptr<Feature2D> feature = ORB::create();
    
    vector<KeyPoint> trgKeypoints, srcKeypoints;
    Mat trgDesc, srcDesc;
    feature->detectAndCompute(trg, Mat(), trgKeypoints, trgDesc);
    feature->detectAndCompute(src, Mat(), srcKeypoints, srcDesc);
 
    Ptr<DescriptorMatcher> matcher = BFMatcher::create(NORM_HAMMING);
 
    vector<DMatch> matches;
    matcher->match(trgDesc, srcDesc, matches);
 
    sort(matches.begin(), matches.end());
    vector<DMatch> good_matches(matches.begin(), matches.begin() + 100);
 
    Mat dst;
    drawMatches(trg, trgKeypoints, src, srcKeypoints, good_matches, dst,
        Scalar::all(-1), Scalar(-1), vector<char>(), DrawMatchesFlags::NOT_DRAW_SINGLE_POINTS);
 
    vector<Point2f> trgPts, srcPts;
    for (size_t i = 0; i < good_matches.size(); i++) {
        trgPts.push_back(trgKeypoints[good_matches[i].queryIdx].pt);
        srcPts.push_back(srcKeypoints[good_matches[i].trainIdx].pt);
    }
 
    Mat H = findHomography(trgPts, srcPts, RANSAC);
    // Finds a perspective transformation between two planes.
 
    vector<Point2f> trgCorners, srcCorners;
    trgCorners.push_back(Point2f(0.f, 0.f));
    trgCorners.push_back(Point2f(trg.cols - 1.f, 0.f));
    trgCorners.push_back(Point2f(trg.cols - 1.f, trg.rows - 1.f));
    trgCorners.push_back(Point2f(0.f, trg.rows - 1.f));
    perspectiveTransform(trgCorners, srcCorners, H);
 
    vector<Point> dstCorners;
    for (Point2f pt : srcCorners) {
        dstCorners.push_back(Point(cvRound(pt.x + trg.cols), cvRound(pt.y)));
        // Move the corners to the right as the width of the Target image.
    }
 
    polylines(dst, dstCorners, true, Scalar(0, 255, 0), 3, LINE_AA);
 
    imshow("dst", dst);
 
    waitKey();
    
    return 0;
}



Close but not perfect. As mentioned above, it needs correct points to find the transformation.
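
One way to quantify this is to ask findHomography() for its RANSAC inlier mask through the optional output argument. A minimal sketch reusing trgPts, srcPts, and good_matches from the program above (3.0 is the function's default reprojection threshold):

// Same call as above, but also requesting the RANSAC inlier mask.
Mat inlierMask;
Mat H = findHomography(trgPts, srcPts, RANSAC, 3.0, inlierMask);

// Count how many of the 100 good matches were treated as inliers.
cout << "Inliers: " << countNonZero(inlierMask) << " / " << good_matches.size() << endl;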


Posted by J-sean


Keypoint matching.


#include <opencv2/opencv.hpp>
 
using namespace std;
using namespace cv;
 
int main(int argc, char** argv)
{
    Mat trg = imread("Target.jpg", IMREAD_GRAYSCALE);
    Mat src = imread("Source.jpg", IMREAD_GRAYSCALE);
    
    if (trg.empty() || src.empty()) {
        cerr << "Image load failed!" << endl;
        
        return -1;
    }
 
    Ptr<Feature2D> feature = ORB::create();
    
    vector<KeyPoint> trgKeypoints, srcKeypoints;
    Mat trgDesc, srcDesc;
    feature->detectAndCompute(trg, Mat(), trgKeypoints, trgDesc);
    feature->detectAndCompute(src, Mat(), srcKeypoints, srcDesc);
    // Detects keypoints and computes the descriptors.
 
    Ptr<DescriptorMatcher> matcher = BFMatcher::create(NORM_HAMMING);
    // Brute-force matcher create method.
 
    vector<DMatch> matches;
    // Class for matching keypoint descriptors.
    matcher->match(trgDesc, srcDesc, matches);
    // Finds the best match for each descriptor from a query set.
 
    sort(matches.begin(), matches.end());
    vector<DMatch> good_matches(matches.begin(), matches.begin() + 100);
 
    Mat dst;
    drawMatches(trg, trgKeypoints, src, srcKeypoints, good_matches, dst,
        Scalar::all(-1), Scalar(-1), vector<char>(), DrawMatchesFlags::NOT_DRAW_SINGLE_POINTS);
    // Draws the found matches of keypoints from two images.
 
    imshow("dst", dst);
 
    waitKey();
    
    return 0;
}




100 best matches.


Posted by J-sean

Detect, compute, and draw keypoints.
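
In the program below, detect() and compute() are called separately; Feature2D also offers detectAndCompute(), which does both in one call (as used in the matching posts above). A minimal sketch assuming the same feature and src variables:

// Equivalent one-step alternative to detect() followed by compute().
vector<KeyPoint> keypoints;
Mat desc;
feature->detectAndCompute(src, Mat(), keypoints, desc);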


#include <opencv2/opencv.hpp>
 
using namespace std;
using namespace cv;
 
int main(int argc, char** argv)
{
    Mat src = imread("Source.jpg", IMREAD_GRAYSCALE);
    
    if (src.empty()) {
        cerr << "Image load failed!" << endl;
        
        return -1;
    }
 
    Ptr<Feature2D> feature = ORB::create();
    // static Ptr<ORB> cv::ORB::create(int nfeatures = 500, float scaleFactor = 1.2f, int nlevels = 8,
    // int edgeThreshold = 31, int firstLevel = 0, int WTA_K = 2, ORB::ScoreType scoreType = ORB::HARRIS_SCORE,
    // int patchSize = 31, int fastThreshold = 20)
 
    vector<KeyPoint> keypoints;
    feature->detect(src, keypoints);
    // Detects keypoints in an image (first variant) or image set (second variant).
 
    Mat desc;
    feature->compute(src, keypoints, desc);
    // Computes the descriptors for a set of keypoints detected in an image (first variant) or image set (second variant).
 
    cout << "keypoints.size(): " << keypoints.size() << endl;
    cout << "desc.size(): " << desc.size() << endl;
 
    Mat dst;
    drawKeypoints(src, keypoints, dst, Scalar::all(-1), DrawMatchesFlags::DRAW_RICH_KEYPOINTS);
    // Draws keypoints.
 
    imshow("src", src);
    imshow("dst", dst);
 
    waitKey();
    
    return 0;
}




Sizes of keypoints and descriptors.


Source image.


Keypoints with sizes and orientations.


Posted by J-sean

Screen capture with Windows API and OpenCV.


#include <Windows.h>
#include <iostream>
#include "opencv2/imgproc.hpp"
#include "opencv2/highgui.hpp"
 
using namespace std;
using namespace cv;
 
class hWnd2Mat
{
public:
    hWnd2Mat(HWND hWindow, float scale = 1);
    virtual ~hWnd2Mat();
    virtual void Read();
    Mat capture;
 
private:
    HWND hWnd;
    HDC hWindowDC, hWindowCompatibleDC;
    int height, width, srcHeight, srcWidth;
    HBITMAP hBitmap;
    BITMAPINFOHEADER bi;
};
 
hWnd2Mat::hWnd2Mat(HWND hWindow, float scale)
{
    hWnd = hWindow;
    hWindowDC = GetDC(hWnd);
    hWindowCompatibleDC = CreateCompatibleDC(hWindowDC);
    SetStretchBltMode(hWindowCompatibleDC, COLORONCOLOR);
 
    RECT windowsize;    // get the height and width of the screen
    GetClientRect(hWnd, &windowsize);
 
    srcHeight = windowsize.bottom;
    srcWidth = windowsize.right;
    height = (int)(windowsize.bottom * scale);
    width = (int)(windowsize.right * scale);
 
    capture.create(height, width, CV_8UC4);
 
    // create a bitmap
    hBitmap = CreateCompatibleBitmap(hWindowDC, width, height);
    bi.biSize = sizeof(BITMAPINFOHEADER);    // http://msdn.microsoft.com/en-us/library/windows/window/dd183402%28v=vs.85%29.aspx
    bi.biWidth = width;
    bi.biHeight = -height;  //this is the line that makes it draw upside down or not
    bi.biPlanes = 1;
    bi.biBitCount = 32;
    bi.biCompression = BI_RGB;
    bi.biSizeImage = 0;
    bi.biXPelsPerMeter = 0;
    bi.biYPelsPerMeter = 0;
    bi.biClrUsed = 0;
    bi.biClrImportant = 0;
 
    // use the previously created device context with the bitmap
    SelectObject(hWindowCompatibleDC, hBitmap);
};
 
void hWnd2Mat::Read()
{
    // copy from the window device context to the bitmap device context
    StretchBlt(hWindowCompatibleDC, 0, 0, width, height, hWindowDC, 0, 0, srcWidth, srcHeight, SRCCOPY);
    //change SRCCOPY to NOTSRCCOPY for wacky colors!
    GetDIBits(hWindowCompatibleDC, hBitmap, 0, height, capture.data, (BITMAPINFO*)&bi, DIB_RGB_COLORS);
    //copy from hWindowCompatibleDC to hBitmap
};
 
hWnd2Mat::~hWnd2Mat()
{
    DeleteObject(hBitmap);
    DeleteDC(hWindowCompatibleDC);
    ReleaseDC(hWnd, hWindowDC);
};
 
int main()
{
    HWND hWndDesktop = GetDesktopWindow();
    hWnd2Mat desktop(hWndDesktop, 1);    // scale = 1
 
    cout << "Screen capure in 3 seconds." << endl;
    
    for (int i = 3; i > 0; i--)
    {
        cout << i << ".." << endl;
        Sleep(1000);
    }
 
    desktop.Read();
    imshow("Capture", desktop.capture);
 
    waitKey();
 
    return 0;
}



It captures your desktop image in 3 seconds.
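
The captured frame is a CV_8UC4 (BGRA) Mat, so it can be processed or saved like any other image. A minimal sketch reusing desktop.capture from the program above ("desktop.png" is an arbitrary file name):

// Drop the alpha channel and write the capture to disk.
Mat bgr;
cvtColor(desktop.capture, bgr, COLOR_BGRA2BGR);
imwrite("desktop.png", bgr);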


Posted by J-sean

Build a Windows dialog-based OpenCV program.


#include <Windows.h>
#include <gdiplus.h>
#include <opencv2/opencv.hpp>
#include "resource.h"
 
#pragma comment(lib, "gdiplus")
 
using namespace cv;
using namespace Gdiplus;
 
INT_PTR MainDlgProc(HWND hDlg, UINT iMessage, WPARAM wParam, LPARAM lParam);
HWND hDlgMain;
 
VideoCapture cap;
Mat frame;
RECT invalidateRect;


Prepare necessities.


int APIENTRY WinMain(HINSTANCE hInstance, HINSTANCE hPrevInstance, LPSTR lpszCmdParam, int nCmdShow)
{
    ULONG_PTR gpToken;
    GdiplusStartupInput gpsi;
 
    if (GdiplusStartup(&gpToken, &gpsi, NULL) != Ok) {
        MessageBox(NULL, TEXT("GDI+ start-up error."), TEXT("GDI+ Error"), MB_OK);
    }
 
    DialogBox(hInstance, MAKEINTRESOURCE(IDD_DIALOG1), HWND_DESKTOP, MainDlgProc);
    
    GdiplusShutdown(gpToken);
 
    return 0;
}


Start GDI+ and create a modal dialog box from a dialog template resource.


bool initCamera()
{
    cap.open(0);
    if (cap.isOpened()) {
        invalidateRect = { 0, 0, cvRound(cap.get(CAP_PROP_FRAME_WIDTH)), cvRound(cap.get(CAP_PROP_FRAME_HEIGHT)) };
 
        return true;
    }
    else {
        MessageBox(NULL, TEXT("Camera open failed."), TEXT("Camera Error"), MB_OK);
 
        return false;
    }    
}


Initialize the camera and set the window update region.
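
If a specific capture size is wanted, it can be requested before the frame properties are read. A minimal sketch using the same cap object from initCamera() above (whether the camera honors the request depends on the driver):

// Ask the camera for 640x480 before computing the update region.
cap.set(CAP_PROP_FRAME_WIDTH, 640);
cap.set(CAP_PROP_FRAME_HEIGHT, 480);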


void OnPaint(HDC hdc)
{
    Graphics G(hdc);
 
    cap >> frame;
    if (frame.empty()) {
        TextOut(hdc, 300, 240, TEXT("No frame to show"), 16);
 
        return;
    }
 
    if (IsDlgButtonChecked(hDlgMain, IDC_CHECK1) == BST_CHECKED) {
        cvtColor(frame, frame, COLOR_BGR2GRAY);
        Canny(frame, frame, 50, 100);
        cvtColor(frame, frame, COLOR_GRAY2BGRA);
    }
    else {
        cvtColor(frame, frame, COLOR_BGR2BGRA);
    }
 
    Bitmap bitmap(frame.size().width, frame.size().height, frame.step1(), PixelFormat32bppARGB, frame.data);
    G.DrawImage(&bitmap, 0, 0, bitmap.GetWidth(), bitmap.GetHeight());
}


Process each frame and draw it. 


INT_PTR MainDlgProc(HWND hDlg, UINT iMessage, WPARAM wParam, LPARAM lParam)
{
    HDC hdc;
    PAINTSTRUCT ps;
    static HANDLE hTimer;
 
    switch (iMessage) {
    case WM_INITDIALOG:
        hDlgMain = hDlg;
        if (initCamera()) {
            hTimer = (HANDLE)SetTimer(hDlg, 1, 10, NULL);
        }
        else {
            hTimer = NULL;
        }
 
        return TRUE;
 
    case WM_COMMAND:
        switch (LOWORD(wParam)) {
        case IDOK:
 
            return TRUE;
 
        case IDCANCEL:
            if (hTimer != NULL) {
                KillTimer(hDlg, 1);
            }
            EndDialog(hDlg, IDCANCEL);
 
            return TRUE;
        }

        break;
 
    case WM_PAINT:
        hdc = BeginPaint(hDlg, &ps);
        OnPaint(hdc);
        EndPaint(hDlg, &ps);
 
        return TRUE;
 
    case WM_TIMER:
        InvalidateRect(hDlg, &invalidateRect, FALSE);
        return 0;
    }
 
    return FALSE;
}


Create a timer, define the necessary variables, and handle window messages.



Run the program.


Check 'Canny Edge Detection'.


Posted by J-sean

The GDI+ Bitmap class inherits from the Image class. The Image class provides methods for loading and saving vector images (metafiles) and raster images (bitmaps). You can build Windows applications with GDI+ and OpenCV.
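
The core Mat-to-Bitmap step used throughout this post, shown here as a minimal sketch (it assumes GDI+ has already been started, img is a BGR Mat loaded with imread(), and graphics is a Gdiplus::Graphics object for the target device context):

// GDI+ PixelFormat32bppARGB expects 4 channels, so convert BGR to BGRA first.
Mat bgra;
cvtColor(img, bgra, COLOR_BGR2BGRA);

// The Bitmap wraps the Mat's pixel buffer without copying it,
// so "bgra" must stay alive while the Bitmap is in use.
Gdiplus::Bitmap bitmap(bgra.size().width, bgra.size().height,
    (INT)bgra.step, PixelFormat32bppARGB, bgra.data);

graphics.DrawImage(&bitmap, 0, 0, bitmap.GetWidth(), bitmap.GetHeight());

The full program below follows the same steps, with the conversion done once per loaded or processed image.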

 

 

#include <windows.h>
#include <gdiplus.h>
#include <opencv2/opencv.hpp>
 
#pragma comment(lib, "gdiplus")
 
using namespace cv;
using namespace Gdiplus;
 
LRESULT CALLBACK WndProc(HWND, UINT, WPARAM, LPARAM);
HINSTANCE g_hInst;
LPCSTR lpszClass = "MatToBitmap";
 
Mat src;    // source image
Mat org;    // original image
Mat dst;    // processed image
 

Prepare necessities.

 

int APIENTRY WinMain(HINSTANCE hInstance, HINSTANCE hPrevInstance, LPSTR lpszCmdParam, int nCmdShow)
{
    HWND hWnd;
    MSG Message;
    WNDCLASS WndClass;
    g_hInst = hInstance;
 
    ULONG_PTR gpToken;
    GdiplusStartupInput gpsi;
    if (GdiplusStartup(&gpToken, &gpsi, NULL) != Ok) {
        MessageBox(NULL, TEXT("GDI+ start-up error."), TEXT("GDI+ Error"), MB_OK);
 
        return 0;
    }
 
    WndClass.cbClsExtra = 0;
    WndClass.cbWndExtra = 0;
    WndClass.hbrBackground = (HBRUSH)(COLOR_BTNFACE + 1); //(HBRUSH)GetStockObject(WHITE_BRUSH);
    WndClass.hCursor = LoadCursor(NULL, IDC_ARROW);
    WndClass.hIcon = LoadIcon(NULL, IDI_APPLICATION);
    WndClass.hInstance = hInstance;
    WndClass.lpfnWndProc = (WNDPROC)WndProc;
    WndClass.lpszClassName = lpszClass;
    WndClass.lpszMenuName = NULL;
    WndClass.style = CS_HREDRAW | CS_VREDRAW;
    RegisterClass(&WndClass);
 
    hWnd = CreateWindow(lpszClass, lpszClass, WS_OVERLAPPEDWINDOW, CW_USEDEFAULT, CW_USEDEFAULT,
        670, 340, NULL, (HMENU)NULL, hInstance, NULL);
    ShowWindow(hWnd, nCmdShow);
 
    while (GetMessage(&Message, NULL, 0, 0)) {
        TranslateMessage(&Message);
        DispatchMessage(&Message);
    }
 
    GdiplusShutdown(gpToken);
 
    return (int)Message.wParam;
}
 
 

Start GDI+, set the background color to match the radio buttons, and set the window size.

 

bool getFile(PTCHAR filename)
{
    OPENFILENAME ofn;
    memset(&ofn, 0, sizeof(OPENFILENAME));
    ofn.lStructSize = sizeof(OPENFILENAME);
    ofn.hwndOwner = NULL;
    ofn.lpstrFilter = TEXT("all(*.*)\0*.*\0jpg(*.jpg)\0*.jpg\0png(*.png)\0*.png\0bmp(*.bmp)\0*.bmp\0");
    ofn.lpstrFile = filename;
    ofn.nMaxFile = MAX_PATH;
 
    if (GetOpenFileName(&ofn) != 0) {
        //MessageBox(NULL, filename, TEXT("File opened."), MB_OK);
 
        return true;
    }
    else {
        MessageBox(NULL, TEXT("File open failed"), TEXT("No file selected"), MB_OK);
 
        return false;
    }
}
 
 

Retrieve an image file name.

 

 

bool ImageLoad()
{
    TCHAR filename[MAX_PATH] = "";
 
    if (getFile(filename)) {
        src = imread(filename);
        if (src.empty()) {
            MessageBox(NULL, TEXT("Image load failed"), TEXT("No image loaded"), MB_OK);
 
            return false;
        }
 
        return true;
    }
    else {
        return false;
    }
}
 
 

Read the image from the file.

 

void ImageResize()
{
    // maximum image display size: 320 X 240 with original ratio
    double ratio = min((double)320 / (double)src.size().width, (double)240 / (double)src.size().height);
    resize(src, src, cv::Size(), ratio, ratio);
    org = src.clone();
    cvtColor(org, org, COLOR_BGR2BGRA);
}
 
 

Resize an image.

 

void ImageProcess(int direction)
{
    cvtColor(src, dst, COLOR_BGR2GRAY);
 
    float vertical[] = {
        -2, 0, 2,
        -2, 0, 2,
        -2, 0, 2
    };

    float horizontal[] = {
        -2, -2, -2,
         0,  0,  0,
         2,  2,  2
    };

    float diagonal[] = {
        -2, -2, 0,
        -2,  0, 2,
         0,  2, 2
    };
    
    Mat emboss;
    switch (direction)
    {
    case 0:
        emboss = Mat(3, 3, CV_32FC1, vertical);
 
        break;
 
    case 1:
        emboss = Mat(3, 3, CV_32FC1, horizontal);
 
        break;
 
    case 2:
        emboss = Mat(3, 3, CV_32FC1, diagonal);
 
        break;
 
    default:
        emboss = Mat(3, 3, CV_32FC1, vertical);
    }
    filter2D(dst, dst, -1, emboss, cv::Point(-1, -1), 128);
    
    cvtColor(dst, dst, COLOR_GRAY2BGRA);
}
 
 

Process the image with an embossing filter.
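
The 128 passed to filter2D() above is the delta value that is added to every filtered pixel. Since the emboss kernel produces both positive and negative responses, the shift keeps them visible in the 8-bit result; a minimal sketch of the same call without it (negative responses would then be clipped to 0):

// Same filter without the +128 offset: a dark, mostly clipped output.
filter2D(dst, dst, -1, emboss, cv::Point(-1, -1), 0);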

 

void OnPaint(HDC hdc)
{
    if (!dst.empty()) {
        Graphics G(hdc);
 
        Bitmap bitmapDst(dst.size().width, dst.size().height, dst.step, PixelFormat32bppARGB, dst.data);
        Bitmap bitmapOrg(org.size().width, org.size().height, org.step, PixelFormat32bppARGB, org.data);
        // stride(src.step): Integer that specifies the byte offset between the beginning of one scan line and
        // the next. This is usually (but not necessarily) the number of bytes in the pixel format (for example,
        // 2 for 16 bits per pixel) multiplied by the width of the bitmap. The value passed to this parameter
        // must be a multiple of four.
 
        G.DrawImage(&bitmapDst, 0, 0, bitmapDst.GetWidth(), bitmapDst.GetHeight());
        G.DrawImage(&bitmapOrg, bitmapOrg.GetWidth() + 10, 0, bitmapOrg.GetWidth(), bitmapOrg.GetHeight());
    }
    else {
        TextOut(hdc, 270, 110, TEXT("No image to display"), 19);
    }
}
 
 

Draw original and processed images.

 

enum { ID_R1 = 101, ID_R2, ID_R3 };
HWND r1, r2, r3;
int x = 10;
int y = 270;
int w = 90;
int h = 20;
 
LRESULT CALLBACK WndProc(HWND hWnd, UINT iMessage, WPARAM wParam, LPARAM lParam)
{
    HDC hdc;
    PAINTSTRUCT ps;    
 
    switch (iMessage) {
    case WM_CREATE:
        if (ImageLoad()) {
            ImageResize();
            ImageProcess(0);
        }
        
        CreateWindow(TEXT("button"), TEXT("Filter type"), WS_CHILD | WS_VISIBLE |
            BS_GROUPBOX, 5, 250, 290, 50, hWnd, (HMENU)0, g_hInst, NULL);
 
        r1 = CreateWindow(TEXT("button"), TEXT("Vertical"), WS_CHILD | WS_VISIBLE |
            BS_AUTORADIOBUTTON | WS_GROUP, x, y, w, h, hWnd, (HMENU)ID_R1, g_hInst, NULL);
 
        r2 = CreateWindow(TEXT("button"), TEXT("Horizontal"), WS_CHILD | WS_VISIBLE |
            BS_AUTORADIOBUTTON, x + 90, y, w, h, hWnd, (HMENU)ID_R2, g_hInst, NULL);
 
        r3 = CreateWindow(TEXT("button"), TEXT("Diagonal"), WS_CHILD | WS_VISIBLE |
            BS_AUTORADIOBUTTON, x + 180, y, w, h, hWnd, (HMENU)ID_R3, g_hInst, NULL);
 
        CheckRadioButton(hWnd, ID_R1, ID_R3, ID_R1);
 
        return 0;
 
    case WM_COMMAND:
        if (!dst.empty()) {
            switch (LOWORD(wParam)) {
            case ID_R1:
                ImageProcess(0);
                break;
 
            case ID_R2:
                ImageProcess(1);
                break;
 
            case ID_R3:
                ImageProcess(2);
                break;
            }
 
            InvalidateRect(hWnd, NULL, TRUE);
        }
 
        return 0;
 
    case WM_PAINT:
        hdc = BeginPaint(hWnd, &ps);
        OnPaint(hdc);
        EndPaint(hWnd, &ps);
 
        return 0;
 
    case WM_DESTROY:
        PostQuitMessage(0);
 
        return 0;
    }
 
    return(DefWindowProc(hWnd, iMessage, wParam, lParam));
}
 
 

Define the necessary variables and handle window messages.

 

 

Run the application and select an image file.

 

Processed and original image with a vertical filter.

 

Processed and original image with a horizontal filter.

 

Processed and original image with a diagonal filter.

 

 

Posted by J-sean