粘連物體如何影象分割
1. 分水嶺分割方法
它是依賴於形態學的,影象的灰度等級不一樣,如果影象的灰度等級一樣的情況下怎麼人為的把它造成不一樣?可以透過距離變換實現,這樣它們的灰度值就有了階梯狀的變換。分水嶺演算法常見的有三種方法:(1)基於浸泡理論的分水嶺分割方法;(2)基於連通圖方法;(3)基於距離變換的方法。OpenCV 中是基於距離變換的分割方法,就相當於我們的小山頭(人為造成的)。
基本的步驟:
例子1 粘連物件分離和計數。
例子程式碼:
#include#includeusing namespace std;using namespace cv;void test(){ Mat srcImg; srcImg = imread(“pill_002。png”); if (srcImg。empty()) { cout << “could not load image。。。\n” << endl; } namedWindow(“Original image”, CV_WINDOW_AUTOSIZE); imshow(“Original image”, srcImg); Mat grayImg, binaryImg, shiftedImg; //做濾波,使影象更加平滑,保留邊緣,類似於雙邊濾波 pyrMeanShiftFiltering(srcImg, shiftedImg, 21, 51); namedWindow(“shifted”, CV_WINDOW_AUTOSIZE); imshow(“shifted”, shiftedImg); cvtColor(shiftedImg, grayImg, COLOR_BGR2GRAY); //轉為灰度影象 //二值化 threshold(grayImg, binaryImg, 0, 255, THRESH_BINARY | THRESH_OTSU); namedWindow(“binary”, CV_WINDOW_AUTOSIZE); imshow(“binary”, binaryImg); //距離變換 Mat distImg; distanceTransform(binaryImg, distImg, DistanceTypes::DIST_L2, 3, CV_32F); //歸一化,因為距離變換後得出來的值都比較小。 normalize(distImg, distImg, 0, 1, NORM_MINMAX); namedWindow(“distance”, CV_WINDOW_AUTOSIZE); imshow(“distance”, distImg); //這個二值化的作用是尋找區域性最大。 threshold(distImg, distImg, 0。4, 1, THRESH_BINARY); namedWindow(“distance_binary”, CV_WINDOW_AUTOSIZE); imshow(“distance_binary”, distImg); //生成 marker Mat distMaskImg; // distImg 得到的是 0- 1之間的數,轉化成8位單通道的。 distImg。convertTo(distMaskImg, CV_8U); vector>contours; //找到 marker 的輪廓 findContours(distMaskImg, contours, RETR_EXTERNAL, CHAIN_APPROX_SIMPLE, Point(0, 0)); //create marker 填充 marker Mat markersImg = Mat::zeros(srcImg。size(), CV_32SC1); for (int i = 0; i < contours。size(); i++) { drawContours(markersImg, contours, static_cast(i), Scalar::all(static_cast(i)+1), -1); } circle(markersImg, Point(5, 5), 3, Scalar(255), -1); //形態學操作 - 彩色影象,目的是去掉干擾,讓結果更好。 Mat kernel = getStructuringElement(MORPH_RECT, Size(3, 3), Point(-1, -1)); morphologyEx(srcImg, srcImg, MORPH_ERODE, kernel); //完成分水嶺變換 watershed(srcImg, markersImg); Mat mark = Mat::zeros(markersImg。size(), CV_8UC1); markersImg。convertTo(mark, CV_8UC1); bitwise_not(mark, mark, Mat()); namedWindow(“watershed”, CV_WINDOW_AUTOSIZE); imshow(“watershed”, mark); //下面的步驟可以不做,最好做出來讓結果顯示更美觀。 //生成隨機顏色 vectorcolors; for (int i = 
0; i < contours。size(); i++) { int r = theRNG()。uniform(0, 255); int g = theRNG()。uniform(0, 255); int b = theRNG()。uniform(0, 255); colors。push_back(Vec3b((uchar)b, (uchar)g, (uchar)r)); } //顏色填充和最終顯示 Mat dstImg = Mat::zeros(markersImg。size(), CV_8UC3); int index = 0; for (int i = 0; i < markersImg。rows; i++) { for (int j = 0; j < markersImg。cols; j++) { index = markersImg。at(i, j); if (index > 0 && index <= contours。size()) { dstImg。at(i, j) = colors[index - 1]; } else { dstImg。at(i, j) = Vec3b(0, 0, 0); } } } cout << “number of objects:” << contours。size() << endl; namedWindow(“Final Result”, CV_WINDOW_AUTOSIZE); imshow(“Final Result”, dstImg);}int main(){ test(); waitKey(0); return 0;}
總結:有時候會導致碎片化,過度分割,因為二值化中如果有很多小的黑點或碎片,在分割的時候導致很多 mask ,即小山頭太多了,這個時候我們要考慮怎麼去合併它,可以透過連通區域的直方圖,或者畫素值均值相似程度等。
例子2:影象分割
#include#includeusing namespace std;using namespace cv;//執行分水嶺演算法函式Mat watershedCluster(Mat &srcImg, int &numSegments);//結果顯示函式void DisplaySegments(Mat &markersImg, int numSegments);void test(){ Mat srcImg; srcImg = imread(“toux。jpg”); if (srcImg。empty()) { cout << “could not load image。。。\n” << endl; } namedWindow(“Original image”, CV_WINDOW_AUTOSIZE); imshow(“Original image”, srcImg); int numSegments; Mat markers = watershedCluster(srcImg, numSegments); DisplaySegments(markers, numSegments);}Mat watershedCluster(Mat &srcImg, int &numSegments){ //二值化 Mat grayImg, binaryImg; cvtColor(srcImg, grayImg, COLOR_BGR2GRAY); threshold(grayImg, binaryImg, 0, 255, THRESH_BINARY | THRESH_OTSU); //形態學和距離變換 Mat kernel = getStructuringElement(MORPH_RECT, Size(3, 3), Point(-1, -1)); morphologyEx(binaryImg, binaryImg, MORPH_OPEN, kernel, Point(-1, -1)); Mat distImg; distanceTransform(binaryImg, distImg, DistanceTypes::DIST_L2, 3, CV_32F); normalize(distImg, distImg, 0。0, 1。0, NORM_MINMAX); //開始生成標記 threshold(distImg, distImg, 0。1, 1。0, THRESH_BINARY); normalize(distImg, distImg, 0, 255, NORM_MINMAX); distImg。convertTo(distImg, CV_8UC1); //CV_32F 轉成 CV_8UC1 //標記開始 vector>contours; vectorhireachy; findContours(distImg, contours, hireachy, RETR_CCOMP, CHAIN_APPROX_SIMPLE); if (contours。empty()) { return Mat(); } Mat markersImg(distImg。size(), CV_32S); markersImg = Scalar::all(0); for (int i = 0; i < contours。size(); i++) { drawContours(markersImg, contours, i, Scalar(i + 1), -1, 8, hireachy, INT_MAX); } circle(markersImg, Point(5, 5) ,3, Scalar(255), -1); //分水嶺變換 watershed(srcImg, markersImg); numSegments = contours。size(); return markersImg;}void DisplaySegments(Mat &markersImg, int numSegments){ //生成隨機顏色 vectorcolors; for (int i = 0; i < numSegments; i++) { int r = theRNG()。uniform(0, 255); int g = theRNG()。uniform(0, 255); int b = theRNG()。uniform(0, 255); colors。push_back(Vec3b((uchar)b, (uchar)g, (uchar)r)); } //顏色填充和最終顯示 Mat dstImg = Mat::zeros(markersImg。size(), CV_8UC3); int 
index = 0; for (int i = 0; i < markersImg。rows; i++) { for (int j = 0; j < markersImg。cols; j++) { index = markersImg。at(i, j); if (index > 0 && index <= numSegments) { dstImg。at(i, j) = colors[index - 1]; } else { dstImg。at(i, j) = Vec3b(255, 255, 255); } } } cout << “number of objects:” << numSegments << endl; namedWindow(“Final Result”, CV_WINDOW_AUTOSIZE); imshow(“Final Result”, dstImg);}int main(){ test(); waitKey(0); return 0;}
效果圖:
2. GrabCut 演算法分割影象
GrabCut 演算法的原理前面有介紹過,這裡就不再介紹了,具體可以看下文章末尾往期推薦中閱讀。下面例子實現影象中物件的摳圖。
基本步驟:
例子程式碼:
#include#includeusing namespace std;using namespace cv;int numRun = 0; //演算法迭代次數bool init = false;Rect rect;Mat srcImg, MaskImg, bgModel, fgModel;//滑鼠回撥函式void onMouse(int event, int x, int y, int flags, void* param);void showImg(); //顯示畫的圖片void setRoiMask(); //選擇 ROI 的函式void runGrabCut(); //執行演算法函式static void ShowHelpText(); //提示使用者操作函式void test(){ srcImg = imread(“toux。jpg”); if (srcImg。empty()) { cout << “could not load image。。。\n” << endl; } namedWindow(“Original image”, CV_WINDOW_AUTOSIZE); imshow(“Original image”, srcImg); //初始化 mask,單通道 8 位 MaskImg。create(srcImg。size(), CV_8UC1); //在不知道它是前景還是背景的情況下,把它全部設為背景。 MaskImg。setTo(Scalar::all(GC_BGD)); //結果不是 0 就是 1 GC_BGD為0 setMouseCallback(“Original image”, onMouse, 0); while (true) { char c = (char)waitKey(0); if (c == ‘n’) // 按下 n 建開始執行演算法 { runGrabCut(); numRun++; showImg(); cout << “current iteative times:” << numRun << endl; } if (c == 27) { break; } }}void onMouse(int event, int x, int y, int flags, void* param){ switch (event) { case EVENT_LBUTTONDOWN: rect。x = x; rect。y = y; rect。width = 1; rect。height = 1; break; case EVENT_MOUSEMOVE: if (flags& EVENT_FLAG_LBUTTON) { rect = Rect(Point(rect。x, rect。y), Point(x, y)); showImg(); } break; case EVENT_LBUTTONUP: if (rect。width > 1 && rect。height > 1) { showImg(); } break; default: break; }}void showImg(){ Mat result, binMask; binMask。create(MaskImg。size(), CV_8UC1); binMask = MaskImg & 1; if (init) { srcImg。copyTo(result,binMask); } else { srcImg。copyTo(result); } rectangle(result, rect, Scalar(0, 0, 255), 2, 8); namedWindow(“Original image”, CV_WINDOW_AUTOSIZE); imshow(“Original image”, result);}void setRoiMask(){ //GC_BGD = 0 明確屬於背景的畫素 //GC_FGD = 1 明確屬於前景的畫素 //GC_PR_BGD = 2 可能屬於背景的畫素 //GC_PR_FGD = 3 可能屬於前景的畫素 MaskImg。setTo(GC_BGD); //為了避免選擇越界 rect。x = max(0, rect。x); rect。y = max(0, rect。y); rect。width = min(rect。width, srcImg。cols - rect。x); rect。height = min(rect。height, srcImg。rows - rect。y); //把我們選取的那一塊設為前景 MaskImg(rect)。setTo(Scalar(GC_PR_FGD));}void 
runGrabCut(){ if (rect。width < 2 || rect。height < 2) { return; } if (init) { grabCut(srcImg, MaskImg, rect, bgModel, fgModel, 1); } else { grabCut(srcImg, MaskImg, rect, bgModel, fgModel, 1, GC_INIT_WITH_RECT); init = true; }}static void ShowHelpText(){ cout << “請先用滑鼠在圖片視窗中標記出屬於前景的區域” << endl; cout << “然後再按按鍵【n】啟動演算法” << endl; cout << “按鍵【ESC】- 退出程式” << endl;}int main(){ ShowHelpText(); test(); waitKey(0); return 0;}
效果圖: