// ConExtraction.cpp: implementation of the CConExtraction class.
#include "ConExtraction.h"
#include <opencv2/opencv.hpp>	// assumed include: Mat, fillPoly, imshow, etc. are used below
#include <time.h>				// assumed include: time()/difftime() drive the background-update timer

// Construction/Destruction

void CConExtraction::CalMask(vector<vector<Point> > regions, int width, int height)
{
	Mat img_mask_human_tmp(height, width, CV_8UC1, Scalar(0));
	Mat img_mask_tmp(height, width, CV_8UC1, Scalar(0));
	Mat img_mask_show(height, width, CV_8UC1, Scalar(0));

	/*const int points_size = regions.size() - 1;
	int *npt = new int[points_size];
	printf("points_size=%d\n", points_size);
	Point **rp = new Point*[points_size];
	for (int p = 1; p < regions.size(); p++)
	{
		printf("p=%d ", p);
		rp[p-1] = new Point[regions[p].size()];
		npt[p-1] = regions[p].size();
		for (int j = 0; j < regions[p].size(); j++)
		{
			rp[p-1][j] = regions[p][j];
			printf("(%d %d) ", rp[p-1][j].x, rp[p-1][j].y);
		}
		printf("\n");
	}*/

	// copy the polygon vertices into plain arrays for the C-style fillPoly overload
	const int points_size = regions.size();
	int *npt = new int[points_size];
	Point **rp = new Point*[points_size];
	for (int p = 0; p < regions.size(); p++)
	{
		rp[p] = new Point[regions[p].size()];
		npt[p] = regions[p].size();
		for (int j = 0; j < regions[p].size(); j++)
		{
			rp[p][j] = regions[p][j];
		}
	}
	const Point **ppt = (const Point**)rp;

	// regions[1..N-1] form the object mask, regions[0] forms the person mask
	fillPoly(img_mask_tmp, ppt + 1, npt + 1, points_size - 1, Scalar(1));
	fillPoly(img_mask_show, ppt + 1, npt + 1, points_size - 1, Scalar(255));
	fillPoly(img_mask_human_tmp, ppt, npt, 1, Scalar(1));
	fillPoly(img_mask_show, ppt, npt, 1, Scalar(128));
	/*imshow("img_mask", img_mask_show);
	cv::imwrite("img_mask.bmp", img_mask_show);
	cv::waitKey(0);*/

	mask_image = img_mask_tmp.clone();
	mask_image_human = img_mask_human_tmp.clone();

	// count how many pixels the person mask covers
	total_mask_image_human = 0;
	for (int i = 0; i < mask_image_human.cols; i++)
	{
		for (int j = 0; j < mask_image_human.rows; j++)
		{
			if (mask_image_human.at<uchar>(j, i) == 1)
				total_mask_image_human++;
		}
	}

	if (npt)
	{
		delete[] npt;
		npt = NULL;
	}
	if (rp)
	{
		for (int i = 0; i < points_size; i++)
		{
			if (rp[i])
			{
				delete[] rp[i];
				rp[i] = NULL;
			}
		}
		delete[] rp;
		rp = NULL;
	}
}
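// Usage sketch (illustrative only): CalMask treats regions[0] as the zone where people
// move (it becomes mask_image_human) and regions[1..N-1] as the zones monitored for
// abandoned objects (they become mask_image). A hypothetical caller might build the
// polygons like this; the coordinates below are made up for illustration:
//
//     vector<vector<Point> > regions;
//     vector<Point> person_zone, item_zone;
//     person_zone.push_back(Point(0, 0));     person_zone.push_back(Point(639, 0));
//     person_zone.push_back(Point(639, 479)); person_zone.push_back(Point(0, 479));
//     item_zone.push_back(Point(100, 200));   item_zone.push_back(Point(300, 200));
//     item_zone.push_back(Point(300, 400));   item_zone.push_back(Point(100, 400));
//     regions.push_back(person_zone);   // regions[0]: person mask
//     regions.push_back(item_zone);     // regions[1..]: monitored object zones
//     extractor.CalMask(regions, 640, 480);   // 'extractor' is a CConExtraction instance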
CConExtraction::CConExtraction()
	: m_nbd(2), m_area(100), m_size(2), human_ratio(0.1), bk_update_interval(10)
{
	m_Contourvec.clear();
	vector<CContour>().swap(m_Contourvec);
	{
		std::vector<CContour> tmp = m_Contourvec;
		m_Contourvec.swap(tmp);
	}
	//m_ForeTargetvec.clear();
	//m_rect = NULL; //???
	m_rect.bottom = 0;
	m_rect.top = 0;
	m_rect.left = 0;
	m_rect.right = 0;
	// mark value: used to mark contour points so they are not scanned repeatedly
}

/*
//copy constructor
CConExtraction::CConExtraction(const int &m_nbd, const int &m_area, const int &m_size)
{
	this->m_nbd = m_nbd;
	this->m_area = m_area;
	this->m_size = m_size;
}
*/

CConExtraction::~CConExtraction()
{
	m_Contourvec.clear();
	vector<CContour>().swap(m_Contourvec);
	{
		std::vector<CContour> tmp = m_Contourvec;
		m_Contourvec.swap(tmp);
	}
	//m_ForeTargetvec.clear();
}

/******************************************************************************
* Function:		ExtractContours
* Description:	extracts the external contours of a binary foreground image
* Calls:		FetchContour
* Called By:	VibeModelGetTrace
* Input:		pSrcImage	foreground/background image of the current frame (foreground = 255, background = 0)
*				width		image width in pixels
*				height		image height in pixels
*				step		number of bytes per image row
* Output:
* Return:		m_Contourvec
*******************************************************************************/
vector<CContour> CConExtraction::ExtractContours(
	unsigned char *pSrcImage,	// pointer to the source image data
	const int &width,			// image width in pixels
	const int &height,			// image height in pixels
	const int &step				// number of bytes per image row
	)
{
	m_Contourvec.clear();
	//m_Contourvec.swap(vector<CContour>(0));
	vector<CContour>().swap(m_Contourvec);
	{
		std::vector<CContour> tmp = m_Contourvec;
		m_Contourvec.swap(tmp);
	}

	unsigned char *img = pSrcImage;	// pointer to the source image data
	//unsigned char *Diaimg = pSrcImage;
	//unsigned char *Removeimg = pSrcImage;
	int x = 0;
	int y = 0;

	// make zero borders: force the image border to background
	for (x = 0; x < width; x++)
	{
		pSrcImage[x] = 0;
		pSrcImage[step * (height - 1) + x] = 0;
	}
	for (y = 0; y < height; y++)
	{
		pSrcImage[step * y] = 0;
		pSrcImage[step * y + width - 1] = 0;
	}
	/*
	memset(pSrcImage, 0, width);
	memset(pSrcImage + step * (height - 1), 0, width);
	for (y = 1; y < height - 1; ++y)
	{
		pSrcImage += step;
		*pSrcImage = *(pSrcImage + width - 1) = 0;
	}
	*/

	//remove the irrelevant noise
	//RemoveNoise(Removeimg, width, height, step);
	//fill the holes of the contour
	//Dilation(Diaimg, width, height, step);

	// find the external contour points
	int prev = img[0 + step];
	int pcur;
	iCPoint origin;	// starting coordinates of the external contour
	CContour m_contour;
	for (y = 1; y < height - 1; ++y)
	{
		for (x = 1; x < width - 1; ++x)
		{
			pcur = img[step * y + x];
			if ((prev == 0) && (pcur == 255))	// external contour to extract
			{
				origin.x = x;
				origin.y = y;
				m_contour = FetchContour(img + step * y + x, step, origin);
				// for debug use
				//m_contour.label = 0;
				if (m_contour.label)
				{
					m_Contourvec.push_back(m_contour);
				}
			}
			else if ((prev == 255) && (pcur == 0))	// inner contour to fill
			{
			}
			else
			{
				prev = pcur;
			}
		} //for
	} //for
	return m_Contourvec;
}

vector<CContour> CConExtraction::ExtractContours_Canny(unsigned char *pSrcImage, const int &width, const int &height, const int &step)
{
	m_Contourvec.clear();
	//m_Contourvec.swap(vector<CContour>(0));
	vector<CContour>().swap(m_Contourvec);

	// edge map of the current frame
	cv::Mat matimg(height, width, CV_8UC3, pSrcImage);
	Mat DstPic, edge, grayImage;
	DstPic.create(matimg.size(), matimg.type());
	cvtColor(matimg, grayImage, COLOR_BGR2GRAY);
	blur(grayImage, edge, Size(3, 3));
	Canny(edge, edge, 35, 105, 3);
	cv::Mat small_edge;
	cv::resize(edge, small_edge, cv::Size(edge.cols / 2, edge.rows / 2));
	// imshow("边缘提取效果", edge);
	// cv::waitKey(1);

	// edge map of the stored reference background
	cv::Mat bk_image = cv::imread("images2/0.jpg");
	Mat bk_DstPic, bk_edge, bk_grayImage;
	bk_DstPic.create(matimg.size(), matimg.type());
	cvtColor(bk_image, bk_grayImage, COLOR_BGR2GRAY);
	blur(bk_grayImage, bk_edge, Size(3, 3));
	Canny(bk_edge, bk_edge, 35, 105, 3);
	//imshow("bk_edge", bk_edge);
	//cv::waitKey(1);
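	// The edge map of the live frame (edge) and the edge map of the reference background
	// (bk_edge) are differenced below, so only edges that appeared or vanished relative
	// to the reference survive into the binary mask handed to ExtractContours().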
	Mat difframe2, tempframe;
	absdiff(bk_edge, edge, difframe2);	// absolute difference (2-3)
	threshold(difframe2, tempframe, 20, 255.0, CV_THRESH_BINARY);
	dilate(tempframe, tempframe, Mat());	// dilate
	Mat erode_element = getStructuringElement(MORPH_RECT, Size(3, 3));
	erode(tempframe, tempframe, erode_element);	// erode
	imshow("tempframe", tempframe);
	cv::waitKey(1);

	CConExtraction *pConExtraction = new CConExtraction();
	m_Contourvec = pConExtraction->ExtractContours(tempframe.data, tempframe.cols, tempframe.rows, tempframe.step);
	delete pConExtraction;	// release the temporary extractor
	return m_Contourvec;
}

vector<CContour> CConExtraction::AeraMaxX(vector<CContour> CForegrounds, int topx)
{
	/*vector<CContour> CForegrounds_MaxX(0);
	int size_flaw = CForegrounds.size();
	if (size_flaw > 0 && size_flaw <= topx)
		return CForegrounds;
	else if (size_flaw > 0 && size_flaw > topx)
	{
		vector<int> area_all;
		for (int i = 0; i < size_flaw; i++)
		{
			area_all.push_back(CForegrounds[i].aera);
		}
		sort(area_all.begin(), area_all.end());
		for (int i = 0; i < size_flaw; i++)
		{
			if (CForegrounds[i].aera > area_all[size_flaw - topx])
				CForegrounds_MaxX.push_back(CForegrounds[i]);
		}
		if (CForegrounds_MaxX.size() < topx)
		{
			for (int i = 0; i < size_flaw; i++)
			{
				if (CForegrounds[i].aera == area_all[size_flaw - topx])
					CForegrounds_MaxX.push_back(CForegrounds[i]);
				if (CForegrounds_MaxX.size() >= topx)
					break;
			}
		}
	}
	return CForegrounds_MaxX;*/
	return vector<CContour>();	// body above is commented out; return an empty vector so the function is well-defined
}

void CConExtraction::InitBackgroud(unsigned char *pSrcImage, const int &width, const int &height, const int &channels)
{
	if (channels == 1)
	{
		cv::Mat img_gray(height, width, CV_8UC1, pSrcImage);
		background_image = img_gray.clone();
	}
	else
	{
		cv::Mat img_gray(height, width, CV_8UC3, pSrcImage);
		cvtColor(img_gray, img_gray, COLOR_BGR2GRAY);
		background_image = img_gray.clone();
	}
	time(&last_time);	// remember when the background was (re)initialised
}

vector<CContour> CConExtraction::ExtractContours_PixelSub(unsigned char *pSrcImage, const int &width, const int &height, const int &step)
{
	double dif;
	static int nFrmNum = 0;
	static cv::Mat img_diff, img_foreg_human;

	m_Contourvec.clear();
	vector<CContour>().swap(m_Contourvec);

	cv::Mat img_gray(height, width, CV_8UC1, pSrcImage);
	if (nFrmNum <= 2)
	{
		absdiff(img_gray, background_image, img_diff);
	}
	else
	{
		// intersect the current and previous background differences to suppress flicker
		img_diffLast = img_diff.clone();
		absdiff(img_gray, background_image, img_diff);
		bitwise_and(img_diffLast, img_diff, img_foreg_human);
		threshold(img_foreg_human, img_foreg_human, 45, 255, 0);
		dilate(img_foreg_human, img_foreg_human, getStructuringElement(MORPH_RECT, Size(7, 7)));
		img_foreg_human = img_foreg_human.mul(mask_image_human);
		/*cv::imshow("img_foreg_human", img_foreg_human);
		cv::waitKey(1);*/

		// measure how much of the person mask is covered by foreground
		int human_pixel_count = 0;
		for (int i = 0; i < img_foreg_human.cols; i++)
		{
			for (int j = 0; j < img_foreg_human.rows; j++)
			{
				if (img_foreg_human.at<uchar>(j, i) == 255)
					human_pixel_count++;
			}
		}
		// printf("%d %f\n", human_pixel_count, (float)human_pixel_count / total_mask_image_human);

		// only when no person is present, run the abandoned-object check
		if ((float)human_pixel_count / total_mask_image_human < human_ratio)
		{
			bitwise_and(img_diffLast, img_diff, img_foreg);
			threshold(img_foreg, img_foreg, 45, 255, 0);
			dilate(img_foreg, img_foreg, getStructuringElement(MORPH_RECT, Size(7, 7)));
			//erode(img_foreg, img_foreg, getStructuringElement(MORPH_RECT, Size(3, 3)));
			//dilate(img_foreg, img_foreg, getStructuringElement(MORPH_RECT, Size(3, 3)));
			img_foreg = img_foreg.mul(mask_image);
			/*cv::imshow("img_foreg", img_foreg);
			cv::waitKey(1);*/
			m_Contourvec = ExtractContours(img_foreg.data, img_foreg.cols, img_foreg.rows, img_foreg.cols);
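			// Background update policy: only when the scene has produced no contours for
			// more than bk_update_interval seconds (10 s by default, see the constructor)
			// is the current frame adopted as the new background below.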
			if (m_Contourvec.size() == 0)
			{
				time_t t;
				time(&t);
				dif = difftime(t, last_time);
				if (dif > bk_update_interval)
				{
					cv::Mat new_bk(height, width, CV_8UC1, pSrcImage);
					background_image = new_bk.clone();
					last_time = t;
				}
			}
		}
	}
	nFrmNum++;
	//printf("m_Contourvec size: %d\n", m_Contourvec.size());
	return m_Contourvec;
}

//vector<CContour> CConExtraction::ExtractContours_PixelSub(unsigned char *pSrcImage, const int &width, const int &height, const int &step)
//{
//	m_Contourvec.clear();
//	//m_Contourvec.swap(vector<CContour>(0));
//	vector<CContour>().swap(m_Contourvec);
//
//	/*{
//	std::vector<CContour> tmp = m_Contourvec;
//	m_Contourvec.swap(tmp);
//	}*/
//
//	background_image = cv::imread("img_backg.bmp");
//	cv::Mat image2(height, width, CV_8UC3, pSrcImage);
//	cv::Mat display_image = image2.clone();
//
//	//cv::imshow("show", image2);
//	if ((background_image.rows != image2.rows) || (background_image.cols != image2.cols))
//	{
//		if (background_image.rows > image2.rows)
//		{
//			resize(background_image, background_image, image2.size(), 0, 0, cv::INTER_LINEAR);
//		}
//		else if (background_image.rows < image2.rows)
//		{
//			resize(image2, image2, background_image.size(), 0, 0, cv::INTER_LINEAR);
//		}
//	}
//
//	cv::Mat image1_gary, image2_gary;
//	if (background_image.channels() != 1)
//	{
//		cvtColor(background_image, image1_gary, cv::COLOR_BGR2GRAY);
//	}
//	if (image2.channels() != 1)
//	{
//		cvtColor(image2, image2_gary, cv::COLOR_BGR2GRAY);
//	}
//
//	cv::Mat frameDifference, absFrameDifferece;
//	cv::Mat previousGrayFrame = image2_gary.clone();
//	// subtract image 2 from image 1
//	subtract(image1_gary, image2_gary, frameDifference, cv::Mat(), CV_16SC1);
//
//	// take the absolute value
//	absFrameDifferece = abs(frameDifference);
//
//	// change the bit depth
//	absFrameDifferece.convertTo(absFrameDifferece, CV_8UC1, 1, 0);
//	//imshow("absFrameDifferece", absFrameDifferece);
//	cv::Mat segmentation;
//
//	// thresholding (this step is critical: the binarisation value has to be tuned well)
//	threshold(absFrameDifferece, segmentation, 75, 255, cv::THRESH_BINARY);
//
//	// median filter
//	medianBlur(segmentation, segmentation, 3);
//
//	// morphological processing (opening/closing)
//	// structuring element used for the morphology
//	cv::Mat morphologyKernel = cv::getStructuringElement(cv::MORPH_RECT, cv::Size(3, 3), cv::Point(-1, -1));
//	morphologyEx(segmentation, segmentation, cv::MORPH_CLOSE, morphologyKernel, cv::Point(-1, -1), 2, cv::BORDER_REPLICATE);
//
//	// show the binarised image
//	imshow("segmentation", segmentation);
//
//	// find the contours
//	CvMemStorage* m_storage = cvCreateMemStorage(0);
//	CvSeq *pContour = NULL;
//	CvSeq *pConInner = NULL;
//	IplImage *ip_segmentation = (IplImage *)&IplImage(segmentation);
//	cvFindContours(ip_segmentation, m_storage, &pContour, sizeof(CvContour), CV_RETR_LIST, CV_CHAIN_APPROX_SIMPLE, cvPoint(0, 0));
//	// release memory
//	IplImage *show_image = (IplImage *)&IplImage(image2);
//	// loop over the external contours
//	int wai = 0;
//	int nei = 0;
//
//	CContour m_contour;
//	for (; pContour != NULL; pContour = pContour->h_next)
//	{
//		wai++;
//		//// loop over the internal contours
//		//for (pConInner = pContour->v_next; pConInner != NULL; pConInner = pConInner->h_next)
//		//{
//		//	nei++;
//		//	// area of the internal contour
//		//	dConArea = fabs(cvContourArea(pConInner, CV_WHOLE_SEQ));
//		//	printf("%f\n", dConArea);
//		//}
//		CvRect rect = cvBoundingRect(pContour, 0);
//
//		m_contour.left = rect.x;
//		m_contour.top = rect.y;
//		m_contour.right = rect.x + rect.width;
//		m_contour.bottom = rect.y + rect.height;
//		m_contour.xCenter = (m_contour.left + m_contour.right) / 2;
//		m_contour.yCenter = (m_contour.top + m_contour.bottom) / 2;
//		m_contour.label = false;
//		m_Contourvec.push_back(m_contour);
//		//cvRectangle(show_image, cvPoint(rect.x, rect.y), cvPoint(rect.x + rect.width, rect.y + rect.height), CV_RGB(255, 255, 255), 1, 8, 0);
//
//		/* rectangle(display_image,
//			cvPoint(rect.x, rect.y),
//			cvPoint(rect.x + rect.width, rect.y +
//			rect.height),
//			cv::Scalar(127, 64, 120), 3, 1);*/
//	}
//
//
//	cvReleaseMemStorage(&m_storage);
//
//
//	//cvShowImage("show_img", show_image);
//	/*imshow("效果图", display_image);
//	cv::waitKey(1);*/
//
//	return m_Contourvec;
//}

/*
vector<CForegroundTarget> CConExtraction::DetectTarget(
	unsigned char *pSrcImage,
	const int &width,
	const int &height,
	const int &step
	)
{
	m_Contourvec.clear();
	//m_Contourvec.swap(vector<CContour>(0));
	m_ForeTargetvec.clear();
	//m_ForeTargetvec.swap(vector<CForegroundTarget>(0));

	unsigned char *img = pSrcImage;	// pointer to the source image data
	unsigned char *Diaimg = pSrcImage;
	unsigned char *Removeimg = pSrcImage;
	int x = 0;
	int y = 0;

	//make zero borders
	memset(pSrcImage, 0, width);
	memset(pSrcImage + step * (height - 1), 0, width);
	for (y = 1; y < height - 1; ++y)
	{
		pSrcImage += step;
		*pSrcImage = *(pSrcImage + width - 1) = 0;
	}

	//remove the irrelevant noise
	RemoveNoise(Removeimg, width, height, step);
	//fill the holes of the contour
	Dilation(Diaimg, width, height, step);

	//find the external contour points
	int prev = img[0 + step];
	int pcur;
	iCPoint origin;	// starting coordinates of the external contour
	CContour m_contour;
	CForegroundTarget m_ForeTarget;
	for (y = 1; y < height - 1; ++y)
	{
		//img += step;
		for (x = 1; x < width - 1; ++x)
		{
			pcur = img[step * y + x];
			if ((prev == 0) && (pcur == 255))	//external contour to extract
			{
				origin.x = x;
				origin.y = y;
				m_contour = FetchContour(img + step * y + x, step, origin);
				if (m_contour.label)
				{
					//m_Contourvec.push_back(m_contour);
					m_ForeTarget.m_Contour = m_contour;
					m_ForeTarget.m_point.x = m_contour.xCenter;
					m_ForeTarget.m_point.y = m_contour.yCenter;
					m_ForeTargetvec.push_back(m_ForeTarget);
				}
			}
			else
			{
				prev = pcur;
			}
		} //for
	} //for
	return m_ForeTargetvec;
}
*/

/******************************************************************************
* Function:		FetchContour
* Description:	follows the border of one connected component and returns its bounding box
* Calls:		DELTAS  BoundingRect
* Called By:	ExtractContours
* Input:		pImage	pointer to the current (starting) pixel
*				step	number of bytes per image row
*				pt		coordinates of the current point
* Output:
* Return:		m_Contour
*******************************************************************************/
CContour CConExtraction::FetchContour(
	unsigned char *pImage,	// pointer to the starting pixel of the external contour
	const int &step,		// number of bytes per image row
	iCPoint &pt				// starting point position of the external contour
	)
{
	int deltas[16];	// index offsets of the 8 neighbours relative to the current point
	DELTAS(deltas, step, 1);
	memcpy(deltas + 8, deltas, 8 * sizeof(deltas[0]));	// initialize the deltas array

	// x,y offsets of the 8 neighbourhood points, in the same order as deltas[]
	int CodeDeltas[8][2] = {{1, 0}, {1, -1}, {0, -1}, {-1, -1}, {-1, 0}, {-1, 1}, {0, 1}, {1, 1}};

	unsigned char *i0 = pImage;
	unsigned char *i1, *i3, *i4 = NULL;
	int s, s_end;
	s_end = s = 4;

	vector<iCPoint> m_Pointvec;
	m_Pointvec.clear();
	vector<iCPoint>().swap(m_Pointvec);
	{
		std::vector<iCPoint> tmp = m_Pointvec;
		m_Pointvec.swap(tmp);
	}

	// look for a foreground neighbour to start from
	do
	{
		s = (s - 1) & 7;
		i1 = i0 + deltas[s];
		if (0 != (*i1))
		{
			break;
		}
	} while (s != s_end);

	//find the second contour point
	if (s != s_end)
	{
		i3 = i0;
		for (; ;)
		{
			//s_end = s;
			// the current point is guaranteed to be foreground, so this inner loop cannot run forever
			for (; ;)
			{
				i4 = i3 + deltas[++s];
				if (0 != (*i4))
				{
					break;
				}
			} //for(; ;)
			s &= 7;

			// points already visited are not analysed again (speed-up)
			if (255 == *i3)
			{
				*i3 = this->m_nbd;
			}
			m_Pointvec.push_back(pt);
			pt.x += CodeDeltas[s][0];
			pt.y += CodeDeltas[s][1];
			if ((i4 == i0) && (i3 == i1))	// the connected component has been traced all the way around
			{
				break;
			}
			i3 = i4;
			s = (s + 4) & 7;
		} //for(; ;)
	} //if (s != s_end)
	else
	{
		*i0 = this->m_nbd;	// isolated pixel: just label it with the mark value
	}
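	// At this point m_Pointvec holds every border pixel visited while walking around the
	// connected component (8-neighbour border following driven by the deltas[] offsets).
	// The two checks below discard small blobs: first by the number of border points
	// (m_size), then by the area of the bounding rectangle (m_area).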
	CContour m_Contour;
	if (this->m_size <= m_Pointvec.size())	// the first step to remove noise points
	{
		//get the bounding rectangle of the contour
		BoundingRect(m_Pointvec);
		int width = m_rect.right - m_rect.left;
		int height = m_rect.bottom - m_rect.top;
		int numpixel = width * height;
		if (this->m_area <= numpixel)	// the second step to remove noise points
		{
			m_Contour.left = m_rect.left;
			m_Contour.top = m_rect.top;
			m_Contour.right = m_rect.right;
			m_Contour.bottom = m_rect.bottom;
			m_Contour.xCenter = (m_Contour.left + m_Contour.right) / 2;
			m_Contour.yCenter = (m_Contour.top + m_Contour.bottom) / 2;
			m_Contour.label = true;
		}
		else
		{
			m_Contour.left = 0;
			m_Contour.top = 0;
			m_Contour.right = 0;
			m_Contour.bottom = 0;
			m_Contour.xCenter = 0;
			m_Contour.yCenter = 0;
			m_Contour.label = false;
		}
	}
	else
	{
		m_Contour.left = 0;
		m_Contour.top = 0;
		m_Contour.right = 0;
		m_Contour.bottom = 0;
		m_Contour.xCenter = 0;
		m_Contour.yCenter = 0;
		m_Contour.label = false;
	}

	m_Pointvec.clear();
	vector<iCPoint>().swap(m_Pointvec);
	{
		std::vector<iCPoint> tmp = m_Pointvec;
		m_Pointvec.swap(tmp);
	}
	return m_Contour;
}

//this function is used to get the external (bounding) rectangle
void CConExtraction::BoundingRect(vector<iCPoint> Pointvec)
{
	int xmin, xmax;
	int ymin, ymax;
	vector<iCPoint>::iterator iter_begin = Pointvec.begin();
	vector<iCPoint>::iterator iter_end = Pointvec.end();
	xmin = xmax = (*iter_begin).x;
	ymin = ymax = (*iter_begin).y;	// initialize the values
	iter_begin += 1;
	for (; iter_begin != iter_end; ++iter_begin)
	{
		int xPoint = (*iter_begin).x;
		int yPoint = (*iter_begin).y;
		if (xmin > xPoint)
		{
			xmin = xPoint;
		}
		if (xmax < xPoint)
		{
			xmax = xPoint;
		}
		if (ymin > yPoint)
		{
			ymin = yPoint;
		}
		if (ymax < yPoint)
		{
			ymax = yPoint;
		}
	}
	m_rect.top = ymin;
	m_rect.left = xmin;
	m_rect.right = xmax;
	m_rect.bottom = ymax;
}

/*
//this function is used to fill the holes of the contour
void CConExtraction::Dilation(
	unsigned char *pSrcImage,
	const int &width,
	const int &height,
	const int &step)
{
	int deltas[8];
	DELTAS(deltas, step, 1);

	int i, j;
	i = j = 0;
	int num;
	num = 0;
	unsigned char *lpSrc = NULL;
	unsigned char *lpDst = NULL;
	unsigned char *pixel = NULL;
	int label = 0;

	unsigned char *pDstImage = new unsigned char[height * width];
	for (i = 0; i < height; ++i)
	{
		for (j = 0; j < width; ++j)
		{
			pDstImage[i * width + j] = 0;
		}
	}

	for (i = 1; i < height - 1; ++i)
	{
		for (j = 1; j < width - 1; ++j)
		{
			label = i * step + j;
			lpSrc = pSrcImage + label;
			for (num = 0; num < 8; ++num)
			{
				pixel = lpSrc + deltas[num];
				if (1 == (*pixel))
				{
					pDstImage[label] = 1;
					break;
				}
			}
		}
	}

	for (i = 0; i < height; ++i)
	{
		for (j = 0; j < width; ++j)
		{
			label = i * width + j;
			pSrcImage[label] = pDstImage[label];
		}
	}
	delete []pDstImage;
	pDstImage = NULL;
}
*/

/*
//this function is used to remove the background noise before fetching contours
void CConExtraction::RemoveNoise(
	unsigned char *pSrcImage,
	const int &width,
	const int &height,
	const int &step)
{
	int deltas[8];
	DELTAS(deltas, step, 1);
	unsigned char *pSrc = pSrcImage;

	int i, j;
	i = j = 0;
	int label = 0;
	unsigned char *pDstImage = new unsigned char[height * width];
	for (i = 0; i < height; ++i)
	{
		for (j = 0; j < width; ++j)
		{
			label = i * width + j;
			pDstImage[label] = pSrcImage[label];
		}
	}

	int label0, label1, label2, label3;
	label0 = label1 = label2 = label3 = 0;
	for (i = 2; i < height - 2; ++i)
	{
		for (j = 2; j < width - 2; ++j)
		{
			label0 = i * step + j;
			label1 = (i - 1) * step + j;
			label2 = (i + 1) * step + j;
			label3 = (i + 2) * step + j;
			if (1 == pSrc[label0])
			{
				if (0 == pSrc[label0 + 1] && 0 == pSrc[label0 - 1] &&
					0 == pSrc[label1] && 0 == pSrc[label1 + 1] && 0 == pSrc[label1 - 1] &&
					0 == pSrc[label2] && 0 == pSrc[label2 + 1] && 0 == pSrc[label2 - 1])
				{
					pDstImage[label0] = 0;
				}
				if (1 == pSrc[label0 + 1] && 0 == pSrc[label0 - 1] && 0 == pSrc[label0 + 2] &&
					0 == pSrc[label1] && 0 == pSrc[label1 + 1] && 0 == pSrc[label1 - 1] && 0 == pSrc[label1 + 2] &&
					0 == pSrc[label2] && 0 == pSrc[label2 + 1] && 0 == pSrc[label2 - 1] && 0 == pSrc[label2 + 2])
				{
					pDstImage[label0] = 0;
					pDstImage[label0 + 1] = 0;
				}
				if (1 == pSrc[label2] && 0 == pSrc[label2 + 1] && 0 == pSrc[label2 - 1] &&
					0 == pSrc[label0 + 1] && 0 == pSrc[label0 - 1] &&
					0 == pSrc[label1] && 0 == pSrc[label1 + 1] && 0 == pSrc[label1 - 1] &&
					0 == pSrc[label3] && 0 == pSrc[label3 + 1] && 0 == pSrc[label3 - 1])
				{
					pDstImage[label0] = 0;
					pDstImage[label2] = 0;
				}
				if (1 == pSrc[label0 + 1] && 0 == pSrc[label0 - 1] && 0 == pSrc[label0 + 2] &&
					1 == pSrc[label2] && 1 == pSrc[label2 + 1] && 0 == pSrc[label2 - 1] && 0 == pSrc[label2 + 2] &&
					0 == pSrc[label1] && 0 == pSrc[label1 + 1] && 0 == pSrc[label1 - 1] && 0 == pSrc[label1 + 2] &&
					0 == pSrc[label3] && 0 == pSrc[label1 + 1] && 0 == pSrc[label3 - 1] && 0 == pSrc[label3 + 2])
				{
					pDstImage[label0] = 0;
					pDstImage[label0 + 1] = 0;
					pDstImage[label2] = 0;
					pDstImage[label2 + 1] = 0;
				}
			}
		}
	}

	for (i = 0; i < height; ++i)
	{
		for (j = 0; j < width; ++j)
		{
			label = i * width + j;
			pSrcImage[label] = pDstImage[label];
		}
	}
	delete []pDstImage;
	pDstImage = NULL;
}
*/
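// ---------------------------------------------------------------------------
// Usage sketch (illustrative only): a minimal per-frame loop built from the
// functions above. Frame acquisition is assumed; grab_gray_frame() is a
// hypothetical helper that fills 'frame' with an 8-bit grayscale image of size
// width x height, and 'regions', 'first_frame', 'width', 'height' are assumed
// to be prepared by the caller.
//
//     CConExtraction extractor;
//     extractor.CalMask(regions, width, height);                 // person/object zone masks
//     extractor.InitBackgroud(first_frame, width, height, 1);    // grayscale reference frame
//     while (grab_gray_frame(frame))
//     {
//         vector<CContour> hits = extractor.ExtractContours_PixelSub(frame, width, height, width);
//         for (size_t k = 0; k < hits.size(); ++k)
//             printf("object at (%d, %d)\n", hits[k].xCenter, hits[k].yCenter);
//     }
// ---------------------------------------------------------------------------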