using System;
using System.Collections.Generic;
using System.Linq;
using System.Text;
using System.Threading.Tasks;
using OpenCvSharp;
namespace LeatherApp.Utils
{
public class OpenCVUtil
{
public static Mat resize(Mat mat, int width, int height, out int xw, out int xh)
{
OpenCvSharp.Size dsize = new OpenCvSharp.Size(width, height);
Mat mat2 = new Mat();
//Cv2.Resize(mat, mat2, dsize);
ResizeUniform(mat, dsize, out mat2, out xw, out xh);
return mat2;
}
public static int ResizeUniform(Mat src, Size dst_size, out Mat dst, out int xw, out int xh)
{
xw = xh = 0;
int w = src.Cols;
int h = src.Rows;
int dst_w = dst_size.Width;
int dst_h = dst_size.Height;
//std::cout << "src: (" << h << ", " << w << ")" << std::endl;
dst = new Mat(dst_h, dst_w, MatType.CV_8UC3, new Scalar(114, 114, 114));
float[] ratio = new float[2];
float ratio_src = w * 1.0f / h;
float ratio_dst = dst_w * 1.0f / dst_h;
int tmp_w = 0;
int tmp_h = 0;
if (ratio_src > ratio_dst)
{
tmp_w = dst_w;
tmp_h = (int)((dst_w * 1.0f / w) * h);
ratio[0] = (float)w / (float)tmp_w;
ratio[1] = (float)h / (float)tmp_h;
}
else if (ratio_src < ratio_dst)
{
tmp_h = dst_h;
tmp_w = (int)((dst_h * 1.0f / h) * w);
ratio[0] = (float)w / (float)tmp_w;
ratio[1] = (float)h / (float)tmp_h;
}
else
{
// aspect ratios already match: plain resize, no padding
Cv2.Resize(src, dst, dst_size);
ratio[0] = (float)w / (float)dst_w; // use the destination size here; tmp_w/tmp_h are still 0 in this branch
ratio[1] = (float)h / (float)dst_h;
return 0;
}
//std::cout << "tmp: (" << tmp_h << ", " << tmp_w << ")" << std::endl;
Mat tmp = new Mat();
Cv2.Resize(src, tmp, new Size(tmp_w, tmp_h));
unsafe
{
if (tmp_w != dst_w)
{ // height already matches; pad the width symmetrically
int index_w = (int)((dst_w - tmp_w) / 2.0);
xw = index_w;
//std::cout << "index_w: " << index_w << std::endl;
for (int i = 0; i < dst_h; i++)
{
Buffer.MemoryCopy(IntPtr.Add(tmp.Data, i * tmp_w * 3).ToPointer(), IntPtr.Add(dst.Data, i * dst_w * 3 + index_w * 3).ToPointer(), tmp_w * 3, tmp_w * 3);
}
}
else if (tmp_h != dst_h)
{ // width already matches; pad the height symmetrically
int index_h = (int)((dst_h - tmp_h) / 2.0);
xh = index_h;
//std::cout << "index_h: " << index_h << std::endl;
Buffer.MemoryCopy(tmp.Data.ToPointer(), IntPtr.Add(dst.Data, index_h * dst_w * 3).ToPointer(), tmp_w * tmp_h * 3, tmp_w * tmp_h * 3);
}
else
{
}
}
return 0;
}
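// Illustrative usage sketch (not part of the original code): letterbox an image into a square
// model input and map a point from the padded image back to source coordinates. The input size
// (640x640), the file name "sample.bmp" and the sample point are hypothetical values.
private static void ExampleResizeUniform()
{
Mat src = Cv2.ImRead("sample.bmp"); // hypothetical test image
Mat boxed = resize(src, 640, 640, out int padX, out int padY);
// The image was scaled by min(640/w, 640/h) and centred, so undo the padding first, then the scale.
float scale = Math.Min(640f / src.Width, 640f / src.Height);
float srcX = (100 - padX) / scale; // 100, 200: a sample point on the letterboxed image
float srcY = (200 - padY) / scale;
Console.WriteLine($"({srcX:0.0}, {srcY:0.0}) on the {src.Width}x{src.Height} source; boxed size {boxed.Width}x{boxed.Height}");
}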
public static Mat CreateLetterbox(Mat mat, OpenCvSharp.Size sz, Scalar color, out float ratio, out OpenCvSharp.Point diff, out OpenCvSharp.Point diff2, bool auto = true, bool scaleFill = false, bool scaleup = true)
{
//Mat mat = new Mat();
//Cv2.CvtColor(mat, mat, ColorConversionCodes.BGR2RGB);
ratio = Math.Min((float)sz.Width / (float)mat.Width, (float)sz.Height / (float)mat.Height);
if (!scaleup)
{
ratio = Math.Min(ratio, 1f);
}
OpenCvSharp.Size dsize = new OpenCvSharp.Size((int)Math.Round((float)mat.Width * ratio), (int)Math.Round((float)mat.Height * ratio));
int num = sz.Width - dsize.Width;
int num2 = sz.Height - dsize.Height;
float num3 = (float)sz.Height / (float)sz.Width;
float num4 = (float)mat.Height / (float)mat.Width;
if (auto && num3 != num4)
{
// auto mode with mismatched aspect ratios: keep the computed padding as-is (nothing to do here)
}
else if (scaleFill)
{
num = 0;
num2 = 0;
dsize = sz;
}
int num5 = (int)Math.Round((float)num / 2f);
int num6 = (int)Math.Round((float)num2 / 2f);
int num7 = 0;
int num8 = 0;
if (num5 * 2 != num)
{
num7 = num - num5 * 2;
}
if (num6 * 2 != num2)
{
num8 = num2 - num6 * 2;
}
if (mat.Width != dsize.Width || mat.Height != dsize.Height)
{
Cv2.Resize(mat, mat, dsize);
}
Cv2.CopyMakeBorder(mat, mat, num6 + num8, num6, num5, num5 + num7, BorderTypes.Constant, color);
diff = new OpenCvSharp.Point(num5, num6);
diff2 = new OpenCvSharp.Point(num7, num8);
return mat;
}
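// Illustrative usage sketch (not part of the original code): YOLO-style letterbox, then map a box
// from letterbox coordinates back to the source image using ratio, diff and diff2. The 640x640
// target, "sample.bmp" and the box coordinates are hypothetical.
private static void ExampleCreateLetterbox()
{
Mat src = Cv2.ImRead("sample.bmp"); // hypothetical test image
Mat input = CreateLetterbox(src.Clone(), new OpenCvSharp.Size(640, 640), new Scalar(114, 114, 114),
out float ratio, out OpenCvSharp.Point diff, out OpenCvSharp.Point diff2);
// Left padding is diff.X; top padding is diff.Y + diff2.Y (the odd extra pixel is added at the top).
float x1 = (120 - diff.X) / ratio, y1 = (80 - diff.Y - diff2.Y) / ratio;
float x2 = (360 - diff.X) / ratio, y2 = (300 - diff.Y - diff2.Y) / ratio;
Console.WriteLine($"box on source: ({x1:0.0}, {y1:0.0}) - ({x2:0.0}, {y2:0.0}), input {input.Width}x{input.Height}");
}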
/// <summary>
/// Crop the specified region
/// </summary>
/// <param name="mat">source image</param>
/// <param name="x">left edge of the region</param>
/// <param name="y">top edge of the region</param>
/// <param name="width">region width</param>
/// <param name="height">region height</param>
/// <returns>a deep copy of the cropped region</returns>
public static Mat cutImage(Mat mat, int x, int y, int width, int height)
{
Rect roi = new Rect(x, y, width, height);
return new Mat(mat, roi).Clone();
}
/// <summary>
/// Merge Mats (all inputs must have identical size and type)
/// </summary>
/// <param name="mats">images to merge</param>
/// <param name="isHorizontal">true: concatenate horizontally; false: concatenate vertically</param>
/// <returns>the merged image</returns>
public static Mat mergeImage_sameSize(Mat[] mats, bool isHorizontal = true)
{
Mat matOut = new Mat();
if (isHorizontal)
Cv2.HConcat(mats, matOut); // concatenate side by side
else
Cv2.VConcat(mats, matOut); // stack vertically
return matOut;
}
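// Illustrative usage sketch (not part of the original code): concatenate two equally sized tiles.
// The file names are hypothetical.
private static void ExampleMergeSameSize()
{
Mat a = Cv2.ImRead("tile_a.bmp"); // hypothetical tiles with identical size and type
Mat b = Cv2.ImRead("tile_b.bmp");
Mat row = mergeImage_sameSize(new[] { a, b }, isHorizontal: true); // widths add up, height unchanged
Mat col = mergeImage_sameSize(new[] { a, b }, isHorizontal: false); // heights add up, width unchanged
Console.WriteLine($"row: {row.Width}x{row.Height}, col: {col.Width}x{col.Height}");
}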
/// <summary>
/// Merge two Mats vertically
/// </summary>
/// <param name="mat1">top image</param>
/// <param name="mat2">bottom image</param>
/// <returns>the merged image</returns>
public static Mat mergeImageV(Mat mat1, Mat mat2)
{
//PushBack appends the rows of the argument after the last row of the target;
//both Mats must have the same number of columns and the same type
Mat img_merge = new Mat();
img_merge.PushBack(mat1);
img_merge.PushBack(mat2);
return img_merge;
}
/// <summary>
/// Merge Mats horizontally by stitching
/// </summary>
/// <param name="mats">overlapping images to stitch</param>
/// <returns>the stitched panorama, or null if stitching fails</returns>
public static Mat mergeImageH(Mat[] mats)
{
Stitcher stitcher = Stitcher.Create(Stitcher.Mode.Scans);
Mat pano = new Mat();
var status = stitcher.Stitch(mats, pano);
if (status == Stitcher.Status.OK)
return pano;
else
return null;
// Legacy approach (kept for reference): build a canvas wide enough for both images,
// define two side-by-side ROIs on it and copy each image into its ROI.
//Size size = new Size(image1.Cols + image2.Cols, Math.Max(image1.Rows, image2.Rows));
//Mat img_merge = new Mat();
//img_merge.Create(size, new MatType(image1.Depth()));
//Mat outImg_left = new Mat(img_merge, new Rect(0, 0, image1.Cols, image1.Rows));
//Mat outImg_right = new Mat(img_merge, new Rect(image1.Cols, 0, image2.Cols, image2.Rows));
//image1.CopyTo(outImg_left);
//image2.CopyTo(outImg_right);
//Cv2.ImShow("image1", img_merge);
}
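// Illustrative usage sketch (not part of the original code): Stitcher.Mode.Scans expects the inputs
// to overlap; when stitching fails the method above returns null, so the caller must check.
// The file names are hypothetical.
private static void ExampleMergeImageH()
{
Mat left = Cv2.ImRead("scan_left.bmp"); // hypothetical overlapping scans
Mat right = Cv2.ImRead("scan_right.bmp");
Mat pano = mergeImageH(new[] { left, right });
if (pano == null)
Console.WriteLine("stitching failed (not enough overlap or features)");
else
Console.WriteLine($"panorama: {pano.Width}x{pano.Height}");
}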
/// <summary>
/// Get the minimal axis-aligned bounding rectangle of the largest contour and crop to it
/// </summary>
/// <param name="srcImg">source image</param>
/// <returns>the cropped image, or null if processing fails</returns>
public static Mat getMimOutRect(Mat srcImg)
{
try
{
//Mat srcImg = new Mat(@"E:\D\AutoCode\LeatherProject\LeatherApp\bin\Debug\testpic\2\11.bmp");
Mat grayImg = new Mat();
Mat binaryImg = new Mat();
Cv2.CvtColor(srcImg, grayImg, ColorConversionCodes.BGR2GRAY);
//Cv2.ImShow("src", srcImg);
//toImg(this.pictureBox1, grayImg);
//
//double thresh = 30; // earlier value
double thresh = 80; // pixels brighter than this become maxval; the rest become 0 (a lower threshold keeps less black area)
double maxval = 255; // value assigned to pixels above the threshold (white)
Cv2.Threshold(grayImg, binaryImg, thresh, maxval, ThresholdTypes.Binary); // binarize; thresh is the cut-off
//Invert the binary image if needed:
//byte grayPixel = 0;
//for (int r = 0; r < binaryImg.Rows; r++)
//{
// for (int c = 0; c < binaryImg.Cols; c++)
// {
// grayPixel = binaryImg.At<byte>(r, c);
// binaryImg.Set<byte>(r, c, (byte)(255 - grayPixel));
// }
//}
//Find contours
OpenCvSharp.Point[][] contours; //contour points
HierarchyIndex[] hierarchy; //contour topology
//==== RetrievalModes:
//External: only the outermost contours are retrieved
//List: all contours, no hierarchy
//CComp: two-level hierarchy (outer boundaries on top, hole boundaries below; a component inside a hole goes back to the top level)
//Tree: full nested hierarchy
//==== ContourApproximationModes:
//ApproxNone: store every contour point (neighbouring points differ by at most one pixel, i.e. max(abs(x1 - x2), abs(y2 - y1)) == 1)
//ApproxSimple: compress horizontal, vertical and diagonal runs, keeping only their end points (a rectangular contour needs just 4 points)
//ApproxTC89L1 / ApproxTC89KCOS: Teh-Chin chain approximation
Cv2.FindContours(binaryImg, out contours, out hierarchy, RetrievalModes.CComp, ContourApproximationModes.ApproxSimple);
//DrawContours could visualize the result; here only the largest contour is needed
Mat dst_Image = Mat.Zeros(grayImg.Size(), srcImg.Type());
Random rnd = new Random();
int maxIndex = 0, maxLength = 0;
for (int i = 0; i < contours.Length; i++)
{
if (contours[i].Length > maxLength)
{
maxLength = contours[i].Length;
maxIndex = i;
}
}
Scalar color = new Scalar(rnd.Next(0, 0), rnd.Next(0, 255), rnd.Next(0, 255));
//var rectMin = Cv2.MinAreaRect(contours[maxIndex]); // rotated minimum-area rectangle (centre, angle, size)
//Rect rect = rectMin.BoundingRect();
Rect rect = Cv2.BoundingRect(contours[maxIndex]); // axis-aligned bounding box of the largest contour
//OpenCvSharp.Point pt1 = new OpenCvSharp.Point(rect.X, rect.Y);
//OpenCvSharp.Point pt2 = new OpenCvSharp.Point(rect.X + rect.Width, rect.Y + rect.Height); //opposite corners of the rectangle
//Cv2.Rectangle(srcImg, pt1, pt2, color, 1); //draw the rectangle border
//Cv2.Line(srcImg, pt1, pt2, color, 1); //draw one diagonal between the two corners
//Cv2.DrawContours(srcImg, contours, maxIndex, color, 2, LineTypes.Link8, hierarchy);
//toImg(this.pictureBox1, srcImg);
//return cutImage(srcImg, rect.X, rect.Y, rect.Width, rect.Height);
return cutImage(srcImg, rect.X, 0, rect.Width, rect.Height); //crop horizontally to the bounding box; the vertical start is kept at 0
//
//Legacy approach (kept for reference): find external contours and pick the one with the most points
//Point[][] contours;
//HierarchyIndex[] hierarchy;
//Cv2.FindContours(binary, out contours, out hierarchy, RetrievalModes.External, ContourApproximationModes.ApproxNone);
//RotatedRect[] rotateRect = new RotatedRect[contours.Length];
//Point[][] contours_poly = new Point[contours.Length][];
//int maxPointCount = 0, index = -1;
//for (int x = 0; x < contours.Length; x++)
//{
// if (maxPointCount < contours[x].Length)
// {
// maxPointCount = contours[x].Length;
// index = x;
// }
//}
}
catch (Exception ex)
{
return null;
}
}
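// Illustrative usage sketch (not part of the original code): crop an image to the bounding box of
// its largest contour. The file name is hypothetical.
private static void ExampleGetMimOutRect()
{
Mat src = Cv2.ImRead("leather.bmp"); // hypothetical image with a bright object on a dark background
Mat cropped = getMimOutRect(src);
if (cropped == null)
Console.WriteLine("no contour found or the image could not be processed");
else
Console.WriteLine($"cropped from {src.Width}x{src.Height} to {cropped.Width}x{cropped.Height}");
}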
#region Maximum inscribed rectangle
/// <summary>
/// Get the maximum inscribed rectangle (the crop keeps the full source height)
/// </summary>
/// <param name="srcImg">source image</param>
/// <param name="thresh">binarization threshold</param>
/// <param name="maxval">value assigned to pixels above the threshold</param>
/// <returns>the source image cropped to the horizontal extent of the inscribed rectangle</returns>
public static Mat getMaxInsetRect(Mat srcImg, double thresh = 45, double maxval = 255)
{
API.OutputDebugString("--------start:"+DateTime.Now.ToString("mm:ss fff"));
var dst = new Mat();
//转灰度
Cv2.CvtColor(srcImg, dst, ColorConversionCodes.RGB2GRAY);
API.OutputDebugString("--------转灰度:" + DateTime.Now.ToString("mm:ss fff"));
//转化黑白二值图 thresh:阀值
//double thresh = 50;//小于此值(超小超是所选黑色越多)转为maxval色
//double maxval = 255;//上面值转为255白色
Cv2.Threshold(dst, dst, thresh, maxval, ThresholdTypes.Binary);
API.OutputDebugString("--------黑白二值图:" + DateTime.Now.ToString("mm:ss fff"));
//取轮廓
Cv2.FindContours(dst, out var contours, out var hierarchy, RetrievalModes.External, ContourApproximationModes.ApproxSimple);
int maxIndex = 0, maxLength = 0;
for (int i = 0; i < contours.Length; i++)
{
if (contours[i].Length > maxLength)
{
maxLength = contours[i].Length;
maxIndex = i;
}
}
API.OutputDebugString("--------取全部轮廓:" + DateTime.Now.ToString("mm:ss fff"));
List> approxContours = new List>();
//先求出多边形的近似轮廓,减少轮廓数量,方便后面计算
var approxContour = Cv2.ApproxPolyDP(contours[maxIndex], 20, true);
API.OutputDebugString("--------减少轮廓数量:" + DateTime.Now.ToString("mm:ss fff"));
approxContours.Add(approxContour.ToList());
//绘制边缘
//DrawContour(srcImg, approxContour, Scalar.Red, 20);
//return srcImg;
Rect rect = GetMaxInscribedRect(srcImg, approxContour.ToList());
API.OutputDebugString("--------取最大内切矩形:" + DateTime.Now.ToString("mm:ss fff"));
var result= cutImage(srcImg, rect.X, 0, rect.Width, srcImg.Height);
API.OutputDebugString("--------裁剪完成:" + DateTime.Now.ToString("mm:ss fff"));
return result;
}
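// Illustrative usage sketch (not part of the original code): the thresh parameter controls which
// pixels count as foreground before the inscribed rectangle is searched; 45 is the default used
// above and the file name is hypothetical.
private static void ExampleGetMaxInsetRect()
{
Mat src = Cv2.ImRead("leather.bmp"); // hypothetical image
Mat cropped = getMaxInsetRect(src, thresh: 45, maxval: 255);
Console.WriteLine($"inscribed-rect crop: {cropped.Width}x{cropped.Height} (height kept at {src.Height})");
}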
public static Mat getMaxInsetRect2(Mat mat_rgb,bool isLeft,int marginHoleWidth,out int marginWidth)
{
int bian = 3500; //width in pixels of the band along the image edge in which the product edge is searched
Rect Roi;
if (!isLeft)
Roi = new Rect(mat_rgb.Width - bian, 0, bian, mat_rgb.Height);
else
Roi = new Rect(0, 0, bian, mat_rgb.Height);
int type = isLeft ? 1 : 0;
int len = EdgeClipping2(mat_rgb, type, Roi, isLeft);
#if false
//Mat mat_rgb = new Mat("E:\\CPL\\测试代码\\边缘检测\\test\\test\\test\\img\\19.bmp");
Mat image_gray = new Mat();
Cv2.CvtColor(mat_rgb, image_gray, ColorConversionCodes.BGR2GRAY);
//cvtColor(image_RGB, image, COLOR_RGB2GRAY);
int height = image_gray.Rows;
int width = image_gray.Cols;
// Algorithm: sample five horizontal lines that split the image into equal segments, process and
// binarize each line, locate the edge on each, average the results to get a straight edge, then
// inset past the band that contains the stitch holes.
// number of segments
int num_rows = 5;
int segment_height = height / num_rows - 1;
// array for the per-line results
int[] total = new int[num_rows];
// sample 5 evenly spaced rows and process each
for (int i = 0; i < num_rows; i++)
{
// take the current row
int start_row = i * segment_height;
Rect roi = new Rect(0, start_row, width, 1);
Mat current_segment = image_gray.Clone(roi);
// smooth the current row
Mat smoothed_image = new Mat();
Cv2.GaussianBlur(current_segment, smoothed_image, new Size(5, 1), 0);
// grayscale histogram of the current row
Mat absolute_histo = new Mat();
Cv2.CalcHist(new Mat[] { smoothed_image }, new int[] { 0 }, new Mat(), absolute_histo, 1, new int[] { 256 }, new Rangef[] { new Rangef(0, 256) });
Cv2.GaussianBlur(current_segment, smoothed_image, new Size(19, 1), 0);
// segment the row (Otsu)
//double otsu_threshold;
//threshold(smoothed_image, smoothed_image, 0, 255, THRESH_BINARY + THRESH_OTSU, &otsu_threshold);
Cv2.Threshold(smoothed_image, smoothed_image, 0, 255, ThresholdTypes.Binary | ThresholdTypes.Otsu);
// fill holes with a morphological close
Mat kernel = Cv2.GetStructuringElement(MorphShapes.Rect, new Size(25, 1));
Mat filled_image = new Mat();
Cv2.MorphologyEx(smoothed_image, filled_image, MorphTypes.Close, kernel);
// take the longer run as the leather width
int num_255 = Cv2.CountNonZero(filled_image);
int length_t = (num_255 > width / 2) ? num_255 : width - num_255;
total[i] = (length_t);
API.OutputDebugString($"getMaxInsetRect2: [{i + 1}]{length_t}={num_255}|{width}");
}
// use the average as the width
int length = (int)total.Average();
marginWidth = width - length;
#endif
int length = (len > mat_rgb.Width / 2) ? len : mat_rgb.Width - len;
marginWidth = mat_rgb.Width - length;
// sanity check: flag the measurement if any sampled line deviates from the mean width by more than the allowed number of pixels
//int abnormal_pxl = 200;
//for (int i = 0; i < num_rows; i++)
//{
// if (Math.Abs(total[i] - length) > abnormal_pxl)
// throw new Exception("Abnormal data: the width measured on this segment is wrong!");
//}
//Right camera: the product edge is on the right; inset 100 px to skip the stitch holes
//Cv2.Line(mat_rgb, new Point(length - 100, 0), new Point(length - 100, height), new Scalar(255, 0, 0), 20);
//Left camera: the product edge is on the left; inset 100 px to skip the stitch holes
//Cv2.Line(mat_rgb, new Point(width - length + 100, 0), new Point(width - length + 100, height), new Scalar(0, 255, 0), 20);
//int decWidth = width - length + marginHoleWidth;
//if (isLeft)
// return cutImage(mat_rgb, decWidth, 0, width - decWidth, height);
//else
// return cutImage(mat_rgb, 0, 0, width - decWidth, height);
API.OutputDebugString($"getMaxInsetRect2:margin={marginWidth},length={length}({marginHoleWidth}),isLeft={isLeft},mat_rgb={mat_rgb.Width}*{mat_rgb.Height},w={length - marginHoleWidth},h={mat_rgb.Height}");
if (isLeft)
return cutImage(mat_rgb, mat_rgb.Width - length+ marginHoleWidth, 0, length- marginHoleWidth, mat_rgb.Height);
else
return cutImage(mat_rgb, 0, 0, length- marginHoleWidth, mat_rgb.Height);
}
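// Illustrative usage sketch (not part of the original code): crop away the background and the
// stitch-hole band on one side of the web. The 200 px hole margin and the file name are
// hypothetical values.
private static void ExampleGetMaxInsetRect2()
{
Mat frame = Cv2.ImRead("camera_left.bmp"); // hypothetical frame from the left-edge camera
Mat cropped = getMaxInsetRect2(frame, true, 200, out int marginWidth); // true: edge on the left; 200: hole margin
Console.WriteLine($"background margin: {marginWidth} px, cropped width: {cropped.Width} px");
}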
/// <summary>
/// Find the product edge inside the given ROI (threshold-based version)
/// </summary>
/// <param name="image">source image</param>
/// <param name="FindType">0: search from left to right; 1: search from right to left</param>
/// <param name="Roi">region in which to search</param>
/// <returns>edge position in source-image x coordinates</returns>
public static int EdgeClipping(Mat image, int FindType, Rect Roi)
{
DateTimeOffset startTime = DateTimeOffset.Now;
Mat mat_rgb = image.Clone(Roi);
int height = mat_rgb.Rows;
int width = mat_rgb.Cols;
int sf = 10; //downscale factor
int pix = 5; //side length (px) of the patch used for mean sampling
int pointNum = 15; //number of sample rows used to find the edge
//downscale by sf
int sf_height = height / sf;
int sf_width = width / sf;
Cv2.Resize(mat_rgb, mat_rgb, new Size(sf_width, sf_height), 0, 0, InterpolationFlags.Linear);
Mat himg = new Mat();
himg = mat_rgb.Clone();
DateTimeOffset endTime = DateTimeOffset.Now;
Console.WriteLine("图片缩小(ms): " + (endTime - startTime).TotalMilliseconds.ToString("0.000"));
startTime = DateTimeOffset.Now;
//滤过去除多余噪声
//Cv2.EdgePreservingFilter(himg, himg, EdgePreservingMethods.RecursFilter);
//Cv2.PyrMeanShiftFiltering(himg, himg, 10, 500, 3);
Cv2.PyrMeanShiftFiltering(himg, himg, 1, 2, 1);
//himg.ImWrite("himg.jpg");
endTime = DateTimeOffset.Now;
Console.WriteLine("滤过去除多余噪声(ms): " + (endTime - startTime).TotalMilliseconds.ToString("0.000"));
startTime = DateTimeOffset.Now;
//转灰度图
Mat image_gray = new Mat();
Cv2.CvtColor(himg, image_gray, ColorConversionCodes.BGR2GRAY);
//image_gray.ImWrite("image_gray.jpg");
//二值化
Mat image_Otsu = new Mat();
int hDis = sf_height / (pointNum + 2); //去除边缘两点
#if false
List<double> LeftAvg = new List<double>();
List<double> RightAvg = new List<double>();
//double thb = Cv2.Threshold(image_gray, image_Otsu, 0, 255, ThresholdTypes.Binary | ThresholdTypes.Otsu);
#region sample several patches to derive a binarization threshold
for (int i = 0; i < pointNum; i++)
{
Rect roiLeft = new Rect(0, hDis + hDis * i, pix, pix);
Mat current_segmentL = image_gray.Clone(roiLeft);
//Scalar ttr = current_segmentL.Mean();
LeftAvg.Add(current_segmentL.Mean().Val0);
Rect roiRight = new Rect(sf_width - pix, hDis + hDis * i, pix, pix);
Mat current_segmentR = image_gray.Clone(roiRight);
RightAvg.Add(current_segmentR.Mean().Val0);
}
double thres = (RightAvg.Average() + LeftAvg.Average()) / 2;
#endregion
#else
double min, max;
image_gray.MinMaxLoc(out min, out max);
double thres = (min + max) / 2;
#endif
//Cv2.Threshold(image_gray, image_Otsu, 0, 255, ThresholdTypes.Otsu);
double thb = Cv2.Threshold(image_gray, image_Otsu, thres, 255, ThresholdTypes.Binary);
//image_Otsu.ImWrite("Otsu1.jpg");
endTime = DateTimeOffset.Now;
Console.WriteLine("Binarization (ms): " + (endTime - startTime).TotalMilliseconds.ToString("0.000"));
startTime = DateTimeOffset.Now;
// arrays for the per-row results
int[] total = new int[pointNum];
List<int> total_t = new List<int>();
bool isLeft = FindType == 0;
// sample pointNum evenly spaced rows and process each
for (int i = 0; i < pointNum; i++)
{
// take the current row
Rect roi = new Rect(0, hDis + hDis * i, sf_width, 1);
Mat current_segment = image_Otsu.Clone(roi);
#if false
#region preprocessing
// smooth the current row
Mat smoothed_image2 = new Mat();
Cv2.GaussianBlur(current_segment, smoothed_image2, new Size(5, 1), 0);
// grayscale histogram of the current row
Mat absolute_histo2 = new Mat();
Cv2.CalcHist(new Mat[] { smoothed_image2 }, new int[] { 0 }, new Mat(), absolute_histo2, 1, new int[] { 256 }, new Rangef[] { new Rangef(0, 256) });
Cv2.GaussianBlur(current_segment, smoothed_image2, new Size(9, 1), 0);
// segment the row
//double otsu_threshold;
//threshold(smoothed_image, smoothed_image, 0, 255, THRESH_BINARY + THRESH_OTSU, &otsu_threshold);
double otsu_threshold2 = Cv2.Threshold(smoothed_image2, smoothed_image2, 0, 255, ThresholdTypes.Binary | ThresholdTypes.Otsu);
// fill holes with a morphological close
Mat kernel3 = Cv2.GetStructuringElement(MorphShapes.Rect, new Size(5, 1));
Mat filled_image3 = new Mat();
Cv2.MorphologyEx(smoothed_image2, filled_image3, MorphTypes.Close, kernel3);
#endregion
#else
Mat filled_image3 = current_segment.Clone();
#endif
#if true
//scan from the left or from the right to find the first change in the binary row
int numX = 0;
byte tempVal = 0;
if (isLeft)
{
tempVal = filled_image3.At<byte>(0, 0);
for (int j = 0; j < filled_image3.Cols; j++)
{
if (filled_image3.At<byte>(0, j) != tempVal)
{
numX = j;
break;
}
}
}
else
{
tempVal = filled_image3.At<byte>(0, filled_image3.Cols - 1);
for (int j = filled_image3.Cols - 1; j >= 0; j--)
{
if (filled_image3.At<byte>(0, j) != tempVal)
{
numX = j;
break;
}
}
}
#else
int numX = Cv2.CountNonZero(filled_image3);
#endif
//int length_t = (numX > (sf_width / 2)) ? numX : sf_width - numX;
int length_t = numX;
total[i] = length_t;
if (length_t > 0)
total_t.Add(length_t);
}
// use the average edge position as the result
int length = (int)total_t.Average();
endTime = DateTimeOffset.Now;
Console.WriteLine("计算边(ms): " + (endTime - startTime).TotalMilliseconds.ToString("0.000"));
// 判断数据是否异常,判断当前线段的宽度是否大于设定像素的偏差
//int abnormal_pxl = 100 / 4;
//for (int i = 0; i < pointNum; i++)
//{
// if (Math.Abs(total[i] - length) > abnormal_pxl)
// Console.WriteLine("数据异常!");
// //出现数据异常,当段图片的宽度有问题
//}
//乘上换算系数还原
length = length * sf + Roi.X;
return length;
}
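// Illustrative usage sketch (not part of the original code): run the edge search on a 3500 px band
// at the right-hand side of a frame, scanning from right to left. The band width and file name are
// hypothetical and mirror how getMaxInsetRect2 builds its search band.
private static void ExampleEdgeClipping()
{
Mat frame = Cv2.ImRead("camera_right.bmp"); // hypothetical frame
Rect band = new Rect(frame.Width - 3500, 0, 3500, frame.Height);
int edgeX = EdgeClipping(frame, 1, band); // 1: scan from right to left inside the band
Console.WriteLine($"edge found at x = {edgeX} (source coordinates)");
}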
/// <summary>
/// Find the product edge inside the given ROI (Canny-based version)
/// </summary>
/// <param name="image">source image</param>
/// <param name="FindType">0: search from left to right; 1: search from right to left</param>
/// <param name="Roi">region in which to search</param>
/// <param name="IsLeft">true if the product edge is expected on the left side</param>
/// <returns>edge position in source-image x coordinates</returns>
public static int EdgeClipping2(Mat image, int FindType, Rect Roi, bool IsLeft)
{
DateTimeOffset startTime = DateTimeOffset.Now;
Mat mat_rgb = image.Clone(Roi);
int height = mat_rgb.Rows;
int width = mat_rgb.Cols;
int sf = 10; //downscale factor
int pix = 5; //side length (px) of the patch used for mean sampling
int pointNum = 15; //number of sample rows used to find the edge
int offsetGray = 5; //grey-level margin used when deriving the binarization threshold
//downscale by sf
int sf_height = height / sf;
int sf_width = width / sf;
Cv2.Resize(mat_rgb, mat_rgb, new Size(sf_width, sf_height), 0, 0, InterpolationFlags.Linear);
Mat himg = new Mat();
himg = mat_rgb.Clone();
DateTimeOffset endTime = DateTimeOffset.Now;
Console.WriteLine("图片缩小(ms): " + (endTime - startTime).TotalMilliseconds.ToString("0.000"));
startTime = DateTimeOffset.Now;
//滤过去除多余噪声
//Cv2.EdgePreservingFilter(himg, himg, EdgePreservingMethods.NormconvFilter);
//Cv2.PyrMeanShiftFiltering(himg, himg, 1, 2, 1);
Cv2.PyrMeanShiftFiltering(himg, himg, 10, 17, 2);
//himg.ImWrite("himg.jpg");
endTime = DateTimeOffset.Now;
Console.WriteLine("滤过去除多余噪声(ms): " + (endTime - startTime).TotalMilliseconds.ToString("0.000"));
startTime = DateTimeOffset.Now;
//转灰度图
Mat image_gray = new Mat();
Cv2.CvtColor(himg, image_gray, ColorConversionCodes.BGR2GRAY);
//image_gray.ImWrite("image_gray.jpg");
Mat image_Canny = new Mat();
Cv2.Canny(image_gray, image_Canny, 32, 64);
//image_Canny.ImWrite("image_Canny.jpg");
//二值化
Mat image_Otsu = new Mat();
int hDis = sf_height / (pointNum + 2); //去除边缘两点
#if false //threshold-based variant
List<double> LeftAvg = new List<double>();
List<double> RightAvg = new List<double>();
//double thb = Cv2.Threshold(image_gray, image_Otsu, 0, 255, ThresholdTypes.Binary | ThresholdTypes.Otsu);
#region sample several patches to derive a binarization threshold
for (int i = 0; i < pointNum; i++)
{
Rect roiLeft = new Rect(0, hDis + hDis * i, pix, pix);
Mat current_segmentL = image_gray.Clone(roiLeft);
//Scalar ttr = current_segmentL.Mean();
LeftAvg.Add(current_segmentL.Mean().Val0);
Rect roiRight = new Rect(sf_width - pix, hDis + hDis * i, pix, pix);
Mat current_segmentR = image_gray.Clone(roiRight);
RightAvg.Add(current_segmentR.Mean().Val0);
}
double thres = 0;
if (IsLeft)
{
if (LeftAvg.Average() > RightAvg.Average())
thres = RightAvg.Max() + offsetGray;
else
thres = RightAvg.Min() - offsetGray;
}
else
{
if (LeftAvg.Average() > RightAvg.Average())
thres = LeftAvg.Min() - offsetGray;
else
thres = LeftAvg.Max() + offsetGray;
}
//double thres = (RightAvg.Average() + )/2;
#endregion
#endif
#if false
double min, max;
image_gray.MinMaxLoc(out min, out max);
double thres = (min + max) / 2;
#endif
#if false //binarized-image variant
//Cv2.Threshold(image_gray, image_Otsu, 0, 255, ThresholdTypes.Otsu);
double thb = Cv2.Threshold(image_gray, image_Otsu, thres, 255, ThresholdTypes.Binary);
image_Otsu.ImWrite("Otsu1.jpg");
Cv2.MedianBlur(image_Otsu, image_Otsu, 21);
image_Otsu.ImWrite("Otsu2.jpg");
endTime = DateTimeOffset.Now;
Console.WriteLine("Binarization (ms): " + (endTime - startTime).TotalMilliseconds.ToString("0.000"));
startTime = DateTimeOffset.Now;
#else
image_Otsu = image_Canny;
#endif
// arrays for the per-row results
int[] total = new int[pointNum];
List<int> total_t = new List<int>();
bool isLeft = FindType == 0;
// sample pointNum evenly spaced rows and process each
for (int i = 0; i < pointNum; i++)
{
// take the current row
Rect roi = new Rect(0, hDis + hDis * i, sf_width, 1);
Mat current_segment = image_Otsu.Clone(roi);
#if false
#region preprocessing
// smooth the current row
Mat smoothed_image2 = new Mat();
Cv2.GaussianBlur(current_segment, smoothed_image2, new Size(5, 1), 0);
// grayscale histogram of the current row
Mat absolute_histo2 = new Mat();
Cv2.CalcHist(new Mat[] { smoothed_image2 }, new int[] { 0 }, new Mat(), absolute_histo2, 1, new int[] { 256 }, new Rangef[] { new Rangef(0, 256) });
Cv2.GaussianBlur(current_segment, smoothed_image2, new Size(9, 1), 0);
// segment the row
//double otsu_threshold;
//threshold(smoothed_image, smoothed_image, 0, 255, THRESH_BINARY + THRESH_OTSU, &otsu_threshold);
double otsu_threshold2 = Cv2.Threshold(smoothed_image2, smoothed_image2, 0, 255, ThresholdTypes.Binary | ThresholdTypes.Otsu);
// fill holes with a morphological close
Mat kernel3 = Cv2.GetStructuringElement(MorphShapes.Rect, new Size(5, 1));
Mat filled_image3 = new Mat();
Cv2.MorphologyEx(smoothed_image2, filled_image3, MorphTypes.Close, kernel3);
#endregion
#else
//Mat filled_image3 = current_segment.Clone();
Mat filled_image3 = current_segment;
#endif
#if true
//scan from the left or from the right to find the first change in the binary row
int numX = 0;
byte tempVal = 0;
if (isLeft)
{
tempVal = filled_image3.At<byte>(0, 0);
for (int j = 0; j < filled_image3.Cols; j++)
{
if (filled_image3.At<byte>(0, j) != tempVal)
{
numX = j;
break;
}
}
}
else
{
tempVal = filled_image3.At<byte>(0, filled_image3.Cols - 1);
for (int j = filled_image3.Cols - 1; j >= 0; j--)
{
if (filled_image3.At<byte>(0, j) != tempVal)
{
numX = j;
break;
}
}
}
#else
int numX = Cv2.CountNonZero(filled_image3);
#endif
//int length_t = (numX > (sf_width / 2)) ? numX : sf_width - numX;
int length_t = numX;
total[i] = length_t;
if (length_t > 0)
total_t.Add(length_t);
}
// use the average edge position as the result
int length = (int)total_t.Average();
endTime = DateTimeOffset.Now;
Console.WriteLine("计算边(ms): " + (endTime - startTime).TotalMilliseconds.ToString("0.000"));
// 判断数据是否异常,判断当前线段的宽度是否大于设定像素的偏差
//int abnormal_pxl = 100 / 4;
//for (int i = 0; i < pointNum; i++)
//{
// if (Math.Abs(total[i] - length) > abnormal_pxl)
// Console.WriteLine("数据异常!");
// //出现数据异常,当段图片的宽度有问题
//}
//乘上换算系数还原
length = length * sf + Roi.X;
return length;
}
private static Rect GetMaxInscribedRect(Mat src, List<Point> contour)
{
//Form a rectangle from every pair of contour points, intersect each new rectangle with all
//rectangles seen so far, and collect the corners of the rectangles and of their intersections.
//Keep only the distinct corners that lie inside the contour, then combine those corners two by two
//into candidate rectangles, reject any candidate that is not fully inside the polygon, and keep
//the one with the largest area as the maximum inscribed rectangle.
//Example with 4 contour points: points 1+2 give rectangle 1, 1+3 rectangle 2, 1+4 rectangle 3,
//2+3 rectangle 4, 2+4 rectangle 5, 3+4 rectangle 6. Rectangle 1 has nothing to intersect with, so
//only its corners go into allPoint; rectangle 2 contributes its own corners plus the corners of its
//intersection with rectangle 1; rectangle 3 adds its corners plus those of its intersections with
//rectangles 1 and 2, and so on.
Rect maxInscribedRect = new Rect();
List<Rect> allRect = new List<Rect>();
List<Point> allPoint = new List<Point>(contour);
//form a rectangle from each pair of contour points
for (int i = 0; i < contour.Count; i++)
{
for (int j = i + 1; j < contour.Count; j++)
{
var p1 = contour[i];
var p2 = contour[j];
if (p1.Y == p2.Y || p1.X == p2.X)
continue;
var tempRect = FromTowPoint(p1, p2);
allPoint.AddRange(GetAllCorner(tempRect));
//intersect the new rectangle with every rectangle seen so far and collect the corners of the intersections
foreach (var rect in allRect)
{
var intersectR = tempRect.Intersect(rect);
if (intersectR != Rect.Empty)
allPoint.AddRange(GetAllCorner(intersectR));
}
allRect.Add(tempRect);
}
}
//remove duplicate points, then combine the remaining points two by two into candidate rectangles,
//keep only rectangles that lie inside the polygon and track the one with the largest area
List<Point> distinctPoints = allPoint.Distinct().ToList();
for (int i = 0; i < distinctPoints.Count; i++)
{
for (int j = i + 1; j < distinctPoints.Count; j++)
{
var tempRect = FromTowPoint(distinctPoints[i], distinctPoints[j]);
//reject the candidate if the contour does not contain all of its corners, or if any contour point lies strictly inside it
if (!ContainPoints(contour, GetAllCorner(tempRect)) || ContainsAnyPt(tempRect, contour))
continue;
//src.Rectangle(tempRect, Scalar.RandomColor(), 2);
if (tempRect.Width * tempRect.Height > maxInscribedRect.Width * maxInscribedRect.Height)
maxInscribedRect = tempRect;
}
}
//src.Rectangle(maxInscribedRect, Scalar.Yellow, 2);
return maxInscribedRect == Rect.Empty ? Cv2.BoundingRect(contour) : maxInscribedRect;
}
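// Note on cost (not part of the original code): the search above enumerates rectangles from every
// pair of candidate corner points, so its cost grows roughly with the fourth power of the number of
// contour points. The sketch below illustrates why the caller simplifies the contour with
// ApproxPolyDP first; the epsilon of 20 matches the call in getMaxInsetRect and the contour points
// are a hypothetical example.
private static void ExampleInscribedRectOnSimplifiedContour()
{
Mat canvas = new Mat(400, 400, MatType.CV_8UC3, new Scalar(0, 0, 0)); // hypothetical canvas
Point[] rough = { new Point(20, 40), new Point(380, 25), new Point(390, 370), new Point(30, 390), new Point(25, 200) };
Point[] simplified = Cv2.ApproxPolyDP(rough, 20, true); // fewer points means far fewer candidate rectangles
Rect inscribed = GetMaxInscribedRect(canvas, simplified.ToList());
Console.WriteLine($"{simplified.Length} points -> inscribed rect {inscribed.Width}x{inscribed.Height}");
}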
public static Point[] GetAllCorner(Rect rect)
{
Point[] result = new Point[4];
result[0] = rect.Location;
result[1] = new Point(rect.X + rect.Width, rect.Y);
result[2] = rect.BottomRight;
result[3] = new Point(rect.X, rect.Y + rect.Height);
return result;
}
private static bool ContainPoint(List<Point> contour, Point p1)
{
return Cv2.PointPolygonTest(contour, p1, false) > 0;
}
private static bool ContainPoints(List<Point> contour, IEnumerable<Point> points)
{
foreach (var point in points)
{
if (Cv2.PointPolygonTest(contour, point, false) < 0)
return false;
}
return true;
}
private static void DrawContour(Mat mat, Point[] contour, Scalar color, int thickness)
{
for (int i = 0; i < contour.Length; i++)
{
if (i + 1 < contour.Length)
Cv2.Line(mat, contour[i], contour[i + 1], color, thickness);
}
}
/// <summary>
/// Returns true if any point in the collection lies strictly inside the rectangle (points on the border do not count)
/// </summary>
/// <param name="rect">rectangle to test</param>
/// <param name="points">candidate points</param>
/// <returns>true if at least one point is strictly inside the rectangle</returns>
public static bool ContainsAnyPt(Rect rect, IEnumerable<Point> points)
{
foreach (var point in points)
{
if (point.X > rect.X && point.X < rect.X + rect.Width && point.Y < rect.BottomRight.Y && point.Y > rect.Y)
return true;
}
return false;
}
/// <summary>
/// Builds an axis-aligned rectangle from two arbitrary points
/// </summary>
/// <param name="p1">first corner</param>
/// <param name="p2">second corner</param>
/// <returns>the rectangle spanned by the two points, or Rect.Empty if they share an X or Y coordinate</returns>
public static Rect FromTowPoint(Point p1, Point p2)
{
if (p1.X == p2.X || p1.Y == p2.Y)
return Rect.Empty;
if (p1.X > p2.X && p1.Y < p2.Y)
{
(p1, p2) = (p2, p1);
}
else if (p1.X > p2.X && p1.Y > p2.Y)
{
(p1.X, p2.X) = (p2.X, p1.X);
}
else if (p1.X < p2.X && p1.Y < p2.Y)
{
(p1.Y, p2.Y) = (p2.Y, p1.Y);
}
return Rect.FromLTRB(p1.X, p2.Y, p2.X, p1.Y);
}
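// Illustrative usage sketch (not part of the original code): FromTowPoint normalises the two corner
// points so the result is the same regardless of the order in which they are passed; points sharing
// an X or Y coordinate yield Rect.Empty. The coordinates are arbitrary examples.
private static void ExampleFromTowPoint()
{
Rect a = FromTowPoint(new Point(10, 80), new Point(50, 20));
Rect b = FromTowPoint(new Point(50, 20), new Point(10, 80));
Console.WriteLine($"a == b: {a == b}, rect: {a.X},{a.Y} {a.Width}x{a.Height}"); // the same rectangle either way
}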
#endregion
public static Mat CannyOperator(Mat srcImg, double threshold1 = 100, double threshold2 = 200)
{
var gray = new Mat();
//convert to grayscale
Cv2.CvtColor(srcImg, gray, ColorConversionCodes.RGB2GRAY);
//box filter to suppress noise before edge detection
Cv2.Blur(gray, gray, new OpenCvSharp.Size(2, 2));
//double threshold1 = 255, threshold2 = 0;
var dst = new Mat();
Cv2.Canny(gray, dst, threshold1, threshold2); //run Canny on the preprocessed grayscale image
//Cv2.ImShow("dst", dst);
return dst;
}
public static Mat LaplacianOperator(Mat srcImg, double threshold1 = 10, double threshold2 = 255)
{
Mat LaplacianImg = new Mat();
Mat gussImage = new Mat();
//Gaussian blur: every pixel becomes a weighted average of its neighbourhood, with weights that are largest at the centre and fall off with distance
/* src: input image
dst: output image
ksize: Gaussian kernel size; width and height may differ but must both be positive and odd, or 0 so they are derived from sigma
sigmaX: standard deviation of the kernel in the X direction
sigmaY: standard deviation in the Y direction; if 0 it defaults to sigmaX, and if both are 0 they are computed from ksize
borderType: usually left at the default
*/
Cv2.GaussianBlur(srcImg, gussImage, new OpenCvSharp.Size(3, 3), 0, 0, BorderTypes.Default);
Mat grayImage = new Mat();
Cv2.CvtColor(gussImage, grayImage, ColorConversionCodes.RGB2GRAY); //grayscale
//Laplacian: second-order derivative
/* src: source image
dst: output image, same size and channel count as src
ddepth: desired depth of the output; -1 keeps the source depth
ksize: aperture size of the second-derivative filter (convolution kernel), must be odd
scale: optional scale factor for the computed Laplacian values (no scaling by default)
delta: optional value added to the result before it is stored in dst
borderType: border handling mode
*/
Cv2.Laplacian(grayImage, LaplacianImg, -1, 3); //-1 keeps the source depth; 3 is the (odd) kernel size of the second-derivative filter
//Thresholding: segment the image by grey-level differences
/* src: input image
dst: output image
thresh: threshold value
maxval: value assigned to the "on" pixels
type: threshold type
Binary: pixels above thresh become maxval, the rest become 0
BinaryInv: the inverse of Binary
Trunc: pixels above thresh are clamped to thresh, the rest are unchanged
ToZero: pixels above thresh are unchanged, the rest become 0
ToZeroInv: the inverse of ToZero
*/
Mat dst = new Mat();
Cv2.Threshold(LaplacianImg, dst, threshold1, threshold2, ThresholdTypes.Binary);
return dst;
}
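// Illustrative usage sketch (not part of the original code): a lower threshold1 keeps weaker
// Laplacian responses in the binary output. The values and the file name are hypothetical.
private static void ExampleLaplacianOperator()
{
Mat src = Cv2.ImRead("sample.bmp"); // hypothetical image
Mat faint = LaplacianOperator(src, 5, 255); // keeps weaker edges
Mat strong = LaplacianOperator(src, 40, 255); // keeps only strong edges
Console.WriteLine($"edge pixels: thresh=5 -> {Cv2.CountNonZero(faint)}, thresh=40 -> {Cv2.CountNonZero(strong)}");
}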
//The Sobel operator is a discrete differentiation edge operator. It is quite sensitive to noise, so the image is usually Gaussian-blurred first.
public static Mat SobelOperator(Mat src_img, double threshold1 = 10, double threshold2 = 250)
{
Mat dst = new Mat();
//Gaussian blur
Cv2.GaussianBlur(src_img, dst, new OpenCvSharp.Size(3, 3), 0, 0, BorderTypes.Default);
Mat grayImage = new Mat();
Cv2.CvtColor(dst, grayImage, ColorConversionCodes.BGR2GRAY); //convert to grayscale
Mat X = new Mat();
Mat Y = new Mat();
/* src: input image
dst: output image
ddepth: output image depth
xorder: derivative order in X
yorder: derivative order in Y
ksize: Sobel kernel size, must be odd
scale: scale factor applied to the derivative values, default 1
delta: optional value added before storing into dst
borderType: border handling mode, usually the default
*/
Cv2.Sobel(grayImage, X, MatType.CV_16S, 1, 0, 3); //gradient in X: 16-bit signed output, first derivative in X, kernel size 3 (odd)
Cv2.Sobel(grayImage, Y, MatType.CV_16S, 0, 1, 3); //gradient in Y
#region Option 1: combine the gradients pixel by pixel
int width = X.Cols;
int hight = Y.Rows;
Mat output = new Mat(X.Size(), MatType.CV_8UC1); //8-bit single channel so the byte magnitudes written below are stored correctly
for (int x = 0; x < hight; x++) //combine X and Y: G = sqrt(Gx*Gx + Gy*Gy)
{
for (int y = 0; y < width; y++)
{
int xg = X.At<short>(x, y); //read the gradient values (CV_16S elements)
int yg = Y.At<short>(x, y);
double v1 = Math.Pow(xg, 2); //square
double v2 = Math.Pow(yg, 2);
int val = (int)Math.Sqrt(v1 + v2); //square root
if (val > 255) //clamp to the 0..255 range
{
val = 255;
}
if (val < 0)
{
val = 0;
}
byte xy = (byte)val;
output.Set<byte>(x, y, xy); //write the magnitude into the output image
}
}
#endregion
#region Option 2: use the built-in API (|X gradient| + |Y gradient|)
//Mat Abs_X = new Mat();
//Mat Abs_Y = new Mat();
//Mat Result = new Mat();
//Cv2.ConvertScaleAbs(X, Abs_X, 1.0); //scale, take the absolute value and convert to 8-bit
//Cv2.ConvertScaleAbs(Y, Abs_Y, 1.0); //scale, take the absolute value and convert to 8-bit
//Cv2.AddWeighted(Abs_X, 0.5, Abs_Y, 0.5, 0, Result); //blend the two gradient images with equal weights
#endregion
//threshold the gradient magnitude image
Mat result = new Mat();
Cv2.Threshold(output, result, threshold1, threshold2, ThresholdTypes.Binary);
return result;
}
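// Illustrative sketch of "Option 2" from the commented region above (not part of the original
// code): approximate the gradient magnitude with |Gx| + |Gy| using ConvertScaleAbs and AddWeighted
// instead of the per-pixel loop.
private static Mat ExampleSobelOption2(Mat srcBgr)
{
Mat gray = new Mat();
Cv2.CvtColor(srcBgr, gray, ColorConversionCodes.BGR2GRAY);
Mat gx = new Mat(), gy = new Mat();
Cv2.Sobel(gray, gx, MatType.CV_16S, 1, 0, 3);
Cv2.Sobel(gray, gy, MatType.CV_16S, 0, 1, 3);
Mat absX = new Mat(), absY = new Mat(), magnitude = new Mat();
Cv2.ConvertScaleAbs(gx, absX); // |Gx| as 8-bit
Cv2.ConvertScaleAbs(gy, absY); // |Gy| as 8-bit
Cv2.AddWeighted(absX, 0.5, absY, 0.5, 0, magnitude); // blend the two gradient images
return magnitude;
}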
//The Scharr operator is a refinement of the Sobel operator, especially for 3x3 kernels
public static Mat ScharrOperator(Mat srcImg, double threshold1 = 10, double threshold2 = 250)
{
Mat dst = new Mat();
Cv2.GaussianBlur(srcImg, dst, new OpenCvSharp.Size(3, 3), 0, 0, BorderTypes.Default);
Mat grayImage = new Mat();
Cv2.CvtColor(dst, grayImage, ColorConversionCodes.BGR2GRAY); //convert to grayscale
Mat grad_x = new Mat();
Mat grad_x2 = new Mat();
Mat grad_y = new Mat();
Mat grad_y2 = new Mat();
Cv2.Scharr(grayImage, grad_x, MatType.CV_16S, 1, 0);
Cv2.Scharr(grayImage, grad_y, MatType.CV_16S, 0, 1);
Cv2.ConvertScaleAbs(grad_x, grad_x2);
Cv2.ConvertScaleAbs(grad_y, grad_y2);
Mat result = new Mat();
Cv2.AddWeighted(grad_x2, 0.5, grad_y2, 0.5, 0, result);
//threshold
Cv2.Threshold(result, result, threshold1, threshold2, ThresholdTypes.Binary);
//Cv2.ImShow("Scharr", result);
return result;
}
}
}