geboshi_V1/LeatherProject/LeatherApp/Utils/OpenCVUtil.cs

using System;
using System.Collections.Generic;
using System.Linq;
using System.Runtime.InteropServices;
using System.Text;
using System.Threading.Tasks;
using System.Xaml;
using DocumentFormat.OpenXml.Vml;
using HalconDotNet;
using OpenCvSharp;
using OpenCvSharp.XImgProc;
namespace LeatherApp.Utils
{
public class OpenCVUtil
{
#region
private static StructuredEdgeDetection _edgeDetect;
public static void LoadEdgeMode()
{
if (_edgeDetect == null)
_edgeDetect = OpenCvSharp.XImgProc.CvXImgProc.CreateStructuredEdgeDetection("model.yml");
}
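// Illustrative sketch (not referenced elsewhere in this file): the preprocessing that DetectEdges expects,
// mirroring what EdgeClipping3 below does - BGR -> RGB, float32 scaled to [0,1]. The file paths are
// hypothetical placeholders and the "model.yml" file must exist next to the executable.
public static void ExampleDetectEdgesToFile(string imagePath, string edgeMapPath)
{
LoadEdgeMode(); // make sure model.yml has been loaded once
Mat bgr = Cv2.ImRead(imagePath);
Mat rgb = new Mat();
Cv2.CvtColor(bgr, rgb, ColorConversionCodes.BGR2RGB);
rgb.ConvertTo(rgb, MatType.CV_32F, 1 / 255.0); // the detector wants float RGB in [0,1]
Mat edges = new Mat();
_edgeDetect.DetectEdges(rgb, edges); // per-pixel edge probability in [0,1]
Mat edges8u = new Mat();
edges.ConvertTo(edges8u, MatType.CV_8U, 255.0); // back to 8-bit for saving
Cv2.ImWrite(edgeMapPath, edges8u);
}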
/// <summary>
/// Model-based edge finding (structured edge detection)
/// </summary>
/// <param name="image"></param>
/// <param name="FindType"></param>
/// <param name="Roi"></param>
/// <param name="IsLeft"></param>
/// <returns></returns>
private static int EdgeClipping3(Mat image, int FindType, Rect Roi, bool IsLeft)
{
Mat mat_rgb = image.Clone(Roi);
int height = mat_rgb.Rows;
int width = mat_rgb.Cols;
int sf = 10; //scale-down factor
int pix = 5; //side length (px) of the patch used for mean sampling
int pointNum = 15; //number of scan lines used for edge finding
int offsetGray = 5; //binarization tolerance
int length_t = 0;
List<int> lines = new List<int>();
List<int> total_t = new List<int>();
//scale down by the factor sf
double sf_height = height / sf;
double sf_width = width / sf;
Cv2.Resize(mat_rgb, mat_rgb, new Size(sf_width, sf_height), 0, 0, InterpolationFlags.Linear);
Mat himg = new Mat();
Mat edgeimg = new Mat();
Cv2.CvtColor(mat_rgb, edgeimg, ColorConversionCodes.BGR2RGB);
Mat edges = new Mat();
edgeimg.ConvertTo(edgeimg, MatType.CV_32F, 1 / 255.0);
if (_edgeDetect == null)
LoadEdgeMode();
//Cv2.Normalize(edgeimg, edgeimg, 1.0, 0, NormTypes.L2, -1);
_edgeDetect.DetectEdges(edgeimg, edges);
Mat image_Otsu = new Mat();
int hDis = (int)sf_height / (pointNum + 2); //leave out the two border points
edges.ConvertTo(image_Otsu, MatType.CV_8U, 255.0);
Cv2.Threshold(image_Otsu, image_Otsu, 0, 255, ThresholdTypes.Otsu);
// Array to hold the per-line results
int[] total = new int[pointNum];
// Sample pointNum evenly spaced rows and process each one
for (int i = 0; i < pointNum; i++)
{
// Extract the current row as a one-pixel-high strip
Rect roi = new Rect(0, hDis + hDis * i, (int)sf_width, 1);
Mat current_segment = image_Otsu.Clone(roi);
//Mat filled_image3 = current_segment.Clone();
Mat filled_image3 = current_segment;
#if true
//scan for the edge from left to right or from right to left
int numX = 0;
int tm = 0;
byte tempVal = 0;
bool findOne = false;
if (!IsLeft)
{
tempVal = filled_image3.At<byte>(0, 0);
//filled_image3.
for (int j = 0; j < filled_image3.Cols; j++)
{
if (filled_image3.At<byte>(0, j) != tempVal)
{
if (!findOne)
{
tm = j;
findOne = true;
tempVal = filled_image3.At<byte>(0, j);
}
else
{
//numX = j;
numX = (tm + j) / 2;
break;
}
}
}
}
else
{
tempVal = filled_image3.At<byte>(0, filled_image3.Cols - 1);
for (int j = filled_image3.Cols - 1; j >= 0; j--)
{
if (filled_image3.At<byte>(0, j) != tempVal)
{
if (!findOne)
{
tm = j;
findOne = true;
tempVal = filled_image3.At<byte>(0, j);
}
else
{
//numX = j;
numX = (tm + j) / 2;
break;
}
}
}
}
#else
int numX = Cv2.CountNonZero(filled_image3);
#endif
//length_t = (numX > (sf_width / 2)) ? numX :(int)(sf_width - numX);
length_t = numX;
total[i] = (length_t);
if (length_t > 0)
total_t.Add(length_t);
}
// Use the average as the width
int length = 0;
if (total_t.Count > 0)
{
length = (int)total_t.Average();
if (IsLeft)
length = length - 0;
else
length = length + 0;
}
//scale back up by sf and offset by the ROI origin
length = length * sf + Roi.X;
return length;
}
#endregion
public static Mat resize(Mat mat, int width, int height, out int xw, out int xh)
{
OpenCvSharp.Size dsize = new OpenCvSharp.Size(width, height);
//Mat mat2 = new Mat();
//Cv2.Resize(mat, mat2, dsize);
//ResizeUniform(mat, dsize, out mat2, out xw, out xh);
xw = (width - mat.Cols) / 2;
xh = (height - mat.Rows) / 2;
Mat mat2 = new Mat(height, width, MatType.CV_8UC3, new Scalar(114, 114, 114));
Rect roi = new Rect((width - mat.Cols) / 2, (height - mat.Rows) / 2, mat.Cols, mat.Rows);
mat.CopyTo(new Mat(mat2, roi));
return mat2;
}
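// Illustrative usage sketch for resize (assumption: the input is smaller than the target canvas, since the
// method only pads, it does not scale). Shows how the xw/xh offsets map a point on the padded canvas back to
// the original patch; "patch" and "pointOnCanvas" are hypothetical inputs.
public static Mat ExamplePadAndMapBack(Mat patch, OpenCvSharp.Point pointOnCanvas, out OpenCvSharp.Point pointOnPatch)
{
int xw, xh;
Mat canvas = resize(patch, 640, 640, out xw, out xh); // grey (114,114,114) padded 640x640 canvas
// subtract the padding offsets to go from canvas coordinates back to patch coordinates
pointOnPatch = new OpenCvSharp.Point(pointOnCanvas.X - xw, pointOnCanvas.Y - xh);
return canvas;
}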
public static int ResizeUniform(Mat src, Size dst_size, out Mat dst, out int xw, out int xh)
{
xw = xh = 0;
int w = src.Cols;
int h = src.Rows;
int dst_w = dst_size.Width;
int dst_h = dst_size.Height;
//std::cout << "src: (" << h << ", " << w << ")" << std::endl;
dst = new Mat(dst_h, dst_w, MatType.CV_8UC3, new Scalar(114, 114, 114));
float[] ratio = new float[2];
float ratio_src = w * 1.0f / h;
float ratio_dst = dst_w * 1.0f / dst_h;
int tmp_w = 0;
int tmp_h = 0;
if (ratio_src > ratio_dst)
{
tmp_w = dst_w;
tmp_h = (int)((dst_w * 1.0f / w) * h);
ratio[0] = (float)w / (float)tmp_w;
ratio[1] = (float)h / (float)tmp_h;
}
else if (ratio_src < ratio_dst)
{
tmp_h = dst_h;
tmp_w = (int)((dst_h * 1.0f / h) * w);
ratio[0] = (float)w / (float)tmp_w;
ratio[1] = (float)h / (float)tmp_h;
}
else
{
Cv2.Resize(src, dst, dst_size);
ratio[0] = (float)w / (float)dst_w; //aspect ratios match, so the scale is simply src/dst
ratio[1] = (float)h / (float)dst_h;
return 0;
}
//std::cout << "tmp: (" << tmp_h << ", " << tmp_w << ")" << std::endl;
Mat tmp = new Mat();
Cv2.Resize(src, tmp, new Size(tmp_w, tmp_h));
unsafe
{
if (tmp_w != dst_w)
{ //height already matches, width needs padding
int index_w = (int)((dst_w - tmp_w) / 2.0);
xw = index_w;
//std::cout << "index_w: " << index_w << std::endl;
for (int i = 0; i < dst_h; i++)
{
Buffer.MemoryCopy(IntPtr.Add(tmp.Data, i * tmp_w * 3).ToPointer(), IntPtr.Add(dst.Data, i * dst_w * 3 + index_w * 3).ToPointer(), tmp_w * 3, tmp_w * 3);
}
}
else if (tmp_h != dst_h)
{ //width already matches, height needs padding
int index_h = (int)((dst_h - tmp_h) / 2.0);
xh = index_h;
//std::cout << "index_h: " << index_h << std::endl;
Buffer.MemoryCopy(tmp.Data.ToPointer(), IntPtr.Add(dst.Data, index_h * dst_w * 3).ToPointer(), tmp_w * tmp_h * 3, tmp_w * tmp_h * 3);
}
else
{
}
}
return 0;
}
public static Mat CreateLetterbox(Mat mat, OpenCvSharp.Size sz, Scalar color, out float ratio, out OpenCvSharp.Point diff, out OpenCvSharp.Point diff2, bool auto = true, bool scaleFill = false, bool scaleup = true)
{
//Mat mat = new Mat();
//Cv2.CvtColor(mat, mat, ColorConversionCodes.BGR2RGB);
ratio = Math.Min((float)sz.Width / (float)mat.Width, (float)sz.Height / (float)mat.Height);
if (!scaleup)
{
ratio = Math.Min(ratio, 1f);
}
OpenCvSharp.Size dsize = new OpenCvSharp.Size((int)Math.Round((float)mat.Width * ratio), (int)Math.Round((float)mat.Height * ratio));
int num = sz.Width - dsize.Width;
int num2 = sz.Height - dsize.Height;
float num3 = (float)sz.Height / (float)sz.Width;
float num4 = (float)mat.Height / (float)mat.Width;
if (auto && num3 != num4)
{
bool flag = false;
}
else if (scaleFill)
{
num = 0;
num2 = 0;
dsize = sz;
}
int num5 = (int)Math.Round((float)num / 2f);
int num6 = (int)Math.Round((float)num2 / 2f);
int num7 = 0;
int num8 = 0;
if (num5 * 2 != num)
{
num7 = num - num5 * 2;
}
if (num6 * 2 != num2)
{
num8 = num2 - num6 * 2;
}
if (mat.Width != dsize.Width || mat.Height != dsize.Height)
{
Cv2.Resize(mat, mat, dsize);
}
Cv2.CopyMakeBorder(mat, mat, num6 + num8, num6, num5, num5 + num7, BorderTypes.Constant, color);
diff = new OpenCvSharp.Point(num5, num6);
diff2 = new OpenCvSharp.Point(num7, num8);
return mat;
}
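// Illustrative sketch of how CreateLetterbox output is typically undone for detections: remove the border
// offsets (diff plus the odd-pixel correction diff2 that was added to the top border), then divide by ratio.
// "box" is a hypothetical rectangle predicted on the 640x640 letterboxed image.
public static Rect ExampleUnletterboxBox(Mat frame, Rect box)
{
float ratio;
OpenCvSharp.Point diff, diff2;
Mat boxed = CreateLetterbox(frame.Clone(), new OpenCvSharp.Size(640, 640), new Scalar(114, 114, 114), out ratio, out diff, out diff2);
// map the box from the letterboxed image back to the original frame
int x = (int)((box.X - diff.X) / ratio);
int y = (int)((box.Y - diff.Y - diff2.Y) / ratio);
int w = (int)(box.Width / ratio);
int h = (int)(box.Height / ratio);
return new Rect(x, y, w, h);
}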
/// <summary>
/// Crop the specified region
/// </summary>
/// <param name="mat"></param>
/// <param name="x"></param>
/// <param name="y"></param>
/// <param name="width"></param>
/// <param name="height"></param>
/// <returns></returns>
public static Mat cutImage(Mat mat, int x, int y, int width, int height)
{
Rect roi = new Rect(x, y, width, height);
return new Mat(mat, roi).Clone();
}
/// <summary>
/// Merge Mats; their widths and heights must match
/// </summary>
/// <param name="mats"></param>
/// <param name="isHorizontal"></param>
/// <returns></returns>
public static Mat mergeImage_sameSize(Mat[] mats, bool isHorizontal = true)
{
Mat matOut = new Mat();
if (isHorizontal)
Cv2.HConcat(mats, matOut);//concatenate horizontally
else
Cv2.VConcat(mats, matOut);//concatenate vertically
return matOut;
}
/// <summary>
/// Merge Mats vertically (mat2 is appended below mat1)
/// </summary>
/// <param name="mat1"></param>
/// <param name="mat2"></param>
/// <returns></returns>
public static Mat mergeImageV(Mat mat1,Mat mat2 )
{
Mat matOut = new Mat();
//append mat2 below mat1; vconcat requires both Mats to have the same width and type
Cv2.VConcat(new Mat[] { mat1, mat2 }, matOut);
return matOut;
}
/// <summary>
/// Merge Mats horizontally (via image stitching)
/// </summary>
/// <param name="mats"></param>
/// <returns></returns>
public static Mat mergeImageH(Mat[] mats)
{
Stitcher stitcher = Stitcher.Create(Stitcher.Mode.Scans);
Mat pano = new Mat();
var status = stitcher.Stitch(mats, pano);
if (status == Stitcher.Status.OK)
return pano;
else
return null;
// //1. Create a new image to hold the merged result
// Size size = new Size(image1.Cols + image2.Cols, Math.Max(image1.Rows, image1.Rows));
//Mat img_merge=new Mat();
//img_merge.Create(size,new MatType( image1.Depth()));
////img_merge = Scalar.All(0);
//Mat outImg_left, outImg_right;
////2. Define the regions of interest inside the merged image
//outImg_left = img_merge.a(Rect(0, 0, image1.cols, image1.rows));
//outImg_right = img_merge(Rect(image1.cols, 0, image1.cols, image1.rows));
////3. Copy the source images into their regions of interest
//image1.copyTo(outImg_left);
//image2.copyTo(outImg_right);
//namedWindow("image1", 0);
//Cv2.ImShow("image1", img_merge);
}
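// Illustrative sketch: mergeImageH relies on OpenCV's Stitcher, which can fail (for example when the tiles do
// not overlap enough), so callers should handle the null result. The fallback to mergeImage_sameSize assumes
// all tiles share the same height.
public static Mat ExampleStitchOrConcat(Mat[] tiles)
{
Mat pano = mergeImageH(tiles);
if (pano != null)
return pano;
// stitching failed - fall back to a plain horizontal concat (only valid for equal-height tiles)
return mergeImage_sameSize(tiles, true);
}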
/// <summary>
/// Get the minimum enclosing (upright) bounding rectangle of the largest contour and crop to it
/// </summary>
/// <returns></returns>
public static Mat getMimOutRect(Mat srcImg)
{
try
{
//Mat srcImg = new Mat(@"E:\D\AutoCode\LeatherProject\LeatherApp\bin\Debug\testpic\2\11.bmp");
Mat grayImg = new Mat();
Mat binaryImg = new Mat();
Cv2.CvtColor(srcImg, grayImg, ColorConversionCodes.BGR2GRAY);
//Cv2.ImShow("src", srcImg);
//toImg(this.pictureBox1, grayImg);
//
//double thresh = 30;//binarization threshold (the lower it is, the more pixels count as black)
//double maxval = 255;//white value written above the threshold
double thresh = 80;//binarization threshold (the lower it is, the more pixels count as black)
double maxval = 255;//white value written above the threshold
Cv2.Threshold(grayImg, binaryImg, thresh, maxval, ThresholdTypes.Binary);//binarize to black/white using thresh
//invert the colors (disabled)
//byte grayPixel = 0;
//for (int r = 0; r < binary.Rows; r++)
//{
// for (int c = 0; c < binary.Cols; c++)
// {
// grayPixel = binary.At<byte>(r, c);
// binary.Set<byte>(r, c, (byte)(255 - grayPixel));
// }
//}
//FindContours: extract the contours
OpenCvSharp.Point[][] contours; //contour points found
HierarchyIndex[] hierarchy; //contour topology information
//====RetrievalModes:
//CV_RETR_EXTERNAL: only the outer contours are detected
//CV_RETR_LIST: all contours, without establishing any hierarchy
//CV_RETR_CCOMP: two-level hierarchy - the top level holds outer boundaries, the second level holds hole boundaries; a connected component inside a hole goes back to the top level
//CV_RETR_TREE: full tree hierarchy of nested contours (see the contours.c demo)
//====ContourApproximationModes:
//CV_CHAIN_APPROX_NONE: store every contour point; neighbouring points differ by at most 1 pixel, i.e. max(abs(x1 - x2), abs(y2 - y1)) == 1
//CV_CHAIN_APPROX_SIMPLE: compress horizontal, vertical and diagonal runs, keeping only their end points (a rectangular contour needs just 4 points)
//CV_CHAIN_APPROX_TC89_L1 / CV_CHAIN_APPROX_TC89_KCOS: use the Teh-Chin chain approximation algorithm
Cv2.FindContours(binaryImg, out contours, out hierarchy, RetrievalModes.CComp, ContourApproximationModes.ApproxSimple);
//DrawContours: draw the result and return it
Mat dst_Image = Mat.Zeros(grayImg.Size(), srcImg.Type());
Random rnd = new Random();
int maxIndex = 0, maxLength = 0;
for (int i = 0; i < contours.Length; i++)
{
if (contours[i].Length > maxLength)
{
maxLength = contours[i].Length;
maxIndex = i;
}
}
Scalar color = new Scalar(rnd.Next(0, 0), rnd.Next(0, 255), rnd.Next(0, 255));
//var rectMin = Cv2.MinAreaRect(contours[i]);
//Three parameters: centre position, rotation angle, scale
//Cv2.WarpAffine(srcImg, rectMin.Center, (height, width))
//Rect rect = rectMin.BoundingRect();//
Rect rect = Cv2.BoundingRect(contours[maxIndex]);// axis-aligned bounding box of the largest contour
//OpenCvSharp.Point pt1 = new OpenCvSharp.Point(rect.X, rect.Y);
//OpenCvSharp.Point pt2 = new OpenCvSharp.Point(rect.X + rect.Width, rect.Y + rect.Height); //opposite corners of the rectangle
//Cv2.Rectangle(srcImg, pt1, pt2, color, 1); //draw the rectangle border
//Cv2.Line(srcImg, pt1, pt2, color, 1); //draw a single diagonal between the two points
//Cv2.DrawContours(srcImg, contours, maxIndex, color, 2, LineTypes.Link8, hierarchy);
//toImg(this.pictureBox1, srcImg);
//return cutImage(srcImg, rect.X, rect.Y, rect.Width, rect.Height);
return cutImage(srcImg, rect.X, 0, rect.Width, rect.Height);
//
//arrays that receive the contours
//Point[][] contours;
//HierarchyIndex[] hierarchy;
//Cv2.FindContours(binary, out contours, out hierarchy, RetrievalModes.External, ContourApproximationModes.ApproxNone);
//array that receives the minimum-area rectangles
//RotatedRect[] rotateRect = new RotatedRect[contours.Length];
//Point[][] contours_poly = new Point[contours.Length][];
//int maxPointCount = 0, index = -1;
//for (int x = 0; x < contours.Length; x++)
//{
// if (maxPointCount < contours[x].Length)
// {
// maxPointCount = contours[x].Length;
// index = x;
// }
//}
}
catch (Exception ex)
{
return null;
}
}
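// Illustrative usage sketch for getMimOutRect: crop the dominant bright region (the longest contour after
// thresholding) out of a file and save it. The paths are hypothetical placeholders.
public static bool ExampleCropLargestRegion(string srcPath, string dstPath)
{
Mat src = Cv2.ImRead(srcPath);
Mat cropped = getMimOutRect(src);
if (cropped == null || cropped.Empty())
return false; // thresholding or contour search failed
return Cv2.ImWrite(dstPath, cropped);
}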
#region
/// <summary>
/// Get the maximum inscribed rectangle (the crop keeps the full original image height)
/// </summary>
/// <param name="srcImg"></param>
/// <returns></returns>
public static Mat getMaxInsetRect(Mat srcImg, double thresh = 45, double maxval = 255)
{
API.OutputDebugString("--------start:"+DateTime.Now.ToString("mm:ss fff"));
var dst = new Mat();
//convert to grayscale
Cv2.CvtColor(srcImg, dst, ColorConversionCodes.RGB2GRAY);
API.OutputDebugString("--------转灰度:" + DateTime.Now.ToString("mm:ss fff"));
//binarize to black/white; thresh is the threshold
//double thresh = 50;//binarization threshold (the lower it is, the more pixels count as black)
//double maxval = 255;//white value written above the threshold
Cv2.Threshold(dst, dst, thresh, maxval, ThresholdTypes.Binary);
API.OutputDebugString("--------黑白二值图:" + DateTime.Now.ToString("mm:ss fff"));
//extract the contours
Cv2.FindContours(dst, out var contours, out var hierarchy, RetrievalModes.External, ContourApproximationModes.ApproxSimple);
int maxIndex = 0, maxLength = 0;
for (int i = 0; i < contours.Length; i++)
{
if (contours[i].Length > maxLength)
{
maxLength = contours[i].Length;
maxIndex = i;
}
}
API.OutputDebugString("--------取全部轮廓:" + DateTime.Now.ToString("mm:ss fff"));
List<List<Point>> approxContours = new List<List<Point>>();
//approximate the polygon first to reduce the number of contour points and simplify the later computation
var approxContour = Cv2.ApproxPolyDP(contours[maxIndex], 20, true);
API.OutputDebugString("--------减少轮廓数量:" + DateTime.Now.ToString("mm:ss fff"));
approxContours.Add(approxContour.ToList());
//draw the edge (debug)
//DrawContour(srcImg, approxContour, Scalar.Red, 20);
//return srcImg;
Rect rect = GetMaxInscribedRect(srcImg, approxContour.ToList());
API.OutputDebugString("--------取最大内切矩形:" + DateTime.Now.ToString("mm:ss fff"));
var result= cutImage(srcImg, rect.X, 0, rect.Width, srcImg.Height);
API.OutputDebugString("--------裁剪完成:" + DateTime.Now.ToString("mm:ss fff"));
return result;
}
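// Illustrative sketch: getMaxInsetRect exposes the binarization threshold, so darker material may need a
// lower value. The 30/45/60 sweep is only an example, not a tuned setting; any threshold that yields no
// usable contour simply falls through to the next one.
public static Mat ExampleInsetRectWithFallback(Mat srcImg)
{
foreach (double th in new double[] { 30, 45, 60 })
{
try
{
Mat cropped = getMaxInsetRect(srcImg, th);
if (cropped != null && cropped.Width > 0)
return cropped; // accept the first threshold that produces a usable crop
}
catch (Exception) { /* this threshold produced no usable contour - try the next one */ }
}
return srcImg; // nothing worked - keep the original frame
}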
public static Mat getMaxInsetRect2(Mat mat_rgb,bool isLeft,int marginHoleWidth,out int marginWidth)
{
int bian = 3500;
Rect Roi;
if (!isLeft)
Roi = new Rect(mat_rgb.Width - bian, 0, bian, mat_rgb.Height);
else
Roi = new Rect(0, 0, bian, mat_rgb.Height);
int type = isLeft ? 1 : 0;
int len = EdgeClipping3(mat_rgb, type, Roi, isLeft);
#if false
//Mat mat_rgb = new Mat("E:\\CPL\\测试代码\\边缘检测\\test\\test\\test\\img\\19.bmp");
Mat image_gray = new Mat();
Cv2.CvtColor(mat_rgb, image_gray, ColorConversionCodes.BGR2GRAY);
//cvtColor(image_RGB, image, COLOR_RGB2GRAY);
int height = image_gray.Rows;
int width = image_gray.Cols;
// 算法定义取均分5段图片的五条横线经过一系列处理之后二值化找到沿边位置然后取均值作为直边在缩进一段有针眼的位置
// 定义每段的行数
int num_rows = 5;
int segment_height = height / num_rows - 1;
// 定义空数组保存结果
int[] total = new int[num_rows];
// 平均截取5行数据并处理图像
for (int i = 0; i < num_rows; i++)
{
// 截取当前行的图像
int start_row = i * segment_height;
Rect roi = new Rect(0, start_row, width, 1);
Mat current_segment = image_gray.Clone(roi);
// 对当前行的图像进行平滑处理
Mat smoothed_image = new Mat();
Cv2.GaussianBlur(current_segment, smoothed_image, new Size(5, 1), 0);
// 计算当前行的灰度直方图
Mat absolute_histo = new Mat();
Cv2.CalcHist(new Mat[] { smoothed_image }, new int[] { 0 }, new Mat(), absolute_histo, 1, new int[] { 256 }, new Rangef[] { new Rangef(0, 256) });
Cv2.GaussianBlur(current_segment, smoothed_image, new Size(19, 1), 0);
// 对图片进行分割i+1
//double otsu_threshold;
//threshold(smoothed_image, smoothed_image, 0, 255, THRESH_BINARY + THRESH_OTSU, &otsu_threshold);
Cv2.Threshold(smoothed_image, smoothed_image, 0, 255, ThresholdTypes.Binary | ThresholdTypes.Otsu);
// 使用形态学操作进行孔洞填充
Mat kernel = Cv2.GetStructuringElement(MorphShapes.Rect, new Size(25, 1));
Mat filled_image = new Mat();
Cv2.MorphologyEx(smoothed_image, filled_image, MorphTypes.Close, kernel);
// 取较长的一个值作为皮革的宽度
int num_255 = Cv2.CountNonZero(filled_image);
int length_t = (num_255 > width / 2) ? num_255 : width - num_255;
total[i] = (length_t);
API.OutputDebugString($"getMaxInsetRect2: 【{i + 1}】{length_t}={num_255}|{width}");
}
// 取平均值作为宽度
int length = (int)total.Average();
marginWidth = width-length;
#endif
int length = (len > mat_rgb.Width / 2) ? len : mat_rgb.Width - len;
marginWidth = mat_rgb.Width - length;
// Sanity check (disabled): flag any scan line whose width deviates from the mean by more than the allowed number of pixels
//int abnormal_pxl = 200;
//for (int i = 0; i < num_rows; i++)
//{
// if (Math.Abs(total[i] - length) > abnormal_pxl)
// throw new Exception("Abnormal data: this segment's width is wrong!");
//}
//Right camera: the product edge is on the right; inset 100 px to skip the needle-hole band
//Cv2.Line(mat_rgb, new Point(length - 100, 0), new Point(length - 100, height), new Scalar(255, 0, 0), 20);
////Left camera: the product edge is on the left; inset 100 px to skip the needle-hole band
//Cv2.Line(mat_rgb, new Point(width - length + 100, 0), new Point(width - length + 100, height), new Scalar(0, 255, 0), 20);
//int decWidth = width - length + marginHoleWidth;
//if (isLeft)
// return cutImage(mat_rgb, decWidth, 0, width- decWidth, height);
//else
// return cutImage(mat_rgb, 0, 0, width - decWidth, height);
API.OutputDebugString($"getMaxInsetRect2:margin={marginWidth}length={length}({marginHoleWidth}),isLeft={isLeft},mat_rgb={mat_rgb.Width}*{mat_rgb.Height},w={length - marginHoleWidth},h={mat_rgb.Height}");
if (isLeft)
return cutImage(mat_rgb, mat_rgb.Width - length + marginHoleWidth, 0, length - marginHoleWidth, mat_rgb.Height);
else
return cutImage(mat_rgb, 0, 0, length - marginHoleWidth, mat_rgb.Height);
//if (isLeft)
// return cutImage(mat_rgb, length + marginHoleWidth, 0, length - marginHoleWidth, mat_rgb.Height);
//else
// return cutImage(mat_rgb, 0, 0, length - marginHoleWidth, mat_rgb.Height);
}
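// Illustrative sketch of driving getMaxInsetRect2 for a left-camera frame: strip the background margin plus
// the needle-hole band and report how wide the removed margin was. Assumptions: the frame is wider than the
// 3500 px search band used internally, the structured-edge model "model.yml" is available, and
// marginHoleWidth = 100 is only an example value.
public static Mat ExampleTrimLeftCameraFrame(Mat frame)
{
int marginWidth;
Mat trimmed = getMaxInsetRect2(frame, true, 100, out marginWidth);
Console.WriteLine("removed margin width (px): " + marginWidth);
return trimmed;
}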
/// <summary>
/// Find the material edge inside a ROI by scanning evenly spaced rows of a binarized, downscaled image
/// </summary>
/// <param name="image">input image</param>
/// <param name="FindType">0: search from left to right; 1: search from right to left</param>
/// <param name="Roi">region to search</param>
/// <returns></returns>
public static int EdgeClipping(Mat image, int FindType, Rect Roi)
{
DateTimeOffset startTime = DateTimeOffset.Now;
Mat mat_rgb = image.Clone(Roi);
int height = mat_rgb.Rows;
int width = mat_rgb.Cols;
int sf = 10; //scale-down factor
int pix = 5; //side length (px) of the patch used for mean sampling
int pointNum = 15; //number of scan lines used for edge finding
//scale down by the factor sf
int sf_height = height / sf;
int sf_width = width / sf;
Cv2.Resize(mat_rgb, mat_rgb, new Size(sf_width, sf_height), 0, 0, InterpolationFlags.Linear);
Mat himg = new Mat();
himg = mat_rgb.Clone();
DateTimeOffset endTime = DateTimeOffset.Now;
Console.WriteLine("图片缩小(ms): " + (endTime - startTime).TotalMilliseconds.ToString("0.000"));
startTime = DateTimeOffset.Now;
//filter to remove excess noise
//Cv2.EdgePreservingFilter(himg, himg, EdgePreservingMethods.RecursFilter);
//Cv2.PyrMeanShiftFiltering(himg, himg, 10, 500, 3);
Cv2.PyrMeanShiftFiltering(himg, himg, 1, 2, 1);
//himg.ImWrite("himg.jpg");
endTime = DateTimeOffset.Now;
Console.WriteLine("滤过去除多余噪声(ms): " + (endTime - startTime).TotalMilliseconds.ToString("0.000"));
startTime = DateTimeOffset.Now;
//convert to grayscale
Mat image_gray = new Mat();
Cv2.CvtColor(himg, image_gray, ColorConversionCodes.BGR2GRAY);
//image_gray.ImWrite("image_gray.jpg");
//binarize
Mat image_Otsu = new Mat();
int hDis = sf_height / (pointNum + 2); //leave out the two border points
#if false
List<double> LeftAvg = new List<double>();
List<double> RightAvg = new List<double>();
//double thb = Cv2.Threshold(image_gray, image_Otsu, 0, 255, ThresholdTypes.Binary | ThresholdTypes.Otsu);
#region
for (int i = 0; i < pointNum; i++)
{
Rect roiLeft = new Rect(0, hDis + hDis * i, pix, pix);
Mat current_segmentL = image_gray.Clone(roiLeft);
//Scalar ttr = current_segmentL.Mean();
LeftAvg.Add(current_segmentL.Mean().Val0);
Rect roiRight = new Rect(sf_width - pix, hDis + hDis * i, pix, pix);
Mat current_segmentR = image_gray.Clone(roiRight);
RightAvg.Add(current_segmentR.Mean().Val0);
}
double thres = (RightAvg.Average() + LeftAvg.Average())/2;
#endregion
#else
double min, max;
image_gray.MinMaxLoc(out min, out max);
double thres = (min + max) / 2;
#endif
//Cv2.Threshold(image_gray, image_Otsu, 0, 255, ThresholdTypes.Otsu);
double thb = Cv2.Threshold(image_gray, image_Otsu, thres, 255, ThresholdTypes.Binary);
//image_Otsu.ImWrite("Otsu1.jpg");
endTime = DateTimeOffset.Now;
Console.WriteLine("灰度图二值化(ms): " + (endTime - startTime).TotalMilliseconds.ToString("0.000"));
startTime = DateTimeOffset.Now;
// Array to hold the per-line results
int[] total = new int[pointNum];
List<int> total_t = new List<int>();
bool isLeft = FindType == 0 ? true : false;
// Sample pointNum evenly spaced rows and process each one
for (int i = 0; i < pointNum; i++)
{
// Extract the current row as a one-pixel-high strip
Rect roi = new Rect(0, hDis + hDis * i, sf_width, 1);
Mat current_segment = image_Otsu.Clone(roi);
#if false
#region
// 对当前行的图像进行平滑处理
Mat smoothed_image2 = new Mat();
Cv2.GaussianBlur(current_segment, smoothed_image2, new Size(5, 1), 0);
// 计算当前行的灰度直方图
Mat absolute_histo2 = new Mat();
Cv2.CalcHist(new Mat[] { smoothed_image2 }, new int[] { 0 }, new Mat(), absolute_histo2, 1, new int[] { 256 }, new Rangef[] { new Rangef(0, 256) });
Cv2.GaussianBlur(current_segment, smoothed_image2, new Size(9, 1), 0);
// 对图片进行分割
//double otsu_threshold;
//threshold(smoothed_image, smoothed_image, 0, 255, THRESH_BINARY + THRESH_OTSU, &otsu_threshold);
double otsu_threshold2 = Cv2.Threshold(smoothed_image2, smoothed_image2, 0, 255, ThresholdTypes.Binary | ThresholdTypes.Otsu);
// 使用形态学操作进行孔洞填充
Mat kernel3 = Cv2.GetStructuringElement(MorphShapes.Rect, new Size(5, 1));
Mat filled_image3 = new Mat();
Cv2.MorphologyEx(smoothed_image2, filled_image3, MorphTypes.Close, kernel3);
#endregion
#else
Mat filled_image3 = current_segment.Clone();
#endif
#if true
//scan for the edge from left to right or from right to left
int numX = 0;
byte tempVal = 0;
if (isLeft)
{
tempVal = filled_image3.At<byte>(0, 0);
for (int j = 0; j < filled_image3.Cols; j++)
{
if (filled_image3.At<byte>(0, j) != tempVal)
{
numX = j;
break;
}
}
}
else
{
tempVal = filled_image3.At<byte>(0, filled_image3.Cols - 1);
for (int j = filled_image3.Cols - 1; j >= 0; j--)
{
if (filled_image3.At<byte>(0, j) != tempVal)
{
numX = j;
break;
}
}
}
#else
int numX = Cv2.CountNonZero(filled_image3);
#endif
//int length_t = (numX > (sf_width / 2)) ? numX :sf_width - numX;
int length_t = numX;
total[i] = (length_t);
if (length_t > 0)
total_t.Add(length_t);
}
// Use the average as the width
int length = (int)total_t.Average();
endTime = DateTimeOffset.Now;
Console.WriteLine("计算边(ms): " + (endTime - startTime).TotalMilliseconds.ToString("0.000"));
// Sanity check (disabled): flag any scan line whose width deviates from the mean by more than the allowed number of pixels
//int abnormal_pxl = 100 / 4;
//for (int i = 0; i < pointNum; i++)
//{
// if (Math.Abs(total[i] - length) > abnormal_pxl)
// Console.WriteLine("Abnormal data!");
// //abnormal data: this segment's width is suspect
//}
//scale back up by sf and offset by the ROI origin
length = length * sf + Roi.X;
return length;
}
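// Illustrative sketch: locate the material edge near both sides of a frame with EdgeClipping and derive the
// material width in pixels. The 3500 px search band mirrors the one used by getMaxInsetRect2 and is an
// assumption here, not a value taken from this method.
public static int ExampleMeasureWidth(Mat frame)
{
int band = Math.Min(3500, frame.Width / 2);
int leftEdge = EdgeClipping(frame, 0, new Rect(0, 0, band, frame.Height)); // search from the left
int rightEdge = EdgeClipping(frame, 1, new Rect(frame.Width - band, 0, band, frame.Height)); // search from the right
return rightEdge - leftEdge;
}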
public static int EdgeClipping2(Mat image, int FindType, Rect Roi, bool IsLeft)
{
DateTimeOffset startTime = DateTimeOffset.Now;
Mat mat_rgb = image.Clone(Roi);
int height = mat_rgb.Rows;
int width = mat_rgb.Cols;
int sf = 10; //scale-down factor
int pix = 5; //side length (px) of the patch used for mean sampling
int pointNum = 15; //number of scan lines used for edge finding
int offsetGray = 5; //binarization tolerance
//scale down by the factor sf
int sf_height = height / sf;
int sf_width = width / sf;
Cv2.Resize(mat_rgb, mat_rgb, new Size(sf_width, sf_height), 0, 0, InterpolationFlags.Linear);
Mat himg = new Mat();
himg = mat_rgb.Clone();
DateTimeOffset endTime = DateTimeOffset.Now;
Console.WriteLine("图片缩小(ms): " + (endTime - startTime).TotalMilliseconds.ToString("0.000"));
startTime = DateTimeOffset.Now;
//filter to remove excess noise
//Cv2.EdgePreservingFilter(himg, himg, EdgePreservingMethods.NormconvFilter);
//Cv2.PyrMeanShiftFiltering(himg, himg, 1, 2, 1);
Cv2.PyrMeanShiftFiltering(himg, himg, 10, 17, 2);
//himg.ImWrite("himg.jpg");
endTime = DateTimeOffset.Now;
Console.WriteLine("滤过去除多余噪声(ms): " + (endTime - startTime).TotalMilliseconds.ToString("0.000"));
startTime = DateTimeOffset.Now;
//convert to grayscale
Mat image_gray = new Mat();
Cv2.CvtColor(himg, image_gray, ColorConversionCodes.BGR2GRAY);
//image_gray.ImWrite("image_gray.jpg");
Mat image_Canny = new Mat();
Cv2.Canny(image_gray, image_Canny, 32, 64);
//image_Canny.ImWrite("image_Canny.jpg");
//binarize
Mat image_Otsu = new Mat();
int hDis = sf_height / (pointNum + 2); //leave out the two border points
#if false //threshold-based binarization variant (disabled)
List<double> LeftAvg = new List<double>();
List<double> RightAvg = new List<double>();
//double thb = Cv2.Threshold(image_gray, image_Otsu, 0, 255, ThresholdTypes.Binary | ThresholdTypes.Otsu);
#region
for (int i = 0; i < pointNum; i++)
{
Rect roiLeft = new Rect(0, hDis + hDis * i, pix, pix);
Mat current_segmentL = image_gray.Clone(roiLeft);
//Scalar ttr = current_segmentL.Mean();
LeftAvg.Add(current_segmentL.Mean().Val0);
Rect roiRight = new Rect(sf_width - pix, hDis + hDis * i, pix, pix);
Mat current_segmentR = image_gray.Clone(roiRight);
RightAvg.Add(current_segmentR.Mean().Val0);
}
double thres = 0;
if (IsLeft)
{
if (LeftAvg.Average() > RightAvg.Average())
thres = RightAvg.Max() + offsetGray;
else
thres = RightAvg.Min() - offsetGray;
}
else
{
if (LeftAvg.Average() > RightAvg.Average())
thres = LeftAvg.Min() - offsetGray;
else
thres = LeftAvg.Max() + offsetGray;
}
//double thres = (RightAvg.Average() + )/2;
#endregion
#endif
#if false
double min, max;
image_gray.MinMaxLoc(out min, out max);
double thres = (min + max) / 2;
#endif
#if false //binarized-image variant (disabled)
//Cv2.Threshold(image_gray, image_Otsu, 0, 255, ThresholdTypes.Otsu);
double thb = Cv2.Threshold(image_gray, image_Otsu, thres, 255, ThresholdTypes.Binary);
image_Otsu.ImWrite("Otsu1.jpg");
Cv2.MedianBlur(image_Otsu, image_Otsu, 21);
image_Otsu.ImWrite("Otsu2.jpg");
endTime = DateTimeOffset.Now;
Console.WriteLine("灰度图二值化(ms): " + (endTime - startTime).TotalMilliseconds.ToString("0.000"));
startTime = DateTimeOffset.Now;
#else
image_Otsu = image_Canny;
#endif
// Array to hold the per-line results
int[] total = new int[pointNum];
List<int> total_t = new List<int>();
bool isLeft = FindType == 0 ? true : false;
// Sample pointNum evenly spaced rows and process each one
for (int i = 0; i < pointNum; i++)
{
// Extract the current row as a one-pixel-high strip
Rect roi = new Rect(0, hDis + hDis * i, sf_width, 1);
Mat current_segment = image_Otsu.Clone(roi);
#if false
#region
// 对当前行的图像进行平滑处理
Mat smoothed_image2 = new Mat();
Cv2.GaussianBlur(current_segment, smoothed_image2, new Size(5, 1), 0);
// 计算当前行的灰度直方图
Mat absolute_histo2 = new Mat();
Cv2.CalcHist(new Mat[] { smoothed_image2 }, new int[] { 0 }, new Mat(), absolute_histo2, 1, new int[] { 256 }, new Rangef[] { new Rangef(0, 256) });
Cv2.GaussianBlur(current_segment, smoothed_image2, new Size(9, 1), 0);
// 对图片进行分割
//double otsu_threshold;
//threshold(smoothed_image, smoothed_image, 0, 255, THRESH_BINARY + THRESH_OTSU, &otsu_threshold);
double otsu_threshold2 = Cv2.Threshold(smoothed_image2, smoothed_image2, 0, 255, ThresholdTypes.Binary | ThresholdTypes.Otsu);
// 使用形态学操作进行孔洞填充
Mat kernel3 = Cv2.GetStructuringElement(MorphShapes.Rect, new Size(5, 1));
Mat filled_image3 = new Mat();
Cv2.MorphologyEx(smoothed_image2, filled_image3, MorphTypes.Close, kernel3);
#endregion
#else
//Mat filled_image3 = current_segment.Clone();
Mat filled_image3 = current_segment;
#endif
#if true
//scan for the edge from left to right or from right to left
int numX = 0;
byte tempVal = 0;
if (isLeft)
{
tempVal = filled_image3.At<byte>(0, 0);
for (int j = 0; j < filled_image3.Cols; j++)
{
if (filled_image3.At<byte>(0, j) != tempVal)
{
numX = j;
break;
}
}
}
else
{
tempVal = filled_image3.At<byte>(0, filled_image3.Cols - 1);
for (int j = filled_image3.Cols - 1; j >= 0; j--)
{
if (filled_image3.At<byte>(0, j) != tempVal)
{
numX = j;
break;
}
}
}
#else
int numX = Cv2.CountNonZero(filled_image3);
#endif
//int length_t = (numX > (sf_width / 2)) ? numX :sf_width - numX;
int length_t = numX;
total[i] = (length_t);
if (length_t > 0)
total_t.Add(length_t);
}
// Use the average as the width
int length = (int)total_t.Average();
endTime = DateTimeOffset.Now;
Console.WriteLine("计算边(ms): " + (endTime - startTime).TotalMilliseconds.ToString("0.000"));
// Sanity check (disabled): flag any scan line whose width deviates from the mean by more than the allowed number of pixels
//int abnormal_pxl = 100 / 4;
//for (int i = 0; i < pointNum; i++)
//{
// if (Math.Abs(total[i] - length) > abnormal_pxl)
// Console.WriteLine("Abnormal data!");
// //abnormal data: this segment's width is suspect
//}
//scale back up by sf and offset by the ROI origin
length = length * sf + Roi.X;
return length;
}
private static Rect GetMaxInscribedRect(Mat src, List<Point> contour)
{
//For every pair of contour points, form a rectangle; intersect each new rectangle with all rectangles seen so far,
//collect the corners of those intersections, keep only corners that lie inside the contour and are not duplicates,
//then pair the surviving points into candidate rectangles, keep the ones interior to the polygon,
//compute their areas and return the largest as the maximum inscribed rectangle.
//For example, with 4 points: points 1 and 2 form rect 1, 1 and 3 rect 2,
//1 and 4 rect 3, 2 and 3 rect 4, 2 and 4 rect 5, 3 and 4 rect 6.
//Rect 1 is the first element, so it has no rectangle to intersect with and its corners go straight into allPoint;
//rect 2 then contributes its own four corners plus the corners of its intersection with rect 1;
//rect 3 likewise contributes its own corners plus its intersections with rect 1 and rect 2, and so on.
Rect maxInscribedRect = new Rect();
List<Rect> allRect = new List<Rect>();
List<Point> allPoint = new List<Point>(contour);
//Form a rectangle between each contour point and every later point
for (int i = 0; i < contour.Count; i++)
{
for (int j = i + 1; j < contour.Count; j++)
{
var p1 = contour[i];
var p2 = contour[j];
if (p1.Y == p2.Y || p1.X == p2.X)
continue;
var tempRect = FromTowPoint(p1, p2);
allPoint.AddRange(GetAllCorner(tempRect));
//Intersect the new rectangle with every rectangle collected so far and add the corners of each intersection to the point set
foreach (var rect in allRect)
{
var intersectR = tempRect.Intersect(rect);
if (intersectR != Rect.Empty)
allPoint.AddRange(GetAllCorner(intersectR));
}
allRect.Add(tempRect);
}
}
//Remove duplicate points, pair the rest into candidate rectangles, keep the interior ones, compute their areas and track the maximum inscribed rectangle
List<Point> distinctPoints = allPoint.Distinct().ToList();
for (int i = 0; i < distinctPoints.Count; i++)
{
for (int j = i + 1; j < distinctPoints.Count; j++)
{
var tempRect = FromTowPoint(distinctPoints[i], distinctPoints[j]);
//A rectangle is rejected if any of its corners falls outside the contour, or if any contour point lies strictly inside it
if (!ContainPoints(contour, GetAllCorner(tempRect)) || ContainsAnyPt(tempRect, contour))
continue;
//src.Rectangle(tempRect, Scalar.RandomColor(), 2);
if (tempRect.Width * tempRect.Height > maxInscribedRect.Width * maxInscribedRect.Height)
maxInscribedRect = tempRect;
}
}
//src.Rectangle(maxInscribedRect, Scalar.Yellow, 2);
return maxInscribedRect == Rect.Empty ? Cv2.BoundingRect(contour) : maxInscribedRect;
}
public static Point[] GetAllCorner(Rect rect)
{
Point[] result = new Point[4];
result[0] = rect.Location;
result[1] = new Point(rect.X + rect.Width, rect.Y);
result[2] = rect.BottomRight;
result[3] = new Point(rect.X, rect.Y + rect.Height);
return result;
}
private static bool ContainPoint(List<Point> contour, Point p1)
{
return Cv2.PointPolygonTest(contour, p1, false) > 0;
}
private static bool ContainPoints(List<Point> contour, IEnumerable<Point> points)
{
foreach (var point in points)
{
if (Cv2.PointPolygonTest(contour, point, false) < 0)
return false;
}
return true;
}
private static void DrawContour(Mat mat, Point[] contour, Scalar color, int thickness)
{
for (int i = 0; i < contour.Length; i++)
{
if (i + 1 < contour.Length)
Cv2.Line(mat, contour[i], contour[i + 1], color, thickness);
}
}
/// <summary>
/// Whether any point in the collection lies inside the rectangle; points on the border do not count as contained
/// </summary>
/// <param name="rect"></param>
/// <param name="points"></param>
/// <returns></returns>
public static bool ContainsAnyPt(Rect rect, IEnumerable<Point> points)
{
foreach (var point in points)
{
if (point.X > rect.X && point.X < rect.X + rect.Width && point.Y < rect.BottomRight.Y && point.Y > rect.Y)
return true;
}
return false;
}
/// <summary>
/// Build a rectangle from two arbitrary corner points
/// </summary>
/// <param name="p1"></param>
/// <param name="p2"></param>
/// <returns></returns>
public static Rect FromTowPoint(Point p1, Point p2)
{
if (p1.X == p2.X || p1.Y == p2.Y)
return Rect.Empty;
if (p1.X > p2.X && p1.Y < p2.Y)
{
(p1, p2) = (p2, p1);
}
else if (p1.X > p2.X && p1.Y > p2.Y)
{
(p1.X, p2.X) = (p2.X, p1.X);
}
else if (p1.X < p2.X && p1.Y < p2.Y)
{
(p1.Y, p2.Y) = (p2.Y, p1.Y);
}
return Rect.FromLTRB(p1.X, p2.Y, p2.X, p1.Y);
}
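// Small illustrative check of the rectangle helpers above: FromTowPoint normalizes two arbitrary corners,
// GetAllCorner enumerates the four corners, and ContainsAnyPt treats border points as outside. The concrete
// coordinates are arbitrary example values.
public static bool ExampleRectHelpers()
{
Rect r = FromTowPoint(new OpenCvSharp.Point(10, 40), new OpenCvSharp.Point(30, 20)); // -> x=10, y=20, w=20, h=20
OpenCvSharp.Point[] corners = GetAllCorner(r); // (10,20) (30,20) (30,40) (10,40)
bool cornerInside = ContainsAnyPt(r, corners); // false: corners sit on the border
bool centreInside = ContainsAnyPt(r, new[] { new OpenCvSharp.Point(20, 30) }); // true: strictly inside
return !cornerInside && centreInside;
}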
#endregion
public static Mat CannyOperator(Mat srcImg, double threshold1 = 100, double threshold2 = 200)
{
var dst = new Mat();// srcImg.Rows, srcImg.Cols,MatType.CV_8UC1);
//convert to grayscale
Cv2.CvtColor(srcImg, dst, ColorConversionCodes.RGB2GRAY);
//smooth to suppress noise before edge detection
Cv2.Blur(dst, dst, new OpenCvSharp.Size(2, 2));
//double threshold1 = 255, threshold2 = 0;
Mat edges = new Mat();
Cv2.Canny(dst, edges, threshold1, threshold2);//run Canny on the blurred grayscale image rather than the raw source
//Cv2.ImShow("dst", dst);
return edges;
}
public static Mat LaplacianOperator(Mat srcImg, double threshold1 = 10, double threshold2 = 255)
{
Mat LaplacianImg = new Mat();
Mat gussImage = new Mat();
//Gaussian blur: each pixel becomes a weighted average of itself and its neighbours; the weights are largest at the centre and fall off with distance
/* src: input image
dst: output image
ksize: Gaussian kernel size; width and height may differ, but both must be positive and odd, or zero so that they are derived from sigma
sigmaX: standard deviation of the Gaussian kernel along the X axis
sigmaY: standard deviation along the Y axis; if 0 it defaults to sigmaX, and if both sigmas are 0 they are computed from ksize
borderType: usually left at the default
*/
Cv2.GaussianBlur(srcImg, gussImage, new OpenCvSharp.Size(3, 3), 0, 0, BorderTypes.Default);
Mat grayImage = new Mat();
Cv2.CvtColor(gussImage, grayImage, ColorConversionCodes.RGB2GRAY); //grayscale
//Laplacian: compute the second derivative
/*src: source image
dst: output image, same size and channel count as src
ddepth: desired depth of the destination image; -1 keeps the source depth
ksize: aperture (kernel) size of the second-derivative filter, must be odd
scale: optional scale factor for the computed Laplacian values (no scaling by default)
delta: optional value added to the result before it is stored in dst
borderType: border handling method
*/
Cv2.Laplacian(grayImage, LaplacianImg, -1, 3); //args: 1 source image, 2 output image, 3 desired output depth (-1 keeps the source depth), 4 kernel size of the second-derivative filter (must be odd)
//Thresholding: segment the image by grey-level differences
/* src: input image
dst: output image
thresh: threshold value
maxval: maximum value assigned by the threshold
type: threshold type, explained below
Binary: values above the threshold become the maximum value, values below become the minimum
BinaryInv: inverse of Binary (above the threshold -> minimum, below -> maximum)
Trunc: truncate (values above the threshold are clipped to the threshold, smaller ones stay unchanged)
ToZero: values above the threshold stay unchanged, values below are set to zero
ToZeroInv: inverse of ToZero (above the threshold -> zero, below stays unchanged)
*/
Mat dst = new Mat();
Cv2.Threshold(LaplacianImg, dst, threshold1, threshold2, ThresholdTypes.Binary);
return dst;
}
//The Sobel operator is a discrete differential edge detector; it is very sensitive to noise, so the image is usually Gaussian-blurred first
public static Mat SobelOperator(Mat src_img, double threshold1 = 10, double threshold2 = 250)
{
Mat dst = new Mat();
//Gaussian blur
Cv2.GaussianBlur(src_img, dst, new OpenCvSharp.Size(3, 3), 0, 0, BorderTypes.Default);
Mat grayImage = new Mat();
Cv2.CvtColor(dst, grayImage, ColorConversionCodes.BGR2GRAY); //convert to grayscale
Mat X = new Mat();
Mat Y = new Mat();
/*src: input image
dst: output image
ddepth: output image depth
xorder: derivative order in the X direction
yorder: derivative order in the Y direction
ksize: Sobel kernel size, must be odd
scale: scale factor applied when computing the derivatives, 1 by default
delta: optional delta value added before storing into the destination
borderType: border mode, usually the default
*/
Cv2.Sobel(grayImage, X, MatType.CV_16S, 1, 0, 3); //Sobel edge filter: 1 input, 2 output X-gradient image, 3 output depth, 4 derivative order in X, 5 derivative order in Y, 6 kernel size (must be odd)
Cv2.Sobel(grayImage, Y, MatType.CV_16S, 0, 1, 3); //output Y-gradient image
#region 1
int width = X.Cols;
int height = Y.Rows;
Mat output = new Mat(X.Size(), MatType.CV_8UC1);
for (int x = 0; x < height; x++) //combine X and Y: G = sqrt(Gx*Gx + Gy*Gy)
{
for (int y = 0; y < width; y++)
{
int xg = X.At<short>(x, y); //the gradient images are CV_16S, so read them as short
int yg = Y.At<short>(x, y);
double v1 = Math.Pow(xg, 2); //square
double v2 = Math.Pow(yg, 2);
int val = (int)Math.Sqrt(v1 + v2); //square root of the sum
if (val > 255) //clamp the pixel value to the 0..255 range
{
val = 255;
}
if (val < 0)
{
val = 0;
}
byte xy = (byte)val;
output.Set<byte>(x, y, xy); //write the gradient magnitude into the output image
}
}
#endregion
#region 2: combine the X and Y gradients via the API
//Mat Abs_X = new Mat();
//Mat Abs_Y = new Mat();
//Mat Result = new Mat();
//Cv2.ConvertScaleAbs(X, Abs_X, 1.0);//scale, take absolute values and convert the result to 8 bit
//Cv2.ConvertScaleAbs(Y, Abs_Y, 1.0);//scale, take absolute values and convert the result to 8 bit
//Cv2.AddWeighted(Abs_X, 0.5, Abs_Y, 0.5, 0, Result);//blend the two images with the given weights
#endregion
//threshold the gradient magnitude
Mat result = new Mat();
Cv2.Threshold(output, result, threshold1, threshold2, ThresholdTypes.Binary);
return result;
}
//The Scharr operator is an optimized variant of Sobel, especially for 3x3 kernels
public static Mat ScharrOperator(Mat srcImg, double threshold1 = 10, double threshold2 = 250)
{
Mat dst = new Mat();
Cv2.GaussianBlur(srcImg, dst, new OpenCvSharp.Size(3, 3), 0, 0, BorderTypes.Default);
Mat grayImage = new Mat();
Cv2.CvtColor(dst, grayImage, ColorConversionCodes.BGR2GRAY); //convert to grayscale
Mat grad_x = new Mat();
Mat grad_x2 = new Mat();
Mat grad_y = new Mat();
Mat grad_y2 = new Mat();
Cv2.Scharr(grayImage, grad_x, MatType.CV_16S, 1, 0);
Cv2.Scharr(grayImage, grad_y, MatType.CV_16S, 0, 1);
Cv2.ConvertScaleAbs(grad_x, grad_x2);
Cv2.ConvertScaleAbs(grad_y, grad_y2);
Mat result = new Mat();
Cv2.AddWeighted(grad_x2, 0.5, grad_y2, 0.5, 0, result);
//threshold
Cv2.Threshold(result, result, threshold1, threshold2, ThresholdTypes.Binary);
//Cv2.ImShow("Scharr", result);
return result;
}
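// Illustrative sketch comparing the four edge operators above on the same frame; each result is written next
// to the hypothetical output prefix so the edge maps can be inspected side by side.
public static void ExampleCompareEdgeOperators(Mat frame, string outputPrefix)
{
Cv2.ImWrite(outputPrefix + "_canny.png", CannyOperator(frame));
Cv2.ImWrite(outputPrefix + "_laplacian.png", LaplacianOperator(frame));
Cv2.ImWrite(outputPrefix + "_sobel.png", SobelOperator(frame));
Cv2.ImWrite(outputPrefix + "_scharr.png", ScharrOperator(frame));
}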
#region MatToHalcon
public static void MatToHObject(Mat imgMat, out HObject imgHOject)
{
int ImageWidth = imgMat.Width;
int ImageHeight = imgMat.Height;
int channel = imgMat.Channels();
long size = ImageWidth * ImageHeight * channel;
int col_byte_num = ImageWidth * channel;
byte[] rgbValues = new byte[size];
unsafe
{
for (int i = 0; i < ImageHeight; i++)
{
IntPtr c = imgMat.Ptr(i);
// Copy the Mat pixels into byte[] one row at a time
Marshal.Copy(c, rgbValues, i * col_byte_num, col_byte_num);
}
//Pin the managed buffer while Halcon copies from it; the pointer must not be used outside the fixed block
fixed (byte* pc = rgbValues)
{
IntPtr ptr = new IntPtr(pc);
if (channel == 1)
{
HOperatorSet.GenImage1(out imgHOject, "byte", ImageWidth, ImageHeight, ptr);
}
else
{
HOperatorSet.GenImageInterleaved(out imgHOject, ptr, "bgr", ImageWidth, ImageHeight, 0, "byte", 0, 0, 0, 0, -1, 0);
}
}
}
}
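// Illustrative sketch: convert an OpenCV frame to a Halcon image, hand it to some Halcon processing, and
// release the unmanaged Halcon object afterwards. "ProcessWithHalcon" is a hypothetical callback supplied by
// the caller.
public static void ExampleMatToHalcon(Mat frame, Action<HObject> ProcessWithHalcon)
{
HObject himg;
MatToHObject(frame, out himg);
try
{
ProcessWithHalcon(himg);
}
finally
{
himg.Dispose(); // HObject wraps unmanaged memory and must be released explicitly
}
}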
#if false
/// <summary>
/// 把OpenCV图像转换到Halcon图像
/// </summary>
/// <param name="mImage">OpenCV图像_Mat</param>
/// <returns>Halcon图像_HObject</returns>
public HObject MatToHImage(Mat mImage)
{
try
{
HObject hImage;
int matChannels = 0; // 通道数
Type matType = null;
int width, height; // 宽,高
width = height = 0; // 宽,高初始化
// 获取通道数
matChannels = mImage.Channels();
if (matChannels == 0)
{
return null;
}
if (matChannels == 1) // 单通道
{
IntPtr ptr; // 灰度图通道
Mat[] mats = mImage.Split();
// 改自Mat.GetImagePointer1(mImage, out ptr, out matType, out width, out height); // ptr=2157902018096 cType=byte width=830 height=822
ptr = mats[0].Data; // 取灰度图值
matType = mImage.GetType(); // byte
height = mImage.Rows; // 高
width = mImage.Cols; // 宽
// 改自hImage = new HObject(new OpenCvSharp.Size(width, height), MatType.CV_8UC1, new Scalar(0));
byte[] dataGrayScaleImage = new byte[width * height]; //Mat dataGrayScaleImage = new Mat(new OpenCvSharp.Size(width, height), MatType.CV_8UC1);
unsafe
{
fixed (byte* ptrdata = dataGrayScaleImage)
{
#region
//for (int i = 0; i < height; i++)
//{
// CopyMemory((IntPtr)(ptrdata + width * i), new IntPtr((long)ptr + width * i), width);
//}
#endregion
CopyMemory((IntPtr)ptrdata, new IntPtr((long)ptr), width * height);
HOperatorSet.GenImage1(out hImage, "byte", width, height, (IntPtr) ptrdata);
}
}
return hImage;
}
else if (matChannels == 3) // 三通道
{
IntPtr ptrRed; // R通道图
IntPtr ptrGreen; // G通道图
IntPtr ptrBlue; // B通道图
Mat[] mats = mImage.Split();
ptrRed = mats[0].Data; // 取R通道值
ptrGreen = mats[1].Data; // 取G通道值
ptrBlue = mats[2].Data; // 取B通道值
matType = mImage.GetType(); // 类型
height = mImage.Rows; // 高
width = mImage.Cols; // 宽
// 改自hImage = new HObject(new OpenCvSharp.Size(width, height), MatType.CV_8UC1, new Scalar(0));
byte[] dataRed = new byte[width * height]; //Mat dataGrayScaleImage = new Mat(new OpenCvSharp.Size(width, height), MatType.CV_8UC1);
byte[] dataGreen = new byte[width * height];
byte[] dataBlue = new byte[width * height];
unsafe
{
fixed (byte* ptrdataRed = dataRed, ptrdataGreen = dataGreen, ptrdataBlue = dataBlue)
{
#region
//HImage himg = new HImage("byte", width, height, (IntPtr)ptrdataRed);
//for (int i = 0; i < height; i++)
//{
// CopyMemory((IntPtr)(ptrdataRed + width * i), new IntPtr((long)ptrRed + width * i), width);
// CopyMemory((IntPtr)(ptrdataGreen + width * i), new IntPtr((long)ptrGreen + width * i), width);
// CopyMemory((IntPtr)(ptrdataBlue + width * i), new IntPtr((long)ptrBlue + width * i), width);
//}
#endregion
CopyMemory((IntPtr)ptrdataRed, new IntPtr((long)ptrRed), width * height); // 复制R通道
CopyMemory((IntPtr)ptrdataGreen, new IntPtr((long)ptrGreen), width * height); // 复制G通道
CopyMemory((IntPtr)ptrdataBlue, new IntPtr((long)ptrBlue), width * height); // 复制B通道
HOperatorSet.GenImage3(out hImage, "byte", width, height, (IntPtr)ptrdataRed, (IntPtr)ptrdataGreen, (IntPtr)ptrdataBlue); // 合成
}
}
return hImage;
}
else
{
return null;
}
}
catch (Exception ex)
{
throw ex;
}
}
#endif
#endregion
}
}