diff --git a/图像识别CV/pythoncv_learn/detection_chessboard_v2.py b/图像识别CV/pythoncv_learn/detection_chessboard_v2.py new file mode 100644 index 0000000..20a0910 --- /dev/null +++ b/图像识别CV/pythoncv_learn/detection_chessboard_v2.py @@ -0,0 +1,387 @@ +import cv2 +import numpy as np +import math + +img = cv2.imread('10069.jpg') + +img_shape = img.shape +length = img_shape[0] +height = img_shape[1] + +img = cv2.resize(img, (800, int(800 * length / height))) + +hsv = cv2.cvtColor(img,cv2.COLOR_BGR2HSV) + +lower_skin = np.array([14,43,46]) +upper_skin = np.array([30,255,255]) + +mask = cv2.inRange(hsv,lower_skin,upper_skin) +image = cv2.bitwise_and(img,img,mask=mask) + +grayImage = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY) +cv2.imshow('grayImage', grayImage) + +edgeImage = grayImage + +# 直方图均衡 +edgeImage = cv2.equalizeHist(edgeImage) +cv2.imshow('equalizeHist', edgeImage) + +''' +# 自适应直方图均衡 +clahe = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(8,8)) +edgeImage = clahe.apply(edgeImage) +cv2.imshow('createCLAHE', edgeImage) +''' + +# 二值化 +h, w = edgeImage.shape[:2] +m = np.reshape(edgeImage, [1, w*h]) +mean = m.sum()/(w*h) +# binary = cv2.adaptiveThreshold(gray, 255, cv2.ADAPTIVE_THRESH_MEAN_C,cv2.THRESH_BINARY,25,10) +ret, binary = cv2.threshold(edgeImage, mean, 255, cv2.THRESH_BINARY) +image_binary = 255 - binary +cv2.imshow('image_binary', image_binary) + +''' +# 双边滤波 +edgeImage = cv2.bilateralFilter(edgeImage,9,75,75) +cv2.imshow('bilateralFilter', edgeImage) +''' + +''' +# 中位模糊 +edgeImage = cv2.medianBlur(edgeImage,5) +cv2.imshow('medianBlur', edgeImage) +''' + +# 高斯模糊 +edgeImage = cv2.GaussianBlur(grayImage, (3, 3), 1) +cv2.imshow('GaussianBlur', edgeImage) + +''' +# 平均 +edgeImage = cv2.blur(edgeImage,(5,5)) +cv2.imshow('blur', edgeImage) +''' + +# 边缘检测 +canny = cv2.Canny(edgeImage, 20, 80) +cv2.imshow('Canny', canny) + +kernel = np.ones((5,5),np.uint8) +dilate = cv2.dilate(canny,kernel,iterations = 1) +cv2.imshow('dilate', dilate) + +''' +# 轮廓检测 +ret, thresh = cv2.threshold(edgeImage, 127, 255, 0) +contours, hierarchy = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE) + +def cnt_area(cnt): + area = cv2.contourArea(cnt) + return area +contours.sort(key = cnt_area, reverse=True) +print(len(contours)) +for i in range(0, 1): + cnt = contours[i] + img = image.copy() + cv2.drawContours(img, [cnt], 0, (0,255,0), 3) + cv2.imshow('drawContours_' + str(i), img) +cv2.drawContours(image, contours, -1, (0,255,0), 1) +cv2.imshow('drawContours', image) +''' + +laplacian = cv2.Laplacian(edgeImage,cv2.CV_8U) +# laplacian = cv2.Laplacian(edgeImage,cv2.CV_64F) +cv2.imshow('laplacian', laplacian) + +# 二值化 +h, w = laplacian.shape[:2] +m = np.reshape(laplacian, [1, w*h]) +mean = m.sum()/(w*h) +# binary = cv2.adaptiveThreshold(gray, 255, cv2.ADAPTIVE_THRESH_MEAN_C,cv2.THRESH_BINARY,25,10) +ret, laplacian_binary = cv2.threshold(laplacian, mean, 255, cv2.THRESH_BINARY) +cv2.imshow('laplacian_binary', laplacian_binary) + +sobelx = edgeImage +for i in range(0, 6): # 3,6,8有奇效(3: HoughLinesP检测竖线, 6: HoughLines检测竖线, 8:检测横线) + sobelx = cv2.Sobel(sobelx,cv2.CV_8U,1,0,ksize=5) + # sobelx = cv2.Sobel(edgeImage,cv2.CV_64F,1,0,ksize=5) +cv2.imshow('sobelx', sobelx) + +sobely = edgeImage +for i in range(0, 6): # 3,6,8有奇效(3: HoughLinesP检测横线, 6: HoughLines检测横线, 8:检测竖线) + sobely = cv2.Sobel(sobely,cv2.CV_8U,0,1,ksize=5) + # sobely = cv2.Sobel(edgeImage,cv2.CV_64F,0,1,ksize=5) +cv2.imshow('sobely', sobely) + +binary_1 = cv2.bitwise_and(dilate, laplacian_binary) +cv2.imshow('binary_1', binary_1) + +sobelx_binary = 
cv2.bitwise_and(sobelx, binary_1) +cv2.imshow('sobelx_binary', sobelx_binary) + +sobely_binary = cv2.bitwise_and(sobely, binary_1) +cv2.imshow('sobely_binary', sobely_binary) + +# lineXImage = sobelx_binary +# lineYImage = sobely_binary +lineXImage = binary_1 +lineYImage = binary_1 + +''' +# 侵蚀 +kernel = np.ones((5,5),np.uint8) +edgeImage = cv2.erode(edgeImage,kernel,iterations = 1) +cv2.imshow('erode', edgeImage) +''' + +''' +# 扩张 +kernel = np.ones((5,5),np.uint8) +edgeImage = cv2.dilate(edgeImage,kernel,iterations = 1) +cv2.imshow('dilate', edgeImage) +''' + +''' +# 开运算 +kernel = np.ones((5,5),np.uint8) +edgeImage = cv2.morphologyEx(edgeImage, cv2.MORPH_OPEN, kernel) +cv2.imshow('morphologyEx', edgeImage) +''' + +''' +# 闭运算 +kernel = np.ones((5,5),np.uint8) +morphologyEx = cv2.morphologyEx(edgeImage, cv2.MORPH_CLOSE, kernel) +cv2.imshow('morphologyEx', morphologyEx) +''' + +''' +# 形态学梯度 +kernel = np.ones((5,5),np.uint8) +edgeImage = cv2.morphologyEx(edgeImage, cv2.MORPH_GRADIENT, kernel) +cv2.imshow('morphologyEx', edgeImage) +''' + +''' +# 顶帽 +kernel = np.ones((5,5),np.uint8) +edgeImage = cv2.morphologyEx(edgeImage, cv2.MORPH_TOPHAT, kernel) +cv2.imshow('morphologyEx', edgeImage) +''' + +''' +# 黑帽 +kernel = np.ones((5,5),np.uint8) +edgeImage = cv2.morphologyEx(edgeImage, cv2.MORPH_BLACKHAT, kernel) +cv2.imshow('morphologyEx', edgeImage) +''' + +''' +# 锐化 +cv2.imshow('edgeImage + edgeImage', edgeImage + edgeImage) +''' + +def isVerticalLine(line): + x1,y1,x2,y2 = line[0] + + if (x2-x1) == 0 : + x_angle = 90 + else: + xielv = (y2-y1)/(x2-x1) + x_angle = xielv * (180 / math.pi) + x_angle = x_angle % 180 + + if x_angle > 67.5 and x_angle < 112.5 : + return True + else: + return False + +def isHorizontalLine(line): + x1,y1,x2,y2 = line[0] + + if (x2-x1) == 0 : + x_angle = 90 + else: + xielv = (y2-y1)/(x2-x1) + x_angle = xielv * (180 / math.pi) + x_angle = x_angle % 180 + + if x_angle < 22.5 or x_angle > 157.5 : + return True + else: + return False + +def findHoughEdge(edgeImage, isLinesP=True): + if isLinesP: + lines = findLinesP(edgeImage) + else: + lines = findLines(edgeImage) + + if lines is None: + return [[0, 1, 5, 21], [8, 1, 5, 3], [4, 12, 0, 8], [41, 11, 20, 15]] + + lines_vertical = filter(lambda line: True if abs(line[0][1] - line[0][3]) > edgeImage.shape[0] * 0.5 else False, lines) + lines_horizontal = filter(lambda line: True if abs(line[0][0] - line[0][2]) > edgeImage.shape[1] * 0.5 else False, lines) + + # lines_vertical = filter(isVerticalLine, lines_vertical) + # lines_horizontal = filter(isHorizontalLine, lines_horizontal) + + lines_vertical = sorted(lines_vertical, key=lambda line: line[0][0]) + lines_horizontal = sorted(lines_horizontal , key=lambda line: line[0][1]) + + if len(lines_vertical) < 2 or len(lines_horizontal) < 2: + return [[0, 1, 5, 21], [8, 1, 5, 3], [4, 12, 0, 8], [41, 11, 20, 15]] + return [lines_vertical[0][0], lines_horizontal [0][0], lines_vertical[-1][0], lines_horizontal [-1][0]] + +def writeHoughEdge(edgeImage, title): + lines = findHoughEdge(edgeImage, False) + houghEdgeImage = image.copy() + for line in lines: + x1,y1,x2,y2 = line + cv2.line(houghEdgeImage,(x1,y1),(x2,y2),(0,0,255),2) + + cv2.imshow('houghEdgeImage' + title, houghEdgeImage) + +def writeHoughEdgeP(edgeImage, title): + lines = findHoughEdge(edgeImage, True) + writeHoughEdgeP = image.copy() + for line in lines: + x1,y1,x2,y2 = line + cv2.line(writeHoughEdgeP,(x1,y1),(x2,y2),(255,0,0),2) + + cv2.imshow('writeHoughEdgeP' + title, writeHoughEdgeP) + +def findLines(edgeImage): + lines = 
cv2.HoughLines(edgeImage,1,np.pi/180,200) + ok_lines = [] + for line in lines: + rho,theta = line[0] + + if (theta < (np.pi/4. )) or (theta > (3.*np.pi/4.0)): #垂直直线 + pt1 = (int(rho/np.cos(theta)),0) #该直线与第一行的交点 + #该直线与最后一行的焦点 + pt2 = (int((rho-edgeImage.shape[0]*np.sin(theta))/np.cos(theta)),edgeImage.shape[0]) + else: #水平直线 + pt1 = (0,int(rho/np.sin(theta))) # 该直线与第一列的交点 + #该直线与最后一列的交点 + pt2 = (edgeImage.shape[1], int((rho-edgeImage.shape[1]*np.cos(theta))/np.sin(theta))) + + x1, y1 = pt1 + x2, y2 = pt2 + + if (x2-x1) == 0 : + x_angle = 90 + else: + xielv = (y2-y1)/(x2-x1) + x_angle = xielv * (180 / math.pi) + x_angle = x_angle % 180 + + if (x_angle < 22.5 or x_angle > 157.5) or (x_angle > 67.5 and x_angle < 112.5) : + ok_lines.append([[x1,y1,x2,y2]]) + return ok_lines + +def findLinesP(edgeImage): + thresh_min = min(edgeImage.shape) + lines = cv2.HoughLinesP(edgeImage, 1.2, np.pi / 180, 160, minLineLength=int(edgeImage.shape[0] * 0.7),maxLineGap=int(thresh_min * 0.5)) + ok_lines = [] + for line in lines: + x1,y1,x2,y2 = line[0] + + if (x2-x1) == 0 : + x_angle = 90 + else: + xielv = (y2-y1)/(x2-x1) + x_angle = xielv * (180 / math.pi) + x_angle = x_angle % 180 + + if (x_angle < 22.5 or x_angle > 157.5) or (x_angle > 67.5 and x_angle < 112.5) : + ok_lines.append([[x1,y1,x2,y2]]) + return ok_lines + +def writeLines(edgeImage, title): + lines = findLines(edgeImage) + houghEdgeImage = image.copy() + for line in lines: + x1,y1,x2,y2 = line[0] + cv2.line(houghEdgeImage,(x1,y1),(x2,y2),(0,0,255),2) + cv2.imshow('findLines' + title, houghEdgeImage) + +def writeLinesP(edgeImage, title): + lines = findLinesP(edgeImage) + houghEdgeImage = image.copy() + for line in lines: + x1,y1,x2,y2 = line[0] + cv2.line(houghEdgeImage,(x1,y1),(x2,y2),(255,0,0),2) + cv2.imshow('findLinesP' + title, houghEdgeImage) + +def __clac_intersection(line_a, line_b): + x1_a, y1_a, x2_a, y2_a = line_a + x1_b, y1_b, x2_b, y2_b = line_b + A_a = y2_a - y1_a + B_a = x1_a - x2_a + C_a = x2_a * y1_a - x1_a * y2_a + A_b = y2_b - y1_b + B_b = x1_b - x2_b + C_b = x2_b * y1_b - x1_b * y2_b + m = A_a * B_b - A_b * B_a + output_x = (C_b * B_a - C_a * B_b) / m + output_y = (C_a * A_b - C_b * A_a) / m + return (int(output_x), int(output_y)) + +writeLines(lineXImage, 'X') +writeLines(lineYImage, 'Y') +writeLinesP(lineXImage, 'X') +writeLinesP(lineYImage, 'Y') + +# writeHoughEdge(lineXImage, 'X') +# writeHoughEdge(lineYImage, 'Y') +# writeHoughEdgeP(lineXImage, 'X') +# writeHoughEdgeP(lineYImage, 'Y') + +linesX = findHoughEdge(lineXImage, False) +linesY = findHoughEdge(lineYImage, False) + +houghEdgeImage = image.copy() +cv2.line(houghEdgeImage,(linesX[0][0],linesX[0][1]),(linesX[0][2],linesX[0][3]),(255,0,0),2) +cv2.line(houghEdgeImage,(linesX[2][0],linesX[2][1]),(linesX[2][2],linesX[2][3]),(0,255,0),2) +cv2.line(houghEdgeImage,(linesY[1][0],linesY[1][1]),(linesY[1][2],linesY[1][3]),(0,0,255),2) +cv2.line(houghEdgeImage,(linesY[3][0],linesY[3][1]),(linesY[3][2],linesY[3][3]),(255,255,0),2) +cv2.imshow('houghEdge', houghEdgeImage) + +p1 = __clac_intersection((linesX[0][0],linesX[0][1], linesX[0][2],linesX[0][3]), (linesY[1][0],linesY[1][1], linesY[1][2],linesY[1][3])) +p2 = __clac_intersection((linesY[1][0],linesY[1][1], linesY[1][2],linesY[1][3]), (linesX[2][0],linesX[2][1], linesX[2][2],linesX[2][3])) +p3 = __clac_intersection((linesX[2][0],linesX[2][1], linesX[2][2],linesX[2][3]), (linesY[3][0],linesY[3][1], linesY[3][2],linesY[3][3])) +p4 = __clac_intersection((linesY[3][0],linesY[3][1], linesY[3][2],linesY[3][3]), 
(linesX[0][0],linesX[0][1], linesX[0][2],linesX[0][3])) + +print((p1, p2, p3, p4)) + +warpPerspectiveImage = image.copy() +pts1 = np.float32([p1,p2,p4,p3]) +pts2 = np.float32([[0,0],[800,0],[0,800],[800,800]]) +M = cv2.getPerspectiveTransform(pts1,pts2) +dst = cv2.warpPerspective(warpPerspectiveImage,M,(800,800)) +cv2.imshow('warpPerspective', dst) + +''' +circles = cv2.HoughCircles(edgeImage,cv2.HOUGH_GRADIENT,1,20,param1=50,param2=30,minRadius=0,maxRadius=0) +circles = np.uint16(np.around(circles)) +for i in circles[0,:]: + # 绘制外圆 + cv2.circle(image,(i[0],i[1]),i[2],(0,255,0),2) + # 绘制圆心 + cv2.circle(image,(i[0],i[1]),2,(0,0,255),3) +''' + +''' +pts1 = np.float32([[56,65],[368,52],[28,387],[389,390]]) +pts2 = np.float32([[0,0],[300,0],[0,300],[300,300]]) +M = cv2.getPerspectiveTransform(pts1, pts2) +image = cv2.warpPerspective(image, M, (600, 600)) +cv2.imshow('show', image) +''' + +cv2.waitKey(0) +cv2.destroyAllWindows() \ No newline at end of file diff --git a/图像识别CV/pythoncv_learn/detection_chessboard_v3.py b/图像识别CV/pythoncv_learn/detection_chessboard_v3.py new file mode 100644 index 0000000..ad6ac8f --- /dev/null +++ b/图像识别CV/pythoncv_learn/detection_chessboard_v3.py @@ -0,0 +1,193 @@ +import cv2 +import numpy as np +import math +from matplotlib import pyplot as plt + +img = cv2.imread('10069.jpg') + +img_shape = img.shape +length = img_shape[0] +height = img_shape[1] + +img = cv2.resize(img, (800, int(800 * length / height))) + +hsv = cv2.cvtColor(img,cv2.COLOR_BGR2HSV) + +lower_skin = np.array([14,43,46]) +upper_skin = np.array([30,255,255]) + +mask = cv2.inRange(hsv,lower_skin,upper_skin) +img = cv2.bitwise_and(img,img,mask=mask) + +img_shape = img.shape +img_gray = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY) +dst = img_gray.copy() + +# 获取图像的长宽 +length = img_shape[0] +height = img_shape[1] +midlength = length / 2 +midheight = height / 2 +quarterL = length / 3 +quarterT = height / 3 +sigmaX = 2*(quarterL**2) +sigmaY = 2*(quarterT**2) + +# 设置kernel +kernel = np.ones((5, 5), np.int16) +kernel[2][2] = -24 + +# 设置卷积核 +kernel2 = np.ones((5, 5), np.int16) +kernel3 = np.ones((3, 3), np.int16) +dst = cv2.morphologyEx(img_gray, cv2.MORPH_OPEN, kernel2) + +# 平滑 +# 高斯 +dst = cv2.blur(dst, (5, 5)) +# dst = cv2.GaussianBlur(dst, (5, 5), 0) + +# 锐化 +blur = cv2.Laplacian(dst, cv2.CV_16S, ksize=3) +dst = cv2.convertScaleAbs(blur) + +# 线性滤波,低通滤波 +dst = cv2.filter2D(img_gray, -1, kernel) + +# 平滑(双边滤波) +# 双边滤波 +dst = cv2.bilateralFilter(dst, 9, 100, 100) + +# 在二值化前进行膨胀,以增强线段 +# 膨胀导致线段筛选困难 +# dst = cv2.dilate(dst, kernel3) + +# 二值化 +ret, dst = cv2.threshold(dst, 0, 255, cv2.THRESH_BINARY+cv2.THRESH_OTSU) + +# Hough变换前的高斯赋值 +# 从图像中央到边缘,逐渐降低权重 +for i in range(length): + for j in range(height): + dst[i][j] = dst[i][j] * math.exp((i-midlength)**2/sigmaX + + (j-midheight)**2/sigmaY) + +# Hough变换 +lines = cv2.HoughLines(dst, 1, np.pi/180, 250) + +houghLinesImage = img.copy() +print(len(lines)) +for line in lines: + rho,theta = line[0] + a = np.cos(theta) + b = np.sin(theta) + x0 = a*rho + y0 = b*rho + x1 = int(x0 + 1000*(-b)) + y1 = int(y0 + 1000*(a)) + x2 = int(x0 - 1000*(-b)) + y2 = int(y0 - 1000*(a)) + if (x2-x1) == 0 : + xielv = (y2-y1)/0.00000001 + else: + xielv = (y2-y1)/(x2-x1) + # if (xielv > -0.08 and xielv < 0.08) or xielv > 8.0 or xielv < -8.0 : + # print(xielv) + cv2.line(houghLinesImage,(x1,y1),(x2,y2),(0,0,255),2) +cv2.imshow('houghLinesImage', houghLinesImage) + + + +# 利用方差进行平行线的判断 +# 近邻生长法 +# Hough变换 +# 首先找到水平的范围,上边界为rho最大,theta最小,下边界为rho最小,theta最大(<90) +ThetaMIN2 = 180 +ThetaMIN = 180 +ThetaMAX = 
-1 +ThetaMAX2 = -1 +RhoMAX = -1 +RhoMAX2 = -1 +RhoMIN = 1000 +RhoMIN2 = 1000 +up = 0 +bot = 0 +up2 = 0 +bot2 = 0 + +for i in range(int(lines[0].size/2)): + # 遍历全部点集 + r, t = lines[0][i] + print(lines[0][i]) + t = t / np.pi*180 + if t < 90: + # theta < 90,属于水平范围 + if t > ThetaMAX: + ThetaMAX = t + up = i + if r > RhoMAX: + RhoMAX = r + bot = i + else: + if t > ThetaMAX2: + ThetaMAX2 = t + up2 = i + if t < ThetaMIN2: + ThetaMIN2 = t + bot2 = i + +result = [up, bot, up2, bot2] +print(result) +# Hough变换 +xtheta = [] +yrho = [] +for rho, theta in lines[0]: + a = np.cos(theta) + b = np.sin(theta) + x0 = a*rho + y0 = b*rho + x1 = int(x0 + 1000*(-b)) + y1 = int(y0 + 1000*(a)) + x2 = int(x0 - 1000*(-b)) + y2 = int(y0 - 1000*(a)) + # if theta < 0.5: + xtheta.append(theta / np.pi * 180) + yrho.append(rho) + cv2.line(img, (x1, y1), (x2, y2), (255, 255, 0), 2) + +print(result) +for i in result: + rho = lines[0][i][0] + theta = lines[0][i][1] + a = np.cos(theta) + b = np.sin(theta) + x0 = a*rho + y0 = b*rho + x1 = int(x0 + 1000*(-b)) + y1 = int(y0 + 1000*(a)) + x2 = int(x0 - 1000*(-b)) + y2 = int(y0 - 1000*(a)) + cv2.line(img, (x1, y1), (x2, y2), (255, 0, 0), 10) + +# 计算最有可能的两条best + +# 显示 +# plt.subplot(1, 2, 1), plt.imshow(dst, 'gray') + +plt.subplot(1, 3, 1), plt.imshow(dst, 'gray') +plt.title('test') +plt.xticks([]), plt.yticks([]) + +plt.subplot(1, 3, 2), plt.imshow(img_gray, 'gray') +plt.title('ori') +plt.xticks([]), plt.yticks([]) + +# plt.subplot(1, 4, 3), plt.scatter(xtheta, yrho, c='b', marker='o') +# plt.title('Hough') +# plt.xlabel('X'), plt.ylabel('Y') + +plt.subplot(1, 3, 3), plt.imshow(img) +plt.title('Lines') +plt.xticks([]), plt.yticks([]) + +plt.show() diff --git a/图像识别CV/pythoncv_learn/detection_chessboard_v4.py b/图像识别CV/pythoncv_learn/detection_chessboard_v4.py new file mode 100644 index 0000000..53db7cb --- /dev/null +++ b/图像识别CV/pythoncv_learn/detection_chessboard_v4.py @@ -0,0 +1,214 @@ +import cv2 +import numpy as np +import math +from matplotlib import pyplot as plt + +img = cv2.imread('10076.jpg') + +img_shape = img.shape +length = img_shape[0] +height = img_shape[1] + +if length < height: + img = cv2.resize(img, (800, int(800 * length / height))) +else: + img = cv2.resize(img, (int(800 * height / length), 800)) + +# 边缘保护滤波 +img = cv2.edgePreservingFilter(img) +cv2.imshow('edgePreservingFilter', img) + +hsv = cv2.cvtColor(img,cv2.COLOR_BGR2HSV) + +lower_skin = np.array([11,43,46]) +upper_skin = np.array([34,255,255]) + +mask = cv2.inRange(hsv,lower_skin,upper_skin) +img = cv2.bitwise_and(img,img,mask=mask) + +img_shape = img.shape +img_gray = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY) +dst = img_gray.copy() + +# 获取图像的长宽 +length = img_shape[0] +height = img_shape[1] +midlength = length / 2 +midheight = height / 2 +quarterL = length / 3 +quarterT = height / 3 +sigmaX = 2*(quarterL**2) +sigmaY = 2*(quarterT**2) + +# 设置kernel +kernel = np.ones((5, 5), np.int16) +kernel[2][2] = -24 + +# 设置卷积核 +kernel2 = np.ones((5, 5), np.int16) +kernel3 = np.ones((3, 3), np.int16) +dst = cv2.morphologyEx(img_gray, cv2.MORPH_OPEN, kernel2) + +# 平滑 +# 高斯 +dst = cv2.blur(dst, (5, 5)) +# dst = cv2.GaussianBlur(dst, (5, 5), 0) + +# 锐化 +blur = cv2.Laplacian(dst, cv2.CV_16S, ksize=3) +dst = cv2.convertScaleAbs(blur) + +# 线性滤波,低通滤波 +dst = cv2.filter2D(img_gray, -1, kernel) + +# 平滑(双边滤波) +# 双边滤波 +dst = cv2.bilateralFilter(dst, 9, 100, 100) + +# 在二值化前进行膨胀,以增强线段 +# 膨胀导致线段筛选困难 +# dst = cv2.dilate(dst, kernel3) + +# 二值化 +ret, dst = cv2.threshold(dst, 0, 255, cv2.THRESH_BINARY+cv2.THRESH_OTSU) + +# Hough变换前的高斯赋值 +# 
从图像中央到边缘,逐渐降低权重 +''' +for i in range(length): + for j in range(height): + dst[i][j] = dst[i][j] * math.exp((i-midlength)**2/sigmaX + + (j-midheight)**2/sigmaY) +''' + +''' +# Hough变换 +lines = cv2.HoughLines(dst, 1, np.pi/180, 250) + +houghLinesImage = img.copy() +print(len(lines)) +for line in lines: + rho,theta = line[0] + a = np.cos(theta) + b = np.sin(theta) + x0 = a*rho + y0 = b*rho + x1 = int(x0 + 1000*(-b)) + y1 = int(y0 + 1000*(a)) + x2 = int(x0 - 1000*(-b)) + y2 = int(y0 - 1000*(a)) + if (x2-x1) == 0 : + xielv = (y2-y1)/0.00000001 + else: + xielv = (y2-y1)/(x2-x1) + # if (xielv > -0.08 and xielv < 0.08) or xielv > 8.0 or xielv < -8.0 : + # print(xielv) + cv2.line(houghLinesImage,(x1,y1),(x2,y2),(0,0,255),2) +cv2.imshow('houghLinesImage', houghLinesImage) +''' + +# Hough变换 +thresh_min = min(dst.shape) +lines = cv2.HoughLinesP(dst, 1, np.pi / 180, 200, minLineLength=int(dst.shape[0] * 0.47), maxLineGap=int(thresh_min * 0.5)) + +houghLinesImage = img.copy() +for line in lines: + x1,y1,x2,y2 = line[0] + cv2.line(houghLinesImage,(x1,y1),(x2,y2),(0,0,255),2) +cv2.imshow('houghLinesImage', houghLinesImage) + +# 利用方差进行平行线的判断 +# 近邻生长法 +# Hough变换 +# 首先找到水平的范围,上边界为rho最大,theta最小,下边界为rho最小,theta最大(<90) +ThetaMIN2 = 180 +ThetaMIN = 180 +ThetaMAX = -1 +ThetaMAX2 = -1 +RhoMAX = -1 +RhoMAX2 = -1 +RhoMIN = 1000 +RhoMIN2 = 1000 +up = 0 +bot = 0 +up2 = 0 +bot2 = 0 + +''' +for i in range(int(lines[0].size/2)): + # 遍历全部点集 + r, t = lines[0][i] + print(lines[0][i]) + t = t / np.pi*180 + if t < 90: + # theta < 90,属于水平范围 + if t > ThetaMAX: + ThetaMAX = t + up = i + if r > RhoMAX: + RhoMAX = r + bot = i + else: + if t > ThetaMAX2: + ThetaMAX2 = t + up2 = i + if t < ThetaMIN2: + ThetaMIN2 = t + bot2 = i + +result = [up, bot, up2, bot2] +print(result) +# Hough变换 +xtheta = [] +yrho = [] +for rho, theta in lines[0]: + a = np.cos(theta) + b = np.sin(theta) + x0 = a*rho + y0 = b*rho + x1 = int(x0 + 1000*(-b)) + y1 = int(y0 + 1000*(a)) + x2 = int(x0 - 1000*(-b)) + y2 = int(y0 - 1000*(a)) + # if theta < 0.5: + xtheta.append(theta / np.pi * 180) + yrho.append(rho) + cv2.line(img, (x1, y1), (x2, y2), (255, 255, 0), 2) + +print(result) +for i in result: + rho = lines[0][i][0] + theta = lines[0][i][1] + a = np.cos(theta) + b = np.sin(theta) + x0 = a*rho + y0 = b*rho + x1 = int(x0 + 1000*(-b)) + y1 = int(y0 + 1000*(a)) + x2 = int(x0 - 1000*(-b)) + y2 = int(y0 - 1000*(a)) + cv2.line(img, (x1, y1), (x2, y2), (255, 0, 0), 10) +''' + +# 计算最有可能的两条best + +# 显示 +# plt.subplot(1, 2, 1), plt.imshow(dst, 'gray') + +plt.subplot(1, 3, 1), plt.imshow(dst, 'gray') +plt.title('test') +plt.xticks([]), plt.yticks([]) + +plt.subplot(1, 3, 2), plt.imshow(img_gray, 'gray') +plt.title('ori') +plt.xticks([]), plt.yticks([]) + +# plt.subplot(1, 4, 3), plt.scatter(xtheta, yrho, c='b', marker='o') +# plt.title('Hough') +# plt.xlabel('X'), plt.ylabel('Y') + +plt.subplot(1, 3, 3), plt.imshow(img) +plt.title('Lines') +plt.xticks([]), plt.yticks([]) + +plt.show() diff --git a/图像识别CV/pythoncv_learn/image_cutting_v5.py b/图像识别CV/pythoncv_learn/image_cutting_v5.py index 4f59a1b..8ec957e 100644 --- a/图像识别CV/pythoncv_learn/image_cutting_v5.py +++ b/图像识别CV/pythoncv_learn/image_cutting_v5.py @@ -149,7 +149,7 @@ def findLine(edgeImage, title): cv2.imshow('HoughLinesP' + title, houghLinesPImage) if __name__ == '__main__': - image = cv2.imread('10007.jpg', cv2.IMREAD_COLOR) + image = cv2.imread('10076.jpg', cv2.IMREAD_COLOR) # image = cv2.imread('10008.jpg', cv2.IMREAD_GRAYSCALE) #如果本身是彩图 似乎也没什么影响! 
@@ -164,6 +164,7 @@ if __name__ == '__main__': threshold = 1 labels = k_means(image_s, k, threshold) + ''' labels = (labels * (255 / k)).astype(np.uint8) labels = cv2.resize(labels, (800, 800)) @@ -180,13 +181,38 @@ if __name__ == '__main__': cv2.imshow('image_binary', image_binary) findLine(image_binary, "line") + ''' + + labels_5 = (labels / 5).astype(np.uint8) + labels_4_5 = (labels / 4).astype(np.uint8) + labels_4 = labels_4_5 - labels_5 + labels_3_4_5 = (labels / 3).astype(np.uint8) + labels_3 = labels_3_4_5 - labels_4_5 + labels_2_3_4_5 = (labels / 2).astype(np.uint8) + labels_2 = labels_2_3_4_5 - labels_3_4_5 - labels_4_5 + labels_1 = labels - labels_2 * 2 - labels_3 *3 - labels_4 * 4 - labels_5 * 5 - plt.subplot(1, 2, 1) + plt.subplot(1, 7, 1) plt.title("Soucre Image") plt.imshow(image,cmap="gray") - plt.subplot(1, 2, 2) + plt.subplot(1, 7, 2) plt.title("Segamenting Image with k-means\n" + "k=" + str(k) + " threshold=" + str(threshold)) plt.imshow(labels) + plt.subplot(1, 7, 3) + plt.title("labels_1") + plt.imshow(labels_1) + plt.subplot(1, 7, 4) + plt.title("labels_2") + plt.imshow(labels_2) + plt.subplot(1, 7, 5) + plt.title("labels_3") + plt.imshow(labels_3) + plt.subplot(1, 7, 6) + plt.title("labels_4") + plt.imshow(labels_4) + plt.subplot(1, 7, 7) + plt.title("labels_5") + plt.imshow(labels_5) plt.show() cv2.waitKey(0) diff --git a/图像识别CV/pythoncv_learn/image_cutting_v6.py b/图像识别CV/pythoncv_learn/image_cutting_v6.py new file mode 100644 index 0000000..640d97e --- /dev/null +++ b/图像识别CV/pythoncv_learn/image_cutting_v6.py @@ -0,0 +1,58 @@ +import cv2 +import numpy as np + +def main(): + img = cv2.imread('10076.jpg') + + img_shape = img.shape + length = img_shape[0] + height = img_shape[1] + + if length < height: + img = cv2.resize(img, (800, int(800 * length / height))) + else: + img = cv2.resize(img, (int(800 * height / length), 800)) + + # img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB) + skinMask = HSVBin(img) + contours = getContours(skinMask) + cv2.drawContours(img,contours,-1,(0,255,0),2) + cv2.imshow('capture',img) + k = cv2.waitKey(0) + +def getContours(img): + kernel = np.ones((5,5),np.uint8) + closed = cv2.morphologyEx(img,cv2.MORPH_OPEN,kernel) + closed = cv2.morphologyEx(closed,cv2.MORPH_CLOSE,kernel) + contours,h = cv2.findContours(closed,cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE) + vaildContours = [] + for cont in contours: + if cv2.contourArea(cont)>9000: + #x,y,w,h = cv2.boundingRect(cont) + #if h/w >0.75: + #filter face failed + vaildContours.append(cv2.convexHull(cont)) + #rect = cv2.minAreaRect(cont) + #box = cv2.cv.BoxPoint(rect) + #vaildContours.append(np.int0(box)) + return vaildContours + +def HSVBin(img): + hsv = cv2.cvtColor(img,cv2.COLOR_BGR2HSV) + # cv2.imshow('hsv',hsv) + + ''' + lower_skin = np.array([11,43,46]) + upper_skin = np.array([34,255,255]) + ''' + lower_skin = np.array([14,43,46]) + upper_skin = np.array([30,255,255]) + + mask = cv2.inRange(hsv,lower_skin,upper_skin) + # cv2.imshow('mask',mask) + res = cv2.bitwise_and(img,img,mask=mask) + cv2.imshow('res',res) + return mask + +if __name__ =='__main__': + main() diff --git a/图像识别CV/pythoncv_learn/pic1.png b/图像识别CV/pythoncv_learn/pic1.png new file mode 100644 index 0000000..2463d49 Binary files /dev/null and b/图像识别CV/pythoncv_learn/pic1.png differ diff --git a/图像识别CV/pythoncv_learn/pic2.jpg b/图像识别CV/pythoncv_learn/pic2.jpg new file mode 100644 index 0000000..ed16f9a Binary files /dev/null and b/图像识别CV/pythoncv_learn/pic2.jpg differ diff --git a/图像识别CV/pythoncv_learn/pic3.jpg 
b/图像识别CV/pythoncv_learn/pic3.jpg
new file mode 100644
index 0000000..344eb4f
Binary files /dev/null and b/图像识别CV/pythoncv_learn/pic3.jpg differ
diff --git a/图像识别CV/pythoncv_learn/pic4.jpg b/图像识别CV/pythoncv_learn/pic4.jpg
new file mode 100644
index 0000000..a3f81df
Binary files /dev/null and b/图像识别CV/pythoncv_learn/pic4.jpg differ
diff --git a/图像识别CV/pythoncv_learn/pic5.png b/图像识别CV/pythoncv_learn/pic5.png
new file mode 100644
index 0000000..ec5b2c6
Binary files /dev/null and b/图像识别CV/pythoncv_learn/pic5.png differ
diff --git a/图像识别CV/pythoncv_learn/srcImage.jpg b/图像识别CV/pythoncv_learn/srcImage.jpg
new file mode 100644
index 0000000..dd6177d
Binary files /dev/null and b/图像识别CV/pythoncv_learn/srcImage.jpg differ
diff --git a/图像识别CV/pythoncv_learn/test.jpg b/图像识别CV/pythoncv_learn/test.jpg
new file mode 100644
index 0000000..8ebcff5
Binary files /dev/null and b/图像识别CV/pythoncv_learn/test.jpg differ
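
Note on the orientation filtering used throughout detection_chessboard_v2.py: the 22.5/67.5/112.5/157.5 degree cut-offs only make sense when the compared quantity is an angle in degrees. Below is a minimal, self-contained sketch of such a test written with atan2; the helper names (segment_angle_deg, is_near_vertical, is_near_horizontal) are illustrative and not part of the patch, and the segments are assumed to use the same [[x1, y1, x2, y2]] format that findLinesP returns.

import math

def segment_angle_deg(x1, y1, x2, y2):
    # Orientation of the segment mapped into [0, 180) degrees.
    return math.degrees(math.atan2(y2 - y1, x2 - x1)) % 180

def is_near_vertical(line, lo=67.5, hi=112.5):
    x1, y1, x2, y2 = line[0]
    return lo < segment_angle_deg(x1, y1, x2, y2) < hi

def is_near_horizontal(line, lo=22.5, hi=157.5):
    x1, y1, x2, y2 = line[0]
    angle = segment_angle_deg(x1, y1, x2, y2)
    return angle < lo or angle > hi

# Usage with segments in the HoughLinesP-style [[x1, y1, x2, y2]] format:
print(is_near_vertical([[100, 0, 100, 400]]))    # True: a vertical segment is at 90 degrees
print(is_near_horizontal([[0, 50, 400, 60]]))    # True: roughly 1.4 degrees from horizontal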