
pythoncv_learn upload

master
lipeng 4 years ago
parent
commit
518bd9b511
35 changed files with 1869 additions and 0 deletions
  1. BIN
      图像识别CV/pythoncv_learn/10007.jpg
  2. BIN
      图像识别CV/pythoncv_learn/10008.jpg
  3. BIN
      图像识别CV/pythoncv_learn/10009.jpg
  4. BIN
      图像识别CV/pythoncv_learn/10044.jpg
  5. BIN
      图像识别CV/pythoncv_learn/10049.jpg
  6. BIN
      图像识别CV/pythoncv_learn/10061.jpg
  7. BIN
      图像识别CV/pythoncv_learn/10069.jpg
  8. BIN
      图像识别CV/pythoncv_learn/10076.jpg
  9. +53
    -0
      图像识别CV/pythoncv_learn/FastFeatureDetector.py
  10. +372
    -0
      图像识别CV/pythoncv_learn/detection_chessboard_v1.py
  11. +55
    -0
      图像识别CV/pythoncv_learn/image_cutting_v1.py
  12. +26
    -0
      图像识别CV/pythoncv_learn/image_cutting_v2.py
  13. +33
    -0
      图像识别CV/pythoncv_learn/image_cutting_v3.py
  14. +133
    -0
      图像识别CV/pythoncv_learn/image_cutting_v4.py
  15. +193
    -0
      图像识别CV/pythoncv_learn/image_cutting_v5.py
  16. +111
    -0
      图像识别CV/pythoncv_learn/main.py
  17. +209
    -0
      图像识别CV/pythoncv_learn/main_v2.py
  18. +220
    -0
      图像识别CV/pythoncv_learn/main_v3.py
  19. +232
    -0
      图像识别CV/pythoncv_learn/main_v4.py
  20. +232
    -0
      图像识别CV/pythoncv_learn/main_v5.py
  21. BIN
      图像识别CV/pythoncv_learn/present_center.npz
  22. BIN
      图像识别CV/pythoncv_learn/围棋识别过程参考/01.gif
  23. BIN
      图像识别CV/pythoncv_learn/围棋识别过程参考/02.gif
  24. BIN
      图像识别CV/pythoncv_learn/围棋识别过程参考/03.gif
  25. BIN
      图像识别CV/pythoncv_learn/围棋识别过程参考/04.gif
  26. BIN
      图像识别CV/pythoncv_learn/围棋识别过程参考/05.gif
  27. BIN
      图像识别CV/pythoncv_learn/围棋识别过程参考/06.gif
  28. BIN
      图像识别CV/pythoncv_learn/围棋识别过程参考/07.gif
  29. BIN
      图像识别CV/pythoncv_learn/围棋识别过程参考/08.gif
  30. BIN
      图像识别CV/pythoncv_learn/围棋识别过程参考/09.gif
  31. BIN
      图像识别CV/pythoncv_learn/围棋识别过程参考/10.gif
  32. BIN
      图像识别CV/pythoncv_learn/围棋识别过程参考/11.gif
  33. BIN
      图像识别CV/pythoncv_learn/围棋识别过程参考/12.gif
  34. BIN
      图像识别CV/pythoncv_learn/围棋识别过程参考/13.gif
  35. BIN
      图像识别CV/pythoncv_learn/围棋识别过程参考/围棋识别过程参考.gif

BIN
图像识别CV/pythoncv_learn/10007.jpg View File

Width: 1920  |  Height: 1440  |  Size: 330 kB

BIN
图像识别CV/pythoncv_learn/10008.jpg View File

Width: 1920  |  Height: 1440  |  Size: 296 kB

BIN
图像识别CV/pythoncv_learn/10009.jpg View File

Width: 560  |  Height: 560  |  Size: 131 kB

BIN
图像识别CV/pythoncv_learn/10044.jpg View File

Width: 640  |  Height: 640  |  Size: 49 kB

BIN
图像识别CV/pythoncv_learn/10049.jpg View File

Width: 550  |  Height: 369  |  Size: 69 kB

BIN
图像识别CV/pythoncv_learn/10061.jpg View File

Width: 960  |  Height: 720  |  Size: 88 kB

BIN
图像识别CV/pythoncv_learn/10069.jpg View File

Width: 1080  |  Height: 1440  |  Size: 187 kB

BIN
图像识别CV/pythoncv_learn/10076.jpg View File

Width: 1080  |  Height: 686  |  Size: 86 kB

+ 53
- 0
图像识别CV/pythoncv_learn/FastFeatureDetector.py View File

@@ -0,0 +1,53 @@
import cv2 as cv
import numpy as np

img = cv.imread('10069.jpg')
img = cv.resize(img, (800, 800))

'''
# Initialise the FAST object with default values
fast = cv.FastFeatureDetector_create()

# Find and draw the keypoints
kp = fast.detect(img, None)
img2 = cv.drawKeypoints(img, kp, None, color=(255, 0, 0))

# Print all default parameters
print("Threshold: {}".format(fast.getThreshold()))
print("nonmaxSuppression: {}".format(fast.getNonmaxSuppression()))
print("neighborhood: {}".format(fast.getType()))
print("Total Keypoints with nonmaxSuppression: {}".format(len(kp)))
cv.imshow('fast_true', img2)

# Disable non-maximum suppression
fast.setNonmaxSuppression(0)
kp = fast.detect(img, None)
print("Total Keypoints without nonmaxSuppression: {}".format(len(kp)))
img3 = cv.drawKeypoints(img, kp, None, color=(255, 0, 0))
cv.imshow('fast_false', img3)

cv.waitKey(0)
cv.destroyAllWindows()
'''

'''
gray = cv.cvtColor(img, cv.COLOR_BGR2GRAY)
gray = np.float32(gray)
dst = cv.cornerHarris(gray, 2, 3, 0.04)
# The dilation only marks the corners more clearly; it is not essential
dst = cv.dilate(dst, None)
# Threshold for an optimal value; it may vary from image to image
img[dst > 0.01*dst.max()] = [0, 0, 255]
cv.imshow('dst', img)
'''

gray = cv.cvtColor(img,cv.COLOR_BGR2GRAY)
corners = cv.goodFeaturesToTrack(gray,25,0.01,10)
corners = np.int0(corners)
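# goodFeaturesToTrack is the Shi-Tomasi corner detector: here it keeps at most
# 25 corners with a quality level of 0.01 and a minimum distance of 10 px
# between the returned corners.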
for i in corners:
    x, y = i.ravel()
    cv.circle(img, (x, y), 3, 255, -1)
cv.imshow('dst', img)

cv.waitKey(0)
cv.destroyAllWindows()

+ 372
- 0
图像识别CV/pythoncv_learn/detection_chessboard_v1.py View File

@@ -0,0 +1,372 @@
import cv2
import numpy as np
import math

image=cv2.imread('10076.jpg')
image = cv2.resize(image, (800, 800))

grayImage = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
cv2.imshow('grayImage', grayImage)

edgeImage = grayImage

# Histogram equalization
edgeImage = cv2.equalizeHist(edgeImage)
cv2.imshow('equalizeHist', edgeImage)

'''
# Adaptive histogram equalization (CLAHE)
clahe = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(8,8))
edgeImage = clahe.apply(edgeImage)
cv2.imshow('createCLAHE', edgeImage)
'''

# Binarization
h, w = edgeImage.shape[:2]
m = np.reshape(edgeImage, [1, w*h])
mean = m.sum()/(w*h)
# binary = cv2.adaptiveThreshold(gray, 255, cv2.ADAPTIVE_THRESH_MEAN_C,cv2.THRESH_BINARY,25,10)
ret, binary = cv2.threshold(edgeImage, mean, 255, cv2.THRESH_BINARY)
image_binary = 255 - binary
cv2.imshow('image_binary', image_binary)
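# The global threshold used above is simply the mean grey level of the
# equalized image; cv2.THRESH_OTSU (or the commented-out adaptiveThreshold)
# would be an automatic alternative.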

'''
# Bilateral filtering
edgeImage = cv2.bilateralFilter(edgeImage,9,75,75)
cv2.imshow('bilateralFilter', edgeImage)
'''

'''
# Median blur
edgeImage = cv2.medianBlur(edgeImage,5)
cv2.imshow('medianBlur', edgeImage)
'''

# Gaussian blur
edgeImage = cv2.GaussianBlur(grayImage, (3, 3), 1)
cv2.imshow('GaussianBlur', edgeImage)

'''
# Mean (box) blur
edgeImage = cv2.blur(edgeImage,(5,5))
cv2.imshow('blur', edgeImage)
'''

# Edge detection
canny = cv2.Canny(edgeImage, 20, 80)
cv2.imshow('Canny', canny)

kernel = np.ones((5,5),np.uint8)
dilate = cv2.dilate(canny,kernel,iterations = 1)
cv2.imshow('dilate', dilate)

'''
# Contour detection
ret, thresh = cv2.threshold(edgeImage, 127, 255, 0)
contours, hierarchy = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)

def cnt_area(cnt):
    area = cv2.contourArea(cnt)
    return area

contours.sort(key=cnt_area, reverse=True)
print(len(contours))
for i in range(0, 1):
    cnt = contours[i]
    img = image.copy()
    cv2.drawContours(img, [cnt], 0, (0,255,0), 3)
    cv2.imshow('drawContours_' + str(i), img)
cv2.drawContours(image, contours, -1, (0,255,0), 1)
cv2.imshow('drawContours', image)
'''

laplacian = cv2.Laplacian(edgeImage,cv2.CV_8U)
# laplacian = cv2.Laplacian(edgeImage,cv2.CV_64F)
cv2.imshow('laplacian', laplacian)

# Binarization
h, w = laplacian.shape[:2]
m = np.reshape(laplacian, [1, w*h])
mean = m.sum()/(w*h)
# binary = cv2.adaptiveThreshold(gray, 255, cv2.ADAPTIVE_THRESH_MEAN_C,cv2.THRESH_BINARY,25,10)
ret, laplacian_binary = cv2.threshold(laplacian, mean, 255, cv2.THRESH_BINARY)
cv2.imshow('laplacian_binary', laplacian_binary)

sobelx = edgeImage
for i in range(0, 6):  # 3, 6 and 8 iterations work surprisingly well (3: HoughLinesP finds vertical lines, 6: HoughLines finds vertical lines, 8: finds horizontal lines)
    sobelx = cv2.Sobel(sobelx, cv2.CV_8U, 1, 0, ksize=5)
# sobelx = cv2.Sobel(edgeImage,cv2.CV_64F,1,0,ksize=5)
cv2.imshow('sobelx', sobelx)

sobely = edgeImage
for i in range(0, 6):  # 3, 6 and 8 iterations work surprisingly well (3: HoughLinesP finds horizontal lines, 6: HoughLines finds horizontal lines, 8: finds vertical lines)
    sobely = cv2.Sobel(sobely, cv2.CV_8U, 0, 1, ksize=5)
# sobely = cv2.Sobel(edgeImage,cv2.CV_64F,0,1,ksize=5)
cv2.imshow('sobely', sobely)

binary_1 = cv2.bitwise_and(dilate, laplacian_binary)
cv2.imshow('binary_1', binary_1)

sobelx_binary = cv2.bitwise_and(sobelx, binary_1)
cv2.imshow('sobelx_binary', sobelx_binary)

sobely_binary = cv2.bitwise_and(sobely, binary_1)
cv2.imshow('sobely_binary', sobely_binary)
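# binary_1 keeps only pixels that lie on a dilated Canny edge and are above the
# Laplacian threshold; AND-ing it with the repeatedly Sobel-filtered images
# leaves mostly the vertical grid lines in sobelx_binary and the horizontal
# grid lines in sobely_binary.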

lineXImage = sobelx_binary
lineYImage = sobely_binary

'''
# Erosion
kernel = np.ones((5,5),np.uint8)
edgeImage = cv2.erode(edgeImage,kernel,iterations = 1)
cv2.imshow('erode', edgeImage)
'''

'''
# Dilation
kernel = np.ones((5,5),np.uint8)
edgeImage = cv2.dilate(edgeImage,kernel,iterations = 1)
cv2.imshow('dilate', edgeImage)
'''

'''
# Opening
kernel = np.ones((5,5),np.uint8)
edgeImage = cv2.morphologyEx(edgeImage, cv2.MORPH_OPEN, kernel)
cv2.imshow('morphologyEx', edgeImage)
'''

'''
# Closing
kernel = np.ones((5,5),np.uint8)
morphologyEx = cv2.morphologyEx(edgeImage, cv2.MORPH_CLOSE, kernel)
cv2.imshow('morphologyEx', morphologyEx)
'''

'''
# Morphological gradient
kernel = np.ones((5,5),np.uint8)
edgeImage = cv2.morphologyEx(edgeImage, cv2.MORPH_GRADIENT, kernel)
cv2.imshow('morphologyEx', edgeImage)
'''

'''
# Top hat
kernel = np.ones((5,5),np.uint8)
edgeImage = cv2.morphologyEx(edgeImage, cv2.MORPH_TOPHAT, kernel)
cv2.imshow('morphologyEx', edgeImage)
'''

'''
# Black hat
kernel = np.ones((5,5),np.uint8)
edgeImage = cv2.morphologyEx(edgeImage, cv2.MORPH_BLACKHAT, kernel)
cv2.imshow('morphologyEx', edgeImage)
'''

'''
# Sharpening
cv2.imshow('edgeImage + edgeImage', edgeImage + edgeImage)
'''

def isVerticalLine(line):
    x1, y1, x2, y2 = line[0]

    if (x2 - x1) == 0:
        x_angle = 90
    else:
        xielv = (y2 - y1)/(x2 - x1)
        x_angle = math.atan(xielv) * (180 / math.pi)
    x_angle = x_angle % 180

    if x_angle > 67.5 and x_angle < 112.5:
        return True
    else:
        return False

def isHorizontalLine(line):
    x1, y1, x2, y2 = line[0]

    if (x2 - x1) == 0:
        x_angle = 90
    else:
        xielv = (y2 - y1)/(x2 - x1)
        x_angle = math.atan(xielv) * (180 / math.pi)
    x_angle = x_angle % 180

    if x_angle < 22.5 or x_angle > 157.5:
        return True
    else:
        return False

def findHoughEdge(edgeImage, isLinesP=True):
    if isLinesP:
        lines = findLinesP(edgeImage)
    else:
        lines = findLines(edgeImage)

    if lines is None:
        # Fallback placeholder returned when no lines are detected
        return [[0, 1, 5, 21], [8, 1, 5, 3], [4, 12, 0, 8], [41, 11, 20, 15]]

    lines_vertical = filter(lambda line: True if abs(line[0][1] - line[0][3]) > edgeImage.shape[0] * 0.5 else False, lines)
    lines_horizontal = filter(lambda line: True if abs(line[0][0] - line[0][2]) > edgeImage.shape[1] * 0.5 else False, lines)

    # lines_vertical = filter(isVerticalLine, lines_vertical)
    # lines_horizontal = filter(isHorizontalLine, lines_horizontal)

    lines_vertical = sorted(lines_vertical, key=lambda line: line[0][0])
    lines_horizontal = sorted(lines_horizontal, key=lambda line: line[0][1])

    if len(lines_vertical) < 2 or len(lines_horizontal) < 2:
        return [[0, 1, 5, 21], [8, 1, 5, 3], [4, 12, 0, 8], [41, 11, 20, 15]]
    return [lines_vertical[0][0], lines_horizontal[0][0], lines_vertical[-1][0], lines_horizontal[-1][0]]

def writeHoughEdge(edgeImage, title):
    lines = findHoughEdge(edgeImage, False)
    houghEdgeImage = image.copy()
    for line in lines:
        x1, y1, x2, y2 = line
        cv2.line(houghEdgeImage, (x1, y1), (x2, y2), (0, 0, 255), 2)

    cv2.imshow('houghEdgeImage' + title, houghEdgeImage)

def writeHoughEdgeP(edgeImage, title):
    lines = findHoughEdge(edgeImage, True)
    writeHoughEdgeP = image.copy()
    for line in lines:
        x1, y1, x2, y2 = line
        cv2.line(writeHoughEdgeP, (x1, y1), (x2, y2), (255, 0, 0), 2)

    cv2.imshow('writeHoughEdgeP' + title, writeHoughEdgeP)

def findLines(edgeImage):
    lines = cv2.HoughLines(edgeImage, 1, np.pi/180, 200)
    ok_lines = []
    for line in lines:
        rho, theta = line[0]

        if (theta < (np.pi/4.)) or (theta > (3.*np.pi/4.0)):  # roughly vertical line
            pt1 = (int(rho/np.cos(theta)), 0)  # intersection of the line with the first row
            # intersection of the line with the last row
            pt2 = (int((rho - edgeImage.shape[0]*np.sin(theta))/np.cos(theta)), edgeImage.shape[0])
        else:  # roughly horizontal line
            pt1 = (0, int(rho/np.sin(theta)))  # intersection of the line with the first column
            # intersection of the line with the last column
            pt2 = (edgeImage.shape[1], int((rho - edgeImage.shape[1]*np.cos(theta))/np.sin(theta)))

        x1, y1 = pt1
        x2, y2 = pt2

        if (x2 - x1) == 0:
            x_angle = 90
        else:
            xielv = (y2 - y1)/(x2 - x1)
            x_angle = math.atan(xielv) * (180 / math.pi)
        x_angle = x_angle % 180

        if (x_angle < 22.5 or x_angle > 157.5) or (x_angle > 67.5 and x_angle < 112.5):
            ok_lines.append([[x1, y1, x2, y2]])
    return ok_lines

def findLinesP(edgeImage):
    thresh_min = min(edgeImage.shape)
    lines = cv2.HoughLinesP(edgeImage, 1.2, np.pi / 180, 160, minLineLength=int(edgeImage.shape[0] * 0.7), maxLineGap=int(thresh_min * 0.5))
    ok_lines = []
    for line in lines:
        x1, y1, x2, y2 = line[0]

        if (x2 - x1) == 0:
            x_angle = 90
        else:
            xielv = (y2 - y1)/(x2 - x1)
            x_angle = math.atan(xielv) * (180 / math.pi)
        x_angle = x_angle % 180

        if (x_angle < 22.5 or x_angle > 157.5) or (x_angle > 67.5 and x_angle < 112.5):
            ok_lines.append([[x1, y1, x2, y2]])
    return ok_lines

def writeLines(edgeImage, title):
    lines = findLines(edgeImage)
    houghEdgeImage = image.copy()
    for line in lines:
        x1, y1, x2, y2 = line[0]
        cv2.line(houghEdgeImage, (x1, y1), (x2, y2), (0, 0, 255), 2)
    cv2.imshow('findLines' + title, houghEdgeImage)

def writeLinesP(edgeImage, title):
    lines = findLinesP(edgeImage)
    houghEdgeImage = image.copy()
    for line in lines:
        x1, y1, x2, y2 = line[0]
        cv2.line(houghEdgeImage, (x1, y1), (x2, y2), (255, 0, 0), 2)
    cv2.imshow('findLinesP' + title, houghEdgeImage)
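# __clac_intersection below converts each segment to the general line form
# A*x + B*y + C = 0 and solves the resulting 2x2 system with Cramer's rule;
# a zero determinant m would mean the two lines are parallel.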

def __clac_intersection(line_a, line_b):
    x1_a, y1_a, x2_a, y2_a = line_a
    x1_b, y1_b, x2_b, y2_b = line_b
    A_a = y2_a - y1_a
    B_a = x1_a - x2_a
    C_a = x2_a * y1_a - x1_a * y2_a
    A_b = y2_b - y1_b
    B_b = x1_b - x2_b
    C_b = x2_b * y1_b - x1_b * y2_b
    m = A_a * B_b - A_b * B_a
    output_x = (C_b * B_a - C_a * B_b) / m
    output_y = (C_a * A_b - C_b * A_a) / m
    return (int(output_x), int(output_y))

# writeLines(lineXImage, 'X')
# writeLines(lineYImage, 'Y')
# writeLinesP(lineXImage, 'X')
# writeLinesP(lineYImage, 'Y')

# writeHoughEdge(lineXImage, 'X')
# writeHoughEdge(lineYImage, 'Y')
# writeHoughEdgeP(lineXImage, 'X')
# writeHoughEdgeP(lineYImage, 'Y')

linesX = findHoughEdge(lineXImage, False)
linesY = findHoughEdge(lineYImage, False)

houghEdgeImage = image.copy()
cv2.line(houghEdgeImage,(linesX[0][0],linesX[0][1]),(linesX[0][2],linesX[0][3]),(255,0,0),2)
cv2.line(houghEdgeImage,(linesX[2][0],linesX[2][1]),(linesX[2][2],linesX[2][3]),(0,255,0),2)
cv2.line(houghEdgeImage,(linesY[1][0],linesY[1][1]),(linesY[1][2],linesY[1][3]),(0,0,255),2)
cv2.line(houghEdgeImage,(linesY[3][0],linesY[3][1]),(linesY[3][2],linesY[3][3]),(255,255,0),2)
cv2.imshow('houghEdge', houghEdgeImage)

p1 = __clac_intersection((linesX[0][0],linesX[0][1], linesX[0][2],linesX[0][3]), (linesY[1][0],linesY[1][1], linesY[1][2],linesY[1][3]))
p2 = __clac_intersection((linesY[1][0],linesY[1][1], linesY[1][2],linesY[1][3]), (linesX[2][0],linesX[2][1], linesX[2][2],linesX[2][3]))
p3 = __clac_intersection((linesX[2][0],linesX[2][1], linesX[2][2],linesX[2][3]), (linesY[3][0],linesY[3][1], linesY[3][2],linesY[3][3]))
p4 = __clac_intersection((linesY[3][0],linesY[3][1], linesY[3][2],linesY[3][3]), (linesX[0][0],linesX[0][1], linesX[0][2],linesX[0][3]))

print((p1, p2, p3, p4))
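# p1..p4 are the intersections of the outermost vertical and horizontal lines,
# i.e. (ideally) the four corners of the board; the perspective transform below
# maps them onto an 800x800 square to obtain a rectified, top-down view.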

warpPerspectiveImage = image.copy()
pts1 = np.float32([p1,p2,p4,p3])
pts2 = np.float32([[0,0],[800,0],[0,800],[800,800]])
M = cv2.getPerspectiveTransform(pts1,pts2)
dst = cv2.warpPerspective(warpPerspectiveImage,M,(800,800))
cv2.imshow('warpPerspective', dst)

'''
circles = cv2.HoughCircles(edgeImage, cv2.HOUGH_GRADIENT, 1, 20, param1=50, param2=30, minRadius=0, maxRadius=0)
circles = np.uint16(np.around(circles))
for i in circles[0, :]:
    # Draw the outer circle
    cv2.circle(image, (i[0], i[1]), i[2], (0, 255, 0), 2)
    # Draw the circle centre
    cv2.circle(image, (i[0], i[1]), 2, (0, 0, 255), 3)
'''

'''
pts1 = np.float32([[56,65],[368,52],[28,387],[389,390]])
pts2 = np.float32([[0,0],[300,0],[0,300],[300,300]])
M = cv2.getPerspectiveTransform(pts1, pts2)
image = cv2.warpPerspective(image, M, (600, 600))
cv2.imshow('show', image)
'''

cv2.waitKey(0)
cv2.destroyAllWindows()

+ 55
- 0
图像识别CV/pythoncv_learn/image_cutting_v1.py View File

@@ -0,0 +1,55 @@
# -*- coding: utf-8 -*-
# @Author : matthew
# @Software: PyCharm

import cv2
import matplotlib.pyplot as plt
import numpy as np


def seg_kmeans_gray():
    img = cv2.imread('10049.jpg', cv2.IMREAD_GRAYSCALE)

    # Flatten the image to one sample per pixel
    img_flat = img.reshape((img.shape[0] * img.shape[1], 1))
    img_flat = np.float32(img_flat)

    # Iteration criteria
    criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 20, 0.5)
    flags = cv2.KMEANS_RANDOM_CENTERS

    # Cluster
    compactness, labels, centers = cv2.kmeans(img_flat, 2, None, criteria, 10, flags)

    # Show the result
    img_output = labels.reshape((img.shape[0], img.shape[1]))
    plt.subplot(121), plt.imshow(img, 'gray'), plt.title('input')
    plt.subplot(122), plt.imshow(img_output, 'gray'), plt.title('kmeans')
    plt.show()

def seg_kmeans_color():
    img = cv2.imread('10049.jpg', cv2.IMREAD_COLOR)
    # Swap the image channels bgr -> rgb, otherwise the colours look wrong in matplotlib
    b, g, r = cv2.split(img)
    img = cv2.merge([r, g, b])

    # Flatten all 3 channels to one sample per pixel
    img_flat = img.reshape((img.shape[0] * img.shape[1], 3))
    img_flat = np.float32(img_flat)

    # Iteration criteria
    criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 20, 0.5)
    flags = cv2.KMEANS_RANDOM_CENTERS

    # Cluster
    compactness, labels, centers = cv2.kmeans(img_flat, 2, None, criteria, 10, flags)

    # Show the result
    img_output = labels.reshape((img.shape[0], img.shape[1]))
    plt.subplot(121), plt.imshow(img), plt.title('input')
    plt.subplot(122), plt.imshow(img_output, 'gray'), plt.title('kmeans')
    plt.show()

if __name__ == '__main__':
    # seg_kmeans_gray()
    seg_kmeans_color()

+ 26
- 0
图像识别CV/pythoncv_learn/image_cutting_v2.py View File

@@ -0,0 +1,26 @@
from PIL import Image
from sklearn.cluster import KMeans
import numpy as np

q = Image.open('10049.jpg')
# q.show()
m, n = q.size
q1 = np.array(q)
# Watch out: q.size and q1.shape report the dimensions in opposite orders, (width, height) vs (height, width, channels)
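# e.g. for 10049.jpg (550x369): q.size == (550, 369) while np.array(q).shape == (369, 550, 3)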

q1 = q1.reshape((m*n, 3))

k = 8
y = KMeans(n_clusters = k).fit_predict(q1)

centroids = np.zeros((k, 3))
# Compute each cluster's mean colour (its centroid), then repaint the image with it
for cent in range(k):
    centroids[cent] = q1[np.where(y == cent)].mean(axis=0)
ynew = y.reshape((n, m))
pic_new = Image.new("RGB", (m, n))
for i in range(m):
    for j in range(n):
        pic_new.putpixel((i, j), tuple([int(x) for x in centroids[ynew[j][i]]]))
pic_new.show()

+ 33
- 0
图像识别CV/pythoncv_learn/image_cutting_v3.py View File

@@ -0,0 +1,33 @@
import numpy as np
from PIL import Image as image
# PIL is used to load and create images
from sklearn.cluster import KMeans  # the KMeans clustering algorithm

def loadData(filePath):
    f = open(filePath, 'rb')  # open the file in binary mode
    data = []
    img = image.open(f)
    m, n = img.size  # get the image size
    for i in range(m):
        for j in range(n):
            # Scale each pixel's RGB values into the 0-1 range
            x, y, z = img.getpixel((i, j))
            # Store the scaled colour values in data
            data.append([x/256.0, y/256.0, z/256.0])
    f.close()
    # Return data as a matrix, together with the image size
    return np.mat(data), m, n
imgData, row, col = loadData('10069.jpg')  # load the data

km = KMeans(n_clusters=3)
# Cluster to obtain a label for every pixel
label = km.fit_predict(imgData)
label = label.reshape([row, col])
# Create a new greyscale image to hold the clustering result
pic_new = image.new("L", (row, col))
# Fill in a grey level according to each pixel's cluster label
for i in range(row):
    for j in range(col):
        pic_new.putpixel((i, j), int(256/(label[i][j]+1)))
# Show the resulting image
pic_new.show()

+ 133
- 0
图像识别CV/pythoncv_learn/image_cutting_v4.py View File

@@ -0,0 +1,133 @@
import numpy as np
import random
from matplotlib import pyplot as plt
import cv2
import os

def loss_function(present_center, pre_center):
    '''
    Loss function: difference between the previous and current cluster centres
    (sum of squared pixel differences)
    :param present_center: current cluster centres
    :param pre_center: previous cluster centres
    :return: loss value
    '''
    present_center = np.array(present_center)
    pre_center = np.array(pre_center)
    return np.sum((present_center - pre_center)**2)
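# For example, with two 1-channel centres the loss between [[10], [200]] and
# [[12], [190]] is (10-12)**2 + (200-190)**2 == 104.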


def classifer(intput_signal, center):
    '''
    Classifier: assign every pixel of the input image to its nearest cluster centre
    :param intput_signal: input image
    :param center: cluster centres
    :return: label matrix
    '''
    if len(intput_signal.shape) == 2:
        input_row, input_col = intput_signal.shape  # size of the input image
    else:
        input_row, input_col, layers = intput_signal.shape  # size of the input image

    pixls_labels = np.zeros((input_row, input_col))  # labels of all pixels

    pixl_distance_t = []  # distances of a single pixel to all cluster centres (temporary)

    for i in range(input_row):
        for j in range(input_col):
            # Squared difference between this pixel and every cluster centre
            for k in range(len(center)):
                distance_t = np.sum(abs((intput_signal[i, j]).astype(int) - center[k].astype(int))**2)
                pixl_distance_t.append(distance_t)
            # The pixel belongs to the centre with the smallest difference
            pixls_labels[i, j] = int(pixl_distance_t.index(min(pixl_distance_t)))
            # Clear the list, ready for the next pixel
            pixl_distance_t = []
    return pixls_labels


def k_means(input_signal, center_num, threshold):
    '''
    Image segmentation based on the k-means algorithm (suited to greyscale images)
    :param input_signal: input image
    :param center_num: number of cluster centres
    :param threshold: iteration threshold
    :return:
    '''
    input_signal_cp = np.copy(input_signal)  # copy of the input signal
    if len(input_signal_cp.shape) == 2:
        input_row, input_col = input_signal_cp.shape  # size of the input image
        is_gray = True
    else:
        input_row, input_col, layers = input_signal_cp.shape  # size of the input image
        is_gray = False
    pixls_labels = np.zeros((input_row, input_col))  # labels of all pixels

    is_present_center_random = True
    if os.path.exists('./present_center.npz'):
        present_center_npz = np.load('./present_center.npz', allow_pickle=True)
        present_center = present_center_npz['center_arr']
        is_gray_old = present_center_npz['is_gray']
        pixls_labels = present_center_npz['pixls_labels']

        if is_gray_old == is_gray:
            is_present_center_random = False

    if is_present_center_random:
        # Random initial row and column indices for the cluster centres
        initial_center_row_num = [i for i in range(input_row)]
        random.shuffle(initial_center_row_num)
        initial_center_row_num = initial_center_row_num[:center_num]

        initial_center_col_num = [i for i in range(input_col)]
        random.shuffle(initial_center_col_num)
        initial_center_col_num = initial_center_col_num[:center_num]

        # Current cluster centres
        present_center = []
        for i in range(center_num):
            present_center.append(input_signal_cp[initial_center_row_num[i], initial_center_col_num[i]])
        pixls_labels = classifer(input_signal_cp, present_center)


    print("Start Train")
    num = 0  # iteration counter
    while True:
        pre_centet = present_center.copy()  # remember the previous cluster centres
        # Compute the current cluster centres
        for n in range(center_num):
            temp = np.where(pixls_labels == n)
            present_center[n] = sum(input_signal_cp[temp].astype(int)) / len(input_signal_cp[temp])
        # Classify using the current cluster centres
        pixls_labels = classifer(input_signal_cp, present_center)
        # Difference between the previous and current cluster centres
        loss = loss_function(present_center, pre_centet)
        num = num + 1
        print("Step:" + str(num) + " Loss:" + str(loss))
        # Stop iterating once the loss falls below the threshold
        if loss <= threshold:
            np.savez('./present_center.npz', center_arr=present_center, is_gray=is_gray, pixls_labels=pixls_labels)
            break
    return pixls_labels

if __name__ == '__main__':
    image = cv2.imread('10008.jpg', cv2.IMREAD_COLOR)
    # image = cv2.imread('10008.jpg', cv2.IMREAD_GRAYSCALE)

    # Even if the source is already a colour image this seems to make no difference;
    # matplotlib displays in RGB order, so the channels have to be swapped
    b, g, r = cv2.split(image)
    image = cv2.merge([r, g, b])

    image = cv2.resize(image, (100, 100))

    k = 3
    threshold = 1
    labels = k_means(image, k, threshold)

    plt.subplot(1, 2, 1)
    plt.title("Source Image")
    plt.imshow(image, cmap="gray")
    plt.subplot(1, 2, 2)
    plt.title("Segmenting Image with k-means\n" + "k=" + str(k) + " threshold=" + str(threshold))
    plt.imshow(labels/3)
    plt.show()

+ 193
- 0
图像识别CV/pythoncv_learn/image_cutting_v5.py View File

@@ -0,0 +1,193 @@
import numpy as np
import random
from matplotlib import pyplot as plt
import cv2
import os

def loss_function(present_center, pre_center):
'''
损失函数,计算上一次与当前聚类中的差异(像素差的平方和)
:param present_center: 当前聚类中心
:param pre_center: 上一次聚类中心
:return: 损失值
'''
present_center = np.array(present_center)
pre_center = np.array(pre_center)
return np.sum((present_center - pre_center)**2)


def classifer(intput_signal, center):
'''
分类器(通过当前的聚类中心,给输入图像分类)
:param intput_signal: 输入图像
:param center: 聚类中心
:return: 标签矩阵
'''
if len(intput_signal.shape) == 2:
input_row, input_col = intput_signal.shape # 输入图像的尺寸
else:
input_row, input_col, layers = intput_signal.shape # 输入图像的尺寸

pixls_labels = np.zeros((input_row, input_col)) # 储存所有像素标签

pixl_distance_t = [] # 单个元素与所有聚类中心的距离,临时用

for i in range(input_row):
for j in range(input_col):
# 计算每个像素与所有聚类中心的差平方
for k in range(len(center)):
distance_t = np.sum(abs((intput_signal[i, j]).astype(int) - center[k].astype(int))**2)
pixl_distance_t.append(distance_t)
# 差异最小则为该类
pixls_labels[i, j] = int(pixl_distance_t.index(min(pixl_distance_t)))
# 清空该list,为下一个像素点做准备
pixl_distance_t = []
return pixls_labels


def k_means(input_signal, center_num, threshold):
'''
基于k-means算法的图像分割(适用于灰度图)
:param input_signal: 输入图像
:param center_num: 聚类中心数目
:param threshold: 迭代阈值
:return:
'''
input_signal_cp = np.copy(input_signal) # 输入信号的副本
if len(input_signal_cp.shape) == 2:
input_row, input_col = input_signal_cp.shape # 输入图像的尺寸
is_gray = True
else:
input_row, input_col, layers = input_signal_cp.shape # 输入图像的尺寸
is_gray = False
pixls_labels = np.zeros((input_row, input_col)) # 储存所有像素标签

is_present_center_random = True
if os.path.exists('./present_center.npz'):
present_center_npz = np.load('./present_center.npz', allow_pickle=True)
present_center = present_center_npz['center_arr']
is_gray_old = present_center_npz['is_gray']
pixls_labels = present_center_npz['pixls_labels']

if is_gray_old == is_gray:
is_present_center_random = False

if is_present_center_random:
# 随机初始聚类中心行标与列标
initial_center_row_num = [i for i in range(input_row)]
random.shuffle(initial_center_row_num)
initial_center_row_num = initial_center_row_num[:center_num]

initial_center_col_num = [i for i in range(input_col)]
random.shuffle(initial_center_col_num)
initial_center_col_num = initial_center_col_num[:center_num]

# 当前的聚类中心
present_center = []
for i in range(center_num):
present_center.append(input_signal_cp[initial_center_row_num[i], initial_center_col_num[i]])
pixls_labels = classifer(input_signal_cp, present_center)


print("Start Train")
num = 0 # 用于记录迭代次数
while True:
pre_centet = present_center.copy() # 储存前一次的聚类中心
# 计算当前聚类中心
for n in range(center_num):
temp = np.where(pixls_labels == n)
present_center[n] = sum(input_signal_cp[temp].astype(int)) / len(input_signal_cp[temp])
# 根据当前聚类中心分类
pixls_labels = classifer(input_signal_cp, present_center)
# 计算上一次聚类中心与当前聚类中心的差异
loss = loss_function(present_center, pre_centet)
num = num + 1
print("Step:"+ str(num) + " Loss:" + str(loss))
# 当损失小于迭代阈值时,结束迭代
if loss <= threshold:
np.savez('./present_center.npz', center_arr=present_center, is_gray=is_gray, pixls_labels=pixls_labels)
break
return pixls_labels

def findLine(edgeImage, title):
    houghLinesImage = edgeImage.copy()
    '''
    lines = cv2.HoughLines(edgeImage, 1, np.pi/180, 200)
    print(len(lines))
    for line in lines:
        rho, theta = line[0]
        a = np.cos(theta)
        b = np.sin(theta)
        x0 = a*rho
        y0 = b*rho
        x1 = int(x0 + 1000*(-b))
        y1 = int(y0 + 1000*(a))
        x2 = int(x0 - 1000*(-b))
        y2 = int(y0 - 1000*(a))
        if (x2 - x1) == 0:
            xielv = (y2 - y1)/0.00000001
        else:
            xielv = (y2 - y1)/(x2 - x1)
        # if (xielv > -0.08 and xielv < 0.08) or xielv > 8.0 or xielv < -8.0:
        #     print(xielv)
        cv2.line(houghLinesImage, (x1, y1), (x2, y2), (0, 0, 255), 2)
    cv2.imshow('houghLinesImage' + title, houghLinesImage)
    '''

    houghLinesPImage = image.copy()
    thresh_min = min(edgeImage.shape)
    lines = cv2.HoughLinesP(edgeImage, 1.2, np.pi / 180, 160, minLineLength=int(edgeImage.shape[0] * 0.7), maxLineGap=int(thresh_min * 0.5))
    print(len(lines))
    for line in lines:
        x1, y1, x2, y2 = line[0]
        # Guard against vertical segments when computing the slope
        xielv = (y2 - y1)/(x2 - x1) if (x2 - x1) != 0 else float('inf')
        '''
        if (xielv > -0.08 and xielv < 0.08) or xielv > 8.0 or xielv < -8.0:
            print(xielv)
        '''
        cv2.line(houghLinesPImage, (x1, y1), (x2, y2), (255, 0, 0), 2)
    cv2.imshow('HoughLinesP' + title, houghLinesPImage)

if __name__ == '__main__':
    image = cv2.imread('10007.jpg', cv2.IMREAD_COLOR)
    # image = cv2.imread('10008.jpg', cv2.IMREAD_GRAYSCALE)

    # Even if the source is already a colour image this seems to make no difference;
    # matplotlib displays in RGB order, so the channels have to be swapped
    b, g, r = cv2.split(image)
    image = cv2.merge([r, g, b])
    image = cv2.resize(image, (800, 800))

    image_s = cv2.resize(image, (100, 100))

    k = 6
    threshold = 1
    labels = k_means(image_s, k, threshold)

    labels = (labels * (255 / k)).astype(np.uint8)

    labels = cv2.resize(labels, (800, 800))

    cv2.imshow('labels', labels)

    # Binarization
    h, w = labels.shape[:2]
    m = np.reshape(labels, [1, w*h])
    mean = m.sum()/(w*h)
    # binary = cv2.adaptiveThreshold(gray, 255, cv2.ADAPTIVE_THRESH_MEAN_C, cv2.THRESH_BINARY, 25, 10)
    ret, binary = cv2.threshold(labels, mean, 255, cv2.THRESH_BINARY)
    image_binary = 255 - binary
    cv2.imshow('image_binary', image_binary)

    findLine(image_binary, "line")

    plt.subplot(1, 2, 1)
    plt.title("Source Image")
    plt.imshow(image, cmap="gray")
    plt.subplot(1, 2, 2)
    plt.title("Segmenting Image with k-means\n" + "k=" + str(k) + " threshold=" + str(threshold))
    plt.imshow(labels)
    plt.show()

    cv2.waitKey(0)
    cv2.destroyAllWindows()

+ 111
- 0
图像识别CV/pythoncv_learn/main.py View File

@@ -0,0 +1,111 @@
import cv2
import numpy as np

image=cv2.imread('10076.jpg')
image = cv2.resize(image, (600, 600))

'''
imgray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
ret, thresh = cv2.threshold(imgray, 127, 255, 0)
contours, hierarchy = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
'''

# (3, 3) means the Gaussian kernel is 3 wide and 3 high; the standard deviation is 1
InputArray = image.copy()
cv2.GaussianBlur(InputArray, (3, 3), 1, InputArray)
edgeImage = cv2.Canny(InputArray, 20, 80)
cv2.imshow('GaussianBlur', edgeImage)

ret, thresh = cv2.threshold(edgeImage, 127, 255, 0)
contours, hierarchy = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)

'''
blur = cv2.bilateralFilter(edgeImage,9,75,75)
cv2.imshow('blur', blur)
'''

kernel = np.ones((5,5),np.uint8)
edgeImage = cv2.morphologyEx(edgeImage, cv2.MORPH_CLOSE, kernel)
cv2.imshow('morphologyEx', edgeImage)

'''
kernel = np.ones((5,5),np.uint8)
edgeImage = cv2.dilate(edgeImage,kernel,iterations = 1)
cv2.imshow('dilate', edgeImage)
'''

laplacian = cv2.Laplacian(edgeImage,cv2.CV_8U)
sobelx = cv2.Sobel(edgeImage,cv2.CV_8U,1,0,ksize=5)
sobely = cv2.Sobel(edgeImage,cv2.CV_8U,0,1,ksize=5)
# laplacian = cv2.Laplacian(edgeImage,cv2.CV_64F)
# sobelx = cv2.Sobel(edgeImage,cv2.CV_64F,1,0,ksize=5)
# sobely = cv2.Sobel(edgeImage,cv2.CV_64F,0,1,ksize=5)
cv2.imshow('laplacian', laplacian)
cv2.imshow('sobelx', sobelx)
cv2.imshow('sobely', sobely)
edgeImage = sobely

def cnt_area(cnt):
    area = cv2.contourArea(cnt)
    return area

contours.sort(key=cnt_area, reverse=True)
print(len(contours))
for i in range(0, 10):
    cnt = contours[i]
    img = image.copy()
    cv2.drawContours(img, [cnt], 0, (0, 255, 0), 3)
    cv2.imshow('drawContours_' + str(i), img)
cv2.drawContours(image, contours, -1, (0, 255, 0), 1)
cv2.imshow('drawContours', image)

houghLinesImage = image.copy()
lines = cv2.HoughLines(edgeImage, 1, np.pi/180, 200)
for line in lines:
    rho, theta = line[0]
    a = np.cos(theta)
    b = np.sin(theta)
    x0 = a*rho
    y0 = b*rho
    x1 = int(x0 + 1000*(-b))
    y1 = int(y0 + 1000*(a))
    x2 = int(x0 - 1000*(-b))
    y2 = int(y0 - 1000*(a))
    if (x2 - x1) == 0:
        xielv = (y2 - y1)/0.00000001
    else:
        xielv = (y2 - y1)/(x2 - x1)
    if (xielv > -0.08 and xielv < 0.08) or xielv > 8.0 or xielv < -8.0:
        # print(xielv)
        cv2.line(houghLinesImage, (x1, y1), (x2, y2), (0, 0, 255), 2)
cv2.imshow('houghLinesImage', houghLinesImage)

houghLinesPImage = image.copy()
thresh_min = min(edgeImage.shape)
lines = cv2.HoughLinesP(edgeImage, 1.2, np.pi / 180, 160, minLineLength=int(edgeImage.shape[0] * 0.7), maxLineGap=int(thresh_min * 0.5))
print(len(lines))
for line in lines:
    x1, y1, x2, y2 = line[0]
    xielv = (y2 - y1)/(x2 - x1) if (x2 - x1) != 0 else float('inf')
    if (xielv > -0.08 and xielv < 0.08) or xielv > 8.0 or xielv < -8.0:
        # print(xielv)
        cv2.line(houghLinesPImage, (x1, y1), (x2, y2), (255, 0, 0), 2)
cv2.imshow('HoughLinesP', houghLinesPImage)

'''
circles = cv2.HoughCircles(edgeImage, cv2.HOUGH_GRADIENT, 1, 20, param1=50, param2=30, minRadius=0, maxRadius=0)
circles = np.uint16(np.around(circles))
for i in circles[0, :]:
    # Draw the outer circle
    cv2.circle(image, (i[0], i[1]), i[2], (0, 255, 0), 2)
    # Draw the circle centre
    cv2.circle(image, (i[0], i[1]), 2, (0, 0, 255), 3)
'''

pts1 = np.float32([[56,65],[368,52],[28,387],[389,390]])
pts2 = np.float32([[0,0],[300,0],[0,300],[300,300]])
M = cv2.getPerspectiveTransform(pts1, pts2)
image = cv2.warpPerspective(image, M, (600, 600))
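# Note: pts1 above are hard-coded corner coordinates used to experiment with
# getPerspectiveTransform; detection_chessboard_v1.py derives the corners from
# the detected Hough lines instead.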

cv2.imshow('show', image)
cv2.waitKey(0)
cv2.destroyAllWindows()

+ 209
- 0
图像识别CV/pythoncv_learn/main_v2.py View File

@@ -0,0 +1,209 @@
import cv2
import numpy as np

image=cv2.imread('10009.jpg')
image = cv2.resize(image, (600, 600))

grayImage = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
cv2.imshow('grayImage', grayImage)

edgeImage = grayImage

# Histogram equalization
edgeImage = cv2.equalizeHist(edgeImage)
cv2.imshow('equalizeHist', edgeImage)

'''
# 自适应直方图均衡
clahe = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(8,8))
edgeImage = clahe.apply(edgeImage)
cv2.imshow('createCLAHE', edgeImage)
'''

# Binarization
h, w = edgeImage.shape[:2]
m = np.reshape(edgeImage, [1, w*h])
mean = m.sum()/(w*h)
# binary = cv2.adaptiveThreshold(gray, 255, cv2.ADAPTIVE_THRESH_MEAN_C,cv2.THRESH_BINARY,25,10)
ret, binary = cv2.threshold(edgeImage, mean, 255, cv2.THRESH_BINARY)
image_binary = 255 - binary
cv2.imshow('image_binary', image_binary)

'''
# 双边滤波
edgeImage = cv2.bilateralFilter(edgeImage,9,75,75)
cv2.imshow('bilateralFilter', edgeImage)
'''

'''
# 中位模糊
edgeImage = cv2.medianBlur(edgeImage,5)
cv2.imshow('medianBlur', edgeImage)
'''

# Gaussian blur
edgeImage = cv2.GaussianBlur(grayImage, (3, 3), 1)
cv2.imshow('GaussianBlur', edgeImage)

'''
# 平均
edgeImage = cv2.blur(edgeImage,(5,5))
cv2.imshow('blur', edgeImage)
'''

'''
# 边缘检测
canny = cv2.Canny(edgeImage, 20, 80)
cv2.imshow('Canny', canny)
'''

'''
# 轮廓检测
ret, thresh = cv2.threshold(edgeImage, 127, 255, 0)
contours, hierarchy = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)

def cnt_area(cnt):
area = cv2.contourArea(cnt)
return area
contours.sort(key = cnt_area, reverse=True)
print(len(contours))
for i in range(0, 1):
cnt = contours[i]
img = image.copy()
cv2.drawContours(img, [cnt], 0, (0,255,0), 3)
cv2.imshow('drawContours_' + str(i), img)
cv2.drawContours(image, contours, -1, (0,255,0), 1)
cv2.imshow('drawContours', image)
'''

laplacian = cv2.Laplacian(edgeImage,cv2.CV_8U)
# laplacian = cv2.Laplacian(edgeImage,cv2.CV_64F)
cv2.imshow('laplacian', laplacian)

# Binarization
h, w = laplacian.shape[:2]
m = np.reshape(laplacian, [1, w*h])
mean = m.sum()/(w*h)
# binary = cv2.adaptiveThreshold(gray, 255, cv2.ADAPTIVE_THRESH_MEAN_C,cv2.THRESH_BINARY,25,10)
ret, laplacian_binary = cv2.threshold(laplacian, mean, 255, cv2.THRESH_BINARY)
cv2.imshow('laplacian_binary', laplacian_binary)

sobelx = cv2.Sobel(edgeImage,cv2.CV_8U,1,0,ksize=5)
# sobelx = cv2.Sobel(edgeImage,cv2.CV_64F,1,0,ksize=5)
cv2.imshow('sobelx', sobelx)

sobely = cv2.Sobel(edgeImage,cv2.CV_8U,0,1,ksize=5)
# sobely = cv2.Sobel(edgeImage,cv2.CV_64F,0,1,ksize=5)
cv2.imshow('sobely', sobely)

binary_1 = cv2.bitwise_and(image_binary, laplacian_binary)
cv2.imshow('binary_1', binary_1)
edgeImage = binary_1

'''
# 侵蚀
kernel = np.ones((5,5),np.uint8)
edgeImage = cv2.erode(edgeImage,kernel,iterations = 1)
cv2.imshow('erode', edgeImage)
'''

'''
# 扩张
kernel = np.ones((5,5),np.uint8)
edgeImage = cv2.dilate(edgeImage,kernel,iterations = 1)
cv2.imshow('dilate', edgeImage)
'''

'''
# 开运算
kernel = np.ones((5,5),np.uint8)
edgeImage = cv2.morphologyEx(edgeImage, cv2.MORPH_OPEN, kernel)
cv2.imshow('morphologyEx', edgeImage)
'''

'''
# 闭运算
kernel = np.ones((5,5),np.uint8)
morphologyEx = cv2.morphologyEx(edgeImage, cv2.MORPH_CLOSE, kernel)
cv2.imshow('morphologyEx', morphologyEx)
'''

'''
# 形态学梯度
kernel = np.ones((5,5),np.uint8)
edgeImage = cv2.morphologyEx(edgeImage, cv2.MORPH_GRADIENT, kernel)
cv2.imshow('morphologyEx', edgeImage)
'''

'''
# 顶帽
kernel = np.ones((5,5),np.uint8)
edgeImage = cv2.morphologyEx(edgeImage, cv2.MORPH_TOPHAT, kernel)
cv2.imshow('morphologyEx', edgeImage)
'''

'''
# 黑帽
kernel = np.ones((5,5),np.uint8)
edgeImage = cv2.morphologyEx(edgeImage, cv2.MORPH_BLACKHAT, kernel)
cv2.imshow('morphologyEx', edgeImage)
'''

'''
# 锐化
cv2.imshow('edgeImage + edgeImage', edgeImage + edgeImage)
'''

houghLinesImage = image.copy()
lines = cv2.HoughLines(edgeImage, 1, np.pi/180, 200)
for line in lines:
    rho, theta = line[0]
    a = np.cos(theta)
    b = np.sin(theta)
    x0 = a*rho
    y0 = b*rho
    x1 = int(x0 + 1000*(-b))
    y1 = int(y0 + 1000*(a))
    x2 = int(x0 - 1000*(-b))
    y2 = int(y0 - 1000*(a))
    if (x2 - x1) == 0:
        xielv = (y2 - y1)/0.00000001
    else:
        xielv = (y2 - y1)/(x2 - x1)
    if (xielv > -0.08 and xielv < 0.08) or xielv > 8.0 or xielv < -8.0:
        print(xielv)
        cv2.line(houghLinesImage, (x1, y1), (x2, y2), (0, 0, 255), 2)
cv2.imshow('houghLinesImage', houghLinesImage)

houghLinesPImage = image.copy()
thresh_min = min(edgeImage.shape)
lines = cv2.HoughLinesP(edgeImage, 1.2, np.pi / 180, 160, minLineLength=int(edgeImage.shape[0] * 0.7), maxLineGap=int(thresh_min * 0.5))
print(len(lines))
for line in lines:
    x1, y1, x2, y2 = line[0]
    xielv = (y2 - y1)/(x2 - x1) if (x2 - x1) != 0 else float('inf')
    if (xielv > -0.08 and xielv < 0.08) or xielv > 8.0 or xielv < -8.0:
        print(xielv)
        cv2.line(houghLinesPImage, (x1, y1), (x2, y2), (255, 0, 0), 2)
cv2.imshow('HoughLinesP', houghLinesPImage)

'''
circles = cv2.HoughCircles(edgeImage,cv2.HOUGH_GRADIENT,1,20,param1=50,param2=30,minRadius=0,maxRadius=0)
circles = np.uint16(np.around(circles))
for i in circles[0,:]:
# 绘制外圆
cv2.circle(image,(i[0],i[1]),i[2],(0,255,0),2)
# 绘制圆心
cv2.circle(image,(i[0],i[1]),2,(0,0,255),3)
'''

'''
pts1 = np.float32([[56,65],[368,52],[28,387],[389,390]])
pts2 = np.float32([[0,0],[300,0],[0,300],[300,300]])
M = cv2.getPerspectiveTransform(pts1, pts2)
image = cv2.warpPerspective(image, M, (600, 600))
cv2.imshow('show', image)
'''

cv2.waitKey(0)
cv2.destroyAllWindows()

+ 220
- 0
图像识别CV/pythoncv_learn/main_v3.py View File

@@ -0,0 +1,220 @@
import cv2
import numpy as np

image=cv2.imread('10076.jpg')
image = cv2.resize(image, (800, 800))

grayImage = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
cv2.imshow('grayImage', grayImage)

edgeImage = grayImage

# Histogram equalization
edgeImage = cv2.equalizeHist(edgeImage)
cv2.imshow('equalizeHist', edgeImage)

'''
# 自适应直方图均衡
clahe = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(8,8))
edgeImage = clahe.apply(edgeImage)
cv2.imshow('createCLAHE', edgeImage)
'''

# Binarization
h, w = edgeImage.shape[:2]
m = np.reshape(edgeImage, [1, w*h])
mean = m.sum()/(w*h)
# binary = cv2.adaptiveThreshold(gray, 255, cv2.ADAPTIVE_THRESH_MEAN_C,cv2.THRESH_BINARY,25,10)
ret, binary = cv2.threshold(edgeImage, mean, 255, cv2.THRESH_BINARY)
image_binary = 255 - binary
cv2.imshow('image_binary', image_binary)

'''
# 双边滤波
edgeImage = cv2.bilateralFilter(edgeImage,9,75,75)
cv2.imshow('bilateralFilter', edgeImage)
'''

'''
# 中位模糊
edgeImage = cv2.medianBlur(edgeImage,5)
cv2.imshow('medianBlur', edgeImage)
'''

# Gaussian blur
edgeImage = cv2.GaussianBlur(grayImage, (3, 3), 1)
cv2.imshow('GaussianBlur', edgeImage)

'''
# 平均
edgeImage = cv2.blur(edgeImage,(5,5))
cv2.imshow('blur', edgeImage)
'''

'''
# 边缘检测
canny = cv2.Canny(edgeImage, 20, 80)
cv2.imshow('Canny', canny)
'''

'''
# 轮廓检测
ret, thresh = cv2.threshold(edgeImage, 127, 255, 0)
contours, hierarchy = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)

def cnt_area(cnt):
area = cv2.contourArea(cnt)
return area
contours.sort(key = cnt_area, reverse=True)
print(len(contours))
for i in range(0, 1):
cnt = contours[i]
img = image.copy()
cv2.drawContours(img, [cnt], 0, (0,255,0), 3)
cv2.imshow('drawContours_' + str(i), img)
cv2.drawContours(image, contours, -1, (0,255,0), 1)
cv2.imshow('drawContours', image)
'''

laplacian = cv2.Laplacian(edgeImage,cv2.CV_8U)
# laplacian = cv2.Laplacian(edgeImage,cv2.CV_64F)
cv2.imshow('laplacian', laplacian)

# Binarization
h, w = laplacian.shape[:2]
m = np.reshape(laplacian, [1, w*h])
mean = m.sum()/(w*h)
# binary = cv2.adaptiveThreshold(gray, 255, cv2.ADAPTIVE_THRESH_MEAN_C,cv2.THRESH_BINARY,25,10)
ret, laplacian_binary = cv2.threshold(laplacian, mean, 255, cv2.THRESH_BINARY)
cv2.imshow('laplacian_binary', laplacian_binary)

sobelx = edgeImage
for i in range(0, 6):  # 3, 6 and 8 iterations work surprisingly well (3: HoughLinesP finds vertical lines, 6: HoughLines finds vertical lines, 8: finds horizontal lines)
    sobelx = cv2.Sobel(sobelx, cv2.CV_8U, 1, 0, ksize=5)
# sobelx = cv2.Sobel(edgeImage,cv2.CV_64F,1,0,ksize=5)
cv2.imshow('sobelx', sobelx)

sobely = edgeImage
for i in range(0, 6):  # 3, 6 and 8 iterations work surprisingly well (3: HoughLinesP finds horizontal lines, 6: HoughLines finds horizontal lines, 8: finds vertical lines)
    sobely = cv2.Sobel(sobely, cv2.CV_8U, 0, 1, ksize=5)
# sobely = cv2.Sobel(edgeImage,cv2.CV_64F,0,1,ksize=5)
cv2.imshow('sobely', sobely)

binary_1 = cv2.bitwise_and(image_binary, laplacian_binary)
cv2.imshow('binary_1', binary_1)

sobelx_binary = cv2.bitwise_and(sobelx, binary_1)
cv2.imshow('sobelx_binary', sobelx_binary)

sobely_binary = cv2.bitwise_and(sobely, binary_1)
cv2.imshow('sobely_binary', sobely_binary)

edgeImage = sobelx_binary

'''
# 侵蚀
kernel = np.ones((5,5),np.uint8)
edgeImage = cv2.erode(edgeImage,kernel,iterations = 1)
cv2.imshow('erode', edgeImage)
'''

'''
# 扩张
kernel = np.ones((5,5),np.uint8)
edgeImage = cv2.dilate(edgeImage,kernel,iterations = 1)
cv2.imshow('dilate', edgeImage)
'''

'''
# 开运算
kernel = np.ones((5,5),np.uint8)
edgeImage = cv2.morphologyEx(edgeImage, cv2.MORPH_OPEN, kernel)
cv2.imshow('morphologyEx', edgeImage)
'''

'''
# 闭运算
kernel = np.ones((5,5),np.uint8)
morphologyEx = cv2.morphologyEx(edgeImage, cv2.MORPH_CLOSE, kernel)
cv2.imshow('morphologyEx', morphologyEx)
'''

'''
# 形态学梯度
kernel = np.ones((5,5),np.uint8)
edgeImage = cv2.morphologyEx(edgeImage, cv2.MORPH_GRADIENT, kernel)
cv2.imshow('morphologyEx', edgeImage)
'''

'''
# 顶帽
kernel = np.ones((5,5),np.uint8)
edgeImage = cv2.morphologyEx(edgeImage, cv2.MORPH_TOPHAT, kernel)
cv2.imshow('morphologyEx', edgeImage)
'''

'''
# 黑帽
kernel = np.ones((5,5),np.uint8)
edgeImage = cv2.morphologyEx(edgeImage, cv2.MORPH_BLACKHAT, kernel)
cv2.imshow('morphologyEx', edgeImage)
'''

'''
# 锐化
cv2.imshow('edgeImage + edgeImage', edgeImage + edgeImage)
'''

houghLinesImage = image.copy()
lines = cv2.HoughLines(edgeImage, 1, np.pi/180, 200)
for line in lines:
    rho, theta = line[0]
    a = np.cos(theta)
    b = np.sin(theta)
    x0 = a*rho
    y0 = b*rho
    x1 = int(x0 + 1000*(-b))
    y1 = int(y0 + 1000*(a))
    x2 = int(x0 - 1000*(-b))
    y2 = int(y0 - 1000*(a))
    if (x2 - x1) == 0:
        xielv = (y2 - y1)/0.00000001
    else:
        xielv = (y2 - y1)/(x2 - x1)
    if (xielv > -0.08 and xielv < 0.08) or xielv > 8.0 or xielv < -8.0:
        print(xielv)
        cv2.line(houghLinesImage, (x1, y1), (x2, y2), (0, 0, 255), 2)
cv2.imshow('houghLinesImage', houghLinesImage)

houghLinesPImage = image.copy()
thresh_min = min(edgeImage.shape)
lines = cv2.HoughLinesP(edgeImage, 1.2, np.pi / 180, 160, minLineLength=int(edgeImage.shape[0] * 0.7), maxLineGap=int(thresh_min * 0.5))
print(len(lines))
for line in lines:
    x1, y1, x2, y2 = line[0]
    xielv = (y2 - y1)/(x2 - x1) if (x2 - x1) != 0 else float('inf')
    if (xielv > -0.08 and xielv < 0.08) or xielv > 8.0 or xielv < -8.0:
        print(xielv)
        cv2.line(houghLinesPImage, (x1, y1), (x2, y2), (255, 0, 0), 2)
cv2.imshow('HoughLinesP', houghLinesPImage)

'''
circles = cv2.HoughCircles(edgeImage,cv2.HOUGH_GRADIENT,1,20,param1=50,param2=30,minRadius=0,maxRadius=0)
circles = np.uint16(np.around(circles))
for i in circles[0,:]:
# 绘制外圆
cv2.circle(image,(i[0],i[1]),i[2],(0,255,0),2)
# 绘制圆心
cv2.circle(image,(i[0],i[1]),2,(0,0,255),3)
'''

'''
pts1 = np.float32([[56,65],[368,52],[28,387],[389,390]])
pts2 = np.float32([[0,0],[300,0],[0,300],[300,300]])
M = cv2.getPerspectiveTransform(pts1, pts2)
image = cv2.warpPerspective(image, M, (600, 600))
cv2.imshow('show', image)
'''

cv2.waitKey(0)
cv2.destroyAllWindows()

+ 232
- 0
图像识别CV/pythoncv_learn/main_v4.py View File

@@ -0,0 +1,232 @@
import cv2
import numpy as np

image=cv2.imread('10076.jpg')
image = cv2.resize(image, (800, 800))

grayImage = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
cv2.imshow('grayImage', grayImage)

edgeImage = grayImage

# Histogram equalization
edgeImage = cv2.equalizeHist(edgeImage)
cv2.imshow('equalizeHist', edgeImage)

'''
# 自适应直方图均衡
clahe = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(8,8))
edgeImage = clahe.apply(edgeImage)
cv2.imshow('createCLAHE', edgeImage)
'''

# Binarization
h, w = edgeImage.shape[:2]
m = np.reshape(edgeImage, [1, w*h])
mean = m.sum()/(w*h)
# binary = cv2.adaptiveThreshold(gray, 255, cv2.ADAPTIVE_THRESH_MEAN_C,cv2.THRESH_BINARY,25,10)
ret, binary = cv2.threshold(edgeImage, mean, 255, cv2.THRESH_BINARY)
image_binary = 255 - binary
cv2.imshow('image_binary', image_binary)

'''
# 双边滤波
edgeImage = cv2.bilateralFilter(edgeImage,9,75,75)
cv2.imshow('bilateralFilter', edgeImage)
'''

'''
# 中位模糊
edgeImage = cv2.medianBlur(edgeImage,5)
cv2.imshow('medianBlur', edgeImage)
'''

# Gaussian blur
edgeImage = cv2.GaussianBlur(grayImage, (3, 3), 1)
cv2.imshow('GaussianBlur', edgeImage)

'''
# 平均
edgeImage = cv2.blur(edgeImage,(5,5))
cv2.imshow('blur', edgeImage)
'''

# Edge detection
canny = cv2.Canny(edgeImage, 20, 80)
cv2.imshow('Canny', canny)

kernel = np.ones((5,5),np.uint8)
dilate = cv2.dilate(canny,kernel,iterations = 1)
cv2.imshow('dilate', dilate)

'''
# 轮廓检测
ret, thresh = cv2.threshold(edgeImage, 127, 255, 0)
contours, hierarchy = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)

def cnt_area(cnt):
area = cv2.contourArea(cnt)
return area
contours.sort(key = cnt_area, reverse=True)
print(len(contours))
for i in range(0, 1):
cnt = contours[i]
img = image.copy()
cv2.drawContours(img, [cnt], 0, (0,255,0), 3)
cv2.imshow('drawContours_' + str(i), img)
cv2.drawContours(image, contours, -1, (0,255,0), 1)
cv2.imshow('drawContours', image)
'''

laplacian = cv2.Laplacian(edgeImage,cv2.CV_8U)
# laplacian = cv2.Laplacian(edgeImage,cv2.CV_64F)
cv2.imshow('laplacian', laplacian)

# Binarization
h, w = laplacian.shape[:2]
m = np.reshape(laplacian, [1, w*h])
mean = m.sum()/(w*h)
# binary = cv2.adaptiveThreshold(gray, 255, cv2.ADAPTIVE_THRESH_MEAN_C,cv2.THRESH_BINARY,25,10)
ret, laplacian_binary = cv2.threshold(laplacian, mean, 255, cv2.THRESH_BINARY)
cv2.imshow('laplacian_binary', laplacian_binary)

sobelx = edgeImage
for i in range(0, 6):  # 3, 6 and 8 iterations work surprisingly well (3: HoughLinesP finds vertical lines, 6: HoughLines finds vertical lines, 8: finds horizontal lines)
    sobelx = cv2.Sobel(sobelx, cv2.CV_8U, 1, 0, ksize=5)
# sobelx = cv2.Sobel(edgeImage,cv2.CV_64F,1,0,ksize=5)
cv2.imshow('sobelx', sobelx)

sobely = edgeImage
for i in range(0, 6):  # 3, 6 and 8 iterations work surprisingly well (3: HoughLinesP finds horizontal lines, 6: HoughLines finds horizontal lines, 8: finds vertical lines)
    sobely = cv2.Sobel(sobely, cv2.CV_8U, 0, 1, ksize=5)
# sobely = cv2.Sobel(edgeImage,cv2.CV_64F,0,1,ksize=5)
cv2.imshow('sobely', sobely)

binary_1 = cv2.bitwise_and(dilate, laplacian_binary)
cv2.imshow('binary_1', binary_1)

sobelx_binary = cv2.bitwise_and(sobelx, binary_1)
cv2.imshow('sobelx_binary', sobelx_binary)

sobely_binary = cv2.bitwise_and(sobely, binary_1)
cv2.imshow('sobely_binary', sobely_binary)

lineXImage = sobelx_binary
lineYImage = sobely_binary

'''
# 侵蚀
kernel = np.ones((5,5),np.uint8)
edgeImage = cv2.erode(edgeImage,kernel,iterations = 1)
cv2.imshow('erode', edgeImage)
'''

'''
# 扩张
kernel = np.ones((5,5),np.uint8)
edgeImage = cv2.dilate(edgeImage,kernel,iterations = 1)
cv2.imshow('dilate', edgeImage)
'''

'''
# 开运算
kernel = np.ones((5,5),np.uint8)
edgeImage = cv2.morphologyEx(edgeImage, cv2.MORPH_OPEN, kernel)
cv2.imshow('morphologyEx', edgeImage)
'''

'''
# 闭运算
kernel = np.ones((5,5),np.uint8)
morphologyEx = cv2.morphologyEx(edgeImage, cv2.MORPH_CLOSE, kernel)
cv2.imshow('morphologyEx', morphologyEx)
'''

'''
# 形态学梯度
kernel = np.ones((5,5),np.uint8)
edgeImage = cv2.morphologyEx(edgeImage, cv2.MORPH_GRADIENT, kernel)
cv2.imshow('morphologyEx', edgeImage)
'''

'''
# 顶帽
kernel = np.ones((5,5),np.uint8)
edgeImage = cv2.morphologyEx(edgeImage, cv2.MORPH_TOPHAT, kernel)
cv2.imshow('morphologyEx', edgeImage)
'''

'''
# 黑帽
kernel = np.ones((5,5),np.uint8)
edgeImage = cv2.morphologyEx(edgeImage, cv2.MORPH_BLACKHAT, kernel)
cv2.imshow('morphologyEx', edgeImage)
'''

'''
# 锐化
cv2.imshow('edgeImage + edgeImage', edgeImage + edgeImage)
'''

def findLine(edgeImage, title):
    houghLinesImage = image.copy()
    lines = cv2.HoughLines(edgeImage, 1, np.pi/180, 200)
    print(len(lines))
    for line in lines:
        rho, theta = line[0]
        a = np.cos(theta)
        b = np.sin(theta)
        x0 = a*rho
        y0 = b*rho
        x1 = int(x0 + 1000*(-b))
        y1 = int(y0 + 1000*(a))
        x2 = int(x0 - 1000*(-b))
        y2 = int(y0 - 1000*(a))
        if (x2 - x1) == 0:
            xielv = (y2 - y1)/0.00000001
        else:
            xielv = (y2 - y1)/(x2 - x1)
        '''
        if (xielv > -0.08 and xielv < 0.08) or xielv > 8.0 or xielv < -8.0:
            print(xielv)
        '''
        cv2.line(houghLinesImage, (x1, y1), (x2, y2), (0, 0, 255), 2)
    cv2.imshow('houghLinesImage' + title, houghLinesImage)

    houghLinesPImage = image.copy()
    thresh_min = min(edgeImage.shape)
    lines = cv2.HoughLinesP(edgeImage, 1.2, np.pi / 180, 160, minLineLength=int(edgeImage.shape[0] * 0.7), maxLineGap=int(thresh_min * 0.5))
    print(len(lines))
    for line in lines:
        x1, y1, x2, y2 = line[0]
        xielv = (y2 - y1)/(x2 - x1) if (x2 - x1) != 0 else float('inf')
        '''
        if (xielv > -0.08 and xielv < 0.08) or xielv > 8.0 or xielv < -8.0:
            print(xielv)
        '''
        cv2.line(houghLinesPImage, (x1, y1), (x2, y2), (255, 0, 0), 2)
    cv2.imshow('HoughLinesP' + title, houghLinesPImage)

findLine(lineXImage, 'X')
findLine(lineYImage, 'Y')

'''
circles = cv2.HoughCircles(edgeImage,cv2.HOUGH_GRADIENT,1,20,param1=50,param2=30,minRadius=0,maxRadius=0)
circles = np.uint16(np.around(circles))
for i in circles[0,:]:
# 绘制外圆
cv2.circle(image,(i[0],i[1]),i[2],(0,255,0),2)
# 绘制圆心
cv2.circle(image,(i[0],i[1]),2,(0,0,255),3)
'''

'''
pts1 = np.float32([[56,65],[368,52],[28,387],[389,390]])
pts2 = np.float32([[0,0],[300,0],[0,300],[300,300]])
M = cv2.getPerspectiveTransform(pts1, pts2)
image = cv2.warpPerspective(image, M, (600, 600))
cv2.imshow('show', image)
'''

cv2.waitKey(0)
cv2.destroyAllWindows()

+ 232
- 0
图像识别CV/pythoncv_learn/main_v5.py View File

@@ -0,0 +1,232 @@
import cv2
import numpy as np

image=cv2.imread('10008.jpg')
image = cv2.resize(image, (800, 800))

grayImage = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
cv2.imshow('grayImage', grayImage)

edgeImage = grayImage

# Histogram equalization
edgeImage = cv2.equalizeHist(edgeImage)
cv2.imshow('equalizeHist', edgeImage)

'''
# 自适应直方图均衡
clahe = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(8,8))
edgeImage = clahe.apply(edgeImage)
cv2.imshow('createCLAHE', edgeImage)
'''

# Binarization
h, w = edgeImage.shape[:2]
m = np.reshape(edgeImage, [1, w*h])
mean = m.sum()/(w*h)
# binary = cv2.adaptiveThreshold(gray, 255, cv2.ADAPTIVE_THRESH_MEAN_C,cv2.THRESH_BINARY,25,10)
ret, binary = cv2.threshold(edgeImage, mean, 255, cv2.THRESH_BINARY)
image_binary = 255 - binary
cv2.imshow('image_binary', image_binary)

'''
# 双边滤波
edgeImage = cv2.bilateralFilter(edgeImage,9,75,75)
cv2.imshow('bilateralFilter', edgeImage)
'''

'''
# 中位模糊
edgeImage = cv2.medianBlur(edgeImage,5)
cv2.imshow('medianBlur', edgeImage)
'''

# Gaussian blur
edgeImage = cv2.GaussianBlur(grayImage, (3, 3), 1)
cv2.imshow('GaussianBlur', edgeImage)

'''
# 平均
edgeImage = cv2.blur(edgeImage,(5,5))
cv2.imshow('blur', edgeImage)
'''

# Edge detection
canny = cv2.Canny(edgeImage, 20, 80)
cv2.imshow('Canny', canny)

kernel = np.ones((5,5),np.uint8)
dilate = cv2.dilate(canny,kernel,iterations = 1)
cv2.imshow('dilate', dilate)

'''
# 轮廓检测
ret, thresh = cv2.threshold(edgeImage, 127, 255, 0)
contours, hierarchy = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)

def cnt_area(cnt):
area = cv2.contourArea(cnt)
return area
contours.sort(key = cnt_area, reverse=True)
print(len(contours))
for i in range(0, 1):
cnt = contours[i]
img = image.copy()
cv2.drawContours(img, [cnt], 0, (0,255,0), 3)
cv2.imshow('drawContours_' + str(i), img)
cv2.drawContours(image, contours, -1, (0,255,0), 1)
cv2.imshow('drawContours', image)
'''

laplacian = cv2.Laplacian(edgeImage,cv2.CV_8U)
# laplacian = cv2.Laplacian(edgeImage,cv2.CV_64F)
cv2.imshow('laplacian', laplacian)

# Binarization
h, w = laplacian.shape[:2]
m = np.reshape(laplacian, [1, w*h])
mean = m.sum()/(w*h)
# binary = cv2.adaptiveThreshold(gray, 255, cv2.ADAPTIVE_THRESH_MEAN_C,cv2.THRESH_BINARY,25,10)
ret, laplacian_binary = cv2.threshold(laplacian, mean, 255, cv2.THRESH_BINARY)
cv2.imshow('laplacian_binary', laplacian_binary)

sobelx = edgeImage
for i in range(0, 6):  # 3, 6 and 8 iterations work surprisingly well (3: HoughLinesP finds vertical lines, 6: HoughLines finds vertical lines, 8: finds horizontal lines)
    sobelx = cv2.Sobel(sobelx, cv2.CV_8U, 1, 0, ksize=5)
# sobelx = cv2.Sobel(edgeImage,cv2.CV_64F,1,0,ksize=5)
cv2.imshow('sobelx', sobelx)

sobely = edgeImage
for i in range(0, 6):  # 3, 6 and 8 iterations work surprisingly well (3: HoughLinesP finds horizontal lines, 6: HoughLines finds horizontal lines, 8: finds vertical lines)
    sobely = cv2.Sobel(sobely, cv2.CV_8U, 0, 1, ksize=5)
# sobely = cv2.Sobel(edgeImage,cv2.CV_64F,0,1,ksize=5)
cv2.imshow('sobely', sobely)

binary_1 = cv2.bitwise_and(dilate, laplacian_binary)
cv2.imshow('binary_1', binary_1)

sobelx_binary = cv2.bitwise_and(sobelx, binary_1)
cv2.imshow('sobelx_binary', sobelx_binary)

sobely_binary = cv2.bitwise_and(sobely, binary_1)
cv2.imshow('sobely_binary', sobely_binary)

lineXImage = sobelx_binary
lineYImage = sobely_binary

'''
# 侵蚀
kernel = np.ones((5,5),np.uint8)
edgeImage = cv2.erode(edgeImage,kernel,iterations = 1)
cv2.imshow('erode', edgeImage)
'''

'''
# 扩张
kernel = np.ones((5,5),np.uint8)
edgeImage = cv2.dilate(edgeImage,kernel,iterations = 1)
cv2.imshow('dilate', edgeImage)
'''

'''
# 开运算
kernel = np.ones((5,5),np.uint8)
edgeImage = cv2.morphologyEx(edgeImage, cv2.MORPH_OPEN, kernel)
cv2.imshow('morphologyEx', edgeImage)
'''

'''
# 闭运算
kernel = np.ones((5,5),np.uint8)
morphologyEx = cv2.morphologyEx(edgeImage, cv2.MORPH_CLOSE, kernel)
cv2.imshow('morphologyEx', morphologyEx)
'''

'''
# 形态学梯度
kernel = np.ones((5,5),np.uint8)
edgeImage = cv2.morphologyEx(edgeImage, cv2.MORPH_GRADIENT, kernel)
cv2.imshow('morphologyEx', edgeImage)
'''

'''
# 顶帽
kernel = np.ones((5,5),np.uint8)
edgeImage = cv2.morphologyEx(edgeImage, cv2.MORPH_TOPHAT, kernel)
cv2.imshow('morphologyEx', edgeImage)
'''

'''
# 黑帽
kernel = np.ones((5,5),np.uint8)
edgeImage = cv2.morphologyEx(edgeImage, cv2.MORPH_BLACKHAT, kernel)
cv2.imshow('morphologyEx', edgeImage)
'''

'''
# 锐化
cv2.imshow('edgeImage + edgeImage', edgeImage + edgeImage)
'''

def findLine(edgeImage, title):
    houghLinesImage = image.copy()
    lines = cv2.HoughLines(edgeImage, 1, np.pi/180, 200)
    print(len(lines))
    for line in lines:
        rho, theta = line[0]
        a = np.cos(theta)
        b = np.sin(theta)
        x0 = a*rho
        y0 = b*rho
        x1 = int(x0 + 1000*(-b))
        y1 = int(y0 + 1000*(a))
        x2 = int(x0 - 1000*(-b))
        y2 = int(y0 - 1000*(a))
        if (x2 - x1) == 0:
            xielv = (y2 - y1)/0.00000001
        else:
            xielv = (y2 - y1)/(x2 - x1)
        '''
        if (xielv > -0.08 and xielv < 0.08) or xielv > 8.0 or xielv < -8.0:
            print(xielv)
        '''
        cv2.line(houghLinesImage, (x1, y1), (x2, y2), (0, 0, 255), 2)
    cv2.imshow('houghLinesImage' + title, houghLinesImage)

    houghLinesPImage = image.copy()
    thresh_min = min(edgeImage.shape)
    lines = cv2.HoughLinesP(edgeImage, 1.2, np.pi / 180, 160, minLineLength=int(edgeImage.shape[0] * 0.7), maxLineGap=int(thresh_min * 0.5))
    print(len(lines))
    for line in lines:
        x1, y1, x2, y2 = line[0]
        xielv = (y2 - y1)/(x2 - x1) if (x2 - x1) != 0 else float('inf')
        '''
        if (xielv > -0.08 and xielv < 0.08) or xielv > 8.0 or xielv < -8.0:
            print(xielv)
        '''
        cv2.line(houghLinesPImage, (x1, y1), (x2, y2), (255, 0, 0), 2)
    cv2.imshow('HoughLinesP' + title, houghLinesPImage)

findLine(lineXImage, 'X')
findLine(lineYImage, 'Y')

'''
circles = cv2.HoughCircles(edgeImage,cv2.HOUGH_GRADIENT,1,20,param1=50,param2=30,minRadius=0,maxRadius=0)
circles = np.uint16(np.around(circles))
for i in circles[0,:]:
# 绘制外圆
cv2.circle(image,(i[0],i[1]),i[2],(0,255,0),2)
# 绘制圆心
cv2.circle(image,(i[0],i[1]),2,(0,0,255),3)
'''

'''
pts1 = np.float32([[56,65],[368,52],[28,387],[389,390]])
pts2 = np.float32([[0,0],[300,0],[0,300],[300,300]])
M = cv2.getPerspectiveTransform(pts1, pts2)
image = cv2.warpPerspective(image, M, (600, 600))
cv2.imshow('show', image)
'''

cv2.waitKey(0)
cv2.destroyAllWindows()

BIN
图像识别CV/pythoncv_learn/present_center.npz View File


BIN
图像识别CV/pythoncv_learn/围棋识别过程参考/01.gif View File

Width: 600  |  Height: 600  |  Size: 280 kB

BIN
图像识别CV/pythoncv_learn/围棋识别过程参考/02.gif View File

Width: 600  |  Height: 600  |  Size: 250 kB

BIN
图像识别CV/pythoncv_learn/围棋识别过程参考/03.gif View File

Width: 600  |  Height: 600  |  Size: 112 kB

BIN
图像识别CV/pythoncv_learn/围棋识别过程参考/04.gif View File

Width: 600  |  Height: 600  |  Size: 137 kB

BIN
图像识别CV/pythoncv_learn/围棋识别过程参考/05.gif View File

Width: 600  |  Height: 600  |  Size: 12 kB

BIN
图像识别CV/pythoncv_learn/围棋识别过程参考/06.gif View File

Width: 600  |  Height: 600  |  Size: 6.9 kB

BIN
图像识别CV/pythoncv_learn/围棋识别过程参考/07.gif View File

Width: 600  |  Height: 600  |  Size: 21 kB

BIN
图像识别CV/pythoncv_learn/围棋识别过程参考/08.gif View File

Width: 600  |  Height: 600  |  Size: 22 kB

BIN
图像识别CV/pythoncv_learn/围棋识别过程参考/09.gif View File

Width: 600  |  Height: 600  |  Size: 98 kB

BIN
图像识别CV/pythoncv_learn/围棋识别过程参考/10.gif View File

Width: 600  |  Height: 600  |  Size: 114 kB

BIN
图像识别CV/pythoncv_learn/围棋识别过程参考/11.gif View File

Width: 600  |  Height: 600  |  Size: 114 kB

BIN
图像识别CV/pythoncv_learn/围棋识别过程参考/12.gif View File

Width: 600  |  Height: 600  |  Size: 116 kB

BIN
图像识别CV/pythoncv_learn/围棋识别过程参考/13.gif View File

Width: 600  |  Height: 600  |  Size: 94 kB

BIN
图像识别CV/pythoncv_learn/围棋识别过程参考/围棋识别过程参考.gif View File

Width: 600  |  Height: 600  |  Size: 1.4 MB
