diff --git a/HyperLPRLite.py b/HyperLPRLite.py
deleted file mode 100644
index 36b9d3c..0000000
--- a/HyperLPRLite.py
+++ /dev/null
@@ -1,155 +0,0 @@
-#coding=utf-8
-import cv2
-import numpy as np
-from keras import backend as K
-from keras.models import *
-from keras.layers import *
-
-chars = [u"京", u"沪", u"津", u"渝", u"冀", u"晋", u"蒙", u"辽", u"吉", u"黑", u"苏", u"浙", u"皖", u"闽", u"赣", u"鲁", u"豫", u"鄂", u"湘", u"粤", u"桂",
-         u"琼", u"川", u"贵", u"云", u"藏", u"陕", u"甘", u"青", u"宁", u"新", u"0", u"1", u"2", u"3", u"4", u"5", u"6", u"7", u"8", u"9", u"A",
-         u"B", u"C", u"D", u"E", u"F", u"G", u"H", u"J", u"K", u"L", u"M", u"N", u"P", u"Q", u"R", u"S", u"T", u"U", u"V", u"W", u"X",
-         u"Y", u"Z", u"港", u"学", u"使", u"警", u"澳", u"挂", u"军", u"北", u"南", u"广", u"沈", u"兰", u"成", u"济", u"海", u"民", u"航", u"空"
-         ]
-
-class LPR():
-    def __init__(self, model_detection, model_finemapping, model_seq_rec):
-        self.watch_cascade = cv2.CascadeClassifier(model_detection)
-        self.modelFineMapping = self.model_finemapping()
-        self.modelFineMapping.load_weights(model_finemapping)
-        self.modelSeqRec = self.model_seq_rec(model_seq_rec)
-
-    def computeSafeRegion(self, shape, bounding_rect):
-        top = bounding_rect[1]  # y
-        bottom = bounding_rect[1] + bounding_rect[3]  # y + h
-        left = bounding_rect[0]  # x
-        right = bounding_rect[0] + bounding_rect[2]  # x + w
-        min_top = 0
-        max_bottom = shape[0]
-        min_left = 0
-        max_right = shape[1]
-        if top < min_top:
-            top = min_top
-        if left < min_left:
-            left = min_left
-        if bottom > max_bottom:
-            bottom = max_bottom
-        if right > max_right:
-            right = max_right
-        return [left, top, right-left, bottom-top]
-
-    def cropImage(self, image, rect):
-        x, y, w, h = self.computeSafeRegion(image.shape, rect)
-        return image[y:y+h, x:x+w]
-
-    def detectPlateRough(self, image_gray, resize_h=720, en_scale=1.08, top_bottom_padding_rate=0.05):
-        if top_bottom_padding_rate > 0.2:
-            print("error:top_bottom_padding_rate > 0.2:", top_bottom_padding_rate)
-            exit(1)
-        height = image_gray.shape[0]
-        padding = int(height*top_bottom_padding_rate)
-        scale = image_gray.shape[1]/float(image_gray.shape[0])
-        image = cv2.resize(image_gray, (int(scale*resize_h), resize_h))
-        image_color_cropped = image[padding:resize_h-padding, 0:image_gray.shape[1]]
-        image_gray = cv2.cvtColor(image_color_cropped, cv2.COLOR_RGB2GRAY)
-        watches = self.watch_cascade.detectMultiScale(image_gray, en_scale, 2, minSize=(36, 9), maxSize=(36*40, 9*40))
-        cropped_images = []
-        for (x, y, w, h) in watches:
-            x -= w * 0.14
-            w += w * 0.28
-            y -= h * 0.15
-            h += h * 0.3
-            cropped = self.cropImage(image_color_cropped, (int(x), int(y), int(w), int(h)))
-            cropped_images.append([cropped, [x, y+padding, w, h]])
-        return cropped_images
-
-    def fastdecode(self, y_pred):
-        # greedy CTC decode: collapse repeated argmax indices, skip the blank class len(chars)
-        results = ""
-        confidence = 0.0
-        table_pred = y_pred.reshape(-1, len(chars)+1)
-        res = table_pred.argmax(axis=1)
-        for i, one in enumerate(res):
-            if one < len(chars) and (i == 0 or one != res[i-1]):
-                results += chars[one]
-                confidence += table_pred[i][one]
-        confidence /= len(results)
-        return results, confidence
-
-    def model_seq_rec(self, model_path):
-        width, height, n_len, n_class = 164, 48, 7, len(chars) + 1
-        rnn_size = 256
-        input_tensor = Input((164, 48, 3))
-        x = input_tensor
-        base_conv = 32
-        for i in range(3):
-            x = Conv2D(base_conv * (2 ** (i)), (3, 3))(x)
-            x = BatchNormalization()(x)
-            x = Activation('relu')(x)
-            x = MaxPooling2D(pool_size=(2, 2))(x)
-        conv_shape = x.get_shape()
-        x = Reshape(target_shape=(int(conv_shape[1]), int(conv_shape[2]*conv_shape[3])))(x)
-        x = Dense(32)(x)
-        x = BatchNormalization()(x)
-        x = Activation('relu')(x)
-        gru_1 = GRU(rnn_size, return_sequences=True, kernel_initializer='he_normal', name='gru1')(x)
-        gru_1b = GRU(rnn_size, return_sequences=True, go_backwards=True, kernel_initializer='he_normal', name='gru1_b')(x)
-        gru1_merged = add([gru_1, gru_1b])
-        gru_2 = GRU(rnn_size, return_sequences=True, kernel_initializer='he_normal', name='gru2')(gru1_merged)
-        gru_2b = GRU(rnn_size, return_sequences=True, go_backwards=True, kernel_initializer='he_normal', name='gru2_b')(gru1_merged)
-        x = concatenate([gru_2, gru_2b])
-        x = Dropout(0.25)(x)
-        x = Dense(n_class, kernel_initializer='he_normal', activation='softmax')(x)
-        base_model = Model(inputs=input_tensor, outputs=x)
-        base_model.load_weights(model_path)
-        return base_model
-
-    def model_finemapping(self):
-        input = Input(shape=[16, 66, 3])  # change this shape to [None,None,3] to enable arbitrary shape input
-        x = Conv2D(10, (3, 3), strides=1, padding='valid', name='conv1')(input)
-        x = Activation("relu", name='relu1')(x)
-        x = MaxPool2D(pool_size=2)(x)
-        x = Conv2D(16, (3, 3), strides=1, padding='valid', name='conv2')(x)
-        x = Activation("relu", name='relu2')(x)
-        x = Conv2D(32, (3, 3), strides=1, padding='valid', name='conv3')(x)
-        x = Activation("relu", name='relu3')(x)
-        x = Flatten()(x)
-        output = Dense(2, name="dense")(x)
-        output = Activation("relu", name='relu4')(output)
-        model = Model([input], [output])
-        return model
-
-    def finemappingVertical(self, image, rect):
-        resized = cv2.resize(image, (66, 16))
-        resized = resized.astype(np.float)/255
-        res_raw = self.modelFineMapping.predict(np.array([resized]))[0]
-        res = res_raw*image.shape[1]
-        res = res.astype(np.int)
-        H, T = res
-        H -= 3
-        if H < 0:
-            H = 0
-        T += 2
-        if T >= image.shape[1]-1:
-            T = image.shape[1]-1
-        rect[2] -= rect[2]*(1-res_raw[1] + res_raw[0])
-        rect[0] += res[0]
-        image = image[:, H:T+2]
-        image = cv2.resize(image, (int(136), int(36)))
-        return image, rect
-
-    def recognizeOne(self, src):
-        x_tempx = src
-        x_temp = cv2.resize(x_tempx, (164, 48))
-        x_temp = x_temp.transpose(1, 0, 2)
-        y_pred = self.modelSeqRec.predict(np.array([x_temp]))
-        y_pred = y_pred[:, 2:, :]
-        return self.fastdecode(y_pred)
-
-    def SimpleRecognizePlateByE2E(self, image):
-        images = self.detectPlateRough(image, image.shape[0], top_bottom_padding_rate=0.1)
-        res_set = []
-        for j, plate in enumerate(images):
-            plate, rect = plate
-            image_rgb, rect_refine = self.finemappingVertical(plate, rect)
-            res, confidence = self.recognizeOne(image_rgb)
-            res_set.append([res, confidence, rect_refine])
-        return res_set
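For reference, the deleted `HyperLPRLite.py` bundled the minimal end-to-end pipeline (cascade detection → vertical fine-mapping → GRU/CTC recognition). A usage sketch, mirroring the `demo.py` this commit also removes; it assumes a pre-removal checkout with the stock weights under `model/`:

```python
# Usage sketch for the deleted HyperLPRLite module (assumes a pre-removal
# checkout with the repo's stock model/ weights on disk).
import cv2
import HyperLPRLite as pr

model = pr.LPR("model/cascade.xml",           # Haar cascade, rough detection
               "model/model12.h5",            # vertical fine-mapping weights
               "model/ocr_plate_all_gru.h5")  # GRU-based CTC recognizer
image = cv2.imread("images_rec/2_.jpg")
for plate_str, confidence, rect in model.SimpleRecognizePlateByE2E(image):
    if confidence > 0.7:                      # same threshold demo.py uses
        print(plate_str, confidence, rect)
```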
diff --git a/HyperLprGUI.py b/HyperLprGUI.py
deleted file mode 100644
index f77756d..0000000
--- a/HyperLprGUI.py
+++ /dev/null
@@ -1,794 +0,0 @@
-"""
-Author: youngorsu
-Email : zhiyongsu@qq.com
-Last edited: 2018.1.29
-"""
-# coding=utf-8
-
-import sys
-import os
-from PyQt5.QtWidgets import (
-    QMainWindow, QLabel, QLineEdit, QPushButton, QHBoxLayout, QVBoxLayout,
-    QGridLayout, QTableWidget, QWidget, QAbstractItemView, QHeaderView,
-    QGraphicsView, QGraphicsScene, QGraphicsPixmapItem, QSplitter,
-    QFileDialog, QTableWidgetItem, QGraphicsRectItem, QCheckBox, QMessageBox,
-    QGroupBox, QGraphicsSimpleTextItem, qApp, QAction, QApplication)
-from PyQt5.QtGui import QIcon, QColor, QPainter, QImage, QPixmap, QPen, QBrush, QFont, QPalette, QKeySequence
-from PyQt5.QtCore import Qt, QDir, QSize, QEventLoop, QThread, pyqtSignal
-
-from hyperlpr_py3 import pipline as pp
-import cv2
-import numpy as np
-import time
-import shutil
-
-draw_plate_in_image_enable = 1
-
-plateTypeName = ["蓝", "黄", "绿", "白", "黑 "]
-
-
-def SimpleRecognizePlateWithGui(image):
-    t0 = time.time()
-    images = pp.detect.detectPlateRough(
-        image, image.shape[0], top_bottom_padding_rate=0.1)
-    res_set = []
-    y_offset = 32
-    for j, plate in enumerate(images):
-        plate, rect, origin_plate = plate
-        plate = cv2.resize(plate, (136, 36 * 2))
-        t1 = time.time()
-
-        plate_type = pp.td.SimplePredict(plate)
-        plate_color = plateTypeName[plate_type]
-        if (plate_type > 0) and (plate_type < 5):
-            plate = cv2.bitwise_not(plate)
-
-        if draw_plate_in_image_enable == 1:
-            image[y_offset:y_offset + plate.shape[0], 0:plate.shape[1]] = plate
-            y_offset = y_offset + plate.shape[0] + 4
-
-        image_rgb = pp.fm.findContoursAndDrawBoundingBox(plate)
-        if draw_plate_in_image_enable == 1:
-            image[y_offset:y_offset + image_rgb.shape[0], 0:image_rgb.shape[1]] = image_rgb
-            y_offset = y_offset + image_rgb.shape[0] + 4
-
-        image_rgb = pp.fv.finemappingVertical(image_rgb)
-        if draw_plate_in_image_enable == 1:
-            image[y_offset:y_offset + image_rgb.shape[0], 0:image_rgb.shape[1]] = image_rgb
-            y_offset = y_offset + image_rgb.shape[0] + 4
-
-        pp.cache.verticalMappingToFolder(image_rgb)
-        if draw_plate_in_image_enable == 1:
-            image[y_offset:y_offset + image_rgb.shape[0], 0:image_rgb.shape[1]] = image_rgb
-            y_offset = y_offset + image_rgb.shape[0] + 4
-
-        e2e_plate, e2e_confidence = pp.e2e.recognizeOne(image_rgb)
-        print("e2e:", e2e_plate, e2e_confidence)
-
-        image_gray = cv2.cvtColor(image_rgb, cv2.COLOR_RGB2GRAY)
-        #print("校正", time.time() - t1, "s")
-
-        t2 = time.time()
-        val = pp.segmentation.slidingWindowsEval(image_gray)
-        # print val
-        #print("分割和识别", time.time() - t2, "s")
-
-        res = ""
-        confidence = 0
-        if len(val) == 3:
-            blocks, res, confidence = val
-            if confidence / 7 > 0.7:
-                if draw_plate_in_image_enable == 1:
-                    image = pp.drawRectBox(image, rect, res)
-                    for i, block in enumerate(blocks):
-                        block_ = cv2.resize(block, (24, 24))
-                        block_ = cv2.cvtColor(block_, cv2.COLOR_GRAY2BGR)
-                        image[j * 24:(j * 24) + 24, i * 24:(i * 24) + 24] = block_
-                        if image[j * 24:(j * 24) + 24, i * 24:(i * 24) + 24].shape == block_.shape:
-                            pass
-
-            res_set.append([res,
-                            confidence / 7,
-                            rect,
-                            plate_color,
-                            e2e_plate,
-                            e2e_confidence,
-                            len(blocks)])
-            print("seg:", res, confidence / 7)
-        #print(time.time() - t0, "s")
-
print("---------------------------------") - return image, res_set - - -class LicenseRecognizationThread(QThread): - - recognization_done_signal = pyqtSignal(list) - - def __init__(self, parent=None): - super().__init__(parent) - self.hyperlpr_dir_path = "" - self.filenames = [] - - def set_parameter(self, filename_list, path): - self.hyperlpr_dir_path = path - self.filenames = filename_list - - def run(self): - while True: - time.sleep(1) - if len(self.hyperlpr_dir_path) > 0: - for i in range(0, len(self.filenames)): - path = os.path.join( - self.hyperlpr_dir_path, self.filenames[i]) - image = cv2.imdecode(np.fromfile(path, dtype=np.uint8), -1) - image, res_set = SimpleRecognizePlateWithGui(image) - self.recognization_done_signal.emit([i, res_set]) - - self.hyperlpr_dir_path = "" - - -class HyperLprImageView(QGraphicsView): - - def __init__(self): - - super().__init__() - - self.init_ui() - - def init_ui(self): - - scene = QGraphicsScene() - scene.setBackgroundBrush(QColor(100, 100, 100)) - scene.setItemIndexMethod(QGraphicsScene.BspTreeIndex) - - scene.setSceneRect(scene.itemsBoundingRect()) - - self.setDragMode(QGraphicsView.RubberBandDrag) - self.setViewportUpdateMode(QGraphicsView.FullViewportUpdate) - self.setRenderHints(QPainter.Antialiasing | QPainter.TextAntialiasing) - - self.frame_item = QGraphicsPixmapItem() - - self.text_item_offset = 0 - self.rect_item_array = [] - self.text_item_array = [] - for i in range(0, 5): - rect_item = QGraphicsRectItem() - rect_item.setVisible(False) - rect_item.setZValue(20.0) - rect_item.setPen(QPen(Qt.red, 5)) - rect_item.setRect(20, 20, 20, 20) - scene.addItem(rect_item) - self.rect_item_array.append(rect_item) - text_item = QGraphicsSimpleTextItem("") - text_item.setBrush(QBrush(Qt.red)) - text_item.setZValue(20.0) - text_item.setPos(10, 50) - text_item.setFont(QFont("黑体", 24)) - text_item.setVisible(False) - scene.addItem(text_item) - self.text_item_array.append(text_item) - - scene.addItem(self.frame_item) - - self.curr_factor = 1.0 - - self.setScene(scene) - - def resetRectText(self, res_set): - max_no = len(res_set) - - if max_no > 5: - max_no = 5 - - for i in range(0, 5): - if i < max_no: - curr_rect = res_set[i][2] - self.rect_item_array[i].setRect(int(curr_rect[0]), int( - curr_rect[1]), int(curr_rect[2]), int(curr_rect[3])) - self.rect_item_array[i].setVisible(True) - - self.text_item_array[i].setText( - res_set[i][4] + " " + res_set[i][3]) - self.text_item_array[i].setPos( - int(curr_rect[0]), int(curr_rect[1]) - 48) - self.text_item_array[i].setVisible(True) - else: - self.text_item_array[i].setVisible(False) - self.rect_item_array[i].setVisible(False) - - def wheelEvent(self, event): - factor = event.angleDelta().y() / 120.0 - if event.angleDelta().y() / 120.0 > 0: - factor = 1.08 - else: - factor = 0.92 - - if self.curr_factor > 0.1 and self.curr_factor < 10: - self.curr_factor = self.curr_factor * factor - self.scale(factor, factor) - - def resetPixmap(self, image): - - self.frame_item.setPixmap(QPixmap.fromImage(image)) - - -class HyperLprWindow(QMainWindow): - - start_init_signal = pyqtSignal() - - def __init__(self): - - super().__init__() - - self.initUI() - - def initUI(self): - - self.statusBar().showMessage('Ready') - - self.left_action = QAction('上一个', self) - self.left_action.setShortcut(QKeySequence.MoveToPreviousChar) - self.left_action.triggered.connect(self.analyze_last_one_image) - - self.right_action = QAction('下一个', self) - self.right_action.setShortcut(QKeySequence.MoveToNextChar) - 
self.right_action.triggered.connect(self.analyze_next_one_image) - - self.rename_image_action = QAction('保存e2e文件名', self) - self.rename_image_action.setShortcut(QKeySequence.MoveToPreviousLine) - self.rename_image_action.triggered.connect(self.rename_current_image_with_info) - - self.statusBar() - - menubar = self.menuBar() - fileMenu = menubar.addMenu('&Function') - fileMenu.addAction(self.left_action) - fileMenu.addAction(self.right_action) - fileMenu.addAction(self.rename_image_action) - - self.image_window_view = HyperLprImageView() - - table_widget_header_labels = [ - "文件名", - "分割识别", - "置信度", - "颜色", - "E2E识别", - "E2E置信度"] - - self.hyperlpr_tableview = QTableWidget( - 0, len(table_widget_header_labels)) - self.hyperlpr_tableview.setHorizontalHeaderLabels( - table_widget_header_labels) - - self.hyperlpr_tableview.setSelectionBehavior( - QAbstractItemView.SelectItems) - self.hyperlpr_tableview.setSelectionMode( - QAbstractItemView.SingleSelection) - self.hyperlpr_tableview.setEditTriggers( - QAbstractItemView.NoEditTriggers) - self.hyperlpr_tableview.horizontalHeader().setSectionResizeMode( - QHeaderView.ResizeToContents) - self.hyperlpr_tableview.setEditTriggers( - QAbstractItemView.NoEditTriggers) - - self.hyperlpr_tableview.cellClicked.connect( - self.recognize_one_license_plate) - - self.left_button = QPushButton("<") - self.left_button.setFixedWidth(60) - self.right_button = QPushButton(">") - self.right_button.setFixedWidth(60) - self.left_button.setEnabled(False) - self.right_button.setEnabled(False) - self.left_button.clicked.connect(self.analyze_last_one_image) - self.right_button.clicked.connect(self.analyze_next_one_image) - left_right_layout = QHBoxLayout() - left_right_layout.addStretch() - left_right_layout.addWidget(self.left_button) - left_right_layout.addStretch() - left_right_layout.addWidget(self.right_button) - left_right_layout.addStretch() - - self.location_label = QLabel("车牌目录", self) - self.location_text = QLineEdit(self) - self.location_text.setEnabled(False) - #self.location_text.setFixedWidth(300) - self.location_button = QPushButton("...") - self.location_button.clicked.connect(self.select_new_dir) - - self.location_layout = QHBoxLayout() - self.location_layout.addWidget(self.location_label) - self.location_layout.addWidget(self.location_text) - self.location_layout.addWidget(self.location_button) - self.location_layout.addStretch() - - self.check_box = QCheckBox("与文件名比较车牌") - self.check_box.setChecked(True) - - self.update_file_path_button = QPushButton('批量识别') - self.update_file_path_button.clicked.connect( - self.batch_recognize_all_images) - - self.update_file_path_layout = QHBoxLayout() - self.update_file_path_layout.addWidget(self.check_box) - self.update_file_path_layout.addWidget(self.update_file_path_button) - self.update_file_path_layout.addStretch() - - self.save_as_e2e_filename_button = QPushButton("保存e2e文件名") - self.save_as_e2e_filename_button.setEnabled(False) - self.save_as_e2e_filename_button.clicked.connect(self.rename_current_image_with_info) - self.save_layout = QHBoxLayout() - self.save_layout.addWidget(self.save_as_e2e_filename_button) - self.save_layout.addStretch() - - self.top_layout = QVBoxLayout() - self.top_layout.addLayout(left_right_layout) - self.top_layout.addLayout(self.location_layout) - self.top_layout.addLayout(self.update_file_path_layout) - self.top_layout.addLayout(self.save_layout) - - function_groupbox = QGroupBox("功能区") - function_groupbox.setLayout(self.top_layout) - - license_plate_image_label = QLabel("车牌图") - 
self.license_plate_widget = QLabel("") - - block_image_label = QLabel("分割图") - self.block_plate_widget = QLabel("") - - filename_label = QLabel("文件名:") - self.filename_edit = QLineEdit() - - segmentation_recognition_label = QLabel("分割识别:") - self.segmentation_recognition_edit = QLineEdit() - self.segmentation_recognition_edit.setFont(QFont("黑体", 24, QFont.Bold)) - # self.segmentation_recognition_edit.setStyleSheet("color:red") - - confidence_label = QLabel("分割识别\n置信度") - self.confidence_edit = QLineEdit() - #self.confidence_edit.setFont(QFont("黑体", 24, QFont.Bold)) - # self.confidence_edit.setStyleSheet("color:red") - - plate_color_label = QLabel("车牌颜色") - self.plate_color_edit = QLineEdit() - self.plate_color_edit.setFont(QFont("黑体", 24, QFont.Bold)) - # self.plate_color_edit.setStyleSheet("color:red") - - e2e_recognization_label = QLabel("e2e识别:") - self.e2e_recognization_edit = QLineEdit() - self.e2e_recognization_edit.setFont(QFont("黑体", 24, QFont.Bold)) - # self.e2e_recognization_edit.setStyleSheet("color:red") - - e2e_confidence_label = QLabel("e2e置信度") - self.e2e_confidence_edit = QLineEdit() - #self.e2e_confidence_edit.setFont(QFont("黑体", 24, QFont.Bold)) - # self.e2e_confidence_edit.setStyleSheet("color:red") - - info_gridlayout = QGridLayout() - line_index = 0 - info_gridlayout.addWidget(filename_label, line_index, 0) - info_gridlayout.addWidget(self.filename_edit, line_index, 1) - line_index += 1 - info_gridlayout.addWidget(license_plate_image_label, line_index, 0) - info_gridlayout.addWidget(self.license_plate_widget, line_index, 1) - line_index += 1 - info_gridlayout.addWidget(e2e_recognization_label, line_index, 0) - info_gridlayout.addWidget(self.e2e_recognization_edit, line_index, 1) - line_index += 1 - info_gridlayout.addWidget( - segmentation_recognition_label, line_index, 0) - info_gridlayout.addWidget( - self.segmentation_recognition_edit, line_index, 1) - line_index += 1 - info_gridlayout.addWidget(plate_color_label, line_index, 0) - info_gridlayout.addWidget(self.plate_color_edit, line_index, 1) - line_index += 1 - info_gridlayout.addWidget(block_image_label, line_index, 0) - info_gridlayout.addWidget(self.block_plate_widget, line_index, 1) - line_index += 1 - info_gridlayout.addWidget(confidence_label, line_index, 0) - info_gridlayout.addWidget(self.confidence_edit, line_index, 1) - line_index += 1 - info_gridlayout.addWidget(e2e_confidence_label, line_index, 0) - info_gridlayout.addWidget(self.e2e_confidence_edit, line_index, 1) - - info_widget = QGroupBox("分割识别&e2e") - - info_widget.setLayout(info_gridlayout) - - right_splitter = QSplitter(Qt.Vertical) - right_splitter.addWidget(self.hyperlpr_tableview) - right_splitter.addWidget(function_groupbox) - right_splitter.addWidget(info_widget) - right_splitter.setStretchFactor(0, 2) - right_splitter.setStretchFactor(2, 1) - - main_splitter = QSplitter(Qt.Horizontal) - main_splitter.addWidget(self.image_window_view) - main_splitter.addWidget(right_splitter) - main_splitter.setStretchFactor(0, 1) - - self.image_filename_list = [] - self.hyperlpr_dir_path = "" - self.segmentation_recognition_correct_number = 0 - self.color_correct_number = 0 - self.e2e_recognization_correct_number = 0 - self.current_row = 0 - - self.batch_recognization_thread = LicenseRecognizationThread() - self.batch_recognization_thread.recognization_done_signal.connect( - self.recognization_done_slot) - self.batch_recognization_thread.start() - - self.start_init_signal.connect(self.read_path_and_show_one_image) - - self.setCentralWidget(main_splitter) - 
- self.setWindowTitle("HyperLPR车牌识别软件v1.0") - - self.start_init_signal.emit() - - def read_path_and_show_one_image(self): - - hyperlpr_dir_info_filepath = QDir.homePath() + "/hyperlpr_dir_file" - if os.path.exists(hyperlpr_dir_info_filepath): - with open(hyperlpr_dir_info_filepath, 'r') as f: - self.hyperlpr_dir_path = f.read() - - if len(self.hyperlpr_dir_path) > 0: - self.reset_info_gui() - - if len(self.image_filename_list) > 0: - self.recognize_and_show_one_image(self.image_filename_list[0], 0) - - def select_new_dir(self): - - self.hyperlpr_dir_path = QFileDialog.getExistingDirectory( - self, "读取文件夹", QDir.currentPath()) - - if len(self.hyperlpr_dir_path) > 0: - hyperlpr_dir_info_filepath = QDir.homePath() + "/hyperlpr_dir_file" - with open(hyperlpr_dir_info_filepath, 'w') as f: - f.write(self.hyperlpr_dir_path) - self.reset_info_gui() - - def rename_current_image_with_info(self): - if len(self.hyperlpr_dir_path) > 0: - target_dir_path = self.hyperlpr_dir_path + "/result" - if not os.path.exists(target_dir_path): - os.makedirs(target_dir_path) - if len(self.plate_color_edit.text())>0 and len(self.e2e_recognization_edit.text())>0: - orign_path = os.path.join(self.hyperlpr_dir_path, self.filename_edit.text()) - target_path = os.path.join(target_dir_path,self.plate_color_edit.text()+"-"+self.e2e_recognization_edit.text()+".jpg") - shutil.copyfile(orign_path, target_path) - - def reset_info_gui(self): - - self.location_text.setText(self.hyperlpr_dir_path) - self.scan_files_with_new_dir(self.hyperlpr_dir_path) - self.fill_table_with_new_info() - - def scan_files_with_new_dir(self, path): - - name_list = os.listdir(path) # 列出文件夹下所有的目录与文件 - self.image_filename_list.clear() - for i in range(0, len(name_list)): - if name_list[i].endswith( - ".jpg") or name_list[i].endswith(".png"): - self.image_filename_list.append(name_list[i]) - - def fill_table_with_new_info(self): - self.hyperlpr_tableview.clearContents() - row_count = self.hyperlpr_tableview.rowCount() - for i in range(row_count, -1, -1): - self.hyperlpr_tableview.removeRow(i) - - for i in range(0, len(self.image_filename_list)): - row = self.hyperlpr_tableview.rowCount() - self.hyperlpr_tableview.insertRow(row) - - item0 = QTableWidgetItem() - item0.setTextAlignment(Qt.AlignCenter) - self.hyperlpr_tableview.setItem(row, 0, item0) - self.hyperlpr_tableview.item( - row, 0).setText( - self.image_filename_list[i]) - - item1 = QTableWidgetItem() - item1.setTextAlignment(Qt.AlignCenter) - self.hyperlpr_tableview.setItem(row, 1, item1) - - item2 = QTableWidgetItem() - item2.setTextAlignment(Qt.AlignCenter) - self.hyperlpr_tableview.setItem(row, 2, item2) - - item3 = QTableWidgetItem() - item3.setTextAlignment(Qt.AlignCenter) - self.hyperlpr_tableview.setItem(row, 3, item3) - - item4 = QTableWidgetItem() - item4.setTextAlignment(Qt.AlignCenter) - self.hyperlpr_tableview.setItem(row, 4, item4) - - item5 = QTableWidgetItem() - item5.setTextAlignment(Qt.AlignCenter) - self.hyperlpr_tableview.setItem(row, 5, item5) - - if len(self.image_filename_list) > 0: - self.left_button.setEnabled(True) - self.right_button.setEnabled(True) - self.save_as_e2e_filename_button.setEnabled(True) - - def analyze_last_one_image(self): - if self.current_row > 0: - self.recognize_one_license_plate(self.current_row-1, 0) - - def analyze_next_one_image(self): - if self.current_row < (len(self.image_filename_list)-1): - self.recognize_one_license_plate(self.current_row + 1, 0) - - def recognize_one_license_plate(self, row, col): - if col == 0 and row < 
len(self.image_filename_list): - self.current_row = row - self.recognize_and_show_one_image( - self.image_filename_list[row], row) - - def recognize_and_show_one_image(self, image_filename_text, row): - - if image_filename_text.endswith(".jpg"): - - print(image_filename_text) - path = os.path.join(self.hyperlpr_dir_path, image_filename_text) - image = cv2.imdecode(np.fromfile(path, dtype=np.uint8), -1) - image, res_set = SimpleRecognizePlateWithGui(image) - img = QImage( - image.data, - image.shape[1], - image.shape[0], - image.shape[1] * image.shape[2], - QImage.Format_RGB888) - self.image_window_view.resetPixmap(img.rgbSwapped()) - self.image_window_view.resetRectText(res_set) - - if len(res_set) > 0: - curr_rect = res_set[0][2] - image_crop = image[int(curr_rect[1]):int( - curr_rect[1] + curr_rect[3]), int(curr_rect[0]):int(curr_rect[0] + curr_rect[2])] - curr_plate = cv2.resize(image_crop, (204, 108)) - plate_img = QImage( - curr_plate.data, - curr_plate.shape[1], - curr_plate.shape[0], - curr_plate.shape[1] * - curr_plate.shape[2], - QImage.Format_RGB888) - self.license_plate_widget.setPixmap( - QPixmap.fromImage(plate_img.rgbSwapped())) - - # print(res_set[0][6]) - block_crop = image[0:24, 0:(24 * int(res_set[0][6]))] - curr_block = cv2.resize( - block_crop, (24 * int(res_set[0][6]), 24)) - block_image = QImage( - curr_block.data, - curr_block.shape[1], - curr_block.shape[0], - curr_block.shape[1] * - curr_block.shape[2], - QImage.Format_RGB888) - self.block_plate_widget.setPixmap( - QPixmap.fromImage(block_image.rgbSwapped())) - - self.segmentation_recognition_edit.setText(res_set[0][0]) - if res_set[0][0] in image_filename_text: - self.segmentation_recognition_edit.setStyleSheet("color:black") - else: - self.segmentation_recognition_edit.setStyleSheet("color:red") - - - self.filename_edit.setText(image_filename_text) - self.confidence_edit.setText("%.3f" % (float(res_set[0][1]))) - - self.plate_color_edit.setText(res_set[0][3]) - if res_set[0][3] in image_filename_text: - self.plate_color_edit.setStyleSheet("color:black") - else: - self.plate_color_edit.setStyleSheet("color:red") - - self.e2e_recognization_edit.setText(res_set[0][4]) - if res_set[0][4] in image_filename_text: - self.e2e_recognization_edit.setStyleSheet("color:black") - else: - self.e2e_recognization_edit.setStyleSheet("color:red") - - self.e2e_confidence_edit.setText( - "%.3f" % (float(res_set[0][5]))) - else: - self.license_plate_widget.clear() - self.block_plate_widget.clear() - self.segmentation_recognition_edit.setText("") - self.filename_edit.setText(image_filename_text) - self.confidence_edit.setText("") - self.plate_color_edit.setText("") - self.e2e_recognization_edit.setText("") - self.e2e_confidence_edit.setText("") - - self.fill_table_widget_with_res_info(res_set, row) - - def batch_recognize_all_images(self): - self.segmentation_recognition_correct_number = 0 - self.color_correct_number = 0 - self.e2e_recognization_correct_number = 0 - self.batch_recognization_thread.set_parameter( - self.image_filename_list, self.hyperlpr_dir_path) - - def recognization_done_slot(self, result_list): - row = result_list[0] - res_set = result_list[1] - self.fill_table_widget_with_res_info(res_set, row) - - if row == len(self.image_filename_list) - 1: - total_number = len(self.image_filename_list) - - row_count = self.hyperlpr_tableview.rowCount() - if row_count > total_number: - self.hyperlpr_tableview.removeRow(total_number) - - self.hyperlpr_tableview.insertRow(total_number) - - item0 = QTableWidgetItem() - 
-            item0.setTextAlignment(Qt.AlignCenter)
-            self.hyperlpr_tableview.setItem(total_number, 0, item0)
-            self.hyperlpr_tableview.item(total_number, 0).setText("统计结果")
-
-            item1 = QTableWidgetItem()
-            item1.setTextAlignment(Qt.AlignCenter)
-            self.hyperlpr_tableview.setItem(total_number, 1, item1)
-            self.hyperlpr_tableview.item(total_number, 1).setText(
-                "{0} / {1} = {2: .3f}".format(
-                    self.segmentation_recognition_correct_number,
-                    total_number,
-                    self.segmentation_recognition_correct_number / total_number))
-
-            item2 = QTableWidgetItem()
-            item2.setTextAlignment(Qt.AlignCenter)
-            self.hyperlpr_tableview.setItem(total_number, 2, item2)
-
-            item3 = QTableWidgetItem()
-            item3.setTextAlignment(Qt.AlignCenter)
-            self.hyperlpr_tableview.setItem(total_number, 3, item3)
-            self.hyperlpr_tableview.item(total_number, 3).setText(
-                "{0} / {1} = {2: .3f}".format(
-                    self.e2e_recognization_correct_number, total_number,
-                    self.e2e_recognization_correct_number / total_number))
-
-            item4 = QTableWidgetItem()
-            item4.setTextAlignment(Qt.AlignCenter)
-            self.hyperlpr_tableview.setItem(total_number, 4, item4)
-            self.hyperlpr_tableview.item(total_number, 4).setText(
-                "{0} / {1} = {2: .3f}".format(
-                    self.color_correct_number, total_number,
-                    self.color_correct_number / total_number))
-
-            item5 = QTableWidgetItem()
-            item5.setTextAlignment(Qt.AlignCenter)
-            self.hyperlpr_tableview.setItem(total_number, 5, item5)
-
-    def fill_table_widget_with_res_info(self, res_set, row):
-        image_filename_text = self.image_filename_list[row]
-        if len(res_set) > 0:
-            self.hyperlpr_tableview.item(row, 1).setText(res_set[0][0])
-            if res_set[0][0] in image_filename_text:
-                self.hyperlpr_tableview.item(row, 1).setForeground(QBrush(QColor(0, 0, 255)))
-                self.segmentation_recognition_correct_number += 1
-            else:
-                self.hyperlpr_tableview.item(row, 1).setForeground(QBrush(QColor(255, 0, 0)))
-
-            self.hyperlpr_tableview.item(row, 2).setText("%.3f" % (float(res_set[0][1])))
-
-            self.hyperlpr_tableview.item(row, 3).setText(res_set[0][3])
-            if res_set[0][3] in image_filename_text:
-                self.hyperlpr_tableview.item(row, 3).setForeground(QBrush(QColor(0, 0, 255)))
-                self.color_correct_number += 1
-            else:
-                self.hyperlpr_tableview.item(row, 3).setForeground(QBrush(QColor(255, 0, 0)))
-
-            self.hyperlpr_tableview.item(row, 4).setText(res_set[0][4])
-            if res_set[0][4] in image_filename_text:
-                self.hyperlpr_tableview.item(row, 4).setForeground(QBrush(QColor(0, 0, 255)))
-                self.e2e_recognization_correct_number += 1
-            else:
-                self.hyperlpr_tableview.item(row, 4).setForeground(QBrush(QColor(255, 0, 0)))
-
-            self.hyperlpr_tableview.item(row, 5).setText("%.3f" % (float(res_set[0][5])))
-
-
-if __name__ == '__main__':
-    app = QApplication(sys.argv)
-    hyper_lpr_widow = HyperLprWindow()
-    hyper_lpr_widow.showMaximized()
-    sys.exit(app.exec_())
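A note on the GUI's scoring above: a prediction counts as correct when the recognized string occurs in the image filename, so ground truth is carried by the filenames themselves (the "保存e2e文件名" action writes files in exactly that `颜色-车牌.jpg` form). A minimal sketch of the convention:

```python
# How HyperLprGUI scores a batch run: ground truth is embedded in filenames,
# e.g. "蓝-京A12345.jpg" as written by rename_current_image_with_info().
def is_correct(prediction: str, filename: str) -> bool:
    return prediction in filename

# The trailing "统计结果" row then reports correct / total separately for the
# segmentation result, the plate colour and the e2e result columns.
print(is_correct(u"京A12345", u"蓝-京A12345.jpg"))  # True
```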
diff --git a/Prj-Linux/CMakeLists.txt b/Prj-Linux/CMakeLists.txt
index f436e67..1349664 100644
--- a/Prj-Linux/CMakeLists.txt
+++ b/Prj-Linux/CMakeLists.txt
@@ -1,3 +1,3 @@
 cmake_minimum_required(VERSION 3.6)
-project(SwiftPR)
-add_subdirectory(lpr)
+project(HyperLPR)
+add_subdirectory(hyperlpr)
diff --git a/Prj-Linux/lpr/.idea/dictionaries/yujinke.xml b/Prj-Linux/lpr/.idea/dictionaries/yujinke.xml
new file mode 100644
index 0000000..9813a61
diff --git a/Prj-Linux/lpr/.idea/encodings.xml b/Prj-Linux/lpr/.idea/encodings.xml
new file mode 100644
index 0000000..97626ba
diff --git a/Prj-Linux/lpr/.idea/lpr.iml b/Prj-Linux/lpr/.idea/lpr.iml
new file mode 100644
index 0000000..397b5fd
diff --git a/Prj-Linux/lpr/.idea/misc.xml b/Prj-Linux/lpr/.idea/misc.xml
new file mode 100644
index 0000000..2355b99
diff --git a/Prj-Linux/lpr/.idea/modules.xml b/Prj-Linux/lpr/.idea/modules.xml
new file mode 100644
index 0000000..2a12827
diff --git a/Prj-Linux/lpr/.idea/workspace.xml b/Prj-Linux/lpr/.idea/workspace.xml
new file mode 100644
index 0000000..0193572
diff --git a/Prj-Linux/lpr/CMakeLists.txt b/Prj-Linux/lpr/CMakeLists.txt
index 495411e..2a477b3 100644
--- a/Prj-Linux/lpr/CMakeLists.txt
+++ b/Prj-Linux/lpr/CMakeLists.txt
@@ -8,17 +8,11 @@ include_directories( ${OpenCV_INCLUDE_DIRS})
 include_directories(include)
 set(SRC_DETECTION src/PlateDetection.cpp src/util.h include/PlateDetection.h)
-
 set(SRC_FINEMAPPING src/FineMapping.cpp )
-
 set(SRC_FASTDESKEW src/FastDeskew.cpp )
-
 set(SRC_SEGMENTATION src/PlateSegmentation.cpp )
-
 set(SRC_RECOGNIZE src/Recognizer.cpp src/CNNRecognizer.cpp)
-
 set(SRC_PIPLINE src/Pipeline.cpp)
-
 set(SRC_SEGMENTATIONFREE src/SegmentationFreeRecognizer.cpp )
 #set(SOURCE_FILES main.cpp)
diff --git a/Prj-Linux/lpr/include/PlateDetection.h b/Prj-Linux/lpr/include/PlateDetection.h
index 71ad9af..f799554 100644
--- a/Prj-Linux/lpr/include/PlateDetection.h
+++ b/Prj-Linux/lpr/include/PlateDetection.h
@@ -15,19 +15,11 @@ namespace pr{
         PlateDetection();
         void LoadModel(std::string filename_cascade);
         void plateDetectionRough(cv::Mat InputImage,std::vector<pr::PlateInfo> &plateInfos,int min_w=36,int max_w=800);
-//        std::vector<pr::PlateInfo> plateDetectionRough(cv::Mat InputImage,int min_w= 60,int max_h = 400);
-
-
-//        std::vector<pr::PlateInfo> plateDetectionRoughByMultiScaleEdge(cv::Mat InputImage);
-
-
-
     private:
         cv::CascadeClassifier cascade;
-
-
     };
 }// namespace pr
 #endif //SWIFTPR_PLATEDETECTION_H
+
diff --git a/Prj-Linux/lpr/include/PlateInfo.h b/Prj-Linux/lpr/include/PlateInfo.h
index f500bb5..270cd9e 100644
--- a/Prj-Linux/lpr/include/PlateInfo.h
+++ b/Prj-Linux/lpr/include/PlateInfo.h
@@ -6,13 +6,9 @@
 #define SWIFTPR_PLATEINFO_H
 #include <opencv2/opencv.hpp>
 namespace pr {
-
     typedef std::vector<cv::Mat> Character;
-
     enum PlateColor { BLUE, YELLOW, WHITE, GREEN, BLACK,UNKNOWN};
     enum CharType {CHINESE,LETTER,LETTER_NUMS,INVALID};
-
-
     class PlateInfo {
     public:
         std::vector<std::pair<CharType,cv::Mat>> plateChars;
@@ -72,10 +68,6 @@
             plateCoding.push_back(charProb);
         }
-        // cv::Mat getPlateChars(int id) {
-        //     if(id<plateChars.size())
-        //         return plateChars[id];
-        // }
         std::string decodePlateNormal(std::vector<std::string> mappingTable) {
             std::string decode;
             for(auto plate:plateCoding) {
@@ -84,10 +76,6 @@
                 decode += mappingTable[std::max_element(prob,prob+31) - prob];
                 confidence+=*std::max_element(prob,prob+31);
-
-
-//                std::cout<<*std::max_element(prob,prob+31)<<std::endl;
diff --git a/Prj-Linux/lpr/src/FineMapping.cpp b/Prj-Linux/lpr/src/FineMapping.cpp
--- a/Prj-Linux/lpr/src/FineMapping.cpp
+++ b/Prj-Linux/lpr/src/FineMapping.cpp
@@ ... @@
         int front = static_cast<int>(prob.at<float>(0,0)*FinedVertical.cols);
@@ -35,9 +29,8 @@
         if(back>FinedVertical.cols-1)
             back=FinedVertical.cols - 1;
         cv::Mat cropped = FinedVertical.colRange(front,back).clone();
         return cropped;
-
-
     }
+
     std::pair<int,int> FitLineRansac(std::vector<cv::Point> pts,int zeroadd = 0 )
     {
         std::pair<int,int> res;
diff --git a/Prj-Linux/lpr/src/Pipeline.cpp b/Prj-Linux/lpr/src/Pipeline.cpp
index 95ae658..f34a09e 100644
--- a/Prj-Linux/lpr/src/Pipeline.cpp
+++ b/Prj-Linux/lpr/src/Pipeline.cpp
@@ -7,8 +7,6 @@
 namespace pr {
-
-
     const int HorizontalPadding = 4;
     PipelinePR::PipelinePR(std::string detector_filename,
                            std::string finemapping_prototxt, std::string finemapping_caffemodel,
@@ -20,35 +18,26 @@
         plateSegmentation = new PlateSegmentation(segmentation_prototxt, segmentation_caffemodel);
         generalRecognizer = new CNNRecognizer(charRecognization_proto, charRecognization_caffemodel);
         segmentationFreeRecognizer = new SegmentationFreeRecognizer(segmentationfree_proto,segmentationfree_caffemodel);
-
     }
 
     PipelinePR::~PipelinePR() {
-
         delete plateDetection;
         delete fineMapping;
         delete plateSegmentation;
         delete generalRecognizer;
         delete segmentationFreeRecognizer;
-
-
     }
 
     std::vector<PlateInfo> PipelinePR::RunPiplineAsImage(cv::Mat plateImage,int method) {
         std::vector<PlateInfo> results;
         std::vector<pr::PlateInfo> plates;
         plateDetection->plateDetectionRough(plateImage,plates,36,700);
-
         for (pr::PlateInfo plateinfo:plates) {
             cv::Mat image_finemapping = plateinfo.getPlateImage();
             image_finemapping = fineMapping->FineMappingVertical(image_finemapping);
             image_finemapping = pr::fastdeskew(image_finemapping, 5);
-
-
-
             //Segmentation-based
-
             if(method==SEGMENTATION_BASED_METHOD)
             {
                 image_finemapping = fineMapping->FineMappingHorizon(image_finemapping, 2, HorizontalPadding);
diff --git a/Prj-Linux/lpr/tests/test_detection.cpp b/Prj-Linux/lpr/tests/test_detection.cpp
index db73366..db6c0a7 100644
--- a/Prj-Linux/lpr/tests/test_detection.cpp
+++ b/Prj-Linux/lpr/tests/test_detection.cpp
@@ -29,6 +29,4 @@
     cv::imshow("image",image);
     cv::waitKey(0);
     return 0 ;
-
-
 }
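The detection test above exercises the same Haar cascade stage that the Python modules in this diff load from `model/cascade.xml`. For reference, a minimal OpenCV-Python sketch of the rough detection call, with the parameters taken from `detectPlateRough` elsewhere in this diff (the input path is a placeholder):

```python
# Python equivalent of the rough cascade detection the C++ test drives;
# "demo.jpg" is a placeholder input image.
import cv2

cascade = cv2.CascadeClassifier("model/cascade.xml")
gray = cv2.cvtColor(cv2.imread("demo.jpg"), cv2.COLOR_BGR2GRAY)
plates = cascade.detectMultiScale(gray, scaleFactor=1.08, minNeighbors=2,
                                  minSize=(36, 9), maxSize=(36 * 40, 9 * 40))
for (x, y, w, h) in plates:
    print(x, y, w, h)  # candidate plate boxes, before fine-mapping
```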
diff --git a/Prj-Linux/lpr/tests/test_pipeline.cpp b/Prj-Linux/lpr/tests/test_pipeline.cpp
index d480aa7..c355ac7 100644
--- a/Prj-Linux/lpr/tests/test_pipeline.cpp
+++ b/Prj-Linux/lpr/tests/test_pipeline.cpp
@@ -136,51 +136,31 @@ void TEST_PIPELINE(){
 void TEST_CAM()
 {
-
     cv::VideoCapture capture("test1.mp4");
     cv::Mat frame;
-
     pr::PipelinePR prc("model/cascade.xml",
                        "model/HorizonalFinemapping.prototxt","model/HorizonalFinemapping.caffemodel",
                        "model/Segmentation.prototxt","model/Segmentation.caffemodel",
                        "model/CharacterRecognization.prototxt","model/CharacterRecognization.caffemodel",
                        "model/SegmentationFree.prototxt","model/SegmentationFree.caffemodel"
     );
-
-
-
-
     while(1) {
         //读取下一帧
         if (!capture.read(frame)) {
             std::cout << "读取视频失败" << std::endl;
             exit(1);
         }
-//
-//        cv::transpose(frame,frame);
-//        cv::flip(frame,frame,2);
-
-//        cv::resize(frame,frame,cv::Size(frame.cols/2,frame.rows/2));
-
-
-
         std::vector<pr::PlateInfo> res = prc.RunPiplineAsImage(frame,pr::SEGMENTATION_FREE_METHOD);
         for(auto st:res) {
             if(st.confidence>0.75) {
                 std::cout << st.getPlateName() << " " << st.confidence << std::endl;
                 cv::Rect region = st.getPlateRect();
-
                 cv::rectangle(frame,cv::Point(region.x,region.y),cv::Point(region.x+region.width,region.y+region.height),cv::Scalar(255,255,0),2);
             }
         }
-
         cv::imshow("image",frame);
         cv::waitKey(1);
-
-
-
     }
 }
diff --git a/README.md b/README.md
index 17d7df7..496e763 100644
--- a/README.md
+++ b/README.md
@@ -15,14 +15,14 @@
 #### 快速上手
 
 ```python
-#导入包
+#导入hyperlpr
 from hyperlpr import *
 #导入OpenCV库
 import cv2
 #读入图片
 image = cv2.imread("demo.jpg")
 #识别结果
-print(HyperLPR_PlateRecogntion(image))
+print(HyperLPR_plate_recognition(image))
 ```
 
 #### Q&A
@@ -194,7 +194,7 @@ int main(){
 #### 获取帮助
 
-- HyperLPR讨论QQ群1: 673071218, 群2: 746123554(已满) ,加前请备注HyperLPR交流。
+- HyperLPR讨论QQ群1: 673071218, 群2: 746123554(已满) ,加前请备注HyperLPR交流,高精度版本商务合作可在群内联系。
 
 ### 作者和贡献者信息:
diff --git a/WebAPI.py b/WebAPI.py
index 19f8cf0..01509f3 100644
--- a/WebAPI.py
+++ b/WebAPI.py
@@ -17,15 +17,11 @@ app = Flask(__name__)
 def recognize(filename):
     image = cv2.imread(filename)
-    #通过文件名读入一张图片 放到 image中
-    return pipline.RecognizePlateJson(image)
-    #识别一张图片并返回json结果
-
-#识别函数
+    return HyperLPR_plate_recognition(image)
+    #return pipline.RecognizePlateJson(image)
 
 import base64
-
 def recognizeBase64(base64_code):
     file_bytes = np.asarray(bytearray(base64.b64decode(base64_code)),dtype=np.uint8)
     image_data_ndarray = cv2.imdecode(file_bytes,1)
@@ -46,7 +42,6 @@ def upload_file():
             print "识别时间",time.time() - t0
             return res
             #返回识别结果
-            # return 'file uploaded successfully'
 
     return render_template('upload.html')
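For clarity on the WebAPI change above: `recognizeBase64` rebuilds an OpenCV image from a base64 payload before handing it to recognition. A self-contained sketch of that round trip (the file name is a placeholder):

```python
# Round trip matching WebAPI.py's recognizeBase64: encode a file to base64,
# then decode it back into a cv2 image. "demo.jpg" is a placeholder input.
import base64
import cv2
import numpy as np

with open("demo.jpg", "rb") as f:
    b64 = base64.b64encode(f.read())

file_bytes = np.asarray(bytearray(base64.b64decode(b64)), dtype=np.uint8)
image = cv2.imdecode(file_bytes, 1)   # BGR image, as the endpoint sees it
print(image.shape)
```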
diff --git a/benchmark.py b/benchmark.py
deleted file mode 100644
index e59e5c1..0000000
--- a/benchmark.py
+++ /dev/null
@@ -1,75 +0,0 @@
-#coding=utf-8
-import os
-import numpy as np
-import cv2
-import json
-from hyperlpr import pipline as pp
-import sys
-from Levenshtein import StringMatcher as sm
-
-reload(sys)
-sys.setdefaultencoding("utf-8")
-parent = "you folder"
-
-def comparestring(a,b):
-    g = 0
-    if len(a) == len(b):
-        for x,y in zip(a,b):
-            if x!=y:
-                g+=1
-    return g
-
-count = 0
-count_p = 0
-count_d = 0
-count_lev = 0
-count_undetected = 0
-roi = [470,400,650,580]
-for filename in os.listdir(parent):
-    path = os.path.join(parent,filename)
-    print path
-    if path.endswith(".jpg") or path.endswith(".png"):
-        ics,name = os.path.split(path)
-        name,ext = name.split(".")
-        image = cv2.imread(path)
-        image = image[roi[1]:roi[1]+roi[3],roi[0]:roi[0]+roi[2]]
-        # cv2.imshow("test",image)
-        # cv2.waitKey(0)
-        info,dataset = pp.SimpleRecognizePlate(image)
-        ext = ext.strip()
-        name = name.strip()
-        if len(dataset)==0:
-            count_undetected +=1
-            # cv2.imwrite("./cache/bad2/" + name + ".png", image)
-        for one in dataset:
-            # p = sm.StringMatcher(seq1=one.encode("utf-8"),seq2=name.encode("utf-8"))
-            A = one.decode("utf-8")
-            B = name.decode("utf-8")
-            print one.decode("utf-8"),"<->",name.decode("utf-8"),"编辑距离:",comparestring(A,B)
-            if comparestring(A,B)<3:
-                count_lev+=1
-            else:
-                cv2.imwrite("./cache/bad2/"+B+"->"+A+".png",image)
-            if one.decode("utf-8") == name.decode("utf-8"):
-                count_p+=1
-                break
-            else:
-                print "error",one.decode("utf-8"), name.decode("utf-8")
-                count_d+=1
-        # cv2.imshow("image",image)
-        # cv2.waitKey(0)
-        # break
-        count+=1
-        print count_p / float(count),"编辑距离[1]:",count_lev/float(count),u"识出",count_p,u"总数",count,u"未识出",count_d,u"未检测出",count_undetected
-        if count_p+count_d+count_undetected!=count:
-            print dataset,len(dataset)
-            # exit(0)
diff --git a/config.json b/config.json
deleted file mode 100644
index eb1bb44..0000000
--- a/config.json
+++ /dev/null
@@ -1,18 +0,0 @@
-{
-    "global":{
-        "debug":true
-    },
-    "log":{
-    },
-    "detect":{
-    },
-    "recognize":{
-    },
-    "detectTest":{
-        "detectPath":"/Users/universe/Documents/work/data/Plate/boundingbox",
-        "outputPath":"/Users/universe/ProgramUniverse/python/Forked/HyperLPR/out/detect_test"
-    },
-    "colorTest":{
-        "colorPath":"/Users/universe/Documents/work/data/Plate/colour/u"
-    }
-}
\ No newline at end of file
diff --git a/demo.py b/demo.py
deleted file mode 100644
index d39ab23..0000000
--- a/demo.py
+++ /dev/null
@@ -1,59 +0,0 @@
-import sys
-reload(sys)
-sys.setdefaultencoding("utf-8")
-
-import time
-
-def SpeedTest(image_path):
-    grr = cv2.imread(image_path)
-    model = pr.LPR("model/cascade.xml", "model/model12.h5", "model/ocr_plate_all_gru.h5")
-    model.SimpleRecognizePlateByE2E(grr)
-    t0 = time.time()
-    for x in range(20):
-        model.SimpleRecognizePlateByE2E(grr)
-    t = (time.time() - t0)/20.0
-    print "Image size :" + str(grr.shape[1])+"x"+str(grr.shape[0]) + " need " + str(round(t*1000,2))+"ms"
-
-from PIL import ImageFont
-from PIL import Image
-from PIL import ImageDraw
-fontC = ImageFont.truetype("./Font/platech.ttf", 14, 0)
-
-def drawRectBox(image,rect,addText):
-    cv2.rectangle(image, (int(rect[0]), int(rect[1])), (int(rect[0] + rect[2]), int(rect[1] + rect[3])), (0,0, 255), 2, cv2.LINE_AA)
-    cv2.rectangle(image, (int(rect[0]-1), int(rect[1])-16), (int(rect[0] + 115), int(rect[1])), (0, 0, 255), -1, cv2.LINE_AA)
-    img = Image.fromarray(image)
-    draw = ImageDraw.Draw(img)
-    draw.text((int(rect[0]+1), int(rect[1]-16)), addText.decode("utf-8"), (255, 255, 255), font=fontC)
-    imagex = np.array(img)
-    return imagex
-
-import HyperLPRLite as pr
-import cv2
-import numpy as np
-grr = cv2.imread("images_rec/2_.jpg")
-model = pr.LPR("model/cascade.xml","model/model12.h5","model/ocr_plate_all_gru.h5")
-for pstr,confidence,rect in model.SimpleRecognizePlateByE2E(grr):
-    if confidence>0.7:
-        image = drawRectBox(grr, rect, pstr+" "+str(round(confidence,3)))
-        print "plate_str:"
-        print pstr
-        print "plate_confidence"
-        print confidence
-
-cv2.imshow("image",image)
-cv2.waitKey(0)
-
-SpeedTest("images_rec/2_.jpg")
diff --git a/hyperlpr/__init__.py b/hyperlpr/__init__.py
deleted file mode 100644
index e69de29..0000000
diff --git a/hyperlpr/cache.py b/hyperlpr/cache.py
deleted file mode 100644
index 378d0b2..0000000
--- a/hyperlpr/cache.py
+++ /dev/null
@@ -1,11 +0,0 @@
-import cv2
-import os
-import hashlib
-
-def verticalMappingToFolder(image):
-    name = hashlib.md5(image.data).hexdigest()[:8]
-    print name
-    cv2.imwrite("./cache/finemapping/"+name+".png",image)
diff --git
a/hyperlpr/colourDetection.py b/hyperlpr/colourDetection.py deleted file mode 100644 index 5a64d56..0000000 --- a/hyperlpr/colourDetection.py +++ /dev/null @@ -1,103 +0,0 @@ -# -- coding: UTF-8 -import cv2 -import matplotlib.pyplot as plt -from sklearn.cluster import KMeans -import os - -boundaries = [ - ([100,80,0],[240,220,110]), # yellow - ([0,40,50],[110,180,250]), # blue - ([0,60,0],[60,160,70]), # green -] -color_attr = ["黄牌","蓝牌",'绿牌','白牌','黑牌'] - -threhold_green = 13 -threhold_blue = 13 -threhold_yellow1 = 50 -threhold_yellow2 = 70 - -# plt.figure() -# plt.axis("off") -# plt.imshow(image) -# plt.show() - -import numpy as np -def centroid_histogram(clt): - numLabels = np.arange(0, len(np.unique(clt.labels_)) + 1) - (hist, _) = np.histogram(clt.labels_, bins=numLabels) - - # normalize the histogram, such that it sums to one - hist = hist.astype("float") - hist /= hist.sum() - - # return the histogram - return hist - - -def plot_colors(hist, centroids): - bar = np.zeros((50, 300, 3), dtype="uint8") - startX = 0 - - for (percent, color) in zip(hist, centroids): - - endX = startX + (percent * 300) - cv2.rectangle(bar, (int(startX), 0), (int(endX), 50), - color.astype("uint8").tolist(), -1) - startX = endX - - # return the bar chart - return bar - -def search_boundaries(color): - for i,color_bound in enumerate(boundaries): - if np.all(color >= color_bound[0]) and np.all(color <= color_bound[1]): - return i - return -1 - -def judge_color(color): - r = color[0] - g = color[1] - b = color[2] - if g - r >= threhold_green and g - b >= threhold_green: - return 2 - if b - r >= threhold_blue and b - g >= threhold_blue: - return 1 - if r- b > threhold_yellow2 and g - b > threhold_yellow2: - return 0 - if r > 200 and b > 200 and g > 200: - return 3 - if r < 50 and b < 50 and g < 50: - return 4 - return -1 - -def judge_plate_color(img): - image = cv2.cvtColor(img, cv2.COLOR_BGR2RGB) - image = image.reshape((image.shape[0] * image.shape[1], 3)) - clt = KMeans(n_clusters=2) - clt.fit(image) - - hist = centroid_histogram(clt) - index = np.argmax(hist) - #print clt.cluster_centers_[index] - #color_index = search_boundaries(clt.cluster_centers_[index]) - color_index = judge_color(clt.cluster_centers_[index]) - if color_index == -1: - if index == 0: - secound_index = 1 - else: - secound_index = 0 - color_index = judge_color(clt.cluster_centers_[secound_index]) - - if color_index == -1: - print clt.cluster_centers_ - bar = plot_colors(hist, clt.cluster_centers_) - # show our color bart - plt.figure() - plt.axis("off") - plt.imshow(bar) - plt.show() - - if color_index != -1: - return color_attr[color_index],clt.cluster_centers_[index] - else: - return None,clt.cluster_centers_[index] \ No newline at end of file diff --git a/hyperlpr/config.py b/hyperlpr/config.py deleted file mode 100644 index 40f2099..0000000 --- a/hyperlpr/config.py +++ /dev/null @@ -1,6 +0,0 @@ -import json - - - -with open("/Users/universe/ProgramUniverse/zeusees/HyperLPR/config.json") as f: - configuration = json.load(f) diff --git a/hyperlpr/deskew.py b/hyperlpr/deskew.py deleted file mode 100644 index 78a4871..0000000 --- a/hyperlpr/deskew.py +++ /dev/null @@ -1,111 +0,0 @@ -#coding=utf-8 -import numpy as np -import cv2 -import time; -from matplotlib import pyplot as plt -import math - -from scipy.ndimage import filters -# -# def strokeFiter(): -# pass; - -def angle(x,y): - return int(math.atan2(float(y),float(x))*180.0/3.1415); - -def h_rot(src, angle, scale=1.): - w = src.shape[1] - h = src.shape[0] - rangle = np.deg2rad(angle) - 
nw = (abs(np.sin(rangle)*h) + abs(np.cos(rangle)*w))*scale - nh = (abs(np.cos(rangle)*h) + abs(np.sin(rangle)*w))*scale - - rot_mat = cv2.getRotationMatrix2D((nw*0.5, nh*0.5), angle, scale) - - rot_move = np.dot(rot_mat, np.array([(nw-w)*0.5, (nh-h)*0.5,0])) - - rot_mat[0,2] += rot_move[0] - rot_mat[1,2] += rot_move[1] - return cv2.warpAffine(src, rot_mat, (int(math.ceil(nw)), int(math.ceil(nh))), flags=cv2.INTER_LANCZOS4) - pass - - -def v_rot(img,angel,shape,max_angel): - - size_o = [shape[1],shape[0]] - - size = (shape[1]+ int(shape[0]*np.cos((float(max_angel )/180) * 3.14)),shape[0]) - - - interval = abs( int( np.sin((float(angel) /180) * 3.14)* shape[0])); - - pts1 = np.float32([[0,0] ,[0,size_o[1]],[size_o[0],0],[size_o[0],size_o[1]]]) - if(angel>0): - - pts2 = np.float32([[interval,0],[0,size[1] ],[size[0],0 ],[size[0]-interval,size_o[1]]]) - else: - pts2 = np.float32([[0,0],[interval,size[1] ],[size[0]-interval,0 ],[size[0],size_o[1]]]) - - M = cv2.getPerspectiveTransform(pts1,pts2); - dst = cv2.warpPerspective(img,M,size); - return dst,M; - -def skew_detection(image_gray): - h, w = image_gray.shape[:2] - eigen = cv2.cornerEigenValsAndVecs(image_gray,12, 5) - angle_sur = np.zeros(180,np.uint); - eigen = eigen.reshape(h, w, 3, 2) - flow = eigen[:,:,2] - vis = image_gray.copy() - vis[:] = (192 + np.uint32(vis)) / 2 - d = 12 - points = np.dstack( np.mgrid[d/2:w:d, d/2:h:d] ).reshape(-1, 2) - for x, y in points: - vx, vy = np.int32(flow[y, x]*d) - # cv2.line(rgb, (x-vx, y-vy), (x+vx, y+vy), (0, 355, 0), 1, cv2.LINE_AA) - ang = angle(vx,vy); - angle_sur[(ang+180)%180] +=1; - # torr_bin = 30 - angle_sur = angle_sur.astype(np.float) - angle_sur = (angle_sur-angle_sur.min())/(angle_sur.max()-angle_sur.min()) - angle_sur = filters.gaussian_filter1d(angle_sur,5) - skew_v_val = angle_sur[20:180-20].max(); - skew_v = angle_sur[30:180-30].argmax() + 30; - skew_h_A = angle_sur[0:30].max() - skew_h_B = angle_sur[150:180].max() - skew_h = 0; - if (skew_h_A > skew_v_val*0.3 or skew_h_B > skew_v_val*0.3): - if skew_h_A>=skew_h_B: - skew_h = angle_sur[0:20].argmax() - else: - skew_h = - angle_sur[160:180].argmax() - return skew_h,skew_v - - - - -def fastDeskew(image): - image_gray = cv2.cvtColor(image,cv2.COLOR_BGR2GRAY) - skew_h,skew_v = skew_detection(image_gray) - - print "校正角度 h ",skew_h,"v",skew_v - - deskew,M = v_rot(image,int((90-skew_v)*1.5),image.shape,60) - return deskew,M - - - -if __name__ == '__main__': - fn = 'test_data/test4.png' - - img = cv2.imread(fn) - gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) - skew_h,skew_v = skew_detection(img,gray) - img = v_rot(img,(90-skew_v ),img.shape,60) - # img = h_rot(img,skew_h) - # if img.shape[0]>img.shape[1]: - # img = h_rot(img, -90) - - plt.show() - - cv2.waitKey() diff --git a/hyperlpr/detect.py b/hyperlpr/detect.py deleted file mode 100644 index 90865e7..0000000 --- a/hyperlpr/detect.py +++ /dev/null @@ -1,76 +0,0 @@ - -import cv2 -import numpy as np - - - -watch_cascade = cv2.CascadeClassifier('./model/cascade.xml') - - -def computeSafeRegion(shape,bounding_rect): - top = bounding_rect[1] # y - bottom = bounding_rect[1] + bounding_rect[3] # y + h - left = bounding_rect[0] # x - right = bounding_rect[0] + bounding_rect[2] # x + w - - min_top = 0 - max_bottom = shape[0] - min_left = 0 - max_right = shape[1] - - # print "computeSateRegion input shape",shape - if top < min_top: - top = min_top - # print "tap top 0" - if left < min_left: - left = min_left - # print "tap left 0" - - if bottom > max_bottom: - bottom = max_bottom - #print "tap 
max_bottom max" - if right > max_right: - right = max_right - #print "tap max_right max" - - # print "corr",left,top,right,bottom - return [left,top,right-left,bottom-top] - - -def cropped_from_image(image,rect): - x, y, w, h = computeSafeRegion(image.shape,rect) - return image[y:y+h,x:x+w] - - -def detectPlateRough(image_gray,resize_h = 720,en_scale =1.08 ,top_bottom_padding_rate = 0.05): - print image_gray.shape - - if top_bottom_padding_rate>0.2: - print "error:top_bottom_padding_rate > 0.2:",top_bottom_padding_rate - exit(1) - - height = image_gray.shape[0] - padding = int(height*top_bottom_padding_rate) - scale = image_gray.shape[1]/float(image_gray.shape[0]) - - image = cv2.resize(image_gray, (int(scale*resize_h), resize_h)) - - image_color_cropped = image[padding:resize_h-padding,0:image_gray.shape[1]] - - image_gray = cv2.cvtColor(image_color_cropped,cv2.COLOR_RGB2GRAY) - - watches = watch_cascade.detectMultiScale(image_gray, en_scale, 2, minSize=(36, 9),maxSize=(36*40, 9*40)) - - cropped_images = [] - for (x, y, w, h) in watches: - cropped_origin = cropped_from_image(image_color_cropped, (int(x), int(y), int(w), int(h))) - x -= w * 0.14 - w += w * 0.28 - y -= h * 0.6 - h += h * 1.1; - - cropped = cropped_from_image(image_color_cropped, (int(x), int(y), int(w), int(h))) - - - cropped_images.append([cropped,[x, y+padding, w, h],cropped_origin]) - return cropped_images diff --git a/hyperlpr/e2e.py b/hyperlpr/e2e.py deleted file mode 100755 index 6333ff4..0000000 --- a/hyperlpr/e2e.py +++ /dev/null @@ -1,63 +0,0 @@ -#coding=utf-8 -from keras import backend as K -from keras.models import load_model -from keras.layers import * -import numpy as np -import random -import string - -import cv2 -import e2emodel as model -chars = [u"京", u"沪", u"津", u"渝", u"冀", u"晋", u"蒙", u"辽", u"吉", u"黑", u"苏", u"浙", u"皖", u"闽", u"赣", u"鲁", u"豫", u"鄂", u"湘", u"粤", u"桂", - u"琼", u"川", u"贵", u"云", u"藏", u"陕", u"甘", u"青", u"宁", u"新", u"0", u"1", u"2", u"3", u"4", u"5", u"6", u"7", u"8", u"9", u"A", - u"B", u"C", u"D", u"E", u"F", u"G", u"H", u"J", u"K", u"L", u"M", u"N", u"P", u"Q", u"R", u"S", u"T", u"U", u"V", u"W", u"X", - u"Y", u"Z",u"港",u"学",u"使",u"警",u"澳",u"挂",u"军",u"北",u"南",u"广",u"沈",u"兰",u"成",u"济",u"海",u"民",u"航",u"空" - ]; -pred_model = model.construct_model("./model/ocr_plate_all_w_rnn_2.h5",) -import time - - - -def fastdecode(y_pred): - results = "" - confidence = 0.0 - table_pred = y_pred.reshape(-1, len(chars)+1) - - res = table_pred.argmax(axis=1) - - for i,one in enumerate(res): - if one=2: - [vx, vy, x, y] = cv2.fitLine(pts, cv2.DIST_HUBER, 0, 0.01, 0.01) - lefty = int((-x * vy / vx) + y) - righty = int(((136- x) * vy / vx) + y) - return lefty+30+zero_add,righty+30+zero_add - return 0,0 - - - -#精定位算法 -def findContoursAndDrawBoundingBox(image_rgb): - - - line_upper = []; - line_lower = []; - - line_experiment = [] - grouped_rects = [] - gray_image = cv2.cvtColor(image_rgb,cv2.COLOR_BGR2GRAY) - - # for k in np.linspace(-1.5, -0.2,10): - for k in np.linspace(-50, 0, 15): - - # thresh_niblack = threshold_niblack(gray_image, window_size=21, k=k) - # binary_niblack = gray_image > thresh_niblack - # binary_niblack = binary_niblack.astype(np.uint8) * 255 - - binary_niblack = cv2.adaptiveThreshold(gray_image,255,cv2.ADAPTIVE_THRESH_MEAN_C,cv2.THRESH_BINARY,17,k) - # cv2.imshow("image1",binary_niblack) - # cv2.waitKey(0) - imagex, contours, hierarchy = cv2.findContours(binary_niblack.copy(),cv2.RETR_EXTERNAL,cv2.CHAIN_APPROX_SIMPLE) - for contour in contours: - bdbox = cv2.boundingRect(contour) - if 
(bdbox[3]/float(bdbox[2])>0.7 and bdbox[3]*bdbox[2]>100 and bdbox[3]*bdbox[2]<1200) or (bdbox[3]/float(bdbox[2])>3 and bdbox[3]*bdbox[2]<100): - # cv2.rectangle(rgb,(bdbox[0],bdbox[1]),(bdbox[0]+bdbox[2],bdbox[1]+bdbox[3]),(255,0,0),1) - line_upper.append([bdbox[0],bdbox[1]]) - line_lower.append([bdbox[0]+bdbox[2],bdbox[1]+bdbox[3]]) - - line_experiment.append([bdbox[0],bdbox[1]]) - line_experiment.append([bdbox[0]+bdbox[2],bdbox[1]+bdbox[3]]) - # grouped_rects.append(bdbox) - - rgb = cv2.copyMakeBorder(image_rgb,30,30,0,0,cv2.BORDER_REPLICATE) - leftyA, rightyA = fitLine_ransac(np.array(line_lower),3) - rows,cols = rgb.shape[:2] - - # rgb = cv2.line(rgb, (cols - 1, rightyA), (0, leftyA), (0, 0, 255), 1,cv2.LINE_AA) - - leftyB, rightyB = fitLine_ransac(np.array(line_upper),-3) - - rows,cols = rgb.shape[:2] - - # rgb = cv2.line(rgb, (cols - 1, rightyB), (0, leftyB), (0,255, 0), 1,cv2.LINE_AA) - pts_map1 = np.float32([[cols - 1, rightyA], [0, leftyA],[cols - 1, rightyB], [0, leftyB]]) - pts_map2 = np.float32([[136,36],[0,36],[136,0],[0,0]]) - mat = cv2.getPerspectiveTransform(pts_map1,pts_map2) - image = cv2.warpPerspective(rgb,mat,(136,36),flags=cv2.INTER_CUBIC) - image,M = deskew.fastDeskew(image) - - return image - - - -#多级 -def findContoursAndDrawBoundingBox2(image_rgb): - - - line_upper = []; - line_lower = []; - - line_experiment = [] - - grouped_rects = [] - - gray_image = cv2.cvtColor(image_rgb,cv2.COLOR_BGR2GRAY) - - for k in np.linspace(-1.6, -0.2,10): - # for k in np.linspace(-15, 0, 15): - # # - # thresh_niblack = threshold_niblack(gray_image, window_size=21, k=k) - # binary_niblack = gray_image > thresh_niblack - # binary_niblack = binary_niblack.astype(np.uint8) * 255 - - binary_niblack = nt.niBlackThreshold(gray_image,19,k) - # cv2.imshow("binary_niblack_opencv",binary_niblack_) - # cv2.imshow("binary_niblack_skimage", binary_niblack) - - # cv2.waitKey(0) - imagex, contours, hierarchy = cv2.findContours(binary_niblack.copy(),cv2.RETR_EXTERNAL,cv2.CHAIN_APPROX_SIMPLE) - - for contour in contours: - bdbox = cv2.boundingRect(contour) - if (bdbox[3]/float(bdbox[2])>0.7 and bdbox[3]*bdbox[2]>100 and bdbox[3]*bdbox[2]<1000) or (bdbox[3]/float(bdbox[2])>3 and bdbox[3]*bdbox[2]<100): - # cv2.rectangle(rgb,(bdbox[0],bdbox[1]),(bdbox[0]+bdbox[2],bdbox[1]+bdbox[3]),(255,0,0),1) - line_upper.append([bdbox[0],bdbox[1]]) - line_lower.append([bdbox[0]+bdbox[2],bdbox[1]+bdbox[3]]) - - line_experiment.append([bdbox[0],bdbox[1]]) - line_experiment.append([bdbox[0]+bdbox[2],bdbox[1]+bdbox[3]]) - # grouped_rects.append(bdbox) - - rgb = cv2.copyMakeBorder(image_rgb,30,30,0,0,cv2.BORDER_REPLICATE) - leftyA, rightyA = fitLine_ransac(np.array(line_lower),2) - rows,cols = rgb.shape[:2] - - # rgb = cv2.line(rgb, (cols - 1, rightyA), (0, leftyA), (0, 0, 255), 1,cv2.LINE_AA) - - leftyB, rightyB = fitLine_ransac(np.array(line_upper),-4) - - rows,cols = rgb.shape[:2] - - # rgb = cv2.line(rgb, (cols - 1, rightyB), (0, leftyB), (0,255, 0), 1,cv2.LINE_AA) - pts_map1 = np.float32([[cols - 1, rightyA], [0, leftyA],[cols - 1, rightyB], [0, leftyB]]) - pts_map2 = np.float32([[136,36],[0,36],[136,0],[0,0]]) - mat = cv2.getPerspectiveTransform(pts_map1,pts_map2) - image = cv2.warpPerspective(rgb,mat,(136,36),flags=cv2.INTER_CUBIC) - image,M= deskew.fastDeskew(image) - - - return image diff --git a/hyperlpr/finemapping_vertical.py b/hyperlpr/finemapping_vertical.py deleted file mode 100644 index 1d6eeb6..0000000 --- a/hyperlpr/finemapping_vertical.py +++ /dev/null @@ -1,92 +0,0 @@ -#coding=utf-8 -from keras.layers 
import Conv2D, Input,MaxPool2D, Reshape,Activation,Flatten, Dense -from keras.models import Model, Sequential -from keras.layers.advanced_activations import PReLU -from keras.optimizers import adam -import numpy as np - -import cv2 - -def getModel(): - input = Input(shape=[16, 66, 3]) # change this shape to [None,None,3] to enable arbitraty shape input - x = Conv2D(10, (3, 3), strides=1, padding='valid', name='conv1')(input) - x = Activation("relu", name='relu1')(x) - x = MaxPool2D(pool_size=2)(x) - x = Conv2D(16, (3, 3), strides=1, padding='valid', name='conv2')(x) - x = Activation("relu", name='relu2')(x) - x = Conv2D(32, (3, 3), strides=1, padding='valid', name='conv3')(x) - x = Activation("relu", name='relu3')(x) - x = Flatten()(x) - output = Dense(2,name = "dense")(x) - output = Activation("relu", name='relu4')(output) - model = Model([input], [output]) - return model - - - -model = getModel() -model.load_weights("./model/model12.h5") - - -def getmodel(): - return model - -def gettest_model(): - input = Input(shape=[16, 66, 3]) # change this shape to [None,None,3] to enable arbitraty shape input - A = Conv2D(10, (3, 3), strides=1, padding='valid', name='conv1')(input) - B = Activation("relu", name='relu1')(A) - C = MaxPool2D(pool_size=2)(B) - x = Conv2D(16, (3, 3), strides=1, padding='valid', name='conv2')(C) - x = Activation("relu", name='relu2')(x) - x = Conv2D(32, (3, 3), strides=1, padding='valid', name='conv3')(x) - K = Activation("relu", name='relu3')(x) - - - x = Flatten()(K) - dense = Dense(2,name = "dense")(x) - output = Activation("relu", name='relu4')(dense) - x = Model([input], [output]) - x.load_weights("./model/model12.h5") - ok = Model([input], [dense]) - - for layer in ok.layers: - print layer - - return ok - - - - -def finemappingVertical(image): - resized = cv2.resize(image,(66,16)) - resized = resized.astype(np.float)/255 - res= model.predict(np.array([resized]))[0] - print "keras_predict",res - res =res*image.shape[1] - res = res.astype(np.int) - H,T = res - H-=3 - #3 79.86 - #4 79.3 - #5 79.5 - #6 78.3 - - - #T - #T+1 80.9 - #T+2 81.75 - #T+3 81.75 - - - - if H<0: - H=0 - T+=2; - - if T>= image.shape[1]-1: - T= image.shape[1]-1 - - image = image[0:35,H:T+2] - - image = cv2.resize(image, (int(136), int(36))) - return image \ No newline at end of file diff --git a/hyperlpr/niblack_thresholding.py b/hyperlpr/niblack_thresholding.py deleted file mode 100644 index 652bae0..0000000 --- a/hyperlpr/niblack_thresholding.py +++ /dev/null @@ -1,18 +0,0 @@ -import cv2 -import numpy as np - - - -def niBlackThreshold( src, blockSize, k, binarizationMethod= 0 ): - mean = cv2.boxFilter(src,cv2.CV_32F,(blockSize, blockSize),borderType=cv2.BORDER_REPLICATE) - sqmean = cv2.sqrBoxFilter(src, cv2.CV_32F, (blockSize, blockSize), borderType = cv2.BORDER_REPLICATE) - variance = sqmean - (mean*mean) - stddev = np.sqrt(variance) - thresh = mean + stddev * float(-k) - thresh = thresh.astype(src.dtype) - k = (src>thresh)*255 - k = k.astype(np.uint8) - return k - - -# cv2.imshow() \ No newline at end of file diff --git a/hyperlpr/pipline.py b/hyperlpr/pipline.py deleted file mode 100644 index 4536b1d..0000000 --- a/hyperlpr/pipline.py +++ /dev/null @@ -1,264 +0,0 @@ -#coding=utf-8 -import detect -import finemapping as fm - -import segmentation -import cv2 - -import time -import numpy as np - -from PIL import ImageFont -from PIL import Image -from PIL import ImageDraw -import json - -import sys -import typeDistinguish as td - - -reload(sys) -sys.setdefaultencoding("utf-8") - -fontC = 
ImageFont.truetype("./Font/platech.ttf", 14, 0); - -import e2e -#寻找车牌左右边界 - -def find_edge(image): - sum_i = image.sum(axis=0) - sum_i = sum_i.astype(np.float) - sum_i/=image.shape[0]*255 - # print sum_i - - start= 0 ; - end = image.shape[1]-1 - - for i,one in enumerate(sum_i): - if one>0.4: - start = i; - if start-3<0: - start = 0 - else: - start -=3 - - break; - for i,one in enumerate(sum_i[::-1]): - - if one>0.4: - end = end - i; - if end+4>image.shape[1]-1: - end = image.shape[1]-1 - else: - end+=4 - break - return start,end - - -#垂直边缘检测 - -def verticalEdgeDetection(image): - image_sobel = cv2.Sobel(image.copy(),cv2.CV_8U,1,0) - # image = auto_canny(image_sobel) - - # img_sobel, CV_8U, 1, 0, 3, 1, 0, BORDER_DEFAULT - # canny_image = auto_canny(image) - flag,thres = cv2.threshold(image_sobel,0,255,cv2.THRESH_OTSU|cv2.THRESH_BINARY) - print flag - flag,thres = cv2.threshold(image_sobel,int(flag*0.7),255,cv2.THRESH_BINARY) - # thres = simpleThres(image_sobel) - kernal = np.ones(shape=(3,15)) - thres = cv2.morphologyEx(thres,cv2.MORPH_CLOSE,kernal) - return thres - -#确定粗略的左右边界 -def horizontalSegmentation(image): - - thres = verticalEdgeDetection(image) - # thres = thres*image - head,tail = find_edge(thres) - # print head,tail - # cv2.imshow("edge",thres) - tail = tail+5 - if tail>135: - tail = 135 - image = image[0:35,head:tail] - image = cv2.resize(image, (int(136), int(36))) - return image - - - -#打上boundingbox和标签 -def drawRectBox(image,rect,addText): - cv2.rectangle(image, (int(rect[0]), int(rect[1])), (int(rect[0] + rect[2]), int(rect[1] + rect[3])), (0,0, 255), 2,cv2.LINE_AA) - cv2.rectangle(image, (int(rect[0]-1), int(rect[1])-16), (int(rect[0] + 115), int(rect[1])), (0, 0, 255), -1, - cv2.LINE_AA) - - img = Image.fromarray(image) - draw = ImageDraw.Draw(img) - draw.text((int(rect[0]+1), int(rect[1]-16)), addText.decode("utf-8"), (255, 255, 255), font=fontC) - imagex = np.array(img) - - return imagex - - - - -import cache -import finemapping_vertical as fv - - -def RecognizePlateJson(image): - - images = detect.detectPlateRough(image,image.shape[0],top_bottom_padding_rate=0.1) - - jsons = [] - - for j,plate in enumerate(images): - - - plate,rect,origin_plate =plate - res, confidence = e2e.recognizeOne(origin_plate) - print "res",res - - cv2.imwrite("./"+str(j)+"_rough.jpg",plate) - - # print "车牌类型:",ptype - # plate = cv2.cvtColor(plate, cv2.COLOR_RGB2GRAY) - plate =cv2.resize(plate,(136,int(36*2.5))) - t1 = time.time() - - - ptype = td.SimplePredict(plate) - if ptype>0 and ptype<4: - plate = cv2.bitwise_not(plate) - # demo = verticalEdgeDetection(plate) - - image_rgb = fm.findContoursAndDrawBoundingBox(plate) - image_rgb = fv.finemappingVertical(image_rgb) - cache.verticalMappingToFolder(image_rgb) - # print time.time() - t1,"校正" - print "e2e:",e2e.recognizeOne(image_rgb)[0] - image_gray = cv2.cvtColor(image_rgb,cv2.COLOR_BGR2GRAY) - - - cv2.imwrite("./"+str(j)+".jpg",image_gray) - # image_gray = horizontalSegmentation(image_gray) - - - t2 = time.time() - res, confidence = e2e.recognizeOne(image_rgb) - res_json = {} - if confidence > 0.6: - res_json["Name"] = res - res_json["Type"] = td.plateType[ptype] - res_json["Confidence"] = confidence; - res_json["x"] = int(rect[0]) - res_json["y"] = int(rect[1]) - res_json["w"] = int(rect[2]) - res_json["h"] = int(rect[3]) - jsons.append(res_json) - print json.dumps(jsons,ensure_ascii=False,encoding="gb2312") - - return json.dumps(jsons,ensure_ascii=False,encoding="gb2312") - - - - -def SimpleRecognizePlateByE2E(image): - t0 = time.time() - 
images = detect.detectPlateRough(image,image.shape[0],top_bottom_padding_rate=0.1) - res_set = [] - for j,plate in enumerate(images): - plate, rect, origin_plate =plate - # plate = cv2.cvtColor(plate, cv2.COLOR_RGB2GRAY) - plate =cv2.resize(plate,(136,36*2)) - res,confidence = e2e.recognizeOne(origin_plate) - print "res",res - - t1 = time.time() - ptype = td.SimplePredict(plate) - if ptype>0 and ptype<5: - # pass - plate = cv2.bitwise_not(plate) - image_rgb = fm.findContoursAndDrawBoundingBox(plate) - image_rgb = fv.finemappingVertical(image_rgb) - image_rgb = fv.finemappingVertical(image_rgb) - cache.verticalMappingToFolder(image_rgb) - cv2.imwrite("./"+str(j)+".jpg",image_rgb) - res,confidence = e2e.recognizeOne(image_rgb) - print res,confidence - res_set.append([[],res,confidence]) - - if confidence>0.7: - image = drawRectBox(image, rect, res+" "+str(round(confidence,3))) - return image,res_set - - - - - - - -def SimpleRecognizePlate(image): - t0 = time.time() - images = detect.detectPlateRough(image,image.shape[0],top_bottom_padding_rate=0.1) - res_set = [] - for j,plate in enumerate(images): - plate, rect, origin_plate =plate - # plate = cv2.cvtColor(plate, cv2.COLOR_RGB2GRAY) - plate =cv2.resize(plate,(136,36*2)) - t1 = time.time() - - ptype = td.SimplePredict(plate) - if ptype>0 and ptype<5: - plate = cv2.bitwise_not(plate) - - image_rgb = fm.findContoursAndDrawBoundingBox(plate) - - image_rgb = fv.finemappingVertical(image_rgb) - cache.verticalMappingToFolder(image_rgb) - print "e2e:", e2e.recognizeOne(image_rgb) - image_gray = cv2.cvtColor(image_rgb,cv2.COLOR_RGB2GRAY) - - # image_gray = horizontalSegmentation(image_gray) - cv2.imshow("image_gray",image_gray) - # cv2.waitKey() - - cv2.imwrite("./"+str(j)+".jpg",image_gray) - # cv2.imshow("image",image_gray) - # cv2.waitKey(0) - print "校正",time.time() - t1,"s" - # cv2.imshow("image,",image_gray) - # cv2.waitKey(0) - t2 = time.time() - val = segmentation.slidingWindowsEval(image_gray) - # print val - print "分割和识别",time.time() - t2,"s" - if len(val)==3: - blocks, res, confidence = val - if confidence/7>0.7: - image = drawRectBox(image,rect,res) - res_set.append(res) - for i,block in enumerate(blocks): - - block_ = cv2.resize(block,(25,25)) - block_ = cv2.cvtColor(block_,cv2.COLOR_GRAY2BGR) - image[j * 25:(j * 25) + 25, i * 25:(i * 25) + 25] = block_ - if image[j*25:(j*25)+25,i*25:(i*25)+25].shape == block_.shape: - pass - - - if confidence>0: - print "车牌:",res,"置信度:",confidence/7 - else: - pass - - # print "不确定的车牌:", res, "置信度:", confidence - - print time.time() - t0,"s" - return image,res_set - - - - diff --git a/hyperlpr/plateStructure.py b/hyperlpr/plateStructure.py deleted file mode 100644 index e69de29..0000000 diff --git a/hyperlpr/precise.py b/hyperlpr/precise.py deleted file mode 100644 index e69de29..0000000 diff --git a/hyperlpr/recognizer.py b/hyperlpr/recognizer.py deleted file mode 100644 index 4280a4e..0000000 --- a/hyperlpr/recognizer.py +++ /dev/null @@ -1,154 +0,0 @@ -#coding=utf-8 -from keras.models import Sequential -from keras.layers import Dense, Dropout, Activation, Flatten -from keras.layers import Conv2D,MaxPool2D -from keras.optimizers import SGD -from keras import backend as K - -K.set_image_dim_ordering('tf') - - -import cv2 -import numpy as np - - - -index = {u"京": 0, u"沪": 1, u"津": 2, u"渝": 3, u"冀": 4, u"晋": 5, u"蒙": 6, u"辽": 7, u"吉": 8, u"黑": 9, u"苏": 10, u"浙": 11, u"皖": 12, - u"闽": 13, u"赣": 14, u"鲁": 15, u"豫": 16, u"鄂": 17, u"湘": 18, u"粤": 19, u"桂": 20, u"琼": 21, u"川": 22, u"贵": 23, u"云": 24, - u"藏": 
25, u"陕": 26, u"甘": 27, u"青": 28, u"宁": 29, u"新": 30, u"0": 31, u"1": 32, u"2": 33, u"3": 34, u"4": 35, u"5": 36, - u"6": 37, u"7": 38, u"8": 39, u"9": 40, u"A": 41, u"B": 42, u"C": 43, u"D": 44, u"E": 45, u"F": 46, u"G": 47, u"H": 48, - u"J": 49, u"K": 50, u"L": 51, u"M": 52, u"N": 53, u"P": 54, u"Q": 55, u"R": 56, u"S": 57, u"T": 58, u"U": 59, u"V": 60, - u"W": 61, u"X": 62, u"Y": 63, u"Z": 64,u"港":65,u"学":66 ,u"O":67 ,u"使":68,u"警":69,u"澳":70,u"挂":71}; - -chars = ["京", "沪", "津", "渝", "冀", "晋", "蒙", "辽", "吉", "黑", "苏", "浙", "皖", "闽", "赣", "鲁", "豫", "鄂", "湘", "粤", "桂", - "琼", "川", "贵", "云", "藏", "陕", "甘", "青", "宁", "新", "0", "1", "2", "3", "4", "5", "6", "7", "8", "9", "A", - "B", "C", "D", "E", "F", "G", "H", "J", "K", "L", "M", "N", "P", - "Q", "R", "S", "T", "U", "V", "W", "X", - "Y", "Z","港","学","O","使","警","澳","挂" ]; - - - -def Getmodel_tensorflow(nb_classes): - # nb_classes = len(charset) - - img_rows, img_cols = 23, 23 - # number of convolutional filters to use - nb_filters = 32 - # size of pooling area for max pooling - nb_pool = 2 - # convolution kernel size - nb_conv = 3 - - # x = np.load('x.npy') - - # y = np_utils.to_categorical(range(3062)*45*5*2, nb_classes) - # weight = ((type_class - np.arange(type_class)) / type_class + 1) ** 3 - # weight = dict(zip(range(3063), weight / weight.mean())) # 调整权重,高频字优先 - - model = Sequential() - model.add(Conv2D(32, (5, 5),input_shape=(img_rows, img_cols,1))) - model.add(Activation('relu')) - model.add(MaxPool2D(pool_size=(nb_pool, nb_pool))) - model.add(Dropout(0.25)) - model.add(Conv2D(32, (3, 3))) - model.add(Activation('relu')) - model.add(MaxPool2D(pool_size=(nb_pool, nb_pool))) - model.add(Dropout(0.25)) - model.add(Conv2D(512, (3, 3))) - # model.add(Activation('relu')) - # model.add(MaxPooling2D(pool_size=(nb_pool, nb_pool))) - # model.add(Dropout(0.25)) - model.add(Flatten()) - model.add(Dense(512)) - model.add(Activation('relu')) - model.add(Dropout(0.5)) - model.add(Dense(nb_classes)) - model.add(Activation('softmax')) - model.compile(loss='categorical_crossentropy', - optimizer='adam', - metrics=['accuracy']) - return model - - - - -def Getmodel_ch(nb_classes): - # nb_classes = len(charset) - - img_rows, img_cols = 23, 23 - # number of convolutional filters to use - nb_filters = 32 - # size of pooling area for max pooling - nb_pool = 2 - # convolution kernel size - nb_conv = 3 - - # x = np.load('x.npy') - # y = np_utils.to_categorical(range(3062)*45*5*2, nb_classes) - # weight = ((type_class - np.arange(type_class)) / type_class + 1) ** 3 - # weight = dict(zip(range(3063), weight / weight.mean())) # 调整权重,高频字优先 - - model = Sequential() - model.add(Conv2D(32, (5, 5),input_shape=(img_rows, img_cols,1))) - model.add(Activation('relu')) - model.add(MaxPool2D(pool_size=(nb_pool, nb_pool))) - model.add(Dropout(0.25)) - model.add(Conv2D(32, (3, 3))) - model.add(Activation('relu')) - model.add(MaxPool2D(pool_size=(nb_pool, nb_pool))) - model.add(Dropout(0.25)) - model.add(Conv2D(512, (3, 3))) - # model.add(Activation('relu')) - # model.add(MaxPooling2D(pool_size=(nb_pool, nb_pool))) - # model.add(Dropout(0.25)) - model.add(Flatten()) - model.add(Dense(756)) - model.add(Activation('relu')) - model.add(Dropout(0.5)) - model.add(Dense(nb_classes)) - model.add(Activation('softmax')) - model.compile(loss='categorical_crossentropy', - optimizer='adam', - metrics=['accuracy']) - return model - - - -model = Getmodel_tensorflow(65) -#构建网络 - -model_ch = Getmodel_ch(31) - -model_ch.load_weights("./model/char_chi_sim.h5") -# 
model_ch.save_weights("./model/char_chi_sim.h5") -model.load_weights("./model/char_rec.h5") -# model.save("./model/char_rec.h5") - - -def SimplePredict(image,pos): - image = cv2.resize(image, (23, 23)) - image = cv2.equalizeHist(image) - image = image.astype(np.float) / 255 - image -= image.mean() - image = np.expand_dims(image, 3) - if pos!=0: - res = np.array(model.predict(np.array([image]))[0]) - else: - res = np.array(model_ch.predict(np.array([image]))[0]) - - zero_add = 0 ; - - if pos==0: - res = res[:31] - elif pos==1: - res = res[31+10:65] - zero_add = 31+10 - else: - res = res[31:] - zero_add = 31 - - max_id = res.argmax() - - - return res.max(),chars[max_id+zero_add],max_id+zero_add - diff --git a/hyperlpr/segmentation.py b/hyperlpr/segmentation.py deleted file mode 100644 index 584929f..0000000 --- a/hyperlpr/segmentation.py +++ /dev/null @@ -1,320 +0,0 @@ -#coding=utf-8 -import cv2 -import numpy as np - -# from matplotlib import pyplot as plt -import scipy.ndimage.filters as f -import scipy - -import time -import scipy.signal as l - - - - - -from keras.models import Sequential -from keras.layers import Dense, Dropout, Activation, Flatten -from keras.layers import Conv2D, MaxPool2D -from keras.optimizers import SGD -from keras import backend as K - -K.set_image_dim_ordering('tf') - - -def Getmodel_tensorflow(nb_classes): - # nb_classes = len(charset) - img_rows, img_cols = 23, 23 - # number of convolutional filters to use - nb_filters = 16 - # size of pooling area for max pooling - nb_pool = 2 - # convolution kernel size - nb_conv = 3 - # x = np.load('x.npy') - # y = np_utils.to_categorical(range(3062)*45*5*2, nb_classes) - # weight = ((type_class - np.arange(type_class)) / type_class + 1) ** 3 - # weight = dict(zip(range(3063), weight / weight.mean())) # 调整权重,高频字优先 - - model = Sequential() - model.add(Conv2D(nb_filters, (nb_conv, nb_conv),input_shape=(img_rows, img_cols,1))) - model.add(Activation('relu')) - model.add(MaxPool2D(pool_size=(nb_pool, nb_pool))) - model.add(Conv2D(nb_filters, (nb_conv, nb_conv))) - model.add(Activation('relu')) - model.add(MaxPool2D(pool_size=(nb_pool, nb_pool))) - model.add(Flatten()) - model.add(Dense(256)) - model.add(Dropout(0.5)) - - model.add(Activation('relu')) - model.add(Dense(nb_classes)) - model.add(Activation('softmax')) - model.compile(loss='categorical_crossentropy', - optimizer='sgd', - metrics=['accuracy']) - return model - - - -def Getmodel_tensorflow_light(nb_classes): - # nb_classes = len(charset) - img_rows, img_cols = 23, 23 - # number of convolutional filters to use - nb_filters = 8 - # size of pooling area for max pooling - nb_pool = 2 - # convolution kernel size - nb_conv = 3 - # x = np.load('x.npy') - # y = np_utils.to_categorical(range(3062)*45*5*2, nb_classes) - # weight = ((type_class - np.arange(type_class)) / type_class + 1) ** 3 - # weight = dict(zip(range(3063), weight / weight.mean())) # 调整权重,高频字优先 - - model = Sequential() - model.add(Conv2D(nb_filters, (nb_conv, nb_conv),input_shape=(img_rows, img_cols, 1))) - model.add(Activation('relu')) - model.add(MaxPool2D(pool_size=(nb_pool, nb_pool))) - model.add(Conv2D(nb_filters, (nb_conv * 2, nb_conv * 2))) - model.add(Activation('relu')) - model.add(MaxPool2D(pool_size=(nb_pool, nb_pool))) - model.add(Flatten()) - model.add(Dense(32)) - # model.add(Dropout(0.25)) - - model.add(Activation('relu')) - model.add(Dense(nb_classes)) - model.add(Activation('softmax')) - model.compile(loss='categorical_crossentropy', - optimizer='adam', - metrics=['accuracy']) - return model - - 
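A detail worth spelling out from recognizer.py above: SimplePredict restricts the full softmax to the character classes that are legal at each plate position, then uses zero_add to map the local argmax back into the global chars table. Below is a minimal, self-contained sketch of that slicing logic; the toy chars table and the random scores are illustrative stand-ins, not real model output.

import numpy as np

# Toy stand-in for the chars table: 31 province glyphs, then 10 digits,
# then 26 letters. Only the index layout matters for this sketch.
chars = ["prov%d" % i for i in range(31)] + list("0123456789") \
        + [chr(c) for c in range(ord("A"), ord("Z") + 1)]

def decode_position(scores, pos):
    # Mirror of SimplePredict's zero_add bookkeeping: slice the scores down
    # to the classes allowed at this slot, argmax locally, then shift back.
    if pos == 0:                     # first slot: province characters only
        sub, zero_add = scores[:31], 0
    elif pos == 1:                   # second slot: letters only (skip the digits)
        sub, zero_add = scores[31 + 10:65], 31 + 10
    else:                            # remaining slots: digits, letters, specials
        sub, zero_add = scores[31:], 31
    max_id = sub.argmax()
    return sub.max(), chars[max_id + zero_add], max_id + zero_add

scores = np.random.rand(len(chars))  # pretend softmax output
print(decode_position(scores, pos=1))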
- - -model = Getmodel_tensorflow_light(3) -model2 = Getmodel_tensorflow(3) - -import os -model.load_weights("./model/char_judgement1.h5") -# model.save("./model/char_judgement1.h5") -model2.load_weights("./model/char_judgement.h5") -# model2.save("./model/char_judgement.h5") - - -model = model2 -def get_median(data): - data = sorted(data) - size = len(data) - # print size - - if size % 2 == 0: # 判断列表长度为偶数 - median = (data[size//2]+data[size//2-1])/2 - data[0] = median - if size % 2 == 1: # 判断列表长度为奇数 - median = data[(size-1)//2] - data[0] = median - return data[0] -import time - -def searchOptimalCuttingPoint(rgb,res_map,start,width_boundingbox,interval_range): - t0 = time.time() - # - # for x in xrange(10): - # res_map = np.vstack((res_map,res_map[-1])) - length = res_map.shape[0] - refine_s = -2; - - if width_boundingbox>20: - refine_s = -9 - score_list = [] - interval_big = int(width_boundingbox * 0.3) # - p = 0 - for zero_add in xrange(start,start+50,3): - # for interval_small in xrange(-0,width_boundingbox/2): - for i in xrange(-8,int(width_boundingbox/1)-8): - for refine in xrange(refine_s,width_boundingbox/2+3): - p1 = zero_add# this point is province - p2 = p1 + width_boundingbox +refine # - p3 = p2 + width_boundingbox + interval_big+i+1 - p4 = p3 + width_boundingbox +refine - p5 = p4 + width_boundingbox +refine - p6 = p5 + width_boundingbox +refine - p7 = p6 + width_boundingbox +refine - if p7>=length: - continue - score = res_map[p1][2]*3 -(res_map[p3][1]+res_map[p4][1]+res_map[p5][1]+res_map[p6][1]+res_map[p7][1])+7 - # print score - score_list.append([score,[p1,p2,p3,p4,p5,p6,p7]]) - p+=1 - print p - - score_list = sorted(score_list , key=lambda x:x[0]) - # for one in score_list[-1][1]: - # cv2.line(debug,(one,0),(one,36),(255,0,0),1) - # # - # cv2.imshow("one",debug) - # cv2.waitKey(0) - # - print "寻找最佳点",time.time()-t0 - return score_list[-1] - - -import sys - -sys.path.append('../') -import recognizer as cRP -import niblack_thresholding as nt - -def refineCrop(sections,width=16): - new_sections = [] - for section in sections: - # cv2.imshow("section¡",section) - - # cv2.blur(section,(3,3),3) - - sec_center = np.array([section.shape[1]/2,section.shape[0]/2]) - binary_niblack = nt.niBlackThreshold(section,17,-0.255) - imagex, contours, hierarchy = cv2.findContours(binary_niblack,cv2.RETR_EXTERNAL,cv2.CHAIN_APPROX_SIMPLE) - boxs = [] - for contour in contours: - x,y,w,h = cv2.boundingRect(contour) - - ratio = w/float(h) - if ratio<1 and h>36*0.4 and y<16\ - : - box = [x,y,w,h] - - boxs.append([box,np.array([x+w/2,y+h/2])]) - # cv2.rectangle(section,(x,y),(x+w,y+h),255,1) - - - - - # print boxs - - dis_ = np.array([ ((one[1]-sec_center)**2).sum() for one in boxs]) - if len(dis_)==0: - kernal = [0, 0, section.shape[1], section.shape[0]] - else: - kernal = boxs[dis_.argmin()][0] - - center_c = (kernal[0]+kernal[2]/2,kernal[1]+kernal[3]/2) - w_2 = int(width/2) - h_2 = kernal[3]/2 - - if center_c[0] - w_2< 0: - w_2 = center_c[0] - new_box = [center_c[0] - w_2,kernal[1],width,kernal[3]] - # print new_box[2]/float(new_box[3]) - if new_box[2]/float(new_box[3])>0.5: - # print "异常" - h = int((new_box[2]/0.35 )/2) - if h>35: - h = 35 - new_box[1] = center_c[1]- h - if new_box[1]<0: - new_box[1] = 1 - - new_box[3] = h*2 - - - - - - - - - section = section[new_box[1]:new_box[1]+new_box[3],new_box[0]:new_box[0]+new_box[2]] - # cv2.imshow("section",section) - # cv2.waitKey(0) - new_sections.append(section) - # print new_box - - - return new_sections - - - - - -def slidingWindowsEval(image): - 
windows_size = 16; - stride = 1 - height= image.shape[0] - t0 = time.time() - data_sets = [] - - for i in range(0,image.shape[1]-windows_size+1,stride): - data = image[0:height,i:i+windows_size] - data = cv2.resize(data,(23,23)) - # cv2.imshow("image",data) - data = cv2.equalizeHist(data) - data = data.astype(np.float)/255 - data= np.expand_dims(data,3) - data_sets.append(data) - - res = model2.predict(np.array(data_sets)) - print "分割",time.time() - t0 - - pin = res - p = 1 - (res.T)[1] - p = f.gaussian_filter1d(np.array(p,dtype=np.float),3) - lmin = l.argrelmax(np.array(p),order = 3)[0] - interval = [] - for i in xrange(len(lmin)-1): - interval.append(lmin[i+1]-lmin[i]) - - if(len(interval)>3): - mid = get_median(interval) - else: - return [] - pin = np.array(pin) - res = searchOptimalCuttingPoint(image,pin,0,mid,3) - - cutting_pts = res[1] - last = cutting_pts[-1] + mid - if last < image.shape[1]: - cutting_pts.append(last) - else: - cutting_pts.append(image.shape[1]-1) - name = "" - confidence =0.00 - seg_block = [] - for x in xrange(1,len(cutting_pts)): - if x != len(cutting_pts)-1 and x!=1: - section = image[0:36,cutting_pts[x-1]-2:cutting_pts[x]+2] - elif x==1: - c_head = cutting_pts[x - 1]- 2 - if c_head<0: - c_head=0 - c_tail = cutting_pts[x] + 2 - section = image[0:36, c_head:c_tail] - elif x==len(cutting_pts)-1: - end = cutting_pts[x] - diff = image.shape[1]-end - c_head = cutting_pts[x - 1] - c_tail = cutting_pts[x] - if diff<7 : - section = image[0:36, c_head-5:c_tail+5] - else: - diff-=1 - section = image[0:36, c_head - diff:c_tail + diff] - elif x==2: - section = image[0:36, cutting_pts[x - 1] - 3:cutting_pts[x-1]+ mid] - else: - section = image[0:36,cutting_pts[x-1]:cutting_pts[x]] - seg_block.append(section) - refined = refineCrop(seg_block,mid-1) - - t0 = time.time() - for i,one in enumerate(refined): - res_pre = cRP.SimplePredict(one, i ) - # cv2.imshow(str(i),one) - # cv2.waitKey(0) - confidence+=res_pre[0] - name+= res_pre[1] - print "字符识别",time.time() - t0 - - return refined,name,confidence diff --git a/hyperlpr/typeDistinguish.py b/hyperlpr/typeDistinguish.py deleted file mode 100644 index 21ee7fd..0000000 --- a/hyperlpr/typeDistinguish.py +++ /dev/null @@ -1,56 +0,0 @@ -#coding=utf-8 -from keras.models import Sequential -from keras.layers import Dense, Dropout, Activation, Flatten -from keras.layers import Conv2D, MaxPool2D -from keras.optimizers import SGD -from keras import backend as K - -K.set_image_dim_ordering('tf') - - -import cv2 -import numpy as np - - -plateType = [u"蓝牌",u"单层黄牌",u"新能源车牌",u"白色",u"黑色-港澳"] -def Getmodel_tensorflow(nb_classes): - # nb_classes = len(charset) - - img_rows, img_cols = 9, 34 - # number of convolutional filters to use - nb_filters = 32 - # size of pooling area for max pooling - nb_pool = 2 - # convolution kernel size - nb_conv = 3 - - # x = np.load('x.npy') - # y = np_utils.to_categorical(range(3062)*45*5*2, nb_classes) - # weight = ((type_class - np.arange(type_class)) / type_class + 1) ** 3 - # weight = dict(zip(range(3063), weight / weight.mean())) # 调整权重,高频字优先 - - model = Sequential() - model.add(Conv2D(16, (5, 5),input_shape=(img_rows, img_cols,3))) - model.add(Activation('relu')) - model.add(MaxPool2D(pool_size=(nb_pool, nb_pool))) - model.add(Flatten()) - model.add(Dense(64)) - model.add(Activation('relu')) - model.add(Dropout(0.5)) - model.add(Dense(nb_classes)) - model.add(Activation('softmax')) - model.compile(loss='categorical_crossentropy', - optimizer='adam', - metrics=['accuracy']) - return model - -model = 
Getmodel_tensorflow(5) -model.load_weights("./model/plate_type.h5") -model.save("./model/plate_type.h5") -def SimplePredict(image): - image = cv2.resize(image, (34, 9)) - image = image.astype(np.float) / 255 - res = np.array(model.predict(np.array([image]))[0]) - return res.argmax() - - diff --git a/hyperlpr_py3/__init__.py b/hyperlpr_py3/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/hyperlpr_py3/cache.py b/hyperlpr_py3/cache.py deleted file mode 100644 index 80a2953..0000000 --- a/hyperlpr_py3/cache.py +++ /dev/null @@ -1,11 +0,0 @@ -import cv2 -import os -import hashlib - -def verticalMappingToFolder(image): - name = hashlib.md5(image.data).hexdigest()[:8] - print(name) - - cv2.imwrite("./cache/finemapping/"+name+".png",image) - - diff --git a/hyperlpr_py3/colourDetection.py b/hyperlpr_py3/colourDetection.py deleted file mode 100644 index 78c5854..0000000 --- a/hyperlpr_py3/colourDetection.py +++ /dev/null @@ -1,103 +0,0 @@ -# -- coding: UTF-8 -import cv2 -import matplotlib.pyplot as plt -from sklearn.cluster import KMeans -import os - -boundaries = [ - ([100,80,0],[240,220,110]), # yellow - ([0,40,50],[110,180,250]), # blue - ([0,60,0],[60,160,70]), # green -] -color_attr = ["黄牌","蓝牌",'绿牌','白牌','黑牌'] - -threhold_green = 13 -threhold_blue = 13 -threhold_yellow1 = 50 -threhold_yellow2 = 70 - -# plt.figure() -# plt.axis("off") -# plt.imshow(image) -# plt.show() - -import numpy as np -def centroid_histogram(clt): - numLabels = np.arange(0, len(np.unique(clt.labels_)) + 1) - (hist, _) = np.histogram(clt.labels_, bins=numLabels) - - # normalize the histogram, such that it sums to one - hist = hist.astype("float") - hist /= hist.sum() - - # return the histogram - return hist - - -def plot_colors(hist, centroids): - bar = np.zeros((50, 300, 3), dtype="uint8") - startX = 0 - - for (percent, color) in zip(hist, centroids): - - endX = startX + (percent * 300) - cv2.rectangle(bar, (int(startX), 0), (int(endX), 50), - color.astype("uint8").tolist(), -1) - startX = endX - - # return the bar chart - return bar - -def search_boundaries(color): - for i,color_bound in enumerate(boundaries): - if np.all(color >= color_bound[0]) and np.all(color <= color_bound[1]): - return i - return -1 - -def judge_color(color): - r = color[0] - g = color[1] - b = color[2] - if g - r >= threhold_green and g - b >= threhold_green: - return 2 - if b - r >= threhold_blue and b - g >= threhold_blue: - return 1 - if r- b > threhold_yellow2 and g - b > threhold_yellow2: - return 0 - if r > 200 and b > 200 and g > 200: - return 3 - if r < 50 and b < 50 and g < 50: - return 4 - return -1 - -def judge_plate_color(img): - image = cv2.cvtColor(img, cv2.COLOR_BGR2RGB) - image = image.reshape((image.shape[0] * image.shape[1], 3)) - clt = KMeans(n_clusters=2) - clt.fit(image) - - hist = centroid_histogram(clt) - index = np.argmax(hist) - #print clt.cluster_centers_[index] - #color_index = search_boundaries(clt.cluster_centers_[index]) - color_index = judge_color(clt.cluster_centers_[index]) - if color_index == -1: - if index == 0: - secound_index = 1 - else: - secound_index = 0 - color_index = judge_color(clt.cluster_centers_[secound_index]) - - if color_index == -1: - print(clt.cluster_centers_) - bar = plot_colors(hist, clt.cluster_centers_) - # show our color bart - plt.figure() - plt.axis("off") - plt.imshow(bar) - plt.show() - - if color_index != -1: - return color_attr[color_index],clt.cluster_centers_[index] - else: - return None,clt.cluster_centers_[index] \ No newline at end of file diff --git 
a/hyperlpr_py3/config.py b/hyperlpr_py3/config.py deleted file mode 100644 index 40f2099..0000000 --- a/hyperlpr_py3/config.py +++ /dev/null @@ -1,6 +0,0 @@ -import json - - - -with open("/Users/universe/ProgramUniverse/zeusees/HyperLPR/config.json") as f: - configuration = json.load(f) diff --git a/hyperlpr_py3/deskew.py b/hyperlpr_py3/deskew.py deleted file mode 100644 index 1a5ea42..0000000 --- a/hyperlpr_py3/deskew.py +++ /dev/null @@ -1,100 +0,0 @@ -#coding=utf-8 -import numpy as np -import cv2 -import time -from matplotlib import pyplot as plt -import math - -from scipy.ndimage import filters -# -# def strokeFiter(): -# pass; - -def angle(x,y): - return int(math.atan2(float(y),float(x))*180.0/3.1415) - - -def h_rot(src, angle, scale=1.0): - w = src.shape[1] - h = src.shape[0] - rangle = np.deg2rad(angle) - nw = (abs(np.sin(rangle)*h) + abs(np.cos(rangle)*w))*scale - nh = (abs(np.cos(rangle)*h) + abs(np.sin(rangle)*w))*scale - rot_mat = cv2.getRotationMatrix2D((nw*0.5, nh*0.5), angle, scale) - rot_move = np.dot(rot_mat, np.array([(nw-w)*0.5, (nh-h)*0.5,0])) - rot_mat[0,2] += rot_move[0] - rot_mat[1,2] += rot_move[1] - return cv2.warpAffine(src, rot_mat, (int(math.ceil(nw)), int(math.ceil(nh))), flags=cv2.INTER_LANCZOS4) - pass - - -def v_rot(img, angel, shape, max_angel): - size_o = [shape[1],shape[0]] - size = (shape[1]+ int(shape[0]*np.cos((float(max_angel )/180) * 3.14)),shape[0]) - interval = abs( int( np.sin((float(angel) /180) * 3.14)* shape[0])) - pts1 = np.float32([[0,0],[0,size_o[1]],[size_o[0],0],[size_o[0],size_o[1]]]) - if(angel>0): - pts2 = np.float32([[interval,0],[0,size[1] ],[size[0],0 ],[size[0]-interval,size_o[1]]]) - else: - pts2 = np.float32([[0,0],[interval,size[1] ],[size[0]-interval,0 ],[size[0],size_o[1]]]) - - M = cv2.getPerspectiveTransform(pts1,pts2) - dst = cv2.warpPerspective(img,M,size) - return dst,M - - -def skew_detection(image_gray): - h, w = image_gray.shape[:2] - eigen = cv2.cornerEigenValsAndVecs(image_gray,12, 5) - angle_sur = np.zeros(180,np.uint) - eigen = eigen.reshape(h, w, 3, 2) - flow = eigen[:,:,2] - vis = image_gray.copy() - vis[:] = (192 + np.uint32(vis)) / 2 - d = 12 - points = np.dstack( np.mgrid[d/2:w:d, d/2:h:d] ).reshape(-1, 2) - for x, y in points: - vx, vy = np.int32(flow[int(y), int(x)]*d) - # cv2.line(rgb, (x-vx, y-vy), (x+vx, y+vy), (0, 355, 0), 1, cv2.LINE_AA) - ang = angle(vx,vy) - angle_sur[(ang+180)%180] +=1 - - # torr_bin = 30 - angle_sur = angle_sur.astype(np.float) - angle_sur = (angle_sur-angle_sur.min())/(angle_sur.max()-angle_sur.min()) - angle_sur = filters.gaussian_filter1d(angle_sur,5) - skew_v_val = angle_sur[20:180-20].max() - skew_v = angle_sur[30:180-30].argmax() + 30 - skew_h_A = angle_sur[0:30].max() - skew_h_B = angle_sur[150:180].max() - skew_h = 0 - if (skew_h_A > skew_v_val*0.3 or skew_h_B > skew_v_val*0.3): - if skew_h_A>=skew_h_B: - skew_h = angle_sur[0:20].argmax() - else: - skew_h = - angle_sur[160:180].argmax() - return skew_h,skew_v - - -def fastDeskew(image): - image_gray = cv2.cvtColor(image,cv2.COLOR_BGR2GRAY) - skew_h,skew_v = skew_detection(image_gray) - print("校正角度 h ",skew_h,"v",skew_v) - deskew,M = v_rot(image,int((90-skew_v)*1.5),image.shape,60) - return deskew,M - - - -if __name__ == '__main__': - fn = './dataset/0.jpg' - - img = cv2.imread(fn) - gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) - skew_h,skew_v = skew_detection(gray) - img = v_rot(img,(90-skew_v ),img.shape,60) - # img = h_rot(img,skew_h) - # if img.shape[0]>img.shape[1]: - # img = h_rot(img, -90) - - plt.show() - cv2.waitKey() 
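Before the diff moves on to detect.py, one step in deskew.py deserves a worked example: v_rot simulates an out-of-plane tilt with a four-corner perspective warp, sliding one pair of corners sideways by sin(angle) * height on a widened canvas. The sketch below is a simplified version of that mapping (shear_like_v_rot is a hypothetical name, the demo image and 12-degree angle are made up, and the canvas sizing drops the original's max_angel handling):

import cv2
import numpy as np

def shear_like_v_rot(img, angle_deg):
    h, w = img.shape[:2]
    # How far the top (or bottom) corners slide sideways for this tilt.
    interval = abs(int(np.sin(np.deg2rad(angle_deg)) * h))
    src = np.float32([[0, 0], [0, h], [w, 0], [w, h]])
    if angle_deg > 0:
        dst = np.float32([[interval, 0], [0, h], [w + interval, 0], [w, h]])
    else:
        dst = np.float32([[0, 0], [interval, h], [w, 0], [w + interval, h]])
    M = cv2.getPerspectiveTransform(src, dst)
    return cv2.warpPerspective(img, M, (w + interval, h))

demo = np.full((36, 136, 3), 255, np.uint8)          # blank plate-sized canvas
cv2.putText(demo, "A12345", (5, 28), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 0), 2)
sheared = shear_like_v_rot(demo, 12)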
diff --git a/hyperlpr_py3/detect.py b/hyperlpr_py3/detect.py deleted file mode 100644 index c4bc4ad..0000000 --- a/hyperlpr_py3/detect.py +++ /dev/null @@ -1,76 +0,0 @@ - -import cv2 -import numpy as np - - - -watch_cascade = cv2.CascadeClassifier('./model/cascade.xml') - - -def computeSafeRegion(shape,bounding_rect): - top = bounding_rect[1] # y - bottom = bounding_rect[1] + bounding_rect[3] # y + h - left = bounding_rect[0] # x - right = bounding_rect[0] + bounding_rect[2] # x + w - - min_top = 0 - max_bottom = shape[0] - min_left = 0 - max_right = shape[1] - - # print "computeSateRegion input shape",shape - if top < min_top: - top = min_top - # print "tap top 0" - if left < min_left: - left = min_left - # print "tap left 0" - - if bottom > max_bottom: - bottom = max_bottom - #print "tap max_bottom max" - if right > max_right: - right = max_right - #print "tap max_right max" - - # print "corr",left,top,right,bottom - return [left,top,right-left,bottom-top] - - -def cropped_from_image(image,rect): - x, y, w, h = computeSafeRegion(image.shape,rect) - return image[y:y+h,x:x+w] - - -def detectPlateRough(image_gray,resize_h = 720,en_scale =1.08 ,top_bottom_padding_rate = 0.05): - print(image_gray.shape) - - if top_bottom_padding_rate>0.2: - print("error:top_bottom_padding_rate > 0.2:",top_bottom_padding_rate) - exit(1) - - height = image_gray.shape[0] - padding = int(height*top_bottom_padding_rate) - scale = image_gray.shape[1]/float(image_gray.shape[0]) - - image = cv2.resize(image_gray, (int(scale*resize_h), resize_h)) - - image_color_cropped = image[padding:resize_h-padding,0:image_gray.shape[1]] - - image_gray = cv2.cvtColor(image_color_cropped,cv2.COLOR_RGB2GRAY) - - watches = watch_cascade.detectMultiScale(image_gray, en_scale, 2, minSize=(36, 9),maxSize=(36*40, 9*40)) - - cropped_images = [] - for (x, y, w, h) in watches: - cropped_origin = cropped_from_image(image_color_cropped, (int(x), int(y), int(w), int(h))) - x -= w * 0.14 - w += w * 0.28 - y -= h * 0.6 - h += h * 1.1; - - cropped = cropped_from_image(image_color_cropped, (int(x), int(y), int(w), int(h))) - - - cropped_images.append([cropped,[x, y+padding, w, h],cropped_origin]) - return cropped_images diff --git a/hyperlpr_py3/e2e.py b/hyperlpr_py3/e2e.py deleted file mode 100644 index d5d1159..0000000 --- a/hyperlpr_py3/e2e.py +++ /dev/null @@ -1,63 +0,0 @@ -#coding=utf-8 -from keras import backend as K -from keras.models import load_model -from keras.layers import * -import numpy as np -import random -import string - -import cv2 -from . 
import e2emodel as model -chars = ["京", "沪", "津", "渝", "冀", "晋", "蒙", "辽", "吉", "黑", "苏", "浙", "皖", "闽", "赣", "鲁", "豫", "鄂", "湘", "粤", "桂", - "琼", "川", "贵", "云", "藏", "陕", "甘", "青", "宁", "新", "0", "1", "2", "3", "4", "5", "6", "7", "8", "9", "A", - "B", "C", "D", "E", "F", "G", "H", "J", "K", "L", "M", "N", "P", "Q", "R", "S", "T", "U", "V", "W", "X", - "Y", "Z","港","学","使","警","澳","挂","军","北","南","广","沈","兰","成","济","海","民","航","空" - ]; -pred_model = model.construct_model("./model/ocr_plate_all_w_rnn_2.h5",) -import time - - -def fastdecode(y_pred): - results = "" - confidence = 0.0 - table_pred = y_pred.reshape(-1, len(chars)+1) - - res = table_pred.argmax(axis=1) - - for i,one in enumerate(res): - if one<len(chars) and (i==0 or (one!=res[i-1])): - results+= chars[one] - confidence+=table_pred[i][one] - confidence/= len(results) - return results,confidence - - -def recognizeOne(src): - x_tempx = src - x_temp = cv2.resize(x_tempx,( 164,48)) - x_temp = x_temp.transpose(1, 0, 2) - y_pred = pred_model.predict(np.array([x_temp])) - y_pred = y_pred[:,2:,:] - return fastdecode(y_pred) diff --git a/hyperlpr_py3/finemapping.py b/hyperlpr_py3/finemapping.py deleted file mode 100644 --- a/hyperlpr_py3/finemapping.py +++ /dev/null -#coding=utf-8 -import cv2 -import numpy as np -from . import niblack_thresholding as nt -from . import deskew - -def fitLine_ransac(pts,zero_add = 0 ): - if len(pts)>=2: - [vx, vy, x, y] = cv2.fitLine(pts, cv2.DIST_HUBER, 0, 0.01, 0.01) - lefty = int((-x * vy / vx) + y) - righty = int(((136- x) * vy / vx) + y) - return lefty+30+zero_add,righty+30+zero_add - return 0,0 - - -#精定位算法 -def findContoursAndDrawBoundingBox(image_rgb): - - - line_upper = []; - line_lower = []; - - line_experiment = [] - grouped_rects = [] - gray_image = cv2.cvtColor(image_rgb,cv2.COLOR_BGR2GRAY) - - # for k in np.linspace(-1.5, -0.2,10): - for k in np.linspace(-50, 0, 15): - - # thresh_niblack = threshold_niblack(gray_image, window_size=21, k=k) - # binary_niblack = gray_image > thresh_niblack - # binary_niblack = binary_niblack.astype(np.uint8) * 255 - - binary_niblack = cv2.adaptiveThreshold(gray_image,255,cv2.ADAPTIVE_THRESH_MEAN_C,cv2.THRESH_BINARY,17,k) - # cv2.imshow("image1",binary_niblack) - # cv2.waitKey(0) - imagex, contours, hierarchy = cv2.findContours(binary_niblack.copy(),cv2.RETR_EXTERNAL,cv2.CHAIN_APPROX_SIMPLE) - for contour in contours: - bdbox = cv2.boundingRect(contour) - if (bdbox[3]/float(bdbox[2])>0.7 and bdbox[3]*bdbox[2]>100 and bdbox[3]*bdbox[2]<1200) or (bdbox[3]/float(bdbox[2])>3 and bdbox[3]*bdbox[2]<100): - # cv2.rectangle(rgb,(bdbox[0],bdbox[1]),(bdbox[0]+bdbox[2],bdbox[1]+bdbox[3]),(255,0,0),1) - line_upper.append([bdbox[0],bdbox[1]]) - line_lower.append([bdbox[0]+bdbox[2],bdbox[1]+bdbox[3]]) - - line_experiment.append([bdbox[0],bdbox[1]]) - line_experiment.append([bdbox[0]+bdbox[2],bdbox[1]+bdbox[3]]) - # grouped_rects.append(bdbox) - - rgb = cv2.copyMakeBorder(image_rgb,30,30,0,0,cv2.BORDER_REPLICATE) - leftyA, rightyA = fitLine_ransac(np.array(line_lower),3) - rows,cols = rgb.shape[:2] - - # rgb = cv2.line(rgb, (cols - 1, rightyA), (0, leftyA), (0, 0, 255), 1,cv2.LINE_AA) - - leftyB, rightyB = fitLine_ransac(np.array(line_upper),-3) - - rows,cols = rgb.shape[:2] - - # rgb = cv2.line(rgb, (cols - 1, rightyB), (0, leftyB), (0,255, 0), 1,cv2.LINE_AA) - pts_map1 = np.float32([[cols - 1, rightyA], [0, leftyA],[cols - 1, rightyB], [0, leftyB]]) - pts_map2 = np.float32([[136,36],[0,36],[136,0],[0,0]]) - mat = cv2.getPerspectiveTransform(pts_map1,pts_map2) - image = cv2.warpPerspective(rgb,mat,(136,36),flags=cv2.INTER_CUBIC) - image,M = deskew.fastDeskew(image) - - return image - - - -#多级 -def findContoursAndDrawBoundingBox2(image_rgb): - - - line_upper = []; - line_lower = []; - - line_experiment = [] - - grouped_rects = [] - - gray_image = cv2.cvtColor(image_rgb,cv2.COLOR_BGR2GRAY) - - for k in np.linspace(-1.6, -0.2,10): - # for k in np.linspace(-15, 0, 15): - # # - # thresh_niblack = threshold_niblack(gray_image, window_size=21, k=k) - # binary_niblack = gray_image > thresh_niblack - # binary_niblack = binary_niblack.astype(np.uint8) * 255 - - binary_niblack = nt.niBlackThreshold(gray_image,19,k) - # 
cv2.imshow("binary_niblack_opencv",binary_niblack_) - # cv2.imshow("binary_niblack_skimage", binary_niblack) - - # cv2.waitKey(0) - imagex, contours, hierarchy = cv2.findContours(binary_niblack.copy(),cv2.RETR_EXTERNAL,cv2.CHAIN_APPROX_SIMPLE) - - for contour in contours: - bdbox = cv2.boundingRect(contour) - if (bdbox[3]/float(bdbox[2])>0.7 and bdbox[3]*bdbox[2]>100 and bdbox[3]*bdbox[2]<1000) or (bdbox[3]/float(bdbox[2])>3 and bdbox[3]*bdbox[2]<100): - # cv2.rectangle(rgb,(bdbox[0],bdbox[1]),(bdbox[0]+bdbox[2],bdbox[1]+bdbox[3]),(255,0,0),1) - line_upper.append([bdbox[0],bdbox[1]]) - line_lower.append([bdbox[0]+bdbox[2],bdbox[1]+bdbox[3]]) - - line_experiment.append([bdbox[0],bdbox[1]]) - line_experiment.append([bdbox[0]+bdbox[2],bdbox[1]+bdbox[3]]) - # grouped_rects.append(bdbox) - - rgb = cv2.copyMakeBorder(image_rgb,30,30,0,0,cv2.BORDER_REPLICATE) - leftyA, rightyA = fitLine_ransac(np.array(line_lower),2) - rows,cols = rgb.shape[:2] - - # rgb = cv2.line(rgb, (cols - 1, rightyA), (0, leftyA), (0, 0, 255), 1,cv2.LINE_AA) - - leftyB, rightyB = fitLine_ransac(np.array(line_upper),-4) - - rows,cols = rgb.shape[:2] - - # rgb = cv2.line(rgb, (cols - 1, rightyB), (0, leftyB), (0,255, 0), 1,cv2.LINE_AA) - pts_map1 = np.float32([[cols - 1, rightyA], [0, leftyA],[cols - 1, rightyB], [0, leftyB]]) - pts_map2 = np.float32([[136,36],[0,36],[136,0],[0,0]]) - mat = cv2.getPerspectiveTransform(pts_map1,pts_map2) - image = cv2.warpPerspective(rgb,mat,(136,36),flags=cv2.INTER_CUBIC) - image,M= deskew.fastDeskew(image) - - - return image diff --git a/hyperlpr_py3/finemapping_vertical.py b/hyperlpr_py3/finemapping_vertical.py deleted file mode 100644 index 83cbcf4..0000000 --- a/hyperlpr_py3/finemapping_vertical.py +++ /dev/null @@ -1,92 +0,0 @@ -#coding=utf-8 -from keras.layers import Conv2D, Input,MaxPool2D, Reshape,Activation,Flatten, Dense -from keras.models import Model, Sequential -from keras.layers.advanced_activations import PReLU -from keras.optimizers import adam -import numpy as np - -import cv2 - -def getModel(): - input = Input(shape=[16, 66, 3]) # change this shape to [None,None,3] to enable arbitraty shape input - x = Conv2D(10, (3, 3), strides=1, padding='valid', name='conv1')(input) - x = Activation("relu", name='relu1')(x) - x = MaxPool2D(pool_size=2)(x) - x = Conv2D(16, (3, 3), strides=1, padding='valid', name='conv2')(x) - x = Activation("relu", name='relu2')(x) - x = Conv2D(32, (3, 3), strides=1, padding='valid', name='conv3')(x) - x = Activation("relu", name='relu3')(x) - x = Flatten()(x) - output = Dense(2,name = "dense")(x) - output = Activation("relu", name='relu4')(output) - model = Model([input], [output]) - return model - - - -model = getModel() -model.load_weights("./model/model12.h5") - - -def getmodel(): - return model - -def gettest_model(): - input = Input(shape=[16, 66, 3]) # change this shape to [None,None,3] to enable arbitraty shape input - A = Conv2D(10, (3, 3), strides=1, padding='valid', name='conv1')(input) - B = Activation("relu", name='relu1')(A) - C = MaxPool2D(pool_size=2)(B) - x = Conv2D(16, (3, 3), strides=1, padding='valid', name='conv2')(C) - x = Activation("relu", name='relu2')(x) - x = Conv2D(32, (3, 3), strides=1, padding='valid', name='conv3')(x) - K = Activation("relu", name='relu3')(x) - - - x = Flatten()(K) - dense = Dense(2,name = "dense")(x) - output = Activation("relu", name='relu4')(dense) - x = Model([input], [output]) - x.load_weights("./model/model12.h5") - ok = Model([input], [dense]) - - for layer in ok.layers: - print(layer) - - return ok - 
- - - -def finemappingVertical(image): - resized = cv2.resize(image,(66,16)) - resized = resized.astype(np.float)/255 - res= model.predict(np.array([resized]))[0] - print("keras_predict",res) - res =res*image.shape[1] - res = res.astype(np.int) - H,T = res - H-=3 - #3 79.86 - #4 79.3 - #5 79.5 - #6 78.3 - - - #T - #T+1 80.9 - #T+2 81.75 - #T+3 81.75 - - - - if H<0: - H=0 - T+=2; - - if T>= image.shape[1]-1: - T= image.shape[1]-1 - - image = image[0:35,H:T+2] - - image = cv2.resize(image, (int(136), int(36))) - return image \ No newline at end of file diff --git a/hyperlpr_py3/niblack_thresholding.py b/hyperlpr_py3/niblack_thresholding.py deleted file mode 100644 index 652bae0..0000000 --- a/hyperlpr_py3/niblack_thresholding.py +++ /dev/null @@ -1,18 +0,0 @@ -import cv2 -import numpy as np - - - -def niBlackThreshold( src, blockSize, k, binarizationMethod= 0 ): - mean = cv2.boxFilter(src,cv2.CV_32F,(blockSize, blockSize),borderType=cv2.BORDER_REPLICATE) - sqmean = cv2.sqrBoxFilter(src, cv2.CV_32F, (blockSize, blockSize), borderType = cv2.BORDER_REPLICATE) - variance = sqmean - (mean*mean) - stddev = np.sqrt(variance) - thresh = mean + stddev * float(-k) - thresh = thresh.astype(src.dtype) - k = (src>thresh)*255 - k = k.astype(np.uint8) - return k - - -# cv2.imshow() \ No newline at end of file diff --git a/hyperlpr_py3/pipline.py b/hyperlpr_py3/pipline.py deleted file mode 100644 index 3a37dd3..0000000 --- a/hyperlpr_py3/pipline.py +++ /dev/null @@ -1,246 +0,0 @@ -#coding=utf-8 -from . import detect -from . import finemapping as fm - -from . import segmentation -import cv2 - -import time -import numpy as np - -from PIL import ImageFont -from PIL import Image -from PIL import ImageDraw -import json - -import sys -from . import typeDistinguish as td -import imp - - -imp.reload(sys) -fontC = ImageFont.truetype("./Font/platech.ttf", 14, 0); - -from . 
import e2e -#寻找车牌左右边界 - -def find_edge(image): - sum_i = image.sum(axis=0) - sum_i = sum_i.astype(np.float) - sum_i/=image.shape[0]*255 - # print sum_i - - start= 0 ; - end = image.shape[1]-1 - - for i,one in enumerate(sum_i): - if one>0.4: - start = i; - if start-3<0: - start = 0 - else: - start -=3 - - break; - for i,one in enumerate(sum_i[::-1]): - - if one>0.4: - end = end - i; - if end+4>image.shape[1]-1: - end = image.shape[1]-1 - else: - end+=4 - break - return start,end - - -#垂直边缘检测 -def verticalEdgeDetection(image): - image_sobel = cv2.Sobel(image.copy(),cv2.CV_8U,1,0) - # image = auto_canny(image_sobel) - - # img_sobel, CV_8U, 1, 0, 3, 1, 0, BORDER_DEFAULT - # canny_image = auto_canny(image) - flag,thres = cv2.threshold(image_sobel,0,255,cv2.THRESH_OTSU|cv2.THRESH_BINARY) - print(flag) - flag,thres = cv2.threshold(image_sobel,int(flag*0.7),255,cv2.THRESH_BINARY) - # thres = simpleThres(image_sobel) - kernal = np.ones(shape=(3,15)) - thres = cv2.morphologyEx(thres,cv2.MORPH_CLOSE,kernal) - return thres - - -#确定粗略的左右边界 -def horizontalSegmentation(image): - - thres = verticalEdgeDetection(image) - # thres = thres*image - head,tail = find_edge(thres) - # print head,tail - # cv2.imshow("edge",thres) - tail = tail+5 - if tail>135: - tail = 135 - image = image[0:35,head:tail] - image = cv2.resize(image, (int(136), int(36))) - return image - - -#打上boundingbox和标签 -def drawRectBox(image,rect,addText): - cv2.rectangle(image, (int(rect[0]), int(rect[1])), (int(rect[0] + rect[2]), int(rect[1] + rect[3])), (0,0, 255), 2, cv2.LINE_AA) - cv2.rectangle(image, (int(rect[0]-1), int(rect[1])-16), (int(rect[0] + 115), int(rect[1])), (0, 0, 255), -1, cv2.LINE_AA) - - img = Image.fromarray(image) - draw = ImageDraw.Draw(img) - #draw.text((int(rect[0]+1), int(rect[1]-16)), addText.decode("utf-8"), (255, 255, 255), font=fontC) - draw.text((int(rect[0]+1), int(rect[1]-16)), addText, (255, 255, 255), font=fontC) - imagex = np.array(img) - - return imagex - - -from . import cache -from . 
import finemapping_vertical as fv - -def RecognizePlateJson(image): - images = detect.detectPlateRough(image,image.shape[0],top_bottom_padding_rate=0.1) - jsons = [] - for j,plate in enumerate(images): - plate,rect,origin_plate =plate - res, confidence = e2e.recognizeOne(origin_plate) - print("res",res) - - cv2.imwrite("./"+str(j)+"_rough.jpg",plate) - - # print "车牌类型:",ptype - # plate = cv2.cvtColor(plate, cv2.COLOR_RGB2GRAY) - plate =cv2.resize(plate,(136,int(36*2.5))) - t1 = time.time() - - ptype = td.SimplePredict(plate) - if ptype>0 and ptype<4: - plate = cv2.bitwise_not(plate) - # demo = verticalEdgeDetection(plate) - - image_rgb = fm.findContoursAndDrawBoundingBox(plate) - image_rgb = fv.finemappingVertical(image_rgb) - cache.verticalMappingToFolder(image_rgb) - # print time.time() - t1,"校正" - print("e2e:",e2e.recognizeOne(image_rgb)[0]) - image_gray = cv2.cvtColor(image_rgb,cv2.COLOR_BGR2GRAY) - - cv2.imwrite("./"+str(j)+".jpg",image_gray) - # image_gray = horizontalSegmentation(image_gray) - - t2 = time.time() - res, confidence = e2e.recognizeOne(image_rgb) - res_json = {} - if confidence > 0.6: - res_json["Name"] = res - res_json["Type"] = td.plateType[ptype] - res_json["Confidence"] = confidence; - res_json["x"] = int(rect[0]) - res_json["y"] = int(rect[1]) - res_json["w"] = int(rect[2]) - res_json["h"] = int(rect[3]) - jsons.append(res_json) - print(json.dumps(jsons,ensure_ascii=False,encoding="gb2312")) - - return json.dumps(jsons,ensure_ascii=False,encoding="gb2312") - - - - -def SimpleRecognizePlateByE2E(image): - t0 = time.time() - images = detect.detectPlateRough(image,image.shape[0],top_bottom_padding_rate=0.1) - res_set = [] - for j,plate in enumerate(images): - plate, rect, origin_plate =plate - # plate = cv2.cvtColor(plate, cv2.COLOR_RGB2GRAY) - plate =cv2.resize(plate,(136,36*2)) - res,confidence = e2e.recognizeOne(origin_plate) - print("res",res) - - t1 = time.time() - ptype = td.SimplePredict(plate) - if ptype>0 and ptype<5: - # pass - plate = cv2.bitwise_not(plate) - image_rgb = fm.findContoursAndDrawBoundingBox(plate) - image_rgb = fv.finemappingVertical(image_rgb) - image_rgb = fv.finemappingVertical(image_rgb) - cache.verticalMappingToFolder(image_rgb) - cv2.imwrite("./"+str(j)+".jpg",image_rgb) - res,confidence = e2e.recognizeOne(image_rgb) - print(res,confidence) - res_set.append([[],res,confidence]) - - if confidence>0.7: - image = drawRectBox(image, rect, res+" "+str(round(confidence,3))) - return image,res_set - - -def SimpleRecognizePlate(image): - t0 = time.time() - images = detect.detectPlateRough(image,image.shape[0],top_bottom_padding_rate=0.1) - res_set = [] - for j,plate in enumerate(images): - plate, rect, origin_plate =plate - # plate = cv2.cvtColor(plate, cv2.COLOR_RGB2GRAY) - plate =cv2.resize(plate,(136,36*2)) - t1 = time.time() - - ptype = td.SimplePredict(plate) - if ptype>0 and ptype<5: - plate = cv2.bitwise_not(plate) - - image_rgb = fm.findContoursAndDrawBoundingBox(plate) - - image_rgb = fv.finemappingVertical(image_rgb) - cache.verticalMappingToFolder(image_rgb) - print("e2e:", e2e.recognizeOne(image_rgb)) - image_gray = cv2.cvtColor(image_rgb,cv2.COLOR_RGB2GRAY) - - # image_gray = horizontalSegmentation(image_gray) - cv2.imshow("image_gray",image_gray) - # cv2.waitKey() - - cv2.imwrite("./"+str(j)+".jpg",image_gray) - # cv2.imshow("image",image_gray) - # cv2.waitKey(0) - print("校正",time.time() - t1,"s") - # cv2.imshow("image,",image_gray) - # cv2.waitKey(0) - t2 = time.time() - val = segmentation.slidingWindowsEval(image_gray) - # print 
val - print("分割和识别",time.time() - t2,"s") - if len(val)==3: - blocks, res, confidence = val - if confidence/7>0.7: - image = drawRectBox(image,rect,res) - res_set.append(res) - for i,block in enumerate(blocks): - - block_ = cv2.resize(block,(25,25)) - block_ = cv2.cvtColor(block_,cv2.COLOR_GRAY2BGR) - image[j * 25:(j * 25) + 25, i * 25:(i * 25) + 25] = block_ - if image[j*25:(j*25)+25,i*25:(i*25)+25].shape == block_.shape: - pass - - - if confidence>0: - print("车牌:",res,"置信度:",confidence/7) - else: - pass - - # print "不确定的车牌:", res, "置信度:", confidence - - print(time.time() - t0,"s") - return image,res_set - - - - diff --git a/hyperlpr_py3/plateStructure.py b/hyperlpr_py3/plateStructure.py deleted file mode 100644 index e69de29..0000000 diff --git a/hyperlpr_py3/precise.py b/hyperlpr_py3/precise.py deleted file mode 100644 index e69de29..0000000 diff --git a/hyperlpr_py3/recognizer.py b/hyperlpr_py3/recognizer.py deleted file mode 100644 index 72171b3..0000000 --- a/hyperlpr_py3/recognizer.py +++ /dev/null @@ -1,154 +0,0 @@ -#coding=utf-8 -from keras.models import Sequential -from keras.layers import Dense, Dropout, Activation, Flatten -from keras.layers import Conv2D,MaxPool2D -from keras.optimizers import SGD -from keras import backend as K - -K.set_image_dim_ordering('tf') - - -import cv2 -import numpy as np - - - -index = {"京": 0, "沪": 1, "津": 2, "渝": 3, "冀": 4, "晋": 5, "蒙": 6, "辽": 7, "吉": 8, "黑": 9, "苏": 10, "浙": 11, "皖": 12, - "闽": 13, "赣": 14, "鲁": 15, "豫": 16, "鄂": 17, "湘": 18, "粤": 19, "桂": 20, "琼": 21, "川": 22, "贵": 23, "云": 24, - "藏": 25, "陕": 26, "甘": 27, "青": 28, "宁": 29, "新": 30, "0": 31, "1": 32, "2": 33, "3": 34, "4": 35, "5": 36, - "6": 37, "7": 38, "8": 39, "9": 40, "A": 41, "B": 42, "C": 43, "D": 44, "E": 45, "F": 46, "G": 47, "H": 48, - "J": 49, "K": 50, "L": 51, "M": 52, "N": 53, "P": 54, "Q": 55, "R": 56, "S": 57, "T": 58, "U": 59, "V": 60, - "W": 61, "X": 62, "Y": 63, "Z": 64,"港":65,"学":66 ,"O":67 ,"使":68,"警":69,"澳":70,"挂":71}; - -chars = ["京", "沪", "津", "渝", "冀", "晋", "蒙", "辽", "吉", "黑", "苏", "浙", "皖", "闽", "赣", "鲁", "豫", "鄂", "湘", "粤", "桂", - "琼", "川", "贵", "云", "藏", "陕", "甘", "青", "宁", "新", "0", "1", "2", "3", "4", "5", "6", "7", "8", "9", "A", - "B", "C", "D", "E", "F", "G", "H", "J", "K", "L", "M", "N", "P", - "Q", "R", "S", "T", "U", "V", "W", "X", - "Y", "Z","港","学","O","使","警","澳","挂" ]; - - - -def Getmodel_tensorflow(nb_classes): - # nb_classes = len(charset) - - img_rows, img_cols = 23, 23 - # number of convolutional filters to use - nb_filters = 32 - # size of pooling area for max pooling - nb_pool = 2 - # convolution kernel size - nb_conv = 3 - - # x = np.load('x.npy') - - # y = np_utils.to_categorical(range(3062)*45*5*2, nb_classes) - # weight = ((type_class - np.arange(type_class)) / type_class + 1) ** 3 - # weight = dict(zip(range(3063), weight / weight.mean())) # 调整权重,高频字优先 - - model = Sequential() - model.add(Conv2D(32, (5, 5),input_shape=(img_rows, img_cols,1))) - model.add(Activation('relu')) - model.add(MaxPool2D(pool_size=(nb_pool, nb_pool))) - model.add(Dropout(0.25)) - model.add(Conv2D(32, (3, 3))) - model.add(Activation('relu')) - model.add(MaxPool2D(pool_size=(nb_pool, nb_pool))) - model.add(Dropout(0.25)) - model.add(Conv2D(512, (3, 3))) - # model.add(Activation('relu')) - # model.add(MaxPooling2D(pool_size=(nb_pool, nb_pool))) - # model.add(Dropout(0.25)) - model.add(Flatten()) - model.add(Dense(512)) - model.add(Activation('relu')) - model.add(Dropout(0.5)) - model.add(Dense(nb_classes)) - model.add(Activation('softmax')) - 
model.compile(loss='categorical_crossentropy', - optimizer='adam', - metrics=['accuracy']) - return model - - - - -def Getmodel_ch(nb_classes): - # nb_classes = len(charset) - - img_rows, img_cols = 23, 23 - # number of convolutional filters to use - nb_filters = 32 - # size of pooling area for max pooling - nb_pool = 2 - # convolution kernel size - nb_conv = 3 - - # x = np.load('x.npy') - # y = np_utils.to_categorical(range(3062)*45*5*2, nb_classes) - # weight = ((type_class - np.arange(type_class)) / type_class + 1) ** 3 - # weight = dict(zip(range(3063), weight / weight.mean())) # 调整权重,高频字优先 - - model = Sequential() - model.add(Conv2D(32, (5, 5),input_shape=(img_rows, img_cols,1))) - model.add(Activation('relu')) - model.add(MaxPool2D(pool_size=(nb_pool, nb_pool))) - model.add(Dropout(0.25)) - model.add(Conv2D(32, (3, 3))) - model.add(Activation('relu')) - model.add(MaxPool2D(pool_size=(nb_pool, nb_pool))) - model.add(Dropout(0.25)) - model.add(Conv2D(512, (3, 3))) - # model.add(Activation('relu')) - # model.add(MaxPooling2D(pool_size=(nb_pool, nb_pool))) - # model.add(Dropout(0.25)) - model.add(Flatten()) - model.add(Dense(756)) - model.add(Activation('relu')) - model.add(Dropout(0.5)) - model.add(Dense(nb_classes)) - model.add(Activation('softmax')) - model.compile(loss='categorical_crossentropy', - optimizer='adam', - metrics=['accuracy']) - return model - - - -model = Getmodel_tensorflow(65) -#构建网络 - -model_ch = Getmodel_ch(31) - -model_ch.load_weights("./model/char_chi_sim.h5") -# model_ch.save_weights("./model/char_chi_sim.h5") -model.load_weights("./model/char_rec.h5") -# model.save("./model/char_rec.h5") - - -def SimplePredict(image,pos): - image = cv2.resize(image, (23, 23)) - image = cv2.equalizeHist(image) - image = image.astype(np.float) / 255 - image -= image.mean() - image = np.expand_dims(image, 3) - if pos!=0: - res = np.array(model.predict(np.array([image]))[0]) - else: - res = np.array(model_ch.predict(np.array([image]))[0]) - - zero_add = 0 ; - - if pos==0: - res = res[:31] - elif pos==1: - res = res[31+10:65] - zero_add = 31+10 - else: - res = res[31:] - zero_add = 31 - - max_id = res.argmax() - - - return res.max(),chars[max_id+zero_add],max_id+zero_add - diff --git a/hyperlpr_py3/segmentation.py b/hyperlpr_py3/segmentation.py deleted file mode 100644 index 365489a..0000000 --- a/hyperlpr_py3/segmentation.py +++ /dev/null @@ -1,307 +0,0 @@ -#coding=utf-8 -import cv2 -import numpy as np - -# from matplotlib import pyplot as plt -import scipy.ndimage.filters as f -import scipy - -import time -import scipy.signal as l - - - - - -from keras.models import Sequential -from keras.layers import Dense, Dropout, Activation, Flatten -from keras.layers import Conv2D, MaxPool2D -from keras.optimizers import SGD -from keras import backend as K - -K.set_image_dim_ordering('tf') - - -def Getmodel_tensorflow(nb_classes): - # nb_classes = len(charset) - img_rows, img_cols = 23, 23 - # number of convolutional filters to use - nb_filters = 16 - # size of pooling area for max pooling - nb_pool = 2 - # convolution kernel size - nb_conv = 3 - # x = np.load('x.npy') - # y = np_utils.to_categorical(range(3062)*45*5*2, nb_classes) - # weight = ((type_class - np.arange(type_class)) / type_class + 1) ** 3 - # weight = dict(zip(range(3063), weight / weight.mean())) # 调整权重,高频字优先 - - model = Sequential() - model.add(Conv2D(nb_filters, (nb_conv, nb_conv),input_shape=(img_rows, img_cols,1))) - model.add(Activation('relu')) - model.add(MaxPool2D(pool_size=(nb_pool, nb_pool))) - 
model.add(Conv2D(nb_filters, (nb_conv, nb_conv))) - model.add(Activation('relu')) - model.add(MaxPool2D(pool_size=(nb_pool, nb_pool))) - model.add(Flatten()) - model.add(Dense(256)) - model.add(Dropout(0.5)) - - model.add(Activation('relu')) - model.add(Dense(nb_classes)) - model.add(Activation('softmax')) - model.compile(loss='categorical_crossentropy', - optimizer='sgd', - metrics=['accuracy']) - return model - - - -def Getmodel_tensorflow_light(nb_classes): - # nb_classes = len(charset) - img_rows, img_cols = 23, 23 - # number of convolutional filters to use - nb_filters = 8 - # size of pooling area for max pooling - nb_pool = 2 - # convolution kernel size - nb_conv = 3 - # x = np.load('x.npy') - # y = np_utils.to_categorical(range(3062)*45*5*2, nb_classes) - # weight = ((type_class - np.arange(type_class)) / type_class + 1) ** 3 - # weight = dict(zip(range(3063), weight / weight.mean())) # 调整权重,高频字优先 - - model = Sequential() - model.add(Conv2D(nb_filters, (nb_conv, nb_conv),input_shape=(img_rows, img_cols, 1))) - model.add(Activation('relu')) - model.add(MaxPool2D(pool_size=(nb_pool, nb_pool))) - model.add(Conv2D(nb_filters, (nb_conv * 2, nb_conv * 2))) - model.add(Activation('relu')) - model.add(MaxPool2D(pool_size=(nb_pool, nb_pool))) - model.add(Flatten()) - model.add(Dense(32)) - # model.add(Dropout(0.25)) - - model.add(Activation('relu')) - model.add(Dense(nb_classes)) - model.add(Activation('softmax')) - model.compile(loss='categorical_crossentropy', - optimizer='adam', - metrics=['accuracy']) - return model - - - - -model = Getmodel_tensorflow_light(3) -model2 = Getmodel_tensorflow(3) - -import os -model.load_weights("./model/char_judgement1.h5") -# model.save("./model/char_judgement1.h5") -model2.load_weights("./model/char_judgement.h5") -# model2.save("./model/char_judgement.h5") - - -model = model2 -def get_median(data): - data = sorted(data) - size = len(data) - # print size - - if size % 2 == 0: # 判断列表长度为偶数 - median = (data[size//2]+data[size//2-1])//2 - data[0] = median - if size % 2 == 1: # 判断列表长度为奇数 - median = data[(size-1)//2] - data[0] = median - return data[0] -import time - -def searchOptimalCuttingPoint(rgb,res_map,start,width_boundingbox,interval_range): - t0 = time.time() - # - # for x in xrange(10): - # res_map = np.vstack((res_map,res_map[-1])) - length = res_map.shape[0] - refine_s = -2; - - if width_boundingbox>20: - refine_s = -9 - score_list = [] - interval_big = int(width_boundingbox * 0.3) # - p = 0 - for zero_add in range(start,start+50,3): - # for interval_small in xrange(-0,width_boundingbox/2): - for i in range(-8,int(width_boundingbox/1)-8): - for refine in range(refine_s, int(width_boundingbox/2+3)): - p1 = zero_add# this point is province - p2 = p1 + width_boundingbox +refine # - p3 = p2 + width_boundingbox + interval_big+i+1 - p4 = p3 + width_boundingbox +refine - p5 = p4 + width_boundingbox +refine - p6 = p5 + width_boundingbox +refine - p7 = p6 + width_boundingbox +refine - if p7>=length: - continue - score = res_map[p1][2]*3 -(res_map[p3][1]+res_map[p4][1]+res_map[p5][1]+res_map[p6][1]+res_map[p7][1])+7 - # print score - score_list.append([score,[p1,p2,p3,p4,p5,p6,p7]]) - p+=1 - print(p) - - score_list = sorted(score_list , key=lambda x:x[0]) - # for one in score_list[-1][1]: - # cv2.line(debug,(one,0),(one,36),(255,0,0),1) - # # - # cv2.imshow("one",debug) - # cv2.waitKey(0) - # - print("寻找最佳点",time.time()-t0) - return score_list[-1] - - -import sys - -sys.path.append('../') -from . import recognizer as cRP -from . 
import niblack_thresholding as nt - -def refineCrop(sections,width=16): - new_sections = [] - for section in sections: - # cv2.imshow("section¡",section) - - # cv2.blur(section,(3,3),3) - - sec_center = np.array([section.shape[1]/2,section.shape[0]/2]) - binary_niblack = nt.niBlackThreshold(section,17,-0.255) - imagex, contours, hierarchy = cv2.findContours(binary_niblack,cv2.RETR_EXTERNAL,cv2.CHAIN_APPROX_SIMPLE) - boxs = [] - for contour in contours: - x,y,w,h = cv2.boundingRect(contour) - - ratio = w/float(h) - if ratio<1 and h>36*0.4 and y<16\ - : - box = [x,y,w,h] - - boxs.append([box,np.array([x+w/2,y+h/2])]) - # cv2.rectangle(section,(x,y),(x+w,y+h),255,1) - - - - - # print boxs - - dis_ = np.array([ ((one[1]-sec_center)**2).sum() for one in boxs]) - if len(dis_)==0: - kernal = [0, 0, section.shape[1], section.shape[0]] - else: - kernal = boxs[dis_.argmin()][0] - - center_c = (kernal[0]+kernal[2]/2,kernal[1]+kernal[3]/2) - w_2 = int(width/2) - h_2 = kernal[3]/2 - - if center_c[0] - w_2< 0: - w_2 = center_c[0] - new_box = [center_c[0] - w_2,kernal[1],width,kernal[3]] - # print new_box[2]/float(new_box[3]) - if new_box[2]/float(new_box[3])>0.5: - # print "异常" - h = int((new_box[2]/0.35 )/2) - if h>35: - h = 35 - new_box[1] = center_c[1]- h - if new_box[1]<0: - new_box[1] = 1 - new_box[3] = h*2 - - section = section[int(new_box[1]):int(new_box[1]+new_box[3]), int(new_box[0]):int(new_box[0]+new_box[2])] - # cv2.imshow("section",section) - # cv2.waitKey(0) - new_sections.append(section) - # print new_box - return new_sections - - -def slidingWindowsEval(image): - windows_size = 16; - stride = 1 - height= image.shape[0] - t0 = time.time() - data_sets = [] - - for i in range(0,image.shape[1]-windows_size+1,stride): - data = image[0:height,i:i+windows_size] - data = cv2.resize(data,(23,23)) - # cv2.imshow("image",data) - data = cv2.equalizeHist(data) - data = data.astype(np.float)/255 - data= np.expand_dims(data,3) - data_sets.append(data) - - res = model2.predict(np.array(data_sets)) - print("分割",time.time() - t0) - - pin = res - p = 1 - (res.T)[1] - p = f.gaussian_filter1d(np.array(p,dtype=np.float),3) - lmin = l.argrelmax(np.array(p),order = 3)[0] - interval = [] - for i in range(len(lmin)-1): - interval.append(lmin[i+1]-lmin[i]) - - if(len(interval)>3): - mid = get_median(interval) - else: - return [] - pin = np.array(pin) - res = searchOptimalCuttingPoint(image,pin,0,mid,3) - - cutting_pts = res[1] - last = cutting_pts[-1] + mid - if last < image.shape[1]: - cutting_pts.append(last) - else: - cutting_pts.append(image.shape[1]-1) - name = "" - confidence =0.00 - seg_block = [] - for x in range(1,len(cutting_pts)): - if x != len(cutting_pts)-1 and x!=1: - section = image[0:36,cutting_pts[x-1]-2:cutting_pts[x]+2] - elif x==1: - c_head = cutting_pts[x - 1]- 2 - if c_head<0: - c_head=0 - c_tail = cutting_pts[x] + 2 - section = image[0:36, c_head:c_tail] - elif x==len(cutting_pts)-1: - end = cutting_pts[x] - diff = image.shape[1]-end - c_head = cutting_pts[x - 1] - c_tail = cutting_pts[x] - if diff<7 : - section = image[0:36, c_head-5:c_tail+5] - else: - diff-=1 - section = image[0:36, c_head - diff:c_tail + diff] - elif x==2: - section = image[0:36, cutting_pts[x - 1] - 3:cutting_pts[x-1]+ mid] - else: - section = image[0:36,cutting_pts[x-1]:cutting_pts[x]] - seg_block.append(section) - refined = refineCrop(seg_block,mid-1) - - t0 = time.time() - for i,one in enumerate(refined): - res_pre = cRP.SimplePredict(one, i ) - # cv2.imshow(str(i),one) - # cv2.waitKey(0) - confidence+=res_pre[0] - 
name+= res_pre[1] - print("字符识别",time.time() - t0) - - return refined,name,confidence diff --git a/hyperlpr_py3/typeDistinguish.py b/hyperlpr_py3/typeDistinguish.py deleted file mode 100644 index d3e538a..0000000 --- a/hyperlpr_py3/typeDistinguish.py +++ /dev/null @@ -1,56 +0,0 @@ -#coding=utf-8 -from keras.models import Sequential -from keras.layers import Dense, Dropout, Activation, Flatten -from keras.layers import Conv2D, MaxPool2D -from keras.optimizers import SGD -from keras import backend as K - -K.set_image_dim_ordering('tf') - - -import cv2 -import numpy as np - - -plateType = ["蓝牌","单层黄牌","新能源车牌","白色","黑色-港澳"] -def Getmodel_tensorflow(nb_classes): - # nb_classes = len(charset) - - img_rows, img_cols = 9, 34 - # number of convolutional filters to use - nb_filters = 32 - # size of pooling area for max pooling - nb_pool = 2 - # convolution kernel size - nb_conv = 3 - - # x = np.load('x.npy') - # y = np_utils.to_categorical(range(3062)*45*5*2, nb_classes) - # weight = ((type_class - np.arange(type_class)) / type_class + 1) ** 3 - # weight = dict(zip(range(3063), weight / weight.mean())) # 调整权重,高频字优先 - - model = Sequential() - model.add(Conv2D(16, (5, 5),input_shape=(img_rows, img_cols,3))) - model.add(Activation('relu')) - model.add(MaxPool2D(pool_size=(nb_pool, nb_pool))) - model.add(Flatten()) - model.add(Dense(64)) - model.add(Activation('relu')) - model.add(Dropout(0.5)) - model.add(Dense(nb_classes)) - model.add(Activation('softmax')) - model.compile(loss='categorical_crossentropy', - optimizer='adam', - metrics=['accuracy']) - return model - -model = Getmodel_tensorflow(5) -model.load_weights("./model/plate_type.h5") -model.save("./model/plate_type.h5") -def SimplePredict(image): - image = cv2.resize(image, (34, 9)) - image = image.astype(np.float) / 255 - res = np.array(model.predict(np.array([image]))[0]) - return res.argmax() - - diff --git a/wxpy_uploader.py b/wxpy_uploader.py index ee54ade..00f3092 100644 --- a/wxpy_uploader.py +++ b/wxpy_uploader.py @@ -17,8 +17,8 @@ bot = Bot(console_qr=True, cache_path=True) def pr_msg(msg): image_name = msg.file_name friend = msg.chat - print msg.chat - print '接收图片' + print(msg.chat) + print('接收图片') # face(image_name) msg.get_file('' + msg.file_name) json_text = recognize(image_name)
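To close, a usage sketch for the plate-type classifier deleted above: typeDistinguish.SimplePredict resizes a plate crop to 34x9, scales it to [0, 1] floats, and argmaxes the five-way softmax into plateType. The stand-ins below (a flat blue-ish crop and a fake predict function) replace a real image and the Keras model, which this sketch does not load:

import cv2
import numpy as np

plateType = ["蓝牌", "单层黄牌", "新能源车牌", "白色", "黑色-港澳"]

def classify_plate_type(image_bgr, predict_fn):
    # Same preprocessing as the deleted SimplePredict: 34x9 input, [0,1] floats.
    image = cv2.resize(image_bgr, (34, 9)).astype(np.float32) / 255
    res = np.array(predict_fn(np.array([image]))[0])
    return plateType[res.argmax()]

dummy = np.zeros((36, 136, 3), np.uint8)
dummy[:] = (180, 80, 20)                              # BGR: blue-ish plate crop
fake_predict = lambda batch: np.array([[0.7, 0.1, 0.1, 0.05, 0.05]])
print(classify_plate_type(dummy, fake_predict))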