You cannot select more than 25 topics. Topics must start with a Chinese character, a letter or a number, can include dashes ('-') and can be up to 35 characters long.

recognizer.py 5.3 kB

7 years ago
7 years ago
7 years ago
7 years ago
7 years ago
7 years ago
7 years ago
7 years ago
7 years ago
7 years ago
7 years ago
7 years ago
7 years ago
7 years ago
7 years ago
7 years ago
7 years ago
7 years ago
7 years ago
7 years ago
7 years ago
7 years ago
7 years ago
7 years ago
7 years ago
7 years ago
7 years ago
7 years ago
123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154
  1. #coding=utf-8
  2. from keras.models import Sequential
  3. from keras.layers import Dense, Dropout, Activation, Flatten
  4. from keras.layers import Conv2D,MaxPool2D
  5. from keras.optimizers import SGD
  6. from keras import backend as K
  7. K.image_data_format()
  8. import cv2
  9. import numpy as np
  10. index = {u"京": 0, u"沪": 1, u"津": 2, u"渝": 3, u"冀": 4, u"晋": 5, u"蒙": 6, u"辽": 7, u"吉": 8, u"黑": 9, u"苏": 10, u"浙": 11, u"皖": 12,
  11. u"闽": 13, u"赣": 14, u"鲁": 15, u"豫": 16, u"鄂": 17, u"湘": 18, u"粤": 19, u"桂": 20, u"琼": 21, u"川": 22, u"贵": 23, u"云": 24,
  12. u"藏": 25, u"陕": 26, u"甘": 27, u"青": 28, u"宁": 29, u"新": 30, u"0": 31, u"1": 32, u"2": 33, u"3": 34, u"4": 35, u"5": 36,
  13. u"6": 37, u"7": 38, u"8": 39, u"9": 40, u"A": 41, u"B": 42, u"C": 43, u"D": 44, u"E": 45, u"F": 46, u"G": 47, u"H": 48,
  14. u"J": 49, u"K": 50, u"L": 51, u"M": 52, u"N": 53, u"P": 54, u"Q": 55, u"R": 56, u"S": 57, u"T": 58, u"U": 59, u"V": 60,
  15. u"W": 61, u"X": 62, u"Y": 63, u"Z": 64,u"港":65,u"学":66 ,u"O":67 ,u"使":68,u"警":69,u"澳":70,u"挂":71};
  16. chars = ["京", "沪", "津", "渝", "冀", "晋", "蒙", "辽", "吉", "黑", "苏", "浙", "皖", "闽", "赣", "鲁", "豫", "鄂", "湘", "粤", "桂",
  17. "琼", "川", "贵", "云", "藏", "陕", "甘", "青", "宁", "新", "0", "1", "2", "3", "4", "5", "6", "7", "8", "9", "A",
  18. "B", "C", "D", "E", "F", "G", "H", "J", "K", "L", "M", "N", "P",
  19. "Q", "R", "S", "T", "U", "V", "W", "X",
  20. "Y", "Z","港","学","O","使","警","澳","挂" ];
  21. def Getmodel_tensorflow(nb_classes):
  22. # nb_classes = len(charset)
  23. img_rows, img_cols = 23, 23
  24. # number of convolutional filters to use
  25. nb_filters = 32
  26. # size of pooling area for max pooling
  27. nb_pool = 2
  28. # convolution kernel size
  29. nb_conv = 3
  30. # x = np.load('x.npy')
  31. # y = np_utils.to_categorical(range(3062)*45*5*2, nb_classes)
  32. # weight = ((type_class - np.arange(type_class)) / type_class + 1) ** 3
  33. # weight = dict(zip(range(3063), weight / weight.mean())) # 调整权重,高频字优先
  34. model = Sequential()
  35. model.add(Conv2D(32, (5, 5),input_shape=(img_rows, img_cols,1)))
  36. model.add(Activation('relu'))
  37. model.add(MaxPool2D(pool_size=(nb_pool, nb_pool)))
  38. model.add(Dropout(0.25))
  39. model.add(Conv2D(32, (3, 3)))
  40. model.add(Activation('relu'))
  41. model.add(MaxPool2D(pool_size=(nb_pool, nb_pool)))
  42. model.add(Dropout(0.25))
  43. model.add(Conv2D(512, (3, 3)))
  44. # model.add(Activation('relu'))
  45. # model.add(MaxPooling2D(pool_size=(nb_pool, nb_pool)))
  46. # model.add(Dropout(0.25))
  47. model.add(Flatten())
  48. model.add(Dense(512))
  49. model.add(Activation('relu'))
  50. model.add(Dropout(0.5))
  51. model.add(Dense(nb_classes))
  52. model.add(Activation('softmax'))
  53. model.compile(loss='categorical_crossentropy',
  54. optimizer='adam',
  55. metrics=['accuracy'])
  56. return model
  57. def Getmodel_ch(nb_classes):
  58. # nb_classes = len(charset)
  59. img_rows, img_cols = 23, 23
  60. # number of convolutional filters to use
  61. nb_filters = 32
  62. # size of pooling area for max pooling
  63. nb_pool = 2
  64. # convolution kernel size
  65. nb_conv = 3
  66. # x = np.load('x.npy')
  67. # y = np_utils.to_categorical(range(3062)*45*5*2, nb_classes)
  68. # weight = ((type_class - np.arange(type_class)) / type_class + 1) ** 3
  69. # weight = dict(zip(range(3063), weight / weight.mean())) # 调整权重,高频字优先
  70. model = Sequential()
  71. model.add(Conv2D(32, (5, 5),input_shape=(img_rows, img_cols,1)))
  72. model.add(Activation('relu'))
  73. model.add(MaxPool2D(pool_size=(nb_pool, nb_pool)))
  74. model.add(Dropout(0.25))
  75. model.add(Conv2D(32, (3, 3)))
  76. model.add(Activation('relu'))
  77. model.add(MaxPool2D(pool_size=(nb_pool, nb_pool)))
  78. model.add(Dropout(0.25))
  79. model.add(Conv2D(512, (3, 3)))
  80. # model.add(Activation('relu'))
  81. # model.add(MaxPooling2D(pool_size=(nb_pool, nb_pool)))
  82. # model.add(Dropout(0.25))
  83. model.add(Flatten())
  84. model.add(Dense(756))
  85. model.add(Activation('relu'))
  86. model.add(Dropout(0.5))
  87. model.add(Dense(nb_classes))
  88. model.add(Activation('softmax'))
  89. model.compile(loss='categorical_crossentropy',
  90. optimizer='adam',
  91. metrics=['accuracy'])
  92. return model
  93. model = Getmodel_tensorflow(65)
  94. #构建网络
  95. model_ch = Getmodel_ch(31)
  96. model_ch.load_weights("./model/char_chi_sim.h5")
  97. # model_ch.save_weights("./model/char_chi_sim.h5")
  98. model.load_weights("./model/char_rec.h5")
  99. # model.save("./model/char_rec.h5")
  100. def SimplePredict(image,pos):
  101. image = cv2.resize(image, (23, 23))
  102. image = cv2.equalizeHist(image)
  103. image = image.astype(np.float) / 255
  104. image -= image.mean()
  105. image = np.expand_dims(image, 3)
  106. if pos!=0:
  107. res = np.array(model.predict(np.array([image]))[0])
  108. else:
  109. res = np.array(model_ch.predict(np.array([image]))[0])
  110. zero_add = 0 ;
  111. if pos==0:
  112. res = res[:31]
  113. elif pos==1:
  114. res = res[31+10:65]
  115. zero_add = 31+10
  116. else:
  117. res = res[31:]
  118. zero_add = 31
  119. max_id = res.argmax()
  120. return res.max(),chars[max_id+zero_add],max_id+zero_add