You cannot select more than 25 topics. Topics must start with a Chinese character, a letter or a number, can include dashes ('-'), and can be up to 35 characters long.

recognizer.py 5.2 kB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154
  1. #coding=utf-8
  2. from keras.models import Sequential
  3. from keras.layers import Dense, Dropout, Activation, Flatten
  4. from keras.layers import Conv2D,MaxPool2D
  5. from keras.optimizers import SGD
  6. from keras import backend as K
  7. K.image_data_format()
  8. import cv2
  9. import numpy as np
  10. index = {"京": 0, "沪": 1, "津": 2, "渝": 3, "冀": 4, "晋": 5, "蒙": 6, "辽": 7, "吉": 8, "黑": 9, "苏": 10, "浙": 11, "皖": 12,
  11. "闽": 13, "赣": 14, "鲁": 15, "豫": 16, "鄂": 17, "湘": 18, "粤": 19, "桂": 20, "琼": 21, "川": 22, "贵": 23, "云": 24,
  12. "藏": 25, "陕": 26, "甘": 27, "青": 28, "宁": 29, "新": 30, "0": 31, "1": 32, "2": 33, "3": 34, "4": 35, "5": 36,
  13. "6": 37, "7": 38, "8": 39, "9": 40, "A": 41, "B": 42, "C": 43, "D": 44, "E": 45, "F": 46, "G": 47, "H": 48,
  14. "J": 49, "K": 50, "L": 51, "M": 52, "N": 53, "P": 54, "Q": 55, "R": 56, "S": 57, "T": 58, "U": 59, "V": 60,
  15. "W": 61, "X": 62, "Y": 63, "Z": 64,"港":65,"学":66 ,"O":67 ,"使":68,"警":69,"澳":70,"挂":71};
  16. chars = ["京", "沪", "津", "渝", "冀", "晋", "蒙", "辽", "吉", "黑", "苏", "浙", "皖", "闽", "赣", "鲁", "豫", "鄂", "湘", "粤", "桂",
  17. "琼", "川", "贵", "云", "藏", "陕", "甘", "青", "宁", "新", "0", "1", "2", "3", "4", "5", "6", "7", "8", "9", "A",
  18. "B", "C", "D", "E", "F", "G", "H", "J", "K", "L", "M", "N", "P",
  19. "Q", "R", "S", "T", "U", "V", "W", "X",
  20. "Y", "Z","港","学","O","使","警","澳","挂" ];
  21. def Getmodel_tensorflow(nb_classes):
  22. # nb_classes = len(charset)
  23. img_rows, img_cols = 23, 23
  24. # number of convolutional filters to use
  25. nb_filters = 32
  26. # size of pooling area for max pooling
  27. nb_pool = 2
  28. # convolution kernel size
  29. nb_conv = 3
  30. # x = np.load('x.npy')
  31. # y = np_utils.to_categorical(range(3062)*45*5*2, nb_classes)
  32. # weight = ((type_class - np.arange(type_class)) / type_class + 1) ** 3
  33. # weight = dict(zip(range(3063), weight / weight.mean())) # 调整权重,高频字优先
  34. model = Sequential()
  35. model.add(Conv2D(32, (5, 5),input_shape=(img_rows, img_cols,1)))
  36. model.add(Activation('relu'))
  37. model.add(MaxPool2D(pool_size=(nb_pool, nb_pool)))
  38. model.add(Dropout(0.25))
  39. model.add(Conv2D(32, (3, 3)))
  40. model.add(Activation('relu'))
  41. model.add(MaxPool2D(pool_size=(nb_pool, nb_pool)))
  42. model.add(Dropout(0.25))
  43. model.add(Conv2D(512, (3, 3)))
  44. # model.add(Activation('relu'))
  45. # model.add(MaxPooling2D(pool_size=(nb_pool, nb_pool)))
  46. # model.add(Dropout(0.25))
  47. model.add(Flatten())
  48. model.add(Dense(512))
  49. model.add(Activation('relu'))
  50. model.add(Dropout(0.5))
  51. model.add(Dense(nb_classes))
  52. model.add(Activation('softmax'))
  53. model.compile(loss='categorical_crossentropy',
  54. optimizer='adam',
  55. metrics=['accuracy'])
  56. return model
  57. def Getmodel_ch(nb_classes):
  58. # nb_classes = len(charset)
  59. img_rows, img_cols = 23, 23
  60. # number of convolutional filters to use
  61. nb_filters = 32
  62. # size of pooling area for max pooling
  63. nb_pool = 2
  64. # convolution kernel size
  65. nb_conv = 3
  66. # x = np.load('x.npy')
  67. # y = np_utils.to_categorical(range(3062)*45*5*2, nb_classes)
  68. # weight = ((type_class - np.arange(type_class)) / type_class + 1) ** 3
  69. # weight = dict(zip(range(3063), weight / weight.mean())) # 调整权重,高频字优先
  70. model = Sequential()
  71. model.add(Conv2D(32, (5, 5),input_shape=(img_rows, img_cols,1)))
  72. model.add(Activation('relu'))
  73. model.add(MaxPool2D(pool_size=(nb_pool, nb_pool)))
  74. model.add(Dropout(0.25))
  75. model.add(Conv2D(32, (3, 3)))
  76. model.add(Activation('relu'))
  77. model.add(MaxPool2D(pool_size=(nb_pool, nb_pool)))
  78. model.add(Dropout(0.25))
  79. model.add(Conv2D(512, (3, 3)))
  80. # model.add(Activation('relu'))
  81. # model.add(MaxPooling2D(pool_size=(nb_pool, nb_pool)))
  82. # model.add(Dropout(0.25))
  83. model.add(Flatten())
  84. model.add(Dense(756))
  85. model.add(Activation('relu'))
  86. model.add(Dropout(0.5))
  87. model.add(Dense(nb_classes))
  88. model.add(Activation('softmax'))
  89. model.compile(loss='categorical_crossentropy',
  90. optimizer='adam',
  91. metrics=['accuracy'])
  92. return model
# Build the two recognizers at import time and load pretrained weights
# from ./model/.  `model` handles digits/letters (positions 1+);
# `model_ch` handles the leading Chinese province character.
# NOTE(review): 65 classes covers chars[0:65] only — the trailing special
# marks (港/学/O/使/警/澳/挂) are apparently not produced by this
# checkpoint; confirm against the training setup.
model = Getmodel_tensorflow(65)
# build the networks
model_ch = Getmodel_ch(31)
model_ch.load_weights("./model/char_chi_sim.h5")
# model_ch.save_weights("./model/char_chi_sim.h5")
model.load_weights("./model/char_rec.h5")
# model.save("./model/char_rec.h5")
  100. def SimplePredict(image,pos):
  101. image = cv2.resize(image, (23, 23))
  102. image = cv2.equalizeHist(image)
  103. image = image.astype(np.float) / 255
  104. image -= image.mean()
  105. image = np.expand_dims(image, 3)
  106. if pos!=0:
  107. res = np.array(model.predict(np.array([image]))[0])
  108. else:
  109. res = np.array(model_ch.predict(np.array([image]))[0])
  110. zero_add = 0 ;
  111. if pos==0:
  112. res = res[:31]
  113. elif pos==1:
  114. res = res[31+10:65]
  115. zero_add = 31+10
  116. else:
  117. res = res[31:]
  118. zero_add = 31
  119. max_id = res.argmax()
  120. return res.max(),chars[max_id+zero_add],max_id+zero_add