
Merge pull request #214 from AlanNewImage/v2

update Proj-Win to V2
v2
syan (GitHub) · 5 years ago
parent · commit 49ab6d2ba3
No known key found for this signature in database. GPG Key ID: 4AEE18F83AFDEB23
49 changed files with 2693 additions and 1899 deletions
  1. BIN        Prj-Win/.vs/Prj-Win/v14/.suo
  2. BIN        Prj-Win/Prj-Win.VC.db
  3. +10 -17    Prj-Win/Prj-Win/Prj-Win.vcxproj
  4. +7 -37     Prj-Win/Prj-Win/Prj-Win.vcxproj.filters
  5. +16 -0     Prj-Win/Prj-Win/Release/Prj-Win.Build.CppClean.log
  6. +1 -0      Prj-Win/Prj-Win/Release/Prj-Win.log
  7. +1 -0      Prj-Win/Prj-Win/x64/Debug/Prj-Win.log
  8. +0 -24     Prj-Win/lpr/include/CNNRecognizer.h
  9. +0 -18     Prj-Win/lpr/include/FastDeskew.h
  10. +0 -32    Prj-Win/lpr/include/FineMapping.h
  11. +18 -0    Prj-Win/lpr/include/Finetune.h
  12. +39 -59   Prj-Win/lpr/include/Pipeline.h
  13. +0 -33    Prj-Win/lpr/include/PlateDetection.h
  14. +5 -5     Prj-Win/lpr/include/PlateInfo.h
  15. +15 -0    Prj-Win/lpr/include/PlateRecognation.h
  16. +0 -35    Prj-Win/lpr/include/PlateSegmentation.h
  17. +16 -0    Prj-Win/lpr/include/Platedetect.h
  18. +0 -23    Prj-Win/lpr/include/Recognizer.h
  19. +0 -28    Prj-Win/lpr/include/SegmentationFreeRecognizer.h
  20. +0 -109   Prj-Win/lpr/include/niBlackThreshold.h
  21. BIN       Prj-Win/lpr/model/HorizonalFinemapping.caffemodel
  22. +95 -0    Prj-Win/lpr/model/HorizonalFinemapping.prototxt
  23. +7 -13    Prj-Win/lpr/model/README.md
  24. BIN       Prj-Win/lpr/model/SegmenationFree-Inception.caffemodel
  25. +454 -0   Prj-Win/lpr/model/SegmenationFree-Inception.prototxt
  26. BIN       Prj-Win/lpr/model/mininet_ssd_v1.caffemodel
  27. +1462 -0  Prj-Win/lpr/model/mininet_ssd_v1.prototxt
  28. BIN       Prj-Win/lpr/model/refinenet.caffemodel
  29. +300 -0   Prj-Win/lpr/model/refinenet.prototxt
  30. BIN       Prj-Win/lpr/res/test.jpg
  31. +0 -19    Prj-Win/lpr/src/CNNRecognizer.cpp
  32. +0 -108   Prj-Win/lpr/src/FastDeskew.cpp
  33. +0 -170   Prj-Win/lpr/src/FineMapping.cpp
  34. +67 -0    Prj-Win/lpr/src/FineTune.cpp
  35. +34 -82   Prj-Win/lpr/src/Pipeline.cpp
  36. +58 -30   Prj-Win/lpr/src/PlateDetection.cpp
  37. +61 -0    Prj-Win/lpr/src/PlateRecognation.cpp
  38. +0 -404   Prj-Win/lpr/src/PlateSegmentation.cpp
  39. +0 -23    Prj-Win/lpr/src/Recognizer.cpp
  40. +0 -89    Prj-Win/lpr/src/SegmentationFreeRecognizer.cpp
  41. +0 -68    Prj-Win/lpr/src/util.h
  42. +27 -0    Prj-Win/lpr/tests/testPipeLine.cpp
  43. +0 -34    Prj-Win/lpr/tests/test_detection.cpp
  44. +0 -34    Prj-Win/lpr/tests/test_fastdeskew.cpp
  45. +0 -25    Prj-Win/lpr/tests/test_finemapping.cpp
  46. +0 -229   Prj-Win/lpr/tests/test_pipeline.cpp
  47. +0 -54    Prj-Win/lpr/tests/test_recognization.cpp
  48. +0 -43    Prj-Win/lpr/tests/test_segmentation.cpp
  49. +0 -54    Prj-Win/lpr/tests/test_segmentationFree.cpp

BIN  Prj-Win/.vs/Prj-Win/v14/.suo


BIN  Prj-Win/Prj-Win.VC.db


+10 -17  Prj-Win/Prj-Win/Prj-Win.vcxproj

@@ -34,7 +34,7 @@
<PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|Win32'" Label="Configuration">
<ConfigurationType>Application</ConfigurationType>
<UseDebugLibraries>false</UseDebugLibraries>
<PlatformToolset>v141</PlatformToolset>
<PlatformToolset>v140_xp</PlatformToolset>
<WholeProgramOptimization>true</WholeProgramOptimization>
<CharacterSet>Unicode</CharacterSet>
</PropertyGroup>
@@ -82,6 +82,8 @@
</PropertyGroup>
<PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">
<LinkIncremental>false</LinkIncremental>
<IncludePath>D:\opencv-3.4.2\build\install\include\opencv2;D:\opencv-3.4.2\build\install\include;$(IncludePath)</IncludePath>
<LibraryPath>D:\opencv-3.4.2\build\install\x86\vc14\lib;$(LibraryPath)</LibraryPath>
</PropertyGroup>
<PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'">
<LinkIncremental>false</LinkIncremental>
@@ -131,6 +133,7 @@
<EnableCOMDATFolding>true</EnableCOMDATFolding>
<OptimizeReferences>true</OptimizeReferences>
<GenerateDebugInformation>true</GenerateDebugInformation>
<AdditionalDependencies>opencv_aruco342.lib;opencv_aruco342d.lib;opencv_bgsegm342.lib;opencv_bgsegm342d.lib;opencv_bioinspired342.lib;opencv_bioinspired342d.lib;opencv_calib3d342.lib;opencv_calib3d342d.lib;opencv_ccalib342.lib;opencv_ccalib342d.lib;opencv_core342.lib;opencv_core342d.lib;opencv_datasets342.lib;opencv_datasets342d.lib;opencv_dnn342.lib;opencv_dnn342d.lib;opencv_dnn_objdetect342.lib;opencv_dnn_objdetect342d.lib;opencv_dpm342.lib;opencv_dpm342d.lib;opencv_face342.lib;opencv_face342d.lib;opencv_features2d342.lib;opencv_features2d342d.lib;opencv_flann342.lib;opencv_flann342d.lib;opencv_fuzzy342.lib;opencv_fuzzy342d.lib;opencv_hfs342.lib;opencv_hfs342d.lib;opencv_highgui342.lib;opencv_highgui342d.lib;opencv_imgcodecs342.lib;opencv_imgcodecs342d.lib;opencv_imgproc342.lib;opencv_imgproc342d.lib;opencv_img_hash342.lib;opencv_img_hash342d.lib;opencv_line_descriptor342.lib;opencv_line_descriptor342d.lib;opencv_ml342.lib;opencv_ml342d.lib;opencv_objdetect342.lib;opencv_objdetect342d.lib;opencv_optflow342.lib;opencv_optflow342d.lib;opencv_phase_unwrapping342.lib;opencv_phase_unwrapping342d.lib;opencv_photo342.lib;opencv_photo342d.lib;opencv_plot342.lib;opencv_plot342d.lib;opencv_reg342.lib;opencv_reg342d.lib;opencv_rgbd342.lib;opencv_rgbd342d.lib;opencv_saliency342.lib;opencv_saliency342d.lib;opencv_shape342.lib;opencv_shape342d.lib;opencv_stereo342.lib;opencv_stereo342d.lib;opencv_stitching342.lib;opencv_stitching342d.lib;opencv_structured_light342.lib;opencv_structured_light342d.lib;opencv_superres342.lib;opencv_superres342d.lib;opencv_surface_matching342.lib;opencv_surface_matching342d.lib;opencv_text342.lib;opencv_text342d.lib;opencv_tracking342.lib;opencv_tracking342d.lib;opencv_video342.lib;opencv_video342d.lib;opencv_videoio342.lib;opencv_videoio342d.lib;opencv_videostab342.lib;opencv_videostab342d.lib;opencv_xfeatures2d342.lib;opencv_xfeatures2d342d.lib;opencv_ximgproc342.lib;opencv_ximgproc342d.lib;opencv_xobjdetect342.lib;opencv_xobjdetect342d.lib;opencv_xphoto342.lib;opencv_xphoto342d.lib;%(AdditionalDependencies)</AdditionalDependencies>
</Link>
</ItemDefinitionGroup>
<ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'">
@@ -152,28 +155,18 @@
</Link>
</ItemDefinitionGroup>
<ItemGroup>
<ClInclude Include="..\lpr\include\CNNRecognizer.h" />
<ClInclude Include="..\lpr\include\FastDeskew.h" />
<ClInclude Include="..\lpr\include\FineMapping.h" />
<ClInclude Include="..\lpr\include\niBlackThreshold.h" />
<ClInclude Include="..\lpr\include\Finetune.h" />
<ClInclude Include="..\lpr\include\Pipeline.h" />
<ClInclude Include="..\lpr\include\PlateDetection.h" />
<ClInclude Include="..\lpr\include\Platedetect.h" />
<ClInclude Include="..\lpr\include\PlateInfo.h" />
<ClInclude Include="..\lpr\include\PlateSegmentation.h" />
<ClInclude Include="..\lpr\include\Recognizer.h" />
<ClInclude Include="..\lpr\include\SegmentationFreeRecognizer.h" />
<ClInclude Include="..\lpr\src\util.h" />
<ClInclude Include="..\lpr\include\PlateRecognation.h" />
</ItemGroup>
<ItemGroup>
<ClCompile Include="..\lpr\src\CNNRecognizer.cpp" />
<ClCompile Include="..\lpr\src\FastDeskew.cpp" />
<ClCompile Include="..\lpr\src\FineMapping.cpp" />
<ClCompile Include="..\lpr\src\FineTune.cpp" />
<ClCompile Include="..\lpr\src\Pipeline.cpp" />
<ClCompile Include="..\lpr\src\PlateDetection.cpp" />
<ClCompile Include="..\lpr\src\PlateSegmentation.cpp" />
<ClCompile Include="..\lpr\src\Recognizer.cpp" />
<ClCompile Include="..\lpr\src\SegmentationFreeRecognizer.cpp" />
<ClCompile Include="..\lpr\tests\test_pipeline.cpp" />
<ClCompile Include="..\lpr\src\PlateRecognation.cpp" />
<ClCompile Include="..\lpr\tests\testPipeLine.cpp" />
</ItemGroup>
<Import Project="$(VCTargetsPath)\Microsoft.Cpp.targets" />
<ImportGroup Label="ExtensionTargets">


+7 -37  Prj-Win/Prj-Win/Prj-Win.vcxproj.filters

@@ -18,67 +18,37 @@
</Filter>
</ItemGroup>
<ItemGroup>
<ClInclude Include="..\lpr\include\CNNRecognizer.h">
<Filter>头文件</Filter>
</ClInclude>
<ClInclude Include="..\lpr\include\FastDeskew.h">
<Filter>头文件</Filter>
</ClInclude>
<ClInclude Include="..\lpr\include\FineMapping.h">
<Filter>头文件</Filter>
</ClInclude>
<ClInclude Include="..\lpr\include\niBlackThreshold.h">
<ClInclude Include="..\lpr\include\Finetune.h">
<Filter>头文件</Filter>
</ClInclude>
<ClInclude Include="..\lpr\include\Pipeline.h">
<Filter>头文件</Filter>
</ClInclude>
<ClInclude Include="..\lpr\include\PlateDetection.h">
<ClInclude Include="..\lpr\include\Platedetect.h">
<Filter>头文件</Filter>
</ClInclude>
<ClInclude Include="..\lpr\include\PlateInfo.h">
<Filter>头文件</Filter>
</ClInclude>
<ClInclude Include="..\lpr\include\PlateSegmentation.h">
<Filter>头文件</Filter>
</ClInclude>
<ClInclude Include="..\lpr\include\Recognizer.h">
<Filter>头文件</Filter>
</ClInclude>
<ClInclude Include="..\lpr\src\util.h">
<Filter>源文件</Filter>
</ClInclude>
<ClInclude Include="..\lpr\include\SegmentationFreeRecognizer.h">
<ClInclude Include="..\lpr\include\PlateRecognation.h">
<Filter>头文件</Filter>
</ClInclude>
</ItemGroup>
<ItemGroup>
<ClCompile Include="..\lpr\src\CNNRecognizer.cpp">
<Filter>源文件</Filter>
</ClCompile>
<ClCompile Include="..\lpr\src\FastDeskew.cpp">
<ClCompile Include="..\lpr\src\FineTune.cpp">
<Filter>源文件</Filter>
</ClCompile>
<ClCompile Include="..\lpr\src\FineMapping.cpp">
<ClCompile Include="..\lpr\src\Pipeline.cpp">
<Filter>源文件</Filter>
</ClCompile>
<ClCompile Include="..\lpr\src\PlateDetection.cpp">
<Filter>源文件</Filter>
</ClCompile>
<ClCompile Include="..\lpr\src\PlateSegmentation.cpp">
<Filter>源文件</Filter>
</ClCompile>
<ClCompile Include="..\lpr\src\Recognizer.cpp">
<ClCompile Include="..\lpr\src\PlateRecognation.cpp">
<Filter>源文件</Filter>
</ClCompile>
<ClCompile Include="..\lpr\tests\test_pipeline.cpp">
<ClCompile Include="..\lpr\tests\testPipeLine.cpp">
<Filter>源文件\test</Filter>
</ClCompile>
<ClCompile Include="..\lpr\src\Pipeline.cpp">
<Filter>源文件</Filter>
</ClCompile>
<ClCompile Include="..\lpr\src\SegmentationFreeRecognizer.cpp">
<Filter>源文件</Filter>
</ClCompile>
</ItemGroup>
</Project>

+16 -0  Prj-Win/Prj-Win/Release/Prj-Win.Build.CppClean.log

@@ -0,0 +1,16 @@
e:\jdkj\hyperlpr\prj-win\prj-win\release\vc140.pdb
e:\jdkj\hyperlpr\prj-win\prj-win\release\testpipeline.obj
e:\jdkj\hyperlpr\prj-win\prj-win\release\platerecognation.obj
e:\jdkj\hyperlpr\prj-win\prj-win\release\platedetection.obj
e:\jdkj\hyperlpr\prj-win\prj-win\release\pipeline.obj
e:\jdkj\hyperlpr\prj-win\prj-win\release\finetune.obj
e:\jdkj\hyperlpr\prj-win\release\prj-win.exe
e:\jdkj\hyperlpr\prj-win\release\prj-win.ipdb
e:\jdkj\hyperlpr\prj-win\release\prj-win.iobj
e:\jdkj\hyperlpr\prj-win\release\prj-win.pdb
e:\jdkj\hyperlpr\prj-win\prj-win\release\prj-win.tlog\cl.command.1.tlog
e:\jdkj\hyperlpr\prj-win\prj-win\release\prj-win.tlog\cl.read.1.tlog
e:\jdkj\hyperlpr\prj-win\prj-win\release\prj-win.tlog\cl.write.1.tlog
e:\jdkj\hyperlpr\prj-win\prj-win\release\prj-win.tlog\link.command.1.tlog
e:\jdkj\hyperlpr\prj-win\prj-win\release\prj-win.tlog\link.read.1.tlog
e:\jdkj\hyperlpr\prj-win\prj-win\release\prj-win.tlog\link.write.1.tlog

+1 -0  Prj-Win/Prj-Win/Release/Prj-Win.log

@@ -0,0 +1 @@


+1 -0  Prj-Win/Prj-Win/x64/Debug/Prj-Win.log

@@ -0,0 +1 @@
C:\Program Files (x86)\MSBuild\Microsoft.Cpp\v4.0\V140\Microsoft.Cpp.Platform.targets(57,5): error MSB8020: The build tools for v141 (Platform Toolset = 'v141') cannot be found. To build using the v141 build tools, please install v141 build tools. Alternatively, you may upgrade to the current Visual Studio tools by selecting the Project menu or right-click the solution, and then selecting "Retarget solution".

+0 -24  Prj-Win/lpr/include/CNNRecognizer.h

@@ -1,24 +0,0 @@
//
// Created by Jack Yu on 21/10/2017.
//

#ifndef SWIFTPR_CNNRECOGNIZER_H
#define SWIFTPR_CNNRECOGNIZER_H

#include "Recognizer.h"
namespace pr{
class CNNRecognizer: public GeneralRecognizer{
public:
const int CHAR_INPUT_W = 14;
const int CHAR_INPUT_H = 30;

CNNRecognizer(std::string prototxt,std::string caffemodel);
label recognizeCharacter(cv::Mat character);
private:
cv::dnn::Net net;

};

}

#endif //SWIFTPR_CNNRECOGNIZER_H

+0 -18  Prj-Win/lpr/include/FastDeskew.h

@@ -1,18 +0,0 @@
//
// Created by 庾金科 on 22/09/2017.
//

#ifndef SWIFTPR_FASTDESKEW_H
#define SWIFTPR_FASTDESKEW_H

#include <math.h>
#include <opencv2/opencv.hpp>
namespace pr{

cv::Mat fastdeskew(cv::Mat skewImage,int blockSize);
// cv::Mat spatialTransformer(cv::Mat skewImage);

}//namespace pr


#endif //SWIFTPR_FASTDESKEW_H

+0 -32  Prj-Win/lpr/include/FineMapping.h

@@ -1,32 +0,0 @@
//
// Created by 庾金科 on 22/09/2017.
//

#ifndef SWIFTPR_FINEMAPPING_H
#define SWIFTPR_FINEMAPPING_H

#include <opencv2/opencv.hpp>
#include <opencv2/dnn.hpp>

#include <string>
namespace pr{
class FineMapping{
public:
FineMapping();


FineMapping(std::string prototxt,std::string caffemodel);
static cv::Mat FineMappingVertical(cv::Mat InputProposal,int sliceNum=15,int upper=0,int lower=-50,int windows_size=17);
cv::Mat FineMappingHorizon(cv::Mat FinedVertical,int leftPadding,int rightPadding);


private:
cv::dnn::Net net;

};




}
#endif //SWIFTPR_FINEMAPPING_H

+18 -0  Prj-Win/lpr/include/Finetune.h

@@ -0,0 +1,18 @@
#ifndef _FINETUNE_H_
#define _FINETUNE_H_
#include<vector>
#include<opencv2\dnn.hpp>
#include<opencv.hpp>
namespace pr {
class FineTune {
public:

FineTune(std::string finetune_prototxt, std::string finetune_caffemodel);
void Finetune(cv::Mat img, cv::Mat &resImg);
void to_refine(cv::Mat img, std::vector<cv::Point> pts, cv::Mat &out);
void affine_crop(cv::Mat img, std::vector<cv::Point> pts, cv::Mat &out);
private:
cv::dnn::Net FTNet;
};
}//namespace pr
#endif // !_FINETUNE_H_
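
For orientation, a minimal usage sketch of the new FineTune class added above. This is not part of the diff; the model file names and image paths are assumptions (refinenet.* is presumed to be the fine-tuning model, since its "conv6-3" output matches what FineTune.cpp reads).

#include "Finetune.h"
#include <opencv2/opencv.hpp>

int main() {
    // Assumption: refinenet.prototxt/caffemodel (added under lpr/model) are the fine-tune weights.
    pr::FineTune ft("model/refinenet.prototxt", "model/refinenet.caffemodel");
    cv::Mat rough = cv::imread("plate_rough.jpg");   // rough plate crop produced by the detector
    cv::Mat aligned;
    ft.Finetune(rough, aligned);                     // two-pass corner regression; yields a 160x40 aligned plate
    cv::imwrite("plate_aligned.jpg", aligned);
    return 0;
}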

+39 -59  Prj-Win/lpr/include/Pipeline.h

@@ -1,60 +1,40 @@
//
// Created by 庾金科 on 22/10/2017.
//

#ifndef SWIFTPR_PIPLINE_H
#define SWIFTPR_PIPLINE_H

#include "PlateDetection.h"
#include "PlateSegmentation.h"
#include "CNNRecognizer.h"
#include "PlateInfo.h"
#include "FastDeskew.h"
#include "FineMapping.h"
#include "Recognizer.h"
#include "SegmentationFreeRecognizer.h"

namespace pr{

const std::vector<std::string> CH_PLATE_CODE{"京", "沪", "津", "渝", "冀", "晋", "蒙", "辽", "吉", "黑", "苏", "浙", "皖", "闽", "赣", "鲁", "豫", "鄂", "湘", "粤", "桂",
"琼", "川", "贵", "云", "藏", "陕", "甘", "青", "宁", "新", "0", "1", "2", "3", "4", "5", "6", "7", "8", "9", "A",
"B", "C", "D", "E", "F", "G", "H", "J", "K", "L", "M", "N", "P", "Q", "R", "S", "T", "U", "V", "W", "X",
"Y", "Z","港","学","使","警","澳","挂","军","北","南","广","沈","兰","成","济","海","民","航","空"};



const int SEGMENTATION_FREE_METHOD = 0;
const int SEGMENTATION_BASED_METHOD = 1;

class PipelinePR{
public:
GeneralRecognizer *generalRecognizer;
PlateDetection *plateDetection;
PlateSegmentation *plateSegmentation;
FineMapping *fineMapping;
SegmentationFreeRecognizer *segmentationFreeRecognizer;

PipelinePR(std::string detector_filename,
std::string finemapping_prototxt,std::string finemapping_caffemodel,
std::string segmentation_prototxt,std::string segmentation_caffemodel,
std::string charRecognization_proto,std::string charRecognization_caffemodel,
std::string segmentationfree_proto,std::string segmentationfree_caffemodel
);
~PipelinePR();



std::vector<std::string> plateRes;
std::vector<PlateInfo> RunPiplineAsImage(cv::Mat plateImage,int method);







};


#pragma warning(disable:4430)
#ifndef _PIPLINE_H
#define _PIPLINE_H
#include <vector>
#include "Finetune.h"
#include "Platedetect.h"
#include "PlateRecognation.h"
//#include "PlateColor.h"
using namespace std;
using namespace cv;
namespace pr
{
const std::vector<std::string> CH_PLATE_CODE{ "京", "沪", "津", "渝", "冀", "晋", "蒙", "辽", "吉", "黑", "苏", "浙", "皖", "闽", "赣", "鲁", "豫", "鄂", "湘", "粤", "桂",
"琼", "川", "贵", "云", "藏", "陕", "甘", "青", "宁", "新", "0", "1", "2", "3", "4", "5", "6", "7", "8", "9", "A",
"B", "C", "D", "E", "F", "G", "H", "J", "K", "L", "M", "N", "P", "Q", "R", "S", "T", "U", "V", "W", "X",
"Y", "Z","港","学","使","警","澳","挂","军","北","南","广","沈","兰","成","济","海","民","航","空" };


class PipelinePR {

public:
PlateDetection *platedetection;
FineTune *finetune;
PlateRecognation *platerecognation;
//PlateColorClass *platecolorclass;


PipelinePR(std::string detect_prototxt, std::string detect_caffemodel,
std::string finetune_prototxt, std::string finetune_caffemodel,
std::string platerec_prototxt, std::string platerec_caffemodel/*,
std::string platecolor_mnn*/);
~PipelinePR();

std::vector<std::string> plateRes;
std::vector<PlateInfo> RunPiplineAsImage(cv::Mat srcImage);

};
}
#endif //SWIFTPR_PIPLINE_H
#endif // !SWIFTPR_PIPLINE_H
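
For reference, a sketch of how the reworked PipelinePR might be driven. The pairing of the added model files with the constructor arguments (SSD detector, refine net, segmentation-free recognizer) is a guess, and the snippet assumes PlateInfo keeps its getPlateName()/confidence accessors; the project's own usage lives in lpr/tests/testPipeLine.cpp (+27 lines, not shown on this page).

#include "Pipeline.h"
#include <iostream>

int main() {
    // Assumed model-to-argument mapping: detect, finetune, plate recognition.
    pr::PipelinePR prc("model/mininet_ssd_v1.prototxt",            "model/mininet_ssd_v1.caffemodel",
                       "model/refinenet.prototxt",                 "model/refinenet.caffemodel",
                       "model/SegmenationFree-Inception.prototxt", "model/SegmenationFree-Inception.caffemodel");
    cv::Mat img = cv::imread("res/test.jpg");
    std::vector<pr::PlateInfo> res = prc.RunPiplineAsImage(img);
    for (pr::PlateInfo &plate : res)
        std::cout << plate.getPlateName() << "  " << plate.confidence << std::endl;
    return 0;
}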

+0 -33  Prj-Win/lpr/include/PlateDetection.h

@@ -1,33 +0,0 @@
//
// Created by 庾金科 on 20/09/2017.
//

#ifndef SWIFTPR_PLATEDETECTION_H
#define SWIFTPR_PLATEDETECTION_H

#include <opencv2/opencv.hpp>
#include <PlateInfo.h>
#include <vector>
namespace pr{
class PlateDetection{
public:
PlateDetection(std::string filename_cascade);
PlateDetection();
void LoadModel(std::string filename_cascade);
void plateDetectionRough(cv::Mat InputImage,std::vector<pr::PlateInfo> &plateInfos,int min_w=36,int max_w=800);
// std::vector<pr::PlateInfo> plateDetectionRough(cv::Mat InputImage,int min_w= 60,int max_h = 400);


// std::vector<pr::PlateInfo> plateDetectionRoughByMultiScaleEdge(cv::Mat InputImage);



private:
cv::CascadeClassifier cascade;


};

}// namespace pr

#endif //SWIFTPR_PLATEDETECTION_H

+5 -5  Prj-Win/lpr/include/PlateInfo.h

@@ -1,7 +1,3 @@
//
// Created by 庾金科 on 20/09/2017.
//

#ifndef SWIFTPR_PLATEINFO_H
#define SWIFTPR_PLATEINFO_H
#include <opencv2/opencv.hpp>
@@ -62,7 +58,11 @@ namespace pr {
int getPlateType() {
return Type;
}

int setPlateType(PlateColor platetype)
{
Type = platetype;
return 0;
}
void appendPlateChar(const std::pair<CharType,cv::Mat> &plateChar)
{
plateChars.push_back(plateChar);


+15 -0  Prj-Win/lpr/include/PlateRecognation.h

@@ -0,0 +1,15 @@
#ifndef _PLATERECOGNATION_H_
#define _PLATERECOGNATION_H_
#include <opencv2\dnn.hpp>
#include "PlateInfo.h"
namespace pr {
class PlateRecognation {
public:
PlateRecognation(std::string Rec_prototxt, std::string Rec_cafffemodel);
void segmentation_free_recognation(cv::Mat src, pr::PlateInfo &plateinfo);
private:
cv::dnn::Net RecNet;
};
}//namespace pr
#endif // !_PLATERECOGNATION_H_
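
A standalone sketch of the new recognizer on an already-aligned 160x40 crop (model file names are assumptions; in the pipeline this step consumes the output of FineTune::Finetune):

#include "PlateRecognation.h"
#include <opencv2/opencv.hpp>
#include <iostream>

int main() {
    pr::PlateRecognation rec("model/SegmenationFree-Inception.prototxt",
                             "model/SegmenationFree-Inception.caffemodel");
    cv::Mat aligned = cv::imread("plate_aligned.jpg");          // expected 160x40, BGR
    pr::PlateInfo info(aligned, cv::Rect(0, 0, aligned.cols, aligned.rows));
    rec.segmentation_free_recognation(aligned, info);           // fills plate name and confidence
    std::cout << info.getPlateName() << " (" << info.confidence << ")" << std::endl;
    return 0;
}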


+0 -35  Prj-Win/lpr/include/PlateSegmentation.h

@@ -1,35 +0,0 @@
#ifndef SWIFTPR_PLATESEGMENTATION_H
#define SWIFTPR_PLATESEGMENTATION_H

#include "opencv2/opencv.hpp"
#include <opencv2/dnn.hpp>
#include "PlateInfo.h"

namespace pr{


class PlateSegmentation{
public:
const int PLATE_NORMAL = 6;
const int PLATE_NORMAL_GREEN = 7;
const int DEFAULT_WIDTH = 20;
PlateSegmentation(std::string phototxt,std::string caffemodel);
PlateSegmentation(){}
void segmentPlatePipline(PlateInfo &plateInfo,int stride,std::vector<cv::Rect> &Char_rects);

void segmentPlateBySlidingWindows(cv::Mat &plateImage,int windowsWidth,int stride,cv::Mat &respones);
void templateMatchFinding(const cv::Mat &respones,int windowsWidth,std::pair<float,std::vector<int>> &candidatePts);
void refineRegion(cv::Mat &plateImage,const std::vector<int> &candidatePts,const int padding,std::vector<cv::Rect> &rects);
void ExtractRegions(PlateInfo &plateInfo,std::vector<cv::Rect> &rects);
cv::Mat classifyResponse(const cv::Mat &cropped);
private:
cv::dnn::Net net;


// RefineRegion()

};

}//namespace pr

#endif //SWIFTPR_PLATESEGMENTATION_H

+16 -0  Prj-Win/lpr/include/Platedetect.h

@@ -0,0 +1,16 @@
#ifndef _PLATEDETECT_H_
#define _PLATEDETECT_H_
#include <opencv2/opencv.hpp>
#include <vector>
#include "PlateInfo.h"
namespace pr
{
class PlateDetection {
public:
PlateDetection(std::string ssd_prototxt, std::string ssd_caffe_model);
void Detectssd(cv::Mat inputImg, std::vector<pr::PlateInfo> &plateInfos);
private:
cv::dnn::Net ssdNet;
};
}//namespace pr
#endif // !_PLATEDETECT_H_
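
A sketch of calling the new SSD-based detector directly (file names and paths are placeholders, not taken from this diff):

#include "Platedetect.h"
#include <iostream>

int main() {
    pr::PlateDetection det("model/mininet_ssd_v1.prototxt", "model/mininet_ssd_v1.caffemodel");
    cv::Mat frame = cv::imread("res/test.jpg");
    std::vector<pr::PlateInfo> plates;
    det.Detectssd(frame, plates);                      // keeps detections with confidence > 0.5
    std::cout << "plates found: " << plates.size() << std::endl;
    for (size_t i = 0; i < plates.size(); i++)
        cv::imwrite("plate_" + std::to_string(i) + ".jpg", plates[i].getPlateImage());
    return 0;
}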

+0 -23  Prj-Win/lpr/include/Recognizer.h

@@ -1,23 +0,0 @@
//
// Created by 庾金科 on 20/10/2017.
//


#ifndef SWIFTPR_RECOGNIZER_H
#define SWIFTPR_RECOGNIZER_H

#include "PlateInfo.h"
#include "opencv2/dnn.hpp"
namespace pr{
typedef cv::Mat label;
class GeneralRecognizer{
public:
virtual label recognizeCharacter(cv::Mat character) = 0;
// virtual cv::Mat SegmentationFreeForSinglePlate(cv::Mat plate) = 0;
void SegmentBasedSequenceRecognition(PlateInfo &plateinfo);
void SegmentationFreeSequenceRecognition(PlateInfo &plateInfo);

};

}
#endif //SWIFTPR_RECOGNIZER_H

+0 -28  Prj-Win/lpr/include/SegmentationFreeRecognizer.h

@@ -1,28 +0,0 @@
//
// Created by 庾金科 on 28/11/2017.
//

#ifndef SWIFTPR_SEGMENTATIONFREERECOGNIZER_H
#define SWIFTPR_SEGMENTATIONFREERECOGNIZER_H

#include "Recognizer.h"
namespace pr{


class SegmentationFreeRecognizer{
public:
const int CHAR_INPUT_W = 14;
const int CHAR_INPUT_H = 30;
const int CHAR_LEN = 84;

SegmentationFreeRecognizer(std::string prototxt,std::string caffemodel);
std::pair<std::string,float> SegmentationFreeForSinglePlate(cv::Mat plate,std::vector<std::string> mapping_table);


private:
cv::dnn::Net net;

};

}
#endif //SWIFTPR_SEGMENTATIONFREERECOGNIZER_H

+0 -109  Prj-Win/lpr/include/niBlackThreshold.h

@@ -1,109 +0,0 @@
//
// Created by 庾金科 on 26/10/2017.
//

#ifndef SWIFTPR_NIBLACKTHRESHOLD_H
#define SWIFTPR_NIBLACKTHRESHOLD_H


#include <opencv2/opencv.hpp>
using namespace cv;

enum LocalBinarizationMethods{
BINARIZATION_NIBLACK = 0, //!< Classic Niblack binarization. See @cite Niblack1985 .
BINARIZATION_SAUVOLA = 1, //!< Sauvola's technique. See @cite Sauvola1997 .
BINARIZATION_WOLF = 2, //!< Wolf's technique. See @cite Wolf2004 .
BINARIZATION_NICK = 3 //!< NICK technique. See @cite Khurshid2009 .
};


void niBlackThreshold( InputArray _src, OutputArray _dst, double maxValue,
int type, int blockSize, double k, int binarizationMethod )
{
// Input grayscale image
Mat src = _src.getMat();
CV_Assert(src.channels() == 1);
CV_Assert(blockSize % 2 == 1 && blockSize > 1);
if (binarizationMethod == BINARIZATION_SAUVOLA) {
CV_Assert(src.depth() == CV_8U);
}
type &= THRESH_MASK;
// Compute local threshold (T = mean + k * stddev)
// using mean and standard deviation in the neighborhood of each pixel
// (intermediate calculations are done with floating-point precision)
Mat test;
Mat thresh;
{
// note that: Var[X] = E[X^2] - E[X]^2
Mat mean, sqmean, variance, stddev, sqrtVarianceMeanSum;
double srcMin, stddevMax;
boxFilter(src, mean, CV_32F, Size(blockSize, blockSize),
Point(-1,-1), true, BORDER_REPLICATE);
sqrBoxFilter(src, sqmean, CV_32F, Size(blockSize, blockSize),
Point(-1,-1), true, BORDER_REPLICATE);
variance = sqmean - mean.mul(mean);
sqrt(variance, stddev);
switch (binarizationMethod)
{
case BINARIZATION_NIBLACK:
thresh = mean + stddev * static_cast<float>(k);

break;
case BINARIZATION_SAUVOLA:
thresh = mean.mul(1. + static_cast<float>(k) * (stddev / 128.0 - 1.));
break;
case BINARIZATION_WOLF:
minMaxIdx(src, &srcMin,NULL);
minMaxIdx(stddev, NULL, &stddevMax);
thresh = mean - static_cast<float>(k) * (mean - srcMin - stddev.mul(mean - srcMin) / stddevMax);
break;
case BINARIZATION_NICK:
sqrt(variance + sqmean, sqrtVarianceMeanSum);
thresh = mean + static_cast<float>(k) * sqrtVarianceMeanSum;
break;
default:
// CV_Error( CV_StsBadArg, "Unknown binarization method" );
CV_Error(-5, "Unknown binarization method");
break;
}
thresh.convertTo(thresh, src.depth());

thresh.convertTo(test, src.depth());
//
// cv::imshow("imagex",test);
// cv::waitKey(0);

}
// Prepare output image
_dst.create(src.size(), src.type());
Mat dst = _dst.getMat();
CV_Assert(src.data != dst.data); // no inplace processing
// Apply thresholding: ( pixel > threshold ) ? foreground : background
Mat mask;
switch (type)
{
case THRESH_BINARY: // dst = (src > thresh) ? maxval : 0
case THRESH_BINARY_INV: // dst = (src > thresh) ? 0 : maxval
compare(src, thresh, mask, (type == THRESH_BINARY ? CMP_GT : CMP_LE));
dst.setTo(0);
dst.setTo(maxValue, mask);
break;
case THRESH_TRUNC: // dst = (src > thresh) ? thresh : src
compare(src, thresh, mask, CMP_GT);
src.copyTo(dst);
thresh.copyTo(dst, mask);
break;
case THRESH_TOZERO: // dst = (src > thresh) ? src : 0
case THRESH_TOZERO_INV: // dst = (src > thresh) ? 0 : src
compare(src, thresh, mask, (type == THRESH_TOZERO ? CMP_GT : CMP_LE));
dst.setTo(0);
src.copyTo(dst, mask);
break;
default:
// CV_Error( CV_StsBadArg, "Unknown threshold type" );
CV_Error(-5, "Unknown threshold type");
break;
}
}

#endif //SWIFTPR_NIBLACKTHRESHOLD_H

BIN  Prj-Win/lpr/model/HorizonalFinemapping.caffemodel


+95 -0  Prj-Win/lpr/model/HorizonalFinemapping.prototxt

@@ -0,0 +1,95 @@
input: "data"
input_dim: 1
input_dim: 3
input_dim: 16
input_dim: 66
layer {
name: "conv1"
type: "Convolution"
bottom: "data"
top: "conv1"
convolution_param {
num_output: 10
bias_term: true
pad: 0
kernel_size: 3
stride: 1
}
}
layer {
name: "relu1"
type: "ReLU"
bottom: "conv1"
top: "conv1"
}
layer {
name: "max_pooling2d_3"
type: "Pooling"
bottom: "conv1"
top: "max_pooling2d_3"
pooling_param {
pool: MAX
kernel_size: 2
stride: 2
pad: 0
}
}
layer {
name: "conv2"
type: "Convolution"
bottom: "max_pooling2d_3"
top: "conv2"
convolution_param {
num_output: 16
bias_term: true
pad: 0
kernel_size: 3
stride: 1
}
}
layer {
name: "relu2"
type: "ReLU"
bottom: "conv2"
top: "conv2"
}
layer {
name: "conv3"
type: "Convolution"
bottom: "conv2"
top: "conv3"
convolution_param {
num_output: 32
bias_term: true
pad: 0
kernel_size: 3
stride: 1
}
}
layer {
name: "relu3"
type: "ReLU"
bottom: "conv3"
top: "conv3"
}
layer {
name: "flatten_2"
type: "Flatten"
bottom: "conv3"
top: "flatten_2"
}
layer {
name: "dense"
type: "InnerProduct"
bottom: "flatten_2"
top: "dense"
inner_product_param {
num_output: 2
}
}
layer {
name: "relu4"
type: "ReLU"
bottom: "dense"
top: "dense"
}

+7 -13  Prj-Win/lpr/model/README.md

@@ -1,17 +1,11 @@
Place the following files from the /Prj-Linux/lpr/model directory
Place the following files from the /hyperlpr_pip_pkg/hyperlpr/models/dnn/ directory

cascade.xml
mininet_ssd_v1.caffemodel
mininet_ssd_v1.prototxt
refinenet.caffemodel
refinenet.prototxt
SegmenationFree-Inception.caffemodel
SegmenationFree-Inception.prototxt

CharacterRecognization.caffemodel

CharacterRecognization.prototxt

HorizonalFinemapping.caffemodel

HorizonalFinemapping.prototxt

SegmentationFree.caffemodel

SegmentationFree.prototxt

into this directory.

BIN  Prj-Win/lpr/model/SegmenationFree-Inception.caffemodel


+454 -0  Prj-Win/lpr/model/SegmenationFree-Inception.prototxt

@@ -0,0 +1,454 @@
input: "data"
input_dim: 1
input_dim: 3
input_dim: 160
input_dim: 40
layer {
name: "conv0"
type: "Convolution"
bottom: "data"
top: "conv0"
convolution_param {
num_output: 32
bias_term: true
pad_h: 1
pad_w: 1
kernel_h: 3
kernel_w: 3
stride_h: 1
stride_w: 1
}
}
layer {
name: "bn0"
type: "BatchNorm"
bottom: "conv0"
top: "bn0"
batch_norm_param {
moving_average_fraction: 0.99
eps: 0.001
}
}
layer {
name: "bn0_scale"
type: "Scale"
bottom: "bn0"
top: "bn0"
scale_param {
bias_term: true
}
}
layer {
name: "relu0"
type: "ReLU"
bottom: "bn0"
top: "bn0"
}
layer {
name: "pool0"
type: "Pooling"
bottom: "bn0"
top: "pool0"
pooling_param {
pool: MAX
kernel_h: 2
kernel_w: 2
stride_h: 2
stride_w: 2
pad_h: 0
pad_w: 0
}
}
layer {
name: "conv1"
type: "Convolution"
bottom: "pool0"
top: "conv1"
convolution_param {
num_output: 64
bias_term: true
pad_h: 1
pad_w: 1
kernel_h: 3
kernel_w: 3
stride_h: 1
stride_w: 1
}
}
layer {
name: "bn1"
type: "BatchNorm"
bottom: "conv1"
top: "bn1"
batch_norm_param {
moving_average_fraction: 0.99
eps: 0.001
}
}
layer {
name: "bn1_scale"
type: "Scale"
bottom: "bn1"
top: "bn1"
scale_param {
bias_term: true
}
}
layer {
name: "relu1"
type: "ReLU"
bottom: "bn1"
top: "bn1"
}
layer {
name: "pool1"
type: "Pooling"
bottom: "bn1"
top: "pool1"
pooling_param {
pool: MAX
kernel_h: 2
kernel_w: 2
stride_h: 2
stride_w: 2
pad_h: 0
pad_w: 0
}
}
layer {
name: "conv2"
type: "Convolution"
bottom: "pool1"
top: "conv2"
convolution_param {
num_output: 128
bias_term: true
pad_h: 1
pad_w: 1
kernel_h: 3
kernel_w: 3
stride_h: 1
stride_w: 1
}
}
layer {
name: "bn2"
type: "BatchNorm"
bottom: "conv2"
top: "bn2"
batch_norm_param {
moving_average_fraction: 0.99
eps: 0.001
}
}
layer {
name: "bn2_scale"
type: "Scale"
bottom: "bn2"
top: "bn2"
scale_param {
bias_term: true
}
}
layer {
name: "relu2"
type: "ReLU"
bottom: "bn2"
top: "bn2"
}
layer {
name: "pool2"
type: "Pooling"
bottom: "bn2"
top: "pool2"
pooling_param {
pool: MAX
kernel_h: 2
kernel_w: 2
stride_h: 2
stride_w: 2
pad_h: 0
pad_w: 0
}
}
layer {
name: "conv2d_1"
type: "Convolution"
bottom: "pool2"
top: "conv2d_1"
convolution_param {
num_output: 256
bias_term: true
pad_h: 0
pad_w: 0
kernel_h: 1
kernel_w: 5
stride_h: 1
stride_w: 1
}
}
layer {
name: "batch_normalization_1"
type: "BatchNorm"
bottom: "conv2d_1"
top: "batch_normalization_1"
batch_norm_param {
moving_average_fraction: 0.99
eps: 0.001
}
}
layer {
name: "batch_normalization_1_scale"
type: "Scale"
bottom: "batch_normalization_1"
top: "batch_normalization_1"
scale_param {
bias_term: true
}
}
layer {
name: "activation_1"
type: "ReLU"
bottom: "batch_normalization_1"
top: "batch_normalization_1"
}
layer {
name: "conv2d_2"
type: "Convolution"
bottom: "batch_normalization_1"
top: "conv2d_2"
convolution_param {
num_output: 256
bias_term: true
pad_h: 3
pad_w: 0
kernel_h: 7
kernel_w: 1
stride_h: 1
stride_w: 1
}
}
layer {
name: "conv2d_3"
type: "Convolution"
bottom: "batch_normalization_1"
top: "conv2d_3"
convolution_param {
num_output: 256
bias_term: true
pad_h: 2
pad_w: 0
kernel_h: 5
kernel_w: 1
stride_h: 1
stride_w: 1
}
}
layer {
name: "conv2d_4"
type: "Convolution"
bottom: "batch_normalization_1"
top: "conv2d_4"
convolution_param {
num_output: 256
bias_term: true
pad_h: 1
pad_w: 0
kernel_h: 3
kernel_w: 1
stride_h: 1
stride_w: 1
}
}
layer {
name: "conv2d_5"
type: "Convolution"
bottom: "batch_normalization_1"
top: "conv2d_5"
convolution_param {
num_output: 256
bias_term: true
pad_h: 0
pad_w: 0
kernel_h: 1
kernel_w: 1
stride_h: 1
stride_w: 1
}
}
layer {
name: "batch_normalization_2"
type: "BatchNorm"
bottom: "conv2d_2"
top: "batch_normalization_2"
batch_norm_param {
moving_average_fraction: 0.99
eps: 0.001
}
}
layer {
name: "batch_normalization_2_scale"
type: "Scale"
bottom: "batch_normalization_2"
top: "batch_normalization_2"
scale_param {
bias_term: true
}
}
layer {
name: "batch_normalization_3"
type: "BatchNorm"
bottom: "conv2d_3"
top: "batch_normalization_3"
batch_norm_param {
moving_average_fraction: 0.99
eps: 0.001
}
}
layer {
name: "batch_normalization_3_scale"
type: "Scale"
bottom: "batch_normalization_3"
top: "batch_normalization_3"
scale_param {
bias_term: true
}
}
layer {
name: "batch_normalization_4"
type: "BatchNorm"
bottom: "conv2d_4"
top: "batch_normalization_4"
batch_norm_param {
moving_average_fraction: 0.99
eps: 0.001
}
}
layer {
name: "batch_normalization_4_scale"
type: "Scale"
bottom: "batch_normalization_4"
top: "batch_normalization_4"
scale_param {
bias_term: true
}
}
layer {
name: "batch_normalization_5"
type: "BatchNorm"
bottom: "conv2d_5"
top: "batch_normalization_5"
batch_norm_param {
moving_average_fraction: 0.99
eps: 0.001
}
}
layer {
name: "batch_normalization_5_scale"
type: "Scale"
bottom: "batch_normalization_5"
top: "batch_normalization_5"
scale_param {
bias_term: true
}
}
layer {
name: "activation_2"
type: "ReLU"
bottom: "batch_normalization_2"
top: "batch_normalization_2"
}
layer {
name: "activation_3"
type: "ReLU"
bottom: "batch_normalization_3"
top: "batch_normalization_3"
}
layer {
name: "activation_4"
type: "ReLU"
bottom: "batch_normalization_4"
top: "batch_normalization_4"
}
layer {
name: "activation_5"
type: "ReLU"
bottom: "batch_normalization_5"
top: "batch_normalization_5"
}
layer {
name: "concatenate_1"
type: "Concat"
bottom: "batch_normalization_2"
bottom: "batch_normalization_3"
bottom: "batch_normalization_4"
bottom: "batch_normalization_5"
top: "concatenate_1"
concat_param {
axis: 1
}
}
layer {
name: "conv_1024_11"
type: "Convolution"
bottom: "concatenate_1"
top: "conv_1024_11"
convolution_param {
num_output: 1024
bias_term: true
pad_h: 0
pad_w: 0
kernel_h: 1
kernel_w: 1
stride_h: 1
stride_w: 1
}
}
layer {
name: "batch_normalization_6"
type: "BatchNorm"
bottom: "conv_1024_11"
top: "batch_normalization_6"
batch_norm_param {
moving_average_fraction: 0.99
eps: 0.001
}
}
layer {
name: "batch_normalization_6_scale"
type: "Scale"
bottom: "batch_normalization_6"
top: "batch_normalization_6"
scale_param {
bias_term: true
}
}
layer {
name: "activation_6"
type: "ReLU"
bottom: "batch_normalization_6"
top: "batch_normalization_6"
}
layer {
name: "conv_class_11"
type: "Convolution"
bottom: "batch_normalization_6"
top: "conv_class_11"
convolution_param {
num_output: 84
bias_term: true
pad_h: 0
pad_w: 0
kernel_h: 1
kernel_w: 1
stride_h: 1
stride_w: 1
}
}
layer {
name: "prob"
type: "Softmax"
bottom: "conv_class_11"
top: "prob"
}


BIN  Prj-Win/lpr/model/mininet_ssd_v1.caffemodel


+1462 -0  Prj-Win/lpr/model/mininet_ssd_v1.prototxt (file diff suppressed because it is too large)


BIN  Prj-Win/lpr/model/refinenet.caffemodel


+300 -0  Prj-Win/lpr/model/refinenet.prototxt

@@ -0,0 +1,300 @@
name: "ONet"
input: "data"
input_dim: 1
input_dim: 3
input_dim: 48
input_dim: 120

##################################
layer {
name: "conv1"
type: "Convolution"
bottom: "data"
top: "conv1"
param {
lr_mult: 1
decay_mult: 1
}
param {
lr_mult: 2
decay_mult: 1
}
convolution_param {
num_output: 32
kernel_size: 3
stride: 1
weight_filler {
type: "xavier"
}
bias_filler {
type: "constant"
value: 0
}
}
}
layer {
name: "prelu1"
type: "PReLU"
bottom: "conv1"
top: "conv1"
}
layer {
name: "pool1"
type: "Pooling"
bottom: "conv1"
top: "pool1"
pooling_param {
pool: MAX
kernel_size: 3
stride: 2
}
}
layer {
name: "conv2"
type: "Convolution"
bottom: "pool1"
top: "conv2"
param {
lr_mult: 1
decay_mult: 1
}
param {
lr_mult: 2
decay_mult: 1
}
convolution_param {
num_output: 64
kernel_size: 3
stride: 1
weight_filler {
type: "xavier"
}
bias_filler {
type: "constant"
value: 0
}
}
}

layer {
name: "prelu2"
type: "PReLU"
bottom: "conv2"
top: "conv2"
}
layer {
name: "pool2"
type: "Pooling"
bottom: "conv2"
top: "pool2"
pooling_param {
pool: MAX
kernel_size: 3
stride: 2
}
}

layer {
name: "conv3"
type: "Convolution"
bottom: "pool2"
top: "conv3"
param {
lr_mult: 1
decay_mult: 1
}
param {
lr_mult: 2
decay_mult: 1
}
convolution_param {
num_output: 64
kernel_size: 3
weight_filler {
type: "xavier"
}
bias_filler {
type: "constant"
value: 0
}
}
}
layer {
name: "prelu3"
type: "PReLU"
bottom: "conv3"
top: "conv3"
}
layer {
name: "pool3"
type: "Pooling"
bottom: "conv3"
top: "pool3"
pooling_param {
pool: MAX
kernel_size: 2
stride: 2
}
}
layer {
name: "conv4"
type: "Convolution"
bottom: "pool3"
top: "conv4"
param {
lr_mult: 1
decay_mult: 1
}
param {
lr_mult: 2
decay_mult: 1
}
convolution_param {
num_output: 128
kernel_size: 2
weight_filler {
type: "xavier"
}
bias_filler {
type: "constant"
value: 0
}
}
}
layer {
name: "prelu4"
type: "PReLU"
bottom: "conv4"
top: "conv4"
}


layer {
name: "conv5i"
type: "InnerProduct"
bottom: "conv4"
top: "conv5"
param {
lr_mult: 1
decay_mult: 1
}
param {
lr_mult: 2
decay_mult: 1
}
inner_product_param {
#kernel_size: 3
num_output: 256
weight_filler {
type: "xavier"
}
bias_filler {
type: "constant"
value: 0
}
}
}

layer {
name: "drop5i"
type: "Dropout"
bottom: "conv5"
top: "conv5"
dropout_param {
dropout_ratio: 0.25
}
}
layer {
name: "prelu5"
type: "PReLU"
bottom: "conv5"
top: "conv5"
}


layer {
name: "conv6i-1"
type: "InnerProduct"
bottom: "conv5"
top: "conv6-1"
param {
lr_mult: 1
decay_mult: 1
}
param {
lr_mult: 2
decay_mult: 1
}
inner_product_param {
#kernel_size: 1
num_output: 2
weight_filler {
type: "xavier"
}
bias_filler {
type: "constant"
value: 0
}
}
}


layer {
name: "conv6i-2"
type: "InnerProduct"
bottom: "conv5"
top: "conv6-2"
param {
lr_mult: 1
decay_mult: 1
}
param {
lr_mult: 2
decay_mult: 1
}
inner_product_param {
#kernel_size: 1
num_output: 4
weight_filler {
type: "xavier"
}
bias_filler {
type: "constant"
value: 0
}
}
}

layer {
name: "conv6-3"
type: "InnerProduct"
bottom: "conv5"
top: "conv6-3"
param {
lr_mult: 1
decay_mult: 1
}
param {
lr_mult: 2
decay_mult: 1
}
inner_product_param {
#kernel_size: 1
num_output: 8
weight_filler {
type: "xavier"
}
bias_filler {
type: "constant"
value: 0
}
}
}


layer {
name: "prob1"
type: "Softmax"
bottom: "conv6-1"
top: "prob1"
}

BIN  Prj-Win/lpr/res/test.jpg

Before: 500 × 375, 32 kB  |  After: 1280 × 720, 188 kB

+0 -19  Prj-Win/lpr/src/CNNRecognizer.cpp

@@ -1,19 +0,0 @@
//
// Created by Jack Yu on 21/10/2017.
//

#include "../include/CNNRecognizer.h"

namespace pr{
CNNRecognizer::CNNRecognizer(std::string prototxt,std::string caffemodel){
net = cv::dnn::readNetFromCaffe(prototxt, caffemodel);
}

label CNNRecognizer::recognizeCharacter(cv::Mat charImage){
if(charImage.channels()== 3)
cv::cvtColor(charImage,charImage,cv::COLOR_BGR2GRAY);
cv::Mat inputBlob = cv::dnn::blobFromImage(charImage, 1/255.0, cv::Size(CHAR_INPUT_W,CHAR_INPUT_H), cv::Scalar(0,0,0),false);
net.setInput(inputBlob,"data");
return net.forward();
}
}

+0 -108  Prj-Win/lpr/src/FastDeskew.cpp

@@ -1,108 +0,0 @@
//
// Created by Jack Yu on 02/10/2017.
//



#include <../include/FastDeskew.h>

namespace pr{
const int ANGLE_MIN = 30 ;
const int ANGLE_MAX = 150 ;
const int PLATE_H = 36;
const int PLATE_W = 136;
int angle(float x,float y)
{
return atan2(x,y)*180/3.1415;
}

std::vector<float> avgfilter(std::vector<float> angle_list,int windowsSize) {
std::vector<float> angle_list_filtered(angle_list.size() - windowsSize + 1);
for (int i = 0; i < angle_list.size() - windowsSize + 1; i++) {
float avg = 0.00f;
for (int j = 0; j < windowsSize; j++) {
avg += angle_list[i + j];
}
avg = avg / windowsSize;
angle_list_filtered[i] = avg;
}

return angle_list_filtered;
}


void drawHist(std::vector<float> seq){
cv::Mat image(300,seq.size(),CV_8U);
image.setTo(0);

for(int i = 0;i<seq.size();i++)
{
float l = *std::max_element(seq.begin(),seq.end());

int p = int(float(seq[i])/l*300);

cv::line(image,cv::Point(i,300),cv::Point(i,300-p),cv::Scalar(255,255,255));
}
cv::imshow("vis",image);
}

cv::Mat correctPlateImage(cv::Mat skewPlate,float angle,float maxAngle)
{
cv::Mat dst;
cv::Size size_o(skewPlate.cols,skewPlate.rows);
int extend_padding = 0;
extend_padding = static_cast<int>(skewPlate.rows*tan(cv::abs(angle)/180* 3.14) );
cv::Size size(skewPlate.cols + extend_padding ,skewPlate.rows);
float interval = abs(sin((angle /180) * 3.14)* skewPlate.rows);
cv::Point2f pts1[4] = {cv::Point2f(0,0),cv::Point2f(0,size_o.height),cv::Point2f(size_o.width,0),cv::Point2f(size_o.width,size_o.height)};
if(angle>0) {
cv::Point2f pts2[4] = {cv::Point2f(interval, 0), cv::Point2f(0, size_o.height),
cv::Point2f(size_o.width, 0), cv::Point2f(size_o.width - interval, size_o.height)};
cv::Mat M = cv::getPerspectiveTransform(pts1,pts2);
cv::warpPerspective(skewPlate,dst,M,size);
}
else {
cv::Point2f pts2[4] = {cv::Point2f(0, 0), cv::Point2f(interval, size_o.height), cv::Point2f(size_o.width-interval, 0),
cv::Point2f(size_o.width, size_o.height)};
cv::Mat M = cv::getPerspectiveTransform(pts1,pts2);
cv::warpPerspective(skewPlate,dst,M,size,cv::INTER_CUBIC);
}
return dst;
}
cv::Mat fastdeskew(cv::Mat skewImage,int blockSize){
const int FILTER_WINDOWS_SIZE = 5;
std::vector<float> angle_list(180);
memset(angle_list.data(),0,angle_list.size()*sizeof(int));
cv::Mat bak;
skewImage.copyTo(bak);
if(skewImage.channels() == 3)
cv::cvtColor(skewImage,skewImage,cv::COLOR_RGB2GRAY);
if(skewImage.channels() == 1)
{
cv::Mat eigen;
cv::cornerEigenValsAndVecs(skewImage,eigen,blockSize,5);
for( int j = 0; j < skewImage.rows; j+=blockSize )
{ for( int i = 0; i < skewImage.cols; i+=blockSize )
{
float x2 = eigen.at<cv::Vec6f>(j, i)[4];
float y2 = eigen.at<cv::Vec6f>(j, i)[5];
int angle_cell = angle(x2,y2);
angle_list[(angle_cell + 180)%180]+=1.0;
}
}
}
std::vector<float> filtered = avgfilter(angle_list,5);
int maxPos = std::max_element(filtered.begin(),filtered.end()) - filtered.begin() + FILTER_WINDOWS_SIZE/2;
if(maxPos>ANGLE_MAX)
maxPos = (-maxPos+90+180)%180;
if(maxPos<ANGLE_MIN)
maxPos-=90;
maxPos=90-maxPos;
cv::Mat deskewed = correctPlateImage(bak, static_cast<float>(maxPos),60.0f);
return deskewed;
}



}//namespace pr

+0 -170  Prj-Win/lpr/src/FineMapping.cpp

@@ -1,170 +0,0 @@
#include "FineMapping.h"
namespace pr{

const int FINEMAPPING_H = 60 ;
const int FINEMAPPING_W = 140;
const int PADDING_UP_DOWN = 30;
void drawRect(cv::Mat image,cv::Rect rect)
{
cv::Point p1(rect.x,rect.y);
cv::Point p2(rect.x+rect.width,rect.y+rect.height);
cv::rectangle(image,p1,p2,cv::Scalar(0,255,0),1);
}


FineMapping::FineMapping(std::string prototxt,std::string caffemodel) {
net = cv::dnn::readNetFromCaffe(prototxt, caffemodel);

}

cv::Mat FineMapping::FineMappingHorizon(cv::Mat FinedVertical,int leftPadding,int rightPadding)
{

// if(FinedVertical.channels()==1)
// cv::cvtColor(FinedVertical,FinedVertical,cv::COLOR_GRAY2BGR);
cv::Mat inputBlob = cv::dnn::blobFromImage(FinedVertical, 1/255.0, cv::Size(66,16),
cv::Scalar(0,0,0),false);

net.setInput(inputBlob,"data");
cv::Mat prob = net.forward();
int front = static_cast<int>(prob.at<float>(0,0)*FinedVertical.cols);
int back = static_cast<int>(prob.at<float>(0,1)*FinedVertical.cols);
front -= leftPadding ;
if(front<0) front = 0;
back +=rightPadding;
if(back>FinedVertical.cols-1) back=FinedVertical.cols - 1;
cv::Mat cropped = FinedVertical.colRange(front,back).clone();
return cropped;


}
std::pair<int,int> FitLineRansac(std::vector<cv::Point> pts,int zeroadd = 0 )
{
std::pair<int,int> res;
if(pts.size()>2)
{
cv::Vec4f line;
cv::fitLine(pts,line,cv::DIST_HUBER,0,0.01,0.01);
float vx = line[0];
float vy = line[1];
float x = line[2];
float y = line[3];
int lefty = static_cast<int>((-x * vy / vx) + y);
int righty = static_cast<int>(((136- x) * vy / vx) + y);
res.first = lefty+PADDING_UP_DOWN+zeroadd;
res.second = righty+PADDING_UP_DOWN+zeroadd;
return res;
}
res.first = zeroadd;
res.second = zeroadd;
return res;
}

cv::Mat FineMapping::FineMappingVertical(cv::Mat InputProposal,int sliceNum,int upper,int lower,int windows_size){
cv::Mat PreInputProposal;
cv::Mat proposal;
cv::resize(InputProposal,PreInputProposal,cv::Size(FINEMAPPING_W,FINEMAPPING_H));
if(InputProposal.channels() == 3)
cv::cvtColor(PreInputProposal,proposal,cv::COLOR_BGR2GRAY);
else
PreInputProposal.copyTo(proposal);
// this will improve some sen
cv::Mat kernal = cv::getStructuringElement(cv::MORPH_ELLIPSE,cv::Size(1,3));
float diff = static_cast<float>(upper-lower);
diff/=static_cast<float>(sliceNum-1);
cv::Mat binary_adaptive;
std::vector<cv::Point> line_upper;
std::vector<cv::Point> line_lower;
int contours_nums=0;
for(int i = 0 ; i < sliceNum ; i++)
{
std::vector<std::vector<cv::Point> > contours;
float k =lower + i*diff;
cv::adaptiveThreshold(proposal,binary_adaptive,255,cv::ADAPTIVE_THRESH_MEAN_C,cv::THRESH_BINARY,windows_size,k);
cv::Mat draw;
binary_adaptive.copyTo(draw);
cv::findContours(binary_adaptive,contours,cv::RETR_EXTERNAL,cv::CHAIN_APPROX_SIMPLE);
for(auto contour: contours)
{
cv::Rect bdbox =cv::boundingRect(contour);
float lwRatio = bdbox.height/static_cast<float>(bdbox.width);
int bdboxAera = bdbox.width*bdbox.height;
if (( lwRatio>0.7&&bdbox.width*bdbox.height>100 && bdboxAera<300)
|| (lwRatio>3.0 && bdboxAera<100 && bdboxAera>10))
{
cv::Point p1(bdbox.x, bdbox.y);
cv::Point p2(bdbox.x + bdbox.width, bdbox.y + bdbox.height);
line_upper.push_back(p1);
line_lower.push_back(p2);
contours_nums+=1;
}
}
}
if(contours_nums<41)
{
cv::bitwise_not(InputProposal,InputProposal);
cv::Mat kernal = cv::getStructuringElement(cv::MORPH_ELLIPSE,cv::Size(1,5));
cv::Mat bak;
cv::resize(InputProposal,bak,cv::Size(FINEMAPPING_W,FINEMAPPING_H));
cv::erode(bak,bak,kernal);
if(InputProposal.channels() == 3)
cv::cvtColor(bak,proposal,cv::COLOR_BGR2GRAY);
else
proposal = bak;
int contours_nums=0;
for(int i = 0 ; i < sliceNum ; i++)
{
std::vector<std::vector<cv::Point> > contours;
float k =lower + i*diff;
cv::adaptiveThreshold(proposal,binary_adaptive,255,cv::ADAPTIVE_THRESH_MEAN_C,cv::THRESH_BINARY,windows_size,k);
cv::Mat draw;
binary_adaptive.copyTo(draw);
cv::findContours(binary_adaptive,contours,cv::RETR_EXTERNAL,cv::CHAIN_APPROX_SIMPLE);
for(auto contour: contours)
{
cv::Rect bdbox =cv::boundingRect(contour);
float lwRatio = bdbox.height/static_cast<float>(bdbox.width);
int bdboxAera = bdbox.width*bdbox.height;
if (( lwRatio>0.7&&bdbox.width*bdbox.height>120 && bdboxAera<300)
|| (lwRatio>3.0 && bdboxAera<100 && bdboxAera>10))
{

cv::Point p1(bdbox.x, bdbox.y);
cv::Point p2(bdbox.x + bdbox.width, bdbox.y + bdbox.height);
line_upper.push_back(p1);
line_lower.push_back(p2);
contours_nums+=1;
}
}
}
}
cv::Mat rgb;
cv::copyMakeBorder(PreInputProposal, rgb, PADDING_UP_DOWN, PADDING_UP_DOWN, 0, 0, cv::BORDER_REPLICATE);
std::pair<int, int> A;
std::pair<int, int> B;
A = FitLineRansac(line_upper, -1);
B = FitLineRansac(line_lower, 1);
int leftyB = A.first;
int rightyB = A.second;
int leftyA = B.first;
int rightyA = B.second;
int cols = rgb.cols;
int rows = rgb.rows;
std::vector<cv::Point2f> corners(4);
corners[0] = cv::Point2f(cols - 1, rightyA);
corners[1] = cv::Point2f(0, leftyA);
corners[2] = cv::Point2f(cols - 1, rightyB);
corners[3] = cv::Point2f(0, leftyB);
std::vector<cv::Point2f> corners_trans(4);
corners_trans[0] = cv::Point2f(136, 36);
corners_trans[1] = cv::Point2f(0, 36);
corners_trans[2] = cv::Point2f(136, 0);
corners_trans[3] = cv::Point2f(0, 0);
cv::Mat transform = cv::getPerspectiveTransform(corners, corners_trans);
cv::Mat quad = cv::Mat::zeros(36, 136, CV_8UC3);
cv::warpPerspective(rgb, quad, transform, quad.size());
return quad;
}
}



+67 -0  Prj-Win/lpr/src/FineTune.cpp

@@ -0,0 +1,67 @@
#include "../include/Finetune.h"
using namespace std;
using namespace cv;

namespace pr {
FineTune::FineTune(string finetune_prototxt, string finetune_caffemodel) {
FTNet = dnn::readNetFromCaffe(finetune_prototxt, finetune_caffemodel);
}
void FineTune::affine_crop(Mat src, vector<Point> pts, Mat &crop)
{
Point2f dst[4] = { Point2f(0,0),Point2f(160,0),Point2f(160,40),Point2f(0,40) };
Point2f srcpt[4] = { Point2f(pts[0]),Point2f(pts[1]) ,Point2f(pts[2]) ,Point2f(pts[3]) };
Mat _mat = getPerspectiveTransform(srcpt, dst);
warpPerspective(src, crop, _mat, Size(160, 40));
}
void FineTune::to_refine(Mat src, vector<Point>pts, Mat& crop)
{
float scale = 3.f;
int cx = 64; int cy = 24;
int cw = 64; int ch = 24;
int tx1 = cx - cw / 2;
int ty1 = cy - ch / 2;
int tx2 = cx + cw / 2;
int ty2 = cy - ch / 2;
int tx3 = cx + cw / 2;
int ty3 = cy + ch / 2;
int tx4 = cx - cw / 2;
int ty4 = cy + ch / 2;
vector<Point2f> dstp(4);
Point2f dst[4] = { (Point2f(tx1*scale, ty1*scale)) ,(Point2f(tx2*scale, ty2*scale)) ,(Point2f(tx3*scale, ty3*scale)) ,(Point2f(tx4*scale, ty4*scale)) };
Point2f pt[4] = { Point2f(pts[0]),Point2f(pts[1]) ,Point2f(pts[2]) ,Point2f(pts[3]) };
//estimater
Mat _mat = getPerspectiveTransform(pt, dst);
warpPerspective(src, crop, _mat, Size(120 * scale, 48 * scale));
}
void FineTune::Finetune(Mat src, Mat& dst)
{
Mat tof;// = src.clone();
resize(src, tof, Size(120, 48));
Mat blob = dnn::blobFromImage(tof, 0.0078125, Size(120, 48), Scalar(127.5, 127.5, 127.5), false, false);
FTNet.setInput(blob);
Mat outblob = FTNet.forward("conv6-3");

float *data = outblob.ptr<float>();
vector<Point> pts(4);
Mat fineMat(Size(2, 4), CV_32F, data);
for (int i = 0; i < fineMat.rows; i++)
{
pts[i].x = fineMat.at<float>(i, 0)*src.cols;
pts[i].y = fineMat.at<float>(i, 1)*src.rows;
}
Mat crop;
to_refine(src, pts, crop);
blob = dnn::blobFromImage(crop, 0.0078128, Size(120, 48), Scalar(127.5, 127.5, 127.5), false, false);
FTNet.setInput(blob);
outblob = FTNet.forward("conv6-3");
data = outblob.ptr<float>();
Mat fineMat2(Size(2, 4), CV_32F, data);
for (int i = 0; i < fineMat.rows; i++)
{
pts[i].x = fineMat2.at<float>(i, 0)*crop.cols;
pts[i].y = fineMat2.at<float>(i, 1)*crop.rows;
}
affine_crop(crop, pts, crop);
dst = crop.clone();
}
}

+34 -82  Prj-Win/lpr/src/Pipeline.cpp

@@ -1,85 +1,37 @@
//
// Created by Jack Yu on 23/10/2017.
//

#include "../include/Pipeline.h"


namespace pr {



const int HorizontalPadding = 4;
PipelinePR::PipelinePR(std::string detector_filename,
std::string finemapping_prototxt, std::string finemapping_caffemodel,
std::string segmentation_prototxt, std::string segmentation_caffemodel,
std::string charRecognization_proto, std::string charRecognization_caffemodel,
std::string segmentationfree_proto,std::string segmentationfree_caffemodel) {
plateDetection = new PlateDetection(detector_filename);
fineMapping = new FineMapping(finemapping_prototxt, finemapping_caffemodel);
plateSegmentation = new PlateSegmentation(segmentation_prototxt, segmentation_caffemodel);
generalRecognizer = new CNNRecognizer(charRecognization_proto, charRecognization_caffemodel);
segmentationFreeRecognizer = new SegmentationFreeRecognizer(segmentationfree_proto,segmentationfree_caffemodel);

}

PipelinePR::~PipelinePR() {

delete plateDetection;
delete fineMapping;
delete plateSegmentation;
delete generalRecognizer;
delete segmentationFreeRecognizer;


}

std::vector<PlateInfo> PipelinePR:: RunPiplineAsImage(cv::Mat plateImage,int method) {
std::vector<PlateInfo> results;
std::vector<pr::PlateInfo> plates;
plateDetection->plateDetectionRough(plateImage,plates,36,700);

for (pr::PlateInfo plateinfo:plates) {

cv::Mat image_finemapping = plateinfo.getPlateImage();
image_finemapping = fineMapping->FineMappingVertical(image_finemapping);
image_finemapping = pr::fastdeskew(image_finemapping, 5);



//Segmentation-based

if(method==SEGMENTATION_BASED_METHOD)
{
image_finemapping = fineMapping->FineMappingHorizon(image_finemapping, 2, HorizontalPadding);
cv::resize(image_finemapping, image_finemapping, cv::Size(136+HorizontalPadding, 36));
plateinfo.setPlateImage(image_finemapping);
std::vector<cv::Rect> rects;
plateSegmentation->segmentPlatePipline(plateinfo, 1, rects);
plateSegmentation->ExtractRegions(plateinfo, rects);
cv::copyMakeBorder(image_finemapping, image_finemapping, 0, 0, 0, 20, cv::BORDER_REPLICATE);
plateinfo.setPlateImage(image_finemapping);
generalRecognizer->SegmentBasedSequenceRecognition(plateinfo);
plateinfo.decodePlateNormal(pr::CH_PLATE_CODE);

}
//Segmentation-free
else if(method==SEGMENTATION_FREE_METHOD)
{
image_finemapping = fineMapping->FineMappingHorizon(image_finemapping, 4, HorizontalPadding+3);
cv::resize(image_finemapping, image_finemapping, cv::Size(136+HorizontalPadding, 36));
plateinfo.setPlateImage(image_finemapping);
std::pair<std::string,float> res = segmentationFreeRecognizer->SegmentationFreeForSinglePlate(plateinfo.getPlateImage(),pr::CH_PLATE_CODE);
plateinfo.confidence = res.second;
plateinfo.setPlateName(res.first);
}
results.push_back(plateinfo);
}

return results;

}//namespace pr



}
PipelinePR::PipelinePR(std::string detect_prototxt, std::string detect_caffemodel,
std::string finetune_prototxt, std::string finetune_caffemodel,
std::string platerec_prototxt, std::string platerec_caffemodel/*,
std::string platecolor_color*/)
{
platedetection = new PlateDetection(detect_prototxt, detect_caffemodel);
finetune = new FineTune(finetune_prototxt, finetune_caffemodel);
platerecognation = new PlateRecognation(platerec_prototxt, platerec_caffemodel);
//platecolorclass = new PlateColorClass(platecolor_color);
}
PipelinePR::~PipelinePR()
{
delete platedetection;
delete finetune;
delete platerecognation;
//delete platecolorclass;
}
std::vector<PlateInfo> PipelinePR::RunPiplineAsImage(cv::Mat plateimg)
{
std::vector<pr::PlateInfo> plates;
std::vector<PlateInfo> plateres;

platedetection->Detectssd(plateimg, plates);
for (pr::PlateInfo plateinfo : plates) {
cv::Mat image = plateinfo.getPlateImage();
cv::Mat CropImg;
finetune->Finetune(image, CropImg);
platerecognation->segmentation_free_recognation(CropImg, plateinfo);
//platecolorclass->plateColor(CropImg, plateinfo);
plateres.push_back(plateinfo);
}
return plateres;
}
}

+58 -30  Prj-Win/lpr/src/PlateDetection.cpp

@@ -1,32 +1,60 @@
#include "../include/PlateDetection.h"
#include "util.h"
namespace pr{
PlateDetection::PlateDetection(std::string filename_cascade){
cascade.load(filename_cascade);
#include "../include/Platedetect.h"

};
void PlateDetection::plateDetectionRough(cv::Mat InputImage,std::vector<pr::PlateInfo> &plateInfos,int min_w,int max_w){
cv::Mat processImage;
cv::cvtColor(InputImage,processImage,cv::COLOR_BGR2GRAY);
std::vector<cv::Rect> platesRegions;
cv::Size minSize(min_w,min_w/4);
cv::Size maxSize(max_w,max_w/4);
cascade.detectMultiScale( processImage, platesRegions,
1.1, 3, cv::CASCADE_SCALE_IMAGE,minSize,maxSize);
for(auto plate:platesRegions)
{
int zeroadd_w = static_cast<int>(plate.width*0.30);
int zeroadd_h = static_cast<int>(plate.height*2);
int zeroadd_x = static_cast<int>(plate.width*0.15);
int zeroadd_y = static_cast<int>(plate.height*1);
plate.x-=zeroadd_x;
plate.y-=zeroadd_y;
plate.height += zeroadd_h;
plate.width += zeroadd_w;
cv::Mat plateImage = util::cropFromImage(InputImage,plate);
PlateInfo plateInfo(plateImage,plate);
plateInfos.push_back(plateInfo);
using namespace cv;
using namespace std;
namespace pr {

}
}
}//namespace pr
PlateDetection::PlateDetection(std::string ssd_prototxt, std::string ssd_caffemodel)
{
ssdNet = cv::dnn::readNetFromCaffe(ssd_prototxt, ssd_caffemodel);
}
void PlateDetection::Detectssd(cv::Mat img, std::vector<pr::PlateInfo> &plateInfos)
{
int cols = img.cols;
int rows = img.rows;
Mat in;
img.convertTo(in, CV_32F);
Mat input(img.size(), CV_32FC3);
Mat inputblob1 = input.reshape(1, { 1, 3,rows,cols });
Mat input_blob = dnn::blobFromImages(in, 0.225, Size(), Scalar(103.53, 116.28, 123.675), false);
float *blobdata = input_blob.ptr<float>();
float *blobdata2 = inputblob1.ptr<float>();
{
for (int i = 0; i < rows; i++)
{
memcpy(blobdata2 + i * cols, blobdata + 3 * i * cols, cols * sizeof(float));
memcpy(blobdata2 + i * cols + rows * cols, blobdata + (1 + 3 * i) * cols, cols * sizeof(float));
memcpy(blobdata2 + i * cols + rows * cols * 2, blobdata + (2 + 3 * i) * cols, cols * sizeof(float));
}
}
ssdNet.setInput(inputblob1);

Mat outputBlob = ssdNet.forward("detection_out");

Mat detectmat(outputBlob.size[2], outputBlob.size[3], CV_32F, outputBlob.ptr<float>());
vector<Rect> recs;
vector<float>scs;
for (int i = 0; i < detectmat.rows; i++)
{
float confidence = detectmat.at<float>(i, 2);
if (confidence > 0.5)
{
int x1, x2, y1, y2;
Rect rec;
Mat cimg;
x1 = int(detectmat.at<float>(i, 3) * cols);
y1 = int(detectmat.at<float>(i, 4) * rows);
x2 = int(detectmat.at<float>(i, 5) * cols);
y2 = int(detectmat.at<float>(i, 6) * rows);
x1 = max(x1, 0);
y1 = max(y1, 0);
x2 = min(x2, cols - 1);
y2 = min(y2, rows - 1);
rec.x = x1; rec.y = y1; rec.width = (x2 - x1 + 1); rec.height = (y2 - y1 + 1);
img(rec).copyTo(cimg);
PlateInfo plateInfo(cimg, rec);
plateInfos.push_back(plateInfo);
}
}
}
}

+61 -0  Prj-Win/lpr/src/PlateRecognation.cpp

@@ -0,0 +1,61 @@
#include "../include/PlateRecognation.h"
#include "../include/Pipeline.h"
using namespace std;
using namespace cv;
namespace pr {

PlateRecognation::PlateRecognation(std::string rec_prototxt, std::string rec_caffemodel)
{
RecNet = cv::dnn::readNetFromCaffe(rec_prototxt, rec_caffemodel);
}
void PlateRecognation::segmentation_free_recognation(cv::Mat src, pr::PlateInfo &plateinfo)
{
float score = 0;
string text = "";
Mat src1 = src.clone();
Mat inputMat(Size(40, 160), CV_8UC3);

for (int j = 0; j < src.rows; j++)
{
for (int i = 0; i < src.cols; i++)
{
inputMat.at<Vec3b>(i, j) = src1.at<Vec3b>(j, i);
}
}
Mat blob = dnn::blobFromImage(inputMat, 1 / 255.f, Size(40, 160), Scalar(0, 0, 0), false, false);
RecNet.setInput(blob);
Mat outblob = RecNet.forward();
int x = outblob.size[2];
int y = outblob.size[0];
float *data = outblob.ptr<float>();
vector<float> scores(84);
vector<int>maxidxs;
vector<float> maxscore;
for (int i = 2; i < 20; i++)
{
for (int j = 0; j < 84; j++)
{
scores[j] = data[j * 20 + i];
}
int idx = max_element(scores.begin(), scores.end()) - scores.begin();
maxidxs.push_back(idx);
maxscore.push_back(scores[idx]);
}
int charnum = 0;
for (int i = 0; i < maxidxs.size(); i++)
{
if (maxidxs[i] < pr::CH_PLATE_CODE.size() && (i == 0 || (maxidxs[i - 1] != maxidxs[i])))
{
text += pr::CH_PLATE_CODE[maxidxs[i]];
score += maxscore[i];
charnum++;
}
}
if (charnum > 0)
{
score /= charnum;
}
plateinfo.setPlateName(text);
plateinfo.confidence = score;
}
}
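
For context, a minimal sketch of calling the new segmentation-free recognizer on a single plate crop. The crop path is hypothetical and the 160x40 size follows the transpose logic above; this is an illustration, not part of the commit.

// Usage sketch (illustrative only, see assumptions above).
#include "../include/PlateRecognation.h"
#include "../include/Pipeline.h"
#include <iostream>

int main()
{
    pr::PlateRecognation rec("../lpr/model/SegmenationFree-Inception.prototxt",
                             "../lpr/model/SegmenationFree-Inception.caffemodel");
    cv::Mat crop = cv::imread("../lpr/res/plate_crop.jpg"); // hypothetical 160x40 BGR plate crop
    cv::resize(crop, crop, cv::Size(160, 40));
    pr::PlateInfo info(crop, cv::Rect(0, 0, crop.cols, crop.rows));
    rec.segmentation_free_recognation(crop, info);
    std::cout << info.getPlateName() << " " << info.confidence << std::endl;
    return 0;
}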

+ 0
- 404
Prj-Win/lpr/src/PlateSegmentation.cpp View File

@@ -1,404 +0,0 @@
//
// Created by Jack Yu on 16/10/2017.
//

#include "../include/PlateSegmentation.h"
#include "../include/niBlackThreshold.h"


//#define DEBUG
namespace pr{

PlateSegmentation::PlateSegmentation(std::string prototxt,std::string caffemodel) {
net = cv::dnn::readNetFromCaffe(prototxt, caffemodel);
}
cv::Mat PlateSegmentation::classifyResponse(const cv::Mat &cropped){
cv::Mat inputBlob = cv::dnn::blobFromImage(cropped, 1/255.0, cv::Size(22,22), cv::Scalar(0,0,0),false);
net.setInput(inputBlob,"data");
return net.forward();
}

void drawHist(float* seq,int size,const char* name){
cv::Mat image(300,size,CV_8U);
image.setTo(0);
float* start =seq;
float* end = seq+size;
float l = *std::max_element(start,end);
for(int i = 0;i<size;i++)
{
int p = int(float(seq[i])/l*300);
cv::line(image,cv::Point(i,300),cv::Point(i,300-p),cv::Scalar(255,255,255));
}
cv::resize(image,image,cv::Size(600,100));
cv::imshow(name,image);
}

inline void computeSafeMargin(int &val,const int &rows){
val = std::min(val,rows);
val = std::max(val,0);
}

cv::Rect boxFromCenter(const cv::Point center,int left,int right,int top,int bottom,cv::Size bdSize)
{
cv::Point p1(center.x - left ,center.y - top);
cv::Point p2( center.x + right, center.y + bottom);
p1.x = std::max(0,p1.x);
p1.y = std::max(0,p1.y);
p2.x = std::min(p2.x,bdSize.width-1);
p2.y = std::min(p2.y,bdSize.height-1);
cv::Rect rect(p1,p2);
return rect;
}

cv::Rect boxPadding(cv::Rect rect,int left,int right,int top,int bottom,cv::Size bdSize)
{

cv::Point center(rect.x+(rect.width>>1),rect.y + (rect.height>>1));
int rebuildLeft = (rect.width>>1 )+ left;
int rebuildRight = (rect.width>>1 )+ right;
int rebuildTop = (rect.height>>1 )+ top;
int rebuildBottom = (rect.height>>1 )+ bottom;
return boxFromCenter(center,rebuildLeft,rebuildRight,rebuildTop,rebuildBottom,bdSize);

}



void PlateSegmentation:: refineRegion(cv::Mat &plateImage,const std::vector<int> &candidatePts,const int padding,std::vector<cv::Rect> &rects){
int w = candidatePts[5] - candidatePts[4];
int cols = plateImage.cols;
int rows = plateImage.rows;
for(int i = 0 ; i < candidatePts.size() ; i++)
{
int left = 0;
int right = 0 ;

if(i == 0 ){
left= candidatePts[i];
right = left+w+padding;
}
else {
left = candidatePts[i] - padding;
right = left + w + padding * 2;
}

computeSafeMargin(right,cols);
computeSafeMargin(left,cols);
cv::Rect roi(left,0,right - left,rows-1);
cv::Mat roiImage;
plateImage(roi).copyTo(roiImage);

if (i>=1)
{

cv::Mat roi_thres;
// cv::threshold(roiImage,roi_thres,0,255,cv::THRESH_OTSU|cv::THRESH_BINARY);

niBlackThreshold(roiImage,roi_thres,255,cv::THRESH_BINARY,15,0.27,BINARIZATION_NIBLACK);

std::vector<std::vector<cv::Point>> contours;
cv::findContours(roi_thres,contours,cv::RETR_LIST,cv::CHAIN_APPROX_SIMPLE);
cv::Point boxCenter(roiImage.cols>>1,roiImage.rows>>1);

cv::Rect final_bdbox;
cv::Point final_center;
int final_dist = INT_MAX;


for(auto contour:contours)
{
cv::Rect bdbox = cv::boundingRect(contour);
cv::Point center(bdbox.x+(bdbox.width>>1),bdbox.y + (bdbox.height>>1));
int dist = (center.x - boxCenter.x)*(center.x - boxCenter.x);
if(dist<final_dist && bdbox.height > rows>>1)
{ final_dist =dist;
final_center = center;
final_bdbox = bdbox;
}
}

//rebuild box
if(final_bdbox.height/ static_cast<float>(final_bdbox.width) > 3.5 && final_bdbox.width*final_bdbox.height<10)
final_bdbox = boxFromCenter(final_center,8,8,(rows>>1)-3 , (rows>>1) - 2,roiImage.size());
else {
if(i == candidatePts.size()-1)
final_bdbox = boxPadding(final_bdbox, padding/2, padding, padding/2, padding/2, roiImage.size());
else
final_bdbox = boxPadding(final_bdbox, padding, padding, padding, padding, roiImage.size());


// std::cout<<final_bdbox<<std::endl;
// std::cout<<roiImage.size()<<std::endl;
#ifdef DEBUG
cv::imshow("char_thres",roi_thres);

cv::imshow("char",roiImage(final_bdbox));
cv::waitKey(0);
#endif


}


final_bdbox.x += left;

rects.push_back(final_bdbox);
//

}
else
{
rects.push_back(roi);
}

// else
// {
//
// }

// cv::GaussianBlur(roiImage,roiImage,cv::Size(7,7),3);
//
// cv::imshow("image",roiImage);
// cv::waitKey(0);


}



}
void avgfilter(float *angle_list,int size,int windowsSize) {
float *filterd = new float[size];
for(int i = 0 ; i < size ; i++) filterd [i] = angle_list[i];
// memcpy(filterd,angle_list,size);

cv::Mat kernal_gaussian = cv::getGaussianKernel(windowsSize,3,CV_32F);
float *kernal = (float*)kernal_gaussian.data;
// kernal+=windowsSize;
int r = windowsSize/2;




for (int i = 0; i < size; i++) {
float avg = 0.00f;
for (int j = 0; j < windowsSize; j++) {
if(i+j-r>0&&i+j+r<size-1)
avg += filterd[i + j-r]*kernal[j];
}
// avg = avg / windowsSize;
angle_list[i] = avg;

}

delete[] filterd; // filterd was allocated with new[]
}

void PlateSegmentation::templateMatchFinding(const cv::Mat &respones,int windowsWidth,std::pair<float,std::vector<int>> &candidatePts){
int rows = respones.rows;
int cols = respones.cols;



float *data = (float*)respones.data;
float *engNum_prob = data;
float *false_prob = data+cols;
float *ch_prob = data+cols*2;

avgfilter(engNum_prob,cols,5);
avgfilter(false_prob,cols,5);
// avgfilter(ch_prob,cols,5);
std::vector<int> candidate_pts(7);
#ifdef DEBUG
drawHist(engNum_prob,cols,"engNum_prob");
drawHist(false_prob,cols,"false_prob");
drawHist(ch_prob,cols,"ch_prob");
cv::waitKey(0);
#endif




int cp_list[7];
float loss_selected = -10;

for(int start = 0 ; start < 20 ; start+=2)
for(int width = windowsWidth-5; width < windowsWidth+5 ; width++ ){
for(int interval = windowsWidth/2; interval < windowsWidth; interval++)
{
int cp1_ch = start;
int cp2_p0 = cp1_ch+ width;
int cp3_p1 = cp2_p0+ width + interval;
int cp4_p2 = cp3_p1 + width;
int cp5_p3 = cp4_p2 + width+1;
int cp6_p4 = cp5_p3 + width+2;
int cp7_p5= cp6_p4+ width+2;

int md1 = (cp1_ch+cp2_p0)>>1;
int md2 = (cp2_p0+cp3_p1)>>1;
int md3 = (cp3_p1+cp4_p2)>>1;
int md4 = (cp4_p2+cp5_p3)>>1;
int md5 = (cp5_p3+cp6_p4)>>1;
int md6 = (cp6_p4+cp7_p5)>>1;




if(cp7_p5>=cols)
continue;
// float loss = ch_prob[cp1_ch]+
// engNum_prob[cp2_p0] +engNum_prob[cp3_p1]+engNum_prob[cp4_p2]+engNum_prob[cp5_p3]+engNum_prob[cp6_p4] +engNum_prob[cp7_p5]
// + (false_prob[md2]+false_prob[md3]+false_prob[md4]+false_prob[md5]+false_prob[md5] + false_prob[md6]);
float loss = ch_prob[cp1_ch]*3 -(false_prob[cp3_p1]+false_prob[cp4_p2]+false_prob[cp5_p3]+false_prob[cp6_p4]+false_prob[cp7_p5]);

if(loss>loss_selected)
{
loss_selected = loss;
cp_list[0]= cp1_ch;
cp_list[1]= cp2_p0;
cp_list[2]= cp3_p1;
cp_list[3]= cp4_p2;
cp_list[4]= cp5_p3;
cp_list[5]= cp6_p4;
cp_list[6]= cp7_p5;
}
}
}
candidate_pts[0] = cp_list[0];
candidate_pts[1] = cp_list[1];
candidate_pts[2] = cp_list[2];
candidate_pts[3] = cp_list[3];
candidate_pts[4] = cp_list[4];
candidate_pts[5] = cp_list[5];
candidate_pts[6] = cp_list[6];

candidatePts.first = loss_selected;
candidatePts.second = candidate_pts;

};


void PlateSegmentation::segmentPlateBySlidingWindows(cv::Mat &plateImage,int windowsWidth,int stride,cv::Mat &respones){


// cv::resize(plateImage,plateImage,cv::Size(136,36));

cv::Mat plateImageGray;
cv::cvtColor(plateImage,plateImageGray,cv::COLOR_BGR2GRAY);
int padding = plateImage.cols-136 ;
// int padding = 0 ;
int height = plateImage.rows - 1;
int width = plateImage.cols - 1 - padding;
for(int i = 0 ; i < width - windowsWidth +1 ; i +=stride)
{
cv::Rect roi(i,0,windowsWidth,height);
cv::Mat roiImage = plateImageGray(roi);
cv::Mat response = classifyResponse(roiImage);
respones.push_back(response);
}




respones = respones.t();
// std::pair<float,std::vector<int>> images ;
//
//
// std::cout<<images.first<<" ";
// for(int i = 0 ; i < images.second.size() ; i++)
// {
// std::cout<<images.second[i]<<" ";
//// cv::line(plateImageGray,cv::Point(images.second[i],0),cv::Point(images.second[i],36),cv::Scalar(255,255,255),1); //DEBUG
// }

// int w = images.second[5] - images.second[4];

// cv::line(plateImageGray,cv::Point(images.second[5]+w,0),cv::Point(images.second[5]+w,36),cv::Scalar(255,255,255),1); //DEBUG
// cv::line(plateImageGray,cv::Point(images.second[5]+2*w,0),cv::Point(images.second[5]+2*w,36),cv::Scalar(255,255,255),1); //DEBUG


// RefineRegion(plateImageGray,images.second,5);

// std::cout<<w<<std::endl;

// std::cout<<<<std::endl;

// cv::resize(plateImageGray,plateImageGray,cv::Size(600,100));



}

// void filterGaussian(cv::Mat &respones,float sigma){
//
// }


void PlateSegmentation::segmentPlatePipline(PlateInfo &plateInfo,int stride,std::vector<cv::Rect> &Char_rects){
cv::Mat plateImage = plateInfo.getPlateImage(); // get the source plate image
cv::Mat plateImageGray;
cv::cvtColor(plateImage,plateImageGray,cv::COLOR_BGR2GRAY);
// do binarization
std::pair<float,std::vector<int>> sections; // segmentation point variables

cv::Mat respones; // three responses for every sub-region of the original image
segmentPlateBySlidingWindows(plateImage,DEFAULT_WIDTH,1,respones);
templateMatchFinding(respones,DEFAULT_WIDTH/stride,sections);
for(int i = 0; i < sections.second.size() ; i++)
{
sections.second[i]*=stride;

}

// std::cout<<sections<<std::endl;

refineRegion(plateImageGray,sections.second,5,Char_rects);
#ifdef DEBUG
for(int i = 0 ; i < sections.second.size() ; i++)
{
std::cout<<sections.second[i]<<" ";
cv::line(plateImageGray,cv::Point(sections.second[i],0),cv::Point(sections.second[i],36),cv::Scalar(255,255,255),1); //DEBUG
}
cv::imshow("plate",plateImageGray);
cv::waitKey(0);
#endif
// cv::waitKey(0);

}

void PlateSegmentation::ExtractRegions(PlateInfo &plateInfo,std::vector<cv::Rect> &rects){
cv::Mat plateImage = plateInfo.getPlateImage();
for(int i = 0 ; i < rects.size() ; i++){
cv::Mat charImage;
plateImage(rects[i]).copyTo(charImage);
if(charImage.channels())
cv::cvtColor(charImage,charImage,cv::COLOR_BGR2GRAY);
// cv::imshow("image",charImage);
// cv::waitKey(0);
cv::equalizeHist(charImage,charImage);
//

//


std::pair<CharType,cv::Mat> char_instance;
if(i == 0 ){

char_instance.first = CHINESE;


} else if(i == 1){
char_instance.first = LETTER;
}
else{
char_instance.first = LETTER_NUMS;
}
char_instance.second = charImage;
plateInfo.appendPlateChar(char_instance);

}

}

}//namespace pr

+ 0
- 23
Prj-Win/lpr/src/Recognizer.cpp View File

@@ -1,23 +0,0 @@
//
// Created by Jack Yu on 22/10/2017.
//

#include "../include/Recognizer.h"

namespace pr{
void GeneralRecognizer::SegmentBasedSequenceRecognition(PlateInfo &plateinfo){
for(auto char_instance:plateinfo.plateChars)
{
std::pair<CharType,cv::Mat> res;
if(char_instance.second.rows*char_instance.second.cols>40) {
label code_table = recognizeCharacter(char_instance.second);
res.first = char_instance.first;
code_table.copyTo(res.second);
plateinfo.appendPlateCoding(res);
} else{
res.first = INVALID;
plateinfo.appendPlateCoding(res);
}
}
}
}

+ 0
- 89
Prj-Win/lpr/src/SegmentationFreeRecognizer.cpp View File

@@ -1,89 +0,0 @@
//
// Created by Jack Yu on 28/11/2017.
//
#include "../include/SegmentationFreeRecognizer.h"

namespace pr {
SegmentationFreeRecognizer::SegmentationFreeRecognizer(std::string prototxt, std::string caffemodel) {
net = cv::dnn::readNetFromCaffe(prototxt, caffemodel);
}
inline int judgeCharRange(int id)
{
return id < 31 || id > 63;
}
std::pair<std::string,float> decodeResults(cv::Mat code_table,std::vector<std::string> mapping_table,float thres)
{
cv::MatSize mtsize = code_table.size;
int sequencelength = mtsize[2];
int labellength = mtsize[1];
cv::transpose(code_table.reshape(1,1).reshape(1,labellength),code_table);
std::string name = "";
std::vector<int> seq(sequencelength);
std::vector<std::pair<int,float>> seq_decode_res;
for(int i = 0 ; i < sequencelength; i++) {
float *fstart = ((float *) (code_table.data) + i * labellength );
int id = std::max_element(fstart,fstart+labellength) - fstart;
seq[i] =id;
}

float sum_confidence = 0;
int plate_lenghth = 0 ;
for(int i = 0 ; i< sequencelength ; i++)
{
if(seq[i]!=labellength-1 && (i==0 || seq[i]!=seq[i-1]))
{
float *fstart = ((float *) (code_table.data) + i * labellength );
float confidence = *(fstart+seq[i]);
std::pair<int,float> pair_(seq[i],confidence);
seq_decode_res.push_back(pair_);
}
}
int i = 0;
if (seq_decode_res.size()>1 && judgeCharRange(seq_decode_res[0].first) && judgeCharRange(seq_decode_res[1].first))
{
i=2;
int c = seq_decode_res[0].second<seq_decode_res[1].second;
name+=mapping_table[seq_decode_res[c].first];
sum_confidence+=seq_decode_res[c].second;
plate_lenghth++;
}

for(; i < seq_decode_res.size();i++)
{
name+=mapping_table[seq_decode_res[i].first];
sum_confidence +=seq_decode_res[i].second;
plate_lenghth++;
}
std::pair<std::string,float> res;
res.second = sum_confidence/plate_lenghth;
res.first = name;
return res;

}
std::string decodeResults(cv::Mat code_table,std::vector<std::string> mapping_table)
{
cv::MatSize mtsize = code_table.size;
int sequencelength = mtsize[2];
int labellength = mtsize[1];
cv::transpose(code_table.reshape(1,1).reshape(1,labellength),code_table);
std::string name = "";
std::vector<int> seq(sequencelength);
for(int i = 0 ; i < sequencelength; i++) {
float *fstart = ((float *) (code_table.data) + i * labellength );
int id = std::max_element(fstart,fstart+labellength) - fstart;
seq[i] =id;
}
for(int i = 0 ; i< sequencelength ; i++)
{
if(seq[i]!=labellength-1 && (i==0 || seq[i]!=seq[i-1]))
name+=mapping_table[seq[i]];
}
return name;
}
std::pair<std::string,float> SegmentationFreeRecognizer::SegmentationFreeForSinglePlate(cv::Mat Image,std::vector<std::string> mapping_table) {
cv::transpose(Image,Image);
cv::Mat inputBlob = cv::dnn::blobFromImage(Image, 1 / 255.0, cv::Size(40,160));
net.setInput(inputBlob, "data");
cv::Mat char_prob_mat = net.forward();
return decodeResults(char_prob_mat,mapping_table,0.00);
}
}

+ 0
- 68
Prj-Win/lpr/src/util.h View File

@@ -1,68 +0,0 @@
//
// Created by Jack Yu on 04/04/2017.
//

#include <opencv2/opencv.hpp>
namespace util{
template <class T> void swap ( T& a, T& b )
{
T c(a); a=b; b=c;
}
template <class T> T min(T& a,T& b )
{
return a>b?b:a;
}

cv::Mat cropFromImage(const cv::Mat &image,cv::Rect rect){
int w = image.cols-1;
int h = image.rows-1;
rect.x = std::max(rect.x,0);
rect.y = std::max(rect.y,0);
rect.height = std::min(rect.height,h-rect.y);
rect.width = std::min(rect.width,w-rect.x);
cv::Mat temp(rect.size(), image.type());
cv::Mat cropped;
temp = image(rect);
temp.copyTo(cropped);
return cropped;

}

cv::Mat cropBox2dFromImage(const cv::Mat &image,cv::RotatedRect rect)
{
cv::Mat M, rotated, cropped;
float angle = rect.angle;
cv::Size rect_size(rect.size.width,rect.size.height);
if (rect.angle < -45.) {
angle += 90.0;
swap(rect_size.width, rect_size.height);
}
M = cv::getRotationMatrix2D(rect.center, angle, 1.0);
cv::warpAffine(image, rotated, M, image.size(), cv::INTER_CUBIC);
cv::getRectSubPix(rotated, rect_size, rect.center, cropped);
return cropped;
}

cv::Mat calcHist(const cv::Mat &image)
{
cv::Mat hsv;
std::vector<cv::Mat> hsv_planes;
cv::cvtColor(image,hsv,cv::COLOR_BGR2HSV);
cv::split(hsv,hsv_planes);
cv::Mat hist;
int histSize = 256;
float range[] = {0,255};
const float* histRange = {range};
cv::calcHist( &hsv_planes[0], 1, 0, cv::Mat(), hist, 1, &histSize, &histRange,true, true);
return hist;
}
float computeSimilir(const cv::Mat &A,const cv::Mat &B)
{
cv::Mat histA,histB;
histA = calcHist(A);
histB = calcHist(B);
// return cv::compareHist(histA,histB,CV_COMP_CORREL);
return cv::compareHist(histA, histB, 0);
}
}//namespace util

+ 27
- 0
Prj-Win/lpr/tests/testPipeLine.cpp View File

@@ -0,0 +1,27 @@
#include"../include/Pipeline.h"

void TEST_PIPELINE()
{
pr::PipelinePR prc("../lpr\\model\\mininet_ssd_v1.prototxt", "../lpr\\model\\mininet_ssd_v1.caffemodel",
"../lpr\\model\\refinenet.prototxt", "../lpr\\model\\refinenet.caffemodel",
"../lpr\\model\\SegmenationFree-Inception.prototxt", "../lpr\\model\\SegmenationFree-Inception.caffemodel");
cv::Mat img = cv::imread("../lpr\\res\\test.jpg");
std::vector<pr::PlateInfo> res = prc.RunPiplineAsImage(img);
for (auto st : res) {
if (st.confidence > 0.75) {
std::cout << st.getPlateName() << " " << st.confidence << std::endl;
cv::Rect region = st.getPlateRect();

cv::rectangle(img, cv::Point(region.x, region.y), cv::Point(region.x + region.width, region.y + region.height), cv::Scalar(255, 255, 0), 2);
}
}

cv::imshow("image", img);
cv::waitKey(0);

}
int main()
{
TEST_PIPELINE();
return 0;
}

+ 0
- 34
Prj-Win/lpr/tests/test_detection.cpp View File

@@ -1,34 +0,0 @@
//
// Created by 庾金科 on 20/09/2017.
//

#include <../include/PlateDetection.h>


void drawRect(cv::Mat image,cv::Rect rect)
{
cv::Point p1(rect.x,rect.y);
cv::Point p2(rect.x+rect.width,rect.y+rect.height);
cv::rectangle(image,p1,p2,cv::Scalar(0,255,0),1);
}


int main()
{
cv::Mat image = cv::imread("res/test1.jpg");
pr::PlateDetection plateDetection("model/cascade.xml");
std::vector<pr::PlateInfo> plates;
plateDetection.plateDetectionRough(image,plates);
for(pr::PlateInfo platex:plates)
{
drawRect(image,platex.getPlateRect());
cv::imwrite("res/cache/test.png",platex.getPlateImage());
cv::imshow("image",platex.getPlateImage());
cv::waitKey(0);
}
cv::imshow("image",image);
cv::waitKey(0);
return 0 ;


}

+ 0
- 34
Prj-Win/lpr/tests/test_fastdeskew.cpp View File

@@ -1,34 +0,0 @@
//
// Created by Jack Yu on 02/10/2017.
//


#include <../include/FastDeskew.h>


void drawRect(cv::Mat image,cv::Rect rect)
{
cv::Point p1(rect.x,rect.y);
cv::Point p2(rect.x+rect.width,rect.y+rect.height);
cv::rectangle(image,p1,p2,cv::Scalar(0,255,0),1);
}
void TEST_DESKEW(){

cv::Mat image = cv::imread("res/3.png",cv::IMREAD_GRAYSCALE);
// cv::resize(image,image,cv::Size(136*2,36*2));
cv::Mat deskewed = pr::fastdeskew(image,12);
// cv::imwrite("./res/4.png",deskewed);
// cv::Mat deskewed2 = pr::fastdeskew(deskewed,12);
//
cv::imshow("image",deskewed);
cv::waitKey(0);

}
int main()
{

TEST_DESKEW();
return 0 ;


}

+ 0
- 25
Prj-Win/lpr/tests/test_finemapping.cpp View File

@@ -1,25 +0,0 @@
//
// Created by Jack Yu on 24/09/2017.
//

#include "FineMapping.h"






int main()
{
cv::Mat image = cv::imread("res/cache/test.png");
cv::Mat image_finemapping = pr::FineMapping::FineMappingVertical(image);
pr::FineMapping finemapper = pr::FineMapping("model/HorizonalFinemapping.prototxt","model/HorizonalFinemapping.caffemodel");
image_finemapping = finemapper.FineMappingHorizon(image_finemapping,0,-3);
cv::imwrite("res/cache/finemappingres.png",image_finemapping);
cv::imshow("image",image_finemapping);
cv::waitKey(0);


return 0 ;

}

+ 0
- 229
Prj-Win/lpr/tests/test_pipeline.cpp View File

@@ -1,229 +0,0 @@
//
// Created by Jack Yu on 23/10/2017.
//

#include "../include/Pipeline.h"
#include<fstream>
#include<vector>



using namespace std;


template<class T>
static unsigned int levenshtein_distance(const T &s1, const T &s2) {
const size_t len1 = s1.size(), len2 = s2.size();
std::vector<unsigned int> col(len2 + 1), prevCol(len2 + 1);

for (unsigned int i = 0; i < prevCol.size(); i++) prevCol[i] = i;
for (unsigned int i = 0; i < len1; i++) {
col[0] = i + 1;
for (unsigned int j = 0; j < len2; j++)
col[j + 1] = min(
min(prevCol[1 + j] + 1, col[j] + 1),
prevCol[j] + (s1[i] == s2[j] ? 0 : 1));
col.swap(prevCol);
}
return prevCol[len2];
}


void TEST_CAM()
{
cv::VideoCapture capture("test1.mp4");
cv::Mat frame;
pr::PipelinePR prc("../lpr/model/cascade.xml",
"../lpr/model/HorizonalFinemapping.prototxt", "../lpr/model/HorizonalFinemapping.caffemodel",
"../lpr/model/Segmentation.prototxt", "../lpr/model/Segmentation.caffemodel",
"../lpr/model/CharacterRecognization.prototxt", "../lpr/model/CharacterRecognization.caffemodel",
"../lpr/model/SegmentationFree.prototxt", "../lpr/model/SegmentationFree.caffemodel"
);
while (1) {
// read the next frame
if (!capture.read(frame)) {
std::cout << "failed to read video frame" << std::endl;
exit(1);
}
//
// cv::transpose(frame,frame);
// cv::flip(frame,frame,2);

// cv::resize(frame,frame,cv::Size(frame.cols/2,frame.rows/2));


std::vector<pr::PlateInfo> res = prc.RunPiplineAsImage(frame, pr::SEGMENTATION_FREE_METHOD);

for (auto st : res) {
if (st.confidence > 0.75) {
std::cout << st.getPlateName() << " " << st.confidence << std::endl;
cv::Rect region = st.getPlateRect();

cv::rectangle(frame, cv::Point(region.x, region.y), cv::Point(region.x + region.width, region.y + region.height), cv::Scalar(255, 255, 0), 2);
}
}

cv::imshow("image", frame);
cv::waitKey(1);
}
}


void TEST_ACC() {

pr::PipelinePR prc("../lpr/model/cascade.xml",
"../lpr/model/HorizonalFinemapping.prototxt", "../lpr/model/HorizonalFinemapping.caffemodel",
"../lpr/model/Segmentation.prototxt", "../lpr/model/Segmentation.caffemodel",
"../lpr/model/CharacterRecognization.prototxt", "../lpr/model/CharacterRecognization.caffemodel",
"../lpr/model/SegmentationFree.prototxt", "../lpr/model/SegmentationFree.caffemodel"
);

ifstream file;
string imagename;
int n = 0, correct = 0, j = 0, sum = 0;
char filename[] = "/Users/yujinke/Downloads/general_test/1.txt";
string pathh = "/Users/yujinke/Downloads/general_test/";
file.open(filename, ios::in);
while (!file.eof())
{
file >> imagename;
string imgpath = pathh + imagename;
std::cout << "------------------------------------------------" << endl;
cout << "图片名:" << imagename << endl;
cv::Mat image = cv::imread(imgpath);
// cv::imshow("image", image);
// cv::waitKey(0);

std::vector<pr::PlateInfo> res = prc.RunPiplineAsImage(image, pr::SEGMENTATION_FREE_METHOD);

float conf = 0;
vector<float> con;
vector<string> name;
for (auto st : res) {
if (st.confidence > 0.1) {
//std::cout << st.getPlateName() << " " << st.confidence << std::endl;
con.push_back(st.confidence);
name.push_back(st.getPlateName());
//conf += st.confidence;
}
else
cout << "no string" << endl;
}
// std::cout << conf << std::endl;
int num = con.size();
float max = 0;
string platestr, chpr, ch;
int diff = 0, dif = 0;
for (int i = 0; i < num; i++) {

if (con.at(i) > max)
{
max = con.at(i);
platestr = name.at(i);
}

}
// cout << "max:"<<max << endl;
cout << "string:" << platestr << endl;
chpr = platestr.substr(0, 2);
ch = imagename.substr(0, 2);
diff = levenshtein_distance(imagename, platestr);
dif = diff - 4;
cout << "差距:" << dif << endl;
sum += dif;
if (ch != chpr) n++;
if (diff == 0) correct++;
j++;
}
float cha = 1 - float(n) / float(j);
std::cout << "------------------------------------------------" << endl;
cout << "车牌总数:" << j << endl;
cout << "汉字识别准确率:" << cha << endl;
float chaccuracy = 1 - float(sum - n * 2) / float(j * 8);
cout << "字符识别准确率:" << chaccuracy << endl;

}


void TEST_PIPELINE() {

pr::PipelinePR prc("../lpr/model/cascade.xml",
"../lpr/model/HorizonalFinemapping.prototxt", "../lpr/model/HorizonalFinemapping.caffemodel",
"../lpr/model/Segmentation.prototxt", "../lpr/model/Segmentation.caffemodel",
"../lpr/model/CharacterRecognization.prototxt", "../lpr/model/CharacterRecognization.caffemodel",
"../lpr/model/SegmentationFree.prototxt", "../lpr/model/SegmentationFree.caffemodel"
);

cv::Mat image = cv::imread("../lpr/res/test.jpg");


std::vector<pr::PlateInfo> res = prc.RunPiplineAsImage(image, pr::SEGMENTATION_FREE_METHOD);

for (auto st : res) {
if (st.confidence > 0.75) {
std::cout << st.getPlateName() << " " << st.confidence << std::endl;
cv::Rect region = st.getPlateRect();

cv::rectangle(image, cv::Point(region.x, region.y), cv::Point(region.x + region.width, region.y + region.height), cv::Scalar(255, 255, 0), 2);
}
}

cv::imshow("image", image);
cv::waitKey(0);

}




/*void TEST_CAM()
{

cv::VideoCapture capture("test1.mp4");
cv::Mat frame;

pr::PipelinePR prc("../lpr/model/cascade.xml",
"../lpr/model/HorizonalFinemapping.prototxt", "../lpr/model/HorizonalFinemapping.caffemodel",
"../lpr/model/Segmentation.prototxt", "../lpr/model/Segmentation.caffemodel",
"../lpr/model/CharacterRecognization.prototxt", "../lpr/model/CharacterRecognization.caffemodel",
"../lpr/model/SegmentationFree.prototxt", "../lpr/model/SegmentationFree.caffemodel"
);
while (1) {
// read the next frame
if (!capture.read(frame)) {
std::cout << "failed to read video frame" << std::endl;
exit(1);
}
//
// cv::transpose(frame,frame);
// cv::flip(frame,frame,2);

// cv::resize(frame,frame,cv::Size(frame.cols/2,frame.rows/2));



std::vector<pr::PlateInfo> res = prc.RunPiplineAsImage(frame, pr::SEGMENTATION_FREE_METHOD);

for (auto st : res) {
if (st.confidence > 0.75) {
std::cout << st.getPlateName() << " " << st.confidence << std::endl;
cv::Rect region = st.getPlateRect();

cv::rectangle(frame, cv::Point(region.x, region.y), cv::Point(region.x + region.width, region.y + region.height), cv::Scalar(255, 255, 0), 2);
}
}

cv::imshow("image", frame);
cv::waitKey(1);
}
}*/


int main()
{
// TEST_ACC();

// TEST_CAM();
TEST_PIPELINE();
return 0;
}

+ 0
- 54
Prj-Win/lpr/tests/test_recognization.cpp View File

@@ -1,54 +0,0 @@
//
// Created by Jack Yu on 23/10/2017.
//

#include "../include/CNNRecognizer.h"

std::vector<std::string> chars{"京","沪","津","渝","冀","晋","蒙","辽","吉","黑","苏","浙","皖","闽","赣","鲁","豫","鄂","湘","粤","桂","琼","川","贵","云","藏","陕","甘","青","宁","新","0","1","2","3","4","5","6","7","8","9","A","B","C","D","E","F","G","H","J","K","L","M","N","P","Q","R","S","T","U","V","W","X","Y","Z"};

#include <opencv2/dnn.hpp>
using namespace cv::dnn;


void getMaxClass(cv::Mat &probBlob, int *classId, double *classProb)
{
// cv::Mat probMat = probBlob.matRefConst().reshape(1, 1); //reshape the blob to 1x1000 matrix
cv::Point classNumber;

cv::minMaxLoc(probBlob, NULL, classProb, NULL, &classNumber);

*classId = classNumber.x;
}

void TEST_RECOGNIZATION(){
// pr::CNNRecognizer instance("model/CharacterRecognization.prototxt","model/CharacterRecognization.caffemodel");
Net net = cv::dnn::readNetFromCaffe("model/CharacterRecognization.prototxt","model/CharacterRecognization.caffemodel");
cv::Mat image = cv::imread("res/char1.png",cv::IMREAD_GRAYSCALE);
cv::resize(image,image,cv::Size(14,30));
cv::equalizeHist(image,image);
cv::Mat inputBlob = cv::dnn::blobFromImage(image, 1/255.0, cv::Size(14,30), false);

net.setInput(inputBlob,"data");

cv::Mat res = net.forward();
std::cout<<res<<std::endl;
float *p = (float*)res.data;
int maxid= 0;
double prob = 0;

getMaxClass(res,&maxid,&prob);



std::cout<<chars[maxid]<<std::endl;





};
int main()
{TEST_RECOGNIZATION();


}

+ 0
- 43
Prj-Win/lpr/tests/test_segmentation.cpp View File

@@ -1,43 +0,0 @@
//
// Created by Jack Yu on 16/10/2017.
//


#include "../include/PlateSegmentation.h"
#include "../include/CNNRecognizer.h"
#include "../include/Recognizer.h"


std::vector<std::string> chars{"京","沪","津","渝","冀","晋","蒙","辽","吉","黑","苏","浙","皖","闽","赣","鲁","豫","鄂","湘","粤","桂","琼","川","贵","云","藏","陕","甘","青","宁","新","0","1","2","3","4","5","6","7","8","9","A","B","C","D","E","F","G","H","J","K","L","M","N","P","Q","R","S","T","U","V","W","X","Y","Z"};


void TEST_SLIDINGWINDOWS_EVAL(){
cv::Mat demo = cv::imread("res/cache/finemappingres.png");
cv::resize(demo,demo,cv::Size(136,36));

cv::Mat respones;
pr::PlateSegmentation plateSegmentation("model/Segmentation.prototxt","model/Segmentation.caffemodel");
pr::PlateInfo plate;
plate.setPlateImage(demo);
std::vector<cv::Rect> rects;
plateSegmentation.segmentPlatePipline(plate,1,rects);
plateSegmentation.ExtractRegions(plate,rects);

pr::GeneralRecognizer *recognizer = new pr::CNNRecognizer("model/CharacterRecognization.prototxt","model/CharacterRecognization.caffemodel");
recognizer->SegmentBasedSequenceRecognition(plate);
std::cout<<plate.decodePlateNormal(chars)<<std::endl;


delete(recognizer);





}
int main(){

TEST_SLIDINGWINDOWS_EVAL();

return 0;
}

+ 0
- 54
Prj-Win/lpr/tests/test_segmentationFree.cpp View File

@@ -1,54 +0,0 @@
//
// Created by Jack Yu on 29/11/2017.
//
#include "../include/SegmentationFreeRecognizer.h"
#include "../include/Pipeline.h"

#include "../include/PlateInfo.h"



std::string decodeResults(cv::Mat code_table,std::vector<std::string> mapping_table)
{
cv::MatSize mtsize = code_table.size;
int sequencelength = mtsize[2];
int labellength = mtsize[1];
cv::transpose(code_table.reshape(1,1).reshape(1,labellength),code_table);
std::string name = "";
std::vector<int> seq(sequencelength);
for(int i = 0 ; i < sequencelength; i++) {
float *fstart = ((float *) (code_table.data) + i * labellength );
int id = std::max_element(fstart,fstart+labellength) - fstart;
seq[i] =id;
}
for(int i = 0 ; i< sequencelength ; i++)
{
if(seq[i]!=labellength-1 && (i==0 || seq[i]!=seq[i-1]))
name+=mapping_table[seq[i]];
}
std::cout<<name;
return name;
}


int main()
{
cv::Mat image = cv::imread("res/cache/chars_segment.jpg");
// cv::transpose(image,image);

// cv::resize(image,image,cv::Size(160,40));
cv::imshow("xxx",image);
cv::waitKey(0);
pr::SegmentationFreeRecognizer recognizr("model/SegmenationFree-Inception.prototxt","model/ISegmenationFree-Inception.caffemodel");
std::pair<std::string,float> res = recognizr.SegmentationFreeForSinglePlate(image,pr::CH_PLATE_CODE);
std::cout<<res.first<<" "
<<res.second<<std::endl;


// decodeResults(plate,pr::CH_PLATE_CODE);
cv::imshow("image",image);
cv::waitKey(0);

return 0;

}
