projects:plate

Differences

This shows you the differences between two versions of the page.

Link to this comparison view

Both sides previous revision Previous revision
Next revision
Previous revision
projects:plate [2022/05/08 22:35] – old revision restored (2022/02/25 12:05) 216.244.66.228projects:plate [2022/06/20 14:19] (current) – old revision restored (2022/03/12 21:34) 154.54.249.201
Line 1: Line 1:
-====== plate ======+====== yolo train ======
  
-plate detection with neural network +<code> 
-  https://matthewearl.github.io/2016/05/06/cnn-anpr/ +git clone https://github.com/puzzledqs/BBox-Label-Tool.git 
-  * https://github.com/matthewearl/deep-anpr+</code>
  
-http://www.pyimagesearch.com/2017/02/13/recognizing-digits-with-opencv-and-python/ +<file python convert.py
-<code bash+import os 
-# load the example image +from os import walk, getcwd 
-image = cv2.imread("example.jpg"+from PIL import Image
-  +
-# pre-process the image by resizing it, converting it to +
-# graycale, blurring it, and computing an edge map +
-image = imutils.resize(image, height=500) +
-gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY) +
-# Applying Gaussian blurring with a 5×5 kernel to reduce high-frequency noise +
-blurred = cv2.GaussianBlur(gray, (5, 5), 0)+
  
-# Computing the edge map via the Canny edge detector. +classes = ["targa"]
-edged = cv2.Canny(blurred, 50, 200, 255)+
  
-# find contours in the edge map, then sort them by their +def convert(size, box): 
-# size in descending order +    dw = 1./size[0] 
-cnts = cv2.findContours(edged.copy(), cv2.RETR_EXTERNAL, +    dh = 1./size[1] 
- cv2.CHAIN_APPROX_SIMPLE) +    x = (box[0] + box[1])/2.0 
-cnts = cnts[0] if imutils.is_cv2() else cnts[1] +    y = (box[2] + box[3])/2.0 
-cnts = sorted(cnts, key=cv2.contourArea, reverse=True) +    w = box[1] - box[0] 
-displayCnt = None +    h = box[3] - box[2] 
-  +    x = x*dw 
-# loop over the contours +    w = w*dw 
-for c in cnts: +    y = y*dh 
- # approximate the contour +    h = h*dh 
- peri = cv2.arcLength(c, True) +    return (x,y,w,h) 
- approx = cv2.approxPolyDP(c, 0.02 * peri, True) +     
-  +     
- # if the contour has four vertices, then we have found +"""-------------------------------------------------------------------""" 
- # the thermostat display +
- if len(approx) == 4: +
- displayCnt = approx +
- break+
  
-# extract the plate, apply a perspective transform to it +""" Configure Paths"""    
-# Applying this perspective transform gives us a top-down, birds-eye-view of plate +mypath = "Labels/001/" 
-warped = four_point_transform(gray, displayCnt.reshape(4, 2)) +outpath = "Labels/output/"
-output = four_point_transform(image, displayCnt.reshape(4, 2))+
  
-# threshold the warped image, then apply a series of morphological 
-# operations to cleanup the thresholded image 
-thresh = cv2.threshold(warped, 0, 255, 
- cv2.THRESH_BINARY_INV | cv2.THRESH_OTSU)[1] 
-kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (1, 5)) 
-thresh = cv2.morphologyEx(thresh, cv2.MORPH_OPEN, kernel) 
  
-# find contours in the thresholded image, then initialize the +cls = "001"
-# digit contours lists +
-cnts = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL, +
- cv2.CHAIN_APPROX_SIMPLE) +
-cnts = cnts[0] if imutils.is_cv2() else cnts[1] +
-digitCnts = [] +
-  +
-# loop over the digit area candidates +
-for c in cnts: +
- # compute the bounding box of the contour +
- (x, y, w, h) = cv2.boundingRect(c) +
-  +
- # if the contour is sufficiently large, it must be a digit +
- if w >= 15 and (h >= 30 and h <= 40): +
- digitCnts.append(c)+
  
-# TODO display contour 
-# cv2.rectangle(output, (x, y), (x + w, y + h), (0, 255, 0), 1) 
  
 +wd = getcwd()
 +list_file = open('%s/%s_list.txt'%(wd, cls), 'w')
  
-# sort the contours from left-to-right, then initialize the +""" Get input text file list """ 
-# actual digits themselves +txt_name_list = [] 
-digitCnts = contours.sort_contours(digitCnts, +for (dirpath, dirnames, filenames) in walk(mypath): 
- method="left-to-right")[0] +    print(filenames) 
-digits = []+    txt_name_list.extend(filenames) 
 +    break 
 +print(txt_name_list)
  
- cv2.putText(output, str(digit), (x - 10, y - 10), +""" Process """ 
-# cv2.FONT_HERSHEY_SIMPLEX, 0.65, (0, 255, 0), 2)+for txt_name in txt_name_list: 
 +    txt_file =  open("Labels/stop_sign/001.txt", "r") 
 +     
 +    """ Open input text files """ 
 +    txt_path = mypath + txt_name 
 +    print("Input:" + txt_path) 
 +    txt_file = open(txt_path, "r") 
 +    lines = txt_file.read().split('\r\n')   #for ubuntu, use "\r\n" instead of "\n" 
 +     
 +    """ Open output text files """ 
 +    txt_outpath = outpath + txt_name 
 +    print("Output:" + txt_outpath) 
 +    txt_outfile = open(txt_outpath"w") 
 +     
 +     
 +    """ Convert the data to YOLO format """ 
 +    ct = 0 
 +    for line in lines: 
 +        #print('lenth of line is: ') 
 +        #print(len(line)) 
 +        #print('\n') 
 +        if(len(line) >= 2): 
 +            ct = ct + 1 
 +            print(line + "\n") 
 +            elems = line.split(' ') 
 +            print(elems) 
 +            cls_id = elems[0].split('\n')[0] 
 +            xmin = elems[0].split('\n')[1] 
 +            xmax = elems[2] 
 +            ymin = elems[1] 
 +            ymax = elems[3][:-1] 
 +            # 
 +            img_path = str('%s/Images/%s/%s.JPEG'%(wd, cls, os.path.splitext(txt_name)[0])) 
 +            #t = magic.from_file(img_path) 
 +            #wh= re.search('(\d+) x (\d+)', t).groups() 
 +            im=Image.open(img_path) 
 +            w= int(im.size[0]) 
 +            h= int(im.size[1]) 
 +            #w = int(xmax) - int(xmin) 
 +            #h = int(ymax) - int(ymin) 
 +            # print(xmin) 
 +            print(w, h) 
 +            b = (float(xmin), float(xmax), float(ymin), float(ymax)) 
 +            bb = convert((w,h), b) 
 +            print(bb) 
 +            txt_outfile.write(str(cls_id) + " " + " ".join([str(a) for a in bb]) + '\n')
  
 +    """ Save those images with bb into list"""
 +    if(ct != 0):
 +        list_file.write('%s/images/%s/%s.JPEG\n'%(wd, cls, os.path.splitext(txt_name)[0]))
 +                
 +list_file.close() 
 +</file>
 +
 +
 +Train.txt Test.txt
 +
 +
 +<file python process.py>
 +import glob, os
 +
 +# Current directory
 +current_dir = os.path.dirname(os.path.abspath(__file__))
 +
 +# Directory where the data will reside, relative to 'darknet.exe'
 +path_data = '*IMAGE DIRECTORY*'
 +
 +# Percentage of images to be used for the test set
 +percentage_test = 10;
 +
 +# Create and/or truncate train.txt and test.txt
 +file_train = open('train.txt', 'w')  
 +file_test = open('test.txt', 'w')
 +
 +# Populate train.txt and test.txt
 +counter = 1  
 +index_test = round(100 / percentage_test)  
 +for pathAndFilename in glob.iglob(os.path.join(current_dir, "*.JPEG")):  
 +    title, ext = os.path.splitext(os.path.basename(pathAndFilename))
 +
 +    if counter == index_test:
 +        counter = 1
 +        file_test.write(path_data + title + '.JPEG' + "\n")
 +    else:
 +        file_train.write(path_data + title + '.JPEG' + "\n")
 +        counter = counter + 1
 +</file>
 +
 +Put images inside BBox-Label-Tool/Images/001/
 +convert to JPEG and delete old images
 +<code>
 +mogrify -format JPEG *.jpg
 +rm *.jpg
 </code> </code>
 +
 +Go to main folder and run python main.py
 +<code>
 +python main.py
 +</code>
 +
 +Write 001 inside Image Dir box and load
 +
 +Create a label for each image
 +
 +After that, exit and create a new directory inside Label
 +<code>
 +mkdir output
 +</code>
 +Run convert.py
 +<code>
 +python convert.py
 +</code>
 +
 +Now create test.txt and train.txt with process.py
 +<code>
 +python process.py
 +</code>
 +<code>
 +├── Images (input)
 +│   ├── 001
 +│   │   ├── 20180319_113309.JPEG
 +│   └── targa
 +├── Labels (output)
 +│   ├── 001
 +│   │   ├── 20180319_113309.txt
 +│   └── output
 +│       ├── 20180319_113309.txt
 +</code>
 +====== Darknet ======
 +<code>
 +git clone https://github.com/pjreddie/darknet
 +cd darknet
 +make
 +</code>
 +
 +Copy train.txt and test.txt inside darknet/cfg/
 +
 +Create 3 files:
 +obj.data
 +obj.names
 +obj.cfg
 +
 +<file obj.data>
 +classes= *NUMBER CLASSES*
 +train  = *TRAIN DIRECTORY*
 +valid = *TEST DIRECTORY*
 +names = obj.names
 +backup = *BACKUP FOLDER*
 +</file>
 +
 +<file obj.names>
 +*CLASS NAME*
 +</file>
 +
 +Copy yolov2-tiny.cfg and change [region]:classes to 
 +classes = *NUMBER CLASSES*
 +filters = (*NUMBER CLASSES* +5)*5
 +
 +
 +
  • projects/plate.1652042129.txt.gz
  • Last modified: 2022/05/08 22:35
  • by 216.244.66.228