====== yolo train ======
  
Clone BBox-Label-Tool, which is used to draw the bounding boxes on the training images:

<code>
git clone https://github.com/puzzledqs/BBox-Label-Tool.git
</code>
  
convert.py translates the BBox-Label-Tool annotations in Labels/001/ into the YOLO label format (class id, then normalized center x, center y, width, height) and writes a list of the images that actually have boxes:

<file python convert.py>
import os
from os import walk, getcwd
from PIL import Image

# class names (informational; the id written below comes from the label file)
classes = ["targa"]

def convert(size, box):
    # size is the image (width, height); box is (xmin, xmax, ymin, ymax) in pixels.
    # Returns the YOLO form: center x, center y, width, height, normalized to [0, 1].
    dw = 1./size[0]
    dh = 1./size[1]
    x = (box[0] + box[1])/2.0
    y = (box[2] + box[3])/2.0
    w = box[1] - box[0]
    h = box[3] - box[2]
    x = x*dw
    w = w*dw
    y = y*dh
    h = h*dh
    return (x, y, w, h)


"""-------------------------------------------------------------------"""

""" Configure Paths """
mypath = "Labels/001/"
outpath = "Labels/output/"

cls = "001"

wd = getcwd()
list_file = open('%s/%s_list.txt' % (wd, cls), 'w')

""" Get input text file list """
txt_name_list = []
for (dirpath, dirnames, filenames) in walk(mypath):
    print(filenames)
    txt_name_list.extend(filenames)
    break
print(txt_name_list)

""" Process """
for txt_name in txt_name_list:

    """ Open input text file """
    txt_path = mypath + txt_name
    print("Input:" + txt_path)
    txt_file = open(txt_path, "r")
    lines = txt_file.read().split('\r\n')   # for ubuntu, use "\r\n" instead of "\n"

    """ Open output text file """
    txt_outpath = outpath + txt_name
    print("Output:" + txt_outpath)
    txt_outfile = open(txt_outpath, "w")

    """ Convert the data to YOLO format """
    ct = 0
    for line in lines:
        if(len(line) >= 2):
            ct = ct + 1
            print(line + "\n")
            elems = line.split(' ')
            print(elems)
            # the first element still contains the box count from the file's
            # first line and xmin, joined by '\n', because of the split above
            cls_id = elems[0].split('\n')[0]
            xmin = elems[0].split('\n')[1]
            xmax = elems[2]
            ymin = elems[1]
            ymax = elems[3][:-1]    # drop the trailing newline
            img_path = str('%s/Images/%s/%s.JPEG' % (wd, cls, os.path.splitext(txt_name)[0]))
            im = Image.open(img_path)
            w = int(im.size[0])
            h = int(im.size[1])
            print(w, h)
            b = (float(xmin), float(xmax), float(ymin), float(ymax))
            bb = convert((w, h), b)
            print(bb)
            txt_outfile.write(str(cls_id) + " " + " ".join([str(a) for a in bb]) + '\n')

    txt_file.close()
    txt_outfile.close()

    """ Save those images with bb into list """
    if(ct != 0):
        list_file.write('%s/Images/%s/%s.JPEG\n' % (wd, cls, os.path.splitext(txt_name)[0]))

list_file.close()
</file>
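
A quick sanity check of the convert() math, with made-up numbers rather than a real label (run after defining convert() above): a 640x480 image and a box spanning x = 100..300, y = 50..250.

<code python>
# box center is (200, 150), box size is 200x200 pixels
print(convert((640, 480), (100.0, 300.0, 50.0, 250.0)))
# -> (0.3125, 0.3125, 0.3125, 0.4166666666666667)
</code>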

process.py splits the images into a training set and a test set, writing train.txt and test.txt:

<file python process.py>
import glob
import os

# Current directory (the script is expected to sit next to the images)
current_dir = os.path.dirname(os.path.abspath(__file__))

# Directory where the data will reside, relative to 'darknet.exe'
path_data = '*IMAGE DIRECTORY*'

# Percentage of images to be used for the test set
percentage_test = 10

# Create and/or truncate train.txt and test.txt
file_train = open('train.txt', 'w')
file_test = open('test.txt', 'w')

# Populate train.txt and test.txt
counter = 1
index_test = round(100 / percentage_test)
for pathAndFilename in glob.iglob(os.path.join(current_dir, "*.JPEG")):
    title, ext = os.path.splitext(os.path.basename(pathAndFilename))

    if counter == index_test:
        counter = 1
        file_test.write(path_data + title + '.JPEG' + "\n")
    else:
        file_train.write(path_data + title + '.JPEG' + "\n")
        counter = counter + 1

file_train.close()
file_test.close()
</file>
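
With percentage_test = 10, index_test is 10, so every tenth .JPEG goes to test.txt and the rest to train.txt, i.e. roughly a 90/10 split. The script globs *.JPEG next to itself, so run it from (or copy it into) the image directory.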

Put the images inside BBox-Label-Tool/Images/001/, then convert them to JPEG and delete the originals:
<code>
mogrify -format JPEG *.jpg
rm *.jpg
</code>

Go to the BBox-Label-Tool folder and run main.py:
<code>
python main.py
</code>

Write 001 inside the Image Dir box and press Load.

Create a label for each image.

After that, exit and create a new directory inside Labels:
<code>
mkdir output
</code>

Run convert.py:
<code>
python convert.py
</code>

Now create test.txt and train.txt with process.py:
<code>
python process.py
</code>

The directory layout ends up like this:
<code>
├── Images (input)
│   ├── 001
│   │   └── 20180319_113309.JPEG
│   └── targa
├── Labels (output)
│   ├── 001
│   │   └── 20180319_113309.txt
│   └── output
│       └── 20180319_113309.txt
</code>
====== Darknet ======
<code>
git clone https://github.com/pjreddie/darknet
cd darknet
make
</code>

Copy train.txt and test.txt inside darknet/cfg/.
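
For example, assuming the darknet checkout sits next to the two files:

<code>
cp train.txt test.txt darknet/cfg/
</code>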

Create 3 files:
  * obj.data
  * obj.names
  * obj.cfg

<file obj.data>
classes = *NUMBER CLASSES*
train   = *TRAIN DIRECTORY*
valid   = *TEST DIRECTORY*
names   = obj.names
backup  = *BACKUP FOLDER*
</file>

<file obj.names>
*CLASS NAME*
</file>
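
As a filled-in example for the single targa class used above (the paths are an assumed layout, adjust them to yours), obj.data could be:

<code>
classes = 1
train = cfg/train.txt
valid = cfg/test.txt
names = cfg/obj.names
backup = backup/
</code>

and obj.names holds one class name per line, here just:

<code>
targa
</code>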

Copy yolov2-tiny.cfg to obj.cfg and edit it: in the [region] section set
classes = *NUMBER CLASSES*
and in the [convolutional] layer directly above [region] set
filters = (*NUMBER CLASSES* + 5)*5
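
For example, with the single targa class this gives classes = 1 and filters = (1 + 5)*5 = 30.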