| - | ====== | + | ====== |
| - | plate detection with neural network | + | < |
| - | | + | git clone https:// |
| - | * https:// | + | </code> |
| - | http://www.pyimagesearch.com/ | + | <file python convert.py> |
| - | <code bash> | + | import os |
| - | # load the example image | + | from os import walk, getcwd |
| - | image = cv2.imread(" | + | from PIL import Image |
| - | + | ||
| - | # pre-process the image by resizing it, converting it to | + | |
| - | # graycale, blurring it, and computing an edge map | + | |
| - | image = imutils.resize(image, | + | |
| - | gray = cv2.cvtColor(image, | + | |
| - | # Applying Gaussian blurring with a 5×5 kernel to reduce high-frequency noise | + | |
| - | blurred = cv2.GaussianBlur(gray, | + | |
| - | # Computing the edge map via the Canny edge detector. | + | classes |
| - | edged = cv2.Canny(blurred, | + | |
| - | # find contours in the edge map, then sort them by their | + | def convert(size, box): |
| - | # size in descending order | + | dw = 1./size[0] |
| - | cnts = cv2.findContours(edged.copy(), | + | |
| - | cv2.CHAIN_APPROX_SIMPLE) | + | |
| - | cnts = cnts[0] if imutils.is_cv2() else cnts[1] | + | |
| - | cnts = sorted(cnts, key=cv2.contourArea, | + | w = box[1] - box[0] |
| - | displayCnt | + | |
| - | + | x = x*dw | |
| - | # loop over the contours | + | w = w*dw |
| - | for c in cnts: | + | |
| - | # approximate the contour | + | |
| - | peri = cv2.arcLength(c, | + | return |
| - | approx | + | |
| - | + | | |
| - | # if the contour has four vertices, then we have found | + | """ |
| - | # the thermostat display | + | |
| - | if len(approx) == 4: | + | |
| - | displayCnt = approx | + | |
| - | break | + | |
| - | # extract the plate, apply a perspective transform to it | + | """ |
| - | # Applying this perspective transform gives us a top-down, birds-eye-view of plate | + | mypath = " |
| - | warped | + | outpath |
| - | output | + | |
| - | # threshold the warped image, then apply a series of morphological | ||
| - | # operations to cleanup the thresholded image | ||
| - | thresh = cv2.threshold(warped, | ||
| - | cv2.THRESH_BINARY_INV | cv2.THRESH_OTSU)[1] | ||
| - | kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, | ||
| - | thresh = cv2.morphologyEx(thresh, | ||
| - | # find contours in the thresholded image, then initialize the | + | cls = " |
| - | # digit contours lists | + | |
| - | cnts = cv2.findContours(thresh.copy(), | + | |
| - | cv2.CHAIN_APPROX_SIMPLE) | + | |
| - | cnts = cnts[0] if imutils.is_cv2() else cnts[1] | + | |
| - | digitCnts = [] | + | |
| - | + | ||
| - | # loop over the digit area candidates | + | |
| - | for c in cnts: | + | |
| - | # compute the bounding box of the contour | + | |
| - | (x, y, w, h) = cv2.boundingRect(c) | + | |
| - | + | ||
| - | # if the contour is sufficiently large, it must be a digit | + | |
| - | if w >= 15 and (h >= 30 and h <= 40): | + | |
| - | digitCnts.append(c) | + | |
| - | # TODO display contour | ||
| - | # cv2.rectangle(output, | ||
| + | wd = getcwd() | ||
| + | list_file = open(' | ||
| - | # sort the contours from left-to-right, | + | """ |
| - | # actual digits themselves | + | txt_name_list |
| - | digitCnts | + | for (dirpath, dirnames, filenames) in walk(mypath): |
| - | method=" | + | |
| - | digits = [] | + | |
| + | break | ||
| + | print(txt_name_list) | ||
| - | # cv2.putText(output, str(digit), (x - 10, y - 10), | + | """ |
| - | # cv2.FONT_HERSHEY_SIMPLEX, 0.65, (0, 255, 0), 2) | + | for txt_name in txt_name_list: |
| + | | ||
| + | |||
| + | """ | ||
| + | txt_path = mypath + txt_name | ||
| + | print(" | ||
| + | txt_file = open(txt_path, " | ||
| + | lines = txt_file.read().split(' | ||
| + | |||
| + | """ | ||
| + | txt_outpath = outpath + txt_name | ||
| + | print(" | ||
| + | txt_outfile = open(txt_outpath, " | ||
| + | |||
| + | |||
| + | """ | ||
| + | ct = 0 | ||
| + | for line in lines: | ||
| + | | ||
| + | # | ||
| + | # | ||
| + | if(len(line) >= 2): | ||
| + | ct = ct + 1 | ||
| + | print(line + " | ||
| + | elems = line.split(' | ||
| + | print(elems) | ||
| + | cls_id = elems[0].split(' | ||
| + | xmin = elems[0].split(' | ||
| + | xmax = elems[2] | ||
| + | ymin = elems[1] | ||
| + | ymax = elems[3][: | ||
| + | # | ||
| + | img_path = str(' | ||
| + | #t = magic.from_file(img_path) | ||
| + | #wh= re.search(' | ||
| + | im=Image.open(img_path) | ||
| + | w= int(im.size[0]) | ||
| + | h= int(im.size[1]) | ||
| + | #w = int(xmax) - int(xmin) | ||
| + | #h = int(ymax) - int(ymin) | ||
| + | # print(xmin) | ||
| + | print(w, h) | ||
| + | b = (float(xmin), float(xmax), float(ymin), | ||
| + | bb = convert((w, | ||
| + | print(bb) | ||
| + | txt_outfile.write(str(cls_id) + " " + " " | ||
| + | """ | ||
| + | if(ct != 0): | ||
| + | list_file.write(' | ||
| + | | ||
| + | list_file.close() | ||
| + | </ | ||
| + | |||
| + | |||
| + | Train.txt Text.txt | ||
| + | |||
| + | |||
<file python process.py>
import glob, os

# Current directory
current_dir = os.path.dirname(os.path.abspath(__file__))

# Directory where the data will reside, relative to the darknet executable
path_data = 'data/obj/'

# Percentage of images to be used for the test set
percentage_test = 10

# Create and/or truncate train.txt and test.txt
file_train = open('train.txt', 'w')
file_test = open('test.txt', 'w')

# Populate train.txt and test.txt
counter = 1
index_test = round(100 / percentage_test)
for pathAndFilename in glob.iglob(os.path.join(current_dir, "*.JPEG")):
    title, ext = os.path.splitext(os.path.basename(pathAndFilename))

    if counter == index_test:
        counter = 1
        file_test.write(path_data + title + '.JPEG' + "\n")
    else:
        file_train.write(path_data + title + '.JPEG' + "\n")
        counter = counter + 1

file_train.close()
file_test.close()
</file>
| + | |||
| + | Put images inside BBox-Label-Tool/ | ||
| + | convert to JPEG and delete old images | ||
| + | < | ||
| + | mogrify -format JPEG *.jpg | ||
| + | rm *.jpg | ||
| </ | </ | ||
| + | |||
| + | Go to main folder and run python main.py | ||
| + | < | ||
| + | python main.py | ||
| + | </ | ||
| + | |||
| + | Write 001 inside Image Dir box and load | ||
| + | |||
| + | Create a label for each image | ||
| + | |||
| + | After that, exit and create a new directory inside Label | ||
| + | < | ||
| + | mkdir output | ||
| + | </ | ||
| + | Run convert.py | ||
| + | < | ||
| + | python convert.py | ||
| + | </ | ||
| + | |||
| + | Now create test.txt and train.txt with process.py | ||
| + | < | ||
| + | python process.py | ||
| + | </ | ||
| + | < | ||
| + | ├── Images (input) | ||
| + | │ | ||
| + | │ | ||
| + | │ | ||
| + | ├── Labels (output) | ||
| + | │ | ||
| + | │ | ||
| + | │ | ||
| + | │ | ||
| + | </ | ||
====== Darknet ======
<code bash>
git clone https://github.com/pjreddie/darknet
cd darknet
make
</code>
| + | |||
| + | Copy train.txt and test.txt inside darknet/ | ||
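A possible sketch, assuming process.py was run inside the BBox-Label-Tool folder and both repositories sit side by side (adjust the paths to your layout):
<code bash>
cp BBox-Label-Tool/train.txt BBox-Label-Tool/test.txt darknet/
</code>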
| + | |||
| + | Create 3 files: | ||
| + | obj.data | ||
| + | obj.names | ||
| + | obj.cfg | ||
| + | |||
| + | <file obj.data> | ||
| + | classes= *NUMBER CLASSES* | ||
| + | train = *TRAIN DIRECTORY+ | ||
| + | valid = *TEST DIRECTORY* | ||
| + | names = obj.names | ||
| + | backup = *BACKUP FOLDER* | ||
| + | </ | ||
| + | |||
| + | <file obj.names> | ||
| + | *CLASS NAME* | ||
| + | </ | ||
| + | |||

Copy yolov2-tiny.cfg to obj.cfg and adjust the [region] section and the last [convolutional] layer directly above it:
  * classes = *NUMBER CLASSES* (in [region])
  * filters = (*NUMBER CLASSES* + 5) * 5 (in the last [convolutional] layer)
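
With a single class this works out to filters = (1 + 5) * 5 = 30. A minimal sketch of the affected part of the cfg, assuming the default num=5 anchors of yolov2-tiny (everything else stays as in the original file):
<code>
[convolutional]
size=1
stride=1
pad=1
# one class: filters = (1 + 5) * 5 = 30
filters=30
activation=linear

[region]
# number of classes to detect
classes=1
num=5
</code>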
| + | |||
| + | |||
| + | |||