====== BBox-Label-Tool ======

<code>
git clone https://github.com/puzzledqs/BBox-Label-Tool.git
</code>
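
BBox-Label-Tool writes one .txt file per image: the first line is the number of boxes, followed by one ''xmin ymin xmax ymax'' line per box. This is the format convert.py below parses. A hypothetical label file for an image with two plates:
<code>
2
100 200 300 400
420 150 600 260
</code>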

<file python convert.py>
import os
from os import walk, getcwd
from PIL import Image

classes = ["plate"]

def convert(size, box):
    # size = (image width, image height), box = (xmin, xmax, ymin, ymax)
    # returns the YOLO format: box center x/y and width/height, normalized to [0, 1]
    dw = 1./size[0]
    dh = 1./size[1]
    x = (box[0] + box[1])/2.0
    y = (box[2] + box[3])/2.0
    w = box[1] - box[0]
    h = box[3] - box[2]
    x = x*dw
    w = w*dw
    y = y*dh
    h = h*dh
    return (x, y, w, h)


""" Configure paths (BBox-Label-Tool layout, see the tree below) """
mypath = "Labels/001/"      # BBox-Label-Tool label files (input)
outpath = "Labels/output/"  # YOLO-format label files (output)

cls = "plate"
if cls not in classes:
    exit(0)
cls_id = classes.index(cls)

wd = getcwd()
list_file = open('%s/%s_list.txt' % (wd, cls), 'w')

""" Get the input text file list """
txt_name_list = []
for (dirpath, dirnames, filenames) in walk(mypath):
    print(filenames)
    txt_name_list.extend(filenames)
    break
print(txt_name_list)

""" Process each label file """
for txt_name in txt_name_list:

    """ Open the input text file """
    txt_path = mypath + txt_name
    print("Input: " + txt_path)
    txt_file = open(txt_path, "r")
    lines = txt_file.read().split('\n')

    """ Open the output text file """
    txt_outpath = outpath + txt_name
    print("Output: " + txt_outpath)
    txt_outfile = open(txt_outpath, "w")

    """ Convert the data to YOLO format """
    ct = 0
    for line in lines:
        # the first line of a BBox-Label-Tool file is the box count;
        # it is skipped here because it is shorter than 2 characters
        # (assumes fewer than 10 boxes per image)
        if len(line) >= 2:
            ct = ct + 1
            print(line + "\n")
            elems = line.split(' ')
            print(elems)
            xmin = elems[0]
            ymin = elems[1]
            xmax = elems[2]
            ymax = elems[3].strip()  # strip a possible trailing '\r'
            img_path = str('%s/Images/001/%s.JPEG' % (wd, os.path.splitext(txt_name)[0]))
            im = Image.open(img_path)
            w = int(im.size[0])
            h = int(im.size[1])
            print(w, h)
            b = (float(xmin), float(xmax), float(ymin), float(ymax))
            bb = convert((w, h), b)
            print(bb)
            txt_outfile.write(str(cls_id) + " " + " ".join([str(a) for a in bb]) + '\n')
    txt_outfile.close()

    """ Save the images that have bounding boxes into the list file """
    if ct != 0:
        list_file.write('%s/Images/001/%s.JPEG\n' % (wd, os.path.splitext(txt_name)[0]))

list_file.close()
</file>
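
A quick sanity check of convert(): for a 640x480 image with a box xmin=100, xmax=300, ymin=200, ymax=400 (passed in the (xmin, xmax, ymin, ymax) order convert() expects; the numbers are made up for illustration), the output is the normalized box center and size:
<code python>
print(convert((640, 480), (100.0, 300.0, 200.0, 400.0)))
# -> (0.3125, 0.625, 0.3125, 0.4166666666666667)
</code>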

train.txt and test.txt are generated by process.py:
<file python process.py>
import glob, os

# Current directory
current_dir = os.path.dirname(os.path.abspath(__file__))

# Directory where the data will reside, relative to the darknet binary
# (adjust to where the images actually live)
path_data = 'data/obj/'

# Percentage of images to be used for the test set
percentage_test = 10

# Create and/or truncate train.txt and test.txt
file_train = open('train.txt', 'w')
file_test = open('test.txt', 'w')

# Populate train.txt and test.txt
counter = 1
index_test = round(100 / percentage_test)
# images were converted to .JPEG earlier in this workflow
for pathAndFilename in glob.iglob(os.path.join(current_dir, "*.JPEG")):
    title, ext = os.path.splitext(os.path.basename(pathAndFilename))

    if counter == index_test:
        counter = 1
        file_test.write(path_data + title + '.JPEG' + "\n")
    else:
        file_train.write(path_data + title + '.JPEG' + "\n")
        counter = counter + 1

file_train.close()
file_test.close()
</file>
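
process.py sends every 10th image to test.txt and the rest to train.txt (a 90/10 split, controlled by percentage_test). A quick way to verify the split after running it (a sketch, assuming the two files are in the current directory):
<code python>
with open('train.txt') as f:
    n_train = sum(1 for _ in f)
with open('test.txt') as f:
    n_test = sum(1 for _ in f)
print(n_train, n_test)  # expect roughly a 9:1 ratio
</code>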
+ | |||

Put images inside BBox-Label-Tool/Images/001, convert them to JPEG, and delete the old images (the stock BBox-Label-Tool only lists *.JPEG files):
<code>
mogrify -format JPEG *.jpg
rm *.jpg
</code>

Go to the main folder and run main.py:
<code>
python main.py
</code>

Write 001 inside the Image Dir box and load.

Create a label for each image.

After that, exit and create a new directory inside Labels:
<code>
mkdir output
</code>
Run convert.py:
<code>
python convert.py
</code>

Now create test.txt and train.txt with process.py:
<code>
python process.py
</code>

The resulting layout:
<code>
├── Images (input)
│   └── 001
│       └── *.JPEG
├── Labels (output)
│   ├── 001
│   │   └── *.txt
│   └── output
│       └── *.txt
</code>

====== Darknet ======
<code>
git clone https://github.com/pjreddie/darknet
cd darknet
make
</code>
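
A quick smoke test of the build: running the binary with no arguments should print its usage line (''usage: ./darknet <function>''):
<code>
./darknet
</code>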
+ | |||
+ | Copy train.txt and test.txt inside darknet/ | ||
+ | |||
+ | Create 3 files: | ||
+ | obj.data | ||
+ | obj.names | ||
+ | obj.cfg | ||
+ | |||
<file obj.data>
classes = *NUMBER CLASSES*
train = *TRAIN DIRECTORY*
valid = *TEST DIRECTORY*
names = obj.names
backup = *BACKUP FOLDER*
</file>
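
As an illustration, a filled-in obj.data for this single-class plate detector might look like this (the paths are assumptions, adjust to your layout):
<code>
classes = 1
train = train.txt
valid = test.txt
names = obj.names
backup = backup/
</code>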
+ | |||
+ | <file obj.names> | ||
+ | *CLASS NAME* | ||
+ | </ | ||
+ | |||
Copy yolov2-tiny.cfg to obj.cfg and edit it: in the [region] section set classes = *NUMBER CLASSES*, and in the [convolutional] layer just above [region] set filters = (*NUMBER CLASSES* + 5)*5.
+ | |||
+ |