Using template matching to first locate the inspection target, then detecting the circular part and the pin hole (02)

Figure: the hole and the pin to be detected lie in the red region of the image.

If the camera resolution is very high, searching the whole image directly is slow. You can first locate the part with deep learning or with template matching, and then run the detection only on that region.
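
When even a cropped image is still large, one common speed-up (not used in the script below) is coarse-to-fine template matching: match on a downscaled copy first, then refine only in a small window of the full-resolution image. A minimal sketch, assuming OpenCV; the function name coarse_to_fine_match and the scale/margin defaults are purely illustrative:

import cv2

# Coarse-to-fine template matching (illustrative sketch, not part of the original script)
def coarse_to_fine_match(image, template, scale=0.25, margin=20):
    # Coarse pass: match on downscaled copies so the search stays cheap
    small_img = cv2.resize(image, None, fx=scale, fy=scale)
    small_tpl = cv2.resize(template, None, fx=scale, fy=scale)
    res = cv2.matchTemplate(small_img, small_tpl, cv2.TM_CCOEFF_NORMED)
    _, _, _, loc = cv2.minMaxLoc(res)

    # Map the coarse hit back to full resolution and add a safety margin
    x, y = int(loc[0] / scale), int(loc[1] / scale)
    th, tw = template.shape[:2]
    x0, y0 = max(x - margin, 0), max(y - margin, 0)
    window = image[y0:y0 + th + 2 * margin, x0:x0 + tw + 2 * margin]

    # Fine pass: match only inside the small full-resolution window
    res = cv2.matchTemplate(window, template, cv2.TM_CCOEFF_NORMED)
    _, _, _, loc = cv2.minMaxLoc(res)
    return x0 + loc[0], y0 + loc[1]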

import cv2
import numpy as np
import math
import xml.etree.ElementTree as ET

# Detect the pin (needle) center position
def needleCenter_detect(img, bool_needle):
    # Set up SimpleBlobDetector parameters
    params = cv2.SimpleBlobDetector_Params()

    # Filter by area
    params.filterByArea = True
    params.minArea = 100
    params.maxArea = 10e3
    params.minDistBetweenBlobs = 50

    # Circularity and convexity filters are disabled here; tweak them as needed
    params.filterByConvexity = False
    params.filterByCircularity = False
    params.minCircularity = 0.2
    # params.filterByColor = True
    # params.blobColor = 0
    # params.filterByConvexity = True
    # params.minConvexity = 0.87
    # params.filterByInertia = True
    # params.minInertiaRatio = 0.01

    # Binarize the image before blob detection
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    minThreshValue = 110
    _, gray = cv2.threshold(gray, minThreshValue, 255, cv2.THRESH_BINARY)
    # Optional erosion if neighbouring blobs touch each other:
    # erosion_size = 3
    # element = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (2 * erosion_size + 1, 2 * erosion_size + 1),
    #                                     (erosion_size, erosion_size))
    # gray = cv2.erode(gray, element, 2)

    detector = cv2.SimpleBlobDetector_create(params)
    # In needle mode ('0') the binary image is inverted before blob detection
    if bool_needle == '0':
        gray = 255 - gray
    keypoints = detector.detect(gray)
    # If nothing was detected, return the empty result and let the caller handle it
    if len(keypoints) == 0:
        print("No pin center detected; the blob detector parameters may need tuning")
        return keypoints
    else:
        # Optional visualization of the detected keypoints:
        # im_with_keypoints = cv2.drawKeypoints(gray, keypoints, np.array([]), (255, 0, 0),
        #                                       cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)
        # cv2.circle(im_with_keypoints, (int(keypoints[0].pt[0]), int(keypoints[0].pt[1])), 5, (0, 255, 0), -1)
        # cv2.imshow("keypoints", im_with_keypoints)
        # cv2.waitKey()
        return keypoints



# Detect the circular connector and measure the pin offset from its center
def circle_detect(image, bool_needle):
    # Grayscale and median blur before the Hough transform
    circle_img = image.copy()
    gray = cv2.cvtColor(circle_img, cv2.COLOR_BGR2GRAY)
    img = cv2.medianBlur(gray, 3)


    # Connector (outer circle) center coordinates
    out_x = 0
    out_y = 0

    # Hough circle detection; the very large minDist effectively limits the result to one circle
    circles = cv2.HoughCircles(img, cv2.HOUGH_GRADIENT, 1, 10e10, param1=100, param2=30, minRadius=100, maxRadius=500)
    # HoughCircles returns None when nothing is found, so guard against that
    if circles is None:
        print("Connector outer circle not detected")

    else:
        for circle in circles[0]:
            # Circle center coordinates
            out_x = int(circle[0])
            out_y = int(circle[1])
            # Radius
            r = int(circle[2])
            # Mark the circle boundary and its center on the image
            cv2.circle(circle_img, (out_x, out_y), r, (0, 0, 255), 2)
            cv2.circle(circle_img, (out_x, out_y), 3, (0, 0, 255), -1)

        # Record the outer-circle center
        out_xpoint = out_x
        out_ypoint = out_y

        # Crop a small region around the connector center that contains the single pin
        step_center = 30
        step_rect = 60
        out_x -= step_center
        out_y -= step_center
        needleRect = image[out_y: out_y + step_rect, out_x: out_x + step_rect]

        # Find the pin position inside the cropped region
        centerPoint = needleCenter_detect(needleRect, bool_needle)

        if len(centerPoint) == 0:
            print("Adjust the position")
            return False
        else:
            # Map the pin coordinates back to the connector image
            in_x = int(centerPoint[0].pt[0]) + out_x
            in_y = int(centerPoint[0].pt[1]) + out_y

            # Draw the pin center
            cv2.circle(circle_img, (in_x, in_y), 3, (0, 255, 0), -1)

            # Distance between the connector center and the pin center,
            # scaled by the calibrated physical size of one pixel (mm per pixel)
            DPI = 0.019696448792778324
            dis = math.sqrt(math.pow(out_xpoint - in_x, 2) + math.pow(out_ypoint - in_y, 2)) * DPI
            print("Distance between the two centers (mm):", dis)

            # Save the result back into the XML config file
            configFile_xml = "wellConfig.xml"
            tree = ET.parse(configFile_xml)
            root = tree.getroot()
            secondRoot = root.find("distance")
            print(secondRoot.text)
            secondRoot.text = str(dis)
            tree.write("wellConfig.xml")

            cv2.namedWindow("image", 2)
            cv2.imshow("image", circle_img)
            cv2.waitKey()
            return True




# 1. The image and template paths are configured in an XML file
# 2. The detection result is written back to the same XML file
if __name__ == "__main__":

    # Read the paths of the images to inspect from the XML config file
    configFile_xml = "wellConfig.xml"
    tree = ET.parse(configFile_xml)
    root = tree.getroot()
    needleImagePath = root.find("needleFilePath")
    needleTemplate = root.find("needleMatchFilePath")
    holeImagePath = root.find("holeFilePath")
    holeTemplate = root.find("holeMatchFilePath")
    # '0' means inspect the needle (pin) image, otherwise the hole image
    bool_needle = root.find("needle")
    print(bool_needle.text)

    if bool_needle.text == '0':
        image = cv2.imread(needleImagePath.text)
        # Take the central third of the image as the ROI before template matching to save time
        h, w = image.shape[:2]
        height_y = int(h/3)
        height_x = int(w/3)
        image = image[height_y:2*height_y, height_x:2*height_x]
        template = cv2.imread(needleTemplate.text)
    else:
        image = cv2.imread(holeImagePath.text)
        # Take the central third of the image as the ROI before template matching to save time
        h, w = image.shape[:2]
        height_y = int(h/3)
        height_x = int(w/3)
        image = image[height_y:2*height_y, height_x:2*height_x]
        template = cv2.imread(holeTemplate.text)

    theight, twidth = template.shape[:2]
    # Run template matching with cv2.TM_CCOEFF_NORMED
    result = cv2.matchTemplate(image, template, cv2.TM_CCOEFF_NORMED)
    # Normalize the result to [0, 1]
    cv2.normalize(result, result, 0, 1, cv2.NORM_MINMAX, -1)
    # Find the minimum and maximum match values and their locations
    min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(result)
    # For cv2.TM_SQDIFF and cv2.TM_SQDIFF_NORMED the best match is min_val (use min_loc);
    # for the other methods, including TM_CCOEFF_NORMED, the best match is max_val (use max_loc)
    strmax_val = str(max_val)
    # Optional: draw a rectangle around the matched region and show it with the match value in the title
    # cv2.rectangle(image, max_loc, (max_loc[0] + twidth, max_loc[1] + theight), (0, 0, 225), 2)
    # cv2.namedWindow("MatchResult----MatchingValue=" + strmax_val, 2)
    # cv2.imshow("MatchResult----MatchingValue=" + strmax_val, image)
    # cv2.waitKey()

    # Crop the matched region and run the circle/pin detection on it
    roi = image[max_loc[1]:max_loc[1] + theight, max_loc[0]:max_loc[0] + twidth]
    if circle_detect(roi, bool_needle.text):
        print("Detection succeeded")
    else:
        print("Detection failed")

The wellConfig.xml read and updated by the script looks like this (only the child tags are looked up, so the root element name itself is not important):

<DOCUMENT content_method="full">
    <needleFilePath>images/OneType02/Image_04.jpg</needleFilePath>
    <needleMatchFilePath>images/NeedleTemplate.jpg</needleMatchFilePath>
    <needle>0</needle>
    <holeFilePath>images/OneType03/Image_01.jpg</holeFilePath>
    <holeMatchFilePath>images/HoleTemplate.jpg</holeMatchFilePath>
    <hole>False</hole>
    <distance>0.15383418280030892</distance>
    <display>
        <url>https://www.baidu.com/</url>
        <title>Good</title>
    </display>
</DOCUMENT>
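
For reference, the <distance> value stored above (about 0.154 mm) corresponds to roughly 7.8 pixels at the script's scale of about 0.0197 mm per pixel. If the config file does not exist yet, it can be generated with xml.etree.ElementTree; a minimal sketch, where the root tag wellConfig and the helper make_config are illustrative names (the script only looks up the child tags):

import xml.etree.ElementTree as ET

# Write a wellConfig.xml containing the tags the script expects (illustrative sketch)
def make_config(path="wellConfig.xml"):
    root = ET.Element("wellConfig")  # hypothetical root name; only the child tags matter
    ET.SubElement(root, "needleFilePath").text = "images/OneType02/Image_04.jpg"
    ET.SubElement(root, "needleMatchFilePath").text = "images/NeedleTemplate.jpg"
    ET.SubElement(root, "needle").text = "0"
    ET.SubElement(root, "holeFilePath").text = "images/OneType03/Image_01.jpg"
    ET.SubElement(root, "holeMatchFilePath").text = "images/HoleTemplate.jpg"
    ET.SubElement(root, "hole").text = "False"
    ET.SubElement(root, "distance").text = "0"
    ET.ElementTree(root).write(path)

if __name__ == "__main__":
    make_config()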
