# yolo.py — YOLOv3-tiny object detection via the OpenCV DNN module.
  1. import numpy as np
  2. import time
  3. import cv2
  4. import os
  5. from PIL import Image
  6. min_confidence = 0.3
  7. nm_threshold = 0.3
  8. classIDs = []
  9. boxes = []
  10. labelsPath = os.path.sep.join(['yolo-config', "bot.names"])
  11. # weightsPath = os.path.sep.join(['yolo-config', "yolov3-tiny-bot_2000.weights"])
  12. weightsPath = os.path.sep.join(['yolo-config', "yolov3-tiny-bot_2500.weights"])
  13. # weightsPath = os.path.sep.join(['yolo-config', "yolov3-tiny-bot_1600.weights"])
  14. # weightsPath = os.path.sep.join(['yolo-config', "yolov3-tiny-bot_1700.weights"])
  15. # weightsPath = os.path.sep.join(['yolo-config', "yolov3-tiny-bot_400.weights"])
  16. # weightsPath = os.path.sep.join(['yolo-config', "yolov3-tiny-bot_6000.weights"])
  17. # weightsPath = os.path.sep.join(['yolo-config', "yolov3-tiny-bot_2700.weights"])
  18. configPath = os.path.sep.join(['yolo-config', "yolov3-tiny-bot.cfg"])
  19. LABELS = open(labelsPath).read().strip().split("\n")
  20. COLORS = np.random.randint(0, 255, size=(len(LABELS), 3), dtype="uint8")
  21. net = cv2.dnn.readNetFromDarknet(configPath, weightsPath)
  22. def yolo_detect(image):
  23. (H, W) = image.shape[:2]
  24. # determine only the *output* layer names that we need from YOLO
  25. ln = net.getLayerNames()
  26. ln = [ln[i[0] - 1] for i in net.getUnconnectedOutLayers()]
  27. # construct a blob from the input image and then perform a forward
  28. # pass of the YOLO object detector, giving us our bounding boxes and
  29. # associated probabilities
  30. blob = cv2.dnn.blobFromImage(image, 1 / 255.0, (416, 416), swapRB=True, crop=False)
  31. net.setInput(blob)
  32. start = time.time()
  33. layerOutputs = net.forward(ln)
  34. end = time.time()
  35. # show timing information on YOLO
  36. print("[INFO] YOLO took {:.6f} seconds".format(end - start))
  37. # initialize our lists of detected bounding boxes, confidences, and
  38. # class IDs, respectively
  39. boxes = []
  40. positions = []
  41. confidences = []
  42. classIDs = []
  43. # loop over each of the layer outputs
  44. for output in layerOutputs:
  45. # loop over each of the detections
  46. for detection in output:
  47. # extract the class ID and confidence (i.e., probability) of
  48. # the current object detection
  49. scores = detection[5:]
  50. classID = np.argmax(scores)
  51. confidence = scores[classID]
  52. # filter out weak predictions by ensuring the detected
  53. # probability is greater than the minimum probability
  54. if confidence > min_confidence:
  55. # scale the bounding box coordinates back relative to the
  56. # size of the image, keeping in mind that YOLO actually
  57. # returns the center (x, y)-coordinates of the bounding
  58. # box followed by the boxes' width and height
  59. box = detection[0:4] * np.array([W, H, W, H])
  60. (centerX, centerY, width, height) = box.astype("int")
  61. # use the center (x, y)-coordinates to derive the top and
  62. # and left corner of the bounding box
  63. x = int(centerX - (width / 2))
  64. y = int(centerY - (height / 2))
  65. # update our list of bounding box coordinates, confidences,
  66. # and class IDs
  67. boxes.append([x, y, int(width), int(height)])
  68. confidences.append(float(confidence))
  69. classIDs.append(classID)
  70. # apply non-maxima suppression to suppress weak, overlapping bounding
  71. # boxes
  72. idxs = cv2.dnn.NMSBoxes(boxes, confidences, min_confidence, nm_threshold)
  73. # ensure at least one detection exists
  74. flag = 0
  75. if len(idxs) > 0:
  76. # loop over the indexes we are keeping
  77. for i in idxs.flatten():
  78. # extract the bounding box coordinates
  79. (x, y) = (boxes[i][0], boxes[i][1])
  80. (w, h) = (boxes[i][2], boxes[i][3])
  81. flag = 1
  82. # draw a bounding box rectangle and label on the image
  83. color = [int(c) for c in COLORS[classIDs[i]]]
  84. cv2.rectangle(image, (x, y), (x + w, y + h), color, 2)
  85. position = [(x + w / 2) - (W / 2), (H / 2) - (y + h / 2)]
  86. positions.append(position)
  87. text = "{}: {:.4f} {}".format(LABELS[classIDs[i]], confidences[i], position)
  88. cv2.putText(image, text, (x, y - 5), cv2.FONT_HERSHEY_SIMPLEX, 0.5, color, 2)
  89. return image, boxes, positions, confidences, classIDs
  90. def get_postition(classId):
  91. global classIDs
  92. global boxes
  93. i = 0
  94. exits = False
  95. for v in classIDs:
  96. if v == classId:
  97. exits = True
  98. break
  99. i += 1
  100. if not exits:
  101. return [0, 0, 0, 0]
  102. box = boxes[i]
  103. print("classid", classId, "x is", box[0], "y is ", box[1])
  104. return box
  105. if __name__ == "__main__":
  106. im = Image.open('16.png')
  107. im_array = np.array(im)
  108. im_array = cv2.cvtColor(im_array, cv2.COLOR_BGR2RGB)
  109. image_detection, boxes, positions, confidences, classIDs = yolo_detect(im_array)
  110. print(positions) # [[350.5, 27.0], [242.0, -13.0], [-3.5, -59.5]]
  111. print(classIDs) # [2, 2, 0]
  112. print(confidences) # [0.9958043098449707, 0.9548179507255554, 0.8634780645370483]
  113. print(boxes) # [[1135, 412, 31, 22], [1025, 451, 34, 24], [761, 371, 71, 277]];
  114. # [761, 371, 71, 277]代表,x,y,宽,高(坐标以左上角为原点)
  115. box = get_postition(0)
  116. print(box)
  117. cv2.imshow('detection', image_detection)
  118. cv2.waitKey(0)
  119. cv2.destroyAllWindows()