```python
import os
import time

import cv2
import numpy as np
from PIL import Image

# minimum detection probability and non-maxima suppression (IoU) threshold
min_confidence = 0.3
nms_threshold = 0.3

# module-level detection results; rebound in __main__ and read by get_postition()
classIDs = []
boxes = []

# paths to the class names, trained weights and network config
labelsPath = os.path.sep.join(['yolo-config', "bot.names"])
# weightsPath = os.path.sep.join(['yolo-config', "yolov3-tiny-bot_2000.weights"])
weightsPath = os.path.sep.join(['yolo-config', "yolov3-tiny-bot_2500.weights"])
# weightsPath = os.path.sep.join(['yolo-config', "yolov3-tiny-bot_1600.weights"])
# weightsPath = os.path.sep.join(['yolo-config', "yolov3-tiny-bot_1700.weights"])
# weightsPath = os.path.sep.join(['yolo-config', "yolov3-tiny-bot_400.weights"])
# weightsPath = os.path.sep.join(['yolo-config', "yolov3-tiny-bot_6000.weights"])
# weightsPath = os.path.sep.join(['yolo-config', "yolov3-tiny-bot_2700.weights"])
configPath = os.path.sep.join(['yolo-config', "yolov3-tiny-bot.cfg"])

# load the class labels, pick one colour per class, and load the network
LABELS = open(labelsPath).read().strip().split("\n")
COLORS = np.random.randint(0, 255, size=(len(LABELS), 3), dtype="uint8")
net = cv2.dnn.readNetFromDarknet(configPath, weightsPath)

def yolo_detect(image):
    (H, W) = image.shape[:2]

    # determine only the *output* layer names that we need from YOLO
    # (flatten() keeps this working whether getUnconnectedOutLayers() returns
    # an Nx1 array, as in older OpenCV releases, or a flat array, as in 4.5.4+)
    ln = net.getLayerNames()
    ln = [ln[i - 1] for i in net.getUnconnectedOutLayers().flatten()]

    # construct a blob from the input image and then perform a forward
    # pass of the YOLO object detector, giving us our bounding boxes and
    # associated probabilities
    blob = cv2.dnn.blobFromImage(image, 1 / 255.0, (416, 416), swapRB=True, crop=False)
    net.setInput(blob)
    start = time.time()
    layerOutputs = net.forward(ln)
    end = time.time()

    # show timing information on YOLO
    print("[INFO] YOLO took {:.6f} seconds".format(end - start))

    # initialize our lists of detected bounding boxes, positions,
    # confidences, and class IDs, respectively
    boxes = []
    positions = []
    confidences = []
    classIDs = []

    # loop over each of the layer outputs
    for output in layerOutputs:
        # loop over each of the detections
        for detection in output:
            # extract the class ID and confidence (i.e., probability) of
            # the current object detection
            scores = detection[5:]
            classID = np.argmax(scores)
            confidence = scores[classID]

            # filter out weak predictions by ensuring the detected
            # probability is greater than the minimum probability
            if confidence > min_confidence:
                # scale the bounding box coordinates back relative to the
                # size of the image, keeping in mind that YOLO actually
                # returns the center (x, y)-coordinates of the bounding
                # box followed by the box's width and height
                box = detection[0:4] * np.array([W, H, W, H])
                (centerX, centerY, width, height) = box.astype("int")

                # use the center (x, y)-coordinates to derive the top
                # and left corner of the bounding box
                x = int(centerX - (width / 2))
                y = int(centerY - (height / 2))

                # update our list of bounding box coordinates, confidences,
                # and class IDs
                boxes.append([x, y, int(width), int(height)])
                confidences.append(float(confidence))
                classIDs.append(classID)

    # apply non-maxima suppression to suppress weak, overlapping bounding boxes
    idxs = cv2.dnn.NMSBoxes(boxes, confidences, min_confidence, nms_threshold)

    # ensure at least one detection exists
    flag = 0
    if len(idxs) > 0:
        # loop over the indexes we are keeping
        for i in idxs.flatten():
            # extract the bounding box coordinates
            (x, y) = (boxes[i][0], boxes[i][1])
            (w, h) = (boxes[i][2], boxes[i][3])
            flag = 1

            # draw a bounding box rectangle and label on the image
            color = [int(c) for c in COLORS[classIDs[i]]]
            cv2.rectangle(image, (x, y), (x + w, y + h), color, 2)

            # offset of the box centre from the image centre:
            # x grows to the right, y grows upward
            position = [(x + w / 2) - (W / 2), (H / 2) - (y + h / 2)]
            positions.append(position)
            text = "{}: {:.4f} {}".format(LABELS[classIDs[i]], confidences[i], position)
            cv2.putText(image, text, (x, y - 5), cv2.FONT_HERSHEY_SIMPLEX, 0.5, color, 2)

    return image, boxes, positions, confidences, classIDs

def get_postition(classId):
    # return the first detected box whose class matches classId, or a zero box
    global classIDs
    global boxes
    i = 0
    exists = False
    for v in classIDs:
        if v == classId:
            exists = True
            break
        i += 1
    if not exists:
        return [0, 0, 0, 0]
    box = boxes[i]
    print("classid", classId, "x is", box[0], "y is", box[1])
    return box

if __name__ == "__main__":
    im = Image.open('16.png')
    im_array = np.array(im)
    # PIL loads images as RGB; swap the channels to BGR for OpenCV
    # (COLOR_RGB2BGR and COLOR_BGR2RGB perform the same swap)
    im_array = cv2.cvtColor(im_array, cv2.COLOR_RGB2BGR)
    image_detection, boxes, positions, confidences, classIDs = yolo_detect(im_array)
    print(positions)    # [[350.5, 27.0], [242.0, -13.0], [-3.5, -59.5]]
    print(classIDs)     # [2, 2, 0]
    print(confidences)  # [0.9958043098449707, 0.9548179507255554, 0.8634780645370483]
    print(boxes)        # [[1135, 412, 31, 22], [1025, 451, 34, 24], [761, 371, 71, 277]]
    # a box such as [761, 371, 71, 277] is x, y, width, height, with the origin at the top-left corner
    box = get_postition(0)
    print(box)
    cv2.imshow('detection', image_detection)
    cv2.waitKey(0)
    cv2.destroyAllWindows()
```
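
The `positions` list holds the offset of each box centre from the centre of the frame, with x growing to the right and y growing upward. As a quick sanity check, the sample output above is consistent with a 1600×900 frame (an assumed size, inferred only from those printed values):

```python
# minimal sketch, assuming a 1600x900 frame as implied by the sample output
W, H = 1600, 900
x, y, w, h = 761, 371, 71, 277  # third box from the sample output
position = [(x + w / 2) - (W / 2), (H / 2) - (y + h / 2)]
print(position)  # [-3.5, -59.5], matching the third printed position
```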
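`cv2.dnn.NMSBoxes` is what removes the duplicate, overlapping candidates that YOLO emits for a single object: a box is kept only if its overlap with an already-kept, higher-scoring box stays below `nms_threshold`. A toy illustration (the box values here are made up, not taken from the detector):

```python
import cv2

# two heavily overlapping candidates for one object, plus one separate box (x, y, w, h)
boxes = [[10, 10, 100, 100], [14, 12, 100, 100], [300, 300, 50, 50]]
scores = [0.90, 0.60, 0.80]

# score threshold 0.3 and NMS (IoU) threshold 0.3, as in the script above
idxs = cv2.dnn.NMSBoxes(boxes, scores, 0.3, 0.3)
print(idxs.flatten())  # e.g. [0 2]: the weaker overlapping box is dropped
```

The shape of the returned index array differs across OpenCV releases (Nx1 in older versions, flat in 4.5.4+), which is why both the script and this sketch call `.flatten()` before iterating.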