Forráskód Böngészése

前期代码完成

dongguoliang@kuaifazs.com 4 éve
commit
d6120ad0fc
7 módosított fájl, 287 hozzáadás és 0 törlés
  1. 4 0
      .gitignore
  2. 53 0
      game.py
  3. 25 0
      get_point.py
  4. 38 0
      main.py
  5. 4 0
      setup.py
  6. 20 0
      ui.py
  7. 143 0
      yolo.py

+ 4 - 0
.gitignore

@@ -0,0 +1,4 @@
+*.png
+dist/
+venv/
+yolo-config/

+ 53 - 0
game.py

@@ -0,0 +1,53 @@
+import ui
+import yolo
+import time
+import pyautogui
+
+dx = ""  # direction_x:x方向
+dy = ""  # direction_y:y方向
+
+ak = "e"  # action_knife :动刀
+aa = ""  # action_all:大招
+
+
def move(lead, stone):
    """Steer the hero toward a target by holding arrow keys.

    lead:  hero bounding box [x, y, w, h]; the hero's y is anchored to
           his feet by subtracting his height (lead[3]).
    stone: target box/position; stone[0] == 0 means "no target this frame".

    Releases whatever direction keys were held by the previous call, then
    holds the keys that move the hero toward the target.  Module globals
    dx/dy record the held keys so the next call can release them.
    """
    global dx
    global dy

    # Release previously held keys; guard against the "" sentinel instead
    # of relying on pyautogui silently ignoring an empty key name, and
    # reset the trackers so stale values are never re-released.
    if dx:
        pyautogui.keyUp(dx)
    if dy:
        pyautogui.keyUp(dy)
    dx = ""
    dy = ""

    if stone[0] == 0:  # nothing detected -> stop moving
        return

    x = lead[0] - stone[0]
    # Anchor the hero's vertical position to his feet: lead[3] is his height.
    y = lead[1] - lead[3] - stone[1]

    if x != 0:
        direction_x = "left" if x > 0 else "right"
        print("x is", x, direction_x)
        pyautogui.keyDown(direction_x)
        dx = direction_x

    if y != 0:
        direction_y = "down" if y > 0 else "up"
        print("y is", y, direction_y)
        pyautogui.keyDown(direction_y)
        dy = direction_y
+
+
# Fight a monster: re-position toward it while re-triggering the attack key.
def strange(lead, m):
    """Walk the hero toward monster `m` and hold the attack key.

    Order matters: the attack key `ak` is released first so the swing
    restarts cleanly after move() has re-aimed the hero.
    """
    global ak
    # global aa
    pyautogui.keyUp(ak)
    move(lead, m)
    pyautogui.keyDown(ak)
+
+
+if __name__ == "__main__":
+    ""
+    # move()

+ 25 - 0
get_point.py

@@ -0,0 +1,25 @@
"""Standalone debugging script: run the tiny-YOLO bot model over one
screenshot (./1.png) and step through the detections one key press at a time.
"""
import os  # bug fix: os was used below but never imported (NameError)

import cv2
import numpy as np
import pyyolo

# Model files live under yolo-config/ (kept out of git, see .gitignore).
names_filepath = os.path.sep.join(['yolo-config', "bot.names"])
weights_filepath = os.path.sep.join(['yolo-config', "yolov3-tiny-bot_2000.weights"])
cfg_filepath = os.path.sep.join(['yolo-config', "yolov3-tiny-bot.cfg"])


image_filepath = './1.png'

meta = pyyolo.load_names(names_filepath)
net = pyyolo.load_net(cfg_filepath, weights_filepath, False)

im = cv2.imread(image_filepath)
yolo_img = pyyolo.array_to_image(im)
res = pyyolo.detect(net, meta, yolo_img)
# One random BGR draw color per class.
colors = np.random.rand(meta.classes, 3) * 255

for r in res:
    cv2.rectangle(im,
                  r.bbox.get_point(pyyolo.BBox.Location.TOP_LEFT, is_int=True),
                  r.bbox.get_point(pyyolo.BBox.Location.BOTTOM_RIGHT, is_int=True),
                  tuple(colors[r.id].tolist()), 2)
    # Redisplay after each box so detections can be inspected one by one.
    cv2.imshow('Frame', im)
    cv2.waitKey(0)

+ 38 - 0
main.py

@@ -0,0 +1,38 @@
# coding=gbk
"""Main capture/act loop: grab the emulator window, run YOLO on the frame,
and drive the hero toward gems and monsters.  Press 'q' in the preview
window to quit."""
import numpy as np
import cv2
from PIL import ImageGrab
from win32gui import FindWindow, GetWindowRect
import yolo
import game
import pyautogui

WINDOW_NAME = "雷电模拟器(64)"  # LDPlayer emulator window title

while True:

    # Re-query the window handle every frame in case it moves or resizes.
    hwnd = FindWindow(None, WINDOW_NAME)  # renamed: `id` shadowed the builtin
    bbox = GetWindowRect(hwnd)
    image_array = np.array(ImageGrab.grab(bbox=bbox))
    image_array = cv2.cvtColor(image_array, cv2.COLOR_BGR2RGB)

    image_array, boxes, positions, confidences, classIDs = yolo.yolo_detect(image_array)
    # get_postition() reads these module globals -- publish the fresh frame.
    yolo.boxes = boxes
    yolo.classIDs = classIDs

    pos_lead = yolo.get_postition(0)          # hero
    pos_stone_green = yolo.get_postition(2)   # green gem guide marker
    pos_stone_yellow = yolo.get_postition(3)  # yellow gem guide marker
    pos_m1 = yolo.get_postition(15)           # level-5 small monster
    pos_m2 = yolo.get_postition(16)           # level-5 big monster

    # if pos_stone_green[0] != 0:
    game.move(pos_lead, pos_stone_green)
    if pos_m1[0] != 0:
        game.strange(pos_lead, pos_m1)
    if pos_m2[0] != 0:
        game.strange(pos_lead, pos_m2)

    cv2.imshow('screenshot', image_array)
    if cv2.waitKey(25) & 0xFF == ord('q'):
        cv2.destroyAllWindows()
        break

+ 4 - 0
setup.py

@@ -0,0 +1,4 @@
# Build a Windows executable from main.py:  python setup.py py2exe
from distutils.core import setup
import py2exe  # looks unused, but importing py2exe registers the "py2exe" command

setup(console=['main.py'])

+ 20 - 0
ui.py

@@ -0,0 +1,20 @@
+import pyautogui
+
+
def press(key):
    """Tap a single key once.

    `key` is a key-name string; every supported name (26 letters, digits,
    symbols, F1-F20, arrow keys, ...) is listed in pyautogui.KEYBOARD_KEYS.
    """
    pyautogui.press(key)
+
+
def drag(x, y):
    """Drag the mouse by (x, y) pixels from its current position.

    x: negative = left, positive = right
    y: negative = up,   positive = down

    The drag takes 0.5 s and uses the left button; e.g. drag(-100, 200)
    pulls the pointer 100 px left and 200 px down.
    """
    pyautogui.dragRel(x, y, duration=0.5, button='left')
+
+
+if __name__ == "__main__":
+    drag(100, 100)
+    # press("a")

+ 143 - 0
yolo.py

@@ -0,0 +1,143 @@
+import numpy as np
+import time
+import cv2
+import os
+from PIL import Image
+
# Detection filtering thresholds.
min_confidence = 0.3  # discard raw detections below this confidence
nm_threshold = 0.3    # IoU overlap threshold for non-max suppression

# Latest frame's detections.  Published by the caller (main.py assigns
# yolo.boxes / yolo.classIDs after each yolo_detect) and read by
# get_postition().
classIDs = []
boxes = []

labelsPath = os.path.sep.join(['yolo-config', "bot.names"])
# Alternative training checkpoints kept commented out for quick A/B swaps.
# weightsPath = os.path.sep.join(['yolo-config', "yolov3-tiny-bot_2000.weights"])
weightsPath = os.path.sep.join(['yolo-config', "yolov3-tiny-bot_2500.weights"])
# weightsPath = os.path.sep.join(['yolo-config', "yolov3-tiny-bot_1600.weights"])
# weightsPath = os.path.sep.join(['yolo-config', "yolov3-tiny-bot_1700.weights"])
# weightsPath = os.path.sep.join(['yolo-config', "yolov3-tiny-bot_400.weights"])
# weightsPath = os.path.sep.join(['yolo-config', "yolov3-tiny-bot_6000.weights"])
# weightsPath = os.path.sep.join(['yolo-config', "yolov3-tiny-bot_2700.weights"])
configPath = os.path.sep.join(['yolo-config', "yolov3-tiny-bot.cfg"])

# Class labels, one random draw color per class, and the Darknet network
# (loaded once at import time).
LABELS = open(labelsPath).read().strip().split("\n")
COLORS = np.random.randint(0, 255, size=(len(LABELS), 3), dtype="uint8")
net = cv2.dnn.readNetFromDarknet(configPath, weightsPath)
+
+
def yolo_detect(image):
    """Run tiny-YOLO over a BGR image and draw the surviving detections.

    Returns (image, boxes, positions, confidences, classIDs):
      - boxes[i] = [x, y, w, h] in pixels, origin at the top-left corner,
      - positions[j] = box center relative to the image center
        (+x right of center, +y above center) for NMS survivors only,
      - classIDs[i] indexes LABELS.
    The input image is annotated in place.
    """
    (H, W) = image.shape[:2]

    # Determine only the *output* layer names we need from YOLO.
    # Compatibility fix: getUnconnectedOutLayers() returns Nx1 arrays on
    # OpenCV < 4.5.4 but a flat 1-D array on newer builds; reshape(-1)
    # handles both shapes (the original `i[0] - 1` crashes on new OpenCV).
    ln = net.getLayerNames()
    ln = [ln[int(i) - 1] for i in np.asarray(net.getUnconnectedOutLayers()).reshape(-1)]

    # Forward pass: bounding boxes plus class probabilities per cell/anchor.
    blob = cv2.dnn.blobFromImage(image, 1 / 255.0, (416, 416), swapRB=True, crop=False)
    net.setInput(blob)
    start = time.time()
    layerOutputs = net.forward(ln)
    end = time.time()

    # show timing information on YOLO
    print("[INFO] YOLO took {:.6f} seconds".format(end - start))

    boxes = []
    positions = []
    confidences = []
    classIDs = []

    for output in layerOutputs:
        for detection in output:
            # detection = [cx, cy, w, h, objectness, class scores...]
            scores = detection[5:]
            classID = np.argmax(scores)
            confidence = scores[classID]

            # Keep only confident predictions.
            if confidence > min_confidence:
                # YOLO emits center coordinates and sizes normalized to
                # [0, 1]; scale to pixels and derive the top-left corner.
                box = detection[0:4] * np.array([W, H, W, H])
                (centerX, centerY, width, height) = box.astype("int")
                x = int(centerX - (width / 2))
                y = int(centerY - (height / 2))

                boxes.append([x, y, int(width), int(height)])
                confidences.append(float(confidence))
                classIDs.append(classID)

    # Non-maximum suppression drops weak, overlapping boxes.
    idxs = cv2.dnn.NMSBoxes(boxes, confidences, min_confidence, nm_threshold)

    # Draw and compute center-relative positions for the survivors.
    # (Removed the unused `flag` local from the original.)
    if len(idxs) > 0:
        for i in idxs.flatten():
            (x, y) = (boxes[i][0], boxes[i][1])
            (w, h) = (boxes[i][2], boxes[i][3])
            color = [int(c) for c in COLORS[classIDs[i]]]
            cv2.rectangle(image, (x, y), (x + w, y + h), color, 2)
            position = [(x + w / 2) - (W / 2), (H / 2) - (y + h / 2)]
            positions.append(position)
            text = "{}: {:.4f} {}".format(LABELS[classIDs[i]], confidences[i], position)
            cv2.putText(image, text, (x, y - 5), cv2.FONT_HERSHEY_SIMPLEX, 0.5, color, 2)

    return image, boxes, positions, confidences, classIDs
+
+
def get_postition(classId, class_ids=None, box_list=None):
    """Return the first detected box [x, y, w, h] for `classId`, or
    [0, 0, 0, 0] when the class was not seen in the latest frame.

    (The name keeps its historical misspelling -- callers depend on it.)

    class_ids / box_list default to the module globals `classIDs` / `boxes`
    that the caller publishes after each yolo_detect() frame; they may be
    passed explicitly (e.g. for testing).
    """
    if class_ids is None:
        class_ids = classIDs
    if box_list is None:
        box_list = boxes

    # Idiomatic first-match search instead of the manual index counter.
    for i, v in enumerate(class_ids):
        if v == classId:
            box = box_list[i]
            print("classid", classId, "x is", box[0], "y is ", box[1])
            return box
    return [0, 0, 0, 0]
+
+
+if __name__ == "__main__":
+    im = Image.open('16.png')
+    im_array = np.array(im)
+    im_array = cv2.cvtColor(im_array, cv2.COLOR_BGR2RGB)
+    image_detection, boxes, positions, confidences, classIDs = yolo_detect(im_array)
+
+    print(positions)  # [[350.5, 27.0], [242.0, -13.0], [-3.5, -59.5]]
+    print(classIDs)  # [2, 2, 0]
+    print(confidences)  # [0.9958043098449707, 0.9548179507255554, 0.8634780645370483]
+    print(boxes)  # [[1135, 412, 31, 22], [1025, 451, 34, 24], [761, 371, 71, 277]];
+    # [761, 371, 71, 277]代表,x,y,宽,高(坐标以左上角为原点)
+    box = get_postition(0)
+    print(box)
+
+    cv2.imshow('detection', image_detection)
+    cv2.waitKey(0)
+    cv2.destroyAllWindows()