Skip to content

Instantly share code, notes, and snippets.

@9Valjew
Created April 21, 2024 23:12
Show Gist options
  • Star 0 You must be signed in to star a gist
  • Fork 0 You must be signed in to fork a gist
Save 9Valjew/26a8db5d957469f19d7cc3996b2a72a2 to your computer and use it in GitHub Desktop.
A quick-and-dirty histogram-equalization and target detection/tracking script, assembled from AI-generated snippets combined with prior OpenCV experience.
import cv2
import numpy as np
import pyautogui
import time
import keyboard
# Constants
DETECTION_AREA_SIZE = 50  # half-width/height (pixels) of the square search region around the cursor
PREDICTION_DELAY = 0.1 # in seconds; how far ahead to extrapolate the target's motion
# Global variables (shared between the main loop, the keyboard callback, and the predictor)
priority_target = None  # (x, y) screen coordinates of the most recently detected target, or None
is_locked = False  # True while the cursor is locked onto the target; toggled by pressing "g"
last_position = None  # previous target position, used for velocity estimation
last_time = None  # timestamp (time.time()) at which last_position was recorded
def predict_movement(current_position):
    """Extrapolate where the target will be PREDICTION_DELAY seconds from now.

    Estimates velocity linearly from the previously observed position and
    timestamp (module globals ``last_position`` / ``last_time``).

    Args:
        current_position: (x, y) tuple of the target's current screen position.

    Returns:
        An (x, y) tuple with the predicted position, or ``None`` on the first
        call, when there is no history to estimate velocity from.
    """
    global last_position, last_time
    now = time.time()
    if last_position is None or last_time is None:
        # No history yet — record this observation and report "no prediction".
        last_position = current_position
        last_time = now
        return None
    dt = now - last_time
    if dt <= 0:
        # Clock did not advance; no velocity information — assume stationary.
        prediction = current_position
    else:
        velocity_x = (current_position[0] - last_position[0]) / dt
        velocity_y = (current_position[1] - last_position[1]) / dt
        prediction = (
            current_position[0] + velocity_x * PREDICTION_DELAY,
            current_position[1] + velocity_y * PREDICTION_DELAY,
        )
    # Bug fix: refresh the history on EVERY call, so velocity is measured
    # between consecutive observations. The original only set these on the
    # first call, making every later velocity relative to the very first fix.
    last_position = current_position
    last_time = now
    return prediction
def toggle_lock():
    """Flip the aim-lock state, dropping the remembered target on unlock."""
    global is_locked, priority_target
    is_locked = not is_locked
    if not is_locked:
        # Just unlocked — forget the old target so a stale lock can't resume.
        priority_target = None
def on_g_press(event):
    """keyboard.on_press callback: pressing the "g" key toggles the lock."""
    if event.name != "g":
        return
    toggle_lock()
def main():
    """Run the capture/detect/track loop.

    Each iteration grabs a full-screen screenshot, edge-detects a small square
    region around the cursor, and marks the largest contour as the target.
    While locked (toggled with "g"), the cursor is steered toward the target's
    predicted position. Press "z" in the preview window to quit.

    Side effects: moves the mouse cursor, opens an OpenCV window, and installs
    (then removes) a global keyboard hook.
    """
    global priority_target, is_locked, last_position, last_time
    # Register the "g" key handler (global hook — works without window focus).
    keyboard.on_press(on_g_press)
    while True:
        # "z" pressed while the OpenCV window has focus stops the program.
        key = cv2.waitKey(1) & 0xFF
        if key == ord("z"):
            break
        # Capture the whole screen and convert to OpenCV's BGR channel order.
        screenshot = pyautogui.screenshot()
        frame = cv2.cvtColor(np.array(screenshot), cv2.COLOR_RGB2BGR)
        # Square search region centred on the cursor, clamped to screen bounds.
        cursor_x, cursor_y = pyautogui.position()
        x1 = max(0, cursor_x - DETECTION_AREA_SIZE)
        y1 = max(0, cursor_y - DETECTION_AREA_SIZE)
        x2 = min(frame.shape[1], cursor_x + DETECTION_AREA_SIZE)
        y2 = min(frame.shape[0], cursor_y + DETECTION_AREA_SIZE)
        # Edge-detect the region and keep only external contours.
        gray = cv2.cvtColor(frame[y1:y2, x1:x2], cv2.COLOR_BGR2GRAY)
        edges = cv2.Canny(gray, 50, 150)
        contours, _ = cv2.findContours(edges, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
        if contours:
            largest_contour = max(contours, key=cv2.contourArea)
            x, y, w, h = cv2.boundingRect(largest_contour)
            # Target = centre of the bounding box, in full-screen coordinates.
            priority_target = (x + x1 + w // 2, y + y1 + h // 2)
            cv2.rectangle(frame, (x + x1, y + y1), (x + x1 + w, y + y1 + h), (0, 255, 0), 2)
        if is_locked and priority_target:
            # Bug fix: call predict_movement unconditionally. The original
            # gated it on last_position, which only predict_movement itself
            # initialises — so the prediction branch was dead code.
            predicted_position = predict_movement(priority_target)
            if predicted_position:
                pyautogui.moveTo(predicted_position[0], predicted_position[1], duration=0.05)
            else:
                # First observation: no velocity yet, snap straight to target.
                pyautogui.moveTo(priority_target[0], priority_target[1], duration=0.05)
        # Show the whole screen, contrast-boosted via histogram equalisation.
        grayscale_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        equalized_frame = cv2.equalizeHist(grayscale_frame)
        cv2.imshow("Screen with Target", equalized_frame)
    # Remove the keyboard hook and close windows before exiting.
    keyboard.unhook_all()
    cv2.destroyAllWindows()


if __name__ == "__main__":
    main()
Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment