# Self Driving Trucks
# Concatenated gist sources: directkeys.py, draw_lanes.py, grabscreen.py and
# the main capture/steer script. (Web-page chrome from the scrape removed.)
import ctypes
import time

# Handle to the Win32 SendInput entry point (Windows-only: ctypes.windll).
SendInput = ctypes.windll.user32.SendInput

# DirectInput (DIK_*) keyboard scan codes for the keys this script presses.
W = 0x11
A = 0x1E
S = 0x1F
D = 0x20
# C struct redefinitions: ctypes mirrors of the Win32 INPUT structure family
# (KEYBDINPUT / HARDWAREINPUT / MOUSEINPUT) consumed by SendInput.
# NOTE: class-body indentation was lost in the original paste; restored here.
PUL = ctypes.POINTER(ctypes.c_ulong)


class KeyBdInput(ctypes.Structure):
    """Mirror of the Win32 KEYBDINPUT structure."""
    _fields_ = [("wVk", ctypes.c_ushort),
                ("wScan", ctypes.c_ushort),
                ("dwFlags", ctypes.c_ulong),
                ("time", ctypes.c_ulong),
                ("dwExtraInfo", PUL)]


class HardwareInput(ctypes.Structure):
    """Mirror of the Win32 HARDWAREINPUT structure."""
    _fields_ = [("uMsg", ctypes.c_ulong),
                ("wParamL", ctypes.c_short),
                ("wParamH", ctypes.c_ushort)]


class MouseInput(ctypes.Structure):
    """Mirror of the Win32 MOUSEINPUT structure."""
    _fields_ = [("dx", ctypes.c_long),
                ("dy", ctypes.c_long),
                ("mouseData", ctypes.c_ulong),
                ("dwFlags", ctypes.c_ulong),
                ("time", ctypes.c_ulong),
                ("dwExtraInfo", PUL)]


class Input_I(ctypes.Union):
    """Mirror of the anonymous union inside the Win32 INPUT structure."""
    _fields_ = [("ki", KeyBdInput),
                ("mi", MouseInput),
                ("hi", HardwareInput)]


class Input(ctypes.Structure):
    """Mirror of the Win32 INPUT structure: a type tag plus the payload union."""
    _fields_ = [("type", ctypes.c_ulong),
                ("ii", Input_I)]
# Actual input functions
def PressKey(hexKeyCode):
    """Send a key-down event for the given DirectInput scan code via SendInput."""
    extra = ctypes.c_ulong(0)
    ii_ = Input_I()
    # dwFlags 0x0008 = KEYEVENTF_SCANCODE: treat wScan as a hardware scan code.
    ii_.ki = KeyBdInput(0, hexKeyCode, 0x0008, 0, ctypes.pointer(extra))
    x = Input(ctypes.c_ulong(1), ii_)  # type 1 = INPUT_KEYBOARD
    ctypes.windll.user32.SendInput(1, ctypes.pointer(x), ctypes.sizeof(x))
def ReleaseKey(hexKeyCode):
    """Send a key-up event for the given DirectInput scan code via SendInput."""
    extra = ctypes.c_ulong(0)
    ii_ = Input_I()
    # 0x0008 | 0x0002 = KEYEVENTF_SCANCODE | KEYEVENTF_KEYUP.
    ii_.ki = KeyBdInput(0, hexKeyCode, 0x0008 | 0x0002, 0, ctypes.pointer(extra))
    x = Input(ctypes.c_ulong(1), ii_)  # type 1 = INPUT_KEYBOARD
    ctypes.windll.user32.SendInput(1, ctypes.pointer(x), ctypes.sizeof(x))
# directx scan codes http://www.gamespp.com/directx/directInputKeyboardScanCodes.html
if __name__ == '__main__':
    # Demo: tap W once per second until interrupted.
    # (Uses the W constant defined above instead of repeating the 0x11 literal.)
    while True:
        PressKey(W)
        time.sleep(1)
        ReleaseKey(W)
        time.sleep(1)
from numpy import ones , vstack
from numpy.linalg import lstsq
from statistics import mean
import numpy as np
def draw_lanes(img, lines, color=[0, 255, 255], thickness=3):
    """From Hough line segments, derive the two dominant lane lines.

    Returns ([x1, y1, x2, y2], [x1, y1, x2, y2], lane1_slope, lane2_slope) for
    the two most populous slope/intercept clusters, or None (after printing
    the error) when fewer than two clusters exist or any step fails.
    `img`, `color` and `thickness` are accepted for API compatibility but are
    not used by the function body.
    """
    try:
        # Topmost y seen in any segment approximates the horizon; 600 is the
        # bottom of the captured frame.
        ys = []
        for i in lines:
            for ii in i:
                ys += [ii[1], ii[3]]
        min_y = min(ys)
        max_y = 600

        line_dict = {}
        for idx, i in enumerate(lines):
            for xyxy in i:
                # Fit y = m*x + b through the segment's two endpoints
                # (method from http://stackoverflow.com/questions/21565994).
                x_coords = (xyxy[0], xyxy[2])
                y_coords = (xyxy[1], xyxy[3])
                A = vstack([x_coords, ones(len(x_coords))]).T
                # rcond=None avoids the numpy FutureWarning; identical result
                # for this well-conditioned two-point fit.
                m, b = lstsq(A, y_coords, rcond=None)[0]
                # Extend the segment to span from the horizon to the bottom.
                x1 = (min_y - b) / m
                x2 = (max_y - b) / m
                line_dict[idx] = [m, b, [int(x1), min_y, int(x2), max_y]]

        # Cluster lines whose slope AND intercept are within +/-20% (by
        # absolute value) of an existing cluster's first member.
        final_lanes = {}
        for idx in line_dict:
            final_lanes_copy = final_lanes.copy()
            m = line_dict[idx][0]
            b = line_dict[idx][1]
            line = line_dict[idx][2]
            if len(final_lanes) == 0:
                final_lanes[m] = [[m, b, line]]
            else:
                found_copy = False
                for other_ms in final_lanes_copy:
                    if not found_copy:
                        if abs(other_ms * 1.2) > abs(m) > abs(other_ms * 0.8):
                            if abs(final_lanes_copy[other_ms][0][1] * 1.2) > abs(b) > abs(final_lanes_copy[other_ms][0][1] * 0.8):
                                final_lanes[other_ms].append([m, b, line])
                                found_copy = True
                                break
                            # NOTE: slope matched but intercept did not ->
                            # the line is dropped (original behavior kept).
                        else:
                            # Slope matched no existing cluster: start a new one.
                            final_lanes[m] = [[m, b, line]]

        # Keep the two clusters with the most members.
        line_counter = {}
        for lanes in final_lanes:
            line_counter[lanes] = len(final_lanes[lanes])
        top_lanes = sorted(line_counter.items(), key=lambda item: item[1])[::-1][:2]
        lane1_id = top_lanes[0][0]
        lane2_id = top_lanes[1][0]

        def average_lane(lane_data):
            # Element-wise mean of the extended endpoints within one cluster.
            x1s = []
            y1s = []
            x2s = []
            y2s = []
            for data in lane_data:
                x1s.append(data[2][0])
                y1s.append(data[2][1])
                x2s.append(data[2][2])
                y2s.append(data[2][3])
            return int(mean(x1s)), int(mean(y1s)), int(mean(x2s)), int(mean(y2s))

        l1_x1, l1_y1, l1_x2, l1_y2 = average_lane(final_lanes[lane1_id])
        l2_x1, l2_y1, l2_x2, l2_y2 = average_lane(final_lanes[lane2_id])

        return [l1_x1, l1_y1, l1_x2, l1_y2], [l2_x1, l2_y1, l2_x2, l2_y2], lane1_id, lane2_id
    except Exception as e:
        # Best-effort: the caller treats a None return (unpack failure) as
        # "no lanes found this frame".
        print(str(e))
# import numpy as np
# from PIL import ImageGrab
# import cv2
# import time
#
# last_time = time.time()
#
# while(True):
# screen = ImageGrab.grab(bbox=(0,40,800,640))
# screen_np = np.array(screen.getdata(), dtype='uint8')
# print('Frame taking {} seconds now'.format(time.time()-last_time))
# last_time = time.time()
# cv2.imshow('window', np.array(screen))
# if cv2.waitKey(25) & 0xFF == ord('q'):
# cv2.destroyAllWindows()
# break
import matplotlib.pyplot as plt
import tensorflow as tf

# TensorFlow 1.x graph-construction scratch snippet: builds the symbolic
# expression f = x*x*y + y + 2 from two Variables. Nothing here evaluates it
# (no session/run in this file).
x = tf.Variable(3, name='x')
y = tf.Variable(4, name='y')
f = x*x*y + y + 2
# Done by Frannecklp
import cv2
import numpy as np
import win32gui , win32ui , win32con , win32api
def grab_screen(region=None):
    """Capture the screen via Win32 GDI and return it as an RGB numpy array.

    region: optional (left, top, x2, y2) bounding box, edges inclusive.
            When omitted, the full virtual screen (all monitors) is captured.
    Returns an array of shape (height, width, 3) in RGB channel order.
    """
    hwin = win32gui.GetDesktopWindow()
    if region:
        left, top, x2, y2 = region
        width = x2 - left + 1
        height = y2 - top + 1
    else:
        width = win32api.GetSystemMetrics(win32con.SM_CXVIRTUALSCREEN)
        height = win32api.GetSystemMetrics(win32con.SM_CYVIRTUALSCREEN)
        left = win32api.GetSystemMetrics(win32con.SM_XVIRTUALSCREEN)
        top = win32api.GetSystemMetrics(win32con.SM_YVIRTUALSCREEN)

    # Blit the desktop into an in-memory bitmap.
    hwindc = win32gui.GetWindowDC(hwin)
    srcdc = win32ui.CreateDCFromHandle(hwindc)
    memdc = srcdc.CreateCompatibleDC()
    bmp = win32ui.CreateBitmap()
    bmp.CreateCompatibleBitmap(srcdc, width, height)
    memdc.SelectObject(bmp)
    memdc.BitBlt((0, 0), (width, height), srcdc, (left, top), win32con.SRCCOPY)

    signedIntsArray = bmp.GetBitmapBits(True)
    # FIX: np.fromstring is deprecated (and removed in new numpy) for binary
    # input; np.frombuffer is the supported equivalent.
    img = np.frombuffer(signedIntsArray, dtype='uint8')
    img.shape = (height, width, 4)  # GDI delivers BGRA rows

    # Release all GDI handles to avoid resource leaks.
    srcdc.DeleteDC()
    memdc.DeleteDC()
    win32gui.ReleaseDC(hwin, hwindc)
    win32gui.DeleteObject(bmp.GetHandle())

    return cv2.cvtColor(img, cv2.COLOR_BGRA2RGB)
import numpy as np
import cv2
import time
import pyautogui
from directkeys import PressKey , ReleaseKey , W , A , S , D
from draw_lanes import draw_lanes
from grabscreen import grab_screen
def roi(img, vertices):
    """Mask `img` to the polygon(s) in `vertices`; pixels outside become 0."""
    # Start from an all-black mask the same shape as the input.
    mask = np.zeros_like(img)
    # Fill the polygon interior with the fill value (255).
    cv2.fillPoly(mask, vertices, 255)
    # Keep the image only where the mask is nonzero.
    masked = cv2.bitwise_and(img, mask)
    return masked
def process_img(image):
    """Edge-detect `image`, mask to the road region, and overlay lane lines.

    Returns (processed_img, original_image, m1, m2) where m1/m2 are the slopes
    of the two dominant lanes, or 0 when no lanes were found. original_image
    is mutated in place by the cv2.line overlays.
    """
    original_image = image
    # Grayscale -> Canny edges -> blur to suppress noise before Hough.
    processed_img = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    processed_img = cv2.Canny(processed_img, threshold1=200, threshold2=300)
    processed_img = cv2.GaussianBlur(processed_img, (5, 5), 0)
    # Trapezoid covering the road ahead in the 800x600 capture.
    vertices = np.array([[10, 500], [10, 300], [300, 200], [500, 200],
                         [800, 300], [800, 500]], np.int32)
    processed_img = roi(processed_img, [vertices])
    # rho=1px, theta=1deg, threshold=180 votes.
    # BUG FIX: the original passed 20 and 15 positionally, which bound them to
    # the `lines` output placeholder and minLineLength — not to
    # minLineLength/maxLineGap as the comment intended. Keyword args fix that.
    # (docs: http://docs.opencv.org/3.0-beta/doc/py_tutorials/py_imgproc/py_houghlines/py_houghlines.html)
    lines = cv2.HoughLinesP(processed_img, 1, np.pi / 180, 180,
                            minLineLength=20, maxLineGap=15)
    m1 = 0
    m2 = 0
    try:
        l1, l2, m1, m2 = draw_lanes(original_image, lines)
        cv2.line(original_image, (l1[0], l1[1]), (l1[2], l1[3]), [0, 255, 0], 30)
        cv2.line(original_image, (l2[0], l2[1]), (l2[2], l2[3]), [0, 255, 0], 30)
    except Exception as e:
        # draw_lanes returns None when it cannot find two lanes; keep going.
        print(str(e))
    try:
        # Draw every raw Hough segment on the processed (edge) image.
        for coords in lines:
            coords = coords[0]
            try:
                cv2.line(processed_img, (coords[0], coords[1]),
                         (coords[2], coords[3]), [255, 0, 0], 3)
            except Exception as e:
                print(str(e))
    except Exception:
        # `lines` is None when HoughLinesP found nothing; nothing to draw.
        pass
    return processed_img, original_image, m1, m2
def straight():
    """Accelerate: hold W and release both steering keys."""
    PressKey(W)
    ReleaseKey(A)
    ReleaseKey(D)
def left():
    """Steer left with a brief tap of A.

    A is released immediately after being pressed, so the steering input is a
    short pulse rather than a hold; W is released to ease off the throttle
    while turning (original behavior, preserved).
    """
    PressKey(A)
    ReleaseKey(W)
    ReleaseKey(D)
    ReleaseKey(A)
def right():
    """Steer right with a brief tap of D.

    Like left(): the steering key is released in the same call, so the input
    is a short pulse; W is also released (original behavior, preserved).
    """
    PressKey(D)
    ReleaseKey(A)
    ReleaseKey(W)
    ReleaseKey(D)
def slow_ya_roll():
    """Coast: release throttle and both steering keys."""
    ReleaseKey(W)
    ReleaseKey(A)
    ReleaseKey(D)
# Countdown so the user can focus the game window before input starts.
# (Replaces list(range(4))[::-1] + print(i + 1) with the equivalent reversed
# range — same 4, 3, 2, 1 output.)
for i in range(4, 0, -1):
    print(i)
    time.sleep(1)

last_time = time.time()
while True:
    # Grab the 800x600 game region (top offset 40px skips the title bar).
    screen = grab_screen(region=(0, 40, 800, 640))
    print('Frame took {} seconds'.format(time.time() - last_time))
    last_time = time.time()
    new_screen, original_image, m1, m2 = process_img(screen)
    # cv2.imshow('window', new_screen)
    cv2.imshow('window2', cv2.cvtColor(original_image, cv2.COLOR_BGR2RGB))
    # Both lane slopes negative -> steer right; both positive -> steer left;
    # mixed signs (or no lanes, m1 == m2 == 0) -> hold straight.
    if m1 < 0 and m2 < 0:
        right()
    elif m1 > 0 and m2 > 0:
        left()
    else:
        straight()
    # cv2.imshow('window',cv2.cvtColor(screen, cv2.COLOR_BGR2RGB))
    if cv2.waitKey(25) & 0xFF == ord('q'):
        cv2.destroyAllWindows()
        break
# (End of gist — web-page footer removed.)