Skip to content

Instantly share code, notes, and snippets.

@noobsdt
Last active November 18, 2020 09:31
Show Gist options
  • Save noobsdt/fd0cfa274a44ab909b527b9d6551ee7e to your computer and use it in GitHub Desktop.
#!/usr/bin/env python
# Usage: python googledorker.py example.com
# Need two dork files
# dorks, vendor
import sys
import requests
import subprocess
from time import sleep
from bs4 import BeautifulSoup
from requests.utils import requote_uri
'''
HEADERS = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; rv:68.0) Gecko/20100101 Firefox/68.0',
'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8',
'Accept-Language': 'en-US,en;q=0.5',
'Accept-Encoding': 'gzip, deflate, br',
}
'''
def dorking(dorks):
    """Run one Google dork query scoped to the target domain (sys.argv[1]).

    Builds a ``site:<domain> <dork>`` Google search URL, scrapes the result
    anchors out of the HTML, unwraps Google's ``/url?q=<target>&sa=...``
    redirect links, prints each clean URL and appends it to ``<domain>.txt``.

    dorks -- the dork string to search for (one line from dorks.txt).
    """
    req = None
    try:
        # Fix: use the `dorks` parameter — the original read the module-level
        # global `dork` and ignored its own argument.
        url = ('https://www.google.com/search?q=site:' + sys.argv[1] + '%20'
               + dorks + '&btnG=Search&hl=en-US&start=00&filter=0')
        encoded_url = requote_uri(url)
        print('\033[1;34m[*] Dork URL: ' + encoded_url + '\033[0m')
        print()
        req = requests.get(encoded_url)
        soup = BeautifulSoup(req.text, 'html.parser')
        # NOTE(review): '.kCrYT' is a Google-generated CSS class and may break
        # whenever Google changes its markup.
        links = soup.select('div .kCrYT a')
        for link in links:
            glink = link.get('href') or ''
            # Unwrap the Google redirect in pure Python instead of spawning
            # an echo|sed|sed|sed subprocess pipeline per link.
            if glink.startswith('/url?q='):
                glink = glink[len('/url?q='):]
            out = glink.split('&', 1)[0].strip()
            if out:
                print(out)
                with open(sys.argv[1] + '.txt', 'a') as output:
                    output.write(out + '\n')
    except requests.exceptions.ConnectionError:
        # Fix: requests' ConnectionError is not the builtin ConnectionError,
        # so the original handler never fired on network failures.
        print("Connection Error! 😣️")
    # Fix: guard against `req` being unbound when the request itself failed.
    if req is not None and req.status_code == 429:
        print("\033[1;36m[*] Need to solve Google reCaptcha! 😣️\033[0m")
        sys.exit()
# Run every dork from dorks.txt against the target domain, throttled to one
# query every 90 seconds to reduce the chance of Google rate limiting.
with open('dorks.txt') as file1:
    print("\033[1;34m Domain: " + sys.argv[1] + '\033[0m')
    print()
    # A file object already iterates line by line; list(readlines()) was
    # redundant.
    for dork in file1:
        print(f'\033[1;32m[~] Searching for {dork} \033[0m')
        try:
            dorking(dork)
            sleep(90)
            print()
        except Exception:
            # Fix: the bare `except:` also swallowed SystemExit (including
            # the sys.exit() dorking issues on a 429) and KeyboardInterrupt.
            print(f'\033[1;36m[*] Nothing found for {dork}\033[0m')
print()
print("=" * 30)
print()
def Dorking(dorks):
    """Run one vendor-scoped Google query mentioning the target domain.

    Unlike ``dorking``, the query is ``site:<vendor> <domain>`` — the dork
    (a vendor site from vendors.txt) goes into the ``site:`` operator and
    the target domain is the search term. Results are unwrapped from
    Google's ``/url?q=...&sa=...`` redirects, printed, and appended to
    ``<domain>.txt``.

    dorks -- the vendor/dork string (one line from vendors.txt).
    """
    req = None
    try:
        # Fix: use the `dorks` parameter — the original read the module-level
        # global `dork` and ignored its own argument.
        url = ('https://www.google.com/search?q=site:' + dorks + '%20'
               + sys.argv[1] + '&btnG=Search&hl=en-US&start=00&filter=0')
        encoded_url = requote_uri(url)
        print('\033[1;34m[*] Dork URL: ' + encoded_url + '\033[0m')
        print()
        req = requests.get(encoded_url)
        soup = BeautifulSoup(req.text, 'html.parser')
        links = soup.select('div .kCrYT a')
        for link in links:
            glink = link.get('href') or ''
            # Unwrap the Google redirect in pure Python instead of spawning
            # an echo|sed|sed|sed subprocess pipeline per link.
            if glink.startswith('/url?q='):
                glink = glink[len('/url?q='):]
            out = glink.split('&', 1)[0].strip()
            if out:
                print(out)
                with open(sys.argv[1] + '.txt', 'a') as output:
                    output.write(out + '\n')
    except requests.exceptions.ConnectionError:
        # Fix: requests' ConnectionError is not the builtin ConnectionError,
        # so the original handler never fired on network failures.
        print("Connection Error! 😣️")
    # Fix: guard against `req` being unbound when the request itself failed.
    if req is not None and req.status_code == 429:
        print("\033[1;36m[*] Need to solve Google reCaptcha! 😣️\033[0m")
        sys.exit()
# Run every vendor entry from vendors.txt against the target domain, throttled
# to one query every 90 seconds to reduce the chance of Google rate limiting.
with open('vendors.txt') as file2:
    print("\033[1;34m Domain: " + sys.argv[1] + '\033[0m')
    print()
    # A file object already iterates line by line; list(readlines()) was
    # redundant.
    for dork in file2:
        print(f'\033[1;32m[~] Searching for {dork} \033[0m')
        try:
            Dorking(dork)
            sleep(90)
            print()
        except Exception:
            # Fix: the bare `except:` also swallowed SystemExit (including
            # the sys.exit() Dorking issues on a 429) and KeyboardInterrupt.
            print(f'\033[1;36m[*] Nothing found for {dork}\033[0m')
Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment