#!/usr/bin/env python
# Enumerate subdomains from subdomainfinder.c99.nl result pages found via Google search.
# Usage: python c99finder.py example.com outputfile
import sys
import requests
from bs4 import BeautifulSoup

try:
    # Google dork: find subdomainfinder.c99.nl scan pages for the target domain.
    req = requests.get(
        'https://www.google.com/search?q=site:subdomainfinder.c99.nl+'
        + sys.argv[1]
        + '&btnG=Search&hl=en-US&biw=&bih=&gbv=1&start=00&filter=0')
    soup = BeautifulSoup(req.text, 'html.parser')
    res = soup.select('div .kCrYT a')
    last_seen = set()  # subdomains already written, deduplicated across all result pages
    for links in res:
        link = links.get('href')
        # Result links look like '/url?q=<real URL>&sa=...'; keep only those
        # mentioning the target domain, then strip the '/url?q=' prefix and
        # the '&...' tracking suffix that Google appends.
        if not link or sys.argv[1] not in link:
            continue
        flink = link.replace('/url?q=', '').split('&')[0].strip()
        print(f'\033[1;32m[~] Getting subdomains from {flink}\033[0m')
        print()
        req2 = requests.get(flink)
        soup2 = BeautifulSoup(req2.text, 'html.parser')
        res2 = soup2.find_all('a', rel='noreferrer')  # subdomain links on the c99 page
        if res2:
            with open(sys.argv[2], 'a') as resultfile:
                for l2 in res2:
                    sub = l2.text
                    if sub not in last_seen:
                        resultfile.write(sub + '\n')
                        last_seen.add(sub)
except requests.exceptions.ConnectionError:
    print("Connection Error")
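For reference, a typical invocation, using example.com and subs.txt as placeholder arguments:

    python c99finder.py example.com subs.txt

Each subdomain scraped from the matching c99.nl result pages is appended to subs.txt on its own line, with duplicates skipped within the run.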