This goes with the Traversy Media Scrapy tutorial on YouTube
pip install scrapy
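A quick sanity check after installing, assuming pip put the scrapy CLI on PATH ("demo_scraper" is a placeholder project name):

scrapy version                     # prints the installed Scrapy version
scrapy startproject demo_scraper   # scaffolds a new Scrapy project skeleton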
subfinder
findomain
findomain -t google.com -o -> writes results to a file named after the target, e.g. google.com.txt (--help for more)
findomain -t google.com -u customized-filename.txt -> writes results to the given custom filename
aron - parameter bruteforcing
cat asset-domains.txt | while read line; do echo "$line"; done
cat asset-domains.txt | while read x; do nmap -Pn $(dig +short "$x" | head -n1); done
cat asset-domains.txt | while read x; do <any per-host step: nmap, directory bruteforcing, recursive subdomain bruteforcing>; done -> see the sketch below
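A minimal sketch of that per-host loop with a guard for names that do not resolve, assuming asset-domains.txt holds one hostname per line:

while read -r x; do
    ip=$(dig +short "$x" | head -n1)    # first A record, empty if the name does not resolve
    [ -n "$ip" ] && nmap -Pn "$ip"      # only scan hosts that actually resolved
done < asset-domains.txt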
#echo xyz.google.com | rev | awk -F'.' '{print $1"."$2"."$3}' | rev
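The same rev/awk trick can pull the registered domain out of a whole subdomain list; a sketch that keeps only the last two labels (assumes simple TLDs like .com, not co.uk):

cat asset-domains.txt | rev | awk -F'.' '{print $1"."$2}' | rev | sort -u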
#! /bin/bash
# findomain, subfinder, crtsh, massdns
# must include amass, domain bruteforcing, port scanning
# massdns and findomain binaries must be in /usr/local/bin
# my .bash_profile must be present
# Author: oyenom
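The header above mentions massdns; a minimal sketch of that resolution step, assuming resolvers.txt holds resolver IPs and candidates.txt holds generated subdomain names (both filenames are placeholders):

massdns -r resolvers.txt -t A -o S -w massdns-results.txt candidates.txt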
#! /usr/bin/env python3
import sys

# read hostnames from stdin, strip surrounding whitespace, print one per line
for line in sys.stdin:
    print(line.strip())
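A usage example, assuming the script above is saved as readstdin.py (placeholder name) and made executable:

cat asset-domains.txt | ./readstdin.py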
{
    "import-2431382345": {
        "title": "Burp_suite",
        "type": 1,
        "color": "#130001",
        "address": "127.0.0.1",
        "port": 8080,
        "active": true,
        "whitePatterns": [
            {
#! /bin/bash
# enumerate subdomains of $1 with findomain, subfinder, and crt.sh
crtsh(){
    curl -s "https://crt.sh/?q=%.$1&output=json" | jq -r '.[].name_value' | sed 's/\*\.//g' | sort -u
}
findomain -t "$1" -q | tee "$1.txt"
subfinder -d "$1" -silent | tee -a "$1.txt"
crtsh "$1" | tee -a "$1.txt"
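A usage example, assuming the script is saved as subenum.sh (placeholder name):

chmod +x subenum.sh
./subenum.sh example.com    # combined results land in example.com.txt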