The requestor solves some CTF challenges using requests, RegEx, and BeautifulSoup. This gist was created exclusively for the PyForGood tutorial.
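To run the script you need the requests and beautifulsoup4 packages installed (for example via pip), plus a wordlist file named common.txt in the working directory for the directory brute forcer below.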
#!/usr/bin/python3
import requests
import re
from bs4 import BeautifulSoup
url= "http://challenge01.root-me.org/web-serveur/ch4/"
#This is a extremly simple directory brute forcer.
def dirbrute():
file = open('common.txt', 'r') #open a wordlist
for i in file.readlines(): #creat a for loop to rotate through the wordlist
i = i.replace('\n','') #usually items in the file.readlines() function returns with a newline so we user replace() to escap that
r = requests.get(url + i) #perform a GET request to the url above and add the item from the wordlist
soup = BeautifulSoup(r.text, 'html.parser') #make a soup of the response.
print( i + " is : " + str(r.status_code)+" Title: "+str(soup.title.string))
#print directory plus response code (200,404,403..) and the title of the page
#We used the title.string to parse the title from the page.
dirbrute()
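# Note: dirbrute() expects common.txt to contain one candidate path per line.
# A minimal, purely hypothetical example of its contents:
#   admin
#   backup
#   index.php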
# This function solves a challenge from root-me.
def admin():
    headers = {'user-agent': 'admin'}  # the custom headers we want to send
    r = requests.get("http://challenge01.root-me.org/web-serveur/ch2/", headers=headers)  # perform a GET request to the challenge, including the custom header
    soup = BeautifulSoup(r.text, 'html.parser')  # make a soup of it
    h3 = soup.find_all('h3')  # search for all <h3> tags in the page
    print(h3)  # print them
admin()
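# Optional sanity-check sketch: requests keeps the headers it actually sent on the
# PreparedRequest object, so inside admin() you could confirm the spoofed value with:
#   print(r.request.headers['user-agent'])  # should print 'admin'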
# This function solves a challenge from picoCTF 2018.
def pico():
    r = requests.get('http://2018shell1.picoctf.com:10157/robots.txt')  # perform a GET request to the challenge URL and store the response in r
    source = r.text  # the content of the response
    page = re.findall(r'Disallow: /(.+)', source)[0]  # search robots.txt for a disallowed directory; '.' matches any character except a newline
    print('Found: ' + page)
    r = requests.get('http://2018shell1.picoctf.com:10157/{}'.format(page))  # access the directory found in robots.txt
    source = r.text  # again, store the response's content in the source variable
    print(re.findall(r'(picoCTF\{.+\})', source)[0])  # search the content for the flag
    # notice that we used \ to escape the { and } characters
pico()
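# Quick illustration of the robots.txt regex used in pico(), run against a made-up
# response body (the sample text is hypothetical, not the real challenge output):
sample = "User-agent: *\nDisallow: /made_up_page.html\n"
print(re.findall(r'Disallow: /(.+)', sample))  # -> ['made_up_page.html']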