Created
February 3, 2016 18:58
-
-
Save bored-engineer/3843c9998b1b87069ea4 to your computer and use it in GitHub Desktop.
Parse Bugcrowd submissions to a CSV
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
import requests | |
import sys | |
import json | |
from bs4 import BeautifulSoup | |
# --- Bugcrowd submission scraper ---
# Logs in to bugcrowd.com with the credentials given on the command line
# (argv[1] = email, argv[2] = password), walks every page of the account's
# submissions list, downloads each submission's activity timeline, and
# prints the collected timelines as JSON on stdout. Progress goes to stderr
# so stdout stays clean for piping into the companion CSV script.

# Create a session so the login cookie persists across all requests
s = requests.Session()

# Fetch the sign-in page to obtain the CSRF authenticity_token
sys.stderr.write("Getting authenticity_token...\n")
r = s.get("https://bugcrowd.com/user/sign_in")
if r.status_code != requests.codes.ok:
    print(r.text)
    # Fix: exit with a non-zero status on failure (bare exit() returned 0)
    sys.exit(1)
soup = BeautifulSoup(r.content, "html.parser")

# Login with that session
sys.stderr.write("Logging in...\n")
r = s.post("https://bugcrowd.com/user/sign_in", data={
    "utf8": "✓",
    "authenticity_token": soup.find('input', { "name": "authenticity_token" })["value"],
    "user[email]": sys.argv[1],
    "user[password]": sys.argv[2],
    "button": "",
})
if r.status_code != requests.codes.ok:
    print(r.text)
    sys.exit(1)
soup = BeautifulSoup(r.content, "html.parser")

# Maps submission id -> list of timeline entries
results = {}

# Page through the submissions listing until an HTTP error or the empty page
page = 0
while True:
    page += 1
    sys.stderr.write("Downloading submissions page " + str(page) + "...\n")
    r = s.get("https://bugcrowd.com/submissions", params={ "page": page })
    if r.status_code != requests.codes.ok:
        break
    # Bugcrowd shows this message on the first page past the end
    if "Nothing to see here yet!" in r.text:
        break
    soup = BeautifulSoup(r.content, "html.parser")
    # Each submission is linked as /submissions/<id> inside this <ul>
    submissions = soup.find("ul", class_="submissions")
    for submission in submissions.find_all("a"):
        # Skip links that are not submission detail pages
        if not submission["href"].startswith("/submissions/"):
            continue
        sub_id = submission["href"][len("/submissions/"):]
        sys.stderr.write("Downloading submission " + sub_id + "...\n")
        r = s.get("https://bugcrowd.com" + submission["href"])
        if r.status_code != requests.codes.ok:
            # Fix: the original message was missing its trailing newline
            sys.stderr.write("Failed to download: " + sub_id + "\n")
            continue
        # Fix: use a separate name so the listing-page `soup` above is not
        # clobbered while we are still iterating links extracted from it
        sub_soup = BeautifulSoup(r.content, "html.parser")
        timelines = sub_soup.find("ul", class_="timeline-list")
        timeline = []
        for activity in timelines.find_all("li", class_="activity"):
            # The initial report entry is laid out differently from the rest
            if "bug-report" in activity["class"]:
                # Kudos points live outside the activity entry on the page;
                # None when the submission has not been awarded points yet
                points = sub_soup.find("span", class_="green kudos-points")
                timeline.append({
                    "type": "Submitted",
                    "date": activity.find("h6", text="Submitted:").parent.find("p").text,
                    "text": points.text if points else None,
                })
            else:
                timeline.append({
                    "type": activity.find("h5", class_="name").text,
                    "date": activity.find("span", class_="date").text,
                    "text": activity.find("div", class_="message").text,
                })
        results[sub_id] = timeline

# Emit everything as one JSON object for the companion CSV script
print(json.dumps(results))
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
import json | |
import sys | |
import csv | |
import re | |
# --- Bugcrowd JSON-to-CSV converter ---
# Reads the JSON produced by the scraper script from stdin and writes one
# CSV row per submission to stdout, recording the milestone dates found in
# each submission's activity timeline. Milestones that never occurred are
# left blank (DictWriter's default restval of "").
fieldnames = ['Submission', 'Submitted', 'Points', 'Triaged', 'Rewarded_Points', 'Rewarded', 'Reward']
csvwriter = csv.DictWriter(sys.stdout, delimiter=',', fieldnames=fieldnames)
csvwriter.writeheader()

# Fix: raw strings — "\d" / "\$" in a plain string are invalid escape
# sequences (SyntaxWarning on modern CPython). Compile once, outside the
# per-activity loop.
points_re = re.compile(r"^(\d+)")   # leading kudos-point count
reward_re = re.compile(r"(\$[\d,]+)")  # dollar amount, e.g. "$1,500"

# Parse the scraper output: submission id -> list of timeline entries
submissions = json.load(sys.stdin)

for sub_id, timeline in submissions.items():
    info = {
        "Submission": sub_id
    }
    # Fold each timeline activity into the row's milestone columns
    for activity in timeline:
        if activity["type"] == "Submitted":
            info["Submitted"] = activity["date"]
            # "text" is None when no kudos points were awarded
            if activity["text"]:
                info["Points"] = points_re.search(activity["text"]).group(1)
        elif activity["type"] == "State changed":
            if "triaged" in activity["text"]:
                info["Triaged"] = activity["date"]
            elif "kudos points" in activity["text"]:
                info["Rewarded_Points"] = activity["date"]
        elif activity["type"] == "Reward added":
            info["Rewarded"] = activity["date"]
            info["Reward"] = reward_re.search(activity["text"]).group(1)
    csvwriter.writerow(info)
Sign up for free
to join this conversation on GitHub.
Already have an account?
Sign in to comment