Skip to content

Instantly share code, notes, and snippets.

@kozmer
Last active October 2, 2024 12:50
Show Gist options
  • Save kozmer/725cde788e4b3c8bdd870468c243916b to your computer and use it in GitHub Desktop.
Save kozmer/725cde788e4b3c8bdd870468c243916b to your computer and use it in GitHub Desktop.
ldapsearch parser for bofhound v0.2.0
# ldapsearch parser for bofhound v0.2.0, useful for environments that have ldap signing/binding and require kerberos auth.
# add support for msDS-KeyCredentialLink
"""
$ ldapsearch -LLL -o ldif-wrap=no -E '!1.2.840.113556.1.4.801=::MAMCAQc=' -H ldap://dc.fake.com -Y GSSAPI -N -b "DC=fake,DC=com" "(&(objectClass=*))" > ldapsearch_out.txt
SASL/GSSAPI authentication started
SASL username: redacted@FAKE.COM
SASL SSF: 256
SASL data security layer installed.
$ python3 ldapsearch_parse.py ldapsearch_out.txt ldapsearch_bofhound.txt
$ bofhound -i ldapsearch_bofhound.txt -o bofhound_out --zip
_____________________________ __ __ ______ __ __ __ __ _______
| _ / / __ / | ____/| | | | / __ \ | | | | | \ | | | \
| |_) | | | | | | |__ | |__| | | | | | | | | | | \| | | .--. |
| _ < | | | | | __| | __ | | | | | | | | | | . ` | | | | |
| |_) | | `--' | | | | | | | | `--' | | `--' | | |\ | | '--' |
|______/ \______/ |__| |__| |___\_\________\_\________\|__| \___\|_________\
by Fortalice ✪
[16:05:29] INFO Parsed 347 objects from 1 log files
[16:05:29] INFO Sorting parsed objects by type...
[16:05:29] INFO Parsed 94 Users
[16:05:29] INFO Parsed 65 Groups
[16:05:29] INFO Parsed 14 Computers
[16:05:29] INFO Parsed 1 Domains
[16:05:29] INFO Parsed 0 Trust Accounts
[16:05:29] INFO Parsed 8 OUs
[16:05:29] INFO Parsed 4 GPOs
[16:05:29] INFO Parsed 0 Schemas
[16:05:29] INFO Parsed 161 Unknown Objects
[16:05:29] INFO Parsed 2485 ACL relationships
[16:05:29] INFO Created default users
[16:05:29] INFO Created default groups
[16:05:29] INFO Resolved group memberships
[16:05:29] INFO Resolved delegation relationships
[16:05:29] INFO Resolved OU memberships
[16:05:29] INFO Linked GPOs to OUs
[16:05:29] INFO JSON files written to bofhound_out
[16:05:29] INFO Files compressed into bofhound_out/bloodhound_20240928_160529.zip
"""
import argparse
import base64
import struct
import uuid
def decode_sid(encoded_sid):
    """Decode a base64-encoded binary SID into its "S-R-I-S0-S1-..." text form.

    Blob layout: byte 0 is the revision, byte 1 the sub-authority count
    (not consulted here -- every remaining 4-byte group is decoded),
    bytes 2-7 the big-endian identifier authority, and each following
    4-byte group one little-endian sub-authority.
    """
    raw = base64.b64decode(encoded_sid)
    pieces = ["S", str(raw[0]), str(int.from_bytes(raw[2:8], byteorder='big'))]
    for offset in range(8, len(raw), 4):
        pieces.append(str(int.from_bytes(raw[offset:offset + 4], byteorder='little')))
    return "-".join(pieces)
def decode_guid(encoded_guid):
    """Decode a base64-encoded GUID to its canonical lowercase string form.

    Active Directory stores GUIDs in the little-endian "bytes_le" layout
    (data1/data2/data3 little-endian, data4 as-is). The stdlib `uuid`
    module performs exactly that mixed-endian decoding, so we delegate to
    it instead of hand-unpacking with `struct` -- the output string is
    identical ("xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx", lowercase).

    :param encoded_guid: base64 text of a 16-byte GUID blob
    :returns: dashed lowercase GUID string
    :raises ValueError: if the decoded blob is not exactly 16 bytes
    """
    guid = base64.b64decode(encoded_guid)
    return str(uuid.UUID(bytes_le=guid))
def decode_generic(encoded_guid):
    """Render any base64-encoded value as a bytes-literal-style hex string.

    Every byte becomes a two-digit ``\\x..`` escape wrapped in ``b'...'``,
    mirroring how bofhound expects opaque binary attributes to look.
    """
    raw = base64.b64decode(encoded_guid)
    hex_body = ''.join(map('\\x{:02x}'.format, raw))
    return f"b'{hex_body}'"
def process_dns_records(encoded_records):
    """Decode a "[b64, b64, ...]"-style list of base64 DNS record blobs.

    Printable ASCII bytes (32-126) are kept as characters; everything else
    is rendered as a ``\\x..`` escape. Undecodable entries are replaced by
    an error note instead of aborting the whole list.
    """
    decoded = []
    for chunk in encoded_records.strip('[]').split(', '):
        try:
            raw = base64.b64decode(chunk)
        except (base64.binascii.Error, ValueError) as e:
            decoded.append(f"Error decoding: {e}")
            continue
        rendered = []
        for byte in raw:
            rendered.append(chr(byte) if 32 <= byte <= 126 else f'\\x{byte:02x}')
        decoded.append("b'" + ''.join(rendered) + "'")
    return '[' + ', '.join(decoded) + ']'
# Mapping attributes to their corresponding decoding functions.
# These attributes arrive base64-encoded in ldapsearch output ("attr:: value");
# any attribute not listed here is passed through unchanged by process_line.
decoding_functions = {
    "objectSid": decode_sid,                     # binary SID -> "S-1-5-21-..." string
    "objectGUID": decode_guid,                   # 16-byte GUID -> dashed lowercase string
    "dnsRecord": process_dns_records,            # list of DNS record blobs
    "msDFSR-ContentSetGuid": decode_generic,     # opaque blobs -> b'\x..' hex strings
    "msDFSR-ReplicationGroupGuid": decode_generic,
    "mS-DS-ConsistencyGuid": decode_generic,
    "samDomainUpdates": decode_generic
}
def process_line(line):
    """Decode a single attribute line from ldapsearch output.

    ldapsearch marks base64-encoded values with a double colon
    ("attr:: <b64>"). Such lines are rewritten to "attr: <decoded>" using
    the matching decoder from `decoding_functions`; unknown attributes keep
    their raw base64 text. Plain lines are returned untouched.

    Fix: split with maxsplit=1 -- the original `line.split("::")` raised
    ValueError on the 2-tuple unpack if "::" ever appeared more than once
    in a line.
    """
    if "::" in line:
        attr, encoded_value = map(str.strip, line.split("::", 1))
        decode_func = decoding_functions.get(attr, lambda x: x)
        return f"{attr}: {decode_func(encoded_value)}"
    return line
def needs_remove(line):
    """Return True for lines carrying no attribute data.

    Filtered out: blank lines, LDIF comments (leading '#'), and the
    dn/ref/result/search bookkeeping lines emitted by ldapsearch.
    """
    stripped = line.strip()
    if not stripped or stripped.startswith('#'):
        return True
    return line.startswith(('dn: ', 'ref: ', 'result: ', 'search: '))
def process_object(obj):
    """Collapse one LDAP object (a paragraph of attribute lines) into a
    deduplicated "attr: v1, v2" block.

    Lines are filtered through `needs_remove`, decoded via `process_line`,
    and repeated attributes are joined with ", ".

    Fixes vs. the original:
      * repeated values are appended explicitly instead of via
        `.lstrip(', ')`, which silently ate a leading comma/space from the
        first value of an attribute;
      * `str.partition(': ')` replaces `split(': ', 1)` guarded only by
        `':' in line`, which raised ValueError on a line containing ':'
        but no ': ' separator -- such lines are now skipped.
    """
    attrs = {}
    for raw_line in obj.split("\n"):
        if needs_remove(raw_line):
            continue
        decoded = process_line(raw_line)
        attr, sep, value = decoded.partition(': ')
        if not sep:
            continue  # no "attr: value" separator on this line
        attr, value = attr.strip(), value.strip()
        if attr in attrs:
            attrs[attr] += f", {value}"
        else:
            attrs[attr] = value
    return '\n'.join(f'{attr}: {value}' for attr, value in attrs.items())
def process_ldap_output(input_file, output_file):
    """Convert a raw ldapsearch dump into bofhound's expected input format.

    Objects are separated by blank lines in the dump; each surviving object
    is decoded with `process_object` and written out preceded by a
    '--------------------' marker line.
    """
    with open(input_file, 'r') as src, open(output_file, 'w') as dst:
        rendered = []
        for raw in src.read().split("\n\n"):
            # Skip empty paragraphs and paragraphs consisting solely of
            # non-attribute lines (comments, dn/ref/result/search noise).
            if not raw.strip():
                continue
            if all(needs_remove(entry) for entry in raw.split("\n")):
                continue
            rendered.append('--------------------\n' + process_object(raw))
        dst.write('\n'.join(rendered).strip())
def main():
    """CLI entry point: parse the two positional file paths and run the converter."""
    cli = argparse.ArgumentParser(description="Process LDAP output for BOFHound.")
    cli.add_argument("input_file", help="Path to the input file containing LDAP output")
    cli.add_argument("output_file", help="Path to the output file to save processed data")
    parsed = cli.parse_args()
    process_ldap_output(parsed.input_file, parsed.output_file)


if __name__ == "__main__":
    main()
@1r0BIT
Copy link

1r0BIT commented Oct 2, 2024

Love the script! <3
If you run into issues with size limitations e.g:

SASL username: XXXXXXXXXX
SASL SSF: 256
SASL data security layer installed.
Size limit exceeded (4)

and the limitations are not enforced by the server, you can chunk the requests into pages with the following parameters:

-E pr=1000/noprompt -z 0

Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment