# bea.py — BEA (SCNE) archive asset extractor.
# Source: GitHub gist by @SciresM (SciresM/bea.py), created Oct 5, 2018.
from struct import unpack as up
import sys, os, hashlib
import zstandard as zstd
dirs, files = None, None  # module-level placeholders; never assigned or read in this script — NOTE(review): looks like dead state, confirm before removing
def read_at(fp, off, len):
    """Seek *fp* to absolute offset *off* and return the next *len* bytes.

    NOTE: the parameter name `len` shadows the builtin; it is kept
    unchanged for signature compatibility with existing callers.
    """
    fp.seek(off)
    data = fp.read(len)
    return data
def read_u8(fp, off):
    """Return the unsigned byte stored at absolute offset *off* in *fp*."""
    fp.seek(off)
    return up('<B', fp.read(1))[0]
def read_u16(fp, off):
    """Return the little-endian unsigned 16-bit value at offset *off* in *fp*."""
    fp.seek(off)
    return up('<H', fp.read(2))[0]
def read_u32(fp, off):
    """Return the little-endian unsigned 32-bit value at offset *off* in *fp*."""
    fp.seek(off)
    return up('<I', fp.read(4))[0]
def read_u64(fp, off):
    """Return the little-endian unsigned 64-bit value at offset *off* in *fp*."""
    fp.seek(off)
    return up('<Q', fp.read(8))[0]
def read_filename(fp, off, l):
    """Read an *l*-byte, NUL-padded filename field starting at *off*.

    Returns the bytes before the first NUL terminator (the whole field
    when no NUL is present), or b'' when the field length is zero.
    """
    if l == 0:
        return b''
    fp.seek(off)
    raw = fp.read(l)
    # Bug fix: the original tested "'\0' in s", which works on Python 2
    # str but raises TypeError on Python 3 bytes (the file is opened in
    # binary mode). split() handles both the terminated and the
    # unterminated case in one step, on Py2 and Py3 alike.
    return raw.split(b'\x00', 1)[0]
def read_from_string_table(fp, off):
    """Return the string-table entry at *off*.

    An entry is a little-endian u16 length prefix followed by that many
    bytes of string data.
    """
    fp.seek(off)
    (length,) = up('<H', fp.read(2))
    fp.seek(off + 2)
    return fp.read(length)
def dump_asset(offset):
    """Extract one ASST asset record at *offset* in the open archive.

    Reads the asset header, resolves the output filename from the string
    table, and writes the zstd-decompressed payload under the global
    out_dir. Uses the module globals `archive` (open binary file) and
    `out_dir` (destination directory), both set by main().
    """
    global archive, out_dir
    # Bug fix: compare against bytes — on Python 3 read_at() returns
    # bytes and the original str comparison was always unequal. Also
    # return early instead of dumping a record we know is malformed.
    if read_at(archive, offset, 4) != b'ASST':
        print('Error: Asset is not an ASST?')
        return
    # Header at offset+0x10 (0x20 bytes): compression flag, alignment,
    # compressed size, uncompressed size, padding, data offset, name
    # offset — field meanings presumed from usage; TODO confirm.
    # (`cmp` renamed to cmp_flag: it shadowed the Py2 builtin.)
    cmp_flag, algn, cmp_size, size, _, data_ofs, name_ofs = up('<HHIIIQQ', read_at(archive, offset + 0x10, 0x20))
    name = read_from_string_table(archive, name_ofs).decode('utf-8')
    # NOTE(security): `name` comes from the archive itself; a hostile
    # archive could escape out_dir via '..' components — not sanitized.
    path = os.path.join(out_dir, name)
    parent = os.path.dirname(path)
    if parent:
        # exist_ok replaces the original bare except, which also hid
        # genuine failures such as permission errors.
        os.makedirs(parent, exist_ok=True)
    print('Dumping %s (%012X-%012X) to %s...' % (name, data_ofs, data_ofs + size, path))
    with open(path, 'wb') as f:
        f.write(zstd.ZstdDecompressor().decompress(read_at(archive, data_ofs, cmp_size)))
    print('Dumped!')
def main(argc, argv):
    """Entry point: open the SCNE archive named by argv[1] and dump every
    asset it indexes into the directory argv[2]."""
    if argc != 3:
        print('Usage: %s in_file out_dir' % argv[0])
        return
    # (Dropped the never-assigned dirs_off/files_off/fdata_off globals.)
    global archive, out_dir
    try:
        archive = open(argv[1], 'rb')
    except IOError:
        # Guard only the open() itself; the original bare except also
        # swallowed unrelated read errors and mislabeled them.
        print('Failed to open %s.' % argv[1])
        return
    with archive:  # bug fix: the handle was never closed
        # Bug fix: compare against bytes — archive reads return bytes on
        # Python 3, so the original str comparison always failed.
        if read_at(archive, 0, 4) != b'SCNE':
            print('Error: Invalid archive.')
            return
        out_dir = argv[2]
        # Header at 0x20: asset count, then the offset of a table of u64
        # per-asset header offsets.
        num_files, asset_info_ofs = up('<QQ', read_at(archive, 0x20, 0x10))
        print('Num Files: %d' % num_files)
        for i in range(num_files):  # range: xrange is Py2-only
            dump_asset(read_u64(archive, asset_info_ofs + 8 * i))
        print('Done!')
# Script entry point: argc/argv are passed explicitly, C-style.
if __name__ == '__main__':
    main(len(sys.argv), sys.argv)
# (GitHub gist page footer removed.)