Skip to content

Instantly share code, notes, and snippets.

@uyjulian
Last active October 20, 2023 00:53
Show Gist options
  • Save uyjulian/a6ba33dc29858327ffa0db57f447abe5 to your computer and use it in GitHub Desktop.
# SPDX-License-Identifier: MIT
# Falcom YamaNeko engine it3 format reversing.
# For researching the file format structure.
# Doesn't do anything useful for the end user right now.
# For something useful for the end user, see https://github.com/TwnKey/YsVIII_model_dump or https://github.com/eArmada8/Ys8_IT3
# This script is a work in progress and does not output anything useful right now
# See also: https://ghpages.uyjulian.pw/falcom-dumps/
# CNode::Load2 function can be found only one of these two methods in each executable:
# 1. Search for string "INFO" and find cross-references to it (Gurumin)
# 2. Search for immediate 0x4F464E49 in functions (Zwei 2 and newer)
# If multiple results are found, the largest size function is the one that needs to be used.
# CNode::Load2 locations
# Gurumin PC: 0x755EF0
# Zwei 2 PC: 0x4FD550
# Ys 7 PC: 0x500C80
# Zero PC: 0x14002B7D0
# Ao PC: 0x14002AA50
# Nayuta PC: 0x14001F5E0
# Ys Celceta PC: 0x5794B0
# Ys 8 PC: 0x1403823F0
# Ys 9 PC: 0x1403F5500
# Sora no Kiseki FC (Serial=NPJH50373) passes all
# Sora no Kiseki SC (Serial=NPJH50706,NPJH50707) passes all except some unknown chunks
# Sora no Kiseki 3rd (Serial=NPJH50374) passes all except some unknown chunks
# Vantage Master Portable (Serial=NPJH50110) passes all
# Zwei!! (Serial=NPJH50111) passes all
# Brandish The Dark Revenant (Serial=NPJH50108) passes all
# Ys I & II Chronicles (Serial=NPJH50349) No it3 files
# Ys: The Oath in Felghana (Serial=NPJH50226) passes all
# Ys vs. Sora no Kiseki: Alternative Saga (Serial=NPJH50276) passes all
# Gurumin (AppID=322290) passes all
# Zwei 2 (AppID=427700) passes all
# Zero (AppID=1668510) passes all except some VPA8 chunks and some INFO chunks
# Ao (AppID=1668520) passes all
# Nayuta (AppID=1668530) passes all except some unknown chunks
# Ys 7 (AppID=587100) passes all
# Ys Memories of Celceta (AppID=587110) passes all except some VPA8 chunks
# Ys 8 (AppID=579180) passes all
# Ys 9 (AppID=1351630) passes all except some unknown chunks
# NPJH50706 failures:
# ./data_sc/map4/t0100_n.mc3/canban05.it3
# ./data_sc/map4/t0100_y.mc3/canban05.it3
# ./data_sc/map4/t0101.mc3/canban05.it3
# ./data_sc/map4/t0133_n.mc3/canban05.it3
# ./data_sc/map4/t0133_y.mc3/canban05.it3
# ./data_sc/map4/t0137.mc3/canban05.it3
# NPJH50707 failures:
# ./data_sc/map4/t0100_n.mc3/canban05.it3
# ./data_sc/map4/t0100_y.mc3/canban05.it3
# ./data_sc/map4/t0101.mc3/canban05.it3
# ./data_sc/map4/t0133_n.mc3/canban05.it3
# ./data_sc/map4/t0133_y.mc3/canban05.it3
# ./data_sc/map4/t0137.mc3/canban05.it3
# NPJH50374 failures:
# ./data_3rd/map4/t0100_n.mc3/canban05.it3
# 1668510 failures:
# ./data/map/a0000/a0002.it3
# ./data/map/a0000/a0004.it3
# ./data/map/a0000/a0005.it3
# ./data/map/a0000/a0008.it3
# ./data/map/a0000/a0009.it3
# ./data/map/a0000/a0010.it3
# ./data/map/a0000/a0011.it3
# ./data/map/a0000/a0012.it3
# ./data/map/a0000/a0013.it3
# ./data/map/a0001/a0001.it3
# ./data/map/a0002/a0002.it3
# 1668530 failures:
# ./chr/obj/common/ob_086.it3
# ./US/chr/obj/common/ob_086.it3
# 587110 failures:
# ./map/mp1213/mp1213.it3
# ./map/mp1213m/mp1213m.it3
# ./map/mp3101/mp3101.it3
# ./map/mp3101m/mp3101m.it3
# ./map/mp3102/mp3102.it3
# ./map/mp3105/mp3105.it3
# ./map/mp5211/mp5211.it3
# ./map/mp5211m/mp5211m.it3
# ./map/mp6122/mp6122.it3
# ./map/mp6135/mp6135.it3
# ./map/mp6143/mp6143.it3
# ./map/mp6191/mp6191.it3
# 1351630 failures:
# ./map/mp2000/mp2000.it3
# ./map/mp3100/mp3100.it3
import sys
import io
import struct
def check_eof(ident, f):
    """Report (via print) when stream *f* still has unread bytes.

    *ident* labels the message; the stream position is restored afterwards.
    """
    here = f.tell()
    f.seek(0, io.SEEK_END)
    end = f.tell()
    f.seek(here)
    if here != end:
        print(ident + " Not EOF " + str(here) + " " + str(end))
def read_unpack(fmt, f):
    """Read struct.calcsize(*fmt*) bytes from *f* and unpack them per *fmt*."""
    nbytes = struct.calcsize(fmt)
    return struct.unpack(fmt, f.read(nbytes))
def read_check(f, bytes_len):
    """Read exactly *bytes_len* bytes from *f*, raising if the stream is short.

    Used throughout the chunk decoders to skip over fixed-size regions while
    verifying the data is actually present.

    Returns the bytes read (existing callers ignore the return value).
    Raises Exception when fewer than *bytes_len* bytes are available; the
    message now includes the expected and actual counts for easier debugging.
    """
    d = f.read(bytes_len)
    if len(d) != bytes_len:
        raise Exception(
            "Bytes read not expected: wanted " + str(bytes_len)
            + ", got " + str(len(d))
        )
    return d
def read_fixed_string(f, str_len=64):
    """Read a *str_len*-byte field and drop its trailing NUL padding."""
    field = f.read(str_len)
    return field.rstrip(b"\x00")
def read_matrix4_float(f):
    """Read sixteen native-endian float32 values (a flat 4x4 matrix)."""
    return struct.unpack("16f", f.read(struct.calcsize("16f")))
def read_vector2_float(f):
    """Read two native-endian float32 values."""
    return struct.unpack("2f", f.read(struct.calcsize("2f")))
def read_vector3_float(f):
    """Read three native-endian float32 values."""
    return struct.unpack("3f", f.read(struct.calcsize("3f")))
def read_vector4_float(f):
    """Read four native-endian float32 values."""
    return struct.unpack("4f", f.read(struct.calcsize("4f")))
def read_vector2_uint32(f):
    """Read two native-endian unsigned 32-bit integers."""
    return struct.unpack("2I", f.read(struct.calcsize("2I")))
def read_vector3_uint32(f):
    """Read three native-endian unsigned 32-bit integers."""
    return struct.unpack("3I", f.read(struct.calcsize("3I")))
def read_vector4_uint32(f):
    """Read four native-endian unsigned 32-bit integers."""
    return struct.unpack("4I", f.read(struct.calcsize("4I")))
# Reference: CEgPacks3_C77::ExecUnpack
def decompress_blocks_single(f, dst, in_dst_offset, block_size, uncompressed_block_size):
    """Decode one C77 block from *f* into bytearray *dst* at *in_dst_offset*.

    Block layout: a little-endian u32 type tag (0 = stored, 8 = compressed)
    followed by the payload.  Returns *dst*.
    """
    out_pos = in_dst_offset
    tag = struct.unpack("<I", f.read(4))[0]
    if tag not in (0, 8):
        raise Exception("Unsupported block type " + str(tag))
    if tag != 8:
        # Stored block: payload copied through verbatim.
        dst[out_pos:out_pos + uncompressed_block_size] = f.read(uncompressed_block_size)
        return dst
    src = f.read(block_size - 4)
    pos = 0
    while pos < len(src):
        run = src[pos]
        ctrl = src[pos + 1]
        pos += 2
        if run == 0:
            # Literal run: *ctrl* bytes copied straight from the source.
            dst[out_pos:out_pos + ctrl] = src[pos:pos + ctrl]
            pos += ctrl
            out_pos += ctrl
        else:
            back = ctrl + 1
            if back < run:
                # Overlapping back-reference must be copied byte by byte.
                for i in range(run):
                    dst[out_pos + i] = dst[out_pos - back + i]
            else:
                start = out_pos - back
                dst[out_pos:out_pos + run] = dst[start:start + run]
            out_pos += run
            # Each back-reference is followed by exactly one literal byte.
            dst[out_pos:out_pos + 1] = src[pos:pos + 1]
            pos += 1
            out_pos += 1
    return dst
# Reference: CEgPacks2::UnpackBZMode2
# Also known as falcom_compress / BZ / BZip / zero method
def decompress(buffer, output, size):
    """LZSS-style BZ decompression of *buffer* into the bytearray *output*.

    Flag bits are consumed LSB-first from little-endian 16-bit words that are
    interleaved with the literal/length bytes; only the high byte of the
    first word carries flags (its low byte is the method tag).

    Returns (bytes_written, bytes_consumed).  *size* is accepted for
    interface compatibility but not used.
    """
    src = 0        # read cursor into buffer (u16 in the engine)
    out = 0        # write cursor into output (u16 in the engine)
    nbits = 8      # first word contributes 8 flag bits, later words 16
    flagword = int.from_bytes(buffer[src:src + 2], byteorder="little") >> 8
    src += 2

    def next_flag():
        # Pop one flag bit, refilling flagword from the stream when empty.
        nonlocal nbits, flagword, src
        if nbits == 0:
            word = buffer[src:src + 2]
            if len(word) < 2:
                raise Exception("Out of data")
            flagword = int.from_bytes(word, byteorder="little")
            src += 2
            nbits = 16
        bit = flagword & 1
        flagword >>= 1
        nbits -= 1
        return bit

    def copy_backref(distance):
        # Decode a run length, then copy that many bytes from *distance*
        # bytes back in the already-written output (overlap is intended).
        nonlocal src, out
        length = 2
        if next_flag() == 0:
            length += 1
            if next_flag() == 0:
                length += 1
                if next_flag() == 0:
                    length += 1
                    if next_flag() == 0:
                        if next_flag() == 0:
                            # Long form: explicit byte + 0xE.
                            piece = buffer[src:src + 1]
                            if len(piece) < 1:
                                raise Exception("Out of data")
                            length = int.from_bytes(piece, byteorder="little")
                            src += 1
                            length += 0xE
                        else:
                            # Mid form: 3 flag bits + 6 (range 6..13).
                            length = 0
                            for _ in range(3):
                                length = (length << 1) | next_flag()
                            length += 0x6
        for _ in range(length):
            output[out] = output[out - distance]
            out += 1

    while True:
        if next_flag() == 0:
            # Flag 0: literal byte copied straight through.
            output[out:out + 1] = buffer[src:src + 1]
            out += 1
            src += 1
            continue
        if next_flag() == 0:
            # Flags 10: back-reference with an 8-bit distance.
            distance = int.from_bytes(buffer[src:src + 1], byteorder="little")
            src += 1
            copy_backref(distance)
            continue
        # Flags 11: high distance bits, terminator, or repeated-byte run.
        high = 0
        for _ in range(5):  # up to 5 extra distance bits (max distance 0x31FF)
            high = (high << 1) | next_flag()
        low = int.from_bytes(buffer[src:src + 1], byteorder="little")
        src += 1
        if high != 0:
            copy_backref(low | (high << 8))
        elif low > 2:
            # Short distance delivered through the long encoding.
            copy_backref(low)
        elif low == 0:
            break  # end of stream
        else:
            # Run of a single repeated byte.
            long_form = next_flag()  # nonzero = long sequence (> 30)
            length = 0
            for _ in range(4):
                length = (length << 1) | next_flag()
            if long_form != 0:
                # Extend with an 8-bit low part (max = 0xFFF + 0xE).
                length = (length << 8) | int.from_bytes(buffer[src:src + 1], byteorder="little")
                src += 1
            length += 0xE
            output[out:out + length] = bytes(buffer[src:src + 1]) * length
            src += 1
            out += length
    return out, src
# Reference: CSafeFile::freadP
# Also known as FALCOM3 compression
def decompress_blocks_stream(f):
    """Decompress a FALCOM3 block stream read from file-like *f*.

    Two container layouts are distinguished by the first u32:
      * high bit set   -> per-block (size, uncompressed size) headers, each
        payload decoded by decompress_blocks_single();
      * high bit clear -> the u32 is the total compressed size and the
        payload is a sequence of BZ sub-blocks decoded by decompress().

    Returns the decompressed data as bytes.
    """
    flags = read_unpack("<I", f)[0]
    dst = None
    dst_offset = 0
    if (flags & 0x80000000) != 0:
        # Block layout; the low 31 bits of *flags* select the unpack method
        # (only method 1 is handled here).
        num_blocks, compressed_size, segment_size, uncompressed_size = read_unpack("<4I", f)
        dst = bytearray(uncompressed_size)
        for i in range(num_blocks):
            block_size, uncompressed_block_size = read_unpack("<2I", f)
            if (flags & 0x7FFFFFFF) == 1:
                decompress_blocks_single(f, dst, dst_offset, block_size, uncompressed_block_size)
            # NOTE(review): advancing by the *compressed* block size looks
            # suspicious -- uncompressed_block_size would be expected here.
            # Confirm against CSafeFile::freadP before changing.
            dst_offset += block_size
    else:
        # BZ layout: *flags* itself is the total compressed size, which
        # includes the 8 bytes of header read next.
        compressed_size = flags
        uncompressed_size, num_blocks = read_unpack("<2I", f)
        dst = bytearray(uncompressed_size) # Should already be initialized with 0
        cdata = io.BytesIO(f.read(compressed_size - 8))
        for i in range(num_blocks):
            # Each sub-block: u16 total size, then the BZ bitstream whose
            # first byte is the method tag (only method 0 is supported).
            block_size = read_unpack("<H", cdata)[0]
            output_tmp = bytearray(65536)
            inbuf = cdata.read(block_size - 2)
            if inbuf[0] != 0:
                raise Exception("Non-zero method currently not supported")
            num1, num2 = decompress(inbuf, output_tmp, block_size)
            dst[dst_offset:dst_offset + num1] = output_tmp[0:num1]
            dst_offset += num1
            if dst_offset >= uncompressed_size:
                break
            # A zero continuation byte (or EOF) ends the sub-block list.
            x = cdata.read(1)
            if len(x) == 0:
                break
            if x[0] == 0:
                break
    return bytes(dst)
def read_chunks(f, arr):
    """Append [tag, payload] pairs from *f* to *arr* until EOF or IEND.

    Each chunk is a 4-byte tag, a little-endian u32 payload length, then the
    payload bytes.
    """
    while True:
        tag = f.read(4)
        if len(tag) < 4:
            break
        size_raw = f.read(4)
        if len(size_raw) < 4:
            break
        payload_len = int.from_bytes(size_raw, byteorder="little")
        arr.append([tag, f.read(payload_len)])
        if tag == b"IEND":
            break
def dispatch_chunks(chunks, dispatch):
    """Invoke dispatch[tag](payload, tag) for each [tag, payload] pair.

    Chunks are loaded in CNode::Load2.  Tags with no handler are reported
    on stdout.
    """
    for tag, payload in chunks:
        handler = dispatch.get(tag)
        if handler is not None:
            handler(payload, tag)
        else:
            print("Unhandled chunk ", tag)
def read_itp(df):
    """Parse an ITP texture container from file-like *df*.

    Research-only: chunk payloads are decoded to validate the layout and
    then discarded.  Unknown chunk tags are reported by dispatch_chunks().
    """
    # IHDR headers seen so far, in order; IPAL/IDAT consult the first one
    # to choose a decode path.
    ihdrs = []
    def decode_chunk_ihdr(data_bytes, chunk_id):
        # Image header.  Field names s1..s6/unk* kept from the RE notes;
        # their meaning is not yet established.
        f = io.BytesIO(data_bytes)
        unk0 = read_unpack("I", f)[0]
        dim1 = read_unpack("I", f)[0]
        dim2 = read_unpack("I", f)[0]
        compressed_size = read_unpack("I", f)[0]
        s1 = read_unpack("H", f)[0]
        bpp = read_unpack("H", f)[0]
        s3 = read_unpack("H", f)[0]
        s4 = read_unpack("H", f)[0]
        type_ = read_unpack("H", f)[0]
        s6 = read_unpack("H", f)[0]
        unk1 = read_unpack("I", f)[0]
        dic = {}
        dic["type"] = type_
        dic["bpp"] = bpp
        dic["dim1"] = dim1
        dic["dim2"] = dim2
        ihdrs.append(dic)
        check_eof("IHDR", f)
    def decode_chunk_ialp(data_bytes, chunk_id):
        # Alpha-related chunk: two unknown u32 values.
        f = io.BytesIO(data_bytes)
        unk0 = read_unpack("I", f)[0]
        unk1 = read_unpack("I", f)[0]
        check_eof("IALP", f)
    def decode_chunk_imip(data_bytes, chunk_id):
        # Mipmap-related chunk: three unknown u32 values.
        f = io.BytesIO(data_bytes)
        unk0 = read_unpack("I", f)[0]
        unk1 = read_unpack("I", f)[0]
        unk2 = read_unpack("I", f)[0]
        check_eof("IMIP", f)
    def decode_chunk_ihas(data_bytes, chunk_id):
        # Unknown chunk: two u32 values followed by two float32 values.
        f = io.BytesIO(data_bytes)
        unk0 = read_unpack("I", f)[0]
        unk1 = read_unpack("I", f)[0]
        unk2 = read_unpack("f", f)[0]
        unk3 = read_unpack("f", f)[0]
        check_eof("IHAS", f)
    def decode_chunk_ipal(data_bytes, chunk_id):
        # Palette chunk.  Parsing is disabled by the early return below;
        # the remaining statements are the intended logic, kept for
        # reference and currently unreachable.
        f = io.BytesIO(data_bytes)
        return # FIXME
        infox = read_unpack("6BH", f)
        if infox[4]:
            raise Exception("Palette from external file currently not supported")
        f.read(infox[6])
        if len(ihdrs) > 0:
            ihdr0 = ihdrs[0]
            if (ihdr0["type"] & 0xFFFFFF00) != 0:
                raise Exception("Unexpected codepath")
            elif ihdr0["type"] == 2:
                # FIXME: Fix this
                # unk2 = read_unpack("I", f)[0]
                # unk3 = read_unpack("I", f)[0]
                palette_data = decompress_blocks_stream(f)
            else:
                raise Exception("Unexpected codepath")
        check_eof("IPAL", f)
    def decode_chunk_idat(data_bytes, chunk_id):
        # Pixel-data chunk.  Decode path depends on the first IHDR's
        # "type"/"bpp": compressed block stream vs. raw pixel read.
        f = io.BytesIO(data_bytes)
        valid_bpps = [0,1,2,4,5,6,7,8,10]
        # Size factor per "bpp" key for the raw read below; units are
        # unverified (taken from RE of CTexMgr::LoadITP).
        bpp_multipliers = {0:8,1:8,2:8,4:0x10,5:0x20,6:4,7:8,8:8,10:8}
        unk0 = read_unpack("I", f)[0]
        unk1 = read_unpack("I", f)[0]
        if len(ihdrs) > 0:
            ihdr0 = ihdrs[0]
            if (ihdr0["type"] & 0xFFFFFF00) != 0:
                raise Exception("Unexpected codepath")
            elif ihdr0["type"] == 2:
                # FIXME: Fix this
                # Statements below the return are intended logic, kept for
                # reference and currently unreachable.
                return
                unk2 = read_unpack("I", f)[0]
                unk3 = read_unpack("I", f)[0]
                texture_data = decompress_blocks_stream(f)
            else:
                if ihdr0["type"] in [1,2,3]:
                    texture_data = decompress_blocks_stream(f)
                elif ihdr0["bpp"] in valid_bpps:
                    texture_data = f.read(bpp_multipliers[ihdr0["bpp"]] * ihdr0["dim1"] * ihdr0["dim2"])
                else:
                    raise Exception("Unexpected codepath")
        check_eof("IDAT", f)
    def decode_chunk_iend(data_bytes, chunk_id):
        # End marker; no payload to parse.
        pass
    chunks = []
    # Container magic is "ITP\xff"; anything else yields an empty chunk list.
    if df.read(4) == b'ITP\xff':
        read_chunks(df, chunks)
    # All known chunk names from CTexMgr::LoadITP
    # HHDR cmp only
    # HMIP cmp only
    # IALP (Ys8)
    # IDAT (Ys8)
    # IEND (Ys8)
    # IEXT
    # IHDR (Ys8)
    # IMIP (Ys8)
    # IPAL (Ys8) (?)
    chunk_dispatch = {
        b"IHDR" : decode_chunk_ihdr,
        b"IALP" : decode_chunk_ialp,
        b"IMIP" : decode_chunk_imip,
        b"IHAS" : decode_chunk_ihas,
        b"IPAL" : decode_chunk_ipal,
        b"IDAT" : decode_chunk_idat,
        b"IEND" : decode_chunk_iend,
    }
    dispatch_chunks(chunks, chunk_dispatch)
def read_it3(df):
info_flags = [0]
def decode_chunk_info(data_bytes, chunk_id):
f = io.BytesIO(data_bytes)
name = read_fixed_string(f)
transform = read_matrix4_float(f)
info_flags[0] = read_unpack("I", f)[0]
v0 = read_vector2_float(f)
check_eof("INFO", f)
def decode_chunk_rtyp(data_bytes, chunk_id):
f = io.BytesIO(data_bytes)
material_variant = read_unpack("I", f)[0]
v0 = read_vector3_float(f)
check_eof("RTYP", f)
def decode_chunk_rty2(data_bytes, chunk_id):
f = io.BytesIO(data_bytes)
material_variant = read_unpack("I", f)[0]
byte = read_unpack("B", f)[0]
v0 = read_vector3_float(f)
check_eof("RTY2", f)
def decode_chunk_ligt(data_bytes, chunk_id):
f = io.BytesIO(data_bytes)
v0 = read_vector4_float(f)
byte = read_unpack("B", f)[0]
check_eof("LIGT", f)
def decode_chunk_lig2(data_bytes, chunk_id):
f = io.BytesIO(data_bytes)
v0 = read_vector4_float(f)
byte = read_unpack("B", f)[0]
v1 = read_vector4_float(f)
check_eof("LIG2", f)
def decode_chunk_lig3(data_bytes, chunk_id):
f = io.BytesIO(data_bytes)
v0 = read_vector4_float(f)
byte = read_unpack("B", f)[0]
float0 = read_unpack("f", f)[0]
v1 = read_vector4_float(f)
check_eof("LIG3", f)
def decode_chunk_infs(data_bytes, chunk_id):
f = io.BytesIO(data_bytes)
v0 = read_vector3_uint32(f)
check_eof("INFS", f)
def decode_chunk_infz(data_bytes, chunk_id):
f = io.BytesIO(data_bytes)
v0 = read_vector4_uint32(f)
check_eof("INFZ", f)
def decode_chunk_ikng(data_bytes, chunk_id):
f = io.BytesIO(data_bytes)
unkx = read_unpack("32B", f)
check_eof("IKNG", f)
def decode_chunk_ikpw(data_bytes, chunk_id):
f = io.BytesIO(data_bytes)
unkx = read_unpack("20B", f)
check_eof("IKPW", f)
def decode_chunk_altt(data_bytes, chunk_id):
f = io.BytesIO(data_bytes)
unkx = read_unpack("I", f)
check_eof("ALTT", f)
def decode_chunk_zfnk(data_bytes, chunk_id):
f = io.BytesIO(data_bytes)
i0, = read_unpack("I", f)
check_eof("ZFNK", f)
def decode_chunk_bbox(data_bytes, chunk_id):
f = io.BytesIO(data_bytes)
a = read_vector3_float(f)
b = read_vector3_float(f)
c = read_vector3_float(f)
d = read_vector3_float(f)
check_eof("BBOX", f)
def decode_chunk_came(data_bytes, chunk_id):
f = io.BytesIO(data_bytes)
unkx = read_unpack("3B", f)
unky = read_unpack("2B", f)
check_eof("CAME", f)
def decode_chunk_cdlo(data_bytes, chunk_id):
f = io.BytesIO(data_bytes)
read_check(f, 0x1800)
check_eof("CDLO", f)
def decode_chunk_chid(data_bytes, chunk_id):
f = io.BytesIO(data_bytes)
name = read_fixed_string(f)
num_strings = read_unpack("I", f)[0]
for i in range(num_strings):
children = read_fixed_string(f)
check_eof("CHID", f)
def decode_chunk_cstr(data_bytes, chunk_id):
f = io.BytesIO(data_bytes)
i0 = read_unpack("I", f)[0]
if i0 <= 0x100:
_ = read_fixed_string(f)
i1 = read_unpack("I", f)[0]
i2 = read_unpack("I", f)[0]
i3 = read_unpack("I", f)[0]
if i3 == 32:
read_check(f, 32)
i4 = read_unpack("I", f)[0]
if i4 == 0:
pass
# done reading
check_eof("CSTR", f)
def decode_chunk_jntv(data_bytes, chunk_id):
f = io.BytesIO(data_bytes)
v0 = read_vector4_float(f)
id_ = read_unpack("I", f)[0]
check_eof("JNTV", f)
def decode_chunk_mate(data_bytes, chunk_id):
f = io.BytesIO(data_bytes)
i0, = read_unpack("I", f)
for i in range(i0):
read_check(f, 384)
# TODO: parse datablock, segmented in 384 byte chunks
check_eof("MATE", f)
def decode_chunk_mat2(data_bytes, chunk_id):
f = io.BytesIO(data_bytes)
i0, = read_unpack("I", f)
datablock = decompress_blocks_stream(f)
# TODO: parse datablock, segmented in 384 byte chunks
check_eof("MAT2", f)
def decode_chunk_mat3(data_bytes, chunk_id):
f = io.BytesIO(data_bytes)
i0, = read_unpack("I", f)
for i in range(i0):
read_check(f, 384)
check_eof("MAT3", f)
def decode_chunk_mat4(data_bytes, chunk_id):
f = io.BytesIO(data_bytes)
unk1 = read_unpack("I", f)[0]
datablock = decompress_blocks_stream(f)
# TODO: parse datablock, segmented in 384 byte chunks
check_eof("MAT4", f)
def decode_chunk_mat5(data_bytes, chunk_id):
f = io.BytesIO(data_bytes)
unk1 = read_unpack("I", f)[0]
unk2 = read_unpack("I", f)[0]
datablock = decompress_blocks_stream(f)
# TODO: parse datablock, segmented in 384 byte chunks
check_eof("MAT5", f)
def decode_chunk_mat6(data_bytes, chunk_id):
f = io.BytesIO(data_bytes)
count = read_unpack("I", f)[0]
datablocks = []
for i in range(count):
segment_length = read_unpack("I", f)[0]
datablocks.append(decompress_blocks_stream(f))
for datablock in datablocks:
df = io.BytesIO(datablock)
magic_matm = read_unpack("I", df)[0]
matm_flags = read_unpack("I", df)[0]
matm_part_size = read_unpack("I", df)[0]
df.seek(0x28)
count_parameters = read_unpack("I", df)[0]
df.seek(4, io.SEEK_CUR)
count_tex = read_unpack("I", df)[0]
for i in range(count_parameters):
parameter = read_unpack("I", df)[0]
addr_tex = df.tell()
df.seek(matm_part_size)
magic_mate = read_unpack("I", df)[0]
mate_flags = read_unpack("I", df)[0]
mate_part_flags = read_unpack("I", df)[0]
offset_to_first_texture = read_unpack("I", df)[0]
start_addr = df.tell()
cur_mat_name = read_fixed_string(df)
df.seek(start_addr + offset_to_first_texture)
block_size = read_unpack("I", df)[0]
for i in range(count_tex):
tex_name = read_fixed_string(df)
df.seek(start_addr + block_size) # bug?
df.seek(addr_tex)
for i in range(count_tex):
uint0 = read_unpack("I", df)[0]
texture_type = read_unpack("I", df)[0]
Xwrap = read_unpack("I", df)[0]
uint3 = read_unpack("I", df)[0]
uint4 = read_unpack("I", df)[0]
uint5 = read_unpack("I", df)[0]
uint6 = read_unpack("I", df)[0]
check_eof("MAT6", f)
def decode_chunk_plug(data_bytes, chunk_id):
f = io.BytesIO(data_bytes)
for i in range(16):
count = read_unpack("I", f)[0]
if count != 0:
i0 = read_unpack("I", f)[0]
read_check(f, i0)
_ = read_unpack("I", f)[0]
check_eof("PLUG", f)
def decode_chunk_plg2(data_bytes, chunk_id):
f = io.BytesIO(data_bytes)
arr = list(read_unpack("16I", f))
for x in arr:
if x != 0:
i0, _ = read_unpack("2I", f)
read_check(f, i0)
check_eof("PLG2", f)
def decode_chunk_plu2(data_bytes, chunk_id):
f = io.BytesIO(data_bytes)
for i in range(16):
count = read_unpack("I", f)[0]
if count != 0:
_ = read_unpack("I", f)[0]
_ = decompress_blocks_stream(f)
_ = read_unpack("I", f)[0]
check_eof("PLU2", f)
def decode_chunk_plu3(data_bytes, chunk_id):
f = io.BytesIO(data_bytes)
arr = list(read_unpack("16I", f))
for x in arr:
if x != 0:
_ = read_unpack("2I", f)
_ = decompress_blocks_stream(f)
check_eof("PLU3", f)
def decode_chunk_bone(data_bytes, chunk_id):
f = io.BytesIO(data_bytes)
_ = read_fixed_string(f)
int0, = read_unpack("I", f)
for i in range(int0):
_ = read_fixed_string(f)
for i in range(68):
_ = read_fixed_string(f)
_ = read_fixed_string(f)
_ = read_fixed_string(f)
check_eof("BONE", f)
def decode_chunk_bon2(data_bytes, chunk_id):
f = io.BytesIO(data_bytes)
_ = read_fixed_string(f)
int0, = read_unpack("I", f)
_ = decompress_blocks_stream(f) # int0 << 16 >> 10 chunks
_ = decompress_blocks_stream(f) # 4352
_ = decompress_blocks_stream(f) # 4352; seeked past/skipped
_ = decompress_blocks_stream(f) # 4352; seeked past/skipped
check_eof("BON2", f)
def decode_chunk_bon3(data_bytes, chunk_id):
f = io.BytesIO(data_bytes)
int0 = read_unpack("I", f)[0]
mesh_name = read_fixed_string(f)
int1 = read_unpack("I", f)[0]
datablocks = []
for i in range(3):
datablocks.append(decompress_blocks_stream(f))
with io.BytesIO(datablocks[0]) as df:
while df.tell() < len(datablocks[0]):
mesh_name = read_fixed_string(df)
with io.BytesIO(datablocks[1]) as df:
while df.tell() < len(datablocks[1]):
mesh_name = read_fixed_string(df)
# This is seeked past/skipped in CNode::Load2
# with io.BytesIO(datablocks[2]) as df:
# while df.tell() < len(datablocks[2]):
# offset_mat = read_matrix4_float(df)
check_eof("BON3", f)
def decode_chunk_atre(data_bytes, chunk_id):
f = io.BytesIO(data_bytes)
int0 = read_unpack("I", f)[0]
int1 = read_unpack("I", f)[0]
int2 = read_unpack("I", f)[0]
# 76, 32, and 2 byte chunks in datablocks respectively
datablocks = []
for i in range(3):
datablocks.append(decompress_blocks_stream(f))
check_eof("ATRE", f)
def decode_chunk_texf(data_bytes, chunk_id):
f = io.BytesIO(data_bytes)
name1 = read_fixed_string(f, str_len=36)
name2 = read_fixed_string(f, str_len=32)
check_eof("TEXF", f)
def decode_chunk_texi(data_bytes, chunk_id):
f = io.BytesIO(data_bytes)
name = read_fixed_string(f, str_len=32)
int0 = read_unpack("I", f)[0]
return # FIXME itp
read_itp(f)
check_eof("TEXI", f)
def decode_chunk_tex2(data_bytes, chunk_id):
f = io.BytesIO(data_bytes)
int0 = read_unpack("I", f)[0]
name = read_fixed_string(f)
return # FIXME itp
read_itp(f)
check_eof("TEX2", f)
def decode_chunk_glow(data_bytes, chunk_id):
f = io.BytesIO(data_bytes)
i0 = read_unpack("I", f)[0]
check_eof("GLOW", f)
def decode_chunk_glky(data_bytes, chunk_id):
f = io.BytesIO(data_bytes)
i0 = read_unpack("I", f)[0]
for i in range(i0):
read_check(f, 12)
check_eof("GLKY", f)
def decode_chunk_glk2(data_bytes, chunk_id):
f = io.BytesIO(data_bytes)
i0 = read_unpack("I", f)[0]
_ = decompress_blocks_stream(f) # ?
_ = decompress_blocks_stream(f) # ?
_ = decompress_blocks_stream(f) # ?
check_eof("GLK2", f)
def decode_chunk_glk3(data_bytes, chunk_id):
f = io.BytesIO(data_bytes)
i0 = read_unpack("I", f)[0]
_ = decompress_blocks_stream(f) # 16 bytes segmented
_ = decompress_blocks_stream(f) # 2 bytes segmented
_ = decompress_blocks_stream(f) # 2 bytes segmented
_ = decompress_blocks_stream(f) # 1 byte segmented
check_eof("GLK3", f)
def decode_chunk_vpac(data_bytes, chunk_id):
f = io.BytesIO(data_bytes)
i0, _ = read_unpack("HH", f)
for i in range(i0):
cnt0 = read_unpack("I", f)[0]
read_check(f, 112)
read_check(f, 112 * cnt0)
cnt1 = read_unpack("I", f)[0]
_ = read_unpack("I", f)[0]
read_check(f, 4 * cnt1)
check_eof("VPAC", f)
def decode_chunk_vpa2(data_bytes, chunk_id):
f = io.BytesIO(data_bytes)
i0, _ = read_unpack("HH", f)
for i in range(i0):
cnt0 = read_unpack("I", f)[0]
read_check(f, 112)
if cnt0 != 0:
_ = decompress_blocks_stream(f) # 112 bytes segmented
cnt1 = read_unpack("I", f)[0]
_ = read_unpack("I", f)[0]
if cnt1 != 0:
_ = decompress_blocks_stream(f) # 4 bytes segmented
check_eof("VPA2", f)
def decode_chunk_vpa4(data_bytes, chunk_id):
f = io.BytesIO(data_bytes)
i0, i1 = read_unpack("II", f)
total_size_1 = 0
for i in range(i0):
total_size_1 += 40 * read_unpack("I", f)[0]
read_check(f, 112)
sz1 = i0 * 120
chunk_count1 = (total_size_1 // 0x40000) + (1 if (total_size_1 % 0x40000) != 0 else 0)
for i in range(chunk_count1):
_ = decompress_blocks_stream(f) # 120 bytes segmented
# while True:
# partbuf1 = decompress_blocks_stream(f) # 120 bytes segmented
# if len(partbuf1) != 0x40000:
# break
i2, = read_unpack("I", f)
total_size_2 = 0
for i in range(i0):
total_size_2 += 2 * read_unpack("I", f)[0]
sz2 = i0 * 16
chunk_count2 = (total_size_2 // 0x40000) + (1 if (total_size_2 % 0x40000) != 0 else 0)
for i in range(chunk_count2):
_ = decompress_blocks_stream(f) # 16 bytes segmented
# while True:
# partbuf2 = decompress_blocks_stream(f) # 120 bytes segmented
# if len(partbuf2) != 0x40000:
# break
check_eof("VPA4", f)
def decode_chunk_vpa5(data_bytes, chunk_id):
f = io.BytesIO(data_bytes)
i0, i1 = read_unpack("II", f)
total_size_1 = 0
for i in range(i0):
total_size_1 += 40 * read_unpack("I", f)[0]
read_check(f, 112)
sz1 = i0 * 120
chunk_count1 = (total_size_1 // 0x40000) + (1 if (total_size_1 % 0x40000) != 0 else 0)
for i in range(chunk_count1):
_ = decompress_blocks_stream(f) # 120 bytes segmented
# while True:
# partbuf1 = decompress_blocks_stream(f) # 120 bytes segmented
# if len(partbuf1) != 0x40000:
# break
i2, = read_unpack("I", f)
total_size_2 = 0
for i in range(i0):
total_size_2 += 2 * read_unpack("I", f)[0]
sz2 = i0 * 16
chunk_count2 = (total_size_2 // 0x40000) + (1 if (total_size_2 % 0x40000) != 0 else 0)
for i in range(chunk_count2):
_ = decompress_blocks_stream(f) # 16 bytes segmented
# while True:
# partbuf2 = decompress_blocks_stream(f) # 120 bytes segmented
# if len(partbuf2) != 0x40000:
# break
check_eof("VPA5", f)
def decode_chunk_vpa6(data_bytes, chunk_id):
f = io.BytesIO(data_bytes)
i0, i1 = read_unpack("II", f)
total_size_1 = 0
for i in range(i0):
total_size_1 += 40 * read_unpack("I", f)[0]
read_check(f, 112)
sz1 = i0 * 120
chunk_count1 = (total_size_1 // 0x40000) + (1 if (total_size_1 % 0x40000) != 0 else 0)
for i in range(chunk_count1):
_ = decompress_blocks_stream(f) # 120 bytes segmented
# while True:
# partbuf1 = decompress_blocks_stream(f) # 120 bytes segmented
# if len(partbuf1) != 0x40000:
# break
i2, = read_unpack("I", f)
total_size_2 = 0
for i in range(i0):
total_size_2 += 2 * read_unpack("I", f)[0]
sz2 = i0 * 16
chunk_count2 = (total_size_2 // 0x40000) + (1 if (total_size_2 % 0x40000) != 0 else 0)
for i in range(chunk_count2):
_ = decompress_blocks_stream(f) # 16 bytes segmented
# while True:
# partbuf2 = decompress_blocks_stream(f) # 120 bytes segmented
# if len(partbuf2) != 0x40000:
# break
check_eof("VPA6", f)
def decode_chunk_vpa7(data_bytes, chunk_id):
f = io.BytesIO(data_bytes)
i0, i1 = read_unpack("II", f)
total_size_1 = 0
for i in range(i0):
total_size_1 += 40 * read_unpack("I", f)[0]
read_check(f, 112)
sz1 = i0 * 120
chunk_count1 = (total_size_1 // 0x40000) + (1 if (total_size_1 % 0x40000) != 0 else 0)
for i in range(chunk_count1):
_ = decompress_blocks_stream(f) # 120 bytes segmented
# while True:
# partbuf1 = decompress_blocks_stream(f) # 120 bytes segmented
# if len(partbuf1) != 0x40000:
# break
i2, = read_unpack("I", f)
total_size_2 = 0
for i in range(i0):
total_size_2 += 2 * read_unpack("I", f)[0]
sz2 = i0 * 16
chunk_count2 = (total_size_2 // 0x40000) + (1 if (total_size_2 % 0x40000) != 0 else 0)
for i in range(chunk_count2):
_ = decompress_blocks_stream(f) # 16 bytes segmented
# while True:
# partbuf2 = decompress_blocks_stream(f) # 120 bytes segmented
# if len(partbuf2) != 0x40000:
# break
check_eof("VPA7", f)
def decode_chunk_vpa8(data_bytes, chunk_id):
f = io.BytesIO(data_bytes)
i0, _, sz1, sz2 = read_unpack("HHII", f)
info_1 = decompress_blocks_stream(f) # 116 bytes segmented
total_size_1 = 0
for i in range(i0):
info_segment = info_1[i * 116:(i + 1) * 116]
total_size_1 += 40 * int.from_bytes(info_segment[28:32], byteorder="little")
chunk_count1 = (sz1 // 0x40000) + (1 if (sz1 % 0x40000) != 0 else 0)
for i in range(chunk_count1):
_ = decompress_blocks_stream(f) # 120 bytes segmented
# while True:
# partbuf1 = decompress_blocks_stream(f) # 120 bytes segmented
# if len(partbuf1) != 0x40000:
# break
info_2 = decompress_blocks_stream(f) # 12 bytes segmented
total_size_2 = 0
for i in range(i0):
info_segment = info_2[i * 12:(i + 1) * 12]
total_size_2 += 2 * int.from_bytes(info_segment[4:8], byteorder="little")
chunk_count2 = (sz2 // 0x40000) + (1 if (sz2 % 0x40000) != 0 else 0)
for i in range(chunk_count2):
_ = decompress_blocks_stream(f) # 16 bytes segmented
# while True:
# partbuf2 = decompress_blocks_stream(f) # 16 bytes segmented
# if len(partbuf2) != 0x40000:
# break
check_eof("VPA8", f)
def decode_chunk_vpa9(data_bytes, chunk_id):
f = io.BytesIO(data_bytes)
cnt1 = read_unpack("I", f)[0]
for i in range(cnt1):
sz1 = read_unpack("I", f)[0]
chunk_count1 = (sz1 // 0x40000) + (1 if (sz1 % 0x40000) != 0 else 0)
for ii in range(chunk_count1):
_ = decompress_blocks_stream(f)
sz2 = read_unpack("I", f)[0]
chunk_count2 = (sz2 // 0x40000) + (1 if (sz2 % 0x40000) != 0 else 0)
for ii in range(chunk_count2):
_ = decompress_blocks_stream(f)
check_eof("VPA9", f)
def decode_chunk_vpXX(data_bytes, chunk_id):
f = io.BytesIO(data_bytes)
count = read_unpack("I", f)[0]
vertices_datablocks = []
for i in range(count):
sz = read_unpack("I", f)[0]
vertices_datablocks.append(decompress_blocks_stream(f))
indexes_datablocks = []
for i in range(count):
sz = read_unpack("I", f)[0]
indexes_datablocks.append(decompress_blocks_stream(f))
for i in range(count):
dfv = io.BytesIO(vertices_datablocks[i])
FourCC = read_unpack("I", dfv)[0] # == 0x43415056
if FourCC == 0x43415056: # VPAC
version = read_unpack("I", dfv)[0]
v0 = read_vector4_float(dfv)
v1 = read_vector4_float(dfv)
v2 = read_vector4_float(dfv)
uint0_bytes_array = memoryview(dfv.read(0x4D * 4))
uint0_array = uint0_bytes_array.cast("I")
for ii in range(uint0_array[0]):
position = read_vector4_float(dfv)
_ = read_vector4_float(dfv)
_ = read_vector4_float(dfv)
_ = read_vector4_float(dfv)
_ = read_vector2_float(dfv)
_ = read_unpack("I", dfv)[0]
_ = read_unpack("I", dfv)[0]
uv = read_vector2_float(dfv)
uv2 = read_vector2_float(dfv)
uv3 = read_vector4_float(dfv)
_ = read_vector4_float(dfv)
_ = read_vector4_float(dfv)
array_weights = read_unpack("8B", dfv)
bone_index_weights = read_unpack("8B", dfv)
if chunk_id == b"VPAX":
indeces = memoryview(indexes_datablocks[i]).cast("H")
elif chunk_id == b"VP11":
indeces = memoryview(indexes_datablocks[i]).cast("I")
check_eof("VPAX", f)
def decode_chunk_mani(data_bytes, chunk_id):
f = io.BytesIO(data_bytes)
i0 = read_unpack("I", f)[0]
for i in range(i0):
_ = read_fixed_string(f)
check_eof("MANI", f)
def decode_chunk_man2(data_bytes, chunk_id):
f = io.BytesIO(data_bytes)
i0 = read_unpack("I", f)[0]
_ = decompress_blocks_stream(f)
check_eof("MAN2", f)
def decode_chunk_kant(data_bytes, chunk_id):
f = io.BytesIO(data_bytes)
cnt1 = read_unpack("I", f)[0]
for i in range(cnt1):
_ = read_unpack("I", f)
_ = read_unpack("IIII", f)
_ = read_unpack("B", f)
cnt2 = read_unpack("I", f)[0]
for i in range(cnt2):
_ = read_unpack("I", f)
_ = read_unpack("IIII", f)
_ = read_unpack("B", f)
cnt3 = read_unpack("I", f)[0]
for i in range(cnt3):
_ = read_unpack("I", f)
_ = read_unpack("IIII", f)
_ = read_unpack("B", f)
cnt4 = read_unpack("I", f)[0]
for i in range(cnt4):
_ = read_unpack("I", f)
_ = read_unpack("IIII", f)
_ = read_unpack("B", f)
cnt5 = read_unpack("I", f)[0]
for i in range(cnt5):
_ = read_unpack("I", f)
_ = read_unpack("IIII", f)
_ = read_unpack("B", f)
cnt6 = read_unpack("I", f)[0]
for i in range(cnt6):
_ = read_unpack("I", f)
_ = read_unpack("IIII", f)
_ = read_unpack("B", f)
cnt7 = read_unpack("I", f)[0]
for i in range(cnt7):
_ = read_unpack("I", f)
_ = read_unpack("IIII", f)
_ = read_unpack("B", f)
cnt8 = read_unpack("I", f)[0]
for i in range(cnt8):
_ = read_unpack("I", f)
_ = read_unpack("IIII", f)
_ = read_unpack("B", f)
cnt9 = read_unpack("I", f)[0]
for i in range(cnt9):
_ = read_unpack("I", f)
_ = read_unpack("IIII", f)
_ = read_unpack("B", f)
_ = read_unpack("I", f)[0]
check_eof("KANT", f)
def decode_chunk_knt2(data_bytes, chunk_id):
    """Walk a KNT2 chunk to validate its layout; returns nothing.

    Layout: ten uint32 entry counts up front, then ten tables in the same
    order, each table being count x (uint32, 4x uint32, uint8) records.
    The original spelled out all ten count reads and ten loops as
    duplicated sections; they are byte-for-byte identical, so this is
    collapsed into two loops.
    """
    f = io.BytesIO(data_bytes)
    counts = [read_unpack("I", f)[0] for _ in range(10)]
    for cnt in counts:
        for _ in range(cnt):
            _ = read_unpack("I", f)
            _ = read_unpack("IIII", f)
            _ = read_unpack("B", f)
    check_eof("KNT2", f)
def decode_chunk_knt3(data_bytes, chunk_id):
    """Walk a KNT3 chunk to validate its layout; returns nothing.

    Layout: ten uint32 counts up front, but only the first five are
    followed by tables of (4x uint32, 2x uint16, uint8) records; the last
    five counts are read and otherwise unused (matching the original).
    The original's `if n > 0` guards were redundant — `range(0)` already
    yields nothing — and the five duplicated sections are collapsed.
    """
    f = io.BytesIO(data_bytes)
    counts = [read_unpack("I", f)[0] for _ in range(10)]
    for cnt in counts[:5]:
        for _ in range(cnt):
            _ = read_unpack("IIII", f)
            _ = read_unpack("HH", f)
            _ = read_unpack("B", f)
    check_eof("KNT3", f)
def decode_chunk_knt4(data_bytes, chunk_id):
    # KNT4 is intentionally not decoded: per the comment below, no known
    # CNode::Load2 implementation consumes this chunk type, so there is no
    # reference parser to validate against.
    return # Never used in CNode::Load2
    # ---- Everything below is unreachable; retained only as a sketch of the
    # presumed layout. It parallels decode_chunk_knt3 (ten uint32 counts,
    # then tables of 4x uint32 + 2x uint16 + uint8 records) but iterates six
    # tables instead of five.
    # NOTE(review): unverified against any executable — confirm before enabling.
    f = io.BytesIO(data_bytes)
    i0 = read_unpack("I", f)[0]
    i1 = read_unpack("I", f)[0]
    i2 = read_unpack("I", f)[0]
    i3 = read_unpack("I", f)[0]
    i4 = read_unpack("I", f)[0]
    i5 = read_unpack("I", f)[0]
    i6 = read_unpack("I", f)[0]
    i7 = read_unpack("I", f)[0]
    i8 = read_unpack("I", f)[0]
    i9 = read_unpack("I", f)[0]
    if i0 > 0:
        for i in range(i0):
            _ = read_unpack("IIII", f)
            _ = read_unpack("HH", f)
            _ = read_unpack("B", f)
    if i1 > 0:
        for i in range(i1):
            _ = read_unpack("IIII", f)
            _ = read_unpack("HH", f)
            _ = read_unpack("B", f)
    if i2 > 0:
        for i in range(i2):
            _ = read_unpack("IIII", f)
            _ = read_unpack("HH", f)
            _ = read_unpack("B", f)
    if i3 > 0:
        for i in range(i3):
            _ = read_unpack("IIII", f)
            _ = read_unpack("HH", f)
            _ = read_unpack("B", f)
    if i4 > 0:
        for i in range(i4):
            _ = read_unpack("IIII", f)
            _ = read_unpack("HH", f)
            _ = read_unpack("B", f)
    if i5 > 0:
        for i in range(i5):
            _ = read_unpack("IIII", f)
            _ = read_unpack("HH", f)
            _ = read_unpack("B", f)
    check_eof("KNT4", f)
def decode_chunk_kani(data_bytes, chunk_id):
    """Walk a KANI chunk to validate its layout; returns nothing.

    Layout: three consecutive tables, each a uint32 entry count followed
    by that many (uint32, 4x uint32, uint8) records. The three duplicated
    sections of the original are collapsed into one loop.
    """
    f = io.BytesIO(data_bytes)
    for _ in range(3):
        cnt = read_unpack("I", f)[0]
        for _ in range(cnt):
            _ = read_unpack("I", f)
            _ = read_unpack("IIII", f)
            _ = read_unpack("B", f)
    check_eof("KANI", f)
def decode_chunk_kan2(data_bytes, chunk_id):
    """Walk a KAN2 chunk to validate its layout; returns nothing.

    Layout: three count-prefixed tables of (uint32, 4x uint32, uint8)
    records, then a pair of uint32s, then a fourth table of the same
    record shape. Duplicated table sections are collapsed into a helper.
    """
    f = io.BytesIO(data_bytes)

    def _table():
        # One table: uint32 count, then count records.
        cnt = read_unpack("I", f)[0]
        for _ in range(cnt):
            _ = read_unpack("I", f)
            _ = read_unpack("IIII", f)
            _ = read_unpack("B", f)

    for _ in range(3):
        _table()
    _ = read_unpack("II", f)
    _table()
    check_eof("KAN2", f)
def decode_chunk_kan3(data_bytes, chunk_id):
    """Walk a KAN3 chunk to validate its layout; returns nothing.

    Layout: three count-prefixed tables of (uint32, 4x uint32, uint8)
    records, then twice: a pair of uint32s followed by another such table.
    Duplicated table sections are collapsed into a helper.
    """
    f = io.BytesIO(data_bytes)

    def _table():
        # One table: uint32 count, then count records.
        cnt = read_unpack("I", f)[0]
        for _ in range(cnt):
            _ = read_unpack("I", f)
            _ = read_unpack("IIII", f)
            _ = read_unpack("B", f)

    for _ in range(3):
        _table()
    for _ in range(2):
        _ = read_unpack("II", f)
        _table()
    check_eof("KAN3", f)
def decode_chunk_kan4(data_bytes, chunk_id):
    """Walk a KAN4 chunk to validate its layout; returns nothing.

    Layout: three count-prefixed tables of (uint32, 4x uint32, uint8)
    records, then twice a pair of uint32s followed by another such table,
    then one trailing uint32.
    """
    stream = io.BytesIO(data_bytes)

    def _read_table():
        # One table: uint32 count, then count records.
        entry_count = read_unpack("I", stream)[0]
        for _ in range(entry_count):
            _ = read_unpack("I", stream)
            _ = read_unpack("IIII", stream)
            _ = read_unpack("B", stream)

    for _ in range(3):
        _read_table()
    for _ in range(2):
        _ = read_unpack("II", stream)
        _read_table()
    _ = read_unpack("I", stream)
    check_eof("KAN4", stream)
def decode_chunk_kan5(data_bytes, chunk_id):
    """Walk a KAN5 chunk to validate its layout; returns nothing.

    Layout: ten uint32 counts up front, but only the first five are
    followed by tables of (2x uint16, 4x uint32, uint8) records; the last
    five counts are read and otherwise unused (matching the original).
    The redundant `if n > 0` guards and five duplicated sections of the
    original are collapsed.
    """
    f = io.BytesIO(data_bytes)
    counts = [read_unpack("I", f)[0] for _ in range(10)]
    for cnt in counts[:5]:
        for _ in range(cnt):
            _ = read_unpack("HH", f)
            _ = read_unpack("IIII", f)
            _ = read_unpack("B", f)
    check_eof("KAN5", f)
def decode_chunk_kan6(data_bytes, chunk_id):
    """Walk a KAN6 chunk to validate its layout; returns nothing.

    Same structure as KAN5 but with the per-record field order reversed:
    ten uint32 counts up front, then tables of (4x uint32, 2x uint16,
    uint8) records for the first five counts only; the last five counts
    are read and otherwise unused (matching the original). Redundant
    guards and duplicated sections are collapsed.
    """
    f = io.BytesIO(data_bytes)
    counts = [read_unpack("I", f)[0] for _ in range(10)]
    for cnt in counts[:5]:
        for _ in range(cnt):
            _ = read_unpack("IIII", f)
            _ = read_unpack("HH", f)
            _ = read_unpack("B", f)
    check_eof("KAN6", f)
def decode_chunk_kan7(data_bytes, chunk_id):
    """Walk a KAN7 chunk to validate its layout; returns nothing.

    Layout: ten uint32 counts up front; for each of the FIRST FIVE counts
    that is non-zero, one uint32 followed by a compressed block stream
    (the `if > 0` guard matters here — it gates byte consumption, unlike
    the loop-only guards elsewhere). Each decompressed block is then
    parsed as a small header plus `nb_structs` records of four float4
    vectors and two uint32s. The five duplicated guarded sections of the
    original are collapsed into one loop.
    """
    f = io.BytesIO(data_bytes)
    counts = [read_unpack("I", f)[0] for _ in range(10)]
    datablocks = []
    for cnt in counts[:5]:
        if cnt > 0:
            _ = read_unpack("I", f)[0]
            datablocks.append(decompress_blocks_stream(f))
    for datablock in datablocks:
        df = io.BytesIO(datablock)
        magic = read_unpack("I", df)[0]
        check = read_unpack("I", df)[0]
        df.seek(4, io.SEEK_CUR)
        nb_structs = read_unpack("I", df)[0]
        df.seek(4, io.SEEK_CUR)
        unit = read_unpack("I", df)[0]
        # Skip the remainder of the block header before the record array.
        df.seek(0x28, io.SEEK_CUR)
        for _ in range(nb_structs):
            data = read_vector4_float(df)
            _ = read_vector4_float(df)
            _ = read_vector4_float(df)
            _ = read_vector4_float(df)
            tick = read_unpack("I", df)[0]
            _ = read_unpack("I", df)[0]
    check_eof("KAN7", f)
chunks = []
read_chunks(df, chunks)
# All known chunk names from CNode::Load2
# @LTT cmp only
# ALTT (Ys8) (?)
# AON3 cmpe, actually BON3
# ATRE (Ys8) (?)
# BAME cmpe, actually CAME
# BBOX (Ys8)
# BON2 (?)
# BON3 (Ys8)
# BONE (?)
# BSTR cmp only
# CAME (Ys8) (?)
# CDLO (?)
# CHID (Ys8)
# CSTR (?)
# HNFO cmp only
# IKNG (Ys8) (?)
# IKPW (Ys8) (?)
# INFO (Ys8)
# INFS (?)
# INFZ (Ys8)
# JNTV (Ys8)
# KAN4 (Ys8) (?)
# KAN5 (?)
# KAN6 (Ys8) (?)
# KAN7 (Ys8)
# LAN2 cmpe, actually KAN2
# LANI cmpe, actually KANI
# LATE cmp only
# LIG2 (?)
# LIG3 (Ys8)
# LIGT (?)
# MAN2 (?)
# MANI (?)
# MAT2 (?)
# MAT3 (?)
# MAT4 (Ys8) (?)
# MAT5 (Ys8) (?)
# MAT6 (Ys8)
# MATE (?)
# OLU2 cmpe, actually PLU2
# PLG2 (?)
# PLU2 (Ys8) (?)
# PLU3 (Ys8) (?)
# PLUG (?)
# RTY2 (Ys8)
# RTYP (?)
# TEXI (Ys8)
# TEX2 (Ys9)
# UPA4 cmpe, actually VPA4
# UPA5 cmp only
# UPA6 cmp only
# UPA7 cmpe, actually VPA7
# UPA9 cmpe, actually VPA7
# UPAX cmpe, actually VPAX
# VPA2 (?)
# VPA4 (?)
# VPA5 (?)
# VPA6 (?)
# VPA7 (Ys8) (?)
# VPA8 (Ys8) (?)
# VPA9 (Ys8) (?)
# VPAC (?)
# VPAX (Ys8)
# VP11 (Ys9)
# ZFNK (?)
chunk_dispatch = {
b"INFO" : decode_chunk_info,
b"RTYP" : decode_chunk_rtyp,
b"RTY2" : decode_chunk_rty2,
b"LIGT" : decode_chunk_ligt,
b"LIG2" : decode_chunk_lig2,
b"LIG3" : decode_chunk_lig3,
b"INFS" : decode_chunk_infs,
b"INFZ" : decode_chunk_infz,
b"IKNG" : decode_chunk_ikng,
b"IKPW" : decode_chunk_ikpw,
b"ALTT" : decode_chunk_altt,
b"ZFNK" : decode_chunk_zfnk,
b"BBOX" : decode_chunk_bbox,
b"CHID" : decode_chunk_chid,
b"CAME" : decode_chunk_came,
b"CDLO" : decode_chunk_cdlo,
b"CSTR" : decode_chunk_cstr,
b"JNTV" : decode_chunk_jntv,
b"MATE" : decode_chunk_mate,
b"MAT2" : decode_chunk_mat2,
b"MAT3" : decode_chunk_mat3,
b"MAT4" : decode_chunk_mat4,
b"MAT5" : decode_chunk_mat5,
b"MAT6" : decode_chunk_mat6,
b"PLUG" : decode_chunk_plug,
b"PLG2" : decode_chunk_plg2,
b"PLU2" : decode_chunk_plu2,
b"PLU3" : decode_chunk_plu3,
b"BONE" : decode_chunk_bone,
b"BON2" : decode_chunk_bon2,
b"BON3" : decode_chunk_bon3,
b"ATRE" : decode_chunk_atre,
b"TEXF" : decode_chunk_texf,
b"TEXI" : decode_chunk_texi,
b"TEX2" : decode_chunk_tex2,
b"GLOW" : decode_chunk_glow,
b"GLKY" : decode_chunk_glky,
b"GLK2" : decode_chunk_glk2,
b"GLK3" : decode_chunk_glk3,
b"VPAC" : decode_chunk_vpac,
b"VPA2" : decode_chunk_vpa2,
b"VPA4" : decode_chunk_vpa4,
b"VPA5" : decode_chunk_vpa5,
b"VPA6" : decode_chunk_vpa6,
b"VPA7" : decode_chunk_vpa7,
b"VPA8" : decode_chunk_vpa8,
b"VPA9" : decode_chunk_vpa9,
b"VPAX" : decode_chunk_vpXX,
b"VP11" : decode_chunk_vpXX,
b"MANI" : decode_chunk_mani,
b"MAN2" : decode_chunk_man2,
b"KANT" : decode_chunk_kant,
b"KNT2" : decode_chunk_knt2,
b"KNT3" : decode_chunk_knt3,
b"KNT4" : decode_chunk_knt4,
b"KANI" : decode_chunk_kani,
b"KAN2" : decode_chunk_kan2,
b"KAN3" : decode_chunk_kan3,
b"KAN4" : decode_chunk_kan4,
b"KAN5" : decode_chunk_kan5,
b"KAN6" : decode_chunk_kan6,
b"KAN7" : decode_chunk_kan7,
}
dispatch_chunks(chunks, chunk_dispatch)
if __name__ == "__main__":
    # Script entry point: parse the it3 file named on the command line.
    input_path = sys.argv[1]
    with open(input_path, "rb") as in_file:
        read_it3(in_file)
Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment