Skip to content

Instantly share code, notes, and snippets.

@Podshot
Last active September 20, 2023 21:04
Show Gist options
  • Save Podshot/537e5e8f12fd580bdf1f705eb2b19119 to your computer and use it in GitHub Desktop.
Proof of Concept for converting BlockState Long IDs to Palette indices. Critical for loading 1.13 Minecraft Java Edition worlds. Run with the command: "python proof_of_concept.py" to use. If you use any snippet of code from this, please give appropriate credit to the respective authors.
"""
Standalone chunk loader/reader from pymclevel with all the fancy repair/OOP stuff removed
"""
from __future__ import unicode_literals, print_function
import os
try:
from pymclevel import nbt
except ImportError:
import nbt
import struct
import zlib
import numpy as np
# Region-file layout constants (Anvil/McRegion format).
SECTOR_BYTES = 4096  # region files are allocated in 4 KiB sectors
SECTOR_INTS = SECTOR_BYTES / 4  # 32-bit slots per sector (Py2 integer division)
CHUNK_HEADER_SIZE = 5  # 4-byte payload length + 1-byte compression type
VERSION_GZIP = 1  # chunk compression type codes stored in the chunk header
VERSION_DEFLATE = 2
def read_region_for_chunk_file(region, cx, cz):
    """
    Read one chunk's root NBT tag out of an Anvil region file.

    :param region: path to the .mca region file
    :param cx: chunk X coordinate (masked to the 0-31 region-local range)
    :param cz: chunk Z coordinate (masked to the 0-31 region-local range)
    :return: the chunk's root TAG_Compound, or None when the chunk is
             missing or the sector data is malformed (a message is printed).
    """
    fp = open(region, 'rb+')
    try:
        file_size = os.path.getsize(region)
        if file_size & 0xfff:
            # Pad the file out to a whole number of 4 KiB sectors.
            file_size = (file_size | 0xfff) + 1
            fp.truncate(file_size)
        if file_size == 0:
            # Brand-new region file: reserve the two header sectors.
            file_size = SECTOR_BYTES * 2
            fp.truncate(file_size)
        fp.seek(0)
        _offsets = fp.read(SECTOR_BYTES)
        _modification_times = fp.read(SECTOR_BYTES)
        # Track which sectors the offset table references; the two header
        # sectors are always in use.  (// keeps this an int on Python 3.)
        free_sectors = [True] * (file_size // SECTOR_BYTES)
        free_sectors[0:2] = False, False
        # frombuffer replaces the deprecated numpy.fromstring; both tables
        # are big-endian 32-bit words.
        offsets = np.frombuffer(_offsets, dtype='>u4')
        modification_times = np.frombuffer(_modification_times, dtype='>u4')
        for offset in offsets:
            sector = offset >> 8
            count = offset & 0xff
            for i in range(sector, sector + count):
                if i >= len(free_sectors):
                    print('Offset table went past EOF')
                    break
                free_sectors[i] = False
        cx &= 0x1f
        cz &= 0x1f
        chunk_offset = offsets[cx + cz * 32]
        # Fix: the original printed the diagnostics below but then fell
        # through and crashed on the empty sector data; bail out instead.
        if chunk_offset == 0:
            print('Chunk does not exist')
            return None
        sector_start = chunk_offset >> 8
        sector_nums = chunk_offset & 0xff
        if sector_nums == 0:
            print('Chunk does not exist')
            return None
        if sector_start + sector_nums > len(free_sectors):
            print('Chunk does not exist')
            return None
        fp.seek(sector_start * SECTOR_BYTES)
        data = fp.read(sector_nums * SECTOR_BYTES)
    finally:
        # Fix: the original leaked the file handle.
        fp.close()
    if len(data) < 5:
        print('Chunk/Sector is malformed')
        return None
    # Chunk header: 4-byte big-endian payload length, 1-byte compression type.
    length = struct.unpack_from('>I', data)[0]
    _format = struct.unpack_from('B', data, 4)[0]
    data = data[5:length + 5]
    readable_data = None
    if _format == VERSION_GZIP:
        readable_data = nbt.gunzip(data)
    if _format == VERSION_DEFLATE:
        readable_data = zlib.decompress(data)
    root = nbt.load(buf=readable_data)
    return root
if __name__ == '__main__':
    # Interactive driver: prompt for a region file and a chunk position,
    # then dump that chunk's root NBT tag to stdout.
    path = raw_input('Enter region file path: ')
    x = int(raw_input('Enter chunk X coordinate: '))
    z = int(raw_input('Enter chunk Z coordinate: '))
    print(read_region_for_chunk_file(path, x, z))
# vim:set sw=2 sts=2 ts=2:
"""
Named Binary Tag library. Serializes and deserializes TAG_* objects
to and from binary data. Load a Minecraft level by calling nbt.load().
Create your own TAG_* objects and set their values.
Save a TAG_* object to a file or StringIO object.
Read the test functions at the end of the file to get started.
This library requires Numpy. Get it here:
http://new.scipy.org/download.html
Official NBT documentation is here:
http://www.minecraft.net/docs/NBT.txt
Copyright 2010 David Rio Vierra
"""
import collections
from contextlib import contextmanager
import gzip
import itertools
import logging
import struct
import zlib
from cStringIO import StringIO
import numpy
from numpy import array, zeros, fromstring
#-----------------------------------------------------------------------------
# TRACKING PE ERRORS
#
# DEBUG_PE and dump_fName are overridden by leveldbpocket module
import sys
# Debug switches for Pocket Edition NBT loading; the comment above says the
# leveldbpocket module overrides both of these at runtime.
DEBUG_PE = False
dump_fName = 'dump_pe.txt'  # file that TAG_Value.load_from appends dumps to
logging.basicConfig()
log = logging.getLogger(__name__)
class NBTFormatError(RuntimeError):
    """Raised when a buffer does not contain well-formed NBT data."""
# Numeric tag-type IDs exactly as they appear on disk (the one-byte type
# code written before each tag).
TAG_BYTE = 1
TAG_SHORT = 2
TAG_INT = 3
TAG_LONG = 4
TAG_FLOAT = 5
TAG_DOUBLE = 6
TAG_BYTE_ARRAY = 7
TAG_STRING = 8
TAG_LIST = 9
TAG_COMPOUND = 10
TAG_INT_ARRAY = 11
TAG_LONG_ARRAY = 12
TAG_SHORT_ARRAY = -1  # not part of the official format; used by some mods
class TAG_Value(object):
    """Simple values. Subclasses override fmt to change the type and size.
    Subclasses may set data_type instead of overriding setValue for automatic data type coercion"""
    __slots__ = ('_name', '_value')
    def __init__(self, value=0, name=""):
        # Both assignments go through the property setters below, so the
        # value is coerced via data_type and the name via unicode().
        self.value = value
        self.name = name
    fmt = struct.Struct("b")
    tagID = NotImplemented
    data_type = NotImplemented
    # NOTE(review): in pure CPython, naming '_name'/'_value' both in
    # __slots__ and as class attributes raises "ValueError: ... conflicts
    # with class variable" at class-creation time.  This looks like it
    # relies on a compiled (Cython) build where __slots__ is ignored —
    # confirm before running this module as plain Python.
    _name = None
    _value = None
    def __str__(self):
        return nested_string(self)
    @property
    def value(self):
        return self._value
    @value.setter
    def value(self, newVal):
        """Change the TAG's value. Data types are checked and coerced if needed."""
        self._value = self.data_type(newVal)
    @property
    def name(self):
        return self._name
    @name.setter
    def name(self, newVal):
        """Change the TAG's name. Coerced to a unicode."""
        self._name = unicode(newVal)
    @classmethod
    def load_from(cls, ctx):
        # Deserialize one value of this tag type from ctx.data at
        # ctx.offset, advancing the offset past the bytes consumed.
        data = ctx.data[ctx.offset:]
        # 'data' may be empty or not have the required length. Shall we bypass?
        value = None
        try:
            (value,) = cls.fmt.unpack_from(data)
        except Exception as e:
            if DEBUG_PE:
                # Best-effort dump of the offending buffer for post-mortem
                # debugging.  NOTE(review): this opens dump_fName for
                # reading to count its lines, which raises IOError on the
                # first failure (before the dump file exists).
                fp = open(dump_fName)
                n_lines = len(fp.readlines()) + 1
                fp.close()
                msg = ("*** NBT support could not load data\n"
                       "{e}\n"
                       "----------\nctx.data (length: {lcd}):\n{cd}\n"
                       "..........\ndata (length: {lrd}):\n{rd}\n"
                       "''''''''''\nctx.offset:\n{co}\n"
                       "^^^^^^^^^^\ncls.fmt.format: {cf}\n***\n".format(e=e, cd=repr(ctx.data), rd=repr(data), co=ctx.offset, cf=cls.fmt.format,
                                                                        lcd=len(ctx.data), lrd=len(data)
                                                                        )
                       )
                open(dump_fName, 'a').write(msg)
                added_n_lines = len(msg.splitlines())
                log.warning("Could not unpack NBT data: information written in {fn}, from line {b} to line {e}".format(fn=dump_fName, b=n_lines, e=(n_lines + added_n_lines - 1)))
            else:
                raise e
        # NOTE(review): "value is None" is the idiomatic (and safer) test.
        if value == None:
            # Unpacking failed but DEBUG_PE swallowed it: return a
            # placeholder tag so the caller keeps its offset accounting.
            self = cls()
            self.name = 'Unknown'
        else:
            self = cls(value=value)
        ctx.offset += self.fmt.size
        return self
    def __repr__(self):
        return "<%s name=\"%s\" value=%r>" % (str(self.__class__.__name__), self.name, self.value)
    def write_tag(self, buf):
        # One-byte tag-type ID.
        buf.write(chr(self.tagID))
    def write_name(self, buf):
        # Skipped entirely when the tag has no name at all.
        if self.name is not None:
            write_string(self.name, buf)
    def write_value(self, buf):
        buf.write(self.fmt.pack(self.value))
    def isCompound(self):
        return False
class TAG_Byte(TAG_Value):
    # Signed 8-bit integer.
    __slots__ = ('_name', '_value')
    tagID = TAG_BYTE
    fmt = struct.Struct(">b")
    data_type = int
class TAG_Short(TAG_Value):
    # Signed big-endian 16-bit integer.
    __slots__ = ('_name', '_value')
    tagID = TAG_SHORT
    fmt = struct.Struct(">h")
    data_type = int
class TAG_Int(TAG_Value):
    # Signed big-endian 32-bit integer.
    __slots__ = ('_name', '_value')
    tagID = TAG_INT
    fmt = struct.Struct(">i")
    data_type = int
class TAG_Long(TAG_Value):
    # Signed big-endian 64-bit integer (Py2 `long` keeps full precision).
    __slots__ = ('_name', '_value')
    tagID = TAG_LONG
    fmt = struct.Struct(">q")
    data_type = long
class TAG_Float(TAG_Value):
    # Big-endian 32-bit IEEE float.
    __slots__ = ('_name', '_value')
    tagID = TAG_FLOAT
    fmt = struct.Struct(">f")
    data_type = float
class TAG_Double(TAG_Value):
    # Big-endian 64-bit IEEE float.
    __slots__ = ('_name', '_value')
    tagID = TAG_DOUBLE
    fmt = struct.Struct(">d")
    data_type = float
class TAG_Byte_Array(TAG_Value):
    """Like a string, but for binary data. Four length bytes instead of
    two. Value is a numpy array, and you can change its elements"""
    tagID = TAG_BYTE_ARRAY
    def __init__(self, value=None, name=""):
        if value is None:
            value = zeros(0, self.dtype)
        self.name = name
        self.value = value
    def __repr__(self):
        return "<%s name=%s length=%d>" % (self.__class__, self.name, len(self.value))
    __slots__ = ('_name', '_value')
    def data_type(self, value):
        # Coerce any sequence to a numpy array of this tag's dtype.
        return array(value, self.dtype)
    dtype = numpy.dtype('uint8')
    @classmethod
    def load_from(cls, ctx):
        # Payload layout: 4-byte big-endian element count, then the raw
        # elements in cls.dtype.
        data = ctx.data[ctx.offset:]
        (string_len,) = TAG_Int.fmt.unpack_from(data)
        # Fix: frombuffer replaces the deprecated numpy.fromstring; the
        # .copy() keeps the array writable, preserving the documented
        # "you can change its elements" contract.
        value = numpy.frombuffer(data[4:string_len * cls.dtype.itemsize + 4], cls.dtype).copy()
        self = cls(value)
        ctx.offset += string_len * cls.dtype.itemsize + 4
        return self
    def write_value(self, buf):
        # Fix: tobytes() replaces the tostring() alias removed in NumPy 2.0.
        value_str = self.value.tobytes()
        buf.write(struct.pack(">I%ds" % (len(value_str),), self.value.size, value_str))
class TAG_Int_Array(TAG_Byte_Array):
    """An array of big-endian 32-bit integers"""
    tagID = TAG_INT_ARRAY
    __slots__ = ('_name', '_value')
    # NOTE(review): declared unsigned ('>u4') here — confirm signedness
    # against the data this is used with before relying on large values.
    dtype = numpy.dtype('>u4')
class TAG_Short_Array(TAG_Int_Array):
    """An array of big-endian 16-bit integers. Not official, but used by some mods."""
    tagID = TAG_SHORT_ARRAY
    __slots__ = ('_name', '_value')
    dtype = numpy.dtype('>u2')
class TAG_Long_Array(TAG_Int_Array):
    # Array of signed big-endian 64-bit integers ('>q'); this is the type
    # that holds the 1.13 "BlockStates" data read by the proof of concept.
    tagID = TAG_LONG_ARRAY
    __slots__ = ('_name', '_value')
    dtype = numpy.dtype('>q')
class TAG_String(TAG_Value):
    """String in UTF-8
    The value parameter must be a 'unicode' or a UTF-8 encoded 'str'
    """
    tagID = TAG_STRING
    def __init__(self, value="", name=""):
        # NOTE(review): unlike TAG_Value.__init__, an empty/falsy name is
        # NOT assigned here, leaving _name at its class default — confirm
        # this asymmetry is intentional before changing it.
        if name:
            self.name = name
        self.value = value
    # Class-level str -> unicode decode cache, shared by all instances and
    # never evicted.
    _decodeCache = {}
    __slots__ = ('_name', '_value')
    def data_type(self, value):
        # Pass unicode straight through; decode byte strings as UTF-8,
        # memoizing the result in _decodeCache.
        if isinstance(value, unicode):
            return value
        else:
            decoded = self._decodeCache.get(value)
            if decoded is None:
                decoded = value.decode('utf-8')
                self._decodeCache[value] = decoded
            return decoded
    @classmethod
    def load_from(cls, ctx):
        # Payload is a 2-byte length-prefixed UTF-8 string.
        value = load_string(ctx)
        return cls(value)
    def write_value(self, buf):
        write_string(self._value, buf)
# Big-endian 16-bit length prefix used by NBT strings; littleEndianNBT()
# temporarily rebinds this to a little-endian struct.
string_len_fmt = struct.Struct(">H")
def load_string(ctx):
    """Read one length-prefixed UTF-8 byte string from ctx and advance ctx.offset.

    :param ctx: parse cursor with .data (uint8 numpy array) and .offset
    :return: the raw UTF-8 bytes of the string (not decoded)
    """
    data = ctx.data[ctx.offset:]
    (string_len,) = string_len_fmt.unpack_from(data)
    # Fix: tobytes() replaces the tostring() alias removed in NumPy 2.0.
    value = data[2:string_len + 2].tobytes()
    ctx.offset += string_len + 2
    return value
def write_string(string, buf):
    """Serialize *string* to *buf* as UTF-8, preceded by a big-endian 16-bit length."""
    payload = string.encode('utf-8')
    record = struct.pack(">h%ds" % (len(payload),), len(payload), payload)
    buf.write(record)
# noinspection PyMissingConstructor
class TAG_Compound(TAG_Value, collections.MutableMapping):
    """A heterogenous list of named tags. Names must be unique within
    the TAG_Compound. Add tags to the compound using the subscript
    operator []. This will automatically name the tags."""
    tagID = TAG_COMPOUND
    ALLOW_DUPLICATE_KEYS = False
    __slots__ = ('_name', '_value')
    def __init__(self, value=None, name=""):
        self.value = value or []
        self.name = name
    def __repr__(self):
        return "<%s name='%s' keys=%r>" % (str(self.__class__.__name__), self.name, self.keys())
    def data_type(self, val):
        # Validate every element, then store a fresh private list.
        for i in val:
            self.check_value(i)
        return list(val)
    @staticmethod
    def check_value(val):
        # Members must be named TAG_Value instances: names are how
        # __getitem__ finds them later.
        if not isinstance(val, TAG_Value):
            raise TypeError("Invalid type for TAG_Compound element: %s" % val.__class__.__name__)
        if not val.name:
            raise ValueError("Tag needs a name to be inserted into TAG_Compound: %s" % val)
    @classmethod
    def load_from(cls, ctx):
        # Read (tag-type byte, name, payload) triples until TAG_End (0)
        # or the end of the buffer.
        self = cls()
        while ctx.offset < len(ctx.data):
            tag_type = ctx.data[ctx.offset]
            ctx.offset += 1
            if tag_type == 0:
                break
            tag_name = load_string(ctx)
            tag = tag_classes[tag_type].load_from(ctx)
            tag.name = tag_name
            self._value.append(tag)
        return self
    def save(self, filename_or_buf=None, compressed=True):
        """
        Save the TAG_Compound element to a file. Since this element is the root tag, it can be named.
        Pass a filename to save the data to a file. Pass a file-like object (with a read() method)
        to write the data to that object. Pass nothing to return the data as a string.
        """
        if self.name is None:
            self.name = ""
        buf = StringIO()
        self.write_tag(buf)
        self.write_name(buf)
        self.write_value(buf)
        data = buf.getvalue()
        if compressed:
            gzio = StringIO()
            gz = gzip.GzipFile(fileobj=gzio, mode='wb')
            gz.write(data)
            gz.close()
            data = gzio.getvalue()
        if filename_or_buf is None:
            return data
        if isinstance(filename_or_buf, basestring):
            # Fix: the original used the Py2-only file() builtin and never
            # closed the handle; a with-block guarantees the flush/close.
            with open(filename_or_buf, "wb") as f:
                f.write(data)
        else:
            filename_or_buf.write(data)
    def write_value(self, buf):
        # Members are written as (id, name, payload); a zero byte (TAG_End)
        # terminates the compound.
        for tag in self.value:
            tag.write_tag(buf)
            tag.write_name(buf)
            tag.write_value(buf)
        buf.write("\x00")
    # --- collection functions ---
    def __getitem__(self, key):
        # Linear scan by tag name.
        for tag in self.value:
            if tag.name == key:
                return tag
        raise KeyError("Key {0} not found".format(key))
    def __iter__(self):
        return itertools.imap(lambda x: x.name, self.value)
    def __contains__(self, key):
        return key in map(lambda x: x.name, self.value)
    def __len__(self):
        return self.value.__len__()
    def __setitem__(self, key, item):
        """Automatically wraps lists and tuples in a TAG_List, and wraps strings
        and unicodes in a TAG_String."""
        if isinstance(item, (list, tuple)):
            item = TAG_List(item)
        elif isinstance(item, basestring):
            item = TAG_String(item)
        item.name = key
        self.check_value(item)
        # remove any items already named "key".
        if not self.ALLOW_DUPLICATE_KEYS:
            # Fix: list comprehension instead of filter(), which returns an
            # iterator on Python 3 and would break the append() below.
            self._value = [x for x in self._value if x.name != key]
        self._value.append(item)
    def __delitem__(self, key):
        self.value.__delitem__(self.value.index(self[key]))
    def add(self, value):
        # Insert a pre-named tag, keyed by its own name.
        if value.name is None:
            raise ValueError("Tag %r must have a name." % value)
        self[value.name] = value
    def get_all(self, key):
        # Every member named key (only meaningful with ALLOW_DUPLICATE_KEYS).
        return [v for v in self._value if v.name == key]
    def isCompound(self):
        return True
class TAG_List(TAG_Value, collections.MutableSequence):
    """A homogenous list of unnamed data of a single TAG_* type.
    Once created, the type can only be changed by emptying the list
    and adding an element of the new type. If created with no arguments,
    returns a list of TAG_Compound
    Empty lists in the wild have been seen with type TAG_Byte"""
    tagID = 9
    def __init__(self, value=None, name="", list_type=TAG_BYTE):
        # can be created from a list of tags in value, with an optional
        # name, or created from raw tag data, or created with list_type
        # taken from a TAG class or instance
        self.name = name
        self.list_type = list_type
        self.value = value or []
    __slots__ = ('_name', '_value')
    def __repr__(self):
        return "<%s name='%s' list_type=%r length=%d>" % (self.__class__.__name__, self.name,
                                                          tag_classes[self.list_type],
                                                          len(self))
    def data_type(self, val):
        # A non-empty value fixes list_type to the first element's tag ID;
        # every element must then share that ID (all([]) is True, so an
        # empty list always passes).
        if val:
            self.list_type = val[0].tagID
        assert all([x.tagID == self.list_type for x in val])
        return list(val)
    @classmethod
    def load_from(cls, ctx):
        # Payload layout: 1-byte element tag ID, 4-byte big-endian count,
        # then the unnamed element payloads back to back.
        self = cls()
        self.list_type = ctx.data[ctx.offset]
        ctx.offset += 1
        (list_length,) = TAG_Int.fmt.unpack_from(ctx.data, ctx.offset)
        ctx.offset += TAG_Int.fmt.size
        for i in xrange(list_length):
            tag = tag_classes[self.list_type].load_from(ctx)
            self.append(tag)
        return self
    def write_value(self, buf):
        buf.write(chr(self.list_type))
        buf.write(TAG_Int.fmt.pack(len(self.value)))
        for i in self.value:
            i.write_value(buf)
    def check_tag(self, value):
        # Reject tags whose type differs from the list's element type.
        if value.tagID != self.list_type:
            raise TypeError("Invalid type %s for TAG_List(%s)" % (value.__class__, tag_classes[self.list_type]))
    # --- collection methods ---
    def __iter__(self):
        return iter(self.value)
    def __contains__(self, tag):
        return tag in self.value
    def __getitem__(self, index):
        return self.value[index]
    def __len__(self):
        return len(self.value)
    def __setitem__(self, index, value):
        # Slice assignment type-checks every incoming tag.
        if isinstance(index, slice):
            for tag in value:
                self.check_tag(tag)
        else:
            self.check_tag(value)
        self.value[index] = value
    def __delitem__(self, index):
        del self.value[index]
    def insert(self, index, value):
        # The first element fixes the list's element type; list members
        # are stored unnamed.
        if len(self) == 0:
            self.list_type = value.tagID
        else:
            self.check_tag(value)
        value.name = ""
        self.value.insert(index, value)
# Map each on-disk tag ID to the class that deserializes it.
tag_classes = {
    cls.tagID: cls
    for cls in (TAG_Byte, TAG_Short, TAG_Int, TAG_Long, TAG_Float, TAG_Double,
                TAG_String, TAG_Byte_Array, TAG_List, TAG_Compound,
                TAG_Int_Array, TAG_Long_Array, TAG_Short_Array)
}
def gunzip(data):
    """Return the gzip-decompressed contents of *data* (a gzipped byte string)."""
    stream = gzip.GzipFile(fileobj=StringIO(data))
    return stream.read()
def try_gunzip(data):
    """Gunzip *data* if possible; return it unchanged when it is not gzipped.

    Fix: the original "except IOError, zlib.error:" is Python 2
    single-target syntax — it caught only IOError and rebound the module
    attribute zlib.error to the exception instance. A tuple catches both.
    """
    try:
        data = gunzip(data)
    except (IOError, zlib.error):
        pass
    return data
def load(filename="", buf=None):
    """
    Unserialize data from an NBT file and return the root TAG_Compound object. If filename is passed,
    reads from the file, otherwise uses data from buf. Buf can be a buffer object with a read() method or a string
    containing NBT data.
    """
    if filename:
        # Fix: the original used the Py2-only file() builtin and never
        # closed the handle; read the whole file in a with-block instead.
        with open(filename, "rb") as f:
            buf = f.read()
    if hasattr(buf, "read"):
        buf = buf.read()
    return _load_buffer(try_gunzip(buf))
class load_ctx(object):
    """Mutable parse cursor passed between loaders: carries .data (the raw
    uint8 buffer) and .offset (the next byte to read)."""
def _load_buffer(buf):
    """Parse a decompressed NBT byte string and return the root TAG_Compound.

    :param buf: raw NBT data as bytes/str, or a uint8 numpy array
    :raises NBTFormatError: on empty input or a non-compound root tag
    """
    if isinstance(buf, (str, bytes)):
        # Fix: frombuffer replaces the deprecated numpy.fromstring; the
        # buffer is only ever read, so a read-only view is fine.
        buf = numpy.frombuffer(buf, 'uint8')
    data = buf
    if not len(data):
        raise NBTFormatError("Asked to load root tag of zero length")
    tag_type = data[0]
    if tag_type != 10:
        magic = data[:4]
        # Fix: tobytes() replaces the tostring() alias removed in NumPy 2.0.
        raise NBTFormatError('Not an NBT file with a root TAG_Compound '
                             '(file starts with "%s" (0x%08x)' % (magic.tobytes(), magic.view(dtype='uint32')))
    ctx = load_ctx()
    ctx.offset = 1
    ctx.data = data
    tag_name = load_string(ctx)
    tag = TAG_Compound.load_from(ctx)
    # For PE debug: a mangled root name must not fail the whole load.
    try:
        tag.name = tag_name
    except Exception:
        # Fix: narrowed from a bare except, which also swallowed
        # KeyboardInterrupt/SystemExit.
        pass
    return tag
# Public API: every registered TAG_* class plus the two top-level helpers.
__all__ = [a.__name__ for a in tag_classes.itervalues()] + ["load", "gunzip"]
@contextmanager
def littleEndianNBT():
    """
    Pocket edition NBT files are encoded in little endian, instead of big endian.
    This sets all the required paramaters to read little endian NBT, and makes sure they get set back after usage.
    :return: None
    """
    # We need to override the function to access the hard-coded endianness.
    def override_write_string(string, buf):
        encoded = string.encode('utf-8')
        buf.write(struct.pack("<h%ds" % (len(encoded),), len(encoded), encoded))
    def reset_write_string(string, buf):
        encoded = string.encode('utf-8')
        buf.write(struct.pack(">h%ds" % (len(encoded),), len(encoded), encoded))
    def override_byte_array_write_value(self, buf):
        value_str = self.value.tostring()
        buf.write(struct.pack("<I%ds" % (len(value_str),), self.value.size, value_str))
    def reset_byte_array_write_value(self, buf):
        value_str = self.value.tostring()
        buf.write(struct.pack(">I%ds" % (len(value_str),), self.value.size, value_str))
    global string_len_fmt
    global write_string
    # Switch every module-level struct/dtype to little-endian.
    string_len_fmt = struct.Struct("<H")
    TAG_Byte.fmt = struct.Struct("<b")
    TAG_Short.fmt = struct.Struct("<h")
    TAG_Int.fmt = struct.Struct("<i")
    TAG_Long.fmt = struct.Struct("<q")
    TAG_Float.fmt = struct.Struct("<f")
    TAG_Double.fmt = struct.Struct("<d")
    TAG_Int_Array.dtype = numpy.dtype("<u4")
    TAG_Long_Array.dtype = numpy.dtype("<q")
    TAG_Short_Array.dtype = numpy.dtype("<u2")
    write_string = override_write_string
    TAG_Byte_Array.write_value = override_byte_array_write_value
    try:
        yield
    finally:
        # Fix: the original restored big-endian state only on normal exit;
        # any exception raised inside the with-block left the whole module
        # patched little-endian. finally guarantees the reset.
        string_len_fmt = struct.Struct(">H")
        TAG_Byte.fmt = struct.Struct(">b")
        TAG_Short.fmt = struct.Struct(">h")
        TAG_Int.fmt = struct.Struct(">i")
        TAG_Long.fmt = struct.Struct(">q")
        TAG_Float.fmt = struct.Struct(">f")
        TAG_Double.fmt = struct.Struct(">d")
        TAG_Int_Array.dtype = numpy.dtype(">u4")
        TAG_Long_Array.dtype = numpy.dtype(">q")
        TAG_Short_Array.dtype = numpy.dtype(">u2")
        write_string = reset_write_string
        TAG_Byte_Array.write_value = reset_byte_array_write_value
def nested_string(tag, indent_string=" ", indent=0):
    """Render *tag* as a human-readable indented tree (this is what str(tag) shows)."""
    pad = indent_string * indent
    if tag.tagID == TAG_COMPOUND:
        inner = ""
        for key, value in tag.iteritems():
            inner += indent_string * (indent + 1) + '"%s": %s,\n' % (key, nested_string(value, indent_string, indent + 1))
        return 'TAG_Compound({\n' + inner + pad + '})'
    if tag.tagID == TAG_LIST:
        inner = ""
        for element in tag:
            inner += indent_string * (indent + 1) + nested_string(element, indent_string, indent + 1) + ",\n"
        return 'TAG_List([\n' + inner + pad + '])'
    return "%s(%r)" % (tag.__class__.__name__, tag.value)
"""
Proof of Concept for decoding Blockstate Longs and converting them to Palette indices.
Authors: Ben Gothard (@Podshot on Github) and NeunEinser (@NeunEinser on Twitter)
Huge thanks to NeunEinser for his help and supplying the original Java snippet for the long decoder that this one was
based on. Without his insight and code, this would've taken far longer than it did.
"""
from __future__ import unicode_literals, print_function
try:
from pymclevel import nbt # Use pymclevel cythonized nbt module if running in MCEdit-Unified's source directory
except ImportError:
import nbt # Use inplace pure-python NBT module
import chunk_reader
import numpy as np
import os
import math
def getBlockArray(blockstates):
    """
    Converts an array of Blockstate Longs to Palette indices
    :param blockstates: A list of Longs
    :type blockstates: list
    :return: A list of Palette indices
    :rtype: list
    """
    return_value = [0] * 4096
    if not blockstates:  # guard: avoid a modulo-by-zero on empty input
        return return_value
    # // keeps this an int on Python 3 as well as Python 2.
    bit_per_index = len(blockstates) * 64 // 4096
    current_reference_index = 0
    for i in range(len(blockstates)):
        current = blockstates[i]
        # Number of bits of the index that straddles into this long from
        # the previous one (0 when a new index starts exactly at bit 0).
        overhang = (bit_per_index - (64 * i) % bit_per_index) % bit_per_index
        if overhang > 0:
            # The previous index's low bits came from the top of the last
            # long; take the remaining `overhang` bits from the bottom of
            # this long and OR them in as the HIGH bits.
            # Fix: the original computed
            #   current % ((1 << overhang) << (bit_per_index - overhang))
            # (misplaced parentheses), which ORed in bit_per_index unshifted
            # bits and corrupted every boundary-straddling index.
            return_value[current_reference_index - 1] |= (current % (1 << overhang)) << (bit_per_index - overhang)
            current >>= overhang
        remaining_bits = 64 - overhang
        # ceil(remaining_bits / bit_per_index) whole-or-started indices
        # remain in this long.
        for _ in range((remaining_bits + (bit_per_index - remaining_bits % bit_per_index) % bit_per_index) // bit_per_index):
            return_value[current_reference_index] = current % (1 << bit_per_index)
            current_reference_index += 1
            current >>= bit_per_index
    return return_value
def get_coords():
    """Prompt for block coordinates and return them as a [x, y, z] list of ints."""
    print('Enter the block coodinates in this format: <x>,<y>,<z>')
    raw = raw_input('Enter the coordinates of the Block to inspect: ')
    return [int(part) for part in raw.split(',')]
def build_blockstate(tag):
    """
    Builds a Blockstate string from a Blockstate TAG_Compound that can be found in the Palette tag
    :param tag: The Blockstate TAG_Compound
    :type tag: TAG_Compound
    :return: The Blockstate string
    :rtype: str
    """
    blockstate = tag['Name'].value
    if 'Properties' in tag:
        # items() behaves the same as the Py2-only iteritems() here and
        # also works on Python 3 mappings.
        props = ['{}={}'.format(key, value.value) for (key, value) in tag['Properties'].items()]
        # Fix: the original appended props[:-1] + ']' unconditionally, so an
        # empty Properties compound produced a malformed "name]" string.
        if props:
            blockstate += '[' + ','.join(props) + ']'
    return blockstate
def main():
    """Interactive driver: locate a block in a 1.13 world and print its Blockstate."""
    level = raw_input('Enter path to the level.dat file: ')
    if not (os.path.exists(level) and os.path.isfile(level)):
        print('The target level.dat must exist and be a file!')
        return
    region_dir = os.path.join(os.path.dirname(level), 'region')
    level_nbt = nbt.load(level)
    # NOTE(review): 1463 is treated as the first data version saved in the
    # Blockstate format — confirm against the version manifest.
    if level_nbt['Data']['Version']['Id'].value < 1463:
        print('The target world must be 1.13 that is saved in the Blockstate format!')
        return
    print('')
    coords = get_coords()
    # Block -> chunk (divide by 16), chunk -> region (32 chunks per region).
    cx = int(math.floor(coords[0] / 16.0))
    cz = int(math.floor(coords[2] / 16.0))
    rx = cx >> 5
    rz = cz >> 5
    region_file = os.path.join(region_dir, 'r.{}.{}.mca'.format(rx, rz))
    while not (os.path.exists(region_file) and os.path.isfile(region_file)):
        print()
        print('The coordinates {} have not been generated/saved to disk yet!'.format(coords))
        print('Please enter in different coordinates')
        print()
        coords = get_coords()
        cx = int(math.floor(coords[0] / 16.0))
        cz = int(math.floor(coords[2] / 16.0))
        rx = cx >> 5
        rz = cz >> 5
        region_file = os.path.join(region_dir, 'r.{}.{}.mca'.format(rx, rz))
        # (the original repeated the existence check with a break here; the
        # while condition already handles it)
    chunk_tag = chunk_reader.read_region_for_chunk_file(region_file, cx, cz)
    sections = chunk_tag['Level']['Sections']
    wanted_section = None
    for section in sections:
        # Each section spans 16 blocks of Y starting at (Y << 4).
        if (section['Y'].value << 4) <= coords[1] < ((section['Y'].value + 1) << 4):
            wanted_section = section
            break
    if not wanted_section:
        # Fix: the original printed the literal "{}" — the placeholder was
        # never filled in with the coordinates.
        print('The section of chunk that the coordinate {} resides in hasn\'t been generated/saved yet!'.format(coords))
        return
    relative_coords = [  # Translate world coordinates to chunk relative coordinates
        coords[0] - (cx << 4),
        coords[1] - (wanted_section['Y'].value << 4),
        coords[2] - (cz << 4)
    ]
    blockstates = wanted_section['BlockStates'].value
    blockstates = [long(int(n)) for n in blockstates]  # Convert numpy array to pure-python array of longs
    indexed_blockstates = getBlockArray(blockstates)
    indexed_blockstates = np.array(indexed_blockstates).reshape((16, 16, 16))  # Reshape 1 dimensional array to a 3 dimensional one
    blocks = np.swapaxes(np.swapaxes(indexed_blockstates, 0, 1), 0, 2)  # Swap axis order from yzx to zyx and then from zyx to xyz
    block_index = blocks[relative_coords[0], relative_coords[1], relative_coords[2]]  # Grab the Palette index
    blockstate_tag = wanted_section['Palette'][block_index]  # Get Blockstate
    print('Block at {}: {}'.format(coords, build_blockstate(blockstate_tag)))
if __name__ == '__main__':
    main()
@gl91306
Copy link

gl91306 commented Aug 18, 2022

damn ive been looking for so long for this

Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment