random python code
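
# this snippet shuffles an OLE compound document (MS-CFB) in place: it relocates the
# directory, the minifat, and every stream's sectors to randomly chosen positions,
# relinks the fat/minifat chains to match, then marks the leftover sectors as free
# and overwrites them with random bytes. the same treatment is applied afterwards to
# the mini-sectors inside the root entry's mini stream.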
if __name__ == 'TELEFRAG':
    import math, random, ptypes, office.storage
    from ptypes import *
    # `rp` is assumed to have been bound beforehand to the path of the compound
    # document (MS-CFB file) that gets opened here for reading and writing.
    source = ptypes.provider.file(rp, 'rw')
    source = ptypes.setsource(source)
    state = random.Random()

    ## file
    store = office.storage.File()
    store = store.l

    ## directory and sectors
    directory = store.Directory()
    world = [sector for sector in store['Data']]
    locations = {index : sector.getoffset() for index, sector in enumerate(world)}
    fat = store.Fat()

    # find the sectors we shouldn't modify
    difat = store.DiFat()
    ignore = {x.int() for x in difat.iterate()}

    # build a table that we can consume sectors from.
    table = {index for index in range(len(world))} - {index for index in ignore}
    shuffled = sorted(table)
    state.shuffle(shuffled)
    take = lambda count, table=shuffled: [item.pop() for item in [table] * count]
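    # take() just pops `count` indices off the end of the shuffled list, i.e. it is
    # equivalent to [table.pop() for _ in range(count)], so successive calls hand out
    # disjoint groups of sector indices until the table runs dry.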

    # grab the sectors for the directory, minifat, and directory entries (fat).
    dirsectors = store.chain(store['fat']['sectDirectory'].int())
    mfsectors = store.chain(store['minifat']['sectMiniFat'].int())
    directoryentries = {}
    for index, entry in enumerate(directory):
        if entry.stream():
            directoryentries[index] = store.chain(entry['sectLocation'].int())
        continue

    # now we collect all the sectors that we're going to use.
    newdirsectors = take(len(dirsectors))
    newmfsectors = take(len(mfsectors))
    newdirectoryentries = {index : take(len(sectors)) for index, sectors in directoryentries.items()}
    unused = take(len(shuffled))
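    # every original chain now has a same-length list of randomly chosen replacement
    # sectors, and whatever indices were still left in `shuffled` have just been drained
    # into `unused` so they can be freed and wiped once everything has been relocated.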

    # then we'll write contents from the original sector to the new location.
    for old, new in zip(dirsectors, newdirsectors):
        world[old].commit(offset=locations[new])
    for old, new in zip(mfsectors, newmfsectors):
        world[old].commit(offset=locations[new])
    for index in directoryentries:
        oldchain, newchain = directoryentries[index], newdirectoryentries[index]
        for old, new in zip(oldchain, newchain):
            world[old].commit(offset=locations[new])
        continue
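    # committing with an explicit offset writes each cached sector's bytes to its new
    # location, so for the moment the data exists at both the old and new offsets; the
    # stale copies only disappear when the unused sectors get wiped below.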

    # check that sectors have been committed in their shuffled order
    for old, new in zip(dirsectors, newdirsectors):
        item = world[old].copy(offset=locations[new])
        assert(world[old].serialize() == item.serialize() == item.l.serialize())
    for old, new in zip(mfsectors, newmfsectors):
        item = world[old].copy(offset=locations[new])
        assert(world[old].serialize() == item.serialize() == item.l.serialize())
    for index in directoryentries:
        oldchain, newchain = directoryentries[index], newdirectoryentries[index]
        for old, new in zip(oldchain, newchain):
            item = world[old].copy(offset=locations[new])
            assert(world[old].serialize() == item.serialize() == item.l.serialize())
        continue
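    # each copy above is positioned at the new offset and reloaded from disk with .l,
    # so comparing serializations against the originals confirms that every sector
    # actually landed where we expect it.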

    # now we need to update the fat and fix up each chain's entrypoint.
    uncommitted = fat.link(newdirsectors)
    store['fat']['sectDirectory'].set(uncommitted[0] if uncommitted else 'FREESECT').c
    assert([index for index in fat.chain(store['fat']['sectDirectory'].int())] == uncommitted)
    assert(all(index not in unused for index in uncommitted))
    [fat[index].c for index in uncommitted]
    directory.source = ptypes.provider.disorderly([world[index].copy().l for index in uncommitted], autocommit={})

    uncommitted = fat.link(newmfsectors)
    store['minifat']['sectMiniFat'].set(uncommitted[0] if uncommitted else 'FREESECT').c
    assert([index for index in fat.chain(store['minifat']['sectMiniFat'].int())] == uncommitted)
    assert(all(index not in unused for index in uncommitted))
    [fat[index].c for index in uncommitted]

    for index in directoryentries:
        entry, newchain = directory[index], newdirectoryentries[index]
        uncommitted = fat.link(newchain)
        entry['sectLocation'].set(uncommitted[0] if uncommitted else 'FREESECT').c
        assert([index for index in fat.chain(entry['sectLocation'].int())] == uncommitted)
        assert(all(index not in unused for index in uncommitted))
        [fat[index].c for index in uncommitted]
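    # in the FAT, entry N holds the number of the sector that follows sector N in its
    # chain (terminated by ENDOFCHAIN), while FREESECT marks unallocated sectors. each
    # fat.link(...) call above appears to rewrite those entries for the new chain and
    # return the indices it touched, which is why we re-walk the chain from the updated
    # start sector and then commit every touched fat entry with .c.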

    # last step is to clear the free sectors.
    [fat[index].set('FREESECT').c for index in unused]

    # and also overwrite them with garbage.
    size = store['SectorShift'].SectorSize()
    for index in unused:
        samples = map(state.getrandbits, [8] * size)
        block = ptype.block().set(bytearray(samples))
        block.commit(source=source, offset=locations[index])
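    # the regular sector size comes from the header's SectorShift (2**SectorShift bytes,
    # typically 512 for a version 3 file), so every freed sector is replaced with that
    # many random bytes.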

    # reload the directory and the minifat using the data we fixed up.
    store = store.l
    mfat = store.MiniFat()
    directory = store.Directory()
    assert(all(item.serialize() == world[old].serialize() for old, item in zip(dirsectors, directory.source.contiguous)))
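    # reloading the file picks up the relocated directory and the relinked fat before
    # we start on the minifat; the assertion above double-checks that the directory is
    # now being read out of the shuffled directory sectors.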

    # now we can do a good job screwing up the minifat.
    tinystream = directory.RootEntry().Data()
    smallworld = [sector for sector in tinystream]
    locations = {index : sector.getoffset() for index, sector in enumerate(smallworld)}
    directoryentries = {}
    for index, entry in enumerate(directory):
        if entry.ministream():
            directoryentries[index] = store.minichain(entry['sectLocation'].int())
        continue
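    # streams smaller than the mini stream cutoff are stored in tiny mini-sectors inside
    # the root entry's stream (the mini stream), and their chains are tracked by the
    # minifat rather than the fat; ministream()/minichain() presumably select exactly
    # those directory entries and walk their minifat chains.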

    # build a (tiny) table that we can consume sectors from.
    table = {index for index in range(len(smallworld))}
    tinyshuffle = sorted(table)
    state.shuffle(tinyshuffle)
    take = lambda count, table=tinyshuffle: [item.pop() for item in [table] * count]

    # collect the new sector indices for each directory entry.
    newdirectoryentries = {index : take(len(sectors)) for index, sectors in directoryentries.items()}
    unused = take(len(tinyshuffle))

    # move the old minisectors to their new location.
    for index in directoryentries:
        oldchain, newchain = directoryentries[index], newdirectoryentries[index]
        for old, new in zip(oldchain, newchain):
            smallworld[old].commit(offset=locations[new])
        continue

    # verify that the minisectors have the expected data.
    for index in directoryentries:
        oldchain, newchain = directoryentries[index], newdirectoryentries[index]
        for old, new in zip(oldchain, newchain):
            item = smallworld[old].copy(offset=locations[new])
            assert(smallworld[old].serialize() == item.serialize() == item.l.serialize())
        continue

    # final thing to do is to update the minifat and fix the directory entrypoints.
    for index in directoryentries:
        entry, newchain = directory[index], newdirectoryentries[index]
        uncommitted = mfat.link(newchain)
        entry['sectLocation'].set(uncommitted[0] if uncommitted else 'FREESECT').c
        assert([index for index in mfat.chain(entry['sectLocation'].int())] == uncommitted)
        assert(all(index not in unused for index in uncommitted))
        [mfat[index].c for index in uncommitted]
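    # this mirrors the fat relinking above, except these chains live in the minifat, so
    # mfat.link(...) is used and each directory entry's start sector is pointed at the
    # head of its new mini-chain.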

    # then we clean up by clearing the free sectors and overwriting them w/ garbage.
    [mfat[index].set('FREESECT').c for index in unused]
    size = store['SectorShift'].MiniSectorSize()
    for index in unused:
        samples = map(state.getrandbits, [8] * size)
        block = ptype.block().set(bytearray(samples))
        block.commit(source=tinystream.source, offset=locations[index])
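    # mini-sectors are 2**MiniSectorShift bytes (usually 64) and are addressed within the
    # root entry's mini stream rather than as raw file sectors, which is presumably why
    # the garbage blocks here are committed through tinystream.source instead of the
    # file-level source.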

    # we're done, so we just close everything and let the i/o cache do its thing.
    source.close()