Created
January 30, 2017 10:18
-
-
Save yaronvel/93a455cc5584d741f4efac0ba0e3151d to your computer and use it in GitHub Desktop.
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
# Standard library
import binascii
import copy
import pickle

# Third-party
import rlp
import sha3
from ethereum import tester, utils, abi, blocks, transactions
from pycoin import encoding
from pycoin.serialize import b2h, h2b
def get_seedhash(block_number):
    """Return the 32-byte epoch seed for block_number.

    Starts from 32 zero bytes and applies sha3_256 once per elapsed epoch
    (EPOCH_LENGTH blocks each).
    """
    seed = '\x00' * 32
    for _ in range(block_number // EPOCH_LENGTH):
        seed = serialize_hash(sha3_256(seed))
    return seed
# Assumes little endian bit ordering (same as Intel architectures)
def decode_int(s):
    """Decode a little-endian byte string into a non-negative int.

    Empty input decodes to 0. Uses binascii.hexlify instead of the
    Python-2-only str.encode('hex'), so the same code also accepts
    Python 3 bytes; behavior on Python 2 str input is unchanged.
    """
    return int(binascii.hexlify(s[::-1]), 16) if s else 0
def encode_int(s):
    """Encode a non-negative int as a little-endian byte string.

    0 encodes to the empty string. Uses binascii.unhexlify instead of the
    Python-2-only str.decode('hex'), so the same code also runs on
    Python 3 (returning bytes); behavior on Python 2 is unchanged.
    """
    if s == 0:
        return b''
    a = "%x" % s
    # Left-pad the hex form to an even number of digits, then reverse bytes.
    return binascii.unhexlify('0' * (len(a) % 2) + a)[::-1]
def zpad(s, length):
    """Right-pad s with NUL characters up to length (no-op if already longer)."""
    return s.ljust(length, '\x00')
def serialize_hash(h):
    """Pack a list of 32-bit words into a byte string, little-endian per word."""
    packed_words = [zpad(encode_int(word), 4) for word in h]
    return ''.join(packed_words)
def deserialize_hash(h):
    """Split a byte string into little-endian WORD_BYTES-wide integers."""
    words = []
    for offset in range(0, len(h), WORD_BYTES):
        words.append(decode_int(h[offset:offset + WORD_BYTES]))
    return words
def hash_words(h, sz, x):
    """Apply hash function h to x and return the digest as a word list.

    x may be a list of words (serialized first) or a raw byte string.
    sz is unused but kept for interface compatibility with callers.
    """
    data = serialize_hash(x) if isinstance(x, list) else x
    digest = h(data)
    return deserialize_hash(digest)
def serialize_cache(ds):
    """Concatenate the serialized form of every hash row in ds."""
    return ''.join(serialize_hash(row) for row in ds)

# A full dataset serializes exactly the same way as a cache.
serialize_dataset = serialize_cache
# sha3 (keccak) hash primitive, outputs 64 bytes
from Crypto.Hash import keccak

def mysha3_512(x):
    """Raw keccak-512 digest of x (64 bytes)."""
    return keccak.new(digest_bits=512, data=x).digest()
def sha3_512(x):
    """keccak-512 over a word list or byte string, returned as 16 words."""
    return hash_words(mysha3_512, 64, x)
def sha3_256(x):
    """keccak-256 over a word list or byte string, returned as 8 words."""
    return hash_words(utils.sha3_256, 32, x)
def xor(a, b):
    """Bitwise XOR of two integers (word-level mixing primitive for map())."""
    result = a ^ b
    return result
def isprime(x):
    """Trial-division primality test.

    Fixes two defects in the original:
      * range(2, int(x**0.5)) excluded the divisor int(sqrt(x)), so perfect
        squares of primes (9, 25, 49, ...) were reported prime;
      * values below 2 were reported prime.
    NOTE: the ethash reference pseudocode shares the off-by-one; for the
    size ranges used here the descent always hits a true prime first, so
    this correction does not change the computed cache/dataset sizes.
    """
    if x < 2:
        return False
    for i in range(2, int(x ** 0.5) + 1):
        if x % i == 0:
            return False
    return True
# Ethash algorithm parameters (epoch sizing, mixing widths, round counts).
WORD_BYTES = 4                    # bytes in word
DATASET_BYTES_INIT = 2**30        # bytes in dataset at genesis
DATASET_BYTES_GROWTH = 2**23      # dataset growth per epoch
CACHE_BYTES_INIT = 2**24          # bytes in cache at genesis
CACHE_BYTES_GROWTH = 2**17        # cache growth per epoch
CACHE_MULTIPLIER = 1024           # Size of the DAG relative to the cache
EPOCH_LENGTH = 30000              # blocks per epoch
MIX_BYTES = 128                   # width of mix
HASH_BYTES = 64                   # hash length in bytes
DATASET_PARENTS = 256             # number of parents of each dataset element
CACHE_ROUNDS = 3                  # number of rounds in cache production
ACCESSES = 64                     # number of accesses in hashimoto loop
def get_cache_size(block_number): | |
sz = CACHE_BYTES_INIT + CACHE_BYTES_GROWTH * (block_number // EPOCH_LENGTH) | |
sz -= HASH_BYTES | |
while not isprime(sz / HASH_BYTES): | |
sz -= 2 * HASH_BYTES | |
return sz | |
def get_full_size(block_number):
    """Size in bytes of the full ethash dataset for block_number's epoch.

    Starts from the epoch upper bound and walks down in 2*MIX_BYTES steps
    until the number of 128-byte mix rows is prime.
    """
    sz = DATASET_BYTES_INIT + DATASET_BYTES_GROWTH * (block_number // EPOCH_LENGTH)
    sz -= MIX_BYTES
    # `//` keeps the row count an int (Py2 `/` truncated too; Py3-safe now).
    while not isprime(sz // MIX_BYTES):
        sz -= 2 * MIX_BYTES
    return sz
def mkcache(cache_size, seed):
    """Generate the ethash cache: sequential fill + low-round randmemohash."""
    n = cache_size // HASH_BYTES
    # Sequentially produce the initial n rows of 64 bytes each.
    rows = [sha3_512(seed)]
    while len(rows) < n:
        rows.append(sha3_512(rows[-1]))
    # Use a low-round version of randmemohash to mix the rows.
    for _ in range(CACHE_ROUNDS):
        for i in range(n):
            v = rows[i][0] % n
            rows[i] = sha3_512(map(xor, rows[(i - 1 + n) % n], rows[v]))
    return rows
# 32-bit FNV prime used by the ethash mixing function.
FNV_PRIME = 0x01000193

def fnv(v1, v2):
    """FNV-1-style combine of two 32-bit words (ethash variant)."""
    # Masking with 0xffffffff is identical to % 2**32 for Python ints.
    return ((v1 * FNV_PRIME) ^ v2) & 0xffffffff
def calc_dataset_item(cache, i):
    """Derive row i of the full dataset from the cache (DAG item rule)."""
    num_rows = len(cache)
    words_per_hash = HASH_BYTES // WORD_BYTES
    # Seed the mix with one cache row, perturbed by the row index.
    mix = copy.copy(cache[i % num_rows])
    mix[0] ^= i
    mix = sha3_512(mix)
    # Fold in DATASET_PARENTS pseudo-randomly selected cache rows.
    for parent in range(DATASET_PARENTS):
        parent_row = fnv(i ^ parent, mix[parent % words_per_hash])
        mix = map(fnv, mix, cache[parent_row % num_rows])
    return sha3_512(mix)
# Every dataset element consumed by hashimoto is recorded here as two
# 256-bit integers, for later submission to a contract.
elements_input_for_contract = []

def push_data(element):
    """Pack a 16-word dataset element into two 256-bit ints and record them.

    Words 0-7 form the first integer, words 8-15 the second; each word k is
    placed at bit offset 32*k within its integer.
    """
    global elements_input_for_contract
    low = sum(element[i] << (32 * i) for i in range(8))
    high = sum(element[i + 8] << (32 * i) for i in range(8))
    elements_input_for_contract.extend([low, high])
def hashimoto(header, nonce, full_size, dataset_lookup):
    """Core ethash loop: mix dataset rows into the header/nonce seed.

    Returns {"mix digest": ..., "result": ...} as serialized byte strings.
    Side effect: every dataset element consumed is pushed via push_data().

    Fixes vs original:
      * removed a stray bare name `sd` that raised NameError and aborted
        every call before the mix was compressed;
      * dataset_lookup(p + j) is now called once per row instead of twice;
      * `/` replaced with `//` (identical on Py2 ints, correct on Py3).
    """
    ps = []  # debug trail of dataset page indexes touched
    n = full_size // HASH_BYTES
    w = MIX_BYTES // WORD_BYTES
    mixhashes = MIX_BYTES // HASH_BYTES
    # combine header+nonce into a 64 byte seed
    s = sha3_512(header + nonce[::-1])
    # start the mix with replicated s
    mix = []
    for _ in range(mixhashes):
        mix.extend(s)
    # mix in random dataset nodes
    for i in range(ACCESSES):
        p = fnv(i ^ s[0], mix[i % w]) % (n // mixhashes) * mixhashes
        newdata = []
        for j in range(mixhashes):
            element = dataset_lookup(p + j)
            push_data(element)
            newdata.extend(element)
        ps.append(p)
        mix = map(fnv, mix, newdata)
    print(str(ps))  # debug: which dataset pages were accessed
    # compress mix
    cmix = []
    for i in range(0, len(mix), 4):
        cmix.append(fnv(fnv(fnv(mix[i], mix[i+1]), mix[i+2]), mix[i+3]))
    return {
        "mix digest": serialize_hash(cmix),
        "result": serialize_hash(sha3_256(s+cmix))
    }
def hashimoto_light(full_size, cache, header, nonce):
    """Run hashimoto, deriving each dataset row on demand from the cache."""
    def lookup(idx):
        return calc_dataset_item(cache, idx)
    return hashimoto(header, nonce, full_size, lookup)
def hashimoto_full(full_size, dataset, header, nonce):
    """Run hashimoto reading rows directly from a precomputed dataset."""
    def lookup(idx):
        return dataset[idx]
    return hashimoto(header, nonce, full_size, lookup)
################################################################################ | |
class MerkleNode:
    """Node of a binary Merkle tree.

    Leaves carry an explicit value; internal nodes hash the concatenation
    of their children's values with utils.sha3.
    """
    def __init__(self, is_leaf, value, left_elem, right_elem):
        if is_leaf:
            self.value = value
            self.leaf = True
        else:
            self.left = left_elem
            self.right = right_elem
            self.value = utils.sha3(left_elem.value + right_elem.value)
            self.leaf = False

    def get_branch(self, index, depth):  # note that index should be reversed
        """Return the authentication path (sibling values) for leaf `index`.

        Bit `depth` of index selects the child to descend into; the sibling's
        value is appended after the recursive path.
        """
        if self.leaf:
            return []
        # BUG FIX: `result` was never initialized, so every call on a
        # non-leaf node raised UnboundLocalError.
        result = []
        move_right = index & (1 << depth)
        if move_right:
            result += self.right.get_branch(index, depth + 1)
            result += [self.left.value]
        else:
            result += self.left.get_branch(index, depth + 1)
            result += [self.right.value]
        return result
def compute_cache_merkle_tree( full_size, dataset_lookup, level, prev_level_nodes = None ):
    # Recursively builds a Merkle tree over the dataset, one level per call.
    # NOTE(review): this function has several suspected bugs, flagged inline
    # below — confirm intent with the author before relying on it.
    print "level: " + str(level)
    level_nodes = []
    if level == 1:
        # NOTE(review): fires on the *second* call regardless of how many
        # nodes remain, so only one combining level is ever built; termination
        # presumably should test `len(prev_level_nodes) == 1` — TODO confirm.
        return (prev_level_nodes[0],level)
    elif level == 0:
        print "Computing first level"
        for i in range(full_size//128):
            # NOTE(review): pairs (i, i+1) overlap, and dataset_lookup is
            # indexed here but called elsewhere in this file; non-overlapping
            # pairs (2*i, 2*i+1) look intended — TODO confirm.
            value = utils.sha3( dataset_lookup[i] + dataset_lookup[i+1] )
            node = MerkleNode( True, value, None, None )
            level_nodes.append(node)
            if i % 1000 == 0:
                print "level 0, element " + str(i)
    else:
        print "computing level " + str(level)
        size = len(prev_level_nodes)
        # NOTE(review): range(size//2) drops the last node when size is odd,
        # yet the `2*i+1 >= size` guard below can then never trigger;
        # range((size+1)//2) looks intended — TODO confirm.
        for i in range(size//2):
            right = None
            if 2*i+1 >= size:
                # Odd node count: pad with a zero-valued leaf on the right.
                right = MerkleNode( True, 0, None, None )
            else:
                right = prev_level_nodes[2*i+1]
            left = prev_level_nodes[2*i]
            node = MerkleNode(False,0,left,right)
            # NOTE(review): `node` is built and discarded —
            # `level_nodes.append(node)` appears to be missing, so the
            # recursion above level 1 receives an empty list.
            print "level 0, element " + str(i)
    return compute_cache_merkle_tree(full_size,dataset_lookup,level+1,level_nodes)
# ---------------------------------------------------------------------------
# Driver: build the epoch-0 cache, verify a known header/nonce pair with
# hashimoto_light, then dump the element values recorded for the contract.
#
# Fixes vs original: removed a stray bare `sd` (NameError) that halted the
# script before the final output, removed the duplicated block_number /
# full_size setup and dead commented-out code, and renamed the `hash`
# variable so it no longer shadows the builtin.
# ---------------------------------------------------------------------------
block_number = 0
full_size = get_full_size(block_number)
print("full size = " + str(full_size))
seed = get_seedhash(block_number)
cache_size = get_cache_size(block_number)
print("making cache")
cache = mkcache(cache_size, seed)
print("done")
# Known header-hash / nonce test vector.
header = h2b("100cbec5e5ef82991290d0d93d758f19082e71f234cf479192a8b94df6da6bfe")
nonce = h2b("307692cf71b12f6d")
pow_output = hashimoto_light(full_size, cache, header, nonce)
result = pow_output["result"]
print(b2h(result))
print(str(elements_input_for_contract))
Sign up for free
to join this conversation on GitHub.
Already have an account?
Sign in to comment