Last active
June 18, 2018 09:46
-
-
Save JonnyLatte/5668aa02f3d8a2d69de65f03c060c656 to your computer and use it in GitHub Desktop.
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
//------------------------------------------------------------------------------------------------------------------- | |
// Tezos potential contribution output finder. | |
//------------------------------------------------------------------------------------------------------------------- | |
// | |
//Block parser credits: | |
// | |
// CoinFabrik | |
// https://github.com/CoinFabrik/simple-nodejs-blockchain-parser | |
// | |
// John Ratcliff's Code Suppository: | |
// https://codesuppository.blogspot.com/2014/01/how-to-parse-bitcoin-blockchain.html#! | |
// | |
//External dependency: | |
// | |
//npm install bitcore-lib | |
//------------------------------------------------------------------------------------------------------------------- | |
// requires | |
var crypto = require('crypto');
var fs = require('fs');
var bitcore = require('bitcore-lib');
//------------------------------------------------------------------------------------------------------------------- | |
// globals | |
var bitcoinDataDir = "F:/bitcoindata/blocks/"; // change me: path to your bitcoin core data directory's blocks/ folder
var distribution_filename = "bitcoin_distribution"; // output file written by save_distribution()
var distribution = {}; // will contain the mapping of bitcoin p2sh payments to potential tez allocations when finished
var targetBlocks = JSON.parse(fs.readFileSync('blockHashes.json')); // file containing target blocks and their block heights (hash -> height)
var blocksTouched = {}; // when target blocks are found their hash will be added to this as a mapping to true so that we can check if all target blocks are found
var firstFile = 919; // dont check all block files hashing the entire chain instead only check from firstFile to lastFile
var lastFile = 935; // if for some reason the blocks are out of order to the extent that they are not in these files modify the range to try more files
var totalOutputs = 0; // for display purposes keep a running total of outputs
//------------------------------------------------------------------------------------------------------------------- | |
// helper functions | |
// Reads a Bitcoin variable-length integer from the stream and returns the
// raw bytes that encode it (the prefix byte plus any extension bytes), so
// the caller can both decode it (via toInt) and re-serialise it verbatim.
// Returns -1 for an unrecognised prefix (unreachable for uint8 prefixes,
// kept as a defensive fallback).
function readVarInt(stream) {
  const prefix = stream.read(1);
  const marker = prefix.readUInt8();
  if (marker < 253) {
    // value is encoded directly in the single byte
    return prefix;
  }
  // 0xfd, 0xfe, 0xff announce 2, 4 and 8 following bytes respectively
  const extra = { 253: 2, 254: 4, 255: 8 }[marker];
  if (extra) {
    return Buffer.concat([prefix, stream.read(extra)], 1 + extra);
  }
  return -1;
}
// Decodes a varint buffer previously returned by readVarInt() into a Number.
// Returns -1 when given a falsy value (short read / end of buffer).
// Note: a non-buffer truthy value (e.g. readVarInt's -1 fallback) falls
// through and yields undefined, matching the original behaviour.
function toInt(varInt) {
  if (!varInt) {
    return -1;
  }
  if (varInt[0] < 253) return varInt.readUInt8();
  switch(varInt[0]) {
    case 253: return varInt.readUIntLE(1, 2);
    case 254: return varInt.readUIntLE(1, 4);
    // BUGFIX: readUIntLE supports at most 6 bytes, so the original
    // readUIntLE(1, 8) threw a RangeError whenever an 8-byte varint was
    // encountered. Decode via BigInt instead (precision loss above
    // 2^53 is acceptable: such values are never valid tx/output counts).
    case 255: return Number(varInt.readBigUInt64LE(1));
  }
}
// Re-serialises one transaction from the reader into a single raw-tx
// Buffer. The pieces are collected in wire order so the concatenation is
// byte-identical to the on-disk (pre-segwit) encoding, suitable for
// handing to bitcore.Transaction.
function getRawTx(reader) {
  const parts = [reader.read(4)]; // version

  // inputs
  const inputCount = readVarInt(reader);
  parts.push(inputCount);
  const nIn = toInt(inputCount);
  for (let i = 0; i < nIn; i++) {
    parts.push(reader.read(32)); // previous tx hash
    parts.push(reader.read(4));  // previous output index
    const sigLen = readVarInt(reader);
    parts.push(sigLen);
    parts.push(reader.read(toInt(sigLen))); // scriptSig
    parts.push(reader.read(4));  // sequence number
  }

  // outputs
  const outputCount = readVarInt(reader);
  parts.push(outputCount);
  const nOut = toInt(outputCount);
  for (let i = 0; i < nOut; i++) {
    parts.push(reader.read(8)); // value in satoshis
    const pkLen = readVarInt(reader);
    parts.push(pkLen);
    parts.push(reader.read(toInt(pkLen))); // scriptPubKey
  }

  parts.push(reader.read(4)); // lock time
  return Buffer.concat(parts);
}
// Minimal sequential reader over a Buffer, mimicking a stream interface
// for the block/transaction parsing helpers above.
class blockBuffer {
  constructor(buffer) {
    this.index = 0;      // current read offset into the buffer
    this.buffer = buffer;
  }
  // Current offset into the underlying buffer.
  getindex() {
    return this.index;
  }
  // Returns the next `bytes` bytes as a Buffer slice and advances the
  // offset, or null when fewer than `bytes` bytes remain.
  read(bytes) {
    const end = this.index + bytes;
    if (end > this.buffer.length) {
      return null;
    }
    const chunk = this.buffer.slice(this.index, end);
    this.index = end;
    return chunk;
  }
};
// Reads the next 80-byte block header from the reader, or null at end of
// buffer. If the 80 bytes actually start with the network magic number,
// the previous block slot was empty: skip the 4-byte size field and try
// again (loop form avoids the original recursion).
function readHeader(reader)
{
    while(true)
    {
        var block = reader.read(80);
        if(block == null || block.length < 4) return null;
        // BUGFIX: `version` was assigned without a declaration, creating
        // an implicit global; declare it locally.
        var version = block.slice(0,4);
        if (version == null) {
            return null;
        }
        if (version.toString('hex') == 'f9beb4d9') {
            //It's actually the magic number of a different block (previous one was empty)
            reader.read(4); //block size
            continue;
        }
        return block; //version + previous hash + merkle hash + time + bits + nonce
    }
}
// Double SHA-256 (Bitcoin's standard block/tx hash). Uses node's built-in
// crypto module instead of bitcore's wrapper, removing the external
// dependency for this helper; the output (a 32-byte Buffer) is identical.
function sha256d(msg) {
    var once = crypto.createHash('sha256').update(msg).digest();
    return crypto.createHash('sha256').update(once).digest();
}
// Decodes 4 little-endian bytes into an unsigned 32-bit integer (used for
// the block-size field of the on-disk framing). Returns null when the
// reader ran out of data (b == null).
var fromBytesInt32=function(b) {
    if(b == null) return null;
    // BUGFIX: readUInt32LE replaces the original (result << 8) | b[i]
    // loop, whose 32-bit signed arithmetic returned negative numbers for
    // values >= 2^31.
    return b.readUInt32LE(0);
};
// Reverses a hash digest in place (flips between internal byte order and
// the big-endian display order used by block explorers / blockHashes.json)
// and returns the same buffer. Generalised from the original hand-rolled
// swap loop, which hard-coded a 32-byte length and mis-handled any other
// size; Buffer#reverse is in-place and length-agnostic.
function reverseDigest(b) {
    return b.reverse();
}
//------------------------------------------------------------------------------------------------------------------- | |
// Process an individual output from a target block | |
// | |
// Converts a satoshi contribution into a potential tez allocation.
// Base rate: 1 tez per 20000 sat (0.0002 BTC), plus an early-bird bonus
// that steps down every 400 blocks after the fundraiser start block
// (473623) and disappears from block 1600 onward.
// (Function name typo preserved: callers reference `caclulateReward`.)
function caclulateReward(sat_contributed,height) {
    const reward = sat_contributed / 20000;
    const sinceStart = height - 473623;
    // one bonus rate per 400-block tier: 20%, 15%, 10%, 5%
    const bonusRates = [0.2, 0.15, 0.1, 0.05];
    const tier = Math.floor(sinceStart / 400);
    const bonus = (sinceStart >= 0 && tier < bonusRates.length)
        ? reward * bonusRates[tier]
        : 0;
    return reward + bonus;
}
// Credits a potential tez allocation for one output of a target block.
// Only pay-to-script-hash outputs count as fundraiser contributions.
// Returns 1 if the output was credited into `distribution`, 0 otherwise.
function onTargetBlockTx(hash,address,sat)
{
    if(address.type != "scripthash") {
        return 0;
    }
    //console.log(address.toString() + " block height: "+targetBlocks[hash] + " sat: " + sat);
    const key = address.toString();
    if(!Object.prototype.hasOwnProperty.call(distribution, key)) {
        distribution[key] = 0;
    }
    // modify allocation here based on block height and reward function
    distribution[key] += caclulateReward(sat,targetBlocks[hash]);
    return 1;
}
// Writes the final address -> tez allocation table to disk, one
// "address | amount" line per entry, sorted by address so repeated runs
// produce stable, diffable output. Writing is asynchronous; success or
// failure is reported on the console.
function save_distribution() {
    const body = Object.keys(distribution)
        .sort((a, b) => a.localeCompare(b))
        .map((addr) => addr + " | " + distribution[addr].toFixed(6) + "\r\n")
        .join("");
    fs.writeFile(distribution_filename, body, function (err) {
        if (err) return console.log(err);
        console.log('Distribution output: ' + distribution_filename);
    });
}
//------------------------------------------------------------------------------------------------------------------- | |
// parse a bitcoin core file containing blocks | |
// | |
// Reads blk<NNNNN>.dat from bitcoinDataDir and walks every block in it.
// For blocks whose hash appears in targetBlocks, every transaction output
// is passed to onTargetBlockTx to accumulate the distribution, and the
// block hash is recorded in blocksTouched.
// Returns the number of qualifying (scripthash) outputs credited, or 0 if
// the file does not exist / cannot be read.
function parseBlockFile(blockFileIndex)
{
    let total_outputs = 0;
    //console.log("Block file " + i);
    // block files are named blk00000.dat .. blkNNNNN.dat (5 digits, zero padded)
    var fileNumber = ('0000' + blockFileIndex).slice(-5);
    try {
        var data = fs.readFileSync(bitcoinDataDir + 'blk' + fileNumber + '.dat');
    } catch(e) {
        // a missing file in the scan range is not fatal: report zero outputs
        return 0;
    }
    // each block on disk is framed as: magic (4) + size (4) + header (80) + tx data
    var reader = new blockBuffer(data),
    magic = reader.read(4),
    blockSize = fromBytesInt32(reader.read(4)),
    blockHeader = readHeader(reader);
    while(blockHeader !== null) {
        // block hash = double SHA-256 of the 80-byte header, byte-reversed to display order
        var hash = reverseDigest(sha256d(blockHeader)).toString('hex');
        var target = targetBlocks.hasOwnProperty(hash);
        var txCount = toInt(readVarInt(reader));
        //console.log("Block " + hash + " txcount " + txCount);
        // there is a possibility here to check if this is a target block and if so skip parsing individual transactions
        // by skipping forward in the buffer by the size of the block. Getting the offset wrong results in a still functional
        // parser that skips some blocks I chose not to mess with what works
        // The speed difference is unlikely to be significant as most of the time used is actually reading from the file system anyway
        for(var j = 0; j < txCount; j++) {
            // every tx must be parsed (to advance the reader) even for non-target blocks
            var rawTx = getRawTx(reader);
            var parsedTx = new bitcore.Transaction(rawTx);
            //console.log(JSON.stringify(parsedTx.toObject()));
            var outputs = parsedTx.toObject().outputs;
            if(target) {
                blocksTouched[hash] = true;
                for(var k = 0; k < outputs.length; k++) {
                    total_outputs += onTargetBlockTx(hash,bitcore.Script(outputs[k].script).toAddress(),outputs[k].satoshis);
                }
            }
        }
        // advance to the next block's framing: magic + size, then its header
        magic = reader.read(4);
        blockSize = fromBytesInt32(reader.read(4));
        blockHeader = readHeader(reader);
    }
    return total_outputs;
}
//------------------------------------------------------------------------------------------------------------------- | |
// | |
// loop through a range of block files looking for target blocks | |
// Scan each block file in [firstFile, lastFile], accumulating the
// distribution as a side effect and printing per-file progress.
for(let index = firstFile; index <= lastFile; index++) {
    let count = parseBlockFile(index);
    totalOutputs += count;
    console.log(
        "File: " + index +
        " outputs: " + count +
        " total: " + totalOutputs +
        " progress: " + (((index-firstFile+1) / (lastFile-firstFile+1) * 100)).toFixed(2) + "%");
}
// verify that all target blocks have been processed | |
// print missing hashes if logit is true | |
// Verifies that every hash in targetBlocks was seen during parsing.
// When logit is true, each missing hash is printed so the scan range can
// be adjusted. Returns true only if no target block is missing.
function allBlocksTouched(logit) {
    let allFound = true;
    for (const hash of Object.keys(targetBlocks)) {
        if (!Object.prototype.hasOwnProperty.call(blocksTouched, hash)) {
            if (logit) console.log("Missing "+hash);
            allFound = false;
        }
    }
    return allFound;
}
// Only write the distribution file when every target block was found;
// otherwise the missing hashes were logged above for investigation and
// the partial results are discarded.
if(allBlocksTouched(true)) {
    console.log("All required blocks parsed");
    save_distribution();
}
console.log("done");
Sign up for free
to join this conversation on GitHub.
Already have an account?
Sign in to comment