p2b for CS639
# forked from https://github.com/dvf/blockchain

import hashlib
import json
import time
import threading
import logging

import requests
from flask import Flask, request


class Transaction(object):
    def __init__(self, sender, recipient, amount):
        self.sender = sender  # constraint: should exist in state
        self.recipient = recipient  # constraint: need not exist in state. Should exist in state if transaction is applied.
        self.amount = amount  # constraint: sender should have enough balance to send this amount

    def __str__(self) -> str:
        return "T(%s -> %s: %s)" % (self.sender, self.recipient, self.amount)

    def encode(self) -> str:
        return self.__dict__.copy()

    @staticmethod
    def decode(data):
        return Transaction(data['sender'], data['recipient'], data['amount'])

    def __lt__(self, other):
        if self.sender < other.sender: return True
        if self.sender > other.sender: return False
        if self.recipient < other.recipient: return True
        if self.recipient > other.recipient: return False
        if self.amount < other.amount: return True
        return False

    def __eq__(self, other) -> bool:
        return self.sender == other.sender and self.recipient == other.recipient and self.amount == other.amount
class Block(object):
    def __init__(self, number, transactions, previous_hash, miner):
        self.number = number  # constraint: should be 1 larger than the previous block
        self.transactions = transactions  # constraint: list of transactions. Ordering matters. They will be applied sequentially.
        self.previous_hash = previous_hash  # constraint: Should match the previous mined block's hash
        self.miner = miner  # constraint: The node_identifier of the miner who mined this block
        self.hash = self._hash()

    def _hash(self):
        return hashlib.sha256(
            str(self.number).encode('utf-8') +
            str([str(txn) for txn in self.transactions]).encode('utf-8') +
            str(self.previous_hash).encode('utf-8') +
            str(self.miner).encode('utf-8')
        ).hexdigest()

    def __str__(self) -> str:
        return "B(#%s, %s, %s, %s, %s)" % (self.hash[:5], self.number, self.transactions, self.previous_hash, self.miner)

    def encode(self):
        encoded = self.__dict__.copy()
        encoded['transactions'] = [t.encode() for t in self.transactions]
        return encoded

    @staticmethod
    def decode(data):
        txns = [Transaction.decode(t) for t in data['transactions']]
        return Block(data['number'], txns, data['previous_hash'], data['miner'])
class State(object):
    def __init__(self):
        # TODO: You might want to think how you will store balance per person.
        # You don't need to worry about persisting to disk. Storing in memory is fine.
        pass

    def encode(self):
        dumped = {}
        # TODO: Add all person -> balance pairs into `dumped`.
        return dumped

    def validate_txns(self, txns):
        result = []
        # TODO: returns a list of valid transactions.
        # You receive a list of transactions, and you try applying them to the state.
        # If a transaction can be applied, add it to result. (should be included)
        return result

    def apply_block(self, block):
        # TODO: apply the block to the state.
        logging.info("Block (#%s) applied to state. %d transactions applied" % (block.hash, len(block.transactions)))

    def history(self, account):
        # TODO: return a list of (blockNumber, value changes) that this account went through
        # Here is an example
        blockNumber = 3
        amount = 200
        blockNumber2 = 10
        amount2 = -25
        return [(blockNumber, amount), (blockNumber2, amount2)]


class Blockchain(object):
    def __init__(self):
        self.nodes = []
        self.node_identifier = 0
        self.block_mine_time = 5

        # in memory datastructures.
        self.current_transactions = []  # A list of `Transaction`
        self.chain = []  # A list of `Block`
        self.state = State()

    def is_new_block_valid(self, block, received_blockhash):
        """
        Determine if I should accept a new block.
        Does it pass all semantic checks? Search for "constraint" in this file.

        :param block: A new proposed block
        :return: True if valid, False if not
        """
        # TODO: check if received block is valid
        # 1. Hash should match content
        # 2. Previous hash should match previous block
        # 3. Transactions should be valid (all apply to block)
        # 4. Block number should be one higher than previous block
        # 5. miner should be correct (next RR)
        return True

    def trigger_new_block_mine(self, genesis=False):
        thread = threading.Thread(target=self.__mine_new_block_in_thread, args=(genesis,))
        thread.start()

    def __mine_new_block_in_thread(self, genesis=False):
        """
        Create a new Block in the Blockchain

        :return: New Block
        """
        logging.info("[MINER] waiting for new transactions before mining new block...")
        time.sleep(self.block_mine_time)  # Wait for new transactions to come in
        miner = self.node_identifier

        if genesis:
            block = Block(1, [], '0xfeedcafe', miner)
        else:
            self.current_transactions.sort()
            # TODO: create a new *valid* block with available transactions. Replace the arguments in the line below.
            block = Block(1, [], '1', miner)

        # TODO: make changes to in-memory data structures to reflect the new block. Check Blockchain.__init__ method for in-memory datastructures
        self.chain.append(block)

        if genesis:
            pass
            # TODO: at time of genesis, change state to have 'A': 10000 (person A has 10000)

        logging.info("[MINER] constructed new block with %d transactions. Informing others about: #%s" % (len(block.transactions), block.hash[:5]))
        # broadcast the new block to all nodes.
        for node in self.nodes:
            if node == self.node_identifier: continue
            requests.post(f'http://localhost:{node}/inform/block', json=block.encode())

    def new_transaction(self, sender, recipient, amount):
        """ Add this transaction to the transaction mempool. We will try
        to include this transaction in the next block until it succeeds.
        """
        self.current_transactions.append(Transaction(sender, recipient, amount))

Overview

In this assignment, we will expand the implementation of a simple account-based blockchain system.

Updates:

  • You are supposed to create a State::history() method as well. Check post 79 on Piazza.

A consortium setting

Background

In class, we have looked at systems that use Nakamoto consensus with Proof of Work. This is one of the primary consensus mechanisms for purely permissionless settings (a permissionless setting is one where a node does not need to know about all other nodes in the system).

In permissioned settings, however, one can use the knowledge of node membership to build consensus protocols that are cheaper than Proof of Work. One such example is Proof of Stake (Ethereum recently switched to this). Similarly, Algorand's leader selection can be viewed as a sortition-based variant of Proof of Stake (this will become clearer in the Algorand section).

Fundamentally, we want to retain the property that everyone eventually gets a chance to propose a block and have it accepted. This addresses problems around censorship, denial of service, etc. At the same time, we want to limit the havoc malicious nodes can cause: for example, they could continuously propose blocks, forcing honest nodes to do wasteful extra work. PoW/PoS helps us narrow down which node is allowed to propose the next block, solving these problems.

Why consortium

The above properties are needed in a wild-wild-west malicious setting where no one trusts anyone else. This is indeed the case for decentralized cryptocurrency applications.

However, we need not always be pessimistic. Blockchain applications exist in semi-trusted settings as well. For example, consider three banks: UWCU, Chase, and BOA (Bank of America). People want to transfer money via Zelle between accounts that might be at different banks. These banks can run a blockchain amongst themselves on which interbank transactions are posted.

This is a consortium setting. Typically, such a setting has the following properties:

  • Everyone is fully aware of the other participating nodes (e.g. node-UWCU-1, node-chase-1, node-chase-2, node-BOA-1). Thus, it is a permissioned setting.
  • There is a reputation associated with each node. If one node acts maliciously, that would lead to bad press for the bank.
  • Nodes act cautiously.
    • They do not have to fight for proposing a block (in contrast to PoW/PoS). This prevents wasted work and therefore leads to extremely high performance.
    • They are still suspicious of any proposed block. (They do not blindly trust the blocks/transactions received from another bank and should validate them.)

[HyperLedger Fabric](https://www.hyperledger.org/use/fabric) from IBM is a blockchain for the consortium setting. Consortium blockchains can afford to run simple consensus protocols and do not require BFT protocols. Any deviation from the agreed-upon protocol can be used to blacklist nodes.

Our assignment

The assignment description is fairly straightforward. We give you an incomplete implementation of a simple consortium system. You complete the implementation by working through all the "# TODO" and "# constraint" comments sprinkled throughout the code.

Download (TODO requirements.txt) the files (blockchain.py, server.py), and let's get started.

We will play around with a 3-node blockchain, though ideally it should work for more than 3 nodes as well. Use tmux or multiple terminal windows to run the following commands in 3 separate terminals.

 > python3 server.py -p 5001 -n 5001 5002 5003
 > python3 server.py -p 5002 -n 5001 5002 5003
 > python3 server.py -p 5003 -n 5001 5002 5003

You're going to need a fourth terminal to send commands to these nodes. For example, run this to propose a new transaction:

 > curl -X POST http://localhost:5001/transactions/new -H 'Content-Type: application/json' -d '{"sender": "A", "recipient": "B", "amount": 10}'

You can check the state of a node by running curl http://localhost:5001/dump. Change the port number to talk with a different node.
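
If you prefer Python over curl while debugging, a tiny helper like the one below polls every node's /dump and prints the pieces you usually care about. This is just a convenience sketch, not part of the assignment; it only relies on the keys that server.py's /dump route returns.

# Debugging helper (not part of the assignment): compare each node's view.
import requests

for port in (5001, 5002, 5003):
    d = requests.get('http://localhost:%d/dump' % port).json()
    print(port,
          'chain length:', len(d['chain']),
          'pending:', len(d['pending_transactions']),
          'state:', d['state'])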

Whenever you make any changes to code, make sure to restart all three instances!

In our consortium setting, nodes propose blocks in a round-robin manner. If we have 3 nodes, block 1 must be proposed by 5001, block 2 by 5002, block 3 by 5003, block 4 by 5001 again, and so on.
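
With the node list sorted by port, the expected proposer of a given block number comes down to simple modular arithmetic. A minimal sketch (the helper name expected_miner is ours, not part of the starter code):

# Hypothetical helper: which node should mine block `number`?
# Assumes `nodes` is the full list of ports, e.g. [5001, 5002, 5003].
def expected_miner(nodes, number):
    ordered = sorted(nodes)                       # agree on a canonical order
    return ordered[(number - 1) % len(ordered)]   # block 1 -> 5001, block 2 -> 5002, ...

assert expected_miner([5001, 5002, 5003], 1) == 5001
assert expected_miner([5001, 5002, 5003], 4) == 5001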

Code architecture

blockchain.py

This contains the relevant data structures for storing your blockchain's data, and logic for validations and making the blockchain tick.

  • Transaction: what you might expect. Observe the comments with "# constraint". You don't need to add anything on these lines, but they are important whenever you want to validate a transaction.
  • Block: just read through what is part of a block and what constraints apply (again, needed for validation).
  • State: you have to implement this class and most of its methods.
    • Reminder: this is an account-based blockchain, so you will typically store key-value pairs, e.g. "personA" currently has 500, "personB" has 1000, etc. (see the State sketch after this list).
    • validate_txns is called with a list of transactions. It tries to apply each transaction sequentially. If a transaction cannot be applied, or is invalid, it should not be considered. It eventually returns the list of transactions that can be applied. Consider an example where we call it with [T1, T2, T3, T4, T5]. Assume that T2 is invalid because the sender does not exist in State, and T3 is invalid because the sender had insufficient funds. We should return [T1, T4, T5]. It should not commit these transactions or apply them to the state.
    • apply_block: a new block exists. Change our account info based on this new block.
  • Blockchain: the key in-memory data structures are current_transactions (the list of pending transactions), chain (the list of committed blocks), and state (your State).
    • is_new_block_valid: triggered to see if this is a valid block. Remember, it should fulfill all constraints.
    • trigger_new_block_mine: call this method when you want this node to create a block.
    • __mine_new_block_in_thread: this is where you are supposed to create a new valid block. A transaction that fails to get in should still be retried during the next block. This method also automatically informs the other nodes about the new block!
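
As a concrete illustration of the State description above, here is a minimal sketch of one possible shape for the class. The `balances` dictionary and the bookkeeping around it are our own choices, not a required design, and the sketch deliberately omits genesis handling and the per-block change tracking you will need for history():

import logging

class State(object):
    def __init__(self):
        # One possible representation: account name -> current balance.
        self.balances = {}

    def encode(self):
        # /dump expects a plain dict of account -> balance.
        return dict(self.balances)

    def validate_txns(self, txns):
        # Apply transactions against a scratch copy, in the given order.
        # A txn is valid only if the sender exists and has enough balance
        # *at that point in the sequence*; invalid txns are skipped here,
        # and nothing is committed to the real state.
        result = []
        scratch = dict(self.balances)
        for txn in txns:
            if txn.sender in scratch and scratch[txn.sender] >= txn.amount:
                scratch[txn.sender] -= txn.amount
                scratch[txn.recipient] = scratch.get(txn.recipient, 0) + txn.amount
                result.append(txn)
        return result

    def apply_block(self, block):
        # The block is assumed to be validated already; just move the money.
        for txn in block.transactions:
            self.balances[txn.sender] -= txn.amount
            self.balances[txn.recipient] = self.balances.get(txn.recipient, 0) + txn.amount
        logging.info("Block (#%s) applied to state. %d transactions applied" % (block.hash, len(block.transactions)))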

server.py

This defines what a node does when it receives an RPC call (a network request).

  • new_block_received: Observe that it makes a call to is_new_block_valid before accepting it.
    • What should a node do when it gets a block? (See the sketch after this list.)
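
One plausible answer, sketched as a standalone helper. The function name, the exact ordering, and the round-robin check are our assumptions, not the one required design; it only uses methods and fields that already exist in blockchain.py:

# Sketch only: what a node might do once is_new_block_valid has accepted a block.
def accept_block(blockchain, block):
    blockchain.chain.append(block)        # 1. extend the chain
    blockchain.state.apply_block(block)   # 2. update account balances
    # 3. drop pending transactions that the block already included
    blockchain.current_transactions = [
        t for t in blockchain.current_transactions
        if t not in block.transactions
    ]
    # 4. if round robin says this node mines block number+1, start mining it
    ordered = sorted(blockchain.nodes)
    if ordered[block.number % len(ordered)] == blockchain.node_identifier:
        blockchain.trigger_new_block_mine()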

How you might want to tackle this:

While we will release tests as soon as possible, here is an approximate order in which you might want to implement this assignment:

  • Make nodes propose blocks in a round-robin fashion. (hint: server.py::new_block_received)
  • Work on constructing a valid block (Blockchain.__mine_new_block_in_thread) when it's your turn; see the mining sketch after this list. To start with, change validate_txns to always return all the txns. Propose a couple of transactions to different nodes and see whether they end up being included in proposed blocks. You can check by dumping the state (as discussed above, and below in how-to-debug), or just print stuff to the log.
  • Work on validate_txns: see that invalid txns are not included.
  • Transactions which did not get in should be retried whenever this node tries to mine a new block.
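
For the block-construction step, here is a hedged sketch of what the non-genesis branch of Blockchain.__mine_new_block_in_thread could look like, written in the context of that method (so `self` and `miner` are already defined). The specific bookkeeping is an assumption, not the required solution:

# Sketch only: inside __mine_new_block_in_thread, non-genesis branch.
self.current_transactions.sort()
valid_txns = self.state.validate_txns(self.current_transactions)
prev_block = self.chain[-1]
block = Block(prev_block.number + 1, valid_txns, prev_block.hash, miner)

self.chain.append(block)
self.state.apply_block(block)
# Keep only the transactions that did not make it in, so they get retried later.
self.current_transactions = [
    t for t in self.current_transactions if t not in valid_txns]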

How to debug

  • You can send transactions to each node (as shown above, before the Code architecture section)
  • You can get the in-memory state of each node (as discussed above: curl http://localhost:5001/dump)
  • In order to actually start the experiment manually, you will need to do this once (curl http://localhost:5001/startexp/). This forces the first node to create the first (genesis) block and kick-start the rest of the blockchain pipeline.
  • You can control the time a node waits before committing a block to help you propose transactions to a specific node. Just add -t 10 when you run the command. Each node will wait 10 seconds (or whatever you pass) before it proposes a block.
  • logging.info("intelligent debugging like Received: " + str(variable) + ' while state was ' + str(somethingelse))
  • import pdb; pdb.set_trace(). Add this line to the code where you want to debug. Then run the code and it will stop at that line. You can then inspect the variables and run the code line by line.
  • good ol' print("bleh")

from flask import Flask, request, jsonify
import logging

import blockchain as bc

# Instantiate the Node
app = Flask(__name__)

# Instantiate the Blockchain
blockchain = bc.Blockchain()


@app.route('/inform/block', methods=['POST'])
def new_block_received():
    values = request.get_json()
    logging.info("Received: " + str(values))

    # Check that the required fields are in the POST'ed data
    required = ['number', 'transactions', 'miner', 'previous_hash', 'hash']
    if not all(k in values for k in required):
        logging.warning("[RPC: inform/block] Missing values")
        return 'Missing values', 400

    block = bc.Block.decode(values)
    valid = blockchain.is_new_block_valid(block, values['hash'])

    if not valid:
        logging.warning("[RPC: inform/block] Invalid block")
        return 'Invalid block', 400

    blockchain.chain.append(block)  # Add the block to the chain
    # Modify any other in-memory data structures to reflect the new block
    # TODO: if I am responsible for next block, start mining it (trigger_new_block_mine).

    return "OK", 201


@app.route('/transactions/new', methods=['POST'])
def new_transaction():
    values = request.get_json()

    # Check that the required fields are in the POST'ed data
    required = ['sender', 'recipient', 'amount']
    if not all(k in values for k in required):
        return 'Missing values', 400

    # Create a new Transaction
    blockchain.new_transaction(values['sender'], values['recipient'], int(values['amount']))

    return "OK", 201


@app.route('/dump', methods=['GET'])
def full_chain():
    response = {
        'chain': [b.encode() for b in blockchain.chain],
        'pending_transactions': [txn.encode() for txn in sorted(blockchain.current_transactions)],
        'state': blockchain.state.encode()
    }
    return jsonify(response), 200


@app.route('/startexp/', methods=['GET'])
def startexp():
    if blockchain.node_identifier == min(blockchain.nodes):
        blockchain.trigger_new_block_mine(genesis=True)
    return 'OK'


@app.route('/health', methods=['GET'])
def health():
    return 'OK', 200


@app.route('/history', methods=['GET'])
def history():
    account = request.args.get('account', '')
    if account == '':
        return 'Missing values', 400
    data = blockchain.state.history(account)
    return jsonify(data), 200


if __name__ == '__main__':
    from argparse import ArgumentParser

    logging.getLogger().setLevel(logging.INFO)

    parser = ArgumentParser()
    parser.add_argument('-p', '--port', default=5000, type=int, help='port to listen on')
    parser.add_argument('-t', '--blocktime', default=5, type=int, help='Transaction collection time (in seconds) before creating a new block.')
    parser.add_argument('-n', '--nodes', nargs='+', help='ports of all participating nodes (space separated). e.g. -n 5001 5002 5003', required=True)
    args = parser.parse_args()

    # Use port as node identifier.
    port = args.port
    blockchain.node_identifier = port
    blockchain.block_mine_time = args.blocktime

    for nodeport in args.nodes:
        blockchain.nodes.append(int(nodeport))

    app.run(host='0.0.0.0', port=port)

import unittest
import signal
import os, time, random, subprocess, requests


# https://stackoverflow.com/a/49567288
class TestTimeout(Exception):
    pass


class test_timeout:
    def __init__(self, seconds, error_message=None):
        if error_message is None:
            error_message = 'test timed out after {}s.'.format(seconds)
        self.seconds = seconds
        self.error_message = error_message

    def handle_timeout(self, signum, frame):
        raise TestTimeout(self.error_message)

    def __enter__(self):
        signal.signal(signal.SIGALRM, self.handle_timeout)
        signal.alarm(self.seconds)

    def __exit__(self, exc_type, exc_val, exc_tb):
        signal.alarm(0)


def log(msg, level='INFO'):
    print('[%s]: %s' % (level, msg))


server_ports = [5001, 5002, 5003]
BLOCK_COMMIT_TIME = 2
POINTS = 0


def stagger():
    time.sleep(BLOCK_COMMIT_TIME/2)


def commit():
    time.sleep(BLOCK_COMMIT_TIME)
class ServerProcess:
    def pid_fname(self):
        return '.pid.server-%d.pid' % self.portnumber

    def __init__(self, portnumber):
        self.portnumber = portnumber
        self.instance = None
        self.base_url = 'http://localhost:%d' % self.portnumber

    def kill_if_running(self):
        fname = self.pid_fname()
        if not os.path.isfile(fname): return
        with open(fname, 'r') as f:
            try:
                pid = int(f.read().strip())
                # log('Killing process with pid: %d' % pid, 'INFO')
                os.kill(pid, 9)
                if self.instance is not None: self.instance.wait()
            except Exception as e:
                log('Unable to read/kill server: %s' % e, 'WARN')
        if os.path.isfile(fname):
            os.remove(fname)

    def restart(self, block_commit_time=4):
        assert(block_commit_time % 2 == 0)
        self.kill_if_running()
        if self.instance is not None:
            self.instance.stagger()
            self.instance = None
        time.sleep(0.1)
        if not os.path.exists('./server.py'):
            raise Exception('./server.py not found.')
        for _ in range(3):
            args = [
                'python3', './server.py',
                '-p', str(self.portnumber),
                '-t', str(block_commit_time),
                '-n']
            args.extend([str(x) for x in server_ports])
            process = subprocess.Popen(args, stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)
            pid = process.pid
            with open(self.pid_fname(), 'w') as f:
                f.write("%d\r\n" % pid)
            time.sleep(0.5)
            if process.poll() is None:
                self.instance = process
                break
        else:
            raise Exception("Unable to start the server. 99% of the time it means that your server crashed as soon as it started. Please check manually. 1% of the time it could be due to overloaded CSL machines, please try again in 10 seconds. This is almost never the case.")

    def check_process_alive(self):
        if self.instance is None: return False
        if self.instance.poll() is not None: return False
        try:
            os.kill(self.instance.pid, 0)
            return True
        except OSError:
            return False

    def ping(self):
        with test_timeout(1):
            r = requests.get(self.base_url + '/health')
            return r.status_code == 200

    def send_txn(self, txn):
        with test_timeout(1):
            r = requests.post(self.base_url + '/transactions/new', json=txn)
            return r.status_code == 201

    def send_block(self, block):
        with test_timeout(1):
            r = requests.post(self.base_url + '/inform/block', json=block)
            return r.status_code == 201

    def dump(self):
        with test_timeout(1):
            r = requests.get(self.base_url + '/dump')
            return r.json()

    def genesis(self):
        with test_timeout(1):
            r = requests.get(self.base_url + '/startexp/')
            return r.status_code == 200

    def history(self, account):
        with test_timeout(1):
            r = requests.get(self.base_url + '/history', params={'account': account})
            return r.json()
class TestsUtils():
    @staticmethod
    def txn(sender, recipient, amount):
        return {'sender': sender, 'recipient': recipient, 'amount': amount}

    @staticmethod
    def block(num, txns, prev, miner, hash=None):
        def tx_stringify(t):
            return "T(%s -> %s: %s)" % (t['sender'], t['recipient'], t['amount'])
        import hashlib
        if hash is None:  # Generate correct hash
            hash = hashlib.sha256(
                str(num).encode('utf-8') +
                str([tx_stringify(txn) for txn in txns]).encode('utf-8') +
                str(prev).encode('utf-8') +
                str(miner).encode('utf-8')
            ).hexdigest()
        return {'number': num, 'transactions': txns, 'previous_hash': prev, 'miner': miner, 'hash': hash}

    @staticmethod
    def checkChainEqualForAll(tst, chain1, chain2, chain3):
        tst.assertTrue(chain1 == chain2 == chain3)

    @staticmethod
    def checkStateEqualForAll(tst, state1, state2, state3):
        tst.assertTrue(state1 == state2 == state3)

    @staticmethod
    def checkBlockBasic(tst, block, expectedBlockNumber, expectedMiner, expectedPrevHash=None):
        tst.assertTrue(block['number'] == expectedBlockNumber)
        if expectedPrevHash is not None:
            tst.assertTrue(block['previous_hash'] == expectedPrevHash)
        expectedMiner = server_ports[(block['number'] - 1) % len(server_ports)]
        tst.assertTrue(block['miner'] == expectedMiner)
class Test1ChainTests(unittest.TestCase):
    def setUp(self):
        self.nodes = []
        for port in server_ports:
            self.nodes.append(ServerProcess(port))
        for node in self.nodes:
            node.restart(BLOCK_COMMIT_TIME)
        self.alive()

    def tearDown(self):
        self.alive()  # check that all nodes are still alive
        for node in self.nodes:
            node.kill_if_running()

    def alive(self):
        for node in self.nodes:
            self.assertTrue(node.check_process_alive())
            self.assertTrue(node.ping())

    def test_a_server_spinsup(self):
        self.alive()
        global POINTS; POINTS += 1

    def test_f_test_genesis_block(self):
        self.assertTrue(self.nodes[0].genesis())
        commit()
        stagger()
        dumps = [n.dump() for n in self.nodes]
        TestsUtils.checkStateEqualForAll(self, *[d['state'] for d in dumps])
        TestsUtils.checkChainEqualForAll(self, *[d['chain'] for d in dumps])
        one = dumps[0]
        self.assertTrue(one['state'] == {'A': 10000})
        self.assertTrue(len(one['chain']) == 1)
        TestsUtils.checkBlockBasic(self, one['chain'][0], 1, server_ports[0], '0xfeedcafe')
        self.assertTrue(len(one.get('transactions', [])) == 0)
        global POINTS; POINTS += 2

    def test_j_block_RR(self):
        self.assertTrue(self.nodes[0].genesis())
        stagger()  # This makes us reside in boundary of commits for all future checks.
        lastHash = '0xfeedcafe'
        for blocknumber in range(0, 2*len(self.nodes) + 2):
            if blocknumber == 0:  # nothing's committed yet
                for node in self.nodes:
                    self.assertTrue(node.dump().get('chain', []) == [])
                commit()
                continue
            dumps = [node.dump() for node in self.nodes]
            TestsUtils.checkChainEqualForAll(self, *[d['chain'] for d in dumps])  # chain is equal for all.
            TestsUtils.checkStateEqualForAll(self, *[d['state'] for d in dumps])  # state is equal for all.
            lastBlock = dumps[0]['chain'][-1]  # pick any last block. we know all are same because of chain check
            TestsUtils.checkBlockBasic(self, lastBlock, blocknumber, server_ports[blocknumber % len(server_ports)], lastHash)
            lastHash = lastBlock['hash']
            commit()
        global POINTS; POINTS += 10
class Test2TxnStateSimple(unittest.TestCase):
    def setUp(self):
        self.nodes = []
        for port in server_ports:
            self.nodes.append(ServerProcess(port))
        for node in self.nodes:
            node.restart(BLOCK_COMMIT_TIME)
        self.alive()

    def tearDown(self):
        self.alive()  # check that all nodes are still alive
        for node in self.nodes:
            node.kill_if_running()

    def alive(self):
        for node in self.nodes:
            self.assertTrue(node.check_process_alive())
            self.assertTrue(node.ping())

    def test_a_basic_txns_are_accepted(self):
        for nodeid, node in enumerate(self.nodes):
            txns = [TestsUtils.txn('s-%d-%d' % (nodeid, i), 'r-%d-%d' % (nodeid, i), i) for i in range(10)]
            for txn in txns: node.send_txn(txn)
            state = node.dump()
            self.assertTrue(len(state.get('chain', [])) == 0)
            self.assertTrue(txns == state.get('pending_transactions', []))
            self.assertTrue(state.get('state', {}) == {})
        global POINTS; POINTS += 2

    def test_e_basic_txns_are_committed(self):
        # Start and stagger
        self.nodes[0].genesis()
        stagger()
        last_hash = '0xfeedcafe'
        commit()
        self.nodes[1].send_txn(TestsUtils.txn('A', 'B', 5000))
        oneDump = self.nodes[1].dump()
        self.assertTrue(oneDump['pending_transactions'] == [TestsUtils.txn('A', 'B', 5000)])
        TestsUtils.checkBlockBasic(self, oneDump['chain'][0], 1, self.nodes[0], last_hash); last_hash = oneDump['chain'][-1]['hash']
        commit()
        dumps = [n.dump() for n in self.nodes]
        TestsUtils.checkChainEqualForAll(self, *[d['chain'] for d in dumps])
        TestsUtils.checkStateEqualForAll(self, *[d['state'] for d in dumps])
        self.assertTrue(all(len(d['pending_transactions']) == 0 for d in dumps))
        self.assertTrue(dumps[0]['state'] == {'A': 5000, 'B': 5000})
        TestsUtils.checkBlockBasic(self, dumps[0]['chain'][1], 2, self.nodes[1], last_hash); last_hash = dumps[0]['chain'][-1]['hash']
        self.nodes[2].send_txn(TestsUtils.txn('B', 'C', 1000))
        self.nodes[2].send_txn(TestsUtils.txn('B', 'A', 1000))
        commit()
        dumps = [n.dump() for n in self.nodes]
        TestsUtils.checkChainEqualForAll(self, *[d['chain'] for d in dumps])
        TestsUtils.checkStateEqualForAll(self, *[d['state'] for d in dumps])
        self.assertTrue(all(len(d['pending_transactions']) == 0 for d in dumps))
        self.assertTrue(dumps[0]['state'] == {'A': 6000, 'B': 3000, 'C': 1000})
        TestsUtils.checkBlockBasic(self, dumps[0]['chain'][2], 3, self.nodes[2], last_hash); last_hash = dumps[0]['chain'][-1]['hash']
        global POINTS; POINTS += 5

    def test_f_txns_are_aborted(self):
        self.nodes[0].genesis()
        stagger()
        commit()
        self.nodes[1].send_txn(TestsUtils.txn('A', 'B', 5000))
        commit()
        # Should fail because A does not have enough money left
        self.nodes[2].send_txn(TestsUtils.txn('A', 'B', 5200))
        commit()
        dumps = [n.dump() for n in self.nodes]
        TestsUtils.checkChainEqualForAll(self, *[d['chain'] for d in dumps])
        TestsUtils.checkStateEqualForAll(self, *[d['state'] for d in dumps])
        self.assertTrue(dumps[2]['pending_transactions'] == [TestsUtils.txn('A', 'B', 5200)])
        self.assertTrue(dumps[2]['state'] == {'A': 5000, 'B': 5000})
        self.nodes[0].send_txn(TestsUtils.txn('C', 'A', 1))
        commit()
        dumps = [n.dump() for n in self.nodes]
        TestsUtils.checkChainEqualForAll(self, *[d['chain'] for d in dumps])
        TestsUtils.checkStateEqualForAll(self, *[d['state'] for d in dumps])
        self.assertTrue(dumps[0]['pending_transactions'] == [TestsUtils.txn('C', 'A', 1)])
        self.assertTrue(dumps[0]['state'] == {'A': 5000, 'B': 5000})
        global POINTS; POINTS += 5

    def test_o_txns_are_retried(self):
        self.nodes[0].genesis()
        stagger()
        commit()  # 0 committed
        self.nodes[1].send_txn(TestsUtils.txn('B', 'C', 1000))
        commit()  # 1 committed
        commit()  # 2 committed
        self.nodes[0].send_txn(TestsUtils.txn('C', 'A', 1000))
        commit()  # 0 committed
        commit()  # 1 committed
        self.nodes[2].send_txn(TestsUtils.txn('A', 'B', 5000))
        commit()  # 2 committed
        dumps = [n.dump() for n in self.nodes]
        TestsUtils.checkChainEqualForAll(self, *[d['chain'] for d in dumps])
        TestsUtils.checkStateEqualForAll(self, *[d['state'] for d in dumps])
        self.assertTrue(dumps[2]['pending_transactions'] == [])
        self.assertTrue(dumps[2]['state'] == {'A': 5000, 'B': 5000})
        self.assertTrue(dumps[2]['chain'][-1]['transactions'] == [TestsUtils.txn('A', 'B', 5000)])
        commit()  # 0 committed
        commit()  # 1 committed
        dumps = [n.dump() for n in self.nodes]
        TestsUtils.checkChainEqualForAll(self, *[d['chain'] for d in dumps])
        TestsUtils.checkStateEqualForAll(self, *[d['state'] for d in dumps])
        self.assertTrue(dumps[1]['pending_transactions'] == [])
        self.assertTrue(dumps[1]['state'] == {'A': 5000, 'B': 4000, 'C': 1000})
        self.assertTrue(dumps[1]['chain'][-1]['transactions'] == [TestsUtils.txn('B', 'C', 1000)])
        commit()  # 2 committed
        commit()  # 0 committed
        dumps = [n.dump() for n in self.nodes]
        TestsUtils.checkChainEqualForAll(self, *[d['chain'] for d in dumps])
        TestsUtils.checkStateEqualForAll(self, *[d['state'] for d in dumps])
        self.assertTrue(dumps[0]['pending_transactions'] == [])
        self.assertTrue(dumps[0]['state'] == {'A': 6000, 'B': 4000, 'C': 0})
        self.assertTrue(dumps[0]['chain'][-1]['transactions'] == [TestsUtils.txn('C', 'A', 1000)])
        global POINTS; POINTS += 6

    def test_u_invalid_txns_are_filtered(self):
        self.nodes[0].genesis()
        stagger()
        commit()
        self.nodes[1].send_txn(TestsUtils.txn('C', 'D', 201))
        self.nodes[1].send_txn(TestsUtils.txn('A', 'B', 2000))
        self.nodes[1].send_txn(TestsUtils.txn('D', 'E', 21))
        commit()
        dumps = [n.dump() for n in self.nodes]
        TestsUtils.checkChainEqualForAll(self, *[d['chain'] for d in dumps])
        TestsUtils.checkStateEqualForAll(self, *[d['state'] for d in dumps])
        self.assertTrue(dumps[1]['pending_transactions'] == [TestsUtils.txn('C', 'D', 201), TestsUtils.txn('D', 'E', 21)])
        self.assertTrue(dumps[0]['state'] == {'A': 8000, 'B': 2000})
        self.assertTrue(dumps[0]['chain'][-1]['transactions'] == [TestsUtils.txn('A', 'B', 2000)])
        global POINTS; POINTS += 3
class Tests3UpdateableState(unittest.TestCase):
    def setUp(self):
        self.nodes = []
        for port in server_ports:
            self.nodes.append(ServerProcess(port))
        for node in self.nodes:
            node.restart(BLOCK_COMMIT_TIME)
        self.alive()

    def tearDown(self):
        self.alive()  # check that all nodes are still alive
        for node in self.nodes:
            node.kill_if_running()

    def alive(self):
        for node in self.nodes:
            self.assertTrue(node.check_process_alive())
            self.assertTrue(node.ping())

    def test_a_simple_state_updates(self):
        self.nodes[0].genesis()
        stagger()
        commit()
        self.nodes[1].send_txn(TestsUtils.txn('A', 'B', 2500))
        self.nodes[1].send_txn(TestsUtils.txn('A', 'B', 3000))
        self.nodes[1].send_txn(TestsUtils.txn('A', 'C', 550))
        self.nodes[1].send_txn(TestsUtils.txn('A', 'C', 2800))
        self.nodes[1].send_txn(TestsUtils.txn('A', 'B', 1000))
        self.nodes[1].send_txn(TestsUtils.txn('A', 'C', 550))
        commit()
        dumps = [n.dump() for n in self.nodes]
        TestsUtils.checkChainEqualForAll(self, *[d['chain'] for d in dumps])
        TestsUtils.checkStateEqualForAll(self, *[d['state'] for d in dumps])
        self.assertTrue(dumps[1]['pending_transactions'] == [TestsUtils.txn('A', 'C', 2800)])
        self.assertTrue(dumps[1]['state'] == {'A': 2400, 'B': 6500, 'C': 1100})
        self.assertTrue(dumps[1]['chain'][-1]['transactions'] == [TestsUtils.txn('A', 'B', 1000), TestsUtils.txn('A', 'B', 2500), TestsUtils.txn('A', 'B', 3000), TestsUtils.txn('A', 'C', 550), TestsUtils.txn('A', 'C', 550)])
        global POINTS; POINTS += 5

    def test_e_check_transitive_validity_changes(self):
        self.nodes[0].genesis()
        stagger()
        commit()
        self.nodes[1].send_txn(TestsUtils.txn('A', 'B', 4000))
        self.nodes[1].send_txn(TestsUtils.txn('B', 'C', 1000))
        self.nodes[1].send_txn(TestsUtils.txn('C', 'A', 500))
        self.nodes[1].send_txn(TestsUtils.txn('A', 'D', 6500))
        commit()
        dumps = [n.dump() for n in self.nodes]
        TestsUtils.checkChainEqualForAll(self, *[d['chain'] for d in dumps])
        TestsUtils.checkStateEqualForAll(self, *[d['state'] for d in dumps])
        self.assertTrue(dumps[1]['pending_transactions'] == [TestsUtils.txn('A', 'D', 6500)])
        self.assertTrue(dumps[1]['state'] == {'A': 6500, 'B': 3000, 'C': 500})
        self.assertTrue(dumps[1]['chain'][-1]['transactions'] == [TestsUtils.txn('A', 'B', 4000), TestsUtils.txn('B', 'C', 1000), TestsUtils.txn('C', 'A', 500)])
        # We don't expect you to make multiple passes to check which all transactions go in. That becomes an algorithmically hard problem.
        # Systemsy way to solve this is pick an ordering, validate txns based on ordering. And then retry during next block mining. Eventually, txn will commit
        # Also, in real world systems, such a transaction is available with multiple nodes all of whom will be trying to include it in their block.
        commit()  # 2
        commit()  # 0
        commit()  # 1
        dumps = [n.dump() for n in self.nodes]
        TestsUtils.checkChainEqualForAll(self, *[d['chain'] for d in dumps])
        TestsUtils.checkStateEqualForAll(self, *[d['state'] for d in dumps])
        self.assertTrue(dumps[1]['pending_transactions'] == [])
        self.assertTrue(dumps[1]['state'] == {'A': 0, 'B': 3000, 'C': 500, 'D': 6500})
        self.assertTrue(dumps[1]['chain'][-1]['transactions'] == [TestsUtils.txn('A', 'D', 6500)])
        global POINTS; POINTS += 7

    def test_i_check_eventual_validity(self):
        self.nodes[0].genesis()
        stagger()
        commit()
        self.nodes[1].send_txn(TestsUtils.txn('C', 'A', 1500))
        self.nodes[1].send_txn(TestsUtils.txn('B', 'C', 2000))
        self.nodes[2].send_txn(TestsUtils.txn('A', 'B', 2500))
        commit()  # 1 // empty
        commit()  # 2 // will have A->B 2500
        commit()  # 0 // empty
        commit()  # 1 // should trigger interesting stuff.
        dumps = [n.dump() for n in self.nodes]
        TestsUtils.checkChainEqualForAll(self, *[d['chain'] for d in dumps])
        TestsUtils.checkStateEqualForAll(self, *[d['state'] for d in dumps])
        self.assertTrue(dumps[1]['pending_transactions'] == [])
        self.assertTrue(dumps[1]['state'] == {'A': 9000, 'B': 500, 'C': 500})
        self.assertTrue(dumps[1]['chain'][-1]['transactions'] == [TestsUtils.txn('B', 'C', 2000), TestsUtils.txn('C', 'A', 1500)])
        global POINTS; POINTS += 6
class Tests4SemanticValidations(unittest.TestCase):
    def setUp(self):
        self.nodes = []
        for port in server_ports:
            self.nodes.append(ServerProcess(port))
        for node in self.nodes:
            node.restart(BLOCK_COMMIT_TIME)
        self.alive()

    def tearDown(self):
        self.alive()  # check that all nodes are still alive
        for node in self.nodes:
            node.kill_if_running()

    def alive(self):
        for node in self.nodes:
            self.assertTrue(node.check_process_alive())
            self.assertTrue(node.ping())

    def test_a_txn_invalid_drop(self):
        self.assertFalse(self.nodes[0].send_txn({}))
        self.assertFalse(self.nodes[0].send_txn({'sender': 'A'}))
        self.assertFalse(self.nodes[0].send_txn({'sender': 'A', 'recipient': 'B'}))
        global POINTS; POINTS += 1

    def test_e_correct_blocks(self):
        prev = '0xfeedcafe'
        block = TestsUtils.block(1, [], prev, server_ports[0]); prev = block['hash']
        for node in self.nodes:
            self.assertTrue(node.send_block(block))
        txns = [TestsUtils.txn('A', 'B', 2500)]
        block = TestsUtils.block(2, txns, prev, server_ports[1]); prev = block['hash']
        for node in self.nodes:
            self.assertTrue(node.send_block(block))
        global POINTS; POINTS += 1

    def test_f_incorrect_blocks_prev_hash(self):
        # incorrect previous hash - genesis
        prev = '0xincorrect'
        block = TestsUtils.block(1, [], prev, server_ports[0]); prev = block['hash']
        self.assertFalse(self.nodes[2].send_block(block))
        # incorrect previous hash - normal
        prev = '0xfeedcafe'
        block = TestsUtils.block(1, [], prev, server_ports[0]); prev = block['hash']
        self.assertTrue(self.nodes[2].send_block(block))
        block = TestsUtils.block(2, [], prev, server_ports[1], '0xtamperedHash'); prev = block['hash']
        self.assertFalse(self.nodes[2].send_block(block))
        global POINTS; POINTS += 2

    def test_g_incorrect_blocks_miner(self):
        # Incorrect miner -- genesis
        prev = '0xfeedcafe'
        block = TestsUtils.block(1, [], prev, server_ports[1]); prev = block['hash']
        self.assertFalse(self.nodes[2].send_block(block))
        # incorrect miner - normal
        prev = '0xfeedcafe'
        block = TestsUtils.block(1, [], prev, server_ports[0]); prev = block['hash']
        self.assertTrue(self.nodes[2].send_block(block))
        block = TestsUtils.block(2, [], prev, server_ports[2]); prev = block['hash']
        self.assertFalse(self.nodes[2].send_block(block))
        global POINTS; POINTS += 2

    def test_h_incorrect_block_number(self):
        # Incorrect block number -- genesis
        prev = '0xfeedcafe'
        block = TestsUtils.block(2, [], prev, server_ports[0]); prev = block['hash']
        self.assertFalse(self.nodes[2].send_block(block))
        # Incorrect block number -- normal
        prev = '0xfeedcafe'
        block = TestsUtils.block(1, [], prev, server_ports[0]); prev = block['hash']
        self.assertTrue(self.nodes[2].send_block(block))
        block = TestsUtils.block(1, [], prev, server_ports[1]); prev = block['hash']
        self.assertFalse(self.nodes[2].send_block(block))
        block = TestsUtils.block(3, [], prev, server_ports[1]); prev = block['hash']
        self.assertFalse(self.nodes[2].send_block(block))
        global POINTS; POINTS += 2

    def test_i_sus_miner(self):
        prev = '0xfeedcafe'
        block = TestsUtils.block(1, [], prev, 1337); prev = block['hash']  # random person is trying to commit a block.
        self.assertFalse(self.nodes[2].send_block(block))
        global POINTS; POINTS += 1

    def test_j_invalid_txns_in_block(self):
        self.nodes[0].genesis()
        stagger()
        commit()
        # Now A->10000
        prev = self.nodes[0].dump()['chain'][-1]['hash']
        block = TestsUtils.block(2, [TestsUtils.txn('A', 'B', 20000)], prev, server_ports[1])
        self.assertFalse(self.nodes[0].send_block(block))
        block = TestsUtils.block(2, [TestsUtils.txn('C', 'A', 200)], prev, server_ports[1])
        self.assertFalse(self.nodes[0].send_block(block))
        block = TestsUtils.block(2, [TestsUtils.txn('A', 'B', 6000), TestsUtils.txn('A', 'C', 6000)], prev, server_ports[1])
        self.assertFalse(self.nodes[0].send_block(block))
        block = TestsUtils.block(2, [TestsUtils.txn('A', 'B', 6000), TestsUtils.txn('B', 'C', 3000)], prev, server_ports[1])
        self.assertTrue(self.nodes[0].send_block(block))
        state = self.nodes[0].dump()['state']
        self.assertTrue(state == {'A': 4000, 'B': 3000, 'C': 3000})
        global POINTS; POINTS += 4
class Tests5History(unittest.TestCase):
    def setUp(self):
        self.nodes = []
        for port in server_ports:
            self.nodes.append(ServerProcess(port))
        for node in self.nodes:
            node.restart(BLOCK_COMMIT_TIME)
        self.alive()

    def tearDown(self):
        self.alive()
        for node in self.nodes:
            node.kill_if_running()

    def alive(self):
        for node in self.nodes:
            self.assertTrue(node.check_process_alive())
            self.assertTrue(node.ping())

    def test_a_history_genesis(self):
        self.nodes[0].genesis()
        stagger()
        commit()
        self.assertTrue(self.nodes[0].history('A') == [[1, 10000]])
        global POINTS; POINTS += 1

    def test_b_history_missing(self):
        self.assertTrue(self.nodes[0].history('404') == [])
        global POINTS; POINTS += 1

    def test_c_history_simple(self):
        self.nodes[0].genesis()
        stagger()
        commit()  # 0
        self.nodes[1].send_txn(TestsUtils.txn('A', 'B', 5000))
        self.nodes[2].send_txn(TestsUtils.txn('B', 'C', 1500))
        self.nodes[0].send_txn(TestsUtils.txn('C', 'A', 100))
        commit()  # 1
        commit()  # 2
        commit()  # 0
        historiesA = [node.history('A') for node in self.nodes]
        historiesB = [node.history('B') for node in self.nodes]
        historiesC = [node.history('C') for node in self.nodes]
        # Everyone returned the same thing at least.
        self.assertTrue(all(h == historiesA[0] for h in historiesA))
        self.assertTrue(all(h == historiesB[0] for h in historiesB))
        self.assertTrue(all(h == historiesC[0] for h in historiesC))
        self.assertTrue(historiesA[0] == [[1, 10000], [2, -5000], [4, 100]])
        self.assertTrue(historiesB[0] == [[2, 5000], [3, -1500]])
        self.assertTrue(historiesC[0] == [[3, 1500], [4, -100]])
        global POINTS; POINTS += 3

    def test_e_history_aggregated_in_blocks(self):
        self.nodes[0].genesis()
        stagger()
        commit()  # 0
        self.nodes[1].send_txn(TestsUtils.txn('A', 'B', 500))
        self.nodes[1].send_txn(TestsUtils.txn('A', 'D', 100000))
        self.nodes[2].send_txn(TestsUtils.txn('A', 'B', 1500))
        self.nodes[2].send_txn(TestsUtils.txn('B', 'C', 100))
        self.nodes[2].send_txn(TestsUtils.txn('B', 'A', 100))
        commit()  # 1
        commit()  # 2
        historiesA = [node.history('A') for node in self.nodes]
        historiesB = [node.history('B') for node in self.nodes]
        historiesC = [node.history('C') for node in self.nodes]
        # Everyone returned the same thing at least.
        self.assertTrue(all(h == historiesA[0] for h in historiesA))
        self.assertTrue(all(h == historiesB[0] for h in historiesB))
        self.assertTrue(all(h == historiesC[0] for h in historiesC))
        self.assertTrue(historiesA[0] == [[1, 10000], [2, -500], [3, -1400]])
        self.assertTrue(historiesB[0] == [[2, 500], [3, 1300]])
        self.assertTrue(historiesC[0] == [[3, 100]])
        global POINTS; POINTS += 10


if __name__ == '__main__':
    unittest.main(exit=False)
    print("Points: %s" % POINTS)