import json
import os
import sys
import time
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.ticker as mtick
import seaborn as sns
from numba import njit, jit
from loguru import logger as log
from typing import get_type_hints
np.random.seed(0)
MILLION = 1e6
BILLION = 1e9
TRILLION = 1e12
EPS = 1e-6
NUMBA_ARGS = dict(cache=True, fastmath=True, nogil=True)
DEBUG = 'NUMBA_DISABLE_JIT' in os.environ
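# DEBUG is currently unused below; note that setting NUMBA_DISABLE_JIT=1 in the
# environment makes numba fall back to pure Python, which is handy for
# debugging the @njit functions in this file.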
DIR = os.path.dirname(os.path.realpath(__file__))
EMPTY_DIST = np.array((0, 0, 0, 0))
DAYS_IN_MONTH = 30.44
class Gaussian:
def __init__(self, u: float = 0, std: float = 1, min: float = -np.inf,
max: float = np.inf):
"""
:param u: mean
:param std: standard deviation
        :param min: minimum value (inclusive) that samples will be clipped to
        :param max: maximum value (inclusive) that samples will be clipped to

        Note: There may be a better distribution for handling the min and max,
        such as a log-normal, but reasoning about the parameters of a
        log-normal vs. a normal is not nearly as natural, so we're going with
        min and max for now until we find something better.
This could be a problem if we wanted to differentiate through the sim
and learn the ideal settings, but I don't think that's really what
we want to do with this as humans should really be setting the inputs
based on an informed understanding of how the world works.
"""
self.mean = u
self.std = std
self.min = min
self.max = max
def sample(self):
return clip(np.random.normal(loc=self.mean, scale=self.std),
start=self.min, end=self.max)
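    # Illustrative usage (not part of the original gist):
    # Gaussian(u=0.8, std=0.1, min=0, max=1).sample() draws from a normal with
    # mean 0.8 and std 0.1, then clips the draw into [0, 1], i.e. a crude
    # truncated normal.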
class GameOfAgi:
agi_safe_by_default: Gaussian
dollars_agi_safety: Gaussian
dollars_ai: Gaussian
ideal_safety_to_ai_spend_ratio: Gaussian
number_of_originators: Gaussian
originator_ppl: Gaussian
snowden_prob: Gaussian
value_alignment_prob: Gaussian
days_to_replicate: Gaussian
takeoff_in_days: Gaussian
oversight_dollars: Gaussian
ideal_oversight_dollars: Gaussian
ignore_oversight: bool
ignore_spend: bool
ignore_takeoff_and_replication: bool
def __init__(self,
agi_safe_by_default: Gaussian,
number_of_originators: Gaussian,
ppl_per_originator: Gaussian,
snowden_prob: Gaussian,
oversight_dollars: Gaussian,
ideal_oversight_dollars: Gaussian,
ignore_oversight: bool,
value_alignment_prob: Gaussian,
days_to_replicate: Gaussian,
takeoff_in_days: Gaussian,
dollars_ai: Gaussian = None,
dollars_agi_safety: Gaussian = None,
ideal_safety_to_ai_spend_ratio: Gaussian = None,
ignore_spend: bool = False,
ignore_takeoff_and_replication: bool = True):
self.agi_safe_by_default = agi_safe_by_default
self.dollars_agi_safety = dollars_agi_safety
self.dollars_ai = dollars_ai
self.ideal_safety_to_ai_spend_ratio = ideal_safety_to_ai_spend_ratio
self.number_of_originators = number_of_originators
self.originator_ppl = ppl_per_originator
self.snowden_prob = snowden_prob
self.value_alignment_prob = value_alignment_prob
self.days_to_replicate = days_to_replicate
self.takeoff_in_days = takeoff_in_days
self.oversight_dollars = oversight_dollars
self.ideal_oversight_dollars = ideal_oversight_dollars
self.ignore_oversight = ignore_oversight
self.ignore_spend = ignore_spend
self.ignore_takeoff_and_replication = ignore_takeoff_and_replication
def simulate(self, num_samples: int = 100, show_graphs: bool = False):
log.debug('simulating')
start_sim = time.time()
ret = simulate(num_samples, **self.get_simulation_params())
xrisk, corruption_risk, value_alignment_prob = ret
if show_graphs:
# get_bins(xrisk)
# json.dump(xrisk.tolist(), open('xrisk.json', 'w'), indent=2)
# json.dump(dict(xrisk_binned=get_bins(xrisk)), open(f'{DIR}/site/xrisk.json', 'w'), indent=2)
# fig, ax = plt.subplots(figsize=(8, 5))
sns.set_style("ticks", {"xtick.major.size": 10, "ytick.major.size": 10})
g = sns.displot(data=xrisk, binwidth=0.01)
# ax.xaxis.set_major_formatter(mtick.PercentFormatter(1))
# ax.xaxis.set_major_formatter()
g.set(xlabel='X-risk', ylabel='Simulation Count',
title='AGI X-risk Probability Distribution')
# xlabels = ['{:.1f}%'.format(x) for x in g.get_xticks() / 1000]
# g.set_xticklabels(['0%', '20%', '40%', '60%', '80%', '100%'])
# ax.xaxis.set_major_formatter(tck.FormatStrFormatter('%g'))
log.info('xrisk percentiles -----------------')
log.info(f'Percentile xrisk')
log.info(f'1% {round(np.percentile(xrisk, 1), 2)}')
log.info(f'5% {round(np.percentile(xrisk, 5), 2)}')
log.info(f'10% {round(np.percentile(xrisk, 10), 2)}')
log.info(f'25% {round(np.percentile(xrisk, 25), 2)}')
log.info(f'50% {round(np.percentile(xrisk, 50), 2)}')
log.info(f'75% {round(np.percentile(xrisk, 75), 2)}')
log.info(f'90% {round(np.percentile(xrisk, 90), 2)}')
log.info(f'95% {round(np.percentile(xrisk, 95), 2)}')
log.info(f'99% {round(np.percentile(xrisk, 99), 2)}')
log.info('----------------------------------')
log.info(
f'Corruption risk: {round(np.percentile(corruption_risk, 50) * 100, 2)}%')
log.info(f'Adjusted value alignment probability:'
f' {round(np.percentile(value_alignment_prob, 50) * 100, 2)}%')
log.info(f'X-risk: {round(np.mean(xrisk) * 100, 2)}%')
plt.show()
log.debug(f'finished simulating in {time.time() - start_sim}s')
return ret
def get_simulation_params(self):
ret = dict()
types = get_type_hints(type(self))
for prop, value in vars(self).items():
if isinstance(value, Gaussian):
ret[prop + '_dist'] = np.array(
(value.mean, value.std, value.min, value.max))
elif value is None and types[prop] is Gaussian:
ret[prop + '_dist'] = EMPTY_DIST
else:
ret[prop] = value
return ret
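        # For example, with the defaults in game_of_agi() below, the Gaussian
        # attribute snowden_prob becomes the key 'snowden_prob_dist' mapped to
        # np.array([0.002, 0.002, 0.0, 1.0]) ([mean, std, min, max]), while
        # plain bools like ignore_oversight pass through unchanged.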
@njit(**NUMBA_ARGS)
def get_value_alignment_prob(ignore_oversight, oversight_dollars,
ideal_oversight_dollars, value_alignment_prob):
o = 0.5 # Importance of oversight for value alignment
# TODO econ: There's a critical question here as to what role oversight
# will play in fostering value alignment. There's an interplay of variables
# where a time based sim would have to be used to unroll the circular
# dependencies between corruption, alignment, and oversight. Because
# really it's the probability that alignment knowledge will spread from
# whoever obtains it, to whoever originates/replicates AGI in the takeoff
    # window. The simple model here, then, may be that oversight allows for
    # that transfer when the two are not one and the same. The likelihood that
    # a non-originator/replicator discovers alignment is extremely low in my
    # opinion, as safe AGI is a subset of AGI, so to discover it you are
    # almost by definition originating it, short of an implementation-free
    # thought experiment or something crazy like that. So oversight is necessary
# to ensure originators are also implementing safety.
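    # Worked example of the blend below (illustrative, using the defaults in
    # game_of_agi()): with o = 0.5, a sampled value_alignment_prob of 0.8, and
    # an oversight ratio of 0 / $200M = 0, the result is
    # 0.5 * 0.8 + 0.5 * 0 = 0.4, i.e. zero oversight spend halves the chance
    # that alignment knowledge reaches the originators.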
if ignore_oversight:
return value_alignment_prob
else:
r = get_oversight_ratio(oversight_dollars, ideal_oversight_dollars)
return (1 - o) * value_alignment_prob + o * r
@njit(**NUMBA_ARGS)
def simulate(num_samples, agi_safe_by_default_dist, dollars_agi_safety_dist,
dollars_ai_dist, ideal_safety_to_ai_spend_ratio_dist,
number_of_originators_dist, originator_ppl_dist,
snowden_prob_dist, value_alignment_prob_dist,
days_to_replicate_dist, takeoff_in_days_dist,
oversight_dollars_dist, ideal_oversight_dollars_dist,
ignore_oversight, ignore_spend, ignore_takeoff_and_replication):
"""
All args are np arrays that represent distributions with the params [mean, std, min, max]
"""
np.random.seed(0) # Needs to be in numba run code
    # Note: Passing the args in as a numba dict increases runtime 2x
    # ret = List()
ret, corruption_risk_ret, value_alignment_prob_ret, spend_risk_ret = [0.], [
0.], [0.], [0.]
# Initial values allow numba to infer type
ret.clear()
corruption_risk_ret.clear()
value_alignment_prob_ret.clear()
# TODO: Allow sequential iterations so that we can depend on previous step's
# values.
    # TODO: Do these in parallel (need to fix prange for appending in numba)
    for _ in range(num_samples):
agi_safe_by_default = sample(agi_safe_by_default_dist)
oversight_dollars = sample(oversight_dollars_dist)
ideal_oversight_dollars = sample(ideal_oversight_dollars_dist)
takeoff_in_days = sample(takeoff_in_days_dist)
days_to_replicate = sample(days_to_replicate_dist)
value_alignment_prob = get_value_alignment_prob(
ignore_oversight, oversight_dollars, ideal_oversight_dollars,
sample(value_alignment_prob_dist))
corruption_risk = get_corruption_risk(
number_of_originators_dist, snowden_prob_dist, originator_ppl_dist,
ignore_oversight, oversight_dollars, ideal_oversight_dollars,
takeoff_in_days, days_to_replicate, ignore_takeoff_and_replication)
# log.info(f'corruption risk {corruption_risk}')
if np.random.random() < corruption_risk:
# AGI originators are corrupt. Safety work now moot.
# We hope AGI will disobey creators and try to benefit all.
# We also don't assume human-engineered value alignment is zero.
xrisk = 1 - agi_safe_by_default
elif ignore_spend:
# Not corrupt and not considering AGI safety spend
ai_intrinsically_unsafe = 1 - agi_safe_by_default
prob_we_make_safe = ai_intrinsically_unsafe * value_alignment_prob
xrisk = ai_intrinsically_unsafe - prob_we_make_safe
xrisk = clip(xrisk)
# This could be done also with the following, which may be simpler in some ways
# especially if there are more interactions to work out.
# if np.random.random() > ai_intrinsically_safe:
# # AGI is not intrinsically safe, we must engineer value alignment
# xrisk = value_alignment_prob
# else:
# # AGI intrinsically safe, no risk
# xrisk = 0
else:
dollars_agi_safety = sample(dollars_agi_safety_dist)
dollars_ai = sample(dollars_ai_dist)
# Assume more money is spent on AI than AGI safety
# assert dollars_ai > dollars_agi_safety
ideal_safety_to_ai_spend_ratio = sample(
ideal_safety_to_ai_spend_ratio_dist)
xrisk, value_alignment_prob = get_xrisk_considering_spend(
dollars_agi_safety=dollars_agi_safety,
dollars_ai=dollars_ai,
ideal_safety_to_ai_spend_ratio=ideal_safety_to_ai_spend_ratio,
ai_intrinsically_safe=agi_safe_by_default,
value_alignment_prob=value_alignment_prob)
ret.append(xrisk)
corruption_risk_ret.append(corruption_risk)
value_alignment_prob_ret.append(value_alignment_prob)
return (np.array(ret),
np.array(corruption_risk_ret),
np.array(value_alignment_prob_ret))
@njit(**NUMBA_ARGS)
def sample(gaussian):
    return clip(np.random.normal(loc=gaussian[0], scale=gaussian[1]),
                start=gaussian[2], end=gaussian[3])
@njit(**NUMBA_ARGS)
def get_xrisk_considering_spend(dollars_agi_safety: float, dollars_ai: float,
ideal_safety_to_ai_spend_ratio: float,
ai_intrinsically_safe: float,
value_alignment_prob: float):
"""
Returns xrisk based on AGI vs total AI spending
:param dollars_agi_safety: Global dollars spent on AGI safety
:param dollars_ai: Global dollars spent on AI
:param ideal_safety_to_ai_spend_ratio: Ideal dollars spent on AGI / dollars spent on AI
:param ai_intrinsically_safe: Probability that AGI will be safe regardless of what we do
:param value_alignment_prob: Probability that we achieve coherent extrapolated
volition in AGI before considering spend
:return:
"""
spend_ratio = dollars_agi_safety / (dollars_ai + EPS)
# Assume we spend less than ideal
spend_ratio = min(spend_ratio, ideal_safety_to_ai_spend_ratio)
    # It's not realistic to assume we will spend less on AI than on safety.
# assert spend_ratio < 1
# Assumption here is that there's a linear relationship between spending
# and safety up to ideal ratio - i.e. y = min(x / C, 1)
# In actuality, there's probably a sigmoidal relationship between spending
# and safety with diminishing returns on the high end and who knows what
# on the low end, but I don't think we need to worry about the extremes
# right now.
actual_to_ideal_ratio = spend_ratio / (ideal_safety_to_ai_spend_ratio + EPS)
ai_intrinsically_unsafe = (1 - ai_intrinsically_safe)
# There's some portion of the intrinsically unsafe probability
# that we can make safe by spending $
value_alignment_prob = actual_to_ideal_ratio * value_alignment_prob
prob_we_make_safe = ai_intrinsically_unsafe * value_alignment_prob
prob_safe = ai_intrinsically_safe + prob_we_make_safe
risk = 1 - prob_safe
return risk, value_alignment_prob
@njit(**NUMBA_ARGS)
def arithmetic_series_sum(a, b, step):
"""
Sum of arithmetic series
"""
num_terms = 1 + (b - a) // step
last_term = b - (b - a) % step
sum_of_extrema = a + last_term
    # n / 2 * (first + last), computed exactly rather than with floor division
    ret = num_terms * sum_of_extrema / 2
return ret
@njit(**NUMBA_ARGS)
def get_corruption_risk(number_of_originators_dist, snowden_prob_dist,
originator_ppl_dist, ignore_oversight,
oversight_dollars,
ideal_oversight_dollars, takeoff_in_days,
days_to_replicate, ignore_takeoff_and_replication):
# Should be some combination of
# - number of simultaneous originators,
# - Takeoff time
# - How open source the originators are (the more open, the less corrupt you would assume)
# - How much of AGI dev depends on HW - i.e. even if software open, GPT-3 wouldn't run on
# but a handful of setups in the world. Custom ASICS could also mean power usage
# decreases and number of possible originators decreases.
# - Controllability
# - Internal checks and balances
num_originators = int(round(sample(number_of_originators_dist)))
if not ignore_takeoff_and_replication and days_to_replicate < takeoff_in_days:
# Increase replicators by 1/2 current originators every replication cycle.
# So this assumes a virality (R) of 1.5
# TODO: Need a closed form geometric series sum paired with the
# arithmetic series sum to model virality. Right now this underestimates
# risk which doesn't matter much as the risk is already insanely high.
# The code below needs to be transformed into a closed form to execute
# in a reasonable amount of time.
V = 0.5 # V = R - 1
# while days_left > 0:
# time_weighting = days_left / takeoff_in_days
# num_originators += V * num_originators * time_weighting
# days_left -= days_to_replicate
# log.info(f'days left: {days_left}')
# TODO: Take into account probability that an originator is smaller,
        # has less oversight, and is more likely to be a military as the number
        # of originators grows. This assumes Google and OpenAI are, and remain,
        # the leaders in developing AGI.
days_left = takeoff_in_days - days_to_replicate
        # Create a closed-form weighting factor for each replication that
        # decreases as we get closer to the singularity, i.e. if there are only
        # 2 days left then we would only add
        # 2 / takeoff_in_days * V * num_originators to num_originators.
        # Since we do this for every replication, we need to add the numbers
        # between (takeoff_in_days - days_to_replicate) / takeoff_in_days and
        # days_to_replicate / takeoff_in_days; call this total_weighting.
if days_to_replicate >= 1:
total_weighting = arithmetic_series_sum(days_to_replicate, days_left,
step=days_to_replicate)
total_weighting *= 2 * V / takeoff_in_days
num_originators += num_originators * total_weighting
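        # Hedged sketch (an assumption, not in the original model): with n full
        # replication cycles, n = days_left // days_to_replicate, un-weighted
        # viral growth would be the geometric closed form
        # num_originators * (1 + V) ** n; combining that with the time
        # weighting above is the open TODO.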
if not ignore_oversight:
# The basic effect of oversight is adding checks and balances, similar
# to what competition does.
ov = get_oversight_ratio(oversight_dollars, ideal_oversight_dollars)
num_originators += ov * num_originators
    # Rapidly (power-law) decrease risk with the number of originators, where
    # two originators yield ~0.05 risk and one originator is 1.0 risk.
ret = num_originators ** -4.3 # TODO: Parameterize this exponent?
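    # For reference: 2 ** -4.3 ≈ 0.051, 3 ** -4.3 ≈ 0.0089, 5 ** -4.3 ≈ 0.001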
# For each originator, reduce corruption risk if a Snowden/Houtermans like
# character exists there.
originator_ppl = sample(originator_ppl_dist)
num_snowdens_per_originator = sample(snowden_prob_dist) * originator_ppl
if not np.isnan(num_originators):
for _ in range(int(num_originators)):
# Exponentially decrease risk by 1% for each snowden in org
ret *= 0.99 ** num_snowdens_per_originator # TODO: Parameterize this?
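            # e.g. with the defaults in game_of_agi() (200 people per
            # originator, snowden_prob of 1/500), that's ~0.4 expected Snowdens
            # per originator, so each originator multiplies risk by
            # 0.99 ** 0.4 ≈ 0.996.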
# if not ignore_oversight:
# p_not_corrupt = 1 - ret
# p_not_corrupt *= get_oversight_ratio(oversight_dollars, ideal_oversight_dollars)
# ret = 1 - p_not_corrupt
# TODO: Consider probability that absolute power does not corrupt.
# i.e. we have some beneficent dictator scenario.
# - Looking at history, there are a handful of benevolent dictators, so
# perhaps a very small probability could be ascribed to this
# happening in the traditional sense. However, AGI represents
# unprecedented power by orders of magnitude. So if AGI is not
# intrinsically benevolent, and is controlled by a corrupt leader,
# I don't think that leader would be able to handle it. They would
# make some catastrophic mistake by virtue of simply being a single
# organism evolved to desire control of its environment and motivated
# at a fundamental level by its individualistic concerns.
return ret
@njit(**NUMBA_ARGS)
def get_oversight_ratio(oversight_dollars, ideal_oversight_dollars):
return min(1, oversight_dollars / (EPS + ideal_oversight_dollars))
@njit(**NUMBA_ARGS)
def clip(x, start: float = 0, end: float = 1):
return min(max(x, start), end)
@njit(**NUMBA_ARGS)
def logit(x, shift, slope):
return 1 / (1 + np.exp(-(shift + slope * x)))
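    # logit() is currently unused. One way it might be applied (an assumption,
    # not something the original does) is as the sigmoidal spend-to-safety
    # mapping mentioned in get_xrisk_considering_spend, e.g.:
    #     effectiveness = logit(actual_to_ideal_ratio, shift=-3, slope=6)
    # which gives ~0.05 effectiveness at zero spend and ~0.95 at the ideal
    # ratio, instead of the current linear actual_to_ideal_ratio.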
def main():
start = time.time()
log.info('Game of AGI')
game_of_agi()
log.debug(f'Ran for {round(time.time() - start, 2)}s')
def game_of_agi():
ai_intrinsically_safe = Gaussian(u=0.95, std=0.1, min=0, max=1)
value_alignment_prob = Gaussian(u=0.8, std=0.1, min=0, max=1)
ignore_takeoff_and_replication = False
takeoff_in_days = Gaussian(u=365, std=60, min=0)
days_to_replicate = Gaussian(u=150, std=50, min=1)
number_of_originators = Gaussian(u=1, std=0, min=1)
ppl_per_originator = Gaussian(u=200, std=5000, min=1)
snowden_prob = Gaussian(u=1 / 500, std=1 / 500, min=0, max=1)
ignore_oversight = False
oversight_dollars = Gaussian(u=0 * MILLION, std=5 * MILLION, min=0)
ideal_oversight_dollars = Gaussian(u=200 * MILLION, std=200 * MILLION, min=0)
ignore_spend = False
dollars_agi_safety = Gaussian(u=10 * MILLION, std=100 * MILLION, min=0)
dollars_ai = Gaussian(u=100 * BILLION, std=100 * MILLION, min=0)
ideal_safety_to_ai_spend_ratio = Gaussian(u=0.01, std=0.0, min=0.01, max=1)
# TODO: Temper the corruption risk - right now corruption == x-risk,
# however there is some chance that a corrupt lab is only slightly
# corrupt. i.e. they are somewhat dishonest in how they are using AI and
# how strong their AI is. However, they may still try to make decisions which
    # benefit everyone. They may have compassion for everyone, and because of
    # that may not want to share control of the tech they have developed with
    # people who may not share the same compassion. The flaw in this logic is
    # that humans generally become corrupted and unempathetic when they gain
    # power, as their personal concerns become further distanced from those of
    # people with less power. This makes alignment with others' concerns harder
    # and harder, as it requires more dissociation from their personal
    # circumstances, i.e. it becomes increasingly hard to put themselves in
    # less powerful people's shoes.
# TODO: When considering multiple human controlled AGIs we should try to understand
    # the interplay between good and bad AGI. This is because destruction is relatively easy,
# and pernicious AGI must therefore be counterbalanced with
# disproportionately greater resources allocated to benevolent AGI.
# TODO: Add probability that oversight slows less corrupt labs, and gives advantage to corrupt labs without oversight. Also oversight would be limited to the n largest labs due to resource constraints of the oversight agency.
goa = GameOfAgi(
value_alignment_prob=value_alignment_prob,
# Oversight could perhaps be modeled better in a time based sim, but time variance
# not enough to matter in my estimation.
ignore_oversight=ignore_oversight,
oversight_dollars=oversight_dollars,
ideal_oversight_dollars=ideal_oversight_dollars,
ignore_spend=ignore_spend,
dollars_agi_safety=dollars_agi_safety,
dollars_ai=dollars_ai,
ideal_safety_to_ai_spend_ratio=ideal_safety_to_ai_spend_ratio,
agi_safe_by_default=ai_intrinsically_safe,
number_of_originators=number_of_originators,
ppl_per_originator=ppl_per_originator,
snowden_prob=snowden_prob,
ignore_takeoff_and_replication=ignore_takeoff_and_replication,
takeoff_in_days=takeoff_in_days,
days_to_replicate=days_to_replicate,
# TODO: Add weaponization, rogue use, and openness (though openness is already implicit in days_to_replicate)
# https://en.wikipedia.org/wiki/Corruption_Perceptions_Index#Rankings
# The more corrupt the host country, the more likely that the
# government will take control of the AI developed there.
# The main effect of this will be fewer independent AIs.
# Therefore if corruption is high enough in a host country, we can
        # assume the originator will be roughly that corrupt as well.
# A likely argument for host countries to take over labs will be
# made in the name of defense, even in relatively less corrupt countries.
# This makes international oversight that much more important.
# Snowdens can still play a role here. The risk
# reduction from multiple groups controlling AGIs will be lessened
# in the case of a corrupt government host, essentially consolidating labs
# in that country.
        # One intermediate variable this affects is the likelihood of
        # scientists and engineers retaining control of their creation vs. the
        # government assuming control. Looking back at the atomic bomb, we see
        # that early on the U.S. govt told scientists they would not be in
        # control of when and where bombs were dropped. Whereas
# scientists, engineers, and the private sector do maintain some
# control over nuclear energy. So we can
# expect defense related AI to be controlled by government, and
# other types of AI to be less so. Scientists maintaining control
# with government oversight may be an ideal, similar to nuclear power.
# TODO: Consider also using/combining with democracy index
# corruption_rank_of_host_country=Gaussian(u=50, std=100, min=1, max=179),
# TODO: We need to add something around how humanlike AGI is
        # and whether its foundation is humanlike (i.e. GPT / neuralink) or
# is trained in virtual / learned environments (i.e. Clune's generative teaching networks etc...)
# Then we can get into tradeoffs between human vices and the uncertainty that
# comes from less human like behavior.
# With the above we can assume some amount of time for a successor to come and
# reduce corruption, etc... This should be combined with the takeoff rate (some doubling time)
# to calculate the risk/reward of being open.
# origination_is_open_source=Gaussian(u=0.01, std=0.02, min=0, max=1),
# origination_is_open_data=Gaussian(u=0.001, std=0.002, min=0, max=1),
# origination_is_open_hardware=Gaussian(u=0.001, std=0.002, min=0, max=1),
# The smaller this is, the more likely that many groups will
# originate AI around the same time. While doing so decreases
# internal checks and balances, it increases external ones (i.e.
# good AGIs can keep bad AGIs in check.) Also, due to the
# relative ease of destruction vs creation, we likely need many more
# good AGIs than bad.
# This is redundant to number_of_originators currently
# dollars_to_originate=Gaussian(u=100 * BILLION, std=25 * BILLION, min=0),
)
return goa.simulate(num_samples=10 ** 6, show_graphs=True)
# @njit(**NUMBA_ARGS)
def get_bins(xrisk, num_bins=100):
"""
Returns list of [x_risk, count(x_risk)]
IDEA: Perhaps use np.histogram which is more sophisticated
"""
    x_min = np.min(xrisk)
    step_size = (np.max(xrisk) - x_min) / num_bins
    ret = []
    xrisk.sort()
    for i in range(num_bins):
        lo_x = x_min + i * step_size
        hi_x = x_min + (i + 1) * step_size
lo_y = np.searchsorted(xrisk, lo_x)
hi_y = np.searchsorted(xrisk, hi_x)
ret.append([float(lo_x + (hi_x - lo_x) / 2), float(hi_y - lo_y)])
return ret
if __name__ == '__main__':
main()
"""
42% x risk with 100x less spending on safety.
If there is a lot of safety spending and AI originators still are corrupt,
then there's likely not going to be much benefit to safety. There could be
some advancements around federated AI (https://deepai.org/publication/safe-artificial-general-intelligence-via-distributed-ledger-technology), but it seems unlikely that work could be effectively
split up across institutions like this.
"""