@phil8192
Created April 3, 2020 21:10
import numpy as np

class C:
    def __init__(self, a, test_x, test_y, pri_key=None):
        self.a = a  # reference to Host A.
        self.test_x = test_x
        self.test_y = test_y
        self.features = test_x.shape[1]
        self.pri_key = pri_key

    def optimise(self, epochs, batch_size, eta, gamma):
        # The global model. Theta = [A's Theta | B's Theta]
        theta = np.zeros(self.features)
        loss = []
        for epoch in range(epochs):
            # Coordinator requests Host A to calculate gradients for a batch.
            # This is a blocking call, resulting in (encrypted) gradient
            # vectors from A and B.
            gradient_a, gradient_b = self.a.gradients(theta)
            # Concatenate the gradients to match the length of Theta.
            gradients = np.concatenate((gradient_a, gradient_b))
            # Decrypt the gradients (using the private key).
            gradients = decrypt(self.pri_key, gradients)
            # Normalise (although this could be done runner side).
            gradients = 1 / batch_size * gradients
            # Update the model weights (assumes weight 0 = bias/intercept).
            # Gamma = regularisation parameter.
            theta = theta - eta * (gradients + gamma * theta)
            # Calculate the loss (using a held-out test set). We could stop
            # training here if the loss begins to climb (early stopping to
            # avoid overfitting).
            loss.append(taylor_loss(theta, self.test_x, self.test_y))
        return theta, loss
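
# The decrypt helper used above is not part of this snippet. A minimal sketch,
# assuming each gradient element is a Paillier ciphertext produced with the
# phe (python-paillier) library, where pri_key.decrypt() recovers the plain
# value:
def decrypt(pri_key, encrypted_vector):
    # Element-wise Paillier decryption back to a plain numpy vector.
    return np.array([pri_key.decrypt(e) for e in encrypted_vector])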
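
# taylor_loss is also defined elsewhere. Sketch under the assumption
# (suggested by the name) that it is the second-order Taylor approximation of
# the logistic loss, log(2) - (1/2) y x.theta + (1/8) (x.theta)^2, averaged
# over the held-out test set, with labels y in {-1, +1}:
def taylor_loss(theta, x, y):
    z = x.dot(theta)
    return np.mean(np.log(2) - 0.5 * y * z + 0.125 * z ** 2)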
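
# Hypothetical usage sketch. HostA, test_x and test_y are not defined in this
# snippet; HostA is assumed to expose gradients(theta), returning encrypted
# partial gradients for its own features and Host B's. Key generation below
# uses the phe (python-paillier) library as an assumption.
if __name__ == "__main__":
    from phe import paillier
    pub_key, pri_key = paillier.generate_paillier_keypair(n_length=1024)
    # a = HostA(..., pub_key=pub_key)                        # hypothetical host runner
    # c = C(a, test_x, test_y, pri_key=pri_key)
    # theta, loss = c.optimise(epochs=50, batch_size=32, eta=0.01, gamma=0.1)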