Skip to content

Instantly share code, notes, and snippets.

@ashok-arjun
Last active February 5, 2022 12:51
Show Gist options
  • Save ashok-arjun/dc5b717429b846a5c60f6e3c870b33bc to your computer and use it in GitHub Desktop.
"""CODE FOR ANALYSIS"""
def eigenDecompositionAnalysis(self, b1_model, X_train_cumuls, Y_train_cumuls, T_train_cumuls,
                               X_valid_cumuls, Y_valid_cumuls, T_valid_cumuls, X_protoset_cumuls,
                               Y_protoset_cumuls, T_protoset_cumuls,
                               iteration_index, start_iter, end_iter, order_list, device,
                               num_classes, num_phases, model_list, threshold=0.9):
    """Analyse per-class feature-eigenvector drift between two incremental phases.

    For each class of the previous task (``iteration_index - 1``) this method
    extracts features with the previous and the current model, standardises
    them, eigendecomposes their covariance matrices, sorts eigenvectors by
    descending eigenvalue, and plots the cosine similarity (rescaled to
    [0, 1]) between corresponding eigenvectors of the two models.

    NOTE(review): several parameters (``end_iter``, ``num_classes``,
    ``num_phases``, ``threshold`` and the protoset arrays) are accepted for
    interface compatibility but never used in the analysis below.
    """

    # --- small tensor helpers -------------------------------------------------
    def torch_cat(main_array, new_array):
        """Concatenate, treating a ``None`` accumulator as empty."""
        # Fixed: `type(x) == type(None)` -> idiomatic identity test.
        if main_array is None:
            return new_array
        return torch.cat((main_array, new_array))

    def torch_scale(x):
        """Standardise columns in place (zero mean, unit population std).

        NOTE(review): divides by a std that is zero for a constant feature,
        which would yield inf/nan — confirm inputs always vary per column.
        """
        x -= x.mean(0, keepdim=True)
        x /= x.std(0, unbiased=False, keepdim=True)
        return x

    def torch_normalize(x):
        """Min-max normalise each column to [0, 1]. (Currently unused.)"""
        minimum = x.min(dim=0)[0]
        maximum = x.max(dim=0)[0]
        return (x - minimum) / (maximum - minimum)

    def torch_cov(X):
        """Sample covariance of the rows of ``X`` (features x observations)."""
        D = X.shape[-1]
        X = X - torch.mean(X, dim=-1).unsqueeze(-1)
        return 1 / (D - 1) * X @ X.transpose(-1, -2)

    def _sorted_eig(features):
        """Scale features, eigendecompose their covariance, and return
        (eigenvalues, eigenvectors) ordered by descending eigenvalue."""
        features = torch_scale(features)
        covariance = torch_cov(features.T)
        # torch.eig was removed in PyTorch 2.0.  The covariance matrix is
        # real symmetric, so torch.linalg.eigh (real eigenpairs) applies.
        eigenvalues, eigenvectors = torch.linalg.eigh(covariance)
        order = eigenvalues.argsort(descending=True)
        return eigenvalues[order], eigenvectors[:, order]

    # --- start of analysis ----------------------------------------------------
    # Resolve the default device BEFORE moving models to it (the original
    # performed `.to(device)` first and only then checked `device is None`).
    if device is None:
        device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

    # Models from the previous and the current phase.
    model_previous = model_list[iteration_index - 1].to(device)
    model_current = model_list[iteration_index].to(device)

    # --- previous-task data ---------------------------------------------------
    # Training split; labels are remapped through the class-order permutation.
    X_train = X_train_cumuls[iteration_index - 1]
    Y_train = np.array([order_list.index(i) for i in Y_train_cumuls[iteration_index - 1]])
    T_train = T_train_cumuls[iteration_index - 1]
    # Exemplar split (kept for parity with the other splits; unused below).
    X_protoset = X_protoset_cumuls[iteration_index - 1]
    Y_protoset = np.array([order_list.index(i) for i in Y_protoset_cumuls[iteration_index - 1]])
    T_protoset = T_protoset_cumuls[iteration_index - 1]
    # Validation split.
    X_valid = X_valid_cumuls[iteration_index - 1]
    Y_valid = np.array([order_list.index(i) for i in Y_valid_cumuls[iteration_index - 1]])
    T_valid = T_valid_cumuls[iteration_index - 1]

    # Build data loaders.  Fixed: the original passed the undefined name
    # `iteration` here (NameError) — the parameter is `iteration_index`.
    trainloader, testloader = self.get_dataloader(X_train, Y_train,
                                                  X_valid, Y_valid,
                                                  iteration_index, start_iter, self.save_path, device,
                                                  T_train, T_valid,
                                                  batch_size=64)

    # Freeze dropout/normalisation for deterministic feature extraction.
    # (The original set eval() only on b1_model, not on the two models the
    # features are actually taken from.)
    b1_model.eval()
    model_previous.eval()
    model_current.eval()

    # --- collect features from both models ------------------------------------
    all_targets = None
    all_features_previous = None
    all_features_current = None
    with torch.no_grad():
        for batch_idx, (inputs, targets, tasks) in enumerate(trainloader):
            inputs, targets = inputs.to(device), targets.to(device)
            _, outputs_feature_previous = get_features(model_previous, inputs)
            _, outputs_feature_current = get_features(model_current, inputs)
            all_targets = torch_cat(all_targets, targets)
            all_features_previous = torch_cat(all_features_previous, outputs_feature_previous)
            all_features_current = torch_cat(all_features_current, outputs_feature_current)

    # --- per-class eigenvector similarity -------------------------------------
    for target in torch.unique(all_targets):
        # Select this class's features (advanced indexing copies, so the
        # in-place scaling inside _sorted_eig cannot corrupt the full arrays).
        index = torch.where(all_targets == target)[0]
        eigenvalues_prev, eigenvectors_prev = _sorted_eig(all_features_previous[index])
        eigenvalues_cur, eigenvectors_cur = _sorted_eig(all_features_current[index])
        # Column-wise cosine similarity between matched eigenvectors,
        # rescaled from [-1, 1] to [0, 1].
        full_dot_products = F.cosine_similarity(eigenvectors_prev, eigenvectors_cur, dim=0).cpu().numpy()
        full_dot_products = (full_dot_products + 1) / 2
        # NOTE(review): the figure is closed without savefig/show — presumably
        # a save call was dropped; confirm the intended output path.
        plt.figure()
        plt.plot(list(range(len(full_dot_products))), full_dot_products, 'o', color='black')
        plt.close()
Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment