import os
from operator import truediv
import numpy as np
import scipy.io as sio
import matplotlib.pyplot as plt
import cv2
import spectral
import torch
from IPython import display
from sklearn import metrics, preprocessing
from sklearn.preprocessing import MinMaxScaler
from sklearn.decomposition import PCA
from sklearn.model_selection import train_test_split
from sklearn.metrics import confusion_matrix, accuracy_score, classification_report, cohen_kappa_score
from plotly.offline import init_notebook_mode

def sampling(proportion, ground_truth):
    """Split the labelled pixels of a flattened ground-truth map into train/test indices.

    For each class, (1 - proportion) of its pixels (at least 3) go to the training set
    and the rest to the test set; label 0 is treated as unlabelled and skipped.
    """
    train = {}
    test = {}
    labels_loc = {}
    m = max(ground_truth)
    for i in range(m):
        indexes = [j for j, x in enumerate(ground_truth.ravel().tolist()) if x == i + 1]
        np.random.shuffle(indexes)
        labels_loc[i] = indexes
        if proportion != 1:
            nb_val = max(int((1 - proportion) * len(indexes)), 3)
        else:
            nb_val = 0
        train[i] = indexes[:nb_val]
        test[i] = indexes[nb_val:]
    train_indexes = []
    test_indexes = []
    for i in range(m):
        train_indexes += train[i]
        test_indexes += test[i]
    np.random.shuffle(train_indexes)
    np.random.shuffle(test_indexes)
    return train_indexes, test_indexes
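
# Illustrative usage sketch for sampling(): the map size, class count, and 10% train
# split below are assumptions chosen for demonstration, not values from any dataset.
def _example_sampling():
    gt = np.random.randint(0, 5, size=145 * 145)   # flattened map, 0 = unlabelled, classes 1..4
    train_idx, test_idx = sampling(0.9, gt)        # (1 - 0.9) = 10% of each class for training
    print(len(train_idx), len(test_idx))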

def index_assignment(index, row, col, pad_length):
    # Map flat pixel indices to (row, col) positions in the padded image.
    new_assign = {}
    for counter, value in enumerate(index):
        assign_0 = value // col + pad_length
        assign_1 = value % col + pad_length
        new_assign[counter] = [assign_0, assign_1]
    return new_assign


def assignment_index(assign_0, assign_1, col):
    # Inverse of index_assignment for unpadded coordinates: (row, col) -> flat index.
    new_index = assign_0 * col + assign_1
    return new_index


def select_patch(matrix, pos_row, pos_col, ex_len):
    # Extract a (2 * ex_len + 1)-wide square spatial patch centred at (pos_row, pos_col).
    selected_rows = matrix[range(pos_row - ex_len, pos_row + ex_len + 1)]
    selected_patch = selected_rows[:, range(pos_col - ex_len, pos_col + ex_len + 1)]
    return selected_patch


def select_small_cubic(data_size, data_indices, whole_data, patch_length, padded_data, dimension):
    # Gather one spatial patch (cube) per pixel index from the padded data cube.
    small_cubic_data = np.zeros((data_size, 2 * patch_length + 1, 2 * patch_length + 1, dimension))
    data_assign = index_assignment(data_indices, whole_data.shape[0], whole_data.shape[1], patch_length)
    for i in range(len(data_assign)):
        small_cubic_data[i] = select_patch(padded_data, data_assign[i][0], data_assign[i][1], patch_length)
    return small_cubic_data
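
# Illustrative usage sketch for select_small_cubic(): the 10x10x5 toy cube and patch
# radius of 2 are assumptions; the padded copy must be padded by patch_length on both
# spatial axes before calling it.
def _example_select_small_cubic():
    whole_data = np.random.rand(10, 10, 5)
    patch_length = 2
    padded = np.pad(whole_data, ((patch_length, patch_length),
                                 (patch_length, patch_length), (0, 0)), 'constant')
    indices = np.arange(10 * 10)                   # one patch per pixel
    cubes = select_small_cubic(len(indices), indices, whole_data, patch_length, padded, 5)
    print(cubes.shape)                             # (100, 5, 5, 5)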

def set_figsize(figsize=(3.5, 2.5)):
    display.set_matplotlib_formats('svg')
    plt.rcParams['figure.figsize'] = figsize

def record_output(oa_ae, aa_ae, kappa_ae, element_acc_ae, training_time_ae, testing_time_ae, path):
    f = open(path, 'a')
    sentence0 = 'OAs for each iteration are:' + str(oa_ae) + '\n'
    f.write(sentence0)
    sentence1 = 'AAs for each iteration are:' + str(aa_ae) + '\n'
    f.write(sentence1)
    sentence2 = 'KAPPAs for each iteration are:' + str(kappa_ae) + '\n' + '\n'
    f.write(sentence2)
    sentence3 = 'mean_OA ± std_OA is: ' + str(np.mean(oa_ae)) + ' ± ' + str(np.std(oa_ae)) + '\n'
    f.write(sentence3)
    sentence4 = 'mean_AA ± std_AA is: ' + str(np.mean(aa_ae)) + ' ± ' + str(np.std(aa_ae)) + '\n'
    f.write(sentence4)
    sentence5 = 'mean_KAPPA ± std_KAPPA is: ' + str(np.mean(kappa_ae)) + ' ± ' + str(np.std(kappa_ae)) + '\n' + '\n'
    f.write(sentence5)
    sentence6 = 'Total average Training time is: ' + str(np.sum(training_time_ae)) + '\n'
    f.write(sentence6)
    sentence7 = 'Total average Testing time is: ' + str(np.sum(testing_time_ae)) + '\n' + '\n'
    f.write(sentence7)
    element_mean = np.mean(element_acc_ae, axis=0)
    element_std = np.std(element_acc_ae, axis=0)
    sentence8 = "Mean of all elements in confusion matrix: " + str(element_mean) + '\n'
    f.write(sentence8)
    sentence9 = "Standard deviation of all elements in confusion matrix: " + str(element_std) + '\n'
    f.write(sentence9)
    f.close()

def classification_map(map, ground_truth, dpi, save_path):
    fig = plt.figure(frameon=False)
    fig.set_size_inches(ground_truth.shape[1] * 2.0 / dpi, ground_truth.shape[0] * 2.0 / dpi)
    ax = plt.Axes(fig, [0., 0., 1., 1.])
    ax.set_axis_off()
    ax.xaxis.set_visible(False)
    ax.yaxis.set_visible(False)
    fig.add_axes(ax)
    ax.imshow(map)
    fig.savefig(save_path, dpi=dpi)
    return 0

def list_to_colormap(x_list):
    # RGB palette for the class maps; labels 16 and -1 render as black (background).
    palette = {
        0: [255, 0, 0],     1: [0, 255, 0],     2: [0, 0, 255],
        3: [255, 255, 0],   4: [0, 255, 255],   5: [255, 0, 255],
        6: [192, 192, 192], 7: [128, 128, 128], 8: [128, 0, 0],
        9: [128, 128, 0],   10: [0, 128, 0],    11: [128, 0, 128],
        12: [0, 128, 128],  13: [0, 0, 128],    14: [255, 165, 0],
        15: [255, 215, 0],  16: [0, 0, 0],      17: [215, 255, 0],
        18: [0, 255, 215],  -1: [0, 0, 0],
    }
    y = np.zeros((x_list.shape[0], 3))
    for index, item in enumerate(x_list):
        if item in palette:
            y[index] = np.array(palette[item]) / 255.
    return y
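
# Illustrative usage sketch for list_to_colormap() and classification_map(): the
# 20x30 label map and the /tmp output path are assumptions; labels outside the
# palette stay black.
def _example_classification_map():
    labels = np.random.randint(0, 4, size=(20, 30))
    rgb = list_to_colormap(labels.ravel()).reshape(20, 30, 3)
    classification_map(rgb, labels, 300, '/tmp/example_classification_map.png')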

def generate_png(all_iter, net, gt_hsi, Dataset, device, total_indices):
    # Predict every labelled pixel, then render the prediction map and the
    # ground-truth map to PNG files.
    pred_test = []
    for X, y in all_iter:
        X = X.permute(0, 3, 1, 2)
        X = X.to(device)
        net.eval()
        pred_test.extend(np.array(net(X).cpu().argmax(dim=1)))
    gt = gt_hsi.flatten()
    x_label = np.zeros(gt.shape)
    for i in range(len(gt)):
        if gt[i] == 0:
            # Unlabelled pixels are shown in black in both maps (index 16 after the -1 shift below).
            gt[i] = 17
            x_label[i] = 16
    gt = gt[:] - 1
    x_label[total_indices] = pred_test
    x = np.ravel(x_label)
    y_list = list_to_colormap(x)
    y_gt = list_to_colormap(gt)
    y_re = np.reshape(y_list, (gt_hsi.shape[0], gt_hsi.shape[1], 3))
    gt_re = np.reshape(y_gt, (gt_hsi.shape[0], gt_hsi.shape[1], 3))
    path = '/content/'
    classification_map(y_re, gt_hsi, 300,
                       path + 'classification_maps/' + Dataset + '_' + '.png')
    classification_map(gt_re, gt_hsi, 300,
                       path + 'classification_maps/' + Dataset + '_gt.png')
    print('------ Classification maps generated successfully ------')

def evaluate_accuracy(data_iter, net, loss, device):
    # Accuracy and accumulated loss of `net` over `data_iter`, without gradient tracking.
    acc_sum, n = 0.0, 0
    test_l_sum, test_num = 0, 0
    with torch.no_grad():
        for X, y in data_iter:
            X = X.permute(0, 3, 1, 2)
            X = X.to(device)
            y = y.to(device)
            net.eval()
            y_hat = net(X)
            l = loss(y_hat, y.long())
            acc_sum += (y_hat.argmax(dim=1) == y).float().sum().cpu().item()
            test_l_sum += l
            test_num += 1
            net.train()
            n += y.shape[0]
    return [acc_sum / n, test_l_sum]  # / test_num]
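
# Illustrative usage sketch for evaluate_accuracy(): the toy linear model, batch size,
# and patch shape are assumptions, chosen only to show the expected NHWC tensor layout
# (evaluate_accuracy permutes batches to NCHW itself).
def _example_evaluate_accuracy():
    device = torch.device('cpu')
    net = torch.nn.Sequential(torch.nn.Flatten(), torch.nn.Linear(3 * 5 * 5, 4))
    X = torch.rand(8, 5, 5, 3)                     # 8 patches, 5x5 spatial, 3 bands
    y = torch.randint(0, 4, (8,))
    loader = torch.utils.data.DataLoader(torch.utils.data.TensorDataset(X, y), batch_size=4)
    acc, loss_sum = evaluate_accuracy(loader, net, torch.nn.CrossEntropyLoss(), device)
    print(acc, loss_sum)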

def aa_and_each_accuracy(confusion_matrix):
    # Per-class accuracy (diagonal / row sum) and its mean (average accuracy, AA).
    list_diag = np.diag(confusion_matrix)
    list_raw_sum = np.sum(confusion_matrix, axis=1)
    each_acc = np.nan_to_num(truediv(list_diag, list_raw_sum))
    average_acc = np.mean(each_acc)
    return each_acc, average_acc
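
# Illustrative usage sketch for aa_and_each_accuracy(): the labels are hypothetical;
# in the full pipeline the matrix comes from confusion_matrix(y_test, y_pred) on the
# trained network's test predictions.
def _example_aa_and_each_accuracy():
    y_true = [0, 0, 1, 1, 2, 2]
    y_pred = [0, 1, 1, 1, 2, 0]
    each_acc, average_acc = aa_and_each_accuracy(confusion_matrix(y_true, y_pred))
    print(each_acc, average_acc)                   # per-class accuracies and their mean (AA)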