Skip to content

Instantly share code, notes, and snippets.

@suhaskv
Created January 2, 2021 06:47
Show Gist options
  • Save suhaskv/1632c6547bf0d148f54a9fc2e245ee5c to your computer and use it in GitHub Desktop.
VSB Power Line Blog - Permutation Entropy
def _embed(x, order=3, delay=1):
N = len(x)
# In our case, N-(order-1)*delay = 800000 - (3-1)*1 = 799998
# Y.shape = 3x799998
Y = np.zeros((order, N - (order - 1) * delay))
for i in range(order):
# Y[0] = x[0:799998], Y[1] = x[1:799999], Y[2] = x[3:800000]
Y[i] = x[(i * delay) : (i * delay) + Y.shape[1]].T
# Y.T[0] = [x[0],x[1],x[2]], Y.T[1] = [x[1],x[2],x[3]], ... , Y.T[799998] = [x[799998],x[799999],x[800000]]
return Y.T
def factorial(val):
    """Return val! for a non-negative integer val.

    Fixes the original recursive version, which had no base case for 0:
    ``factorial(0)`` (and any negative input) recursed until the
    interpreter's recursion limit. Here ``factorial(0) == 1`` as
    mathematically defined, negative inputs raise ``ValueError``, and the
    iterative form also avoids recursion-depth limits for large val.
    """
    if val < 0:
        raise ValueError("factorial is undefined for negative values")
    result = 1
    for k in range(2, val + 1):
        result *= k
    return result
def perm_entropy(x, order=3, delay=1, normalize=False):
    """Compute the permutation entropy of a 1-D time series.

    Every delay vector of length ``order`` is reduced to its ordinal
    pattern (the argsort of its values); the Shannon entropy (in bits) of
    the distribution of those patterns is returned. With ``normalize=True``
    the value is divided by ``log2(order!)`` so it lies in [0, 1].
    """
    x = np.array(x)
    # Weights [order**0, order**1, ...] encode an ordinal pattern as a
    # unique integer key (a base-`order` number).
    weights = np.power(order, range(order))
    # Ordinal pattern of each embedded delay vector.
    patterns = _embed(x, order=order, delay=delay).argsort(kind='quicksort')
    # Collapse each pattern row into its single integer key.
    keys = np.multiply(patterns, weights).sum(1)
    # Count how often each distinct pattern occurs.
    _, counts = np.unique(keys, return_counts=True)
    # Relative frequency of each pattern (element-wise division).
    probs = np.true_divide(counts, counts.sum())
    entropy = -np.multiply(probs, np.log2(probs)).sum()
    if normalize:
        entropy /= np.log2(factorial(order))
    return entropy
Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment