-
-
Save epifanio/13685c4245de3fadb956 to your computer and use it in GitHub Desktop.
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
# import library in order to read and process the dataset | |
from __future__ import print_function | |
import pandas as pd | |
import scipy as Sci | |
import scipy.linalg | |
import scipy.signal | |
import numpy as np | |
import os | |
#import qgrid | |
import collections | |
import matplotlib.pyplot as plt | |
class ListTable(list):
    """A list subclass that renders a 2-D list (e.g. [[1, 2, 3], [4, 5, 6]])
    as an HTML table in the IPython Notebook via the _repr_html_ hook."""

    def _repr_html_(self):
        # Collect every markup fragment first, then join once at the end.
        fragments = ["<table>"]
        for row in self:
            fragments.append("<tr>")
            fragments.extend("<td>{0}</td>".format(cell) for cell in row)
            fragments.append("</tr>")
        fragments.append("</table>")
        return ''.join(fragments)
def bottomloss(path, filelist, gap=10000, nstep=400, SL=122.716245276, mx=158, c=1500):
    """Estimate the bottom loss (dB) for each acquisition file in *filelist*.

    For every file, the second half of the record is cross-correlated
    (CH2 against CH1) to locate the bottom echo, the one-way range is
    derived from the travel-time channel (TTP), and the received level is
    computed from the RMS of an *nstep*-sample window starting at the echo.

    Parameters
    ----------
    path : str
        Directory containing the data files.
    filelist : iterable of str
        CSV file names (3 header lines; columns SN/TTP/CH1/CH2/REF1/REF2).
    gap : int, optional
        Correlation samples skipped past the record centre before searching
        for the echo peak (suppresses the direct arrival).  The original
        body shadowed this parameter with a hard-coded 10000.
    nstep : int, optional
        Window length (samples) used for the RMS level (was hard-coded 400).
    SL : float, optional
        Source level in dB (was a hard-coded local).
    mx : float, optional
        Calibration constant used in the SPL computation (was hard-coded).
    c : float, optional
        Sound speed in m/s for the travel-time-to-range conversion.

    Returns
    -------
    list of float
        One bottom-loss value (dB) per input file.
    """
    bottoml = []
    for name in filelist:
        dataset = os.path.join(path, name)
        data = pd.read_csv(dataset, sep=",", header=3,
                           names=['SN', 'TTP', 'CH1', 'CH2', 'REF1', 'REF2'])
        # Integer midpoint of the record (// keeps it a valid index on Py3).
        index = data.shape[0] // 2
        xcorr = np.correlate(data['CH2'].values[index:-1],
                             data['CH1'].values[index:-1],
                             mode='full')[index + gap:-1]
        # First index of the absolute correlation maximum, as a plain int;
        # the original used np.where (an array) and a float() index, both
        # of which break indexing.
        lag = int(np.argmax(np.abs(xcorr)))
        # One-way range: travel time at the echo * c / 2.
        r1 = data['TTP'].values[index:-1][lag + gap] * c / 2
        TL = 20 * np.log10(r1)               # spherical-spreading transmission loss
        bspl = data.CH2[lag: lag + nstep]    # echo window for the received level
        rms = Sci.linalg.norm(bspl.values) / np.sqrt(bspl.shape[0])
        spl = mx - 5 + 20 * np.log10(rms)
        BL = SL - 2 * TL - spl               # sonar equation: two-way TL
        bottoml.append(BL)
    return bottoml
def transmissionloss(dataset, SL=127.272539983, hz=600, TTP='TTP', CH1='CH1', CH2='CH2', nstep=400, c=1500):
    """Compute range and one-way transmission loss for one acquisition file.

    The arrival is located via the CH1/CH2 cross-correlation peak, the
    range *r* is read from the travel-time channel *hz* samples before the
    pulse, and the loss is SL - 20*log10(r) (spherical spreading).

    Parameters
    ----------
    dataset : str
        Path to the CSV file (3 header lines).
    SL : float, optional
        Source level in dB.
    hz : int, optional
        Sample offset back from the pulse start for the travel-time read.
    TTP, CH1, CH2 : str, optional
        Column names for travel time and the two channels.
    nstep : int, optional
        Length of the pulse window cut from CH2.
    c : float, optional
        Sound speed in m/s.

    Returns
    -------
    [r, tloss] : list
        One-way range (m) and transmission loss (dB).
    """
    data = pd.read_csv(dataset, sep=",", header=3,
                       names=['SN', 'TTP', 'CH1', 'CH2', 'REF1', 'REF2'])
    xcorr = np.correlate(data[CH1].values, data[CH2].values, mode='full')
    # First index of the correlation peak as a plain int; the original
    # np.where(...) produced an array, which broke the index arithmetic.
    lag = int(np.argmax(np.abs(xcorr)))
    ch2dim = data[CH2].shape[0]
    # Start of the received pulse in CH2 (// keeps the index an int on Py3).
    lagindex = ch2dim // 2 + ch2dim - lag
    newdata = data[CH2][lagindex: lagindex + nstep]
    # Trim the ramp-up, keeping only the steady-state part of the pulse.
    # NOTE(review): newdata is computed but never used in the result —
    # kept for fidelity with the original; confirm whether it was meant
    # to feed an SPL computation.
    newdata = newdata[findSteadyStateStart(newdata):]
    # One-way range from travel time, hz samples before the pulse start.
    r = c * data[TTP][lagindex - hz] / 2
    tloss = SL - 20 * np.log10(r)
    return [r, tloss]
def rtloss(files, plot=False, figsize=(14,7,60), rmax=35, SL=127.272539983):
    """Collect range / transmission-loss pairs for a set of files.

    Calls transmissionloss on every file; with plot=True the measured
    points are drawn over the theoretical spherical-spreading curve
    SL - 20*log10(r) for r in [0, rmax).

    Returns [ranges, losses] as two parallel lists.
    """
    rtl = []
    tl = []
    for fname in files:
        r, tloss = transmissionloss(fname)
        rtl.append(r)
        tl.append(tloss)
    if plot:
        fig = plt.figure(figsize=(figsize[0], figsize[1]), dpi=figsize[2])
        ax = fig.add_subplot(211)
        ax.grid()
        ranges = np.arange(0, rmax, 1)
        ax.plot(ranges, [SL - 20 * np.log10(rr) for rr in ranges], '-')
        ax.plot(rtl, tl, 'o')
        plt.show()
    return [rtl, tl]
def lagcut(data, ch2='CH2', nstep=350, plot=False, figsize=(14,7,60)):
    """Cut an *nstep*-sample window of *ch2* starting at the correlation lag.

    Parameters
    ----------
    data : pandas.DataFrame
        Must contain CH1 and CH2 columns (consumed by lagFinder) and *ch2*.
    ch2 : str, optional
        Column to cut the window from.
    nstep : int, optional
        Window length in samples.
    plot : bool, optional
        If True, also draw the full cross-correlation curve.
    figsize : tuple, optional
        (width, height, dpi) for the optional figure.

    Returns
    -------
    [lag, newdata] : list
        The raw lag result from lagFinder (index array, unchanged for
        backward compatibility) and the extracted window.
    """
    lag, xcorr = lagFinder(data)
    # lagFinder returns an index array; take its first element as a plain
    # int so the slice bounds are valid, and use // for Py3 int division
    # (the original sliced with an array and a float, which fails).
    pos = int(np.atleast_1d(lag)[0])
    dim = data[ch2].shape[0]
    start = dim // 2 + dim - pos
    newdata = data[ch2][start: start + nstep]
    if plot:
        fig = plt.figure(figsize=(figsize[0], figsize[1]), dpi=figsize[2])
        ax = fig.add_subplot(211)
        ax.grid()
        ax.plot(np.arange(0, xcorr.shape[0]), xcorr)
        plt.show()
    return [lag, newdata]
def chplot(data, figsize=(14,7,60), x='TTP', ch1='CH1', ch2='CH2'):
    """Plot the two hydrophone channels *ch1* and *ch2* against column *x*.

    figsize is (width, height, dpi).  The figure is shown immediately;
    nothing is returned.
    """
    abscissa = data[x].values
    fig = plt.figure(figsize=(figsize[0], figsize[1]), dpi=figsize[2])
    ax = fig.add_subplot(111)
    ax.grid()
    # Draw CH1 first, then CH2, matching the original ordering.
    for channel in (ch1, ch2):
        ax.plot(abscissa, data[channel].values)
    plt.show()
def freqplot(datadict, xkey='khz', ykey='spl', figsize=(14,7,60)):
    """Plot *ykey* versus *xkey* for a dict of measurement records.

    *datadict* maps filename -> record dict; each record must contain at
    least the keys *xkey* and *ykey*.  Entries are sorted by *xkey*, the
    maximum *ykey* value is marked with a red horizontal line, and every
    point is annotated with its value.  The figure is shown immediately;
    nothing is returned.
    """
    # Sort records by the frequency key.  A plain lambda replaces the
    # Python-2-only tuple-unpacking lambda, a SyntaxError on Python 3.
    od = collections.OrderedDict(
        sorted(datadict.items(), key=lambda item: item[1][xkey]))
    x = []
    y = []
    for i, v in enumerate(od):
        x.append(i)
        y.append(od[v][ykey])
    fig = plt.figure(figsize=(figsize[0], figsize[1]), dpi=figsize[2])
    ax = fig.add_subplot(111)
    ax.plot(x, y, '-', lw=2)
    ax.grid()
    plt.xlim(min(x) - 1, max(x) + 1)
    plt.ylim(min(y) - 1, max(y) + 1)
    # Horizontal marker at the maximum value, annotated at the left edge.
    plt.plot((min(x), max(x)), (max(y), max(y)), 'r-')
    plt.annotate(max(y), xy=(min(x), max(y)), xytext=(10, 10),
                 textcoords='offset points')
    index = []
    label = []
    for i, v in enumerate(od):
        ax.plot(i, od[v][ykey], 'ro')
        # Annotate using ykey; the original hard-coded 'spl' here, which
        # broke calls made with a different ykey.
        plt.annotate("{0:.2f}".format(od[v][ykey]), xy=(i, od[v][ykey]),
                     xytext=(10, 10), textcoords='offset points')
        index.append(i)
        label.append(od[v][xkey])
    plt.xticks(index, label)
    plt.show()
def loadwave(filename):
    """Read an acquisition CSV (3 leading non-data lines) into a DataFrame.

    Columns are named SN, TTP, CH1, CH2, REF1 and REF2.
    """
    return pd.read_csv(filename,
                       sep=",",
                       header=3,
                       names=['SN', 'TTP', 'CH1', 'CH2', 'REF1', 'REF2'])
def lagFinder(data):
    """Cross-correlate CH1 with CH2 and locate the correlation peak.

    Returns [lag, xcorr]: the array of indices where |xcorr| reaches its
    maximum, and the full correlation curve itself.
    """
    xcorr = np.correlate(data.CH1.values, data.CH2.values, mode='full')
    envelope = np.abs(xcorr)
    lag = np.where(envelope >= envelope.max())[0]
    return [lag, xcorr]
def splcalc2(dataset, nstep=350, mx=158, storedict=None, verbose=False):
    """Compute the SPL of the received pulse in a single acquisition file.

    The pulse is located via CH1/CH2 cross-correlation, an *nstep*-sample
    window is cut from CH2, its ramp-up is trimmed with
    findSteadyStateStart, and the SPL is derived from the window RMS.

    Parameters
    ----------
    dataset : str
        Path to the CSV file (3 header lines).
    nstep : int, optional
        Window length in samples.
    mx : float, optional
        Calibration constant.
    storedict : dict, optional
        If given, the result is also stored there under the file name.
        (None replaces the original mutable default, which leaked state
        between calls.)
    verbose : bool, optional
        If True, print the file name and SPL.

    Returns
    -------
    dict
        Keys 'filename', 'spl' and 'data' (the trimmed window).
    """
    if storedict is None:
        storedict = {}
    data = pd.read_csv(dataset,
                       sep=",",
                       header=3,
                       names=['SN', 'TTP', 'CH1', 'CH2', 'REF1', 'REF2'])
    xcorr = np.correlate(data.CH1.values, data.CH2.values, mode='full')
    # First index of the correlation peak as a plain int; the original
    # np.where(...) produced an array and /2 a float, breaking the slice.
    lag = int(np.argmax(np.abs(xcorr)))
    ch2dim = data.CH2.shape[0]
    start = ch2dim // 2 + ch2dim - lag
    newdata = data.CH2[start: start + nstep]
    # Drop the ramp-up so the RMS reflects only the steady-state pulse.
    newdata = newdata[findSteadyStateStart(newdata):]
    rms = Sci.linalg.norm(newdata.values) / np.sqrt(newdata.shape[0])
    spl = mx - 10 + 20 * np.log10(rms)
    filename = dataset.split('/')[-1]
    spldict = {'filename': filename, 'spl': spl, 'data': newdata}
    storedict[filename] = spldict
    if verbose:
        print('Filename =', dataset.split('/')[-1], '\n', 'SPL =', spl, '\n \n')
    return spldict
def splcalc(dataset, nstep=350, mx=158, storedict=None, verbose=False):
    """Compute SPL, depth and file-name metadata for one acquisition file.

    Like splcalc2 but additionally parses voltage, ping count and
    frequency from a '<volt>_<khz>khz..N<N>' style file name and derives
    the depth from the travel time at the correlation lag.

    Parameters
    ----------
    dataset : str
        Path to the CSV file (3 header lines).
    nstep : int, optional
        Window length in samples.
    mx : float, optional
        Calibration constant.
    storedict : dict, optional
        If given, the result is also stored there under the file name.
        (None replaces the original mutable default, which leaked state
        between calls.)
    verbose : bool, optional
        If True, print the file name and SPL.

    Returns
    -------
    dict
        Keys 'filename', 'volt', 'N', 'khz', 'spl', 'data', 'depth'.
        volt/N/khz are None when the file name does not follow the
        expected convention (the original raised NameError in that case).
    """
    if storedict is None:
        storedict = {}
    data = pd.read_csv(dataset,
                       sep=",",
                       header=3,
                       names=['SN', 'TTP', 'CH1', 'CH2', 'REF1', 'REF2'])
    xcorr = np.correlate(data.CH1.values, data.CH2.values, mode='full')
    # First index of the correlation peak as a plain int; the original
    # np.where(...) produced an array and /2 a float, breaking indexing.
    lag = int(np.argmax(np.abs(xcorr)))
    ch2dim = data.CH2.shape[0]
    start = ch2dim // 2 + ch2dim - lag
    newdata = data.CH2[start: start + nstep]
    # Drop the ramp-up so the RMS reflects only the steady-state pulse.
    newdata = newdata[findSteadyStateStart(newdata):]
    rms = Sci.linalg.norm(newdata.values) / np.sqrt(newdata.shape[0])
    spl = mx - 10 + 20 * np.log10(rms)
    # Travel time at the lag * 1564 m/s -> depth (two-way not halved,
    # matching the original).
    depth = data['TTP'][lag] * 1564
    filename = dataset.split('/')[-1]
    # Defaults so the result dict is still built on a bad file name.
    volt = N = khz = None
    try:
        volt = float(filename.split('_')[0])
        N = int(filename.split('N')[-1].split('.')[0])
        khz = float(filename.split('khz')[0].split('_')[-1])
        if len(filename.split('khz')[0].split('_')) > 2:
            khz = float(filename.split('khz')[0].split('_')[-2] + '.' +
                        filename.split('khz')[0].split('_')[-1])
    except (ValueError, IndexError):
        print('bad file name')
    spldict = {'filename': filename, 'volt': volt, 'N': N, 'khz': khz,
               'spl': spl, 'data': newdata, 'depth': depth}
    storedict[filename] = spldict
    if verbose:
        print('Filename =', dataset.split('/')[-1], '\n', 'SPL =', spl, '\n \n')
    return spldict
def findSteadyStateStart(data, peackwindow=5, threshold=0.05):
    """Return the index where the signal envelope becomes steady.

    Peaks are detected with scipy's CWT peak finder; the first peak whose
    amplitude differs from the previous peak by at most *threshold* marks
    the start of the steady state.

    Parameters
    ----------
    data : pandas.Series
        Signal window to inspect.
    peackwindow : int, optional
        Upper bound of the CWT width range (misspelled name kept for
        backward compatibility with existing callers).
    threshold : float, optional
        Maximum consecutive-peak amplitude difference considered steady.

    Returns
    -------
    int or None
        Index into data.values of the steady-state start, or None when no
        pair of similar consecutive peaks exists.  (Explicit None matches
        the original implicit fall-through; callers slice with it, and
        data[None:] keeps the whole window.)
    """
    peakind = scipy.signal.find_peaks_cwt(data.values, np.arange(1, peackwindow))
    peaks = data.values[peakind]
    # The original had an unreachable `break` after `return`; the loop now
    # simply returns on the first qualifying peak pair.
    for i in range(1, len(peaks)):
        if np.abs(peaks[i] - peaks[i - 1]) <= threshold:
            return peakind[i]
    return None
Sign up for free
to join this conversation on GitHub.
Already have an account?
Sign in to comment