@Dhiraj1703
Last active August 31, 2018 03:32
Memory Issues
import random
import numpy as np
from scipy.special import erf

Num_profile = 100                    # number of candidate flow profiles per generation
modes = 10                           # number of cosine modes per profile
v0 = 3.08e4                          # reference flow velocity (also used in the analytic g2)
R = 0.5
q_rings = np.array([0.00235])
regions = np.array([82.408])
x = np.linspace(-R, R, 40)           # positions across the channel
taus = np.linspace(1e-3, 1, 100)     # delay times
Np = 40
beta = 0.18
baseline = 1

# random starting amplitudes: one row of (modes+1) coefficients per profile
Initial_amplitudes = [v0 * random.uniform(-1, 1)
                      for j in range(modes + 1) for i in range(Num_profile)]
Initial_amplitudes = np.asarray(Initial_amplitudes)
Initial_amplitudes = Initial_amplitudes.reshape(Num_profile, modes + 1)
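For reference, the same starting amplitudes can be drawn in a single call; a minimal sketch, equivalent in distribution though not in the exact random stream:

rng = np.random.default_rng()
# one (Num_profile, modes+1) array of uniform amplitudes in [-v0, v0)
Initial_amplitudes = v0 * rng.uniform(-1, 1, size=(Num_profile, modes + 1))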
def fourierSeries(x, amplitudes):
    # build one cosine-series flow profile per row of amplitudes
    profiles = []
    for k in range(Num_profile):
        partialSums = amplitudes[k, 0]
        for n in range(1, modes + 1):
            mode = amplitudes[k, n]
            partialSums = partialSums + mode * np.cos(n * np.pi / (2 * R) * x)
            #print(partialSums)
        profiles.append(partialSums)
    return profiles
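For reference, the same series can be evaluated for every profile in one matrix product. A minimal sketch (fourierSeries_vectorized is not in the gist; it assumes amplitudes has shape (Num_profile, modes+1) and returns a 2-D array instead of a list):

def fourierSeries_vectorized(x, amplitudes):
    # basis[n, m] = cos(n*pi/(2R) * x[m]); row n = 0 is the constant term
    n = np.arange(modes + 1)[:, None]                  # shape (modes+1, 1)
    basis = np.cos(n * np.pi / (2 * R) * x[None, :])   # shape (modes+1, len(x))
    return amplitudes @ basis                          # shape (Num_profile, len(x))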
def g2_numeric(flow_vel, time, q_rings, regions, N):
    # numeric g2: double sum of cos(q*phi*t*(vj - vi)) over all velocity pairs
    phi = abs(np.cos(np.radians(regions) - np.radians(90)))
    qphi = np.dot(q_rings[:, None], phi[None, :])
    qphit = np.dot(qphi[:, :, None], time[None, :])
    # built-in sum: np.sum over a generator is deprecated and falls back to it anyway
    doublesum = sum(np.cos(qphit * (vj - vi))
                    for i, vi in enumerate(flow_vel)
                    for j, vj in enumerate(flow_vel))
    return doublesum / N**2
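For reference, the double sum over velocity pairs can also be done with broadcasting: one larger temporary of shape (N*N, nq, nphi, ntau) replaces the N*N Python-level iterations, which is usually much faster, though not lighter on peak memory. A minimal sketch (g2_numeric_vectorized is not in the gist; it assumes len(flow_vel) == N):

def g2_numeric_vectorized(flow_vel, time, q_rings, regions, N):
    phi = abs(np.cos(np.radians(regions) - np.radians(90)))
    qphi = np.dot(q_rings[:, None], phi[None, :])
    qphit = np.dot(qphi[:, :, None], time[None, :])      # shape (nq, nphi, ntau)
    v = np.asarray(flow_vel)
    dv = (v[None, :] - v[:, None]).ravel()               # all N*N pairwise vj - vi
    # one broadcasted cosine array, summed over the pair axis
    doublesum = np.cos(qphit[None, ...] * dv[:, None, None, None]).sum(axis=0)
    return doublesum / N**2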
def mse(g2_exp, g2_num):
    # normalised mean squared error between experimental and numeric g2
    if len(g2_num) != len(g2_exp):
        raise ValueError("data size does not match with size of numeric g2")
    diff = (g2_exp - g2_num) / g2_exp
    squared = diff**2
    mean_of_differences_squared = squared.mean()
    return mean_of_differences_squared
def get_analytic(q_rings, regions, time, vel):
    # analytic g2 flow contribution for a single flow velocity vel
    phi = abs(np.cos(np.radians(regions) - np.radians(90)))
    qphi = np.dot(q_rings[:, None], phi[None, :])
    qphit = np.dot(qphi[:, :, None], time[None, :])
    Flow_part = np.pi / (8 * qphit * vel) * abs(erf((1 + 1j) / 2 * np.sqrt(4 * qphit * vel)))**2
    return Flow_part
import time
import operator
import pandas as pd

start_time = time.time()
epsilon = 1 * 1e-6
previous_value = []
Errors_alliterations = []
Global_chisquare = []
count = 0
amplitudes = Initial_amplitudes
#while True:
for a in range(5000):
    fourier_profiles = fourierSeries(x=x, amplitudes=amplitudes)
    #print(fourier_profiles)

    # score every candidate profile against the analytic g2
    errors = []
    for j, value in enumerate(fourier_profiles):
        flow_prof = fourier_profiles[j]
        #print(flow_prof)
        g2_num = g2_numeric(flow_vel=flow_prof, q_rings=q_rings, regions=regions,
                            time=taus, N=Np).flatten()
        g2_num = g2_num * beta + baseline
        g2_theoretical = get_analytic(q_rings=q_rings, regions=regions, time=taus,
                                      vel=v0).flatten()
        g2_theoretical = g2_theoretical * beta + baseline
        error = mse(g2_exp=g2_theoretical, g2_num=g2_num)
        errors.append(error)

    # rank the profiles by error (best first)
    d = dict(enumerate(errors[:]))
    sorted_d = sorted(d.items(), key=operator.itemgetter(1))
    error_best_case = sorted_d[0][1]  # best profile
    Global_chisquare.append(error_best_case)
    errors_all = [t[1] for t in sorted_d[:]]
    Errors_alliterations.append(errors_all)
    best_profile = fourier_profiles[sorted_d[0][0]]
    del errors
    #plot1D(best_profile, x, color='green', marker='^', title='arbitrary flow profile', xlabel='pos')
    #fig, ax = plt.subplots()
    #ax.plot(x, best_profile, 'g^-')
    if error_best_case <= epsilon:
        break

    # average the amplitudes of the best fifth of the profiles ...
    num_good = int(len(sorted_d) / 5)
    sorted_min_mid = sorted_d[0:num_good]
    #print(len(sorted_min_mid))
    sorted_mid_max = sorted_d[num_good::]
    good_amplitudes = []
    for i in range(len(sorted_min_mid)):
        good_amplitude_ind = pd.DataFrame(sorted_min_mid)[0][i]
        good_amplitude = amplitudes[good_amplitude_ind]
        #print(good_amplitude)
        good_amplitudes.append(good_amplitude)
    # built-in sum: np.sum over a generator is deprecated
    average_amp = sum(good_amplitudes[i] for i in range(len(good_amplitudes))) / num_good

    # ... and pull the amplitudes of the remaining profiles towards that average
    new = []
    alpha = 1.5
    for i in range(len(sorted_mid_max)):
        old_amplitude_ind = pd.DataFrame(sorted_mid_max)[0][i]
        old_amplitude = amplitudes[old_amplitude_ind]
        new_amp = old_amplitude - alpha * (old_amplitude - average_amp)
        new.append(new_amp)
    bottom_half = new

    # the first num_good rows of amplitudes are carried over unchanged
    top = amplitudes[1:num_good]
    top_half = np.row_stack((amplitudes[0], top))
    new_amplitudes = np.concatenate((top_half, bottom_half))
    amplitudes = new_amplitudes

print("--- %s seconds ---" % (time.time() - start_time))

Dhiraj1703 commented Aug 30, 2018

I think the part of the code that needs attention here is the main loop at the end: at every iteration it computes new values and rebuilds the variable "new_amplitudes". When the script runs for long enough, it not only slows down, it eventually runs out of memory.
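One thing worth checking, as a hedged sketch rather than a verified fix: in the loop as posted, Errors_alliterations (one list of Num_profile floats per iteration) and Global_chisquare are the only structures that keep growing across iterations, and the update step builds fresh pandas DataFrames and Python lists on every pass. Something along these lines keeps only what later plotting needs and does the bookkeeping with plain numpy indexing; names follow the gist, but note it carries over the num_good lowest-error rows, whereas the posted code carries over the first num_good rows of amplitudes:

    # inside the main loop, after the errors list has been filled
    errors = np.asarray(errors)                  # one error per candidate profile
    order = np.argsort(errors)                   # profile indices, best first
    error_best_case = errors[order[0]]
    Global_chisquare.append(error_best_case)     # one scalar per iteration
    # Errors_alliterations.append(...) dropped: it is the structure that
    # grows fastest as the loop runs

    num_good = len(order) // 5
    best = amplitudes[order[:num_good]]          # lowest-error amplitude rows
    average_amp = best.mean(axis=0)
    rest = amplitudes[order[num_good:]]
    alpha = 1.5
    amplitudes = np.concatenate((best, rest - alpha * (rest - average_amp)))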
