Skip to content

Instantly share code, notes, and snippets.

@jimsrc
Last active June 17, 2017 00:17
Show Gist options
  • Save jimsrc/a40ac6bf54d973ad43b19355b722c916 to your computer and use it in GitHub Desktop.
Save jimsrc/a40ac6bf54d973ad43b19355b722c916 to your computer and use it in GitHub Desktop.
Auger Histograms: read corrected (by AoP, pressure, and geop-height) charge-histograms according to commit <648dc8b> of auger-repo

run


  • To build temperature-corrected histograms:
./build_temp.corr.py -- -o ../out/out.build_temp.corr/shape.ok_and_3pmt.ok/15min/test.h5 
# see the rest of options in the default argument values:
./build_temp.corr.py -- -h
  • Generate figure (and ASCII data associated) of long-term profiles (use either scals or histos):
export FINP=../out/out.build_temp.corr/shape.ok_and_3pmt.ok/15min/final_histos.h5
export FAVR=../out/out.build_temp_hist.and.fits/avr_histos_press_shape.ok_and_3pmt.ok.txt
./ch_Eds_smoo2.py -- --mode scals --fprefix stest --odir . --fname_inp $FINP --fname_avr $FAVR 

# See argument descriptions:
./ch_Eds_smoo2.py -- -h

Read

  • Basically, we walk into the data like this:
from h5py import File as h5
f = h5('file.h5','r')

# time in fractions of years-since-01Jan2006, with resolution of 15min.
t = f['2008/01/07/t'][...]

# vector of the number of tanks contributing valid, filtered data at each
# time during the day 7/Jan/2008.
ntanks = f['2008/01/07/tanks'][...]

# vector of the count-rate for the charge-histograms at the energy-bin centered 
# at Ed=30MeV. This data is synchronized with `ntanks` and `t` above.
cts = f['2008/01/07/cts_temp-corr_030MeV'][...]
#!/usr/bin/env ipython
# -*- coding: utf-8 -*-
"""
build smoothed charge-histogram-count-rates
in the bands specified by `ch_Eds` (energy channel
indexes)
"""
from pylab import *
#from scipy.io.netcdf import netcdf_file
from datetime import datetime, timedelta
import numpy as np
import os, argparse
from os.path import isfile, isdir
from h5py import File as h5
#--- useful globals
# Root of the Auger repository; read from the environment when available,
# otherwise fall back to a placeholder that makes a bad path obvious.
this_dir = os.environ.get('AUGER_REPO', '<auger_repo>')  # repo dir
# Data-quality mask name, used to build the default input/output paths.
maskname = 'shape.ok_and_3pmt.ok'  # alternatives: '3pmt.ok', 'all'
#--- retrieve command-line args
parser = argparse.ArgumentParser(
    formatter_class=argparse.ArgumentDefaultsHelpFormatter
)
parser.add_argument(
    '-m', '--mode',
    type=str,
    default='scals',
    help='scals or histos',
)
parser.add_argument(
    '-fp', '--fprefix',
    type=str,
    default='smoo2',
    help='prefix for output filenames (for figure and ASCII files)',
)
parser.add_argument(
    '-odir', '--odir',
    type=str,
    default='.',
    help='directory for output files',
)
parser.add_argument(
    '-finp', '--fname_inp',
    type=str,
    default='%s/out/out.build_temp.corr/%s/15min/final_histos.h5' % (this_dir, maskname),
    help='path of .h5 file (final charge histogram data)',
)
parser.add_argument(
    '-favr', '--fname_avr',
    type=str,
    default='%s/out/out.build_temp_hist.and.fits/avr_histos_press_%s.txt' % (this_dir, maskname),
    help='path of the ASCII .txt file. It contains time-average (along several months) rates for each energy channel in the histograms data.',
)
pa = parser.parse_args()
# Validate the output directory with an explicit check instead of `assert`,
# which is silently stripped when Python runs with -O.
if not isdir(pa.odir):
    raise SystemExit(" --> doesn't exist:\n %s" % pa.odir)
#--- input data: temperature-corrected charge-histogram rates (HDF5)
f = h5(pa.fname_inp, 'r')
#---------------------- grab typical values to build normalization ranges
# `prom` holds time-averaged rates per energy channel; column 0 appears to
# be an axis/label column and is skipped below — TODO confirm against the
# file produced by the averaging step.
prom = np.loadtxt(pa.fname_avr)
# average over all rows (the whole multi-year dataset) to get one typical
# rate per energy channel; used later to normalize the daily count rates
typic = np.nanmean(prom[:,1:], axis=0) # mean of the multi-year (~8 yr) array
#----------------------------------------------------------------
# analysis period and selection thresholds
year_ini = 2006
year_end = 2015 # including
date = datetime(year_ini, 1, 1)   # NOTE: re-initialized again below, just before the main loop
MAX_DATE = datetime(year_end+1, 1, 1)  # exclusive upper bound for the daily loop
nyr = year_end - year_ini + 1          # number of years analyzed
mintank = 150. # we want averages of more than `mintank` tanks
#----------------------------------------------------------------
# figure for the long-term (normalized) rate profile
fig = figure(1, figsize=(6, 4))
ax = fig.add_subplot(111)
ax.grid()
ax.set_xlabel('years since jan/2006')
ax.set_xlim(-.1, nyr)
ax.set_ylim(0.95, 1.1)
day = 86400./(365.*86400.) # [yr] one day expressed in years (~1/365); appears unused below — TODO confirm
dT = 0.05 #0.01 #0.05 #0.3 #0.05 # [yr] width of the time bins for the smoothed profile
ntot = 0   # counter of processed (non-skipped) dates
date = datetime(year_ini, 1, 1)
#--- detect mode (energy band)
# `ch_Eds` are the indexes of the deposited-energy (Ed) channels to sum;
# from the filename built below, channel i spans [i*20, (i+1)*20) MeV.
if pa.mode == 'scals':
    ch_Eds = (3, 4, 5)
elif pa.mode == 'histos':
    ch_Eds = (10, 11, 12, 13)  # Ed-channel indexes to average
else:
    # parenthesized print: the original Python-2-only `print "..."` statement
    # is a syntax error on Python 3, while the rest of the file already uses
    # the parenthesized form.
    print(" >> wrong mode!")
    raise SystemExit
#--- output paths
fname_out = '%s_%s_%02d-%02d.MeV' % (pa.fprefix, maskname, ch_Eds[0]*20, (ch_Eds[-1]+1)*20)
fname_fig = '%s/%s.png' % (pa.odir, fname_out)
fname_ascii = '%s/%s.txt' % (pa.odir, fname_out)
print(" --> INPUT FILE: \n %s" % pa.fname_inp)
#--- iterate through the time period one day at a time, accumulating the
#    band-summed, normalized daily rates into time bins of width `dT` [yr].
#    NOTE(review): indentation reconstructed from semantics (it was lost in
#    this copy); bin-reset statements must live inside the bin-closing `if`,
#    otherwise the binning condition would never trigger.
j = 0      # total number of daily averages accumulated
k = 1      # index of the current dT-wide time bin
r = 0.0    # running sum of daily normalized rates within the current bin
n = 0      # number of daily averages within the current bin
nall = 0   # number of ALL input dates (present in the file or not)
tt = []    # bin centers [years since 01/Jan/2006]
rr = []    # bin-averaged normalized rates
while date < MAX_DATE:
    path = '%04d/%02d/%02d' % (date.year, date.month, date.day)
    nall += 1  # count all input dates
    # sometimes data is not present on some dates due
    # to filtering in previous data processing
    if path not in f:
        date += timedelta(days=1)  # next day...
        continue
    ntanks = f['%s/tanks' % path][...]
    cc = ntanks > mintank
    # we'll only process if more than 1 data-point is an average of more
    # than `mintank` tanks; otherwise go to the next date.
    # (np.count_nonzero replaces pylab's `find`, which was removed from
    # matplotlib in 3.1)
    if not np.count_nonzero(cc) > 1:
        date += timedelta(days=1)
        continue
    time = f['%s/t' % path][...]  # years since 01/jan/2006
    # sum the count rates of the selected Ed channels (96 points = one day
    # at 15-min resolution) together with their typical long-term rates
    cts = np.zeros(96, dtype=np.float64)
    typ = 0.0
    for i in ch_Eds:
        Ed = i*20. + 10.  # center [MeV] of the i-th 20 MeV-wide channel
        cts += f['%s/cts_temp-corr_%04dMeV' % (path, Ed)][...]
        typ += typic[i]   # scalar: typical rate of this band
    cts_norm = cts/typ                # normalize to the typical rate
    aux = np.nanmean(cts_norm[cc])    # daily mean over well-populated points
    if not (np.isnan(aux) or np.isinf(aux)):
        r += aux
        n += 1
        j += 1
    # close the current dT bin once the day's time stamp passes its edge
    if (time[0] > k*dT) and (n > 0):
        tt += [(k - .5)*dT]  # bin center
        rr += [r/n]          # bin average
        print(" --> date: %2.2f, %s, rr: %r" % (time[0], date.strftime('%d/%b/%Y %H:%M'), r/n))
        k += 1
        r = 0.0
        n = 0
    ntot += 1
    date += timedelta(days=1)  # next day...
#-----------------------------------------------------------------
# assemble the (time, rate) profile as a 2 x N array
nn = len(tt)
tt = np.asarray(tt).reshape(1, nn)
rr = np.asarray(rr).reshape(1, nn)
data_out = np.vstack((tt, rr))
# figure: title shows the integrated Ed band in MeV
TITLE = 'integrated Ed channels\nEd/MeV: %02d-%02d' % (ch_Eds[0]*20, (ch_Eds[-1]+1)*20)
ax.plot(tt[0,:], rr[0,:], '-o', alpha=0.6, mec='none', c='purple', ms=5.)
ax.set_title(TITLE)
print("\n ---> number of processed dates (processed/all): %d / %d" % (ntot, nall))
print("\n --> saving figure...")
fig.savefig(fname_fig, dpi=135, bbox_inches='tight')
print('\n --> WE GENERATED:')
print(" [+] %s" % fname_fig)
close()
#-----------------------------------------------------------------
# dump the profile as two ASCII columns: time [yr], normalized rate
np.savetxt(fname_ascii, data_out.T, fmt='%3.4g')
print(" [+] %s" % fname_ascii)
print("\n --> INPUT FILES: ")
print(" [+] main data: " + pa.fname_inp)
print(" [+] aux data: " + pa.fname_avr)
print("\n --> PERIOD OF ANALYSIS: %d - %d \n" % (year_ini, year_end))
#EOF
Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment