Last active
October 4, 2023 05:27
-
-
Save GarryLai/8b7aa15fb2992bbb1a77af68285da09c to your computer and use it in GitHub Desktop.
將WISSDOM輸出檔(大多數的Fortran Binary也可以)轉為NETCDF格式 — Convert WISSDOM output files (and most other flat Fortran binaries) to NetCDF format
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
########## N O T I C E ##########
#此版本尚未實做時間維度(TDEF)的轉換
#因此只適用於轉換單一時間維度的檔案
#(Time-dimension (TDEF) conversion is not implemented in this
# version, so only single-time-step files convert correctly.)
#################################
import numpy as np | |
import struct | |
import netCDF4 | |
import os, sys | |
import glob | |
##### C O N F I G #####
ctlfile = 'wissdom_0608.ctl'
ncfile = 'wissdom_0608.nc'
#######################

# Parser state, filled in while reading the CTL descriptor file.
data_file = ""        # DSET: path (or glob pattern) of the binary data file
lons = []             # XDEF coordinate values
lats = []             # YDEF coordinate values
heights = []          # ZDEF coordinate values
nx = 0                # grid size along X
ny = 0                # grid size along Y
nz = 0                # grid size along Z
undef = -999.         # UNDEF: missing-data sentinel
endian = ''           # struct byte-order prefix: '>' big, '<' little, '' native
title = None          # TITLE line, if present
in_vars = False       # True while scanning the VARS ... ENDVARS section
vars = []             # variable names (note: intentionally keeps the original name)
z_layers = []         # number of vertical levels per variable
titles = []           # description text per variable

# First CLI argument, when given, overrides the default CTL path.
if len(sys.argv) >= 2:
    ctlfile = sys.argv[1]
def _parse_axis(words):
    """Parse one GrADS axis record: '<X|Y|Z>DEF n LINEAR start inc' or
    '<X|Y|Z>DEF n LEVELS v1 v2 ...'.

    Returns (n, coords).  For an unrecognised mapping keyword coords is
    an empty list (the original code left the axis unset in that case).
    May raise ValueError/IndexError on malformed records; callers wrap
    those in a SyntaxError pointing at the offending CTL line.
    """
    n = int(words[1])
    mapping = words[2].upper()
    if mapping == 'LINEAR':
        start = float(words[3])
        gap = float(words[4])
        coords = np.linspace(start, start + gap * (n - 1), n)
    elif mapping == 'LEVELS':
        coords = [float(v) for v in words[3:]]
    else:
        coords = []
    return n, coords


# ---- Parse the GrADS CTL descriptor --------------------------------------
# Recognised keywords: DSET, XDEF, YDEF, ZDEF, UNDEF, TITLE, OPTIONS and the
# VARS ... ENDVARS variable table.  TDEF is not handled (see notice above).
with open(ctlfile, 'r') as f:
    # Drop CRs and merge continuation lines: a newline followed by a
    # space means the record continues on the next physical line.
    lines = f.read().replace('\r', '').replace('\n ', ' ').split('\n')

print('===== CTL CONFIG =====')
for line_no, line in enumerate(lines, start=1):
    words = line.split()
    if not words:
        continue
    # str.split() never yields empty tokens, so the original per-word
    # "offset" bookkeeping was dead code and has been removed.
    key = words[0].upper()
    if not in_vars:
        if key == 'DSET':
            # '^' marks a path relative to the CTL file; strip it as before.
            data_file = ' '.join(words[1:]).replace('^', '')
            print('data_file=', data_file)
        elif key == 'XDEF':
            try:
                nx, lons = _parse_axis(words)
            except (ValueError, IndexError):
                raise SyntaxError('XDEF value error', (ctlfile, line_no, 0, line))
            print('nx=', nx)
            print('lons=', lons)
        elif key == 'YDEF':
            try:
                ny, lats = _parse_axis(words)
            except (ValueError, IndexError):
                raise SyntaxError('YDEF value error', (ctlfile, line_no, 0, line))
            print('ny=', ny)
            print('lats=', lats)
        elif key == 'ZDEF':
            try:
                nz, heights = _parse_axis(words)
            except (ValueError, IndexError):
                raise SyntaxError('ZDEF value error', (ctlfile, line_no, 0, line))
            print('nz=', nz)
            print('heights=', heights)
        elif key == 'UNDEF':
            try:
                undef = float(words[1])
            except (ValueError, IndexError):
                raise SyntaxError('UNDEF value error', (ctlfile, line_no, 0, line))
            print('undef=', undef)
        elif key == 'TITLE':
            title = ' '.join(words[1:])
            print('title=', title)
        elif key == 'OPTIONS':
            # Check the contradictory case first so its specific message is
            # not swallowed by a generic handler (the original bare except
            # re-wrapped this deliberate error as 'OPTIONS value error').
            if 'big_endian' in words and 'little_endian' in words:
                raise SyntaxError('Cannot using big_endian with little_endian in OPTIONS', (ctlfile, line_no, 0, line))
            if 'big_endian' in words:
                endian = '>'
            if 'little_endian' in words:
                endian = '<'
            print('endian=', endian)
        elif key == 'VARS':
            in_vars = True
    else:
        if key == 'ENDVARS':
            in_vars = False
            continue
        # Variable record: name, level-count, units (ignored), description.
        try:
            vars.append(words[0])
            z_layers.append(int(words[1]))
            titles.append(' '.join(words[3:]))
        except (ValueError, IndexError):
            raise SyntaxError('Variable list error', (ctlfile, line_no, 0, line))

print('=== END CTL CONFIG ===')
input("Check the config then press the <ENTER> key to continue...")
# ---- Convert every binary file matched by DSET ---------------------------
for data_file in glob.glob(data_file, recursive=True):
    # Output name: second CLI argument wins, otherwise derive it from the
    # input file name (same path with .bin/.dat stripped, .nc appended).
    if len(sys.argv) >= 3:
        ncfile = sys.argv[2]
    else:
        ncfile = data_file.replace('.bin', '').replace('.dat', '') + '.nc'
    print(data_file, '-->', ncfile)

    ncfile = netCDF4.Dataset(ncfile, mode='w', format='NETCDF4_CLASSIC')
    ncfile.createDimension('lat', ny)
    ncfile.createDimension('lon', nx)
    ncfile.createDimension('h', nz)

    # Bare file name without directory or .bin/.dat extension.
    basename = data_file.split('\\')[-1].split('/')[-1].replace('.bin', '').replace('.dat', '')
    if title:
        ncfile.title = title
        ncfile.subtitle = basename
    else:
        ncfile.title = basename

    lat = ncfile.createVariable('lat', np.float32, ('lat',))
    lat.units = 'degrees_north'
    lat.long_name = 'latitude'
    lon = ncfile.createVariable('lon', np.float32, ('lon',))
    lon.units = 'degrees_east'
    lon.long_name = 'longitude'
    h = ncfile.createVariable('h', np.float32, ('h',))
    h.units = 'kilometers'
    h.long_name = 'height'
    lat[:] = lats
    lon[:] = lons
    h[:] = heights

    # The data file is a flat sequence of 32-bit floats, one variable after
    # another, each stored as z_layers*ny*nx values.
    # NOTE(review): no Fortran record markers are skipped, so this assumes
    # stream/direct-access output — same assumption as the original code.
    with open(data_file, 'rb') as f:
        for idx, var_name in enumerate(vars):
            try:
                count = z_layers[idx] * ny * nx
                buf = f.read(4 * count)
                # Bulk-decode all floats at once instead of one
                # struct.unpack call per value: identical bytes and
                # endianness semantics ('' prefix = native order), but
                # orders of magnitude faster.  astype() also gives us a
                # writable float64 array, matching the original dtype.
                rawdata = np.frombuffer(buf, dtype=endian + 'f4', count=count).astype(np.float64)
                rawdata = rawdata.reshape((z_layers[idx], ny, nx))
                rawdata[rawdata == undef] = np.nan
                data = ncfile.createVariable(var_name, np.float32, ('h', 'lat', 'lon'))
                data.standard_name = titles[idx]
                data[:, :, :] = rawdata
                print(idx, var_name, '[OK]')
            except Exception as e:
                # Best-effort: report and continue so a truncated input
                # still yields a partial NetCDF (original behaviour).
                print(idx, var_name, '[ERROR]', e)
                continue

    #print(ncfile)
    ncfile.close()
    print()

print('FINISH!')
Sign up for free
to join this conversation on GitHub.
Already have an account?
Sign in to comment