Scripts for running ACCESS-ESM with payu
Scripts for starting ESM1.5 from a CSIRO restart
cicedumpdatemodify.py
#!/usr/bin/env python2
"""
Read AusCOM/CICE restart file and
reset date to required.
"""
__Author__ = 'Petteri Uotila <petteri.uotila@csiro.au>'
__Version__ = 0.1
__Date__ = '07/04/2011'

import sys
import struct
import getopt

class CICEdump:
    def __init__(self, nx=360, ny=300, ncat=5,
                 nilyr=4, nslyr=1, ocnmixed=False):
        # AusCOM grid specifics:
        self.nx = nx          # no points in x
        self.ny = ny          # no points in y
        self.ncat = ncat      # no ice categories
        self.nilyr = nilyr    # no of layers in ice
        self.nslyr = nslyr    # no of layers in snow
        self.ntilyr = ncat*nilyr
        self.ntslyr = ncat*nslyr
        self.bsize = nx*ny
        self.doublesize = 8
        # if CICE standalone set this True:
        self.ocnmixed = ocnmixed
        nr = 24 if self.ocnmixed else 22
        self.nrecs = self.ncat*4 + self.ntilyr + self.ntslyr + nr

    def read_buffer(self, fi):
        """
        read record and separator as strings without unpack
        """
        recsep = fi.read(self.doublesize)
        buf = fi.read(self.doublesize*self.bsize)
        return recsep, buf

    def read_buffers(self, fin='iced.sis2-cnyf2-41.00100101'):
        """
        read buffers and separators to lists
        """
        fi = open(fin, 'rb')
        bufs = []; rseps = []
        # read timing information
        self.header = fi.read(24)
        for n in range(self.nrecs):
            recsep, buf = self.read_buffer(fi)
            rseps.append(recsep)
            bufs.append(buf)
        self.footer = fi.read(4)
        fi.close()
        self.rseps = rseps
        self.bufs = bufs

    def write_buffers(self, fon):
        """
        write buffers to fon
        """
        fp = open(fon, 'wb')
        fp.write(self.header)
        for n in range(self.nrecs):
            fp.write(self.rseps[n])
            fp.write(self.bufs[n])
        fp.write(self.footer)
        fp.close()

    def print_header(self):
        bint, istep0, time, time_forc = \
            struct.unpack('>iidd', self.header)
        print "istep0=%d" % istep0
        print "time=%f" % time
        print "time_forc=%f" % time_forc

    def change_header(self, istep0=0, time=0.0, time_forc=0.0):
        """ Change binary header """
        self.oldheader = self.header
        bint, istep0old, timeold, time_forcold = \
            struct.unpack('>iidd', self.header)
        self.header = struct.pack('>iidd', bint, istep0, time, time_forc)

if __name__ == '__main__':
    def usage():
        print """%s:
Change time of the CICE dumpfile.
-i input file
-o output file [if not given shows timestamp of input file]
-h this help
-v verbose output
--istep0 time step to be written [default 0]
--time to be written [default 0.0]
--time_forc to be written [default 0.0]
""" % sys.argv[0]

    def change_time(ocnmixed=False):
        dump = CICEdump(ocnmixed=ocnmixed)
        dump.read_buffers(config["-i"])
        if config.has_key("-v"):
            print "Read %s" % config["-i"]
            print "Time information:"
            dump.print_header()
        dump.change_header(istep0=int(config["--istep0"]),
                           time=float(config["--time"]),
                           time_forc=float(config["--time_forc"]))
        if config.has_key("-v"):
            print "Time changed to:"
            dump.print_header()
        dump.write_buffers(config["-o"])

    # default values for command line args
    config = {"-i": "/short/p66/pju565/OUTPUT/AusCOM1.0/sis3-cnyf2-23/restart/cice/iced.00110101",
              "--istep0": "0",
              "--time": "0.0",
              "--time_forc": "0.0"}
    try:
        if len(sys.argv) > 1:
            options, operands = getopt.getopt(sys.argv[1:], 'hvi:o:',
                                              ['istep0=', 'time=', 'time_forc='])
            config.update(dict(options))
    except:
        usage(); sys.exit(0)
    if config.has_key("-h"):
        usage(); sys.exit(0)
    if not config.has_key("-o"):
        dump = CICEdump()
        dump.read_buffers(config["-i"])
        dump.print_header()
        sys.exit(0)
    try:
        change_time(ocnmixed=False)
    except:
        if config.has_key("-v"):
            print "This dump seems to have ocean mixed layer data."
        change_time(ocnmixed=True)
    print "Finished"
#!/bin/bash
set -eu
#!/bin/bash
# Pre-run script, runs after payu sets up the work directory and before the model is run.
# Sets the appropriate land use for the model start date.
# The model doesn't vary the land use itself; this is instead done as a
# post-processing step - the old land use values are moved to a new STASH code,
# and new land use values are read in from an external file.

source /etc/profile.d/modules.sh
module use /g/data/hh5/public/modules
module load conda/analysis3

set -eu

# Input land use file
lu_file=$1

# Get the current year from field t2_year of the restart file
year=$(mule-pumf --component fixed_length_header work/atmosphere/restart_dump.astart | sed -n 's/.*t2_year\s*:\s*//p')

# If that year is in the land use file, save a single timestep to a new netcdf file
if cdo selyear,$(( year )) -chname,fraction,field1391 $lu_file work/atmosphere/land_frac.nc; then
    # Back up the original restart file
    mv work/atmosphere/restart_dump.astart work/atmosphere/restart_dump.astart.orig
    # Use the CSIRO script to set the land use
    python scripts/update_cable_vegfrac.py -i work/atmosphere/restart_dump.astart.orig -o work/atmosphere/restart_dump.astart -f work/atmosphere/land_frac.nc
fi
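payu normally runs a script like this automatically via its pre-run hook; a manual invocation from the control directory, with the work directory already set up, would look like the following. The script name is an assumption (the gist does not show the saved file name), and the land use path is the one read by the mule-based script further down:

scripts/update_landuse.sh work/atmosphere/INPUT/cableCMIP6_LC_1850-2015.nc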
set_restart_year.sh
#!/bin/bash
# Copyright 2020 Scott Wales
#
# \author Scott Wales <scott.wales@unimelb.edu.au>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Modifications made to original scripts by Spencer Wong for
# compatibility with new payu versions.
# ----------------------------------------------------------------------

# Sets the start year in each model component

source /etc/profile.d/modules.sh
module use /g/data/vk83/modules
module load payu
module load nco

set -eu
trap "echo Error in set_restart_year.sh" ERR

export UMDIR=~access/umdir

# Load some helper scripts
SCRIPTDIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" &> /dev/null && pwd )"
source $SCRIPTDIR/utils.sh

# Starting year
start_year=$1

# Set the restart year in the atmosphere and ice namelists
set_um_start_year $start_year

# With latest payu/ESM1.5 versions, restart timing is read from the
# restart_date.nml file in the restart directory, rather than the
# control directory, and set_cice_start_year() is not needed.
# set_cice_start_year $start_year

# Most recent restart directory
payu_restart=$(ls -d ./archive/restart* | sort -t t -k 3 -n | tail -n 1)
echo "Setting restart year in ${payu_restart} to ${start_year}"

if [ ! -d $payu_restart/ocean ]; then
    echo "No restart directory"
    exit 1
fi

# Update ocean start time - read from a text file
cat > $payu_restart/ocean/ocean_solo.res << EOF
3
1 1 1 0 0 0
$start_year 1 1 0 0 0
EOF

# Update atmos start time - field in the restart file
python scripts/update_um_year.py $start_year $payu_restart/atmosphere/restart_dump.astart 2> /dev/null

cat > $payu_restart/atmosphere/um.res.yaml << EOF
end_date: $(printf %04d $start_year)-01-01 00:00:00
EOF

# Clear ice step count
cat > $payu_restart/ice/cice_in.nml << EOF
&setup_nml
istep0=0,
npt=0,
dt=3600,
/
EOF

# NB: Since `time` is set to 0 in the cicedumpdatemodify.py call, we need
# inidate (run start date) and init_date (experiment initialisation date)
# to match. This was done previously with set_cice_start_year for the old
# restart date format.
cat > $payu_restart/ice/restart_date.nml << EOF
&coupling
inidate=${start_year}0101
init_date=${start_year}0101
/
EOF

# Set the date in the cice netcdf file
ncatted -a units,time,o,c,"seconds since ${start_year}-01-01 00:00:00" $payu_restart/ice/mice.nc

# Seconds between init_date and inidate
secs_realyr=0

ice_restart=$(ls $payu_restart/ice/iced.*0101)
mv $ice_restart ${ice_restart}.orig

# Set the date in the cice binary restart file
scripts/cicedumpdatemodify.py -i ${ice_restart}.orig -o $payu_restart/ice/iced.${start_year}0101 --istep0=0 --time=${secs_realyr}. --time_forc=0.
rm ${ice_restart}.orig
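Both warm-start variants below call this script; it can also be run directly from the payu control directory against the most recent restart (the year is illustrative):

scripts/set_restart_year.sh 101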
um_fileheaders.py
# From rcf_headaddress_mod.F90
# Also defined in UMDP F3

# Fixed length header constants.
# Values that are array indices have had one subtracted so they work properly
# with python arrays.

# Integers
FH_Version = 0
FH_SubModel = 1
FH_VertCoord = 2
FH_HorizGrid = 3
FH_Dataset = 4
FH_RunId = 5
FH_ExptNo = 6
FH_CalendarType = 7
FH_GridStagger = 8
FH_AncilDataId = 9
FH_ProjNo = 10
FH_OcnBndyConds = 10  # ocean boundary condition
FH_ModelVersion = 11
FH_OceanDynamics = 11
FH_ObsFileType = 13

FH_DTYear = 20
FH_DTMonth = 21
FH_DTDay = 22
FH_DTHour = 23
FH_DTMinute = 24
FH_DTSecond = 25
FH_DTDayNo = 26
FH_VTYear = 27
FH_VTMonth = 28
FH_VTDay = 29
FH_VTHour = 30
FH_VTMinute = 31
FH_VTSecond = 32
FH_VTDayNo = 33
FH_CTYear = 34
FH_CTMonth = 35
FH_CTDay = 36
FH_CTHour = 37
FH_CTMinute = 38
FH_CTSecond = 39
FH_CTDayNo = 40

FH_IntCStart = 99
FH_IntCSize = 100
FH_RealCStart = 104
FH_RealCSize = 105
FH_LevDepCStart = 109
FH_LevDepCSize1 = 110
FH_LevDepCSize2 = 111
FH_RowDepCStart = 114
FH_RowDepCSize1 = 115
FH_RowDepCSize2 = 116
FH_ColDepCStart = 119
FH_ColDepCSize1 = 120
FH_ColDepCSize2 = 121
FH_FldsOfCStart = 124
FH_FldsOfCSize1 = 125
FH_FldsOfCSize2 = 126
FH_ExtraCStart = 129
FH_ExtraCSize = 130
FH_HistStart = 134
FH_HistSize = 135
FH_CompFldI1Start = 139
FH_CompFldI1Size = 140
FH_CompFldI2Start = 141
FH_CompFldI2Size = 142
FH_CompFldI3Start = 143
FH_CompFldI3Size = 144
FH_LookupStart = 149
FH_LookupSize1 = 150
FH_LookupSize2 = 151
FH_NumProgFields = 152
FH_DataStart = 159
FH_DataSize = 160
FH_MaxDataSize = 161

# These are flag values rather than indices, so are unchanged
FH_Version_Value = 20
FH_SubModel_Atmos = 1
FH_SubModel_Ocean = 2
FH_SubModel_Wave = 4

FH_VertCoord_Hybrid = 1
FH_VertCoord_Sigma = 2
FH_VertCoord_Pressure = 3
FH_VertCoord_Depth = 4
FH_VertCoord_CP = 5
FH_VertCoord_Wave = 6

FH_HorizGrid_Global = 0
FH_HorizGrid_NH = 1
FH_HorizGrid_SH = 2
FH_HorizGrid_LamNoWrap = 3
FH_HorizGrid_LamWrap = 4
FH_HorizGrid_Eq = 100
FH_HorizGrid_LamNoWrapEq = 103
FH_HorizGrid_LamWrapEq = 104

FH_GridStagger_A = 1
FH_GridStagger_B = 2
FH_GridStagger_C = 3
FH_GridStagger_D = 4
FH_GridStagger_E = 5

FH_Dataset_InstDump = 1
FH_Dataset_MeanDump = 2
FH_Dataset_FF = 3
FH_Dataset_Ancil = 4
FH_Dataset_Boundary = 5
FH_Dataset_ACOBS = 6
FH_Dataset_VAROBS = 7
FH_Dataset_CX = 8
FH_Dataset_COV = 9
FH_Dataset_OBSTORE = 10

FH_ObsFileType_Atmos = 1
FH_ObsFileType_Ocean = 2
FH_ObsFileType_SST = 3
FH_ObsFileType_Wave = 4

# Indices with one subtracted
IC_TorTheta = 0        # location in header
IC_TorTheta_T = 1      # value of above if T
IC_TorTheta_Theta = 2  # value of above if Theta
IC_XLen = 5
IC_YLen = 6
IC_PLevels = 7
IC_WetLevels = 8
IC_SoilTLevels = 9
IC_NoCloudLevels = 10  # ATMOS only
IC_NoSeaPts = 10       # OCEAN only
IC_TracerLevs = 11
IC_BLevels = 12
IC_TracerVars = 13
IC_HeightMethod = 16   # method for creating heights
IC_RiverRowLength = 18 # river-routing row-length
IC_RiverRows = 19      # river-routing rows
IC_MDI = 20
IC_1stConstRho = 23
IC_NumLandPoints = 24
IC_NumOzoneLevs = 25
IC_SoilMoistLevs = 27
IC_NumObsTotal = 27
IC_LenObCol = 28
IC_LenCxCol = 29       # Varobs, not acobs
IC_ObsGroup = 30       # "
IC_ObsRelease = 31     # "
IC_NumMetaMax = 32     # "
IC_ConvectLevs = 33
IC_NumItemMax = 33     # "
IC_NumObVarMax = 34
IC_NumObPItemMax = 35
IC_NumCxPItemMax = 36
IC_NumCxSFVarMax = 37
IC_NumCxUaVarMax = 38
IC_NumMeta = 39
IC_NumItem = 40
IC_NumObVar = 41
IC_NumObPItem = 42
IC_NumCxPItem = 43
IC_NumCxSfVar = 44
IC_NumCxUaVar = 45
IC_NumObLev = 46
IC_NumCxLev = 47
IC_NumVarBatches = 48

RC_LongSpacing = 0
RC_LatSpacing = 1
RC_FirstLat = 2
RC_FirstLong = 3
RC_PoleLat = 4
RC_PoleLong = 5
RC_SWLDEG = 6    # Ocean - lat of South wall
RC_WEDGEDEG = 7  # Ocean - long of West bdy
RC_ModelTop = 15
RC_PressureTop = 16

# Not sure what these are
## CC_Meta_Latitude = 1   # Used in varobs
## CC_Meta_Longitude = 2  # "
## CC_Meta_Time = 3       # "
## CC_Meta_Type = 4       # "
## CC_Meta_Call = 5       # "
## CC_Meta_Level = 6      # "
## CC_Meta_RepPGE = 7     # "
## CC_Item_Value = 1  # Used in varobs
## CC_Item_Error = 2  # "
## CC_Item_PGE = 3    # "
## LDC_EtaTheta = 1
## LDC_Pressure = 1
## LDC_MLIndex = 1
## LDC_EtaRho = 2
## LDC_RHCrit = 3
## SoilDepths = 4
## LDC_ZseaTheta = 5
## LDC_CkTheta = 6
## LDC_ZseaRho = 7
## LDC_CkRho = 8

# From clookadd.h
#!*L------------------ COMDECK LOOKADD ----------------------------------
#!LL
#!LL Purpose : Contains information about the format
#!LL of the PP header

# Validity time
LBYR = 0    # Year
LBMON = 1   # Month
LBDAT = 2   # Day of month
LBHR = 3    # Hour
LBMIN = 4   # Minute
LBDAY = 5   # Day number
LBSEC = 5   # Seconds (if LBREL >= 3)

# Data time
LBYRD = 6    # Year
LBMOND = 7   # Month
LBDATD = 8   # Day of month
LBHRD = 9    # Hour
LBMIND = 10  # Minute
LBDAYD = 11  # Day number
LBSECD = 11  # Seconds (if LBREL >= 3)

LBTIM = 12   # Time indicator
LBFT = 13    # Forecast period (hours)
LBLREC = 14  # Length of data record
LBCODE = 15  # Grid type code
LBHEM = 16   # Hemisphere indicator
LBROW = 17   # Number of rows in grid
LBNPT = 18   # Number of points per row
LBEXT = 19   # Length of extra data
LBPACK = 20  # Packing method indicator
LBREL = 21   # Header release number
LBFC = 22    # Field code
LBCFC = 23   # Second field code
LBPROC = 24  # Processing code
LBVC = 25    # Vertical coordinate type
LBRVC = 26   # Coordinate type for reference level
LBEXP = 27   # Experiment number
LBEGIN = 28  # Start record
LBNREC = 29  # No of records - direct access only
LBPROJ = 30  # Met-O-8 projection number
LBTYP = 31   # Met-O-8 field type
LBLEV = 32   # Met-O-8 level code
LBRSVD1 = 33 # Reserved for future PP-package use
LBRSVD2 = 34 # Reserved for future PP-package use
LBRSVD3 = 35 # Reserved for future PP-package use
LBRSVD4 = 36 # Reserved for future PP-package use
LBSRCE = 37  # =1111 to indicate following apply to UM
DATA_TYPE = 38  # Indicator for real/int or timeseries
NADDR = 39   # Start address in DATA_REAL or DATA_INT
LBUSER3 = 40 # Free for user-defined function
ITEM_CODE = 41  # Stash code
LBPLEV = 42  # Pseudo-level indicator (if defined)
LBUSER6 = 43 # Free for user-defined function
MODEL_CODE = 44  # Internal model identifier

BULEV = 45   # Upper level boundary
BHULEV = 46  # Upper level boundary
BRSVD3 = 47  # Reserved for future PP-package use
BRSVD4 = 48  # Reserved for future PP-package use
BDATUM = 49  # Datum value
BACC = 50    # (Packed fields) Packing accuracy
BLEV = 51    # Level
BRLEV = 52   # Lower level boundary
BHLEV = 53   # (Hybrid levels) A-level of value
BHRLEV = 54  # Lower level boundary
BPLAT = 55   # Real latitude of 'pseudo' N Pole
BPLON = 56   # Real longitude of 'pseudo' N Pole
BGOR = 57    # Grid orientation
BZY = 58     # Zeroth latitude
BDY = 59     # Latitude interval
BZX = 60     # Zeroth longitude
BDX = 61     # Longitude interval
BMDI = 62    # Missing data indicator
BMKS = 63    # M,K,S scaling factor

## LBCC_LBYR = 1    # Year
## LBCC_LBMON = 2   # Month
## LBCC_LBDAT = 3   # Day of the month
## LBCC_LBHR = 4    # Hour
## LBCC_LBMIN = 5   # Minute
## LBCC_LBDAY = 6   # Day number
## LBCC_LBEGIN = 7  # Start record
## LBCC_NADDR = 8   # Start address of DATA
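A minimal sketch of how these zero-based indices are used together with the UMFile class in umfile.py below, assuming the numpy/six environment these scripts target (the restart path is illustrative):

python - <<'EOF'
from umfile import UMFile
from um_fileheaders import FH_DTYear, FH_DTMonth, FH_DTDay
f = UMFile('restart_dump.astart')  # illustrative path
# fixhd is the 256-word fixed length header; one has already been
# subtracted from the constants, so they index fixhd directly
print("Data time: %d-%02d-%02d" %
      (f.fixhd[FH_DTYear], f.fixhd[FH_DTMonth], f.fixhd[FH_DTDay]))
EOF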
umfile.py
from __future__ import print_function
from um_fileheaders import *
import numpy as np
from six.moves import builtins

class umfile_error(Exception):
    pass

class packerr(Exception):
    pass

class UMFile():
    # Should this inherit from io.something?
    """ Extended version of file class that uses 8 byte words """

    missval_i = -32768
    missval_r = -1073741824

    def __init__(self, filename, mode=None):
        if not mode:
            mode = 'rb'
        if not "b" in mode:
            mode += "b"
        self.fileobj = builtins.open(filename, mode)
        if "r" in mode:
            self.determine_file_type()
            self.readheader()
            self.readlookup()
            self.sectorsize = self.getsectorsize()
        self.mask = None
        self.nland = None

    def close(self):
        # Unless file was opened readonly, need to write the new header
        # information before closing. (The mode was normalised to include
        # 'b' in __init__, so readonly is 'rb'.)
        if not self.fileobj.mode == 'rb':
            self.writeheader()
            self.writelookup()
        self.fileobj.close()

    def wordseek(self, offset):
        self.fileobj.seek(offset*self.wordsize)

    def wordread(self, size):
        return self.fileobj.read(size*self.wordsize)

    def arraywrite(self, array):
        # Could use tofile here, but no real advantage.
        # Need to check whether the native format is big or little.
        # Here assuming little.
        if array.dtype.byteorder == self.byteorder:
            return self.fileobj.write(array.tobytes())
        else:
            return self.fileobj.write(array.byteswap().tobytes())

    def determine_file_type(self):
        # Get word length and byte order?
        # Read first 16 bytes and try to interpret in various ways
        self.fileobj.seek(0)
        s = self.fileobj.read(16)
        # For a UM fieldsfile, first word should be 20 and second 1, 2, or 4.
        # For an ancillary file the first word is -32768.
        # Include = in the test to make output easier
        self.fieldsfile = False
        self.ppfile = False
        for endian in ('=', '>', '<'):
            h = np.fromstring(s, np.int64).newbyteorder(endian)
            # print("testing 64 bit", h[:2])
            if h[0] in [15, 20, -32768] and h[1] in (1, 2, 4):
                self.byteorder = endian
                self.wordsize = 8
                self.int = np.int64
                self.float = np.float64
                self.fieldsfile = True
                return
            h = np.fromstring(s, np.int32).newbyteorder(endian)
            # print("testing 32 bit", h[:2])
            if h[0] in [15, 20, -32768] and h[1] in (1, 2, 4):
                self.byteorder = endian
                self.wordsize = 4
                self.int = np.int32
                self.float = np.float32
                self.fieldsfile = True
                return
            if h[0] == 256:
                self.byteorder = endian
                self.wordsize = 4
                self.int = np.int32
                self.float = np.float32
                self.ppfile = True
                return
        raise umfile_error("Error - file type not determined")

    def readheader(self):
        if not self.fieldsfile:
            return
        self.fileobj.seek(0)
        # Fixed length header of length 256
        s = self.wordread(256)
        self.fixhd = np.fromstring(s, self.int).newbyteorder(self.byteorder)
        # Integer constants
        self.wordseek(self.fixhd[FH_IntCStart]-1)
        nint = self.fixhd[FH_IntCSize]
        s = self.wordread(nint)
        self.inthead = np.fromstring(s, self.int).newbyteorder(self.byteorder)
        # Real constants
        self.wordseek(self.fixhd[FH_RealCStart]-1)
        nreal = self.fixhd[FH_RealCSize]
        s = self.wordread(nreal)
        self.realhead = np.fromstring(s, self.float).newbyteorder(self.byteorder)
        # Level dependent constants
        if self.fixhd[FH_LevDepCStart] > 0:
            self.wordseek(self.fixhd[FH_LevDepCStart]-1)
            nlconst = self.fixhd[FH_LevDepCSize1]*self.fixhd[FH_LevDepCSize2]
            s = self.wordread(nlconst)
            self.levdep = np.fromstring(s, self.float).newbyteorder(self.byteorder)
            self.levdep.shape = (self.fixhd[FH_LevDepCSize2], self.fixhd[FH_LevDepCSize1])
        # Row dependent constants
        if self.fixhd[FH_RowDepCStart] > 0:
            self.wordseek(self.fixhd[FH_RowDepCStart]-1)
            nlconst = self.fixhd[FH_RowDepCSize1]*self.fixhd[FH_RowDepCSize2]
            s = self.wordread(nlconst)
            self.rowdep = np.fromstring(s, self.float).newbyteorder(self.byteorder)
            self.rowdep.shape = (self.fixhd[FH_RowDepCSize2], self.fixhd[FH_RowDepCSize1])
        # Column dependent constants
        if self.fixhd[FH_ColDepCStart] > 0:
            self.wordseek(self.fixhd[FH_ColDepCStart]-1)
            nlconst = self.fixhd[FH_ColDepCSize1]*self.fixhd[FH_ColDepCSize2]
            s = self.wordread(nlconst)
            # Should reshape this to a 2D array
            self.coldep = np.fromstring(s, self.float).newbyteorder(self.byteorder)
            self.coldep.shape = (self.fixhd[FH_ColDepCSize2], self.fixhd[FH_ColDepCSize1])

    def getsectorsize(self):
        # Calculate sectorsize as gcd of the data offsets.
        # Assume it's not larger than default 2048.
        sector = gcd(2048, self.fixhd[FH_DataStart] - 1)  # Actual start off by 1.
        for k in range(self.fixhd[FH_LookupSize2]):
            if self.ilookup[k, LBEGIN] == -99:
                break
            sector = gcd(sector, self.ilookup[k, LBNREC])
        return sector

    def createheader(self, intsize, realsize, levdepdim1=0, levdepdim2=0):
        # Create a standard header, given level dependent constants as arguments.
        # Lengths of other sections may be version dependent?
        # Fixed length header of length 256
        self.fixhd = np.zeros(256, self.int)
        # Integer constants
        self.inthead = np.zeros(intsize, self.int)
        # Real constants
        self.realhead = np.zeros(realsize, self.float)
        # Level dependent constants
        if levdepdim1 > 0 and levdepdim2 > 0:
            self.levdep = np.zeros((levdepdim2, levdepdim1), self.float)

    def copyheader(self, f):
        """Copy all the header properties from specified open file"""
        for attr in ["wordsize", "byteorder", "int", "float", "fieldsfile",
                     "ppfile"]:
            setattr(self, attr, getattr(f, attr))
        # Array attributes need to be copied.
        for attr in ["fixhd", "realhead", "inthead"]:
            setattr(self, attr, getattr(f, attr).copy())
        # These ones need not exist
        for attr in ["levdep", "rowdep", "coldep"]:
            if hasattr(f, attr):
                setattr(self, attr, getattr(f, attr).copy())
        self.ilookup = f.ilookup.copy()
        self.rlookup = f.rlookup.copy()
        self.sectorsize = f.sectorsize

    def writeheader(self):
        # Header must already be defined by copying or creating.
        # Fixed length header of length 256
        self.wordseek(0)
        self.arraywrite(self.fixhd)
        # Integer constants
        self.wordseek(self.fixhd[FH_IntCStart]-1)
        self.arraywrite(self.inthead)
        # Real constants
        self.wordseek(self.fixhd[FH_RealCStart]-1)
        self.arraywrite(self.realhead)
        # Level dependent constants
        if self.fixhd[FH_LevDepCStart] > 0:
            self.wordseek(self.fixhd[FH_LevDepCStart]-1)
            self.arraywrite(self.levdep)
        if self.fixhd[FH_RowDepCStart] > 0:
            self.wordseek(self.fixhd[FH_RowDepCStart]-1)
            self.arraywrite(self.rowdep)
        if self.fixhd[FH_ColDepCStart] > 0:
            self.wordseek(self.fixhd[FH_ColDepCStart]-1)
            self.arraywrite(self.coldep)

    def readlookup(self):
        lookdim1 = self.fixhd[FH_LookupSize1]
        lookdim2 = self.fixhd[FH_LookupSize2]
        # Read lookup
        self.wordseek(self.fixhd[FH_LookupStart]-1)
        s = self.wordread(lookdim1*lookdim2)
        # The lookup table has separate integer (words 1-45) and real (words
        # 46-64) sections. Simplest to have duplicate integer and real
        # versions and just index into the appropriate parts.
        # Is it possible to just make them views of the same data?
        if lookdim1 != 64:
            raise umfile_error("Unexpected lookup table dimension %d %d" % (lookdim1, lookdim2))
        self.ilookup = np.reshape(np.fromstring(s, self.int).newbyteorder(self.byteorder), [lookdim2, lookdim1])
        self.rlookup = np.reshape(np.fromstring(s, self.float).newbyteorder(self.byteorder), [lookdim2, lookdim1])

    def print_fixhead(self):
        print("FIXED HEADER")
        for i in range(256):
            if i % 8 == 0:
                print("%5d:" % i, end="")
            if self.fixhd[i] == self.missval_i or self.fixhd[i] == self.missval_r:
                # -32768 is integer missing value, -1073741824 is an FP NaN
                print("       _", end="")
            else:
                print("%8d" % self.fixhd[i], end="")
            if i % 8 == 7:
                print()

    def getmask(self):
        # Is it already defined?
        if self.mask is not None:
            return
        # Get the land sea mask, code 30
        for k in range(self.fixhd[FH_LookupSize2]):
            if self.ilookup[k, LBEGIN] == -99:
                break
            if self.ilookup[k, ITEM_CODE] == 30:
                self.mask = self.readfld(k)
                self.nland = np.sum(self.mask != 0)
                return
        raise packerr("Land sea mask required for packing/unpacking")

    def readfld(self, k, raw=False):
        # Read field number k
        ilookup = self.ilookup[k]
        lbnrec = ilookup[LBNREC]  # Size padded to record size
        lblrec = ilookup[LBLREC]  # Actual size w/o padding
        lbegin = ilookup[LBEGIN]  # lbegin is offset from start
        self.wordseek(lbegin)
        s = self.wordread(lbnrec)
        if raw:
            return s
        packing = [0, ilookup[LBPACK]%10, ilookup[LBPACK]//10 % 10,
                   ilookup[LBPACK]//100 % 10, ilookup[LBPACK]//1000 % 10,
                   ilookup[LBPACK]//10000]
        if packing[1] == 0:
            # IEEE at same precision as the file
            nbytes = lblrec*self.wordsize
            if ilookup[DATA_TYPE] == 1:
                dtype = self.float
            else:
                # Treat integer and logical together
                dtype = self.int
        elif packing[1] == 2:
            # 32 bit IEEE
            nbytes = lblrec*4
            if ilookup[DATA_TYPE] == 1:
                dtype = np.float32
            else:
                dtype = np.int32
        else:
            raise packerr("Packing with N1 = %d not supported" % packing[1])
        if packing[2] == 0:
            # No compression
            npts = ilookup[LBNPT]
            nrows = ilookup[LBROW]
            # print("S", len(s), nbytes, len(np.fromstring(s[:nbytes], dtype)))
            if nrows*npts == ilookup[LBLREC]:
                # As expected
                data = np.reshape(np.fromstring(s[:nbytes], dtype).newbyteorder(self.byteorder), [nrows, npts])
            else:
                # There are some fields (accumulated runoff) that are packed to
                # land points, but don't have packing set correctly
                data = np.fromstring(s[:nbytes], dtype).newbyteorder(self.byteorder)
        elif packing[2] == 2:
            # Compressed using mask; nlon, nlat are the values from the land
            # sea mask
            if self.mask is None:
                self.getmask()
            nrows, npts = self.mask.shape
            tmp = np.fromstring(s[:nbytes], dtype).newbyteorder(self.byteorder)
            # Set output array to missing, forcing the missing value to the
            # correct type.
            data = np.zeros((nrows, npts), dtype) + np.array([self.missval_r], dtype)
            if packing[3] == 1:
                # Use land mask (non-zero) values
                # Should check the sizes match that expected
                data.flat[self.mask.flat != 0] = tmp
            else:
                # Ocean values
                data.flat[self.mask.flat == 0] = tmp
        else:
            raise packerr("Packing with N2 = %d not supported - field code %d" % (packing[2], ilookup[ITEM_CODE]))
        return data

    def writefld(self, data, k, raw=False, overwrite=False):
        # Write the kth field
        if overwrite:
            filepos = self.ilookup[k, LBEGIN]
        else:
            if k == 0:
                filepos = self.fixhd[FH_DataStart] - 1
            else:
                filepos = self.ilookup[k-1, LBEGIN] + self.ilookup[k-1, LBNREC]
        self.wordseek(filepos)
        # If overwriting a field in an existing file don't change the header
        if not overwrite:
            self.ilookup[k, LBEGIN] = filepos
            # Need to set the output record size here
            if self.fixhd[FH_Dataset] == 3:
                # Fieldsfile, NADDR is relative to start of fixed length header
                # (i.e. relative to start of file)
                self.ilookup[k, NADDR] = filepos
            else:
                # Ancillary files behave like dumps?
                # NADDR is relative to start of data. Note that this uses LBLREC
                # so ignores the packing and the record padding. No relation to
                # the actual disk address in LBEGIN.
                if k == 0:
                    self.ilookup[k, NADDR] = 1
                else:
                    self.ilookup[k, NADDR] = self.ilookup[k-1, NADDR] + self.ilookup[k-1, LBLREC]
        if raw:
            # Data is just an array of bytes
            self.fileobj.write(data)
            # Header is unchanged
            return
        else:
            # Need to pack properly
            packing = [0, self.ilookup[k, LBPACK]%10, self.ilookup[k, LBPACK]//10 % 10,
                       self.ilookup[k, LBPACK]//100 % 10, self.ilookup[k, LBPACK]//1000 % 10,
                       self.ilookup[k, LBPACK]//10000]
            # First consider packing to land or sea points
            if packing[2] == 0:
                # No packing
                packdata = data
            elif packing[2] == 2:
                if self.mask is None:
                    self.getmask()
                    # Need to restore the file pointer after the mask read
                    self.wordseek(filepos)
                if packing[3] == 1:
                    # Use land mask (non-zero) values
                    # Should check the sizes match that expected
                    packdata = data[self.mask != 0]
                else:
                    # Ocean values
                    packdata = data[self.mask == 0]
            else:
                raise packerr("Packing with N2 = %d not supported - field code %d" % (packing[2], self.ilookup[k, ITEM_CODE]))
            # Now write the data
            # arraywrite could actually return the sizes?
            lblrec = packdata.size
            self.arraywrite(packdata)
            if not overwrite:
                # Make the sector size a variable?
                self.ilookup[k, LBLREC] = lblrec
                if packing[1] == 2 and self.wordsize == 8:
                    size = (lblrec+1)//2
                else:
                    size = lblrec
                lbnrec = int(np.ceil(size/float(self.sectorsize))) * self.sectorsize
                self.ilookup[k, LBNREC] = lbnrec

    def writelookup(self):
        # lookdim1 = self.fixhd[FH_LookupSize1]
        # lookdim2 = self.fixhd[FH_LookupSize2]
        # For compatibility with the old version use the full size
        lookdim2, lookdim1 = self.ilookup.shape
        # Need to combine the ilookup and rlookup arrays to a single array.
        # Convert the float part to an integer array.
        lookup = np.fromstring(self.rlookup[:lookdim2, :].tobytes(), self.int).newbyteorder(self.byteorder)
        # Now copy the true integer part on top
        lookup.shape = (lookdim2, lookdim1)
        lookup[:, :45] = self.ilookup[:, :45]
        self.wordseek(self.fixhd[FH_LookupStart]-1)
        self.arraywrite(lookup)

class Axis:
    def __init__(self, name, values):
        # Should check name is lat, lon or lev and that the values are
        # appropriate for the axis type.
        self.name = name
        self.values = values

    def __eq__(self, a):
        if self.name == a.name and len(self.values) == len(a.values):
            return np.allclose(self.values, a.values)
        else:
            return False

def gcd(a, b):
    while a > 0:
        c = b % a
        b = a
        a = c
    return b

class UniqueList(list):
    # List without duplicates
    def append(self, a):
        if type(a) in (list, np.ndarray):
            for x in a:
                if not x in self:
                    list.append(self, x)
        else:
            if not a in self:
                list.append(self, a)

class Grid:
    def __init__(self, lon, lat, lev):
        # Check that dimensions match.
        # Really only for timeseries?
        if len(lat) == len(lon) == len(lev):
            self.lon = lon
            self.lat = lat
            self.lev = lev
        else:
            raise umfile_error("Inconsistent grids")

    def __eq__(self, g):
        if len(self.lon) == len(g.lon) and len(self.lat) == len(g.lat) and len(self.lev) == len(g.lev):
            return np.allclose(self.lon, g.lon) and np.allclose(self.lat, g.lat) \
                and np.allclose(self.lev, g.lev)
        else:
            return False

def isprog(ilookup):
    # Check whether a STASH code corresponds to a prognostic variable.
    # Section 33 is tracers, 34 is UKCA.
    # Also check whether variable is instantaneous, LBTIM < 10,
    # has no time processing, ilookup[LBPROC] == 0,
    # and is not a time series, LBCODE < 30000.
    # 3100 - 3129 also seem to be treated as prognostics.
    varcheck = ilookup[ITEM_CODE]//1000 in [0, 33, 34] or \
        3100 <= ilookup[ITEM_CODE] <= 3129
    timecheck = ilookup[LBTIM] < 10 and ilookup[LBPROC] == 0 and ilookup[LBCODE] < 30000
    return varcheck and timecheck

def istracer(ilookup):
    return (ilookup[ITEM_CODE]//1000 == 33 and ilookup[LBTIM] < 10
            and ilookup[LBPROC] == 0 and ilookup[LBCODE] < 30000)
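A minimal sketch of reading fields with this module, in the same style as the lookup loops in update_cable_vegfrac.py below (the dump path is illustrative):

python - <<'EOF'
from umfile import UMFile
from um_fileheaders import FH_LookupSize2, LBEGIN, ITEM_CODE
f = UMFile('restart_dump.astart')  # illustrative path
for k in range(f.fixhd[FH_LookupSize2]):
    if f.ilookup[k, LBEGIN] == -99:  # -99 marks the end of the used lookup entries
        break
    data = f.readfld(k)  # unpacks land-compressed fields via the code 30 mask
    print("STASH %d shape %s" % (f.ilookup[k, ITEM_CODE], data.shape))
EOF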
update_cable_vegfrac.py
# Update CABLE vegfrac field and set new tiles to the grid box mean.
# Arguments are the old and new dump file and the new vegetation fraction ancillary.
import numpy as np, sys, umfile, argparse
from um_fileheaders import *
import netCDF4

parser = argparse.ArgumentParser(description="Update vegetation fractions in dump file")
parser.add_argument('-i', dest='ifile', help='Input UM dump')
parser.add_argument('-o', dest='ofile', help='Output UM dump')
parser.add_argument('-f', dest='fracfile', help='New vegetation fraction (ancillary or netCDF)')
parser.add_argument('-v', '--verbose', dest='verbose', action='store_true',
                    default=False, help='verbose output')
args = parser.parse_args()

ntiles = 17

# Get old vegetation fraction from dump file
f = umfile.UMFile(args.ifile)
vlist = []
for k in range(f.fixhd[FH_LookupSize2]):
    ilookup = f.ilookup[k]
    lbegin = ilookup[LBEGIN]
    if lbegin == -99:
        break
    if ilookup[ITEM_CODE] == 216:
        a = f.readfld(k)
        vlist.append(a)
assert len(vlist) == ntiles, 'Error - expected %d vegetation classes' % ntiles
old_vegfrac = np.array(vlist)

if args.fracfile.endswith(".nc"):
    # Read from a netCDF version of a dump file
    d = netCDF4.Dataset(args.fracfile)
    v = d.variables['field1391']
    # There may be some points outside the valid range
    v.set_auto_mask(False)
    vegfrac = v[0]
    vegfrac = vegfrac.astype(old_vegfrac.dtype)
    # Normalise sums to exactly 1
    vegfrac /= vegfrac.sum(axis=0)
    vegfrac[np.isnan(vegfrac)] = f.missval_r
    d.close()
else:
    # Read the vegetation fraction ancillary
    ffrac = umfile.UMFile(args.fracfile)
    vlist = []
    for k in range(ffrac.fixhd[FH_LookupSize2]):
        ilookup = ffrac.ilookup[k]
        lbegin = ilookup[LBEGIN]
        if lbegin == -99:
            break
        assert ilookup[ITEM_CODE] == 216, "Field with unexpected stash code %s" % ilookup[ITEM_CODE]
        a = ffrac.readfld(k)
        vlist.append(a)
    # Create a single array with dimensions [vegtype, lat, lon]
    vegfrac = np.array(vlist)
assert vegfrac.shape[0] == ntiles, 'Error - expected %d vegetation classes' % ntiles

if np.all(old_vegfrac == vegfrac):
    print("Vegetation fields are identical. No output file created")
    sys.exit(0)

# # Check that the masks are identical
# old_mask = (old_vegfrac == f.missval_r)
# new_mask = (vegfrac == f.missval_r)
# if not np.all(old_mask == new_mask):
#     print("Error - land sea masks are different")
#     sys.exit(1)

# Fix all 800 tiled CABLE variables
g = umfile.UMFile(args.ofile, "w")
g.copyheader(f)

k = 0
while k < f.fixhd[FH_LookupSize2]:
    ilookup = f.ilookup[k]
    lbegin = ilookup[LBEGIN]
    if lbegin == -99:
        break
    if 800 <= ilookup[ITEM_CODE] < 920 and ilookup[ITEM_CODE] not in [883, 884, 885, 887, 888]:
        # These don't seem to be necessary
        # or ilookup[ITEM_CODE] in [230, 234, 236]:
        code = ilookup[ITEM_CODE]
        if args.verbose:
            print("Processing", code)
        a = f.readfld(k)
        vlist = [a]
        # Expect another 16 fields with the same code
        for i in range(1, ntiles):
            ilookup = f.ilookup[k+i]
            if ilookup[ITEM_CODE] != code:
                print("Missing tiled fields with", code, k, i)
                sys.exit(1)
            a = f.readfld(k+i)
            vlist.append(a)
        var = np.array(vlist)
        # Grid box mean
        mean = (var*old_vegfrac).sum(axis=0)
        if var.dtype == int:
            # 3 layer snow flag is an integer field
            mean = np.round(mean).astype(int)
        # If old fraction was zero and new > 0, set to grid box mean
        var = np.where(np.logical_and(old_vegfrac == 0, vegfrac > 0), mean, var)
        # Set tiles with new zero fraction to zero
        var[vegfrac == 0] = 0.
        if var.dtype != int:
            # Scale the new grid box mean to match the old
            newmean = (var*vegfrac).sum(axis=0)
            # Leave points with zero mean alone. This may give a divide by zero warning
            # even though such values are masked by the where.
            scale = np.where(newmean == 0., 1., mean/newmean)
            var *= scale
        for i in range(ntiles):
            g.writefld(var[i], k+i)
        k += ntiles
    elif ilookup[ITEM_CODE] == 216:
        # Set the new vegetation fractions
        for i in range(ntiles):
            g.writefld(vegfrac[i], k+i)
        k += ntiles
    else:
        if ilookup[ITEM_CODE] == 30:
            # Save the mask (needed for compression)
            a = f.readfld(k)
            g.writefld(a, k)
            g.mask = a
        else:
            a = f.readfld(k, raw=True)
            g.writefld(a, k, raw=True)
        k += 1
g.close()
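This is the script the pre-run land use hook above invokes; the same call can be made by hand from the control directory once a land_frac.nc has been extracted:

python scripts/update_cable_vegfrac.py -i work/atmosphere/restart_dump.astart.orig -o work/atmosphere/restart_dump.astart -f work/atmosphere/land_frac.nc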
#!/g/data/hh5/public/apps/nci_scripts/python-analysis3
# Copyright 2020 Scott Wales
# author: Scott Wales <scott.wales@unimelb.edu.au>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import mule
import xarray
import os
import shutil
import sys
import tempfile

landuse = xarray.open_dataset('work/atmosphere/INPUT/cableCMIP6_LC_1850-2015.nc').fraction

class ReplaceOp(mule.DataOperator):
    def __init__(self, da):
        self.da = da
    def new_field(self, source):
        return source
    def transform(self, source, result):
        return self.da.isel(vegtype=source.lbuser5 - 1).data

restart = sys.argv[1]
stash_landfrac = 216
stash_landfrac_lastyear = 835

mf = mule.DumpFile.from_file(restart)
year = mf.fixed_length_header.t2_year
print(f'Updating land use for year {year}')

out = mf.copy()
out.validate = lambda *args, **kwargs: True

set_current_landuse = ReplaceOp(landuse.sel(time=f'{year:04d}', method='nearest'))
set_previous_landuse = ReplaceOp(landuse.sel(time=f'{year-1:04d}', method='nearest'))

for f in mf.fields:
    if f.lbuser4 == stash_landfrac:
        f = set_current_landuse(f)
    if f.lbuser4 == stash_landfrac_lastyear:
        f = set_previous_landuse(f)
    out.fields.append(f)

temp = tempfile.NamedTemporaryFile()
out.to_file(temp.name)
shutil.copy(temp.name, restart)
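This mule-based variant reads the land use file from a fixed relative path, so it must be run from the control directory with the work directory in place. A direct call would look like the following; the saved file name is not shown in the gist, so the script name here is an assumption:

# updates the restart in place, given the work directory exists
./update_land_use.py work/atmosphere/restart_dump.astart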
update_um_year.py
#!/g/data/hh5/public/apps/nci_scripts/python-analysis3
# Copyright 2020 Scott Wales
# author: Scott Wales <scott.wales@unimelb.edu.au>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import mule
import sys
import tempfile
import shutil

year = int(sys.argv[1])
restart = sys.argv[2]

mf = mule.DumpFile.from_file(restart)
mf.fixed_length_header.t1_year = year
mf.fixed_length_header.t2_year = year
for f in mf.fields:
    f.lbyr = year
mf.validate = lambda *args, **kwargs: True

temp = tempfile.NamedTemporaryFile()
mf.to_file(temp.name)
shutil.copy(temp.name, restart)
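set_restart_year.sh calls this script as follows; it can equally be run by hand on any UM dump (year and path illustrative):

python scripts/update_um_year.py 101 archive/restart000/atmosphere/restart_dump.astart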
utils.sh
#!/bin/bash
# Copyright 2021 Scott Wales
#
# \author Scott Wales <scott.wales@unimelb.edu.au>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Modifications made to original scripts by Spencer Wong for
# compatibility with new payu versions.
# ----------------------------------------------------------------------

get_payu_start_year() {
python <<EOF
import yaml
with open('config.yaml') as f:
    c = yaml.safe_load(f)
print(c['calendar']['start']['year'])
EOF
}

set_um_start_year() {
    start_year="$1"
python <<EOF
import f90nml
nml = f90nml.read('atmosphere/namelists')
nml['NLSTCALL']['MODEL_BASIS_TIME'][0] = $start_year
nml.write('atmosphere/namelists', force=True)
EOF
}

# With latest payu/ESM1.5 versions, restart timing is read from the
# restart_date.nml file in the restart directory, rather than the
# control directory, and set_cice_start_year() is not needed.
# set_cice_start_year() {
#     start_year="$1"
# python <<EOF
# import f90nml
# nml = f90nml.read('ice/input_ice.nml')
# nml['coupling']['init_date'] = ${start_year}0101  # Experiment start date
# nml['coupling']['inidate'] = ${start_year}0101    # First run date
# nml.write('ice/input_ice.nml', force=True)
# EOF
# }
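These helpers are meant to be sourced from the payu control directory, as set_restart_year.sh does above; a minimal sketch of their use:

source scripts/utils.sh
start_year=$(get_payu_start_year)  # reads calendar.start.year from config.yaml
set_um_start_year $start_year      # rewrites MODEL_BASIS_TIME in atmosphere/namelists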
warm-start-csiro.sh
#!/bin/bash
# Initialise an ACCESS-ESM Payu run from a CSIRO experiment.
# This should be run from the top-level warm-start.sh script, which sets up
# the $csiro_source etc. environment variables.

set -eu
trap "echo Error in warm_start_csiro.sh" ERR

echo "Sourcing restarts from ${csiro_source} / year ${source_year}"

# Load some helper scripts
SCRIPTDIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" &> /dev/null && pwd )"
source $SCRIPTDIR/utils.sh

# Start year of this run - read from config.yaml
start_year=$(get_payu_start_year)

# =====================================================================

# Set up the restart directory
payu sweep > /dev/null
payu setup --archive > /dev/null

payu_archive=./archive
payu_restart=${payu_archive}/restart000

if [ -d ${payu_restart} ]; then
    echo "ERROR: Restart directory already exists"
    echo "Consider 'payu sweep --hard' to delete all restarts"
    exit 1
fi

mkdir $payu_restart
mkdir $payu_restart/{atmosphere,ocean,ice,coupler}

# Some restart files are marked jan 01, some are dec 31 of the previous year
yearstart="$(printf '%04d' $source_year)0101"
pyearend="$(printf '%04d' $(( source_year - 1 )) )1231"

# Copy in the restart files from the CSIRO run
cp -v $csiro_source/atm/${expname}.astart-${yearstart} $payu_restart/atmosphere/restart_dump.astart
for f in $csiro_source/cpl/*-${pyearend}; do
    cp -v $f $payu_restart/coupler/$(basename ${f%-*})
done
for f in $csiro_source/ocn/*-${pyearend}; do
    cp -v $f $payu_restart/ocean/$(basename ${f%-*})
done
for f in $csiro_source/ice/*-${pyearend}; do
    cp -v $f $payu_restart/ice/$(basename ${f%-*})
done
cp -v $csiro_source/ice/iced.${yearstart} $payu_restart/ice/

# Set the year of each model component to the run start year
$SCRIPTDIR/set_restart_year.sh $start_year

# Cleanup to be ready to run the model
payu sweep
warm-start-payu.sh
#!/bin/bash
# Initialise an ACCESS-ESM Payu run from a Payu experiment.
# This should be run from the top-level warm-start.sh script, which sets up
# the $payu_source environment variable.

set -eu
trap "echo Error in warm_start_payu.sh" ERR

echo "Sourcing restarts from ${payu_source}"

# Load some helper scripts
SCRIPTDIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" &> /dev/null && pwd )"
source $SCRIPTDIR/utils.sh

# Start year of this run - read from config.yaml
start_year=$(get_payu_start_year)

# =====================================================================

# Set up the restart directory
payu sweep > /dev/null
payu setup --archive > /dev/null

payu_archive=./archive
payu_restart=${payu_archive}/restart000

if [ -d ${payu_restart} ]; then
    echo "ERROR: Restart directory already exists"
    echo "Consider 'payu sweep --hard' to delete all restarts"
    exit 1
fi

# Copy the source payu restart directory
cp -r "$payu_source" "$payu_restart"

# Set the year of each model component to the run start year
$SCRIPTDIR/set_restart_year.sh $start_year

# Cleanup to be ready to run the model
payu sweep
warm-start.sh
#!/bin/bash
# Initialise an ACCESS-ESM Payu run from another experiment.
#
# This script sets values specific to the experiment; it then calls the common
# 'warm-start' script from the scripts directory, which performs the copy and
# sets the start dates for all the component models to the date in config.yaml.

set -eu

# Either CSIRO or PAYU
source=CSIRO

if [ $source == "CSIRO" ]; then
    # CSIRO job to copy the warm start from
    export expname=PI-02    # Source experiment - PI pre-industrial, HI historical
    export source_year=670  # Change this to create different ensemble members
    export csiro_source=/g/data/p73/archive/CMIP6/ACCESS-ESM1-5/${expname}/restart

    # Call the main warm-start script
    scripts/warm-start-csiro.sh
else
    # Payu restart directory to copy
    export payu_source=/g/data/access/payu/access-esm/restart/historical/PI-01.astart-05410101

    # Call the main warm-start script
    scripts/warm-start-payu.sh
fi
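Typical use, from the payu control directory: edit the variables above to pick the source experiment and year, then run the script and submit the model as usual:

./warm-start.sh
payu run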