ESMLAB (gist by @andersy005, last active November 28, 2018)
#! /usr/bin/env python
from __future__ import absolute_import, division, print_function
import xarray as xr
import numpy as np
import cftime
#-- common kwargs for xr.open_mfdataset: one time level per dask chunk;
#   leave coordinates and times undecoded (time is handled explicitly via
#   the time-bounds variable); data_vars='minimal' avoids concatenating
#   time-invariant variables along 'time'
xr_open_ds = {'chunks': {'time': 1},
              'decode_coords': False,
              'decode_times': False,
              'data_vars': 'minimal'}
#-------------------------------------------------------------------------------
#-- function
#-------------------------------------------------------------------------------
def _list_to_indexer(index_list):
    '''Convert the integer tail of a string formatted as
    dimname,start[,stop[,stride]] to a single index (when only 'start'
    is provided) or to a slice object.

    :param index_list: list of ints parsed from --isel dimname,start[,stop[,stride]]
    :returns: int or slice -- indexing object for the named dimension
    '''
    if len(index_list) == 1:
        return index_list[0]
    elif len(index_list) == 2:
        return slice(index_list[0], index_list[1])
    elif len(index_list) == 3:
        return slice(index_list[0], index_list[1], index_list[2])
    else:
        raise ValueError('ill-formed dimension subset')
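# Illustrative examples (matching the --isel parsing in main below):
#   _list_to_indexer([5])          -> 5
#   _list_to_indexer([0, 120])     -> slice(0, 120)
#   _list_to_indexer([0, 120, 2])  -> slice(0, 120, 2)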
#-------------------------------------------------------------------------------
#-- function
#-------------------------------------------------------------------------------
def time_bound_var(ds):
    '''Return the name of the time-bounds variable and its non-time dimension.'''
    if 'bounds' in ds['time'].attrs:
        tb_name = ds['time'].attrs['bounds']
    elif 'time_bound' in ds:
        tb_name = 'time_bound'
    else:
        raise ValueError('No time_bound variable found')
    tb_dim = ds[tb_name].dims[1]
    return tb_name, tb_dim
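# e.g., in the CESM/POP output used below, ds['time'].attrs['bounds'] is
# 'time_bound' with dims ('time', 'd2'), so this returns ('time_bound', 'd2').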
#-------------------------------------------------------------------------------
#-- function
#-------------------------------------------------------------------------------
def compute_mon_climatology(dsm):
    '''Compute a monthly climatology'''
    tb_name, tb_dim = time_bound_var(dsm)
    grid_vars = [v for v in dsm.variables if 'time' not in dsm[v].dims]
    variables = [v for v in dsm.variables
                 if 'time' in dsm[v].dims and v not in ['time', tb_name]]
    #-- save attributes and (a subset of) the encoding
    attrs = {v: dsm[v].attrs for v in dsm.variables}
    encoding = {v: {key: val for key, val in dsm[v].encoding.items()
                    if key in ['dtype', '_FillValue', 'missing_value']}
                for v in dsm.variables}
    #-- compute the time variable from the mean of the time bounds
    date = cftime.num2date(dsm[tb_name].mean(tb_dim),
                           units=dsm.time.attrs['units'],
                           calendar=dsm.time.attrs['calendar'])
    dsm.time.values = date
    if len(date) % 12 != 0:
        raise ValueError('Time axis not evenly divisible by 12!')
    #-- compute climatology
    ds = dsm.drop(grid_vars).groupby('time.month').mean('time').rename({'month': 'time'})
    #-- put grid_vars back
    ds = xr.merge((ds, dsm.drop([v for v in dsm.variables if v not in grid_vars])))
    attrs['time'] = {'long_name': 'Month', 'units': 'month'}
    del encoding['time']
    #-- put the attributes back
    for v in ds.variables:
        ds[v].attrs = attrs[v]
    #-- put the encoding back
    for v in ds.variables:
        if v in encoding:
            ds[v].encoding = encoding[v]
    return ds
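# A minimal usage sketch (file names are hypothetical, not part of this gist):
#   dsm = xr.open_mfdataset('case.pop.h.*.nc', **xr_open_ds)
#   clim = compute_mon_climatology(dsm)  # 'time' collapses to 12 monthly entries
#   clim.to_netcdf('case.monclim.nc', unlimited_dims=['time'])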
#-------------------------------------------------------------------------------
#-- function
#-------------------------------------------------------------------------------
def compute_mon_anomaly(dsm):
    '''Compute a monthly anomaly'''
    tb_name, tb_dim = time_bound_var(dsm)
    grid_vars = [v for v in dsm.variables if 'time' not in dsm[v].dims]
    variables = [v for v in dsm.variables
                 if 'time' in dsm[v].dims and v not in ['time', tb_name]]
    #-- save attributes and (a subset of) the encoding
    attrs = {v: dsm[v].attrs for v in dsm.variables}
    encoding = {v: {key: val for key, val in dsm[v].encoding.items()
                    if key in ['dtype', '_FillValue', 'missing_value']}
                for v in dsm.variables}
    #-- compute the time variable from the mean of the time bounds
    time_values_orig = dsm.time.values
    date = cftime.num2date(dsm[tb_name].mean(tb_dim),
                           units=dsm.time.attrs['units'],
                           calendar=dsm.time.attrs['calendar'])
    dsm.time.values = date
    if len(date) % 12 != 0:
        raise ValueError('Time axis not evenly divisible by 12!')
    #-- compute anomaly: subtract each calendar month's climatological mean
    ds = (dsm.drop(grid_vars).groupby('time.month')
          - dsm.drop(grid_vars).groupby('time.month').mean('time'))
    ds = ds.reset_coords('month')
    #-- put grid_vars back and restore the original time axis
    ds = xr.merge((ds, dsm.drop([v for v in dsm.variables if v not in grid_vars])))
    ds.time.values = time_values_orig
    attrs['month'] = {'long_name': 'Month'}
    #-- put the attributes back
    for v in ds.variables:
        ds[v].attrs = attrs[v]
    #-- put the encoding back
    for v in ds.variables:
        if v in encoding:
            ds[v].encoding = encoding[v]
    return ds
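# Note: the output keeps the original (absolute) time axis; the calendar
# month of each timestep is carried along as the data variable 'month'.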
#-------------------------------------------------------------------------------
#-- function
#-------------------------------------------------------------------------------
def compute_ann_mean(dsm):
    '''Compute an annual mean'''
    tb_name, tb_dim = time_bound_var(dsm)
    grid_vars = [v for v in dsm.variables if 'time' not in dsm[v].dims]
    variables = [v for v in dsm.variables
                 if 'time' in dsm[v].dims and v not in ['time', tb_name]]
    #-- save attributes and (a subset of) the encoding
    attrs = {v: dsm[v].attrs for v in dsm.variables}
    encoding = {v: {key: val for key, val in dsm[v].encoding.items()
                    if key in ['dtype', '_FillValue', 'missing_value']}
                for v in dsm.variables}
    #-- compute the time variable from the mean of the time bounds
    date = cftime.num2date(dsm[tb_name].mean(tb_dim),
                           units=dsm.time.attrs['units'],
                           calendar=dsm.time.attrs['calendar'])
    dsm.time.values = date
    if len(date) % 12 != 0:
        raise ValueError('Time axis not evenly divisible by 12!')
    #-- compute weights: the length of each time interval, normalized so
    #   the weights within each year sum to one
    dt = dsm[tb_name].diff(dim=tb_dim)[:, 0]
    wgt = dt.groupby('time.year') / dt.groupby('time.year').sum()
    np.testing.assert_allclose(wgt.groupby('time.year').sum(), 1.)
    # groupby.sum() does not seem to handle missing values correctly: it
    # yields 0, not nan; groupby.mean() does return nans, so create a mask
    # of valid values for each variable
    valid = {v: dsm[v].groupby('time.year').mean(dim='time').notnull().rename({'year': 'time'})
             for v in variables}
    ones = dsm.drop(grid_vars).where(dsm.isnull()).fillna(1.).where(dsm.notnull()).fillna(0.)
    #-- compute the annual means
    ds = (dsm.drop(grid_vars) * wgt).groupby('time.year').sum('time').rename({'year': 'time'})
    ones_out = (ones * wgt).groupby('time.year').sum('time').rename({'year': 'time'})
    ones_out = ones_out.where(ones_out > 0.)
    #-- renormalize to appropriately account for missing values
    ds = ds / ones_out
    #-- put the grid variables back
    ds = xr.merge((ds, dsm.drop([v for v in dsm.variables if v not in grid_vars])))
    #-- apply the valid-values mask
    for v in variables:
        ds[v] = ds[v].where(valid[v])
    #-- put the attributes back
    for v in ds.variables:
        ds[v].attrs = attrs[v]
    #-- put the encoding back
    for v in ds.variables:
        if v in encoding:
            ds[v].encoding = encoding[v]
    return ds
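# Worked example of the weighting (assuming monthly data on a 'noleap'
# calendar): dt for January is 31 days and the weights for a year sum over
# 365 days, so January contributes 31/365 of that year's mean; timesteps
# with missing values are renormalized away via ones_out.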
#-------------------------------------------------------------------------------
#-- function
#-------------------------------------------------------------------------------
def compute_diff_wrt_reference(ds_list, ds_ref):
    '''Compute the difference of each dataset in ds_list relative to ds_ref'''
    return [ds - ds_ref for ds in ds_list]
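# e.g. (hypothetical datasets):
#   diffs = compute_diff_wrt_reference([ds_1990s, ds_2000s], ds_ref=ds_control)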
#-------------------------------------------------------------------------------
#-- main
#-------------------------------------------------------------------------------
if __name__ == '__main__':
    import os
    from subprocess import call
    import argparse
    import sys

    #---------------------------------------------------------------------------
    #-- parse args
    p = argparse.ArgumentParser(description='Process timeseries files.')
    p.add_argument('file_in',
                   type=lambda csv: csv.split(','))
    p.add_argument('file_out',
                   type=str)
    p.add_argument('--op', dest='operation',
                   required=True,
                   help='operation to perform: monclim, monanom, or annmean')
    p.add_argument('-v', dest='variable_list',
                   type=lambda csv: csv.split(','),
                   default=[],
                   required=False,
                   help='variable list')
    p.add_argument('-x', dest='invert_var_selection',
                   action='store_true',
                   required=False,
                   help='invert variable list')
    p.add_argument('-O', dest='overwrite',
                   required=False,
                   action='store_true',
                   help='overwrite')
    p.add_argument('--verbose', dest='verbose',
                   required=False,
                   action='store_true',
                   help='Verbose')
    p.add_argument('--isel', dest='isel',
                   required=False,
                   default=[],
                   action='append',
                   help='subsetting mechanism "isel"')
    p.add_argument('--pbs-cluster', dest='pbs_cluster',
                   required=False,
                   action='store_true',
                   help='spin up a PBS dask cluster')
    p.add_argument('--pbs-spec', dest='pbs_spec',
                   type=lambda csv: {kv.split('=')[0]: kv.split('=')[1]
                                     for kv in csv.split(',')},
                   required=False,
                   default={},
                   help='PBS cluster specifications')
    args = p.parse_args()
    #-- if the user has supplied a spec, assume pbs_cluster=True
    if args.pbs_spec:
        args.pbs_cluster = True
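    # e.g. (hypothetical invocation):
    #   --pbs-spec queue=premium,walltime=02:00:00,n_nodes=8
    # is parsed into {'queue': 'premium', 'walltime': '02:00:00', 'n_nodes': '8'}
    # (all values arrive as strings, hence the int() below) and implies
    # --pbs-cluster.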
    #---------------------------------------------------------------------------
    #-- check output file existence and intentional overwrite
    if os.path.exists(args.file_out):
        if args.overwrite:
            call(['rm', '-rfv', args.file_out])
        else:
            raise ValueError(f'{args.file_out} exists. Use -O to overwrite.')
    #---------------------------------------------------------------------------
    #-- determine output format
    ext = os.path.splitext(args.file_out)[1]
    if ext == '.nc':
        write_output = lambda ds: ds.to_netcdf(args.file_out, unlimited_dims=['time'])
    elif ext == '.zarr':
        write_output = lambda ds: ds.to_zarr(args.file_out)
    else:
        raise ValueError(f'Unknown output file extension: {ext}')
    #---------------------------------------------------------------------------
    #-- set the operator
    if args.operation == 'annmean':
        operator = compute_ann_mean
    elif args.operation == 'monclim':
        operator = compute_mon_climatology
    elif args.operation == 'monanom':
        operator = compute_mon_anomaly
    else:
        raise ValueError(f'Unknown operation {args.operation}')
    #---------------------------------------------------------------------------
    #-- parse index
    isel = {}
    for dim_index in args.isel:
        dim = dim_index.split(',')[0]
        isel[dim] = _list_to_indexer([int(i) for i in dim_index.split(',')[1:]])
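    # e.g. --isel time,0,12 --isel z_t,0
    # yields isel = {'time': slice(0, 12), 'z_t': 0}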
    #---------------------------------------------------------------------------
    #-- report args
    if args.verbose:
        print(f'\n{__file__}')
        for arg in vars(args):
            print(f'{arg}: {getattr(args, arg)}')
        print()
    #---------------------------------------------------------------------------
    #-- spin up dask cluster?
    if args.pbs_cluster:
        queue = args.pbs_spec.pop('queue', 'regular')
        project = args.pbs_spec.pop('project', 'NCGD0011')
        walltime = args.pbs_spec.pop('walltime', '04:00:00')
        n_nodes = int(args.pbs_spec.pop('n_nodes', 4))
        if args.pbs_spec:
            raise ValueError(f'Unknown fields in pbs_spec: {args.pbs_spec.keys()}')

        from dask.distributed import Client
        from dask_jobqueue import PBSCluster

        USER = os.environ['USER']
        cluster = PBSCluster(queue=queue,
                             cores=36,
                             processes=9,
                             memory='100GB',
                             project=project,
                             walltime=walltime,
                             local_directory=f'/glade/scratch/{USER}/dask-tmp')
        client = Client(cluster)
        # 9 worker processes per 36-core node, scaled across n_nodes nodes
        cluster.scale(9 * n_nodes)
    #---------------------------------------------------------------------------
    #-- read the input dataset
    ds = xr.open_mfdataset(args.file_in, **xr_open_ds)
    if isel:
        ds = ds.isel(**isel)
    if args.variable_list:
        if args.invert_var_selection:
            drop_vars = [v for v in ds.variables if v in args.variable_list]
        else:
            drop_vars = [v for v in ds.variables if v not in args.variable_list]
        ds = ds.drop(drop_vars)
    if args.verbose:
        print('\ninput dataset:')
        ds.info()
    #---------------------------------------------------------------------------
    #-- compute
    if args.verbose:
        print(f'\ncomputing {args.operation}')
    dso = operator(ds)
    if args.verbose:
        print('\noutput dataset:')
        dso.info()
    #---------------------------------------------------------------------------
    #-- write output
    if args.verbose:
        print(f'\nwriting {args.file_out}')
    write_output(dso)
    #---------------------------------------------------------------------------
    #-- wrap up
    if args.pbs_cluster:
        cluster.close()
    if args.verbose:
        print('\ndone.')
The gist also contains a companion Jupyter notebook (Python 3.6.6 kernel), rendered below:

In [1]:
%matplotlib inline
import os
from subprocess import check_call
from glob import glob
import importlib

import yaml
import re

import cftime
import xarray as xr
import numpy as np

import time

import project as P
import calc
import pop_regional_means as pop

USER = os.environ['USER']
In [2]:
from dask.distributed import Client
from dask_jobqueue import PBSCluster
import dask

Nnodes = 4
processes = 18
project = 'NCGD0033'

cluster = PBSCluster(queue='regular',
                     cores = 18,
                     processes = processes,
                     memory = '100GB',
                     project = project,
                     walltime = '04:00:00',
                     local_directory=f'/glade/scratch/{USER}/dask-tmp')
client = Client(cluster)
cluster.scale(processes*Nnodes)
In [29]:
with open('collections.yml') as f:
    collection_spec = yaml.load(f)

with open('pop_variable_defs.yml') as f:
    pop_variable_defs = yaml.load(f)

component = 'ocn'
stream = 'pop.h'
freq = 'monthly'
data_source_key = '_'.join(['data_sources', component, stream, freq])
data_sources = collection_spec[data_source_key]

variable_list = ['TEMP','SALT','CFC11','CFC12',
                 'NO3','PO4','SiO3','DIC','DIC_ALT_CO2','O2',
                 'diatChl','spChl','diazChl',
                 'IRON_FLUX','FG_CO2']
col = {}
## Compute monthly climatologies over the 1990s
In [30]:
kwargs = {'collection': data_sources,
          'data_source': 'historical',
          'analysis_name': 'clm-1990s',
          'sel_kwargs': {'time': slice('1990-01-01','1999-12-31')},
          'operators': calc.compute_mon_climatology,
          'clobber': False,
          'file_format': 'zarr'}

results = []
for v in variable_list:
    res = dask.delayed(P.process_data_source, pure=True)(variable=v, **kwargs)
    results.append(res)

res = dask.compute(*results)
col.update({r.variable: r for r in res})
col

Out[30]:
{'TEMP': <project.process_data_source at 0x2aacc9a102e8>,
 'SALT': <project.process_data_source at 0x2aacd901ca90>,
 'CFC11': <project.process_data_source at 0x2aacbd163048>,
 'CFC12': <project.process_data_source at 0x2aacc9a10e10>,
 'NO3': <project.process_data_source at 0x2aacc3fb1860>,
 'PO4': <project.process_data_source at 0x2aacd307bcc0>,
 'SiO3': <project.process_data_source at 0x2aacbd163e10>,
 'DIC': <project.process_data_source at 0x2aacd14a8748>,
 'DIC_ALT_CO2': <project.process_data_source at 0x2aacce002208>,
 'O2': <project.process_data_source at 0x2aacd05bb1d0>,
 'diatChl': <project.process_data_source at 0x2aacd3dd99e8>,
 'spChl': <project.process_data_source at 0x2aacbe8ff828>,
 'diazChl': <project.process_data_source at 0x2aaccf002550>,
 'IRON_FLUX': <project.process_data_source at 0x2aacd14a8908>,
 'FG_CO2': <project.process_data_source at 0x2aaccf002f98>}
### Add some "derived variables"

First define the methods and dependent variables for each derived variable.
In [31]:
derived_var_defs = {'pCFC11': P.derived_var_definition(methods=P.derive_var_pCFC11,
                                                       vars_dependent=['TEMP','SALT','CFC11']),
                    }
Do the computation.
In [43]:
results = []
for v, vardef in derived_var_defs.items():
    res = dask.delayed(P.process_data_source, pure=True)(variable=v, **kwargs)
    results.append(res)

res = dask.compute(*results)
col.update({r.variable: r for r in res})
col

Out[43]:
{'TEMP': <project.process_data_source at 0x2aacc9a102e8>,
 'SALT': <project.process_data_source at 0x2aacd901ca90>,
 'CFC11': <project.process_data_source at 0x2aacbd163048>,
 'CFC12': <project.process_data_source at 0x2aacc9a10e10>,
 'NO3': <project.process_data_source at 0x2aacc3fb1860>,
 'PO4': <project.process_data_source at 0x2aacd307bcc0>,
 'SiO3': <project.process_data_source at 0x2aacbd163e10>,
 'DIC': <project.process_data_source at 0x2aacd14a8748>,
 'DIC_ALT_CO2': <project.process_data_source at 0x2aacce002208>,
 'O2': <project.process_data_source at 0x2aacd05bb1d0>,
 'diatChl': <project.process_data_source at 0x2aacd3dd99e8>,
 'spChl': <project.process_data_source at 0x2aacbe8ff828>,
 'diazChl': <project.process_data_source at 0x2aaccf002550>,
 'IRON_FLUX': <project.process_data_source at 0x2aacd14a8908>,
 'FG_CO2': <project.process_data_source at 0x2aaccf002f98>,
 'pCFC11': <project.process_data_source at 0x2aaccf0d0908>}
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Start working with the data!"
]
},
In [44]:
ds = col['pCFC11'].load()
ds

Out[44]:
<xarray.Dataset>
Dimensions:                 (d2: 2, ens: 4, lat_aux_grid: 395, moc_comp: 3,
                             moc_z: 61, nlat: 384, nlon: 320, time: 120,
                             transport_comp: 5, transport_reg: 2, z_t: 60,
                             z_t_150m: 15, z_w: 60, z_w_bot: 60, z_w_top: 60)
Coordinates:
  * lat_aux_grid            (lat_aux_grid) float32 -79.48815 -78.952896 ... 90.0
  * moc_z                   (moc_z) float32 0.0 1000.0 ... 525000.94 549999.06
  * time                    (time) float64 7.264e+05 7.264e+05 ... 7.3e+05
  * z_t                     (z_t) float32 500.0 1500.0 ... 512502.8 537500.0
  * z_t_150m                (z_t_150m) float32 500.0 1500.0 ... 13500.0 14500.0
  * z_w                     (z_w) float32 0.0 1000.0 ... 500004.7 525000.94
  * z_w_bot                 (z_w_bot) float32 1000.0 2000.0 ... 549999.06
  * z_w_top                 (z_w_top) float32 0.0 1000.0 ... 500004.7 525000.94
Dimensions without coordinates: d2, ens, moc_comp, nlat, nlon, transport_comp, transport_reg
Data variables:
    ANGLE                   (nlat, nlon) float64 0.0 0.0 ... -0.0259 2.804e-07
    ANGLET                  (nlat, nlon) float64 0.0 0.0 ... -0.03805 -0.01268
    DXT                     (nlat, nlon) float64 1.894e+06 ... 1.473e+06
    DXU                     (nlat, nlon) float64 2.397e+06 ... 1.391e+06
    DYT                     (nlat, nlon) float64 5.94e+06 5.94e+06 ... 5.046e+06
    DYU                     (nlat, nlon) float64 5.94e+06 5.94e+06 ... 5.493e+06
    HT                      (nlat, nlon) float64 0.0 0.0 0.0 0.0 ... 0.0 0.0 0.0
    HTE                     (nlat, nlon) float64 5.94e+06 5.94e+06 ... 5.046e+06
    HTN                     (nlat, nlon) float64 2.397e+06 ... 1.391e+06
    HU                      (nlat, nlon) float64 0.0 0.0 0.0 0.0 ... 0.0 0.0 0.0
    HUS                     (nlat, nlon) float64 2.397e+06 ... 1.473e+06
    HUW                     (nlat, nlon) float64 5.94e+06 5.94e+06 ... 5.046e+06
    KMT                     (nlat, nlon) float64 0.0 0.0 0.0 0.0 ... 0.0 0.0 0.0
    KMU                     (nlat, nlon) float64 0.0 0.0 0.0 0.0 ... 0.0 0.0 0.0
    REGION_MASK             (nlat, nlon) float64 0.0 0.0 0.0 0.0 ... 0.0 0.0 0.0
    T0_Kelvin               float64 273.1
    TAREA                   (nlat, nlon) float64 1.125e+13 ... 7.432e+12
    TLAT                    (nlat, nlon) float64 -79.22 -79.22 ... 72.19 72.19
    TLONG                   (nlat, nlon) float64 320.6 321.7 ... 319.4 319.8
    UAREA                   (nlat, nlon) float64 1.423e+13 ... 7.639e+12
    ULAT                    (nlat, nlon) float64 -78.95 -78.95 ... 72.41 72.41
    ULONG                   (nlat, nlon) float64 321.1 322.3 ... 319.6 320.0
    cp_air                  float64 1.005e+03
    cp_sw                   float64 3.996e+07
    days_in_norm_year       float64 365.0
    dz                      (z_t) float32 1000.0 1000.0 ... 24996.244 24998.11
    dzw                     (z_w) float32 500.0 1000.0 ... 24994.459 24997.176
    fwflux_factor           float64 0.0001
    grav                    float64 980.6
    heat_to_PW              float64 4.186e-15
    hflux_factor            float64 2.439e-05
    latent_heat_fusion      float64 3.337e+09
    latent_heat_fusion_mks  float64 3.337e+05
    latent_heat_vapor       float64 2.501e+06
    mass_to_Sv              float64 1e-12
    moc_components          (moc_comp) |S384 b'Eulerian Mean' ... b'Submeso'
    momentum_factor         float64 10.0
    nsurface_t              float64 8.61e+04
    nsurface_u              float64 8.297e+04
    ocn_ref_salinity        float64 34.7
    omega                   float64 7.292e-05
    ppt_to_salt             float64 0.001
    radius                  float64 6.371e+08
    rho_air                 float64 1.292
    rho_fw                  float64 1.0
    rho_sw                  float64 1.026
    salinity_factor         float64 -0.00347
    salt_to_Svppt           float64 1e-09
    salt_to_mmday           float64 3.154e+05
    salt_to_ppt             float64 1e+03
    sea_ice_salinity        float64 4.0
    sflux_factor            float64 0.1
    sound                   float64 1.5e+05
    stefan_boltzmann        float64 5.67e-08
    time_bound              (time, d2) float64 7.264e+05 7.264e+05 ... 7.3e+05
    transport_components    (transport_comp) |S384 b'Total' ... b'Submeso Advection'
    transport_regions       (transport_reg) |S384 b'Global Ocean - Marginal Seas' b'Atlantic Ocean + Mediterranean Sea + Labrador Sea + GIN Sea + Arctic Ocean + Hudson Bay'
    vonkar                  float64 0.4
    pCFC11                  (ens, time, z_t, nlat, nlon) float32 dask.array<shape=(4, 120, 60, 384, 320), chunksize=(1, 1, 60, 384, 320)>
Attributes:
    analysis_name:     'clm-1990s'
    clobber:           False
    collection:        {'ctrl': {'open_dataset': {'format': 'single_variable'...
    data_source:       'historical'
    derived_var_def:   <project.derived_var_definition object at 0x2aaaf9dc9940>
    dirout:            '/glade/scratch/mclong/calcs/cmip6_cesm/processed_coll...
    file_format:       'zarr'
    filenames:         ['/glade/scratch/mclong/calcs/cmip6_cesm/processed_col...
    history:           created by mclong on 2018-11-28 16:16:00
    isel_kwargs:       {}
    operators:         [<function derive_var_pCFC11 at 0x2aaccb269400>, <func...
    operators_kwargs:  [{}]
    sel_kwargs:        {'time': slice('1990-01-01', '1999-12-31', None)}
    variable:          'pCFC11'