{
"cells": [
{
"cell_type": "code",
"execution_count": 15,
"metadata": {
"collapsed": false
},
"outputs": [],
"source": [
"import glob\n",
"\n",
"def get_paths(years, lats, lons):\n",
" path = \"/g/data2/rs0/tiles/EPSG4326_1deg_0.00025pixel/LS5_TM/\"\n",
" nbar_list = []\n",
" \n",
" for lon in lons:\n",
" for lat in lats:\n",
" for year in years:\n",
" part = glob.glob(path + \"{lon:03d}_{lat:04d}/{year}/LS5_TM_NBAR*.tif\".format(lon=lon, lat=lat, year=year))\n",
" nbar_list.extend(part)\n",
" \n",
" return nbar_list\n",
"\n",
"def get_paths2(lat, lon, year):\n",
" path = \"/g/data1/ep1_2/z00/prl900/hdf5_test/1A_hdf5/LS5_TM_NBAR_{lon:03d}_{lat:04d}_{year}*.h5\".format(lon=lon, lat=lat, year=year)\n",
" return glob.glob(path)\n",
"\n",
"def get_paths3(lat, lon, year):\n",
" path = \"/g/data1/ep1_2/z00/prl900/hdf5_test/1B_hdf5/LS5_TM_NBAR_{lon:03d}_{lat:04d}_{year}*.h5\".format(lon=lon, lat=lat, year=year)\n",
" return glob.glob(path)\n",
"\n",
"def get_paths4(path):\n",
" path = \"{}*.h5\".format(path)\n",
" return glob.glob(path)"
]
},
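{
"cell_type": "markdown",
"metadata": {},
"source": [
"A minimal usage sketch for the path helpers above. The ranges are illustrative only; they mirror the small test window used later in this notebook."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"collapsed": false
},
"outputs": [],
"source": [
"# Illustrative usage only: list the LS5 NBAR GeoTIFF tiles in a small window.\n",
"tiffs = get_paths(range(1989, 1991), range(-36, -34), range(148, 150))\n",
"print(len(tiffs))"
]
},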
{
"cell_type": "code",
"execution_count": 2,
"metadata": {
"collapsed": true
},
"outputs": [],
"source": [
"from osgeo import gdal\n",
"import numpy as np\n",
"\n",
"def tiff2xyb(tiff_path):\n",
" image = gdal.Open(tiff_path)\n",
" \n",
" #cube = None\n",
"\n",
" for band in range(image.RasterCount):\n",
" if band == 0:\n",
" band_pack = image.GetRasterBand(band+1).ReadAsArray()\n",
" else:\n",
" band_pack = np.dstack((band_pack, image.GetRasterBand(band+1).ReadAsArray()))\n",
" \n",
" return band_pack\n",
"\n",
"\n",
"def tiff2bxy(tiff_path):\n",
" image = gdal.Open(tiff_path)\n",
" \n",
" #cube = None\n",
"\n",
" for band in range(image.RasterCount):\n",
" if band == 0:\n",
" band_pack = image.GetRasterBand(band+1).ReadAsArray()[np.newaxis,:]\n",
" else:\n",
" band_pack = np.vstack((band_pack, image.GetRasterBand(band+1).ReadAsArray()[np.newaxis,:]))\n",
" \n",
" return band_pack\n",
"\n",
"\n",
"def tiff2txyb(tiff_path):\n",
" image = gdal.Open(tiff_path)\n",
" \n",
" cube = None\n",
"\n",
" for band in range(image.RasterCount):\n",
" if band == 0:\n",
" band_pack = image.GetRasterBand(band+1).ReadAsArray()\n",
" else:\n",
" band_pack = np.dstack((band_pack, image.GetRasterBand(band+1).ReadAsArray()))\n",
"\n",
" if cube is None:\n",
" cube = band_pack[np.newaxis,:]\n",
" else:\n",
" cube = np.concatenate((cube, band_pack[np.newaxis,:]), axis=0)\n",
" \n",
" return cube\n",
"\n",
"def stacker_first(cube, layer):\n",
"\n",
" if cube is None:\n",
" cube = layer[np.newaxis,:]\n",
" else:\n",
" cube = np.concatenate((cube, layer[np.newaxis,:]), axis=0)\n",
" \n",
" return cube\n",
"\n",
"\n",
"def stacker_last(cube, layer):\n",
"\n",
" if cube is None:\n",
" cube = layer[:, :, :, np.newaxis]\n",
" else:\n",
" cube = np.concatenate((cube, layer[:, :, :, np.newaxis]), axis=3)\n",
" \n",
" return cube"
]
},
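{
"cell_type": "markdown",
"metadata": {},
"source": [
"The three loaders above differ only in axis order: tiff2xyb stacks bands last, giving (y, x, band); tiff2bxy stacks bands first, giving (band, y, x); tiff2txyb adds a leading time axis on top of the (y, x, band) layout. A small synthetic check of the two stacking patterns, with no GDAL dependency (shapes are illustrative):"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"collapsed": false
},
"outputs": [],
"source": [
"import numpy as np\n",
"\n",
"# Two synthetic 3x4 \"bands\" standing in for ReadAsArray() results.\n",
"b0 = np.zeros((3, 4))\n",
"b1 = np.ones((3, 4))\n",
"\n",
"# dstack puts bands last, as in tiff2xyb: (y, x, band)\n",
"print(np.dstack((b0, b1)).shape)  # (3, 4, 2)\n",
"\n",
"# vstack over [np.newaxis, :] views puts bands first, as in tiff2bxy: (band, y, x)\n",
"print(np.vstack((b0[np.newaxis, :], b1[np.newaxis, :])).shape)  # (2, 3, 4)"
]
},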
{
"cell_type": "code",
"execution_count": 3,
"metadata": {
"collapsed": false
},
"outputs": [],
"source": [
"def time_stacker_first(paths):\n",
" cube = None\n",
"\n",
" for path in paths:\n",
" with h5py.File(path, 'r') as h5f:\n",
" layer = h5f[\"NBAR\"].value\n",
" cube = stacker_first(cube, layer)\n",
" \n",
" return cube\n",
"\n",
"def time_stacker_last(paths):\n",
" cube = None\n",
"\n",
" for path in paths:\n",
" with h5py.File(path, 'r') as h5f:\n",
" layer = h5f[\"NBAR\"].value\n",
" cube = stacker_last(cube, layer)\n",
"\n",
" return cube"
]
},
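{
"cell_type": "markdown",
"metadata": {},
"source": [
"A caveat on the stackers above: growing the cube with np.concatenate on every iteration re-copies the whole array each time, so stacking T layers costs O(T^2) copying. A sketch of a one-shot alternative under the same \"NBAR\" dataset layout (np.stack needs NumPy >= 1.10):"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"collapsed": false
},
"outputs": [],
"source": [
"import numpy as np\n",
"import h5py\n",
"\n",
"def time_stacker_once(paths):\n",
"    # Collect every layer first, then stack once along a new leading (time) axis.\n",
"    layers = []\n",
"    for path in paths:\n",
"        with h5py.File(path, 'r') as h5f:\n",
"            layers.append(h5f[\"NBAR\"][...])\n",
"    return np.stack(layers, axis=0) if layers else None"
]
},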
{
"cell_type": "code",
"execution_count": 4,
"metadata": {
"collapsed": true
},
"outputs": [],
"source": [
"import h5py\n",
"\n",
"def pack_data(out_path, data):\n",
" with h5py.File(out_path, 'w') as h5f:\n",
" ds = h5f.create_dataset(\"NBAR\", data=data, compression='lzf', chunks=(100, 100, 1))"
]
},
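{
"cell_type": "markdown",
"metadata": {},
"source": [
"To check what pack_data actually wrote, h5py exposes the chunk layout and compression on the dataset object. A minimal sketch; the output path is hypothetical:"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"collapsed": false
},
"outputs": [],
"source": [
"import h5py\n",
"\n",
"with h5py.File('/tmp/example_nbar.h5', 'r') as h5f:  # hypothetical path\n",
"    ds = h5f[\"NBAR\"]\n",
"    print(ds.shape)\n",
"    print(ds.chunks)       # e.g. (100, 100, 1), as requested above\n",
"    print(ds.compression)  # 'lzf'"
]
},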
{
"cell_type": "code",
"execution_count": 5,
"metadata": {
"collapsed": false,
"scrolled": true
},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"149 -36 1990\n",
"149 -35 1989\n",
"149 -35 1990\n"
]
}
],
"source": [
"import os.path\n",
"\n",
"years = range(1988, 1998)\n",
"years = range(1989, 1991)\n",
"lats = range(-36, -34)\n",
"lons = range(148, 150)\n",
"\n",
"for lon in lons:\n",
" for lat in lats:\n",
" for year in years:\n",
" if not os.path.isfile('/g/data1/ep1_2/z00/prl900/hdf5_test/{lat:03d}_{lon:03d}_{year}.h5'.format(lat=abs(lat), lon=lon, year=year)): \n",
" print lon, lat, year\n",
" paths = get_paths3(lat, lon, year)\n",
" cube = time_stacker_first(paths)\n",
"\n",
"\n",
" with h5py.File('/g/data1/ep1_2/z00/prl900/hdf5_test/{lat:03d}_{lon:03d}_{year}.h5'.format(lat=abs(lat), lon=lon, year=year), 'w') as h5f:\n",
" h5f.create_dataset(\"NBAR\", data=cube, compression='lzf')\n",
"\n",
" "
]
},
{
"cell_type": "code",
"execution_count": 23,
"metadata": {
"collapsed": false
},
"outputs": [],
"source": [
"def get_filename(path):\n",
" return path.split('/')[-1]\n",
"\n",
"def chunker(in_folder, out_folder):\n",
" exp_path = \"/g/data1/ep1_2/z00/prl900/hdf5_test/\"\n",
" \n",
" in_paths = get_paths4(exp_path + in_folder + '/')\n",
" \n",
" for in_path in in_paths:\n",
" if not os.path.isfile(exp_path + out_folder + '/' + get_filename(in_path)):\n",
" with h5py.File(in_path, 'r') as h5i:\n",
" cube = h5i[\"NBAR\"].value\n",
"\n",
" shape = cube.shape\n",
"\n",
" chunks = [] \n",
" for dim in shape:\n",
" if dim == 4000:\n",
" chunks.append(100)\n",
" \n",
" elif dim == 6:\n",
" chunks.append(1)\n",
"\n",
" else:\n",
" chunks.append(dim)\n",
" \n",
" \"\"\"\n",
" else if 6 < dim < 1000:\n",
" chunks.append(1)\n",
" \"\"\"\n",
"\n",
" with h5py.File(exp_path + out_folder + '/' + get_filename(in_path), 'w') as h5o:\n",
" h5o.create_dataset(\"NBAR\", data=cube, compression='lzf', chunks=tuple(chunks))\n",
"\n",
" \n",
"chunker('1A_TF', '1A_TF_C2')\n",
"chunker('1A_TL', '1A_TL_C2')\n",
"chunker('1B_TF', '1B_TF_C2')\n",
"chunker('1B_TL', '1B_TL_C2')"
]
},
{
"cell_type": "raw",
"metadata": {},
"source": [
"#C1 x=100, y=100, b=[:], time=[:]\n",
"#C2 x=100, y=100, b=1, time=[:]\n",
"#C3 x=100, y=100, b=[:], time=1\n",
"#C4 x=100, y=100, b=1, time=1"
]
},
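{
"cell_type": "markdown",
"metadata": {},
"source": [
"For a cube in the (band, y, x, time) layout used above, the four schemes translate to the h5py chunk tuples below. A sketch only: the helper name is ours, and later cells also use variants such as (1, 100, 100, 10) under the C3 label."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"collapsed": false
},
"outputs": [],
"source": [
"import h5py\n",
"\n",
"def write_with_scheme(out_path, cube, scheme):\n",
"    # cube is (band, y, x, time); scheme is one of 'C1'..'C4' above.\n",
"    b, y, x, t = cube.shape\n",
"    chunk_schemes = {\n",
"        'C1': (b, 100, 100, t),  # x=100, y=100, b=[:], time=[:]\n",
"        'C2': (1, 100, 100, t),  # x=100, y=100, b=1,   time=[:]\n",
"        'C3': (b, 100, 100, 1),  # x=100, y=100, b=[:], time=1\n",
"        'C4': (1, 100, 100, 1),  # x=100, y=100, b=1,   time=1\n",
"    }\n",
"    with h5py.File(out_path, 'w') as h5f:\n",
"        h5f.create_dataset(\"NBAR\", data=cube, compression='lzf',\n",
"                           chunks=chunk_schemes[scheme])"
]
},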
{
"cell_type": "code",
"execution_count": 5,
"metadata": {
"collapsed": false
},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"(6, 4000, 4000, 28)\n",
"(1, 100, 100, 10)\n",
"(6, 4000, 4000, 55)\n",
"(1, 100, 100, 10)\n",
"(6, 4000, 4000, 39)\n",
"(1, 100, 100, 10)\n",
"(6, 4000, 4000, 33)\n",
"(1, 100, 100, 10)\n",
"(6, 4000, 4000, 53)\n",
"(1, 100, 100, 10)\n",
"(6, 4000, 4000, 33)\n",
"(1, 100, 100, 10)\n",
"(6, 4000, 4000, 45)\n",
"(1, 100, 100, 10)\n",
"(6, 4000, 4000, 28)\n",
"(1, 100, 100, 10)\n"
]
}
],
"source": [
"import h5py\n",
"import os.path\n",
"import glob\n",
"\n",
"def get_files(path):\n",
" path = \"{}*.h5\".format(path)\n",
" return glob.glob(path)\n",
"\n",
"def get_filename(path):\n",
" return path.split('/')[-1]\n",
"\n",
"def repack(in_folder, out_folder):\n",
" exp_path = \"/g/data1/ep1_2/z00/prl900/hdf5_test/\"\n",
" in_paths = get_files(exp_path + in_folder + '/')\n",
" \n",
" for in_path in in_paths:\n",
" if not os.path.isfile(exp_path + out_folder + '/' + get_filename(in_path)):\n",
" with h5py.File(in_path, 'r') as h5i:\n",
" cube = h5i[\"NBAR\"].value\n",
" chunk_shape = (1, 100, 100, 10)\n",
" with h5py.File(exp_path + out_folder + '/' + get_filename(in_path), 'w') as h5o:\n",
" h5o.create_dataset(\"NBAR\", data=cube, compression='lzf', chunks=chunk_shape)\n",
"\n",
"col = '1B_TL'\n",
"repack(col, col + '_LZF_C3')"
]
},
{
"cell_type": "code",
"execution_count": 2,
"metadata": {
"collapsed": true
},
"outputs": [],
"source": [
"import glob\n",
"from random import randint\n",
"\n",
"def get_files(path):\n",
" path = \"{}*.h5\".format(path)\n",
" return glob.glob(path)\n",
"\n",
"#start & end both included\n",
"def get_rdm_list(number, start, end):\n",
" out = []\n",
" ready = False\n",
" \n",
" while not ready and len(out) < number:\n",
" n = randint(start, end)\n",
" if n not in out:\n",
" out.append(n)\n",
" \n",
" return out"
]
},
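{
"cell_type": "markdown",
"metadata": {},
"source": [
"get_rdm_list is sampling without replacement, which the standard library already provides: random.sample draws the requested number of distinct values in one call (the order of results will differ from the rejection loop above):"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"collapsed": false
},
"outputs": [],
"source": [
"import random\n",
"\n",
"def get_rdm_list_sample(number, start, end):\n",
"    # start and end both included, matching get_rdm_list above\n",
"    return random.sample(range(start, end + 1), number)"
]
},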
{
"cell_type": "code",
"execution_count": 65,
"metadata": {
"collapsed": false
},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"CPU times: user 26.4 s, sys: 817 ms, total: 27.2 s\n",
"Wall time: 37.7 s\n"
]
}
],
"source": [
"import h5py\n",
"from osgeo import gdal\n",
"\n",
"#image is composed of 8 tile\n",
"def get_image(path):\n",
" paths = get_files(path)\n",
" f_idxs = get_rdm_list(8, 0, len(paths)-1)\n",
" \n",
" for f_idx in f_idxs:\n",
" with h5py.File(paths[f_idx], 'r') as h5f:\n",
" shape = h5f[\"NBAR\"].shape\n",
" cube = h5f[\"NBAR\"][randint(0, 5), :, :, randint(0, shape[3]-1)]\n",
"\n",
"\n",
"%time get_image('/g/data1/ep1_2/z00/prl900/hdf5_test/1B_TL_LZF_C3/')"
]
},
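{
"cell_type": "markdown",
"metadata": {},
"source": [
"Rough read-amplification arithmetic for the run above, assuming a (6, 4000, 4000, T) dataset chunked as (1, 100, 100, 10): one full spatial slice [b, :, :, t] touches 40 x 40 = 1600 chunks, and each chunk carries 10 time steps, so roughly 10x more data is decompressed than returned. Over the 8 randomly chosen tiles that is about 12,800 chunks, which helps explain the ~38 s wall time."
]
},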
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"collapsed": true
},
"outputs": [],
"source": [
"\n",
"\n",
"# 1A_TF (T, 4000, 4000, 6)\n",
"res_1A_TF = [4.3, 4.51, 4.05, 3.38, 3.78, 5.03, 4.11, 4.08, 4.25, 3.86]\n",
"# 1A_TL (4000, 4000, 6, T)\n",
"res_1A_TL = [149.0, 127.0, 136.0, 152.0, over the top]\n",
"\n",
"# 1B_TF (T, 6, 4000, 4000)\n",
"res_1B_TF = [1.16, 1.32, 1,07, 1.19, 1.04, 1.37, 1.18, 1.14, 1.04, 1.01]\n",
"# 1B_TL (6, 4000, 4000, T)\n",
"res_1B_TL = [25.8, 22.3, 27.5, 23.0, 24.3, 22.6, 36.8, 28.4, 26.9, 32.7]\n",
"\n",
"\n",
"# 1A_TF_LZF (T, 4000, 4000, 6) chunked as [T, 4000, 4000, 1] !Unable to create dataset (Chunk size must be < 4gb)\n",
"res_1A_TF_LZF = [21.6, 21.4, 21.5, 21.3, 21.6, 21.1, 20.9, 21.6, 21.4, ]\n",
"# 1A_TL_LZF (4000, 4000, 6, T) chunked as [4000, 4000, 1, T]\n",
"res_1A_TL_LZF = [over the top]\n",
"\n",
"# 1B_TF_LZF (T, 6, 4000, 4000) chunked as [T, 1, 4000, 4000]\n",
"res_1B_TF_LZF = [38.2, 36.4, 39.6, 30.3, 32.7, 34.2, 33.9, 37.2, 33.5, 29.1]\n",
"# 1B_TL_LZF (6, 4000, 4000, T) chunked as [1, 4000, 4000, T]\n",
"res_1B_TL_LZF = [81.0, 74.0, 79.0, 86.0, 78.0]\n",
"\n",
"\n",
"# Spatial access optimised\n",
"# 1A_TF_LZF_C1 (T, 4000, 4000, 6) chunked as [1, 4000, 4000, 1] !Unable to create dataset (Chunk size must be < 4gb)\n",
"res_1A_TF_LZF_C1 = [21.7, 21.4, 21.5, 21.2, 21.3, 21.6, 21.1, 21.7, 21.2, 21.3]\n",
"# 1A_TL_LZF_C1 (4000, 4000, 6, T) chunked as [4000, 4000, 1, 1]\n",
"res_1A_TL_LZF_C1 = [21.7, 22.0, 21.7, 20.9, 22.7, 21.4, 21.6, 20.7, 21.8, 21.3]\n",
"\n",
"# 1B_TF_LZF_C1 (T, 6, 4000, 4000) chunked as [1, 1, 4000, 4000]\n",
"res_1B_TF_LZF_C1 = [0.90, 0.84, 0.61, 0.92, 0.73, 0.86, 0.70, 0.80, 0.59, 0.74]\n",
"# 1B_TL_LZF_C1 (6, 4000, 4000, T) chunked as [1, 4000, 4000, 1]\n",
"res_1B_TL_LZF_C1 = [21.0, 20.5, 21.5, 22.2, 21.7, 20.3, 21.2, 22.0, 21.4, 21.2]\n",
"\n",
"\n",
"# Time drill optimised\n",
"# 1A_TF_LZF_C2 (T, 4000, 4000, 6) chunked as [T, 25, 25, 1]\n",
"res_1A_TF_LZF_C2 = [159.0, 147.0, 157.0, 162.0, 137.0, 143.0, 152.0, 143.0, 149.0, 143.0]\n",
"# 1A_TL_LZF_C2 (4000, 4000, 6, T) chunked as [25, 25, 1, T]\n",
"res_1A_TL_LZF_C2 = [185.0, over the top]\n",
"\n",
"# 1B_TF_LZF_C2 (T, 6, 4000, 4000) chunked as [T, 1, 25, 25]\n",
"res_1B_TF_LZF_C2 = [20.0, 17.6, 22.3, 18.7, 19.4, 18.4, 21.5, 20.9, 18.7, 18.5]\n",
"# 1B_TL_LZF_C2 (6, 4000, 4000, T) chunked as [1, 25, 25, T]\n",
"res_1B_TL_LZF_C2 = [48.1, 53.8, 45.7, 46.8, 47.4]\n",
"\n",
"\n",
"# Compromise\n",
"# 1A_TF_LZF_C3 (T, 4000, 4000, 6) chunked as [10, 100, 100, 1]\n",
"res_1A_TF_LZF_C3 = [35.3, 34.2, 33.8, 32.0, 32.3, 33.8, 31.6, 32.3, 30.5, 34.9]\n",
"# 1A_TL_LZF_C3 (4000, 4000, 6, T) chunked as [100, 100, 1, 10]\n",
"res_1A_TL_LZF_C3 = [52.7, 49.5, 56.2, 46.1]\n",
"\n",
"# 1B_TF_LZF_C3 (T, 6, 4000, 4000) chunked as [10, 1, 100, 100]\n",
"res_1A_TF_LZF_C3 = [4.54, 4.6, 4.42, 4.55, 4.99, 4.29, 4.83, 3.88, 4.28, 4.41]\n",
"# 1B_TL_LZF_C3 (6, 4000, 4000, T) chunked as [1, 100, 100, 10]\n",
"res_1A_TL_LZF_C3 = [38.1, 37.7, 38.2, 36.4, 39.6 ]"
]
},
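{
"cell_type": "markdown",
"metadata": {},
"source": [
"A quick way to compare the spatial-access runs above is mean wall time per configuration. A sketch over a few of the completed lists (assumes the cell above has been executed):"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"collapsed": false
},
"outputs": [],
"source": [
"def mean(xs):\n",
"    return sum(xs) / float(len(xs))\n",
"\n",
"for name, runs in [('1A_TF', res_1A_TF),\n",
"                   ('1B_TF', res_1B_TF),\n",
"                   ('1B_TL', res_1B_TL),\n",
"                   ('1B_TF_LZF_C1', res_1B_TF_LZF_C1),\n",
"                   ('1B_TL_LZF_C1', res_1B_TL_LZF_C1)]:\n",
"    print(\"{}: {:.2f} s\".format(name, mean(runs)))"
]
},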
{
"cell_type": "code",
"execution_count": 327,
"metadata": {
"collapsed": false
},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"CPU times: user 5.68 s, sys: 1.92 s, total: 7.6 s\n",
"Wall time: 8.47 s\n"
]
}
],
"source": [
"#timeseries is composed of 80 tiles\n",
"def get_timeseries(path):\n",
" paths = get_files(path)\n",
" f_idxs = get_rdm_list(len(paths)/4, 0, len(paths)-1)\n",
" band = randint(0, 5)\n",
" x = randint(0, 3999)\n",
" y = randint(0, 3999)\n",
" \n",
" for f_idx in f_idxs:\n",
" with h5py.File(paths[f_idx], 'r') as h5f:\n",
" value = h5f[\"NBAR\"][band, x, y, :]\n",
" \n",
"%time get_timeseries('/g/data1/ep1_2/z00/prl900/hdf5_test/1B_TL_LZF/')"
]
},
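{
"cell_type": "markdown",
"metadata": {},
"source": [
"The drill above runs against the 1B_TL_LZF layout, chunked as (1, 4000, 4000, T): a single-pixel drill [band, x, y, :] lands in exactly one chunk, but that chunk spans the full 4000 x 4000 spatial extent, so roughly 16 million values per time step are decompressed to return just T values. The small-spatial-chunk layouts (C2, C3) avoid this, which is why their drill times below sit near 0.01-0.05 s."
]
},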
{
"cell_type": "code",
"execution_count": 193,
"metadata": {
"collapsed": false
},
"outputs": [],
"source": [
"\n",
"\n",
"# 1A_TF (T, 4000, 4000, 6)\n",
"res_1A_TF = [0.14, 12, 0.11, 0.13, 0.12, 0.09, 0.14, 0.11, 0.10, 0.11, 0.14]\n",
"# 1A_TL (4000, 4000, 6, T)\n",
"res_1A_TL = [0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01]\n",
"\n",
"# 1B_TF (T, 6, 4000, 4000)\n",
"res_1B_TF = [0.46, 0.14, 0.13, 0.13, 0.34, 0.16, 0.51]\n",
"# 1B_TL (6, 4000, 4000, T)\n",
"res_1B_TL = [0.04, 0.05, 0.02, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01]\n",
"\n",
"\n",
"# 1A_TF_LZF (T, 4000, 4000, 6) chunked as [T, 4000, 4000, 1] !Unable to create dataset (Chunk size must be < 4gb)\n",
"res_1A_TF_LZF = [14.8, 6.5, 5.04, 11.2, 11.9, 7.95, 14.7, 6.33, 8.3, 8.45]\n",
"# 1A_TL_LZF (4000, 4000, 6, T) chunked as [4000, 4000, 1, T]\n",
"res_1A_TL_LZF = [11.6, 12.2, 13.2, 8.8, 6.81, 4.54, 11.3, 10.9, 9.02, 10.6]\n",
"\n",
"# 1B_TF_LZF (T, 6, 4000, 4000) chunked as [T, 1, 4000, 4000]\n",
"res_1B_TF_LZF = [14.4, 6.83, 5.66, 5.86, 10.2, 5.87, 4.8, 4.68, 11.4]\n",
"# 1B_TL_LZF (6, 4000, 4000, T) chunked as [1, 4000, 4000, T]\n",
"res_1B_TL_LZF = [13.9, 12.1, 13.0, 11.1, 17.4, 11.4, 7.51, 15.8, 8.47, 13.9, 12.1]\n",
"\n",
"\n",
"# Spatial access optimised\n",
"# 1A_TF_LZF_C1 (T, 4000, 4000, 6) chunked as [1, 4000, 4000, 1] !Unable to create dataset (Chunk size must be < 4gb)\n",
"res_1A_TF_LZF_C1 = [6.34, 4.44, 5.66, 6.09, 3.7, 4.5, 6.22, 4.88, 4.01, 3.95]\n",
"# 1A_TL_LZF_C1 (4000, 4000, 6, T) chunked as [4000, 4000, 1, 1]\n",
"res_1A_TL_LZF_C1 = [3.74, 4.37, 4.1, 3.97, 4.68, 3.71, 4.41, 4.05, 4.64, 3.83, 3.79]\n",
"\n",
"# 1B_TF_LZF_C1 (T, 6, 4000, 4000) chunked as [1, 1, 4000, 4000]\n",
"res_1B_TF_LZF_C1 = [6.24, 6.89, 7.09, 6.51, 5.26, 6.71, 5.48, 3.63, 5.81, 3.95, 3.74]\n",
"# 1B_TL_LZF_C1 (6, 4000, 4000, T) chunked as [1, 4000, 4000, 1]\n",
"res_1B_TL_LZF_C1 = [4.54, 3.69, 3.63, 3.86, 3.28, 3.02, 4.46, 3.16, 4.85, 4.51, 4.13]\n",
"\n",
"\n",
"# Time drill optimised\n",
"# 1A_TF_LZF_C2 (T, 4000, 4000, 6) chunked as [T, 25, 25, 1]\n",
"res_1A_TF_LZF_C2 = [0.19, 0.23, 0.21, 0.32, 0.18, 0.15, 0.13, 0.16, 0.16, 0.14]\n",
"# 1A_TL_LZF_C2 (4000, 4000, 6, T) chunked as [25, 25, 1, T]\n",
"res_1A_TL_LZF_C2 = [0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01]\n",
"\n",
"# 1B_TF_LZF_C2 (T, 6, 4000, 4000) chunked as [T, 1, 25, 25]\n",
"res_1B_TF_LZF_C2 = [0.01, 0.02, 0.01, 0.02, 0.01, 0.02, 0.01, 0.02, 0.01, 0.01]\n",
"# 1B_TL_LZF_C2 (6, 4000, 4000, T) chunked as [1, 25, 25, T]\n",
"res_1B_TL_LZF_C2 = [0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01]\n",
"\n",
"\n",
"# Compromise\n",
"# 1A_TF_LZF_C3 (T, 4000, 4000, 6) chunked as [10, 100, 100, 1]\n",
"res_1A_TF_LZF_C3 = [0.03, 0.08, 0.05, 0.07, 0.04, 0.06, 0.09, 0.05, 0.07]\n",
"# 1A_TL_LZF_C3 (4000, 4000, 6, T) chunked as [100, 100, 1, 10]\n",
"res_1A_TL_LZF_C3 = [0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01]\n",
"\n",
"# 1B_TF_LZF_C3 (T, 6, 4000, 4000) chunked as [10, 1, 100, 100]\n",
"res_1B_TF_LZF_C3 = [0.15, 0.05, 0.14, 0.04, 0.11, 0.14, 0.10, 0.10]\n",
"# 1B_TL_LZF_C3 (6, 4000, 4000, T) chunked as [1, 100, 100, 10]\n",
"res_1B_TL_LZF_C3 = [0.05, 0.05, 0.04, 0.05, 0.04, 0.05, 0.05, 0.04, 0.04, 0.05]"
]
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 2",
"language": "python",
"name": "python2"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 2
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython2",
"version": "2.7.10"
}
},
"nbformat": 4,
"nbformat_minor": 0
}