Skip to content

Instantly share code, notes, and snippets.


Nick Weir nrweir

View GitHub Profile
nrweir / pyproj_rasterio_crs_interconversion.ipynb
Last active Mar 5, 2020
Exploring interconversion between pyproj and rasterio CRS objects
View pyproj_rasterio_crs_interconversion.ipynb
Sorry, something went wrong. Reload?
Sorry, we cannot display this file.
Sorry, this file is invalid so it cannot be displayed.
nrweir /
Created Dec 20, 2018
Load a GeoDataFrame and a GeoTIFF, and return the GeoDataFrame transformed to the GeoTIFF's coordinate system
import rasterio
import geopandas as gpd
def transform_gdf(gdf, geotiff):
"""Transform the coordinates of a GeoDataFrame.
gdf : gpd.GeoDataFrame
A geodataframe with an identity affine xform.
nrweir /
Created Nov 10, 2018
Script to produce predictions csv from geojsons
import spacenetutilities.labeltools.coreLabelTools as cLT
import os
import argparse
import re
argparser = argparse.ArgumentParser()
argparser.add_argument('--geojson_src_dir', '-j', type=str, required=True,
help='Path to the directory containing geojsons. If ' +
'the referenced directory contains subdirectories ' +
'that must be searched, the `--recursive` argument ' +
nrweir /
Created Jun 29, 2018
AJAX call using CustomJS
data = {'x': ["0-7 days", "7-30 days", "31-90 days", ">90 days"],
'All dogs': [41.4391, 34.8107, 15.6709, 4.4533],
'Filtered': [0, 0, 0, 0]}
source = ColumnDataSource(data=data)
callback = CustomJS(args=dict(source=source), code="""
url: '/_ajax',
data: $("#filter_form").serialize(),
type: 'GET',
success: function(response) {
nrweir /
Created Jun 7, 2018
Using the PMC API and parsing its XML output
# imports
import pandas as pd
import os
import ftplib as FTP
import urllib
from xml.etree import ElementTree as ET
import subprocess
# next line is the request url that I was using; everything after the first ? is the query terms.
# there's a place somewhere on pubmed (can't remember where) where you can get the string to use there.
# Gather the TIFF images in the working directory and split them into the
# peroxisome (channel 594) and mitochondria (channel 447) image sets, then
# work out how many image pairs each array job should process.
flist = os.listdir()
imgs = [fname for fname in flist if '.tif' in fname.lower()]
pex_imgs = [name for name in imgs if '594' in name]
mito_imgs = [name for name in imgs if '447' in name]
# the two channels must pair up one-to-one for the per-job split below
if len(pex_imgs) != len(mito_imgs):
    raise ValueError('Length of pex and mito img sets do not match.')
# NOTE(review): array_l (presumably the number of array jobs) is defined
# elsewhere in the script — not visible in this chunk.
ims_per_job = int(len(pex_imgs) / array_l)
View gist:b8470913639360ddfb1099bf35a57b3c
# Count, for each segmented cell, how many foci overlap it.
# NOTE(review): the scraped source lost all indentation and the body of the
# `if i == 0:` background check was missing — a `continue` has been
# reconstructed here (the per-cell counting below makes no sense for the
# background label). Confirm against the original gist.
foci_cts_dict = {}
for i in np.unique(cells):  # gets the unique IDs of the cells (0 == bgrd)
    if i == 0:  # if it's bgrd, skip it
        continue
    # unique IDs for each focus overlapping this cell; includes the focus
    # array's own background label, hence the -1 below
    overlapping_foci = np.unique(seg_foci[cells == i])
    # shape[0] is the length of the overlapping_foci array; subtract bgrd
    num_foci = overlapping_foci.shape[0] - 1
    # key is cell ID and value is the number of foci in that cell
    foci_cts_dict[i] = num_foci
nrweir /
Created Nov 14, 2017
For matching parent cells to foci in an image
# c_foci = an array with numbers indicating where foci are (e.g. all voxels corresponding to focus #1 = 1, all vx corresp to foci #2 = 2, etc.)
# segmented_cells = an array with #s indicating where cells are, same format as c_foci
# x = focus ID that is being checked for parent cells
if verbose:
print('current ID: ' + str(x))
parent_cell, cell_cts = np.unique(
self.segmented_cells[i][c_foci == x],
nrweir /
Created Sep 29, 2017 save very large files using pickle in python
# 2 functions to create big pickle files (>2 gb) and read them back into python
def write_big_pkl(obj, path):
"""Pickle obj, where obj is a >2 gb object.
obj: The object to be pickled.
path: The absolute path to save the pickle to.