Sergio Lucero (sergiolucero)
💭 coding the days away
@sergiolucero
sergiolucero / chilechinaplotter.py
Last active September 20, 2017 17:58
from Chile to China
import folium
import geopandas as gp

# read the shapefile directly with geopandas (requests.get + .json() cannot parse a .shp;
# this assumes the companion .dbf/.shx files are served alongside)
chile = gp.read_file('http://sergiolucerovera.pythonanywhere.com/static/chile.shp')
centroids = [(pt.y, pt.x) for pt in chile.centroid]  # folium expects (lat, lon)
cmap = folium.Map(location=centroids[0], zoom_start=15, tiles='Stamen Terrain')  # 'Staro_Pramen' is not a tile set
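The preview ends before anything is drawn; a minimal sketch of the missing plotting step, reusing the objects above:
folium.GeoJson(chile).add_to(cmap)  # overlay the polygons on the map
cmap.save('chile.html')             # write a standalone HTML map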
@sergiolucero
sergiolucero / airquality.py
Created September 26, 2017 16:15
openAQ air quality
import openaq

parameters = ['co', 'no2', 'o3', 'pm10', 'pm25', 'so2']
api = openaq.OpenAQ()
country_list = api.countries(df=True)
print('MONITORING %d locations worldwide' % country_list.locations.sum())
for cid, cdata in country_list.iterrows():
    ctry = cdata['name']  # cdata.name would return the row index, not the 'name' column
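The preview stops inside the loop; a hedged sketch of one way it could continue with py-openaq, pulling the latest PM2.5 readings per country (the exact keyword arguments are assumptions):
    latest = api.latest(country=cdata['code'], parameter='pm25', df=True)
    print(ctry, len(latest), 'sites reporting pm25')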
@sergiolucero
sergiolucero / plot_parques.py
Last active September 27, 2017 14:55
parks of Santiago
import geopandas as gp
import folium
# Shapefile sources: http://www.ide.cl/descarga/capas.html
# src: https://ocefpaf.github.io/python4oceanographers/blog/2015/02/02/cartopy_folium_shapefile/
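Only the imports and source links survive in the preview; a minimal sketch of the plotting they set up, assuming a parks shapefile downloaded from the IDE portal (the filename is hypothetical):
parques = gp.read_file('parques.shp')  # hypothetical local copy of the parks layer
parques = parques.to_crs(epsg=4326)    # folium expects lat/lon (WGS84)
center = parques.unary_union.centroid
pmap = folium.Map(location=[center.y, center.x], zoom_start=12)
folium.GeoJson(parques).add_to(pmap)
pmap.save('parques.html')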
@sergiolucero
sergiolucero / pandareader.py
Created October 2, 2017 14:26
Panda dreams
import pandas as pd
from pandasqlutils import *  # hypothetical helper library, to easily cross tables

# wish-list API: pandas has no read_url; these calls sketch how it might feel
bikes = pd.read_url('http://quant.cl/db/bikes')                 # serving from quant via sqlite
airquality = pd.read_url('s3://amazonxyz.aws.com/sdakjldajds')  # using an S3 bucket
weather = pd.read_url('quant.cl/postgres/weather')              # running on a docker (Rad?)
airvsclimate = cross(airquality, weather)
print(airvsclimate.head())
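For reference, a hedged sketch of the same pipeline with APIs pandas actually ships (the connection string and join key are assumptions):
import pandas as pd
from sqlalchemy import create_engine

bikes = pd.read_csv('http://quant.cl/db/bikes')                      # pandas readers accept plain URLs
engine = create_engine('postgresql://user:pass@quant.cl/weatherdb')  # hypothetical credentials
weather = pd.read_sql('SELECT * FROM weather', engine)
airquality = pd.read_csv('s3://amazonxyz.aws.com/sdakjldajds')       # s3:// paths need s3fs installed
airvsclimate = airquality.merge(weather, on='date')                  # assumes a shared 'date' column
print(airvsclimate.head())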
@sergiolucero
sergiolucero / pullwikipop.py
Created October 14, 2017 19:48
pull population data from Wikipedia
import wikipedia

CITIES = ['Paris', 'Barcelona', 'Tokyo', 'New York City', 'Amsterdam', 'Copenhagen', 'San Francisco']
AMBIGUOUS_CITIES = ['Santiago']  # these need disambiguation handling
for city in CITIES:
    citywiki = wikipedia.page(city)
    cwsum = citywiki.summary
    poploc = cwsum.index('population')  # first and only? use re!
    print(city, cwsum[poploc:poploc + 30])
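Following the comment's own suggestion, a hedged regex version (the pattern is an assumption about how the summaries phrase the figure):
import re

m = re.search(r'population (?:of|was|is) ([\d.,]+(?: million)?)', cwsum)
if m:
    print(city, 'population:', m.group(1))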
@sergiolucero
sergiolucero / georef.py
Created September 24, 2017 16:02
basic georeferencing
import googlemaps
GMAPS_API = 'AIzaSyBjB7GbQ2TL2SUW989uu7ZoP6S0bxsv3t8' # EKHOS July 2017
gmaps = googlemaps.Client(key=GMAPS_API)
gref = gmaps.geocode('Emilio Vaisse 564, Providencia')
print(gref[0]['geometry'])
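A short follow-up in case only the coordinates are wanted; the geocode result nests them under geometry/location:
loc = gref[0]['geometry']['location']  # {'lat': ..., 'lng': ...}
print(loc['lat'], loc['lng'])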
@sergiolucero
sergiolucero / wordcloud.R
Created November 9, 2017 19:00
word cloud generator
library(tm); library(wordcloud); library(memoise)

books <<- list("A Mid Summer Night's Dream" = "summer",
               "Glamorama" = "Glamorama1")  # the list of valid books

getTermMatrix <- memoise(function(book) {  # "memoise" automatically caches the results
  if (!(book %in% books)) stop("Unknown book")
  text <- readLines(sprintf("./%s.txt.gz", book), encoding = "UTF-8")
  myCorpus <- Corpus(VectorSource(text))
@sergiolucero
sergiolucero / googpath.py
Created November 21, 2017 20:57
google path
import datetime  # was missing; needed for the departure timestamp below
import json
import pandas as pd
import urllib.request

ori = '75+9th+Ave+New+York,+NY'
des = 'MetLife+Stadium+1+MetLife+Stadium+Dr+East+Rutherford,+NJ+07073'
dep = int(datetime.datetime.now().timestamp())
URL = 'https://maps.googleapis.com/maps/api/directions/json?origin=%s&destination=%s&departure_time=%d' % (ori, des, dep)
URL += '&traffic_model=best_guess&key=AIzaSyB_ZS04dfON0PZVBRwhKTkeChK3rlYUgSk'
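The preview stops before the request is sent; a minimal sketch of the remaining step (duration_in_traffic appears in Directions responses when departure_time is set):
resp = urllib.request.urlopen(URL).read()
route = json.loads(resp.decode('utf-8'))
leg = route['routes'][0]['legs'][0]
print(leg['duration_in_traffic']['text'], 'from', ori, 'to', des)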
@sergiolucero
sergiolucero / cplex_demo.py
Last active December 4, 2017 20:05
calling IBM CPLEX
from forestry import ForestryLinearProgram    # this is project-specific
from pulp.solvers import PULP_CBC_CMD as CBC  # open-source LP library/solver (the package name is lowercase 'pulp')
from creds import BASE_URL, API_KEY           # credentials used to identify with IBM
from docloud.job import JobClient

my_problem = ForestryLinearProgram('forestry_test.xlsx')  # input file contains all relevant tree data
# my_problem.solve(CBC)  # how we used to work, until our problems grew too big for open-source
my_problem.save_to_MPS('forestry_LP.mps')  # export to a format IBM CPLEX can recognize
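The preview cuts off before the job is submitted; a hedged sketch of the DOcplexcloud step with the JobClient imported above (the output filename is an assumption):
client = JobClient(BASE_URL, API_KEY)
client.execute(input=['forestry_LP.mps'], output='solution.json')  # blocks until the solve finishes, then downloads the result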
@sergiolucero
sergiolucero / copscraping.py
Created September 24, 2017 15:23
scraping from the cops
from selenium import webdriver
url='http://consultawebvehiculos.carabineros.cl/index.php'
fd = webdriver.Firefox()
plates = ['CZJB81','BDPW78']
sections = ['txtLetras','txtNumeros1','txtNumeros2']
def check_robo(patente):  # "robo" = theft: query whether a plate is reported stolen
    fd.get(url)  # point to the starting page again
    patsplit = [patente[:2], patente[2:4], patente[4:]]
    for ix, sec in enumerate(sections):
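The preview ends inside the loop; a hedged sketch of the rest (the submit button name is hypothetical):
        fd.find_element_by_name(sec).send_keys(patsplit[ix])  # fill each plate fragment
    fd.find_element_by_name('btnConsultar').click()  # hypothetical submit control
    return fd.page_source  # parse the result page downstream

print(check_robo(plates[0]))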