Skip to content

Instantly share code, notes, and snippets.

@bigjoedata
Created April 3, 2020 06:07
Show Gist options
  • Star 0 You must be signed in to star a gist
  • Fork 0 You must be signed in to fork a gist
  • Save bigjoedata/523f67ba2a3d3c395470fbd3c69bf6dc to your computer and use it in GitHub Desktop.
# -*- coding: utf-8 -*-
# Copyright 2018-2019 Streamlit Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""An example of showing geographic data."""
import streamlit as st
import pandas as pd
import numpy as np
import altair as alt
import pydeck as pdk
import datetime
# Column name of the date field in the NYT dataset.
# NOTE(review): this constant is never referenced below -- confirm before removing.
DATE = "date"

# NYT county-level COVID-19 case counts (one row per county per day).
DATA_URL = "https://raw.githubusercontent.com/nytimes/covid-19-data/master/us-counties.csv"

# County centroid coordinates keyed by FIPS code.
ZIPDB = "https://raw.githubusercontent.com/btskinner/spatial/master/data/county_centers.csv"

st.title("Covid Growth in the US")

# Intro / attribution text shown at the top of the page.
st.markdown(
"""
This Streamlit app shows Covid growth in the US. Use the slider
to pick a specific day and look at how the charts change.
This is heavily indebted to:
https://github.com/streamlit/demo-uber-nyc-pickups/blob/master/app.py
https://github.com/nytimes/covid-19-data
https://github.com/btskinner/spatial
""")
@st.cache(persist=True)
def load_data():
    """Download and clean the NYT county-level COVID-19 case data.

    Returns a DataFrame with rows lacking a FIPS code or date removed,
    ``fips`` cast to int (so it merges cleanly with the centroid table),
    and ``date`` parsed to datetime64.
    """
    df = pd.read_csv(DATA_URL)
    # Drop rows with no county FIPS (e.g. "Unknown" county rows) or no
    # date in a single pass; a null-free fips column is required for the
    # int cast below.  .copy() avoids assigning into a view of the raw
    # frame (SettingWithCopyWarning) and keeps the cached object clean.
    df = df[df['fips'].notna() & df['date'].notna()].copy()
    # fips is parsed as float because of the NaNs; the merge key must be int.
    df['fips'] = df['fips'].astype(int)
    df['date'] = pd.to_datetime(df['date'])
    return df
@st.cache(persist=True)
def load_zips():
    """Download county centroid coordinates keyed by FIPS code.

    Renames the 2010 population-weighted centroid columns to ``lat`` and
    ``lon``, drops rows missing either coordinate, and removes the unused
    centroid variants so the merged frame stays small.
    """
    data = pd.read_csv(ZIPDB)
    # pclat10 / pclon10 are the 2010 population-weighted centroids; one
    # rename call instead of two.
    data = data.rename(columns={'pclon10': 'lon', 'pclat10': 'lat'})
    # Rows without coordinates cannot be plotted.
    data = data.dropna(subset=['lat', 'lon'])
    # The remaining centroid variants are never used downstream.  No
    # inplace=True: never mutate an object st.cache is going to hold.
    data = data.drop(columns=['clon00', 'clat00', 'clon10', 'clat10',
                              'pclon00', 'pclat00'])
    return data
data = load_data()
zips = load_zips()

# Left-join so every case row is kept even when no centroid exists for its
# FIPS code; unmatched rows end up with NaN lat/lon.
datamerged = pd.merge(data, zips, on='fips', how='left')
st.write(datamerged)

# Center the map on the mean county coordinate.  Use nanmean (not
# np.average): the left join above can leave NaN lat/lon for unmatched
# FIPS codes, and np.average would propagate that NaN into the view
# state, leaving the map with no valid center.
midpoint = (np.nanmean(datamerged["lat"]), np.nanmean(datamerged["lon"]))

# 3-D hexagon-bin map of the merged rows (one hexagon column per cluster
# of county centroids; height scales with row count).
st.write(pdk.Deck(
    map_style="mapbox://styles/mapbox/light-v9",
    initial_view_state={
        "latitude": midpoint[0],
        "longitude": midpoint[1],
        "zoom": 4,
        "pitch": 50,
    },
    layers=[
        pdk.Layer(
            "HexagonLayer",
            data=datamerged,
            get_position=["lon", "lat"],
            radius=300,
            elevation_scale=50,
            elevation_range=[0, 3000],
            pickable=True,
            extruded=True,
        ),
    ],
))
Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment