Skip to content

Instantly share code, notes, and snippets.

View mrcreel's full-sized avatar

Michael R Creel mrcreel

View GitHub Profile
<div class="entry">
<p>
<meta http-equiv="Content-Type" content="text/html;CHARSET=ISO-8859-1">
</p>
</div>
# Fetch COVID-19 case/death time series (JHU CSSE) and per-state testing data.
import pandas as pd

# Johns Hopkins CSSE daily time-series CSVs, served raw from GitHub.
url_confirmed = 'https://raw.githubusercontent.com/CSSEGISandData/COVID-19/master/csse_covid_19_data/csse_covid_19_time_series/time_series_covid19_confirmed_US.csv'
url_deaths = 'https://raw.githubusercontent.com/CSSEGISandData/COVID-19/master/csse_covid_19_data/csse_covid_19_time_series/time_series_covid19_deaths_US.csv'
# COVID Tracking Project daily per-state testing numbers.
url_tests = 'https://covidtracking.com/api/v1/states/daily.csv'

# pandas reads each remote CSV straight over HTTP into a DataFrame.
confirmed, deaths, tests = (pd.read_csv(src) for src in (url_confirmed, url_deaths, url_tests))
Adams
Alcorn
Amite
Attala
Benton
Bolivar
Calhoun
Carroll
Chickasaw
Choctaw
"""
See comments from https://gist.github.com/mrcreel/5432787a57b131a87c3f1724b1f7fffd
"""
# Standard library first, then third-party (PEP 8 import grouping).
import csv

import requests
from bs4 import BeautifulSoup

# Mississippi State Dept. of Health COVID-19 reporting page to scrape.
URL = 'https://msdh.ms.gov/msdhsite/_static/14,0,420.html'

# Download the page; parsing is done in a later snippet.
page = requests.get(URL)
import csv  # Used later to write the scraped rows to an output file
import requests  # HTTP client used to download the page
from bs4 import BeautifulSoup  # HTML parser for extracting the data table

# Mississippi State Dept. of Health COVID-19 reporting page.
URL = 'https://msdh.ms.gov/msdhsite/_static/14,0,420.html'

page = requests.get(URL)  # Fetch the page over HTTP
# Fail fast on HTTP errors (404/500/...) instead of silently parsing an
# error page and finding nothing.
page.raise_for_status()

# Parse the raw response body so elements can be looked up by id/tag/class.
soup = BeautifulSoup(page.content, 'html.parser')
# The statewide case-totals table carries this id on the MSDH page;
# soup.find returns None if the id is absent, so callers should check.
cases = soup.find(id='msdhTotalCovid-19Cases')
# NOTE(review): leading indentation was lost when this snippet was extracted,
# so the loop bodies below are not valid Python as written — restore the
# original indentation before running. The body of the inner `for link`
# loop is also truncated here; see the original gist for the rest.
for person in df_data:
# Each record wraps the legislator payload in a 'node' key
# (GraphQL-style edges/nodes response — TODO confirm against the API).
legislator = person['node']
legislator_name = legislator['name']
# The first membership entry carries the chamber/party organization name.
chamber = legislator['chamber'][0]['organization']['name']
party = legislator['party'][0]['organization']['name']
# e.g. "Name: Jane Doe(Democratic Senate-District 1)"
print('Name: '+legislator_name+'('+party+' '+chamber+'-'+legislator['chamber'][0]['post']['label']+')')
links = legislator['links']
for link in links:
[{
"id": "aberdeen",
"name": "Aberdeen",
"url": "/Aberdeen.htm",
"status": "active"
}, {
"id": "adamscountychristian",
"name": "Adams County Christian",
"url": "/Adamscountychristian.htm",
"status": "active"
}]

General

A custom URL has been created

Spelling and grammar are correct

Acronyms and specialized language are either likely to be known to recruiters or are accompanied by an explanation

Does not include negative language