Dylan McDowell (dylanjm)
Idaho National Laboratory (@idaholab) · Northwestern United States
#!/bin/bash
# Convert every .html file in the directory to a .docx file for grading, then
# check each one for the string "Passed all tests with no errors" so we can
# quickly sort out the files of people who may have had more trouble.
# Files that pass are moved to a separate directory from those with failed tests.
str="Passed all tests with no errors"
for file in *.html; do
  # Note: pandoc 2.x dropped the old -S flag; smart punctuation is now an
  # input-format extension, so plain -s (standalone) is enough here.
  pandoc -s -o "${file%.*}.docx" "$file"
  if grep -q "$str" "$file"; then
    mv "${file%.*}.docx" ../docx/testPass/
  else
    # Failing submissions go to a sibling directory (name assumed from the
    # comment above; the gist preview cuts off before this branch).
    mv "${file%.*}.docx" ../docx/testFail/
  fi
done

# A clean ggplot2 theme for MATH 335 coursework: no panel border or grid,
# black axis lines, bold titles, and a small italic gray caption.
math335_theme <- function() {
  theme_bw() +
    theme(panel.border     = element_blank(),
          panel.grid.major = element_blank(),
          panel.grid.minor = element_blank(),
          axis.line        = element_line(colour = "black"),
          plot.title       = element_text(face = "bold"),
          axis.title       = element_text(face = "bold"),
          plot.caption     = element_text(face = "italic", size = 8, color = "#4d4d4d"))
}
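
A minimal usage sketch (my own example, not from the gist): the function returns a theme object, so it can be added to any plot as the final layer.

library(ggplot2)
ggplot(mtcars, aes(wt, mpg)) +
  geom_point() +
  labs(title = "Weight vs. MPG", caption = "Source: mtcars") +
  math335_theme()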

dylanjm / econ_data_wrangle_clean.R · Created April 21, 2018 15:04
A script that uses purrr to automate the wrangling and cleaning of economic data
library(tidyverse)
library(rio)
library(rvest)
library(janitor)

# Fetch the three-letter country codes from Chad Jones's data page.
country_codes <- read_html("http://web.stanford.edu/~chadj/countrycodes6.3") %>%
  html_text() %>%
  str_extract_all("[A-Z]{3}")
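
The gist preview cuts off mid-pipeline. A plausible continuation (my sketch, not the gist's code) flattens the match list into a unique character vector ready for joining against the economic data:

country_codes <- country_codes %>%
  unlist() %>%
  unique()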

dylanjm / commercial_bank_plots.R · Created May 28, 2018 16:40
A script automating the analysis of Commercial Bank Expansion Feasibility
library(blscrapeR)
library(tidyverse)
library(albersusa)
library(ggalt)
library(wesanderson)
library(cowplot)
library(tidycensus)

# The original blscrapeR::qcew_api() is not really suited to taking multiple
# years or NAICS codes at once. Here is a function that grabs it all!
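
The preview stops before the function itself. A sketch of what such a wrapper might look like (my reconstruction, not the gist's code — the tidyr::crossing/purrr::pmap_dfr iteration and the default quarter are assumptions):

# Grab QCEW data for every combination of year and NAICS code as one tidy frame.
qcew_multi <- function(years, naics_codes, qtr = "1") {
  tidyr::crossing(year = years, code = naics_codes) %>%
    purrr::pmap_dfr(function(year, code) {
      blscrapeR::qcew_api(year = year, qtr = qtr,
                          slice = "industry", sliceCode = code) %>%
        dplyr::mutate(naics = code)
    })
}

# e.g. qcew_multi(2013:2016, c("522110", "522120"))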

library(tidyverse)

# Parse timestamped log lines of the form "[YYYY-MM-DD HH:MM] comment", pull
# any ID number out of each comment, and fill it down over the rows that follow.
read_lines(here::here("input_data/data04a.txt")) %>%
  str_match("\\[(\\d+)-(\\d+-\\d+) (\\d+):(\\d+)\\] (.*$)") %>%
  as_tibble() %>%
  set_names(c("raw_data", "year", "date", "hour", "min", "comment")) %>%
  mutate(id = str_extract_all(comment, "(\\d+)", simplify = TRUE) %>%
           ifelse(. == "", NA, .)) %>%
  arrange(year, date, hour, min) %>%
  fill(id)
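
For context, input lines in the shape that regex expects would look like the sample below (illustrative only — the actual contents of data04a.txt are not shown in the gist):

# [1518-11-01 00:00] Guard #10 begins shift
# [1518-11-01 00:05] falls asleep
# [1518-11-01 00:25] wakes up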

library(tidyverse)
library(hrbrthemes)
library(patchwork)

r_tweets  <- read_rds(here::here("data/data_2019/week01_rstats_tweets.rds"))
tt_tweets <- read_rds(here::here("data/data_2019/week01_tidy_tuesday_tweets.rds"))

# The ten most active #TidyTuesday tweeters.
(top_users <- tt_tweets %>%
    count(screen_name) %>%
    top_n(10, n))
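
The preview ends here. A plausible follow-on (my sketch — the gist's actual plot code is not shown) would chart those users with the hrbrthemes styling already loaded:

ggplot(top_users, aes(x = reorder(screen_name, n), y = n)) +
  geom_col() +
  coord_flip() +
  labs(x = NULL, y = "Tweets", title = "Most active #TidyTuesday users") +
  theme_ipsum()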

import matplotlib.pyplot as plt
import seaborn as sns
import pandas as pd
import numpy as np

url = 'https://python-graph-gallery.com/wp-content/uploads/gapminderData.csv'

def add_cols(grp):
    # Population in hundreds of thousands, plus a population-weighted GDP mean
    # for the group. Returning grp is required for use with groupby().apply().
    grp['popl'] = grp['pop'].apply(lambda x: x / 10**5)
    grp['gdp_wt'] = np.average(grp['gdpPercap'], weights=grp['pop'])
    return grp
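
A minimal sketch of how the helper would be applied (my assumption — the preview stops before the data is loaded), grouping the Gapminder rows by year:

df = pd.read_csv(url)
df = df.groupby('year', group_keys=False).apply(add_cols)
print(df[['year', 'country', 'popl', 'gdp_wt']].head())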

# A reproducible data snippet (R dput() output); the vector is truncated in the preview.
structure(list(y = c(-428.846931773699, 2814.14354795795, 1041.57062019406,
413.053552770623, -2800.67011782234, -2198.16050963969, 801.924652033519,
732.090097673507, -18875.6488077687, -2832.38372468581, 173.46142036292,
-3786.71578136507, -1997.72041336496, 1592.56310472052, -486.657881249919,
593.68112891115, -21.9813204920439, 864.966591019571, -5237.93696303717,
-9202.40040067778, 425.320418694913, 797.650940028017, -123.700827591037,
-216.376695426808, 305.222605237999, 3.28251012543717, 991.586724694986,
1433.13448159131, 4629.28834528504, 643.801261103721, -1171.77757782828,
3346.60023414888, 2039.8742756452, 1695.36368497713, 1100.78951900258,
-284.355910530996, 1033.8900351293, 1000.68508364355, 202.456374187071,

#!/usr/bin/env python3
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt

def amort(principal, rate, n):
    """Level payment per period for a loan: the standard amortization formula."""
    return principal * (rate * (1 + rate) ** n) / ((1 + rate) ** n - 1)
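
A quick worked check (my own numbers, not from the gist): a $200,000 loan at 4.5% APR over 30 years, using the monthly rate and 360 periods:

payment = amort(200_000, 0.045 / 12, 360)
print(f"{payment:.2f}")  # ≈ 1013.37 per month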

#!/usr/bin/env python3
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import textwrap
from tabulate import tabulate

def amort(principal, rate, n):
    """Level payment per period for a loan: the standard amortization formula."""
    return principal * (rate * (1 + rate) ** n) / ((1 + rate) ** n - 1)
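
Given the tabulate import, the full gist presumably prints an amortization schedule; a sketch of that idea (mine, with assumed loan terms):

def schedule(principal, rate, n):
    # One row per period: payment, interest accrued, principal repaid, balance left.
    pmt, bal, rows = amort(principal, rate, n), principal, []
    for period in range(1, n + 1):
        interest = bal * rate
        bal -= pmt - interest
        rows.append([period, pmt, interest, pmt - interest, max(bal, 0)])
    return rows

print(tabulate(schedule(200_000, 0.045 / 12, 360)[:3],
               headers=["Period", "Payment", "Interest", "Principal", "Balance"],
               floatfmt=".2f"))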