Instantly share code, notes, and snippets.

View unnest_byseat.R
library(tidyr)
# NOTE(review): setwd() in a script is fragile — consider running from the
# project directory or using a relative path instead.
setwd("~/Desktop/unnest")

# Base name of the input file; ".csv" is appended at read time below.
# (Bug fix: the original set fname to "file-name.csv" and then appended
# ".csv" again, so read.csv looked for "file-name.csv.csv".)
fname <- "file-name"
df <- read.csv(paste0(fname, ".csv"), stringsAsFactors = FALSE)
df$seats =
sapply(1:nrow(df), function(x) {
seats = c(df[x,]$first_seat,df[x,]$last_seat)
View stubhub_inventory_v2.py
import requests
import base64
import pprint
import pandas as pd
import json
from tqdm import tqdm
# https://stubhubapi.zendesk.com/hc/en-us/articles/220922687-Inventory-Search
View html_table_parser.py
# http://srome.github.io/Parsing-HTML-Tables-in-Python-with-BeautifulSoup-and-pandas/
class HTMLTableParser:
    @staticmethod
    def get_element(node):
        """Return a CSS selector fragment identifying `node` among its siblings.

        Produces ``tag:nth-child(k)`` when the node has preceding element
        siblings, or just the bare tag name when it is the first element child.

        Args:
            node: a BeautifulSoup-like tag with ``name`` and
                ``previous_siblings`` attributes.

        Returns:
            str: e.g. ``'td:nth-child(3)'`` or ``'div'``.
        """
        # CSS :nth-child() counts only *element* siblings, but BeautifulSoup's
        # previous_siblings also yields text nodes (NavigableStrings), which
        # have no meaningful tag name. Counting them inflated the index
        # whenever whitespace sat between tags — filter them out.
        element_siblings = [
            sib for sib in node.previous_siblings
            if getattr(sib, "name", None) is not None
        ]
        position = len(element_siblings) + 1
        if position > 1:
            return '%s:nth-child(%s)' % (node.name, position)
        return node.name
View Large dataframe to csv in chunks in R
# Read the data frame that will be written back out to csv in chunks.
df <- read.csv("your-df.csv")

# Number of rows in each chunk.
elements_per_chunk <- 100000

# List of row-index vectors: [[1]] 1:100000, [[2]] 100001:200000, ...
# seq_len(nrow(df)) is safe for an empty data frame, where 1:nrow(df)
# would wrongly yield c(1, 0). The original seq_along(1:nrow(df)) was a
# redundant detour to the same index sequence.
row_ids <- seq_len(nrow(df))
l <- split(row_ids, ceiling(row_ids / elements_per_chunk))

# Output file the chunks will be written to.
fname <- "inventory-cleaned.csv"
View reddit-posts.html
<!-- Produces a responsive list of top ten posts from a subreddit /worldnews. Working jsfiddle http://jsfiddle.net/KobaKhit/t42zkbnk/ -->
<div id="posts">
  <h2> Today's top ten news <small>from <a href = '//reddit.com/r/worldnews' target = '_blank'>/r/worldnews</a></small></h2>
  <hr>
  <!-- List items are filled in by script (not shown in this snippet). -->
  <ul class="list-unstyled"></ul>
</div>
<!-- JS -->
<!-- rawgit.com was sunset in 2019 and no longer serves files, so the
     original reddit.js URL is a dead link; jsDelivr's GitHub CDN serves
     the same repository path. -->
<script src="https://cdn.jsdelivr.net/gh/sahilm/reddit.js@master/reddit.js"></script>
<script src="https://code.jquery.com/jquery-2.1.3.min.js"></script>