Skip to content

Instantly share code, notes, and snippets.

@kshepp
kshepp / title.html
Created December 4, 2015 14:13
Create a Title in Django
{% extends "NAME OF YOUR PROJECT/repeatable_content.html" %}
<!-- The extends tag above MUST be the very first tag in the template;
     Django raises TemplateSyntaxError if any other tag precedes it.
     (The original placed it at the bottom, which does not work.) -->
<!-- Override the parent template's head-title block with this page's title -->
{% block head-title %}
Your title
{% endblock %}
@kshepp
kshepp / repeatable_content.html
Last active July 14, 2023 06:05
Django Header, Navbar, and Footer
<!-- Enable referencing files from the 'static' folder (Bootstrap .css, .js, fonts, etc.) below -->
{% load staticfiles %}
<!-- NOTE(review): the 'staticfiles' tag library was removed in Django 3.0;
     newer projects should load 'static' instead - confirm target Django version -->
<!-- Loading it once makes every asset under the static folder addressable
     without wiring each file in individually -->
<html>
<head>
<!-- Overridable block for per-page meta tags - useful for SEO -->
{% block head-meta %}
<meta charset="utf-8">
@kshepp
kshepp / running_brat
Created July 8, 2015 18:31
Running brat
# Launch the brat annotation tool's built-in standalone web server
python standalone.py
@kshepp
kshepp / stanford-postagger
Created July 1, 2015 15:09
stanford-postagger
# Run the Stanford part-of-speech tagger with a 1000 MB max Java heap (-mx1000m)
java -mx1000m -jar stanford-postagger.jar
# Run the Stanford named-entity recognizer with the same heap limit
java -mx1000m -jar stanford-ner.jar
@kshepp
kshepp / nltk_parts_of_speech.py
Last active July 6, 2018 07:35
NLTK - Top 50 Most Frequent Part of Speech
from __future__ import division
import nltk, re
from nltk import FreqDist
from nltk import word_tokenize
from nltk.tokenize import RegexpTokenizer
from nltk.corpus import brown
tokenizer = RegexpTokenizer(r'\w+') # Takes out the punctuation that Python can't read
f= open('Conservative.txt').read().decode('utf-8') # open the file
text = word_tokenize(f)
@kshepp
kshepp / Scraping_Twitter_2
Last active August 29, 2015 14:21
Scraping Twitter 2
import sys
import string
from twython import Twython
# App-only (OAuth2) authentication: first exchange the consumer key/secret
# pair for a bearer access token...
twitter = Twython('Insert Consumer Key Here',
'Insert Consumer Secret Key',oauth_version=2)
Access_token = twitter.obtain_access_token()
# ...then build a second client that sends that bearer token on every request
t = Twython('Insert Consumer Key', access_token=Access_token)
#MAKE THE QUERY TO TWITTER TO PULL THE DATA
@kshepp
kshepp / Token_Words.py
Last active August 29, 2015 14:21
[Working On] Tokenizing Twitter Statuses to Create Wordle Visual
"""Set up NLTK tokenization for turning Twitter statuses into word counts."""
from __future__ import division
import nltk
import re
import pprint
from nltk import word_tokenize
from nltk.tokenize import RegexpTokenizer
import numpy as np
tokenizer = RegexpTokenizer(r'\w+') # match word characters only, so punctuation is dropped rather than emitted as tokens
@kshepp
kshepp / Scraping Twitter Statuses
Created April 21, 2015 17:32
Scraping Twitter Statuses
import sys
import string
from twython import Twython
# App-only (OAuth2) authentication: exchange the consumer key/secret
# for a bearer access token...
twitter = Twython('Insert Consumer Key Here',
'Insert Consumer Secret Key',oauth_version=2)
Access_token = twitter.obtain_access_token()
# ...then build a second client that authenticates with that bearer token
t = Twython('Insert Consumer Key', access_token=Access_token)
# Search for up to 20 recent tweets mentioning @puremichigan, retweets included
user_timeline = t.search(q='@puremichigan', count=20, include_rts=1)