Created
August 30, 2016 09:15
-
-
Save grigory-rechistov/32348c73d09de9454894f9aeb2d78800 to your computer and use it in GitHub Desktop.
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
#!/usr/bin/env python | |
#Copyright (c) 2014, 2016, Grigory Rechistov <grigory.rechistov@phystech.edu> | |
#All rights reserved. | |
#Redistribution and use in source and binary forms, with or without modification, | |
#are permitted provided that the following conditions are met: | |
#1. Redistributions of source code must retain the above copyright notice, | |
#this list of conditions and the following disclaimer. | |
#2. Redistributions in binary form must reproduce the above copyright notice, | |
#this list of conditions and the following disclaimer in the documentation | |
#and/or other materials provided with the distribution. | |
#THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" | |
#AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, | |
#THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE | |
#ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE | |
#LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, | |
#OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF | |
#SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; | |
#OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, | |
#WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR | |
#OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, | |
#EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. | |
''' | |
Unique words in corpus created from given directoty tree of PDFs | |
Usage: unique-words.py <folder> <tmpdir> | |
Dependencies: | |
NLTK | |
pdftotext | |
Algorithm outline: | |
good_words = {} | |
bad_words ={} | |
failed_files = {} | |
For f in *.pdf | |
txt = pdftotext f | |
if txt is empty | |
failed_files += f | |
next file | |
(Optional) For words ending with - at end of line, concatenate with the next word. | |
list = convert txt to list | |
for word in list | |
attempt to stem the word http://stackoverflow.com/questions/771918/how-do-i-do-word-stemming-or-lemmatization | |
spell check word | |
if known good_words += word | |
else bad_words += word | |
Dump stats: | |
List of unique words (TODO with frequency of inclusion) | |
List of unique words that failed spell check | |
''' | |
import nltk | |
from nltk.stem.wordnet import WordNetLemmatizer | |
from nltk.corpus import words as CorpusWords | |
from nltk.stem.porter import PorterStemmer | |
from string import replace | |
import os | |
import sys | |
import re | |
import subprocess | |
import datetime | |
def get_nltk_data(): | |
dl = nltk.downloader.Downloader("http://nltk.github.com/nltk_data/") | |
dl.download('wordnet') | |
def filter_text(data): | |
# TODO this implementation is lame | |
filt = set(("\n", ".", ",", ":", ";", "[", "]", "\\", "/", "!", "?", "&", | |
"<", ">", "+", '"', '`', "*", "~", "$", "#", "@", "(", ")", "'", | |
"0", "1", "2", "3", "4", "5", "6", "7", "8", "9", | |
)) | |
result = "" | |
for c in data: | |
if c in filt: result += " " | |
else: result += c | |
return result | |
def main(): | |
lmtzr = WordNetLemmatizer() | |
known_words = CorpusWords.words() | |
stemmer = PorterStemmer() | |
good_words = set() | |
good_stems = set() | |
bad_words = set() | |
failed_files = set() | |
try: | |
rootdir = sys.argv[1] | |
tmpdir = sys.argv[2] | |
except: | |
print "Usage: %s <folder> <tmpdir>" % sys.argv[0] | |
return 1 | |
print "Started at %s for folder %s, texts are in %s" % (datetime.datetime.now(), rootdir, tmpdir) | |
for root, subfs, files in os.walk(rootdir): | |
for f in files: | |
m = re.match("^(.*)\.pdf$", f, re.IGNORECASE) | |
if m: | |
txtname = m.group(1) | |
txtname = replace(txtname, " ", "_") | |
txtname = replace(txtname, "-", "_") | |
pdfpath = os.path.join(root, f) | |
txtpath = os.path.join(tmpdir, txtname + ".txt") | |
statspath = os.path.join(tmpdir, txtname + ".stats") | |
print "Running pdftotext %s %s" % (pdfpath, txtpath) | |
exitcode = subprocess.call(["pdftotext", pdfpath, txtpath]) | |
if exitcode: # Note this is actually not reliable | |
print "pdftotext failed" | |
failed_files += pdfpath | |
continue | |
print "Analyzing %s:" % (txtpath), | |
sys.stdout.flush() | |
with open(txtpath) as d: | |
data = filter_text(d.read().lower()) | |
words = data.split(" ") | |
counter = 0 | |
good = set() | |
stems = set() | |
bad = set() | |
for word in words: | |
if len(word) == 0: continue | |
counter +=1 | |
if counter % 1000 == 0: | |
sys.stdout.write(".") | |
sys.stdout.flush() | |
word = lmtzr.lemmatize(word) | |
stem = stemmer.stem(word) | |
if word in known_words: | |
good.add(word) | |
stems.add(stem) | |
else: bad.add(word) | |
#print "Good words: ", good | |
#print "-------------------" | |
#print "Bad words: ", bad | |
#TODO dump individual stats for statspath | |
good_words.update(good) | |
good_stems.update(stems) | |
bad_words.update(bad) | |
# Print resulting stats | |
print "Good stems total: %d" % len(good_stems) | |
print "Good words total: %d" % len(good_words) | |
print "Bad words total: %d" % len(bad_words) | |
print "-------------------" | |
for stem in sorted(good_stems): print stem | |
print "-------------------" | |
for word in sorted(good_words): print word | |
print "-------------------" | |
for word in sorted(bad_words): print word | |
print "Finished at %s" % datetime.datetime.now() | |
if __name__ == '__main__': | |
main() |
Sign up for free
to join this conversation on GitHub.
Already have an account?
Sign in to comment