
@MrEliptik
Created January 14, 2019 12:01
A Python script to preprocess text (remove URLs, lowercase, tokenize, etc.)
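Besides the standard library, the script relies on the nltk, contractions, and inflect packages, plus downloaded NLTK data for tokenization, stop words, and the WordNet lemmatizer. A minimal setup sketch (the exact resource names below are an assumption based on the functions used):

import nltk
nltk.download('punkt')      # tokenizer models used by word_tokenize
nltk.download('stopwords')  # English stop-word list
nltk.download('wordnet')    # data for WordNetLemmatizer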
import re, string, unicodedata
import nltk
import contractions
import inflect
from nltk import word_tokenize, sent_tokenize
from nltk.corpus import stopwords
from nltk.stem import LancasterStemmer, WordNetLemmatizer

def replace_contractions(text):
    """Replace contractions in a string of text"""
    return contractions.fix(text)

def remove_URL(sample):
    """Remove URLs from a sample string"""
    return re.sub(r"http\S+", "", sample)

def remove_non_ascii(words):
    """Remove non-ASCII characters from a list of tokenized words"""
    new_words = []
    for word in words:
        new_word = unicodedata.normalize('NFKD', word).encode('ascii', 'ignore').decode('utf-8', 'ignore')
        new_words.append(new_word)
    return new_words

def to_lowercase(words):
    """Convert all characters to lowercase in a list of tokenized words"""
    new_words = []
    for word in words:
        new_word = word.lower()
        new_words.append(new_word)
    return new_words

def remove_punctuation(words):
    """Remove punctuation from a list of tokenized words"""
    new_words = []
    for word in words:
        new_word = re.sub(r'[^\w\s]', '', word)
        if new_word != '':
            new_words.append(new_word)
    return new_words

def replace_numbers(words):
    """Replace all integer occurrences in a list of tokenized words with their textual representation"""
    p = inflect.engine()
    new_words = []
    for word in words:
        if word.isdigit():
            new_word = p.number_to_words(word)
            new_words.append(new_word)
        else:
            new_words.append(word)
    return new_words

def remove_stopwords(words):
    """Remove stop words from a list of tokenized words"""
    new_words = []
    for word in words:
        if word not in stopwords.words('english'):
            new_words.append(word)
    return new_words

def stem_words(words):
    """Stem words in a list of tokenized words"""
    stemmer = LancasterStemmer()
    stems = []
    for word in words:
        stem = stemmer.stem(word)
        stems.append(stem)
    return stems

def lemmatize_verbs(words):
    """Lemmatize verbs in a list of tokenized words"""
    lemmatizer = WordNetLemmatizer()
    lemmas = []
    for word in words:
        lemma = lemmatizer.lemmatize(word, pos='v')
        lemmas.append(lemma)
    return lemmas

def normalize(words):
    words = remove_non_ascii(words)
    words = to_lowercase(words)
    words = remove_punctuation(words)
    words = replace_numbers(words)
    words = remove_stopwords(words)
    return words

def preprocess(sample):
    sample = remove_URL(sample)
    sample = replace_contractions(sample)
    # Tokenize
    words = nltk.word_tokenize(sample)
    # Normalize
    return normalize(words)

if __name__ == "__main__":
    sample = "Blood test for Down's syndrome hailed http://bbc.in/1BO3eWQ"
    sample = remove_URL(sample)
    sample = replace_contractions(sample)
    # Tokenize
    words = nltk.word_tokenize(sample)
    print(words)
    # Normalize
    words = normalize(words)
    print(words)
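
Note that stem_words and lemmatize_verbs are defined but never called by normalize(); if stemming or lemmatization is wanted, it could be chained after normalization, for example (a sketch, not part of the original flow):

words = normalize(nltk.word_tokenize(sample))
stems = stem_words(words)
lemmas = lemmatize_verbs(words)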
@raviyadav44

Thanks bro, it is really helpful.

@zachlagden

This is awesome, thank you.

@jppyykm commented Dec 24, 2021

You should add a combo function that does all the steps at once to save computational power. Cheers.
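A minimal sketch of such a single-pass version, reusing the imports from the top of the gist and hoisting the stop-word set and the inflect engine out of the loop (the name normalize_single_pass and the module-level constants are illustrative, not part of the gist):

STOP_WORDS = set(stopwords.words('english'))
INFLECT_ENGINE = inflect.engine()

def normalize_single_pass(words):
    """Apply the same steps as normalize() in a single loop over the tokens"""
    new_words = []
    for word in words:
        # ASCII folding and lowercasing
        word = unicodedata.normalize('NFKD', word).encode('ascii', 'ignore').decode('utf-8', 'ignore').lower()
        # Strip punctuation; drop tokens that become empty
        word = re.sub(r'[^\w\s]', '', word)
        if not word:
            continue
        # Spell out integers, then drop stop words
        if word.isdigit():
            word = INFLECT_ENGINE.number_to_words(word)
        if word in STOP_WORDS:
            continue
        new_words.append(word)
    return new_words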

@Paksssssss

Thanks man, this saved me.

@wormwithnoname

Thanks! Really great help.
