@zurk
Last active January 11, 2018 08:41
We have three main algorithms in our sourced.ml (https://github.com/src-d/ml); their description can be found in the README. Right now the algorithms do not use our new engine (https://github.com/src-d/engine), so we need to understand how to rearchitect the tool. First, let's see how we can reproduce them.
import re

import Stemmer


class TokenParser:
    """
    Common utilities for splitting and stemming tokens.
    """
    NAME_BREAKUP_RE = re.compile(r"[^a-zA-Z]+")  #: Regexp to split source code identifiers.
    STEM_THRESHOLD = 6  #: We do not stem split parts shorter than or equal to this size.
    MAX_TOKEN_LENGTH = 256  #: We cut identifiers longer than this value.

    def __init__(self, stem_threshold=STEM_THRESHOLD, max_token_length=MAX_TOKEN_LENGTH):
        self._stemmer = Stemmer.Stemmer("english")
        self._stemmer.maxCacheSize = 0
        self._stem_threshold = stem_threshold
        self._max_token_length = max_token_length

    def __call__(self, token):
        return self.process_token(token)

    def process_token(self, token):
        for word in self.split(token):
            yield self.stem(word)

    def stem(self, word):
        if len(word) <= self._stem_threshold:
            return word
        return self._stemmer.stemWord(word)

    def split(self, token):
        token = token.strip()[:self._max_token_length]
        prev_p = [""]

        def ret(name):
            # Lowercase the part; parts shorter than 3 characters are not
            # yielded on their own but glued to the next yielded part.
            r = name.lower()
            if len(name) >= 3:
                yield r
                if prev_p[0]:
                    yield prev_p[0] + r
                    prev_p[0] = ""
            else:
                prev_p[0] = r

        # Split on non-alphabetic characters first, then break each part
        # on case transitions (camelCase boundaries).
        for part in self.NAME_BREAKUP_RE.split(token):
            if not part:
                continue
            prev = part[0]
            pos = 0
            for i in range(1, len(part)):
                this = part[i]
                if prev.islower() and this.isupper():
                    yield from ret(part[pos:i])
                    pos = i
                elif prev.isupper() and this.islower():
                    if 0 < i - 1 - pos <= 3:
                        yield from ret(part[pos:i - 1])
                        pos = i - 1
                    elif i - 1 > pos:
                        yield from ret(part[pos:i])
                        pos = i
                prev = this
            last = part[pos:]
            if last:
                yield from ret(last)

    def __getstate__(self):
        # The stemmer is not picklable, so drop it and recreate it on unpickling.
        state = self.__dict__.copy()
        del state["_stemmer"]
        return state

    def __setstate__(self, state):
        self.__dict__ = state
        self._stemmer = Stemmer.Stemmer("english")


class NoTokenParser:
    """
    Use this class if you do not want any parsing.
    """
    def process_token(self, token):
        return [token]
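
For reference, here is a small usage sketch (not part of the gist) showing how TokenParser splits and stems identifiers; it only assumes the classes above and the PyStemmer package, and the exact stemmed forms produced by the Snowball English stemmer may differ slightly from the comments.

# Hypothetical usage sketch for the classes defined above.
# Requires the PyStemmer package (pip install PyStemmer).
parser = TokenParser()

# camelCase and snake_case identifiers are broken up and lowercased;
# parts longer than STEM_THRESHOLD (6) are stemmed.
print(list(parser.process_token("ParseIdentifierName")))
# -> something like ['parse', 'identifi', 'name']

print(list(parser.split("sorted_array_idx")))
# -> ['sorted', 'array', 'idx']  (split only, no stemming)

print(list(parser.split("ABFeature")))
# -> ['feature', 'abfeature']  (a part shorter than 3 characters is glued
#    onto the next yielded part instead of being emitted on its own)

print(list(NoTokenParser().process_token("ParseIdentifierName")))
# -> ['ParseIdentifierName']  (pass-through)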
marnovo commented Dec 1, 2017

s/soursed/sourced on the title @zurk
