Return all possible ways to cut (tokenize) Thai text.
import re
from collections import defaultdict
from marisa_trie import Trie

wordlist = [li.strip() for li in open('wordlist.txt')]
trie = Trie(wordlist)  # build the trie once, outside the functions


class LatticeString(str):
    ''' String subclass that stores every possible way to cut a chunk.
    '''
    def __new__(cls, value, multi=None, in_dict=True):
        return str.__new__(cls, value)

    def __init__(self, value, multi=None, in_dict=True):
        self.unique = True
        if multi:
            self.multi = list(multi)
            if len(self.multi) > 1:
                self.unique = False
        else:
            self.multi = [value]
        self.in_dict = in_dict  # whether this chunk is a dictionary word


pat_eng = re.compile(r'''(?x)
[-a-zA-Z]+|   # english
\d[\d,\.]*|   # number
[ \t]+|       # space
\r?\n         # newline
''')


def multicut(text):
    ''' Yield LatticeString chunks, one chunk at a time.
    '''
    words_at = defaultdict(list)  # main data structure: position -> words starting there

    def serialize(p, p2):  # helper function: enumerate every word path from p to p2
        for w in words_at[p]:
            p_ = p + len(w)
            if p_ == p2:
                yield w
            elif p_ < p2:
                for path in serialize(p_, p2):
                    yield w + '/' + path

    q = {0}
    last_p = 0  # last position already yielded
    while min(q) < len(text):
        p = min(q)
        q -= {p}  # q.pop, but for a set
        for w in trie.prefixes(text[p:]):
            words_at[p].append(w)
            q.add(p + len(w))
        if len(q) == 1:
            q0 = min(q)
            yield LatticeString(text[last_p:q0], serialize(last_p, q0))
            last_p = q0
        # len(q) == 0 means the text at p is not in the dictionary
        if len(q) == 0:
            m = pat_eng.match(text[p:])
            if m:  # english, number, whitespace
                i = p + m.span()[1]
            else:  # skip as few characters as possible
                for i in range(p, len(text)):
                    ww = trie.prefixes(text[i:])
                    m = pat_eng.match(text[i:])
                    if ww or m:
                        break
                else:
                    i = len(text)
            w = text[p:i]
            words_at[p].append(w)
            yield LatticeString(w, in_dict=False)
            last_p = i
            q.add(i)


def mmcut(text):
    ''' Maximal Matching algorithm for Thai word segmentation.
    '''
    res = []
    for w in multicut(text):
        mm = min(w.multi, key=lambda x: x.count('/'))
        res.extend(mm.split('/'))
    return res
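For reference, a minimal usage sketch. The printed result is only an example and depends on what is in wordlist.txt (here it assumes the file contains กา, กาย, อ้วน and มาก):

    for chunk in multicut('กายอ้วนมาก'):
        # each chunk carries every way it can be cut, plus an in-dict flag
        print(chunk, chunk.multi, chunk.in_dict)

    print(mmcut('กายอ้วนมาก'))   # e.g. ['กาย', 'อ้วน', 'มาก'] with the assumed wordlist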
Rewritten to do maximal matching only, so it runs faster and does not spend time building LatticeString:
https://colab.research.google.com/notebook#fileId=1V1Z657_5eSWPo8rLfVRwA0A5E4vkg7SI
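The notebook's code is not reproduced here; the following is only a rough sketch of the idea under stated assumptions: pick the segmentation with the fewest words by dynamic programming over the same trie, with a crude one-character fallback instead of the full unknown-word handling above. The name mmcut_fast is made up for this sketch.

    def mmcut_fast(text):
        ''' Sketch only: maximal matching (fewest words) without LatticeString.
        '''
        n = len(text)
        best = {0: (0, None)}            # position -> (word count, previous position)
        for p in range(n):
            if p not in best:
                continue
            words = trie.prefixes(text[p:])
            if not words:
                words = [text[p]]        # crude fallback: one unknown character (assumption)
            for w in words:
                p2 = p + len(w)
                if p2 not in best or best[p][0] + 1 < best[p2][0]:
                    best[p2] = (best[p][0] + 1, p)
        res = []                         # walk back from the end to recover the cut
        p = n
        while p:
            prev = best[p][1]
            res.append(text[prev:p])
            p = prev
        return res[::-1]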
Fixed a bug in mmcut for the case 'กายอ้วน': the chunk is now split even when w.unique is true.
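The diff itself is not shown above; as an illustration only, the fix was presumably a change like the following inside mmcut, since a chunk with a single path can still contain several words, e.g. 'กาย/อ้วน':

    # before (assumed): unique chunks were appended whole
    # if w.unique:
    #     res.append(w)                  # 'กายอ้วน' would stay as one token
    # after: always take the path with the fewest cuts and split it
    mm = min(w.multi, key=lambda x: x.count('/'))
    res.extend(mm.split('/'))            # 'กาย/อ้วน' -> ['กาย', 'อ้วน']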