@vanatteveldt
Created February 14, 2023 11:38
Text: Wij zijn Wouter van Atteveldt en Nel Ruigrok.
10.000 nieuwe stamcel- en bloeddonoren na oproep PSV-perschef Thijs Slegers.
Ongeneeslijk ziek Opvallend veel mannen meldden zich aan als donor na een oproep van de ongeneeslijk zieke Slegers, Matchis kreeg 7.000 nieuwe aanmeldingen, Sanquin 3.000.
Model: pdelobelle/robbert-v2-dutch-ner
NER output:
{'entity_group': 'PER', 'score': 0.9998577, 'word': ' Wouter van Atte', 'start': 9, 'end': 24, 'full_word': 'Wouter van Atteveldt'}
{'entity_group': 'PER', 'score': 0.9999995, 'word': ' Nel Ruig', 'start': 33, 'end': 41, 'full_word': 'Nel Ruigrok'}
{'entity_group': 'MISC', 'score': 0.99988997, 'word': ' PSV', 'start': 96, 'end': 99, 'full_word': 'PSV'}
{'entity_group': 'PER', 'score': 0.99999857, 'word': ' Thijs Sleg', 'start': 109, 'end': 119, 'full_word': 'Thijs Slegers'}
{'entity_group': 'PER', 'score': 0.92822623, 'word': ' Sleg', 'start': 231, 'end': 235, 'full_word': 'Slegers'}
{'entity_group': 'PER', 'score': 0.75320077, 'word': ' Match', 'start': 240, 'end': 245, 'full_word': 'Matchis'}
{'entity_group': 'PER', 'score': 0.79860216, 'word': ' Sanquin', 'start': 281, 'end': 288, 'full_word': 'Sanquin'}
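(For context: the aggregated pipeline's 'word' field can stop mid-word, while 'start'/'end' are character offsets into the original text; the 'full_word' field is added by the script below. A minimal illustration of the truncation, using the offsets reported above and a naive whitespace-based extension rather than the tokenizer-aware one the script uses:)

text = "Wij zijn Wouter van Atteveldt en Nel Ruigrok."
# The first PER entity above is reported with start=9, end=24:
print(text[9:24])                    # 'Wouter van Atte' -- cut off mid-word
# Naively extending to the next whitespace recovers the full surface form:
print(text[9:text.index(" ", 24)])   # 'Wouter van Atteveldt'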
from transformers import pipeline
from transformers import AutoTokenizer
import unicodedata

model = "wietsedv/bert-base-dutch-cased-finetuned-conll2002-ner"
model = "pdelobelle/robbert-v2-dutch-ner"  # gives the same problem

text = """Wij zijn Wouter van Atteveldt en Nel Ruigrok.
10.000 nieuwe stamcel- en bloeddonoren na oproep PSV-perschef Thijs Slegers.
Ongeneeslijk ziek Opvallend veel mannen meldden zich aan als donor na een oproep van de ongeneeslijk zieke Slegers, Matchis kreeg 7.000 nieuwe aanmeldingen, Sanquin 3.000.
"""


class WordFinder:
    def __init__(self, model, text):
        self.model = model
        tokenizer = AutoTokenizer.from_pretrained(self.model)
        # Map token ids back to vocabulary strings so sub-word markers can be inspected
        revocab = {id: word for (word, id) in tokenizer.vocab.items()}
        tokens = tokenizer(text, return_offsets_mapping=True)
        self.words = [revocab[id] for id in tokens['input_ids']]
        self.offsets = tokens['offset_mapping']
        # Map character start offsets to token indices, skipping special tokens (empty spans)
        self.tokenmap = {start: i for i, (start, end) in enumerate(tokens['offset_mapping']) if start != end}

    def start_of_word(self, word):
        # Each tokenizer marks word boundaries differently
        if self.model == "wietsedv/bert-base-dutch-cased-finetuned-conll2002-ner":
            return not word.startswith("##")
        if self.model == "pdelobelle/robbert-v2-dutch-ner":
            return unicodedata.category(word[:1]).startswith("P") or word.startswith("\u0120")

    def get_full_word(self, start, end):
        # Extend the entity span until the next token that starts a new word
        end_of_name = end
        for j in range(self.tokenmap[start], len(self.words)):
            end_of_token = self.offsets[j][1]
            if (end_of_token > end) and self.start_of_word(self.words[j]):
                break
            end_of_name = end_of_token
        return text[start:end_of_name]


classifier = pipeline("ner", model=model, aggregation_strategy='simple')
output = classifier([text])
wf = WordFinder(model, text)

print("\nText:", text)
print("\nModel:", model)
print("\nNER output:")
for token in output[0]:
    token['full_word'] = wf.get_full_word(token['start'], token['end'])
    print(token)
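(As an aside, the start_of_word() check above depends on how each tokenizer marks word boundaries. A small sketch, not part of the gist's output, that shows the difference; the exact token splits may vary:)

from transformers import AutoTokenizer

# WordPiece (BERT): continuation pieces within a word carry a '##' prefix
bert_tok = AutoTokenizer.from_pretrained("wietsedv/bert-base-dutch-cased-finetuned-conll2002-ner")
print(bert_tok.tokenize("Wouter van Atteveldt"))

# Byte-level BPE (RobBERT/RoBERTa): pieces that start a new word carry the 'Ġ' (\u0120) marker
robbert_tok = AutoTokenizer.from_pretrained("pdelobelle/robbert-v2-dutch-ner")
print(robbert_tok.tokenize("Wouter van Atteveldt"))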